refactor: improve code quality with safer error handling, trait decomposition, and dead code cleanup (#1997)

安正超
2026-02-28 01:19:47 +08:00
committed by GitHub
parent 7ce23c6b54
commit af6c32efac
46 changed files with 939 additions and 1612 deletions

View File

@@ -34,7 +34,7 @@ use crate::global::GLOBAL_LocalNodeName;
use crate::global::{GLOBAL_LifecycleSys, GLOBAL_TierConfigMgr, get_global_deployment_id};
use crate::store::ECStore;
use crate::store_api::StorageAPI;
use crate::store_api::{GetObjectReader, HTTPRangeSpec, ObjectInfo, ObjectOptions, ObjectToDelete};
use crate::store_api::{GetObjectReader, HTTPRangeSpec, ObjectInfo, ObjectOperations, ObjectOptions, ObjectToDelete};
use crate::tier::warm_backend::WarmBackendGetOpts;
use async_channel::{Receiver as A_Receiver, Sender as A_Sender, bounded};
use bytes::BytesMut;

View File

@@ -15,13 +15,13 @@
use super::metadata::{BucketMetadata, load_bucket_metadata};
use super::quota::BucketQuota;
use super::target::BucketTargets;
use crate::StorageAPI as _;
use crate::bucket::bucket_target_sys::BucketTargetSys;
use crate::bucket::metadata::{BUCKET_LIFECYCLE_CONFIG, load_bucket_metadata_parse};
use crate::bucket::utils::{deserialize, is_meta_bucketname};
use crate::error::{Error, Result, is_err_bucket_not_found};
use crate::global::{GLOBAL_Endpoints, is_dist_erasure, is_erasure, new_object_layer_fn};
use crate::store::ECStore;
use crate::store_api::HealOperations as _;
use futures::future::join_all;
use lazy_static::lazy_static;
use rustfs_common::heal_channel::HealOpts;

View File

@@ -12,12 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::StorageAPI;
use crate::bucket::lifecycle::lifecycle;
use crate::bucket::versioning::VersioningApi;
use crate::bucket::versioning_sys::BucketVersioningSys;
use crate::store::ECStore;
use crate::store_api::{ObjectOptions, ObjectToDelete};
use crate::store_api::{ObjectOperations, ObjectOptions, ObjectToDelete};
use rustfs_lock::MAX_DELETE_LIST;
pub async fn delete_object_versions(api: ECStore, bucket: &str, to_del: &[ObjectToDelete], _lc_event: lifecycle::Event) {
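The `MAX_DELETE_LIST` import bounds how many versions a single delete call may carry. A minimal sketch of the chunking pattern this enables, assuming `MAX_DELETE_LIST` is a `usize` and reusing the `delete_objects` signature from the trait definitions later in this commit; the helper name and `ObjectOptions::default()` are illustrative, not the crate's actual code:

// Hypothetical helper: delete a large version list in bounded batches so no
// single call exceeds the lock layer's MAX_DELETE_LIST limit.
async fn delete_in_batches(api: &ECStore, bucket: &str, to_del: &[ObjectToDelete]) {
    for batch in to_del.chunks(MAX_DELETE_LIST) {
        // ObjectOptions::default() is assumed here for illustration.
        let (_deleted, errs) = api
            .delete_objects(bucket, batch.to_vec(), ObjectOptions::default())
            .await;
        for err in errs.into_iter().flatten() {
            tracing::warn!("batch delete failed: {err}");
        }
    }
}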

View File

@@ -15,8 +15,12 @@
pub mod local_snapshot;
use crate::{
bucket::metadata_sys::get_replication_config, config::com::read_config, disk::DiskAPI, error::Error, store::ECStore,
store_api::StorageAPI,
bucket::metadata_sys::get_replication_config,
config::com::read_config,
disk::DiskAPI,
error::Error,
store::ECStore,
store_api::{BucketOperations, ListOperations},
};
pub use local_snapshot::{
DATA_USAGE_DIR, DATA_USAGE_STATE_DIR, LOCAL_USAGE_SNAPSHOT_VERSION, LocalUsageSnapshot, LocalUsageSnapshotMeta,

View File

@@ -20,186 +20,156 @@ use s3s::{S3Error, S3ErrorCode};
pub type Error = StorageError;
pub type Result<T> = core::result::Result<T, Error>;
/// Storage layer error type covering disk, volume, bucket, object, multipart,
/// erasure-coding, and operational error conditions.
///
/// Variants are organized by domain:
/// - **Disk / Volume**: low-level storage I/O
/// - **Bucket**: bucket-level operations
/// - **Object**: object-level operations (including versioning)
/// - **Multipart**: multipart upload lifecycle
/// - **Erasure / Quorum**: erasure coding and quorum failures
/// - **Operational**: decommission, healing, rate limiting, etc.
/// - **Generic**: I/O, locks, and catch-all errors
#[derive(Debug, thiserror::Error)]
pub enum StorageError {
// ── Disk / Volume ────────────────────────────────────────────────
#[error("Faulty disk")]
FaultyDisk,
#[error("Disk full")]
DiskFull,
#[error("Volume not found")]
VolumeNotFound,
#[error("Volume exists")]
VolumeExists,
#[error("File not found")]
FileNotFound,
#[error("File version not found")]
FileVersionNotFound,
#[error("File name too long")]
FileNameTooLong,
#[error("File access denied")]
FileAccessDenied,
#[error("File is corrupted")]
FileCorrupt,
#[error("Not a regular file")]
IsNotRegular,
#[error("Volume not empty")]
VolumeNotEmpty,
#[error("Volume access denied")]
VolumeAccessDenied,
#[error("Corrupted format")]
CorruptedFormat,
#[error("Corrupted backend")]
CorruptedBackend,
#[error("Unformatted disk")]
UnformattedDisk,
#[error("Disk not found")]
DiskNotFound,
#[error("Drive is root")]
DriveIsRoot,
#[error("Faulty remote disk")]
FaultyRemoteDisk,
#[error("Disk full")]
DiskFull,
#[error("Disk not found")]
DiskNotFound,
#[error("Disk access denied")]
DiskAccessDenied,
#[error("Unexpected error")]
Unexpected,
#[error("Drive is root")]
DriveIsRoot,
#[error("Unformatted disk")]
UnformattedDisk,
#[error("Corrupted format")]
CorruptedFormat,
#[error("Corrupted backend")]
CorruptedBackend,
#[error("Too many open files")]
TooManyOpenFiles,
#[error("No heal required")]
NoHealRequired,
#[error("Volume not found")]
VolumeNotFound,
#[error("Volume exists")]
VolumeExists,
#[error("Volume not empty")]
VolumeNotEmpty,
#[error("Volume access denied")]
VolumeAccessDenied,
#[error("Config not found")]
ConfigNotFound,
#[error("not implemented")]
NotImplemented,
#[error("Invalid arguments provided for {0}/{1}-{2}")]
InvalidArgument(String, String, String),
#[error("method not allowed")]
MethodNotAllowed,
#[error("File not found")]
FileNotFound,
#[error("File version not found")]
FileVersionNotFound,
#[error("File name too long")]
FileNameTooLong,
#[error("File access denied")]
FileAccessDenied,
#[error("File is corrupted")]
FileCorrupt,
#[error("Not a regular file")]
IsNotRegular,
// ── Bucket ───────────────────────────────────────────────────────
#[error("Bucket not found: {0}")]
BucketNotFound(String),
#[error("Bucket exists: {0}")]
BucketExists(String),
#[error("Bucket not empty: {0}")]
BucketNotEmpty(String),
#[error("Bucket name invalid: {0}")]
BucketNameInvalid(String),
#[error("Object name invalid: {0}/{1}")]
ObjectNameInvalid(String, String),
#[error("Bucket exists: {0}")]
BucketExists(String),
#[error("Storage reached its minimum free drive threshold.")]
StorageFull,
#[error("Please reduce your request rate")]
SlowDown,
#[error("Prefix access is denied:{0}/{1}")]
PrefixAccessDenied(String, String),
#[error("Invalid UploadID KeyCombination: {0}/{1}")]
InvalidUploadIDKeyCombination(String, String),
#[error("Malformed UploadID: {0}")]
MalformedUploadID(String),
#[error("Object name too long: {0}/{1}")]
ObjectNameTooLong(String, String),
#[error("Object name contains forward slash as prefix: {0}/{1}")]
ObjectNamePrefixAsSlash(String, String),
// ── Object ───────────────────────────────────────────────────────
#[error("Object not found: {0}/{1}")]
ObjectNotFound(String, String),
#[error("Object name invalid: {0}/{1}")]
ObjectNameInvalid(String, String),
#[error("Object name too long: {0}/{1}")]
ObjectNameTooLong(String, String),
#[error("Object name contains forward slash as prefix: {0}/{1}")]
ObjectNamePrefixAsSlash(String, String),
#[error("Object exists on :{0} as directory {1}")]
ObjectExistsAsDirectory(String, String),
#[error("Version not found: {0}/{1}-{2}")]
VersionNotFound(String, String, String),
#[error("Invalid upload id: {0}/{1}-{2}")]
InvalidUploadID(String, String, String),
#[error("Specified part could not be found. PartNumber {0}, Expected {1}, got {2}")]
InvalidPart(usize, String, String),
#[error("Your proposed upload is smaller than the minimum allowed size. Part {0} size {1} is less than minimum {2}")]
EntityTooSmall(usize, i64, i64),
#[error("Invalid version id: {0}/{1}-{2}")]
InvalidVersionID(String, String, String),
#[error("invalid data movement operation, source and destination pool are the same for : {0}/{1}-{2}")]
DataMovementOverwriteErr(String, String, String),
#[error("Prefix access is denied:{0}/{1}")]
PrefixAccessDenied(String, String),
#[error("Object exists on :{0} as directory {1}")]
ObjectExistsAsDirectory(String, String),
// ── Multipart ────────────────────────────────────────────────────
#[error("Invalid upload id: {0}/{1}-{2}")]
InvalidUploadID(String, String, String),
#[error("Invalid UploadID KeyCombination: {0}/{1}")]
InvalidUploadIDKeyCombination(String, String),
#[error("Malformed UploadID: {0}")]
MalformedUploadID(String),
#[error("Specified part could not be found. PartNumber {0}, Expected {1}, got {2}")]
InvalidPart(usize, String, String),
#[error("Invalid part number: {0}")]
InvalidPartNumber(usize),
#[error("Your proposed upload is smaller than the minimum allowed size. Part {0} size {1} is less than minimum {2}")]
EntityTooSmall(usize, i64, i64),
// ── Erasure / Quorum ─────────────────────────────────────────────
#[error("erasure read quorum")]
ErasureReadQuorum,
#[error("erasure write quorum")]
ErasureWriteQuorum,
#[error("Storage resources are insufficient for the read operation: {0}/{1}")]
InsufficientReadQuorum(String, String),
#[error("Storage resources are insufficient for the write operation: {0}/{1}")]
InsufficientWriteQuorum(String, String),
#[error("not first disk")]
NotFirstDisk,
#[error("first disk wait")]
FirstDiskWait,
// ── Operational ──────────────────────────────────────────────────
#[error("Storage reached its minimum free drive threshold.")]
StorageFull,
#[error("Please reduce your request rate")]
SlowDown,
#[error("Decommission not started")]
DecommissionNotStarted,
#[error("Decommission already running")]
DecommissionAlreadyRunning,
#[error("No heal required")]
NoHealRequired,
#[error("DoneForNow")]
DoneForNow,
#[error("erasure read quorum")]
ErasureReadQuorum,
#[error("erasure write quorum")]
ErasureWriteQuorum,
#[error("not first disk")]
NotFirstDisk,
#[error("first disk wait")]
FirstDiskWait,
#[error("Io error: {0}")]
Io(std::io::Error),
#[error("Lock error: {0}")]
Lock(#[from] rustfs_lock::LockError),
#[error("Config not found")]
ConfigNotFound,
#[error("Precondition failed")]
PreconditionFailed,
#[error("Not modified")]
NotModified,
#[error("Invalid part number: {0}")]
InvalidPartNumber(usize),
#[error("Invalid range specified: {0}")]
InvalidRangeSpec(String),
// ── Generic ──────────────────────────────────────────────────────
#[error("Unexpected error")]
Unexpected,
#[error("not implemented")]
NotImplemented,
#[error("Invalid arguments provided for {0}/{1}-{2}")]
InvalidArgument(String, String, String),
#[error("method not allowed")]
MethodNotAllowed,
#[error("Io error: {0}")]
Io(std::io::Error),
#[error("Lock error: {0}")]
Lock(#[from] rustfs_lock::LockError),
}
impl StorageError {
@@ -209,6 +179,29 @@ impl StorageError {
{
StorageError::Io(std::io::Error::other(error))
}
pub fn is_not_found(&self) -> bool {
matches!(
self,
StorageError::FileNotFound
| StorageError::ObjectNotFound(_, _)
| StorageError::FileVersionNotFound
| StorageError::VersionNotFound(_, _, _)
| StorageError::VolumeNotFound
| StorageError::DiskNotFound
| StorageError::BucketNotFound(_)
)
}
pub fn is_quorum_error(&self) -> bool {
matches!(
self,
StorageError::ErasureReadQuorum
| StorageError::ErasureWriteQuorum
| StorageError::InsufficientReadQuorum(_, _)
| StorageError::InsufficientWriteQuorum(_, _)
)
}
}
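These two predicates replace ad-hoc `matches!` chains at call sites, so the not-found and quorum classifications live in one place. A hypothetical call site using them (the retry policy shown is illustrative, not part of this commit):

// Hypothetical: centralizing retry/log decisions on the new predicates.
fn should_retry(err: &StorageError) -> bool {
    // Quorum failures are typically transient; they may clear once enough
    // disks rejoin, so a bounded retry is reasonable.
    err.is_quorum_error()
}

fn log_lookup(bucket: &str, object: &str, err: &StorageError) {
    if err.is_not_found() {
        tracing::debug!("{bucket}/{object} not found: {err}");
    } else {
        tracing::error!("lookup failed for {bucket}/{object}: {err}");
    }
}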
impl From<DiskError> for StorageError {
@@ -724,24 +717,6 @@ pub fn is_all_volume_not_found(errs: &[Option<Error>]) -> bool {
!errs.is_empty()
}
// pub fn is_all_not_found(errs: &[Option<Error>]) -> bool {
// for err in errs.iter() {
// if let Some(err) = err {
// if let Some(err) = err.downcast_ref::<DiskError>() {
// match err {
// DiskError::FileNotFound | DiskError::VolumeNotFound | &DiskError::FileVersionNotFound => {
// continue;
// }
// _ => return false,
// }
// }
// }
// return false;
// }
// !errs.is_empty()
// }
pub fn to_object_err(err: Error, params: Vec<&str>) -> Error {
match err {
StorageError::DiskFull => StorageError::StorageFull,
@@ -938,10 +913,6 @@ pub fn storage_to_object_err(err: Error, params: Vec<&str>) -> S3Error {
object = decode_dir_object(params[1]);
}
match storage_err {
/*StorageError::NotImplemented => s3_error!(NotImplemented),
StorageError::InvalidArgument(bucket, object, version_id) => {
s3_error!(InvalidArgument, "Invalid arguments provided for {}/{}-{}", bucket, object, version_id)
}*/
StorageError::MethodNotAllowed => S3Error::with_message(
S3ErrorCode::MethodNotAllowed,
ObjectApiError::MethodNotAllowed(GenericError {
@@ -951,73 +922,6 @@ pub fn storage_to_object_err(err: Error, params: Vec<&str>) -> S3Error {
})
.to_string(),
),
/*StorageError::BucketNotFound(bucket) => {
s3_error!(NoSuchBucket, "bucket not found {}", bucket)
}
StorageError::BucketNotEmpty(bucket) => s3_error!(BucketNotEmpty, "bucket not empty {}", bucket),
StorageError::BucketNameInvalid(bucket) => s3_error!(InvalidBucketName, "invalid bucket name {}", bucket),
StorageError::ObjectNameInvalid(bucket, object) => {
s3_error!(InvalidArgument, "invalid object name {}/{}", bucket, object)
}
StorageError::BucketExists(bucket) => s3_error!(BucketAlreadyExists, "{}", bucket),
StorageError::StorageFull => s3_error!(ServiceUnavailable, "Storage reached its minimum free drive threshold."),
StorageError::SlowDown => s3_error!(SlowDown, "Please reduce your request rate"),
StorageError::PrefixAccessDenied(bucket, object) => {
s3_error!(AccessDenied, "PrefixAccessDenied {}/{}", bucket, object)
}
StorageError::InvalidUploadIDKeyCombination(bucket, object) => {
s3_error!(InvalidArgument, "Invalid UploadID KeyCombination: {}/{}", bucket, object)
}
StorageError::MalformedUploadID(bucket) => s3_error!(InvalidArgument, "Malformed UploadID: {}", bucket),
StorageError::ObjectNameTooLong(bucket, object) => {
s3_error!(InvalidArgument, "Object name too long: {}/{}", bucket, object)
}
StorageError::ObjectNamePrefixAsSlash(bucket, object) => {
s3_error!(InvalidArgument, "Object name contains forward slash as prefix: {}/{}", bucket, object)
}
StorageError::ObjectNotFound(bucket, object) => s3_error!(NoSuchKey, "{}/{}", bucket, object),
StorageError::VersionNotFound(bucket, object, version_id) => {
s3_error!(NoSuchVersion, "{}/{}/{}", bucket, object, version_id)
}
StorageError::InvalidUploadID(bucket, object, version_id) => {
s3_error!(InvalidPart, "Invalid upload id: {}/{}-{}", bucket, object, version_id)
}
StorageError::InvalidVersionID(bucket, object, version_id) => {
s3_error!(InvalidArgument, "Invalid version id: {}/{}-{}", bucket, object, version_id)
}
// extended
StorageError::DataMovementOverwriteErr(bucket, object, version_id) => s3_error!(
InvalidArgument,
"invalid data movement operation, source and destination pool are the same for : {}/{}-{}",
bucket,
object,
version_id
),
// extended
StorageError::ObjectExistsAsDirectory(bucket, object) => {
s3_error!(InvalidArgument, "Object exists on :{} as directory {}", bucket, object)
}
StorageError::InsufficientReadQuorum => {
s3_error!(SlowDown, "Storage resources are insufficient for the read operation")
}
StorageError::InsufficientWriteQuorum => {
s3_error!(SlowDown, "Storage resources are insufficient for the write operation")
}
StorageError::DecommissionNotStarted => s3_error!(InvalidArgument, "Decommission Not Started"),
StorageError::VolumeNotFound(bucket) => {
s3_error!(NoSuchBucket, "bucket not found {}", bucket)
}
StorageError::InvalidPart(bucket, object, version_id) => {
s3_error!(
InvalidPart,
"Specified part could not be found. PartNumber {}, Expected {}, got {}",
bucket,
object,
version_id
)
}
StorageError::DoneForNow => s3_error!(InternalError, "DoneForNow"),*/
_ => s3s::S3Error::with_message(S3ErrorCode::Custom("err".into()), err.to_string()),
}
}
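For orientation, a hedged sketch of how a request handler feeds this mapping: normalize the raw storage error against the request's bucket/object pair, then hand back an `s3s` error. The handler shape and `ObjectOptions::default()` are hypothetical; only `storage_to_object_err` comes from this file:

// Hypothetical handler tail converting a storage failure into an S3 response.
async fn object_info_or_s3_err(store: &ECStore, bucket: &str, object: &str) -> S3Result<ObjectInfo> {
    store
        .get_object_info(bucket, object, &ObjectOptions::default()) // Default assumed
        .await
        .map_err(|e| storage_to_object_err(e, vec![bucket, object]))
}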

View File

@@ -27,7 +27,8 @@ use crate::new_object_layer_fn;
use crate::notification_sys::get_global_notification_sys;
use crate::set_disk::SetDisks;
use crate::store_api::{
BucketOptions, CompletePart, GetObjectReader, MakeBucketOptions, ObjectIO, ObjectOptions, PutObjReader, StorageAPI,
BucketOperations, BucketOptions, CompletePart, GetObjectReader, HealOperations, MakeBucketOptions, MultipartOperations,
ObjectIO, ObjectOperations, ObjectOptions, PutObjReader, StorageAPI,
};
use crate::{sets::Sets, store::ECStore};
use byteorder::{ByteOrder, LittleEndian, WriteBytesExt};

View File

@@ -22,7 +22,9 @@ use crate::global::get_global_endpoints;
use crate::pools::ListCallback;
use crate::set_disk::SetDisks;
use crate::store::ECStore;
use crate::store_api::{CompletePart, GetObjectReader, ObjectIO, ObjectOptions, PutObjReader};
use crate::store_api::{
CompletePart, GetObjectReader, MultipartOperations, ObjectIO, ObjectOperations, ObjectOptions, PutObjReader,
};
use http::HeaderMap;
use rustfs_common::defer;
use rustfs_filemeta::{FileInfo, MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams};

View File

@@ -52,9 +52,9 @@ use crate::{
event_notification::{EventArgs, send_event},
global::{GLOBAL_LOCAL_DISK_MAP, GLOBAL_LOCAL_DISK_SET_DRIVES, get_global_deployment_id, is_dist_erasure},
store_api::{
BucketInfo, BucketOptions, CompletePart, DeleteBucketOptions, DeletedObject, GetObjectReader, HTTPRangeSpec,
ListMultipartsInfo, ListObjectsV2Info, MakeBucketOptions, MultipartInfo, MultipartUploadResult, ObjectIO, ObjectInfo,
PartInfo, PutObjReader, StorageAPI,
BucketInfo, BucketOperations, BucketOptions, CompletePart, DeleteBucketOptions, DeletedObject, GetObjectReader,
HTTPRangeSpec, HealOperations, ListMultipartsInfo, ListObjectsV2Info, ListOperations, MakeBucketOptions, MultipartInfo,
MultipartOperations, MultipartUploadResult, ObjectIO, ObjectInfo, ObjectOperations, PartInfo, PutObjReader, StorageAPI,
},
store_init::load_format_erasure,
};
@@ -842,10 +842,20 @@ impl StorageAPI for SetDisks {
get_storage_info(&local_disks, &local_endpoints).await
}
#[tracing::instrument(skip(self))]
async fn list_bucket(&self, _opts: &BucketOptions) -> Result<Vec<BucketInfo>> {
async fn get_disks(&self, _pool_idx: usize, _set_idx: usize) -> Result<Vec<Option<DiskStore>>> {
Ok(self.get_disks_internal().await)
}
#[tracing::instrument(skip(self))]
fn set_drive_counts(&self) -> Vec<usize> {
unimplemented!()
}
}
#[async_trait::async_trait]
impl BucketOperations for SetDisks {
#[tracing::instrument(skip(self))]
async fn make_bucket(&self, _bucket: &str, _opts: &MakeBucketOptions) -> Result<()> {
unimplemented!()
@@ -856,6 +866,19 @@ impl StorageAPI for SetDisks {
unimplemented!()
}
#[tracing::instrument(skip(self))]
async fn list_bucket(&self, _opts: &BucketOptions) -> Result<Vec<BucketInfo>> {
unimplemented!()
}
#[tracing::instrument(skip(self))]
async fn delete_bucket(&self, _bucket: &str, _opts: &DeleteBucketOptions) -> Result<()> {
unimplemented!()
}
}
#[async_trait::async_trait]
impl ObjectOperations for SetDisks {
#[tracing::instrument(skip(self))]
async fn copy_object(
&self,
@@ -1400,45 +1423,6 @@ impl StorageAPI for SetDisks {
Ok(obj_info)
}
#[tracing::instrument(skip(self))]
async fn list_objects_v2(
self: Arc<Self>,
_bucket: &str,
_prefix: &str,
_continuation_token: Option<String>,
_delimiter: Option<String>,
_max_keys: i32,
_fetch_owner: bool,
_start_after: Option<String>,
_incl_deleted: bool,
) -> Result<ListObjectsV2Info> {
unimplemented!()
}
#[tracing::instrument(skip(self))]
async fn list_object_versions(
self: Arc<Self>,
_bucket: &str,
_prefix: &str,
_marker: Option<String>,
_version_marker: Option<String>,
_delimiter: Option<String>,
_max_keys: i32,
) -> Result<ListObjectVersionsInfo> {
unimplemented!()
}
async fn walk(
self: Arc<Self>,
_rx: CancellationToken,
_bucket: &str,
_prefix: &str,
_result: Sender<ObjectInfoOrErr>,
_opts: WalkOptions,
) -> Result<()> {
unimplemented!()
}
#[tracing::instrument(skip(self))]
async fn get_object_info(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
// Acquire a shared read-lock to protect consistency during info fetch
@@ -1890,6 +1874,60 @@ impl StorageAPI for SetDisks {
self.put_object_tags(bucket, object, "", opts).await
}
#[tracing::instrument(skip(self))]
async fn verify_object_integrity(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
let get_object_reader = <Self as ObjectIO>::get_object_reader(self, bucket, object, None, HeaderMap::new(), opts).await?;
// Stream to sink to avoid loading entire object into memory during verification
let mut reader = get_object_reader.stream;
tokio::io::copy(&mut reader, &mut tokio::io::sink()).await?;
Ok(())
}
}
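Copying into `tokio::io::sink()` drains the object stream in constant memory and yields the byte count. A standalone sketch of the same pattern with an explicit length check added for illustration (the check is not part of this commit):

use tokio::io::{copy, sink, AsyncRead};

// Drain any async reader without buffering; fail if the drained byte count
// disagrees with the expected object size. Memory use is constant.
async fn drain_and_check<R: AsyncRead + Unpin>(mut reader: R, expected: u64) -> std::io::Result<()> {
    let copied = copy(&mut reader, &mut sink()).await?;
    if copied != expected {
        return Err(std::io::Error::other(format!("size mismatch: got {copied}, want {expected}")));
    }
    Ok(())
}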
#[async_trait::async_trait]
impl ListOperations for SetDisks {
#[tracing::instrument(skip(self))]
async fn list_objects_v2(
self: Arc<Self>,
_bucket: &str,
_prefix: &str,
_continuation_token: Option<String>,
_delimiter: Option<String>,
_max_keys: i32,
_fetch_owner: bool,
_start_after: Option<String>,
_incl_deleted: bool,
) -> Result<ListObjectsV2Info> {
unimplemented!()
}
#[tracing::instrument(skip(self))]
async fn list_object_versions(
self: Arc<Self>,
_bucket: &str,
_prefix: &str,
_marker: Option<String>,
_version_marker: Option<String>,
_delimiter: Option<String>,
_max_keys: i32,
) -> Result<ListObjectVersionsInfo> {
unimplemented!()
}
async fn walk(
self: Arc<Self>,
_rx: CancellationToken,
_bucket: &str,
_prefix: &str,
_result: Sender<ObjectInfoOrErr>,
_opts: WalkOptions,
) -> Result<()> {
unimplemented!()
}
}
#[async_trait::async_trait]
impl MultipartOperations for SetDisks {
#[tracing::instrument(skip(self))]
async fn copy_object_part(
&self,
@@ -2922,22 +2960,10 @@ impl StorageAPI for SetDisks {
Ok(ObjectInfo::from_file_info(&fi, bucket, object, opts.versioned || opts.version_suspended))
}
}
#[tracing::instrument(skip(self))]
async fn get_disks(&self, _pool_idx: usize, _set_idx: usize) -> Result<Vec<Option<DiskStore>>> {
Ok(self.get_disks_internal().await)
}
#[tracing::instrument(skip(self))]
fn set_drive_counts(&self) -> Vec<usize> {
unimplemented!()
}
#[tracing::instrument(skip(self))]
async fn delete_bucket(&self, _bucket: &str, _opts: &DeleteBucketOptions) -> Result<()> {
unimplemented!()
}
#[async_trait::async_trait]
impl HealOperations for SetDisks {
#[tracing::instrument(skip(self))]
async fn heal_format(&self, _dry_run: bool) -> Result<(HealResultItem, Option<Error>)> {
unimplemented!()
@@ -3023,15 +3049,6 @@ impl StorageAPI for SetDisks {
async fn check_abandoned_parts(&self, _bucket: &str, _object: &str, _opts: &HealOpts) -> Result<()> {
unimplemented!()
}
#[tracing::instrument(skip(self))]
async fn verify_object_integrity(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
let get_object_reader = <Self as ObjectIO>::get_object_reader(self, bucket, object, None, HeaderMap::new(), opts).await?;
// Stream to sink to avoid loading entire object into memory during verification
let mut reader = get_object_reader.stream;
tokio::io::copy(&mut reader, &mut tokio::io::sink()).await?;
Ok(())
}
}
#[derive(Debug, PartialEq, Eq)]

View File

@@ -28,9 +28,10 @@ use crate::{
global::{GLOBAL_LOCAL_DISK_SET_DRIVES, get_global_lock_clients, is_dist_erasure},
set_disk::SetDisks,
store_api::{
BucketInfo, BucketOptions, CompletePart, DeleteBucketOptions, DeletedObject, GetObjectReader, HTTPRangeSpec,
ListMultipartsInfo, ListObjectVersionsInfo, ListObjectsV2Info, MakeBucketOptions, MultipartInfo, MultipartUploadResult,
ObjectIO, ObjectInfo, ObjectOptions, ObjectToDelete, PartInfo, PutObjReader, StorageAPI,
BucketInfo, BucketOperations, BucketOptions, CompletePart, DeleteBucketOptions, DeletedObject, GetObjectReader,
HTTPRangeSpec, HealOperations, ListMultipartsInfo, ListObjectVersionsInfo, ListObjectsV2Info, ListOperations,
MakeBucketOptions, MultipartInfo, MultipartOperations, MultipartUploadResult, ObjectIO, ObjectInfo, ObjectOperations,
ObjectOptions, ObjectToDelete, PartInfo, PutObjReader, StorageAPI,
},
store_init::{check_format_erasure_values, get_format_erasure_in_quorum, load_format_erasure_all, save_format_file},
};
@@ -357,56 +358,7 @@ impl ObjectIO for Sets {
}
#[async_trait::async_trait]
impl StorageAPI for Sets {
#[tracing::instrument(skip(self))]
async fn new_ns_lock(&self, bucket: &str, object: &str) -> Result<NamespaceLockWrapper> {
self.disk_set[0].new_ns_lock(bucket, object).await
}
#[tracing::instrument(skip(self))]
async fn backend_info(&self) -> rustfs_madmin::BackendInfo {
unimplemented!()
}
#[tracing::instrument(skip(self))]
async fn storage_info(&self) -> rustfs_madmin::StorageInfo {
let mut futures = Vec::with_capacity(self.disk_set.len());
for set in self.disk_set.iter() {
futures.push(set.storage_info())
}
let results = join_all(futures).await;
let mut disks = Vec::new();
for res in results.into_iter() {
disks.extend_from_slice(&res.disks);
}
rustfs_madmin::StorageInfo {
disks,
..Default::default()
}
}
#[tracing::instrument(skip(self))]
async fn local_storage_info(&self) -> rustfs_madmin::StorageInfo {
let mut futures = Vec::with_capacity(self.disk_set.len());
for set in self.disk_set.iter() {
futures.push(set.local_storage_info())
}
let results = join_all(futures).await;
let mut disks = Vec::new();
for res in results.into_iter() {
disks.extend_from_slice(&res.disks);
}
rustfs_madmin::StorageInfo {
disks,
..Default::default()
}
}
impl BucketOperations for Sets {
#[tracing::instrument(skip(self))]
async fn make_bucket(&self, _bucket: &str, _opts: &MakeBucketOptions) -> Result<()> {
unimplemented!()
@@ -425,51 +377,26 @@ impl StorageAPI for Sets {
async fn delete_bucket(&self, _bucket: &str, _opts: &DeleteBucketOptions) -> Result<()> {
unimplemented!()
}
}
#[tracing::instrument(skip(self))]
async fn list_objects_v2(
self: Arc<Self>,
_bucket: &str,
_prefix: &str,
_continuation_token: Option<String>,
_delimiter: Option<String>,
_max_keys: i32,
_fetch_owner: bool,
_start_after: Option<String>,
_incl_deleted: bool,
) -> Result<ListObjectsV2Info> {
unimplemented!()
}
#[tracing::instrument(skip(self))]
async fn list_object_versions(
self: Arc<Self>,
_bucket: &str,
_prefix: &str,
_marker: Option<String>,
_version_marker: Option<String>,
_delimiter: Option<String>,
_max_keys: i32,
) -> Result<ListObjectVersionsInfo> {
unimplemented!()
}
async fn walk(
self: Arc<Self>,
_rx: CancellationToken,
_bucket: &str,
_prefix: &str,
_result: tokio::sync::mpsc::Sender<ObjectInfoOrErr>,
_opts: WalkOptions,
) -> Result<()> {
unimplemented!()
}
#[async_trait::async_trait]
impl ObjectOperations for Sets {
#[tracing::instrument(skip(self))]
async fn get_object_info(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
self.get_disks_by_key(object).get_object_info(bucket, object, opts).await
}
#[tracing::instrument(level = "debug", skip(self))]
async fn verify_object_integrity(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
let gor = self.get_object_reader(bucket, object, None, HeaderMap::new(), opts).await?;
let mut reader = gor.stream;
// Stream data to sink instead of reading all into memory to prevent OOM
tokio::io::copy(&mut reader, &mut tokio::io::sink()).await?;
Ok(())
}
#[tracing::instrument(skip(self))]
async fn copy_object(
&self,
@@ -600,20 +527,90 @@ impl StorageAPI for Sets {
(del_objects, del_errs)
}
async fn list_object_parts(
&self,
bucket: &str,
object: &str,
upload_id: &str,
part_number_marker: Option<usize>,
max_parts: usize,
opts: &ObjectOptions,
) -> Result<ListPartsInfo> {
self.get_disks_by_key(object)
.list_object_parts(bucket, object, upload_id, part_number_marker, max_parts, opts)
.await
}
#[tracing::instrument(skip(self))]
async fn put_object_metadata(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
self.get_disks_by_key(object).put_object_metadata(bucket, object, opts).await
}
#[tracing::instrument(skip(self))]
async fn get_object_tags(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<String> {
self.get_disks_by_key(object).get_object_tags(bucket, object, opts).await
}
#[tracing::instrument(level = "debug", skip(self))]
async fn put_object_tags(&self, bucket: &str, object: &str, tags: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
self.get_disks_by_key(object)
.put_object_tags(bucket, object, tags, opts)
.await
}
#[tracing::instrument(skip(self))]
async fn delete_object_tags(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
self.get_disks_by_key(object).delete_object_tags(bucket, object, opts).await
}
#[tracing::instrument(skip(self))]
async fn add_partial(&self, bucket: &str, object: &str, version_id: &str) -> Result<()> {
self.get_disks_by_key(object).add_partial(bucket, object, version_id).await
}
#[tracing::instrument(skip(self))]
async fn transition_object(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
self.get_disks_by_key(object).transition_object(bucket, object, opts).await
}
#[tracing::instrument(skip(self))]
async fn restore_transitioned_object(self: Arc<Self>, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
self.get_disks_by_key(object)
.restore_transitioned_object(bucket, object, opts)
.await
}
}
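Every method in this impl has the same shape: resolve the owning `SetDisks` with `get_disks_by_key(object)` and forward the call. A minimal sketch of that key-to-set dispatch, assuming a hash-based placement (the real distribution inside `get_disks_by_key` may differ):

use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};

// Illustrative only: map an object key onto one of `set_count` erasure sets.
fn set_index_for_key(key: &str, set_count: usize) -> usize {
    let mut hasher = DefaultHasher::new();
    key.hash(&mut hasher);
    (hasher.finish() as usize) % set_count
}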
#[async_trait::async_trait]
impl ListOperations for Sets {
#[tracing::instrument(skip(self))]
async fn list_objects_v2(
self: Arc<Self>,
_bucket: &str,
_prefix: &str,
_continuation_token: Option<String>,
_delimiter: Option<String>,
_max_keys: i32,
_fetch_owner: bool,
_start_after: Option<String>,
_incl_deleted: bool,
) -> Result<ListObjectsV2Info> {
unimplemented!()
}
#[tracing::instrument(skip(self))]
async fn list_object_versions(
self: Arc<Self>,
_bucket: &str,
_prefix: &str,
_marker: Option<String>,
_version_marker: Option<String>,
_delimiter: Option<String>,
_max_keys: i32,
) -> Result<ListObjectVersionsInfo> {
unimplemented!()
}
async fn walk(
self: Arc<Self>,
_rx: CancellationToken,
_bucket: &str,
_prefix: &str,
_result: tokio::sync::mpsc::Sender<ObjectInfoOrErr>,
_opts: WalkOptions,
) -> Result<()> {
unimplemented!()
}
}
#[async_trait::async_trait]
impl MultipartOperations for Sets {
#[tracing::instrument(skip(self))]
async fn list_multipart_uploads(
&self,
@@ -633,23 +630,6 @@ impl StorageAPI for Sets {
self.get_disks_by_key(object).new_multipart_upload(bucket, object, opts).await
}
#[tracing::instrument(skip(self))]
async fn transition_object(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
self.get_disks_by_key(object).transition_object(bucket, object, opts).await
}
#[tracing::instrument(skip(self))]
async fn add_partial(&self, bucket: &str, object: &str, version_id: &str) -> Result<()> {
self.get_disks_by_key(object).add_partial(bucket, object, version_id).await
}
#[tracing::instrument(skip(self))]
async fn restore_transitioned_object(self: Arc<Self>, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
self.get_disks_by_key(object)
.restore_transitioned_object(bucket, object, opts)
.await
}
#[tracing::instrument(skip(self))]
async fn copy_object_part(
&self,
@@ -696,6 +676,20 @@ impl StorageAPI for Sets {
.await
}
async fn list_object_parts(
&self,
bucket: &str,
object: &str,
upload_id: &str,
part_number_marker: Option<usize>,
max_parts: usize,
opts: &ObjectOptions,
) -> Result<ListPartsInfo> {
self.get_disks_by_key(object)
.list_object_parts(bucket, object, upload_id, part_number_marker, max_parts, opts)
.await
}
#[tracing::instrument(skip(self))]
async fn abort_multipart_upload(&self, bucket: &str, object: &str, upload_id: &str, opts: &ObjectOptions) -> Result<()> {
self.get_disks_by_key(object)
@@ -716,39 +710,10 @@ impl StorageAPI for Sets {
.complete_multipart_upload(bucket, object, upload_id, uploaded_parts, opts)
.await
}
}
#[tracing::instrument(skip(self))]
async fn get_disks(&self, _pool_idx: usize, _set_idx: usize) -> Result<Vec<Option<DiskStore>>> {
unimplemented!()
}
#[tracing::instrument(skip(self))]
fn set_drive_counts(&self) -> Vec<usize> {
unimplemented!()
}
#[tracing::instrument(skip(self))]
async fn put_object_metadata(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
self.get_disks_by_key(object).put_object_metadata(bucket, object, opts).await
}
#[tracing::instrument(skip(self))]
async fn get_object_tags(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<String> {
self.get_disks_by_key(object).get_object_tags(bucket, object, opts).await
}
#[tracing::instrument(level = "debug", skip(self))]
async fn put_object_tags(&self, bucket: &str, object: &str, tags: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
self.get_disks_by_key(object)
.put_object_tags(bucket, object, tags, opts)
.await
}
#[tracing::instrument(skip(self))]
async fn delete_object_tags(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
self.get_disks_by_key(object).delete_object_tags(bucket, object, opts).await
}
#[async_trait::async_trait]
impl HealOperations for Sets {
#[tracing::instrument(skip(self))]
async fn heal_format(&self, dry_run: bool) -> Result<(HealResultItem, Option<Error>)> {
let (disks, _) = init_storage_disks_with_errors(
@@ -857,16 +822,68 @@ impl StorageAPI for Sets {
async fn check_abandoned_parts(&self, _bucket: &str, _object: &str, _opts: &HealOpts) -> Result<()> {
unimplemented!()
}
}
#[tracing::instrument(level = "debug", skip(self))]
async fn verify_object_integrity(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
let gor = self.get_object_reader(bucket, object, None, HeaderMap::new(), opts).await?;
let mut reader = gor.stream;
// Stream data to sink instead of reading all into memory to prevent OOM
tokio::io::copy(&mut reader, &mut tokio::io::sink()).await?;
Ok(())
}
#[async_trait::async_trait]
impl StorageAPI for Sets {
#[tracing::instrument(skip(self))]
async fn new_ns_lock(&self, bucket: &str, object: &str) -> Result<NamespaceLockWrapper> {
self.disk_set[0].new_ns_lock(bucket, object).await
}
#[tracing::instrument(skip(self))]
async fn backend_info(&self) -> rustfs_madmin::BackendInfo {
unimplemented!()
}
#[tracing::instrument(skip(self))]
async fn storage_info(&self) -> rustfs_madmin::StorageInfo {
let mut futures = Vec::with_capacity(self.disk_set.len());
for set in self.disk_set.iter() {
futures.push(set.storage_info())
}
let results = join_all(futures).await;
let mut disks = Vec::new();
for res in results.into_iter() {
disks.extend_from_slice(&res.disks);
}
rustfs_madmin::StorageInfo {
disks,
..Default::default()
}
}
#[tracing::instrument(skip(self))]
async fn local_storage_info(&self) -> rustfs_madmin::StorageInfo {
let mut futures = Vec::with_capacity(self.disk_set.len());
for set in self.disk_set.iter() {
futures.push(set.local_storage_info())
}
let results = join_all(futures).await;
let mut disks = Vec::new();
for res in results.into_iter() {
disks.extend_from_slice(&res.disks);
}
rustfs_madmin::StorageInfo {
disks,
..Default::default()
}
}
#[tracing::instrument(skip(self))]
async fn get_disks(&self, _pool_idx: usize, _set_idx: usize) -> Result<Vec<Option<DiskStore>>> {
unimplemented!()
}
#[tracing::instrument(skip(self))]
fn set_drive_counts(&self) -> Vec<usize> {
unimplemented!()
}
}

View File

@@ -58,9 +58,9 @@ use crate::{
rpc::S3PeerSys,
sets::Sets,
store_api::{
BucketInfo, BucketOptions, CompletePart, DeleteBucketOptions, DeletedObject, GetObjectReader, HTTPRangeSpec,
ListObjectsV2Info, MakeBucketOptions, MultipartUploadResult, ObjectInfo, ObjectOptions, ObjectToDelete, PartInfo,
PutObjReader, StorageAPI,
BucketInfo, BucketOperations, BucketOptions, CompletePart, DeleteBucketOptions, DeletedObject, GetObjectReader,
HTTPRangeSpec, HealOperations, ListObjectsV2Info, ListOperations, MakeBucketOptions, MultipartOperations,
MultipartUploadResult, ObjectInfo, ObjectOperations, ObjectOptions, ObjectToDelete, PartInfo, PutObjReader, StorageAPI,
},
store_init,
};
@@ -259,24 +259,7 @@ lazy_static! {
}
#[async_trait::async_trait]
impl StorageAPI for ECStore {
#[instrument(skip(self))]
async fn new_ns_lock(&self, bucket: &str, object: &str) -> Result<NamespaceLockWrapper> {
self.handle_new_ns_lock(bucket, object).await
}
#[instrument(skip(self))]
async fn backend_info(&self) -> rustfs_madmin::BackendInfo {
self.handle_backend_info().await
}
#[instrument(skip(self))]
async fn storage_info(&self) -> rustfs_madmin::StorageInfo {
self.handle_storage_info().await
}
#[instrument(skip(self))]
async fn local_storage_info(&self) -> rustfs_madmin::StorageInfo {
self.handle_local_storage_info().await
}
impl BucketOperations for ECStore {
#[instrument(skip(self))]
async fn make_bucket(&self, bucket: &str, opts: &MakeBucketOptions) -> Result<()> {
self.handle_make_bucket(bucket, opts).await
@@ -294,7 +277,91 @@ impl StorageAPI for ECStore {
async fn delete_bucket(&self, bucket: &str, opts: &DeleteBucketOptions) -> Result<()> {
self.handle_delete_bucket(bucket, opts).await
}
}
#[async_trait::async_trait]
impl ObjectOperations for ECStore {
#[instrument(skip(self))]
async fn get_object_info(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
self.handle_get_object_info(bucket, object, opts).await
}
async fn verify_object_integrity(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
self.handle_verify_object_integrity(bucket, object, opts).await
}
// TODO: review
#[instrument(skip(self))]
async fn copy_object(
&self,
src_bucket: &str,
src_object: &str,
dst_bucket: &str,
dst_object: &str,
src_info: &mut ObjectInfo,
src_opts: &ObjectOptions,
dst_opts: &ObjectOptions,
) -> Result<ObjectInfo> {
self.handle_copy_object(src_bucket, src_object, dst_bucket, dst_object, src_info, src_opts, dst_opts)
.await
}
#[instrument(skip(self))]
async fn delete_object_version(&self, bucket: &str, object: &str, fi: &FileInfo, force_del_marker: bool) -> Result<()> {
self.handle_delete_object_version(bucket, object, fi, force_del_marker).await
}
#[instrument(skip(self))]
async fn delete_object(&self, bucket: &str, object: &str, opts: ObjectOptions) -> Result<ObjectInfo> {
self.handle_delete_object(bucket, object, opts).await
}
// TODO: review
#[instrument(skip(self))]
async fn delete_objects(
&self,
bucket: &str,
objects: Vec<ObjectToDelete>,
opts: ObjectOptions,
) -> (Vec<DeletedObject>, Vec<Option<Error>>) {
self.handle_delete_objects(bucket, objects, opts).await
}
#[instrument(skip(self))]
async fn put_object_metadata(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
self.handle_put_object_metadata(bucket, object, opts).await
}
#[instrument(skip(self))]
async fn get_object_tags(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<String> {
self.handle_get_object_tags(bucket, object, opts).await
}
#[instrument(level = "debug", skip(self))]
async fn put_object_tags(&self, bucket: &str, object: &str, tags: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
self.handle_put_object_tags(bucket, object, tags, opts).await
}
#[instrument(skip(self))]
async fn delete_object_tags(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
self.handle_delete_object_tags(bucket, object, opts).await
}
#[instrument(skip(self))]
async fn add_partial(&self, bucket: &str, object: &str, version_id: &str) -> Result<()> {
self.handle_add_partial(bucket, object, version_id).await
}
#[instrument(skip(self))]
async fn transition_object(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
self.handle_transition_object(bucket, object, opts).await
}
#[instrument(skip(self))]
async fn restore_transitioned_object(self: Arc<Self>, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
self.handle_restore_transitioned_object(bucket, object, opts).await
}
}
#[async_trait::async_trait]
impl ListOperations for ECStore {
// @continuation_token marker
// @start_after as marker when continuation_token empty
// @delimiter default="/", empty when recursive
@@ -348,56 +415,10 @@ impl StorageAPI for ECStore {
) -> Result<()> {
self.handle_walk(rx, bucket, prefix, result, opts).await
}
}
#[instrument(skip(self))]
async fn get_object_info(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
self.handle_get_object_info(bucket, object, opts).await
}
// TODO: review
#[instrument(skip(self))]
async fn copy_object(
&self,
src_bucket: &str,
src_object: &str,
dst_bucket: &str,
dst_object: &str,
src_info: &mut ObjectInfo,
src_opts: &ObjectOptions,
dst_opts: &ObjectOptions,
) -> Result<ObjectInfo> {
self.handle_copy_object(src_bucket, src_object, dst_bucket, dst_object, src_info, src_opts, dst_opts)
.await
}
#[instrument(skip(self))]
async fn delete_object(&self, bucket: &str, object: &str, opts: ObjectOptions) -> Result<ObjectInfo> {
self.handle_delete_object(bucket, object, opts).await
}
// TODO: review
#[instrument(skip(self))]
async fn delete_objects(
&self,
bucket: &str,
objects: Vec<ObjectToDelete>,
opts: ObjectOptions,
) -> (Vec<DeletedObject>, Vec<Option<Error>>) {
self.handle_delete_objects(bucket, objects, opts).await
}
#[instrument(skip(self))]
async fn list_object_parts(
&self,
bucket: &str,
object: &str,
upload_id: &str,
part_number_marker: Option<usize>,
max_parts: usize,
opts: &ObjectOptions,
) -> Result<ListPartsInfo> {
self.handle_list_object_parts(bucket, object, upload_id, part_number_marker, max_parts, opts)
.await
}
#[async_trait::async_trait]
impl MultipartOperations for ECStore {
#[instrument(skip(self))]
async fn list_multipart_uploads(
&self,
@@ -417,20 +438,6 @@ impl StorageAPI for ECStore {
self.handle_new_multipart_upload(bucket, object, opts).await
}
#[instrument(skip(self))]
async fn add_partial(&self, bucket: &str, object: &str, version_id: &str) -> Result<()> {
self.handle_add_partial(bucket, object, version_id).await
}
#[instrument(skip(self))]
async fn transition_object(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
self.handle_transition_object(bucket, object, opts).await
}
#[instrument(skip(self))]
async fn restore_transitioned_object(self: Arc<Self>, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
self.handle_restore_transitioned_object(bucket, object, opts).await
}
#[instrument(skip(self))]
async fn copy_object_part(
&self,
@@ -485,6 +492,21 @@ impl StorageAPI for ECStore {
) -> Result<MultipartInfo> {
self.handle_get_multipart_info(bucket, object, upload_id, opts).await
}
#[instrument(skip(self))]
async fn list_object_parts(
&self,
bucket: &str,
object: &str,
upload_id: &str,
part_number_marker: Option<usize>,
max_parts: usize,
opts: &ObjectOptions,
) -> Result<ListPartsInfo> {
self.handle_list_object_parts(bucket, object, upload_id, part_number_marker, max_parts, opts)
.await
}
#[instrument(skip(self))]
async fn abort_multipart_upload(&self, bucket: &str, object: &str, upload_id: &str, opts: &ObjectOptions) -> Result<()> {
self.handle_abort_multipart_upload(bucket, object, upload_id, opts).await
@@ -502,40 +524,10 @@ impl StorageAPI for ECStore {
self.handle_complete_multipart_upload(bucket, object, upload_id, uploaded_parts, opts)
.await
}
}
#[instrument(skip(self))]
async fn get_disks(&self, pool_idx: usize, set_idx: usize) -> Result<Vec<Option<DiskStore>>> {
self.handle_get_disks(pool_idx, set_idx).await
}
#[instrument(skip(self))]
fn set_drive_counts(&self) -> Vec<usize> {
self.handle_set_drive_counts()
}
#[instrument(skip(self))]
async fn put_object_metadata(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
self.handle_put_object_metadata(bucket, object, opts).await
}
#[instrument(skip(self))]
async fn get_object_tags(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<String> {
self.handle_get_object_tags(bucket, object, opts).await
}
#[instrument(level = "debug", skip(self))]
async fn put_object_tags(&self, bucket: &str, object: &str, tags: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
self.handle_put_object_tags(bucket, object, tags, opts).await
}
#[instrument(skip(self))]
async fn delete_object_version(&self, bucket: &str, object: &str, fi: &FileInfo, force_del_marker: bool) -> Result<()> {
self.handle_delete_object_version(bucket, object, fi, force_del_marker).await
}
#[instrument(skip(self))]
async fn delete_object_tags(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
self.handle_delete_object_tags(bucket, object, opts).await
}
#[async_trait::async_trait]
impl HealOperations for ECStore {
#[instrument(skip(self))]
async fn heal_format(&self, dry_run: bool) -> Result<(HealResultItem, Option<Error>)> {
self.handle_heal_format(dry_run).await
@@ -565,9 +557,35 @@ impl StorageAPI for ECStore {
async fn check_abandoned_parts(&self, bucket: &str, object: &str, opts: &HealOpts) -> Result<()> {
self.handle_check_abandoned_parts(bucket, object, opts).await
}
}
async fn verify_object_integrity(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
self.handle_verify_object_integrity(bucket, object, opts).await
}
#[async_trait::async_trait]
impl StorageAPI for ECStore {
#[instrument(skip(self))]
async fn new_ns_lock(&self, bucket: &str, object: &str) -> Result<NamespaceLockWrapper> {
self.handle_new_ns_lock(bucket, object).await
}
#[instrument(skip(self))]
async fn backend_info(&self) -> rustfs_madmin::BackendInfo {
self.handle_backend_info().await
}
#[instrument(skip(self))]
async fn storage_info(&self) -> rustfs_madmin::StorageInfo {
self.handle_storage_info().await
}
#[instrument(skip(self))]
async fn local_storage_info(&self) -> rustfs_madmin::StorageInfo {
self.handle_local_storage_info().await
}
#[instrument(skip(self))]
async fn get_disks(&self, pool_idx: usize, set_idx: usize) -> Result<Vec<Option<DiskStore>>> {
self.handle_get_disks(pool_idx, set_idx).await
}
#[instrument(skip(self))]
fn set_drive_counts(&self) -> Vec<usize> {
self.handle_set_drive_counts()
}
}
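Each trait impl on `ECStore` is a one-line forward into an inherent `handle_*` method, keeping the logic callable and testable without any trait in scope. A minimal sketch of the pattern with simplified types (not the crate's code):

#[async_trait::async_trait]
trait Ops: Send + Sync {
    async fn ping(&self) -> String;
}

struct Store;

impl Store {
    // The inherent method owns the logic and is testable directly.
    async fn handle_ping(&self) -> String {
        "pong".to_string()
    }
}

#[async_trait::async_trait]
impl Ops for Store {
    // The trait impl stays a thin forwarder.
    async fn ping(&self) -> String {
        self.handle_ping().await
    }
}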

View File

@@ -2,7 +2,6 @@ use super::*;
#[async_trait::async_trait]
pub trait ObjectIO: Send + Sync + Debug + 'static {
// GetObjectNInfo FIXME:
async fn get_object_reader(
&self,
bucket: &str,
@@ -11,55 +10,23 @@ pub trait ObjectIO: Send + Sync + Debug + 'static {
h: HeaderMap,
opts: &ObjectOptions,
) -> Result<GetObjectReader>;
// PutObject
async fn put_object(&self, bucket: &str, object: &str, data: &mut PutObjReader, opts: &ObjectOptions) -> Result<ObjectInfo>;
}
/// Bucket-level storage operations.
#[async_trait::async_trait]
#[allow(clippy::too_many_arguments)]
pub trait StorageAPI: ObjectIO + Debug {
async fn new_ns_lock(&self, bucket: &str, object: &str) -> Result<NamespaceLockWrapper>;
async fn backend_info(&self) -> rustfs_madmin::BackendInfo;
async fn storage_info(&self) -> rustfs_madmin::StorageInfo;
async fn local_storage_info(&self) -> rustfs_madmin::StorageInfo;
pub trait BucketOperations: Send + Sync + Debug {
async fn make_bucket(&self, bucket: &str, opts: &MakeBucketOptions) -> Result<()>;
async fn get_bucket_info(&self, bucket: &str, opts: &BucketOptions) -> Result<BucketInfo>;
async fn list_bucket(&self, opts: &BucketOptions) -> Result<Vec<BucketInfo>>;
async fn delete_bucket(&self, bucket: &str, opts: &DeleteBucketOptions) -> Result<()>;
// ListObjects TODO: FIXME:
async fn list_objects_v2(
self: Arc<Self>,
bucket: &str,
prefix: &str,
continuation_token: Option<String>,
delimiter: Option<String>,
max_keys: i32,
fetch_owner: bool,
start_after: Option<String>,
incl_deleted: bool,
) -> Result<ListObjectsV2Info>;
// ListObjectVersions TODO: FIXME:
async fn list_object_versions(
self: Arc<Self>,
bucket: &str,
prefix: &str,
marker: Option<String>,
version_marker: Option<String>,
delimiter: Option<String>,
max_keys: i32,
) -> Result<ListObjectVersionsInfo>;
async fn walk(
self: Arc<Self>,
rx: CancellationToken,
bucket: &str,
prefix: &str,
result: tokio::sync::mpsc::Sender<ObjectInfoOrErr>,
opts: WalkOptions,
) -> Result<()>;
}
/// Object-level storage operations (beyond basic I/O in ObjectIO).
#[async_trait::async_trait]
#[allow(clippy::too_many_arguments)]
pub trait ObjectOperations: Send + Sync + Debug {
async fn get_object_info(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo>;
async fn verify_object_integrity(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()>;
async fn copy_object(
@@ -80,10 +47,55 @@ pub trait StorageAPI: ObjectIO + Debug {
objects: Vec<ObjectToDelete>,
opts: ObjectOptions,
) -> (Vec<DeletedObject>, Vec<Option<Error>>);
async fn put_object_metadata(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo>;
async fn get_object_tags(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<String>;
async fn put_object_tags(&self, bucket: &str, object: &str, tags: &str, opts: &ObjectOptions) -> Result<ObjectInfo>;
async fn delete_object_tags(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo>;
async fn add_partial(&self, bucket: &str, object: &str, version_id: &str) -> Result<()>;
async fn transition_object(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()>;
async fn restore_transitioned_object(self: Arc<Self>, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()>;
}
// TransitionObject TODO:
// RestoreTransitionedObject TODO:
/// Listing and walking operations.
#[async_trait::async_trait]
#[allow(clippy::too_many_arguments)]
pub trait ListOperations: Send + Sync + Debug {
async fn list_objects_v2(
self: Arc<Self>,
bucket: &str,
prefix: &str,
continuation_token: Option<String>,
delimiter: Option<String>,
max_keys: i32,
fetch_owner: bool,
start_after: Option<String>,
incl_deleted: bool,
) -> Result<ListObjectsV2Info>;
async fn list_object_versions(
self: Arc<Self>,
bucket: &str,
prefix: &str,
marker: Option<String>,
version_marker: Option<String>,
delimiter: Option<String>,
max_keys: i32,
) -> Result<ListObjectVersionsInfo>;
async fn walk(
self: Arc<Self>,
rx: CancellationToken,
bucket: &str,
prefix: &str,
result: tokio::sync::mpsc::Sender<ObjectInfoOrErr>,
opts: WalkOptions,
) -> Result<()>;
}
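`walk` streams entries over an `mpsc` channel and stops when the `CancellationToken` fires, so large listings can be consumed incrementally and abandoned early. A hedged consumer sketch, assuming `WalkOptions: Default` (channel size and cutoff are arbitrary):

// Hypothetical consumer: take the first 100 walk entries, then cancel.
async fn sample_walk(store: Arc<ECStore>, bucket: &str) -> Result<()> {
    let (tx, mut rx) = tokio::sync::mpsc::channel::<ObjectInfoOrErr>(64);
    let cancel = CancellationToken::new();
    let walker = tokio::spawn({
        let (cancel, bucket) = (cancel.clone(), bucket.to_string());
        async move { store.walk(cancel, &bucket, "", tx, WalkOptions::default()).await }
    });
    let mut seen = 0usize;
    while let Some(_entry) = rx.recv().await {
        seen += 1;
        if seen >= 100 {
            cancel.cancel(); // the walker observes the token and stops producing
            break;
        }
    }
    let _ = walker.await;
    Ok(())
}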
/// Multipart upload operations.
#[async_trait::async_trait]
#[allow(clippy::too_many_arguments)]
pub trait MultipartOperations: Send + Sync + Debug {
async fn list_multipart_uploads(
&self,
bucket: &str,
@@ -142,19 +154,11 @@ pub trait StorageAPI: ObjectIO + Debug {
uploaded_parts: Vec<CompletePart>,
opts: &ObjectOptions,
) -> Result<ObjectInfo>;
async fn get_disks(&self, pool_idx: usize, set_idx: usize) -> Result<Vec<Option<DiskStore>>>;
fn set_drive_counts(&self) -> Vec<usize>;
// Health TODO:
async fn put_object_metadata(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo>;
// DecomTieredObject
async fn get_object_tags(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<String>;
async fn add_partial(&self, bucket: &str, object: &str, version_id: &str) -> Result<()>;
async fn transition_object(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()>;
async fn restore_transitioned_object(self: Arc<Self>, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()>;
async fn put_object_tags(&self, bucket: &str, object: &str, tags: &str, opts: &ObjectOptions) -> Result<ObjectInfo>;
async fn delete_object_tags(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo>;
}
/// Healing and repair operations.
#[async_trait::async_trait]
pub trait HealOperations: Send + Sync + Debug {
async fn heal_format(&self, dry_run: bool) -> Result<(HealResultItem, Option<Error>)>;
async fn heal_bucket(&self, bucket: &str, opts: &HealOpts) -> Result<HealResultItem>;
async fn heal_object(
@@ -164,8 +168,25 @@ pub trait StorageAPI: ObjectIO + Debug {
version_id: &str,
opts: &HealOpts,
) -> Result<(HealResultItem, Option<Error>)>;
// async fn heal_objects(&self, bucket: &str, prefix: &str, opts: &HealOpts, hs: Arc<HealSequence>, is_meta: bool)
// -> Result<()>;
async fn get_pool_and_set(&self, id: &str) -> Result<(Option<usize>, Option<usize>, Option<usize>)>;
async fn check_abandoned_parts(&self, bucket: &str, object: &str, opts: &HealOpts) -> Result<()>;
}
/// Unified storage API combining all operation groups.
///
/// Consumers can depend on specific sub-traits (e.g., `BucketOperations`)
/// when they don't need the full API surface.
#[async_trait::async_trait]
#[allow(clippy::too_many_arguments)]
pub trait StorageAPI:
ObjectIO + BucketOperations + ObjectOperations + ListOperations + MultipartOperations + HealOperations + Debug
{
async fn new_ns_lock(&self, bucket: &str, object: &str) -> Result<NamespaceLockWrapper>;
async fn backend_info(&self) -> rustfs_madmin::BackendInfo;
async fn storage_info(&self) -> rustfs_madmin::StorageInfo;
async fn local_storage_info(&self) -> rustfs_madmin::StorageInfo;
async fn get_disks(&self, pool_idx: usize, set_idx: usize) -> Result<Vec<Option<DiskStore>>>;
fn set_drive_counts(&self) -> Vec<usize>;
}
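The practical payoff of the decomposition: call sites can bound on just the capability they use. A function that only ensures a bucket exists can take `BucketOperations` rather than all of `StorageAPI`, and code that needs a sub-trait only for method resolution can import it anonymously, as several files in this commit do with `ListOperations as _`. A hedged sketch, assuming `Default` impls for the option types:

// Hypothetical helper bounded on the narrow sub-trait only.
async fn ensure_bucket<S: BucketOperations + ?Sized>(store: &S, bucket: &str) -> Result<()> {
    match store.get_bucket_info(bucket, &BucketOptions::default()).await {
        Ok(_) => Ok(()),
        // is_not_found() is the helper added to StorageError in this commit.
        Err(e) if e.is_not_found() => store.make_bucket(bucket, &MakeBucketOptions::default()).await,
        Err(e) => Err(e),
    }
}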

View File

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::StorageAPI;
use crate::bucket::metadata_sys::get_versioning_config;
use crate::bucket::utils::check_list_objs_args;
use crate::bucket::versioning::VersioningApi;
@@ -24,7 +23,8 @@ use crate::error::{
};
use crate::set_disk::SetDisks;
use crate::store_api::{
ListObjectVersionsInfo, ListObjectsInfo, ObjectInfo, ObjectInfoOrErr, ObjectOptions, WalkOptions, WalkVersionsSortOrder,
ListObjectVersionsInfo, ListObjectsInfo, ObjectInfo, ObjectInfoOrErr, ObjectOperations, ObjectOptions, WalkOptions,
WalkVersionsSortOrder,
};
use crate::store_utils::is_reserved_or_invalid_bucket;
use crate::{store::ECStore, store_api::ListObjectsV2Info};

View File

@@ -18,7 +18,7 @@ use rustfs_common::heal_channel::{HealOpts, HealScanMode};
use rustfs_ecstore::{
disk::{DiskStore, endpoint::Endpoint},
store::ECStore,
store_api::{BucketInfo, ObjectIO, StorageAPI},
store_api::{BucketInfo, BucketOperations, HealOperations, ListOperations, ObjectIO, ObjectOperations, StorageAPI},
};
use rustfs_madmin::heal_commands::HealResultItem;
use std::sync::Arc;

View File

@@ -18,7 +18,7 @@ use rustfs_ecstore::{
disk::endpoint::Endpoint,
endpoints::{EndpointServerPools, Endpoints, PoolEndpoints},
store::ECStore,
store_api::{ObjectIO, ObjectOptions, PutObjReader, StorageAPI},
store_api::{BucketOperations, ObjectIO, ObjectOperations, ObjectOptions, PutObjReader},
};
use rustfs_heal::heal::{
manager::{HealConfig, HealManager},

View File

@@ -21,8 +21,7 @@ use crate::{
};
use futures::future::join_all;
use rustfs_credentials::get_global_action_cred;
use rustfs_ecstore::StorageAPI as _;
use rustfs_ecstore::store_api::{ObjectInfoOrErr, WalkOptions};
use rustfs_ecstore::store_api::{ListOperations as _, ObjectInfoOrErr, WalkOptions};
use rustfs_ecstore::{
config::{
RUSTFS_CONFIG_PREFIX,

View File

@@ -25,7 +25,7 @@ use crate::format::report_metrics;
use rustfs_ecstore::bucket::metadata_sys::get_quota_config;
use rustfs_ecstore::data_usage::load_data_usage_from_backend;
use rustfs_ecstore::pools::{get_total_usable_capacity, get_total_usable_capacity_free};
use rustfs_ecstore::store_api::BucketOptions;
use rustfs_ecstore::store_api::{BucketOperations, BucketOptions};
use rustfs_ecstore::{StorageAPI, new_object_layer_fn};
use rustfs_utils::get_env_opt_u64;
use std::sync::OnceLock;

View File

@@ -161,7 +161,7 @@ impl VariableResolver {
fn resolve_userid(&self) -> Option<String> {
self.get_claim_as_strings("sub")
.or_else(|| self.get_claim_as_strings("parent"))
.and_then(|mut vec| vec.pop()) // take the first value, preserving the original logic
.and_then(|mut vec| vec.pop())
}
fn resolve_principal_type(&self) -> String {

View File

@@ -25,11 +25,11 @@ use object_store::{
};
use pin_project_lite::pin_project;
use rustfs_common::DEFAULT_DELIMITER;
use rustfs_ecstore::StorageAPI;
use rustfs_ecstore::new_object_layer_fn;
use rustfs_ecstore::set_disk::DEFAULT_READ_BUFFER_SIZE;
use rustfs_ecstore::store::ECStore;
use rustfs_ecstore::store_api::ObjectIO;
use rustfs_ecstore::store_api::ObjectOperations;
use rustfs_ecstore::store_api::ObjectOptions;
use s3s::S3Result;
use s3s::dto::SelectObjectContentInput;

View File

@@ -34,7 +34,7 @@ use rustfs_ecstore::error::{Error, StorageError};
use rustfs_ecstore::global::GLOBAL_TierConfigMgr;
use rustfs_ecstore::new_object_layer_fn;
use rustfs_ecstore::set_disk::SetDisks;
use rustfs_ecstore::store_api::{BucketInfo, BucketOptions, ObjectInfo};
use rustfs_ecstore::store_api::{BucketInfo, BucketOperations, BucketOptions, ObjectInfo};
use rustfs_ecstore::{StorageAPI, error::Result, store::ECStore};
use rustfs_filemeta::FileMeta;
use rustfs_utils::path::{SLASH_SEPARATOR, path_join_buf};

View File

@@ -19,7 +19,7 @@ use rustfs_ecstore::{
endpoints::{EndpointServerPools, Endpoints, PoolEndpoints},
global::GLOBAL_TierConfigMgr,
store::ECStore,
store_api::{MakeBucketOptions, ObjectIO, ObjectOptions, PutObjReader, StorageAPI},
store_api::{BucketOperations, MakeBucketOptions, ObjectIO, ObjectOperations, ObjectOptions, PutObjReader},
tier::tier_config::{TierConfig, TierMinIO, TierType},
};
use rustfs_scanner::scanner::init_data_scanner;

View File

@@ -21,7 +21,7 @@ use matchit::Params;
use rustfs_credentials::get_global_action_cred;
use rustfs_ecstore::bucket::versioning_sys::BucketVersioningSys;
use rustfs_ecstore::new_object_layer_fn;
use rustfs_ecstore::store_api::{BucketOptions, StorageAPI};
use rustfs_ecstore::store_api::{BucketOperations, BucketOptions, StorageAPI};
use rustfs_iam::store::MappedPolicy;
use rustfs_policy::policy::BucketPolicy;
use rustfs_policy::policy::default::DEFAULT_POLICIES;

View File

@@ -25,7 +25,10 @@ use hyper::Method;
use matchit::Params;
use rustfs_config::MAX_BUCKET_METADATA_IMPORT_SIZE;
use rustfs_ecstore::{
StorageAPI,
bucket::utils::{deserialize, serialize},
store_api::MakeBucketOptions,
};
use rustfs_ecstore::{
bucket::{
metadata::{
BUCKET_LIFECYCLE_CONFIG, BUCKET_NOTIFICATION_CONFIG, BUCKET_POLICY_CONFIG, BUCKET_QUOTA_CONFIG_FILE,
@@ -38,11 +41,7 @@ use rustfs_ecstore::{
},
error::StorageError,
new_object_layer_fn,
store_api::BucketOptions,
};
use rustfs_ecstore::{
bucket::utils::{deserialize, serialize},
store_api::MakeBucketOptions,
store_api::{BucketOperations, BucketOptions},
};
use rustfs_policy::policy::{
BucketPolicy,

View File

@@ -30,6 +30,7 @@ use rustfs_ecstore::{
new_object_layer_fn,
notification_sys::get_global_notification_sys,
rebalance::{DiskStat, RebalSaveOpt},
store_api::BucketOperations,
store_api::BucketOptions,
};
use rustfs_policy::policy::action::{Action, AdminAction};

View File

@@ -27,7 +27,7 @@ use rustfs_ecstore::bucket::metadata_sys;
use rustfs_ecstore::bucket::target::BucketTarget;
use rustfs_ecstore::global::global_rustfs_port;
use rustfs_ecstore::new_object_layer_fn;
use rustfs_ecstore::store_api::{BucketOptions, StorageAPI};
use rustfs_ecstore::store_api::{BucketOperations, BucketOptions};
use rustfs_policy::policy::action::{Action, AdminAction};
use s3s::header::CONTENT_TYPE;
use s3s::{Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result, s3_error};

View File

@@ -13,7 +13,6 @@
// limitations under the License.
//! Admin application use-case contracts.
#![allow(dead_code)]
use crate::app::context::{AppContext, get_global_app_context};
use crate::error::ApiError;
@@ -58,31 +57,13 @@ pub struct QueryPoolStatusRequest {
pub by_id: bool,
}
#[async_trait::async_trait]
pub trait AdminUsecase: Send + Sync {
async fn query_server_info(&self, req: QueryServerInfoRequest) -> AdminUsecaseResult<QueryServerInfoResponse>;
async fn query_storage_info(&self) -> AdminUsecaseResult<StorageInfo>;
async fn query_data_usage_info(&self) -> AdminUsecaseResult<DataUsageInfo>;
async fn list_pool_statuses(&self) -> AdminUsecaseResult<Vec<PoolStatus>>;
async fn query_pool_status(&self, req: QueryPoolStatusRequest) -> AdminUsecaseResult<PoolStatus>;
fn collect_dependency_readiness(&self) -> DependencyReadiness;
}
#[derive(Clone, Default)]
pub struct DefaultAdminUsecase {
context: Option<Arc<AppContext>>,
}
impl DefaultAdminUsecase {
pub fn new(context: Arc<AppContext>) -> Self {
Self { context: Some(context) }
}
#[cfg(test)]
pub fn without_context() -> Self {
Self { context: None }
}
@@ -93,10 +74,6 @@ impl DefaultAdminUsecase {
}
}
pub fn context(&self) -> Option<Arc<AppContext>> {
self.context.clone()
}
fn endpoints(&self) -> Option<EndpointServerPools> {
self.context.as_ref().and_then(|context| context.endpoints().handle())
}
@@ -292,33 +269,6 @@ impl DefaultAdminUsecase {
}
}
#[async_trait::async_trait]
impl AdminUsecase for DefaultAdminUsecase {
async fn query_server_info(&self, req: QueryServerInfoRequest) -> AdminUsecaseResult<QueryServerInfoResponse> {
self.execute_query_server_info(req).await
}
async fn query_storage_info(&self) -> AdminUsecaseResult<StorageInfo> {
self.execute_query_storage_info().await
}
async fn query_data_usage_info(&self) -> AdminUsecaseResult<DataUsageInfo> {
self.execute_query_data_usage_info().await
}
async fn list_pool_statuses(&self) -> AdminUsecaseResult<Vec<PoolStatus>> {
self.execute_list_pool_statuses().await
}
async fn query_pool_status(&self, req: QueryPoolStatusRequest) -> AdminUsecaseResult<PoolStatus> {
self.execute_query_pool_status(req).await
}
fn collect_dependency_readiness(&self) -> DependencyReadiness {
self.execute_collect_dependency_readiness()
}
}
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -13,7 +13,6 @@
// limitations under the License.
//! Bucket application use-case contracts.
#![allow(dead_code)]
use crate::app::context::{AppContext, default_notify_interface, get_global_app_context};
use crate::auth::get_condition_values;
@@ -46,7 +45,7 @@ use rustfs_ecstore::bucket::{
use rustfs_ecstore::client::object_api_utils::to_s3s_etag;
use rustfs_ecstore::error::StorageError;
use rustfs_ecstore::new_object_layer_fn;
use rustfs_ecstore::store_api::{BucketOptions, DeleteBucketOptions, MakeBucketOptions, StorageAPI};
use rustfs_ecstore::store_api::{BucketOperations, BucketOptions, DeleteBucketOptions, ListOperations, MakeBucketOptions};
use rustfs_policy::policy::{
action::{Action, S3Action},
{BucketPolicy, BucketPolicyArgs, Effect, Validator},
@@ -65,8 +64,6 @@ use std::{fmt::Display, sync::Arc};
use tracing::{debug, error, info, instrument, warn};
use urlencoding::encode;
pub type BucketUsecaseResult<T> = Result<T, ApiError>;
fn serialize_config<T: xml::Serialize>(value: &T) -> S3Result<Vec<u8>> {
serialize(value).map_err(to_internal_error)
}
@@ -75,72 +72,13 @@ fn to_internal_error(err: impl Display) -> S3Error {
S3Error::with_message(S3ErrorCode::InternalError, format!("{err}"))
}
fn default_region() -> Region {
// RUSTFS_REGION is a compile-time constant ("us-east-1") guaranteed to be valid.
Region::new(RUSTFS_REGION.into()).expect("RUSTFS_REGION constant must be a valid region")
}
fn resolve_notification_region(global_region: Option<Region>, request_region: Option<Region>) -> Region {
global_region.unwrap_or_else(|| request_region.unwrap_or_else(|| Region::new(RUSTFS_REGION.into()).expect("valid region")))
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct CreateBucketRequest {
pub bucket: String,
pub object_lock_enabled: Option<bool>,
}
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct CreateBucketResponse;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct DeleteBucketRequest {
pub bucket: String,
}
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct DeleteBucketResponse;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct HeadBucketRequest {
pub bucket: String,
}
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct HeadBucketResponse;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ListObjectsV2Request {
pub bucket: String,
pub prefix: Option<String>,
pub delimiter: Option<String>,
pub continuation_token: Option<String>,
pub max_keys: Option<i32>,
pub fetch_owner: Option<bool>,
pub start_after: Option<String>,
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ListObjectsV2Item {
pub key: String,
pub etag: Option<String>,
pub size: i64,
pub version_id: Option<String>,
}
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct ListObjectsV2Response {
pub objects: Vec<ListObjectsV2Item>,
pub common_prefixes: Vec<String>,
pub key_count: i32,
pub is_truncated: bool,
pub next_continuation_token: Option<String>,
}
#[async_trait::async_trait]
pub trait BucketUsecase: Send + Sync {
async fn create_bucket(&self, req: CreateBucketRequest) -> BucketUsecaseResult<CreateBucketResponse>;
async fn delete_bucket(&self, req: DeleteBucketRequest) -> BucketUsecaseResult<DeleteBucketResponse>;
async fn head_bucket(&self, req: HeadBucketRequest) -> BucketUsecaseResult<HeadBucketResponse>;
async fn list_objects_v2(&self, req: ListObjectsV2Request) -> BucketUsecaseResult<ListObjectsV2Response>;
global_region.or(request_region).unwrap_or_else(default_region)
}
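The rewritten `resolve_notification_region` replaces a nested `unwrap_or_else` chain with `Option::or` plus a lazily constructed default. A self-contained sketch of the same combinator chain, with `String` standing in for `s3s::region::Region`:

```rust
// The default is only built when both sources are absent.
fn default_region() -> String {
    "us-east-1".to_string()
}

// Prefer the global region, fall back to the request's, then the default.
fn resolve_region(global: Option<String>, request: Option<String>) -> String {
    global.or(request).unwrap_or_else(default_region)
}

fn main() {
    assert_eq!(resolve_region(Some("ap-east-1".into()), None), "ap-east-1");
    assert_eq!(resolve_region(None, Some("eu-west-1".into())), "eu-west-1");
    assert_eq!(resolve_region(None, None), "us-east-1"); // lazy default
}
```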
#[derive(Clone, Default)]
@@ -149,10 +87,7 @@ pub struct DefaultBucketUsecase {
}
impl DefaultBucketUsecase {
pub fn new(context: Arc<AppContext>) -> Self {
Self { context: Some(context) }
}
#[cfg(test)]
pub fn without_context() -> Self {
Self { context: None }
}
@@ -163,10 +98,6 @@ impl DefaultBucketUsecase {
}
}
pub fn context(&self) -> Option<Arc<AppContext>> {
self.context.clone()
}
fn global_region(&self) -> Option<Region> {
self.context.as_ref().and_then(|context| context.region().get())
}
@@ -466,7 +397,10 @@ impl DefaultBucketUsecase {
list_bucket_infos = futures::stream::iter(list_bucket_infos)
.filter_map(|info| async {
let mut req_clone = req.clone();
let req_info = req_clone.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let Some(req_info) = req_clone.extensions.get_mut::<ReqInfo>() else {
debug!(bucket = %info.name, "ReqInfo missing in extensions, skipping bucket authorization");
return None;
};
req_info.bucket = Some(info.name.clone());
if authorize_request(&mut req_clone, Action::S3Action(S3Action::ListBucketAction))
@@ -1750,37 +1684,6 @@ impl DefaultBucketUsecase {
}
}
#[async_trait::async_trait]
impl BucketUsecase for DefaultBucketUsecase {
async fn create_bucket(&self, req: CreateBucketRequest) -> BucketUsecaseResult<CreateBucketResponse> {
let _ = req;
Err(ApiError::from(StorageError::other(
"DefaultBucketUsecase::create_bucket DTO path is not implemented yet",
)))
}
async fn delete_bucket(&self, req: DeleteBucketRequest) -> BucketUsecaseResult<DeleteBucketResponse> {
let _ = req;
Err(ApiError::from(StorageError::other(
"DefaultBucketUsecase::delete_bucket DTO path is not implemented yet",
)))
}
async fn head_bucket(&self, req: HeadBucketRequest) -> BucketUsecaseResult<HeadBucketResponse> {
let _ = req;
Err(ApiError::from(StorageError::other(
"DefaultBucketUsecase::head_bucket DTO path is not implemented yet",
)))
}
async fn list_objects_v2(&self, req: ListObjectsV2Request) -> BucketUsecaseResult<ListObjectsV2Response> {
let _ = req;
Err(ApiError::from(StorageError::other(
"DefaultBucketUsecase::list_objects_v2 DTO path is not implemented yet",
)))
}
}
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -15,7 +15,6 @@
//! Application-layer dependency context.
//! This module introduces explicit dependency injection entry points
//! for storage, IAM, and KMS handles.
#![allow(dead_code)]
use crate::config::workload_profiles::{RustFSBufferConfig, get_global_buffer_config};
use async_trait::async_trait;
@@ -34,11 +33,13 @@ use tokio::sync::RwLock;
/// IAM interface for application-layer use-cases.
pub trait IamInterface: Send + Sync {
#[allow(dead_code)]
fn handle(&self) -> Arc<IamSys<ObjectStore>>;
fn is_ready(&self) -> bool;
}
/// KMS interface for application-layer use-cases.
#[allow(dead_code)]
pub trait KmsInterface: Send + Sync {
fn handle(&self) -> Arc<KmsServiceManager>;
}
@@ -95,6 +96,7 @@ pub trait BufferConfigInterface: Send + Sync {
/// Default IAM interface adapter.
pub struct IamHandle {
#[allow(dead_code)]
iam: Arc<IamSys<ObjectStore>>,
}
@@ -115,6 +117,7 @@ impl IamInterface for IamHandle {
}
/// Default KMS interface adapter.
#[allow(dead_code)]
pub struct KmsHandle {
kms: Arc<KmsServiceManager>,
}
@@ -230,6 +233,7 @@ impl BufferConfigInterface for BufferConfigHandle {
pub struct AppContext {
object_store: Arc<ECStore>,
iam: Arc<dyn IamInterface>,
#[allow(dead_code)]
kms: Arc<dyn KmsInterface>,
kms_runtime: Arc<dyn KmsRuntimeInterface>,
notify: Arc<dyn NotifyInterface>,
@@ -274,6 +278,7 @@ impl AppContext {
self.iam.clone()
}
#[allow(dead_code)]
pub fn kms(&self) -> Arc<dyn KmsInterface> {
self.kms.clone()
}

View File

@@ -13,7 +13,6 @@
// limitations under the License.
//! Multipart application use-case contracts.
#![allow(dead_code)]
use crate::app::context::{AppContext, get_global_app_context};
use crate::error::ApiError;
@@ -28,7 +27,6 @@ use crate::storage::*;
use bytes::Bytes;
use futures::StreamExt;
use rustfs_config::RUSTFS_REGION;
use rustfs_ecstore::StorageAPI;
use rustfs_ecstore::bucket::quota::checker::QuotaChecker;
use rustfs_ecstore::bucket::{
metadata_sys,
@@ -41,6 +39,7 @@ use rustfs_ecstore::error::{StorageError, is_err_object_not_found, is_err_versio
use rustfs_ecstore::new_object_layer_fn;
use rustfs_ecstore::set_disk::{MAX_PARTS_COUNT, is_valid_storage_class};
use rustfs_ecstore::store_api::{CompletePart, MultipartUploadResult, ObjectIO, ObjectOptions, PutObjReader};
use rustfs_ecstore::store_api::{MultipartOperations, ObjectOperations};
use rustfs_filemeta::{ReplicationStatusType, ReplicationType};
use rustfs_rio::{CompressReader, HashReader, Reader, WarpReader};
use rustfs_targets::EventName;
@@ -50,6 +49,7 @@ use rustfs_utils::http::{
headers::{AMZ_DECODED_CONTENT_LENGTH, AMZ_OBJECT_TAGGING, RESERVED_METADATA_PREFIX_LOWER},
};
use s3s::dto::*;
use s3s::region::Region;
use s3s::{S3Error, S3ErrorCode, S3Request, S3Response, S3Result, s3_error};
use std::collections::HashMap;
use std::str::FromStr;
@@ -58,95 +58,13 @@ use tokio::sync::RwLock;
use tokio_util::io::StreamReader;
use tracing::{info, instrument, warn};
pub type MultipartUsecaseResult<T> = Result<T, ApiError>;
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct CreateMultipartUploadRequest {
pub bucket: String,
pub key: String,
pub metadata: HashMap<String, String>,
pub content_type: Option<String>,
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct CreateMultipartUploadResponse {
pub upload_id: String,
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct UploadPartRequest {
pub bucket: String,
pub key: String,
pub upload_id: String,
pub part_number: i32,
pub content_length: Option<i64>,
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct UploadPartResponse {
pub etag: String,
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct CompleteMultipartUploadPart {
pub part_number: i32,
pub etag: String,
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct CompleteMultipartUploadRequest {
pub bucket: String,
pub key: String,
pub upload_id: String,
pub parts: Vec<CompleteMultipartUploadPart>,
}
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct CompleteMultipartUploadResponse {
pub etag: Option<String>,
pub version_id: Option<String>,
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct AbortMultipartUploadRequest {
pub bucket: String,
pub key: String,
pub upload_id: String,
}
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct AbortMultipartUploadResponse;
#[async_trait::async_trait]
pub trait MultipartUsecase: Send + Sync {
async fn create_multipart_upload(
&self,
req: CreateMultipartUploadRequest,
) -> MultipartUsecaseResult<CreateMultipartUploadResponse>;
async fn upload_part(&self, req: UploadPartRequest) -> MultipartUsecaseResult<UploadPartResponse>;
async fn complete_multipart_upload(
&self,
req: CompleteMultipartUploadRequest,
) -> MultipartUsecaseResult<CompleteMultipartUploadResponse>;
async fn abort_multipart_upload(
&self,
req: AbortMultipartUploadRequest,
) -> MultipartUsecaseResult<AbortMultipartUploadResponse>;
}
#[derive(Clone, Default)]
pub struct DefaultMultipartUsecase {
context: Option<Arc<AppContext>>,
}
impl DefaultMultipartUsecase {
pub fn new(context: Arc<AppContext>) -> Self {
Self { context: Some(context) }
}
#[cfg(test)]
pub fn without_context() -> Self {
Self { context: None }
}
@@ -157,10 +75,6 @@ impl DefaultMultipartUsecase {
}
}
pub fn context(&self) -> Option<Arc<AppContext>> {
self.context.clone()
}
fn bucket_metadata_sys(&self) -> Option<Arc<RwLock<metadata_sys::BucketMetadataSys>>> {
self.context.as_ref().and_then(|context| context.bucket_metadata().handle())
}
@@ -423,7 +337,9 @@ impl DefaultMultipartUsecase {
}
}
let region = self.global_region().unwrap_or_else(|| RUSTFS_REGION.parse().unwrap());
let region = self
.global_region()
.unwrap_or_else(|| Region::new(RUSTFS_REGION.into()).expect("RUSTFS_REGION constant must be a valid region"));
let output = CompleteMultipartUploadOutput {
bucket: Some(bucket.clone()),
key: Some(key.clone()),
@@ -1181,46 +1097,6 @@ impl DefaultMultipartUsecase {
}
}
#[async_trait::async_trait]
impl MultipartUsecase for DefaultMultipartUsecase {
async fn create_multipart_upload(
&self,
req: CreateMultipartUploadRequest,
) -> MultipartUsecaseResult<CreateMultipartUploadResponse> {
let _ = req;
Err(ApiError::from(StorageError::other(
"DefaultMultipartUsecase::create_multipart_upload is not implemented yet",
)))
}
async fn upload_part(&self, req: UploadPartRequest) -> MultipartUsecaseResult<UploadPartResponse> {
let _ = req;
Err(ApiError::from(StorageError::other(
"DefaultMultipartUsecase::upload_part is not implemented yet",
)))
}
async fn complete_multipart_upload(
&self,
req: CompleteMultipartUploadRequest,
) -> MultipartUsecaseResult<CompleteMultipartUploadResponse> {
let _ = req;
Err(ApiError::from(StorageError::other(
"DefaultMultipartUsecase::complete_multipart_upload is not implemented yet",
)))
}
async fn abort_multipart_upload(
&self,
req: AbortMultipartUploadRequest,
) -> MultipartUsecaseResult<AbortMultipartUploadResponse> {
let _ = req;
Err(ApiError::from(StorageError::other(
"DefaultMultipartUsecase::abort_multipart_upload is not implemented yet",
)))
}
}
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -13,12 +13,11 @@
// limitations under the License.
//! Object application use-case contracts.
#![allow(dead_code)]
use crate::app::context::{AppContext, default_notify_interface, get_global_app_context};
use crate::config::workload_profiles::RustFSBufferConfig;
use crate::error::ApiError;
use crate::storage::access::{ReqInfo, authorize_request, has_bypass_governance_header};
use crate::storage::access::{ReqInfo, authorize_request, has_bypass_governance_header, req_info_mut};
use crate::storage::concurrency::{
CachedGetObject, ConcurrencyManager, GetObjectGuard, get_concurrency_aware_buffer_size, get_concurrency_manager,
};
@@ -38,7 +37,6 @@ use datafusion::arrow::{
use futures::StreamExt;
use http::{HeaderMap, HeaderValue, StatusCode};
use metrics::{counter, histogram};
use rustfs_ecstore::StorageAPI;
use rustfs_ecstore::bucket::quota::checker::QuotaChecker;
use rustfs_ecstore::bucket::{
lifecycle::{
@@ -65,7 +63,8 @@ use rustfs_ecstore::error::{StorageError, is_err_bucket_not_found, is_err_object
use rustfs_ecstore::new_object_layer_fn;
use rustfs_ecstore::set_disk::is_valid_storage_class;
use rustfs_ecstore::store_api::{
BucketOptions, HTTPRangeSpec, ObjectIO, ObjectInfo, ObjectOptions, ObjectToDelete, PutObjReader,
BucketOperations, BucketOptions, HTTPRangeSpec, ObjectIO, ObjectInfo, ObjectOperations, ObjectOptions, ObjectToDelete,
PutObjReader,
};
use rustfs_filemeta::{
REPLICATE_INCOMING_DELETE, ReplicationStatusType, ReplicationType, RestoreStatusOps, VersionPurgeStatusType,
@@ -112,7 +111,49 @@ use tokio_util::io::{ReaderStream, StreamReader};
use tracing::{debug, error, info, instrument, warn};
use uuid::Uuid;
pub type ObjectUsecaseResult<T> = Result<T, ApiError>;
/// Extract trailing-header checksum values, overriding the corresponding input fields.
fn apply_trailing_checksums(
algorithm: Option<&str>,
trailing_headers: &Option<s3s::TrailingHeaders>,
checksums: &mut PutObjectChecksums,
) {
let Some(alg) = algorithm else { return };
let Some(checksum_str) = trailing_headers.as_ref().and_then(|trailer| {
let key = match alg {
ChecksumAlgorithm::CRC32 => rustfs_rio::ChecksumType::CRC32.key(),
ChecksumAlgorithm::CRC32C => rustfs_rio::ChecksumType::CRC32C.key(),
ChecksumAlgorithm::SHA1 => rustfs_rio::ChecksumType::SHA1.key(),
ChecksumAlgorithm::SHA256 => rustfs_rio::ChecksumType::SHA256.key(),
ChecksumAlgorithm::CRC64NVME => rustfs_rio::ChecksumType::CRC64_NVME.key(),
_ => return None,
};
trailer.read(|headers| {
headers
.get(key.unwrap_or_default())
.and_then(|value| value.to_str().ok().map(|s| s.to_string()))
})
}) else {
return;
};
match alg {
ChecksumAlgorithm::CRC32 => checksums.crc32 = checksum_str,
ChecksumAlgorithm::CRC32C => checksums.crc32c = checksum_str,
ChecksumAlgorithm::SHA1 => checksums.sha1 = checksum_str,
ChecksumAlgorithm::SHA256 => checksums.sha256 = checksum_str,
ChecksumAlgorithm::CRC64NVME => checksums.crc64nvme = checksum_str,
_ => (),
}
}
#[derive(Default)]
struct PutObjectChecksums {
crc32: Option<String>,
crc32c: Option<String>,
sha1: Option<String>,
sha256: Option<String>,
crc64nvme: Option<String>,
}
fn normalize_delete_objects_version_id(version_id: Option<String>) -> Result<(Option<String>, Option<Uuid>), String> {
let version_id = version_id.map(|v| v.trim().to_string()).filter(|v| !v.is_empty());
@@ -129,67 +170,13 @@ fn normalize_delete_objects_version_id(version_id: Option<String>) -> Result<(Op
}
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct PutObjectRequest {
pub bucket: String,
pub key: String,
pub content_length: Option<i64>,
pub content_type: Option<String>,
pub version_id: Option<String>,
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct PutObjectResponse {
pub etag: String,
pub version_id: Option<String>,
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct GetObjectRequest {
pub bucket: String,
pub key: String,
pub version_id: Option<String>,
}
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct GetObjectResponse {
pub etag: Option<String>,
pub content_length: i64,
pub version_id: Option<String>,
}
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct DeleteObjectRequest {
pub bucket: String,
pub key: String,
pub version_id: Option<String>,
}
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct DeleteObjectResponse {
pub delete_marker: Option<bool>,
pub version_id: Option<String>,
}
#[async_trait::async_trait]
pub trait ObjectUsecase: Send + Sync {
async fn put_object(&self, req: PutObjectRequest) -> ObjectUsecaseResult<PutObjectResponse>;
async fn get_object(&self, req: GetObjectRequest) -> ObjectUsecaseResult<GetObjectResponse>;
async fn delete_object(&self, req: DeleteObjectRequest) -> ObjectUsecaseResult<DeleteObjectResponse>;
}
#[derive(Clone, Default)]
pub struct DefaultObjectUsecase {
context: Option<Arc<AppContext>>,
}
impl DefaultObjectUsecase {
pub fn new(context: Arc<AppContext>) -> Self {
Self { context: Some(context) }
}
#[cfg(test)]
pub fn without_context() -> Self {
Self { context: None }
}
@@ -200,10 +187,6 @@ impl DefaultObjectUsecase {
}
}
pub fn context(&self) -> Option<Arc<AppContext>> {
self.context.clone()
}
fn bucket_metadata_sys(&self) -> Option<Arc<RwLock<metadata_sys::BucketMetadataSys>>> {
self.context.as_ref().and_then(|context| context.bucket_metadata().handle())
}
@@ -216,6 +199,35 @@ impl DefaultObjectUsecase {
.unwrap_or_else(|| RustFSBufferConfig::default().base_config.default_unknown)
}
async fn check_bucket_quota(&self, bucket: &str, op: QuotaOperation, size: u64) -> S3Result<()> {
let Some(metadata_sys) = self.bucket_metadata_sys() else {
return Ok(());
};
let quota_checker = QuotaChecker::new(metadata_sys);
match quota_checker.check_quota(bucket, op, size).await {
Ok(result) if !result.allowed => Err(S3Error::with_message(
S3ErrorCode::InvalidRequest,
format!(
"Bucket quota exceeded. Current usage: {} bytes, limit: {} bytes",
result.current_usage.unwrap_or(0),
result.quota_limit.unwrap_or(0)
),
)),
Err(e) => {
warn!("Quota check failed for bucket {bucket}: {e}, allowing operation");
Ok(())
}
_ => Ok(()),
}
}
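`check_bucket_quota` consolidates the near-identical inline quota blocks on the put and copy paths (both visible later in this diff) behind one helper. The sketch below isolates the fail-open policy it encodes, with simplified stand-in types for the `QuotaChecker` result:

```rust
// Simplified stand-in for the quota checker's verdict.
enum Verdict {
    Allowed,
    Denied { used: u64, limit: u64 },
}

fn enforce_quota(check: Result<Verdict, std::io::Error>) -> Result<(), String> {
    match check {
        // An explicit over-quota verdict rejects the write.
        Ok(Verdict::Denied { used, limit }) => Err(format!(
            "Bucket quota exceeded. Current usage: {used} bytes, limit: {limit} bytes"
        )),
        // A backend error only warns and lets the operation proceed: fail open.
        Err(e) => {
            eprintln!("quota check failed: {e}, allowing operation");
            Ok(())
        }
        Ok(Verdict::Allowed) => Ok(()),
    }
}

fn main() {
    assert!(enforce_quota(Ok(Verdict::Allowed)).is_ok());
    assert!(enforce_quota(Ok(Verdict::Denied { used: 10, limit: 5 })).is_err());
    assert!(enforce_quota(Err(std::io::Error::other("backend down"))).is_ok());
}
```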
fn spawn_cache_invalidation(bucket: String, key: String, version_id: Option<String>) {
let manager = get_concurrency_manager();
tokio::spawn(async move {
manager.invalidate_cache_versioned(&bucket, &key, version_id.as_deref()).await;
});
}
#[instrument(level = "debug", skip(self, _fs, req))]
pub async fn execute_put_object(&self, _fs: &FS, req: S3Request<PutObjectInput>) -> S3Result<S3Response<PutObjectOutput>> {
if let Some(context) = &self.context {
@@ -273,32 +285,9 @@ impl DefaultObjectUsecase {
return Err(s3_error!(AccessDenied, "Access Denied"));
}
// check quota for put operation
if let Some(size) = content_length
&& let Some(metadata_sys) = self.bucket_metadata_sys()
{
let quota_checker = QuotaChecker::new(metadata_sys);
match quota_checker
.check_quota(&bucket, QuotaOperation::PutObject, size as u64)
.await
{
Ok(check_result) => {
if !check_result.allowed {
return Err(S3Error::with_message(
S3ErrorCode::InvalidRequest,
format!(
"Bucket quota exceeded. Current usage: {} bytes, limit: {} bytes",
check_result.current_usage.unwrap_or(0),
check_result.quota_limit.unwrap_or(0)
),
));
}
}
Err(e) => {
warn!("Quota check failed for bucket {}: {}, allowing operation", bucket, e);
}
}
if let Some(size) = content_length {
self.check_bucket_quota(&bucket, QuotaOperation::PutObject, size as u64)
.await?;
}
let Some(body) = body else { return Err(s3_error!(IncompleteBody)) };
@@ -330,10 +319,6 @@ impl DefaultObjectUsecase {
StreamReader::new(body.map(|f| f.map_err(|e| std::io::Error::other(e.to_string())))),
);
// let body = Box::new(StreamReader::new(body.map(|f| f.map_err(|e| std::io::Error::other(e.to_string())))));
// let mut reader = PutObjReader::new(body, content_length as usize);
let store = get_validated_store(&bucket).await?;
// TDD: Get bucket default encryption configuration
@@ -514,10 +499,6 @@ impl DefaultObjectUsecase {
// Fast in-memory update for immediate quota consistency
rustfs_ecstore::data_usage::increment_bucket_usage_memory(&bucket, obj_info.size as u64).await;
// Invalidate cache for the written object to prevent stale data
let manager = get_concurrency_manager();
let put_bucket = bucket.clone();
let put_key = key.clone();
let put_version = obj_info.version_id.map(|v| v.to_string());
helper = helper.object(obj_info.clone());
@@ -525,12 +506,7 @@ impl DefaultObjectUsecase {
helper = helper.version_id(version_id.clone());
}
let put_version_clone = put_version.clone();
tokio::spawn(async move {
manager
.invalidate_cache_versioned(&put_bucket, &put_key, put_version_clone.as_deref())
.await;
});
Self::spawn_cache_invalidation(bucket.clone(), key.clone(), put_version.clone());
let e_tag = obj_info.etag.clone().map(|etag| to_s3s_etag(&etag));
@@ -543,77 +519,35 @@ impl DefaultObjectUsecase {
schedule_replication(obj_info, store, dsc, ReplicationType::Object).await;
}
let mut checksum_crc32 = input.checksum_crc32;
let mut checksum_crc32c = input.checksum_crc32c;
let mut checksum_sha1 = input.checksum_sha1;
let mut checksum_sha256 = input.checksum_sha256;
let mut checksum_crc64nvme = input.checksum_crc64nvme;
if let Some(alg) = &input.checksum_algorithm
&& let Some(Some(checksum_str)) = req.trailing_headers.as_ref().map(|trailer| {
let key = match alg.as_str() {
ChecksumAlgorithm::CRC32 => rustfs_rio::ChecksumType::CRC32.key(),
ChecksumAlgorithm::CRC32C => rustfs_rio::ChecksumType::CRC32C.key(),
ChecksumAlgorithm::SHA1 => rustfs_rio::ChecksumType::SHA1.key(),
ChecksumAlgorithm::SHA256 => rustfs_rio::ChecksumType::SHA256.key(),
ChecksumAlgorithm::CRC64NVME => rustfs_rio::ChecksumType::CRC64_NVME.key(),
_ => return None,
};
trailer.read(|headers| {
headers
.get(key.unwrap_or_default())
.and_then(|value| value.to_str().ok().map(|s| s.to_string()))
})
})
{
match alg.as_str() {
ChecksumAlgorithm::CRC32 => checksum_crc32 = checksum_str,
ChecksumAlgorithm::CRC32C => checksum_crc32c = checksum_str,
ChecksumAlgorithm::SHA1 => checksum_sha1 = checksum_str,
ChecksumAlgorithm::SHA256 => checksum_sha256 = checksum_str,
ChecksumAlgorithm::CRC64NVME => checksum_crc64nvme = checksum_str,
_ => (),
}
}
let mut checksums = PutObjectChecksums {
crc32: input.checksum_crc32,
crc32c: input.checksum_crc32c,
sha1: input.checksum_sha1,
sha256: input.checksum_sha256,
crc64nvme: input.checksum_crc64nvme,
};
apply_trailing_checksums(
input.checksum_algorithm.as_ref().map(|a| a.as_str()),
&req.trailing_headers,
&mut checksums,
);
let output = PutObjectOutput {
e_tag,
server_side_encryption: effective_sse, // TDD: Return effective encryption config
server_side_encryption: effective_sse,
sse_customer_algorithm: sse_customer_algorithm.clone(),
sse_customer_key_md5: sse_customer_key_md5.clone(),
ssekms_key_id: effective_kms_key_id, // TDD: Return effective KMS key ID
checksum_crc32,
checksum_crc32c,
checksum_sha1,
checksum_sha256,
checksum_crc64nvme,
ssekms_key_id: effective_kms_key_id,
checksum_crc32: checksums.crc32,
checksum_crc32c: checksums.crc32c,
checksum_sha1: checksums.sha1,
checksum_sha256: checksums.sha256,
checksum_crc64nvme: checksums.crc64nvme,
version_id: put_version,
..Default::default()
};
// TODO fix response for POST Policy (multipart/form-data) wait s3s crate update,fix issue #1564
// // If it is a POST Policy(multipart/form-data) path, the PutObjectInput carries the success_action_* field
// // Here, the response is uniformly rewritten, with the default being 204, redirect prioritizing 303, and status supporting 200/201/204
// if input.success_action_status.is_some() || input.success_action_redirect.is_some() {
// let mut form_fields = HashMap::<String, String>::new();
// if let Some(v) = &input.success_action_status {
// form_fields.insert("success_action_status".to_string(), v.to_string());
// }
// if let Some(v) = &input.success_action_redirect {
// form_fields.insert("success_action_redirect".to_string(), v.to_string());
// }
//
// // obj_info.etag has been converted to e_tag (s3s etag) above, so try to pass the original string here
// let etag_str = e_tag.as_ref().map(|v| v.as_str());
//
// // Returns using POST semantics: 204/303/201/200
// let resp = build_post_object_success_response(&form_fields, &bucket, &key, etag_str, None)?;
//
// // Keep helper event complete (note: (StatusCode, Body) is returned here instead of PutObjectOutput)
// let result = Ok(resp);
// let _ = helper.complete(&result);
// return result;
// }
// TODO fix response for POST Policy (multipart/form-data), wait s3s crate update, fix issue #1564
let result = Ok(S3Response::new(output));
let _ = helper.complete(&result);
@@ -1200,7 +1134,10 @@ impl DefaultObjectUsecase {
// based on the wait time. Longer wait times indicate higher system
// load, which triggers more conservative I/O parameters.
let permit_wait_start = std::time::Instant::now();
let _disk_permit = manager.acquire_disk_read_permit().await;
let _disk_permit = manager
.acquire_disk_read_permit()
.await
.map_err(|_| s3_error!(InternalError, "disk read semaphore closed"))?;
let permit_wait_duration = permit_wait_start.elapsed();
// Calculate adaptive I/O strategy from permit wait time
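The hunk above stops discarding the result of `acquire_disk_read_permit` and surfaces a failed acquisition as an S3 error. A short sketch of the underlying behavior this guards against, assuming tokio's `Semaphore` (with the `sync` and `macros` features enabled):

```rust
use tokio::sync::Semaphore;

#[tokio::main]
async fn main() {
    let sem = Semaphore::new(2);
    let _permit = sem.acquire().await.expect("an open semaphore grants a permit");
    sem.close();
    // After close(), every further acquire returns Err(AcquireError) rather
    // than blocking forever — which is why the caller must handle a Result.
    assert!(sem.acquire().await.is_err());
}
```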
@@ -2107,34 +2044,9 @@ impl DefaultObjectUsecase {
src_info.user_defined.insert(k, v);
}
// check quota for copy operation
let has_bucket_metadata = if let Some(metadata_sys) = self.bucket_metadata_sys() {
let quota_checker = QuotaChecker::new(metadata_sys);
match quota_checker
.check_quota(&bucket, QuotaOperation::CopyObject, src_info.size as u64)
.await
{
Ok(check_result) => {
if !check_result.allowed {
return Err(S3Error::with_message(
S3ErrorCode::InvalidRequest,
format!(
"Bucket quota exceeded. Current usage: {} bytes, limit: {} bytes",
check_result.current_usage.unwrap_or(0),
check_result.quota_limit.unwrap_or(0)
),
));
}
}
Err(e) => {
warn!("Quota check failed for bucket {}: {}, allowing operation", bucket, e);
}
}
true
} else {
false
};
self.check_bucket_quota(&bucket, QuotaOperation::CopyObject, src_info.size as u64)
.await?;
let has_bucket_metadata = self.bucket_metadata_sys().is_some();
let oi = store
.copy_object(&src_bucket, &src_key, &bucket, &key, &mut src_info, &src_opts, &dst_opts)
@@ -2146,17 +2058,8 @@ impl DefaultObjectUsecase {
rustfs_ecstore::data_usage::increment_bucket_usage_memory(&bucket, oi.size as u64).await;
}
// Invalidate cache for the destination object to prevent stale data
let manager = get_concurrency_manager();
let dest_bucket = bucket.clone();
let dest_key = key.clone();
let dest_version = oi.version_id.map(|v| v.to_string());
let dest_version_clone = dest_version.clone();
tokio::spawn(async move {
manager
.invalidate_cache_versioned(&dest_bucket, &dest_key, dest_version_clone.as_deref())
.await;
});
Self::spawn_cache_invalidation(bucket.clone(), key.clone(), dest_version.clone());
// warn!("copy_object oi {:?}", &oi);
let object_info = oi.clone();
@@ -2254,7 +2157,7 @@ impl DefaultObjectUsecase {
};
{
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = req_info_mut(&mut req)?;
req_info.bucket = Some(bucket.clone());
req_info.object = Some(obj_id.key.clone());
req_info.version_id = version_id.clone();
@@ -2606,16 +2509,7 @@ impl DefaultObjectUsecase {
// Fast in-memory update for immediate quota consistency
rustfs_ecstore::data_usage::decrement_bucket_usage_memory(&bucket, obj_info.size as u64).await;
// Invalidate cache for the deleted object
let manager = get_concurrency_manager();
let del_bucket = bucket.clone();
let del_key = key.clone();
let del_version = obj_info.version_id.map(|v| v.to_string());
tokio::spawn(async move {
manager
.invalidate_cache_versioned(&del_bucket, &del_key, del_version.as_deref())
.await;
});
Self::spawn_cache_invalidation(bucket.clone(), key.clone(), obj_info.version_id.map(|v| v.to_string()));
if obj_info.name.is_empty() {
if replicate_force_delete {
@@ -3489,49 +3383,30 @@ impl DefaultObjectUsecase {
}
}
let mut checksum_crc32 = input.checksum_crc32;
let mut checksum_crc32c = input.checksum_crc32c;
let mut checksum_sha1 = input.checksum_sha1;
let mut checksum_sha256 = input.checksum_sha256;
let mut checksum_crc64nvme = input.checksum_crc64nvme;
if let Some(alg) = &input.checksum_algorithm
&& let Some(Some(checksum_str)) = req.trailing_headers.as_ref().map(|trailer| {
let key = match alg.as_str() {
ChecksumAlgorithm::CRC32 => rustfs_rio::ChecksumType::CRC32.key(),
ChecksumAlgorithm::CRC32C => rustfs_rio::ChecksumType::CRC32C.key(),
ChecksumAlgorithm::SHA1 => rustfs_rio::ChecksumType::SHA1.key(),
ChecksumAlgorithm::SHA256 => rustfs_rio::ChecksumType::SHA256.key(),
ChecksumAlgorithm::CRC64NVME => rustfs_rio::ChecksumType::CRC64_NVME.key(),
_ => return None,
};
trailer.read(|headers| {
headers
.get(key.unwrap_or_default())
.and_then(|value| value.to_str().ok().map(|s| s.to_string()))
})
})
{
match alg.as_str() {
ChecksumAlgorithm::CRC32 => checksum_crc32 = checksum_str,
ChecksumAlgorithm::CRC32C => checksum_crc32c = checksum_str,
ChecksumAlgorithm::SHA1 => checksum_sha1 = checksum_str,
ChecksumAlgorithm::SHA256 => checksum_sha256 = checksum_str,
ChecksumAlgorithm::CRC64NVME => checksum_crc64nvme = checksum_str,
_ => (),
}
}
let mut checksums = PutObjectChecksums {
crc32: input.checksum_crc32,
crc32c: input.checksum_crc32c,
sha1: input.checksum_sha1,
sha256: input.checksum_sha256,
crc64nvme: input.checksum_crc64nvme,
};
apply_trailing_checksums(
input.checksum_algorithm.as_ref().map(|a| a.as_str()),
&req.trailing_headers,
&mut checksums,
);
warn!(
"put object extract checksum_crc32={checksum_crc32:?}, checksum_crc32c={checksum_crc32c:?}, checksum_sha1={checksum_sha1:?}, checksum_sha256={checksum_sha256:?}, checksum_crc64nvme={checksum_crc64nvme:?}",
"put object extract checksum_crc32={:?}, checksum_crc32c={:?}, checksum_sha1={:?}, checksum_sha256={:?}, checksum_crc64nvme={:?}",
checksums.crc32, checksums.crc32c, checksums.sha1, checksums.sha256, checksums.crc64nvme,
);
let output = PutObjectOutput {
checksum_crc32,
checksum_crc32c,
checksum_sha1,
checksum_sha256,
checksum_crc64nvme,
checksum_crc32: checksums.crc32,
checksum_crc32c: checksums.crc32c,
checksum_sha1: checksums.sha1,
checksum_sha256: checksums.sha256,
checksum_crc64nvme: checksums.crc64nvme,
..Default::default()
};
let result = Ok(S3Response::new(output));
@@ -3540,30 +3415,6 @@ impl DefaultObjectUsecase {
}
}
#[async_trait::async_trait]
impl ObjectUsecase for DefaultObjectUsecase {
async fn put_object(&self, req: PutObjectRequest) -> ObjectUsecaseResult<PutObjectResponse> {
let _ = req;
Err(ApiError::from(StorageError::other(
"DefaultObjectUsecase::put_object DTO path is not implemented yet",
)))
}
async fn get_object(&self, req: GetObjectRequest) -> ObjectUsecaseResult<GetObjectResponse> {
let _ = req;
Err(ApiError::from(StorageError::other(
"DefaultObjectUsecase::get_object DTO path is not implemented yet",
)))
}
async fn delete_object(&self, req: DeleteObjectRequest) -> ObjectUsecaseResult<DeleteObjectResponse> {
let _ = req;
Err(ApiError::from(StorageError::other(
"DefaultObjectUsecase::delete_object DTO path is not implemented yet",
)))
}
}
#[cfg(test)]
mod tests {
use super::*;

View File

@@ -12,8 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
//! Adaptive buffer sizing optimization for different workload types.
//!
//! This module provides intelligent buffer size selection based on file size and workload profile
@@ -86,6 +84,7 @@ pub enum WorkloadProfile {
/// Secure storage: security first, memory constrained for compliance
SecureStorage,
/// Custom configuration for specialized requirements
#[allow(dead_code)]
Custom(BufferConfig),
}
@@ -107,6 +106,7 @@ pub struct BufferConfig {
#[derive(Debug, Clone)]
pub struct RustFSBufferConfig {
/// Selected workload profile
#[allow(dead_code)]
pub workload: WorkloadProfile,
/// Computed buffer configuration (either from profile or custom)
pub base_config: BufferConfig,
@@ -309,6 +309,7 @@ impl BufferConfig {
}
/// Validate the buffer configuration
#[allow(dead_code)]
pub fn validate(&self) -> Result<(), String> {
if self.min_size == 0 {
return Err("min_size must be greater than 0".to_string());

View File

@@ -55,21 +55,6 @@ pub fn license_check() -> Result<()> {
Ok(())
});
// let invalid_license = config::get_config().license.as_ref().map(|license| {
// if license.is_empty() {
// error!("License is empty");
// return Err(Error::other("Incorrect license, please contact RustFS.".to_string()));
// }
// let token = appauth::token::parse_license(license)?;
// if token.expired < SystemTime::now().duration_since(UNIX_EPOCH)?.as_secs() {
// error!("License expired");
// return Err(Error::other("Incorrect license, please contact RustFS.".to_string()));
// }
// info!("License is valid ! expired at {}", token.expired);
// Ok(())
// });
if invalid_license.is_none() || invalid_license.is_some_and(|v| v.is_err()) {
return Err(Error::other("Incorrect license, please contact RustFS."));
}

View File

@@ -46,7 +46,6 @@ use rustfs_common::{GlobalReadiness, SystemStage, set_global_addr};
use rustfs_credentials::init_global_action_credentials;
use rustfs_ecstore::store::init_lock_clients;
use rustfs_ecstore::{
StorageAPI,
bucket::metadata_sys::init_bucket_metadata_sys,
bucket::replication::{get_global_replication_pool, init_background_replication},
config as ecconfig,
@@ -56,6 +55,7 @@ use rustfs_ecstore::{
set_global_endpoints,
store::ECStore,
store::init_local_disks,
store_api::BucketOperations,
store_api::BucketOptions,
update_erasure_type,
};
@@ -94,8 +94,8 @@ fn main() {
.expect("Failed to build Tokio runtime");
let result = runtime.block_on(async_main());
if let Err(ref e) = result {
eprintln!("{} Server encountered an error and is shutting down: {}", jiff::Zoned::now(), e);
error!("Server encountered an error and is shutting down: {}", e);
// Use eprintln as tracing may not be initialized at this point
eprintln!("[FATAL] Server encountered an error and is shutting down: {e}");
std::process::exit(1);
}
}
@@ -110,7 +110,8 @@ async fn async_main() -> Result<()> {
let guard = match init_obs(Some(config.clone().obs_endpoint)).await {
Ok(g) => g,
Err(e) => {
println!("Failed to initialize observability: {e}");
// Use eprintln as tracing is not yet initialized
eprintln!("[FATAL] Failed to initialize observability: {e}");
return Err(Error::other(e));
}
};
@@ -434,13 +435,13 @@ async fn run(config: config::Config) -> Result<()> {
init_metrics_system(ctx.clone());
}
println!(
info!(
target: "rustfs::main::run",
"RustFS server version: {} started successfully at {}, current time: {}",
version::get_version(),
&server_address,
jiff::Zoned::now()
);
info!(target: "rustfs::main::run","server started successfully at {}", &server_address);
// 4. Mark as Full Ready now that critical components are warm
readiness.mark_stage(SystemStage::FullReady);
@@ -580,9 +581,5 @@ async fn handle_shutdown(
// the last updated status is stopped
state_manager.update(ServiceState::Stopped);
info!(
target: "rustfs::main::handle_shutdown",
"Server stopped current "
);
println!("Server stopped successfully.");
info!(target: "rustfs::main::handle_shutdown", "Server stopped successfully.");
}

View File

@@ -187,14 +187,9 @@ pub async fn start_http_server(
"Console WebUI (localhost): {protocol}://127.0.0.1:{server_port}/rustfs/console/index.html",
);
println!("Console WebUI Start Time: {now_time}");
println!("Console WebUI available at: {protocol}://{local_ip_str}:{server_port}/rustfs/console/index.html");
println!("Console WebUI (localhost): {protocol}://127.0.0.1:{server_port}/rustfs/console/index.html");
} else {
info!(target: "rustfs::main::startup","RustFS API: {api_endpoints} {localhost_endpoint}");
println!("RustFS Http API: {api_endpoints} {localhost_endpoint}");
println!("RustFS Start Time: {now_time}");
info!(target: "rustfs::main::startup", "RustFS API: {api_endpoints} {localhost_endpoint}");
info!(target: "rustfs::main::startup", "RustFS Start Time: {now_time}");
if rustfs_credentials::DEFAULT_ACCESS_KEY.eq(&config.access_key)
&& rustfs_credentials::DEFAULT_SECRET_KEY.eq(&config.secret_key)
{

View File

@@ -129,29 +129,23 @@ pub(crate) fn get_tokio_runtime_builder() -> tokio::runtime::Builder {
if print_tokio_thread_enable() {
builder
.on_thread_start(|| {
let id = std::thread::current().id();
println!(
"RustFS Worker Thread running - initializing resources time: {:?}, thread id: {:?}",
jiff::Zoned::now().to_string(),
id
);
tracing::trace!(thread_id = ?std::thread::current().id(), "worker thread started");
})
.on_thread_stop(|| {
let id = std::thread::current().id();
println!(
"RustFS Worker Thread stopping - cleaning up resources time: {:?}, thread id: {:?}",
jiff::Zoned::now().to_string(),
id
)
tracing::trace!(thread_id = ?std::thread::current().id(), "worker thread stopped");
});
}
if !rustfs_obs::is_production_environment() {
println!(
"Starting Tokio runtime with configured parameters:\n\
worker_threads: {worker_threads}, max_blocking_threads: {max_blocking_threads}, \
thread_stack_size: {thread_stack_size}, thread_keep_alive: {thread_keep_alive}, \
global_queue_interval: {global_queue_interval}, event_interval: {event_interval}, \
max_io_events_per_tick: {max_io_events_per_tick}, thread_name: {thread_name}"
tracing::debug!(
worker_threads,
max_blocking_threads,
thread_stack_size,
thread_keep_alive,
global_queue_interval,
event_interval,
max_io_events_per_tick,
thread_name,
"Starting Tokio runtime with configured parameters"
);
}
builder
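These runtime hooks trade timestamped `println!` calls for `tracing` events with structured fields, so log processors can filter on fields such as `worker_threads` directly. A minimal runnable sketch of the same pattern, assuming `tracing` 0.1 and `tracing-subscriber` 0.3 as dependencies:

```rust
fn main() {
    // Install a subscriber that shows trace-level events for the demo.
    tracing_subscriber::fmt().with_max_level(tracing::Level::TRACE).init();

    let worker_threads = 4usize;
    // Structured key-value fields instead of an interpolated string:
    tracing::debug!(worker_threads, "Starting Tokio runtime with configured parameters");
    tracing::trace!(thread_id = ?std::thread::current().id(), "worker thread started");
}
```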

View File

@@ -24,11 +24,11 @@ use crate::error::ApiError;
use crate::license::license_check;
use crate::server::RemoteAddr;
use metrics::counter;
use rustfs_ecstore::StorageAPI;
use rustfs_ecstore::bucket::metadata_sys;
use rustfs_ecstore::bucket::policy_sys::PolicySys;
use rustfs_ecstore::error::StorageError;
use rustfs_ecstore::new_object_layer_fn;
use rustfs_ecstore::store_api::ObjectOperations;
use rustfs_iam::error::Error as IamError;
use rustfs_policy::policy::action::{Action, S3Action};
use rustfs_policy::policy::{Args, BucketPolicyArgs};
@@ -37,7 +37,6 @@ use s3s::access::{S3Access, S3AccessContext};
use s3s::{S3Error, S3ErrorCode, S3Request, S3Result, dto::*, s3_error};
use std::collections::HashMap;
#[allow(dead_code)]
#[derive(Default, Clone, Debug)]
pub(crate) struct ReqInfo {
pub cred: Option<rustfs_credentials::Credentials>,
@@ -45,6 +44,7 @@ pub(crate) struct ReqInfo {
pub bucket: Option<String>,
pub object: Option<String>,
pub version_id: Option<String>,
#[allow(dead_code)]
pub region: Option<s3s::region::Region>,
}
@@ -201,6 +201,23 @@ async fn check_acl_access<T>(req: &S3Request<T>, req_info: &ReqInfo, action: &Ac
Ok(acl_allows(&acl, user_id, is_authenticated, permission, ignore_public_acls))
}
pub(crate) fn req_info_ref<T>(req: &S3Request<T>) -> S3Result<&ReqInfo> {
req.extensions
.get::<ReqInfo>()
.ok_or_else(|| s3_error!(InternalError, "ReqInfo not found in request extensions"))
}
pub(crate) fn req_info_mut<T>(req: &mut S3Request<T>) -> S3Result<&mut ReqInfo> {
req.extensions
.get_mut::<ReqInfo>()
.ok_or_else(|| s3_error!(InternalError, "ReqInfo not found in request extensions"))
}
fn ext_req_info_mut(ext: &mut http::Extensions) -> S3Result<&mut ReqInfo> {
ext.get_mut::<ReqInfo>()
.ok_or_else(|| s3_error!(InternalError, "ReqInfo not found in request extensions"))
}
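The three helpers above are the commit's replacement for the repeated `.expect("ReqInfo not found")` lookups rewritten throughout the rest of this file. A self-contained sketch of the pattern against a bare `http::Extensions` (http 1.x assumed) rather than the full `S3Request` wrapper:

```rust
use http::Extensions;

#[derive(Default, Clone, Debug)]
struct ReqInfo {
    bucket: Option<String>,
}

// A missing ReqInfo becomes a recoverable error instead of a process panic.
fn req_info_mut(ext: &mut Extensions) -> Result<&mut ReqInfo, String> {
    ext.get_mut::<ReqInfo>()
        .ok_or_else(|| "ReqInfo not found in request extensions".to_string())
}

fn main() -> Result<(), String> {
    let mut ext = Extensions::new();
    ext.insert(ReqInfo::default());
    // Old call sites used `.expect(...)`; they now propagate with `?`.
    req_info_mut(&mut ext)?.bucket = Some("demo".into());
    Ok(())
}
```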
#[derive(Clone, Debug)]
pub(crate) struct ObjectTagConditions(pub HashMap<String, Vec<String>>);
@@ -241,7 +258,7 @@ pub async fn authorize_request<T>(req: &mut S3Request<T>, action: Action) -> S3R
let remote_addr = req.extensions.get::<Option<RemoteAddr>>().and_then(|opt| opt.map(|a| a.0));
let object_tag_conditions = req.extensions.get::<ObjectTagConditions>().cloned();
let req_info = req.extensions.get::<ReqInfo>().expect("ReqInfo not found");
let req_info = req_info_ref(req)?;
if let Some(cred) = &req_info.cred {
let Ok(iam_store) = rustfs_iam::get() else {
@@ -550,7 +567,7 @@ impl S3Access for FS {
async fn create_bucket(&self, req: &mut S3Request<CreateBucketInput>) -> S3Result<()> {
license_check().map_err(|er| s3_error!(AccessDenied, "{:?}", er.to_string()))?;
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::CreateBucketAction)).await?;
@@ -588,7 +605,7 @@ impl S3Access for FS {
}
};
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(src_bucket.clone());
req_info.object = Some(src_key.clone());
req_info.version_id = version_id.clone();
@@ -601,7 +618,7 @@ impl S3Access for FS {
authorize_request(req, Action::S3Action(S3Action::GetObjectAction)).await?;
}
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
req_info.object = Some(req.input.key.clone());
@@ -622,7 +639,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn delete_bucket(&self, req: &mut S3Request<DeleteBucketInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::DeleteBucketAction)).await?;
@@ -647,7 +664,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn delete_bucket_cors(&self, req: &mut S3Request<DeleteBucketCorsInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::DeleteBucketCorsAction)).await
@@ -657,7 +674,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn delete_bucket_encryption(&self, req: &mut S3Request<DeleteBucketEncryptionInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::PutBucketEncryptionAction)).await
@@ -687,7 +704,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn delete_bucket_lifecycle(&self, req: &mut S3Request<DeleteBucketLifecycleInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::PutBucketLifecycleAction)).await
@@ -714,7 +731,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn delete_bucket_policy(&self, req: &mut S3Request<DeleteBucketPolicyInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::DeleteBucketPolicyAction)).await
@@ -724,7 +741,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn delete_bucket_replication(&self, req: &mut S3Request<DeleteBucketReplicationInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::PutReplicationConfigurationAction)).await
@@ -734,7 +751,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn delete_bucket_tagging(&self, req: &mut S3Request<DeleteBucketTaggingInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::PutBucketTaggingAction)).await
@@ -751,7 +768,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn delete_object(&self, req: &mut S3Request<DeleteObjectInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
req_info.object = Some(req.input.key.clone());
req_info.version_id = req.input.version_id.clone();
@@ -775,7 +792,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn delete_object_tagging(&self, req: &mut S3Request<DeleteObjectTaggingInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
req_info.object = Some(req.input.key.clone());
req_info.version_id = req.input.version_id.clone();
@@ -797,7 +814,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn delete_objects(&self, req: &mut S3Request<DeleteObjectsInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
req_info.object = None;
req_info.version_id = None;
@@ -816,7 +833,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn delete_public_access_block(&self, req: &mut S3Request<DeletePublicAccessBlockInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::DeleteBucketPublicAccessBlockAction)).await
@@ -836,7 +853,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn get_bucket_acl(&self, req: &mut S3Request<GetBucketAclInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::GetBucketAclAction)).await
@@ -856,7 +873,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn get_bucket_cors(&self, req: &mut S3Request<GetBucketCorsInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::GetBucketCorsAction)).await
@@ -866,7 +883,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn get_bucket_encryption(&self, req: &mut S3Request<GetBucketEncryptionInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::GetBucketEncryptionAction)).await
@@ -899,7 +916,7 @@ impl S3Access for FS {
&self,
req: &mut S3Request<GetBucketLifecycleConfigurationInput>,
) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::GetBucketLifecycleAction)).await
@@ -909,7 +926,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn get_bucket_location(&self, req: &mut S3Request<GetBucketLocationInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::GetBucketLocationAction)).await
@@ -936,7 +953,7 @@ impl S3Access for FS {
&self,
req: &mut S3Request<GetBucketNotificationConfigurationInput>,
) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::GetBucketNotificationAction)).await
@@ -953,7 +970,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn get_bucket_policy(&self, req: &mut S3Request<GetBucketPolicyInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, get_bucket_policy_authorize_action()).await
@@ -963,7 +980,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn get_bucket_policy_status(&self, req: &mut S3Request<GetBucketPolicyStatusInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::GetBucketPolicyStatusAction)).await
@@ -973,7 +990,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn get_bucket_replication(&self, req: &mut S3Request<GetBucketReplicationInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::GetReplicationConfigurationAction)).await
@@ -990,7 +1007,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn get_bucket_tagging(&self, req: &mut S3Request<GetBucketTaggingInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::GetBucketTaggingAction)).await
@@ -1000,7 +1017,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn get_bucket_versioning(&self, req: &mut S3Request<GetBucketVersioningInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::GetBucketVersioningAction)).await
@@ -1017,7 +1034,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn get_object(&self, req: &mut S3Request<GetObjectInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
req_info.object = Some(req.input.key.clone());
req_info.version_id = req.input.version_id.clone();
@@ -1034,7 +1051,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn get_object_acl(&self, req: &mut S3Request<GetObjectAclInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
req_info.object = Some(req.input.key.clone());
req_info.version_id = req.input.version_id.clone();
@@ -1051,7 +1068,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn get_object_attributes(&self, req: &mut S3Request<GetObjectAttributesInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
req_info.object = Some(req.input.key.clone());
req_info.version_id = req.input.version_id.clone();
@@ -1081,7 +1098,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn get_object_legal_hold(&self, req: &mut S3Request<GetObjectLegalHoldInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
req_info.object = Some(req.input.key.clone());
req_info.version_id = req.input.version_id.clone();
@@ -1093,7 +1110,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn get_object_lock_configuration(&self, req: &mut S3Request<GetObjectLockConfigurationInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::GetBucketObjectLockConfigurationAction)).await
@@ -1103,7 +1120,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn get_object_retention(&self, req: &mut S3Request<GetObjectRetentionInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
req_info.object = Some(req.input.key.clone());
req_info.version_id = req.input.version_id.clone();
@@ -1115,7 +1132,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn get_object_tagging(&self, req: &mut S3Request<GetObjectTaggingInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
req_info.object = Some(req.input.key.clone());
req_info.version_id = req.input.version_id.clone();
@@ -1139,7 +1156,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn get_public_access_block(&self, req: &mut S3Request<GetPublicAccessBlockInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::GetBucketPublicAccessBlockAction)).await
@@ -1149,7 +1166,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn head_bucket(&self, req: &mut S3Request<HeadBucketInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::ListBucketAction)).await
@@ -1159,7 +1176,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn head_object(&self, req: &mut S3Request<HeadObjectInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
req_info.object = Some(req.input.key.clone());
req_info.version_id = req.input.version_id.clone();
@@ -1224,7 +1241,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn list_multipart_uploads(&self, req: &mut S3Request<ListMultipartUploadsInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::ListBucketMultipartUploadsAction)).await
@@ -1235,7 +1252,7 @@ impl S3Access for FS {
/// Returns `Ok(())` if the request is allowed, or an error if access is denied or another
/// authorization-related issue occurs.
async fn list_object_versions(&self, req: &mut S3Request<ListObjectVersionsInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::ListBucketVersionsAction)).await
}
@@ -1244,7 +1261,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn list_objects(&self, req: &mut S3Request<ListObjectsInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::ListBucketAction)).await
@@ -1254,7 +1271,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn list_objects_v2(&self, req: &mut S3Request<ListObjectsV2Input>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::ListBucketAction)).await
@@ -1281,7 +1298,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn put_bucket_acl(&self, req: &mut S3Request<PutBucketAclInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::PutBucketAclAction)).await
@@ -1301,7 +1318,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn put_bucket_cors(&self, req: &mut S3Request<PutBucketCorsInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::PutBucketCorsAction)).await
@@ -1311,7 +1328,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn put_bucket_encryption(&self, req: &mut S3Request<PutBucketEncryptionInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::PutBucketEncryptionAction)).await
@@ -1344,7 +1361,7 @@ impl S3Access for FS {
&self,
req: &mut S3Request<PutBucketLifecycleConfigurationInput>,
) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::PutBucketLifecycleAction)).await
@@ -1371,7 +1388,7 @@ impl S3Access for FS {
&self,
req: &mut S3Request<PutBucketNotificationConfigurationInput>,
) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::PutBucketNotificationAction)).await
@@ -1388,7 +1405,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn put_bucket_policy(&self, req: &mut S3Request<PutBucketPolicyInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, put_bucket_policy_authorize_action()).await
@@ -1398,7 +1415,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn put_bucket_replication(&self, req: &mut S3Request<PutBucketReplicationInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::PutReplicationConfigurationAction)).await
@@ -1415,7 +1432,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn put_bucket_tagging(&self, req: &mut S3Request<PutBucketTaggingInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::PutBucketTaggingAction)).await
@@ -1425,7 +1442,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn put_bucket_versioning(&self, req: &mut S3Request<PutBucketVersioningInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::PutBucketVersioningAction)).await
@@ -1444,7 +1461,7 @@ impl S3Access for FS {
async fn put_object(&self, req: &mut S3Request<PutObjectInput>) -> S3Result<()> {
license_check().map_err(|er| s3_error!(AccessDenied, "{:?}", er.to_string()))?;
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
req_info.object = Some(req.input.key.clone());
req_info.version_id = req.input.version_id.clone();
@@ -1456,7 +1473,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn put_object_acl(&self, req: &mut S3Request<PutObjectAclInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
req_info.object = Some(req.input.key.clone());
req_info.version_id = req.input.version_id.clone();
@@ -1473,7 +1490,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn put_object_legal_hold(&self, req: &mut S3Request<PutObjectLegalHoldInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
req_info.object = Some(req.input.key.clone());
req_info.version_id = req.input.version_id.clone();
@@ -1485,7 +1502,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn put_object_lock_configuration(&self, req: &mut S3Request<PutObjectLockConfigurationInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::PutBucketObjectLockConfigurationAction)).await
@@ -1493,7 +1510,7 @@ impl S3Access for FS {
/// Checks whether the PutObjectRetention request has access to the resources.
async fn put_object_retention(&self, req: &mut S3Request<PutObjectRetentionInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
req_info.object = Some(req.input.key.clone());
req_info.version_id = req.input.version_id.clone();
@@ -1512,7 +1529,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn put_object_tagging(&self, req: &mut S3Request<PutObjectTaggingInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
req_info.object = Some(req.input.key.clone());
req_info.version_id = req.input.version_id.clone();
@@ -1529,7 +1546,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn put_public_access_block(&self, req: &mut S3Request<PutPublicAccessBlockInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
authorize_request(req, Action::S3Action(S3Action::PutBucketPublicAccessBlockAction)).await
@@ -1539,7 +1556,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn restore_object(&self, req: &mut S3Request<RestoreObjectInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
req_info.object = Some(req.input.key.clone());
req_info.version_id = req.input.version_id.clone();
@@ -1551,7 +1568,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn select_object_content(&self, req: &mut S3Request<SelectObjectContentInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
req_info.object = Some(req.input.key.clone());
@@ -1562,7 +1579,7 @@ impl S3Access for FS {
///
/// This method returns `Ok(())` by default.
async fn upload_part(&self, req: &mut S3Request<UploadPartInput>) -> S3Result<()> {
let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
let req_info = ext_req_info_mut(&mut req.extensions)?;
req_info.bucket = Some(req.input.bucket.clone());
req_info.object = Some(req.input.key.clone());

View File

@@ -134,7 +134,6 @@ impl IoLoadLevel {
/// let enable_readahead = strategy.enable_readahead;
/// let enable_cache_writeback = strategy.cache_writeback_enabled;
/// ```
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct IoStrategy {
/// Recommended buffer size for I/O operations (in bytes).
@@ -164,6 +163,7 @@ pub struct IoStrategy {
/// Whether to use tokio BufReader for improved async I/O.
///
/// Always enabled for better async performance.
#[allow(dead_code)]
pub use_buffered_io: bool,
/// The detected I/O load level.
@@ -668,7 +668,6 @@ struct CachedObject {
/// };
/// manager.put_cached_object(cache_key, cached).await;
/// ```
#[allow(dead_code)]
#[derive(Clone, Debug)]
pub struct CachedGetObject {
/// The object body data
@@ -682,6 +681,7 @@ pub struct CachedGetObject {
/// Last modified time as RFC3339 string (e.g., "2024-01-01T12:00:00Z")
pub last_modified: Option<String>,
/// Expiration time as RFC3339 string
#[allow(dead_code)]
pub expires: Option<String>,
/// Cache-Control header value
pub cache_control: Option<String>,
@@ -700,10 +700,12 @@ pub struct CachedGetObject {
/// Number of tags associated with the object
pub tag_count: Option<i32>,
/// Replication status
#[allow(dead_code)]
pub replication_status: Option<String>,
/// User-defined metadata (x-amz-meta-*)
pub user_metadata: std::collections::HashMap<String, String>,
/// When this object was cached (for internal use, automatically set)
#[allow(dead_code)]
cached_at: Option<Instant>,
/// Access count for hot key tracking (automatically managed)
access_count: Arc<AtomicU64>,
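
The hunks above narrow `#[allow(dead_code)]` from whole structs to the individual fields that are genuinely unused, so the compiler keeps warning if anything else goes dead. The same idea on a hypothetical struct (not from this codebase):

    #[derive(Debug, Clone)]
    pub struct Example {
        /// Actively read; still covered by dead-code analysis.
        pub used: u64,
        /// Kept for a planned feature; only this field is exempt.
        #[allow(dead_code)]
        reserved: Option<String>,
    }
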
@@ -1320,11 +1322,8 @@ impl ConcurrencyManager {
///
/// This ensures we don't overwhelm the disk subsystem with too many
/// concurrent reads, which can cause performance degradation.
pub async fn acquire_disk_read_permit(&self) -> tokio::sync::SemaphorePermit<'_> {
self.disk_read_semaphore
.acquire()
.await
.expect("semaphore closed unexpectedly")
pub async fn acquire_disk_read_permit(&self) -> Result<tokio::sync::SemaphorePermit<'_>, tokio::sync::AcquireError> {
self.disk_read_semaphore.acquire().await
}
// ============================================
@@ -1791,10 +1790,10 @@ mod tests {
async fn test_disk_read_permits() {
let manager = ConcurrencyManager::new();
let permit1 = manager.acquire_disk_read_permit().await;
let permit1 = manager.acquire_disk_read_permit().await.unwrap();
assert_eq!(manager.disk_read_semaphore.available_permits(), 63);
let permit2 = manager.acquire_disk_read_permit().await;
let permit2 = manager.acquire_disk_read_permit().await.unwrap();
assert_eq!(manager.disk_read_semaphore.available_permits(), 62);
drop(permit1);

View File

@@ -258,7 +258,7 @@ mod tests {
.map(|_| {
let mgr = Arc::new(manager.clone());
tokio::spawn(async move {
let _permit = mgr.acquire_disk_read_permit().await;
let _permit = mgr.acquire_disk_read_permit().await.unwrap();
sleep(Duration::from_millis(10)).await;
})
})

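
With `acquire_disk_read_permit` now returning `Result`, a closed semaphore becomes an explicit error at each call site: the tests `unwrap`, while production callers can propagate it. A hypothetical caller, assuming the `ConcurrencyManager` shown above:

    /// Hold a disk-read permit for the duration of a bounded read.
    async fn bounded_disk_read(mgr: &ConcurrencyManager) -> std::io::Result<()> {
        let _permit = mgr
            .acquire_disk_read_permit()
            .await
            .map_err(|e| std::io::Error::other(format!("disk-read semaphore closed: {e}")))?;
        // ... perform the read; the permit is released when `_permit` drops ...
        Ok(())
    }
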
View File

@@ -16,11 +16,10 @@ use crate::app::bucket_usecase::DefaultBucketUsecase;
use crate::app::multipart_usecase::DefaultMultipartUsecase;
use crate::app::object_usecase::DefaultObjectUsecase;
use rustfs_ecstore::{
StorageAPI,
bucket::tagging::decode_tags_to_map,
error::{is_err_object_not_found, is_err_version_not_found},
new_object_layer_fn,
store_api::ObjectOptions,
store_api::{ObjectOperations, ObjectOptions},
};
use s3s::{S3, S3Error, S3ErrorCode, S3Request, S3Response, S3Result, dto::*, s3_error};
use serde::{Deserialize, Serialize};

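
Import sites across the tree now name the narrower capability traits (`ObjectOperations`, `BucketOperations`, `ListOperations`, `HealOperations`) instead of the blanket `StorageAPI`. A rough sketch of the shape this decomposition implies; the real signatures in `store_api` differ:

    // Hypothetical decomposition: each trait exposes one capability.
    #[async_trait::async_trait]
    pub trait ObjectOperations: Send + Sync {
        async fn get_object_info(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo>;
    }

    #[async_trait::async_trait]
    pub trait BucketOperations: Send + Sync {
        async fn get_bucket_info(&self, bucket: &str, opts: &BucketOptions) -> Result<BucketInfo>;
    }

    // The umbrella trait survives for code that genuinely needs everything;
    // narrow call sites bound only the capability they use.
    pub trait StorageAPI: ObjectOperations + BucketOperations {}
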
View File

@@ -25,8 +25,8 @@ use rustfs_ecstore::bucket::metadata_sys::get_replication_config;
use rustfs_ecstore::bucket::object_lock::objectlock_sys;
use rustfs_ecstore::bucket::replication::ReplicationConfigurationExt;
use rustfs_ecstore::error::StorageError;
use rustfs_ecstore::store_api::{BucketOptions, ObjectInfo, ObjectToDelete};
use rustfs_ecstore::{StorageAPI, new_object_layer_fn};
use rustfs_ecstore::new_object_layer_fn;
use rustfs_ecstore::store_api::{BucketOperations, BucketOptions, ObjectInfo, ObjectToDelete};
use rustfs_targets::EventName;
use rustfs_targets::arn::{TargetID, TargetIDError};
use rustfs_utils::http::{

View File

@@ -12,13 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
use s3s::dto::{
BucketKeyEnabled, BucketName, ChecksumCRC32, ChecksumCRC32C, ChecksumCRC64NVME, ChecksumSHA1, ChecksumSHA256, ChecksumType,
ETag, Expiration, Location, ObjectKey, ObjectVersionId, RequestCharged, SSEKMSKeyId, ServerSideEncryption,
};
#[allow(dead_code)]
#[derive(Debug, Clone, Default)]
pub struct CompleteMultipartUploadOutput {
pub bucket: Option<BucketName>,

View File

@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use rustfs_ecstore::StorageAPI;
use rustfs_ecstore::store::ECStore;
use rustfs_ecstore::store_api::ListOperations;
use std::sync::Arc;
/// Determines if the key "looks like a prefix" (ends with `/`).

View File

@@ -633,39 +633,6 @@ impl NodeService {
pub(super) async fn handle_write(&self, _request: Request<WriteRequest>) -> Result<Response<WriteResponse>, Status> {
unimplemented!("write");
// let request = request.into_inner();
// if let Some(disk) = self.find_disk(&request.disk).await {
// let file_writer = if request.is_append {
// disk.append_file(&request.volume, &request.path).await
// } else {
// disk.create_file("", &request.volume, &request.path, 0).await
// };
// match file_writer {
// Ok(mut file_writer) => match file_writer.write(&request.data).await {
// Ok(_) => Ok(Response::new(WriteResponse {
// success: true,
// error: None,
// })),
// Err(err) => Ok(Response::new(WriteResponse {
// success: false,
// error: Some(err_to_proto_err(&err, &format!("write failed: {}", err))),
// })),
// },
// Err(err) => Ok(Response::new(WriteResponse {
// success: false,
// error: Some(err_to_proto_err(&err, &format!("get writer failed: {}", err))),
// })),
// }
// } else {
// Ok(Response::new(WriteResponse {
// success: false,
// error: Some(err_to_proto_err(
// &EcsError::new(StorageError::InvalidArgument(Default::default(), Default::default(), Default::default())),
// "can not find disk",
// )),
// }))
// }
}
pub(super) async fn handle_rename_file(

View File

@@ -63,26 +63,6 @@ mod lock;
#[path = "metrics.rs"]
mod metrics;
// fn match_for_io_error(err_status: &Status) -> Option<&std::io::Error> {
// let mut err: &(dyn Error + 'static) = err_status;
// loop {
// if let Some(io_err) = err.downcast_ref::<std::io::Error>() {
// return Some(io_err);
// }
// // h2::Error do not expose std::io::Error with `source()`
// // https://github.com/hyperium/h2/pull/462
// if let Some(h2_err) = err.downcast_ref::<h2::Error>() {
// if let Some(io_err) = h2_err.get_io() {
// return Some(io_err);
// }
// }
// err = err.source()?;
// }
// }
#[derive(Debug)]
pub struct NodeService {
local_peer: LocalPeerS3Client,
@@ -198,200 +178,12 @@ impl Node for NodeService {
let _ = request;
unimplemented!("write_stream");
// let mut in_stream = request.into_inner();
// let (tx, rx) = mpsc::channel(128);
// tokio::spawn(async move {
// let mut file_ref = None;
// while let Some(result) = in_stream.next().await {
// match result {
// // Ok(v) => tx
// // .send(Ok(EchoResponse { message: v.message }))
// // .await
// // .expect("working rx"),
// Ok(v) => {
// match file_ref.as_ref() {
// Some(_) => (),
// None => {
// if let Some(disk) = find_local_disk(&v.disk).await {
// let file_writer = if v.is_append {
// disk.append_file(&v.volume, &v.path).await
// } else {
// disk.create_file("", &v.volume, &v.path, 0).await
// };
// match file_writer {
// Ok(file_writer) => file_ref = Some(file_writer),
// Err(err) => {
// tx.send(Ok(WriteResponse {
// success: false,
// error: Some(err_to_proto_err(
// &err,
// &format!("get file writer failed: {}", err),
// )),
// }))
// .await
// .expect("working rx");
// break;
// }
// }
// } else {
// tx.send(Ok(WriteResponse {
// success: false,
// error: Some(err_to_proto_err(
// &EcsError::new(StorageError::InvalidArgument(
// Default::default(),
// Default::default(),
// Default::default(),
// )),
// "can not find disk",
// )),
// }))
// .await
// .expect("working rx");
// break;
// }
// }
// };
// match file_ref.as_mut().unwrap().write(&v.data).await {
// Ok(_) => tx.send(Ok(WriteResponse {
// success: true,
// error: None,
// })),
// Err(err) => tx.send(Ok(WriteResponse {
// success: false,
// error: Some(err_to_proto_err(&err, &format!("write failed: {}", err))),
// })),
// }
// .await
// .unwrap();
// }
// Err(err) => {
// if let Some(io_err) = match_for_io_error(&err) {
// if io_err.kind() == ErrorKind::BrokenPipe {
// // here you can handle special case when client
// // disconnected in unexpected way
// eprintln!("\tclient disconnected: broken pipe");
// break;
// }
// }
// match tx.send(Err(err)).await {
// Ok(_) => (),
// Err(_err) => break, // response was dropped
// }
// }
// }
// }
// println!("\tstream ended");
// });
// let out_stream = ReceiverStream::new(rx);
// Ok(Response::new(Box::pin(out_stream)))
}
type ReadAtStream = ResponseStream<ReadAtResponse>;
async fn read_at(&self, _request: Request<Streaming<ReadAtRequest>>) -> Result<Response<Self::ReadAtStream>, Status> {
info!("read_at");
unimplemented!("read_at");
// let mut in_stream = request.into_inner();
// let (tx, rx) = mpsc::channel(128);
// tokio::spawn(async move {
// let mut file_ref = None;
// while let Some(result) = in_stream.next().await {
// match result {
// Ok(v) => {
// match file_ref.as_ref() {
// Some(_) => (),
// None => {
// if let Some(disk) = find_local_disk(&v.disk).await {
// match disk.read_file(&v.volume, &v.path).await {
// Ok(file_reader) => file_ref = Some(file_reader),
// Err(err) => {
// tx.send(Ok(ReadAtResponse {
// success: false,
// data: Vec::new(),
// error: Some(err_to_proto_err(&err, &format!("read file failed: {}", err))),
// read_size: -1,
// }))
// .await
// .expect("working rx");
// break;
// }
// }
// } else {
// tx.send(Ok(ReadAtResponse {
// success: false,
// data: Vec::new(),
// error: Some(err_to_proto_err(
// &EcsError::new(StorageError::InvalidArgument(
// Default::default(),
// Default::default(),
// Default::default(),
// )),
// "can not find disk",
// )),
// read_size: -1,
// }))
// .await
// .expect("working rx");
// break;
// }
// }
// };
// let mut data = vec![0u8; v.length.try_into().unwrap()];
// match file_ref
// .as_mut()
// .unwrap()
// .read_at(v.offset.try_into().unwrap(), &mut data)
// .await
// {
// Ok(read_size) => tx.send(Ok(ReadAtResponse {
// success: true,
// data,
// read_size: read_size.try_into().unwrap(),
// error: None,
// })),
// Err(err) => tx.send(Ok(ReadAtResponse {
// success: false,
// data: Vec::new(),
// error: Some(err_to_proto_err(&err, &format!("read at failed: {}", err))),
// read_size: -1,
// })),
// }
// .await
// .unwrap();
// }
// Err(err) => {
// if let Some(io_err) = match_for_io_error(&err) {
// if io_err.kind() == ErrorKind::BrokenPipe {
// // here you can handle special case when client
// // disconnected in unexpected way
// eprintln!("\tclient disconnected: broken pipe");
// break;
// }
// }
// match tx.send(Err(err)).await {
// Ok(_) => (),
// Err(_err) => break, // response was dropped
// }
// }
// }
// }
// println!("\tstream ended");
// });
// let out_stream = ReceiverStream::new(rx);
// Ok(Response::new(Box::pin(out_stream)))
}
async fn list_dir(&self, request: Request<ListDirRequest>) -> Result<Response<ListDirResponse>, Status> {

View File

@@ -12,11 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]
//! Facade modules for incremental S3 API extraction from `ecfs.rs`.
//!
//! This file intentionally starts as a skeleton. Behavior remains in `ecfs.rs`
//! until each helper is moved over in dedicated, small refactor steps.
#![allow(dead_code)]
pub(crate) mod acl;
pub(crate) mod bucket;

View File

@@ -684,7 +684,7 @@ async fn apply_ssec_prepare_encryption_material(
Ok(EncryptionMaterial {
sse_type: SSEType::SseC,
server_side_encryption: ServerSideEncryption::AES256.parse().unwrap(),
server_side_encryption: ServerSideEncryption::from_static(ServerSideEncryption::AES256),
kms_key_id: None,
algorithm,
key_bytes: [0; 32],
@@ -731,7 +731,7 @@ async fn apply_ssec_encryption_material(
Ok(EncryptionMaterial {
sse_type: SSEType::SseC,
server_side_encryption: ServerSideEncryption::AES256.parse().unwrap(),
server_side_encryption: ServerSideEncryption::from_static(ServerSideEncryption::AES256),
kms_key_id: None,
algorithm: validated.algorithm,
key_bytes: validated.key_bytes,
@@ -776,7 +776,7 @@ async fn apply_ssec_decryption_material(
Ok(DecryptionMaterial {
sse_type: SSEType::SseC,
server_side_encryption: ServerSideEncryption::AES256.parse().unwrap(), // const
server_side_encryption: ServerSideEncryption::from_static(ServerSideEncryption::AES256), // const
kms_key_id: None,
algorithm: SSECustomerAlgorithm::from(algorithm),
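
These hunks drop the `parse().unwrap()` round-trip in favor of `from_static`, which builds the value directly from its associated constant. In isolation, assuming the s3s `dto::ServerSideEncryption` type whose `AES256` constant is a `&'static str`:

    use s3s::dto::ServerSideEncryption;

    fn sse_aes256() -> ServerSideEncryption {
        // Infallible: constructed from the known constant instead of the
        // previous `ServerSideEncryption::AES256.parse().unwrap()` round-trip.
        ServerSideEncryption::from_static(ServerSideEncryption::AES256)
    }
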
@@ -941,7 +941,8 @@ async fn apply_managed_decryption_material(
return Ok(None);
}
let server_side_encryption = metadata.get("x-amz-server-side-encryption").unwrap().clone();
// The contains_key check above guarantees presence; fall back to the default
// instead of panicking if that invariant ever changes.
let server_side_encryption = metadata.get("x-amz-server-side-encryption").cloned().unwrap_or_default();
// Parse metadata - try using service if available, otherwise parse manually
let (encrypted_data_key, iv, algorithm) = if let Some(service) = get_global_encryption_service().await {
@@ -1169,17 +1170,17 @@ impl TestSseDekProvider {
let decoded_len = v.len();
match v.try_into() {
Ok(arr) => {
println!("Successfully loaded master key (32 bytes)");
tracing::info!("Successfully loaded SSE master key (32 bytes)");
arr
}
Err(_) => {
eprintln!("Failed to load master key: decoded key is not 32 bytes (got {} bytes)", decoded_len);
tracing::error!("Failed to load master key: decoded key is not 32 bytes (got {decoded_len} bytes)");
[0u8; 32]
}
}
}
Err(e) => {
eprintln!("Failed to load master key: invalid base64 encoding: {}", e);
tracing::error!("Failed to load master key: invalid base64 encoding: {e}");
[0u8; 32]
}
}
@@ -1188,8 +1189,9 @@ impl TestSseDekProvider {
};
if master_key == [0u8; 32] {
eprintln!("✗ Failed to load master key: no valid master key loaded! All encryption operations will fail.");
eprintln!(" Set __RUSTFS_SSE_SIMPLE_CMK environment variable to a base64-encoded 32-byte key.");
tracing::error!(
"No valid SSE master key loaded. Set __RUSTFS_SSE_SIMPLE_CMK environment variable to a base64-encoded 32-byte key."
);
std::process::exit(1);
}
@@ -1459,8 +1461,9 @@ pub fn validate_ssec_params(params: SsecParams) -> Result<ValidatedSsecParams, A
return Err(ApiError::from(StorageError::other("SSE-C key MD5 mismatch")));
}
// SAFETY: We validated the length is exactly 32 bytes above
let key_array: [u8; 32] = key_bytes.try_into().expect("key length already validated to be 32 bytes");
let key_array: [u8; 32] = key_bytes
.try_into()
.map_err(|_| ApiError::from(StorageError::other("SSE-C key must be exactly 32 bytes")))?;
Ok(ValidatedSsecParams {
algorithm: params.algorithm,
@@ -1706,7 +1709,7 @@ mod tests {
.await
.expect("Failed to generate DEK");
// 3. Prepare test data (明文)
// 3. Prepare test data (plaintext)
let plaintext = b"Hello, World! This is a test message for encryption and decryption.";
println!("Original plaintext: {:?}", String::from_utf8_lossy(plaintext));
println!("Plaintext length: {} bytes", plaintext.len());

View File

@@ -1,8 +1,19 @@
cycle|app<->infra
cycle|infra<->interface
dep|rustfs/src/main.rs|infra->app|crate::app::context::
dep|rustfs/src/server/audit.rs|infra->app|crate::app::context::resolve_server_config
dep|rustfs/src/server/event.rs|infra->app|crate::app::context::resolve_server_config
dep|rustfs/src/server/http.rs|infra->interface|crate::admin
dep|rustfs/src/server/layer.rs|infra->interface|crate::admin::console::is_console_path
dep|rustfs/src/storage/objects/put_object.rs|infra->app|crate::app::context::resolve_bucket_metadata_handle
# Layer dependency baseline for the rustfs binary crate.
#
# These are intra-crate module references within rustfs/ that cross the
# conceptual layer boundaries (app, infra/server, interface/admin).
# Since they live inside one Cargo crate, Rust doesn't enforce separation.
# The list is maintained for architectural awareness during code review.
#
# Format: status|source_file|direction|imported_symbol|note
#
# Status:
# accepted - reviewed and intentionally allowed
# todo - should be resolved in a future refactor
accepted|rustfs/src/main.rs|infra->app|crate::app::context::*|main wires all layers
accepted|rustfs/src/server/audit.rs|infra->app|crate::app::context::resolve_server_config|config resolution uses global AppContext
accepted|rustfs/src/server/event.rs|infra->app|crate::app::context::resolve_server_config|config resolution uses global AppContext
accepted|rustfs/src/server/http.rs|infra->interface|crate::admin|HTTP server routes to admin handlers
accepted|rustfs/src/server/layer.rs|infra->interface|crate::admin::console::is_console_path|pure path predicate for routing
accepted|rustfs/src/storage/objects/put_object.rs|infra->app|crate::app::context::resolve_bucket_metadata_handle|metadata resolution uses global AppContext
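
Because the baseline is line-oriented and pipe-delimited, review tooling can consume it in a few lines; a sketch in Rust, assuming the five-field format documented above:

    /// One parsed baseline entry, following status|source_file|direction|imported_symbol|note.
    struct BaselineEntry<'a> {
        status: &'a str,
        source_file: &'a str,
        direction: &'a str,
        imported_symbol: &'a str,
        note: &'a str,
    }

    fn parse_line(line: &str) -> Option<BaselineEntry<'_>> {
        let line = line.trim();
        if line.is_empty() || line.starts_with('#') {
            return None; // comments and blank lines are not entries
        }
        let mut fields = line.splitn(5, '|');
        Some(BaselineEntry {
            status: fields.next()?,
            source_file: fields.next()?,
            direction: fields.next()?,
            imported_symbol: fields.next()?,
            note: fields.next().unwrap_or(""), // tolerate legacy four-field lines
        })
    }
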