Mirror of https://github.com/rustfs/rustfs.git (synced 2026-01-17 01:30:33 +00:00)

Commit: fix clippy
@@ -18,8 +18,8 @@ pub const BLOCK_SIZE_V2: usize = 1024 * 1024; // 1M
 pub const NULL_VERSION_ID: &str = "null";
 // pub const RUSTFS_ERASURE_UPGRADED: &str = "x-rustfs-internal-erasure-upgraded";

 pub const TIER_FV_ID: &str = "tier-free-versionID";
 pub const TIER_FV_MARKER: &str = "tier-free-marker";
 pub const TIER_SKIP_FV_ID: &str = "tier-skip-fvid";

 #[derive(Serialize, Deserialize, Debug, PartialEq, Clone, Default)]
@@ -309,7 +309,8 @@ impl FileInfo {
     }

     pub fn set_tier_free_version_id(&mut self, version_id: &str) {
-        self.metadata.insert(format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TIER_FV_ID), version_id.to_string());
+        self.metadata
+            .insert(format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TIER_FV_ID), version_id.to_string());
     }

     pub fn tier_free_version_id(&self) -> String {
@@ -317,19 +318,23 @@ impl FileInfo {
     }

     pub fn set_tier_free_version(&mut self) {
-        self.metadata.insert(format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TIER_FV_MARKER), "".to_string());
+        self.metadata
+            .insert(format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TIER_FV_MARKER), "".to_string());
     }

     pub fn set_skip_tier_free_version(&mut self) {
-        self.metadata.insert(format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TIER_SKIP_FV_ID), "".to_string());
+        self.metadata
+            .insert(format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TIER_SKIP_FV_ID), "".to_string());
     }

     pub fn skip_tier_free_version(&self) -> bool {
-        self.metadata.contains_key(&format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TIER_SKIP_FV_ID))
+        self.metadata
+            .contains_key(&format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TIER_SKIP_FV_ID))
     }

     pub fn tier_free_version(&self) -> bool {
-        self.metadata.contains_key(&format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TIER_FV_MARKER))
+        self.metadata
+            .contains_key(&format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TIER_FV_MARKER))
     }

     pub fn set_inline_data(&mut self) {
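Note: all four accessors above build the same reserved-prefix metadata key by hand. A small helper in the same spirit would keep the key construction in one place; this is a sketch with an invented name and a placeholder prefix value, not code from the commit:

    use std::collections::HashMap;

    // Placeholder value for illustration; the real constant lives in the crate.
    const RESERVED_METADATA_PREFIX_LOWER: &str = "x-rustfs-internal-";

    // Hypothetical helper: build the prefixed key once instead of repeating
    // format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, key) at every call site.
    fn reserved_key(key: &str) -> String {
        format!("{RESERVED_METADATA_PREFIX_LOWER}{key}")
    }

    fn main() {
        let mut metadata: HashMap<String, String> = HashMap::new();
        metadata.insert(reserved_key("tier-free-versionID"), "v1".to_string());
        assert!(metadata.contains_key(&reserved_key("tier-free-versionID")));
    }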
@@ -47,7 +47,7 @@ pub const TRANSITIONED_OBJECTNAME: &str = "transitioned-object";
 pub const TRANSITIONED_VERSION_ID: &str = "transitioned-versionID";
 pub const TRANSITION_TIER: &str = "transition-tier";

 const X_AMZ_RESTORE_EXPIRY_DAYS: &str = "X-Amz-Restore-Expiry-Days";
 const X_AMZ_RESTORE_REQUEST_DATE: &str = "X-Amz-Restore-Request-Date";

 // type ScanHeaderVersionFn = Box<dyn Fn(usize, &[u8], &[u8]) -> Result<()>>;
@@ -486,11 +486,13 @@ impl FileMeta {
             return Err(Error::other("attempted to add invalid version"));
         }
         let encoded = ver.marshal_msg()?;

-        if self.versions.len()+1 > 100 {
-            return Err(Error::other("You've exceeded the limit on the number of versions you can create on this object"));
+        if self.versions.len() + 1 > 100 {
+            return Err(Error::other(
+                "You've exceeded the limit on the number of versions you can create on this object",
+            ));
         }

         self.versions.push(FileMetaShallowVersion {
             header: FileMetaVersionHeader {
                 mod_time: Some(OffsetDateTime::from_unix_timestamp(-1)?),
@@ -498,15 +500,15 @@ impl FileMeta {
             },
             ..Default::default()
         });

         let len = self.versions.len();
         for (i, existing) in self.versions.iter().enumerate() {
             if existing.header.mod_time.unwrap().nanosecond() <= mod_time {
-                let vers = self.versions[i..len-1].to_vec();
-                self.versions[i+1..].clone_from_slice(vers.as_slice());
+                let vers = self.versions[i..len - 1].to_vec();
+                self.versions[i + 1..].clone_from_slice(vers.as_slice());
                 self.versions[i] = FileMetaShallowVersion {
                     header: ver.header(),
                     meta: encoded,
                 };
                 return Ok(());
             }
@@ -564,7 +566,7 @@ impl FileMeta {
             ver.object.as_mut().unwrap().reset_inline_data();
             self.set_idx(i, ver.clone())?;
         } else {
-            let vers = self.versions[i+1..].to_vec();
+            let vers = self.versions[i + 1..].to_vec();
             self.versions.extend(vers.iter().cloned());
             let (free_version, to_free) = ver.object.as_ref().unwrap().init_free_version(fi);
             if to_free {
@@ -1927,10 +1929,22 @@ impl MetaObject {
     }

     pub fn set_transition(&mut self, fi: &FileInfo) {
-        self.meta_sys.insert(format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TRANSITION_STATUS), fi.transition_status.as_bytes().to_vec());
-        self.meta_sys.insert(format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TRANSITIONED_OBJECTNAME), fi.transitioned_objname.as_bytes().to_vec());
-        self.meta_sys.insert(format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TRANSITIONED_VERSION_ID), fi.transition_version_id.unwrap().as_bytes().to_vec());
-        self.meta_sys.insert(format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TRANSITION_TIER), fi.transition_tier.as_bytes().to_vec());
+        self.meta_sys.insert(
+            format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TRANSITION_STATUS),
+            fi.transition_status.as_bytes().to_vec(),
+        );
+        self.meta_sys.insert(
+            format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TRANSITIONED_OBJECTNAME),
+            fi.transitioned_objname.as_bytes().to_vec(),
+        );
+        self.meta_sys.insert(
+            format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TRANSITIONED_VERSION_ID),
+            fi.transition_version_id.unwrap().as_bytes().to_vec(),
+        );
+        self.meta_sys.insert(
+            format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TRANSITION_TIER),
+            fi.transition_tier.as_bytes().to_vec(),
+        );
     }

     pub fn remove_restore_hdrs(&mut self) {
@@ -1977,25 +1991,39 @@ impl MetaObject {
         if fi.skip_tier_free_version() {
             return (FileMetaVersion::default(), false);
         }
-        if let Some(status) = self.meta_sys.get(&format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TRANSITION_STATUS)) {
+        if let Some(status) = self
+            .meta_sys
+            .get(&format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TRANSITION_STATUS))
+        {
             if *status == TRANSITION_COMPLETE.as_bytes().to_vec() {
                 let vid = Uuid::parse_str(&fi.tier_free_version_id());
                 if let Err(err) = vid {
-                    panic!("Invalid Tier Object delete marker versionId {} {}", fi.tier_free_version_id(), err.to_string());
+                    panic!(
+                        "Invalid Tier Object delete marker versionId {} {}",
+                        fi.tier_free_version_id(),
+                        err.to_string()
+                    );
                 }
                 let vid = vid.unwrap();
                 let mut free_entry = FileMetaVersion {
                     version_type: VersionType::Delete,
                     write_version: 0,
                     ..Default::default()
                 };
                 free_entry.delete_marker = Some(MetaDeleteMarker {
                     version_id: Some(vid),
                     mod_time: self.mod_time,
                     meta_sys: Some(HashMap::<String, Vec<u8>>::new()),
                 });

-                free_entry.delete_marker.as_mut().unwrap().meta_sys.as_mut().unwrap().insert(format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, FREE_VERSION), vec![]);
+                free_entry
+                    .delete_marker
+                    .as_mut()
+                    .unwrap()
+                    .meta_sys
+                    .as_mut()
+                    .unwrap()
+                    .insert(format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, FREE_VERSION), vec![]);
                 let tier_key = format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TRANSITION_TIER);
                 let tier_obj_key = format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TRANSITIONED_OBJECTNAME);
                 let tier_obj_vid_key = format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TRANSITIONED_VERSION_ID);
@@ -2003,7 +2031,14 @@ impl MetaObject {
             let aa = [tier_key, tier_obj_key, tier_obj_vid_key];
             for (k, v) in &self.meta_sys {
                 if aa.contains(&k) {
-                    free_entry.delete_marker.as_mut().unwrap().meta_sys.as_mut().unwrap().insert(k.clone(), v.clone());
+                    free_entry
+                        .delete_marker
+                        .as_mut()
+                        .unwrap()
+                        .meta_sys
+                        .as_mut()
+                        .unwrap()
+                        .insert(k.clone(), v.clone());
                 }
             }
             return (free_entry, true);

@@ -122,7 +122,9 @@ pub fn get_endpoint_url(endpoint: &str, secure: bool) -> Result<Url, std::io::Er
     }

     let endpoint_url_str = format!("{scheme}://{endpoint}");
-    let Ok(endpoint_url) = Url::parse(&endpoint_url_str) else { return Err(std::io::Error::other("url parse error.")); };
+    let Ok(endpoint_url) = Url::parse(&endpoint_url_str) else {
+        return Err(std::io::Error::other("url parse error."));
+    };

     //is_valid_endpoint_url(endpoint_url)?;
     Ok(endpoint_url)
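Note: the hunk above reflows a `let ... else` binding. For readers unfamiliar with the form, here is a minimal, std-only sketch; the real code parses with the `url` crate, this example substitutes `std::net::IpAddr` so it runs on its own:

    use std::net::IpAddr;

    fn parse_addr(s: &str) -> Result<IpAddr, std::io::Error> {
        // `let ... else` binds on the happy path and requires a divergent
        // (return/panic/break) else-body, which is the multi-line shape
        // rustfmt prefers over a one-line `else { ... };`.
        let Ok(addr) = s.parse::<IpAddr>() else {
            return Err(std::io::Error::other("addr parse error."));
        };
        Ok(addr)
    }

    fn main() {
        assert!(parse_addr("127.0.0.1").is_ok());
        assert!(parse_addr("not-an-addr").is_err());
    }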
@@ -137,42 +139,40 @@ pub fn new_remotetarget_http_transport(insecure: bool) -> Builder<TokioExecutor>
 lazy_static! {
     static ref SUPPORTED_QUERY_VALUES: HashMap<String, bool> = {
         let mut m = HashMap::new();
         m.insert("attributes".to_string(), true);
         m.insert("partNumber".to_string(), true);
         m.insert("versionId".to_string(), true);
         m.insert("response-cache-control".to_string(), true);
         m.insert("response-content-disposition".to_string(), true);
         m.insert("response-content-encoding".to_string(), true);
         m.insert("response-content-language".to_string(), true);
         m.insert("response-content-type".to_string(), true);
         m.insert("response-expires".to_string(), true);
         m
     };

     static ref SUPPORTED_HEADERS: HashMap<String, bool> = {
         let mut m = HashMap::new();
         m.insert("content-type".to_string(), true);
         m.insert("cache-control".to_string(), true);
         m.insert("content-encoding".to_string(), true);
         m.insert("content-disposition".to_string(), true);
         m.insert("content-language".to_string(), true);
         m.insert("x-amz-website-redirect-location".to_string(), true);
         m.insert("x-amz-object-lock-mode".to_string(), true);
         m.insert("x-amz-metadata-directive".to_string(), true);
         m.insert("x-amz-object-lock-retain-until-date".to_string(), true);
         m.insert("expires".to_string(), true);
         m.insert("x-amz-replication-status".to_string(), true);
         m
     };

     static ref SSE_HEADERS: HashMap<String, bool> = {
         let mut m = HashMap::new();
         m.insert("x-amz-server-side-encryption".to_string(), true);
         m.insert("x-amz-server-side-encryption-aws-kms-key-id".to_string(), true);
         m.insert("x-amz-server-side-encryption-context".to_string(), true);
         m.insert("x-amz-server-side-encryption-customer-algorithm".to_string(), true);
         m.insert("x-amz-server-side-encryption-customer-key".to_string(), true);
         m.insert("x-amz-server-side-encryption-customer-key-md5".to_string(), true);
         m
     };
 }
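Not part of the commit, but worth noting: a `HashMap<String, bool>` whose values are always `true` is really a set-membership table. A sketch of the same lookup with `std::sync::OnceLock` and a `HashSet` (function name hypothetical):

    use std::collections::HashSet;
    use std::sync::OnceLock;

    // Hypothetical alternative to the lazy_static HashMap<String, bool>:
    // a lazily initialized set answers the same "is this key supported?"
    // question without storing a dummy bool per entry.
    fn supported_query_values() -> &'static HashSet<&'static str> {
        static SET: OnceLock<HashSet<&'static str>> = OnceLock::new();
        SET.get_or_init(|| {
            [
                "attributes",
                "partNumber",
                "versionId",
                "response-cache-control",
                "response-content-disposition",
                "response-content-encoding",
                "response-content-language",
                "response-content-type",
                "response-expires",
            ]
            .into_iter()
            .collect()
        })
    }

    fn main() {
        assert!(supported_query_values().contains("versionId"));
        assert!(!supported_query_values().contains("unknown"));
    }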
@@ -201,7 +201,11 @@ pub fn is_sse_header(header_key: &str) -> bool {

 pub fn is_amz_header(header_key: &str) -> bool {
     let key = header_key.to_lowercase();
-    key.starts_with("x-amz-meta-") || key.starts_with("x-amz-grant-") || key == "x-amz-acl" || is_sse_header(header_key) || key.starts_with("x-amz-checksum-")
+    key.starts_with("x-amz-meta-")
+        || key.starts_with("x-amz-grant-")
+        || key == "x-amz-acl"
+        || is_sse_header(header_key)
+        || key.starts_with("x-amz-checksum-")
 }

 pub fn is_rustfs_header(header_key: &str) -> bool {
@@ -1,8 +1,4 @@
 //use tokio_stream::Stream;
-use std::future::Future;
-use std::time::{Duration, Instant};
-use std::pin::Pin;
-use std::task::{Context, Poll};
+use std::time::Duration;

 // MaxRetry is the maximum number of retries before stopping.
 pub const MAX_RETRY: i64 = 10;
@@ -24,9 +24,6 @@ pub struct LcAuditEvent {

 impl LcAuditEvent {
     pub fn new(event: lifecycle::Event, source: LcEventSrc) -> Self {
-        Self {
-            event,
-            source,
-        }
+        Self { event, source }
     }
 }
@@ -1,53 +1,49 @@
 use async_channel::{Receiver as A_Receiver, Sender as A_Sender, bounded};
 use futures::Future;
 use http::HeaderMap;
 use lazy_static::lazy_static;
 use s3s::Body;
 use sha2::{Digest, Sha256};
 use std::any::{Any, TypeId};
 use std::collections::HashMap;
 use std::env;
 use std::io::{Cursor, Write};
 use std::pin::Pin;
 use std::sync::atomic::{AtomicI32, AtomicI64, Ordering};
 use std::sync::{Arc, Mutex};
 use tokio::select;
 use tokio::sync::mpsc::{Receiver, Sender};
 use tokio::sync::{RwLock, mpsc};
 use tracing::{error, info, warn};
 use uuid::Uuid;
 use xxhash_rust::xxh64;

 use s3s::dto::BucketLifecycleConfiguration;

 use super::bucket_lifecycle_audit::{LcAuditEvent, LcEventSrc};
 use super::lifecycle::{self, ExpirationOptions, IlmAction, Lifecycle, TransitionOptions};
 use super::tier_last_day_stats::{DailyAllTierStats, LastDayTierStats};
 use super::tier_sweeper::{Jentry, delete_object_from_remote_tier};
 use crate::bucket::{metadata_sys::get_lifecycle_config, versioning_sys::BucketVersioningSys};
 use crate::client::object_api_utils::new_getobjectreader;
 use crate::error::Error;
 use crate::error::{error_resp_to_object_err, is_err_object_not_found, is_err_version_not_found, is_network_or_host_down};
 use crate::event::name::EventName;
 use crate::event_notification::{EventArgs, send_event};
 use crate::global::GLOBAL_LocalNodeName;
 use crate::global::{GLOBAL_LifecycleSys, GLOBAL_TierConfigMgr, get_global_deployment_id};
 use crate::heal::{
     data_scanner::{apply_expiry_on_non_transitioned_objects, apply_expiry_on_transitioned_object},
     data_scanner_metric::ScannerMetrics,
     data_usage_cache::TierStats,
 };
 use crate::store::ECStore;
 use crate::store_api::StorageAPI;
 use crate::store_api::{GetObjectReader, HTTPRangeSpec, ObjectInfo, ObjectOptions, ObjectToDelete};
 use crate::tier::warm_backend::WarmBackendGetOpts;

 pub type TimeFn = Arc<dyn Fn() -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + Sync + 'static>;
-pub type TraceFn = Arc<dyn Fn(String, HashMap<String, String>) -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + Sync + 'static>;
+pub type TraceFn =
+    Arc<dyn Fn(String, HashMap<String, String>) -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + Sync + 'static>;
 pub type ExpiryOpType = Box<dyn ExpiryOp + Send + Sync + 'static>;

 static XXHASH_SEED: u64 = 0;
@@ -74,8 +70,7 @@ impl LifecycleSys {
         Some(lc)
     }

-    pub fn trace(oi: &ObjectInfo) -> TraceFn
-    {
+    pub fn trace(oi: &ObjectInfo) -> TraceFn {
         todo!();
     }
 }
@@ -132,11 +127,11 @@ pub trait ExpiryOp: 'static {

 #[derive(Debug, Default, Clone)]
 pub struct TransitionedObject {
     pub name: String,
     pub version_id: String,
     pub tier: String,
     pub free_version: bool,
     pub status: String,
 }

 struct FreeVersionTask(ObjectInfo);
@@ -181,8 +176,6 @@ pub struct ExpiryState {
     stats: Option<ExpiryStats>,
 }

 impl ExpiryState {
     #[allow(clippy::new_ret_no_self)]
     pub fn new() -> Arc<RwLock<Self>> {
@@ -203,7 +196,7 @@ impl ExpiryState {
         if rxs.len() == 0 {
             return 0;
         }
-        let mut tasks=0;
+        let mut tasks = 0;
         for rx in rxs.iter() {
             tasks += rx.lock().await.len();
         }
@@ -244,7 +237,11 @@ impl ExpiryState {
     }

     pub async fn enqueue_by_days(&mut self, oi: &ObjectInfo, event: &lifecycle::Event, src: &LcEventSrc) {
-        let task = ExpiryTask {obj_info: oi.clone(), event: event.clone(), src: src.clone()};
+        let task = ExpiryTask {
+            obj_info: oi.clone(),
+            event: event.clone(),
+            src: src.clone(),
+        };
         let wrkr = self.get_worker_ch(task.op_hash());
         if wrkr.is_none() {
             *self.stats.as_mut().expect("err").missed_expiry_tasks.get_mut() += 1;
@@ -265,7 +262,11 @@ impl ExpiryState {
             return;
         }

-        let task = NewerNoncurrentTask {bucket: String::from(bucket), versions: versions, event: lc_event};
+        let task = NewerNoncurrentTask {
+            bucket: String::from(bucket),
+            versions: versions,
+            event: lc_event,
+        };
         let wrkr = self.get_worker_ch(task.op_hash());
         if wrkr.is_none() {
             *self.stats.as_mut().expect("err").missed_expiry_tasks.get_mut() += 1;
@@ -285,7 +286,7 @@ impl ExpiryState {
         if self.tasks_tx.len() == 0 {
             return None;
         }
-        Some(self.tasks_tx[h as usize %self.tasks_tx.len()].clone())
+        Some(self.tasks_tx[h as usize % self.tasks_tx.len()].clone())
     }

     pub async fn resize_workers(n: usize, api: Arc<ECStore>) {
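Note: `get_worker_ch` above routes each task to a worker channel by hashing it and taking the hash modulo the number of channels. A self-contained sketch of that routing scheme, using the std hasher instead of the xxh64 used in the crate:

    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    // Route a task to one of `workers` queues by hash-modulo, so the same
    // key always lands on the same worker while the pool size is fixed.
    fn worker_index<T: Hash>(task: &T, workers: usize) -> Option<usize> {
        if workers == 0 {
            return None; // mirrors the `tasks_tx.len() == 0` guard above
        }
        let mut h = DefaultHasher::new();
        task.hash(&mut h);
        Some((h.finish() as usize) % workers)
    }

    fn main() {
        let idx = worker_index(&("bucket", "object-key"), 8);
        assert!(matches!(idx, Some(i) if i < 8));
    }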
@@ -311,10 +312,10 @@ impl ExpiryState {

         let mut l = state.tasks_tx.len();
         while l > n {
-            let worker = state.tasks_tx[l-1].clone();
+            let worker = state.tasks_tx[l - 1].clone();
             worker.send(None).await.unwrap_or(());
-            state.tasks_tx.remove(l-1);
-            state.tasks_rx.remove(l-1);
+            state.tasks_tx.remove(l - 1);
+            state.tasks_rx.remove(l - 1);
             *state.stats.as_mut().expect("err").workers.get_mut() -= 1;
             l -= 1;
         }
@@ -357,7 +358,7 @@ impl ExpiryState {
         else if v.as_any().is::<FreeVersionTask>() {
             let v = v.as_any().downcast_ref::<FreeVersionTask>().expect("err!");
             let oi = v.0.clone();

         }
         else {
             //info!("Invalid work type - {:?}", v);
|
||||
}
|
||||
|
||||
pub async fn queue_transition_task(&self, oi: &ObjectInfo, event: &lifecycle::Event, src: &LcEventSrc) {
|
||||
let task = TransitionTask {obj_info: oi.clone(), src: src.clone(), event: event.clone()};
|
||||
let task = TransitionTask {
|
||||
obj_info: oi.clone(),
|
||||
src: src.clone(),
|
||||
event: event.clone(),
|
||||
};
|
||||
select! {
|
||||
//_ -> t.ctx.Done() => (),
|
||||
_ = self.transition_tx.send(Some(task)) => (),
|
||||
@@ -438,8 +443,8 @@ impl TransitionState {
     }

     pub async fn init(api: Arc<ECStore>) {
-        let mut n = 10;//globalAPIConfig.getTransitionWorkers();
-        let tw = 10;//globalILMConfig.getTransitionWorkers();
+        let mut n = 10; //globalAPIConfig.getTransitionWorkers();
+        let tw = 10; //globalILMConfig.getTransitionWorkers();
         if tw > 0 {
             n = tw;
         }
@@ -512,8 +517,10 @@ impl TransitionState {

     pub fn add_lastday_stats(&self, tier: &str, ts: TierStats) {
         let mut tier_stats = self.last_day_stats.lock().unwrap();
-        tier_stats.entry(tier.to_string()).and_modify(|e| e.add_stats(ts))
-            .or_insert(LastDayTierStats::default());
+        tier_stats
+            .entry(tier.to_string())
+            .and_modify(|e| e.add_stats(ts))
+            .or_insert(LastDayTierStats::default());
     }

     pub fn get_daily_all_tier_stats(&self) -> DailyAllTierStats {
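Note: the `entry` chain above is the standard map upsert pattern. A minimal sketch of the same idea on a plain `HashMap` (types simplified to integers):

    use std::collections::HashMap;

    fn main() {
        let mut per_tier: HashMap<String, u64> = HashMap::new();

        // Upsert: modify the entry if present, otherwise insert an initial
        // value. If the key is missing, the and_modify closure never runs
        // and or_insert supplies the starting value.
        for (tier, bytes) in [("WARM", 10u64), ("WARM", 5), ("COLD", 7)] {
            per_tier
                .entry(tier.to_string())
                .and_modify(|e| *e += bytes)
                .or_insert(bytes);
        }

        assert_eq!(per_tier["WARM"], 15);
        assert_eq!(per_tier["COLD"], 7);
    }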
@@ -574,7 +581,10 @@ impl AuditTierOp {
     }

     pub fn string(&self) -> String {
-        format!("tier:{},respNS:{},tx:{},err:{}", self.tier, self.time_to_responsens, self.output_bytes, self.error)
+        format!(
+            "tier:{},respNS:{},tx:{},err:{}",
+            self.tier, self.time_to_responsens, self.output_bytes, self.error
+        )
     }
 }

@@ -636,16 +646,21 @@ pub async fn enqueue_transition_immediate(oi: &ObjectInfo, src: LcEventSrc) {
             }
             GLOBAL_TransitionState.queue_transition_task(oi, &event, &src).await;
         }
-        _ => ()
+        _ => (),
         }
     }
 }

-pub async fn expire_transitioned_object(api: Arc<ECStore>, oi: &ObjectInfo, lc_event: &lifecycle::Event, src: &LcEventSrc) -> Result<ObjectInfo, std::io::Error> {
+pub async fn expire_transitioned_object(
+    api: Arc<ECStore>,
+    oi: &ObjectInfo,
+    lc_event: &lifecycle::Event,
+    src: &LcEventSrc,
+) -> Result<ObjectInfo, std::io::Error> {
     //let traceFn = GLOBAL_LifecycleSys.trace(oi);
     let mut opts = ObjectOptions {
         versioned: BucketVersioningSys::prefix_enabled(&oi.bucket, &oi.name).await,
-        expiration: ExpirationOptions {expire: true},
+        expiration: ExpirationOptions { expire: true },
         ..Default::default()
     };
     if lc_event.action == IlmAction::DeleteVersionAction {
@@ -660,10 +675,15 @@ pub async fn expire_transitioned_object(api: Arc<ECStore>, oi: &ObjectInfo, lc_e
                     return Ok(dobj);
                 }
                 Err(err) => return Err(std::io::Error::other(err)),
             }
         }
     }

-    let ret = delete_object_from_remote_tier(&oi.transitioned_object.name, &oi.transitioned_object.version_id, &oi.transitioned_object.tier).await;
+    let ret = delete_object_from_remote_tier(
+        &oi.transitioned_object.name,
+        &oi.transitioned_object.version_id,
+        &oi.transitioned_object.tier,
+    )
+    .await;
     if ret.is_ok() {
         opts.skip_decommissioned = true;
     } else {
@@ -679,8 +699,8 @@ pub async fn expire_transitioned_object(api: Arc<ECStore>, oi: &ObjectInfo, lc_e
         event_name = EventName::ObjectRemovedDeleteMarkerCreated;
     }
     let obj_info = ObjectInfo {
         name: oi.name.clone(),
         version_id: oi.version_id,
         delete_marker: oi.delete_marker,
         ..Default::default()
     };
@@ -712,15 +732,15 @@ pub async fn transition_object(api: Arc<ECStore>, oi: &ObjectInfo, lae: LcAuditE
     let opts = ObjectOptions {
         transition: TransitionOptions {
             status: lifecycle::TRANSITION_PENDING.to_string(),
             tier: lae.event.storage_class,
             etag: oi.etag.clone().expect("err").to_string(),
             ..Default::default()
         },
         //lifecycle_audit_event: lae,
         version_id: Some(oi.version_id.expect("err").to_string()),
         versioned: BucketVersioningSys::prefix_enabled(&oi.bucket, &oi.name).await,
         version_suspended: BucketVersioningSys::prefix_suspended(&oi.bucket, &oi.name).await,
         mod_time: oi.mod_time,
         ..Default::default()
     };
     time_ilm(1);
@@ -731,7 +751,14 @@ pub fn audit_tier_actions(api: ECStore, tier: &str, bytes: i64) -> TimeFn {
     todo!();
 }

-pub async fn get_transitioned_object_reader(bucket: &str, object: &str, rs: HTTPRangeSpec, h: HeaderMap, oi: ObjectInfo, opts: &ObjectOptions) -> Result<GetObjectReader, std::io::Error> {
+pub async fn get_transitioned_object_reader(
+    bucket: &str,
+    object: &str,
+    rs: HTTPRangeSpec,
+    h: HeaderMap,
+    oi: ObjectInfo,
+    opts: &ObjectOptions,
+) -> Result<GetObjectReader, std::io::Error> {
     let mut tier_config_mgr = GLOBAL_TierConfigMgr.write().await;
     let tgt_client = match tier_config_mgr.get_driver(&oi.transitioned_object.tier).await {
         Ok(d) => d,
@@ -752,7 +779,9 @@ pub async fn get_transitioned_object_reader(bucket: &str, object: &str, rs: HTTP

     //return Ok(HttpFileReader::new(rs, &oi, opts, &h));
     //timeTierAction := auditTierActions(oi.transitioned_object.Tier, length)
-    let reader = tgt_client.get(&oi.transitioned_object.name, &oi.transitioned_object.version_id, gopts).await?;
+    let reader = tgt_client
+        .get(&oi.transitioned_object.name, &oi.transitioned_object.version_id, gopts)
+        .await?;
     Ok(get_fn(reader, h))
 }

@@ -771,18 +800,18 @@ pub trait LifecycleOps {
 impl LifecycleOps for ObjectInfo {
     fn to_lifecycle_opts(&self) -> lifecycle::ObjectOpts {
         lifecycle::ObjectOpts {
             name: self.name.clone(),
             user_tags: self.user_tags.clone(),
             version_id: self.version_id.expect("err").to_string(),
             mod_time: self.mod_time,
             size: self.size,
             is_latest: self.is_latest,
             num_versions: self.num_versions,
             delete_marker: self.delete_marker,
             successor_mod_time: self.successor_mod_time,
             //restore_ongoing: self.restore_ongoing,
             //restore_expires: self.restore_expires,
             transition_status: self.transitioned_object.status.clone(),
             ..Default::default()
         }
     }
@@ -790,9 +819,9 @@ impl LifecycleOps for ObjectInfo {

 #[derive(Debug, Default, Clone)]
 pub struct S3Location {
     pub bucketname: String,
     //pub encryption: Encryption,
     pub prefix: String,
     pub storage_class: String,
     //pub tagging: Tags,
     pub user_metadata: HashMap<String, String>,
@@ -803,13 +832,12 @@ pub struct OutputLocation(pub S3Location);

 #[derive(Debug, Default, Clone)]
 pub struct RestoreObjectRequest {
     pub days: i64,
     pub ror_type: String,
     pub tier: String,
     pub description: String,
     //pub select_parameters: SelectParameters,
     pub output_location: OutputLocation,
 }

 const MAX_RESTORE_OBJECT_REQUEST_SIZE: i64 = 2 << 20;

@@ -1,12 +1,12 @@
+use s3s::dto::{
+    BucketLifecycleConfiguration, ExpirationStatus, LifecycleExpiration, LifecycleRule, NoncurrentVersionTransition,
+    ObjectLockConfiguration, ObjectLockEnabled, Transition,
+};
 use std::cmp::Ordering;
 use std::env;
 use std::fmt::Display;
-use s3s::dto::{
-    BucketLifecycleConfiguration, ExpirationStatus, LifecycleRule, ObjectLockConfiguration,
-    ObjectLockEnabled, LifecycleExpiration, Transition, NoncurrentVersionTransition,
-};
 use time::macros::{datetime, offset};
-use time::{self, OffsetDateTime, Duration};
+use time::{self, Duration, OffsetDateTime};

 use crate::bucket::lifecycle::rule::TransitionOps;

@@ -16,10 +16,11 @@ pub const TRANSITION_COMPLETE: &str = "complete";
 pub const TRANSITION_PENDING: &str = "pending";

 const ERR_LIFECYCLE_TOO_MANY_RULES: &str = "Lifecycle configuration allows a maximum of 1000 rules";
 const ERR_LIFECYCLE_NO_RULE: &str = "Lifecycle configuration should have at least one rule";
 const ERR_LIFECYCLE_DUPLICATE_ID: &str = "Rule ID must be unique. Found same ID for more than one rule";
 const ERR_XML_NOT_WELL_FORMED: &str = "The XML you provided was not well-formed or did not validate against our published schema";
-const ERR_LIFECYCLE_BUCKET_LOCKED: &str = "ExpiredObjectAllVersions element and DelMarkerExpiration action cannot be used on an object locked bucket";
+const ERR_LIFECYCLE_BUCKET_LOCKED: &str =
+    "ExpiredObjectAllVersions element and DelMarkerExpiration action cannot be used on an object locked bucket";

 #[derive(Debug, Clone, PartialEq, Eq)]
 pub enum IlmAction {
@@ -52,7 +53,10 @@ impl IlmAction {
         if self.delete_restored() {
             return true;
         }
-        *self == Self::DeleteVersionAction || *self == Self::DeleteAction || *self == Self::DeleteAllVersionsAction || *self == Self::DelMarkerDeleteAllVersionsAction
+        *self == Self::DeleteVersionAction
+            || *self == Self::DeleteAction
+            || *self == Self::DeleteAllVersionsAction
+            || *self == Self::DelMarkerDeleteAllVersionsAction
     }
 }

@@ -204,7 +208,10 @@ impl Lifecycle for BucketLifecycleConfiguration {
             return true;
         }
         let rule_expiration = rule.expiration.as_ref().expect("err!");
-        if !rule_expiration.date.is_none() && OffsetDateTime::from(rule_expiration.date.clone().expect("err!")).unix_timestamp() < OffsetDateTime::now_utc().unix_timestamp() {
+        if !rule_expiration.date.is_none()
+            && OffsetDateTime::from(rule_expiration.date.clone().expect("err!")).unix_timestamp()
+                < OffsetDateTime::now_utc().unix_timestamp()
+        {
             return true;
         }
         if !rule_expiration.date.is_none() {
@@ -213,9 +220,12 @@ impl Lifecycle for BucketLifecycleConfiguration {
         if rule_expiration.expired_object_delete_marker.expect("err!") {
             return true;
         }
-        let rule_transitions: &[Transition]= &rule.transitions.as_ref().expect("err!");
+        let rule_transitions: &[Transition] = &rule.transitions.as_ref().expect("err!");
         let rule_transitions_0 = rule_transitions[0].clone();
-        if !rule_transitions_0.date.is_none() && OffsetDateTime::from(rule_transitions_0.date.expect("err!")).unix_timestamp() < OffsetDateTime::now_utc().unix_timestamp() {
+        if !rule_transitions_0.date.is_none()
+            && OffsetDateTime::from(rule_transitions_0.date.expect("err!")).unix_timestamp()
+                < OffsetDateTime::now_utc().unix_timestamp()
+        {
             return true;
         }
         if !rule.transitions.is_none() {
@@ -242,18 +252,18 @@ impl Lifecycle for BucketLifecycleConfiguration {
                     return Err(std::io::Error::other(ERR_LIFECYCLE_BUCKET_LOCKED));
                 }
             } /*else {
                 if object_lock_enabled.as_str() == ObjectLockEnabled::ENABLED {
                     return Err(Error::msg(ERR_LIFECYCLE_BUCKET_LOCKED));
                 }
             }*/
             }
         }
     }
-        for (i,_) in self.rules.iter().enumerate() {
-            if i == self.rules.len()-1 {
+        for (i, _) in self.rules.iter().enumerate() {
+            if i == self.rules.len() - 1 {
                 break;
             }
-            let other_rules = &self.rules[i+1..];
+            let other_rules = &self.rules[i + 1..];
             for other_rule in other_rules {
                 if self.rules[i].id == other_rule.id {
                     return Err(std::io::Error::other(ERR_LIFECYCLE_DUPLICATE_ID));
@@ -281,7 +291,7 @@ impl Lifecycle for BucketLifecycleConfiguration {
                 continue;
             }*/
             //if !obj.delete_marker && !rule.filter.BySize(obj.size) {
-            if !obj.delete_marker && false{
+            if !obj.delete_marker && false {
                 continue;
             }
             rules.push(rule.clone());
@@ -306,9 +316,9 @@ impl Lifecycle for BucketLifecycleConfiguration {
             action = IlmAction::DeleteRestoredVersionAction;
         }

-        events.push(Event{
+        events.push(Event {
             action: action,
             due: Some(now),
             rule_id: "".into(),
             noncurrent_days: 0,
             newer_noncurrent_versions: 0,
@@ -322,10 +332,10 @@ impl Lifecycle for BucketLifecycleConfiguration {
         if obj.expired_object_deletemarker() {
             if let Some(expiration) = rule.expiration.as_ref() {
                 if let Some(expired_object_delete_marker) = expiration.expired_object_delete_marker {
-                    events.push(Event{
+                    events.push(Event {
                         action: IlmAction::DeleteVersionAction,
                         rule_id: rule.id.clone().expect("err!"),
                         due: Some(now),
                         noncurrent_days: 0,
                         newer_noncurrent_versions: 0,
                         storage_class: "".into(),
@@ -336,12 +346,12 @@ impl Lifecycle for BucketLifecycleConfiguration {

         if let Some(expiration) = rule.expiration.as_ref() {
             if let Some(days) = expiration.days {
-                let expected_expiry = expected_expiry_time(obj.mod_time.expect("err!"), days/*, date*/);
+                let expected_expiry = expected_expiry_time(obj.mod_time.expect("err!"), days /*, date*/);
                 if now.unix_timestamp() == 0 || now.unix_timestamp() > expected_expiry.unix_timestamp() {
-                    events.push(Event{
+                    events.push(Event {
                         action: IlmAction::DeleteVersionAction,
                         rule_id: rule.id.clone().expect("err!"),
                         due: Some(expected_expiry),
                         noncurrent_days: 0,
                         newer_noncurrent_versions: 0,
                         storage_class: "".into(),
@@ -359,10 +369,10 @@ impl Lifecycle for BucketLifecycleConfiguration {
         let due = expiration.next_due(obj);
         if let Some(due) = due {
             if now.unix_timestamp() == 0 || now.unix_timestamp() > due.unix_timestamp() {
-                events.push(Event{
+                events.push(Event {
                     action: IlmAction::DelMarkerDeleteAllVersionsAction,
                     rule_id: rule.id.clone().expect("err!"),
                     due: Some(due),
                     noncurrent_days: 0,
                     newer_noncurrent_versions: 0,
                     storage_class: "".into(),
@@ -392,10 +402,10 @@ impl Lifecycle for BucketLifecycleConfiguration {
         if let Some(successor_mod_time) = obj.successor_mod_time {
             let expected_expiry = expected_expiry_time(successor_mod_time, noncurrent_days);
             if now.unix_timestamp() == 0 || now.unix_timestamp() > expected_expiry.unix_timestamp() {
-                events.push(Event{
+                events.push(Event {
                     action: IlmAction::DeleteVersionAction,
                     rule_id: rule.id.clone().expect("err!"),
                     due: Some(expected_expiry),
                     noncurrent_days: 0,
                     newer_noncurrent_versions: 0,
                     storage_class: "".into(),
@@ -413,12 +423,19 @@ impl Lifecycle for BucketLifecycleConfiguration {
         if storage_class.as_str() != "" {
             if !obj.delete_marker && obj.transition_status != TRANSITION_COMPLETE {
                 let due = rule.noncurrent_version_transitions.as_ref().unwrap()[0].next_due(obj);
-                if due.is_some() && (now.unix_timestamp() == 0 || now.unix_timestamp() > due.unwrap().unix_timestamp()) {
+                if due.is_some()
+                    && (now.unix_timestamp() == 0 || now.unix_timestamp() > due.unwrap().unix_timestamp())
+                {
                     events.push(Event {
                         action: IlmAction::TransitionVersionAction,
                         rule_id: rule.id.clone().expect("err!"),
                         due,
-                        storage_class: rule.noncurrent_version_transitions.as_ref().unwrap()[0].storage_class.clone().unwrap().as_str().to_string(),
+                        storage_class: rule.noncurrent_version_transitions.as_ref().unwrap()[0]
+                            .storage_class
+                            .clone()
+                            .unwrap()
+                            .as_str()
+                            .to_string(),
                         ..Default::default()
                     });
                 }
@@ -434,10 +451,10 @@ impl Lifecycle for BucketLifecycleConfiguration {
         let date0 = OffsetDateTime::from(date.clone());
         if date0.unix_timestamp() != 0 {
             if now.unix_timestamp() == 0 || now.unix_timestamp() > date0.unix_timestamp() {
-                events.push(Event{
+                events.push(Event {
                     action: IlmAction::DeleteAction,
                     rule_id: rule.id.clone().expect("err!"),
                     due: Some(date0),
                     noncurrent_days: 0,
                     newer_noncurrent_versions: 0,
                     storage_class: "".into(),
@@ -448,10 +465,10 @@ impl Lifecycle for BucketLifecycleConfiguration {
         if days != 0 {
             let expected_expiry: OffsetDateTime = expected_expiry_time(obj.mod_time.expect("err!"), days);
             if now.unix_timestamp() == 0 || now.unix_timestamp() > expected_expiry.unix_timestamp() {
-                let mut event = Event{
+                let mut event = Event {
                     action: IlmAction::DeleteAction,
                     rule_id: rule.id.clone().expect("err!"),
                     due: Some(expected_expiry),
                     noncurrent_days: 0,
                     newer_noncurrent_versions: 0,
                     storage_class: "".into(),
@@ -469,10 +486,12 @@ impl Lifecycle for BucketLifecycleConfiguration {
         if let Some(ref transitions) = rule.transitions {
             let due = transitions[0].next_due(obj);
             if let Some(due) = due {
-                if due.unix_timestamp() > 0 && (now.unix_timestamp() == 0 || now.unix_timestamp() > due.unix_timestamp()) {
-                    events.push(Event{
+                if due.unix_timestamp() > 0
+                    && (now.unix_timestamp() == 0 || now.unix_timestamp() > due.unix_timestamp())
+                {
+                    events.push(Event {
                         action: IlmAction::TransitionAction,
                         rule_id: rule.id.clone().expect("err!"),
                         due: Some(due),
                         storage_class: transitions[0].storage_class.clone().expect("err!").as_str().to_string(),
                         noncurrent_days: 0,
@@ -488,20 +507,27 @@ impl Lifecycle for BucketLifecycleConfiguration {

         if events.len() > 0 {
             events.sort_by(|a, b| {
-                if now.unix_timestamp() > a.due.expect("err!").unix_timestamp() && now.unix_timestamp() > b.due.expect("err").unix_timestamp() || a.due.expect("err").unix_timestamp() == b.due.expect("err").unix_timestamp() {
+                if now.unix_timestamp() > a.due.expect("err!").unix_timestamp()
+                    && now.unix_timestamp() > b.due.expect("err").unix_timestamp()
+                    || a.due.expect("err").unix_timestamp() == b.due.expect("err").unix_timestamp()
+                {
                     match a.action {
-                        IlmAction::DeleteAllVersionsAction | IlmAction::DelMarkerDeleteAllVersionsAction
-                        | IlmAction::DeleteAction | IlmAction::DeleteVersionAction => {
+                        IlmAction::DeleteAllVersionsAction
+                        | IlmAction::DelMarkerDeleteAllVersionsAction
+                        | IlmAction::DeleteAction
+                        | IlmAction::DeleteVersionAction => {
                             return Ordering::Less;
                         }
-                        _ => ()
+                        _ => (),
                     }
                     match b.action {
-                        IlmAction::DeleteAllVersionsAction | IlmAction::DelMarkerDeleteAllVersionsAction
-                        | IlmAction::DeleteAction | IlmAction::DeleteVersionAction => {
+                        IlmAction::DeleteAllVersionsAction
+                        | IlmAction::DelMarkerDeleteAllVersionsAction
+                        | IlmAction::DeleteAction
+                        | IlmAction::DeleteVersionAction => {
                             return Ordering::Greater;
                         }
-                        _ => ()
+                        _ => (),
                     }
                     return Ordering::Less;
                 }
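Note: the comparator above sorts delete-type events ahead of everything else when both events are overdue. A compact, self-contained sketch of that "deletes sort first" rule (enum names invented for the example):

    use std::cmp::Ordering;

    #[derive(Debug, PartialEq)]
    enum Action {
        Delete,
        Transition,
    }

    // Deletes sort before everything else; otherwise keep relative order.
    fn cmp_events(a: &Action, b: &Action) -> Ordering {
        match (a, b) {
            (Action::Delete, Action::Delete) => Ordering::Equal,
            (Action::Delete, _) => Ordering::Less,
            (_, Action::Delete) => Ordering::Greater,
            _ => Ordering::Equal,
        }
    }

    fn main() {
        let mut evs = vec![Action::Transition, Action::Delete, Action::Transition];
        evs.sort_by(cmp_events);
        assert_eq!(evs[0], Action::Delete);
    }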
@@ -526,18 +552,18 @@ impl Lifecycle for BucketLifecycleConfiguration {
                 continue;
             }
             return Event {
                 action: IlmAction::DeleteVersionAction,
                 rule_id: rule.id.clone().expect("err"),
                 noncurrent_days: noncurrent_version_expiration.noncurrent_days.expect("noncurrent_days err.") as u32,
                 newer_noncurrent_versions: newer_noncurrent_versions as usize,
                 due: Some(OffsetDateTime::UNIX_EPOCH),
                 storage_class: "".into(),
             };
         } else {
             return Event {
                 action: IlmAction::DeleteVersionAction,
                 rule_id: rule.id.clone().expect("err"),
                 noncurrent_days: noncurrent_version_expiration.noncurrent_days.expect("noncurrent_days err.") as u32,
                 newer_noncurrent_versions: 0,
                 due: Some(OffsetDateTime::UNIX_EPOCH),
                 storage_class: "".into(),
@@ -601,7 +627,9 @@ pub fn expected_expiry_time(mod_time: OffsetDateTime, days: i32) -> OffsetDateTi
     if days == 0 {
         return mod_time;
     }
-    let t = mod_time.to_offset(offset!(-0:00:00)).saturating_add(Duration::days(0/*days as i64*/)); //debug
+    let t = mod_time
+        .to_offset(offset!(-0:00:00))
+        .saturating_add(Duration::days(0 /*days as i64*/)); //debug
     let mut hour = 3600;
     if let Ok(env_ilm_hour) = env::var("_RUSTFS_ILM_HOUR") {
         if let Ok(num_hour) = env_ilm_hour.parse::<usize>() {
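Note: the expiry computation adds `days` to the object's modification time; the `Duration::days(0)` above is a leftover debug stub, with the intended `days as i64` still commented out. A std-only sketch of the intended arithmetic, under those assumptions:

    use std::time::{Duration, SystemTime};

    // Intended behavior sketched from the surrounding code: expiry is the
    // modification time plus `days` whole days (the real code additionally
    // rounds to an hour boundary controlled by _RUSTFS_ILM_HOUR).
    fn expected_expiry(mod_time: SystemTime, days: u64) -> SystemTime {
        if days == 0 {
            return mod_time;
        }
        mod_time + Duration::from_secs(days * 24 * 3600)
    }

    fn main() {
        let now = SystemTime::now();
        let in_7_days = expected_expiry(now, 7);
        assert_eq!(in_7_days.duration_since(now).unwrap().as_secs(), 7 * 24 * 3600);
    }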
@@ -661,7 +689,7 @@ impl Default for Event {

 #[derive(Debug, Clone, Default)]
 pub struct ExpirationOptions {
-    pub expire: bool
+    pub expire: bool,
 }

 impl ExpirationOptions {
@@ -1,6 +1,6 @@
-pub mod rule;
-pub mod lifecycle;
-pub mod tier_sweeper;
-pub mod tier_last_day_stats;
-pub mod bucket_lifecycle_audit;
 pub mod bucket_lifecycle_ops;
+pub mod bucket_lifecycle_audit;
+pub mod lifecycle;
+pub mod rule;
+pub mod tier_last_day_stats;
+pub mod tier_sweeper;
@@ -1,10 +1,9 @@
-use s3s::dto::{
-    LifecycleRuleFilter, Transition,
-};
+use s3s::dto::{LifecycleRuleFilter, Transition};

 const ERR_TRANSITION_INVALID_DAYS: &str = "Days must be 0 or greater when used with Transition";
 const ERR_TRANSITION_INVALID_DATE: &str = "Date must be provided in ISO 8601 format";
-const ERR_TRANSITION_INVALID: &str = "Exactly one of Days (0 or greater) or Date (positive ISO 8601 format) should be present in Transition.";
+const ERR_TRANSITION_INVALID: &str =
+    "Exactly one of Days (0 or greater) or Date (positive ISO 8601 format) should be present in Transition.";
 const ERR_TRANSITION_DATE_NOT_MIDNIGHT: &str = "'Date' must be at midnight GMT";

 pub trait Filter {
@@ -39,7 +38,6 @@ impl TransitionOps for Transition {
     }
 }

-
 #[cfg(test)]
 mod test {
     use super::*;

@@ -1,9 +1,9 @@
 use sha2::Sha256;

 use std::collections::HashMap;
+use std::ops::Sub;
 use time::OffsetDateTime;
 use tracing::{error, warn};
-use std::ops::Sub;

 use crate::heal::data_usage_cache::TierStats;
@@ -79,8 +79,5 @@ impl LastDayTierStats {
     }
 }

-
 #[cfg(test)]
-mod test {
-
-}
+mod test {}
@@ -1,11 +1,11 @@
 use sha2::{Digest, Sha256};
-use xxhash_rust::xxh64;
 use std::any::Any;
 use std::io::{Cursor, Write};
+use xxhash_rust::xxh64;

-use crate::global::GLOBAL_TierConfigMgr;
 use super::bucket_lifecycle_ops::{ExpiryOp, GLOBAL_ExpiryState, TransitionedObject};
 use super::lifecycle::{self, ObjectOpts};
+use crate::global::GLOBAL_TierConfigMgr;

 static XXHASH_SEED: u64 = 0;
@@ -44,9 +44,9 @@ impl ObjSweeper {
     }

     pub fn get_opts(&self) -> lifecycle::ObjectOpts {
-        let mut opts = ObjectOpts{
+        let mut opts = ObjectOpts {
             version_id: self.version_id.clone(),
             versioned: self.versioned,
             version_suspended: self.suspended,
             ..Default::default()
         };
@@ -69,16 +69,18 @@ impl ObjSweeper {
         }

         let mut del_tier = false;
-        if !self.versioned || self.suspended { // 1, 2.a, 2.b
+        if !self.versioned || self.suspended {
+            // 1, 2.a, 2.b
             del_tier = true;
-        } else if self.versioned && self.version_id != "" { // 3.a
+        } else if self.versioned && self.version_id != "" {
+            // 3.a
             del_tier = true;
         }
         if del_tier {
             return Some(Jentry {
                 obj_name: self.remote_object.clone(),
                 version_id: self.transition_version_id.clone(),
                 tier_name: self.transition_tier.clone(),
             });
         }
         None
@@ -123,8 +125,5 @@ pub async fn delete_object_from_remote_tier(obj_name: &str, rv_id: &str, tier_na
     w.remove(obj_name, rv_id).await
 }

-
 #[cfg(test)]
-mod test {
-
-}
+mod test {}
@@ -1,4 +1,5 @@
 pub mod error;
+pub mod lifecycle;
 pub mod metadata;
 pub mod metadata_sys;
 pub mod object_lock;
@@ -10,4 +11,3 @@ pub mod target;
 pub mod utils;
 pub mod versioning;
 pub mod versioning_sys;
-pub mod lifecycle;
@@ -3,13 +3,8 @@ use std::collections::HashMap;
 use time::{OffsetDateTime, format_description};
 use tracing::{error, warn};

-use s3s::dto::{
-    ObjectLockRetentionMode, ObjectLockRetention, ObjectLockLegalHoldStatus, ObjectLockLegalHold,
-    Date,
-};
-use s3s::header::{
-    X_AMZ_OBJECT_LOCK_MODE, X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE, X_AMZ_OBJECT_LOCK_LEGAL_HOLD,
-};
+use s3s::dto::{Date, ObjectLockLegalHold, ObjectLockLegalHoldStatus, ObjectLockRetention, ObjectLockRetentionMode};
+use s3s::header::{X_AMZ_OBJECT_LOCK_LEGAL_HOLD, X_AMZ_OBJECT_LOCK_MODE, X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE};

 //const AMZ_OBJECTLOCK_BYPASS_RET_GOVERNANCE: &str = "X-Amz-Bypass-Governance-Retention";
 //const AMZ_OBJECTLOCK_RETAIN_UNTIL_DATE: &str = "X-Amz-Object-Lock-Retain-Until-Date";
@@ -17,12 +12,14 @@ use s3s::header::{
 //const AMZ_OBJECTLOCK_LEGALHOLD: &str = "X-Amz-Object-Lock-Legal-Hold";

 const ERR_MALFORMED_BUCKET_OBJECT_CONFIG: &str = "invalid bucket object lock config";
 const ERR_INVALID_RETENTION_DATE: &str = "date must be provided in ISO 8601 format";
 const ERR_PAST_OBJECTLOCK_RETAIN_DATE: &str = "the retain until date must be in the future";
 const ERR_UNKNOWN_WORMMODE_DIRECTIVE: &str = "unknown WORM mode directive";
-const ERR_OBJECTLOCK_MISSING_CONTENT_MD5: &str = "content-MD5 HTTP header is required for Put Object requests with Object Lock parameters";
-const ERR_OBJECTLOCK_INVALID_HEADERS: &str = "x-amz-object-lock-retain-until-date and x-amz-object-lock-mode must both be supplied";
+const ERR_OBJECTLOCK_MISSING_CONTENT_MD5: &str =
+    "content-MD5 HTTP header is required for Put Object requests with Object Lock parameters";
+const ERR_OBJECTLOCK_INVALID_HEADERS: &str =
+    "x-amz-object-lock-retain-until-date and x-amz-object-lock-mode must both be supplied";
 const ERR_MALFORMED_XML: &str = "the XML you provided was not well-formed or did not validate against our published schema";

 pub fn utc_now_ntp() -> OffsetDateTime {
     return OffsetDateTime::now_utc();
@@ -39,7 +36,10 @@ pub fn get_object_retention_meta(meta: HashMap<String, String>) -> ObjectLockRet
     if let Some(mode_str) = mode_str {
         mode = parse_ret_mode(mode_str.as_str());
     } else {
-        return ObjectLockRetention {mode: None, retain_until_date: None};
+        return ObjectLockRetention {
+            mode: None,
+            retain_until_date: None,
+        };
     }

     let mut till_str = meta.get(X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE.as_str().to_lowercase().as_str());
@@ -49,10 +49,13 @@ pub fn get_object_retention_meta(meta: HashMap<String, String>) -> ObjectLockRet
     if let Some(till_str) = till_str {
         let t = OffsetDateTime::parse(till_str, &format_description::well_known::Iso8601::DEFAULT);
         if t.is_err() {
             retain_until_date = Date::from(t.expect("err")); //TODO: utc
         }
     }
-    ObjectLockRetention {mode: Some(mode), retain_until_date: Some(retain_until_date)}
+    ObjectLockRetention {
+        mode: Some(mode),
+        retain_until_date: Some(retain_until_date),
+    }
 }

 pub fn get_object_legalhold_meta(meta: HashMap<String, String>) -> ObjectLockLegalHold {
@@ -61,9 +64,11 @@ pub fn get_object_legalhold_meta(meta: HashMap<String, String>) -> ObjectLockLeg
         hold_str = Some(&meta[X_AMZ_OBJECT_LOCK_LEGAL_HOLD.as_str()]);
     }
     if let Some(hold_str) = hold_str {
-        return ObjectLockLegalHold {status: Some(parse_legalhold_status(hold_str))};
+        return ObjectLockLegalHold {
+            status: Some(parse_legalhold_status(hold_str)),
+        };
     }
-    ObjectLockLegalHold {status: None}
+    ObjectLockLegalHold { status: None }
 }

 pub fn parse_ret_mode(mode_str: &str) -> ObjectLockRetentionMode {
@@ -75,7 +80,7 @@ pub fn parse_ret_mode(mode_str: &str) -> ObjectLockRetentionMode {
         "COMPLIANCE" => {
             mode = ObjectLockRetentionMode::from_static(ObjectLockRetentionMode::COMPLIANCE);
         }
-        _ => unreachable!()
+        _ => unreachable!(),
     }
     mode
 }
@@ -89,7 +94,7 @@ pub fn parse_legalhold_status(hold_str: &str) -> ObjectLockLegalHoldStatus {
         "OFF" => {
             st = ObjectLockLegalHoldStatus::from_static(ObjectLockLegalHoldStatus::OFF);
         }
-        _ => unreachable!()
+        _ => unreachable!(),
    }
    st
 }

@@ -3,12 +3,10 @@ use std::sync::Arc;
 use time::OffsetDateTime;
 use tracing::{error, warn};

-use s3s::dto::{
-    DefaultRetention, ObjectLockRetentionMode, ObjectLockLegalHoldStatus,
-};
+use s3s::dto::{DefaultRetention, ObjectLockLegalHoldStatus, ObjectLockRetentionMode};

-use crate::store_api::ObjectInfo;
 use crate::bucket::metadata_sys::get_object_lock_config;
+use crate::store_api::ObjectInfo;

 use super::objectlock;
@@ -21,7 +19,12 @@ impl BucketObjectLockSys {
     }

     pub async fn get(bucket: &str) -> Option<DefaultRetention> {
-        if let Some(object_lock_rule) = get_object_lock_config(bucket).await.expect("get_object_lock_config err!").0.rule {
+        if let Some(object_lock_rule) = get_object_lock_config(bucket)
+            .await
+            .expect("get_object_lock_config err!")
+            .0
+            .rule
+        {
             return object_lock_rule.default_retention;
         }
         None
@@ -35,10 +38,10 @@ pub fn enforce_retention_for_deletion(obj_info: &ObjectInfo) -> bool {

     let lhold = objectlock::get_object_legalhold_meta(obj_info.user_defined.clone().expect("err"));
     match lhold.status {
-        Some(st) if st.as_str()==ObjectLockLegalHoldStatus::ON => {
+        Some(st) if st.as_str() == ObjectLockLegalHoldStatus::ON => {
             return true;
         }
-        _ => ()
+        _ => (),
     }

     let ret = objectlock::get_object_retention_meta(obj_info.user_defined.clone().expect("err"));
@@ -49,7 +52,7 @@ pub fn enforce_retention_for_deletion(obj_info: &ObjectInfo) -> bool {
                 return true;
             }
         }
-        _ => ()
+        _ => (),
     }
     false
 }

@@ -1,23 +1,17 @@
 #![allow(clippy::map_entry)]
 use lazy_static::lazy_static;
 use std::ops::{BitAnd, BitOr};
 use std::{collections::HashMap, sync::Arc};

-use crate::client::{
-    api_put_object::PutObjectOptions,
-    api_s3_datatypes::ObjectPart,
-};
-use crate::{
-    disk::DiskAPI,
-    store_api::GetObjectReader,
-};
+use crate::client::{api_put_object::PutObjectOptions, api_s3_datatypes::ObjectPart};
+use crate::{disk::DiskAPI, store_api::GetObjectReader};
 use reader::hasher::{Hasher, Sha256};
 use rustfs_utils::crypto::{base64_decode, base64_encode};
 use s3s::header::{
     X_AMZ_CHECKSUM_ALGORITHM, X_AMZ_CHECKSUM_CRC32, X_AMZ_CHECKSUM_CRC32C, X_AMZ_CHECKSUM_SHA1, X_AMZ_CHECKSUM_SHA256,
 };

-use enumset::{enum_set, EnumSet, EnumSetType};
+use enumset::{EnumSet, EnumSetType, enum_set};

 #[derive(Debug, EnumSetType, Default)]
 #[enumset(repr = "u8")]
@@ -38,8 +32,10 @@ lazy_static! {
|
||||
s.remove(ChecksumMode::ChecksumFullObject);
|
||||
s
|
||||
};
|
||||
static ref C_ChecksumFullObjectCRC32: EnumSet<ChecksumMode> = enum_set!(ChecksumMode::ChecksumCRC32 | ChecksumMode::ChecksumFullObject);
|
||||
static ref C_ChecksumFullObjectCRC32C: EnumSet<ChecksumMode> = enum_set!(ChecksumMode::ChecksumCRC32C | ChecksumMode::ChecksumFullObject);
|
||||
static ref C_ChecksumFullObjectCRC32: EnumSet<ChecksumMode> =
|
||||
enum_set!(ChecksumMode::ChecksumCRC32 | ChecksumMode::ChecksumFullObject);
|
||||
static ref C_ChecksumFullObjectCRC32C: EnumSet<ChecksumMode> =
|
||||
enum_set!(ChecksumMode::ChecksumCRC32C | ChecksumMode::ChecksumFullObject);
|
||||
}
|
||||
const AMZ_CHECKSUM_CRC64NVME: &str = "x-amz-checksum-crc64nvme";
|
||||
|
||||
@@ -49,24 +45,12 @@ impl ChecksumMode {
    pub fn base(&self) -> ChecksumMode {
        let s = EnumSet::from(*self).intersection(*C_ChecksumMask);
        match s.as_u8() {
            1_u8 => {
                ChecksumMode::ChecksumNone
            }
            2_u8 => {
                ChecksumMode::ChecksumSHA256
            }
            4_u8 => {
                ChecksumMode::ChecksumSHA1
            }
            8_u8 => {
                ChecksumMode::ChecksumCRC32
            }
            16_u8 => {
                ChecksumMode::ChecksumCRC32C
            }
            32_u8 => {
                ChecksumMode::ChecksumCRC64NVME
            }
            1_u8 => ChecksumMode::ChecksumNone,
            2_u8 => ChecksumMode::ChecksumSHA256,
            4_u8 => ChecksumMode::ChecksumSHA1,
            8_u8 => ChecksumMode::ChecksumCRC32,
            16_u8 => ChecksumMode::ChecksumCRC32C,
            32_u8 => ChecksumMode::ChecksumCRC64NVME,
            _ => panic!("enum err."),
        }
    }
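Note: with `#[enumset(repr = "u8")]` each variant occupies one bit, which is why `base()` can match on the powers of two 1/2/4/8/16/32. A standalone illustration (toy enum, not the one in this file):

    use enumset::{enum_set, EnumSet, EnumSetType};

    #[derive(EnumSetType, Debug)]
    #[enumset(repr = "u8")]
    enum Mode { None, Sha256, Sha1 }

    fn main() {
        let s: EnumSet<Mode> = enum_set!(Mode::Sha1);
        assert_eq!(s.as_u8(), 4); // third variant -> bit 2 -> value 4
    }
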
@@ -119,17 +103,13 @@ impl ChecksumMode {
        let u = EnumSet::from(*self).intersection(*C_ChecksumMask).as_u8();
        if u == ChecksumMode::ChecksumCRC32 as u8 || u == ChecksumMode::ChecksumCRC32C as u8 {
            4
        }
        else if u == ChecksumMode::ChecksumSHA1 as u8 {
            4//sha1.size
        }
        else if u == ChecksumMode::ChecksumSHA256 as u8 {
            4//sha256.size
        }
        else if u == ChecksumMode::ChecksumCRC64NVME as u8 {
            4//crc64.size
        }
        else {
        } else if u == ChecksumMode::ChecksumSHA1 as u8 {
            4 //sha1.size
        } else if u == ChecksumMode::ChecksumSHA256 as u8 {
            4 //sha256.size
        } else if u == ChecksumMode::ChecksumCRC64NVME as u8 {
            4 //crc64.size
        } else {
            0
        }
    }
@@ -196,7 +176,7 @@ impl ChecksumMode {
            ChecksumMode::ChecksumCRC64NVME => {
                return "CRC64NVME".to_string();
            }
            _=> {
            _ => {
                return "<invalid>".to_string();
            }
        }
@@ -226,10 +206,13 @@ impl ChecksumMode {
            }
        });
        let c = self.base();
        let mut crc_bytes = Vec::<u8>::with_capacity(p.len()*self.raw_byte_len() as usize);
        let mut crc_bytes = Vec::<u8>::with_capacity(p.len() * self.raw_byte_len() as usize);
        let mut h = self.hasher()?;
        h.write(&crc_bytes);
        Ok(Checksum {checksum_type: self.clone(), r: h.sum().as_bytes().to_vec()})
        Ok(Checksum {
            checksum_type: self.clone(),
            r: h.sum().as_bytes().to_vec(),
        })
    }

    pub fn full_object_checksum(&self, p: &mut [ObjectPart]) -> Result<Checksum, std::io::Error> {
@@ -246,7 +229,10 @@ struct Checksum {
impl Checksum {
    fn new(t: ChecksumMode, b: &[u8]) -> Checksum {
        if t.is_set() && b.len() == t.raw_byte_len() {
            return Checksum {checksum_type: t, r: b.to_vec()};
            return Checksum {
                checksum_type: t,
                r: b.to_vec(),
            };
        }
        Checksum::default()
    }
@@ -257,7 +243,7 @@ impl Checksum {
            Err(err) => return Err(std::io::Error::other(err.to_string())),
        };
        if t.is_set() && b.len() == t.raw_byte_len() {
            return Ok(Checksum {checksum_type: t, r: b});
            return Ok(Checksum { checksum_type: t, r: b });
        }
        Ok(Checksum::default())
    }
@@ -282,28 +268,30 @@ impl Checksum {
    }

pub fn add_auto_checksum_headers(opts: &mut PutObjectOptions) {
    opts.user_metadata.insert("X-Amz-Checksum-Algorithm".to_string(), opts.auto_checksum.to_string());
    opts.user_metadata
        .insert("X-Amz-Checksum-Algorithm".to_string(), opts.auto_checksum.to_string());
    if opts.auto_checksum.full_object_requested() {
        opts.user_metadata.insert("X-Amz-Checksum-Type".to_string(), "FULL_OBJECT".to_string());
        opts.user_metadata
            .insert("X-Amz-Checksum-Type".to_string(), "FULL_OBJECT".to_string());
    }
}

pub fn apply_auto_checksum(opts: &mut PutObjectOptions, all_parts: &mut [ObjectPart]) -> Result<(), std::io::Error> {
    if opts.auto_checksum.can_composite() && !opts.auto_checksum.is(ChecksumMode::ChecksumFullObject) {
        let crc = opts.auto_checksum.composite_checksum(all_parts)?;
        opts.user_metadata = {
            let mut hm = HashMap::new();
            hm.insert(opts.auto_checksum.key(), crc.encoded());
            hm
        }
        opts.user_metadata = {
            let mut hm = HashMap::new();
            hm.insert(opts.auto_checksum.key(), crc.encoded());
            hm
        }
    } else if opts.auto_checksum.can_merge_crc() {
        let crc = opts.auto_checksum.full_object_checksum(all_parts)?;
        opts.user_metadata = {
            let mut hm = HashMap::new();
            hm.insert(opts.auto_checksum.key_capitalized(), crc.encoded());
            hm.insert("X-Amz-Checksum-Type".to_string(), "FULL_OBJECT".to_string());
            hm
        }
        opts.user_metadata = {
            let mut hm = HashMap::new();
            hm.insert(opts.auto_checksum.key_capitalized(), crc.encoded());
            hm.insert("X-Amz-Checksum-Type".to_string(), "FULL_OBJECT".to_string());
            hm
        }
    }

    Ok(())

@@ -30,4 +30,4 @@ impl AdminError {
            status_code: StatusCode::INTERNAL_SERVER_ERROR,
        }
    }
}
}

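Note: the two branches of `apply_auto_checksum` produce differently shaped metadata. The composite path stores a checksum-of-part-checksums under `key()`; the merged-CRC path stores a whole-object value under `key_capitalized()` plus a FULL_OBJECT type marker. Sketch (values illustrative; whether `encoded()` appends S3's usual `-<parts>` suffix for composites is not shown in this diff):

    // composite (e.g. 3 parts):
    //   user_metadata[auto_checksum.key()] = "<base64 of checksum-of-checksums>"
    // merged full-object CRC:
    //   user_metadata[auto_checksum.key_capitalized()] = "<base64 crc>"
    //   user_metadata["X-Amz-Checksum-Type"] = "FULL_OBJECT"
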
@@ -1,11 +1,11 @@
#![allow(clippy::map_entry)]
use std::collections::HashMap;
use http::{HeaderMap, StatusCode};
use bytes::Bytes;
use http::{HeaderMap, StatusCode};
use std::collections::HashMap;

use crate::client::{
    api_error_response::http_resp_to_error_response,
    transition_api::{RequestMetadata, TransitionClient, ReaderImpl}
    transition_api::{ReaderImpl, RequestMetadata, TransitionClient},
};
use rustfs_utils::hash::EMPTY_STRING_SHA256_HASH;

@@ -23,9 +23,9 @@ impl TransitionClient {
        url_values.insert("policy".to_string(), "".to_string());

        let mut req_metadata = RequestMetadata {
            bucket_name: bucket_name.to_string(),
            query_values: url_values,
            content_body: ReaderImpl::Body(Bytes::from(policy.as_bytes().to_vec())),
            bucket_name: bucket_name.to_string(),
            query_values: url_values,
            content_body: ReaderImpl::Body(Bytes::from(policy.as_bytes().to_vec())),
            content_length: policy.len() as i64,
            object_name: "".to_string(),
            custom_header: HeaderMap::new(),
@@ -43,9 +43,9 @@ impl TransitionClient {
        let resp = self.execute_method(http::Method::PUT, &mut req_metadata).await?;
        //defer closeResponse(resp)
        //if resp != nil {
        if resp.status() != StatusCode::NO_CONTENT && resp.status() != StatusCode::OK {
            return Err(std::io::Error::other(http_resp_to_error_response(resp, vec![], bucket_name, "")));
        }
        if resp.status() != StatusCode::NO_CONTENT && resp.status() != StatusCode::OK {
            return Err(std::io::Error::other(http_resp_to_error_response(resp, vec![], bucket_name, "")));
        }
        //}
        Ok(())
    }
@@ -54,23 +54,28 @@ impl TransitionClient {
        let mut url_values = HashMap::new();
        url_values.insert("policy".to_string(), "".to_string());

        let resp = self.execute_method(http::Method::DELETE, &mut RequestMetadata {
            bucket_name: bucket_name.to_string(),
            query_values: url_values,
            content_sha256_hex: EMPTY_STRING_SHA256_HASH.to_string(),
            object_name: "".to_string(),
            custom_header: HeaderMap::new(),
            content_body: ReaderImpl::Body(Bytes::new()),
            content_length: 0,
            content_md5_base64: "".to_string(),
            stream_sha256: false,
            trailer: HeaderMap::new(),
            pre_sign_url: Default::default(),
            add_crc: Default::default(),
            extra_pre_sign_header: Default::default(),
            bucket_location: Default::default(),
            expires: Default::default(),
        }).await?;
        let resp = self
            .execute_method(
                http::Method::DELETE,
                &mut RequestMetadata {
                    bucket_name: bucket_name.to_string(),
                    query_values: url_values,
                    content_sha256_hex: EMPTY_STRING_SHA256_HASH.to_string(),
                    object_name: "".to_string(),
                    custom_header: HeaderMap::new(),
                    content_body: ReaderImpl::Body(Bytes::new()),
                    content_length: 0,
                    content_md5_base64: "".to_string(),
                    stream_sha256: false,
                    trailer: HeaderMap::new(),
                    pre_sign_url: Default::default(),
                    add_crc: Default::default(),
                    extra_pre_sign_header: Default::default(),
                    bucket_location: Default::default(),
                    expires: Default::default(),
                },
            )
            .await?;
        //defer closeResponse(resp)

        if resp.status() != StatusCode::NO_CONTENT {
@@ -89,23 +94,28 @@ impl TransitionClient {
        let mut url_values = HashMap::new();
        url_values.insert("policy".to_string(), "".to_string());

        let resp = self.execute_method(http::Method::GET, &mut RequestMetadata {
            bucket_name: bucket_name.to_string(),
            query_values: url_values,
            content_sha256_hex: EMPTY_STRING_SHA256_HASH.to_string(),
            object_name: "".to_string(),
            custom_header: HeaderMap::new(),
            content_body: ReaderImpl::Body(Bytes::new()),
            content_length: 0,
            content_md5_base64: "".to_string(),
            stream_sha256: false,
            trailer: HeaderMap::new(),
            pre_sign_url: Default::default(),
            add_crc: Default::default(),
            extra_pre_sign_header: Default::default(),
            bucket_location: Default::default(),
            expires: Default::default(),
        }).await?;
        let resp = self
            .execute_method(
                http::Method::GET,
                &mut RequestMetadata {
                    bucket_name: bucket_name.to_string(),
                    query_values: url_values,
                    content_sha256_hex: EMPTY_STRING_SHA256_HASH.to_string(),
                    object_name: "".to_string(),
                    custom_header: HeaderMap::new(),
                    content_body: ReaderImpl::Body(Bytes::new()),
                    content_length: 0,
                    content_md5_base64: "".to_string(),
                    stream_sha256: false,
                    trailer: HeaderMap::new(),
                    pre_sign_url: Default::default(),
                    add_crc: Default::default(),
                    extra_pre_sign_header: Default::default(),
                    bucket_location: Default::default(),
                    expires: Default::default(),
                },
            )
            .await?;

        let policy = String::from_utf8_lossy(&resp.body().bytes().expect("err").to_vec()).to_string();
        Ok(policy)

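Note: all three methods target the bucket `?policy` sub-resource: PUT uploads the JSON document (200/204 expected), DELETE removes it (204 expected), GET returns it verbatim. A hypothetical call site (the wrapper method names are assumed, they sit outside the hunks shown here):

    // Assumed names; client construction elided.
    let policy = client.get_bucket_policy("my-bucket").await?;
    println!("{policy}");
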
@@ -1,26 +1,25 @@
#![allow(clippy::map_entry)]
use std::fmt::Display;
use http::StatusCode;
use serde::{Serialize, Deserialize};
use serde::{ser::Serializer, de::Deserializer};
use serde::{Deserialize, Serialize};
use serde::{de::Deserializer, ser::Serializer};
use std::fmt::Display;

use s3s::S3ErrorCode;
use s3s::Body;
use s3s::S3ErrorCode;

const REPORT_ISSUE: &str = "Please report this issue at https://github.com/rustfs/rustfs/issues.";

#[derive(Serialize, Deserialize)]
#[derive(Debug, Clone, thiserror::Error, PartialEq, Eq)]
#[derive(Serialize, Deserialize, Debug, Clone, thiserror::Error, PartialEq, Eq)]
#[serde(default, rename_all = "PascalCase")]
pub struct ErrorResponse {
    #[serde(serialize_with = "serialize_code", deserialize_with = "deserialize_code")]
    pub code: S3ErrorCode,
    pub message: String,
    pub code: S3ErrorCode,
    pub message: String,
    pub bucket_name: String,
    pub key: String,
    pub resource: String,
    pub request_id: String,
    pub host_id: String,
    pub key: String,
    pub resource: String,
    pub request_id: String,
    pub host_id: String,
    pub region: String,
    pub server: String,
    #[serde(skip)]
@@ -29,14 +28,14 @@ pub struct ErrorResponse {

fn serialize_code<S>(data: &S3ErrorCode, s: S) -> Result<S::Ok, S::Error>
where
    S: Serializer
    S: Serializer,
{
    s.serialize_str("")
}

fn deserialize_code<'de, D>(d: D) -> Result<S3ErrorCode, D::Error>
where
    D: Deserializer<'de>
    D: Deserializer<'de>,
{
    Ok(S3ErrorCode::from_bytes(String::deserialize(d)?.as_bytes()).unwrap_or(S3ErrorCode::Custom("".into())))
}
@@ -45,14 +44,14 @@ impl Default for ErrorResponse {
    fn default() -> Self {
        ErrorResponse {
            code: S3ErrorCode::Custom("".into()),
            message: Default::default(),
            message: Default::default(),
            bucket_name: Default::default(),
            key: Default::default(),
            resource: Default::default(),
            request_id: Default::default(),
            host_id: Default::default(),
            region: Default::default(),
            server: Default::default(),
            key: Default::default(),
            resource: Default::default(),
            request_id: Default::default(),
            host_id: Default::default(),
            region: Default::default(),
            server: Default::default(),
            status_code: Default::default(),
        }
    }
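Note: with `rename_all = "PascalCase"` the struct round-trips the standard S3 error document, e.g. (illustrative values):

    <Error>
      <Code>NoSuchKey</Code>
      <Message>The specified key does not exist.</Message>
      <BucketName>demo-bucket</BucketName>
      <Key>a.txt</Key>
      <RequestId>rustfs</RequestId>
    </Error>

Also worth noting: `serialize_code` writes an empty string, so serialization is deliberately lossy; only `deserialize_code` recovers a real `S3ErrorCode`.
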
@@ -76,7 +75,12 @@ pub fn to_error_response(err: &std::io::Error) -> ErrorResponse {
    }
}

pub fn http_resp_to_error_response(resp: http::Response<Body>, b: Vec<u8>, bucket_name: &str, object_name: &str) -> ErrorResponse {
pub fn http_resp_to_error_response(
    resp: http::Response<Body>,
    b: Vec<u8>,
    bucket_name: &str,
    object_name: &str,
) -> ErrorResponse {
    let err_body = String::from_utf8(b).unwrap();
    //let err_body = xml_decode_and_body(resp.body, &err_resp);
    let err_resp_ = serde_xml_rs::from_str::<ErrorResponse>(&err_body);
@@ -87,18 +91,18 @@ pub fn http_resp_to_error_response(resp: http::Response<Body>, b: Vec<u8>, bucke
                if object_name == "" {
                    err_resp = ErrorResponse {
                        status_code: resp.status(),
                        code: S3ErrorCode::NoSuchBucket,
                        message: "The specified bucket does not exist.".to_string(),
                        code: S3ErrorCode::NoSuchBucket,
                        message: "The specified bucket does not exist.".to_string(),
                        bucket_name: bucket_name.to_string(),
                        ..Default::default()
                    };
                } else {
                    err_resp = ErrorResponse {
                        status_code: resp.status(),
                        code: S3ErrorCode::NoSuchKey,
                        message: "The specified key does not exist.".to_string(),
                        code: S3ErrorCode::NoSuchKey,
                        message: "The specified key does not exist.".to_string(),
                        bucket_name: bucket_name.to_string(),
                        key: object_name.to_string(),
                        key: object_name.to_string(),
                        ..Default::default()
                    };
                }
@@ -106,18 +110,18 @@ pub fn http_resp_to_error_response(resp: http::Response<Body>, b: Vec<u8>, bucke
            StatusCode::FORBIDDEN => {
                err_resp = ErrorResponse {
                    status_code: resp.status(),
                    code: S3ErrorCode::AccessDenied,
                    message: "Access Denied.".to_string(),
                    code: S3ErrorCode::AccessDenied,
                    message: "Access Denied.".to_string(),
                    bucket_name: bucket_name.to_string(),
                    key: object_name.to_string(),
                    key: object_name.to_string(),
                    ..Default::default()
                };
            }
            StatusCode::CONFLICT => {
                err_resp = ErrorResponse {
                    status_code: resp.status(),
                    code: S3ErrorCode::BucketNotEmpty,
                    message: "Bucket not empty.".to_string(),
                    code: S3ErrorCode::BucketNotEmpty,
                    message: "Bucket not empty.".to_string(),
                    bucket_name: bucket_name.to_string(),
                    ..Default::default()
                };
@@ -125,10 +129,10 @@ pub fn http_resp_to_error_response(resp: http::Response<Body>, b: Vec<u8>, bucke
            StatusCode::PRECONDITION_FAILED => {
                err_resp = ErrorResponse {
                    status_code: resp.status(),
                    code: S3ErrorCode::PreconditionFailed,
                    message: "Pre condition failed.".to_string(),
                    code: S3ErrorCode::PreconditionFailed,
                    message: "Pre condition failed.".to_string(),
                    bucket_name: bucket_name.to_string(),
                    key: object_name.to_string(),
                    key: object_name.to_string(),
                    ..Default::default()
                };
            }
@@ -137,10 +141,10 @@ pub fn http_resp_to_error_response(resp: http::Response<Body>, b: Vec<u8>, bucke
                if err_body.len() > 0 {
                    msg = err_body;
                }
                err_resp = ErrorResponse{
                err_resp = ErrorResponse {
                    status_code: resp.status(),
                    code: S3ErrorCode::Custom(resp.status().to_string().into()),
                    message: msg,
                    code: S3ErrorCode::Custom(resp.status().to_string().into()),
                    message: msg,
                    bucket_name: bucket_name.to_string(),
                    ..Default::default()
                };
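Note: when the body is not parseable XML, the error is synthesized from the HTTP status alone. Condensed view of the mapping above:

    // status -> synthesized S3 error when the XML body can't be parsed
    // 404 NOT_FOUND           -> NoSuchBucket (no key) / NoSuchKey (key given)
    // 403 FORBIDDEN           -> AccessDenied
    // 409 CONFLICT            -> BucketNotEmpty
    // 412 PRECONDITION_FAILED -> PreconditionFailed
    // anything else           -> Custom(status text), message = raw body if non-empty
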
@@ -188,45 +192,55 @@ pub fn http_resp_to_error_response(resp: http::Response<Body>, b: Vec<u8>, bucke
pub fn err_transfer_acceleration_bucket(bucket_name: &str) -> ErrorResponse {
    ErrorResponse {
        status_code: StatusCode::BAD_REQUEST,
        code: S3ErrorCode::InvalidArgument,
        message: "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’.".to_string(),
        code: S3ErrorCode::InvalidArgument,
        message: "The name of the bucket used for Transfer Acceleration must be DNS-compliant and must not contain periods ‘.’."
            .to_string(),
        bucket_name: bucket_name.to_string(),
        ..Default::default()
    }
}

pub fn err_entity_too_large(total_size: i64, max_object_size: i64, bucket_name: &str, object_name: &str) -> ErrorResponse {
    let msg = format!("Your proposed upload size ‘{}’ exceeds the maximum allowed object size ‘{}’ for single PUT operation.", total_size, max_object_size);
    let msg = format!(
        "Your proposed upload size ‘{}’ exceeds the maximum allowed object size ‘{}’ for single PUT operation.",
        total_size, max_object_size
    );
    ErrorResponse {
        status_code: StatusCode::BAD_REQUEST,
        code: S3ErrorCode::EntityTooLarge,
        message: msg,
        code: S3ErrorCode::EntityTooLarge,
        message: msg,
        bucket_name: bucket_name.to_string(),
        key: object_name.to_string(),
        key: object_name.to_string(),
        ..Default::default()
    }
}

pub fn err_entity_too_small(total_size: i64, bucket_name: &str, object_name: &str) -> ErrorResponse {
    let msg = format!("Your proposed upload size ‘{}’ is below the minimum allowed object size ‘0B’ for single PUT operation.", total_size);
    let msg = format!(
        "Your proposed upload size ‘{}’ is below the minimum allowed object size ‘0B’ for single PUT operation.",
        total_size
    );
    ErrorResponse {
        status_code: StatusCode::BAD_REQUEST,
        code: S3ErrorCode::EntityTooSmall,
        message: msg,
        code: S3ErrorCode::EntityTooSmall,
        message: msg,
        bucket_name: bucket_name.to_string(),
        key: object_name.to_string(),
        key: object_name.to_string(),
        ..Default::default()
    }
}

pub fn err_unexpected_eof(total_read: i64, total_size: i64, bucket_name: &str, object_name: &str) -> ErrorResponse {
    let msg = format!("Data read ‘{}’ is not equal to the size ‘{}’ of the input Reader.", total_read, total_size);
    let msg = format!(
        "Data read ‘{}’ is not equal to the size ‘{}’ of the input Reader.",
        total_read, total_size
    );
    ErrorResponse {
        status_code: StatusCode::BAD_REQUEST,
        code: S3ErrorCode::Custom("UnexpectedEOF".into()),
        message: msg,
        code: S3ErrorCode::Custom("UnexpectedEOF".into()),
        message: msg,
        bucket_name: bucket_name.to_string(),
        key: object_name.to_string(),
        key: object_name.to_string(),
        ..Default::default()
    }
}
@@ -234,9 +248,9 @@ pub fn err_unexpected_eof(total_read: i64, total_size: i64, bucket_name: &str, o
pub fn err_invalid_argument(message: &str) -> ErrorResponse {
    ErrorResponse {
        status_code: StatusCode::BAD_REQUEST,
        code: S3ErrorCode::InvalidArgument,
        message: message.to_string(),
        request_id: "rustfs".to_string(),
        code: S3ErrorCode::InvalidArgument,
        message: message.to_string(),
        request_id: "rustfs".to_string(),
        ..Default::default()
    }
}
@@ -244,9 +258,9 @@ pub fn err_invalid_argument(message: &str) -> ErrorResponse {
pub fn err_api_not_supported(message: &str) -> ErrorResponse {
    ErrorResponse {
        status_code: StatusCode::NOT_IMPLEMENTED,
        code: S3ErrorCode::Custom("APINotSupported".into()),
        message: message.to_string(),
        request_id: "rustfs".to_string(),
        code: S3ErrorCode::Custom("APINotSupported".into()),
        message: message.to_string(),
        request_id: "rustfs".to_string(),
        ..Default::default()
    }
}
}

@@ -1,13 +1,13 @@
#![allow(clippy::map_entry)]
use bytes::Bytes;
use http::HeaderMap;
use tokio::io::BufReader;
use std::io::Cursor;
use tokio::io::BufReader;

use crate::client::{
    transition_api::{ObjectInfo, to_object_info, ReadCloser, ReaderImpl, RequestMetadata, TransitionClient},
    api_error_response::err_invalid_argument,
    api_get_options::GetObjectOptions,
    transition_api::{ObjectInfo, ReadCloser, ReaderImpl, RequestMetadata, TransitionClient, to_object_info},
};
use rustfs_utils::hash::EMPTY_STRING_SHA256_HASH;

@@ -16,24 +16,34 @@ impl TransitionClient {
        todo!();
    }

    pub async fn get_object_inner(&self, bucket_name: &str, object_name: &str, opts: &GetObjectOptions) -> Result<(ObjectInfo, HeaderMap, ReadCloser), std::io::Error> {
        let resp = self.execute_method(http::Method::GET, &mut RequestMetadata {
            bucket_name: bucket_name.to_string(),
            object_name: object_name.to_string(),
            query_values: opts.to_query_values(),
            custom_header: opts.header(),
            content_sha256_hex: EMPTY_STRING_SHA256_HASH.to_string(),
            content_body: ReaderImpl::Body(Bytes::new()),
            content_length: 0,
            content_md5_base64: "".to_string(),
            stream_sha256: false,
            trailer: HeaderMap::new(),
            pre_sign_url: Default::default(),
            add_crc: Default::default(),
            extra_pre_sign_header: Default::default(),
            bucket_location: Default::default(),
            expires: Default::default(),
        }).await?;
    pub async fn get_object_inner(
        &self,
        bucket_name: &str,
        object_name: &str,
        opts: &GetObjectOptions,
    ) -> Result<(ObjectInfo, HeaderMap, ReadCloser), std::io::Error> {
        let resp = self
            .execute_method(
                http::Method::GET,
                &mut RequestMetadata {
                    bucket_name: bucket_name.to_string(),
                    object_name: object_name.to_string(),
                    query_values: opts.to_query_values(),
                    custom_header: opts.header(),
                    content_sha256_hex: EMPTY_STRING_SHA256_HASH.to_string(),
                    content_body: ReaderImpl::Body(Bytes::new()),
                    content_length: 0,
                    content_md5_base64: "".to_string(),
                    stream_sha256: false,
                    trailer: HeaderMap::new(),
                    pre_sign_url: Default::default(),
                    add_crc: Default::default(),
                    extra_pre_sign_header: Default::default(),
                    bucket_location: Default::default(),
                    expires: Default::default(),
                },
            )
            .await?;

        let resp = &resp;
        let object_stat = to_object_info(bucket_name, object_name, resp.headers())?;
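Note: `get_object_inner` issues a plain GET with the caller's query values and headers, then derives the object metadata from the response headers via `to_object_info`. A hypothetical call site (options construction elided; the exact `ObjectInfo` fields are not shown in this hunk):

    let (info, headers, body) = client.get_object_inner("bucket", "key", &opts).await?;
    // info: parsed object metadata, headers: raw response headers, body: streaming reader
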
@@ -45,20 +55,20 @@ impl TransitionClient {

#[derive(Default)]
struct GetRequest {
    pub buffer: Vec<u8>,
    pub offset: i64,
    pub did_offset_change: bool,
    pub been_read: bool,
    pub is_read_at: bool,
    pub is_read_op: bool,
    pub is_first_req: bool,
    pub buffer: Vec<u8>,
    pub offset: i64,
    pub did_offset_change: bool,
    pub been_read: bool,
    pub is_read_at: bool,
    pub is_read_op: bool,
    pub is_first_req: bool,
    pub setting_object_info: bool,
}

struct GetResponse {
    pub size: i64,
    pub size: i64,
    //pub error: error,
    pub did_read: bool,
    pub did_read: bool,
    pub object_info: ObjectInfo,
}

@@ -79,9 +89,7 @@ struct Object {

impl Object {
    pub fn new() -> Object {
        Self {
            ..Default::default()
        }
        Self { ..Default::default() }
    }

    fn do_get_request(&self, request: &GetRequest) -> Result<GetResponse, std::io::Error> {
@@ -121,7 +129,7 @@ impl Object {
    fn stat(&self) -> Result<ObjectInfo, std::io::Error> {
        if !self.is_started || !self.object_info_set {
            let _ = self.do_get_request(&GetRequest {
                is_first_req: !self.is_started,
                is_first_req: !self.is_started,
                setting_object_info: !self.object_info_set,
                ..Default::default()
            })?;
@@ -134,12 +142,12 @@ impl Object {
        self.curr_offset = offset;

        let mut read_at_req = GetRequest {
            is_read_op: true,
            is_read_at: true,
            is_read_op: true,
            is_read_at: true,
            did_offset_change: true,
            been_read: self.been_read,
            been_read: self.been_read,
            offset,
            buffer: b.to_vec(),
            buffer: b.to_vec(),
            ..Default::default()
        };

@@ -160,8 +168,8 @@ impl Object {
    fn seek(&mut self, offset: i64, whence: i64) -> Result<i64, std::io::Error> {
        if !self.is_started || !self.object_info_set {
            let seek_req = GetRequest {
                is_read_op: false,
                offset: offset,
                is_read_op: false,
                offset: offset,
                is_first_req: true,
                ..Default::default()
            };
@@ -195,4 +203,4 @@ impl Object {
        self.is_closed = true;
        Ok(())
    }
}
}

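Note: `Object` funnels every operation through `do_get_request`; the `GetRequest` flags describe what the caller wants (first request, seek-only, `read_at` with an explicit offset) and `GetResponse` reports the bytes read plus the cached `ObjectInfo`. Sketch of the `stat()` handshake shown above (illustrative):

    let req = GetRequest {
        is_first_req: true,          // nothing started yet
        setting_object_info: true,   // ask for metadata
        ..Default::default()
    };
    let resp = obj.do_get_request(&req)?; // fills resp.object_info; no payload read
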
@@ -1,6 +1,6 @@
#![allow(clippy::map_entry)]
use std::collections::HashMap;
use http::{HeaderMap, HeaderName, HeaderValue};
use std::collections::HashMap;
use time::OffsetDateTime;
use tracing::warn;

@@ -8,9 +8,9 @@ use crate::client::api_error_response::err_invalid_argument;

#[derive(Default)]
pub struct AdvancedGetOptions {
    replication_deletemarker: bool,
    replication_deletemarker: bool,
    is_replication_ready_for_deletemarker: bool,
    replication_proxy_request: String,
    replication_proxy_request: String,
}

pub struct GetObjectOptions {
@@ -50,7 +50,7 @@ impl GetObjectOptions {
            }
        }
        if self.checksum {
            headers.insert("x-amz-checksum-mode", "ENABLED".parse().expect("err"));
            headers.insert("x-amz-checksum-mode", "ENABLED".parse().expect("err"));
        }
        headers
    }
@@ -96,15 +96,15 @@ impl GetObjectOptions {
    pub fn set_range(&mut self, start: i64, end: i64) -> Result<(), std::io::Error> {
        if start == 0 && end < 0 {
            self.set("Range", &format!("bytes={}", end));
        }
        else if 0 < start && end == 0 {
        } else if 0 < start && end == 0 {
            self.set("Range", &format!("bytes={}-", start));
        }
        else if 0 <= start && start <= end {
        } else if 0 <= start && start <= end {
            self.set("Range", &format!("bytes={}-{}", start, end));
        }
        else {
            return Err(std::io::Error::other(err_invalid_argument(&format!("Invalid range specified: start={} end={}", start, end))));
        } else {
            return Err(std::io::Error::other(err_invalid_argument(&format!(
                "Invalid range specified: start={} end={}",
                start, end
            ))));
        }
        Ok(())
    }
@@ -124,4 +124,4 @@ impl GetObjectOptions {

        url_values
    }
}
}

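Note: `set_range` maps the `(start, end)` pair onto the HTTP Range header: `(0, -N)` requests the last N bytes, `(S, 0)` everything from offset S, and `(S, E)` the inclusive slice. For example:

    opts.set_range(0, -5)?;  // Range: bytes=-5     (final 5 bytes)
    opts.set_range(100, 0)?; // Range: bytes=100-   (from offset 100)
    opts.set_range(0, 99)?;  // Range: bytes=0-99   (first 100 bytes)
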
@@ -1,23 +1,36 @@
#![allow(clippy::map_entry)]
use std::collections::HashMap;
use bytes::Bytes;
use http::{HeaderMap, StatusCode};
use std::collections::HashMap;

use crate::client::{
    api_error_response::http_resp_to_error_response,
    api_s3_datatypes::{
        ListBucketResult, ListBucketV2Result, ListMultipartUploadsResult, ListObjectPartsResult, ListVersionsResult, ObjectPart,
    },
    credentials,
    api_s3_datatypes::{ListBucketV2Result, ListMultipartUploadsResult, ListBucketResult, ListObjectPartsResult, ListVersionsResult, ObjectPart},
    transition_api::{ReaderImpl, TransitionClient, RequestMetadata,},
    transition_api::{ReaderImpl, RequestMetadata, TransitionClient},
};
use rustfs_utils::hash::EMPTY_STRING_SHA256_HASH;
use crate::store_api::BucketInfo;
use rustfs_utils::hash::EMPTY_STRING_SHA256_HASH;

impl TransitionClient {
    pub fn list_buckets(&self) -> Result<Vec<BucketInfo>, std::io::Error> {
        todo!();
    }

    pub async fn list_objects_v2_query(&self, bucket_name: &str, object_prefix: &str, continuation_token: &str, fetch_owner: bool, metadata: bool, delimiter: &str, start_after: &str, max_keys: i64, headers: HeaderMap) -> Result<ListBucketV2Result, std::io::Error> {
    pub async fn list_objects_v2_query(
        &self,
        bucket_name: &str,
        object_prefix: &str,
        continuation_token: &str,
        fetch_owner: bool,
        metadata: bool,
        delimiter: &str,
        start_after: &str,
        max_keys: i64,
        headers: HeaderMap,
    ) -> Result<ListBucketV2Result, std::io::Error> {
        let mut url_values = HashMap::new();

        url_values.insert("list-type".to_string(), "2".to_string());
@@ -43,23 +56,28 @@ impl TransitionClient {
            url_values.insert("max-keys".to_string(), max_keys.to_string());
        }

        let mut resp = self.execute_method(http::Method::GET, &mut RequestMetadata {
            bucket_name: bucket_name.to_string(),
            object_name: "".to_string(),
            query_values: url_values,
            content_sha256_hex: EMPTY_STRING_SHA256_HASH.to_string(),
            custom_header: headers,
            content_body: ReaderImpl::Body(Bytes::new()),
            content_length: 0,
            content_md5_base64: "".to_string(),
            stream_sha256: false,
            trailer: HeaderMap::new(),
            pre_sign_url: Default::default(),
            add_crc: Default::default(),
            extra_pre_sign_header: Default::default(),
            bucket_location: Default::default(),
            expires: Default::default(),
        }).await?;
        let mut resp = self
            .execute_method(
                http::Method::GET,
                &mut RequestMetadata {
                    bucket_name: bucket_name.to_string(),
                    object_name: "".to_string(),
                    query_values: url_values,
                    content_sha256_hex: EMPTY_STRING_SHA256_HASH.to_string(),
                    custom_header: headers,
                    content_body: ReaderImpl::Body(Bytes::new()),
                    content_length: 0,
                    content_md5_base64: "".to_string(),
                    stream_sha256: false,
                    trailer: HeaderMap::new(),
                    pre_sign_url: Default::default(),
                    add_crc: Default::default(),
                    extra_pre_sign_header: Default::default(),
                    bucket_location: Default::default(),
                    expires: Default::default(),
                },
            )
            .await?;
        if resp.status() != StatusCode::OK {
            return Err(std::io::Error::other(http_resp_to_error_response(resp, vec![], bucket_name, "")));
        }
@@ -76,12 +94,12 @@ impl TransitionClient {

        if list_bucket_result.is_truncated && list_bucket_result.next_continuation_token == "" {
            return Err(std::io::Error::other(credentials::ErrorResponse {
                sts_error: credentials::STSError {
                    r#type: "".to_string(),
                    code: "NotImplemented".to_string(),
                    message: "Truncated response should have continuation token set".to_string(),
                },
                request_id: "".to_string(),
                sts_error: credentials::STSError {
                    r#type: "".to_string(),
                    code: "NotImplemented".to_string(),
                    message: "Truncated response should have continuation token set".to_string(),
                },
                request_id: "".to_string(),
            }));
        }

@@ -97,7 +115,14 @@ impl TransitionClient {
        Ok(list_bucket_result)
    }

    pub fn list_object_versions_query(&self, bucket_name: &str, opts: &ListObjectsOptions, key_marker: &str, version_id_marker: &str, delimiter: &str) -> Result<ListVersionsResult, std::io::Error> {
    pub fn list_object_versions_query(
        &self,
        bucket_name: &str,
        opts: &ListObjectsOptions,
        key_marker: &str,
        version_id_marker: &str,
        delimiter: &str,
    ) -> Result<ListVersionsResult, std::io::Error> {
        /*if err := s3utils.CheckValidBucketName(bucketName); err != nil {
            return ListVersionsResult{}, err
        }
@@ -177,15 +202,36 @@ impl TransitionClient {
        todo!();
    }

    pub fn list_objects_query(&self, bucket_name: &str, object_prefix: &str, object_marker: &str, delimiter: &str, max_keys: i64, headers: HeaderMap) -> Result<ListBucketResult, std::io::Error> {
    pub fn list_objects_query(
        &self,
        bucket_name: &str,
        object_prefix: &str,
        object_marker: &str,
        delimiter: &str,
        max_keys: i64,
        headers: HeaderMap,
    ) -> Result<ListBucketResult, std::io::Error> {
        todo!();
    }

    pub fn list_multipart_uploads_query(&self, bucket_name: &str, key_marker: &str, upload_id_marker: &str, prefix: &str, delimiter: &str, max_uploads: i64) -> Result<ListMultipartUploadsResult, std::io::Error> {
    pub fn list_multipart_uploads_query(
        &self,
        bucket_name: &str,
        key_marker: &str,
        upload_id_marker: &str,
        prefix: &str,
        delimiter: &str,
        max_uploads: i64,
    ) -> Result<ListMultipartUploadsResult, std::io::Error> {
        todo!();
    }

    pub fn list_object_parts(&self, bucket_name: &str, object_name: &str, upload_id: &str) -> Result<HashMap<i64, ObjectPart>, std::io::Error> {
    pub fn list_object_parts(
        &self,
        bucket_name: &str,
        object_name: &str,
        upload_id: &str,
    ) -> Result<HashMap<i64, ObjectPart>, std::io::Error> {
        todo!();
    }

@@ -193,7 +239,14 @@ impl TransitionClient {
        todo!();
    }

    pub async fn list_object_parts_query(&self, bucket_name: &str, object_name: &str, upload_id: &str, part_number_marker: i64, max_parts: i64) -> Result<ListObjectPartsResult, std::io::Error> {
    pub async fn list_object_parts_query(
        &self,
        bucket_name: &str,
        object_name: &str,
        upload_id: &str,
        part_number_marker: i64,
        max_parts: i64,
    ) -> Result<ListObjectPartsResult, std::io::Error> {
        todo!();
    }
}
@@ -226,4 +279,4 @@ fn decode_s3_name(name: &str, encoding_type: &str) -> Result<String, std::io::Er
        return Ok(name.to_string());
    }
}
}
}

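Note: the v2 listing is driven entirely by query parameters; the hunks above show `list-type=2` and `max-keys` being set, and the remaining arguments map onto the usual ListObjectsV2 parameters (names below follow the S3 API; only the two inserts are visible in this diff):

    // GET /?list-type=2&prefix=<object_prefix>&delimiter=<delimiter>
    //     &continuation-token=<token>&start-after=<key>&max-keys=<n>
    //     [&fetch-owner=true][&metadata=true]
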
@@ -1,33 +1,30 @@
#![allow(clippy::map_entry)]
use std::{collections::HashMap, sync::Arc};
use bytes::Bytes;
use time::{OffsetDateTime, macros::format_description, Duration};
use http::{HeaderMap, HeaderName, HeaderValue};
use std::{collections::HashMap, sync::Arc};
use time::{Duration, OffsetDateTime, macros::format_description};
use tracing::{error, info, warn};

use s3s::dto::{
    ObjectLockRetentionMode, ObjectLockLegalHoldStatus,
    ReplicationStatus,
};
use s3s::header::{
    X_AMZ_OBJECT_LOCK_MODE, X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE, X_AMZ_OBJECT_LOCK_LEGAL_HOLD,
    X_AMZ_STORAGE_CLASS, X_AMZ_WEBSITE_REDIRECT_LOCATION, X_AMZ_REPLICATION_STATUS,
};
use reader::hasher::Hasher;
//use crate::disk::{BufferReader, Reader};
use rustfs_utils::{
    crypto::base64_encode,
    net::{is_amz_header, is_standard_header, is_storageclass_header, is_rustfs_header, is_minio_header},
use s3s::dto::{ObjectLockLegalHoldStatus, ObjectLockRetentionMode, ReplicationStatus};
use s3s::header::{
    X_AMZ_OBJECT_LOCK_LEGAL_HOLD, X_AMZ_OBJECT_LOCK_MODE, X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE, X_AMZ_REPLICATION_STATUS,
    X_AMZ_STORAGE_CLASS, X_AMZ_WEBSITE_REDIRECT_LOCATION,
};
//use crate::disk::{BufferReader, Reader};
use crate::checksum::ChecksumMode;
use crate::client::{
    api_s3_datatypes::{CompletePart, ObjectPart, CompleteMultipartUpload},
    api_error_response::{err_entity_too_large, err_invalid_argument},
    api_put_object_common::optimal_part_info,
    transition_api::{TransitionClient, UploadInfo, ReaderImpl},
    api_error_response::{err_invalid_argument, err_entity_too_large},
    api_put_object_multipart::UploadPartParams,
    api_s3_datatypes::{CompleteMultipartUpload, CompletePart, ObjectPart},
    constants::{ISO8601_DATEFORMAT, MAX_MULTIPART_PUT_OBJECT_SIZE, MIN_PART_SIZE, TOTAL_WORKERS},
    credentials::SignatureType,
    constants::{MAX_MULTIPART_PUT_OBJECT_SIZE, TOTAL_WORKERS, MIN_PART_SIZE, ISO8601_DATEFORMAT,},
    transition_api::{ReaderImpl, TransitionClient, UploadInfo},
};
use rustfs_utils::{
    crypto::base64_encode,
    net::{is_amz_header, is_minio_header, is_rustfs_header, is_standard_header, is_storageclass_header},
};

#[derive(Debug, Clone)]
@@ -123,17 +120,21 @@ impl Default for PutObjectOptions {
impl PutObjectOptions {
    fn set_matche_tag(&mut self, etag: &str) {
        if etag == "*" {
            self.custom_header.insert("If-Match", HeaderValue::from_str("*").expect("err"));
            self.custom_header
                .insert("If-Match", HeaderValue::from_str("*").expect("err"));
        } else {
            self.custom_header.insert("If-Match", HeaderValue::from_str(&format!("\"{}\"", etag)).expect("err"));
            self.custom_header
                .insert("If-Match", HeaderValue::from_str(&format!("\"{}\"", etag)).expect("err"));
        }
    }

    fn set_matche_tag_except(&mut self, etag: &str) {
        if etag == "*" {
            self.custom_header.insert("If-None-Match", HeaderValue::from_str("*").expect("err"));
            self.custom_header
                .insert("If-None-Match", HeaderValue::from_str("*").expect("err"));
        } else {
            self.custom_header.insert("If-None-Match", HeaderValue::from_str(&format!("\"{etag}\"")).expect("err"));
            self.custom_header
                .insert("If-None-Match", HeaderValue::from_str(&format!("\"{etag}\"")).expect("err"));
        }
    }

@@ -160,7 +161,10 @@ impl PutObjectOptions {
        }

        if self.expires.unix_timestamp() != 0 {
            header.insert("Expires", HeaderValue::from_str(&self.expires.format(ISO8601_DATEFORMAT).unwrap()).expect("err")); //rustfs invalid heade
            header.insert(
                "Expires",
                HeaderValue::from_str(&self.expires.format(ISO8601_DATEFORMAT).unwrap()).expect("err"),
            ); //rustfs invalid heade
        }

        if self.mode.as_str() != "" {
@@ -168,7 +172,10 @@ impl PutObjectOptions {
        }

        if self.retain_until_date.unix_timestamp() != 0 {
            header.insert(X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE, HeaderValue::from_str(&self.retain_until_date.format(ISO8601_DATEFORMAT).unwrap()).expect("err"));
            header.insert(
                X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE,
                HeaderValue::from_str(&self.retain_until_date.format(ISO8601_DATEFORMAT).unwrap()).expect("err"),
            );
        }

        if self.legalhold.as_str() != "" {
@@ -180,15 +187,22 @@ impl PutObjectOptions {
        }

        if self.website_redirect_location != "" {
            header.insert(X_AMZ_WEBSITE_REDIRECT_LOCATION, HeaderValue::from_str(&self.website_redirect_location).expect("err"));
            header.insert(
                X_AMZ_WEBSITE_REDIRECT_LOCATION,
                HeaderValue::from_str(&self.website_redirect_location).expect("err"),
            );
        }

        if !self.internal.replication_status.as_str().is_empty() {
            header.insert(X_AMZ_REPLICATION_STATUS, HeaderValue::from_str(self.internal.replication_status.as_str()).expect("err"));
            header.insert(
                X_AMZ_REPLICATION_STATUS,
                HeaderValue::from_str(self.internal.replication_status.as_str()).expect("err"),
            );
        }

        for (k, v) in &self.user_metadata {
            if is_amz_header(k) || is_standard_header(k) || is_storageclass_header(k) || is_rustfs_header(k) || is_minio_header(k) {
            if is_amz_header(k) || is_standard_header(k) || is_storageclass_header(k) || is_rustfs_header(k) || is_minio_header(k)
            {
                if let Ok(header_name) = HeaderName::from_bytes(k.as_bytes()) {
                    header.insert(header_name, HeaderValue::from_str(&v).unwrap());
                }
@@ -208,12 +222,12 @@ impl PutObjectOptions {

    fn validate(&self, c: TransitionClient) -> Result<(), std::io::Error> {
        //if self.checksum.is_set() {
        /*if !self.trailing_header_support {
            return Err(Error::from(err_invalid_argument("Checksum requires Client with TrailingHeaders enabled")));
        }*/
        /*else if self.override_signer_type == SignatureType::SignatureV2 {
            return Err(Error::from(err_invalid_argument("Checksum cannot be used with v2 signatures")));
        }*/
        /*if !self.trailing_header_support {
            return Err(Error::from(err_invalid_argument("Checksum requires Client with TrailingHeaders enabled")));
        }*/
        /*else if self.override_signer_type == SignatureType::SignatureV2 {
            return Err(Error::from(err_invalid_argument("Checksum cannot be used with v2 signatures")));
        }*/
        //}

        Ok(())
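Note: only metadata keys accepted by one of the `is_*_header` predicates are copied into the request; everything else is silently dropped by this loop. Illustrative behavior (assuming the predicates match the prefixes their names suggest):

    // "x-amz-meta-owner" -> forwarded (amz header)
    // "Content-Type"     -> forwarded (standard header)
    // "my-custom-key"    -> dropped
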
@@ -221,19 +235,37 @@ impl PutObjectOptions {
}

impl TransitionClient {
    pub async fn put_object(self: Arc<Self>, bucket_name: &str, object_name: &str, mut reader: ReaderImpl, object_size: i64,
        opts: &PutObjectOptions
    pub async fn put_object(
        self: Arc<Self>,
        bucket_name: &str,
        object_name: &str,
        mut reader: ReaderImpl,
        object_size: i64,
        opts: &PutObjectOptions,
    ) -> Result<UploadInfo, std::io::Error> {
        if object_size < 0 && opts.disable_multipart {
            return Err(std::io::Error::other("object size must be provided with disable multipart upload"));
        }

        self.put_object_common(bucket_name, object_name, reader, object_size, opts).await
        self.put_object_common(bucket_name, object_name, reader, object_size, opts)
            .await
    }

    pub async fn put_object_common(self: Arc<Self>, bucket_name: &str, object_name: &str, mut reader: ReaderImpl, size: i64, opts: &PutObjectOptions) -> Result<UploadInfo, std::io::Error> {
    pub async fn put_object_common(
        self: Arc<Self>,
        bucket_name: &str,
        object_name: &str,
        mut reader: ReaderImpl,
        size: i64,
        opts: &PutObjectOptions,
    ) -> Result<UploadInfo, std::io::Error> {
        if size > MAX_MULTIPART_PUT_OBJECT_SIZE {
            return Err(std::io::Error::other(err_entity_too_large(size, MAX_MULTIPART_PUT_OBJECT_SIZE, bucket_name, object_name)));
            return Err(std::io::Error::other(err_entity_too_large(
                size,
                MAX_MULTIPART_PUT_OBJECT_SIZE,
                bucket_name,
                object_name,
            )));
        }
        let mut opts = opts.clone();
        opts.auto_checksum.set_default(ChecksumMode::ChecksumCRC32C);
@@ -255,19 +287,30 @@ impl TransitionClient {
                return Err(std::io::Error::other("no length provided and multipart disabled"));
            }
            if opts.concurrent_stream_parts && opts.num_threads > 1 {
                return self.put_object_multipart_stream_parallel(bucket_name, object_name, reader, &opts).await;
                return self
                    .put_object_multipart_stream_parallel(bucket_name, object_name, reader, &opts)
                    .await;
            }
            return self.put_object_multipart_stream_no_length(bucket_name, object_name, reader, &opts).await;
            return self
                .put_object_multipart_stream_no_length(bucket_name, object_name, reader, &opts)
                .await;
        }

        if size <= part_size || opts.disable_multipart {
            return self.put_object_gcs(bucket_name, object_name, reader, size, &opts).await;
        }

        self.put_object_multipart_stream(bucket_name, object_name, reader, size, &opts).await
        self.put_object_multipart_stream(bucket_name, object_name, reader, size, &opts)
            .await
    }

    pub async fn put_object_multipart_stream_no_length(&self, bucket_name: &str, object_name: &str, mut reader: ReaderImpl, opts: &PutObjectOptions) -> Result<UploadInfo, std::io::Error> {
    pub async fn put_object_multipart_stream_no_length(
        &self,
        bucket_name: &str,
        object_name: &str,
        mut reader: ReaderImpl,
        opts: &PutObjectOptions,
    ) -> Result<UploadInfo, std::io::Error> {
        let mut total_uploaded_size: i64 = 0;

        let mut compl_multipart_upload = CompleteMultipartUpload::default();
@@ -295,12 +338,8 @@ impl TransitionClient {

        while part_number <= total_parts_count {
            buf = match &mut reader {
                ReaderImpl::Body(content_body) => {
                    content_body.to_vec()
                }
                ReaderImpl::ObjectBody(content_body) => {
                    content_body.read_all().await?
                }
                ReaderImpl::Body(content_body) => content_body.to_vec(),
                ReaderImpl::ObjectBody(content_body) => content_body.read_all().await?,
            };
            let length = buf.len();

@@ -354,13 +393,13 @@ impl TransitionClient {
            let part = parts_info[&i].clone();
            all_parts.push(part.clone());
            compl_multipart_upload.parts.push(CompletePart {
                etag: part.etag,
                part_num: part.part_num,
                checksum_crc32: part.checksum_crc32,
                checksum_crc32c: part.checksum_crc32c,
                checksum_sha1: part.checksum_sha1,
                checksum_sha256: part.checksum_sha256,
                checksum_crc64nvme: part.checksum_crc64nvme,
                etag: part.etag,
                part_num: part.part_num,
                checksum_crc32: part.checksum_crc32,
                checksum_crc32c: part.checksum_crc32c,
                checksum_sha1: part.checksum_sha1,
                checksum_sha256: part.checksum_sha256,
                checksum_crc64nvme: part.checksum_crc64nvme,
                ..Default::default()
            });
        }
@@ -374,9 +413,11 @@ impl TransitionClient {
        };
        //apply_auto_checksum(&mut opts, all_parts);

        let mut upload_info = self.complete_multipart_upload(bucket_name, object_name, &upload_id, compl_multipart_upload, &opts).await?;
        let mut upload_info = self
            .complete_multipart_upload(bucket_name, object_name, &upload_id, compl_multipart_upload, &opts)
            .await?;

        upload_info.size = total_uploaded_size;
        Ok(upload_info)
    }
}
}

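Note: `put_object_common` picks the upload path from the (possibly unknown) size. Condensed view of the branches above:

    // size > MAX_MULTIPART_PUT_OBJECT_SIZE   -> EntityTooLarge
    // size < 0 and multipart enabled         -> stream parts (parallel when configured)
    // size <= part_size or disable_multipart -> single PUT (put_object_gcs)
    // otherwise                              -> put_object_multipart_stream
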
@@ -1,10 +1,10 @@
#![allow(clippy::map_entry)]
use crate::client::{
    api_put_object::PutObjectOptions,
    constants::{ABS_MIN_PART_SIZE, MAX_MULTIPART_PUT_OBJECT_SIZE, MAX_PARTS_COUNT, MAX_PART_SIZE, MIN_PART_SIZE},
    transition_api::TransitionClient,
    transition_api::ReaderImpl,
    api_error_response::{err_entity_too_large, err_invalid_argument},
    api_put_object::PutObjectOptions,
    constants::{ABS_MIN_PART_SIZE, MAX_MULTIPART_PUT_OBJECT_SIZE, MAX_PART_SIZE, MAX_PARTS_COUNT, MIN_PART_SIZE},
    transition_api::ReaderImpl,
    transition_api::TransitionClient,
};

const NULL_VERSION_ID: &str = "null";
@@ -28,27 +28,43 @@ pub fn optimal_part_info(object_size: i64, configured_part_size: u64) -> Result<
    }

    if object_size > MAX_MULTIPART_PUT_OBJECT_SIZE {
        return Err(std::io::Error::other(err_entity_too_large(object_size, MAX_MULTIPART_PUT_OBJECT_SIZE, "", "")));
        return Err(std::io::Error::other(err_entity_too_large(
            object_size,
            MAX_MULTIPART_PUT_OBJECT_SIZE,
            "",
            "",
        )));
    }

    let mut part_size_flt: f64;
    if configured_part_size > 0 {
        if configured_part_size as i64 > object_size {
            return Err(std::io::Error::other(err_entity_too_large(configured_part_size as i64, object_size, "", "")));
            return Err(std::io::Error::other(err_entity_too_large(
                configured_part_size as i64,
                object_size,
                "",
                "",
            )));
        }

        if !unknown_size {
            if object_size > (configured_part_size as i64 * MAX_PARTS_COUNT) {
                return Err(std::io::Error::other(err_invalid_argument("Part size * max_parts(10000) is lesser than input objectSize.")));
                return Err(std::io::Error::other(err_invalid_argument(
                    "Part size * max_parts(10000) is lesser than input objectSize.",
                )));
            }
        }

        if (configured_part_size as i64) < ABS_MIN_PART_SIZE {
            return Err(std::io::Error::other(err_invalid_argument("Input part size is smaller than allowed minimum of 5MiB.")));
            return Err(std::io::Error::other(err_invalid_argument(
                "Input part size is smaller than allowed minimum of 5MiB.",
            )));
        }

        if configured_part_size as i64 > MAX_PART_SIZE {
            return Err(std::io::Error::other(err_invalid_argument("Input part size is bigger than allowed maximum of 5GiB.")));
            return Err(std::io::Error::other(err_invalid_argument(
                "Input part size is bigger than allowed maximum of 5GiB.",
            )));
        }

        part_size_flt = configured_part_size as f64;
@@ -64,13 +80,18 @@ pub fn optimal_part_info(object_size: i64, configured_part_size: u64) -> Result<

    let total_parts_count = (object_size as f64 / part_size_flt).ceil() as i64;
    let part_size = part_size_flt.ceil() as i64;
    let last_part_size = object_size - (total_parts_count-1) * part_size;
    let last_part_size = object_size - (total_parts_count - 1) * part_size;
    Ok((total_parts_count, part_size, last_part_size))
}

impl TransitionClient {
    pub async fn new_upload_id(&self ,bucket_name: &str, object_name: &str, opts: &PutObjectOptions) -> Result<String, std::io::Error> {
    pub async fn new_upload_id(
        &self,
        bucket_name: &str,
        object_name: &str,
        opts: &PutObjectOptions,
    ) -> Result<String, std::io::Error> {
        let init_multipart_upload_result = self.initiate_multipart_upload(bucket_name, object_name, opts).await?;
        Ok(init_multipart_upload_result.upload_id)
    }
}
}

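Note: a worked example of the closing arithmetic in `optimal_part_info` (illustrative numbers, assuming the configured size passes the validations above): for a 1 GiB object with a 64 MiB part size, total_parts_count = ceil(2^30 / 2^26) = 16, part_size = 64 MiB, and last_part_size = 2^30 - 15 * 2^26 = 64 MiB, an exact fit.

    // Illustrative: 1 GiB object, 64 MiB configured part size.
    let (parts, part_size, last) = optimal_part_info(1 << 30, 64 << 20)?;
    assert_eq!((parts, part_size, last), (16, 64 << 20, 64 << 20));
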
@@ -1,50 +1,61 @@
|
||||
#![allow(clippy::map_entry)]
|
||||
use bytes::Bytes;
|
||||
use http::{HeaderMap, HeaderName, HeaderValue, StatusCode};
|
||||
use s3s::S3ErrorCode;
|
||||
use std::io::Read;
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
use bytes::Bytes;
|
||||
use s3s::S3ErrorCode;
|
||||
use time::{format_description, OffsetDateTime};
|
||||
use uuid::Uuid;
|
||||
use http::{HeaderMap, HeaderName, HeaderValue, StatusCode};
|
||||
use url::form_urlencoded::Serializer;
|
||||
use time::{OffsetDateTime, format_description};
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::warn;
|
||||
use tracing::{error, info};
|
||||
use url::form_urlencoded::Serializer;
|
||||
use uuid::Uuid;
|
||||
|
||||
use s3s::{dto::StreamingBlob, Body};
|
||||
use s3s::header::{X_AMZ_EXPIRATION, X_AMZ_VERSION_ID};
|
||||
use reader::hasher::Hasher;
|
||||
use s3s::header::{X_AMZ_EXPIRATION, X_AMZ_VERSION_ID};
|
||||
use s3s::{Body, dto::StreamingBlob};
|
||||
//use crate::disk::{Reader, BufferReader};
|
||||
use crate::client::{
|
||||
transition_api::{RequestMetadata, TransitionClient, UploadInfo, ReaderImpl,},
|
||||
api_error_response::{err_entity_too_large, err_entity_too_small, err_invalid_argument, http_resp_to_error_response, to_error_response},
|
||||
api_error_response::{
|
||||
err_entity_too_large, err_entity_too_small, err_invalid_argument, http_resp_to_error_response, to_error_response,
|
||||
},
|
||||
api_put_object::PutObjectOptions,
|
||||
api_put_object_common::optimal_part_info,
|
||||
api_s3_datatypes::{CompleteMultipartUpload, CompleteMultipartUploadResult, CompletePart, InitiateMultipartUploadResult, ObjectPart},
|
||||
constants::{ABS_MIN_PART_SIZE, MAX_PART_SIZE, MAX_SINGLE_PUT_OBJECT_SIZE, ISO8601_DATEFORMAT, },
|
||||
};
|
||||
use rustfs_utils::{
|
||||
path::trim_etag,
|
||||
crypto::base64_encode,
|
||||
api_s3_datatypes::{
|
||||
CompleteMultipartUpload, CompleteMultipartUploadResult, CompletePart, InitiateMultipartUploadResult, ObjectPart,
|
||||
},
|
||||
constants::{ABS_MIN_PART_SIZE, ISO8601_DATEFORMAT, MAX_PART_SIZE, MAX_SINGLE_PUT_OBJECT_SIZE},
|
||||
transition_api::{ReaderImpl, RequestMetadata, TransitionClient, UploadInfo},
|
||||
};
|
||||
use crate::{
|
||||
disk::DiskAPI,
|
||||
store_api::{
|
||||
GetObjectReader, StorageAPI,
|
||||
},
|
||||
checksum::ChecksumMode,
|
||||
disk::DiskAPI,
|
||||
store_api::{GetObjectReader, StorageAPI},
|
||||
};
|
||||
use rustfs_utils::{crypto::base64_encode, path::trim_etag};
|
||||
|
||||
impl TransitionClient {
|
||||
pub async fn put_object_multipart(&self, bucket_name: &str, object_name: &str, mut reader: ReaderImpl, size: i64,
|
||||
opts: &PutObjectOptions
|
||||
pub async fn put_object_multipart(
|
||||
&self,
|
||||
bucket_name: &str,
|
||||
object_name: &str,
|
||||
mut reader: ReaderImpl,
|
||||
size: i64,
|
||||
opts: &PutObjectOptions,
|
||||
) -> Result<UploadInfo, std::io::Error> {
|
||||
let info = self.put_object_multipart_no_stream(bucket_name, object_name, &mut reader, opts).await;
|
||||
let info = self
|
||||
.put_object_multipart_no_stream(bucket_name, object_name, &mut reader, opts)
|
||||
.await;
|
||||
if let Err(err) = &info {
|
||||
let err_resp = to_error_response(err);
|
||||
if err_resp.code == S3ErrorCode::AccessDenied && err_resp.message.contains("Access Denied") {
|
||||
if size > MAX_SINGLE_PUT_OBJECT_SIZE {
|
||||
return Err(std::io::Error::other(err_entity_too_large(size, MAX_SINGLE_PUT_OBJECT_SIZE, bucket_name, object_name)));
|
||||
return Err(std::io::Error::other(err_entity_too_large(
|
||||
size,
|
||||
MAX_SINGLE_PUT_OBJECT_SIZE,
|
||||
bucket_name,
|
||||
object_name,
|
||||
)));
|
||||
}
|
||||
return self.put_object_gcs(bucket_name, object_name, reader, size, opts).await;
|
||||
}
|
||||
@@ -52,7 +63,13 @@ impl TransitionClient {
|
||||
Ok(info?)
|
||||
}
|
||||
|
||||
pub async fn put_object_multipart_no_stream(&self, bucket_name: &str, object_name: &str, reader: &mut ReaderImpl, opts: &PutObjectOptions) -> Result<UploadInfo, std::io::Error> {
pub async fn put_object_multipart_no_stream(
&self,
bucket_name: &str,
object_name: &str,
reader: &mut ReaderImpl,
opts: &PutObjectOptions,
) -> Result<UploadInfo, std::io::Error> {
let mut total_uploaded_size: i64 = 0;
let mut compl_multipart_upload = CompleteMultipartUpload::default();

@@ -91,10 +108,10 @@ impl TransitionClient {
let mut sha256_hex: String;

//if hash_sums["md5"] != nil {
md5_base64 = base64_encode(&hash_sums["md5"]);
md5_base64 = base64_encode(&hash_sums["md5"]);
//}
//if hash_sums["sha256"] != nil {
sha256_hex = hex_simd::encode_to_string(hash_sums["sha256"].clone(), hex_simd::AsciiCase::Lower);
sha256_hex = hex_simd::encode_to_string(hash_sums["sha256"].clone(), hex_simd::AsciiCase::Lower);
//}
if hash_sums.len() == 0 {
let csum;
@@ -137,12 +154,12 @@ impl TransitionClient {
let part = parts_info[&i].clone();
all_parts.push(part.clone());
compl_multipart_upload.parts.push(CompletePart {
etag: part.etag,
part_num: part.part_num,
checksum_crc32: part.checksum_crc32,
checksum_crc32c: part.checksum_crc32c,
checksum_sha1: part.checksum_sha1,
checksum_sha256: part.checksum_sha256,
etag: part.etag,
part_num: part.part_num,
checksum_crc32: part.checksum_crc32,
checksum_crc32c: part.checksum_crc32c,
checksum_sha1: part.checksum_sha1,
checksum_sha256: part.checksum_sha256,
checksum_crc64nvme: part.checksum_crc64nvme,
..Default::default()
});
@@ -151,18 +168,25 @@ impl TransitionClient {
compl_multipart_upload.parts.sort();
let mut opts = PutObjectOptions {
//server_side_encryption: opts.server_side_encryption,
auto_checksum: opts.auto_checksum,
auto_checksum: opts.auto_checksum,
..Default::default()
};
//apply_auto_checksum(&mut opts, all_parts);

let mut upload_info = self.complete_multipart_upload(bucket_name, object_name, &upload_id, compl_multipart_upload, &opts).await?;
let mut upload_info = self
.complete_multipart_upload(bucket_name, object_name, &upload_id, compl_multipart_upload, &opts)
.await?;

upload_info.size = total_uploaded_size;
Ok(upload_info)
}

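Note: taken together, the methods in this file implement the standard S3 multipart sequence. A minimal sketch of that flow, assuming a constructed TransitionClient, that InitiateMultipartUploadResult exposes an upload_id field, and that UploadPartParams implements Default (none of which is shown in this hunk):

    // Hedged sketch: initiate -> upload each part -> complete.
    let init = client.initiate_multipart_upload(bucket, key, &opts).await?;
    let mut complete = CompleteMultipartUpload::default();
    for (i, chunk) in chunks.into_iter().enumerate() {
        let size = chunk.len() as i64; // chunk: bytes::Bytes
        let mut p = UploadPartParams {
            bucket_name: bucket.to_string(),
            object_name: key.to_string(),
            upload_id: init.upload_id.clone(), // field name assumed
            reader: ReaderImpl::Body(chunk),
            part_number: i as i64 + 1, // part numbers are 1-based
            size,
            ..Default::default() // assumes a Default impl
        };
        let part = client.upload_part(&mut p).await?;
        complete.parts.push(CompletePart {
            etag: part.etag,
            part_num: part.part_num,
            ..Default::default()
        });
    }
    client.complete_multipart_upload(bucket, key, &init.upload_id, complete, &opts).await?;
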
pub async fn initiate_multipart_upload(&self, bucket_name: &str, object_name: &str, opts: &PutObjectOptions) -> Result<InitiateMultipartUploadResult, std::io::Error> {
pub async fn initiate_multipart_upload(
&self,
bucket_name: &str,
object_name: &str,
opts: &PutObjectOptions,
) -> Result<InitiateMultipartUploadResult, std::io::Error> {
let mut url_values = HashMap::new();
url_values.insert("uploads".to_string(), "".to_string());

@@ -180,9 +204,9 @@ impl TransitionClient {
let mut req_metadata = RequestMetadata {
bucket_name: bucket_name.to_string(),
object_name: object_name.to_string(),
query_values: url_values,
query_values: url_values,
custom_header,
content_body: ReaderImpl::Body(Bytes::new()),
content_body: ReaderImpl::Body(Bytes::new()),
content_length: 0,
content_md5_base64: "".to_string(),
content_sha256_hex: "".to_string(),
@@ -197,9 +221,9 @@ impl TransitionClient {

let resp = self.execute_method(http::Method::POST, &mut req_metadata).await?;
//if resp.is_none() {
if resp.status() != StatusCode::OK {
return Err(std::io::Error::other(http_resp_to_error_response(resp, vec![], bucket_name, object_name)));
}
if resp.status() != StatusCode::OK {
return Err(std::io::Error::other(http_resp_to_error_response(resp, vec![], bucket_name, object_name)));
}
//}
let initiate_multipart_upload_result = InitiateMultipartUploadResult::default();
Ok(initiate_multipart_upload_result)
@@ -207,13 +231,20 @@ impl TransitionClient {

pub async fn upload_part(&self, p: &mut UploadPartParams) -> Result<ObjectPart, std::io::Error> {
if p.size > MAX_PART_SIZE {
return Err(std::io::Error::other(err_entity_too_large(p.size, MAX_PART_SIZE, &p.bucket_name, &p.object_name)));
return Err(std::io::Error::other(err_entity_too_large(
p.size,
MAX_PART_SIZE,
&p.bucket_name,
&p.object_name,
)));
}
if p.size <= -1 {
return Err(std::io::Error::other(err_entity_too_small(p.size, &p.bucket_name, &p.object_name)));
}
if p.part_number <= 0 {
return Err(std::io::Error::other(err_invalid_argument("Part number cannot be negative or equal to zero.")));
return Err(std::io::Error::other(err_invalid_argument(
"Part number cannot be negative or equal to zero.",
)));
}
if p.upload_id == "" {
return Err(std::io::Error::other(err_invalid_argument("UploadID cannot be empty.")));
@@ -224,55 +255,85 @@ impl TransitionClient {
url_values.insert("uploadId".to_string(), p.upload_id.clone());

let buf = match &mut p.reader {
ReaderImpl::Body(content_body) => {
content_body.to_vec()
}
ReaderImpl::ObjectBody(content_body) => {
content_body.read_all().await?
}
ReaderImpl::Body(content_body) => content_body.to_vec(),
ReaderImpl::ObjectBody(content_body) => content_body.read_all().await?,
};
let mut req_metadata = RequestMetadata {
bucket_name: p.bucket_name.clone(),
object_name: p.object_name.clone(),
query_values: url_values,
custom_header: p.custom_header.clone(),
content_body: ReaderImpl::Body(Bytes::from(buf)),
content_length: p.size,
bucket_name: p.bucket_name.clone(),
object_name: p.object_name.clone(),
query_values: url_values,
custom_header: p.custom_header.clone(),
content_body: ReaderImpl::Body(Bytes::from(buf)),
content_length: p.size,
content_md5_base64: p.md5_base64.clone(),
content_sha256_hex: p.sha256_hex.clone(),
stream_sha256: p.stream_sha256,
trailer: p.trailer.clone(),
pre_sign_url: Default::default(),
stream_sha256: p.stream_sha256,
trailer: p.trailer.clone(),
pre_sign_url: Default::default(),
add_crc: Default::default(),
extra_pre_sign_header: Default::default(),
bucket_location: Default::default(),
expires: Default::default(),
bucket_location: Default::default(),
expires: Default::default(),
};

let resp = self.execute_method(http::Method::PUT, &mut req_metadata).await?;
//defer closeResponse(resp)
//if resp.is_none() {
if resp.status() != StatusCode::OK {
return Err(std::io::Error::other(http_resp_to_error_response(resp, vec![], &p.bucket_name.clone(), &p.object_name)));
}
if resp.status() != StatusCode::OK {
return Err(std::io::Error::other(http_resp_to_error_response(
resp,
vec![],
&p.bucket_name.clone(),
&p.object_name,
)));
}
//}
let h = resp.headers();
let mut obj_part = ObjectPart {
checksum_crc32: if let Some(h_checksum_crc32) = h.get(ChecksumMode::ChecksumCRC32.key()) { h_checksum_crc32.to_str().expect("err").to_string() } else { "".to_string() },
checksum_crc32c: if let Some(h_checksum_crc32c) = h.get(ChecksumMode::ChecksumCRC32C.key()) { h_checksum_crc32c.to_str().expect("err").to_string() } else { "".to_string() },
checksum_sha1: if let Some(h_checksum_sha1) = h.get(ChecksumMode::ChecksumSHA1.key()) { h_checksum_sha1.to_str().expect("err").to_string() } else { "".to_string() },
checksum_sha256: if let Some(h_checksum_sha256) = h.get(ChecksumMode::ChecksumSHA256.key()) { h_checksum_sha256.to_str().expect("err").to_string() } else { "".to_string() },
checksum_crc64nvme: if let Some(h_checksum_crc64nvme) = h.get(ChecksumMode::ChecksumCRC64NVME.key()) { h_checksum_crc64nvme.to_str().expect("err").to_string() } else { "".to_string() },
checksum_crc32: if let Some(h_checksum_crc32) = h.get(ChecksumMode::ChecksumCRC32.key()) {
h_checksum_crc32.to_str().expect("err").to_string()
} else {
"".to_string()
},
checksum_crc32c: if let Some(h_checksum_crc32c) = h.get(ChecksumMode::ChecksumCRC32C.key()) {
h_checksum_crc32c.to_str().expect("err").to_string()
} else {
"".to_string()
},
checksum_sha1: if let Some(h_checksum_sha1) = h.get(ChecksumMode::ChecksumSHA1.key()) {
h_checksum_sha1.to_str().expect("err").to_string()
} else {
"".to_string()
},
checksum_sha256: if let Some(h_checksum_sha256) = h.get(ChecksumMode::ChecksumSHA256.key()) {
h_checksum_sha256.to_str().expect("err").to_string()
} else {
"".to_string()
},
checksum_crc64nvme: if let Some(h_checksum_crc64nvme) = h.get(ChecksumMode::ChecksumCRC64NVME.key()) {
h_checksum_crc64nvme.to_str().expect("err").to_string()
} else {
"".to_string()
},
..Default::default()
};
obj_part.size = p.size;
obj_part.part_num = p.part_number;
obj_part.etag = if let Some(h_etag) = h.get("ETag") { h_etag.to_str().expect("err").trim_matches('"').to_string() } else { "".to_string() };
obj_part.etag = if let Some(h_etag) = h.get("ETag") {
h_etag.to_str().expect("err").trim_matches('"').to_string()
} else {
"".to_string()
};
Ok(obj_part)
}

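Note: put_object_multipart_no_stream calls compl_multipart_upload.parts.sort() before completion, and CompletePart (see the api_s3_datatypes hunk later in this commit) derives Ord with etag declared before part_num. Derived Ord compares fields in declaration order, so this sort appears to order parts by etag first rather than by part number; worth verifying against the upstream intent. A standalone illustration of the pitfall:

    #[derive(PartialEq, Eq, PartialOrd, Ord, Debug)]
    struct Part {
        etag: String,  // compared first
        part_num: i64, // only breaks ties on equal etags
    }

    fn main() {
        let mut parts = vec![
            Part { etag: "b".into(), part_num: 1 },
            Part { etag: "a".into(), part_num: 2 },
        ];
        parts.sort();
        assert_eq!(parts[0].part_num, 2); // "a" < "b", so part 2 sorts first
    }
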
pub async fn complete_multipart_upload(&self, bucket_name: &str, object_name: &str, upload_id: &str,
complete: CompleteMultipartUpload, opts: &PutObjectOptions
pub async fn complete_multipart_upload(
&self,
bucket_name: &str,
object_name: &str,
upload_id: &str,
complete: CompleteMultipartUpload,
opts: &PutObjectOptions,
) -> Result<UploadInfo, std::io::Error> {
let mut url_values = HashMap::new();
url_values.insert("uploadId".to_string(), upload_id.to_string());
@@ -284,19 +345,19 @@ impl TransitionClient {
let mut req_metadata = RequestMetadata {
bucket_name: bucket_name.to_string(),
object_name: object_name.to_string(),
query_values: url_values,
content_body: ReaderImpl::Body(complete_multipart_upload_buffer),
content_length: 100,//complete_multipart_upload_bytes.len(),
content_sha256_hex: "".to_string(),//hex_simd::encode_to_string(complete_multipart_upload_bytes, hex_simd::AsciiCase::Lower),
custom_header: headers,
stream_sha256: Default::default(),
trailer: Default::default(),
query_values: url_values,
content_body: ReaderImpl::Body(complete_multipart_upload_buffer),
content_length: 100, //complete_multipart_upload_bytes.len(),
content_sha256_hex: "".to_string(), //hex_simd::encode_to_string(complete_multipart_upload_bytes, hex_simd::AsciiCase::Lower),
custom_header: headers,
stream_sha256: Default::default(),
trailer: Default::default(),
content_md5_base64: "".to_string(),
pre_sign_url: Default::default(),
pre_sign_url: Default::default(),
add_crc: Default::default(),
extra_pre_sign_header: Default::default(),
bucket_location: Default::default(),
expires: Default::default(),
bucket_location: Default::default(),
expires: Default::default(),
};

let resp = self.execute_method(http::Method::POST, &mut req_metadata).await?;
@@ -307,7 +368,7 @@ impl TransitionClient {
let (exp_time, rule_id) = if let Some(h_x_amz_expiration) = resp.headers().get(X_AMZ_EXPIRATION) {
(
OffsetDateTime::parse(h_x_amz_expiration.to_str().unwrap(), ISO8601_DATEFORMAT).unwrap(),
"".to_string()
"".to_string(),
)
} else {
(OffsetDateTime::now_utc(), "".to_string())
@@ -315,17 +376,21 @@ impl TransitionClient {

let h = resp.headers();
Ok(UploadInfo {
bucket: complete_multipart_upload_result.bucket,
key: complete_multipart_upload_result.key,
etag: trim_etag(&complete_multipart_upload_result.etag),
version_id: if let Some(h_x_amz_version_id) = h.get(X_AMZ_VERSION_ID) { h_x_amz_version_id.to_str().expect("err").to_string() } else { "".to_string() },
location: complete_multipart_upload_result.location,
expiration: exp_time,
bucket: complete_multipart_upload_result.bucket,
key: complete_multipart_upload_result.key,
etag: trim_etag(&complete_multipart_upload_result.etag),
version_id: if let Some(h_x_amz_version_id) = h.get(X_AMZ_VERSION_ID) {
h_x_amz_version_id.to_str().expect("err").to_string()
} else {
"".to_string()
},
location: complete_multipart_upload_result.location,
expiration: exp_time,
expiration_rule_id: rule_id,
checksum_sha256: complete_multipart_upload_result.checksum_sha256,
checksum_sha1: complete_multipart_upload_result.checksum_sha1,
checksum_crc32: complete_multipart_upload_result.checksum_crc32,
checksum_crc32c: complete_multipart_upload_result.checksum_crc32c,
checksum_sha256: complete_multipart_upload_result.checksum_sha256,
checksum_sha1: complete_multipart_upload_result.checksum_sha1,
checksum_crc32: complete_multipart_upload_result.checksum_crc32,
checksum_crc32c: complete_multipart_upload_result.checksum_crc32c,
checksum_crc64nvme: complete_multipart_upload_result.checksum_crc64nvme,
..Default::default()
})

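Note: ISO8601_DATEFORMAT (defined in the constants hunk at the end of this commit) ends in a literal Z rather than an [offset_hour] component. In the time crate, OffsetDateTime::parse requires an offset component in the format, so the expiration-header parse above may be worth a unit test; parsing via PrimitiveDateTime and assuming UTC does work with this format. A runnable check:

    use time::{PrimitiveDateTime, format_description::FormatItem, macros::format_description};

    const ISO8601_DATEFORMAT: &[FormatItem<'_>] =
        format_description!("[year]-[month]-[day]T[hour]:[minute]:[second].[subsecond]Z");

    fn main() {
        let t = PrimitiveDateTime::parse("2024-05-01T10:30:00.000Z", ISO8601_DATEFORMAT)
            .expect("parse")
            .assume_utc(); // the trailing Z is matched as a literal, so UTC is assumed here
        println!("{t}");
    }
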
@@ -1,28 +1,28 @@
#![allow(clippy::map_entry)]
use std::sync::RwLock;
use std::{collections::HashMap, sync::Arc};
use bytes::Bytes;
use futures::future::join_all;
use http::{HeaderMap, HeaderName, HeaderValue, StatusCode};
use time::{format_description, OffsetDateTime};
use std::sync::RwLock;
use std::{collections::HashMap, sync::Arc};
use time::{OffsetDateTime, format_description};
use tokio::{select, sync::mpsc};
use tokio_util::sync::CancellationToken;
use tracing::warn;
use uuid::Uuid;

use s3s::header::{X_AMZ_EXPIRATION, X_AMZ_VERSION_ID};
use reader::hasher::Hasher;
use crate::checksum::{ChecksumMode, add_auto_checksum_headers, apply_auto_checksum};
use crate::client::{
constants::ISO8601_DATEFORMAT,
api_error_response::{err_invalid_argument, err_unexpected_eof, http_resp_to_error_response},
api_put_object::PutObjectOptions,
api_put_object_common::{is_object, optimal_part_info},
api_put_object_multipart::UploadPartParams,
api_s3_datatypes::{CompleteMultipartUpload, CompletePart, ObjectPart},
transition_api::{TransitionClient, RequestMetadata, UploadInfo, ReaderImpl},
api_put_object_common::{is_object, optimal_part_info,},
api_error_response::{err_invalid_argument, http_resp_to_error_response, err_unexpected_eof},
constants::ISO8601_DATEFORMAT,
transition_api::{ReaderImpl, RequestMetadata, TransitionClient, UploadInfo},
};
use reader::hasher::Hasher;
use rustfs_utils::{crypto::base64_encode, path::trim_etag};
use crate::checksum::{add_auto_checksum_headers, apply_auto_checksum, ChecksumMode};
use s3s::header::{X_AMZ_EXPIRATION, X_AMZ_VERSION_ID};

pub struct UploadedPartRes {
pub error: std::io::Error,
@@ -37,23 +37,39 @@ pub struct UploadPartReq {
}

impl TransitionClient {
pub async fn put_object_multipart_stream(self: Arc<Self>, bucket_name: &str, object_name: &str,
mut reader: ReaderImpl, size: i64, opts: &PutObjectOptions
pub async fn put_object_multipart_stream(
self: Arc<Self>,
bucket_name: &str,
object_name: &str,
mut reader: ReaderImpl,
size: i64,
opts: &PutObjectOptions,
) -> Result<UploadInfo, std::io::Error> {
let info: UploadInfo;
if opts.concurrent_stream_parts && opts.num_threads > 1 {
info = self.put_object_multipart_stream_parallel(bucket_name, object_name, reader, opts).await?;
info = self
.put_object_multipart_stream_parallel(bucket_name, object_name, reader, opts)
.await?;
} else if !is_object(&reader) && !opts.send_content_md5 {
info = self.put_object_multipart_stream_from_readat(bucket_name, object_name, reader, size, opts).await?;
info = self
.put_object_multipart_stream_from_readat(bucket_name, object_name, reader, size, opts)
.await?;
} else {
info = self.put_object_multipart_stream_optional_checksum(bucket_name, object_name, reader, size, opts).await?;
info = self
.put_object_multipart_stream_optional_checksum(bucket_name, object_name, reader, size, opts)
.await?;
}


Ok(info)
}

pub async fn put_object_multipart_stream_from_readat(&self, bucket_name: &str, object_name: &str,
mut reader: ReaderImpl, size: i64, opts: &PutObjectOptions
pub async fn put_object_multipart_stream_from_readat(
&self,
bucket_name: &str,
object_name: &str,
mut reader: ReaderImpl,
size: i64,
opts: &PutObjectOptions,
) -> Result<UploadInfo, std::io::Error> {
let ret = optimal_part_info(size, opts.part_size)?;
let (total_parts_count, part_size, lastpart_size) = ret;
@@ -68,8 +84,13 @@ impl TransitionClient {
todo!();
}

pub async fn put_object_multipart_stream_optional_checksum(&self, bucket_name: &str, object_name: &str,
mut reader: ReaderImpl, size: i64, opts: &PutObjectOptions
pub async fn put_object_multipart_stream_optional_checksum(
&self,
bucket_name: &str,
object_name: &str,
mut reader: ReaderImpl,
size: i64,
opts: &PutObjectOptions,
) -> Result<UploadInfo, std::io::Error> {
let mut opts = opts.clone();
if opts.checksum.is_set() {
@@ -130,7 +151,7 @@ impl TransitionClient {
}
}

let hooked = ReaderImpl::Body(Bytes::from(buf));//newHook(BufferReader::new(buf), opts.progress);
let hooked = ReaderImpl::Body(Bytes::from(buf)); //newHook(BufferReader::new(buf), opts.progress);
let mut p = UploadPartParams {
bucket_name: bucket_name.to_string(),
object_name: object_name.to_string(),
@@ -154,7 +175,12 @@ impl TransitionClient {

if size > 0 {
if total_uploaded_size != size {
return Err(std::io::Error::other(err_unexpected_eof(total_uploaded_size, size, bucket_name, object_name)));
return Err(std::io::Error::other(err_unexpected_eof(
total_uploaded_size,
size,
bucket_name,
object_name,
)));
}
}

@@ -167,12 +193,12 @@ impl TransitionClient {

all_parts.push(part.clone());
compl_multipart_upload.parts.push(CompletePart {
etag: part.etag,
part_num: part.part_num,
checksum_crc32: part.checksum_crc32,
checksum_crc32c: part.checksum_crc32c,
checksum_sha1: part.checksum_sha1,
checksum_sha256: part.checksum_sha256,
etag: part.etag,
part_num: part.part_num,
checksum_crc32: part.checksum_crc32,
checksum_crc32c: part.checksum_crc32c,
checksum_sha1: part.checksum_sha1,
checksum_sha256: part.checksum_sha256,
checksum_crc64nvme: part.checksum_crc64nvme,
});
}
@@ -181,18 +207,24 @@ impl TransitionClient {

let mut opts = PutObjectOptions {
//server_side_encryption: opts.server_side_encryption,
auto_checksum: opts.auto_checksum,
auto_checksum: opts.auto_checksum,
..Default::default()
};
apply_auto_checksum(&mut opts, &mut all_parts);
let mut upload_info = self.complete_multipart_upload(bucket_name, object_name, &upload_id, compl_multipart_upload, &opts).await?;
let mut upload_info = self
.complete_multipart_upload(bucket_name, object_name, &upload_id, compl_multipart_upload, &opts)
.await?;

upload_info.size = total_uploaded_size;
Ok(upload_info)
}

pub async fn put_object_multipart_stream_parallel(self: Arc<Self>, bucket_name: &str, object_name: &str,
mut reader: ReaderImpl/*GetObjectReader*/, opts: &PutObjectOptions
pub async fn put_object_multipart_stream_parallel(
self: Arc<Self>,
bucket_name: &str,
object_name: &str,
mut reader: ReaderImpl, /*GetObjectReader*/
opts: &PutObjectOptions,
) -> Result<UploadInfo, std::io::Error> {
let mut opts = opts.clone();
if opts.checksum.is_set() {
@@ -239,7 +271,11 @@ impl TransitionClient {
}

if buf.len() != part_size as usize {
return Err(std::io::Error::other(format!("read buffer < {} than expected partSize: {}", buf.len(), part_size)));
return Err(std::io::Error::other(format!(
"read buffer < {} than expected partSize: {}",
buf.len(),
part_size
)));
}

match &mut reader {
@@ -286,21 +322,21 @@ impl TransitionClient {

//defer wg.Done()
let mut p = UploadPartParams {
bucket_name: bucket_name.to_string(),
object_name: object_name.to_string(),
bucket_name: bucket_name.to_string(),
object_name: object_name.to_string(),
upload_id: clone_upload_id,
reader: ReaderImpl::Body(Bytes::from(buf.clone())),
reader: ReaderImpl::Body(Bytes::from(buf.clone())),
part_number,
md5_base64,
size: length as i64,
size: length as i64,
//sse: opts.server_side_encryption,
stream_sha256: !opts.disable_content_sha256,
custom_header,
sha256_hex: "".to_string(),
trailer: HeaderMap::new(),
sha256_hex: "".to_string(),
trailer: HeaderMap::new(),
};
let obj_part = clone_self.upload_part(&mut p).await.expect("err");


let mut clone_parts_info = clone_parts_info.write().unwrap();
clone_parts_info.entry(part_number).or_insert(obj_part);

@@ -328,12 +364,12 @@ impl TransitionClient {

all_parts.push(part.clone());
compl_multipart_upload.parts.push(CompletePart {
etag: part.etag,
part_num: part.part_num,
checksum_crc32: part.checksum_crc32,
checksum_crc32c: part.checksum_crc32c,
checksum_sha1: part.checksum_sha1,
checksum_sha256: part.checksum_sha256,
etag: part.etag,
part_num: part.part_num,
checksum_crc32: part.checksum_crc32,
checksum_crc32c: part.checksum_crc32c,
checksum_sha1: part.checksum_sha1,
checksum_sha256: part.checksum_sha256,
checksum_crc64nvme: part.checksum_crc64nvme,
..Default::default()
});
@@ -343,41 +379,60 @@ impl TransitionClient {

let mut opts = PutObjectOptions {
//server_side_encryption: opts.server_side_encryption,
auto_checksum: opts.auto_checksum,
auto_checksum: opts.auto_checksum,
..Default::default()
};
apply_auto_checksum(&mut opts, &mut all_parts);

let mut upload_info = self.complete_multipart_upload(bucket_name, object_name, &upload_id, compl_multipart_upload, &opts).await?;
let mut upload_info = self
.complete_multipart_upload(bucket_name, object_name, &upload_id, compl_multipart_upload, &opts)
.await?;

upload_info.size = total_uploaded_size;
Ok(upload_info)
}

pub async fn put_object_gcs(&self, bucket_name: &str, object_name: &str, mut reader: ReaderImpl, size: i64, opts: &PutObjectOptions) -> Result<UploadInfo, std::io::Error> {
pub async fn put_object_gcs(
&self,
bucket_name: &str,
object_name: &str,
mut reader: ReaderImpl,
size: i64,
opts: &PutObjectOptions,
) -> Result<UploadInfo, std::io::Error> {
let mut opts = opts.clone();
if opts.checksum.is_set() {
opts.send_content_md5 = false;
}

let mut md5_base64: String = "".to_string();
let progress_reader = reader;//newHook(reader, opts.progress);
let progress_reader = reader; //newHook(reader, opts.progress);

self.put_object_do(bucket_name, object_name, progress_reader, &md5_base64, "", size, &opts).await
self.put_object_do(bucket_name, object_name, progress_reader, &md5_base64, "", size, &opts)
.await
}

pub async fn put_object_do(&self, bucket_name: &str, object_name: &str, reader: ReaderImpl, md5_base64: &str, sha256_hex: &str, size: i64, opts: &PutObjectOptions) -> Result<UploadInfo, std::io::Error> {
pub async fn put_object_do(
&self,
bucket_name: &str,
object_name: &str,
reader: ReaderImpl,
md5_base64: &str,
sha256_hex: &str,
size: i64,
opts: &PutObjectOptions,
) -> Result<UploadInfo, std::io::Error> {
let custom_header = opts.header();

let mut req_metadata = RequestMetadata {
bucket_name: bucket_name.to_string(),
object_name: object_name.to_string(),
bucket_name: bucket_name.to_string(),
object_name: object_name.to_string(),
custom_header,
content_body: reader,
content_length: size,
content_body: reader,
content_length: size,
content_md5_base64: md5_base64.to_string(),
content_sha256_hex: sha256_hex.to_string(),
stream_sha256: !opts.disable_content_sha256,
stream_sha256: !opts.disable_content_sha256,
add_crc: Default::default(),
bucket_location: Default::default(),
pre_sign_url: Default::default(),
@@ -386,7 +441,7 @@ impl TransitionClient {
expires: Default::default(),
trailer: Default::default(),
};
let mut add_crc = false;//self.trailing_header_support && md5_base64 == "" && !s3utils.IsGoogleEndpoint(self.endpoint_url) && (opts.disable_content_sha256 || self.secure);
let mut add_crc = false; //self.trailing_header_support && md5_base64 == "" && !s3utils.IsGoogleEndpoint(self.endpoint_url) && (opts.disable_content_sha256 || self.secure);
let mut opts = opts.clone();
if opts.checksum.is_set() {
req_metadata.add_crc = opts.checksum;
@@ -422,26 +477,50 @@ impl TransitionClient {
let (exp_time, rule_id) = if let Some(h_x_amz_expiration) = resp.headers().get(X_AMZ_EXPIRATION) {
(
OffsetDateTime::parse(h_x_amz_expiration.to_str().unwrap(), ISO8601_DATEFORMAT).unwrap(),
"".to_string()
"".to_string(),
)
} else {
(OffsetDateTime::now_utc(), "".to_string())
};
let h = resp.headers();
Ok(UploadInfo {
bucket: bucket_name.to_string(),
key: object_name.to_string(),
etag: trim_etag(h.get("ETag").expect("err").to_str().expect("err")),
version_id: if let Some(h_x_amz_version_id) = h.get(X_AMZ_VERSION_ID) { h_x_amz_version_id.to_str().expect("err").to_string() } else { "".to_string() },
size: size,
expiration: exp_time,
bucket: bucket_name.to_string(),
key: object_name.to_string(),
etag: trim_etag(h.get("ETag").expect("err").to_str().expect("err")),
version_id: if let Some(h_x_amz_version_id) = h.get(X_AMZ_VERSION_ID) {
h_x_amz_version_id.to_str().expect("err").to_string()
} else {
"".to_string()
},
size: size,
expiration: exp_time,
expiration_rule_id: rule_id,
checksum_crc32: if let Some(h_checksum_crc32) = h.get(ChecksumMode::ChecksumCRC32.key()) { h_checksum_crc32.to_str().expect("err").to_string() } else { "".to_string() },
checksum_crc32c: if let Some(h_checksum_crc32c) = h.get(ChecksumMode::ChecksumCRC32C.key()) { h_checksum_crc32c.to_str().expect("err").to_string() } else { "".to_string() },
checksum_sha1: if let Some(h_checksum_sha1) = h.get(ChecksumMode::ChecksumSHA1.key()) { h_checksum_sha1.to_str().expect("err").to_string() } else { "".to_string() },
checksum_sha256: if let Some(h_checksum_sha256) = h.get(ChecksumMode::ChecksumSHA256.key()) { h_checksum_sha256.to_str().expect("err").to_string() } else { "".to_string() },
checksum_crc64nvme: if let Some(h_checksum_crc64nvme) = h.get(ChecksumMode::ChecksumCRC64NVME.key()) { h_checksum_crc64nvme.to_str().expect("err").to_string() } else { "".to_string() },
checksum_crc32: if let Some(h_checksum_crc32) = h.get(ChecksumMode::ChecksumCRC32.key()) {
h_checksum_crc32.to_str().expect("err").to_string()
} else {
"".to_string()
},
checksum_crc32c: if let Some(h_checksum_crc32c) = h.get(ChecksumMode::ChecksumCRC32C.key()) {
h_checksum_crc32c.to_str().expect("err").to_string()
} else {
"".to_string()
},
checksum_sha1: if let Some(h_checksum_sha1) = h.get(ChecksumMode::ChecksumSHA1.key()) {
h_checksum_sha1.to_str().expect("err").to_string()
} else {
"".to_string()
},
checksum_sha256: if let Some(h_checksum_sha256) = h.get(ChecksumMode::ChecksumSHA256.key()) {
h_checksum_sha256.to_str().expect("err").to_string()
} else {
"".to_string()
},
checksum_crc64nvme: if let Some(h_checksum_crc64nvme) = h.get(ChecksumMode::ChecksumCRC64NVME.key()) {
h_checksum_crc64nvme.to_str().expect("err").to_string()
} else {
"".to_string()
},
..Default::default()
})
}
}
}

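Note: the parallel path above shares parts_info across spawned tasks behind Arc<RwLock<HashMap<...>>> and records each finished part with entry(...).or_insert(...). The same pattern in a standalone form (plain threads here; the commit itself uses tokio tasks):

    use std::collections::HashMap;
    use std::sync::{Arc, RwLock};
    use std::thread;

    fn main() {
        let parts: Arc<RwLock<HashMap<i64, String>>> = Arc::new(RwLock::new(HashMap::new()));
        let handles: Vec<_> = (1..=4)
            .map(|part_number| {
                let parts = Arc::clone(&parts);
                thread::spawn(move || {
                    let etag = format!("etag-{part_number}"); // stands in for an uploaded part's ETag
                    parts.write().unwrap().entry(part_number).or_insert(etag);
                })
            })
            .collect();
        for h in handles {
            h.join().unwrap();
        }
        assert_eq!(parts.read().unwrap().len(), 4);
    }
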
@@ -1,26 +1,24 @@
#![allow(clippy::map_entry)]
use bytes::Bytes;
use http::{HeaderMap, HeaderValue, Method, StatusCode};
use s3s::S3ErrorCode;
use s3s::dto::ReplicationStatus;
use s3s::header::X_AMZ_BYPASS_GOVERNANCE_RETENTION;
use std::fmt::Display;
use std::{collections::HashMap, sync::Arc};
use bytes::Bytes;
use s3s::header::X_AMZ_BYPASS_GOVERNANCE_RETENTION;
use s3s::S3ErrorCode;
use time::OffsetDateTime;
use s3s::dto::ReplicationStatus;
use tokio::sync::mpsc::{self, Receiver, Sender};
use http::{HeaderMap, HeaderValue, Method, StatusCode};

use reader::hasher::{sum_sha256_hex, sum_md5_base64};
use rustfs_utils::hash::EMPTY_STRING_SHA256_HASH;
use crate::client::{
api_error_response::{ErrorResponse, http_resp_to_error_response, to_error_response},
transition_api::{ReaderImpl, RequestMetadata, TransitionClient},
};
use crate::{
disk::DiskAPI,
store_api::{
GetObjectReader, ObjectInfo, StorageAPI,
},
};
use crate::client::{
transition_api::{TransitionClient, RequestMetadata, ReaderImpl},
api_error_response::{http_resp_to_error_response, to_error_response, ErrorResponse,},
store_api::{GetObjectReader, ObjectInfo, StorageAPI},
};
use reader::hasher::{sum_md5_base64, sum_sha256_hex};
use rustfs_utils::hash::EMPTY_STRING_SHA256_HASH;

struct RemoveBucketOptions {
forced_elete: bool,
@@ -62,23 +60,28 @@ impl TransitionClient {
headers.insert(rustFSForceDelete, "true");
}*/

let resp = self.execute_method(Method::DELETE, &mut RequestMetadata {
bucket_name: bucket_name.to_string(),
content_sha256_hex: EMPTY_STRING_SHA256_HASH.to_string(),
custom_header: headers,
object_name: "".to_string(),
query_values: Default::default(),
content_body: ReaderImpl::Body(Bytes::new()),
content_length: 0,
content_md5_base64: "".to_string(),
stream_sha256: false,
trailer: HeaderMap::new(),
pre_sign_url: Default::default(),
add_crc: Default::default(),
extra_pre_sign_header: Default::default(),
bucket_location: Default::default(),
expires: Default::default(),
}).await?;
let resp = self
.execute_method(
Method::DELETE,
&mut RequestMetadata {
bucket_name: bucket_name.to_string(),
content_sha256_hex: EMPTY_STRING_SHA256_HASH.to_string(),
custom_header: headers,
object_name: "".to_string(),
query_values: Default::default(),
content_body: ReaderImpl::Body(Bytes::new()),
content_length: 0,
content_md5_base64: "".to_string(),
stream_sha256: false,
trailer: HeaderMap::new(),
pre_sign_url: Default::default(),
add_crc: Default::default(),
extra_pre_sign_header: Default::default(),
bucket_location: Default::default(),
expires: Default::default(),
},
)
.await?;

{
let mut bucket_loc_cache = self.bucket_loc_cache.lock().unwrap();
@@ -88,23 +91,28 @@ impl TransitionClient {
}

pub async fn remove_bucket(&self, bucket_name: &str) -> Result<(), std::io::Error> {
let resp = self.execute_method(http::Method::DELETE, &mut RequestMetadata {
bucket_name: bucket_name.to_string(),
content_sha256_hex: EMPTY_STRING_SHA256_HASH.to_string(),
custom_header: Default::default(),
object_name: "".to_string(),
query_values: Default::default(),
content_body: ReaderImpl::Body(Bytes::new()),
content_length: 0,
content_md5_base64: "".to_string(),
stream_sha256: false,
trailer: HeaderMap::new(),
pre_sign_url: Default::default(),
add_crc: Default::default(),
extra_pre_sign_header: Default::default(),
bucket_location: Default::default(),
expires: Default::default(),
}).await?;
let resp = self
.execute_method(
http::Method::DELETE,
&mut RequestMetadata {
bucket_name: bucket_name.to_string(),
content_sha256_hex: EMPTY_STRING_SHA256_HASH.to_string(),
custom_header: Default::default(),
object_name: "".to_string(),
query_values: Default::default(),
content_body: ReaderImpl::Body(Bytes::new()),
content_length: 0,
content_md5_base64: "".to_string(),
stream_sha256: false,
trailer: HeaderMap::new(),
pre_sign_url: Default::default(),
add_crc: Default::default(),
extra_pre_sign_header: Default::default(),
bucket_location: Default::default(),
expires: Default::default(),
},
)
.await?;

{
let mut bucket_loc_cache = self.bucket_loc_cache.lock().unwrap();
@@ -119,7 +127,12 @@ impl TransitionClient {
res.err
}

pub async fn remove_object_inner(&self, bucket_name: &str, object_name: &str, opts: RemoveObjectOptions) -> Result<RemoveObjectResult, std::io::Error> {
pub async fn remove_object_inner(
&self,
bucket_name: &str,
object_name: &str,
opts: RemoveObjectOptions,
) -> Result<RemoveObjectResult, std::io::Error> {
let mut url_values = HashMap::new();

if opts.version_id != "" {
@@ -129,49 +142,72 @@ impl TransitionClient {
let mut headers = HeaderMap::new();

if opts.governance_bypass {
headers.insert(X_AMZ_BYPASS_GOVERNANCE_RETENTION, "true".parse().expect("err"));//amzBypassGovernance
headers.insert(X_AMZ_BYPASS_GOVERNANCE_RETENTION, "true".parse().expect("err")); //amzBypassGovernance
}

let resp = self.execute_method(http::Method::DELETE, &mut RequestMetadata {
bucket_name: bucket_name.to_string(),
object_name: object_name.to_string(),
content_sha256_hex: EMPTY_STRING_SHA256_HASH.to_string(),
query_values: url_values,
custom_header: headers,
content_body: ReaderImpl::Body(Bytes::new()),
content_length: 0,
content_md5_base64: "".to_string(),
stream_sha256: false,
trailer: HeaderMap::new(),
pre_sign_url: Default::default(),
add_crc: Default::default(),
extra_pre_sign_header: Default::default(),
bucket_location: Default::default(),
expires: Default::default(),
}).await?;
let resp = self
.execute_method(
http::Method::DELETE,
&mut RequestMetadata {
bucket_name: bucket_name.to_string(),
object_name: object_name.to_string(),
content_sha256_hex: EMPTY_STRING_SHA256_HASH.to_string(),
query_values: url_values,
custom_header: headers,
content_body: ReaderImpl::Body(Bytes::new()),
content_length: 0,
content_md5_base64: "".to_string(),
stream_sha256: false,
trailer: HeaderMap::new(),
pre_sign_url: Default::default(),
add_crc: Default::default(),
extra_pre_sign_header: Default::default(),
bucket_location: Default::default(),
expires: Default::default(),
},
)
.await?;

Ok(RemoveObjectResult {
object_name: object_name.to_string(),
object_version_id: opts.version_id,
delete_marker: resp.headers().get("x-amz-delete-marker").expect("err") == "true",
delete_marker_version_id: resp.headers().get("x-amz-version-id").expect("err").to_str().expect("err").to_string(),
object_version_id: opts.version_id,
delete_marker: resp.headers().get("x-amz-delete-marker").expect("err") == "true",
delete_marker_version_id: resp
.headers()
.get("x-amz-version-id")
.expect("err")
.to_str()
.expect("err")
.to_string(),
..Default::default()
})
}

pub async fn remove_objects_with_result(self: Arc<Self>, bucket_name: &str, objects_rx: Receiver<ObjectInfo>, opts: RemoveObjectsOptions) -> Receiver<RemoveObjectResult> {
pub async fn remove_objects_with_result(
self: Arc<Self>,
bucket_name: &str,
objects_rx: Receiver<ObjectInfo>,
opts: RemoveObjectsOptions,
) -> Receiver<RemoveObjectResult> {
let (result_tx, mut result_rx) = mpsc::channel(1);

let self_clone = Arc::clone(&self);
let bucket_name_owned = bucket_name.to_string();

tokio::spawn(async move {
self_clone.remove_objects_inner(&bucket_name_owned, objects_rx, &result_tx, opts).await;
self_clone
.remove_objects_inner(&bucket_name_owned, objects_rx, &result_tx, opts)
.await;
});
result_rx
}

pub async fn remove_objects(self: Arc<Self>, bucket_name: &str, objects_rx: Receiver<ObjectInfo>, opts: RemoveObjectsOptions) -> Receiver<RemoveObjectError> {
pub async fn remove_objects(
self: Arc<Self>,
bucket_name: &str,
objects_rx: Receiver<ObjectInfo>,
opts: RemoveObjectsOptions,
) -> Receiver<RemoveObjectError> {
let (error_tx, mut error_rx) = mpsc::channel(1);

let self_clone = Arc::clone(&self);
@@ -179,26 +215,36 @@ impl TransitionClient {

let (result_tx, mut result_rx) = mpsc::channel(1);
tokio::spawn(async move {
self_clone.remove_objects_inner(&bucket_name_owned, objects_rx, &result_tx, opts).await;
self_clone
.remove_objects_inner(&bucket_name_owned, objects_rx, &result_tx, opts)
.await;
});
tokio::spawn(async move {
while let Some(res) = result_rx.recv().await {
if res.err.is_none() {
continue;
}
error_tx.send(RemoveObjectError {
object_name: res.object_name,
version_id: res.object_version_id,
err: res.err,
..Default::default()
}).await;
error_tx
.send(RemoveObjectError {
object_name: res.object_name,
version_id: res.object_version_id,
err: res.err,
..Default::default()
})
.await;
}
});

error_rx
}

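Note: remove_objects consumes a Receiver<ObjectInfo> and hands back a Receiver<RemoveObjectError>, so callers drive it with two channels. Hypothetical wiring, assuming an Arc<TransitionClient> named client and that RemoveObjectsOptions implements Default (not shown in this hunk):

    let (obj_tx, obj_rx) = tokio::sync::mpsc::channel::<ObjectInfo>(16);
    tokio::spawn(async move {
        for obj in objects_to_delete {
            if obj_tx.send(obj).await.is_err() {
                break; // receiver side shut down
            }
        }
    });
    let mut errors = client
        .remove_objects("my-bucket", obj_rx, RemoveObjectsOptions::default())
        .await;
    while let Some(e) = errors.recv().await {
        tracing::warn!("failed to remove {}: {:?}", e.object_name, e.err);
    }
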
pub async fn remove_objects_inner(&self, bucket_name: &str, mut objects_rx: Receiver<ObjectInfo>, result_tx: &Sender<RemoveObjectResult>, opts: RemoveObjectsOptions) -> Result<(), std::io::Error> {
pub async fn remove_objects_inner(
&self,
bucket_name: &str,
mut objects_rx: Receiver<ObjectInfo>,
result_tx: &Sender<RemoveObjectResult>,
opts: RemoveObjectsOptions,
) -> Result<(), std::io::Error> {
let max_entries = 1000;
let mut finish = false;
let mut url_values = HashMap::new();
@@ -213,11 +259,17 @@ impl TransitionClient {

while let Some(object) = objects_rx.recv().await {
if has_invalid_xml_char(&object.name) {
let remove_result = self.remove_object_inner(bucket_name, &object.name, RemoveObjectOptions {
version_id: object.version_id.expect("err").to_string(),
governance_bypass: opts.governance_bypass,
..Default::default()
}).await?;
let remove_result = self
.remove_object_inner(
bucket_name,
&object.name,
RemoveObjectOptions {
version_id: object.version_id.expect("err").to_string(),
governance_bypass: opts.governance_bypass,
..Default::default()
},
)
.await?;
let remove_result_clone = remove_result.clone();
if !remove_result.err.is_none() {
match to_error_response(&remove_result.err.expect("err")).code {
@@ -241,7 +293,7 @@ impl TransitionClient {
}
if count == 0 {
break;
}
}
if count < max_entries {
finish = true;
}
@@ -252,23 +304,28 @@ impl TransitionClient {
}

let remove_bytes = generate_remove_multi_objects_request(&batch);
let resp = self.execute_method(http::Method::POST, &mut RequestMetadata {
bucket_name: bucket_name.to_string(),
query_values: url_values.clone(),
content_body: ReaderImpl::Body(Bytes::from(remove_bytes.clone())),
content_length: remove_bytes.len() as i64,
content_md5_base64: sum_md5_base64(&remove_bytes),
content_sha256_hex: sum_sha256_hex(&remove_bytes),
custom_header: headers,
object_name: "".to_string(),
stream_sha256: false,
trailer: HeaderMap::new(),
pre_sign_url: Default::default(),
add_crc: Default::default(),
extra_pre_sign_header: Default::default(),
bucket_location: Default::default(),
expires: Default::default(),
}).await?;
let resp = self
.execute_method(
http::Method::POST,
&mut RequestMetadata {
bucket_name: bucket_name.to_string(),
query_values: url_values.clone(),
content_body: ReaderImpl::Body(Bytes::from(remove_bytes.clone())),
content_length: remove_bytes.len() as i64,
content_md5_base64: sum_md5_base64(&remove_bytes),
content_sha256_hex: sum_sha256_hex(&remove_bytes),
custom_header: headers,
object_name: "".to_string(),
stream_sha256: false,
trailer: HeaderMap::new(),
pre_sign_url: Default::default(),
add_crc: Default::default(),
extra_pre_sign_header: Default::default(),
bucket_location: Default::default(),
expires: Default::default(),
},
)
.await?;

let body_bytes: Vec<u8> = resp.body().bytes().expect("err").to_vec();
process_remove_multi_objects_response(ReaderImpl::Body(Bytes::from(body_bytes)), result_tx.clone());
@@ -285,49 +342,77 @@ impl TransitionClient {
Ok(())
}

pub async fn abort_multipart_upload(&self, bucket_name: &str, object_name: &str, upload_id: &str) -> Result<(), std::io::Error> {
pub async fn abort_multipart_upload(
&self,
bucket_name: &str,
object_name: &str,
upload_id: &str,
) -> Result<(), std::io::Error> {
let mut url_values = HashMap::new();
url_values.insert("uploadId".to_string(), upload_id.to_string());

let resp = self.execute_method(http::Method::DELETE, &mut RequestMetadata {
bucket_name: bucket_name.to_string(),
object_name: object_name.to_string(),
query_values: url_values,
content_sha256_hex: EMPTY_STRING_SHA256_HASH.to_string(),
custom_header: HeaderMap::new(),
content_body: ReaderImpl::Body(Bytes::new()),
content_length: 0,
content_md5_base64: "".to_string(),
stream_sha256: false,
trailer: HeaderMap::new(),
pre_sign_url: Default::default(),
add_crc: Default::default(),
extra_pre_sign_header: Default::default(),
bucket_location: Default::default(),
expires: Default::default(),
}).await?;
let resp = self
.execute_method(
http::Method::DELETE,
&mut RequestMetadata {
bucket_name: bucket_name.to_string(),
object_name: object_name.to_string(),
query_values: url_values,
content_sha256_hex: EMPTY_STRING_SHA256_HASH.to_string(),
custom_header: HeaderMap::new(),
content_body: ReaderImpl::Body(Bytes::new()),
content_length: 0,
content_md5_base64: "".to_string(),
stream_sha256: false,
trailer: HeaderMap::new(),
pre_sign_url: Default::default(),
add_crc: Default::default(),
extra_pre_sign_header: Default::default(),
bucket_location: Default::default(),
expires: Default::default(),
},
)
.await?;
//if resp.is_some() {
if resp.status() != StatusCode::NO_CONTENT {
let error_response: ErrorResponse;
match resp.status() {
StatusCode::NOT_FOUND => {
error_response = ErrorResponse {
code: S3ErrorCode::NoSuchUpload,
message: "The specified multipart upload does not exist.".to_string(),
bucket_name: bucket_name.to_string(),
key: object_name.to_string(),
request_id: resp.headers().get("x-amz-request-id").expect("err").to_str().expect("err").to_string(),
host_id: resp.headers().get("x-amz-id-2").expect("err").to_str().expect("err").to_string(),
region: resp.headers().get("x-amz-bucket-region").expect("err").to_str().expect("err").to_string(),
..Default::default()
};
}
_ => {
return Err(std::io::Error::other(http_resp_to_error_response(resp, vec![], bucket_name, object_name)));
}
if resp.status() != StatusCode::NO_CONTENT {
let error_response: ErrorResponse;
match resp.status() {
StatusCode::NOT_FOUND => {
error_response = ErrorResponse {
code: S3ErrorCode::NoSuchUpload,
message: "The specified multipart upload does not exist.".to_string(),
bucket_name: bucket_name.to_string(),
key: object_name.to_string(),
request_id: resp
.headers()
.get("x-amz-request-id")
.expect("err")
.to_str()
.expect("err")
.to_string(),
host_id: resp
.headers()
.get("x-amz-id-2")
.expect("err")
.to_str()
.expect("err")
.to_string(),
region: resp
.headers()
.get("x-amz-bucket-region")
.expect("err")
.to_str()
.expect("err")
.to_string(),
..Default::default()
};
}
_ => {
return Err(std::io::Error::other(http_resp_to_error_response(resp, vec![], bucket_name, object_name)));
}
return Err(std::io::Error::other(error_response));
}
return Err(std::io::Error::other(error_response));
}
//}
Ok(())
}
@@ -336,8 +421,8 @@ impl TransitionClient {
#[derive(Debug, Default)]
struct RemoveObjectError {
object_name: String,
version_id: String,
err: Option<std::io::Error>,
version_id: String,
err: Option<std::io::Error>,
}

impl Display for RemoveObjectError {
@@ -367,7 +452,7 @@ impl Clone for RemoveObjectResult {
delete_marker_version_id: self.delete_marker_version_id.clone(),
err: None, //err
}
}
}
}

pub struct RemoveObjectsOptions {

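Note: abort_multipart_upload maps a 404 to S3ErrorCode::NoSuchUpload and otherwise surfaces the raw response. A typical use (illustrative, not part of this commit) is cleaning up after a failed completion so uploaded parts do not linger:

    match client.complete_multipart_upload(bucket, key, &upload_id, parts, &opts).await {
        Ok(info) => println!("completed, etag {}", info.etag),
        Err(e) => {
            // Best-effort cleanup; a NoSuchUpload error here just means it is already gone.
            let _ = client.abort_multipart_upload(bucket, key, &upload_id).await;
            return Err(e);
        }
    }
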
@@ -1,12 +1,12 @@
#![allow(clippy::map_entry)]
use std::collections::HashMap;
use s3s::dto::Owner;
use serde::{Serialize, Deserialize};
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use time::OffsetDateTime;

use rustfs_utils::crypto::base64_decode;
use crate::checksum::ChecksumMode;
use crate::client::transition_api::ObjectMultipartInfo;
use rustfs_utils::crypto::base64_decode;

use super::transition_api;

@@ -31,15 +31,15 @@ pub struct ListBucketV2Result {
pub name: String,
pub next_continuation_token: String,
pub continuation_token: String,
pub prefix: String,
pub prefix: String,
pub fetch_owner: String,
pub start_after: String,
}

pub struct Version {
etag: String,
is_latest: bool,
key: String,
etag: String,
is_latest: bool,
key: String,
last_modified: OffsetDateTime,
owner: Owner,
size: i64,
@@ -53,15 +53,15 @@ pub struct Version {
pub struct ListVersionsResult {
versions: Vec<Version>,
common_prefixes: Vec<CommonPrefix>,
name: String,
prefix: String,
delimiter: String,
max_keys: i64,
encoding_type: String,
is_truncated: bool,
key_marker: String,
version_id_marker: String,
next_key_marker: String,
name: String,
prefix: String,
delimiter: String,
max_keys: i64,
encoding_type: String,
is_truncated: bool,
key_marker: String,
version_id_marker: String,
next_key_marker: String,
next_version_id_marker: String,
}

@@ -77,35 +77,35 @@ pub struct ListBucketResult {
delimiter: String,
encoding_type: String,
is_truncated: bool,
marker: String,
max_keys: i64,
name: String,
next_marker: String,
prefix: String,
marker: String,
max_keys: i64,
name: String,
next_marker: String,
prefix: String,
}

pub struct ListMultipartUploadsResult {
bucket: String,
key_marker: String,
upload_id_marker: String,
next_key_marker: String,
bucket: String,
key_marker: String,
upload_id_marker: String,
next_key_marker: String,
next_upload_id_marker: String,
encoding_type: String,
max_uploads: i64,
is_truncated: bool,
uploads: Vec<ObjectMultipartInfo>,
prefix: String,
delimiter: String,
common_prefixes: Vec<CommonPrefix>,
encoding_type: String,
max_uploads: i64,
is_truncated: bool,
uploads: Vec<ObjectMultipartInfo>,
prefix: String,
delimiter: String,
common_prefixes: Vec<CommonPrefix>,
}

pub struct Initiator {
id: String,
id: String,
display_name: String,
}

pub struct CopyObjectResult {
pub etag: String,
pub etag: String,
pub last_modified: OffsetDateTime,
}

@@ -115,10 +115,10 @@ pub struct ObjectPart {
pub part_num: i64,
pub last_modified: OffsetDateTime,
pub size: i64,
pub checksum_crc32: String,
pub checksum_crc32c: String,
pub checksum_sha1: String,
pub checksum_sha256: String,
pub checksum_crc32: String,
pub checksum_crc32c: String,
pub checksum_sha1: String,
pub checksum_sha256: String,
pub checksum_crc64nvme: String,
}

@@ -129,14 +129,13 @@ impl Default for ObjectPart {
part_num: 0,
last_modified: OffsetDateTime::now_utc(),
size: 0,
checksum_crc32: Default::default(),
checksum_crc32c: Default::default(),
checksum_sha1: Default::default(),
checksum_sha256: Default::default(),
checksum_crc32: Default::default(),
checksum_crc32c: Default::default(),
checksum_sha1: Default::default(),
checksum_sha256: Default::default(),
checksum_crc64nvme: Default::default(),
}
}

}

impl ObjectPart {
@@ -216,15 +215,15 @@ pub struct CompleteMultipartUploadResult {
pub checksum_crc64nvme: String,
}

#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord)]
#[derive(serde::Serialize)]
pub struct CompletePart { //api has
#[derive(Clone, Debug, Default, PartialEq, Eq, PartialOrd, Ord, serde::Serialize)]
pub struct CompletePart {
//api has
pub etag: String,
pub part_num: i64,
pub checksum_crc32: String,
pub checksum_crc32c: String,
pub checksum_sha1: String,
pub checksum_sha256: String,
pub checksum_crc32: String,
pub checksum_crc32c: String,
pub checksum_sha1: String,
pub checksum_sha256: String,
pub checksum_crc64nvme: String,
}

@@ -258,8 +257,7 @@ pub struct CopyObjectPartResult {
pub last_modified: OffsetDateTime,
}

#[derive(Debug, Default)]
#[derive(serde::Serialize)]
#[derive(Debug, Default, serde::Serialize)]
pub struct CompleteMultipartUpload {
pub parts: Vec<CompletePart>,
}
@@ -287,12 +285,14 @@ pub struct CreateBucketConfiguration {
}

#[derive(serde::Serialize)]
pub struct DeleteObject { //api has
pub struct DeleteObject {
//api has
pub key: String,
pub version_id: String,
}

pub struct DeletedObject { //s3s has
pub struct DeletedObject {
//s3s has
pub key: String,
pub version_id: String,
pub deletemarker: bool,
@@ -300,15 +300,15 @@ pub struct DeletedObject { //s3s has
}

pub struct NonDeletedObject {
pub key: String,
pub code: String,
pub message: String,
pub key: String,
pub code: String,
pub message: String,
pub version_id: String,
}

#[derive(serde::Serialize)]
pub struct DeleteMultiObjects {
pub quiet: bool,
pub quiet: bool,
pub objects: Vec<DeleteObject>,
}

@@ -331,6 +331,6 @@ impl DeleteMultiObjects {
}

pub struct DeleteMultiObjectsResult {
pub deleted_objects: Vec<DeletedObject>,
pub deleted_objects: Vec<DeletedObject>,
pub undeleted_objects: Vec<NonDeletedObject>,
}

@@ -1,20 +1,20 @@
#![allow(clippy::map_entry)]
use http::Request;
use hyper::StatusCode;
use hyper::body::Incoming;
use std::{collections::HashMap, sync::Arc};
use tracing::warn;
use tracing::{error, info, debug};
use hyper::StatusCode;
use tracing::{debug, error, info};

use reader::hasher::{Hasher, Sha256};
use s3s::S3ErrorCode;
use s3s::Body;
use crate::client::{
api_error_response::{http_resp_to_error_response, to_error_response},
transition_api::{TransitionClient, Document},
transition_api::{Document, TransitionClient},
};
use crate::signer;
use reader::hasher::{Hasher, Sha256};
use rustfs_utils::hash::EMPTY_STRING_SHA256_HASH;
use s3s::Body;
use s3s::S3ErrorCode;

use super::constants::UNSIGNED_PAYLOAD;
use super::credentials::SignatureType;
@@ -25,9 +25,7 @@ pub struct BucketLocationCache {

impl BucketLocationCache {
pub fn new() -> BucketLocationCache {
BucketLocationCache{
items: HashMap::new(),
}
BucketLocationCache { items: HashMap::new() }
}

pub fn get(&self, bucket_name: &str) -> Option<String> {
@@ -50,7 +48,7 @@ impl TransitionClient {

async fn get_bucket_location_inner(&self, bucket_name: &str) -> Result<String, std::io::Error> {
if self.region != "" {
return Ok(self.region.clone())
return Ok(self.region.clone());
}

let mut location;
@@ -81,11 +79,7 @@ impl TransitionClient {
let mut target_url = self.endpoint_url.clone();
let scheme = self.endpoint_url.scheme();
let h = target_url.host().expect("host is none.");
let default_port = if scheme == "https" {
443
} else {
80
};
let default_port = if scheme == "https" { 443 } else { 80 };
let p = target_url.port().unwrap_or(default_port);

let is_virtual_style = self.is_virtual_host_style_request(&target_url, bucket_name);
@@ -128,9 +122,9 @@ impl TransitionClient {
}

let mut signer_type = value.signer_type.clone();
let mut access_key_id = value.access_key_id;
let mut secret_access_key = value.secret_access_key;
let mut session_token = value.session_token;
let mut access_key_id = value.access_key_id;
let mut secret_access_key = value.secret_access_key;
let mut session_token = value.session_token;

if self.override_signer_type != SignatureType::SignatureDefault {
signer_type = self.override_signer_type.clone();
@@ -164,7 +158,10 @@ impl TransitionClient {
content_sha256 = UNSIGNED_PAYLOAD.to_string();
}

req_builder.headers_mut().expect("err").insert("X-Amz-Content-Sha256", content_sha256.parse().unwrap());
req_builder
.headers_mut()
.expect("err")
.insert("X-Amz-Content-Sha256", content_sha256.parse().unwrap());
let req_builder = signer::sign_v4(req_builder, 0, &access_key_id, &secret_access_key, &session_token, "us-east-1");
let req = match req_builder.body(Body::empty()) {
Ok(req) => return Ok(req),
@@ -177,9 +174,9 @@ impl TransitionClient {

async fn process_bucket_location_response(mut resp: http::Response<Body>, bucket_name: &str) -> Result<String, std::io::Error> {
//if resp != nil {
if resp.status() != StatusCode::OK {
let err_resp = http_resp_to_error_response(resp, vec![], bucket_name, "");
match err_resp.code {
if resp.status() != StatusCode::OK {
let err_resp = http_resp_to_error_response(resp, vec![], bucket_name, "");
match err_resp.code {
S3ErrorCode::NotImplemented => {
match err_resp.server.as_str() {
"AmazonSnowball" => {
@@ -205,13 +202,13 @@ async fn process_bucket_location_response(mut resp: http::Response<Body>, bucket
return Err(std::io::Error::other(err_resp));
}
}
}
}
//}

let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec();
let Document(location_constraint) = serde_xml_rs::from_str::<Document>(&String::from_utf8(b).unwrap()).unwrap();

let mut location = location_constraint;
let mut location = location_constraint;
if location == "" {
location = "us-east-1".to_string();
}
@@ -221,4 +218,4 @@ async fn process_bucket_location_response(mut resp: http::Response<Body>, bucket
}

Ok(location)
}
}

@@ -1,7 +1,7 @@
#![allow(clippy::map_entry)]
use std::{collections::HashMap, sync::Arc};
use time::{macros::format_description, format_description::FormatItem};
use lazy_static::lazy_static;
use std::{collections::HashMap, sync::Arc};
use time::{format_description::FormatItem, macros::format_description};

pub const ABS_MIN_PART_SIZE: i64 = 1024 * 1024 * 5;
pub const MAX_PARTS_COUNT: i64 = 10000;
@@ -16,8 +16,9 @@ pub const UNSIGNED_PAYLOAD_TRAILER: &str = "STREAMING-UNSIGNED-PAYLOAD-TRAILER";

pub const TOTAL_WORKERS: i64 = 4;

pub const SIGN_V4_ALGORITHM: &str = "AWS4-HMAC-SHA256";
pub const ISO8601_DATEFORMAT: &[FormatItem<'_>] = format_description!("[year]-[month]-[day]T[hour]:[minute]:[second].[subsecond]Z");
pub const SIGN_V4_ALGORITHM: &str = "AWS4-HMAC-SHA256";
pub const ISO8601_DATEFORMAT: &[FormatItem<'_>] =
    format_description!("[year]-[month]-[day]T[hour]:[minute]:[second].[subsecond]Z");

const GetObjectAttributesTags: &str = "ETag,Checksum,StorageClass,ObjectSize,ObjectParts";
const GetObjectAttributesMaxParts: i64 = 1000;

@@ -19,8 +19,7 @@ pub struct Credentials<P: Provider + Default> {
    provider: P,
}

impl<P: Provider + Default> Credentials<P>
{
impl<P: Provider + Default> Credentials<P> {
    pub fn new(provider: P) -> Self {
        Self {
            provider: provider,
@@ -109,8 +108,8 @@ impl Provider for Static {

#[derive(Debug, Clone, Default)]
pub struct STSError {
    pub r#type: String,
    pub code: String,
    pub r#type: String,
    pub code: String,
    pub message: String,
}

@@ -128,21 +127,21 @@ impl Display for ErrorResponse {

impl ErrorResponse {
    fn error(&self) -> String {
        if self.sts_error.message == "" {
            return format!("Error response code {}.", self.sts_error.code);
        }
        return self.sts_error.message.clone();
        if self.sts_error.message == "" {
            return format!("Error response code {}.", self.sts_error.code);
        }
        return self.sts_error.message.clone();
    }
}

struct Error {
    code: String,
    message: String,
    code: String,
    message: String,
    bucket_name: String,
    key: String,
    resource: String,
    request_id: String,
    host_id: String,
    key: String,
    resource: String,
    request_id: String,
    host_id: String,
    region: String,
    server: String,
    status_code: i64,

@@ -1,18 +1,18 @@
pub mod constants;
pub mod transition_api;
pub mod api_list;
pub mod api_error_response;
pub mod api_s3_datatypes;
pub mod admin_handler_utils;
pub mod api_bucket_policy;
pub mod api_put_object_common;
pub mod api_get_options;
pub mod api_error_response;
pub mod api_get_object;
pub mod api_get_options;
pub mod api_list;
pub mod api_put_object;
pub mod api_put_object_streaming;
pub mod api_put_object_common;
pub mod api_put_object_multipart;
pub mod api_put_object_streaming;
pub mod api_remove;
pub mod api_s3_datatypes;
pub mod bucket_cache;
pub mod constants;
pub mod credentials;
pub mod object_api_utils;
pub mod object_handlers_common;
pub mod admin_handler_utils;
pub mod credentials;
pub mod bucket_cache;
pub mod transition_api;

@@ -1,17 +1,14 @@
#![allow(clippy::map_entry)]
use std::{collections::HashMap, sync::Arc};
use http::HeaderMap;
use tokio::io::BufReader;
use std::io::Cursor;
use std::{collections::HashMap, sync::Arc};
use tokio::io::BufReader;

use s3s::S3ErrorCode;
use crate::store_api::{
    GetObjectReader, HTTPRangeSpec,
    ObjectInfo, ObjectOptions,
};
use crate::error::ErrorResponse;
use crate::store_api::{GetObjectReader, HTTPRangeSpec, ObjectInfo, ObjectOptions};
use rustfs_filemeta::fileinfo::ObjectPartInfo;
use rustfs_rio::HashReader;
use crate::error::ErrorResponse;
use s3s::S3ErrorCode;

//#[derive(Clone)]
pub struct PutObjReader {
@@ -57,7 +54,11 @@ fn part_number_to_rangespec(oi: ObjectInfo, part_number: usize) -> Option<HTTPRa
        i += 1;
    }

    Some(HTTPRangeSpec {start: start as usize, end: Some(end as usize), is_suffix_length: false})
    Some(HTTPRangeSpec {
        start: start as usize,
        end: Some(end as usize),
        is_suffix_length: false,
    })
}

fn get_compressed_offsets(oi: ObjectInfo, offset: i64) -> (i64, i64, i64, i64, u64) {
@@ -71,7 +72,7 @@ fn get_compressed_offsets(oi: ObjectInfo, offset: i64) -> (i64, i64, i64, i64, u
    for (i, part) in oi.parts.iter().enumerate() {
        cumulative_actual_size += part.actual_size as i64;
        if cumulative_actual_size <= offset {
            compressed_offset += part.size as i64;
            compressed_offset += part.size as i64;
        } else {
            first_part_idx = i as i64;
            skip_length = cumulative_actual_size - part.actual_size as i64;
@@ -81,23 +82,32 @@ fn get_compressed_offsets(oi: ObjectInfo, offset: i64) -> (i64, i64, i64, i64, u
    skip_length = offset - skip_length;

    let parts: &[ObjectPartInfo] = &oi.parts;
    if skip_length > 0 && parts.len() > first_part_idx as usize && parts[first_part_idx as usize].index.as_ref().expect("err").len() > 0 {
    if skip_length > 0
        && parts.len() > first_part_idx as usize
        && parts[first_part_idx as usize].index.as_ref().expect("err").len() > 0
    {
        todo!();
    }

    (compressed_offset, part_skip, first_part_idx, decrypt_skip, seq_num)
}

pub fn new_getobjectreader(rs: HTTPRangeSpec, oi: &ObjectInfo, opts: &ObjectOptions, h: &HeaderMap) -> Result<(ObjReaderFn, i64, i64), ErrorResponse> {
pub fn new_getobjectreader(
    rs: HTTPRangeSpec,
    oi: &ObjectInfo,
    opts: &ObjectOptions,
    h: &HeaderMap,
) -> Result<(ObjReaderFn, i64, i64), ErrorResponse> {
    //let (_, mut is_encrypted) = crypto.is_encrypted(oi.user_defined)?;
    let mut is_encrypted = false;
    let is_compressed = false;//oi.is_compressed_ok();
    let is_compressed = false; //oi.is_compressed_ok();

    let mut get_fn: ObjReaderFn;

    let (off, length) = match rs.get_offset_length(oi.size) {
        Ok(x) => x,
        Err(err) => return Err(ErrorResponse {
    let (off, length) = match rs.get_offset_length(oi.size) {
        Ok(x) => x,
        Err(err) => {
            return Err(ErrorResponse {
                code: S3ErrorCode::InvalidRange,
                message: err.to_string(),
                key: None,
@@ -105,26 +115,27 @@ pub fn new_getobjectreader(rs: HTTPRangeSpec, oi: &ObjectInfo, opts: &ObjectOpti
                region: None,
                request_id: None,
                host_id: "".to_string(),
        }),
            });
        }
    };
    get_fn = Arc::new(move |input_reader: BufReader<Cursor<Vec<u8>>>, _: HeaderMap| {
        //Box::pin({
        /*let r = GetObjectReader {
            object_info: oi.clone(),
            stream: StreamingBlob::new(HashReader::new(input_reader, 10, None, None, 10)),
        };
    get_fn = Arc::new(move |input_reader: BufReader<Cursor<Vec<u8>>>, _: HeaderMap| {
        //Box::pin({
        /*let r = GetObjectReader {
            object_info: oi.clone(),
            stream: StreamingBlob::new(HashReader::new(input_reader, 10, None, None, 10)),
        };
        r*/
        todo!();
        //})
    });
        r*/
        todo!();
        //})
    });

    Ok((get_fn, off as i64, length as i64))
}

pub fn extract_etag(metadata: &HashMap<String, String>) -> String {
    if let Some(etag) = metadata.get("etag") {
    if let Some(etag) = metadata.get("etag") {
        etag.clone()
    } else {
        metadata["md5Sum"].clone()
    }
}
        metadata["md5Sum"].clone()
    }
}

@@ -1,15 +1,17 @@
use lock::local_locker::MAX_DELETE_LIST;
use crate::StorageAPI;
use crate::bucket::lifecycle::lifecycle;
use crate::bucket::versioning::VersioningApi;
use crate::bucket::versioning_sys::BucketVersioningSys;
use crate::store::ECStore;
use crate::store_api::{ObjectOptions, ObjectToDelete,};
use crate::StorageAPI;
use crate::bucket::lifecycle::lifecycle;
use crate::store_api::{ObjectOptions, ObjectToDelete};
use lock::local_locker::MAX_DELETE_LIST;

pub async fn delete_object_versions(api: ECStore, bucket: &str, to_del: &[ObjectToDelete], lc_event: lifecycle::Event) {
    let mut remaining = to_del;
    loop {
        if remaining.len() <= 0 {break};
        if remaining.len() <= 0 {
            break;
        };
        let mut to_del = remaining;
        if to_del.len() > MAX_DELETE_LIST {
            remaining = &to_del[MAX_DELETE_LIST..];
@@ -18,10 +20,14 @@ pub async fn delete_object_versions(api: ECStore, bucket: &str, to_del: &[Object
            remaining = &[];
        }
        let vc = BucketVersioningSys::get(bucket).await.expect("err!");
        let deleted_objs = api.delete_objects(bucket, to_del.to_vec(), ObjectOptions {
            //prefix_enabled_fn: vc.prefix_enabled(""),
            version_suspended: vc.suspended(),
            ..Default::default()
        });
        let deleted_objs = api.delete_objects(
            bucket,
            to_del.to_vec(),
            ObjectOptions {
                //prefix_enabled_fn: vc.prefix_enabled(""),
                version_suspended: vc.suspended(),
                ..Default::default()
            },
        );
    }
}

@@ -1,61 +1,63 @@
#![allow(clippy::map_entry)]
use std::pin::Pin;
use bytes::Bytes;
use futures::Future;
use http::{HeaderMap, HeaderName};
use serde::{Serialize, Deserialize};
use uuid::Uuid;
use http::{
    HeaderValue, Response, StatusCode,
    request::{Builder, Request},
};
use hyper_rustls::{ConfigBuilderExt, HttpsConnector};
use hyper_util::{client::legacy::Client, client::legacy::connect::HttpConnector, rt::TokioExecutor};
use rand::Rng;
use std::{collections::HashMap, sync::{Arc, Mutex}};
use serde::{Deserialize, Serialize};
use std::io::Cursor;
use std::pin::Pin;
use std::sync::atomic::{AtomicI32, Ordering};
use std::task::{Context, Poll};
use std::{
    collections::HashMap,
    sync::{Arc, Mutex},
};
use time::Duration;
use time::OffsetDateTime;
use hyper_rustls::{ConfigBuilderExt, HttpsConnector};
use hyper_util::{client::legacy::Client, rt::TokioExecutor, client::legacy::connect::HttpConnector};
use http::{StatusCode, HeaderValue, request::{Request, Builder}, Response};
use tracing::{error, debug};
use url::{form_urlencoded, Url};
use tokio::io::BufReader;
use std::io::Cursor;
use tracing::{debug, error};
use url::{Url, form_urlencoded};
use uuid::Uuid;

use s3s::S3ErrorCode;
use s3s::{dto::Owner, Body};
use s3s::dto::ReplicationStatus;
use crate::client::bucket_cache::BucketLocationCache;
use reader::hasher::{Sha256, MD5,};
use crate::client::{
    api_error_response::{err_invalid_argument, http_resp_to_error_response, to_error_response},
    api_get_options::GetObjectOptions,
    api_put_object::PutObjectOptions,
    api_put_object_multipart::UploadPartParams,
    api_s3_datatypes::{
        CompleteMultipartUpload, CompletePart, ListBucketResult, ListBucketV2Result, ListMultipartUploadsResult,
        ListObjectPartsResult, ObjectPart,
    },
    constants::{UNSIGNED_PAYLOAD, UNSIGNED_PAYLOAD_TRAILER},
    credentials::{CredContext, Credentials, SignatureType, Static},
};
use crate::signer;
use crate::{checksum::ChecksumMode, store_api::GetObjectReader};
use reader::hasher::{MD5, Sha256};
use rustfs_rio::HashReader;
use rustfs_utils::{
    net::get_endpoint_url,
    retry::{new_retry_timer, MAX_RETRY},
};
use crate::{
    store_api::GetObjectReader,
    checksum::ChecksumMode,
};
use crate::signer;
use crate::client::{
    constants::{UNSIGNED_PAYLOAD, UNSIGNED_PAYLOAD_TRAILER},
    credentials::{Credentials, SignatureType, CredContext, Static,},
    api_error_response::{to_error_response, http_resp_to_error_response, err_invalid_argument},
    api_put_object_multipart::UploadPartParams,
    api_put_object::PutObjectOptions,
    api_get_options::GetObjectOptions,
    api_s3_datatypes::{CompleteMultipartUpload, CompletePart, ListBucketResult, ListBucketV2Result, ListMultipartUploadsResult, ListObjectPartsResult, ObjectPart},
    retry::{MAX_RETRY, new_retry_timer},
};
use s3s::S3ErrorCode;
use s3s::dto::ReplicationStatus;
use s3s::{Body, dto::Owner};

const C_USER_AGENT_PREFIX: &str = "RustFS (linux; x86)";
const C_USER_AGENT: &str = "RustFS (linux; x86)";
const C_USER_AGENT: &str = "RustFS (linux; x86)";

const SUCCESS_STATUS: [StatusCode; 3] = [
    StatusCode::OK,
    StatusCode::NO_CONTENT,
    StatusCode::PARTIAL_CONTENT,
];
const SUCCESS_STATUS: [StatusCode; 3] = [StatusCode::OK, StatusCode::NO_CONTENT, StatusCode::PARTIAL_CONTENT];

const C_UNKNOWN: i32 = -1;
const C_OFFLINE: i32 = 0;
const C_ONLINE: i32 = 1;
const C_ONLINE: i32 = 1;

//pub type ReaderImpl = Box<dyn Reader + Send + Sync + 'static>;
pub enum ReaderImpl {
@@ -71,7 +73,7 @@ pub struct TransitionClient {
    pub override_signer_type: SignatureType,
    /*app_info: TODO*/
    pub secure: bool,
    pub http_client: Client<HttpsConnector<HttpConnector>, Body>,
    pub http_client: Client<HttpsConnector<HttpConnector>, Body>,
    //pub http_trace: Httptrace.ClientTrace,
    pub bucket_loc_cache: Arc<Mutex<BucketLocationCache>>,
    pub is_trace_enabled: Arc<Mutex<bool>>,
@@ -83,7 +85,7 @@ pub struct TransitionClient {
    pub random: u64,
    pub lookup: BucketLookupType,
    //pub lookupFn: func(u url.URL, bucketName string) BucketLookupType,
    pub md5_hasher: Arc<Mutex<Option<MD5>>>,
    pub md5_hasher: Arc<Mutex<Option<MD5>>>,
    pub sha256_hasher: Option<Sha256>,
    pub health_status: AtomicI32,
    pub trailing_header_support: bool,
@@ -92,16 +94,16 @@ pub struct TransitionClient {

#[derive(Debug, Default)]
pub struct Options {
    pub creds: Credentials<Static>,
    pub secure: bool,
    pub creds: Credentials<Static>,
    pub secure: bool,
    //pub transport: http.RoundTripper,
    //pub trace: *httptrace.ClientTrace,
    pub region: String,
    pub region: String,
    pub bucket_lookup: BucketLookupType,
    //pub custom_region_via_url: func(u url.URL) string,
    //pub bucket_lookup_via_url: func(u url.URL, bucketName string) BucketLookupType,
    pub trailing_headers: bool,
    pub custom_md5: Option<MD5>,
    pub custom_md5: Option<MD5>,
    pub custom_sha256: Option<Sha256>,
    pub max_retries: i64,
}
@@ -136,15 +138,13 @@ impl TransitionClient {
        //if scheme == "https" {
        //    client = Client::builder(TokioExecutor::new()).build_http();
        //} else {
        let tls = rustls::ClientConfig::builder()
            .with_native_roots()?
            .with_no_client_auth();
        let https = hyper_rustls::HttpsConnectorBuilder::new()
            .with_tls_config(tls)
            .https_or_http()
            .enable_http1()
            .build();
        client = Client::builder(TokioExecutor::new()).build(https);
        let tls = rustls::ClientConfig::builder().with_native_roots()?.with_no_client_auth();
        let https = hyper_rustls::HttpsConnectorBuilder::new()
            .with_tls_config(tls)
            .https_or_http()
            .enable_http1()
            .build();
        client = Client::builder(TokioExecutor::new()).build(https);
        //}

        let mut clnt = TransitionClient {
@@ -167,7 +167,7 @@ impl TransitionClient {
            trailing_header_support: opts.trailing_headers,
            max_retries: opts.max_retries,
        };


        {
            let mut md5_hasher = clnt.md5_hasher.lock().unwrap();
            if md5_hasher.is_none() {
@@ -218,7 +218,11 @@ impl TransitionClient {
        todo!();
    }

    pub fn hash_materials(&self, is_md5_requested: bool, is_sha256_requested: bool) -> (HashMap<String, MD5>, HashMap<String, Vec<u8>>) {
    pub fn hash_materials(
        &self,
        is_md5_requested: bool,
        is_sha256_requested: bool,
    ) -> (HashMap<String, MD5>, HashMap<String, Vec<u8>>) {
        todo!();
    }

@@ -227,7 +231,8 @@ impl TransitionClient {
    }

    fn mark_offline(&self) {
        self.health_status.compare_exchange(C_ONLINE, C_OFFLINE, Ordering::SeqCst, Ordering::SeqCst);
        self.health_status
            .compare_exchange(C_ONLINE, C_OFFLINE, Ordering::SeqCst, Ordering::SeqCst);
    }

    fn is_offline(&self) -> bool {
@@ -261,7 +266,9 @@ impl TransitionClient {
            debug!("endpoint_url: {}", self.endpoint_url.as_str().to_string());
            resp = http_client.request(req);
        }
        let resp = resp.await/*.map_err(Into::into)*/.map(|res| res.map(Body::from));
        let resp = resp
            .await /*.map_err(Into::into)*/
            .map(|res| res.map(Body::from));
        debug!("http_client url: {} {}", req_method, req_uri);
        debug!("http_client headers: {:?}", req_headers);
        if let Err(err) = resp {
@@ -282,7 +289,11 @@ impl TransitionClient {
        Ok(resp)
    }

    pub async fn execute_method(&self, method: http::Method, metadata: &mut RequestMetadata) -> Result<http::Response<Body>, std::io::Error> {
    pub async fn execute_method(
        &self,
        method: http::Method,
        metadata: &mut RequestMetadata,
    ) -> Result<http::Response<Body>, std::io::Error> {
        if self.is_offline() {
            let mut s = self.endpoint_url.to_string();
            s.push_str(" is offline.");
@@ -295,16 +306,18 @@ impl TransitionClient {
        let mut resp: http::Response<Body>;

        //if metadata.content_body != nil {
        //body_seeker = BufferReader::new(metadata.content_body.read_all().await?);
        retryable = true;
        if !retryable {
            req_retry = 1;
        }
        //body_seeker = BufferReader::new(metadata.content_body.read_all().await?);
        retryable = true;
        if !retryable {
            req_retry = 1;
        }
        //}

        //let mut retry_timer = RetryTimer::new();
        //while let Some(v) = retry_timer.next().await {
        for _ in [1;1]/*new_retry_timer(req_retry, DefaultRetryUnit, DefaultRetryCap, MaxJitter)*/ {
        for _ in [1; 1]
        /*new_retry_timer(req_retry, DefaultRetryUnit, DefaultRetryCap, MaxJitter)*/
        {
            let req = self.new_request(method, metadata).await?;

            resp = self.doit(req).await?;
@@ -355,7 +368,11 @@ impl TransitionClient {
        Err(std::io::Error::other("resp err"))
    }

    async fn new_request(&self, method: http::Method, metadata: &mut RequestMetadata) -> Result<http::Request<Body>, std::io::Error> {
    async fn new_request(
        &self,
        method: http::Method,
        metadata: &mut RequestMetadata,
    ) -> Result<http::Request<Body>, std::io::Error> {
        let location = metadata.bucket_location.clone();
        if location == "" {
            if metadata.bucket_name != "" {
@@ -366,8 +383,13 @@ impl TransitionClient {
        let is_makebucket = metadata.object_name == "" && method == http::Method::PUT && metadata.query_values.len() == 0;
        let is_virtual_host = self.is_virtual_host_style_request(&self.endpoint_url, &metadata.bucket_name) && !is_makebucket;

        let target_url = self.make_target_url(&metadata.bucket_name, &metadata.object_name, &location,
            is_virtual_host, &metadata.query_values)?;
        let target_url = self.make_target_url(
            &metadata.bucket_name,
            &metadata.object_name,
            &location,
            is_virtual_host,
            &metadata.query_values,
        )?;

        let mut req_builder = Request::builder().method(method).uri(target_url.to_string());

@@ -377,10 +399,10 @@ impl TransitionClient {
            value = creds_provider.get_with_context(Some(self.cred_context()))?;
        }

        let mut signer_type = value.signer_type.clone();
        let access_key_id = value.access_key_id;
        let mut signer_type = value.signer_type.clone();
        let access_key_id = value.access_key_id;
        let secret_access_key = value.secret_access_key;
        let session_token = value.session_token;
        let session_token = value.session_token;

        if self.override_signer_type != SignatureType::SignatureDefault {
            signer_type = self.override_signer_type.clone();
@@ -392,24 +414,39 @@ impl TransitionClient {

        if metadata.expires != 0 && metadata.pre_sign_url {
            if signer_type == SignatureType::SignatureAnonymous {
                return Err(std::io::Error::other(err_invalid_argument("Presigned URLs cannot be generated with anonymous credentials.")));
                return Err(std::io::Error::other(err_invalid_argument(
                    "Presigned URLs cannot be generated with anonymous credentials.",
                )));
            }
            if metadata.extra_pre_sign_header.is_some() {
                if signer_type == SignatureType::SignatureV2 {
                    return Err(std::io::Error::other(err_invalid_argument("Extra signed headers for Presign with Signature V2 is not supported.")));
                    return Err(std::io::Error::other(err_invalid_argument(
                        "Extra signed headers for Presign with Signature V2 is not supported.",
                    )));
                }
                for (k, v) in metadata.extra_pre_sign_header.as_ref().unwrap() {
                    req_builder = req_builder.header(k, v);
                }
            }
            if signer_type == SignatureType::SignatureV2 {
                req_builder = signer::pre_sign_v2(req_builder, &access_key_id, &secret_access_key, metadata.expires, is_virtual_host);
                req_builder =
                    signer::pre_sign_v2(req_builder, &access_key_id, &secret_access_key, metadata.expires, is_virtual_host);
            } else if signer_type == SignatureType::SignatureV4 {
                req_builder = signer::pre_sign_v4(req_builder, &access_key_id, &secret_access_key, &session_token, &location, metadata.expires, OffsetDateTime::now_utc());
                req_builder = signer::pre_sign_v4(
                    req_builder,
                    &access_key_id,
                    &secret_access_key,
                    &session_token,
                    &location,
                    metadata.expires,
                    OffsetDateTime::now_utc(),
                );
            }
            let req = match req_builder.body(Body::empty()) {
                Ok(req) => req,
                Err(err) => { return Err(std::io::Error::other(err)); }
                Err(err) => {
                    return Err(std::io::Error::other(err));
                }
            };
            return Ok(req);
        }
@@ -423,7 +460,10 @@ impl TransitionClient {
        //req.content_length = metadata.content_length;
        if metadata.content_length <= -1 {
            let chunked_value = HeaderValue::from_str(&vec!["chunked"].join(",")).expect("err");
            req_builder.headers_mut().expect("err").insert(http::header::TRANSFER_ENCODING, chunked_value);
            req_builder
                .headers_mut()
                .expect("err")
                .insert(http::header::TRANSFER_ENCODING, chunked_value);
        }

        if metadata.content_md5_base64.len() > 0 {
@@ -434,15 +474,17 @@ impl TransitionClient {
        if signer_type == SignatureType::SignatureAnonymous {
            let req = match req_builder.body(Body::empty()) {
                Ok(req) => req,
                Err(err) => { return Err(std::io::Error::other(err)); }
                Err(err) => {
                    return Err(std::io::Error::other(err));
                }
            };
            return Ok(req);
        }

        if signer_type == SignatureType::SignatureV2 {
            req_builder = signer::sign_v2(req_builder, metadata.content_length, &access_key_id, &secret_access_key, is_virtual_host);
        }
        else if metadata.stream_sha256 && !self.secure {
            req_builder =
                signer::sign_v2(req_builder, metadata.content_length, &access_key_id, &secret_access_key, is_virtual_host);
        } else if metadata.stream_sha256 && !self.secure {
            if metadata.trailer.len() > 0 {
                //req.Trailer = metadata.trailer;
                for (_, v) in &metadata.trailer {
@@ -451,8 +493,7 @@ impl TransitionClient {
            }
            //req_builder = signer::streaming_sign_v4(req_builder, &access_key_id,
            //    &secret_access_key, &session_token, &location, metadata.content_length, OffsetDateTime::now_utc(), self.sha256_hasher());
        }
        else {
        } else {
            let mut sha_header = UNSIGNED_PAYLOAD.to_string();
            if metadata.content_sha256_hex != "" {
                sha_header = metadata.content_sha256_hex.clone();
@@ -462,9 +503,17 @@ impl TransitionClient {
            } else if metadata.trailer.len() > 0 {
                sha_header = UNSIGNED_PAYLOAD_TRAILER.to_string();
            }
            req_builder = req_builder.header::<HeaderName, HeaderValue>("X-Amz-Content-Sha256".parse().unwrap(), sha_header.parse().expect("err"));
            req_builder = req_builder
                .header::<HeaderName, HeaderValue>("X-Amz-Content-Sha256".parse().unwrap(), sha_header.parse().expect("err"));

            req_builder = signer::sign_v4_trailer(req_builder, &access_key_id, &secret_access_key, &session_token, &location, metadata.trailer.clone());
            req_builder = signer::sign_v4_trailer(
                req_builder,
                &access_key_id,
                &secret_access_key,
                &session_token,
                &location,
                metadata.trailer.clone(),
            );
        }

        let req;
@@ -482,11 +531,9 @@ impl TransitionClient {
            //req = req_builder.body(s3s::Body::from(metadata.content_body.read_all().await?));
        }

        match req {
        match req {
            Ok(req) => Ok(req),
            Err(err) => {
                Err(std::io::Error::other(err))
            }
            Err(err) => Err(std::io::Error::other(err)),
        }
    }

@@ -498,14 +545,17 @@ impl TransitionClient {
    }*/
    }

    fn make_target_url(&self, bucket_name: &str, object_name: &str, bucket_location: &str, is_virtual_host_style: bool, query_values: &HashMap<String, String>) -> Result<Url, std::io::Error> {
    fn make_target_url(
        &self,
        bucket_name: &str,
        object_name: &str,
        bucket_location: &str,
        is_virtual_host_style: bool,
        query_values: &HashMap<String, String>,
    ) -> Result<Url, std::io::Error> {
        let scheme = self.endpoint_url.scheme();
        let host = self.endpoint_url.host().unwrap();
        let default_port = if scheme == "https" {
            443
        } else {
            80
        };
        let default_port = if scheme == "https" { 443 } else { 80 };
        let port = self.endpoint_url.port().unwrap_or(default_port);

        let mut url_str = format!("{scheme}://{host}:{port}/");
@@ -562,7 +612,7 @@ impl TransitionClient {
}

struct LockedRandSource {
    src: u64,//rand.Source,
    src: u64, //rand.Source,
}

impl LockedRandSource {
@@ -604,76 +654,159 @@ impl TransitionCore {
        Ok(Self(Arc::new(client)))
    }

    pub fn list_objects(&self, bucket: &str, prefix: &str, marker: &str, delimiter: &str, max_keys: i64) -> Result<ListBucketResult, std::io::Error> {
    pub fn list_objects(
        &self,
        bucket: &str,
        prefix: &str,
        marker: &str,
        delimiter: &str,
        max_keys: i64,
    ) -> Result<ListBucketResult, std::io::Error> {
        let client = self.0.clone();
        client.list_objects_query(bucket, prefix, marker, delimiter, max_keys, HeaderMap::new())
    }

    pub async fn list_objects_v2(&self, bucket_name: &str, object_prefix: &str, start_after: &str, continuation_token: &str, delimiter: &str, max_keys: i64) -> Result<ListBucketV2Result, std::io::Error> {
    pub async fn list_objects_v2(
        &self,
        bucket_name: &str,
        object_prefix: &str,
        start_after: &str,
        continuation_token: &str,
        delimiter: &str,
        max_keys: i64,
    ) -> Result<ListBucketV2Result, std::io::Error> {
        let client = self.0.clone();
        client.list_objects_v2_query(bucket_name, object_prefix, continuation_token, true, false, delimiter, start_after, max_keys, HeaderMap::new()).await
        client
            .list_objects_v2_query(
                bucket_name,
                object_prefix,
                continuation_token,
                true,
                false,
                delimiter,
                start_after,
                max_keys,
                HeaderMap::new(),
            )
            .await
    }

    /*pub fn copy_object(&self, source_bucket: &str, source_object: &str, dest_bucket: &str, dest_object: &str, metadata: HashMap<String, String>, src_opts: CopySrcOptions, dst_opts: PutObjectOptions) -> Result<ObjectInfo> {
        self.0.copy_object_do(source_bucket, source_object, dest_bucket, dest_object, metadata, src_opts, dst_opts)
    }*/

    pub fn copy_object_part(&self, src_bucket: &str, src_object: &str, dest_bucket: &str, dest_object: &str, upload_id: &str,
        part_id: i32, start_offset: i32, length: i64, metadata: HashMap<String, String>,
    pub fn copy_object_part(
        &self,
        src_bucket: &str,
        src_object: &str,
        dest_bucket: &str,
        dest_object: &str,
        upload_id: &str,
        part_id: i32,
        start_offset: i32,
        length: i64,
        metadata: HashMap<String, String>,
    ) -> Result<CompletePart, std::io::Error> {
        //self.0.copy_object_part_do(src_bucket, src_object, dest_bucket, dest_object, upload_id,
        //    part_id, start_offset, length, metadata)
        todo!();
    }

    pub async fn put_object(&self, bucket: &str, object: &str, data: ReaderImpl, size: i64, md5_base64: &str, sha256_hex: &str, opts: &PutObjectOptions) -> Result<UploadInfo, std::io::Error> {
        let hook_reader = data;//newHook(data, opts.progress);
    pub async fn put_object(
        &self,
        bucket: &str,
        object: &str,
        data: ReaderImpl,
        size: i64,
        md5_base64: &str,
        sha256_hex: &str,
        opts: &PutObjectOptions,
    ) -> Result<UploadInfo, std::io::Error> {
        let hook_reader = data; //newHook(data, opts.progress);
        let client = self.0.clone();
        client.put_object_do(bucket, object, hook_reader, md5_base64, sha256_hex, size, opts).await
        client
            .put_object_do(bucket, object, hook_reader, md5_base64, sha256_hex, size, opts)
            .await
    }

    pub async fn new_multipart_upload(&self, bucket: &str, object: &str, opts: PutObjectOptions) -> Result<String, std::io::Error> {
    pub async fn new_multipart_upload(
        &self,
        bucket: &str,
        object: &str,
        opts: PutObjectOptions,
    ) -> Result<String, std::io::Error> {
        let client = self.0.clone();
        let result = client.initiate_multipart_upload(bucket, object, &opts).await?;
        Ok(result.upload_id)
    }

    pub fn list_multipart_uploads(&self, bucket: &str, prefix: &str, key_marker: &str, upload_id_marker: &str, delimiter: &str, max_uploads: i64) -> Result<ListMultipartUploadsResult, std::io::Error> {
    pub fn list_multipart_uploads(
        &self,
        bucket: &str,
        prefix: &str,
        key_marker: &str,
        upload_id_marker: &str,
        delimiter: &str,
        max_uploads: i64,
    ) -> Result<ListMultipartUploadsResult, std::io::Error> {
        let client = self.0.clone();
        client.list_multipart_uploads_query(bucket, key_marker, upload_id_marker, prefix, delimiter, max_uploads)
    }

    pub async fn put_object_part(&self, bucket: &str, object: &str, upload_id: &str, part_id: i64,
        data: ReaderImpl, size: i64, opts: PutObjectPartOptions
    pub async fn put_object_part(
        &self,
        bucket: &str,
        object: &str,
        upload_id: &str,
        part_id: i64,
        data: ReaderImpl,
        size: i64,
        opts: PutObjectPartOptions,
    ) -> Result<ObjectPart, std::io::Error> {
        let mut p = UploadPartParams {
            bucket_name: bucket.to_string(),
            object_name: object.to_string(),
            upload_id: upload_id.to_string(),
            reader: data,
            part_number: part_id,
            md5_base64: opts.md5_base64,
            sha256_hex: opts.sha256_hex,
            size: size,
            bucket_name: bucket.to_string(),
            object_name: object.to_string(),
            upload_id: upload_id.to_string(),
            reader: data,
            part_number: part_id,
            md5_base64: opts.md5_base64,
            sha256_hex: opts.sha256_hex,
            size: size,
            //sse: opts.sse,
            stream_sha256: !opts.disable_content_sha256,
            custom_header: opts.custom_header,
            trailer: opts.trailer,
            trailer: opts.trailer,
        };
        let client = self.0.clone();
        client.upload_part(&mut p).await
    }

    pub async fn list_object_parts(&self, bucket: &str, object: &str, upload_id: &str, part_number_marker: i64, max_parts: i64) -> Result<ListObjectPartsResult, std::io::Error> {
    pub async fn list_object_parts(
        &self,
        bucket: &str,
        object: &str,
        upload_id: &str,
        part_number_marker: i64,
        max_parts: i64,
    ) -> Result<ListObjectPartsResult, std::io::Error> {
        let client = self.0.clone();
        client.list_object_parts_query(bucket, object, upload_id, part_number_marker, max_parts).await
        client
            .list_object_parts_query(bucket, object, upload_id, part_number_marker, max_parts)
            .await
    }

    pub async fn complete_multipart_upload(&self, bucket: &str, object: &str, upload_id: &str, parts: &[CompletePart], opts: PutObjectOptions) -> Result<UploadInfo, std::io::Error> {
    pub async fn complete_multipart_upload(
        &self,
        bucket: &str,
        object: &str,
        upload_id: &str,
        parts: &[CompletePart],
        opts: PutObjectOptions,
    ) -> Result<UploadInfo, std::io::Error> {
        let client = self.0.clone();
        let res = client.complete_multipart_upload(bucket, object, upload_id, CompleteMultipartUpload {
            parts: parts.to_vec(),
        }, &opts).await?;
        let res = client
            .complete_multipart_upload(bucket, object, upload_id, CompleteMultipartUpload { parts: parts.to_vec() }, &opts)
            .await?;
        Ok(res)
    }

@@ -692,7 +825,12 @@ impl TransitionCore {
        client.put_bucket_policy(bucket_name, bucket_policy).await
    }

    pub async fn get_object(&self, bucket_name: &str, object_name: &str, opts: &GetObjectOptions) -> Result<(ObjectInfo, HeaderMap, ReadCloser), std::io::Error> {
    pub async fn get_object(
        &self,
        bucket_name: &str,
        object_name: &str,
        opts: &GetObjectOptions,
    ) -> Result<(ObjectInfo, HeaderMap, ReadCloser), std::io::Error> {
        let client = self.0.clone();
        client.get_object_inner(bucket_name, object_name, opts).await
    }
@@ -709,8 +847,8 @@ pub struct PutObjectPartOptions {

#[derive(Debug, Clone, Deserialize, Serialize)]
pub struct ObjectInfo {
    pub etag: String,
    pub name: String,
    pub etag: String,
    pub name: String,
    pub mod_time: OffsetDateTime,
    pub size: usize,
    pub content_type: Option<String>,
@@ -730,16 +868,16 @@ pub struct ObjectInfo {
    #[serde(skip, default = "replication_status_default")]
    pub replication_status: ReplicationStatus,
    pub replication_ready: bool,
    pub expiration: OffsetDateTime,
    pub expiration: OffsetDateTime,
    pub expiration_rule_id: String,
    pub num_versions: usize,


    pub restore: RestoreInfo,

    pub checksum_crc32: String,
    pub checksum_crc32c: String,
    pub checksum_sha1: String,
    pub checksum_sha256: String,
    pub checksum_crc32: String,
    pub checksum_crc32c: String,
    pub checksum_sha1: String,
    pub checksum_sha256: String,
    pub checksum_crc64nvme: String,
    pub checksum_mode: String,
}
@@ -767,22 +905,21 @@ impl Default for ObjectInfo {
            version_id: Uuid::nil(),
            replication_status: ReplicationStatus::from_static(ReplicationStatus::PENDING),
            replication_ready: false,
            expiration: OffsetDateTime::now_utc(),
            expiration: OffsetDateTime::now_utc(),
            expiration_rule_id: "".to_string(),
            num_versions: 0,
            restore: RestoreInfo::default(),
            checksum_crc32: "".to_string(),
            checksum_crc32c: "".to_string(),
            checksum_sha1: "".to_string(),
            checksum_sha256: "".to_string(),
            checksum_crc32: "".to_string(),
            checksum_crc32c: "".to_string(),
            checksum_sha1: "".to_string(),
            checksum_sha256: "".to_string(),
            checksum_crc64nvme: "".to_string(),
            checksum_mode: "".to_string(),
        }
    }
}

#[derive(Serialize, Deserialize)]
#[derive(Debug, Clone)]
#[derive(Serialize, Deserialize, Debug, Clone)]
pub struct RestoreInfo {
    ongoing_restore: bool,
    expiry_time: OffsetDateTime,
@@ -809,19 +946,19 @@ pub struct ObjectMultipartInfo {
}

pub struct UploadInfo {
    pub bucket: String,
    pub key: String,
    pub etag: String,
    pub size: i64,
    pub bucket: String,
    pub key: String,
    pub etag: String,
    pub size: i64,
    pub last_modified: OffsetDateTime,
    pub location: String,
    pub version_id: String,
    pub expiration: OffsetDateTime,
    pub location: String,
    pub version_id: String,
    pub expiration: OffsetDateTime,
    pub expiration_rule_id: String,
    pub checksum_crc32: String,
    pub checksum_crc32c: String,
    pub checksum_sha1: String,
    pub checksum_sha256: String,
    pub checksum_crc32: String,
    pub checksum_crc32c: String,
    pub checksum_sha1: String,
    pub checksum_sha256: String,
    pub checksum_crc64nvme: String,
    pub checksum_mode: String,
}
@@ -885,4 +1022,4 @@ impl tower::Service<Request<Body>> for SendRequest {
}

#[derive(Serialize, Deserialize)]
pub struct Document(pub String);
pub struct Document(pub String);

@@ -1,4 +1,4 @@
use s3s::{S3ErrorCode, S3Error};
use s3s::{S3Error, S3ErrorCode};

use rustfs_utils::path::decode_dir_object;

@@ -727,16 +727,16 @@ pub fn to_object_err(err: Error, params: Vec<&str>) -> Error {
}

pub fn is_network_or_host_down(err: &str, expect_timeouts: bool) -> bool {
    err.contains("Connection closed by foreign host") ||
    err.contains("TLS handshake timeout") ||
    err.contains("i/o timeout") ||
    err.contains("connection timed out") ||
    err.contains("connection reset by peer") ||
    err.contains("broken pipe") ||
    err.to_lowercase().contains("503 service unavailable") ||
    err.contains("use of closed network connection") ||
    err.contains("An existing connection was forcibly closed by the remote host") ||
    err.contains("client error (Connect)")
    err.contains("Connection closed by foreign host")
        || err.contains("TLS handshake timeout")
        || err.contains("i/o timeout")
        || err.contains("connection timed out")
        || err.contains("connection reset by peer")
        || err.contains("broken pipe")
        || err.to_lowercase().contains("503 service unavailable")
        || err.contains("use of closed network connection")
        || err.contains("An existing connection was forcibly closed by the remote host")
        || err.contains("client error (Connect)")
}

#[derive(Debug, Default, PartialEq, Eq)]
@@ -834,7 +834,7 @@ pub fn error_resp_to_object_err(err: ErrorResponse, params: Vec<&str>) -> std::i
            err = std::io::Error::other(StorageError::BucketNameInvalid(bucket));
        }
        S3ErrorCode::InvalidPart => {
            err = std::io::Error::other(StorageError::InvalidPart(0, bucket, object/* , version_id */));
            err = std::io::Error::other(StorageError::InvalidPart(0, bucket, object /* , version_id */));
        }
        S3ErrorCode::NoSuchBucket => {
            err = std::io::Error::other(StorageError::BucketNotFound(bucket));
@@ -848,7 +848,7 @@ pub fn error_resp_to_object_err(err: ErrorResponse, params: Vec<&str>) -> std::i
        }
        S3ErrorCode::NoSuchVersion => {
            if object != "" {
                err = std::io::Error::other(StorageError::ObjectNotFound(bucket, object));//, version_id);
                err = std::io::Error::other(StorageError::ObjectNotFound(bucket, object)); //, version_id);
            } else {
                err = std::io::Error::other(StorageError::BucketNotFound(bucket));
            }
@@ -894,9 +894,15 @@ pub fn storage_to_object_err(err: Error, params: Vec<&str>) -> S3Error {
        StorageError::InvalidArgument(bucket, object, version_id) => {
            s3_error!(InvalidArgument, "Invalid arguments provided for {}/{}-{}", bucket, object, version_id)
        }*/
        StorageError::MethodNotAllowed => {
            S3Error::with_message(S3ErrorCode::MethodNotAllowed, ObjectApiError::MethodNotAllowed(GenericError {bucket: bucket, object: object, ..Default::default()}).to_string())
        }
        StorageError::MethodNotAllowed => S3Error::with_message(
            S3ErrorCode::MethodNotAllowed,
            ObjectApiError::MethodNotAllowed(GenericError {
                bucket: bucket,
                object: object,
                ..Default::default()
            })
            .to_string(),
        ),
        /*StorageError::BucketNotFound(bucket) => {
            s3_error!(NoSuchBucket, "bucket not found {}", bucket)
        }

@@ -1,3 +1,3 @@
pub mod name;
pub mod targetid;
pub mod targetlist;
pub mod targetlist;

@@ -55,7 +55,7 @@ impl EventName {
}

impl AsRef<str> for EventName {
    fn as_ref(&self) -> &str {
    fn as_ref(&self) -> &str {
        match self {
            EventName::BucketCreated => "s3:BucketCreated:*",
            EventName::BucketRemoved => "s3:BucketRemoved:*",
@@ -103,123 +103,45 @@ impl AsRef<str> for EventName {
impl From<&str> for EventName {
    fn from(s: &str) -> Self {
        match s {
            "s3:BucketCreated:*" => {
                EventName::BucketCreated
            }
            "s3:BucketRemoved:*" => {
                EventName::BucketRemoved
            }
            "s3:ObjectAccessed:*" => {
                EventName::ObjectAccessedAll
            }
            "s3:ObjectAccessed:Get" => {
                EventName::ObjectAccessedGet
            }
            "s3:ObjectAccessed:GetRetention" => {
                EventName::ObjectAccessedGetRetention
            }
            "s3:ObjectAccessed:GetLegalHold" => {
                EventName::ObjectAccessedGetLegalHold
            }
            "s3:ObjectAccessed:Head" => {
                EventName::ObjectAccessedHead
            }
            "s3:ObjectAccessed:Attributes" => {
                EventName::ObjectAccessedAttributes
            }
            "s3:ObjectCreated:*" => {
                EventName::ObjectCreatedAll
            }
            "s3:ObjectCreated:CompleteMultipartUpload" => {
                EventName::ObjectCreatedCompleteMultipartUpload
            }
            "s3:ObjectCreated:Copy" => {
                EventName::ObjectCreatedCopy
            }
            "s3:ObjectCreated:Post" => {
                EventName::ObjectCreatedPost
            }
            "s3:ObjectCreated:Put" => {
                EventName::ObjectCreatedPut
            }
            "s3:ObjectCreated:PutRetention" => {
                EventName::ObjectCreatedPutRetention
            }
            "s3:ObjectCreated:PutLegalHold" => {
                EventName::ObjectCreatedPutLegalHold
            }
            "s3:ObjectCreated:PutTagging" => {
                EventName::ObjectCreatedPutTagging
            }
            "s3:ObjectCreated:DeleteTagging" => {
                EventName::ObjectCreatedDeleteTagging
            }
            "s3:ObjectRemoved:*" => {
                EventName::ObjectRemovedAll
            }
            "s3:ObjectRemoved:Delete" => {
                EventName::ObjectRemovedDelete
            }
            "s3:ObjectRemoved:DeleteMarkerCreated" => {
                EventName::ObjectRemovedDeleteMarkerCreated
            }
            "s3:ObjectRemoved:NoOP" => {
                EventName::ObjectRemovedNoOP
            }
            "s3:ObjectRemoved:DeleteAllVersions" => {
                EventName::ObjectRemovedDeleteAllVersions
            }
            "s3:LifecycleDelMarkerExpiration:Delete" => {
                EventName::ILMDelMarkerExpirationDelete
            }
            "s3:Replication:*" => {
                EventName::ObjectReplicationAll
            }
            "s3:Replication:OperationFailedReplication" => {
                EventName::ObjectReplicationFailed
            }
            "s3:Replication:OperationCompletedReplication" => {
                EventName::ObjectReplicationComplete
            }
            "s3:Replication:OperationMissedThreshold" => {
                EventName::ObjectReplicationMissedThreshold
            }
            "s3:Replication:OperationReplicatedAfterThreshold" => {
                EventName::ObjectReplicationReplicatedAfterThreshold
            }
            "s3:Replication:OperationNotTracked" => {
                EventName::ObjectReplicationNotTracked
            }
            "s3:ObjectRestore:*" => {
                EventName::ObjectRestoreAll
            }
            "s3:ObjectRestore:Post" => {
                EventName::ObjectRestorePost
            }
            "s3:ObjectRestore:Completed" => {
                EventName::ObjectRestoreCompleted
            }
            "s3:ObjectTransition:Failed" => {
                EventName::ObjectTransitionFailed
            }
            "s3:ObjectTransition:Complete" => {
                EventName::ObjectTransitionComplete
            }
            "s3:ObjectTransition:*" => {
                EventName::ObjectTransitionAll
            }
            "s3:Scanner:ManyVersions" => {
                EventName::ObjectManyVersions
            }
            "s3:Scanner:LargeVersions" => {
                EventName::ObjectLargeVersions
            }
            "s3:Scanner:BigPrefix" => {
                EventName::PrefixManyFolders
            }
            _ => {
                EventName::Everything
            }
            "s3:BucketCreated:*" => EventName::BucketCreated,
            "s3:BucketRemoved:*" => EventName::BucketRemoved,
            "s3:ObjectAccessed:*" => EventName::ObjectAccessedAll,
            "s3:ObjectAccessed:Get" => EventName::ObjectAccessedGet,
            "s3:ObjectAccessed:GetRetention" => EventName::ObjectAccessedGetRetention,
            "s3:ObjectAccessed:GetLegalHold" => EventName::ObjectAccessedGetLegalHold,
            "s3:ObjectAccessed:Head" => EventName::ObjectAccessedHead,
            "s3:ObjectAccessed:Attributes" => EventName::ObjectAccessedAttributes,
            "s3:ObjectCreated:*" => EventName::ObjectCreatedAll,
            "s3:ObjectCreated:CompleteMultipartUpload" => EventName::ObjectCreatedCompleteMultipartUpload,
            "s3:ObjectCreated:Copy" => EventName::ObjectCreatedCopy,
            "s3:ObjectCreated:Post" => EventName::ObjectCreatedPost,
            "s3:ObjectCreated:Put" => EventName::ObjectCreatedPut,
            "s3:ObjectCreated:PutRetention" => EventName::ObjectCreatedPutRetention,
            "s3:ObjectCreated:PutLegalHold" => EventName::ObjectCreatedPutLegalHold,
            "s3:ObjectCreated:PutTagging" => EventName::ObjectCreatedPutTagging,
            "s3:ObjectCreated:DeleteTagging" => EventName::ObjectCreatedDeleteTagging,
            "s3:ObjectRemoved:*" => EventName::ObjectRemovedAll,
            "s3:ObjectRemoved:Delete" => EventName::ObjectRemovedDelete,
            "s3:ObjectRemoved:DeleteMarkerCreated" => EventName::ObjectRemovedDeleteMarkerCreated,
            "s3:ObjectRemoved:NoOP" => EventName::ObjectRemovedNoOP,
            "s3:ObjectRemoved:DeleteAllVersions" => EventName::ObjectRemovedDeleteAllVersions,
            "s3:LifecycleDelMarkerExpiration:Delete" => EventName::ILMDelMarkerExpirationDelete,
            "s3:Replication:*" => EventName::ObjectReplicationAll,
            "s3:Replication:OperationFailedReplication" => EventName::ObjectReplicationFailed,
            "s3:Replication:OperationCompletedReplication" => EventName::ObjectReplicationComplete,
            "s3:Replication:OperationMissedThreshold" => EventName::ObjectReplicationMissedThreshold,
            "s3:Replication:OperationReplicatedAfterThreshold" => EventName::ObjectReplicationReplicatedAfterThreshold,
            "s3:Replication:OperationNotTracked" => EventName::ObjectReplicationNotTracked,
            "s3:ObjectRestore:*" => EventName::ObjectRestoreAll,
            "s3:ObjectRestore:Post" => EventName::ObjectRestorePost,
            "s3:ObjectRestore:Completed" => EventName::ObjectRestoreCompleted,
            "s3:ObjectTransition:Failed" => EventName::ObjectTransitionFailed,
            "s3:ObjectTransition:Complete" => EventName::ObjectTransitionComplete,
            "s3:ObjectTransition:*" => EventName::ObjectTransitionAll,
            "s3:Scanner:ManyVersions" => EventName::ObjectManyVersions,
            "s3:Scanner:LargeVersions" => EventName::ObjectLargeVersions,
            "s3:Scanner:BigPrefix" => EventName::PrefixManyFolders,
            _ => EventName::Everything,
        }
    }
}
}

@@ -1,6 +1,6 @@
pub struct TargetID {
    id: String,
    name: String,
    id: String,
    name: String,
}

impl TargetID {

@@ -4,9 +4,9 @@ use super::targetid::TargetID;

#[derive(Default)]
pub struct TargetList {
    pub current_send_calls: AtomicI64,
    pub total_events: AtomicI64,
    pub events_skipped: AtomicI64,
    pub current_send_calls: AtomicI64,
    pub total_events: AtomicI64,
    pub events_skipped: AtomicI64,
    pub events_errors_total: AtomicI64,
    //pub targets: HashMap<TargetID, Target>,
    //pub queue: AsyncEvent,
@@ -26,6 +26,6 @@ struct TargetStat {
}

struct TargetIDResult {
    id: TargetID,
    id: TargetID,
    err: std::io::Error,
}

@@ -2,11 +2,11 @@ use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;

use crate::store_api::ObjectInfo;
use crate::bucket::metadata::BucketMetadata;
use crate::event::name::EventName;
use crate::event::targetlist::TargetList;
use crate::store::ECStore;
use crate::bucket::metadata::BucketMetadata;
use crate::store_api::ObjectInfo;

pub struct EventNotifier {
    target_list: TargetList,
@@ -16,7 +16,7 @@ pub struct EventNotifier {
impl EventNotifier {
    pub fn new() -> Arc<RwLock<Self>> {
        Arc::new(RwLock::new(Self {
            target_list: TargetList::new(),
            target_list: TargetList::new(),
            //bucket_rules_map: HashMap::new(),
        }))
    }
@@ -54,8 +54,6 @@ pub struct EventArgs {
    pub user_agent: String,
}

impl EventArgs {
}
impl EventArgs {}

pub fn send_event(args: EventArgs) {
}
pub fn send_event(args: EventArgs) {}

@@ -9,13 +9,13 @@ use uuid::Uuid;
|
||||
|
||||
use crate::heal::mrf::MRFState;
|
||||
use crate::{
|
||||
bucket::lifecycle::bucket_lifecycle_ops::LifecycleSys,
|
||||
disk::DiskStore,
|
||||
endpoints::{EndpointServerPools, PoolEndpoints, SetupType},
|
||||
heal::{background_heal_ops::HealRoutine, heal_ops::AllHealState},
|
||||
bucket::lifecycle::bucket_lifecycle_ops::LifecycleSys,
|
||||
tier::tier::TierConfigMgr,
|
||||
event_notification::EventNotifier,
|
||||
heal::{background_heal_ops::HealRoutine, heal_ops::AllHealState},
|
||||
store::ECStore,
|
||||
tier::tier::TierConfigMgr,
|
||||
};
|
||||
|
||||
pub const DISK_ASSUME_UNKNOWN_SIZE: u64 = 1 << 30;
|
||||
@@ -26,29 +26,29 @@ pub const DISK_RESERVE_FRACTION: f64 = 0.15;
|
||||
pub const DEFAULT_PORT: u16 = 9000;
|
||||
|
||||
lazy_static! {
|
||||
static ref GLOBAL_RUSTFS_PORT: OnceLock<u16> = OnceLock::new();
|
||||
pub static ref GLOBAL_OBJECT_API: OnceLock<Arc<ECStore>> = OnceLock::new();
|
||||
pub static ref GLOBAL_LOCAL_DISK: Arc<RwLock<Vec<Option<DiskStore>>>> = Arc::new(RwLock::new(Vec::new()));
|
||||
pub static ref GLOBAL_IsErasure: RwLock<bool> = RwLock::new(false);
|
||||
pub static ref GLOBAL_IsDistErasure: RwLock<bool> = RwLock::new(false);
|
||||
pub static ref GLOBAL_IsErasureSD: RwLock<bool> = RwLock::new(false);
|
||||
pub static ref GLOBAL_LOCAL_DISK_MAP: Arc<RwLock<HashMap<String, Option<DiskStore>>>> = Arc::new(RwLock::new(HashMap::new()));
|
||||
pub static ref GLOBAL_LOCAL_DISK_SET_DRIVES: Arc<RwLock<TypeLocalDiskSetDrives>> = Arc::new(RwLock::new(Vec::new()));
|
||||
pub static ref GLOBAL_Endpoints: OnceLock<EndpointServerPools> = OnceLock::new();
|
||||
pub static ref GLOBAL_RootDiskThreshold: RwLock<u64> = RwLock::new(0);
|
||||
pub static ref GLOBAL_BackgroundHealRoutine: Arc<HealRoutine> = HealRoutine::new();
|
||||
pub static ref GLOBAL_BackgroundHealState: Arc<AllHealState> = AllHealState::new(false);
|
||||
pub static ref GLOBAL_TierConfigMgr: Arc<RwLock<TierConfigMgr>> = TierConfigMgr::new();
|
||||
pub static ref GLOBAL_LifecycleSys: Arc<LifecycleSys> = LifecycleSys::new();
|
||||
pub static ref GLOBAL_EventNotifier: Arc<RwLock<EventNotifier>> = EventNotifier::new();
|
||||
//pub static ref GLOBAL_RemoteTargetTransport
|
||||
pub static ref GLOBAL_ALlHealState: Arc<AllHealState> = AllHealState::new(false);
|
||||
pub static ref GLOBAL_MRFState: Arc<MRFState> = Arc::new(MRFState::new());
|
||||
static ref globalDeploymentIDPtr: OnceLock<Uuid> = OnceLock::new();
|
||||
pub static ref GLOBAL_BOOT_TIME: OnceCell<SystemTime> = OnceCell::new();
|
||||
pub static ref GLOBAL_LocalNodeName: String = "127.0.0.1:9000".to_string();
|
||||
pub static ref GLOBAL_LocalNodeNameHex: String = rustfs_utils::crypto::hex(GLOBAL_LocalNodeName.as_bytes());
|
||||
pub static ref GLOBAL_NodeNamesHex: HashMap<String, ()> = HashMap::new();}
|
||||
static ref GLOBAL_RUSTFS_PORT: OnceLock<u16> = OnceLock::new();
|
||||
pub static ref GLOBAL_OBJECT_API: OnceLock<Arc<ECStore>> = OnceLock::new();
|
||||
pub static ref GLOBAL_LOCAL_DISK: Arc<RwLock<Vec<Option<DiskStore>>>> = Arc::new(RwLock::new(Vec::new()));
|
||||
pub static ref GLOBAL_IsErasure: RwLock<bool> = RwLock::new(false);
|
||||
pub static ref GLOBAL_IsDistErasure: RwLock<bool> = RwLock::new(false);
|
||||
pub static ref GLOBAL_IsErasureSD: RwLock<bool> = RwLock::new(false);
|
||||
pub static ref GLOBAL_LOCAL_DISK_MAP: Arc<RwLock<HashMap<String, Option<DiskStore>>>> = Arc::new(RwLock::new(HashMap::new()));
|
||||
pub static ref GLOBAL_LOCAL_DISK_SET_DRIVES: Arc<RwLock<TypeLocalDiskSetDrives>> = Arc::new(RwLock::new(Vec::new()));
|
||||
pub static ref GLOBAL_Endpoints: OnceLock<EndpointServerPools> = OnceLock::new();
|
||||
pub static ref GLOBAL_RootDiskThreshold: RwLock<u64> = RwLock::new(0);
|
||||
pub static ref GLOBAL_BackgroundHealRoutine: Arc<HealRoutine> = HealRoutine::new();
|
||||
pub static ref GLOBAL_BackgroundHealState: Arc<AllHealState> = AllHealState::new(false);
|
||||
pub static ref GLOBAL_TierConfigMgr: Arc<RwLock<TierConfigMgr>> = TierConfigMgr::new();
|
||||
pub static ref GLOBAL_LifecycleSys: Arc<LifecycleSys> = LifecycleSys::new();
|
||||
pub static ref GLOBAL_EventNotifier: Arc<RwLock<EventNotifier>> = EventNotifier::new();
|
||||
//pub static ref GLOBAL_RemoteTargetTransport
|
||||
pub static ref GLOBAL_ALlHealState: Arc<AllHealState> = AllHealState::new(false);
|
||||
pub static ref GLOBAL_MRFState: Arc<MRFState> = Arc::new(MRFState::new());
|
||||
static ref globalDeploymentIDPtr: OnceLock<Uuid> = OnceLock::new();
|
||||
pub static ref GLOBAL_BOOT_TIME: OnceCell<SystemTime> = OnceCell::new();
|
||||
pub static ref GLOBAL_LocalNodeName: String = "127.0.0.1:9000".to_string();
|
||||
pub static ref GLOBAL_LocalNodeNameHex: String = rustfs_utils::crypto::hex(GLOBAL_LocalNodeName.as_bytes());
|
||||
pub static ref GLOBAL_NodeNamesHex: HashMap<String, ()> = HashMap::new();}
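A side note on the block above (not part of the diff): OnceLock globals such as GLOBAL_OBJECT_API are written once at startup and read everywhere afterwards. A minimal sketch of that access pattern, with a String standing in for Arc<ECStore>:

use std::sync::{Arc, OnceLock};

static STORE: OnceLock<Arc<String>> = OnceLock::new(); // stand-in for Arc<ECStore>

fn set_store(s: Arc<String>) {
    // set() returns Err if the global was already initialized; the first write wins.
    let _ = STORE.set(s);
}

fn get_store() -> Option<Arc<String>> {
    // get() yields None until startup has populated the global.
    STORE.get().cloned()
}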

/// Get the global rustfs port
pub fn global_rustfs_port() -> u16 {

@@ -6,16 +6,15 @@ use std::{
path::{Path, PathBuf},
pin::Pin,
sync::{
atomic::{AtomicBool, AtomicU32, AtomicU64, Ordering},
Arc,
atomic::{AtomicBool, AtomicU32, AtomicU64, Ordering},
},
time::{Duration, SystemTime},
};

use time::{self, OffsetDateTime};
use common::defer;
use time::{self, OffsetDateTime};

use rustfs_utils::path::encode_dir_object;
use super::{
data_scanner_metric::{ScannerMetric, ScannerMetrics, globalScannerMetrics},
data_usage::{DATA_USAGE_BLOOM_NAME_PATH, store_data_usage_in_backend},
@@ -23,28 +22,25 @@ use super::{
heal_commands::{HEAL_DEEP_SCAN, HEAL_NORMAL_SCAN, HealScanMode},
};
use crate::bucket::{
object_lock::objectlock_sys::{
enforce_retention_for_deletion,
BucketObjectLockSys,
}, utils::is_meta_bucketname,
object_lock::objectlock_sys::{BucketObjectLockSys, enforce_retention_for_deletion},
utils::is_meta_bucketname,
};
use crate::cmd::bucket_replication::queue_replication_heal;
use crate::event::name::EventName;
use crate::{
bucket::{
lifecycle::{
bucket_lifecycle_audit::{LcAuditEvent, LcEventSrc},
lifecycle::{self, Lifecycle, ExpirationOptions},
bucket_lifecycle_ops::{
self, GLOBAL_ExpiryState, GLOBAL_TransitionState, LifecycleOps,
expire_transitioned_object,
},
bucket_lifecycle_ops::{self, GLOBAL_ExpiryState, GLOBAL_TransitionState, LifecycleOps, expire_transitioned_object},
lifecycle::{self, ExpirationOptions, Lifecycle},
},
metadata_sys
metadata_sys,
},
event_notification::{send_event, EventArgs},
global::GLOBAL_LocalNodeName, heal::{data_scanner},
event_notification::{EventArgs, send_event},
global::GLOBAL_LocalNodeName,
heal::data_scanner,
store_api::{ObjectOptions, ObjectToDelete, StorageAPI},
};
use crate::cmd::bucket_replication::queue_replication_heal;
use crate::{
bucket::{versioning::VersioningApi, versioning_sys::BucketVersioningSys},
cmd::bucket_replication::ReplicationStatusType,
@@ -70,7 +66,6 @@ use crate::{
peer::is_reserved_or_invalid_bucket,
store::ECStore,
};
use crate::event::name::EventName;
use crate::{disk::DiskAPI, store_api::ObjectInfo};
use crate::{
disk::error::DiskError,
@@ -82,8 +77,12 @@ use lazy_static::lazy_static;
use rand::Rng;
use rmp_serde::{Deserializer, Serializer};
use rustfs_filemeta::{FileInfo, MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams};
use rustfs_utils::path::encode_dir_object;
use rustfs_utils::path::{SLASH_SEPARATOR, path_join, path_to_bucket_object, path_to_bucket_object_with_base_path};
use s3s::dto::{BucketLifecycleConfiguration, DefaultRetention, ExpirationStatus, LifecycleRule, ReplicationConfiguration, ReplicationRuleStatus, VersioningConfiguration};
use s3s::dto::{
BucketLifecycleConfiguration, DefaultRetention, ExpirationStatus, LifecycleRule, ReplicationConfiguration,
ReplicationRuleStatus, VersioningConfiguration,
};
use serde::{Deserialize, Serialize};
use tokio::{
sync::{
@@ -561,20 +560,33 @@ impl ScannerItem {
lr = BucketObjectLockSys::get(&self.bucket).await;
rcfg = if let Ok(replication_config) = metadata_sys::get_replication_config(&self.bucket).await {
Some(replication_config)
} else { None };
} else {
None
};
}

let lc_evt = eval_action_from_lifecycle(self.lifecycle.as_ref().expect("err"), lr, rcfg, oi).await;
if self.debug {
if !version_id.is_none() {
info!("lifecycle: {} (version-id={}), Initial scan: {}", self.object_path().to_string_lossy().to_string(), version_id.expect("err"), lc_evt.action);
info!(
"lifecycle: {} (version-id={}), Initial scan: {}",
self.object_path().to_string_lossy().to_string(),
version_id.expect("err"),
lc_evt.action
);
} else {
info!("lifecycle: {} Initial scan: {}", self.object_path().to_string_lossy().to_string(), lc_evt.action);
info!(
"lifecycle: {} Initial scan: {}",
self.object_path().to_string_lossy().to_string(),
lc_evt.action
);
}
}

match lc_evt.action {
lifecycle::IlmAction::DeleteVersionAction | lifecycle::IlmAction::DeleteAllVersionsAction | lifecycle::IlmAction::DelMarkerDeleteAllVersionsAction => {
lifecycle::IlmAction::DeleteVersionAction
| lifecycle::IlmAction::DeleteAllVersionsAction
| lifecycle::IlmAction::DelMarkerDeleteAllVersionsAction => {
size = 0;
}
lifecycle::IlmAction::DeleteAction => {
@@ -582,7 +594,7 @@ impl ScannerItem {
size = 0
}
}
_ => ()
_ => (),
}

apply_lifecycle_action(&lc_evt, &LcEventSrc::Scanner, oi).await;
@@ -612,7 +624,9 @@ impl ScannerItem {

let lock_enabled = if let Some(rcfg) = BucketObjectLockSys::get(&self.bucket).await {
rcfg.mode.is_some()
} else { false };
} else {
false
};
let vcfg = BucketVersioningSys::get(&self.bucket).await?;

let versioned = match BucketVersioningSys::get(&self.bucket).await {
@@ -633,28 +647,43 @@ impl ScannerItem {
return Ok(object_infos);
}

let event = self.lifecycle.as_ref().expect("lifecycle err.").noncurrent_versions_expiration_limit(&lifecycle::ObjectOpts {
name: self.object_path().to_string_lossy().to_string(),
..Default::default()
}).await;
let event = self
.lifecycle
.as_ref()
.expect("lifecycle err.")
.noncurrent_versions_expiration_limit(&lifecycle::ObjectOpts {
name: self.object_path().to_string_lossy().to_string(),
..Default::default()
})
.await;
let lim = event.newer_noncurrent_versions;
if lim == 0 || fivs.len() <= lim+1 {
if lim == 0 || fivs.len() <= lim + 1 {
for fi in fivs.iter() {
object_infos.push(ObjectInfo::from_file_info(fi, &self.bucket, &self.object_path().to_string_lossy(), versioned));
object_infos.push(ObjectInfo::from_file_info(
fi,
&self.bucket,
&self.object_path().to_string_lossy(),
versioned,
));
}
return Ok(object_infos);
}

let overflow_versions = &fivs[lim+1..];
for fi in fivs[..lim+1].iter() {
object_infos.push(ObjectInfo::from_file_info(fi, &self.bucket, &self.object_path().to_string_lossy(), versioned));
let overflow_versions = &fivs[lim + 1..];
for fi in fivs[..lim + 1].iter() {
object_infos.push(ObjectInfo::from_file_info(
fi,
&self.bucket,
&self.object_path().to_string_lossy(),
versioned,
));
}

let mut to_del = Vec::<ObjectToDelete>::with_capacity(overflow_versions.len());
for fi in overflow_versions.iter() {
let obj = ObjectInfo::from_file_info(fi, &self.bucket, &self.object_path().to_string_lossy(), versioned);
if lock_enabled && enforce_retention_for_deletion(&obj) {
//if enforce_retention_for_deletion(&obj) {
//if enforce_retention_for_deletion(&obj) {
if self.debug {
if obj.version_id.is_some() {
info!("lifecycle: {} v({}) is locked, not deleting\n", obj.name, obj.version_id.expect("err"));
@@ -666,7 +695,10 @@ impl ScannerItem {
continue;
}

if OffsetDateTime::now_utc().unix_timestamp() < lifecycle::expected_expiry_time(obj.successor_mod_time.expect("err"), event.noncurrent_days as i32).unix_timestamp() {
if OffsetDateTime::now_utc().unix_timestamp()
< lifecycle::expected_expiry_time(obj.successor_mod_time.expect("err"), event.noncurrent_days as i32)
.unix_timestamp()
{
object_infos.push(obj);
continue;
}
@@ -688,7 +720,7 @@ impl ScannerItem {

pub async fn apply_actions(&mut self, oi: &ObjectInfo, _size_s: &mut SizeSummary) -> (bool, usize) {
let done = ScannerMetrics::time(ScannerMetric::Ilm);


let (action, size) = self.apply_lifecycle(oi).await;

info!(
@@ -1535,15 +1567,18 @@ pub async fn scan_data_folder(
Ok(s.new_cache)
}

pub async fn eval_action_from_lifecycle(lc: &BucketLifecycleConfiguration, lr: Option<DefaultRetention>, rcfg: Option<(ReplicationConfiguration, OffsetDateTime)>, oi: &ObjectInfo) -> lifecycle::Event {
pub async fn eval_action_from_lifecycle(
lc: &BucketLifecycleConfiguration,
lr: Option<DefaultRetention>,
rcfg: Option<(ReplicationConfiguration, OffsetDateTime)>,
oi: &ObjectInfo,
) -> lifecycle::Event {
let event = lc.eval(&oi.to_lifecycle_opts()).await;
//if serverDebugLog {
info!("lifecycle: Secondary scan: {}", event.action);
info!("lifecycle: Secondary scan: {}", event.action);
//}

let lock_enabled = if let Some(lr) = lr {
lr.mode.is_some()
} else { false };
let lock_enabled = if let Some(lr) = lr { lr.mode.is_some() } else { false };

match event.action {
lifecycle::IlmAction::DeleteAllVersionsAction | lifecycle::IlmAction::DelMarkerDeleteAllVersionsAction => {
@@ -1557,11 +1592,11 @@ pub async fn eval_action_from_lifecycle(lc: &BucketLifecycleConfiguration, lr: O
}
if lock_enabled && enforce_retention_for_deletion(oi) {
//if serverDebugLog {
if !oi.version_id.is_none() {
info!("lifecycle: {} v({}) is locked, not deleting", oi.name, oi.version_id.expect("err"));
} else {
info!("lifecycle: {} is locked, not deleting", oi.name);
}
if !oi.version_id.is_none() {
info!("lifecycle: {} v({}) is locked, not deleting", oi.name, oi.version_id.expect("err"));
} else {
info!("lifecycle: {} is locked, not deleting", oi.name);
}
//}
return lifecycle::Event::default();
}
@@ -1571,7 +1606,7 @@ pub async fn eval_action_from_lifecycle(lc: &BucketLifecycleConfiguration, lr: O
}
}
}
_ => ()
_ => (),
}

event
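For orientation (a sketch, not code from this commit): the scanner pairs this evaluation with the dispatcher defined further down, roughly as follows, where lc and oi stand for the lifecycle config and object the scanner already holds.

// Hypothetical condensed flow; object-lock and replication configs omitted (None).
async fn scan_one(lc: &BucketLifecycleConfiguration, oi: &ObjectInfo) -> bool {
    let evt = eval_action_from_lifecycle(lc, None, None, oi).await;
    // Returns true when an expiry or transition rule was actually applied.
    apply_lifecycle_action(&evt, &LcEventSrc::Scanner, oi).await
}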
@@ -1585,7 +1620,12 @@ async fn apply_transition_rule(event: &lifecycle::Event, src: &LcEventSrc, oi: &
true
}

pub async fn apply_expiry_on_transitioned_object(api: Arc<ECStore>, oi: &ObjectInfo, lc_event: &lifecycle::Event, src: &LcEventSrc) -> bool {
pub async fn apply_expiry_on_transitioned_object(
api: Arc<ECStore>,
oi: &ObjectInfo,
lc_event: &lifecycle::Event,
src: &LcEventSrc,
) -> bool {
let time_ilm = ScannerMetrics::time_ilm(lc_event.action.clone());
if let Err(_err) = expire_transitioned_object(api, oi, lc_event, src).await {
return false;
@@ -1595,9 +1635,14 @@ pub async fn apply_expiry_on_transitioned_object(api: Arc<ECStore>, oi: &ObjectI
true
}

pub async fn apply_expiry_on_non_transitioned_objects(api: Arc<ECStore>, oi: &ObjectInfo, lc_event: &lifecycle::Event, src: &LcEventSrc) -> bool {
pub async fn apply_expiry_on_non_transitioned_objects(
api: Arc<ECStore>,
oi: &ObjectInfo,
lc_event: &lifecycle::Event,
src: &LcEventSrc,
) -> bool {
let mut opts = ObjectOptions {
expiration: ExpirationOptions {expire: true},
expiration: ExpirationOptions { expire: true },
..Default::default()
};

@@ -1617,7 +1662,10 @@ pub async fn apply_expiry_on_non_transitioned_objects(api: Arc<ECStore>, oi: &Ob

let time_ilm = ScannerMetrics::time_ilm(lc_event.action.clone());

let mut dobj = api.delete_object(&oi.bucket, &encode_dir_object(&oi.name), opts).await.unwrap();
let mut dobj = api
.delete_object(&oi.bucket, &encode_dir_object(&oi.name), opts)
.await
.unwrap();
if dobj.name == "" {
dobj = oi.clone();
}
@@ -1630,13 +1678,9 @@ pub async fn apply_expiry_on_non_transitioned_objects(api: Arc<ECStore>, oi: &Ob
event_name = EventName::ObjectRemovedDeleteMarkerCreated;
}
match lc_event.action {
lifecycle::IlmAction::DeleteAllVersionsAction => {
event_name = EventName::ObjectRemovedDeleteAllVersions
}
lifecycle::IlmAction::DelMarkerDeleteAllVersionsAction => {
event_name = EventName::ILMDelMarkerExpirationDelete
}
_ => ()
lifecycle::IlmAction::DeleteAllVersionsAction => event_name = EventName::ObjectRemovedDeleteAllVersions,
lifecycle::IlmAction::DelMarkerDeleteAllVersionsAction => event_name = EventName::ILMDelMarkerExpirationDelete,
_ => (),
}
send_event(EventArgs {
event_name: event_name.as_ref().to_string(),
@@ -1667,20 +1711,22 @@ async fn apply_expiry_rule(event: &lifecycle::Event, src: &LcEventSrc, oi: &Obje
pub async fn apply_lifecycle_action(event: &lifecycle::Event, src: &LcEventSrc, oi: &ObjectInfo) -> bool {
let mut success = false;
match event.action {
lifecycle::IlmAction::DeleteVersionAction | lifecycle::IlmAction::DeleteAction
| lifecycle::IlmAction::DeleteRestoredAction | lifecycle::IlmAction::DeleteRestoredVersionAction
| lifecycle::IlmAction::DeleteAllVersionsAction | lifecycle::IlmAction::DelMarkerDeleteAllVersionsAction => {
lifecycle::IlmAction::DeleteVersionAction
| lifecycle::IlmAction::DeleteAction
| lifecycle::IlmAction::DeleteRestoredAction
| lifecycle::IlmAction::DeleteRestoredVersionAction
| lifecycle::IlmAction::DeleteAllVersionsAction
| lifecycle::IlmAction::DelMarkerDeleteAllVersionsAction => {
success = apply_expiry_rule(event, src, oi).await;
}
lifecycle::IlmAction::TransitionAction | lifecycle::IlmAction::TransitionVersionAction => {
success = apply_transition_rule(event, src, oi).await;
}
_ => ()
_ => (),
}
success
}


#[cfg(test)]
mod tests {
use std::io::Cursor;

@@ -312,9 +312,7 @@ impl ScannerMetrics {
pub fn time_ilm(a: lifecycle::IlmAction) -> Box<dyn Fn(u64) -> Box<dyn Fn() + Send + Sync> + Send + Sync> {
let a_clone = a as usize;
if a_clone == lifecycle::IlmAction::NoneAction as usize || a_clone >= lifecycle::IlmAction::ActionCount as usize {
return Box::new(move |_: u64| {
Box::new(move || {})
});
return Box::new(move |_: u64| Box::new(move || {}));
}
let start = SystemTime::now();
Box::new(move |versions: u64| {

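Usage of the two-stage timer above, as a hedged sketch inferred from the signature rather than taken from the crate: the outer closure records how many versions were processed and returns the stop callback.

// time_ilm returns a recorder; calling it with a version count opens the
// measurement, and the returned closure marks completion.
let record = ScannerMetrics::time_ilm(lifecycle::IlmAction::DeleteAction);
let done = record(1); // one version processed
// ... perform the expiry work ...
done();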
@@ -139,9 +139,9 @@ pub struct TierStats {
impl TierStats {
pub fn add(&self, u: &TierStats) -> TierStats {
TierStats {
total_size: self.total_size + u.total_size,
total_size: self.total_size + u.total_size,
num_versions: self.num_versions + u.num_versions,
num_objects: self.num_objects + u.num_objects,
num_objects: self.num_objects + u.num_objects,
}
}
}
@@ -153,9 +153,7 @@ struct AllTierStats {

impl AllTierStats {
pub fn new() -> Self {
Self {
tiers: HashMap::new(),
}
Self { tiers: HashMap::new() }
}

fn add_sizes(&mut self, tiers: HashMap<String, TierStats>) {
@@ -172,11 +170,14 @@ impl AllTierStats {

fn populate_stats(&self, stats: &mut HashMap<String, TierStats>) {
for (tier, st) in &self.tiers {
stats.insert(tier.clone(), TierStats {
total_size: st.total_size.clone(),
num_versions: st.num_versions.clone(),
num_objects: st.num_objects.clone(),
});
stats.insert(
tier.clone(),
TierStats {
total_size: st.total_size.clone(),
num_versions: st.num_versions.clone(),
num_objects: st.num_objects.clone(),
},
);
}
}
}

@@ -27,11 +27,11 @@ pub mod store_list_objects;
mod store_utils;

pub mod checksum;
pub mod client;
pub mod event;
pub mod event_notification;
pub mod client;
pub mod tier;
pub mod signer;
pub mod tier;

pub use global::new_object_layer_fn;
pub use global::set_global_endpoints;

@@ -1,6 +1,6 @@
use s3s::header::X_AMZ_RESTORE;
use crate::error::ObjectApiError;
use crate::bitrot::{create_bitrot_reader, create_bitrot_writer};
use crate::bucket::lifecycle::lifecycle::TRANSITION_COMPLETE;
use crate::client::{object_api_utils::extract_etag, transition_api::ReaderImpl};
use crate::disk::error_reduce::{OBJECT_OP_IGNORED_ERRS, reduce_read_quorum_errs, reduce_write_quorum_errs};
use crate::disk::{
self, CHECK_PART_DISK_NOT_FOUND, CHECK_PART_FILE_CORRUPT, CHECK_PART_FILE_NOT_FOUND, CHECK_PART_SUCCESS,
@@ -8,12 +8,15 @@ use crate::disk::{
};
use crate::erasure_coding;
use crate::erasure_coding::bitrot_verify;
use crate::error::ObjectApiError;
use crate::error::{Error, Result};
use crate::global::GLOBAL_MRFState;
use crate::global::{GLOBAL_LocalNodeName, GLOBAL_TierConfigMgr};
use crate::heal::data_usage_cache::DataUsageCache;
use crate::heal::heal_ops::{HealEntryFn, HealSequence};
use crate::store_api::ObjectToDelete;
use crate::{
bucket::lifecycle::bucket_lifecycle_ops::{gen_transition_objname, get_transitioned_object_reader, put_restore_opts},
cache_value::metacache_set::{ListPathRawOptions, list_path_raw},
config::{GLOBAL_StorageClass, storageclass},
disk::{
@@ -22,8 +25,8 @@ use crate::{
UpdateMetadataOpts, endpoint::Endpoint, error::DiskError, format::FormatV3, new_disk,
},
error::{StorageError, to_object_err},
event::name::EventName, event_notification::{send_event, EventArgs},
bucket::lifecycle::bucket_lifecycle_ops::{gen_transition_objname, put_restore_opts, get_transitioned_object_reader,},
event::name::EventName,
event_notification::{EventArgs, send_event},
global::{
GLOBAL_BackgroundHealState, GLOBAL_LOCAL_DISK_MAP, GLOBAL_LOCAL_DISK_SET_DRIVES, get_global_deployment_id,
is_dist_erasure,
@@ -44,16 +47,11 @@ use crate::{
},
store_init::load_format_erasure,
};
use crate::client::{
object_api_utils::extract_etag,
transition_api::ReaderImpl,
};
use crate::{disk::STORAGE_FORMAT_FILE, heal::mrf::PartialOperation};
use crate::{
heal::data_scanner::{HEAL_DELETE_DANGLING, globalHealConfig},
store_api::ListObjectVersionsInfo,
};
use crate::bucket::lifecycle::lifecycle::TRANSITION_COMPLETE;
use bytesize::ByteSize;
use chrono::Utc;
use futures::future::join_all;
@@ -75,6 +73,7 @@ use rustfs_utils::{
crypto::{base64_decode, base64_encode, hex},
path::{SLASH_SEPARATOR, encode_dir_object, has_suffix, path_join_buf},
};
use s3s::header::X_AMZ_RESTORE;
use sha2::{Digest, Sha256};
use std::hash::Hash;
use std::mem;
@@ -100,7 +99,6 @@ use tracing::error;
use tracing::{debug, info, warn};
use uuid::Uuid;
use workers::workers::Workers;
use crate::global::{GLOBAL_TierConfigMgr, GLOBAL_LocalNodeName};

pub const DEFAULT_READ_BUFFER_SIZE: usize = 1024 * 1024;

@@ -3677,7 +3675,13 @@ impl SetDisks {
Ok(())
}

pub async fn update_restore_metadata(&self, bucket: &str, object: &str, obj_info: &ObjectInfo, opts: &ObjectOptions) -> Result<()> {
pub async fn update_restore_metadata(
&self,
bucket: &str,
object: &str,
obj_info: &ObjectInfo,
opts: &ObjectOptions,
) -> Result<()> {
let mut oi = obj_info.clone();
oi.metadata_only = true;

@@ -3686,13 +3690,23 @@ impl SetDisks {
}

let version_id = oi.version_id.clone().map(|v| v.to_string());
let obj = self.copy_object(bucket, object, bucket, object, &mut oi, &ObjectOptions {
version_id: version_id.clone(),
..Default::default()
}, &ObjectOptions {
version_id: version_id,
..Default::default()
}).await;
let obj = self
.copy_object(
bucket,
object,
bucket,
object,
&mut oi,
&ObjectOptions {
version_id: version_id.clone(),
..Default::default()
},
&ObjectOptions {
version_id: version_id,
..Default::default()
},
)
.await;
if let Err(err) = obj {
//storagelogif(ctx, fmt.Errorf("Unable to update transition restore metadata for %s/%s(%s): %s", bucket, object, oi.VersionID, err))
return Err(err);
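Aside (an interpretation, not part of the diff): update_restore_metadata uses a copy-onto-itself idiom — source and destination are the same bucket/object pinned to one version, and metadata_only is set, so only the version's metadata record is rewritten. A sketch of the intent:

// Sketch with placeholder values; no object data moves on this path.
let mut oi = obj_info.clone();
oi.metadata_only = true; // tells copy_object to skip the data path
// copy_object(bucket, object, bucket, object, &mut oi, src_opts, dst_opts)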
@@ -4169,7 +4183,10 @@ impl StorageAPI for SetDisks {
for disk in disks.iter() {
futures.push(async move {
if let Some(disk) = disk {
match disk.delete_version(&bucket, &object, fi.clone(), force_del_marker, DeleteOptions::default()).await {
match disk
.delete_version(&bucket, &object, fi.clone(), force_del_marker, DeleteOptions::default())
.await
{
Ok(r) => Ok(r),
Err(e) => Err(e),
}
@@ -4392,16 +4409,17 @@ impl StorageAPI for SetDisks {

#[tracing::instrument(skip(self))]
async fn add_partial(&self, bucket: &str, object: &str, version_id: &str) -> Result<()> {
GLOBAL_MRFState.add_partial(PartialOperation {
bucket: bucket.to_string(),
object: object.to_string(),
version_id: Some(version_id.to_string()),
queued: Utc::now(),
set_index: self.set_index,
pool_index: self.pool_index,
..Default::default()
})
.await;
GLOBAL_MRFState
.add_partial(PartialOperation {
bucket: bucket.to_string(),
object: object.to_string(),
version_id: Some(version_id.to_string()),
queued: Utc::now(),
set_index: self.set_index,
pool_index: self.pool_index,
..Default::default()
})
.await;
Ok(())
}

@@ -4518,7 +4536,9 @@ impl StorageAPI for SetDisks {
}
return Err(to_object_err(ERR_METHOD_NOT_ALLOWED, vec![bucket, object]));
}*/
if !opts.mod_time.expect("err").unix_timestamp() == fi.mod_time.as_ref().expect("err").unix_timestamp() || !(opts.transition.etag == extract_etag(&fi.metadata)) {
if !opts.mod_time.expect("err").unix_timestamp() == fi.mod_time.as_ref().expect("err").unix_timestamp()
|| !(opts.transition.etag == extract_etag(&fi.metadata))
{
return Err(to_object_err(Error::from(DiskError::FileNotFound), vec![bucket, object]));
}
if fi.transition_status == TRANSITION_COMPLETE {
@@ -4548,7 +4568,10 @@ impl StorageAPI for SetDisks {
let (pr, mut pw) = tokio::io::duplex(fi.erasure.block_size);
//let h = HeaderMap::new();
//let reader = ReaderImpl::ObjectBody(GetObjectReader {stream: StreamingBlob::wrap(tokio_util::io::ReaderStream::new(pr)), object_info: oi});
let reader = ReaderImpl::ObjectBody(GetObjectReader {stream: Box::new(pr), object_info: oi});
let reader = ReaderImpl::ObjectBody(GetObjectReader {
stream: Box::new(pr),
object_info: oi,
});

let cloned_bucket = bucket.to_string();
let cloned_object = object.to_string();
@@ -4557,7 +4580,16 @@ impl StorageAPI for SetDisks {
let pool_index = self.pool_index;
tokio::spawn(async move {
if let Err(e) = Self::get_object_with_fileinfo(
&cloned_bucket, &cloned_object, 0, cloned_fi.size, &mut pw, cloned_fi, meta_arr, &online_disks, set_index, pool_index
&cloned_bucket,
&cloned_object,
0,
cloned_fi.size,
&mut pw,
cloned_fi,
meta_arr,
&online_disks,
set_index,
pool_index,
)
.await
{
@@ -4565,11 +4597,13 @@ impl StorageAPI for SetDisks {
};
});

let rv = tgt_client.put_with_meta(&dest_obj, reader, fi.size as i64, {
let mut m = HashMap::<String, String>::new();
m.insert("name".to_string(), object.to_string());
m
}).await;
let rv = tgt_client
.put_with_meta(&dest_obj, reader, fi.size as i64, {
let mut m = HashMap::<String, String>::new();
m.insert("name".to_string(), object.to_string());
m
})
.await;
//pr.CloseWithError(err);
if let Err(err) = rv {
//traceFn(ILMTransition, nil, err)
@@ -4579,7 +4613,7 @@ impl StorageAPI for SetDisks {
fi.transition_status = TRANSITION_COMPLETE.to_string();
fi.transitioned_objname = dest_obj;
fi.transition_tier = opts.transition.tier.clone();
fi.transition_version_id = if rv=="" { None } else { Some(Uuid::parse_str(&rv)?) };
fi.transition_version_id = if rv == "" { None } else { Some(Uuid::parse_str(&rv)?) };
let mut event_name = EventName::ObjectTransitionComplete.as_ref();

let disks = self.get_disks(0, 0).await?;
@@ -4594,7 +4628,8 @@ impl StorageAPI for SetDisks {
continue;
}
}
self.add_partial(bucket, object, &opts.version_id.as_ref().expect("err")).await;
self.add_partial(bucket, object, &opts.version_id.as_ref().expect("err"))
.await;
break;
}

@@ -4602,9 +4637,9 @@ impl StorageAPI for SetDisks {
send_event(EventArgs {
event_name: event_name.to_string(),
bucket_name: bucket.to_string(),
object: obj_info,
user_agent: "Internal: [ILM-Transition]".to_string(),
host: GLOBAL_LocalNodeName.to_string(),
object: obj_info,
user_agent: "Internal: [ILM-Transition]".to_string(),
host: GLOBAL_LocalNodeName.to_string(),
..Default::default()
});
//let tags = opts.lifecycle_audit_event.tags();

@@ -1,7 +1,6 @@
#![allow(clippy::map_entry)]
use std::{collections::HashMap, sync::Arc};

use rustfs_filemeta::FileInfo;
use crate::disk::error_reduce::count_errs;
use crate::error::{Error, Result};
use crate::{
@@ -30,6 +29,7 @@ use futures::future::join_all;
use http::HeaderMap;
use lock::{LockApi, namespace_lock::NsLockMap, new_lock_api};
use madmin::heal_commands::{HealDriveInfo, HealResultItem};
use rustfs_filemeta::FileInfo;
use rustfs_utils::{crc_hash, path::path_join_buf, sip_hash};
use tokio::sync::RwLock;
use uuid::Uuid;
@@ -591,7 +591,9 @@ impl StorageAPI for Sets {

#[tracing::instrument(skip(self))]
async fn restore_transitioned_object(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
self.get_disks_by_key(object).restore_transitioned_object(bucket, object, opts).await
self.get_disks_by_key(object)
.restore_transitioned_object(bucket, object, opts)
.await
}

#[tracing::instrument(skip(self))]
@@ -688,8 +690,6 @@ impl StorageAPI for Sets {
.await
}



#[tracing::instrument(skip(self))]
async fn delete_object_tags(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
self.get_disks_by_key(object).delete_object_tags(bucket, object, opts).await

@@ -1,13 +1,13 @@
pub mod utils;
pub mod request_signature_v2;
pub mod request_signature_v4;
pub mod ordered_qs;
pub mod request_signature_streaming;
pub mod request_signature_streaming_unsigned_trailer;
pub mod ordered_qs;
pub mod request_signature_v2;
pub mod request_signature_v4;
pub mod utils;

pub use request_signature_v2::sign_v2;
pub use request_signature_streaming::streaming_sign_v4;
pub use request_signature_v2::pre_sign_v2;
pub use request_signature_v4::sign_v4;
pub use request_signature_v2::sign_v2;
pub use request_signature_v4::pre_sign_v4;
pub use request_signature_v4::sign_v4;
pub use request_signature_v4::sign_v4_trailer;
pub use request_signature_streaming::streaming_sign_v4;
@@ -1,37 +1,37 @@
use std::pin::Pin;
use std::sync::Mutex;
use bytes::{Bytes, BytesMut};
use futures::prelude::*;
use futures::task;
use bytes::{Bytes, BytesMut};
use http::header::TRAILER;
use http::HeaderMap;
use http::Uri;
use http::header::TRAILER;
use http::request::{self, Request};
use hyper::Method;
use lazy_static::lazy_static;
use std::collections::HashMap;
use stdx::str::StrExt;
use std::fmt::Write;
use tracing::{error, info, warn, debug};
use time::{OffsetDateTime, macros::datetime, macros::format_description, format_description};
use http::request::{self, Request};
use http::HeaderMap;
use hyper::Method;
use std::pin::Pin;
use std::sync::Mutex;
use stdx::str::StrExt;
use time::{OffsetDateTime, format_description, macros::datetime, macros::format_description};
use tracing::{debug, error, info, warn};

use super::request_signature_v4::{SERVICE_TYPE_S3, get_scope, get_signature, get_signing_key};
use crate::client::constants::UNSIGNED_PAYLOAD;
use rustfs_utils::{
crypto::{hex, hex_sha256, hex_sha256_chunk, hmac_sha256},
hash::EMPTY_STRING_SHA256_HASH
hash::EMPTY_STRING_SHA256_HASH,
};
use crate::client::constants::UNSIGNED_PAYLOAD;
use super::request_signature_v4::{get_scope, get_signing_key, get_signature, SERVICE_TYPE_S3};

const STREAMING_SIGN_ALGORITHM: &str = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD";
const STREAMING_SIGN_ALGORITHM: &str = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD";
const STREAMING_SIGN_TRAILER_ALGORITHM: &str = "STREAMING-AWS4-HMAC-SHA256-PAYLOAD-TRAILER";
const STREAMING_PAYLOAD_HDR: &str = "AWS4-HMAC-SHA256-PAYLOAD";
const STREAMING_TRAILER_HDR: &str = "AWS4-HMAC-SHA256-TRAILER";
const PAYLOAD_CHUNK_SIZE: i64 = 64 * 1024;
const CHUNK_SIGCONST_LEN: i64 = 17;
const SIGNATURESTR_LEN: i64 = 64;
const CRLF_LEN: i64 = 2;
const TRAILER_KV_SEPARATOR: &str = ":";
const TRAILER_SIGNATURE: &str = "x-amz-trailer-signature";
const STREAMING_PAYLOAD_HDR: &str = "AWS4-HMAC-SHA256-PAYLOAD";
const STREAMING_TRAILER_HDR: &str = "AWS4-HMAC-SHA256-TRAILER";
const PAYLOAD_CHUNK_SIZE: i64 = 64 * 1024;
const CHUNK_SIGCONST_LEN: i64 = 17;
const SIGNATURESTR_LEN: i64 = 64;
const CRLF_LEN: i64 = 2;
const TRAILER_KV_SEPARATOR: &str = ":";
const TRAILER_SIGNATURE: &str = "x-amz-trailer-signature";

lazy_static! {
static ref ignored_streaming_headers: HashMap<String, bool> = {
@@ -55,17 +55,26 @@ fn build_chunk_string_to_sign(t: OffsetDateTime, region: &str, previous_sig: &st
string_to_sign_parts.join("\n")
}

fn build_chunk_signature(chunk_check_sum: &str, req_time: OffsetDateTime, region: &str,
previous_signature: &str, secret_access_key: &str
fn build_chunk_signature(
chunk_check_sum: &str,
req_time: OffsetDateTime,
region: &str,
previous_signature: &str,
secret_access_key: &str,
) -> String {
let chunk_string_to_sign = build_chunk_string_to_sign(req_time, region,
previous_signature, chunk_check_sum);
let chunk_string_to_sign = build_chunk_string_to_sign(req_time, region, previous_signature, chunk_check_sum);
let signing_key = get_signing_key(secret_access_key, region, req_time, SERVICE_TYPE_S3);
get_signature(signing_key, &chunk_string_to_sign)
}

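For reference, a sketch of the per-chunk string-to-sign that build_chunk_string_to_sign assembles, following the published STREAMING-AWS4-HMAC-SHA256-PAYLOAD layout (newline-joined):

// AWS4-HMAC-SHA256-PAYLOAD
// <ISO8601 request time, e.g. 20130524T000000Z>
// <scope: <date>/<region>/s3/aws4_request>
// <signature of the previous chunk; the seed is the request header signature>
// <hex(sha256(""))>              // hash of the empty string
// <hex(sha256(chunk payload))>   // this is chunk_check_sum above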
pub fn streaming_sign_v4(req: request::Builder, access_key_id: &str, secret_access_key: &str, session_token: &str,
region: &str, data_len: i64, req_time: OffsetDateTime/*, sh256: md5simd.Hasher*/
pub fn streaming_sign_v4(
req: request::Builder,
access_key_id: &str,
secret_access_key: &str,
session_token: &str,
region: &str,
data_len: i64,
req_time: OffsetDateTime, /*, sh256: md5simd.Hasher*/
) -> request::Builder {
todo!();
}

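The helpers used above come from the v4 module; the signing key itself follows the standard SigV4 HMAC cascade, sketched here for reference (hmac_sha256 and hex stand for the crate helpers imported at the top of this file):

// k_date    = hmac_sha256("AWS4" + secret_access_key, <yyyymmdd>)
// k_region  = hmac_sha256(k_date, region)
// k_service = hmac_sha256(k_region, "s3")
// k_signing = hmac_sha256(k_service, "aws4_request")
// chunk signature = hex(hmac_sha256(k_signing, string_to_sign))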
@@ -1,6 +1,11 @@
use http::request;
use time::OffsetDateTime;

pub fn streaming_unsigned_v4(mut req: request::Builder, session_token: &str, data_len: i64, req_time: OffsetDateTime) -> request::Builder{
pub fn streaming_unsigned_v4(
mut req: request::Builder,
session_token: &str,
data_len: i64,
req_time: OffsetDateTime,
) -> request::Builder {
todo!();
}

@@ -1,15 +1,15 @@
use std::collections::HashMap;
use bytes::{Bytes, BytesMut};
use http::request;
use hyper::Uri;
use bytes::{Bytes, BytesMut};
use std::collections::HashMap;
use std::fmt::Write;
use time::{OffsetDateTime, macros::format_description, format_description};
use time::{OffsetDateTime, format_description, macros::format_description};

use rustfs_utils::crypto::{base64_encode, hex, hmac_sha1};

use super::utils::get_host_addr;

const SIGN_V4_ALGORITHM: &str = "AWS4-HMAC-SHA256";
const SIGN_V4_ALGORITHM: &str = "AWS4-HMAC-SHA256";
const SIGN_V2_ALGORITHM: &str = "AWS";

fn encode_url2path(req: &request::Builder, virtual_host: bool) -> String {
@@ -20,7 +20,13 @@ fn encode_url2path(req: &request::Builder, virtual_host: bool) -> String {
path
}

pub fn pre_sign_v2(mut req: request::Builder, access_key_id: &str, secret_access_key: &str, expires: i64, virtual_host: bool) -> request::Builder {
pub fn pre_sign_v2(
mut req: request::Builder,
access_key_id: &str,
secret_access_key: &str,
expires: i64,
virtual_host: bool,
) -> request::Builder {
if access_key_id == "" || secret_access_key == "" {
return req;
}
@@ -50,7 +56,11 @@ pub fn pre_sign_v2(mut req: request::Builder, access_key_id: &str, secret_access

let uri = req.uri_ref().unwrap().clone();
let mut parts = req.uri_ref().unwrap().clone().into_parts();
parts.path_and_query = Some(format!("{}?{}&Signature={}", uri.path(), serde_urlencoded::to_string(&query).unwrap(), signature).parse().unwrap());
parts.path_and_query = Some(
format!("{}?{}&Signature={}", uri.path(), serde_urlencoded::to_string(&query).unwrap(), signature)
.parse()
.unwrap(),
);
let req = req.uri(Uri::from_parts(parts).unwrap());

req
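The resulting presigned URL has the legacy V2 shape; an illustrative example (values are placeholders, and the non-Signature parameters are built in the elided part of this function):

// GET /bucket/key?AWSAccessKeyId=AKIAEXAMPLE&Expires=1717000000&Signature=<base64 hmac-sha1>
// Expires is an absolute unix timestamp; the signature covers the
// canonicalized method, headers, and resource, per the legacy AWS V2 scheme.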
@@ -61,7 +71,13 @@ fn post_pre_sign_signature_v2(policy_base64: &str, secret_access_key: &str) -> S
signature
}

pub fn sign_v2(mut req: request::Builder, content_len: i64, access_key_id: &str, secret_access_key: &str, virtual_host: bool) -> request::Builder {
pub fn sign_v2(
mut req: request::Builder,
content_len: i64,
access_key_id: &str,
secret_access_key: &str,
virtual_host: bool,
) -> request::Builder {
if access_key_id == "" || secret_access_key == "" {
return req;
}
@@ -74,7 +90,14 @@ pub fn sign_v2(mut req: request::Builder, content_len: i64, access_key_id: &str,

let date = headers.get("Date").unwrap();
if date.to_str().unwrap() == "" {
headers.insert("Date", d2.format(&format_description::well_known::Rfc2822).unwrap().to_string().parse().unwrap());
headers.insert(
"Date",
d2.format(&format_description::well_known::Rfc2822)
.unwrap()
.to_string()
.parse()
.unwrap(),
);
}

let mut auth_header = format!("{} {}:", SIGN_V2_ALGORITHM, access_key_id);
@@ -130,21 +153,27 @@ fn write_canonicalized_headers(buf: &mut BytesMut, req: &request::Builder) {
let lk = k.as_str().to_lowercase();
if lk.starts_with("x-amz") {
proto_headers.push(lk.clone());
let vv = req.headers_ref().expect("err").get_all(k).iter().map(|e| e.to_str().unwrap().to_string()).collect();
let vv = req
.headers_ref()
.expect("err")
.get_all(k)
.iter()
.map(|e| e.to_str().unwrap().to_string())
.collect();
vals.insert(lk, vv);
}
}
proto_headers.sort();
for k in proto_headers {
buf.write_str(&k);
buf.write_char(':');
for (idx, v) in vals[&k].iter().enumerate() {
if idx > 0 {
buf.write_char(',');
}
buf.write_str(v);
}
buf.write_char('\n');
buf.write_str(&k);
buf.write_char(':');
for (idx, v) in vals[&k].iter().enumerate() {
if idx > 0 {
buf.write_char(',');
}
buf.write_str(v);
}
buf.write_char('\n');
}
}

@@ -181,7 +210,7 @@ fn write_canonicalized_resource(buf: &mut BytesMut, req: &request::Builder, virt
let mut vals = result.unwrap_or_default();
for resource in INCLUDED_QUERY {
let vv = &vals[*resource];
if vv.len() > 0 {
if vv.len() > 0 {
n += 1;
match n {
1 => {
@@ -199,4 +228,4 @@ fn write_canonicalized_resource(buf: &mut BytesMut, req: &request::Builder, virt
}
}
}
}
}

@@ -1,21 +1,21 @@
use bytes::{Bytes, BytesMut};
use http::header::TRAILER;
use http::HeaderMap;
use http::Uri;
use http::header::TRAILER;
use http::request::{self, Request};
use lazy_static::lazy_static;
use std::collections::HashMap;
use std::fmt::Write;
use tracing::{error, info, warn, debug};
use time::{OffsetDateTime, macros::datetime, macros::format_description, format_description};
use http::request::{self, Request};
use http::HeaderMap;
use time::{OffsetDateTime, format_description, macros::datetime, macros::format_description};
use tracing::{debug, error, info, warn};

use rustfs_utils::crypto::{hex, hex_sha256, hmac_sha256};
use rustfs_utils::hash::EMPTY_STRING_SHA256_HASH;
use crate::client::constants::UNSIGNED_PAYLOAD;
use super::ordered_qs::OrderedQs;
use super::request_signature_streaming_unsigned_trailer::streaming_unsigned_v4;
use super::utils::stable_sort_by_first;
use super::utils::{get_host_addr, sign_v4_trim_all};
use crate::client::constants::UNSIGNED_PAYLOAD;
use rustfs_utils::crypto::{hex, hex_sha256, hmac_sha256};
use rustfs_utils::hash::EMPTY_STRING_SHA256_HASH;

pub const SIGN_V4_ALGORITHM: &str = "AWS4-HMAC-SHA256";
pub const SERVICE_TYPE_S3: &str = "s3";
@@ -69,7 +69,7 @@ fn get_credential(access_key_id: &str, location: &str, t: OffsetDateTime, servic
fn get_hashed_payload(req: &request::Builder) -> String {
let headers = req.headers_ref().unwrap();
let mut hashed_payload = "";
if let Some(payload)= headers.get("X-Amz-Content-Sha256") {
if let Some(payload) = headers.get("X-Amz-Content-Sha256") {
hashed_payload = payload.to_str().unwrap();
}
if hashed_payload == "" {
@@ -86,7 +86,13 @@ fn get_canonical_headers(req: &request::Builder, ignored_headers: &HashMap<Strin
continue;
}
headers.push(k.as_str().to_lowercase());
let vv = req.headers_ref().expect("err").get_all(k).iter().map(|e| e.to_str().unwrap().to_string()).collect();
let vv = req
.headers_ref()
.expect("err")
.get_all(k)
.iter()
.map(|e| e.to_str().unwrap().to_string())
.collect();
vals.insert(k.as_str().to_lowercase(), vv);
}
if !header_exists("host", &headers) {
@@ -155,7 +161,7 @@ fn get_canonical_request(req: &request::Builder, ignored_headers: &HashMap<Strin
canonical_query_string = query.join("&");
canonical_query_string = canonical_query_string.replace("+", "%20");
}


let mut canonical_request = <Vec<String>>::new();
canonical_request.push(req.method_ref().unwrap().to_string());
canonical_request.push(req.uri_ref().unwrap().path().to_string());
@@ -178,7 +184,15 @@ fn get_string_to_sign_v4(t: OffsetDateTime, location: &str, canonical_request: &
string_to_sign
}

pub fn pre_sign_v4(req: request::Builder, access_key_id: &str, secret_access_key: &str, session_token: &str, location: &str, expires: i64, t: OffsetDateTime) -> request::Builder {
pub fn pre_sign_v4(
req: request::Builder,
access_key_id: &str,
secret_access_key: &str,
session_token: &str,
location: &str,
expires: i64,
t: OffsetDateTime,
) -> request::Builder {
if access_key_id == "" || secret_access_key == "" {
return req;
}
@@ -208,9 +222,13 @@ pub fn pre_sign_v4(req: request::Builder, access_key_id: &str, secret_access_key

let uri = req.uri_ref().unwrap().clone();
let mut parts = req.uri_ref().unwrap().clone().into_parts();
parts.path_and_query = Some(format!("{}?{}", uri.path(), serde_urlencoded::to_string(&query).unwrap()).parse().unwrap());
parts.path_and_query = Some(
format!("{}?{}", uri.path(), serde_urlencoded::to_string(&query).unwrap())
.parse()
.unwrap(),
);
let req = req.uri(Uri::from_parts(parts).unwrap());


let canonical_request = get_canonical_request(&req, &v4_ignored_headers, &get_hashed_payload(&req));
let string_to_sign = get_string_to_sign_v4(t, location, &canonical_request, SERVICE_TYPE_S3);
//println!("canonical_request: \n{}\n", canonical_request);
@@ -220,7 +238,16 @@ pub fn pre_sign_v4(req: request::Builder, access_key_id: &str, secret_access_key

let uri = req.uri_ref().unwrap().clone();
let mut parts = req.uri_ref().unwrap().clone().into_parts();
parts.path_and_query = Some(format!("{}?{}&X-Amz-Signature={}", uri.path(), serde_urlencoded::to_string(&query).unwrap(), signature).parse().unwrap());
parts.path_and_query = Some(
format!(
"{}?{}&X-Amz-Signature={}",
uri.path(),
serde_urlencoded::to_string(&query).unwrap(),
signature
)
.parse()
.unwrap(),
);
let req = req.uri(Uri::from_parts(parts).unwrap());

req
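Before the final append above, the query assembled in the elided hunk carries the standard V4 presign parameters, illustrated here with placeholder values:

// X-Amz-Algorithm=AWS4-HMAC-SHA256
// X-Amz-Credential=<access_key>/<date>/<region>/s3/aws4_request
// X-Amz-Date=<ISO8601 timestamp>
// X-Amz-Expires=<seconds>
// X-Amz-SignedHeaders=host
// ...after which pre_sign_v4 signs the canonical request and appends X-Amz-Signature.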
@@ -236,7 +263,16 @@ fn sign_v4_sts(mut req: request::Builder, access_key_id: &str, secret_access_key
sign_v4_inner(req, 0, access_key_id, secret_access_key, "", location, SERVICE_TYPE_STS, HeaderMap::new())
}

fn sign_v4_inner(mut req: request::Builder, content_len: i64, access_key_id: &str, secret_access_key: &str, session_token: &str, location: &str, service_type: &str, trailer: HeaderMap) -> request::Builder {
fn sign_v4_inner(
mut req: request::Builder,
content_len: i64,
access_key_id: &str,
secret_access_key: &str,
session_token: &str,
location: &str,
service_type: &str,
trailer: HeaderMap,
) -> request::Builder {
if access_key_id == "" || secret_access_key == "" {
return req;
}
@@ -252,7 +288,7 @@ fn sign_v4_inner(mut req: request::Builder, content_len: i64, access_key_id: &st
headers.insert("X-Amz-Security-Token", session_token.parse().unwrap());
}

if trailer.len() > 0{
if trailer.len() > 0 {
for (k, _) in &trailer {
headers.append("X-Amz-Trailer", k.as_str().to_lowercase().parse().unwrap());
}
@@ -276,9 +312,12 @@ fn sign_v4_inner(mut req: request::Builder, content_len: i64, access_key_id: &st

let mut headers = req.headers_mut().expect("err");

let auth = format!("{} Credential={}, SignedHeaders={}, Signature={}", SIGN_V4_ALGORITHM, credential, signed_headers, signature);
let auth = format!(
"{} Credential={}, SignedHeaders={}, Signature={}",
SIGN_V4_ALGORITHM, credential, signed_headers, signature
);
headers.insert("Authorization", auth.parse().unwrap());


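The format string above yields the documented SigV4 Authorization header; an illustrative single-line example with placeholder values:

// Authorization: AWS4-HMAC-SHA256 Credential=AKIAEXAMPLE/20130524/us-east-1/s3/aws4_request, SignedHeaders=host;x-amz-content-sha256;x-amz-date, Signature=<64 hex chars>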
if trailer.len() > 0 {
|
||||
//req.Trailer = trailer;
|
||||
for (_, v) in &trailer {
|
||||
@@ -315,12 +354,44 @@ fn unsigned_trailer(mut req: request::Builder, content_len: i64, trailer: Header
|
||||
streaming_unsigned_v4(req, "", content_len, t);
|
||||
}
|
||||
|
||||
pub fn sign_v4(mut req: request::Builder, content_len: i64, access_key_id: &str, secret_access_key: &str, session_token: &str, location: &str) -> request::Builder {
|
||||
sign_v4_inner(req, content_len, access_key_id, secret_access_key, session_token, location, SERVICE_TYPE_S3, HeaderMap::new())
|
||||
pub fn sign_v4(
|
||||
mut req: request::Builder,
|
||||
content_len: i64,
|
||||
access_key_id: &str,
|
||||
secret_access_key: &str,
|
||||
session_token: &str,
|
||||
location: &str,
|
||||
) -> request::Builder {
|
||||
sign_v4_inner(
|
||||
req,
|
||||
content_len,
|
||||
access_key_id,
|
||||
secret_access_key,
|
||||
session_token,
|
||||
location,
|
||||
SERVICE_TYPE_S3,
|
||||
HeaderMap::new(),
|
||||
)
|
||||
}
|
||||
|
||||
pub fn sign_v4_trailer(req: request::Builder, access_key_id: &str, secret_access_key: &str, session_token: &str, location: &str, trailer: HeaderMap) -> request::Builder {
|
||||
sign_v4_inner(req, 0, access_key_id, secret_access_key, session_token, location, SERVICE_TYPE_S3, trailer)
|
||||
pub fn sign_v4_trailer(
|
||||
req: request::Builder,
|
||||
access_key_id: &str,
|
||||
secret_access_key: &str,
|
||||
session_token: &str,
|
||||
location: &str,
|
||||
trailer: HeaderMap,
|
||||
) -> request::Builder {
|
||||
sign_v4_inner(
|
||||
req,
|
||||
0,
|
||||
access_key_id,
|
||||
secret_access_key,
|
||||
session_token,
|
||||
location,
|
||||
SERVICE_TYPE_S3,
|
||||
trailer,
|
||||
)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -338,10 +409,17 @@ mod tests {
|
||||
let service = "s3";
|
||||
let path = "/";
|
||||
|
||||
let mut req = Request::builder().method(http::Method::GET).uri("http://examplebucket.s3.amazonaws.com/?");
|
||||
let mut req = Request::builder()
|
||||
.method(http::Method::GET)
|
||||
.uri("http://examplebucket.s3.amazonaws.com/?");
|
||||
let mut headers = req.headers_mut().expect("err");
|
||||
headers.insert("host", "examplebucket.s3.amazonaws.com".parse().unwrap());
|
||||
headers.insert("x-amz-content-sha256", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855".parse().unwrap());
|
||||
headers.insert(
|
||||
"x-amz-content-sha256",
|
||||
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
|
||||
.parse()
|
||||
.unwrap(),
|
||||
);
|
||||
headers.insert("x-amz-date", timestamp.parse().unwrap());
|
||||
|
||||
let mut query = <Vec<(String, String)>>::new();
|
||||
@@ -349,7 +427,11 @@ mod tests {
|
||||
query.push(("prefix".to_string(), "J".to_string()));
|
||||
let uri = req.uri_ref().unwrap().clone();
|
||||
let mut parts = req.uri_ref().unwrap().clone().into_parts();
|
||||
parts.path_and_query = Some(format!("{}?{}", uri.path(), serde_urlencoded::to_string(&query).unwrap()).parse().unwrap());
|
||||
parts.path_and_query = Some(
|
||||
format!("{}?{}", uri.path(), serde_urlencoded::to_string(&query).unwrap())
|
||||
.parse()
|
||||
.unwrap(),
|
||||
);
|
||||
let req = req.uri(Uri::from_parts(parts).unwrap());
|
||||
|
||||
let canonical_request = get_canonical_request(&req, &v4_ignored_headers, &get_hashed_payload(&req));
|
||||
@@ -361,7 +443,9 @@ mod tests {
|
||||
"max-keys=2&prefix=J\n",
|
||||
"host:examplebucket.s3.amazonaws.com\n",
|
||||
"x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n",
|
||||
"x-amz-date:", "20130524T000000Z", "\n",
|
||||
"x-amz-date:",
|
||||
"20130524T000000Z",
|
||||
"\n",
|
||||
"\n",
|
||||
"host;x-amz-content-sha256;x-amz-date\n",
|
||||
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
@@ -373,7 +457,8 @@ mod tests {
|
||||
string_to_sign,
|
||||
concat!(
|
||||
"AWS4-HMAC-SHA256\n",
|
||||
"20130524T000000Z", "\n",
|
||||
"20130524T000000Z",
|
||||
"\n",
|
||||
"20130524/us-east-1/s3/aws4_request\n",
|
||||
"df57d21db20da04d7fa30298dd4488ba3a2b47ca3a489c74750e0f1e7df1b9b7",
|
||||
)
|
||||
@@ -396,17 +481,28 @@ mod tests {
|
||||
let service = "s3";
|
||||
let path = "/mblock2/";
|
||||
|
||||
let mut req = Request::builder().method(http::Method::GET).uri("http://192.168.1.11:9020/mblock2/?");
|
||||
|
||||
let mut req = Request::builder()
|
||||
.method(http::Method::GET)
|
||||
.uri("http://192.168.1.11:9020/mblock2/?");
|
||||
|
||||
let mut headers = req.headers_mut().expect("err");
|
||||
headers.insert("host", "192.168.1.11:9020".parse().unwrap());
|
||||
headers.insert("x-amz-content-sha256", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855".parse().unwrap());
|
||||
headers.insert(
|
||||
"x-amz-content-sha256",
|
||||
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
|
||||
.parse()
|
||||
.unwrap(),
|
||||
);
|
||||
headers.insert("x-amz-date", timestamp.parse().unwrap());
|
||||
|
||||
let mut query: Vec<(String, String)> = Vec::new();
|
||||
let uri = req.uri_ref().unwrap().clone();
|
||||
let mut parts = req.uri_ref().unwrap().clone().into_parts();
|
||||
parts.path_and_query = Some(format!("{}?{}", uri.path(), serde_urlencoded::to_string(&query).unwrap()).parse().unwrap());
|
||||
parts.path_and_query = Some(
|
||||
format!("{}?{}", uri.path(), serde_urlencoded::to_string(&query).unwrap())
|
||||
.parse()
|
||||
.unwrap(),
|
||||
);
|
||||
let req = req.uri(Uri::from_parts(parts).unwrap());
|
||||
|
||||
let canonical_request = get_canonical_request(&req, &v4_ignored_headers, &get_hashed_payload(&req));
|
||||
@@ -419,7 +515,9 @@ mod tests {
|
||||
"\n",
|
||||
"host:192.168.1.11:9020\n",
|
||||
"x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n",
|
||||
"x-amz-date:", "20250505T011054Z", "\n",
|
||||
"x-amz-date:",
|
||||
"20250505T011054Z",
|
||||
"\n",
|
||||
"\n",
|
||||
"host;x-amz-content-sha256;x-amz-date\n",
|
||||
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
@@ -432,7 +530,8 @@ mod tests {
|
||||
string_to_sign,
|
||||
concat!(
|
||||
"AWS4-HMAC-SHA256\n",
|
||||
"20250505T011054Z", "\n",
|
||||
"20250505T011054Z",
|
||||
"\n",
|
||||
"20250505/us-east-1/s3/aws4_request\n",
|
||||
"c2960d00cc7de7bed3e2e2d1330ec298ded8f78a231c1d32dedac72ebec7f9b0",
|
||||
)
|
||||
@@ -456,10 +555,15 @@ mod tests {
|
||||
let path = "/mblock2/";
|
||||
|
||||
let mut req = Request::builder().method(http::Method::GET).uri("http://192.168.1.11:9020/mblock2/?list-type=2&encoding-type=url&prefix=mypre&delimiter=%2F&fetch-owner=true&max-keys=1");
|
||||
|
||||
|
||||
let mut headers = req.headers_mut().expect("err");
|
||||
headers.insert("host", "192.168.1.11:9020".parse().unwrap());
|
||||
headers.insert("x-amz-content-sha256", "e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855".parse().unwrap());
|
||||
headers.insert(
|
||||
"x-amz-content-sha256",
|
||||
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855"
|
||||
.parse()
|
||||
.unwrap(),
|
||||
);
|
||||
headers.insert("x-amz-date", timestamp.parse().unwrap());
|
||||
|
||||
/*let uri = req.uri_ref().unwrap().clone();
|
||||
@@ -475,7 +579,7 @@ mod tests {
|
||||
let mut parts = req.uri_ref().unwrap().clone().into_parts();
|
||||
parts.path_and_query = Some(format!("{}?{}", uri.path(), canonical_query_string).parse().unwrap());
|
||||
let req = req.uri(Uri::from_parts(parts).unwrap());*/
|
||||
println!("{:?}", req.uri_ref().unwrap().query());
|
||||
println!("{:?}", req.uri_ref().unwrap().query());
|
||||
let canonical_request = get_canonical_request(&req, &v4_ignored_headers, &get_hashed_payload(&req));
|
||||
println!("canonical_request: \n{}\n", canonical_request);
|
||||
assert_eq!(
|
||||
@@ -486,7 +590,9 @@ println!("{:?}", req.uri_ref().unwrap().query());
|
||||
"delimiter=%2F&encoding-type=url&fetch-owner=true&list-type=2&max-keys=1&prefix=mypre\n",
|
||||
"host:192.168.1.11:9020\n",
|
||||
"x-amz-content-sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855\n",
|
||||
"x-amz-date:", "20250507T051030Z", "\n",
|
||||
"x-amz-date:",
|
||||
"20250507T051030Z",
|
||||
"\n",
|
||||
"\n",
|
||||
"host;x-amz-content-sha256;x-amz-date\n",
|
||||
"e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
|
||||
@@ -499,7 +605,8 @@ println!("{:?}", req.uri_ref().unwrap().query());
|
||||
string_to_sign,
|
||||
concat!(
|
||||
"AWS4-HMAC-SHA256\n",
|
||||
"20250507T051030Z", "\n",
|
||||
"20250507T051030Z",
|
||||
"\n",
|
||||
"20250507/us-east-1/s3/aws4_request\n",
|
||||
"e6db9e09e9c873aff0b9ca170998b4753f6a6c36c90bc2dca80613affb47f999",
|
||||
)
|
||||
@@ -525,8 +632,10 @@ println!("{:?}", req.uri_ref().unwrap().query());
|
||||
let path = "/";
|
||||
let session_token = "";
|
||||
|
||||
let mut req = Request::builder().method(http::Method::GET).uri("http://examplebucket.s3.amazonaws.com/test.txt");
|
||||
|
||||
let mut req = Request::builder()
|
||||
.method(http::Method::GET)
|
||||
.uri("http://examplebucket.s3.amazonaws.com/test.txt");
|
||||
|
||||
let mut headers = req.headers_mut().expect("err");
|
||||
headers.insert("host", "examplebucket.s3.amazonaws.com".parse().unwrap());
|
||||
|
||||
@@ -573,7 +682,7 @@ println!("{:?}", req.uri_ref().unwrap().query());
|
||||
let session_token = "";
|
||||
|
||||
let mut req = Request::builder().method(http::Method::GET).uri("http://192.168.1.11:9020/mblock2/test.txt?delimiter=%2F&fetch-owner=true&prefix=mypre&encoding-type=url&max-keys=1&list-type=2");
|
||||
|
||||
|
||||
let mut headers = req.headers_mut().expect("err");
|
||||
headers.insert("host", "192.168.1.11:9020".parse().unwrap());
|
||||
|
||||
@@ -604,4 +713,4 @@ println!("{:?}", req.uri_ref().unwrap().query());
|
||||
)
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -28,4 +28,4 @@ where
|
||||
T: Ord,
|
||||
{
|
||||
v.sort_by(|lhs, rhs| lhs.0.cmp(&rhs.0));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,20 +1,21 @@
#![allow(clippy::map_entry)]

use rustfs_filemeta::FileInfo;
use crate::bucket::lifecycle::bucket_lifecycle_ops::init_background_expiry;
use crate::bucket::metadata_sys::{self, set_bucket_metadata};
use crate::bucket::utils::{check_valid_bucket_name, check_valid_bucket_name_strict, is_meta_bucketname};
use crate::config::GLOBAL_StorageClass;
use crate::config::storageclass;
use crate::disk::endpoint::{Endpoint, EndpointType};
use crate::disk::{DiskAPI, DiskInfo, DiskInfoOptions};
use crate::error::{Error, Result};
use crate::error::{
StorageError, is_err_bucket_exists, is_err_invalid_upload_id, is_err_object_not_found, is_err_read_quorum,
is_err_version_not_found, to_object_err,
};
use crate::global::{
DISK_ASSUME_UNKNOWN_SIZE, DISK_FILL_FRACTION, DISK_MIN_INODES, DISK_RESERVE_FRACTION, GLOBAL_BOOT_TIME,
GLOBAL_LOCAL_DISK_MAP, GLOBAL_LOCAL_DISK_SET_DRIVES, get_global_endpoints, is_dist_erasure, is_erasure_sd,
set_global_deployment_id, set_object_layer, GLOBAL_TierConfigMgr,
GLOBAL_LOCAL_DISK_MAP, GLOBAL_LOCAL_DISK_SET_DRIVES, GLOBAL_TierConfigMgr, get_global_endpoints, is_dist_erasure,
is_erasure_sd, set_global_deployment_id, set_object_layer,
};
use crate::heal::data_usage::{DATA_USAGE_ROOT, DataUsageInfo};
use crate::heal::data_usage_cache::{DataUsageCache, DataUsageCacheInfo};
@@ -27,10 +28,7 @@ use crate::rebalance::RebalanceMeta;
use crate::store_api::{ListMultipartsInfo, ListObjectVersionsInfo, MultipartInfo, ObjectIO};
use crate::store_init::{check_disk_fatal_errs, ec_drives_no_config};
use crate::{
bucket::{
metadata::BucketMetadata,
lifecycle::bucket_lifecycle_ops::TransitionState
},
bucket::{lifecycle::bucket_lifecycle_ops::TransitionState, metadata::BucketMetadata},
disk::{BUCKET_META_PREFIX, DiskOption, DiskStore, RUSTFS_META_BUCKET, new_disk},
endpoints::EndpointServerPools,
peer::S3PeerSys,
@@ -42,10 +40,6 @@ use crate::{
},
store_init,
};
use rustfs_utils::crypto::base64_decode;
use rustfs_utils::path::{SLASH_SEPARATOR, decode_dir_object, encode_dir_object, path_join_buf};
use crate::bucket::lifecycle::bucket_lifecycle_ops::init_background_expiry;
use crate::error::{Error, Result};
use common::globals::{GLOBAL_Local_Node_Name, GLOBAL_Rustfs_Host, GLOBAL_Rustfs_Port};
use futures::future::join_all;
use glob::Pattern;
@@ -53,7 +47,10 @@ use http::HeaderMap;
use lazy_static::lazy_static;
use madmin::heal_commands::HealResultItem;
use rand::Rng as _;
use rustfs_filemeta::FileInfo;
use rustfs_filemeta::MetaCacheEntry;
use rustfs_utils::crypto::base64_decode;
use rustfs_utils::path::{SLASH_SEPARATOR, decode_dir_object, encode_dir_object, path_join_buf};
use s3s::dto::{BucketVersioningStatus, ObjectLockConfiguration, ObjectLockEnabled, VersioningConfiguration};
use std::cmp::Ordering;
use std::process::exit;
@@ -332,7 +329,7 @@ impl ECStore {

TransitionState::init(self.clone()).await;

if let Err(err) = GLOBAL_TierConfigMgr.write().await.init(self.clone()).await {
if let Err(err) = GLOBAL_TierConfigMgr.write().await.init(self.clone()).await {
info!("TierConfigMgr init error: {}", err);
}

@@ -1868,7 +1865,9 @@ impl StorageAPI for ECStore {
self.pools[0].add_partial(bucket, object.as_str(), version_id).await;
}

let idx = self.get_pool_idx_existing_with_opts(bucket, object.as_str(), &ObjectOptions::default()).await?;
let idx = self
.get_pool_idx_existing_with_opts(bucket, object.as_str(), &ObjectOptions::default())
.await?;

self.pools[idx].add_partial(bucket, object.as_str(), version_id).await;
Ok(())
@@ -2134,7 +2133,9 @@ impl StorageAPI for ECStore {
let object = rustfs_utils::path::encode_dir_object(object);

if self.single_pool() {
return self.pools[0].delete_object_version(bucket, object.as_str(), fi, force_del_marker).await;
return self.pools[0]
.delete_object_version(bucket, object.as_str(), fi, force_del_marker)
.await;
}
Ok(())
}

@@ -2,22 +2,19 @@ use crate::bucket::metadata_sys::get_versioning_config;
use crate::bucket::versioning::VersioningApi as _;
use crate::cmd::bucket_replication::{ReplicationStatusType, VersionPurgeStatusType};
use crate::error::{Error, Result};
use rustfs_utils::path::decode_dir_object;
use crate::heal::heal_ops::HealSequence;
use crate::store_utils::clean_metadata;
use crate::{disk::DiskStore, heal::heal_commands::HealOpts,};
use crate::{
bucket::lifecycle::{
lifecycle::TransitionOptions,
bucket_lifecycle_ops::TransitionedObject,
},
bucket::lifecycle::bucket_lifecycle_audit::LcAuditEvent,
bucket::lifecycle::lifecycle::ExpirationOptions,
bucket::lifecycle::{bucket_lifecycle_ops::TransitionedObject, lifecycle::TransitionOptions},
};
use crate::{disk::DiskStore, heal::heal_commands::HealOpts};
use http::{HeaderMap, HeaderValue};
use madmin::heal_commands::HealResultItem;
use rustfs_filemeta::{FileInfo, MetaCacheEntriesSorted, ObjectPartInfo, headers::AMZ_OBJECT_TAGGING};
use rustfs_rio::{HashReader, Reader};
use rustfs_utils::path::decode_dir_object;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
use std::fmt::Debug;
@@ -444,15 +441,18 @@ impl ObjectInfo {

// TODO:expires
// TODO:ReplicationState

let transitioned_object = TransitionedObject {
name: fi.transitioned_objname.clone(),
version_id: if let Some(transition_version_id) = fi.transition_version_id { transition_version_id.to_string() } else { "".to_string() },
status: fi.transition_status.clone(),
free_version: fi.tier_free_version(),
tier: fi.transition_tier.clone(),
};

let transitioned_object = TransitionedObject {
name: fi.transitioned_objname.clone(),
version_id: if let Some(transition_version_id) = fi.transition_version_id {
transition_version_id.to_string()
} else {
"".to_string()
},
status: fi.transition_status.clone(),
free_version: fi.tier_free_version(),
tier: fi.transition_tier.clone(),
};

let metadata = {
let mut v = fi.metadata.clone();

@@ -1,9 +1,9 @@
pub mod warm_backend_s3;
pub mod warm_backend_minio;
pub mod warm_backend_rustfs;
pub mod warm_backend;
pub mod tier;
pub mod tier_admin;
pub mod tier_config;
pub mod tier;
pub mod tier_gen;
pub mod tier_handlers;
pub mod tier_handlers;
pub mod warm_backend;
pub mod warm_backend_minio;
pub mod warm_backend_rustfs;
pub mod warm_backend_s3;

@@ -1,46 +1,49 @@
use std::{
collections::{hash_map::Entry, HashMap}, io::Cursor, sync::Arc, time::{Duration,}
};
use bytes::Bytes;
use serde::{Serialize, Deserialize};
use time::OffsetDateTime;
use tokio::{select, sync::RwLock, time::interval};
use rand::Rng;
use tracing::{info, debug, warn, error};
use http::status::StatusCode;
use lazy_static::lazy_static;
use rand::Rng;
use serde::{Deserialize, Serialize};
use std::{
collections::{HashMap, hash_map::Entry},
io::Cursor,
sync::Arc,
time::Duration,
};
use time::OffsetDateTime;
use tokio::io::BufReader;
use tokio::{select, sync::RwLock, time::interval};
use tracing::{debug, error, info, warn};

use s3s::S3ErrorCode;
use crate::error::{Error, Result, StorageError};
use rustfs_utils::path::{path_join, SLASH_SEPARATOR};
use crate::{
config::com::{read_config, CONFIG_PREFIX},
disk::RUSTFS_META_BUCKET,
store::ECStore, store_api::{ObjectOptions, PutObjReader}, StorageAPI
};
use crate::client::admin_handler_utils::AdminError;
use crate::tier::{
warm_backend::{check_warm_backend, new_warm_backend},
tier_handlers::{
ERR_TIER_NAME_NOT_UPPERCASE, ERR_TIER_ALREADY_EXISTS, ERR_TIER_NOT_FOUND,
},
tier_admin::TierCreds,
tier_config::{TierType, TierConfig,},
};
use crate::error::{Error, Result, StorageError};
use crate::new_object_layer_fn;
use crate::tier::{
tier_admin::TierCreds,
tier_config::{TierConfig, TierType},
tier_handlers::{ERR_TIER_ALREADY_EXISTS, ERR_TIER_NAME_NOT_UPPERCASE, ERR_TIER_NOT_FOUND},
warm_backend::{check_warm_backend, new_warm_backend},
};
use crate::{
StorageAPI,
config::com::{CONFIG_PREFIX, read_config},
disk::RUSTFS_META_BUCKET,
store::ECStore,
store_api::{ObjectOptions, PutObjReader},
};
use rustfs_rio::HashReader;
use rustfs_utils::path::{SLASH_SEPARATOR, path_join};
use s3s::S3ErrorCode;

use super::{
tier_handlers::{ERR_TIER_PERM_ERR, ERR_TIER_CONNECT_ERR, ERR_TIER_INVALID_CREDENTIALS, ERR_TIER_BUCKET_NOT_FOUND},
tier_handlers::{ERR_TIER_BUCKET_NOT_FOUND, ERR_TIER_CONNECT_ERR, ERR_TIER_INVALID_CREDENTIALS, ERR_TIER_PERM_ERR},
warm_backend::WarmBackendImpl,
};

const TIER_CFG_REFRESH: Duration = Duration::from_secs(15 * 60);

pub const TIER_CONFIG_FILE: &str = "tier-config.json";
pub const TIER_CONFIG_FORMAT: u16 = 1;
pub const TIER_CONFIG_V1: u16 = 1;
pub const TIER_CONFIG_FILE: &str = "tier-config.json";
pub const TIER_CONFIG_FORMAT: u16 = 1;
pub const TIER_CONFIG_V1: u16 = 1;
pub const TIER_CONFIG_VERSION: u16 = 1;

lazy_static! {
@@ -50,32 +53,32 @@ lazy_static! {
const TIER_CFG_REFRESH_AT_HDR: &str = "X-RustFS-TierCfg-RefreshedAt";

pub const ERR_TIER_MISSING_CREDENTIALS: AdminError = AdminError {
code: "XRustFSAdminTierMissingCredentials",
message: "Specified remote credentials are empty",
code: "XRustFSAdminTierMissingCredentials",
message: "Specified remote credentials are empty",
status_code: StatusCode::FORBIDDEN,
};

pub const ERR_TIER_BACKEND_IN_USE: AdminError = AdminError {
code: "XRustFSAdminTierBackendInUse",
message: "Specified remote tier is already in use",
code: "XRustFSAdminTierBackendInUse",
message: "Specified remote tier is already in use",
status_code: StatusCode::CONFLICT,
};

pub const ERR_TIER_TYPE_UNSUPPORTED: AdminError = AdminError {
code: "XRustFSAdminTierTypeUnsupported",
message: "Specified tier type is unsupported",
code: "XRustFSAdminTierTypeUnsupported",
message: "Specified tier type is unsupported",
status_code: StatusCode::BAD_REQUEST,
};

pub const ERR_TIER_BACKEND_NOT_EMPTY: AdminError = AdminError {
code: "XRustFSAdminTierBackendNotEmpty",
message: "Specified remote backend is not empty",
code: "XRustFSAdminTierBackendNotEmpty",
message: "Specified remote backend is not empty",
status_code: StatusCode::BAD_REQUEST,
};

pub const ERR_TIER_INVALID_CONFIG: AdminError = AdminError {
code: "XRustFSAdminTierInvalidConfig",
message: "Unable to setup remote tier, check tier configuration",
code: "XRustFSAdminTierInvalidConfig",
message: "Unable to setup remote tier, check tier configuration",
status_code: StatusCode::BAD_REQUEST,
};

@@ -147,22 +150,22 @@ impl TierConfigMgr {
if !force {
let in_use = d.in_use().await;
match in_use {
Ok(b) => {
if b {
return Err(ERR_TIER_BACKEND_IN_USE);
}
}
Err(err) => {
warn!("tier add failed, err: {:?}", err);
if err.to_string().contains("connect") {
return Err(ERR_TIER_CONNECT_ERR);
} else if err.to_string().contains("authorization") {
return Err(ERR_TIER_INVALID_CREDENTIALS);
} else if err.to_string().contains("bucket") {
return Err(ERR_TIER_BUCKET_NOT_FOUND);
}
return Err(ERR_TIER_PERM_ERR);
}
Ok(b) => {
if b {
return Err(ERR_TIER_BACKEND_IN_USE);
}
}
Err(err) => {
warn!("tier add failed, err: {:?}", err);
if err.to_string().contains("connect") {
return Err(ERR_TIER_CONNECT_ERR);
} else if err.to_string().contains("authorization") {
return Err(ERR_TIER_INVALID_CREDENTIALS);
} else if err.to_string().contains("bucket") {
return Err(ERR_TIER_BUCKET_NOT_FOUND);
}
return Err(ERR_TIER_PERM_ERR);
}
}
}

@@ -279,7 +282,7 @@ impl TierConfigMgr {
minio.access_key = creds.access_key;
minio.secret_key = creds.secret_key;
}
_ => ()
_ => (),
}

let d = new_warm_backend(&cfg, true).await?;
@@ -290,9 +293,7 @@ impl TierConfigMgr {

pub async fn get_driver<'a>(&'a mut self, tier_name: &str) -> std::result::Result<&'a WarmBackendImpl, AdminError> {
Ok(match self.driver_cache.entry(tier_name.to_string()) {
Entry::Occupied(e) => {
e.into_mut()
}
Entry::Occupied(e) => e.into_mut(),
Entry::Vacant(e) => {
let t = self.tiers.get(tier_name);
if t.is_none() {
@@ -326,7 +327,9 @@ impl TierConfigMgr {

#[tracing::instrument(level = "debug", name = "tier_save", skip(self))]
pub async fn save(&self) -> std::result::Result<(), std::io::Error> {
let Some(api) = new_object_layer_fn() else { return Err(std::io::Error::other("errServerNotInitialized")) };
let Some(api) = new_object_layer_fn() else {
return Err(std::io::Error::other("errServerNotInitialized"));
};
//let (pr, opts) = GLOBAL_TierConfigMgr.write().config_reader()?;

self.save_tiering_config(api).await
@@ -340,7 +343,12 @@ impl TierConfigMgr {
self.save_config(api, &config_file, data).await
}

pub async fn save_config<S: StorageAPI>(&self, api: Arc<S>, file: &str, data: Bytes) -> std::result::Result<(), std::io::Error> {
pub async fn save_config<S: StorageAPI>(
&self,
api: Arc<S>,
file: &str,
data: Bytes,
) -> std::result::Result<(), std::io::Error> {
self.save_config_with_opts(
api,
file,
@@ -353,7 +361,13 @@ impl TierConfigMgr {
.await
}

pub async fn save_config_with_opts<S: StorageAPI>(&self, api: Arc<S>, file: &str, data: Bytes, opts: &ObjectOptions) -> std::result::Result<(), std::io::Error> {
pub async fn save_config_with_opts<S: StorageAPI>(
&self,
api: Arc<S>,
file: &str,
data: Bytes,
opts: &ObjectOptions,
) -> std::result::Result<(), std::io::Error> {
debug!("save tier config:{}", file);
let _ = api
.put_object(RUSTFS_META_BUCKET, file, &mut PutObjReader::from_vec(data.to_vec()), opts)
@@ -365,9 +379,7 @@ impl TierConfigMgr {
//let r = rand.New(rand.NewSource(time.Now().UnixNano()));
let mut rng = rand::rng();
let r = rng.random_range(0.0..1.0);
let rand_interval = || {
Duration::from_secs((r * 60_f64).round() as u64)
};
let rand_interval = || Duration::from_secs((r * 60_f64).round() as u64);

let mut t = interval(TIER_CFG_REFRESH + rand_interval());
loop {
@@ -394,10 +406,10 @@ impl TierConfigMgr {

async fn new_and_save_tiering_config<S: StorageAPI>(api: Arc<S>) -> Result<TierConfigMgr> {
let mut cfg = TierConfigMgr {
driver_cache: HashMap::new(),
tiers: HashMap::new(),
last_refreshed_at: OffsetDateTime::now_utc(),
};
driver_cache: HashMap::new(),
tiers: HashMap::new(),
last_refreshed_at: OffsetDateTime::now_utc(),
};
//lookup_configs(&mut cfg, api.clone()).await;
cfg.save_tiering_config(api).await?;

@@ -420,7 +432,7 @@ async fn load_tier_config(api: Arc<ECStore>) -> std::result::Result<TierConfigMg
}

let cfg;
let version = 1;//LittleEndian::read_u16(&data[2..4]);
let version = 1; //LittleEndian::read_u16(&data[2..4]);
match version {
TIER_CONFIG_V1/* | TIER_CONFIG_VERSION */ => {
cfg = match TierConfigMgr::unmarshal(&data.unwrap()) {
@@ -440,4 +452,4 @@ async fn load_tier_config(api: Arc<ECStore>) -> std::result::Result<TierConfigMg

pub fn is_err_config_not_found(err: &StorageError) -> bool {
matches!(err, StorageError::ObjectNotFound(_, _))
}
}

@@ -1,13 +1,10 @@
use std::{
time::{Duration, SystemTime, UNIX_EPOCH},
};
use rand::Rng;
use tracing::warn;
use http::status::StatusCode;
use serde::{Serialize, Deserialize};
use rand::Rng;
use serde::{Deserialize, Serialize};
use std::time::{Duration, SystemTime, UNIX_EPOCH};
use tracing::warn;

#[derive(Serialize, Deserialize)]
#[derive(Default, Debug, Clone)]
#[derive(Serialize, Deserialize, Default, Debug, Clone)]
#[serde(default)]
pub struct TierCreds {
#[serde(rename = "accessKey")]
@@ -26,4 +23,4 @@ pub struct TierCreds {

//#[serde(rename = "credsJson")]
pub creds_json: Vec<u8>,
}
}

@@ -1,5 +1,5 @@
use serde::{Deserialize, Serialize};
use std::fmt::Display;
use serde::{Serialize, Deserialize};
use tracing::info;

const C_TierConfigVer: &str = "v1";
@@ -9,8 +9,7 @@ const ERR_TIER_INVALID_CONFIG: &str = "invalid tier config";
const ERR_TIER_INVALID_CONFIG_VERSION: &str = "invalid tier config version";
const ERR_TIER_TYPE_UNSUPPORTED: &str = "unsupported tier type";

#[derive(Serialize, Deserialize)]
#[derive(Default, Debug, Clone)]
#[derive(Serialize, Deserialize, Default, Debug, Clone)]
pub enum TierType {
#[default]
Unsupported,
@@ -48,35 +47,19 @@ impl Display for TierType {
impl TierType {
pub fn new(sc_type: &str) -> Self {
match sc_type {
"S3" => {
TierType::S3
}
"RustFS" => {
TierType::RustFS
}
"MinIO" => {
TierType::MinIO
}
_ => {
TierType::Unsupported
}
"S3" => TierType::S3,
"RustFS" => TierType::RustFS,
"MinIO" => TierType::MinIO,
_ => TierType::Unsupported,
}
}

pub fn to_string(&self) -> String {
match self {
TierType::S3 => {
"s3".to_string()
}
TierType::RustFS => {
"rustfs".to_string()
}
TierType::MinIO => {
"minio".to_string()
}
_ => {
"unsupported".to_string()
}
TierType::S3 => "s3".to_string(),
TierType::RustFS => "rustfs".to_string(),
TierType::MinIO => "minio".to_string(),
_ => "unsupported".to_string(),
}
}
}
@@ -123,17 +106,17 @@ impl Clone for TierConfig {
m_.secret_key = "REDACTED".to_string();
m = Some(m_);
}
_ => ()
_ => (),
}
TierConfig {
version: self.version.clone(),
tier_type: self.tier_type.clone(),
name: self.name.clone(),
name: self.name.clone(),
s3: s3,
//azure: az,
//gcs: gcs,
rustfs: r,
minio: m,
rustfs: r,
minio: m,
}
}
}
@@ -154,15 +137,9 @@ impl TierConfig {

fn endpoint(&self) -> String {
match self.tier_type {
TierType::S3 => {
self.s3.as_ref().expect("err").endpoint.clone()
}
TierType::RustFS => {
self.rustfs.as_ref().expect("err").endpoint.clone()
}
TierType::MinIO => {
self.minio.as_ref().expect("err").endpoint.clone()
}
TierType::S3 => self.s3.as_ref().expect("err").endpoint.clone(),
TierType::RustFS => self.rustfs.as_ref().expect("err").endpoint.clone(),
TierType::MinIO => self.minio.as_ref().expect("err").endpoint.clone(),
_ => {
info!("unexpected tier type {}", self.tier_type);
"".to_string()
@@ -172,15 +149,9 @@ impl TierConfig {

fn bucket(&self) -> String {
match self.tier_type {
TierType::S3 => {
self.s3.as_ref().expect("err").bucket.clone()
}
TierType::RustFS => {
self.rustfs.as_ref().expect("err").bucket.clone()
}
TierType::MinIO => {
self.minio.as_ref().expect("err").bucket.clone()
}
TierType::S3 => self.s3.as_ref().expect("err").bucket.clone(),
TierType::RustFS => self.rustfs.as_ref().expect("err").bucket.clone(),
TierType::MinIO => self.minio.as_ref().expect("err").bucket.clone(),
_ => {
info!("unexpected tier type {}", self.tier_type);
"".to_string()
@@ -190,15 +161,9 @@ impl TierConfig {

fn prefix(&self) -> String {
match self.tier_type {
TierType::S3 => {
self.s3.as_ref().expect("err").prefix.clone()
}
TierType::RustFS => {
self.rustfs.as_ref().expect("err").prefix.clone()
}
TierType::MinIO => {
self.minio.as_ref().expect("err").prefix.clone()
}
TierType::S3 => self.s3.as_ref().expect("err").prefix.clone(),
TierType::RustFS => self.rustfs.as_ref().expect("err").prefix.clone(),
TierType::MinIO => self.minio.as_ref().expect("err").prefix.clone(),
_ => {
info!("unexpected tier type {}", self.tier_type);
"".to_string()
@@ -208,18 +173,12 @@ impl TierConfig {

fn region(&self) -> String {
match self.tier_type {
TierType::S3 => {
self.s3.as_ref().expect("err").region.clone()
}
TierType::RustFS => {
self.rustfs.as_ref().expect("err").region.clone()
}
TierType::MinIO => {
self.minio.as_ref().expect("err").region.clone()
}
TierType::S3 => self.s3.as_ref().expect("err").region.clone(),
TierType::RustFS => self.rustfs.as_ref().expect("err").region.clone(),
TierType::MinIO => self.minio.as_ref().expect("err").region.clone(),
_ => {
info!("unexpected tier type {}", self.tier_type);
"".to_string()
"".to_string()
}
}
}
@@ -227,8 +186,7 @@ impl TierConfig {

//type S3Options = impl Fn(TierS3) -> Pin<Box<Result<()>>> + Send + Sync + 'static;

#[derive(Serialize, Deserialize)]
#[derive(Default, Debug, Clone)]
#[derive(Serialize, Deserialize, Default, Debug, Clone)]
#[serde(default)]
pub struct TierS3 {
pub name: String,
@@ -257,17 +215,17 @@ pub struct TierS3 {
impl TierS3 {
fn new<F>(name: &str, access_key: &str, secret_key: &str, bucket: &str, options: Vec<F>) -> Result<TierConfig, std::io::Error>
where
F: Fn(TierS3) -> Box<Result<(), std::io::Error>> + Send + Sync + 'static
F: Fn(TierS3) -> Box<Result<(), std::io::Error>> + Send + Sync + 'static,
{
if name == "" {
return Err(std::io::Error::other(ERR_TIER_NAME_EMPTY));
}
let sc = TierS3 {
access_key: access_key.to_string(),
secret_key: secret_key.to_string(),
bucket: bucket.to_string(),
endpoint: "https://s3.amazonaws.com".to_string(),
region: "".to_string(),
access_key: access_key.to_string(),
secret_key: secret_key.to_string(),
bucket: bucket.to_string(),
endpoint: "https://s3.amazonaws.com".to_string(),
region: "".to_string(),
storage_class: "".to_string(),
..Default::default()
};
@@ -281,49 +239,54 @@ impl TierS3 {
Ok(TierConfig {
version: C_TierConfigVer.to_string(),
tier_type: TierType::S3,
name: name.to_string(),
s3: Some(sc),
name: name.to_string(),
s3: Some(sc),
..Default::default()
})
}
}

#[derive(Serialize, Deserialize)]
#[derive(Default, Debug, Clone)]
#[derive(Serialize, Deserialize, Default, Debug, Clone)]
#[serde(default)]
pub struct TierRustFS {
pub name: String,
pub endpoint: String,
pub endpoint: String,
#[serde(rename = "accesskey")]
pub access_key: String,
#[serde(rename = "secretkey")]
pub secret_key: String,
pub bucket: String,
pub prefix: String,
pub region: String,
pub bucket: String,
pub prefix: String,
pub region: String,
#[serde(rename = "storageclass")]
pub storage_class: String,
}

#[derive(Serialize, Deserialize)]
#[derive(Default, Debug, Clone)]
#[derive(Serialize, Deserialize, Default, Debug, Clone)]
#[serde(default)]
pub struct TierMinIO {
pub name: String,
pub endpoint: String,
pub endpoint: String,
#[serde(rename = "accesskey")]
pub access_key: String,
#[serde(rename = "secretkey")]
pub secret_key: String,
pub bucket: String,
pub prefix: String,
pub region: String,
pub bucket: String,
pub prefix: String,
pub region: String,
}

impl TierMinIO {
fn new<F>(name: &str, endpoint: &str, access_key: &str, secret_key: &str, bucket: &str, options: Vec<F>) -> Result<TierConfig, std::io::Error>
fn new<F>(
name: &str,
endpoint: &str,
access_key: &str,
secret_key: &str,
bucket: &str,
options: Vec<F>,
) -> Result<TierConfig, std::io::Error>
where
F: Fn(TierMinIO) -> Box<Result<(), std::io::Error>> + Send + Sync + 'static
F: Fn(TierMinIO) -> Box<Result<(), std::io::Error>> + Send + Sync + 'static,
{
if name == "" {
return Err(std::io::Error::other(ERR_TIER_NAME_EMPTY));
@@ -331,8 +294,8 @@ impl TierMinIO {
let m = TierMinIO {
access_key: access_key.to_string(),
secret_key: secret_key.to_string(),
bucket: bucket.to_string(),
endpoint: endpoint.to_string(),
bucket: bucket.to_string(),
endpoint: endpoint.to_string(),
..Default::default()
};

@@ -345,8 +308,8 @@ impl TierMinIO {
Ok(TierConfig {
version: C_TierConfigVer.to_string(),
tier_type: TierType::MinIO,
name: name.to_string(),
minio: Some(m),
name: name.to_string(),
minio: Some(m),
..Default::default()
})
}

@@ -1,51 +1,51 @@
use crate::client::admin_handler_utils::AdminError;
use tracing::warn;
use http::status::StatusCode;
use tracing::warn;

pub const ERR_TIER_ALREADY_EXISTS: AdminError = AdminError {
code: "XRustFSAdminTierAlreadyExists",
message: "Specified remote tier already exists",
code: "XRustFSAdminTierAlreadyExists",
message: "Specified remote tier already exists",
status_code: StatusCode::CONFLICT,
};

pub const ERR_TIER_NOT_FOUND: AdminError = AdminError {
code: "XRustFSAdminTierNotFound",
message: "Specified remote tier was not found",
code: "XRustFSAdminTierNotFound",
message: "Specified remote tier was not found",
status_code: StatusCode::NOT_FOUND,
};

pub const ERR_TIER_NAME_NOT_UPPERCASE: AdminError = AdminError {
code: "XRustFSAdminTierNameNotUpperCase",
message: "Tier name must be in uppercase",
code: "XRustFSAdminTierNameNotUpperCase",
message: "Tier name must be in uppercase",
status_code: StatusCode::BAD_REQUEST,
};

pub const ERR_TIER_BUCKET_NOT_FOUND: AdminError = AdminError {
code: "XRustFSAdminTierBucketNotFound",
message: "Remote tier bucket not found",
code: "XRustFSAdminTierBucketNotFound",
message: "Remote tier bucket not found",
status_code: StatusCode::BAD_REQUEST,
};

pub const ERR_TIER_INVALID_CREDENTIALS: AdminError = AdminError {
code: "XRustFSAdminTierInvalidCredentials",
message: "Invalid remote tier credentials",
code: "XRustFSAdminTierInvalidCredentials",
message: "Invalid remote tier credentials",
status_code: StatusCode::BAD_REQUEST,
};

pub const ERR_TIER_RESERVED_NAME: AdminError = AdminError {
code: "XRustFSAdminTierReserved",
message: "Cannot use reserved tier name",
code: "XRustFSAdminTierReserved",
message: "Cannot use reserved tier name",
status_code: StatusCode::BAD_REQUEST,
};

pub const ERR_TIER_PERM_ERR: AdminError = AdminError {
code: "TierPermErr",
message: "Tier Perm Err",
code: "TierPermErr",
message: "Tier Perm Err",
status_code: StatusCode::OK,
};

pub const ERR_TIER_CONNECT_ERR: AdminError = AdminError {
code: "TierConnectErr",
message: "Tier Connect Err",
code: "TierConnectErr",
message: "Tier Connect Err",
status_code: StatusCode::OK,
};
};

@@ -1,20 +1,20 @@
use std::collections::HashMap;
use bytes::Bytes;
use std::collections::HashMap;

use crate::client::{
admin_handler_utils::AdminError,
transition_api::{ReadCloser, ReaderImpl,},
transition_api::{ReadCloser, ReaderImpl},
};
use crate::error::is_err_bucket_not_found;
use tracing::{info, warn};
use crate::tier::{
tier_config::{TierType, TierConfig},
tier::{ERR_TIER_INVALID_CONFIG, ERR_TIER_TYPE_UNSUPPORTED},
tier_config::{TierConfig, TierType},
tier_handlers::{ERR_TIER_BUCKET_NOT_FOUND, ERR_TIER_PERM_ERR},
tier::{ERR_TIER_INVALID_CONFIG, ERR_TIER_TYPE_UNSUPPORTED,},
warm_backend_s3::WarmBackendS3,
warm_backend_rustfs::WarmBackendRustFS,
warm_backend_minio::WarmBackendMinIO,
warm_backend_rustfs::WarmBackendRustFS,
warm_backend_s3::WarmBackendS3,
};
use tracing::{info, warn};

pub type WarmBackendImpl = Box<dyn WarmBackend + Send + Sync + 'static>;

@@ -29,7 +29,13 @@ pub struct WarmBackendGetOpts {
#[async_trait::async_trait]
pub trait WarmBackend {
async fn put(&self, object: &str, r: ReaderImpl, length: i64) -> Result<String, std::io::Error>;
async fn put_with_meta(&self, object: &str, r: ReaderImpl, length: i64, meta: HashMap<String, String>) -> Result<String, std::io::Error>;
async fn put_with_meta(
&self,
object: &str,
r: ReaderImpl,
length: i64,
meta: HashMap<String, String>,
) -> Result<String, std::io::Error>;
async fn get(&self, object: &str, rv: &str, opts: WarmBackendGetOpts) -> Result<ReadCloser, std::io::Error>;
async fn remove(&self, object: &str, rv: &str) -> Result<(), std::io::Error>;
async fn in_use(&self) -> Result<bool, std::io::Error>;
@@ -37,7 +43,9 @@ pub trait WarmBackend {

pub async fn check_warm_backend(w: Option<&WarmBackendImpl>) -> Result<(), AdminError> {
let w = w.expect("err");
let remote_version_id = w.put(PROBE_OBJECT, ReaderImpl::Body(Bytes::from("RustFS".as_bytes().to_vec())), 5).await;
let remote_version_id = w
.put(PROBE_OBJECT, ReaderImpl::Body(Bytes::from("RustFS".as_bytes().to_vec())), 5)
.await;
if let Err(err) = remote_version_id {
return Err(ERR_TIER_PERM_ERR);
}
@@ -52,7 +60,7 @@ pub async fn check_warm_backend(w: Option<&WarmBackendImpl>) -> Result<(), Admin
return Err(ERR_TIER_MISSING_CREDENTIALS);
}*/
//else {
return Err(ERR_TIER_PERM_ERR);
return Err(ERR_TIER_PERM_ERR);
//}
}
if let Err(err) = w.remove(PROBE_OBJECT, &remote_version_id.expect("err")).await {
@@ -94,4 +102,4 @@ pub async fn new_warm_backend(tier: &TierConfig, probe: bool) -> Result<WarmBack
}

Ok(d.expect("err"))
}
}

@@ -1,24 +1,23 @@
use std::collections::HashMap;
use std::sync::{Arc, Mutex};

use tracing::warn;
use crate::client::{
admin_handler_utils::AdminError,
transition_api::{Options, ReaderImpl, ReadCloser, TransitionClient, TransitionCore},
credentials::{Credentials, SignatureType, Static, Value},
api_put_object::PutObjectOptions,
credentials::{Credentials, SignatureType, Static, Value},
transition_api::{Options, ReadCloser, ReaderImpl, TransitionClient, TransitionCore},
};
use crate::tier::{
tier_config::TierMinIO,
warm_backend::{WarmBackend, WarmBackendGetOpts},
warm_backend_s3::WarmBackendS3,
};

use tracing::warn;

const MAX_MULTIPART_PUT_OBJECT_SIZE: i64 = 1024 * 1024 * 1024 * 1024 * 5;
const MAX_PARTS_COUNT: i64 = 10000;
const MAX_PART_SIZE: i64 = 1024 * 1024 * 1024 * 5;
const MIN_PART_SIZE: i64 = 1024 * 1024 * 128;
const MAX_PARTS_COUNT: i64 = 10000;
const MAX_PART_SIZE: i64 = 1024 * 1024 * 1024 * 5;
const MIN_PART_SIZE: i64 = 1024 * 1024 * 128;

pub struct WarmBackendMinIO(WarmBackendS3);

@@ -40,26 +39,23 @@ impl WarmBackendMinIO {
};

let creds = Credentials::new(Static(Value {
access_key_id: conf.access_key.clone(),
access_key_id: conf.access_key.clone(),
secret_access_key: conf.secret_key.clone(),
session_token: "".to_string(),
session_token: "".to_string(),
signer_type: SignatureType::SignatureV4,
..Default::default()
}));
let opts = Options {
creds: creds,
secure: u.scheme() == "https",
creds: creds,
secure: u.scheme() == "https",
//transport: GLOBAL_RemoteTargetTransport,
trailing_headers: true,
..Default::default()
};
let scheme = u.scheme();
let default_port = if scheme == "https" {
443
} else {
80
};
let client = TransitionClient::new(&format!("{}:{}", u.host_str().expect("err"), u.port().unwrap_or(default_port)), opts).await?;
let default_port = if scheme == "https" { 443 } else { 80 };
let client =
TransitionClient::new(&format!("{}:{}", u.host_str().expect("err"), u.port().unwrap_or(default_port)), opts).await?;
//client.set_appinfo(format!("minio-tier-{}", tier), ReleaseTag);

let client = Arc::new(client);
@@ -67,8 +63,8 @@ impl WarmBackendMinIO {
Ok(Self(WarmBackendS3 {
client,
core,
bucket: conf.bucket.clone(),
prefix: conf.prefix.strip_suffix("/").unwrap_or(&conf.prefix).to_owned(),
bucket: conf.bucket.clone(),
prefix: conf.prefix.strip_suffix("/").unwrap_or(&conf.prefix).to_owned(),
storage_class: "".to_string(),
}))
}
@@ -76,16 +72,30 @@ impl WarmBackendMinIO {

#[async_trait::async_trait]
impl WarmBackend for WarmBackendMinIO {
async fn put_with_meta(&self, object: &str, r: ReaderImpl, length: i64, meta: HashMap<String, String>) -> Result<String, std::io::Error> {
async fn put_with_meta(
&self,
object: &str,
r: ReaderImpl,
length: i64,
meta: HashMap<String, String>,
) -> Result<String, std::io::Error> {
let part_size = optimal_part_size(length)?;
let client = self.0.client.clone();
let res = client.put_object(&self.0.bucket, &self.0.get_dest(object), r, length, &PutObjectOptions {
storage_class: self.0.storage_class.clone(),
part_size: part_size as u64,
disable_content_sha256: true,
user_metadata: meta,
..Default::default()
}).await?;
let res = client
.put_object(
&self.0.bucket,
&self.0.get_dest(object),
r,
length,
&PutObjectOptions {
storage_class: self.0.storage_class.clone(),
part_size: part_size as u64,
disable_content_sha256: true,
user_metadata: meta,
..Default::default()
},
)
.await?;
//self.ToObjectError(err, object)
Ok(res.version_id)
}

@@ -4,9 +4,9 @@ use tracing::warn;

use crate::client::{
admin_handler_utils::AdminError,
transition_api::{Options, ReaderImpl, ReadCloser, TransitionClient, TransitionCore},
credentials::{Credentials, SignatureType, Static, Value},
api_put_object::PutObjectOptions,
credentials::{Credentials, SignatureType, Static, Value},
transition_api::{Options, ReadCloser, ReaderImpl, TransitionClient, TransitionCore},
};
use crate::tier::{
tier_config::TierRustFS,
@@ -15,9 +15,9 @@ use crate::tier::{
};

const MAX_MULTIPART_PUT_OBJECT_SIZE: i64 = 1024 * 1024 * 1024 * 1024 * 5;
const MAX_PARTS_COUNT: i64 = 10000;
const MAX_PART_SIZE: i64 = 1024 * 1024 * 1024 * 5;
const MIN_PART_SIZE: i64 = 1024 * 1024 * 128;
const MAX_PARTS_COUNT: i64 = 10000;
const MAX_PART_SIZE: i64 = 1024 * 1024 * 1024 * 5;
const MIN_PART_SIZE: i64 = 1024 * 1024 * 128;

pub struct WarmBackendRustFS(WarmBackendS3);

@@ -37,26 +37,23 @@ impl WarmBackendRustFS {
};

let creds = Credentials::new(Static(Value {
access_key_id: conf.access_key.clone(),
access_key_id: conf.access_key.clone(),
secret_access_key: conf.secret_key.clone(),
session_token: "".to_string(),
session_token: "".to_string(),
signer_type: SignatureType::SignatureV4,
..Default::default()
}));
let opts = Options {
creds: creds,
secure: u.scheme() == "https",
creds: creds,
secure: u.scheme() == "https",
//transport: GLOBAL_RemoteTargetTransport,
trailing_headers: true,
..Default::default()
};
let scheme = u.scheme();
let default_port = if scheme == "https" {
443
} else {
80
};
let client = TransitionClient::new(&format!("{}:{}", u.host_str().expect("err"), u.port().unwrap_or(default_port)), opts).await?;
let default_port = if scheme == "https" { 443 } else { 80 };
let client =
TransitionClient::new(&format!("{}:{}", u.host_str().expect("err"), u.port().unwrap_or(default_port)), opts).await?;
//client.set_appinfo(format!("rustfs-tier-{}", tier), ReleaseTag);

let client = Arc::new(client);
@@ -64,8 +61,8 @@ impl WarmBackendRustFS {
Ok(Self(WarmBackendS3 {
client,
core,
bucket: conf.bucket.clone(),
prefix: conf.prefix.strip_suffix("/").unwrap_or(&conf.prefix).to_owned(),
bucket: conf.bucket.clone(),
prefix: conf.prefix.strip_suffix("/").unwrap_or(&conf.prefix).to_owned(),
storage_class: "".to_string(),
}))
}
@@ -73,16 +70,30 @@ impl WarmBackendRustFS {

#[async_trait::async_trait]
impl WarmBackend for WarmBackendRustFS {
async fn put_with_meta(&self, object: &str, r: ReaderImpl, length: i64, meta: HashMap<String, String>) -> Result<String, std::io::Error> {
async fn put_with_meta(
&self,
object: &str,
r: ReaderImpl,
length: i64,
meta: HashMap<String, String>,
) -> Result<String, std::io::Error> {
let part_size = optimal_part_size(length)?;
let client = self.0.client.clone();
let res = client.put_object(&self.0.bucket, &self.0.get_dest(object), r, length, &PutObjectOptions {
storage_class: self.0.storage_class.clone(),
part_size: part_size as u64,
disable_content_sha256: true,
user_metadata: meta,
..Default::default()
}).await?;
let res = client
.put_object(
&self.0.bucket,
&self.0.get_dest(object),
r,
length,
&PutObjectOptions {
storage_class: self.0.storage_class.clone(),
part_size: part_size as u64,
disable_content_sha256: true,
user_metadata: meta,
..Default::default()
},
)
.await?;
//self.ToObjectError(err, object)
Ok(res.version_id)
}

@@ -2,21 +2,21 @@ use std::collections::HashMap;
use std::sync::Arc;
use url::Url;

use crate::error::ErrorResponse;
use crate::error::error_resp_to_object_err;
use crate::client::{
api_get_options::GetObjectOptions,
credentials::{Credentials, Static, Value, SignatureType},
transition_api::{ReaderImpl, ReadCloser},
api_put_object::PutObjectOptions,
api_remove::RemoveObjectOptions,
transition_api::{Options, TransitionClient, TransitionCore,},
credentials::{Credentials, SignatureType, Static, Value},
transition_api::{Options, TransitionClient, TransitionCore},
transition_api::{ReadCloser, ReaderImpl},
};
use rustfs_utils::path::SLASH_SEPARATOR;
use crate::error::ErrorResponse;
use crate::error::error_resp_to_object_err;
use crate::tier::{
tier_config::TierS3,
warm_backend::{WarmBackend, WarmBackendGetOpts,}
warm_backend::{WarmBackend, WarmBackendGetOpts},
};
use rustfs_utils::path::SLASH_SEPARATOR;

pub struct WarmBackendS3 {
pub client: Arc<TransitionClient>,
@@ -35,16 +35,22 @@ impl WarmBackendS3 {
}
};

if conf.aws_role_web_identity_token_file == "" && conf.aws_role_arn != "" || conf.aws_role_web_identity_token_file != "" && conf.aws_role_arn == "" {
if conf.aws_role_web_identity_token_file == "" && conf.aws_role_arn != ""
|| conf.aws_role_web_identity_token_file != "" && conf.aws_role_arn == ""
{
return Err(std::io::Error::other("both the token file and the role ARN are required"));
}
else if conf.access_key == "" && conf.secret_key != "" || conf.access_key != "" && conf.secret_key == "" {
} else if conf.access_key == "" && conf.secret_key != "" || conf.access_key != "" && conf.secret_key == "" {
return Err(std::io::Error::other("both the access and secret keys are required"));
}
else if conf.aws_role && (conf.aws_role_web_identity_token_file != "" || conf.aws_role_arn != "" || conf.access_key != "" || conf.secret_key != "") {
return Err(std::io::Error::other("AWS Role cannot be activated with static credentials or the web identity token file"));
}
else if conf.bucket == "" {
} else if conf.aws_role
&& (conf.aws_role_web_identity_token_file != ""
|| conf.aws_role_arn != ""
|| conf.access_key != ""
|| conf.secret_key != "")
{
return Err(std::io::Error::other(
"AWS Role cannot be activated with static credentials or the web identity token file",
));
} else if conf.bucket == "" {
return Err(std::io::Error::other("no bucket name was provided"));
}

@@ -53,19 +59,18 @@ impl WarmBackendS3 {
if conf.access_key != "" && conf.secret_key != "" {
//creds = Credentials::new_static_v4(conf.access_key, conf.secret_key, "");
creds = Credentials::new(Static(Value {
access_key_id: conf.access_key.clone(),
access_key_id: conf.access_key.clone(),
secret_access_key: conf.secret_key.clone(),
session_token: "".to_string(),
session_token: "".to_string(),
signer_type: SignatureType::SignatureV4,
..Default::default()
}));
}
else {
} else {
return Err(std::io::Error::other("insufficient parameters for S3 backend authentication"));
}
let opts = Options {
creds: creds,
secure: u.scheme() == "https",
creds: creds,
secure: u.scheme() == "https",
//transport: GLOBAL_RemoteTargetTransport,
..Default::default()
};
@@ -77,8 +82,8 @@ impl WarmBackendS3 {
Ok(Self {
client,
core,
bucket: conf.bucket.clone(),
prefix: conf.prefix.clone().trim_matches('/').to_string(),
bucket: conf.bucket.clone(),
prefix: conf.prefix.clone().trim_matches('/').to_string(),
storage_class: conf.storage_class.clone(),
})
}
@@ -103,14 +108,28 @@ impl WarmBackendS3 {

#[async_trait::async_trait]
impl WarmBackend for WarmBackendS3 {
async fn put_with_meta(&self, object: &str, r: ReaderImpl, length: i64, meta: HashMap<String, String>) -> Result<String, std::io::Error> {
async fn put_with_meta(
&self,
object: &str,
r: ReaderImpl,
length: i64,
meta: HashMap<String, String>,
) -> Result<String, std::io::Error> {
let client = self.client.clone();
let res = client.put_object(&self.bucket, &self.get_dest(object), r, length, &PutObjectOptions {
send_content_md5: true,
storage_class: self.storage_class.clone(),
user_metadata: meta,
..Default::default()
}).await?;
let res = client
.put_object(
&self.bucket,
&self.get_dest(object),
r,
length,
&PutObjectOptions {
send_content_md5: true,
storage_class: self.storage_class.clone(),
user_metadata: meta,
..Default::default()
},
)
.await?;
Ok(res.version_id)
}

@@ -125,7 +144,7 @@ impl WarmBackend for WarmBackendS3 {
gopts.version_id = rv.to_string();
}
if opts.start_offset >= 0 && opts.length > 0 {
if let Err(err) = gopts.set_range(opts.start_offset, opts.start_offset+opts.length-1) {
if let Err(err) = gopts.set_range(opts.start_offset, opts.start_offset + opts.length - 1) {
return Err(std::io::Error::other(err));
}
}
@@ -146,8 +165,11 @@ impl WarmBackend for WarmBackendS3 {
}

async fn in_use(&self) -> Result<bool, std::io::Error> {
let result = self.core.list_objects_v2(&self.bucket, &self.prefix, "", "", SLASH_SEPARATOR, 1).await?;
let result = self
.core
.list_objects_v2(&self.bucket, &self.prefix, "", "", SLASH_SEPARATOR, 1)
.await?;

Ok(result.common_prefixes.len() > 0 || result.contents.len() > 0)
}
}
}

@@ -1,7 +1,7 @@
use md5::{Digest as Md5Digest, Md5};
use sha2::{
digest::{Reset, Update},
Digest, Sha256 as sha_sha256,
digest::{Reset, Update},
};
pub trait Hasher {
fn write(&mut self, bytes: &[u8]);
@@ -181,4 +181,4 @@ pub fn sum_md5_base64(data: &[u8]) -> String {
let mut hash = MD5::new();
hash.write(data);
base64_simd::URL_SAFE_NO_PAD.encode_to_string(hash.sum())
}
}

@@ -13,9 +13,9 @@ use crate::{
};

// use futures::stream::Stream;
use std::io::{Error, Result};
use super::hasher::{Hasher, Sha256, MD5};
use super::hasher::{Hasher, MD5, Sha256};
use futures::Stream;
use std::io::{Error, Result};

pin_project_lite::pin_project! {
#[derive(Default)]
@@ -101,20 +101,8 @@ pin_project_lite::pin_project! {

impl<S> HashReader<S> {
pub fn new(inner: S, size: usize, md5_hex: Option<String>, sha256_hex: Option<String>, actual_size: usize) -> Self {
let md5 = {
if md5_hex.is_some() {
Some(MD5::new())
} else {
None
}
};
let sha256 = {
if sha256_hex.is_some() {
Some(Sha256::new())
} else {
None
}
};
let md5 = { if md5_hex.is_some() { Some(MD5::new()) } else { None } };
let sha256 = { if sha256_hex.is_some() { Some(Sha256::new()) } else { None } };
Self {
inner,
size,

@@ -63,9 +63,9 @@ pub mod pools;
pub mod rebalance;
pub mod service_account;
pub mod sts;
pub mod tier;
pub mod trace;
pub mod user;
pub mod tier;
use urlencoding::decode;

#[derive(Debug, Serialize, Default)]

@@ -3,28 +3,28 @@ use std::str::from_utf8;
use http::{HeaderMap, StatusCode};
use iam::get_global_action_cred;
use matchit::Params;
use s3s::{header::CONTENT_TYPE, s3_error, Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result};
use s3s::{Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result, header::CONTENT_TYPE, s3_error};
use serde::Deserialize;
use serde_urlencoded::from_bytes;
use tracing::{warn, debug, info};
use tracing::{debug, info, warn};

use crypto::{encrypt_data, decrypt_data};
use crate::{
admin::{router::Operation, utils::has_space_be},
auth::{check_key_valid, get_session_token},
};
use crypto::{decrypt_data, encrypt_data};
use ecstore::{
client::admin_handler_utils::AdminError,
config::storageclass,
global::GLOBAL_TierConfigMgr,
tier::{
tier::{ERR_TIER_BACKEND_IN_USE, ERR_TIER_BACKEND_NOT_EMPTY, ERR_TIER_MISSING_CREDENTIALS},
tier_admin::TierCreds,
tier_config::{TierConfig, TierType},
tier_handlers::{
ERR_TIER_NAME_NOT_UPPERCASE, ERR_TIER_ALREADY_EXISTS, ERR_TIER_NOT_FOUND, ERR_TIER_CONNECT_ERR,
ERR_TIER_INVALID_CREDENTIALS,
ERR_TIER_ALREADY_EXISTS, ERR_TIER_CONNECT_ERR, ERR_TIER_INVALID_CREDENTIALS, ERR_TIER_NAME_NOT_UPPERCASE,
ERR_TIER_NOT_FOUND,
},
tier::{ERR_TIER_MISSING_CREDENTIALS, ERR_TIER_BACKEND_NOT_EMPTY, ERR_TIER_BACKEND_IN_USE},
},
};

@@ -92,29 +92,44 @@ impl Operation for AddTier {
}
match args.name.as_str() {
storageclass::STANDARD | storageclass::RRS => {
warn!("tier reserved name, args.name: {}", args.name);
return Err(s3_error!(InvalidRequest, "Cannot use reserved tier name"));
warn!("tier reserved name, args.name: {}", args.name);
return Err(s3_error!(InvalidRequest, "Cannot use reserved tier name"));
}
&_ => ()
&_ => (),
}

let mut tier_config_mgr = GLOBAL_TierConfigMgr.write().await;
//tier_config_mgr.reload(api);
match tier_config_mgr.add(args, force).await {
Err(ERR_TIER_ALREADY_EXISTS) => {
return Err(S3Error::with_message(S3ErrorCode::Custom("TierNameAlreadyExist".into()), "tier name already exists!"));
return Err(S3Error::with_message(
S3ErrorCode::Custom("TierNameAlreadyExist".into()),
"tier name already exists!",
));
}
Err(ERR_TIER_NAME_NOT_UPPERCASE) => {
return Err(S3Error::with_message(S3ErrorCode::Custom("TierNameNotUppercase".into()), "tier name not uppercase!"));
return Err(S3Error::with_message(
S3ErrorCode::Custom("TierNameNotUppercase".into()),
"tier name not uppercase!",
));
}
Err(ERR_TIER_BACKEND_IN_USE) => {
return Err(S3Error::with_message(S3ErrorCode::Custom("TierNameBackendInUse!".into()), "tier name backend in use!"));
return Err(S3Error::with_message(
S3ErrorCode::Custom("TierNameBackendInUse!".into()),
"tier name backend in use!",
));
}
Err(ERR_TIER_CONNECT_ERR) => {
return Err(S3Error::with_message(S3ErrorCode::Custom("TierConnectError".into()), "tier connect error!"));
return Err(S3Error::with_message(
S3ErrorCode::Custom("TierConnectError".into()),
"tier connect error!",
));
}
Err(ERR_TIER_INVALID_CREDENTIALS) => {
return Err(S3Error::with_message(S3ErrorCode::Custom(ERR_TIER_INVALID_CREDENTIALS.code.into()), ERR_TIER_INVALID_CREDENTIALS.message));
return Err(S3Error::with_message(
S3ErrorCode::Custom(ERR_TIER_INVALID_CREDENTIALS.code.into()),
ERR_TIER_INVALID_CREDENTIALS.message,
));
}
Err(e) => {
warn!("tier_config_mgr add failed, e: {:?}", e);
@@ -182,7 +197,10 @@ impl Operation for EditTier {
return Err(S3Error::with_message(S3ErrorCode::Custom("TierNotFound".into()), "tier not found!"));
}
Err(ERR_TIER_MISSING_CREDENTIALS) => {
return Err(S3Error::with_message(S3ErrorCode::Custom("TierMissingCredentials".into()), "tier missing credentials!"));
return Err(S3Error::with_message(
S3ErrorCode::Custom("TierMissingCredentials".into()),
"tier missing credentials!",
));
}
Err(e) => {
warn!("tier_config_mgr edit failed, e: {:?}", e);
@@ -281,7 +299,10 @@ impl Operation for RemoveTier {
}
Err(e) => {
warn!("tier_config_mgr remove failed, e: {:?}", e);
return Err(S3Error::with_message(S3ErrorCode::Custom("TierRemoveFailed".into()), "tier remove failed"));
return Err(S3Error::with_message(
S3ErrorCode::Custom("TierRemoveFailed".into()),
"tier remove failed",
));
}
Ok(_) => (),
}

@@ -7,7 +7,7 @@ pub mod utils;
use handlers::{
group, policys, pools, rebalance,
service_account::{AddServiceAccount, DeleteServiceAccount, InfoServiceAccount, ListServiceAccount, UpdateServiceAccount},
sts, user, tier,
sts, tier, user,
};

use handlers::{GetReplicationMetricsHandler, ListRemoteTargetHandler, RemoveRemoteTargetHandler, SetRemoteTargetHandler};

@@ -49,11 +49,11 @@ use ecstore::store_api::ObjectToDelete;
use ecstore::store_api::PutObjReader;
use ecstore::store_api::StorageAPI;
// use ecstore::store_api::RESERVED_METADATA_PREFIX;
use ecstore::bucket::lifecycle::bucket_lifecycle_ops::validate_transition_tier;
use ecstore::bucket::utils::serialize;
use ecstore::cmd::bucket_replication::ReplicationStatusType;
use ecstore::cmd::bucket_replication::ReplicationType;
use ecstore::store_api::RESERVED_METADATA_PREFIX_LOWER;
use ecstore::bucket::lifecycle::bucket_lifecycle_ops::validate_transition_tier;
use futures::pin_mut;
use futures::{Stream, StreamExt};
use http::HeaderMap;
@@ -96,10 +96,7 @@ use transform_stream::AsyncTryStream;
use uuid::Uuid;

use ecstore::bucket::{
lifecycle::{
lifecycle::Lifecycle,
bucket_lifecycle_ops::ERR_INVALID_STORAGECLASS
},
lifecycle::{bucket_lifecycle_ops::ERR_INVALID_STORAGECLASS, lifecycle::Lifecycle},
object_lock::objectlock_sys::BucketObjectLockSys,
};

@@ -1602,7 +1599,10 @@ impl S3 for FS {

let rcfg = metadata_sys::get_object_lock_config(&bucket).await;
if rcfg.is_err() {
return Err(S3Error::with_message(S3ErrorCode::Custom("BucketLockIsNotExist".into()), "bucket lock is not exist."));
return Err(S3Error::with_message(
S3ErrorCode::Custom("BucketLockIsNotExist".into()),
"bucket lock is not exist.",
));
}
let rcfg = rcfg.expect("get_lifecycle_config err!").0;

@@ -1612,12 +1612,18 @@ impl S3 for FS {

if let Err(err) = input_cfg.validate(&rcfg).await {
//return Err(S3Error::with_message(S3ErrorCode::Custom("BucketLockValidateFailed".into()), "bucket lock validate failed."));
return Err(S3Error::with_message(S3ErrorCode::Custom("ValidateFailed".into()), format!("{}", err.to_string())));
return Err(S3Error::with_message(
S3ErrorCode::Custom("ValidateFailed".into()),
format!("{}", err.to_string()),
));
}


if let Err(err) = validate_transition_tier(&input_cfg).await {
//warn!("lifecycle_configuration add failed, err: {:?}", err);
return Err(S3Error::with_message(S3ErrorCode::Custom("CustomError".into()), format!("{}", err.to_string())));
return Err(S3Error::with_message(
S3ErrorCode::Custom("CustomError".into()),
format!("{}", err.to_string()),
));
}

let data = try_!(serialize(&input_cfg));
@@ -2070,7 +2076,10 @@ impl S3 for FS {
return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string()));
};

if let Err(e) = store.get_object_reader(&bucket, &key, None, HeaderMap::new(), &ObjectOptions::default()).await {
if let Err(e) = store
.get_object_reader(&bucket, &key, None, HeaderMap::new(), &ObjectOptions::default())
.await
{
return Err(S3Error::with_message(S3ErrorCode::InternalError, format!("{}", e)));
}