chore: upgrade dependencies and migrate to aws-lc-rs (#1333)

Author: houseme
Date: 2026-01-02 00:02:34 +08:00
Committed by: GitHub
parent 61b3100260
commit 8d7cd4cb1b
96 changed files with 2555 additions and 2775 deletions

View File

@@ -498,19 +498,19 @@ impl BucketTargetSys {
bucket: bucket.to_string(),
})?;
if arn.arn_type == BucketTargetType::ReplicationService {
    if let Ok((config, _)) = get_replication_config(bucket).await {
        for rule in config.filter_target_arns(&ObjectOpts {
            op_type: ReplicationType::All,
            ..Default::default()
        }) {
            if rule == arn_str || config.role == arn_str {
                let arn_remotes_map = self.arn_remotes_map.read().await;
                if arn_remotes_map.get(arn_str).is_some() {
                    return Err(BucketTargetError::BucketRemoteRemoveDisallowed {
                        bucket: bucket.to_string(),
                    });
                }
            }
        }
    }
}

if arn.arn_type == BucketTargetType::ReplicationService
    && let Ok((config, _)) = get_replication_config(bucket).await
{
    for rule in config.filter_target_arns(&ObjectOpts {
        op_type: ReplicationType::All,
        ..Default::default()
    }) {
        if rule == arn_str || config.role == arn_str {
            let arn_remotes_map = self.arn_remotes_map.read().await;
            if arn_remotes_map.get(arn_str).is_some() {
                return Err(BucketTargetError::BucketRemoteRemoveDisallowed {
                    bucket: bucket.to_string(),
                });
            }
        }
    }
}
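Note: the bulk of this commit is one mechanical rewrite, repeated across files: let chains (stabilized in Rust 1.88, edition 2024 only) allow a boolean guard and an `if let` binding to share a single `&&` condition, removing one nesting level with no behavior change. A minimal before/after sketch; `lookup` and the values are illustrative, not from this repo:

fn lookup(key: &str) -> Option<i32> {
    if key == "hit" { Some(42) } else { None }
}

// Rust 2021 shape: a guard wrapping a nested `if let`.
fn before(key: &str, enabled: bool) -> Option<i32> {
    if enabled {
        if let Some(v) = lookup(key) {
            return Some(v + 1);
        }
    }
    None
}

// Rust 2024 shape: guard and binding form one chain, as in the hunk above.
fn after(key: &str, enabled: bool) -> Option<i32> {
    if enabled && let Some(v) = lookup(key) {
        return Some(v + 1);
    }
    None
}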
@@ -691,22 +691,22 @@ impl BucketTargetSys {
}
// Add new targets
if let Some(new_targets) = targets {
    if !new_targets.is_empty() {
        for target in &new_targets.targets {
            if let Ok(client) = self.get_remote_target_client_internal(target).await {
                arn_remotes_map.insert(
                    target.arn.clone(),
                    ArnTarget {
                        client: Some(Arc::new(client)),
                        last_refresh: OffsetDateTime::now_utc(),
                    },
                );
                self.update_bandwidth_limit(bucket, &target.arn, target.bandwidth_limit);
            }
        }
        targets_map.insert(bucket.to_string(), new_targets.targets.clone());
    }
}

if let Some(new_targets) = targets
    && !new_targets.is_empty()
{
    for target in &new_targets.targets {
        if let Ok(client) = self.get_remote_target_client_internal(target).await {
            arn_remotes_map.insert(
                target.arn.clone(),
                ArnTarget {
                    client: Some(Arc::new(client)),
                    last_refresh: OffsetDateTime::now_utc(),
                },
            );
            self.update_bandwidth_limit(bucket, &target.arn, target.bandwidth_limit);
        }
    }
    targets_map.insert(bucket.to_string(), new_targets.targets.clone());
}

View File

@@ -31,10 +31,10 @@ impl BucketObjectLockSys {
}
pub async fn get(bucket: &str) -> Option<DefaultRetention> {
if let Ok(object_lock_config) = get_object_lock_config(bucket).await {
if let Some(object_lock_rule) = object_lock_config.0.rule {
return object_lock_rule.default_retention;
}
if let Ok(object_lock_config) = get_object_lock_config(bucket).await
&& let Some(object_lock_rule) = object_lock_config.0.rule
{
return object_lock_rule.default_retention;
}
None
}
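A chain may also carry several `let` bindings, which is what flattens nested Option/Result ladders like the object-lock lookup above. An illustrative sketch with a hypothetical `get_config`:

struct Config { rule: Option<String> }

fn get_config(ok: bool) -> Result<Config, ()> {
    if ok { Ok(Config { rule: Some("retain".into()) }) } else { Err(()) }
}

fn default_rule(ok: bool) -> Option<String> {
    // Both bindings share one condition; the body runs only if both match.
    if let Ok(cfg) = get_config(ok)
        && let Some(rule) = cfg.rule
    {
        return Some(rule);
    }
    None
}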

View File

@@ -55,10 +55,10 @@ impl ReplicationConfigurationExt for ReplicationConfiguration {
if !has_arn {
has_arn = true;
}
if let Some(status) = &rule.existing_object_replication {
if status.status == ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::ENABLED) {
return (true, true);
}
if let Some(status) = &rule.existing_object_replication
&& status.status == ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::ENABLED)
{
return (true, true);
}
}
}
@@ -86,12 +86,11 @@ impl ReplicationConfigurationExt for ReplicationConfiguration {
continue;
}
if let Some(status) = &rule.existing_object_replication {
if obj.existing_object
&& status.status == ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::DISABLED)
{
continue;
}
if let Some(status) = &rule.existing_object_replication
&& obj.existing_object
&& status.status == ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::DISABLED)
{
continue;
}
if !obj.name.starts_with(rule.prefix()) {
@@ -145,12 +144,11 @@ impl ReplicationConfigurationExt for ReplicationConfiguration {
continue;
}
if let Some(status) = &rule.existing_object_replication {
if obj.existing_object
&& status.status == ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::DISABLED)
{
return false;
}
if let Some(status) = &rule.existing_object_replication
&& obj.existing_object
&& status.status == ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::DISABLED)
{
return false;
}
if obj.op_type == ReplicationType::Delete {
@@ -186,20 +184,20 @@ impl ReplicationConfigurationExt for ReplicationConfiguration {
continue;
}
if let Some(filter) = &rule.filter {
    if let Some(filter_prefix) = &filter.prefix {
        if !prefix.is_empty() && !filter_prefix.is_empty() {
            // The provided prefix must fall within the rule prefix
            if !recursive && !prefix.starts_with(filter_prefix) {
                continue;
            }
        }
        // When recursive, skip this rule if it does not match the test prefix or hierarchy
        if recursive && !rule.prefix().starts_with(prefix) && !prefix.starts_with(rule.prefix()) {
            continue;
        }
    }
}

if let Some(filter) = &rule.filter
    && let Some(filter_prefix) = &filter.prefix
{
    if !prefix.is_empty() && !filter_prefix.is_empty() {
        // The provided prefix must fall within the rule prefix
        if !recursive && !prefix.starts_with(filter_prefix) {
            continue;
        }
    }
    // When recursive, skip this rule if it does not match the test prefix or hierarchy
    if recursive && !rule.prefix().starts_with(prefix) && !prefix.starts_with(rule.prefix()) {
        continue;
    }
}
return true;
}

View File

@@ -512,20 +512,20 @@ impl<S: StorageAPI> ReplicationPool<S> {
if !lrg_workers.is_empty() {
let index = (hash as usize) % lrg_workers.len();
if let Some(worker) = lrg_workers.get(index) {
    if worker.try_send(ReplicationOperation::Object(Box::new(ri.clone()))).is_err() {
        // Queue to MRF if worker is busy
        let _ = self.mrf_save_tx.try_send(ri.to_mrf_entry());
        // Try to add more workers if possible
        let max_l_workers = *self.max_l_workers.read().await;
        let existing = lrg_workers.len();
        if self.active_lrg_workers() < std::cmp::min(max_l_workers, LARGE_WORKER_COUNT) as i32 {
            let workers = std::cmp::min(existing + 1, max_l_workers);
            drop(lrg_workers);
            self.resize_lrg_workers(workers, existing).await;
        }
    }
}

if let Some(worker) = lrg_workers.get(index)
    && worker.try_send(ReplicationOperation::Object(Box::new(ri.clone()))).is_err()
{
    // Queue to MRF if worker is busy
    let _ = self.mrf_save_tx.try_send(ri.to_mrf_entry());
    // Try to add more workers if possible
    let max_l_workers = *self.max_l_workers.read().await;
    let existing = lrg_workers.len();
    if self.active_lrg_workers() < std::cmp::min(max_l_workers, LARGE_WORKER_COUNT) as i32 {
        let workers = std::cmp::min(existing + 1, max_l_workers);
        drop(lrg_workers);
        self.resize_lrg_workers(workers, existing).await;
    }
}
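Worth noting in this hunk: the explicit `drop(lrg_workers)` stays ahead of the resize call. With tokio's RwLock, awaiting an operation that takes a write lock while still holding a read guard on the same lock would deadlock. A simplified sketch of that invariant; `Pool` is a stand-in, not the real type:

use std::sync::Arc;
use tokio::sync::RwLock;

struct Pool {
    workers: Arc<RwLock<Vec<u32>>>,
}

impl Pool {
    async fn resize(&self, target: usize) {
        // Blocks until every read guard is released.
        let mut w = self.workers.write().await;
        w.resize(target, 0);
    }

    async fn maybe_grow(&self) {
        let workers = self.workers.read().await;
        let target = workers.len() + 1;
        drop(workers); // release the read guard before awaiting the write path
        self.resize(target).await;
    }
}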
@@ -539,47 +539,45 @@ impl<S: StorageAPI> ReplicationPool<S> {
_ => self.get_worker_ch(&ri.bucket, &ri.name, ri.size).await,
};
if let Some(channel) = ch {
    if channel.try_send(ReplicationOperation::Object(Box::new(ri.clone()))).is_err() {
        // Queue to MRF if all workers are busy
        let _ = self.mrf_save_tx.try_send(ri.to_mrf_entry());
        // Try to scale up workers based on priority
        let priority = self.priority.read().await.clone();
        let max_workers = *self.max_workers.read().await;
        match priority {
            ReplicationPriority::Fast => {
                // Log warning about unable to keep up
                info!("Warning: Unable to keep up with incoming traffic");
            }
            ReplicationPriority::Slow => {
                info!(
                    "Warning: Unable to keep up with incoming traffic - recommend increasing replication priority to auto"
                );
            }
            ReplicationPriority::Auto => {
                let max_w = std::cmp::min(max_workers, WORKER_MAX_LIMIT);
                let active_workers = self.active_workers();
                if active_workers < max_w as i32 {
                    let workers = self.workers.read().await;
                    let new_count = std::cmp::min(workers.len() + 1, max_w);
                    let existing = workers.len();
                    drop(workers);
                    self.resize_workers(new_count, existing).await;
                }
                let max_mrf_workers = std::cmp::min(max_workers, MRF_WORKER_MAX_LIMIT);
                let active_mrf = self.active_mrf_workers();
                if active_mrf < max_mrf_workers as i32 {
                    let current_mrf = self.mrf_worker_size.load(Ordering::SeqCst);
                    let new_mrf = std::cmp::min(current_mrf + 1, max_mrf_workers as i32);
                    self.resize_failed_workers(new_mrf).await;
                }
            }
        }
    }
}

if let Some(channel) = ch
    && channel.try_send(ReplicationOperation::Object(Box::new(ri.clone()))).is_err()
{
    // Queue to MRF if all workers are busy
    let _ = self.mrf_save_tx.try_send(ri.to_mrf_entry());
    // Try to scale up workers based on priority
    let priority = self.priority.read().await.clone();
    let max_workers = *self.max_workers.read().await;
    match priority {
        ReplicationPriority::Fast => {
            // Log warning about unable to keep up
            info!("Warning: Unable to keep up with incoming traffic");
        }
        ReplicationPriority::Slow => {
            info!("Warning: Unable to keep up with incoming traffic - recommend increasing replication priority to auto");
        }
        ReplicationPriority::Auto => {
            let max_w = std::cmp::min(max_workers, WORKER_MAX_LIMIT);
            let active_workers = self.active_workers();
            if active_workers < max_w as i32 {
                let workers = self.workers.read().await;
                let new_count = std::cmp::min(workers.len() + 1, max_w);
                let existing = workers.len();
                drop(workers);
                self.resize_workers(new_count, existing).await;
            }
            let max_mrf_workers = std::cmp::min(max_workers, MRF_WORKER_MAX_LIMIT);
            let active_mrf = self.active_mrf_workers();
            if active_mrf < max_mrf_workers as i32 {
                let current_mrf = self.mrf_worker_size.load(Ordering::SeqCst);
                let new_mrf = std::cmp::min(current_mrf + 1, max_mrf_workers as i32);
                self.resize_failed_workers(new_mrf).await;
            }
        }
    }
}
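The control flow kept by this hunk is a backpressure pattern: a bounded worker channel is probed with `try_send`, and on failure the item is diverted to the MRF retry queue rather than awaited. A condensed sketch with simplified types:

use tokio::sync::mpsc;

// If the worker channel is full or closed, record the item for a later
// retry pass instead of blocking the caller.
fn enqueue(work_tx: &mpsc::Sender<String>, mrf_tx: &mpsc::Sender<String>, item: String) {
    if work_tx.try_send(item.clone()).is_err() {
        let _ = mrf_tx.try_send(item);
    }
}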
@@ -593,31 +591,29 @@ impl<S: StorageAPI> ReplicationPool<S> {
_ => self.get_worker_ch(&doi.bucket, &doi.delete_object.object_name, 0).await,
};
if let Some(channel) = ch {
    if channel.try_send(ReplicationOperation::Delete(Box::new(doi.clone()))).is_err() {
        let _ = self.mrf_save_tx.try_send(doi.to_mrf_entry());
        let priority = self.priority.read().await.clone();
        let max_workers = *self.max_workers.read().await;
        match priority {
            ReplicationPriority::Fast => {
                info!("Warning: Unable to keep up with incoming deletes");
            }
            ReplicationPriority::Slow => {
                info!(
                    "Warning: Unable to keep up with incoming deletes - recommend increasing replication priority to auto"
                );
            }
            ReplicationPriority::Auto => {
                let max_w = std::cmp::min(max_workers, WORKER_MAX_LIMIT);
                if self.active_workers() < max_w as i32 {
                    let workers = self.workers.read().await;
                    let new_count = std::cmp::min(workers.len() + 1, max_w);
                    let existing = workers.len();
                    drop(workers);
                    self.resize_workers(new_count, existing).await;
                }
            }
        }
    }
}

if let Some(channel) = ch
    && channel.try_send(ReplicationOperation::Delete(Box::new(doi.clone()))).is_err()
{
    let _ = self.mrf_save_tx.try_send(doi.to_mrf_entry());
    let priority = self.priority.read().await.clone();
    let max_workers = *self.max_workers.read().await;
    match priority {
        ReplicationPriority::Fast => {
            info!("Warning: Unable to keep up with incoming deletes");
        }
        ReplicationPriority::Slow => {
            info!("Warning: Unable to keep up with incoming deletes - recommend increasing replication priority to auto");
        }
        ReplicationPriority::Auto => {
            let max_w = std::cmp::min(max_workers, WORKER_MAX_LIMIT);
            if self.active_workers() < max_w as i32 {
                let workers = self.workers.read().await;
                let new_count = std::cmp::min(workers.len() + 1, max_w);
                let existing = workers.len();
                drop(workers);
                self.resize_workers(new_count, existing).await;
            }
        }
    }
}

View File

@@ -242,11 +242,10 @@ impl ReplicationResyncer {
if let Some(last_update) = status.last_update {
    if last_update > *last_update_times.get(bucket).unwrap_or(&OffsetDateTime::UNIX_EPOCH) {
        update = true;
    }
}

if let Some(last_update) = status.last_update
    && last_update > *last_update_times.get(bucket).unwrap_or(&OffsetDateTime::UNIX_EPOCH)
{
    update = true;
}
if update {
if let Err(err) = save_resync_status(bucket, status, api.clone()).await {
@@ -345,13 +344,12 @@ impl ReplicationResyncer {
return;
};
if !heal {
    if let Err(e) = self
        .mark_status(ResyncStatusType::ResyncStarted, opts.clone(), storage.clone())
        .await
    {
        error!("Failed to mark resync status: {}", e);
    }
}

if !heal
    && let Err(e) = self
        .mark_status(ResyncStatusType::ResyncStarted, opts.clone(), storage.clone())
        .await
{
    error!("Failed to mark resync status: {}", e);
}
let (tx, mut rx) = tokio::sync::mpsc::channel(100);
@@ -1463,21 +1461,18 @@ async fn replicate_delete_to_target(dobj: &DeletedObjectReplicationInfo, tgt_cli
Some(version_id.to_string())
};
if dobj.delete_object.delete_marker_version_id.is_some() {
    if let Err(e) = tgt_client
        .head_object(&tgt_client.bucket, &dobj.delete_object.object_name, version_id.clone())
        .await
    {
        if let SdkError::ServiceError(service_err) = &e {
            if !service_err.err().is_not_found() {
                rinfo.replication_status = ReplicationStatusType::Failed;
                rinfo.error = Some(e.to_string());
                return rinfo;
            }
        }
    };
}

if dobj.delete_object.delete_marker_version_id.is_some()
    && let Err(e) = tgt_client
        .head_object(&tgt_client.bucket, &dobj.delete_object.object_name, version_id.clone())
        .await
    && let SdkError::ServiceError(service_err) = &e
    && !service_err.err().is_not_found()
{
    rinfo.replication_status = ReplicationStatusType::Failed;
    rinfo.error = Some(e.to_string());
    return rinfo;
}
return rinfo;
};
match tgt_client
.remove_object(

View File

@@ -49,13 +49,13 @@ impl ExponentialMovingAverage {
pub fn update_exponential_moving_average(&self, now: SystemTime) {
if let Ok(mut last_update_guard) = self.last_update.try_lock() {
let last_update = *last_update_guard;
if let Ok(duration) = now.duration_since(last_update) {
if duration.as_secs() > 0 {
let decay = (-duration.as_secs_f64() / 60.0).exp(); // 1 minute decay
let current_value = f64::from_bits(self.value.load(AtomicOrdering::Relaxed));
self.value.store((current_value * decay).to_bits(), AtomicOrdering::Relaxed);
*last_update_guard = now;
}
if let Ok(duration) = now.duration_since(last_update)
&& duration.as_secs() > 0
{
let decay = (-duration.as_secs_f64() / 60.0).exp(); // 1 minute decay
let current_value = f64::from_bits(self.value.load(AtomicOrdering::Relaxed));
self.value.store((current_value * decay).to_bits(), AtomicOrdering::Relaxed);
*last_update_guard = now;
}
}
}
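The surrounding type keeps a lock-free f64 by storing its bit pattern in an AtomicU64 and applying exponential decay with a one-minute time constant, exp(-dt/60). A reduced sketch of that technique; field names are simplified:

use std::sync::atomic::{AtomicU64, Ordering};
use std::time::Duration;

struct Ema {
    value: AtomicU64, // f64 stored as raw bits
}

impl Ema {
    fn decay(&self, dt: Duration) {
        let decay = (-dt.as_secs_f64() / 60.0).exp(); // 1 minute decay
        let current = f64::from_bits(self.value.load(Ordering::Relaxed));
        self.value.store((current * decay).to_bits(), Ordering::Relaxed);
    }
}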
@@ -757,10 +757,10 @@ impl ReplicationStats {
/// Check if bucket replication statistics have usage
pub fn has_replication_usage(&self, bucket: &str) -> bool {
if let Ok(cache) = self.cache.try_read() {
if let Some(stats) = cache.get(bucket) {
return stats.has_replication_usage();
}
if let Ok(cache) = self.cache.try_read()
&& let Some(stats) = cache.get(bucket)
{
return stats.has_replication_usage();
}
false
}
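`try_read` here is the non-blocking counterpart of the guard-based reads elsewhere: when a writer holds the lock, the probe returns Err immediately and the stats check falls back to `false` instead of stalling. A simplified sketch:

use std::collections::HashMap;
use tokio::sync::RwLock;

fn has_usage(cache: &RwLock<HashMap<String, u64>>, bucket: &str) -> bool {
    if let Ok(cache) = cache.try_read()
        && let Some(bytes) = cache.get(bucket)
    {
        return *bytes > 0;
    }
    false
}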

View File

@@ -37,10 +37,11 @@ impl VersioningApi for VersioningConfiguration {
return true;
}
if let Some(exclude_folders) = self.exclude_folders {
if exclude_folders && prefix.ends_with('/') {
return false;
}
if let Some(exclude_folders) = self.exclude_folders
&& exclude_folders
&& prefix.ends_with('/')
{
return false;
}
if let Some(ref excluded_prefixes) = self.excluded_prefixes {
@@ -67,10 +68,11 @@ impl VersioningApi for VersioningConfiguration {
return false;
}
if let Some(exclude_folders) = self.exclude_folders {
if exclude_folders && prefix.ends_with('/') {
return true;
}
if let Some(exclude_folders) = self.exclude_folders
&& exclude_folders
&& prefix.ends_with('/')
{
return true;
}
if let Some(ref excluded_prefixes) = self.excluded_prefixes {

View File

@@ -308,12 +308,11 @@ pub async fn list_path_raw(rx: CancellationToken, opts: ListPathRawOptions) -> d
// Break if all at EOF or error.
if at_eof + has_err == readers.len() {
if has_err > 0 {
if let Some(finished_fn) = opts.finished.as_ref() {
if has_err > 0 {
finished_fn(&errs).await;
}
}
if has_err > 0
&& let Some(finished_fn) = opts.finished.as_ref()
&& has_err > 0
{
finished_fn(&errs).await;
}
// error!("list_path_raw: at_eof + has_err == readers.len() break {:?}", &errs);

View File

@@ -161,7 +161,7 @@ impl TransitionClient {
async fn private_new(endpoint: &str, opts: Options, tier_type: &str) -> Result<TransitionClient, std::io::Error> {
let endpoint_url = get_endpoint_url(endpoint, opts.secure)?;
let _ = rustls::crypto::ring::default_provider().install_default();
let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
let scheme = endpoint_url.scheme();
let client;
let tls = if let Some(store) = load_root_store_from_tls_path() {
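This one-line change is the migration named in the commit title: rustls 0.23 offers both `ring` and `aws-lc-rs` as CryptoProvider backends, and exactly one process-wide default may be installed; `install_default` returns Err if a default is already set, hence the discarded result. A sketch, assuming rustls is built with the `aws-lc-rs` feature:

fn install_crypto_provider() {
    // Safe to call more than once; later calls fail and are ignored.
    let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
}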

View File

@@ -211,10 +211,11 @@ async fn apply_dynamic_config_for_sub_sys<S: StorageAPI>(cfg: &mut Config, api:
for (i, count) in set_drive_counts.iter().enumerate() {
match storageclass::lookup_config(&kvs, *count) {
Ok(res) => {
if i == 0 && GLOBAL_STORAGE_CLASS.get().is_none() {
if let Err(r) = GLOBAL_STORAGE_CLASS.set(res) {
error!("GLOBAL_STORAGE_CLASS.set failed {:?}", r);
}
if i == 0
&& GLOBAL_STORAGE_CLASS.get().is_none()
&& let Err(r) = GLOBAL_STORAGE_CLASS.set(res)
{
error!("GLOBAL_STORAGE_CLASS.set failed {:?}", r);
}
}
Err(err) => {

View File

@@ -180,10 +180,10 @@ impl Config {
let mut default = HashMap::new();
default.insert(DEFAULT_DELIMITER.to_owned(), v.clone());
self.0.insert(k.clone(), default);
} else if !self.0[k].contains_key(DEFAULT_DELIMITER) {
if let Some(m) = self.0.get_mut(k) {
m.insert(DEFAULT_DELIMITER.to_owned(), v.clone());
}
} else if !self.0[k].contains_key(DEFAULT_DELIMITER)
&& let Some(m) = self.0.get_mut(k)
{
m.insert(DEFAULT_DELIMITER.to_owned(), v.clone());
}
}
}

View File

@@ -65,18 +65,16 @@ lazy_static::lazy_static! {
/// Store data usage info to backend storage
pub async fn store_data_usage_in_backend(data_usage_info: DataUsageInfo, store: Arc<ECStore>) -> Result<(), Error> {
// Prevent older data from overwriting newer persisted stats
if let Ok(buf) = read_config(store.clone(), &DATA_USAGE_OBJ_NAME_PATH).await {
if let Ok(existing) = serde_json::from_slice::<DataUsageInfo>(&buf) {
if let (Some(new_ts), Some(existing_ts)) = (data_usage_info.last_update, existing.last_update) {
if new_ts <= existing_ts {
info!(
"Skip persisting data usage: incoming last_update {:?} <= existing {:?}",
new_ts, existing_ts
);
return Ok(());
}
}
}
if let Ok(buf) = read_config(store.clone(), &DATA_USAGE_OBJ_NAME_PATH).await
&& let Ok(existing) = serde_json::from_slice::<DataUsageInfo>(&buf)
&& let (Some(new_ts), Some(existing_ts)) = (data_usage_info.last_update, existing.last_update)
&& new_ts <= existing_ts
{
info!(
"Skip persisting data usage: incoming last_update {:?} <= existing {:?}",
new_ts, existing_ts
);
return Ok(());
}
let data =
@@ -149,26 +147,24 @@ pub async fn load_data_usage_from_backend(store: Arc<ECStore>) -> Result<DataUsa
// Handle replication info
for (bucket, bui) in &data_usage_info.buckets_usage {
if bui.replicated_size_v1 > 0
    || bui.replication_failed_count_v1 > 0
    || bui.replication_failed_size_v1 > 0
    || bui.replication_pending_count_v1 > 0
{
    if let Ok((cfg, _)) = get_replication_config(bucket).await {
        if !cfg.role.is_empty() {
            data_usage_info.replication_info.insert(
                cfg.role.clone(),
                BucketTargetUsageInfo {
                    replication_failed_size: bui.replication_failed_size_v1,
                    replication_failed_count: bui.replication_failed_count_v1,
                    replicated_size: bui.replicated_size_v1,
                    replication_pending_count: bui.replication_pending_count_v1,
                    replication_pending_size: bui.replication_pending_size_v1,
                    ..Default::default()
                },
            );
        }
    }
}

if (bui.replicated_size_v1 > 0
    || bui.replication_failed_count_v1 > 0
    || bui.replication_failed_size_v1 > 0
    || bui.replication_pending_count_v1 > 0)
    && let Ok((cfg, _)) = get_replication_config(bucket).await
    && !cfg.role.is_empty()
{
    data_usage_info.replication_info.insert(
        cfg.role.clone(),
        BucketTargetUsageInfo {
            replication_failed_size: bui.replication_failed_size_v1,
            replication_failed_count: bui.replication_failed_count_v1,
            replicated_size: bui.replicated_size_v1,
            replication_pending_count: bui.replication_pending_count_v1,
            replication_pending_size: bui.replication_pending_size_v1,
            ..Default::default()
        },
    );
}
}
@@ -177,10 +173,10 @@ pub async fn load_data_usage_from_backend(store: Arc<ECStore>) -> Result<DataUsa
/// Aggregate usage information from local disk snapshots.
fn merge_snapshot(aggregated: &mut DataUsageInfo, mut snapshot: LocalUsageSnapshot, latest_update: &mut Option<SystemTime>) {
if let Some(update) = snapshot.last_update {
if latest_update.is_none_or(|current| update > current) {
*latest_update = Some(update);
}
if let Some(update) = snapshot.last_update
&& latest_update.is_none_or(|current| update > current)
{
*latest_update = Some(update);
}
snapshot.recompute_totals();
@@ -255,10 +251,10 @@ pub async fn aggregate_local_snapshots(store: Arc<ECStore>) -> Result<(Vec<DiskU
);
// Best-effort cleanup so next scan can rebuild a fresh snapshot instead of repeatedly failing
let snapshot_file = snapshot_path(root.as_path(), &disk_id);
if let Err(remove_err) = fs::remove_file(&snapshot_file).await {
if remove_err.kind() != std::io::ErrorKind::NotFound {
warn!("Failed to remove corrupted snapshot {:?}: {}", snapshot_file, remove_err);
}
if let Err(remove_err) = fs::remove_file(&snapshot_file).await
&& remove_err.kind() != std::io::ErrorKind::NotFound
{
warn!("Failed to remove corrupted snapshot {:?}: {}", snapshot_file, remove_err);
}
}

View File

@@ -288,15 +288,15 @@ impl LocalDisk {
let path = path_join(&[trash.clone(), name.into()]);
if file_type.is_dir() {
    if let Err(e) = tokio::fs::remove_dir_all(path).await {
        if e.kind() != ErrorKind::NotFound {
            return Err(e.into());
        }
    }
} else if let Err(e) = tokio::fs::remove_file(path).await {
    if e.kind() != ErrorKind::NotFound {
        return Err(e.into());
    }
}

if file_type.is_dir() {
    if let Err(e) = tokio::fs::remove_dir_all(path).await
        && e.kind() != ErrorKind::NotFound
    {
        return Err(e.into());
    }
} else if let Err(e) = tokio::fs::remove_file(path).await
    && e.kind() != ErrorKind::NotFound
{
    return Err(e.into());
}
}
@@ -684,13 +684,11 @@ impl LocalDisk {
Err(err) => {
if err == Error::FileNotFound
    && !skip_access_checks(volume_dir.as_ref().to_string_lossy().to_string().as_str())
{
    if let Err(e) = access(volume_dir.as_ref()).await {
        if e.kind() == ErrorKind::NotFound {
            // warn!("read_metadata_with_dmtime os err {:?}", &aerr);
            return Err(DiskError::VolumeNotFound);
        }
    }
}

if err == Error::FileNotFound
    && !skip_access_checks(volume_dir.as_ref().to_string_lossy().to_string().as_str())
    && let Err(e) = access(volume_dir.as_ref()).await
    && e.kind() == ErrorKind::NotFound
{
    // warn!("read_metadata_with_dmtime os err {:?}", &aerr);
    return Err(DiskError::VolumeNotFound);
}
Err(err)
@@ -763,13 +761,13 @@ impl LocalDisk {
let mut f = match super::fs::open_file(file_path.as_ref(), O_RDONLY).await {
Ok(f) => f,
Err(e) => {
if e.kind() == ErrorKind::NotFound && !skip_access_checks(volume) {
if let Err(er) = access(volume_dir.as_ref()).await {
if er.kind() == ErrorKind::NotFound {
warn!("read_all_data_with_dmtime os err {:?}", &er);
return Err(DiskError::VolumeNotFound);
}
}
if e.kind() == ErrorKind::NotFound
&& !skip_access_checks(volume)
&& let Err(er) = access(volume_dir.as_ref()).await
&& er.kind() == ErrorKind::NotFound
{
warn!("read_all_data_with_dmtime os err {:?}", &er);
return Err(DiskError::VolumeNotFound);
}
return Err(to_file_error(e).into());
@@ -828,10 +826,10 @@ impl LocalDisk {
let _ = fm.data.remove(vec![vid, dir]);
let dir_path = self.get_object_path(volume, format!("{path}/{dir}").as_str())?;
if let Err(err) = self.move_to_trash(&dir_path, true, false).await {
if !(err == DiskError::FileNotFound || err == DiskError::VolumeNotFound) {
return Err(err);
}
if let Err(err) = self.move_to_trash(&dir_path, true, false).await
&& !(err == DiskError::FileNotFound || err == DiskError::VolumeNotFound)
{
return Err(err);
};
}
}
@@ -1051,11 +1049,11 @@ impl LocalDisk {
continue;
}
if let Some(forward) = &forward {
if &entry < forward {
*item = "".to_owned();
continue;
}
if let Some(forward) = &forward
&& &entry < forward
{
*item = "".to_owned();
continue;
}
if entry.ends_with(SLASH_SEPARATOR) {
@@ -1133,10 +1131,10 @@ impl LocalDisk {
})
.await?;
if opts.recursive {
if let Err(er) = Box::pin(self.scan_dir(pop, prefix.clone(), opts, out, objs_returned)).await {
error!("scan_dir err {:?}", er);
}
if opts.recursive
&& let Err(er) = Box::pin(self.scan_dir(pop, prefix.clone(), opts, out, objs_returned)).await
{
error!("scan_dir err {:?}", er);
}
dir_stack.pop();
}
@@ -1200,10 +1198,10 @@ impl LocalDisk {
})
.await?;
if opts.recursive {
if let Err(er) = Box::pin(self.scan_dir(dir, prefix.clone(), opts, out, objs_returned)).await {
warn!("scan_dir err {:?}", &er);
}
if opts.recursive
&& let Err(er) = Box::pin(self.scan_dir(dir, prefix.clone(), opts, out, objs_returned)).await
{
warn!("scan_dir err {:?}", &er);
}
}
@@ -1345,23 +1343,23 @@ impl DiskAPI for LocalDisk {
if format_info.file_info.is_some() && id.is_some() {
// check last check time
if let Some(last_check) = format_info.last_check {
if last_check.unix_timestamp() + 1 < OffsetDateTime::now_utc().unix_timestamp() {
return Ok(id);
}
if let Some(last_check) = format_info.last_check
&& last_check.unix_timestamp() + 1 < OffsetDateTime::now_utc().unix_timestamp()
{
return Ok(id);
}
}
let file_meta = self.check_format_json().await?;
if let Some(file_info) = &format_info.file_info {
    if super::fs::same_file(&file_meta, file_info) {
        let mut format_info = self.format_info.write().await;
        format_info.last_check = Some(OffsetDateTime::now_utc());
        drop(format_info);
        return Ok(id);
    }
}

if let Some(file_info) = &format_info.file_info
    && super::fs::same_file(&file_meta, file_info)
{
    let mut format_info = self.format_info.write().await;
    format_info.last_check = Some(OffsetDateTime::now_utc());
    drop(format_info);
    return Ok(id);
}
debug!("get_disk_id: read format.json");
@@ -1420,10 +1418,10 @@ impl DiskAPI for LocalDisk {
#[tracing::instrument(skip(self))]
async fn delete(&self, volume: &str, path: &str, opt: DeleteOptions) -> Result<()> {
let volume_dir = self.get_bucket_path(volume)?;
if !skip_access_checks(volume) {
if let Err(e) = access(&volume_dir).await {
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
if !skip_access_checks(volume)
&& let Err(e) = access(&volume_dir).await
{
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
let file_path = volume_dir.join(Path::new(&path));
@@ -1438,10 +1436,10 @@ impl DiskAPI for LocalDisk {
#[tracing::instrument(skip(self))]
async fn verify_file(&self, volume: &str, path: &str, fi: &FileInfo) -> Result<CheckPartsResp> {
let volume_dir = self.get_bucket_path(volume)?;
if !skip_access_checks(volume) {
if let Err(e) = access(&volume_dir).await {
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
if !skip_access_checks(volume)
&& let Err(e) = access(&volume_dir).await
{
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
let mut resp = CheckPartsResp {
@@ -1466,14 +1464,14 @@ impl DiskAPI for LocalDisk {
.await
.err();
resp.results[i] = conv_part_err_to_int(&err);
if resp.results[i] == CHECK_PART_UNKNOWN {
if let Some(err) = err {
error!("verify_file: failed to bitrot verify file: {:?}, error: {:?}", &part_path, &err);
if err == DiskError::FileAccessDenied {
continue;
}
info!("part unknown, disk: {}, path: {:?}", self.to_string(), part_path);
if resp.results[i] == CHECK_PART_UNKNOWN
&& let Some(err) = err
{
error!("verify_file: failed to bitrot verify file: {:?}, error: {:?}", &part_path, &err);
if err == DiskError::FileAccessDenied {
continue;
}
info!("part unknown, disk: {}, path: {:?}", self.to_string(), part_path);
}
}
@@ -1572,13 +1570,12 @@ impl DiskAPI for LocalDisk {
let e: DiskError = to_file_error(err).into();
if e == DiskError::FileNotFound {
if !skip_access_checks(volume) {
if let Err(err) = access(&volume_dir).await {
if err.kind() == ErrorKind::NotFound {
resp.results[i] = CHECK_PART_VOLUME_NOT_FOUND;
continue;
}
}
if !skip_access_checks(volume)
&& let Err(err) = access(&volume_dir).await
&& err.kind() == ErrorKind::NotFound
{
resp.results[i] = CHECK_PART_VOLUME_NOT_FOUND;
continue;
}
resp.results[i] = CHECK_PART_FILE_NOT_FOUND;
} else {
@@ -1634,11 +1631,11 @@ impl DiskAPI for LocalDisk {
}
};
if let Some(meta) = meta_op {
if !meta.is_dir() {
warn!("rename_part src is not dir {:?}", &src_file_path);
return Err(DiskError::FileAccessDenied);
}
if let Some(meta) = meta_op
&& !meta.is_dir()
{
warn!("rename_part src is not dir {:?}", &src_file_path);
return Err(DiskError::FileAccessDenied);
}
remove_std(&dst_file_path).map_err(to_file_error)?;
@@ -1695,10 +1692,10 @@ impl DiskAPI for LocalDisk {
}
};
if let Some(meta) = meta_op {
if !meta.is_dir() {
return Err(DiskError::FileAccessDenied);
}
if let Some(meta) = meta_op
&& !meta.is_dir()
{
return Err(DiskError::FileAccessDenied);
}
remove(&dst_file_path).await.map_err(to_file_error)?;
@@ -1814,10 +1811,10 @@ impl DiskAPI for LocalDisk {
async fn list_dir(&self, origvolume: &str, volume: &str, dir_path: &str, count: i32) -> Result<Vec<String>> {
if !origvolume.is_empty() {
let origvolume_dir = self.get_bucket_path(origvolume)?;
if !skip_access_checks(origvolume) {
if let Err(e) = access(origvolume_dir).await {
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
if !skip_access_checks(origvolume)
&& let Err(e) = access(origvolume_dir).await
{
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
}
@@ -1827,10 +1824,11 @@ impl DiskAPI for LocalDisk {
let entries = match os::read_dir(&dir_path_abs, count).await {
Ok(res) => res,
Err(e) => {
if e.kind() == std::io::ErrorKind::NotFound && !skip_access_checks(volume) {
if let Err(e) = access(&volume_dir).await {
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
if e.kind() == std::io::ErrorKind::NotFound
&& !skip_access_checks(volume)
&& let Err(e) = access(&volume_dir).await
{
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
return Err(to_file_error(e).into());
@@ -1845,10 +1843,10 @@ impl DiskAPI for LocalDisk {
async fn walk_dir<W: AsyncWrite + Unpin + Send>(&self, opts: WalkDirOptions, wr: &mut W) -> Result<()> {
let volume_dir = self.get_bucket_path(&opts.bucket)?;
if !skip_access_checks(&opts.bucket) {
if let Err(e) = access(&volume_dir).await {
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
if !skip_access_checks(&opts.bucket)
&& let Err(e) = access(&volume_dir).await
{
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
let mut wr = wr;
@@ -1909,19 +1907,19 @@ impl DiskAPI for LocalDisk {
dst_path: &str,
) -> Result<RenameDataResp> {
let src_volume_dir = self.get_bucket_path(src_volume)?;
if !skip_access_checks(src_volume) {
if let Err(e) = super::fs::access_std(&src_volume_dir) {
info!("access checks failed, src_volume_dir: {:?}, err: {}", src_volume_dir, e.to_string());
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
if !skip_access_checks(src_volume)
&& let Err(e) = super::fs::access_std(&src_volume_dir)
{
info!("access checks failed, src_volume_dir: {:?}, err: {}", src_volume_dir, e.to_string());
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
let dst_volume_dir = self.get_bucket_path(dst_volume)?;
if !skip_access_checks(dst_volume) {
if let Err(e) = super::fs::access_std(&dst_volume_dir) {
info!("access checks failed, dst_volume_dir: {:?}, err: {}", dst_volume_dir, e.to_string());
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
if !skip_access_checks(dst_volume)
&& let Err(e) = super::fs::access_std(&dst_volume_dir)
{
info!("access checks failed, dst_volume_dir: {:?}, err: {}", dst_volume_dir, e.to_string());
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
// xl.meta path
@@ -1973,19 +1971,18 @@ impl DiskAPI for LocalDisk {
let mut xlmeta = FileMeta::new();
if let Some(dst_buf) = has_dst_buf.as_ref() {
if FileMeta::is_xl2_v1_format(dst_buf) {
if let Ok(nmeta) = FileMeta::load(dst_buf) {
xlmeta = nmeta
}
}
if let Some(dst_buf) = has_dst_buf.as_ref()
&& FileMeta::is_xl2_v1_format(dst_buf)
&& let Ok(nmeta) = FileMeta::load(dst_buf)
{
xlmeta = nmeta
}
let mut skip_parent = dst_volume_dir.clone();
if has_dst_buf.as_ref().is_some() {
if let Some(parent) = dst_file_path.parent() {
skip_parent = parent.to_path_buf();
}
if has_dst_buf.as_ref().is_some()
&& let Some(parent) = dst_file_path.parent()
{
skip_parent = parent.to_path_buf();
}
// TODO: Healing
@@ -2017,22 +2014,20 @@ impl DiskAPI for LocalDisk {
.await?;
if let Some((src_data_path, dst_data_path)) = has_data_dir_path.as_ref() {
let no_inline = fi.data.is_none() && fi.size > 0;
if no_inline {
if let Err(err) = rename_all(&src_data_path, &dst_data_path, &skip_parent).await {
let _ = self.delete_file(&dst_volume_dir, dst_data_path, false, false).await;
info!(
"rename all failed src_data_path: {:?}, dst_data_path: {:?}, err: {:?}",
src_data_path, dst_data_path, err
);
return Err(err);
}
if no_inline && let Err(err) = rename_all(&src_data_path, &dst_data_path, &skip_parent).await {
let _ = self.delete_file(&dst_volume_dir, dst_data_path, false, false).await;
info!(
"rename all failed src_data_path: {:?}, dst_data_path: {:?}, err: {:?}",
src_data_path, dst_data_path, err
);
return Err(err);
}
}
if let Some(old_data_dir) = has_old_data_dir {
// preserve current xl.meta inside the oldDataDir.
if let Some(dst_buf) = has_dst_buf {
if let Err(err) = self
if let Some(dst_buf) = has_dst_buf
&& let Err(err) = self
.write_all_private(
dst_volume,
format!("{}/{}/{}", &dst_path, &old_data_dir.to_string(), STORAGE_FORMAT_FILE).as_str(),
@@ -2041,10 +2036,9 @@ impl DiskAPI for LocalDisk {
&skip_parent,
)
.await
{
info!("write_all_private failed err: {:?}", err);
return Err(err);
}
{
info!("write_all_private failed err: {:?}", err);
return Err(err);
}
}
@@ -2075,11 +2069,11 @@ impl DiskAPI for LocalDisk {
#[tracing::instrument(skip(self))]
async fn make_volumes(&self, volumes: Vec<&str>) -> Result<()> {
for vol in volumes {
if let Err(e) = self.make_volume(vol).await {
if e != DiskError::VolumeExists {
error!("local disk make volumes failed: {e}");
return Err(e);
}
if let Err(e) = self.make_volume(vol).await
&& e != DiskError::VolumeExists
{
error!("local disk make volumes failed: {e}");
return Err(e);
}
// TODO: health check
}
@@ -2313,10 +2307,11 @@ impl DiskAPI for LocalDisk {
let old_path = file_path.join(Path::new(uuid.to_string().as_str()));
check_path_length(old_path.to_string_lossy().as_ref())?;
if let Err(err) = self.move_to_trash(&old_path, true, false).await {
if err != DiskError::FileNotFound && err != DiskError::VolumeNotFound {
return Err(err);
}
if let Err(err) = self.move_to_trash(&old_path, true, false).await
&& err != DiskError::FileNotFound
&& err != DiskError::VolumeNotFound
{
return Err(err);
}
}
@@ -2328,13 +2323,13 @@ impl DiskAPI for LocalDisk {
}
// opts.undo_write && opts.old_data_dir.is_some_and(f)
if let Some(old_data_dir) = opts.old_data_dir {
if opts.undo_write {
let src_path =
file_path.join(Path::new(format!("{old_data_dir}{SLASH_SEPARATOR}{STORAGE_FORMAT_FILE_BACKUP}").as_str()));
let dst_path = file_path.join(Path::new(format!("{path}{SLASH_SEPARATOR}{STORAGE_FORMAT_FILE}").as_str()));
return rename_all(src_path, dst_path, file_path).await;
}
if let Some(old_data_dir) = opts.old_data_dir
&& opts.undo_write
{
let src_path =
file_path.join(Path::new(format!("{old_data_dir}{SLASH_SEPARATOR}{STORAGE_FORMAT_FILE_BACKUP}").as_str()));
let dst_path = file_path.join(Path::new(format!("{path}{SLASH_SEPARATOR}{STORAGE_FORMAT_FILE}").as_str()));
return rename_all(src_path, dst_path, file_path).await;
}
self.delete_file(&volume_dir, &xl_path, true, false).await

View File

@@ -147,11 +147,11 @@ async fn reliable_rename(
dst_file_path: impl AsRef<Path>,
base_dir: impl AsRef<Path>,
) -> io::Result<()> {
if let Some(parent) = dst_file_path.as_ref().parent() {
if !file_exists(parent) {
// info!("reliable_rename reliable_mkdir_all parent: {:?}", parent);
reliable_mkdir_all(parent, base_dir.as_ref()).await?;
}
if let Some(parent) = dst_file_path.as_ref().parent()
&& !file_exists(parent)
{
// info!("reliable_rename reliable_mkdir_all parent: {:?}", parent);
reliable_mkdir_all(parent, base_dir.as_ref()).await?;
}
let mut i = 0;
@@ -190,12 +190,11 @@ pub async fn reliable_mkdir_all(path: impl AsRef<Path>, base_dir: impl AsRef<Pat
if e.kind() == io::ErrorKind::NotFound && i == 0 {
i += 1;
if let Some(base_parent) = base_dir.parent() {
if let Some(c) = base_parent.components().next() {
if c != Component::RootDir {
base_dir = base_parent
}
}
if let Some(base_parent) = base_dir.parent()
&& let Some(c) = base_parent.components().next()
&& c != Component::RootDir
{
base_dir = base_parent
}
continue;
}

View File

@@ -318,7 +318,7 @@ fn get_divisible_size(total_sizes: &[usize]) -> usize {
fn possible_set_counts(set_size: usize) -> Vec<usize> {
let mut ss = Vec::new();
for s in SET_SIZES {
if set_size % s == 0 {
if set_size.is_multiple_of(s) {
ss.push(s);
}
}
@@ -340,7 +340,7 @@ fn common_set_drive_count(divisible_size: usize, set_counts: &[usize]) -> usize
let mut prev_d = divisible_size / set_counts[0];
let mut set_size = 0;
for &cnt in set_counts {
if divisible_size % cnt == 0 {
if divisible_size.is_multiple_of(cnt) {
let d = divisible_size / cnt;
if d <= prev_d {
prev_d = d;
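`is_multiple_of`, stabilized for unsigned integers in Rust 1.87, states the intent of the old `% s == 0` checks directly. A sketch of the same filter in isolation:

fn possible_counts(set_sizes: &[usize], set_size: usize) -> Vec<usize> {
    set_sizes.iter().copied().filter(|s| set_size.is_multiple_of(*s)).collect()
}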

View File

@@ -266,12 +266,11 @@ impl Erasure {
let (mut shards, errs) = reader.read().await;
if ret_err.is_none() {
if let (_, Some(err)) = reduce_errs(&errs, &[]) {
if err == Error::FileNotFound || err == Error::FileCorrupt {
ret_err = Some(err.into());
}
}
if ret_err.is_none()
&& let (_, Some(err)) = reduce_errs(&errs, &[])
&& (err == Error::FileNotFound || err == Error::FileCorrupt)
{
ret_err = Some(err.into());
}
if !reader.can_decode(&shards) {

View File

@@ -150,10 +150,10 @@ impl Erasure {
}
Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => {
// Check if the inner error is a checksum mismatch - if so, propagate it
if let Some(inner) = e.get_ref() {
if rustfs_rio::is_checksum_mismatch(inner) {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string()));
}
if let Some(inner) = e.get_ref()
&& rustfs_rio::is_checksum_mismatch(inner)
{
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string()));
}
break;
}

View File

@@ -45,7 +45,7 @@ impl super::Erasure {
let start_block = 0;
let mut end_block = total_length / self.block_size;
if total_length % self.block_size != 0 {
if !total_length.is_multiple_of(self.block_size) {
end_block += 1;
}
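The remainder check above implements a ceiling division by hand; `div_ceil` (stable since Rust 1.73) would express the same rounding in one call. A sketch for comparison, not a change made by this commit:

fn blocks_needed(total_length: usize, block_size: usize) -> usize {
    total_length.div_ceil(block_size)
}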

View File

@@ -244,10 +244,12 @@ impl PoolMeta {
}
pub fn decommission(&mut self, idx: usize, pi: PoolSpaceInfo) -> Result<()> {
if let Some(pool) = self.pools.get_mut(idx) {
if let Some(ref info) = pool.decommission {
if !info.complete && !info.failed && !info.canceled {
return Err(StorageError::DecommissionAlreadyRunning);
}
if let Some(ref info) = pool.decommission
&& !info.complete
&& !info.failed
&& !info.canceled
{
return Err(StorageError::DecommissionAlreadyRunning);
}
let now = OffsetDateTime::now_utc();
@@ -273,12 +275,12 @@ impl PoolMeta {
pub fn pending_buckets(&self, idx: usize) -> Vec<DecomBucketInfo> {
let mut list = Vec::new();
if let Some(pool) = self.pools.get(idx) {
if let Some(ref info) = pool.decommission {
for bk in info.queued_buckets.iter() {
let (name, prefix) = path2_bucket_object(bk);
list.push(DecomBucketInfo { name, prefix });
}
if let Some(pool) = self.pools.get(idx)
&& let Some(ref info) = pool.decommission
{
for bk in info.queued_buckets.iter() {
let (name, prefix) = path2_bucket_object(bk);
list.push(DecomBucketInfo { name, prefix });
}
}
@@ -306,15 +308,15 @@ impl PoolMeta {
}
pub fn count_item(&mut self, idx: usize, size: usize, failed: bool) {
if let Some(pool) = self.pools.get_mut(idx) {
if let Some(info) = pool.decommission.as_mut() {
if failed {
info.items_decommission_failed += 1;
info.bytes_failed += size;
} else {
info.items_decommissioned += 1;
info.bytes_done += size;
}
if let Some(pool) = self.pools.get_mut(idx)
&& let Some(info) = pool.decommission.as_mut()
{
if failed {
info.items_decommission_failed += 1;
info.bytes_failed += size;
} else {
info.items_decommissioned += 1;
info.bytes_done += size;
}
}
}
@@ -324,11 +326,11 @@ impl PoolMeta {
return;
}
if let Some(pool) = self.pools.get_mut(idx) {
if let Some(info) = pool.decommission.as_mut() {
info.object = object;
info.bucket = bucket;
}
if let Some(pool) = self.pools.get_mut(idx)
&& let Some(info) = pool.decommission.as_mut()
{
info.object = object;
info.bucket = bucket;
}
}
@@ -407,10 +409,10 @@ impl PoolMeta {
if specified_pools.len() == remembered_pools.len() {
for (k, pi) in remembered_pools.iter() {
if let Some(pos) = specified_pools.get(k) {
if *pos != pi.position {
update = true; // Pool order changed, allow the update.
}
if let Some(pos) = specified_pools.get(k)
&& *pos != pi.position
{
update = true; // Pool order changed, allow the update.
}
}
}
@@ -640,10 +642,12 @@ impl ECStore {
pub async fn is_decommission_running(&self) -> bool {
let pool_meta = self.pool_meta.read().await;
for pool in pool_meta.pools.iter() {
if let Some(ref info) = pool.decommission {
if !info.complete && !info.failed && !info.canceled {
return true;
}
if let Some(ref info) = pool.decommission
&& !info.complete
&& !info.failed
&& !info.canceled
{
return true;
}
}
@@ -850,8 +854,8 @@ impl ECStore {
decommissioned += 1;
}
if decommissioned == fivs.versions.len() {
if let Err(err) = set
if decommissioned == fivs.versions.len()
&& let Err(err) = set
.delete_object(
bucket.as_str(),
&encode_dir_object(&entry.name),
@@ -863,9 +867,8 @@ impl ECStore {
},
)
.await
{
error!("decommission_pool: delete_object err {:?}", &err);
}
{
error!("decommission_pool: delete_object err {:?}", &err);
}
{
@@ -879,10 +882,8 @@ impl ECStore {
.unwrap_or_default();
drop(pool_meta);
if ok {
if let Some(notification_sys) = get_global_notification_sys() {
notification_sys.reload_pool_meta().await;
}
if ok && let Some(notification_sys) = get_global_notification_sys() {
notification_sys.reload_pool_meta().await;
}
}
@@ -1080,10 +1081,10 @@ impl ECStore {
{
let mut pool_meta = self.pool_meta.write().await;
if pool_meta.bucket_done(idx, bucket.to_string()) {
if let Err(err) = pool_meta.save(self.pools.clone()).await {
error!("decom pool_meta.save err {:?}", err);
}
if pool_meta.bucket_done(idx, bucket.to_string())
&& let Err(err) = pool_meta.save(self.pools.clone()).await
{
error!("decom pool_meta.save err {:?}", err);
}
}
continue;
@@ -1100,10 +1101,10 @@ impl ECStore {
{
let mut pool_meta = self.pool_meta.write().await;
if pool_meta.bucket_done(idx, bucket.to_string()) {
if let Err(err) = pool_meta.save(self.pools.clone()).await {
error!("decom pool_meta.save err {:?}", err);
}
if pool_meta.bucket_done(idx, bucket.to_string())
&& let Err(err) = pool_meta.save(self.pools.clone()).await
{
error!("decom pool_meta.save err {:?}", err);
}
warn!("decommission: decommission_pool bucket_done {}", &bucket.name);
@@ -1138,11 +1139,10 @@ impl ECStore {
if let Err(err) = self
    .make_bucket(bk.to_string_lossy().to_string().as_str(), &MakeBucketOptions::default())
    .await
{
    if !is_err_bucket_exists(&err) {
        error!("decommission: make bucket failed: {err}");
        return Err(err);
    }
}

if let Err(err) = self
    .make_bucket(bk.to_string_lossy().to_string().as_str(), &MakeBucketOptions::default())
    .await
    && !is_err_bucket_exists(&err)
{
    error!("decommission: make bucket failed: {err}");
    return Err(err);
}
}

View File

@@ -380,10 +380,10 @@ impl ECStore {
#[tracing::instrument(skip(self, fi))]
pub async fn update_pool_stats(&self, pool_index: usize, bucket: String, fi: &FileInfo) -> Result<()> {
let mut rebalance_meta = self.rebalance_meta.write().await;
if let Some(meta) = rebalance_meta.as_mut() {
if let Some(pool_stat) = meta.pool_stats.get_mut(pool_index) {
pool_stat.update(bucket, fi);
}
if let Some(meta) = rebalance_meta.as_mut()
&& let Some(pool_stat) = meta.pool_stats.get_mut(pool_index)
{
pool_stat.update(bucket, fi);
}
Ok(())
@@ -394,20 +394,20 @@ impl ECStore {
info!("next_rebal_bucket: pool_index: {}", pool_index);
let rebalance_meta = self.rebalance_meta.read().await;
info!("next_rebal_bucket: rebalance_meta: {:?}", rebalance_meta);
if let Some(meta) = rebalance_meta.as_ref() {
if let Some(pool_stat) = meta.pool_stats.get(pool_index) {
if pool_stat.info.status == RebalStatus::Completed || !pool_stat.participating {
info!("next_rebal_bucket: pool_index: {} completed or not participating", pool_index);
return Ok(None);
}
if pool_stat.buckets.is_empty() {
info!("next_rebal_bucket: pool_index: {} buckets is empty", pool_index);
return Ok(None);
}
info!("next_rebal_bucket: pool_index: {} bucket: {}", pool_index, pool_stat.buckets[0]);
return Ok(Some(pool_stat.buckets[0].clone()));
if let Some(meta) = rebalance_meta.as_ref()
&& let Some(pool_stat) = meta.pool_stats.get(pool_index)
{
if pool_stat.info.status == RebalStatus::Completed || !pool_stat.participating {
info!("next_rebal_bucket: pool_index: {} completed or not participating", pool_index);
return Ok(None);
}
if pool_stat.buckets.is_empty() {
info!("next_rebal_bucket: pool_index: {} buckets is empty", pool_index);
return Ok(None);
}
info!("next_rebal_bucket: pool_index: {} bucket: {}", pool_index, pool_stat.buckets[0]);
return Ok(Some(pool_stat.buckets[0].clone()));
}
info!("next_rebal_bucket: pool_index: {} None", pool_index);
@@ -417,28 +417,28 @@ impl ECStore {
#[tracing::instrument(skip(self))]
pub async fn bucket_rebalance_done(&self, pool_index: usize, bucket: String) -> Result<()> {
let mut rebalance_meta = self.rebalance_meta.write().await;
if let Some(meta) = rebalance_meta.as_mut() {
    if let Some(pool_stat) = meta.pool_stats.get_mut(pool_index) {
        info!("bucket_rebalance_done: buckets {:?}", &pool_stat.buckets);
        // Use retain to filter out buckets slated for removal
        let mut found = false;
        pool_stat.buckets.retain(|b| {
            if b.as_str() == bucket.as_str() {
                found = true;
                pool_stat.rebalanced_buckets.push(b.clone());
                false // Remove this element
            } else {
                true // Keep this element
            }
        });
        if found {
            info!("bucket_rebalance_done: bucket {} rebalanced", &bucket);
            return Ok(());
        }
    }
}

if let Some(meta) = rebalance_meta.as_mut()
    && let Some(pool_stat) = meta.pool_stats.get_mut(pool_index)
{
    info!("bucket_rebalance_done: buckets {:?}", &pool_stat.buckets);
    // Use retain to filter out buckets slated for removal
    let mut found = false;
    pool_stat.buckets.retain(|b| {
        if b.as_str() == bucket.as_str() {
            found = true;
            pool_stat.rebalanced_buckets.push(b.clone());
            false // Remove this element
        } else {
            true // Keep this element
        }
    });
    if found {
        info!("bucket_rebalance_done: bucket {} rebalanced", &bucket);
        return Ok(());
    } else {
        info!("bucket_rebalance_done: bucket {} not found", bucket);
    }
}
info!("bucket_rebalance_done: bucket {} not found", bucket);
@@ -492,10 +492,10 @@ impl ECStore {
#[tracing::instrument(skip(self))]
pub async fn stop_rebalance(self: &Arc<Self>) -> Result<()> {
let rebalance_meta = self.rebalance_meta.read().await;
if let Some(meta) = rebalance_meta.as_ref() {
if let Some(cancel_tx) = meta.cancel.as_ref() {
cancel_tx.cancel();
}
if let Some(meta) = rebalance_meta.as_ref()
&& let Some(cancel_tx) = meta.cancel.as_ref()
{
cancel_tx.cancel();
}
Ok(())
@@ -690,24 +690,24 @@ impl ECStore {
async fn check_if_rebalance_done(&self, pool_index: usize) -> bool {
let mut rebalance_meta = self.rebalance_meta.write().await;
if let Some(meta) = rebalance_meta.as_mut() {
    if let Some(pool_stat) = meta.pool_stats.get_mut(pool_index) {
        // Check if the pool's rebalance status is already completed
        if pool_stat.info.status == RebalStatus::Completed {
            info!("check_if_rebalance_done: pool {} is already completed", pool_index);
            return true;
        }
        // Calculate the percentage of free space improvement
        let pfi = (pool_stat.init_free_space + pool_stat.bytes) as f64 / pool_stat.init_capacity as f64;
        // Mark pool rebalance as done if within 5% of the PercentFreeGoal
        if (pfi - meta.percent_free_goal).abs() <= 0.05 {
            pool_stat.info.status = RebalStatus::Completed;
            pool_stat.info.end_time = Some(OffsetDateTime::now_utc());
            info!("check_if_rebalance_done: pool {} is completed, pfi: {}", pool_index, pfi);
            return true;
        }
    }
}

if let Some(meta) = rebalance_meta.as_mut()
    && let Some(pool_stat) = meta.pool_stats.get_mut(pool_index)
{
    // Check if the pool's rebalance status is already completed
    if pool_stat.info.status == RebalStatus::Completed {
        info!("check_if_rebalance_done: pool {} is already completed", pool_index);
        return true;
    }
    // Calculate the percentage of free space improvement
    let pfi = (pool_stat.init_free_space + pool_stat.bytes) as f64 / pool_stat.init_capacity as f64;
    // Mark pool rebalance as done if within 5% of the PercentFreeGoal
    if (pfi - meta.percent_free_goal).abs() <= 0.05 {
        pool_stat.info.status = RebalStatus::Completed;
        pool_stat.info.end_time = Some(OffsetDateTime::now_utc());
        info!("check_if_rebalance_done: pool {} is completed, pfi: {}", pool_index, pfi);
        return true;
    }
}
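Illustrative numbers for the completion test above (not from the repo): with 100 GiB initially free, 60 GiB rebalanced away so far, and 400 GiB capacity, pfi = (100 + 60) / 400 = 0.40; against a percent_free_goal of 0.38 the difference is 0.02, within the 0.05 tolerance, so the pool is marked completed:

fn main() {
    let pfi = (100.0 + 60.0) / 400.0; // projected free fraction: 0.40
    let percent_free_goal = 0.38;
    assert!((pfi - percent_free_goal).abs() <= 0.05); // => RebalStatus::Completed
}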
@@ -1102,11 +1102,11 @@ impl ECStore {
pub async fn save_rebalance_stats(&self, pool_idx: usize, opt: RebalSaveOpt) -> Result<()> {
// TODO: lock
let mut meta = RebalanceMeta::new();
if let Err(err) = meta.load(self.pools[0].clone()).await {
if err != Error::ConfigNotFound {
info!("save_rebalance_stats: load err: {:?}", err);
return Err(err);
}
if let Err(err) = meta.load(self.pools[0].clone()).await
&& err != Error::ConfigNotFound
{
info!("save_rebalance_stats: load err: {:?}", err);
return Err(err);
}
match opt {

View File

@@ -66,13 +66,13 @@ impl PeerRestClient {
let mut remote = Vec::with_capacity(hosts.len());
let mut all = vec![None; hosts.len()];
for (i, hs_host) in hosts.iter().enumerate() {
if let Some(host) = hs_host {
    if let Some(grid_host) = eps.find_grid_hosts_from_peer(host) {
        let client = PeerRestClient::new(host.clone(), grid_host);
        all[i] = Some(client.clone());
        remote.push(Some(client));
    }
}

if let Some(host) = hs_host
    && let Some(grid_host) = eps.find_grid_hosts_from_peer(host)
{
    let client = PeerRestClient::new(host.clone(), grid_host);
    all[i] = Some(client.clone());
    remote.push(Some(client));
}
}

View File

@@ -101,10 +101,10 @@ impl S3PeerSys {
for pool_idx in 0..self.pools_count {
let mut per_pool_errs = vec![None; self.clients.len()];
for (i, client) in self.clients.iter().enumerate() {
if let Some(v) = client.get_pools() {
if v.contains(&pool_idx) {
per_pool_errs[i] = errs[i].clone();
}
if let Some(v) = client.get_pools()
&& v.contains(&pool_idx)
{
per_pool_errs[i] = errs[i].clone();
}
}
let qu = per_pool_errs.len() / 2;
@@ -136,10 +136,10 @@ impl S3PeerSys {
for pool_idx in 0..self.pools_count {
let mut per_pool_errs = vec![None; self.clients.len()];
for (i, client) in self.clients.iter().enumerate() {
if let Some(v) = client.get_pools() {
if v.contains(&pool_idx) {
per_pool_errs[i] = errs[i].clone();
}
if let Some(v) = client.get_pools()
&& v.contains(&pool_idx)
{
per_pool_errs[i] = errs[i].clone();
}
}
let qu = per_pool_errs.len() / 2;

View File

@@ -266,10 +266,10 @@ impl SetDisks {
let mut new_disk = Vec::with_capacity(disks.len());
for disk in disks.iter() {
if let Some(d) = disk {
if d.is_online().await {
new_disk.push(disk.clone());
}
if let Some(d) = disk
&& d.is_online().await
{
new_disk.push(disk.clone());
}
}
@@ -1417,22 +1417,21 @@ impl SetDisks {
let mut valid_obj_map = HashMap::new();
for (i, op_hash) in meta_hashes.iter().enumerate() {
if let Some(hash) = op_hash {
if let Some(max_hash) = max_val {
if hash == max_hash {
if metas[i].is_valid() && !found {
found_fi = Some(metas[i].clone());
found = true;
}
let props = ObjProps {
mod_time: metas[i].mod_time,
num_versions: metas[i].num_versions,
};
*valid_obj_map.entry(props).or_insert(0) += 1;
}
if let Some(hash) = op_hash
&& let Some(max_hash) = max_val
&& hash == max_hash
{
if metas[i].is_valid() && !found {
found_fi = Some(metas[i].clone());
found = true;
}
let props = ObjProps {
mod_time: metas[i].mod_time,
num_versions: metas[i].num_versions,
};
*valid_obj_map.entry(props).or_insert(0) += 1;
}
}
@@ -3572,17 +3571,17 @@ impl SetDisks {
let mut offline = 0;
for (i, err) in errs.iter().enumerate() {
let mut found = false;
if let Some(err) = err {
if err == &DiskError::DiskNotFound {
found = true;
}
if let Some(err) = err
&& err == &DiskError::DiskNotFound
{
found = true;
}
for p in data_errs_by_part {
if let Some(v) = p.1.get(i) {
if *v == CHECK_PART_DISK_NOT_FOUND {
found = true;
break;
}
if let Some(v) = p.1.get(i)
&& *v == CHECK_PART_DISK_NOT_FOUND
{
found = true;
break;
}
}
@@ -3838,10 +3837,10 @@ impl ObjectIO for SetDisks {
None
};
if let Some(http_preconditions) = opts.http_preconditions.clone() {
if let Some(err) = self.check_write_precondition(bucket, object, opts).await {
return Err(err);
}
if let Some(http_preconditions) = opts.http_preconditions.clone()
&& let Some(err) = self.check_write_precondition(bucket, object, opts).await
{
return Err(err);
}
let mut user_defined = opts.user_defined.clone();
@@ -4002,16 +4001,16 @@ impl ObjectIO for SetDisks {
}
}
if fi.checksum.is_none() {
if let Some(content_hash) = data.as_hash_reader().content_hash() {
fi.checksum = Some(content_hash.to_bytes(&[]));
}
if fi.checksum.is_none()
&& let Some(content_hash) = data.as_hash_reader().content_hash()
{
fi.checksum = Some(content_hash.to_bytes(&[]));
}
if let Some(sc) = user_defined.get(AMZ_STORAGE_CLASS) {
if sc == storageclass::STANDARD {
let _ = user_defined.remove(AMZ_STORAGE_CLASS);
}
if let Some(sc) = user_defined.get(AMZ_STORAGE_CLASS)
&& sc == storageclass::STANDARD
{
let _ = user_defined.remove(AMZ_STORAGE_CLASS);
}
let mod_time = if let Some(mod_time) = opts.mod_time {
@@ -4062,11 +4061,11 @@ impl ObjectIO for SetDisks {
self.delete_all(RUSTFS_META_TMP_BUCKET, &tmp_dir).await?;
for (i, op_disk) in online_disks.iter().enumerate() {
if let Some(disk) = op_disk {
if disk.is_online().await {
fi = parts_metadatas[i].clone();
break;
}
if let Some(disk) = op_disk
&& disk.is_online().await
{
fi = parts_metadatas[i].clone();
break;
}
}
@@ -5568,10 +5567,10 @@ impl StorageAPI for SetDisks {
user_defined.insert("etag".to_owned(), etag.clone());
}
if let Some(sc) = user_defined.get(AMZ_STORAGE_CLASS) {
if sc == storageclass::STANDARD {
let _ = user_defined.remove(AMZ_STORAGE_CLASS);
}
if let Some(sc) = user_defined.get(AMZ_STORAGE_CLASS)
&& sc == storageclass::STANDARD
{
let _ = user_defined.remove(AMZ_STORAGE_CLASS);
}
let sc_parity_drives = {
@@ -5620,10 +5619,10 @@ impl StorageAPI for SetDisks {
// TODO: get content-type
}
if let Some(sc) = user_defined.get(AMZ_STORAGE_CLASS) {
if sc == storageclass::STANDARD {
let _ = user_defined.remove(AMZ_STORAGE_CLASS);
}
if let Some(sc) = user_defined.get(AMZ_STORAGE_CLASS)
&& sc == storageclass::STANDARD
{
let _ = user_defined.remove(AMZ_STORAGE_CLASS);
}
if let Some(checksum) = &opts.want_checksum {
@@ -5925,14 +5924,14 @@ impl StorageAPI for SetDisks {
return Err(Error::InvalidPart(p.part_num, ext_part.etag.clone(), p.etag.clone().unwrap_or_default()));
}
if checksum_type.full_object_requested() {
if let Err(err) = checksum.add_part(&cs, ext_part.actual_size) {
error!(
"complete_multipart_upload checksum add_part failed part_id={}, bucket={}, object={}",
p.part_num, bucket, object
);
return Err(Error::InvalidPart(p.part_num, ext_part.etag.clone(), p.etag.clone().unwrap_or_default()));
}
if checksum_type.full_object_requested()
&& let Err(err) = checksum.add_part(&cs, ext_part.actual_size)
{
error!(
"complete_multipart_upload checksum add_part failed part_id={}, bucket={}, object={}",
p.part_num, bucket, object
);
return Err(Error::InvalidPart(p.part_num, ext_part.etag.clone(), p.etag.clone().unwrap_or_default()));
}
checksum_combined.extend_from_slice(cs.raw.as_slice());
@@ -6112,11 +6111,11 @@ impl StorageAPI for SetDisks {
});
for (i, op_disk) in online_disks.iter().enumerate() {
if let Some(disk) = op_disk {
if disk.is_online().await {
fi = parts_metadatas[i].clone();
break;
}
if let Some(disk) = op_disk
&& disk.is_online().await
{
fi = parts_metadatas[i].clone();
break;
}
}
@@ -6210,16 +6209,15 @@ impl StorageAPI for SetDisks {
let _write_lock_guard = if !opts.no_lock {
let key = rustfs_lock::fast_lock::types::ObjectKey::new(bucket, object);
let mut skip_lock = false;
if let Some(lock_info) = self.fast_lock_manager.get_lock_info(&key) {
if lock_info.owner.as_ref() == self.locker_owner.as_str()
&& matches!(lock_info.mode, rustfs_lock::fast_lock::types::LockMode::Exclusive)
{
debug!(
"Reusing existing exclusive lock for heal operation on {}/{} held by {}",
bucket, object, self.locker_owner
);
skip_lock = true;
}
if let Some(lock_info) = self.fast_lock_manager.get_lock_info(&key)
&& lock_info.owner.as_ref() == self.locker_owner.as_str()
&& matches!(lock_info.mode, rustfs_lock::fast_lock::types::LockMode::Exclusive)
{
debug!(
"Reusing existing exclusive lock for heal operation on {}/{} held by {}",
bucket, object, self.locker_owner
);
skip_lock = true;
}
if skip_lock {
None
@@ -6563,14 +6561,14 @@ async fn disks_with_all_parts(
if err.is_some() {
let part_err = conv_part_err_to_int(err);
for p in 0..latest_meta.parts.len() {
if let Some(vec) = data_errs_by_part.get_mut(&p) {
if index < vec.len() {
info!(
"data_errs_by_part: copy meta errors to part errors: object_name={}, index: {index}, part: {p}, part_err: {part_err}",
object_name
);
vec[index] = part_err;
}
if let Some(vec) = data_errs_by_part.get_mut(&p)
&& index < vec.len()
{
info!(
"data_errs_by_part: copy meta errors to part errors: object_name={}, index: {index}, part: {p}, part_err: {part_err}",
object_name
);
vec[index] = part_err;
}
}
}
@@ -6609,14 +6607,14 @@ async fn disks_with_all_parts(
.await
.err();
if let Some(vec) = data_errs_by_part.get_mut(&0) {
if index < vec.len() {
vec[index] = conv_part_err_to_int(&verify_err.map(|e| e.into()));
info!(
"data_errs_by_part:bitrot check result: object_name={}, index: {index}, result: {}",
object_name, vec[index]
);
}
if let Some(vec) = data_errs_by_part.get_mut(&0)
&& index < vec.len()
{
vec[index] = conv_part_err_to_int(&verify_err.map(|e| e.into()));
info!(
"data_errs_by_part:bitrot check result: object_name={}, index: {index}, result: {}",
object_name, vec[index]
);
}
}
continue;
@@ -6654,32 +6652,32 @@ async fn disks_with_all_parts(
// Update dataErrsByPart for all parts
for p in 0..latest_meta.parts.len() {
if let Some(vec) = data_errs_by_part.get_mut(&p) {
if index < vec.len() {
if verify_err.is_some() {
info!(
"data_errs_by_part: verify_err: object_name={}, index: {index}, part: {p}, verify_err: {verify_err:?}",
object_name
);
vec[index] = conv_part_err_to_int(&verify_err.clone());
} else {
// Fix: verify_resp.results length is based on meta.parts, not latest_meta.parts
// We need to check bounds to avoid panic
if p < verify_resp.results.len() {
info!(
"data_errs_by_part: update data_errs_by_part: object_name={}, index: {}, part: {}, verify_resp.results: {:?}",
object_name, index, p, verify_resp.results[p]
);
vec[index] = verify_resp.results[p];
} else {
debug!(
"data_errs_by_part: verify_resp.results length mismatch: expected at least {}, got {}, object_name={}, index: {index}, part: {p}",
p + 1,
verify_resp.results.len(),
object_name
);
vec[index] = CHECK_PART_SUCCESS;
}
}
}
}
if let Some(vec) = data_errs_by_part.get_mut(&p)
&& index < vec.len()
{
if verify_err.is_some() {
info!(
"data_errs_by_part: verify_err: object_name={}, index: {index}, part: {p}, verify_err: {verify_err:?}",
object_name
);
vec[index] = conv_part_err_to_int(&verify_err.clone());
} else {
// Fix: verify_resp.results length is based on meta.parts, not latest_meta.parts
// We need to check bounds to avoid panic
if p < verify_resp.results.len() {
info!(
"data_errs_by_part: update data_errs_by_part: object_name={}, index: {}, part: {}, verify_resp.results: {:?}",
object_name, index, p, verify_resp.results[p]
);
vec[index] = verify_resp.results[p];
} else {
debug!(
"data_errs_by_part: verify_resp.results length mismatch: expected at least {}, got {}, object_name={}, index: {index}, part: {p}",
p + 1,
verify_resp.results.len(),
object_name
);
vec[index] = CHECK_PART_SUCCESS;
}
}
}
}
@@ -6689,14 +6687,14 @@ async fn disks_with_all_parts(
// Build dataErrsByDisk from dataErrsByPart
for (part, disks) in data_errs_by_part.iter() {
for (disk_idx, disk_err) in disks.iter().enumerate() {
if let Some(vec) = data_errs_by_disk.get_mut(&disk_idx) {
if *part < vec.len() {
vec[*part] = *disk_err;
info!(
"data_errs_by_disk: update data_errs_by_disk: object_name={}, part: {part}, disk_idx: {disk_idx}, disk_err: {disk_err}",
object_name,
);
}
if let Some(vec) = data_errs_by_disk.get_mut(&disk_idx)
&& *part < vec.len()
{
vec[*part] = *disk_err;
info!(
"data_errs_by_disk: update data_errs_by_disk: object_name={}, part: {part}, disk_idx: {disk_idx}, disk_err: {disk_err}",
object_name,
);
}
}
}
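`data_errs_by_part` is part-major (one slot per disk for each part); `data_errs_by_disk` is its transpose, disk-major (one slot per part for each disk). A small sketch of that reshaping over plain vectors:

// Sketch: transpose a part-major error matrix (part -> per-disk errors)
// into a disk-major one (disk -> per-part errors); 0 means success here.
fn transpose_errs(by_part: &[Vec<i32>], disk_count: usize) -> Vec<Vec<i32>> {
    let mut by_disk = vec![vec![0; by_part.len()]; disk_count];
    for (part, disks) in by_part.iter().enumerate() {
        for (disk_idx, err) in disks.iter().enumerate() {
            // The bounds guard mirrors the `*part < vec.len()` check above.
            if disk_idx < disk_count {
                by_disk[disk_idx][part] = *err;
            }
        }
    }
    by_disk
}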
@@ -6738,10 +6736,10 @@ pub fn should_heal_object_on_disk(
meta: &FileInfo,
latest_meta: &FileInfo,
) -> (bool, Option<DiskError>) {
if let Some(err) = err {
if err == &DiskError::FileNotFound || err == &DiskError::FileVersionNotFound || err == &DiskError::FileCorrupt {
return (true, Some(err.clone()));
}
if let Some(err) = err
&& (err == &DiskError::FileNotFound || err == &DiskError::FileVersionNotFound || err == &DiskError::FileCorrupt)
{
return (true, Some(err.clone()));
}
if latest_meta.volume != meta.volume
@@ -6906,15 +6904,15 @@ pub fn e_tag_matches(etag: &str, condition: &str) -> bool {
pub fn should_prevent_write(oi: &ObjectInfo, if_none_match: Option<String>, if_match: Option<String>) -> bool {
match &oi.etag {
Some(etag) => {
if let Some(if_none_match) = if_none_match {
if e_tag_matches(etag, &if_none_match) {
return true;
}
if let Some(if_none_match) = if_none_match
&& e_tag_matches(etag, &if_none_match)
{
return true;
}
if let Some(if_match) = if_match {
if !e_tag_matches(etag, &if_match) {
return true;
}
if let Some(if_match) = if_match
&& !e_tag_matches(etag, &if_match)
{
return true;
}
false
}
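`should_prevent_write` implements the usual HTTP conditional-write rules: `If-None-Match` blocks the write when the ETag matches, `If-Match` blocks it when the ETag does not. A sketch of that decision, where the `*` wildcard handling is an assumption about `e_tag_matches`, not something this diff shows:

// Assumed behavior: `*` matches any existing ETag, quotes are ignored.
fn e_tag_matches_sketch(etag: &str, condition: &str) -> bool {
    condition == "*" || condition.trim_matches('"') == etag.trim_matches('"')
}

// Mirrors the Some(etag) arm above: If-None-Match rejects on a match,
// If-Match rejects on a mismatch.
fn should_prevent_write_sketch(etag: &str, if_none_match: Option<&str>, if_match: Option<&str>) -> bool {
    if let Some(c) = if_none_match
        && e_tag_matches_sketch(etag, c)
    {
        return true;
    }
    if let Some(c) = if_match
        && !e_tag_matches_sketch(etag, c)
    {
        return true;
    }
    false
}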

View File

@@ -491,12 +491,12 @@ impl StorageAPI for Sets {
let cp_src_dst_same = path_join_buf(&[src_bucket, src_object]) == path_join_buf(&[dst_bucket, dst_object]);
if cp_src_dst_same {
if let (Some(src_vid), Some(dst_vid)) = (&src_opts.version_id, &dst_opts.version_id) {
if src_vid == dst_vid {
return src_set
.copy_object(src_bucket, src_object, dst_bucket, dst_object, src_info, src_opts, dst_opts)
.await;
}
if let (Some(src_vid), Some(dst_vid)) = (&src_opts.version_id, &dst_opts.version_id)
&& src_vid == dst_vid
{
return src_set
.copy_object(src_bucket, src_object, dst_bucket, dst_object, src_info, src_opts, dst_opts)
.await;
}
if !dst_opts.versioned && src_opts.version_id.is_none() {
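When source and destination coincide, the copy can be handled entirely inside the set that already holds the object, but only when both sides pin the same version id; otherwise a versioned destination still needs a new version written. A sketch of just that guard:

// Sketch: the same-object fast path applies only when both sides pin
// the same version; otherwise the copy must produce a new version.
fn same_object_fast_path(same_path: bool, src_vid: Option<&str>, dst_vid: Option<&str>) -> bool {
    same_path && matches!((src_vid, dst_vid), (Some(s), Some(d)) if s == d)
}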
@@ -823,10 +823,10 @@ impl StorageAPI for Sets {
Ok((m, n)) => (m, n),
Err(_) => continue,
};
if let Some(set) = self.disk_set.get(m) {
if let Some(Some(disk)) = set.disks.read().await.get(n) {
let _ = disk.close().await;
}
if let Some(set) = self.disk_set.get(m)
&& let Some(Some(disk)) = set.disks.read().await.get(n)
{
let _ = disk.close().await;
}
if let Some(Some(disk)) = disks.get(index) {
@@ -980,25 +980,24 @@ fn new_heal_format_sets(
let mut current_disks_info = vec![vec![DiskInfo::default(); set_drive_count]; set_count];
for (i, set) in ref_format.erasure.sets.iter().enumerate() {
for j in 0..set.len() {
if let Some(Some(err)) = errs.get(i * set_drive_count + j) {
if *err == DiskError::UnformattedDisk {
let mut fm = FormatV3::new(set_count, set_drive_count);
fm.id = ref_format.id;
fm.format = ref_format.format.clone();
fm.version = ref_format.version.clone();
fm.erasure.this = ref_format.erasure.sets[i][j];
fm.erasure.sets = ref_format.erasure.sets.clone();
fm.erasure.version = ref_format.erasure.version.clone();
fm.erasure.distribution_algo = ref_format.erasure.distribution_algo.clone();
new_formats[i][j] = Some(fm);
}
if let Some(Some(err)) = errs.get(i * set_drive_count + j)
&& *err == DiskError::UnformattedDisk
{
let mut fm = FormatV3::new(set_count, set_drive_count);
fm.id = ref_format.id;
fm.format = ref_format.format.clone();
fm.version = ref_format.version.clone();
fm.erasure.this = ref_format.erasure.sets[i][j];
fm.erasure.sets = ref_format.erasure.sets.clone();
fm.erasure.version = ref_format.erasure.version.clone();
fm.erasure.distribution_algo = ref_format.erasure.distribution_algo.clone();
new_formats[i][j] = Some(fm);
}
if let (Some(format), None) = (&formats[i * set_drive_count + j], &errs[i * set_drive_count + j]) {
if let Some(info) = &format.disk_info {
if !info.endpoint.is_empty() {
current_disks_info[i][j] = info.clone();
}
}
if let (Some(format), None) = (&formats[i * set_drive_count + j], &errs[i * set_drive_count + j])
&& let Some(info) = &format.disk_info
&& !info.endpoint.is_empty()
{
current_disks_info[i][j] = info.clone();
}
}
}
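For a slot reporting `UnformattedDisk`, healing synthesizes a replacement format: everything is cloned from the reference format except `erasure.this`, which is repointed at that slot's own identity in the set layout. A toy sketch of the "clone the reference, repoint the identity" step, with a simplified stand-in for `FormatV3`:

#[derive(Clone, Default)]
struct Format {
    id: u64,
    this: u64,           // identity of this disk within the set layout
    sets: Vec<Vec<u64>>, // sets[i][j] = disk id at set i, slot j
}

// Sketch: clone the reference format, then repoint `this` at the
// unformatted slot's own identity, as the UnformattedDisk branch does.
fn new_format_for_slot(reference: &Format, i: usize, j: usize) -> Format {
    let mut fm = reference.clone();
    fm.this = reference.sets[i][j];
    fm
}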

View File

@@ -243,10 +243,10 @@ impl ECStore {
});
// Only set it when the global deployment ID is not yet configured
if let Some(dep_id) = deployment_id {
if get_global_deployment_id().is_none() {
set_global_deployment_id(dep_id);
}
if let Some(dep_id) = deployment_id
&& get_global_deployment_id().is_none()
{
set_global_deployment_id(dep_id);
}
let wait_sec = 5;
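The deployment ID is adopted only when no global one is configured yet, i.e. first-writer-wins. `std::sync::OnceLock` gives the same set-once behavior; a sketch under that assumption (the real code uses its own global accessors):

use std::sync::OnceLock;

static DEPLOYMENT_ID: OnceLock<String> = OnceLock::new();

// Sketch: adopt an ID only if none is configured yet. `set` fails (and
// is ignored) when a value is already present, matching the guard above.
fn set_global_deployment_id_once(dep_id: String) {
    let _ = DEPLOYMENT_ID.set(dep_id);
}

fn get_global_deployment_id_sketch() -> Option<&'static str> {
    DEPLOYMENT_ID.get().map(String::as_str)
}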
@@ -768,10 +768,10 @@ impl ECStore {
def_pool = pinfo.clone();
has_def_pool = true;
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-deletes.html
if is_err_object_not_found(err) {
if let Err(err) = opts.precondition_check(&pinfo.object_info) {
return Err(err.clone());
}
if is_err_object_not_found(err)
&& let Err(err) = opts.precondition_check(&pinfo.object_info)
{
return Err(err.clone());
}
if !is_err_object_not_found(err) && !is_err_version_not_found(err) {
@@ -885,13 +885,14 @@ impl ECStore {
return Ok((obj, res.idx));
}
if let Some(err) = res.err {
if !is_err_object_not_found(&err) && !is_err_version_not_found(&err) {
return Err(err);
}
// TODO: delete marker
if let Some(err) = res.err
&& !is_err_object_not_found(&err)
&& !is_err_version_not_found(&err)
{
return Err(err);
}
// TODO: delete marker
}
let object = decode_dir_object(object);
@@ -918,12 +919,12 @@ impl ECStore {
let mut derrs = Vec::new();
for pe in errs.iter() {
if let Some(err) = &pe.err {
if err == &StorageError::ErasureWriteQuorum {
objs.push(None);
derrs.push(Some(StorageError::ErasureWriteQuorum));
continue;
}
if let Some(err) = &pe.err
&& err == &StorageError::ErasureWriteQuorum
{
objs.push(None);
derrs.push(Some(StorageError::ErasureWriteQuorum));
continue;
}
if let Some(idx) = pe.index {
@@ -1226,14 +1227,14 @@ impl StorageAPI for ECStore {
#[instrument(skip(self))]
async fn make_bucket(&self, bucket: &str, opts: &MakeBucketOptions) -> Result<()> {
if !is_meta_bucketname(bucket) {
if let Err(err) = check_valid_bucket_name_strict(bucket) {
return Err(StorageError::BucketNameInvalid(err.to_string()));
}
// TODO: nslock
if !is_meta_bucketname(bucket)
&& let Err(err) = check_valid_bucket_name_strict(bucket)
{
return Err(StorageError::BucketNameInvalid(err.to_string()));
}
// TODO: nslock
if let Err(err) = self.peer_sys.make_bucket(bucket, opts).await {
let err = to_object_err(err.into(), vec![bucket]);
if !is_err_bucket_exists(&err) {
@@ -1427,12 +1428,12 @@ impl StorageAPI for ECStore {
let pool_idx = self.get_pool_idx_no_lock(src_bucket, &src_object, src_info.size).await?;
if cp_src_dst_same {
if let (Some(src_vid), Some(dst_vid)) = (&src_opts.version_id, &dst_opts.version_id) {
if src_vid == dst_vid {
return self.pools[pool_idx]
.copy_object(src_bucket, &src_object, dst_bucket, &dst_object, src_info, src_opts, dst_opts)
.await;
}
if let (Some(src_vid), Some(dst_vid)) = (&src_opts.version_id, &dst_opts.version_id)
&& src_vid == dst_vid
{
return self.pools[pool_idx]
.copy_object(src_bucket, &src_object, dst_bucket, &dst_object, src_info, src_opts, dst_opts)
.await;
}
if !dst_opts.versioned && src_opts.version_id.is_none() {
@@ -2433,13 +2434,13 @@ fn check_list_multipart_args(
check_list_objs_args(bucket, prefix, key_marker)?;
if let Some(upload_id_marker) = upload_id_marker {
if let Some(key_marker) = key_marker {
if key_marker.ends_with('/') {
return Err(StorageError::InvalidUploadIDKeyCombination(
upload_id_marker.to_string(),
key_marker.to_string(),
));
}
if let Some(key_marker) = key_marker
&& key_marker.ends_with('/')
{
return Err(StorageError::InvalidUploadIDKeyCombination(
upload_id_marker.to_string(),
key_marker.to_string(),
));
}
if let Err(_e) = base64_simd::URL_SAFE_NO_PAD.decode_to_vec(upload_id_marker.as_bytes()) {
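An `upload-id-marker` is rejected when paired with a `key-marker` ending in `/`, and must itself decode as URL-safe, unpadded base64. A compact std-only sketch of those two checks; the alphabet test only approximates the `base64_simd::URL_SAFE_NO_PAD` decode the real code performs:

// Sketch: approximate the two validations above using only std.
fn check_upload_id_marker(upload_id_marker: &str, key_marker: Option<&str>) -> Result<(), String> {
    if let Some(key_marker) = key_marker
        && key_marker.ends_with('/')
    {
        return Err(format!("invalid marker combination: {upload_id_marker} / {key_marker}"));
    }
    // URL-safe, unpadded base64 alphabet: A-Z a-z 0-9 - _
    if upload_id_marker
        .bytes()
        .all(|b| b.is_ascii_alphanumeric() || b == b'-' || b == b'_')
    {
        Ok(())
    } else {
        Err("malformed upload id marker".to_owned())
    }
}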
@@ -2510,10 +2511,10 @@ pub async fn get_disk_infos(disks: &[Option<DiskStore>]) -> Vec<Option<DiskInfo>
let opts = &DiskInfoOptions::default();
let mut res = vec![None; disks.len()];
for (idx, disk_op) in disks.iter().enumerate() {
if let Some(disk) = disk_op {
if let Ok(info) = disk.disk_info(opts).await {
res[idx] = Some(info);
}
if let Some(disk) = disk_op
&& let Ok(info) = disk.disk_info(opts).await
{
res[idx] = Some(info);
}
}

View File

@@ -144,10 +144,10 @@ impl GetObjectReader {
) -> Result<(Self, usize, i64)> {
let mut rs = rs;
if let Some(part_number) = opts.part_number {
if rs.is_none() {
rs = HTTPRangeSpec::from_object_info(oi, part_number);
}
if let Some(part_number) = opts.part_number
&& rs.is_none()
{
rs = HTTPRangeSpec::from_object_info(oi, part_number);
}
// TODO:Encrypted
@@ -462,32 +462,30 @@ impl ObjectOptions {
pub fn precondition_check(&self, obj_info: &ObjectInfo) -> Result<()> {
let has_valid_mod_time = obj_info.mod_time.is_some_and(|t| t != OffsetDateTime::UNIX_EPOCH);
if let Some(part_number) = self.part_number {
if part_number > 1 && !obj_info.parts.is_empty() {
let part_found = obj_info.parts.iter().any(|pi| pi.number == part_number);
if !part_found {
return Err(Error::InvalidPartNumber(part_number));
}
if let Some(part_number) = self.part_number
&& part_number > 1
&& !obj_info.parts.is_empty()
{
let part_found = obj_info.parts.iter().any(|pi| pi.number == part_number);
if !part_found {
return Err(Error::InvalidPartNumber(part_number));
}
}
if let Some(pre) = &self.http_preconditions {
if let Some(if_none_match) = &pre.if_none_match {
if let Some(etag) = &obj_info.etag {
if is_etag_equal(etag, if_none_match) {
return Err(Error::NotModified);
}
}
if let Some(if_none_match) = &pre.if_none_match
&& let Some(etag) = &obj_info.etag
&& is_etag_equal(etag, if_none_match)
{
return Err(Error::NotModified);
}
if has_valid_mod_time {
if let Some(if_modified_since) = &pre.if_modified_since {
if let Some(mod_time) = &obj_info.mod_time {
if !is_modified_since(mod_time, if_modified_since) {
return Err(Error::NotModified);
}
}
}
if has_valid_mod_time
&& let Some(if_modified_since) = &pre.if_modified_since
&& let Some(mod_time) = &obj_info.mod_time
&& !is_modified_since(mod_time, if_modified_since)
{
return Err(Error::NotModified);
}
if let Some(if_match) = &pre.if_match {
@@ -499,14 +497,13 @@ impl ObjectOptions {
return Err(Error::PreconditionFailed);
}
}
if has_valid_mod_time && pre.if_match.is_none() {
if let Some(if_unmodified_since) = &pre.if_unmodified_since {
if let Some(mod_time) = &obj_info.mod_time {
if is_modified_since(mod_time, if_unmodified_since) {
return Err(Error::PreconditionFailed);
}
}
}
if has_valid_mod_time
&& pre.if_match.is_none()
&& let Some(if_unmodified_since) = &pre.if_unmodified_since
&& let Some(mod_time) = &obj_info.mod_time
&& is_modified_since(mod_time, if_unmodified_since)
{
return Err(Error::PreconditionFailed);
}
}
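Taken together, `precondition_check` follows the familiar HTTP ordering: the ETag and date checks for reads (`If-None-Match`, `If-Modified-Since`) yield `NotModified`, the write-guards (`If-Match`, `If-Unmodified-Since`) yield `PreconditionFailed`, the date checks run only when the object has a real modification time, and `If-Unmodified-Since` is skipped when `If-Match` is present. A condensed sketch of that decision order with deliberately simplified types (epoch seconds instead of `OffsetDateTime`, exact ETag equality):

enum Precondition { NotModified, PreconditionFailed }

struct Conditions<'a> {
    if_none_match: Option<&'a str>,
    if_match: Option<&'a str>,
    if_modified_since: Option<u64>,   // epoch seconds, illustrative
    if_unmodified_since: Option<u64>,
}

fn precondition_check_sketch(etag: Option<&str>, mod_time: Option<u64>, pre: &Conditions) -> Result<(), Precondition> {
    if let (Some(etag), Some(c)) = (etag, pre.if_none_match)
        && etag == c
    {
        return Err(Precondition::NotModified);
    }
    if let (Some(t), Some(since)) = (mod_time, pre.if_modified_since)
        && t <= since
    {
        return Err(Precondition::NotModified);
    }
    if let (Some(etag), Some(c)) = (etag, pre.if_match)
        && etag != c
    {
        return Err(Precondition::PreconditionFailed);
    }
    if pre.if_match.is_none()
        && let (Some(t), Some(since)) = (mod_time, pre.if_unmodified_since)
        && t > since
    {
        return Err(Precondition::PreconditionFailed);
    }
    Ok(())
}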
@@ -698,12 +695,12 @@ impl ObjectInfo {
}
if self.is_compressed() {
if let Some(size_str) = self.user_defined.get(&format!("{RESERVED_METADATA_PREFIX_LOWER}actual-size")) {
if !size_str.is_empty() {
// Todo: deal with error
let size = size_str.parse::<i64>().map_err(|e| std::io::Error::other(e.to_string()))?;
return Ok(size);
}
if let Some(size_str) = self.user_defined.get(&format!("{RESERVED_METADATA_PREFIX_LOWER}actual-size"))
&& !size_str.is_empty()
{
// Todo: deal with error
let size = size_str.parse::<i64>().map_err(|e| std::io::Error::other(e.to_string()))?;
return Ok(size);
}
let mut actual_size = 0;
self.parts.iter().for_each(|part| {
@@ -881,32 +878,31 @@ impl ObjectInfo {
continue;
}
if entry.is_dir() {
if let Some(delimiter) = &delimiter {
if let Some(idx) = {
let remaining = if entry.name.starts_with(prefix) {
&entry.name[prefix.len()..]
} else {
entry.name.as_str()
};
remaining.find(delimiter.as_str())
} {
let idx = prefix.len() + idx + delimiter.len();
if let Some(curr_prefix) = entry.name.get(0..idx) {
if curr_prefix == prev_prefix {
continue;
}
prev_prefix = curr_prefix;
objects.push(ObjectInfo {
is_dir: true,
bucket: bucket.to_owned(),
name: curr_prefix.to_owned(),
..Default::default()
});
}
if entry.is_dir()
&& let Some(delimiter) = &delimiter
&& let Some(idx) = {
let remaining = if entry.name.starts_with(prefix) {
&entry.name[prefix.len()..]
} else {
entry.name.as_str()
};
remaining.find(delimiter.as_str())
}
{
let idx = prefix.len() + idx + delimiter.len();
if let Some(curr_prefix) = entry.name.get(0..idx) {
if curr_prefix == prev_prefix {
continue;
}
prev_prefix = curr_prefix;
objects.push(ObjectInfo {
is_dir: true,
bucket: bucket.to_owned(),
name: curr_prefix.to_owned(),
..Default::default()
});
}
}
}
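The directory branch turns an entry name into an S3-style "common prefix": everything up to and including the first delimiter after the requested prefix, de-duplicated against the previously emitted prefix. A sketch of just that string computation:

// Sketch: compute the common prefix for `name` under `prefix` and
// `delimiter`, as the directory branch above does.
fn common_prefix(name: &str, prefix: &str, delimiter: &str) -> Option<String> {
    let remaining = name.strip_prefix(prefix).unwrap_or(name);
    let idx = remaining.find(delimiter)?;
    // Keep the delimiter itself, e.g. "photos/2024/" for "photos/2024/a.jpg".
    name.get(..prefix.len() + idx + delimiter.len()).map(str::to_owned)
}

fn main() {
    assert_eq!(
        common_prefix("photos/2024/a.jpg", "photos/", "/").as_deref(),
        Some("photos/2024/")
    );
}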
@@ -966,32 +962,31 @@ impl ObjectInfo {
continue;
}
if entry.is_dir() {
if let Some(delimiter) = &delimiter {
if let Some(idx) = {
let remaining = if entry.name.starts_with(prefix) {
&entry.name[prefix.len()..]
} else {
entry.name.as_str()
};
remaining.find(delimiter.as_str())
} {
let idx = prefix.len() + idx + delimiter.len();
if let Some(curr_prefix) = entry.name.get(0..idx) {
if curr_prefix == prev_prefix {
continue;
}
prev_prefix = curr_prefix;
objects.push(ObjectInfo {
is_dir: true,
bucket: bucket.to_owned(),
name: curr_prefix.to_owned(),
..Default::default()
});
}
if entry.is_dir()
&& let Some(delimiter) = &delimiter
&& let Some(idx) = {
let remaining = if entry.name.starts_with(prefix) {
&entry.name[prefix.len()..]
} else {
entry.name.as_str()
};
remaining.find(delimiter.as_str())
}
{
let idx = prefix.len() + idx + delimiter.len();
if let Some(curr_prefix) = entry.name.get(0..idx) {
if curr_prefix == prev_prefix {
continue;
}
prev_prefix = curr_prefix;
objects.push(ObjectInfo {
is_dir: true,
bucket: bucket.to_owned(),
name: curr_prefix.to_owned(),
..Default::default()
});
}
}
}
@@ -1026,10 +1021,10 @@ impl ObjectInfo {
}
pub fn decrypt_checksums(&self, part: usize, _headers: &HeaderMap) -> Result<(HashMap<String, String>, bool)> {
if part > 0 {
if let Some(checksums) = self.parts.iter().find(|p| p.number == part).and_then(|p| p.checksums.clone()) {
return Ok((checksums, true));
}
if part > 0
&& let Some(checksums) = self.parts.iter().find(|p| p.number == part).and_then(|p| p.checksums.clone())
{
return Ok((checksums, true));
}
// TODO: decrypt checksums

View File

@@ -302,10 +302,10 @@ impl ECStore {
..Default::default()
});
if let Some(err) = list_result.err.clone() {
if err != rustfs_filemeta::Error::Unexpected {
return Err(to_object_err(err.into(), vec![bucket, prefix]));
}
if let Some(err) = list_result.err.clone()
&& err != rustfs_filemeta::Error::Unexpected
{
return Err(to_object_err(err.into(), vec![bucket, prefix]));
}
if let Some(result) = list_result.entries.as_mut() {
@@ -418,10 +418,10 @@ impl ECStore {
},
};
if let Some(err) = list_result.err.clone() {
if err != rustfs_filemeta::Error::Unexpected {
return Err(to_object_err(err.into(), vec![bucket, prefix]));
}
if let Some(err) = list_result.err.clone()
&& err != rustfs_filemeta::Error::Unexpected
{
return Err(to_object_err(err.into(), vec![bucket, prefix]));
}
if let Some(result) = list_result.entries.as_mut() {
@@ -509,10 +509,11 @@ impl ECStore {
let mut o = o.clone();
o.marker = o.marker.filter(|v| v >= &o.prefix);
if let Some(marker) = &o.marker {
if !o.prefix.is_empty() && !marker.starts_with(&o.prefix) {
return Err(Error::Unexpected);
}
if let Some(marker) = &o.marker
&& !o.prefix.is_empty()
&& !marker.starts_with(&o.prefix)
{
return Err(Error::Unexpected);
}
if o.limit == 0 {
@@ -817,10 +818,10 @@ impl ECStore {
let value = tx2.clone();
let resolver = resolver.clone();
async move {
if let Some(entry) = entries.resolve(resolver) {
if let Err(err) = value.send(entry).await {
error!("list_path send fail {:?}", err);
}
if let Some(entry) = entries.resolve(resolver)
&& let Err(err) = value.send(entry).await
{
error!("list_path send fail {:?}", err);
}
}
})
@@ -986,20 +987,21 @@ async fn gather_results(
continue;
}
if let Some(marker) = &opts.marker {
if &entry.name < marker {
continue;
}
if let Some(marker) = &opts.marker
&& &entry.name < marker
{
continue;
}
if !entry.name.starts_with(&opts.prefix) {
continue;
}
if let Some(separator) = &opts.separator {
if !opts.recursive && !entry.is_in_dir(&opts.prefix, separator) {
continue;
}
if let Some(separator) = &opts.separator
&& !opts.recursive
&& !entry.is_in_dir(&opts.prefix, separator)
{
continue;
}
if !opts.incl_deleted && entry.is_object() && entry.is_latest_delete_marker() && !entry.is_object_dir() {
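`gather_results` drops entries in a fixed order: before the marker, outside the prefix, outside the current "directory" when the listing is not recursive, then delete markers unless requested. A condensed sketch of that filter chain over plain strings, where "in dir" is taken to mean no further separator occurs after the prefix:

struct ListOpts<'a> {
    marker: Option<&'a str>,
    prefix: &'a str,
    separator: Option<&'a str>,
    recursive: bool,
}

// Sketch: does `name` survive the gather_results filters above?
fn keep_entry(name: &str, opts: &ListOpts) -> bool {
    if let Some(marker) = opts.marker
        && name < marker
    {
        return false;
    }
    if !name.starts_with(opts.prefix) {
        return false;
    }
    if let Some(sep) = opts.separator
        && !opts.recursive
        && name[opts.prefix.len()..].contains(sep)
    {
        return false;
    }
    true
}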
@@ -1200,16 +1202,16 @@ async fn merge_entry_channels(
}
}
if let Some(xl) = has_xl.as_mut() {
if !versions.is_empty() {
xl.versions = merge_file_meta_versions(read_quorum, true, 0, &versions);
if let Some(xl) = has_xl.as_mut()
&& !versions.is_empty()
{
xl.versions = merge_file_meta_versions(read_quorum, true, 0, &versions);
if let Ok(meta) = xl.marshal_msg() {
if let Some(b) = best.as_mut() {
b.metadata = meta;
b.cached = Some(xl.clone());
}
}
if let Ok(meta) = xl.marshal_msg()
&& let Some(b) = best.as_mut()
{
b.metadata = meta;
b.cached = Some(xl.clone());
}
}
}
@@ -1217,11 +1219,11 @@ async fn merge_entry_channels(
to_merge.clear();
}
if let Some(best_entry) = &best {
if best_entry.name > last {
out_channel.send(best_entry.clone()).await.map_err(Error::other)?;
last = best_entry.name.clone();
}
if let Some(best_entry) = &best
&& best_entry.name > last
{
out_channel.send(best_entry.clone()).await.map_err(Error::other)?;
last = best_entry.name.clone();
}
select_from(&mut in_channels, best_idx, &mut top, &mut n_done).await?;
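`merge_entry_channels` is a k-way merge: it repeatedly picks the lexicographically smallest head entry across the input channels, and the `best_entry.name > last` guard suppresses duplicates of an already-emitted key. A synchronous sketch of the same idea over iterators, assuming non-empty, pre-sorted names:

// Sketch: merge pre-sorted streams, emitting each distinct name once,
// mirroring the `best_entry.name > last` guard above.
fn merge_sorted_dedup(mut streams: Vec<std::vec::IntoIter<String>>) -> Vec<String> {
    let mut heads: Vec<Option<String>> = streams.iter_mut().map(|s| s.next()).collect();
    let mut out = Vec::new();
    let mut last = String::new();
    loop {
        // Index of the lexicographically smallest live head.
        let Some(best_idx) = heads
            .iter()
            .enumerate()
            .filter_map(|(i, h)| h.as_ref().map(|name| (i, name)))
            .min_by(|a, b| a.1.cmp(b.1))
            .map(|(i, _)| i)
        else {
            return out; // every stream is exhausted
        };
        let best = heads[best_idx].take().expect("picked a live head");
        if best > last {
            last = best.clone();
            out.push(best);
        }
        heads[best_idx] = streams[best_idx].next();
    }
}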
@@ -1307,10 +1309,10 @@ impl SetDisks {
let value = tx2.clone();
let resolver = resolver.clone();
async move {
if let Some(entry) = entries.resolve(resolver) {
if let Err(err) = value.send(entry).await {
error!("list_path send fail {:?}", err);
}
if let Some(entry) = entries.resolve(resolver)
&& let Err(err) = value.send(entry).await
{
error!("list_path send fail {:?}", err);
}
}
})