chore: upgrade dependencies and migrate to aws-lc-rs (#1333)

Author: houseme
Date: 2026-01-02 00:02:34 +08:00
Committed by: GitHub
Parent commit: 61b3100260
Commit: 8d7cd4cb1b
96 changed files with 2555 additions and 2775 deletions

Cargo.lock (generated): 374 changed lines; diff suppressed because it is too large.

View File

@@ -50,7 +50,7 @@ resolver = "2"
edition = "2024"
license = "Apache-2.0"
repository = "https://github.com/rustfs/rustfs"
rust-version = "1.85"
rust-version = "1.88"
version = "0.0.5"
homepage = "https://rustfs.com"
description = "RustFS is a high-performance distributed object storage software built using Rust, one of the most popular languages worldwide. "
@@ -100,21 +100,21 @@ async-compression = { version = "0.4.19" }
async-recursion = "1.1.1"
async-trait = "0.1.89"
axum = "0.8.8"
axum-server = { version = "0.8.0", features = ["tls-rustls-no-provider"], default-features = false }
axum-server = { version = "0.8.0", features = ["tls-rustls"], default-features = false }
futures = "0.3.31"
futures-core = "0.3.31"
futures-util = "0.3.31"
pollster = "0.4.0"
hyper = { version = "1.8.1", features = ["http2", "http1", "server"] }
hyper-rustls = { version = "0.27.7", default-features = false, features = ["native-tokio", "http1", "tls12", "logging", "http2", "ring", "webpki-roots"] }
hyper-rustls = { version = "0.27.7", default-features = false, features = ["native-tokio", "http1", "tls12", "logging", "http2", "aws-lc-rs", "webpki-roots"] }
hyper-util = { version = "0.1.19", features = ["tokio", "server-auto", "server-graceful"] }
http = "1.4.0"
http-body = "1.0.1"
http-body-util = "0.1.3"
reqwest = { version = "0.12.28", default-features = false, features = ["rustls-tls-webpki-roots", "charset", "http2", "system-proxy", "stream", "json", "blocking"] }
reqwest = { version = "0.12.28", default-features = false, features = ["rustls-tls-no-provider", "charset", "http2", "system-proxy", "stream", "json", "blocking"] }
socket2 = "0.6.1"
tokio = { version = "1.48.0", features = ["fs", "rt-multi-thread"] }
tokio-rustls = { version = "0.26.4", default-features = false, features = ["logging", "tls12", "ring"] }
tokio-rustls = { version = "0.26.4", default-features = false, features = ["logging", "tls12", "aws-lc-rs"] }
tokio-stream = { version = "0.1.17" }
tokio-test = "0.4.4"
tokio-util = { version = "0.7.17", features = ["io", "compat"] }
@@ -150,7 +150,7 @@ hmac = { version = "0.13.0-rc.3" }
jsonwebtoken = { version = "10.2.0", features = ["rust_crypto"] }
pbkdf2 = "0.13.0-rc.5"
rsa = { version = "0.10.0-rc.10" }
rustls = { version = "0.23.35", features = ["ring", "logging", "std", "tls12"], default-features = false }
rustls = { version = "0.23.35" }
rustls-pemfile = "2.2.0"
rustls-pki-types = "1.13.2"
sha1 = "0.11.0-rc.3"
@@ -171,7 +171,7 @@ atoi = "2.0.0"
atomic_enum = "0.3.0"
aws-config = { version = "1.8.12" }
aws-credential-types = { version = "1.2.11" }
aws-sdk-s3 = { version = "1.119.0", default-features = false, features = ["sigv4a", "rustls", "rt-tokio"] }
aws-sdk-s3 = { version = "1.119.0", default-features = false, features = ["sigv4a", "default-https-client", "rt-tokio"] }
aws-smithy-types = { version = "1.3.5" }
base64 = "0.22.1"
base64-simd = "0.8.0"
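
Note on the manifest changes above: hyper-rustls and tokio-rustls switch their provider feature from ring to aws-lc-rs, rustls drops its explicit feature list and relies on its defaults (which select aws-lc-rs), and reqwest moves to rustls-tls-no-provider so it picks up the process-wide provider. A minimal sketch of pinning that process-wide provider at startup, using the same call this commit adopts in TransitionClient::private_new further down:

    // Sketch only: select aws-lc-rs as the process-wide rustls crypto provider.
    // install_default() errors if a provider was already installed, so the result
    // is intentionally ignored, matching the pattern used in this commit.
    fn init_crypto_provider() {
        let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
    }

With the provider installed once, the axum-server, hyper-rustls and reqwest stacks should all resolve to aws-lc-rs without pulling ring back into the build.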

View File

@@ -348,7 +348,7 @@ impl ErasureSetHealer {
}
// save checkpoint periodically
if global_obj_idx % 100 == 0 {
if global_obj_idx.is_multiple_of(100) {
checkpoint_manager
.update_position(bucket_index, *current_object_index)
.await?;
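
The counter check above is rewritten with u64::is_multiple_of (stable since Rust 1.87). A tiny standalone check, using a hypothetical counter value, showing the two forms agree and the one divergent edge case:

    fn main() {
        // Hypothetical value standing in for global_obj_idx.
        let global_obj_idx: u64 = 300;
        assert_eq!(global_obj_idx % 100 == 0, global_obj_idx.is_multiple_of(100));
        // The only edge case is a zero divisor: is_multiple_of(0) is true only for 0,
        // whereas `% 0` panics at runtime.
        assert!(!5u64.is_multiple_of(0));
        assert!(0u64.is_multiple_of(0));
    }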

View File

@@ -492,12 +492,11 @@ impl HealManager {
for (_, disk_opt) in GLOBAL_LOCAL_DISK_MAP.read().await.iter() {
if let Some(disk) = disk_opt {
// detect unformatted disk via get_disk_id()
if let Err(err) = disk.get_disk_id().await {
if err == DiskError::UnformattedDisk {
if let Err(err) = disk.get_disk_id().await
&& err == DiskError::UnformattedDisk {
endpoints.push(disk.endpoint());
continue;
}
}
}
}
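
This hunk, like most of the refactors that follow, folds nested if let / if blocks into a let chain (if let ... && ...). Let chains are only accepted on Rust 1.88 with edition 2024, which is what the rust-version bump above is for. A self-contained before/after sketch using a stand-in error type rather than the real DiskError:

    // Stand-in for the real DiskError type, for illustration only.
    #[derive(PartialEq)]
    enum DiskError {
        UnformattedDisk,
        Other,
    }

    // Nested form, accepted by older toolchains.
    fn is_unformatted_nested(res: Result<(), DiskError>) -> bool {
        if let Err(err) = res {
            if err == DiskError::UnformattedDisk {
                return true;
            }
        }
        false
    }

    // Equivalent let-chain form (Rust 1.88, edition 2024).
    fn is_unformatted_chained(res: Result<(), DiskError>) -> bool {
        if let Err(err) = res
            && err == DiskError::UnformattedDisk
        {
            return true;
        }
        false
    }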

View File

@@ -541,10 +541,10 @@ impl ResumeUtils {
for entry in entries {
if entry.ends_with(&format!("_{RESUME_STATE_FILE}")) {
// Extract task ID from filename: {task_id}_ahm_resume_state.json
if let Some(task_id) = entry.strip_suffix(&format!("_{RESUME_STATE_FILE}")) {
if !task_id.is_empty() {
task_ids.push(task_id.to_string());
}
if let Some(task_id) = entry.strip_suffix(&format!("_{RESUME_STATE_FILE}"))
&& !task_id.is_empty()
{
task_ids.push(task_id.to_string());
}
}
}

View File

@@ -83,10 +83,10 @@ pub struct CheckpointManager {
impl CheckpointManager {
pub fn new(node_id: &str, data_dir: &Path) -> Self {
if !data_dir.exists() {
if let Err(e) = std::fs::create_dir_all(data_dir) {
error!("create data dir failed {:?}: {}", data_dir, e);
}
if !data_dir.exists()
&& let Err(e) = std::fs::create_dir_all(data_dir)
{
error!("create data dir failed {:?}: {}", data_dir, e);
}
let checkpoint_file = data_dir.join(format!("scanner_checkpoint_{node_id}.json"));

View File

@@ -401,10 +401,10 @@ impl Scanner {
let mut latest_update: Option<SystemTime> = None;
for snapshot in &outcome.snapshots {
if let Some(update) = snapshot.last_update {
if latest_update.is_none_or(|current| update > current) {
latest_update = Some(update);
}
if let Some(update) = snapshot.last_update
&& latest_update.is_none_or(|current| update > current)
{
latest_update = Some(update);
}
aggregated.objects_total_count = aggregated.objects_total_count.saturating_add(snapshot.objects_total_count);
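
The is_none_or guard above keeps the newest snapshot timestamp: it passes when nothing has been recorded yet or when the candidate is strictly newer. A small standalone illustration of that max-tracking pattern:

    use std::time::{Duration, SystemTime};

    fn main() {
        // Track the newest of a series of optional timestamps, mirroring the loop above.
        let mut latest_update: Option<SystemTime> = None;
        let base = SystemTime::UNIX_EPOCH;
        for update in [base + Duration::from_secs(10), base + Duration::from_secs(5)] {
            if latest_update.is_none_or(|current| update > current) {
                latest_update = Some(update);
            }
        }
        assert_eq!(latest_update, Some(base + Duration::from_secs(10)));
    }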
@@ -527,28 +527,20 @@ impl Scanner {
let (disks, _) = set_disks.get_online_disks_with_healing(false).await;
if let Some(disk) = disks.first() {
let bucket_path = disk.path().join(bucket_name);
if bucket_path.exists() {
if let Ok(entries) = std::fs::read_dir(&bucket_path) {
for entry in entries.flatten() {
if let Ok(file_type) = entry.file_type() {
if file_type.is_dir() {
if let Some(object_name) = entry.file_name().to_str() {
if !object_name.starts_with('.') {
debug!("Deep scanning object: {}/{}", bucket_name, object_name);
if let Err(e) = self.verify_object_integrity(bucket_name, object_name).await {
warn!(
"Object integrity verification failed for {}/{}: {}",
bucket_name, object_name, e
);
} else {
debug!(
"Object integrity verification passed for {}/{}",
bucket_name, object_name
);
}
}
}
}
if bucket_path.exists()
&& let Ok(entries) = std::fs::read_dir(&bucket_path)
{
for entry in entries.flatten() {
if let Ok(file_type) = entry.file_type()
&& file_type.is_dir()
&& let Some(object_name) = entry.file_name().to_str()
&& !object_name.starts_with('.')
{
debug!("Deep scanning object: {}/{}", bucket_name, object_name);
if let Err(e) = self.verify_object_integrity(bucket_name, object_name).await {
warn!("Object integrity verification failed for {}/{}: {}", bucket_name, object_name, e);
} else {
debug!("Object integrity verification passed for {}/{}", bucket_name, object_name);
}
}
}
@@ -859,10 +851,10 @@ impl Scanner {
// Phase 2: Minimal EC verification for critical objects only
// Note: The main scanning is now handled by NodeScanner in the background
if let Some(ecstore) = rustfs_ecstore::new_object_layer_fn() {
if let Err(e) = self.minimal_ec_verification(&ecstore).await {
error!("Minimal EC verification failed: {}", e);
}
if let Some(ecstore) = rustfs_ecstore::new_object_layer_fn()
&& let Err(e) = self.minimal_ec_verification(&ecstore).await
{
error!("Minimal EC verification failed: {}", e);
}
// Update scan duration
@@ -950,13 +942,12 @@ impl Scanner {
}
// If there is still no data, try backend before persisting zeros
if data_usage.buckets_usage.is_empty() {
if let Ok(existing) = rustfs_ecstore::data_usage::load_data_usage_from_backend(ecstore.clone()).await {
if !existing.buckets_usage.is_empty() {
info!("Using existing backend data usage during fallback backoff");
data_usage = existing;
}
}
if data_usage.buckets_usage.is_empty()
&& let Ok(existing) = rustfs_ecstore::data_usage::load_data_usage_from_backend(ecstore.clone()).await
&& !existing.buckets_usage.is_empty()
{
info!("Using existing backend data usage during fallback backoff");
data_usage = existing;
}
// Avoid overwriting valid backend stats with zeros when fallback is throttled
@@ -1721,36 +1712,34 @@ impl Scanner {
// check disk status, if offline, submit erasure set heal task
if !metrics.is_online {
let enable_healing = self.config.read().await.enable_healing;
if enable_healing {
if let Some(heal_manager) = &self.heal_manager {
// Get bucket list for erasure set healing
let buckets = match rustfs_ecstore::new_object_layer_fn() {
Some(ecstore) => match ecstore.list_bucket(&ecstore::store_api::BucketOptions::default()).await {
Ok(buckets) => buckets.iter().map(|b| b.name.clone()).collect::<Vec<String>>(),
Err(e) => {
error!("Failed to get bucket list for disk healing: {}", e);
return Err(Error::Storage(e));
}
},
None => {
error!("No ECStore available for getting bucket list");
return Err(Error::Storage(ecstore::error::StorageError::other("No ECStore available")));
}
};
let set_disk_id = format!("pool_{}_set_{}", disk.endpoint().pool_idx, disk.endpoint().set_idx);
let req = HealRequest::new(
crate::heal::task::HealType::ErasureSet { buckets, set_disk_id },
crate::heal::task::HealOptions::default(),
crate::heal::task::HealPriority::High,
);
match heal_manager.submit_heal_request(req).await {
Ok(task_id) => {
warn!("disk offline, submit erasure set heal task: {} {}", task_id, disk_path);
}
if enable_healing && let Some(heal_manager) = &self.heal_manager {
// Get bucket list for erasure set healing
let buckets = match rustfs_ecstore::new_object_layer_fn() {
Some(ecstore) => match ecstore.list_bucket(&ecstore::store_api::BucketOptions::default()).await {
Ok(buckets) => buckets.iter().map(|b| b.name.clone()).collect::<Vec<String>>(),
Err(e) => {
error!("disk offline, submit erasure set heal task failed: {} {}", disk_path, e);
error!("Failed to get bucket list for disk healing: {}", e);
return Err(Error::Storage(e));
}
},
None => {
error!("No ECStore available for getting bucket list");
return Err(Error::Storage(ecstore::error::StorageError::other("No ECStore available")));
}
};
let set_disk_id = format!("pool_{}_set_{}", disk.endpoint().pool_idx, disk.endpoint().set_idx);
let req = HealRequest::new(
crate::heal::task::HealType::ErasureSet { buckets, set_disk_id },
crate::heal::task::HealOptions::default(),
crate::heal::task::HealPriority::High,
);
match heal_manager.submit_heal_request(req).await {
Ok(task_id) => {
warn!("disk offline, submit erasure set heal task: {} {}", task_id, disk_path);
}
Err(e) => {
error!("disk offline, submit erasure set heal task failed: {} {}", disk_path, e);
}
}
}
@@ -1778,36 +1767,34 @@ impl Scanner {
// disk access failed, submit erasure set heal task
let enable_healing = self.config.read().await.enable_healing;
if enable_healing {
if let Some(heal_manager) = &self.heal_manager {
// Get bucket list for erasure set healing
let buckets = match rustfs_ecstore::new_object_layer_fn() {
Some(ecstore) => match ecstore.list_bucket(&ecstore::store_api::BucketOptions::default()).await {
Ok(buckets) => buckets.iter().map(|b| b.name.clone()).collect::<Vec<String>>(),
Err(e) => {
error!("Failed to get bucket list for disk healing: {}", e);
return Err(Error::Storage(e));
}
},
None => {
error!("No ECStore available for getting bucket list");
return Err(Error::Storage(ecstore::error::StorageError::other("No ECStore available")));
if enable_healing && let Some(heal_manager) = &self.heal_manager {
// Get bucket list for erasure set healing
let buckets = match rustfs_ecstore::new_object_layer_fn() {
Some(ecstore) => match ecstore.list_bucket(&ecstore::store_api::BucketOptions::default()).await {
Ok(buckets) => buckets.iter().map(|b| b.name.clone()).collect::<Vec<String>>(),
Err(e) => {
error!("Failed to get bucket list for disk healing: {}", e);
return Err(Error::Storage(e));
}
};
},
None => {
error!("No ECStore available for getting bucket list");
return Err(Error::Storage(ecstore::error::StorageError::other("No ECStore available")));
}
};
let set_disk_id = format!("pool_{}_set_{}", disk.endpoint().pool_idx, disk.endpoint().set_idx);
let req = HealRequest::new(
crate::heal::task::HealType::ErasureSet { buckets, set_disk_id },
crate::heal::task::HealOptions::default(),
crate::heal::task::HealPriority::Urgent,
);
match heal_manager.submit_heal_request(req).await {
Ok(task_id) => {
warn!("disk access failed, submit erasure set heal task: {} {}", task_id, disk_path);
}
Err(heal_err) => {
error!("disk access failed, submit erasure set heal task failed: {} {}", disk_path, heal_err);
}
let set_disk_id = format!("pool_{}_set_{}", disk.endpoint().pool_idx, disk.endpoint().set_idx);
let req = HealRequest::new(
crate::heal::task::HealType::ErasureSet { buckets, set_disk_id },
crate::heal::task::HealOptions::default(),
crate::heal::task::HealPriority::Urgent,
);
match heal_manager.submit_heal_request(req).await {
Ok(task_id) => {
warn!("disk access failed, submit erasure set heal task: {} {}", task_id, disk_path);
}
Err(heal_err) => {
error!("disk access failed, submit erasure set heal task failed: {} {}", disk_path, heal_err);
}
}
}
@@ -1820,11 +1807,11 @@ impl Scanner {
let mut disk_objects = HashMap::new();
for volume in volumes {
// check cancel token
if let Some(cancel_token) = get_ahm_services_cancel_token() {
if cancel_token.is_cancelled() {
info!("Cancellation requested, stopping disk scan");
break;
}
if let Some(cancel_token) = get_ahm_services_cancel_token()
&& cancel_token.is_cancelled()
{
info!("Cancellation requested, stopping disk scan");
break;
}
match self.scan_volume(disk, &volume.name).await {
@@ -1955,104 +1942,96 @@ impl Scanner {
// object metadata damaged, submit metadata heal task
let enable_healing = self.config.read().await.enable_healing;
if enable_healing {
if let Some(heal_manager) = &self.heal_manager {
let req = HealRequest::metadata(bucket.to_string(), entry.name.clone());
match heal_manager.submit_heal_request(req).await {
Ok(task_id) => {
warn!(
"object metadata damaged, submit heal task: {} {} / {}",
task_id, bucket, entry.name
);
}
Err(e) => {
error!(
"object metadata damaged, submit heal task failed: {} / {} {}",
bucket, entry.name, e
);
}
if enable_healing && let Some(heal_manager) = &self.heal_manager {
let req = HealRequest::metadata(bucket.to_string(), entry.name.clone());
match heal_manager.submit_heal_request(req).await {
Ok(task_id) => {
warn!("object metadata damaged, submit heal task: {} {} / {}", task_id, bucket, entry.name);
}
Err(e) => {
error!("object metadata damaged, submit heal task failed: {} / {} {}", bucket, entry.name, e);
}
}
}
} else {
// Apply lifecycle actions
if let Some(lifecycle_config) = &lifecycle_config {
if disk.is_local() {
let vcfg = BucketVersioningSys::get(bucket).await.ok();
if let Some(lifecycle_config) = &lifecycle_config
&& disk.is_local()
{
let vcfg = BucketVersioningSys::get(bucket).await.ok();
let mut scanner_item = ScannerItem {
bucket: bucket.to_string(),
object_name: entry.name.clone(),
lifecycle: Some(lifecycle_config.clone()),
versioning: versioning_config.clone(),
};
//ScannerItem::new(bucket.to_string(), Some(lifecycle_config.clone()), versioning_config.clone());
let fivs = match entry.clone().file_info_versions(&scanner_item.bucket) {
Ok(fivs) => fivs,
Err(_err) => {
stop_fn();
return Err(Error::other("skip this file"));
}
};
let mut size_s = SizeSummary::default();
let obj_infos = match scanner_item.apply_versions_actions(&fivs.versions).await {
Ok(obj_infos) => obj_infos,
Err(_err) => {
stop_fn();
return Err(Error::other("skip this file"));
}
};
let mut scanner_item = ScannerItem {
bucket: bucket.to_string(),
object_name: entry.name.clone(),
lifecycle: Some(lifecycle_config.clone()),
versioning: versioning_config.clone(),
};
//ScannerItem::new(bucket.to_string(), Some(lifecycle_config.clone()), versioning_config.clone());
let fivs = match entry.clone().file_info_versions(&scanner_item.bucket) {
Ok(fivs) => fivs,
Err(_err) => {
stop_fn();
return Err(Error::other("skip this file"));
}
};
let mut size_s = SizeSummary::default();
let obj_infos = match scanner_item.apply_versions_actions(&fivs.versions).await {
Ok(obj_infos) => obj_infos,
Err(_err) => {
stop_fn();
return Err(Error::other("skip this file"));
}
};
let versioned = if let Some(vcfg) = vcfg.as_ref() {
vcfg.versioned(&scanner_item.object_name)
} else {
false
};
let versioned = if let Some(vcfg) = vcfg.as_ref() {
vcfg.versioned(&scanner_item.object_name)
} else {
false
};
#[allow(unused_assignments)]
let mut obj_deleted = false;
for info in obj_infos.iter() {
let sz: i64;
(obj_deleted, sz) = scanner_item.apply_actions(info, &mut size_s).await;
#[allow(unused_assignments)]
let mut obj_deleted = false;
for info in obj_infos.iter() {
let sz: i64;
(obj_deleted, sz) = scanner_item.apply_actions(info, &mut size_s).await;
if obj_deleted {
break;
}
let actual_sz = match info.get_actual_size() {
Ok(size) => size,
Err(_) => continue,
};
if info.delete_marker {
size_s.delete_markers += 1;
}
if info.version_id.is_some() && sz == actual_sz {
size_s.versions += 1;
}
size_s.total_size += sz as usize;
if info.delete_marker {
continue;
}
if obj_deleted {
break;
}
for free_version in fivs.free_versions.iter() {
let _obj_info = rustfs_ecstore::store_api::ObjectInfo::from_file_info(
free_version,
&scanner_item.bucket,
&scanner_item.object_name,
versioned,
);
let actual_sz = match info.get_actual_size() {
Ok(size) => size,
Err(_) => continue,
};
if info.delete_marker {
size_s.delete_markers += 1;
}
// todo: global trace
/*if obj_deleted {
return Err(Error::other(ERR_IGNORE_FILE_CONTRIB).into());
}*/
if info.version_id.is_some() && sz == actual_sz {
size_s.versions += 1;
}
size_s.total_size += sz as usize;
if info.delete_marker {
continue;
}
}
for free_version in fivs.free_versions.iter() {
let _obj_info = rustfs_ecstore::store_api::ObjectInfo::from_file_info(
free_version,
&scanner_item.bucket,
&scanner_item.object_name,
versioned,
);
}
// todo: global trace
/*if obj_deleted {
return Err(Error::other(ERR_IGNORE_FILE_CONTRIB).into());
}*/
}
// Store object metadata for later analysis
@@ -2064,22 +2043,17 @@ impl Scanner {
// object metadata parse failed, submit metadata heal task
let enable_healing = self.config.read().await.enable_healing;
if enable_healing {
if let Some(heal_manager) = &self.heal_manager {
let req = HealRequest::metadata(bucket.to_string(), entry.name.clone());
match heal_manager.submit_heal_request(req).await {
Ok(task_id) => {
warn!(
"object metadata parse failed, submit heal task: {} {} / {}",
task_id, bucket, entry.name
);
}
Err(e) => {
error!(
"object metadata parse failed, submit heal task failed: {} / {} {}",
bucket, entry.name, e
);
}
if enable_healing && let Some(heal_manager) = &self.heal_manager {
let req = HealRequest::metadata(bucket.to_string(), entry.name.clone());
match heal_manager.submit_heal_request(req).await {
Ok(task_id) => {
warn!("object metadata parse failed, submit heal task: {} {} / {}", task_id, bucket, entry.name);
}
Err(e) => {
error!(
"object metadata parse failed, submit heal task failed: {} / {} {}",
bucket, entry.name, e
);
}
}
}
@@ -2190,17 +2164,14 @@ impl Scanner {
// the delete marker, but we keep it conservative here.
let mut has_latest_delete_marker = false;
for &disk_idx in locations {
if let Some(bucket_map) = all_disk_objects.get(disk_idx) {
if let Some(file_map) = bucket_map.get(bucket) {
if let Some(fm) = file_map.get(object_name) {
if let Some(first_ver) = fm.versions.first() {
if first_ver.header.version_type == VersionType::Delete {
has_latest_delete_marker = true;
break;
}
}
}
}
if let Some(bucket_map) = all_disk_objects.get(disk_idx)
&& let Some(file_map) = bucket_map.get(bucket)
&& let Some(fm) = file_map.get(object_name)
&& let Some(first_ver) = fm.versions.first()
&& first_ver.header.version_type == VersionType::Delete
{
has_latest_delete_marker = true;
break;
}
}
if has_latest_delete_marker {
@@ -2248,28 +2219,26 @@ impl Scanner {
// submit heal task
let enable_healing = self.config.read().await.enable_healing;
if enable_healing {
if let Some(heal_manager) = &self.heal_manager {
use crate::heal::{HealPriority, HealRequest};
let req = HealRequest::new(
crate::heal::HealType::Object {
bucket: bucket.clone(),
object: object_name.clone(),
version_id: None,
},
crate::heal::HealOptions::default(),
HealPriority::High,
);
match heal_manager.submit_heal_request(req).await {
Ok(task_id) => {
warn!(
"object missing, submit heal task: {} {} / {} (missing disks: {:?})",
task_id, bucket, object_name, missing_disks
);
}
Err(e) => {
error!("object missing, submit heal task failed: {} / {} {}", bucket, object_name, e);
}
if enable_healing && let Some(heal_manager) = &self.heal_manager {
use crate::heal::{HealPriority, HealRequest};
let req = HealRequest::new(
crate::heal::HealType::Object {
bucket: bucket.clone(),
object: object_name.clone(),
version_id: None,
},
crate::heal::HealOptions::default(),
HealPriority::High,
);
match heal_manager.submit_heal_request(req).await {
Ok(task_id) => {
warn!(
"object missing, submit heal task: {} {} / {} (missing disks: {:?})",
task_id, bucket, object_name, missing_disks
);
}
Err(e) => {
error!("object missing, submit heal task failed: {} / {} {}", bucket, object_name, e);
}
}
}
@@ -2277,11 +2246,11 @@ impl Scanner {
// Step 3: Deep scan EC verification
let config = self.config.read().await;
if config.scan_mode == ScanMode::Deep {
if let Err(e) = self.verify_object_integrity(bucket, object_name).await {
objects_with_ec_issues += 1;
warn!("Object integrity verification failed for object {}/{}: {}", bucket, object_name, e);
}
if config.scan_mode == ScanMode::Deep
&& let Err(e) = self.verify_object_integrity(bucket, object_name).await
{
objects_with_ec_issues += 1;
warn!("Object integrity verification failed for object {}/{}: {}", bucket, object_name, e);
}
}
}
@@ -2293,10 +2262,10 @@ impl Scanner {
// Step 4: Collect data usage statistics if enabled
let config = self.config.read().await;
if config.enable_data_usage_stats {
if let Err(e) = self.collect_data_usage_statistics(all_disk_objects).await {
error!("Failed to collect data usage statistics: {}", e);
}
if config.enable_data_usage_stats
&& let Err(e) = self.collect_data_usage_statistics(all_disk_objects).await
{
error!("Failed to collect data usage statistics: {}", e);
}
drop(config);
@@ -2526,11 +2495,11 @@ impl Scanner {
info!("Starting legacy scan loop for backward compatibility");
loop {
if let Some(token) = get_ahm_services_cancel_token() {
if token.is_cancelled() {
info!("Cancellation requested, exiting legacy scan loop");
break;
}
if let Some(token) = get_ahm_services_cancel_token()
&& token.is_cancelled()
{
info!("Cancellation requested, exiting legacy scan loop");
break;
}
let (enable_data_usage_stats, scan_interval) = {
@@ -2538,10 +2507,8 @@ impl Scanner {
(config.enable_data_usage_stats, config.scan_interval)
};
if enable_data_usage_stats {
if let Err(e) = self.collect_and_persist_data_usage().await {
warn!("Background data usage collection failed: {}", e);
}
if enable_data_usage_stats && let Err(e) = self.collect_and_persist_data_usage().await {
warn!("Background data usage collection failed: {}", e);
}
// Update local stats in aggregator after latest scan
@@ -2656,10 +2623,10 @@ mod tests {
// create temp dir as 4 disks
let test_base_dir = test_dir.unwrap_or("/tmp/rustfs_ahm_test");
let temp_dir = std::path::PathBuf::from(test_base_dir);
if temp_dir.exists() {
if let Err(e) = fs::remove_dir_all(&temp_dir) {
panic!("Failed to remove test directory: {e}");
}
if temp_dir.exists()
&& let Err(e) = fs::remove_dir_all(&temp_dir)
{
panic!("Failed to remove test directory: {e}");
}
if let Err(e) = fs::create_dir_all(&temp_dir) {
panic!("Failed to create test directory: {e}");

View File

@@ -305,10 +305,10 @@ fn compute_object_usage(bucket: &str, object: &str, file_meta: &FileMeta) -> Res
has_live_object = true;
versions_count = versions_count.saturating_add(1);
if latest_file_info.is_none() {
if let Ok(info) = file_meta.into_fileinfo(bucket, object, "", false, false) {
latest_file_info = Some(info);
}
if latest_file_info.is_none()
&& let Ok(info) = file_meta.into_fileinfo(bucket, object, "", false, false)
{
latest_file_info = Some(info);
}
}
}

View File

@@ -112,10 +112,10 @@ impl LocalStatsManager {
/// create new local stats manager
pub fn new(node_id: &str, data_dir: &Path) -> Self {
// ensure data directory exists
if !data_dir.exists() {
if let Err(e) = std::fs::create_dir_all(data_dir) {
error!("create stats data directory failed {:?}: {}", data_dir, e);
}
if !data_dir.exists()
&& let Err(e) = std::fs::create_dir_all(data_dir)
{
error!("create stats data directory failed {:?}: {}", data_dir, e);
}
let stats_file = data_dir.join(format!("scanner_stats_{node_id}.json"));

View File

@@ -436,10 +436,10 @@ impl NodeScanner {
/// create a new node scanner
pub fn new(node_id: String, config: NodeScannerConfig) -> Self {
// Ensure data directory exists
if !config.data_dir.exists() {
if let Err(e) = std::fs::create_dir_all(&config.data_dir) {
error!("create data directory failed {:?}: {}", config.data_dir, e);
}
if !config.data_dir.exists()
&& let Err(e) = std::fs::create_dir_all(&config.data_dir)
{
error!("create data directory failed {:?}: {}", config.data_dir, e);
}
let stats_manager = Arc::new(LocalStatsManager::new(&node_id, &config.data_dir));

View File

@@ -327,16 +327,16 @@ impl DecentralizedStatsAggregator {
);
// Check cache validity if timestamp is not initial value (UNIX_EPOCH)
if cache_timestamp != SystemTime::UNIX_EPOCH {
if let Ok(elapsed) = now.duration_since(cache_timestamp) {
if elapsed < cache_ttl {
if let Some(cached) = self.cached_stats.read().await.as_ref() {
debug!("Returning cached aggregated stats, remaining TTL: {:?}", cache_ttl - elapsed);
return Ok(cached.clone());
}
} else {
debug!("Cache expired: elapsed={:?} >= ttl={:?}", elapsed, cache_ttl);
if cache_timestamp != SystemTime::UNIX_EPOCH
&& let Ok(elapsed) = now.duration_since(cache_timestamp)
{
if elapsed < cache_ttl {
if let Some(cached) = self.cached_stats.read().await.as_ref() {
debug!("Returning cached aggregated stats, remaining TTL: {:?}", cache_ttl - elapsed);
return Ok(cached.clone());
}
} else {
debug!("Cache expired: elapsed={:?} >= ttl={:?}", elapsed, cache_ttl);
}
}

View File

@@ -421,86 +421,86 @@ mod serial_tests {
}
};
if let Some(lmdb_env) = GLOBAL_LMDB_ENV.get() {
if let Some(lmdb) = GLOBAL_LMDB_DB.get() {
let mut wtxn = lmdb_env.write_txn().unwrap();
if let Some(lmdb_env) = GLOBAL_LMDB_ENV.get()
&& let Some(lmdb) = GLOBAL_LMDB_DB.get()
{
let mut wtxn = lmdb_env.write_txn().unwrap();
/*if let Ok((lc_config, _)) = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket_name.as_str()).await {
if let Ok(object_info) = ecstore
.get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
.await
{
let event = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::eval_action_from_lifecycle(
&lc_config,
None,
None,
&object_info,
)
.await;
rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::apply_expiry_on_non_transitioned_objects(
ecstore.clone(),
&object_info,
&event,
&rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_audit::LcEventSrc::Scanner,
)
.await;
expired = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(2)).await;
}
}*/
for record in records {
if !record.usage.has_live_object {
continue;
}
let object_info = convert_record_to_object_info(record);
println!("object_info2: {object_info:?}");
let mod_time = object_info.mod_time.unwrap_or(OffsetDateTime::now_utc());
let expiry_time = rustfs_ecstore::bucket::lifecycle::lifecycle::expected_expiry_time(mod_time, 1);
let version_id = if let Some(version_id) = object_info.version_id {
version_id.to_string()
} else {
"zzzzzzzz-zzzz-zzzz-zzzz-zzzzzzzzzzzz".to_string()
};
lmdb.put(
&mut wtxn,
&expiry_time.unix_timestamp(),
&LifecycleContent {
ver_no: 0,
ver_id: version_id,
mod_time,
type_: LifecycleType::TransitionNoncurrent,
object_name: object_info.name,
},
/*if let Ok((lc_config, _)) = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket_name.as_str()).await {
if let Ok(object_info) = ecstore
.get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
.await
{
let event = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::eval_action_from_lifecycle(
&lc_config,
None,
None,
&object_info,
)
.unwrap();
.await;
rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::apply_expiry_on_non_transitioned_objects(
ecstore.clone(),
&object_info,
&event,
&rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_audit::LcEventSrc::Scanner,
)
.await;
expired = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(2)).await;
}
}*/
for record in records {
if !record.usage.has_live_object {
continue;
}
wtxn.commit().unwrap();
let object_info = convert_record_to_object_info(record);
println!("object_info2: {object_info:?}");
let mod_time = object_info.mod_time.unwrap_or(OffsetDateTime::now_utc());
let expiry_time = rustfs_ecstore::bucket::lifecycle::lifecycle::expected_expiry_time(mod_time, 1);
let mut wtxn = lmdb_env.write_txn().unwrap();
let iter = lmdb.iter_mut(&mut wtxn).unwrap();
//let _ = unsafe { iter.del_current().unwrap() };
for row in iter {
if let Ok(ref elm) = row {
let LifecycleContent {
ver_no,
ver_id,
mod_time,
type_,
object_name,
} = &elm.1;
println!("cache row:{ver_no} {ver_id} {mod_time} {type_:?} {object_name}");
}
println!("row:{row:?}");
}
//drop(iter);
wtxn.commit().unwrap();
let version_id = if let Some(version_id) = object_info.version_id {
version_id.to_string()
} else {
"zzzzzzzz-zzzz-zzzz-zzzz-zzzzzzzzzzzz".to_string()
};
lmdb.put(
&mut wtxn,
&expiry_time.unix_timestamp(),
&LifecycleContent {
ver_no: 0,
ver_id: version_id,
mod_time,
type_: LifecycleType::TransitionNoncurrent,
object_name: object_info.name,
},
)
.unwrap();
}
wtxn.commit().unwrap();
let mut wtxn = lmdb_env.write_txn().unwrap();
let iter = lmdb.iter_mut(&mut wtxn).unwrap();
//let _ = unsafe { iter.del_current().unwrap() };
for row in iter {
if let Ok(ref elm) = row {
let LifecycleContent {
ver_no,
ver_id,
mod_time,
type_,
object_name,
} = &elm.1;
println!("cache row:{ver_no} {ver_id} {mod_time} {type_:?} {object_name}");
}
println!("row:{row:?}");
}
//drop(iter);
wtxn.commit().unwrap();
}
println!("Lifecycle cache test completed");

View File

@@ -415,29 +415,28 @@ mod serial_tests {
.await;
println!("Pending expiry tasks: {pending}");
if let Ok((lc_config, _)) = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket_name.as_str()).await {
if let Ok(object_info) = ecstore
if let Ok((lc_config, _)) = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket_name.as_str()).await
&& let Ok(object_info) = ecstore
.get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
.await
{
let event = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::eval_action_from_lifecycle(
&lc_config,
None,
None,
&object_info,
)
.await;
{
let event = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::eval_action_from_lifecycle(
&lc_config,
None,
None,
&object_info,
)
.await;
rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::apply_expiry_on_non_transitioned_objects(
ecstore.clone(),
&object_info,
&event,
&rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_audit::LcEventSrc::Scanner,
)
.await;
rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::apply_expiry_on_non_transitioned_objects(
ecstore.clone(),
&object_info,
&event,
&rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_audit::LcEventSrc::Scanner,
)
.await;
expired = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(2)).await;
}
expired = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(2)).await;
}
if !expired {
@@ -550,32 +549,31 @@ mod serial_tests {
.await;
println!("Pending expiry tasks: {pending}");
if let Ok((lc_config, _)) = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket_name.as_str()).await {
if let Ok(obj_info) = ecstore
if let Ok((lc_config, _)) = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket_name.as_str()).await
&& let Ok(obj_info) = ecstore
.get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
.await
{
let event = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::eval_action_from_lifecycle(
&lc_config, None, None, &obj_info,
)
.await;
{
let event = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::eval_action_from_lifecycle(
&lc_config, None, None, &obj_info,
)
.await;
rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::apply_expiry_on_non_transitioned_objects(
ecstore.clone(),
&obj_info,
&event,
&rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_audit::LcEventSrc::Scanner,
)
.await;
rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::apply_expiry_on_non_transitioned_objects(
ecstore.clone(),
&obj_info,
&event,
&rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_audit::LcEventSrc::Scanner,
)
.await;
deleted = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(2)).await;
deleted = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(2)).await;
if !deleted {
println!(
"Object info: name={}, size={}, mod_time={:?}",
obj_info.name, obj_info.size, obj_info.mod_time
);
}
if !deleted {
println!(
"Object info: name={}, size={}, mod_time={:?}",
obj_info.name, obj_info.size, obj_info.mod_time
);
}
}

View File

@@ -204,10 +204,10 @@ impl TargetFactory for MQTTTargetFactory {
if !std::path::Path::new(&queue_dir).is_absolute() {
return Err(TargetError::Configuration("MQTT queue directory must be an absolute path".to_string()));
}
if let Some(qos_str) = config.lookup(MQTT_QOS) {
if qos_str == "0" {
warn!("Using queue_dir with QoS 0 may result in event loss");
}
if let Some(qos_str) = config.lookup(MQTT_QOS)
&& qos_str == "0"
{
warn!("Using queue_dir with QoS 0 may result in event loss");
}
}

View File

@@ -138,12 +138,11 @@ impl AuditRegistry {
format!("{ENV_PREFIX}{AUDIT_ROUTE_PREFIX}{target_type}{DEFAULT_DELIMITER}{ENABLE_KEY}{DEFAULT_DELIMITER}")
.to_uppercase();
for (key, value) in &all_env {
if EnableState::from_str(value).ok().map(|s| s.is_enabled()).unwrap_or(false) {
if let Some(id) = key.strip_prefix(&enable_prefix) {
if !id.is_empty() {
instance_ids_from_env.insert(id.to_lowercase());
}
}
if EnableState::from_str(value).ok().map(|s| s.is_enabled()).unwrap_or(false)
&& let Some(id) = key.strip_prefix(&enable_prefix)
&& !id.is_empty()
{
instance_ids_from_env.insert(id.to_lowercase());
}
}
@@ -292,10 +291,10 @@ impl AuditRegistry {
for section in sections {
let mut section_map: std::collections::HashMap<String, KVS> = std::collections::HashMap::new();
// Add default item
if let Some(default_kvs) = section_defaults.get(&section) {
if !default_kvs.is_empty() {
section_map.insert(DEFAULT_DELIMITER.to_string(), default_kvs.clone());
}
if let Some(default_kvs) = section_defaults.get(&section)
&& !default_kvs.is_empty()
{
section_map.insert(DEFAULT_DELIMITER.to_string(), default_kvs.clone());
}
// Add successful instance item

View File

@@ -573,10 +573,10 @@ impl AuditSystem {
}
// Remove existing target if present
if let Some(old_target) = registry.remove_target(&target_id) {
if let Err(e) = old_target.close().await {
error!(target_id = %target_id, error = %e, "Failed to close old target during upsert");
}
if let Some(old_target) = registry.remove_target(&target_id)
&& let Err(e) = old_target.close().await
{
error!(target_id = %target_id, error = %e, "Failed to close old target during upsert");
}
registry.add_target(target_id.clone(), target);

View File

@@ -605,13 +605,12 @@ impl DataUsageCache {
pub fn search_parent(&self, hash: &DataUsageHash) -> Option<DataUsageHash> {
let want = hash.key();
if let Some(last_index) = want.rfind('/') {
if let Some(v) = self.find(&want[0..last_index]) {
if v.children.contains(&want) {
let found = hash_path(&want[0..last_index]);
return Some(found);
}
}
if let Some(last_index) = want.rfind('/')
&& let Some(v) = self.find(&want[0..last_index])
&& v.children.contains(&want)
{
let found = hash_path(&want[0..last_index]);
return Some(found);
}
for (k, v) in self.cache.iter() {
@@ -1150,10 +1149,10 @@ impl DataUsageInfo {
self.buckets_count = self.buckets_usage.len() as u64;
// Update last update time
if let Some(other_update) = other.last_update {
if self.last_update.is_none() || other_update > self.last_update.unwrap() {
self.last_update = Some(other_update);
}
if let Some(other_update) = other.last_update
&& (self.last_update.is_none() || other_update > self.last_update.unwrap())
{
self.last_update = Some(other_update);
}
}
}

View File

@@ -403,10 +403,10 @@ fn lc_get_prefix(rule: &LifecycleRule) -> String {
} else if let Some(filter) = &rule.filter {
if let Some(p) = &filter.prefix {
return p.to_string();
} else if let Some(and) = &filter.and {
if let Some(p) = &and.prefix {
return p.to_string();
}
} else if let Some(and) = &filter.and
&& let Some(p) = &and.prefix
{
return p.to_string();
}
}
@@ -475,21 +475,19 @@ pub fn rep_has_active_rules(config: &ReplicationConfiguration, prefix: &str, rec
{
continue;
}
if !prefix.is_empty() {
if let Some(filter) = &rule.filter {
if let Some(r_prefix) = &filter.prefix {
if !r_prefix.is_empty() {
// incoming prefix must be in rule prefix
if !recursive && !prefix.starts_with(r_prefix) {
continue;
}
// If recursive, we can skip this rule if it doesn't match the tested prefix or level below prefix
// does not match
if recursive && !r_prefix.starts_with(prefix) && !prefix.starts_with(r_prefix) {
continue;
}
}
}
if !prefix.is_empty()
&& let Some(filter) = &rule.filter
&& let Some(r_prefix) = &filter.prefix
&& !r_prefix.is_empty()
{
// incoming prefix must be in rule prefix
if !recursive && !prefix.starts_with(r_prefix) {
continue;
}
// If recursive, we can skip this rule if it doesn't match the tested prefix or level below prefix
// does not match
if recursive && !r_prefix.starts_with(prefix) && !prefix.starts_with(r_prefix) {
continue;
}
}
return true;

View File

@@ -466,21 +466,21 @@ impl Metrics {
// Lifetime operations
for i in 0..Metric::Last as usize {
let count = self.operations[i].load(Ordering::Relaxed);
if count > 0 {
if let Some(metric) = Metric::from_index(i) {
metrics.life_time_ops.insert(metric.as_str().to_string(), count);
}
if count > 0
&& let Some(metric) = Metric::from_index(i)
{
metrics.life_time_ops.insert(metric.as_str().to_string(), count);
}
}
// Last minute statistics for realtime metrics
for i in 0..Metric::LastRealtime as usize {
let last_min = self.latency[i].total().await;
if last_min.n > 0 {
if let Some(_metric) = Metric::from_index(i) {
// Convert to madmin TimedAction format if needed
// This would require implementing the conversion
}
if last_min.n > 0
&& let Some(_metric) = Metric::from_index(i)
{
// Convert to madmin TimedAction format if needed
// This would require implementing the conversion
}
}

View File

@@ -178,11 +178,11 @@ impl RustFSTestEnvironment {
info!("Cleaning up any existing RustFS processes");
let output = Command::new("pkill").args(["-f", "rustfs"]).output();
if let Ok(output) = output {
if output.status.success() {
info!("Killed existing RustFS processes");
sleep(Duration::from_millis(1000)).await;
}
if let Ok(output) = output
&& output.status.success()
{
info!("Killed existing RustFS processes");
sleep(Duration::from_millis(1000)).await;
}
Ok(())
}

View File

@@ -406,11 +406,11 @@ impl VaultTestEnvironment {
let port_check = TcpStream::connect(VAULT_ADDRESS).await.is_ok();
if port_check {
// Additional check by making a health request
if let Ok(response) = reqwest::get(&format!("{VAULT_URL}/v1/sys/health")).await {
if response.status().is_success() {
info!("Vault server is ready after {} seconds", i);
return Ok(());
}
if let Ok(response) = reqwest::get(&format!("{VAULT_URL}/v1/sys/health")).await
&& response.status().is_success()
{
info!("Vault server is ready after {} seconds", i);
return Ok(());
}
}

View File

@@ -498,19 +498,19 @@ impl BucketTargetSys {
bucket: bucket.to_string(),
})?;
if arn.arn_type == BucketTargetType::ReplicationService {
if let Ok((config, _)) = get_replication_config(bucket).await {
for rule in config.filter_target_arns(&ObjectOpts {
op_type: ReplicationType::All,
..Default::default()
}) {
if rule == arn_str || config.role == arn_str {
let arn_remotes_map = self.arn_remotes_map.read().await;
if arn_remotes_map.get(arn_str).is_some() {
return Err(BucketTargetError::BucketRemoteRemoveDisallowed {
bucket: bucket.to_string(),
});
}
if arn.arn_type == BucketTargetType::ReplicationService
&& let Ok((config, _)) = get_replication_config(bucket).await
{
for rule in config.filter_target_arns(&ObjectOpts {
op_type: ReplicationType::All,
..Default::default()
}) {
if rule == arn_str || config.role == arn_str {
let arn_remotes_map = self.arn_remotes_map.read().await;
if arn_remotes_map.get(arn_str).is_some() {
return Err(BucketTargetError::BucketRemoteRemoveDisallowed {
bucket: bucket.to_string(),
});
}
}
}
@@ -691,22 +691,22 @@ impl BucketTargetSys {
}
// Add new targets
if let Some(new_targets) = targets {
if !new_targets.is_empty() {
for target in &new_targets.targets {
if let Ok(client) = self.get_remote_target_client_internal(target).await {
arn_remotes_map.insert(
target.arn.clone(),
ArnTarget {
client: Some(Arc::new(client)),
last_refresh: OffsetDateTime::now_utc(),
},
);
self.update_bandwidth_limit(bucket, &target.arn, target.bandwidth_limit);
}
if let Some(new_targets) = targets
&& !new_targets.is_empty()
{
for target in &new_targets.targets {
if let Ok(client) = self.get_remote_target_client_internal(target).await {
arn_remotes_map.insert(
target.arn.clone(),
ArnTarget {
client: Some(Arc::new(client)),
last_refresh: OffsetDateTime::now_utc(),
},
);
self.update_bandwidth_limit(bucket, &target.arn, target.bandwidth_limit);
}
targets_map.insert(bucket.to_string(), new_targets.targets.clone());
}
targets_map.insert(bucket.to_string(), new_targets.targets.clone());
}
}

View File

@@ -31,10 +31,10 @@ impl BucketObjectLockSys {
}
pub async fn get(bucket: &str) -> Option<DefaultRetention> {
if let Ok(object_lock_config) = get_object_lock_config(bucket).await {
if let Some(object_lock_rule) = object_lock_config.0.rule {
return object_lock_rule.default_retention;
}
if let Ok(object_lock_config) = get_object_lock_config(bucket).await
&& let Some(object_lock_rule) = object_lock_config.0.rule
{
return object_lock_rule.default_retention;
}
None
}

View File

@@ -55,10 +55,10 @@ impl ReplicationConfigurationExt for ReplicationConfiguration {
if !has_arn {
has_arn = true;
}
if let Some(status) = &rule.existing_object_replication {
if status.status == ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::ENABLED) {
return (true, true);
}
if let Some(status) = &rule.existing_object_replication
&& status.status == ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::ENABLED)
{
return (true, true);
}
}
}
@@ -86,12 +86,11 @@ impl ReplicationConfigurationExt for ReplicationConfiguration {
continue;
}
if let Some(status) = &rule.existing_object_replication {
if obj.existing_object
&& status.status == ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::DISABLED)
{
continue;
}
if let Some(status) = &rule.existing_object_replication
&& obj.existing_object
&& status.status == ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::DISABLED)
{
continue;
}
if !obj.name.starts_with(rule.prefix()) {
@@ -145,12 +144,11 @@ impl ReplicationConfigurationExt for ReplicationConfiguration {
continue;
}
if let Some(status) = &rule.existing_object_replication {
if obj.existing_object
&& status.status == ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::DISABLED)
{
return false;
}
if let Some(status) = &rule.existing_object_replication
&& obj.existing_object
&& status.status == ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::DISABLED)
{
return false;
}
if obj.op_type == ReplicationType::Delete {
@@ -186,20 +184,20 @@ impl ReplicationConfigurationExt for ReplicationConfiguration {
continue;
}
if let Some(filter) = &rule.filter {
if let Some(filter_prefix) = &filter.prefix {
if !prefix.is_empty() && !filter_prefix.is_empty() {
// The provided prefix must fall within the rule prefix
if !recursive && !prefix.starts_with(filter_prefix) {
continue;
}
}
// When recursive, skip this rule if it does not match the test prefix or hierarchy
if recursive && !rule.prefix().starts_with(prefix) && !prefix.starts_with(rule.prefix()) {
if let Some(filter) = &rule.filter
&& let Some(filter_prefix) = &filter.prefix
{
if !prefix.is_empty() && !filter_prefix.is_empty() {
// The provided prefix must fall within the rule prefix
if !recursive && !prefix.starts_with(filter_prefix) {
continue;
}
}
// When recursive, skip this rule if it does not match the test prefix or hierarchy
if recursive && !rule.prefix().starts_with(prefix) && !prefix.starts_with(rule.prefix()) {
continue;
}
}
return true;
}

View File

@@ -512,20 +512,20 @@ impl<S: StorageAPI> ReplicationPool<S> {
if !lrg_workers.is_empty() {
let index = (hash as usize) % lrg_workers.len();
if let Some(worker) = lrg_workers.get(index) {
if worker.try_send(ReplicationOperation::Object(Box::new(ri.clone()))).is_err() {
// Queue to MRF if worker is busy
let _ = self.mrf_save_tx.try_send(ri.to_mrf_entry());
if let Some(worker) = lrg_workers.get(index)
&& worker.try_send(ReplicationOperation::Object(Box::new(ri.clone()))).is_err()
{
// Queue to MRF if worker is busy
let _ = self.mrf_save_tx.try_send(ri.to_mrf_entry());
// Try to add more workers if possible
let max_l_workers = *self.max_l_workers.read().await;
let existing = lrg_workers.len();
if self.active_lrg_workers() < std::cmp::min(max_l_workers, LARGE_WORKER_COUNT) as i32 {
let workers = std::cmp::min(existing + 1, max_l_workers);
// Try to add more workers if possible
let max_l_workers = *self.max_l_workers.read().await;
let existing = lrg_workers.len();
if self.active_lrg_workers() < std::cmp::min(max_l_workers, LARGE_WORKER_COUNT) as i32 {
let workers = std::cmp::min(existing + 1, max_l_workers);
drop(lrg_workers);
self.resize_lrg_workers(workers, existing).await;
}
drop(lrg_workers);
self.resize_lrg_workers(workers, existing).await;
}
}
}
@@ -539,47 +539,45 @@ impl<S: StorageAPI> ReplicationPool<S> {
_ => self.get_worker_ch(&ri.bucket, &ri.name, ri.size).await,
};
if let Some(channel) = ch {
if channel.try_send(ReplicationOperation::Object(Box::new(ri.clone()))).is_err() {
// Queue to MRF if all workers are busy
let _ = self.mrf_save_tx.try_send(ri.to_mrf_entry());
if let Some(channel) = ch
&& channel.try_send(ReplicationOperation::Object(Box::new(ri.clone()))).is_err()
{
// Queue to MRF if all workers are busy
let _ = self.mrf_save_tx.try_send(ri.to_mrf_entry());
// Try to scale up workers based on priority
let priority = self.priority.read().await.clone();
let max_workers = *self.max_workers.read().await;
// Try to scale up workers based on priority
let priority = self.priority.read().await.clone();
let max_workers = *self.max_workers.read().await;
match priority {
ReplicationPriority::Fast => {
// Log warning about unable to keep up
info!("Warning: Unable to keep up with incoming traffic");
match priority {
ReplicationPriority::Fast => {
// Log warning about unable to keep up
info!("Warning: Unable to keep up with incoming traffic");
}
ReplicationPriority::Slow => {
info!("Warning: Unable to keep up with incoming traffic - recommend increasing replication priority to auto");
}
ReplicationPriority::Auto => {
let max_w = std::cmp::min(max_workers, WORKER_MAX_LIMIT);
let active_workers = self.active_workers();
if active_workers < max_w as i32 {
let workers = self.workers.read().await;
let new_count = std::cmp::min(workers.len() + 1, max_w);
let existing = workers.len();
drop(workers);
self.resize_workers(new_count, existing).await;
}
ReplicationPriority::Slow => {
info!(
"Warning: Unable to keep up with incoming traffic - recommend increasing replication priority to auto"
);
}
ReplicationPriority::Auto => {
let max_w = std::cmp::min(max_workers, WORKER_MAX_LIMIT);
let active_workers = self.active_workers();
if active_workers < max_w as i32 {
let workers = self.workers.read().await;
let new_count = std::cmp::min(workers.len() + 1, max_w);
let existing = workers.len();
let max_mrf_workers = std::cmp::min(max_workers, MRF_WORKER_MAX_LIMIT);
let active_mrf = self.active_mrf_workers();
drop(workers);
self.resize_workers(new_count, existing).await;
}
if active_mrf < max_mrf_workers as i32 {
let current_mrf = self.mrf_worker_size.load(Ordering::SeqCst);
let new_mrf = std::cmp::min(current_mrf + 1, max_mrf_workers as i32);
let max_mrf_workers = std::cmp::min(max_workers, MRF_WORKER_MAX_LIMIT);
let active_mrf = self.active_mrf_workers();
if active_mrf < max_mrf_workers as i32 {
let current_mrf = self.mrf_worker_size.load(Ordering::SeqCst);
let new_mrf = std::cmp::min(current_mrf + 1, max_mrf_workers as i32);
self.resize_failed_workers(new_mrf).await;
}
self.resize_failed_workers(new_mrf).await;
}
}
}
@@ -593,31 +591,29 @@ impl<S: StorageAPI> ReplicationPool<S> {
_ => self.get_worker_ch(&doi.bucket, &doi.delete_object.object_name, 0).await,
};
if let Some(channel) = ch {
if channel.try_send(ReplicationOperation::Delete(Box::new(doi.clone()))).is_err() {
let _ = self.mrf_save_tx.try_send(doi.to_mrf_entry());
if let Some(channel) = ch
&& channel.try_send(ReplicationOperation::Delete(Box::new(doi.clone()))).is_err()
{
let _ = self.mrf_save_tx.try_send(doi.to_mrf_entry());
let priority = self.priority.read().await.clone();
let max_workers = *self.max_workers.read().await;
let priority = self.priority.read().await.clone();
let max_workers = *self.max_workers.read().await;
match priority {
ReplicationPriority::Fast => {
info!("Warning: Unable to keep up with incoming deletes");
}
ReplicationPriority::Slow => {
info!(
"Warning: Unable to keep up with incoming deletes - recommend increasing replication priority to auto"
);
}
ReplicationPriority::Auto => {
let max_w = std::cmp::min(max_workers, WORKER_MAX_LIMIT);
if self.active_workers() < max_w as i32 {
let workers = self.workers.read().await;
let new_count = std::cmp::min(workers.len() + 1, max_w);
let existing = workers.len();
drop(workers);
self.resize_workers(new_count, existing).await;
}
match priority {
ReplicationPriority::Fast => {
info!("Warning: Unable to keep up with incoming deletes");
}
ReplicationPriority::Slow => {
info!("Warning: Unable to keep up with incoming deletes - recommend increasing replication priority to auto");
}
ReplicationPriority::Auto => {
let max_w = std::cmp::min(max_workers, WORKER_MAX_LIMIT);
if self.active_workers() < max_w as i32 {
let workers = self.workers.read().await;
let new_count = std::cmp::min(workers.len() + 1, max_w);
let existing = workers.len();
drop(workers);
self.resize_workers(new_count, existing).await;
}
}
}

View File

@@ -242,11 +242,10 @@ impl ReplicationResyncer {
if let Some(last_update) = status.last_update {
if last_update > *last_update_times.get(bucket).unwrap_or(&OffsetDateTime::UNIX_EPOCH) {
if let Some(last_update) = status.last_update
&& last_update > *last_update_times.get(bucket).unwrap_or(&OffsetDateTime::UNIX_EPOCH) {
update = true;
}
}
if update {
if let Err(err) = save_resync_status(bucket, status, api.clone()).await {
@@ -345,13 +344,12 @@ impl ReplicationResyncer {
return;
};
if !heal {
if let Err(e) = self
if !heal
&& let Err(e) = self
.mark_status(ResyncStatusType::ResyncStarted, opts.clone(), storage.clone())
.await
{
error!("Failed to mark resync status: {}", e);
}
{
error!("Failed to mark resync status: {}", e);
}
let (tx, mut rx) = tokio::sync::mpsc::channel(100);
@@ -1463,21 +1461,18 @@ async fn replicate_delete_to_target(dobj: &DeletedObjectReplicationInfo, tgt_cli
Some(version_id.to_string())
};
if dobj.delete_object.delete_marker_version_id.is_some() {
if let Err(e) = tgt_client
if dobj.delete_object.delete_marker_version_id.is_some()
&& let Err(e) = tgt_client
.head_object(&tgt_client.bucket, &dobj.delete_object.object_name, version_id.clone())
.await
{
if let SdkError::ServiceError(service_err) = &e {
if !service_err.err().is_not_found() {
rinfo.replication_status = ReplicationStatusType::Failed;
rinfo.error = Some(e.to_string());
&& let SdkError::ServiceError(service_err) = &e
&& !service_err.err().is_not_found()
{
rinfo.replication_status = ReplicationStatusType::Failed;
rinfo.error = Some(e.to_string());
return rinfo;
}
}
};
}
return rinfo;
};
match tgt_client
.remove_object(

View File

@@ -49,13 +49,13 @@ impl ExponentialMovingAverage {
pub fn update_exponential_moving_average(&self, now: SystemTime) {
if let Ok(mut last_update_guard) = self.last_update.try_lock() {
let last_update = *last_update_guard;
if let Ok(duration) = now.duration_since(last_update) {
if duration.as_secs() > 0 {
let decay = (-duration.as_secs_f64() / 60.0).exp(); // 1 minute decay
let current_value = f64::from_bits(self.value.load(AtomicOrdering::Relaxed));
self.value.store((current_value * decay).to_bits(), AtomicOrdering::Relaxed);
*last_update_guard = now;
}
if let Ok(duration) = now.duration_since(last_update)
&& duration.as_secs() > 0
{
let decay = (-duration.as_secs_f64() / 60.0).exp(); // 1 minute decay
let current_value = f64::from_bits(self.value.load(AtomicOrdering::Relaxed));
self.value.store((current_value * decay).to_bits(), AtomicOrdering::Relaxed);
*last_update_guard = now;
}
}
}
@@ -757,10 +757,10 @@ impl ReplicationStats {
/// Check if bucket replication statistics have usage
pub fn has_replication_usage(&self, bucket: &str) -> bool {
if let Ok(cache) = self.cache.try_read() {
if let Some(stats) = cache.get(bucket) {
return stats.has_replication_usage();
}
if let Ok(cache) = self.cache.try_read()
&& let Some(stats) = cache.get(bucket)
{
return stats.has_replication_usage();
}
false
}

View File

@@ -37,10 +37,11 @@ impl VersioningApi for VersioningConfiguration {
return true;
}
if let Some(exclude_folders) = self.exclude_folders {
if exclude_folders && prefix.ends_with('/') {
return false;
}
if let Some(exclude_folders) = self.exclude_folders
&& exclude_folders
&& prefix.ends_with('/')
{
return false;
}
if let Some(ref excluded_prefixes) = self.excluded_prefixes {
@@ -67,10 +68,11 @@ impl VersioningApi for VersioningConfiguration {
return false;
}
if let Some(exclude_folders) = self.exclude_folders {
if exclude_folders && prefix.ends_with('/') {
return true;
}
if let Some(exclude_folders) = self.exclude_folders
&& exclude_folders
&& prefix.ends_with('/')
{
return true;
}
if let Some(ref excluded_prefixes) = self.excluded_prefixes {

View File

@@ -308,12 +308,11 @@ pub async fn list_path_raw(rx: CancellationToken, opts: ListPathRawOptions) -> d
// Break if all at EOF or error.
if at_eof + has_err == readers.len() {
if has_err > 0 {
if let Some(finished_fn) = opts.finished.as_ref() {
if has_err > 0 {
finished_fn(&errs).await;
}
}
if has_err > 0
&& let Some(finished_fn) = opts.finished.as_ref()
&& has_err > 0
{
finished_fn(&errs).await;
}
// error!("list_path_raw: at_eof + has_err == readers.len() break {:?}", &errs);

View File

@@ -161,7 +161,7 @@ impl TransitionClient {
async fn private_new(endpoint: &str, opts: Options, tier_type: &str) -> Result<TransitionClient, std::io::Error> {
let endpoint_url = get_endpoint_url(endpoint, opts.secure)?;
let _ = rustls::crypto::ring::default_provider().install_default();
let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
let scheme = endpoint_url.scheme();
let client;
let tls = if let Some(store) = load_root_store_from_tls_path() {

View File

@@ -211,10 +211,11 @@ async fn apply_dynamic_config_for_sub_sys<S: StorageAPI>(cfg: &mut Config, api:
for (i, count) in set_drive_counts.iter().enumerate() {
match storageclass::lookup_config(&kvs, *count) {
Ok(res) => {
if i == 0 && GLOBAL_STORAGE_CLASS.get().is_none() {
if let Err(r) = GLOBAL_STORAGE_CLASS.set(res) {
error!("GLOBAL_STORAGE_CLASS.set failed {:?}", r);
}
if i == 0
&& GLOBAL_STORAGE_CLASS.get().is_none()
&& let Err(r) = GLOBAL_STORAGE_CLASS.set(res)
{
error!("GLOBAL_STORAGE_CLASS.set failed {:?}", r);
}
}
Err(err) => {

View File

@@ -180,10 +180,10 @@ impl Config {
let mut default = HashMap::new();
default.insert(DEFAULT_DELIMITER.to_owned(), v.clone());
self.0.insert(k.clone(), default);
} else if !self.0[k].contains_key(DEFAULT_DELIMITER) {
if let Some(m) = self.0.get_mut(k) {
m.insert(DEFAULT_DELIMITER.to_owned(), v.clone());
}
} else if !self.0[k].contains_key(DEFAULT_DELIMITER)
&& let Some(m) = self.0.get_mut(k)
{
m.insert(DEFAULT_DELIMITER.to_owned(), v.clone());
}
}
}

View File

@@ -65,18 +65,16 @@ lazy_static::lazy_static! {
/// Store data usage info to backend storage
pub async fn store_data_usage_in_backend(data_usage_info: DataUsageInfo, store: Arc<ECStore>) -> Result<(), Error> {
// Prevent older data from overwriting newer persisted stats
if let Ok(buf) = read_config(store.clone(), &DATA_USAGE_OBJ_NAME_PATH).await {
if let Ok(existing) = serde_json::from_slice::<DataUsageInfo>(&buf) {
if let (Some(new_ts), Some(existing_ts)) = (data_usage_info.last_update, existing.last_update) {
if new_ts <= existing_ts {
info!(
"Skip persisting data usage: incoming last_update {:?} <= existing {:?}",
new_ts, existing_ts
);
return Ok(());
}
}
}
if let Ok(buf) = read_config(store.clone(), &DATA_USAGE_OBJ_NAME_PATH).await
&& let Ok(existing) = serde_json::from_slice::<DataUsageInfo>(&buf)
&& let (Some(new_ts), Some(existing_ts)) = (data_usage_info.last_update, existing.last_update)
&& new_ts <= existing_ts
{
info!(
"Skip persisting data usage: incoming last_update {:?} <= existing {:?}",
new_ts, existing_ts
);
return Ok(());
}
let data =
@@ -149,26 +147,24 @@ pub async fn load_data_usage_from_backend(store: Arc<ECStore>) -> Result<DataUsa
// Handle replication info
for (bucket, bui) in &data_usage_info.buckets_usage {
if bui.replicated_size_v1 > 0
if (bui.replicated_size_v1 > 0
|| bui.replication_failed_count_v1 > 0
|| bui.replication_failed_size_v1 > 0
|| bui.replication_pending_count_v1 > 0
|| bui.replication_pending_count_v1 > 0)
&& let Ok((cfg, _)) = get_replication_config(bucket).await
&& !cfg.role.is_empty()
{
if let Ok((cfg, _)) = get_replication_config(bucket).await {
if !cfg.role.is_empty() {
data_usage_info.replication_info.insert(
cfg.role.clone(),
BucketTargetUsageInfo {
replication_failed_size: bui.replication_failed_size_v1,
replication_failed_count: bui.replication_failed_count_v1,
replicated_size: bui.replicated_size_v1,
replication_pending_count: bui.replication_pending_count_v1,
replication_pending_size: bui.replication_pending_size_v1,
..Default::default()
},
);
}
}
data_usage_info.replication_info.insert(
cfg.role.clone(),
BucketTargetUsageInfo {
replication_failed_size: bui.replication_failed_size_v1,
replication_failed_count: bui.replication_failed_count_v1,
replicated_size: bui.replicated_size_v1,
replication_pending_count: bui.replication_pending_count_v1,
replication_pending_size: bui.replication_pending_size_v1,
..Default::default()
},
);
}
}
@@ -177,10 +173,10 @@ pub async fn load_data_usage_from_backend(store: Arc<ECStore>) -> Result<DataUsa
/// Aggregate usage information from local disk snapshots.
fn merge_snapshot(aggregated: &mut DataUsageInfo, mut snapshot: LocalUsageSnapshot, latest_update: &mut Option<SystemTime>) {
if let Some(update) = snapshot.last_update {
if latest_update.is_none_or(|current| update > current) {
*latest_update = Some(update);
}
if let Some(update) = snapshot.last_update
&& latest_update.is_none_or(|current| update > current)
{
*latest_update = Some(update);
}
snapshot.recompute_totals();
@@ -255,10 +251,10 @@ pub async fn aggregate_local_snapshots(store: Arc<ECStore>) -> Result<(Vec<DiskU
);
// Best-effort cleanup so next scan can rebuild a fresh snapshot instead of repeatedly failing
let snapshot_file = snapshot_path(root.as_path(), &disk_id);
if let Err(remove_err) = fs::remove_file(&snapshot_file).await {
if remove_err.kind() != std::io::ErrorKind::NotFound {
warn!("Failed to remove corrupted snapshot {:?}: {}", snapshot_file, remove_err);
}
if let Err(remove_err) = fs::remove_file(&snapshot_file).await
&& remove_err.kind() != std::io::ErrorKind::NotFound
{
warn!("Failed to remove corrupted snapshot {:?}: {}", snapshot_file, remove_err);
}
}

View File

@@ -288,15 +288,15 @@ impl LocalDisk {
let path = path_join(&[trash.clone(), name.into()]);
if file_type.is_dir() {
if let Err(e) = tokio::fs::remove_dir_all(path).await {
if e.kind() != ErrorKind::NotFound {
return Err(e.into());
}
}
} else if let Err(e) = tokio::fs::remove_file(path).await {
if e.kind() != ErrorKind::NotFound {
if let Err(e) = tokio::fs::remove_dir_all(path).await
&& e.kind() != ErrorKind::NotFound
{
return Err(e.into());
}
} else if let Err(e) = tokio::fs::remove_file(path).await
&& e.kind() != ErrorKind::NotFound
{
return Err(e.into());
}
}
@@ -684,13 +684,11 @@ impl LocalDisk {
Err(err) => {
if err == Error::FileNotFound
&& !skip_access_checks(volume_dir.as_ref().to_string_lossy().to_string().as_str())
&& let Err(e) = access(volume_dir.as_ref()).await
&& e.kind() == ErrorKind::NotFound
{
if let Err(e) = access(volume_dir.as_ref()).await {
if e.kind() == ErrorKind::NotFound {
// warn!("read_metadata_with_dmtime os err {:?}", &aerr);
return Err(DiskError::VolumeNotFound);
}
}
// warn!("read_metadata_with_dmtime os err {:?}", &aerr);
return Err(DiskError::VolumeNotFound);
}
Err(err)
@@ -763,13 +761,13 @@ impl LocalDisk {
let mut f = match super::fs::open_file(file_path.as_ref(), O_RDONLY).await {
Ok(f) => f,
Err(e) => {
if e.kind() == ErrorKind::NotFound && !skip_access_checks(volume) {
if let Err(er) = access(volume_dir.as_ref()).await {
if er.kind() == ErrorKind::NotFound {
warn!("read_all_data_with_dmtime os err {:?}", &er);
return Err(DiskError::VolumeNotFound);
}
}
if e.kind() == ErrorKind::NotFound
&& !skip_access_checks(volume)
&& let Err(er) = access(volume_dir.as_ref()).await
&& er.kind() == ErrorKind::NotFound
{
warn!("read_all_data_with_dmtime os err {:?}", &er);
return Err(DiskError::VolumeNotFound);
}
return Err(to_file_error(e).into());
@@ -828,10 +826,10 @@ impl LocalDisk {
let _ = fm.data.remove(vec![vid, dir]);
let dir_path = self.get_object_path(volume, format!("{path}/{dir}").as_str())?;
if let Err(err) = self.move_to_trash(&dir_path, true, false).await {
if !(err == DiskError::FileNotFound || err == DiskError::VolumeNotFound) {
return Err(err);
}
if let Err(err) = self.move_to_trash(&dir_path, true, false).await
&& !(err == DiskError::FileNotFound || err == DiskError::VolumeNotFound)
{
return Err(err);
};
}
}
@@ -1051,11 +1049,11 @@ impl LocalDisk {
continue;
}
if let Some(forward) = &forward {
if &entry < forward {
*item = "".to_owned();
continue;
}
if let Some(forward) = &forward
&& &entry < forward
{
*item = "".to_owned();
continue;
}
if entry.ends_with(SLASH_SEPARATOR) {
@@ -1133,10 +1131,10 @@ impl LocalDisk {
})
.await?;
if opts.recursive {
if let Err(er) = Box::pin(self.scan_dir(pop, prefix.clone(), opts, out, objs_returned)).await {
error!("scan_dir err {:?}", er);
}
if opts.recursive
&& let Err(er) = Box::pin(self.scan_dir(pop, prefix.clone(), opts, out, objs_returned)).await
{
error!("scan_dir err {:?}", er);
}
dir_stack.pop();
}
@@ -1200,10 +1198,10 @@ impl LocalDisk {
})
.await?;
if opts.recursive {
if let Err(er) = Box::pin(self.scan_dir(dir, prefix.clone(), opts, out, objs_returned)).await {
warn!("scan_dir err {:?}", &er);
}
if opts.recursive
&& let Err(er) = Box::pin(self.scan_dir(dir, prefix.clone(), opts, out, objs_returned)).await
{
warn!("scan_dir err {:?}", &er);
}
}
@@ -1345,23 +1343,23 @@ impl DiskAPI for LocalDisk {
if format_info.file_info.is_some() && id.is_some() {
// check last check time
if let Some(last_check) = format_info.last_check {
if last_check.unix_timestamp() + 1 < OffsetDateTime::now_utc().unix_timestamp() {
return Ok(id);
}
if let Some(last_check) = format_info.last_check
&& last_check.unix_timestamp() + 1 < OffsetDateTime::now_utc().unix_timestamp()
{
return Ok(id);
}
}
let file_meta = self.check_format_json().await?;
if let Some(file_info) = &format_info.file_info {
if super::fs::same_file(&file_meta, file_info) {
let mut format_info = self.format_info.write().await;
format_info.last_check = Some(OffsetDateTime::now_utc());
drop(format_info);
if let Some(file_info) = &format_info.file_info
&& super::fs::same_file(&file_meta, file_info)
{
let mut format_info = self.format_info.write().await;
format_info.last_check = Some(OffsetDateTime::now_utc());
drop(format_info);
return Ok(id);
}
return Ok(id);
}
debug!("get_disk_id: read format.json");
@@ -1420,10 +1418,10 @@ impl DiskAPI for LocalDisk {
#[tracing::instrument(skip(self))]
async fn delete(&self, volume: &str, path: &str, opt: DeleteOptions) -> Result<()> {
let volume_dir = self.get_bucket_path(volume)?;
if !skip_access_checks(volume) {
if let Err(e) = access(&volume_dir).await {
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
if !skip_access_checks(volume)
&& let Err(e) = access(&volume_dir).await
{
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
let file_path = volume_dir.join(Path::new(&path));
@@ -1438,10 +1436,10 @@ impl DiskAPI for LocalDisk {
#[tracing::instrument(skip(self))]
async fn verify_file(&self, volume: &str, path: &str, fi: &FileInfo) -> Result<CheckPartsResp> {
let volume_dir = self.get_bucket_path(volume)?;
if !skip_access_checks(volume) {
if let Err(e) = access(&volume_dir).await {
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
if !skip_access_checks(volume)
&& let Err(e) = access(&volume_dir).await
{
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
let mut resp = CheckPartsResp {
@@ -1466,14 +1464,14 @@ impl DiskAPI for LocalDisk {
.await
.err();
resp.results[i] = conv_part_err_to_int(&err);
if resp.results[i] == CHECK_PART_UNKNOWN {
if let Some(err) = err {
error!("verify_file: failed to bitrot verify file: {:?}, error: {:?}", &part_path, &err);
if err == DiskError::FileAccessDenied {
continue;
}
info!("part unknown, disk: {}, path: {:?}", self.to_string(), part_path);
if resp.results[i] == CHECK_PART_UNKNOWN
&& let Some(err) = err
{
error!("verify_file: failed to bitrot verify file: {:?}, error: {:?}", &part_path, &err);
if err == DiskError::FileAccessDenied {
continue;
}
info!("part unknown, disk: {}, path: {:?}", self.to_string(), part_path);
}
}
@@ -1572,13 +1570,12 @@ impl DiskAPI for LocalDisk {
let e: DiskError = to_file_error(err).into();
if e == DiskError::FileNotFound {
if !skip_access_checks(volume) {
if let Err(err) = access(&volume_dir).await {
if err.kind() == ErrorKind::NotFound {
resp.results[i] = CHECK_PART_VOLUME_NOT_FOUND;
continue;
}
}
if !skip_access_checks(volume)
&& let Err(err) = access(&volume_dir).await
&& err.kind() == ErrorKind::NotFound
{
resp.results[i] = CHECK_PART_VOLUME_NOT_FOUND;
continue;
}
resp.results[i] = CHECK_PART_FILE_NOT_FOUND;
} else {
@@ -1634,11 +1631,11 @@ impl DiskAPI for LocalDisk {
}
};
if let Some(meta) = meta_op {
if !meta.is_dir() {
warn!("rename_part src is not dir {:?}", &src_file_path);
return Err(DiskError::FileAccessDenied);
}
if let Some(meta) = meta_op
&& !meta.is_dir()
{
warn!("rename_part src is not dir {:?}", &src_file_path);
return Err(DiskError::FileAccessDenied);
}
remove_std(&dst_file_path).map_err(to_file_error)?;
@@ -1695,10 +1692,10 @@ impl DiskAPI for LocalDisk {
}
};
if let Some(meta) = meta_op {
if !meta.is_dir() {
return Err(DiskError::FileAccessDenied);
}
if let Some(meta) = meta_op
&& !meta.is_dir()
{
return Err(DiskError::FileAccessDenied);
}
remove(&dst_file_path).await.map_err(to_file_error)?;
@@ -1814,10 +1811,10 @@ impl DiskAPI for LocalDisk {
async fn list_dir(&self, origvolume: &str, volume: &str, dir_path: &str, count: i32) -> Result<Vec<String>> {
if !origvolume.is_empty() {
let origvolume_dir = self.get_bucket_path(origvolume)?;
if !skip_access_checks(origvolume) {
if let Err(e) = access(origvolume_dir).await {
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
if !skip_access_checks(origvolume)
&& let Err(e) = access(origvolume_dir).await
{
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
}
@@ -1827,10 +1824,11 @@ impl DiskAPI for LocalDisk {
let entries = match os::read_dir(&dir_path_abs, count).await {
Ok(res) => res,
Err(e) => {
if e.kind() == std::io::ErrorKind::NotFound && !skip_access_checks(volume) {
if let Err(e) = access(&volume_dir).await {
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
if e.kind() == std::io::ErrorKind::NotFound
&& !skip_access_checks(volume)
&& let Err(e) = access(&volume_dir).await
{
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
return Err(to_file_error(e).into());
@@ -1845,10 +1843,10 @@ impl DiskAPI for LocalDisk {
async fn walk_dir<W: AsyncWrite + Unpin + Send>(&self, opts: WalkDirOptions, wr: &mut W) -> Result<()> {
let volume_dir = self.get_bucket_path(&opts.bucket)?;
if !skip_access_checks(&opts.bucket) {
if let Err(e) = access(&volume_dir).await {
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
if !skip_access_checks(&opts.bucket)
&& let Err(e) = access(&volume_dir).await
{
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
let mut wr = wr;
@@ -1909,19 +1907,19 @@ impl DiskAPI for LocalDisk {
dst_path: &str,
) -> Result<RenameDataResp> {
let src_volume_dir = self.get_bucket_path(src_volume)?;
if !skip_access_checks(src_volume) {
if let Err(e) = super::fs::access_std(&src_volume_dir) {
info!("access checks failed, src_volume_dir: {:?}, err: {}", src_volume_dir, e.to_string());
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
if !skip_access_checks(src_volume)
&& let Err(e) = super::fs::access_std(&src_volume_dir)
{
info!("access checks failed, src_volume_dir: {:?}, err: {}", src_volume_dir, e.to_string());
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
let dst_volume_dir = self.get_bucket_path(dst_volume)?;
if !skip_access_checks(dst_volume) {
if let Err(e) = super::fs::access_std(&dst_volume_dir) {
info!("access checks failed, dst_volume_dir: {:?}, err: {}", dst_volume_dir, e.to_string());
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
if !skip_access_checks(dst_volume)
&& let Err(e) = super::fs::access_std(&dst_volume_dir)
{
info!("access checks failed, dst_volume_dir: {:?}, err: {}", dst_volume_dir, e.to_string());
return Err(to_access_error(e, DiskError::VolumeAccessDenied).into());
}
// xl.meta path
@@ -1973,19 +1971,18 @@ impl DiskAPI for LocalDisk {
let mut xlmeta = FileMeta::new();
if let Some(dst_buf) = has_dst_buf.as_ref() {
if FileMeta::is_xl2_v1_format(dst_buf) {
if let Ok(nmeta) = FileMeta::load(dst_buf) {
xlmeta = nmeta
}
}
if let Some(dst_buf) = has_dst_buf.as_ref()
&& FileMeta::is_xl2_v1_format(dst_buf)
&& let Ok(nmeta) = FileMeta::load(dst_buf)
{
xlmeta = nmeta
}
let mut skip_parent = dst_volume_dir.clone();
if has_dst_buf.as_ref().is_some() {
if let Some(parent) = dst_file_path.parent() {
skip_parent = parent.to_path_buf();
}
if has_dst_buf.as_ref().is_some()
&& let Some(parent) = dst_file_path.parent()
{
skip_parent = parent.to_path_buf();
}
// TODO: Healing
@@ -2017,22 +2014,20 @@ impl DiskAPI for LocalDisk {
.await?;
if let Some((src_data_path, dst_data_path)) = has_data_dir_path.as_ref() {
let no_inline = fi.data.is_none() && fi.size > 0;
if no_inline {
if let Err(err) = rename_all(&src_data_path, &dst_data_path, &skip_parent).await {
let _ = self.delete_file(&dst_volume_dir, dst_data_path, false, false).await;
info!(
"rename all failed src_data_path: {:?}, dst_data_path: {:?}, err: {:?}",
src_data_path, dst_data_path, err
);
return Err(err);
}
if no_inline && let Err(err) = rename_all(&src_data_path, &dst_data_path, &skip_parent).await {
let _ = self.delete_file(&dst_volume_dir, dst_data_path, false, false).await;
info!(
"rename all failed src_data_path: {:?}, dst_data_path: {:?}, err: {:?}",
src_data_path, dst_data_path, err
);
return Err(err);
}
}
if let Some(old_data_dir) = has_old_data_dir {
// preserve current xl.meta inside the oldDataDir.
if let Some(dst_buf) = has_dst_buf {
if let Err(err) = self
if let Some(dst_buf) = has_dst_buf
&& let Err(err) = self
.write_all_private(
dst_volume,
format!("{}/{}/{}", &dst_path, &old_data_dir.to_string(), STORAGE_FORMAT_FILE).as_str(),
@@ -2041,10 +2036,9 @@ impl DiskAPI for LocalDisk {
&skip_parent,
)
.await
{
info!("write_all_private failed err: {:?}", err);
return Err(err);
}
{
info!("write_all_private failed err: {:?}", err);
return Err(err);
}
}
@@ -2075,11 +2069,11 @@ impl DiskAPI for LocalDisk {
#[tracing::instrument(skip(self))]
async fn make_volumes(&self, volumes: Vec<&str>) -> Result<()> {
for vol in volumes {
if let Err(e) = self.make_volume(vol).await {
if e != DiskError::VolumeExists {
error!("local disk make volumes failed: {e}");
return Err(e);
}
if let Err(e) = self.make_volume(vol).await
&& e != DiskError::VolumeExists
{
error!("local disk make volumes failed: {e}");
return Err(e);
}
// TODO: health check
}
@@ -2313,10 +2307,11 @@ impl DiskAPI for LocalDisk {
let old_path = file_path.join(Path::new(uuid.to_string().as_str()));
check_path_length(old_path.to_string_lossy().as_ref())?;
if let Err(err) = self.move_to_trash(&old_path, true, false).await {
if err != DiskError::FileNotFound && err != DiskError::VolumeNotFound {
return Err(err);
}
if let Err(err) = self.move_to_trash(&old_path, true, false).await
&& err != DiskError::FileNotFound
&& err != DiskError::VolumeNotFound
{
return Err(err);
}
}
@@ -2328,13 +2323,13 @@ impl DiskAPI for LocalDisk {
}
// opts.undo_write && opts.old_data_dir.is_some_and(f)
if let Some(old_data_dir) = opts.old_data_dir {
if opts.undo_write {
let src_path =
file_path.join(Path::new(format!("{old_data_dir}{SLASH_SEPARATOR}{STORAGE_FORMAT_FILE_BACKUP}").as_str()));
let dst_path = file_path.join(Path::new(format!("{path}{SLASH_SEPARATOR}{STORAGE_FORMAT_FILE}").as_str()));
return rename_all(src_path, dst_path, file_path).await;
}
if let Some(old_data_dir) = opts.old_data_dir
&& opts.undo_write
{
let src_path =
file_path.join(Path::new(format!("{old_data_dir}{SLASH_SEPARATOR}{STORAGE_FORMAT_FILE_BACKUP}").as_str()));
let dst_path = file_path.join(Path::new(format!("{path}{SLASH_SEPARATOR}{STORAGE_FORMAT_FILE}").as_str()));
return rename_all(src_path, dst_path, file_path).await;
}
self.delete_file(&volume_dir, &xl_path, true, false).await

View File

@@ -147,11 +147,11 @@ async fn reliable_rename(
dst_file_path: impl AsRef<Path>,
base_dir: impl AsRef<Path>,
) -> io::Result<()> {
if let Some(parent) = dst_file_path.as_ref().parent() {
if !file_exists(parent) {
// info!("reliable_rename reliable_mkdir_all parent: {:?}", parent);
reliable_mkdir_all(parent, base_dir.as_ref()).await?;
}
if let Some(parent) = dst_file_path.as_ref().parent()
&& !file_exists(parent)
{
// info!("reliable_rename reliable_mkdir_all parent: {:?}", parent);
reliable_mkdir_all(parent, base_dir.as_ref()).await?;
}
let mut i = 0;
@@ -190,12 +190,11 @@ pub async fn reliable_mkdir_all(path: impl AsRef<Path>, base_dir: impl AsRef<Pat
if e.kind() == io::ErrorKind::NotFound && i == 0 {
i += 1;
if let Some(base_parent) = base_dir.parent() {
if let Some(c) = base_parent.components().next() {
if c != Component::RootDir {
base_dir = base_parent
}
}
if let Some(base_parent) = base_dir.parent()
&& let Some(c) = base_parent.components().next()
&& c != Component::RootDir
{
base_dir = base_parent
}
continue;
}

View File

@@ -318,7 +318,7 @@ fn get_divisible_size(total_sizes: &[usize]) -> usize {
fn possible_set_counts(set_size: usize) -> Vec<usize> {
let mut ss = Vec::new();
for s in SET_SIZES {
if set_size % s == 0 {
if set_size.is_multiple_of(s) {
ss.push(s);
}
}
@@ -340,7 +340,7 @@ fn common_set_drive_count(divisible_size: usize, set_counts: &[usize]) -> usize
let mut prev_d = divisible_size / set_counts[0];
let mut set_size = 0;
for &cnt in set_counts {
if divisible_size % cnt == 0 {
if divisible_size.is_multiple_of(cnt) {
let d = divisible_size / cnt;
if d <= prev_d {
prev_d = d;

View File

@@ -266,12 +266,11 @@ impl Erasure {
let (mut shards, errs) = reader.read().await;
if ret_err.is_none() {
if let (_, Some(err)) = reduce_errs(&errs, &[]) {
if err == Error::FileNotFound || err == Error::FileCorrupt {
ret_err = Some(err.into());
}
}
if ret_err.is_none()
&& let (_, Some(err)) = reduce_errs(&errs, &[])
&& (err == Error::FileNotFound || err == Error::FileCorrupt)
{
ret_err = Some(err.into());
}
if !reader.can_decode(&shards) {

View File

@@ -150,10 +150,10 @@ impl Erasure {
}
Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => {
// Check if the inner error is a checksum mismatch - if so, propagate it
if let Some(inner) = e.get_ref() {
if rustfs_rio::is_checksum_mismatch(inner) {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string()));
}
if let Some(inner) = e.get_ref()
&& rustfs_rio::is_checksum_mismatch(inner)
{
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string()));
}
break;
}

View File

@@ -45,7 +45,7 @@ impl super::Erasure {
let start_block = 0;
let mut end_block = total_length / self.block_size;
if total_length % self.block_size != 0 {
if !total_length.is_multiple_of(self.block_size) {
end_block += 1;
}
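
The two lines above round a byte length up to a whole number of blocks; is_multiple_of (stable since Rust 1.87) simply replaces the modulo test. A standalone equivalent with div_ceil as a cross-check (helper name is illustrative, not from this commit):

/// Number of blocks needed to cover `total_length` bytes.
fn block_count(total_length: usize, block_size: usize) -> usize {
    assert!(block_size > 0);
    let mut blocks = total_length / block_size;
    if !total_length.is_multiple_of(block_size) {
        blocks += 1; // one extra block for the partial tail
    }
    // Ceiling division expresses the same computation in a single step.
    debug_assert_eq!(blocks, total_length.div_ceil(block_size));
    blocks
}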

View File

@@ -244,10 +244,12 @@ impl PoolMeta {
}
pub fn decommission(&mut self, idx: usize, pi: PoolSpaceInfo) -> Result<()> {
if let Some(pool) = self.pools.get_mut(idx) {
if let Some(ref info) = pool.decommission {
if !info.complete && !info.failed && !info.canceled {
return Err(StorageError::DecommissionAlreadyRunning);
}
if let Some(ref info) = pool.decommission
&& !info.complete
&& !info.failed
&& !info.canceled
{
return Err(StorageError::DecommissionAlreadyRunning);
}
let now = OffsetDateTime::now_utc();
@@ -273,12 +275,12 @@ impl PoolMeta {
pub fn pending_buckets(&self, idx: usize) -> Vec<DecomBucketInfo> {
let mut list = Vec::new();
if let Some(pool) = self.pools.get(idx) {
if let Some(ref info) = pool.decommission {
for bk in info.queued_buckets.iter() {
let (name, prefix) = path2_bucket_object(bk);
list.push(DecomBucketInfo { name, prefix });
}
if let Some(pool) = self.pools.get(idx)
&& let Some(ref info) = pool.decommission
{
for bk in info.queued_buckets.iter() {
let (name, prefix) = path2_bucket_object(bk);
list.push(DecomBucketInfo { name, prefix });
}
}
@@ -306,15 +308,15 @@ impl PoolMeta {
}
pub fn count_item(&mut self, idx: usize, size: usize, failed: bool) {
if let Some(pool) = self.pools.get_mut(idx) {
if let Some(info) = pool.decommission.as_mut() {
if failed {
info.items_decommission_failed += 1;
info.bytes_failed += size;
} else {
info.items_decommissioned += 1;
info.bytes_done += size;
}
if let Some(pool) = self.pools.get_mut(idx)
&& let Some(info) = pool.decommission.as_mut()
{
if failed {
info.items_decommission_failed += 1;
info.bytes_failed += size;
} else {
info.items_decommissioned += 1;
info.bytes_done += size;
}
}
}
@@ -324,11 +326,11 @@ impl PoolMeta {
return;
}
if let Some(pool) = self.pools.get_mut(idx) {
if let Some(info) = pool.decommission.as_mut() {
info.object = object;
info.bucket = bucket;
}
if let Some(pool) = self.pools.get_mut(idx)
&& let Some(info) = pool.decommission.as_mut()
{
info.object = object;
info.bucket = bucket;
}
}
@@ -407,10 +409,10 @@ impl PoolMeta {
if specified_pools.len() == remembered_pools.len() {
for (k, pi) in remembered_pools.iter() {
if let Some(pos) = specified_pools.get(k) {
if *pos != pi.position {
update = true; // Pool order changed, allow the update.
}
if let Some(pos) = specified_pools.get(k)
&& *pos != pi.position
{
update = true; // Pool order changed, allow the update.
}
}
}
@@ -640,10 +642,12 @@ impl ECStore {
pub async fn is_decommission_running(&self) -> bool {
let pool_meta = self.pool_meta.read().await;
for pool in pool_meta.pools.iter() {
if let Some(ref info) = pool.decommission {
if !info.complete && !info.failed && !info.canceled {
return true;
}
if let Some(ref info) = pool.decommission
&& !info.complete
&& !info.failed
&& !info.canceled
{
return true;
}
}
@@ -850,8 +854,8 @@ impl ECStore {
decommissioned += 1;
}
if decommissioned == fivs.versions.len() {
if let Err(err) = set
if decommissioned == fivs.versions.len()
&& let Err(err) = set
.delete_object(
bucket.as_str(),
&encode_dir_object(&entry.name),
@@ -863,9 +867,8 @@ impl ECStore {
},
)
.await
{
error!("decommission_pool: delete_object err {:?}", &err);
}
{
error!("decommission_pool: delete_object err {:?}", &err);
}
{
@@ -879,10 +882,8 @@ impl ECStore {
.unwrap_or_default();
drop(pool_meta);
if ok {
if let Some(notification_sys) = get_global_notification_sys() {
notification_sys.reload_pool_meta().await;
}
if ok && let Some(notification_sys) = get_global_notification_sys() {
notification_sys.reload_pool_meta().await;
}
}
@@ -1080,10 +1081,10 @@ impl ECStore {
{
let mut pool_meta = self.pool_meta.write().await;
if pool_meta.bucket_done(idx, bucket.to_string()) {
if let Err(err) = pool_meta.save(self.pools.clone()).await {
error!("decom pool_meta.save err {:?}", err);
}
if pool_meta.bucket_done(idx, bucket.to_string())
&& let Err(err) = pool_meta.save(self.pools.clone()).await
{
error!("decom pool_meta.save err {:?}", err);
}
}
continue;
@@ -1100,10 +1101,10 @@ impl ECStore {
{
let mut pool_meta = self.pool_meta.write().await;
if pool_meta.bucket_done(idx, bucket.to_string()) {
if let Err(err) = pool_meta.save(self.pools.clone()).await {
error!("decom pool_meta.save err {:?}", err);
}
if pool_meta.bucket_done(idx, bucket.to_string())
&& let Err(err) = pool_meta.save(self.pools.clone()).await
{
error!("decom pool_meta.save err {:?}", err);
}
warn!("decommission: decommission_pool bucket_done {}", &bucket.name);
@@ -1138,11 +1139,10 @@ impl ECStore {
if let Err(err) = self
.make_bucket(bk.to_string_lossy().to_string().as_str(), &MakeBucketOptions::default())
.await
&& !is_err_bucket_exists(&err)
{
if !is_err_bucket_exists(&err) {
error!("decommission: make bucket failed: {err}");
return Err(err);
}
error!("decommission: make bucket failed: {err}");
return Err(err);
}
}

View File

@@ -380,10 +380,10 @@ impl ECStore {
#[tracing::instrument(skip(self, fi))]
pub async fn update_pool_stats(&self, pool_index: usize, bucket: String, fi: &FileInfo) -> Result<()> {
let mut rebalance_meta = self.rebalance_meta.write().await;
if let Some(meta) = rebalance_meta.as_mut() {
if let Some(pool_stat) = meta.pool_stats.get_mut(pool_index) {
pool_stat.update(bucket, fi);
}
if let Some(meta) = rebalance_meta.as_mut()
&& let Some(pool_stat) = meta.pool_stats.get_mut(pool_index)
{
pool_stat.update(bucket, fi);
}
Ok(())
@@ -394,20 +394,20 @@ impl ECStore {
info!("next_rebal_bucket: pool_index: {}", pool_index);
let rebalance_meta = self.rebalance_meta.read().await;
info!("next_rebal_bucket: rebalance_meta: {:?}", rebalance_meta);
if let Some(meta) = rebalance_meta.as_ref() {
if let Some(pool_stat) = meta.pool_stats.get(pool_index) {
if pool_stat.info.status == RebalStatus::Completed || !pool_stat.participating {
info!("next_rebal_bucket: pool_index: {} completed or not participating", pool_index);
return Ok(None);
}
if pool_stat.buckets.is_empty() {
info!("next_rebal_bucket: pool_index: {} buckets is empty", pool_index);
return Ok(None);
}
info!("next_rebal_bucket: pool_index: {} bucket: {}", pool_index, pool_stat.buckets[0]);
return Ok(Some(pool_stat.buckets[0].clone()));
if let Some(meta) = rebalance_meta.as_ref()
&& let Some(pool_stat) = meta.pool_stats.get(pool_index)
{
if pool_stat.info.status == RebalStatus::Completed || !pool_stat.participating {
info!("next_rebal_bucket: pool_index: {} completed or not participating", pool_index);
return Ok(None);
}
if pool_stat.buckets.is_empty() {
info!("next_rebal_bucket: pool_index: {} buckets is empty", pool_index);
return Ok(None);
}
info!("next_rebal_bucket: pool_index: {} bucket: {}", pool_index, pool_stat.buckets[0]);
return Ok(Some(pool_stat.buckets[0].clone()));
}
info!("next_rebal_bucket: pool_index: {} None", pool_index);
@@ -417,28 +417,28 @@ impl ECStore {
#[tracing::instrument(skip(self))]
pub async fn bucket_rebalance_done(&self, pool_index: usize, bucket: String) -> Result<()> {
let mut rebalance_meta = self.rebalance_meta.write().await;
if let Some(meta) = rebalance_meta.as_mut() {
if let Some(pool_stat) = meta.pool_stats.get_mut(pool_index) {
info!("bucket_rebalance_done: buckets {:?}", &pool_stat.buckets);
if let Some(meta) = rebalance_meta.as_mut()
&& let Some(pool_stat) = meta.pool_stats.get_mut(pool_index)
{
info!("bucket_rebalance_done: buckets {:?}", &pool_stat.buckets);
// Use retain to filter out buckets slated for removal
let mut found = false;
pool_stat.buckets.retain(|b| {
if b.as_str() == bucket.as_str() {
found = true;
pool_stat.rebalanced_buckets.push(b.clone());
false // Remove this element
} else {
true // Keep this element
}
});
if found {
info!("bucket_rebalance_done: bucket {} rebalanced", &bucket);
return Ok(());
// Use retain to filter out buckets slated for removal
let mut found = false;
pool_stat.buckets.retain(|b| {
if b.as_str() == bucket.as_str() {
found = true;
pool_stat.rebalanced_buckets.push(b.clone());
false // Remove this element
} else {
info!("bucket_rebalance_done: bucket {} not found", bucket);
true // Keep this element
}
});
if found {
info!("bucket_rebalance_done: bucket {} rebalanced", &bucket);
return Ok(());
} else {
info!("bucket_rebalance_done: bucket {} not found", bucket);
}
}
info!("bucket_rebalance_done: bucket {} not found", bucket);
@@ -492,10 +492,10 @@ impl ECStore {
#[tracing::instrument(skip(self))]
pub async fn stop_rebalance(self: &Arc<Self>) -> Result<()> {
let rebalance_meta = self.rebalance_meta.read().await;
if let Some(meta) = rebalance_meta.as_ref() {
if let Some(cancel_tx) = meta.cancel.as_ref() {
cancel_tx.cancel();
}
if let Some(meta) = rebalance_meta.as_ref()
&& let Some(cancel_tx) = meta.cancel.as_ref()
{
cancel_tx.cancel();
}
Ok(())
@@ -690,24 +690,24 @@ impl ECStore {
async fn check_if_rebalance_done(&self, pool_index: usize) -> bool {
let mut rebalance_meta = self.rebalance_meta.write().await;
if let Some(meta) = rebalance_meta.as_mut() {
if let Some(pool_stat) = meta.pool_stats.get_mut(pool_index) {
// Check if the pool's rebalance status is already completed
if pool_stat.info.status == RebalStatus::Completed {
info!("check_if_rebalance_done: pool {} is already completed", pool_index);
return true;
}
if let Some(meta) = rebalance_meta.as_mut()
&& let Some(pool_stat) = meta.pool_stats.get_mut(pool_index)
{
// Check if the pool's rebalance status is already completed
if pool_stat.info.status == RebalStatus::Completed {
info!("check_if_rebalance_done: pool {} is already completed", pool_index);
return true;
}
// Calculate the percentage of free space improvement
let pfi = (pool_stat.init_free_space + pool_stat.bytes) as f64 / pool_stat.init_capacity as f64;
// Calculate the percentage of free space improvement
let pfi = (pool_stat.init_free_space + pool_stat.bytes) as f64 / pool_stat.init_capacity as f64;
// Mark pool rebalance as done if within 5% of the PercentFreeGoal
if (pfi - meta.percent_free_goal).abs() <= 0.05 {
pool_stat.info.status = RebalStatus::Completed;
pool_stat.info.end_time = Some(OffsetDateTime::now_utc());
info!("check_if_rebalance_done: pool {} is completed, pfi: {}", pool_index, pfi);
return true;
}
// Mark pool rebalance as done if within 5% of the PercentFreeGoal
if (pfi - meta.percent_free_goal).abs() <= 0.05 {
pool_stat.info.status = RebalStatus::Completed;
pool_stat.info.end_time = Some(OffsetDateTime::now_utc());
info!("check_if_rebalance_done: pool {} is completed, pfi: {}", pool_index, pfi);
return true;
}
}
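
To make the 5% completion rule above concrete, here is a worked check with made-up numbers; the parameter names loosely mirror the snippet (pool_stat.bytes is read here as bytes already moved off the pool), and the values are purely illustrative:

/// Returns true when the projected free-space fraction is within 5% of the goal.
fn rebalance_done(init_free_space: u64, bytes_moved: u64, init_capacity: u64, percent_free_goal: f64) -> bool {
    let pfi = (init_free_space + bytes_moved) as f64 / init_capacity as f64;
    (pfi - percent_free_goal).abs() <= 0.05
}

fn main() {
    // 25 units free at the start, 25 moved off, 100 total => pfi = 0.50.
    // A free-space goal of 0.52 is inside the 5% band, so the pool counts as done.
    assert!(rebalance_done(25, 25, 100, 0.52));
    // A goal of 0.70 is not, so rebalancing continues.
    assert!(!rebalance_done(25, 25, 100, 0.70));
}
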
@@ -1102,11 +1102,11 @@ impl ECStore {
pub async fn save_rebalance_stats(&self, pool_idx: usize, opt: RebalSaveOpt) -> Result<()> {
// TODO: lock
let mut meta = RebalanceMeta::new();
if let Err(err) = meta.load(self.pools[0].clone()).await {
if err != Error::ConfigNotFound {
info!("save_rebalance_stats: load err: {:?}", err);
return Err(err);
}
if let Err(err) = meta.load(self.pools[0].clone()).await
&& err != Error::ConfigNotFound
{
info!("save_rebalance_stats: load err: {:?}", err);
return Err(err);
}
match opt {

View File

@@ -66,13 +66,13 @@ impl PeerRestClient {
let mut remote = Vec::with_capacity(hosts.len());
let mut all = vec![None; hosts.len()];
for (i, hs_host) in hosts.iter().enumerate() {
if let Some(host) = hs_host {
if let Some(grid_host) = eps.find_grid_hosts_from_peer(host) {
let client = PeerRestClient::new(host.clone(), grid_host);
if let Some(host) = hs_host
&& let Some(grid_host) = eps.find_grid_hosts_from_peer(host)
{
let client = PeerRestClient::new(host.clone(), grid_host);
all[i] = Some(client.clone());
remote.push(Some(client));
}
all[i] = Some(client.clone());
remote.push(Some(client));
}
}

View File

@@ -101,10 +101,10 @@ impl S3PeerSys {
for pool_idx in 0..self.pools_count {
let mut per_pool_errs = vec![None; self.clients.len()];
for (i, client) in self.clients.iter().enumerate() {
if let Some(v) = client.get_pools() {
if v.contains(&pool_idx) {
per_pool_errs[i] = errs[i].clone();
}
if let Some(v) = client.get_pools()
&& v.contains(&pool_idx)
{
per_pool_errs[i] = errs[i].clone();
}
}
let qu = per_pool_errs.len() / 2;
@@ -136,10 +136,10 @@ impl S3PeerSys {
for pool_idx in 0..self.pools_count {
let mut per_pool_errs = vec![None; self.clients.len()];
for (i, client) in self.clients.iter().enumerate() {
if let Some(v) = client.get_pools() {
if v.contains(&pool_idx) {
per_pool_errs[i] = errs[i].clone();
}
if let Some(v) = client.get_pools()
&& v.contains(&pool_idx)
{
per_pool_errs[i] = errs[i].clone();
}
}
let qu = per_pool_errs.len() / 2;

View File

@@ -266,10 +266,10 @@ impl SetDisks {
let mut new_disk = Vec::with_capacity(disks.len());
for disk in disks.iter() {
if let Some(d) = disk {
if d.is_online().await {
new_disk.push(disk.clone());
}
if let Some(d) = disk
&& d.is_online().await
{
new_disk.push(disk.clone());
}
}
@@ -1417,22 +1417,21 @@ impl SetDisks {
let mut valid_obj_map = HashMap::new();
for (i, op_hash) in meta_hashes.iter().enumerate() {
if let Some(hash) = op_hash {
if let Some(max_hash) = max_val {
if hash == max_hash {
if metas[i].is_valid() && !found {
found_fi = Some(metas[i].clone());
found = true;
}
let props = ObjProps {
mod_time: metas[i].mod_time,
num_versions: metas[i].num_versions,
};
*valid_obj_map.entry(props).or_insert(0) += 1;
}
if let Some(hash) = op_hash
&& let Some(max_hash) = max_val
&& hash == max_hash
{
if metas[i].is_valid() && !found {
found_fi = Some(metas[i].clone());
found = true;
}
let props = ObjProps {
mod_time: metas[i].mod_time,
num_versions: metas[i].num_versions,
};
*valid_obj_map.entry(props).or_insert(0) += 1;
}
}
@@ -3572,17 +3571,17 @@ impl SetDisks {
let mut offline = 0;
for (i, err) in errs.iter().enumerate() {
let mut found = false;
if let Some(err) = err {
if err == &DiskError::DiskNotFound {
found = true;
}
if let Some(err) = err
&& err == &DiskError::DiskNotFound
{
found = true;
}
for p in data_errs_by_part {
if let Some(v) = p.1.get(i) {
if *v == CHECK_PART_DISK_NOT_FOUND {
found = true;
break;
}
if let Some(v) = p.1.get(i)
&& *v == CHECK_PART_DISK_NOT_FOUND
{
found = true;
break;
}
}
@@ -3838,10 +3837,10 @@ impl ObjectIO for SetDisks {
None
};
if let Some(http_preconditions) = opts.http_preconditions.clone() {
if let Some(err) = self.check_write_precondition(bucket, object, opts).await {
return Err(err);
}
if let Some(http_preconditions) = opts.http_preconditions.clone()
&& let Some(err) = self.check_write_precondition(bucket, object, opts).await
{
return Err(err);
}
let mut user_defined = opts.user_defined.clone();
@@ -4002,16 +4001,16 @@ impl ObjectIO for SetDisks {
}
}
if fi.checksum.is_none() {
if let Some(content_hash) = data.as_hash_reader().content_hash() {
fi.checksum = Some(content_hash.to_bytes(&[]));
}
if fi.checksum.is_none()
&& let Some(content_hash) = data.as_hash_reader().content_hash()
{
fi.checksum = Some(content_hash.to_bytes(&[]));
}
if let Some(sc) = user_defined.get(AMZ_STORAGE_CLASS) {
if sc == storageclass::STANDARD {
let _ = user_defined.remove(AMZ_STORAGE_CLASS);
}
if let Some(sc) = user_defined.get(AMZ_STORAGE_CLASS)
&& sc == storageclass::STANDARD
{
let _ = user_defined.remove(AMZ_STORAGE_CLASS);
}
let mod_time = if let Some(mod_time) = opts.mod_time {
@@ -4062,11 +4061,11 @@ impl ObjectIO for SetDisks {
self.delete_all(RUSTFS_META_TMP_BUCKET, &tmp_dir).await?;
for (i, op_disk) in online_disks.iter().enumerate() {
if let Some(disk) = op_disk {
if disk.is_online().await {
fi = parts_metadatas[i].clone();
break;
}
if let Some(disk) = op_disk
&& disk.is_online().await
{
fi = parts_metadatas[i].clone();
break;
}
}
@@ -5568,10 +5567,10 @@ impl StorageAPI for SetDisks {
user_defined.insert("etag".to_owned(), etag.clone());
}
if let Some(sc) = user_defined.get(AMZ_STORAGE_CLASS) {
if sc == storageclass::STANDARD {
let _ = user_defined.remove(AMZ_STORAGE_CLASS);
}
if let Some(sc) = user_defined.get(AMZ_STORAGE_CLASS)
&& sc == storageclass::STANDARD
{
let _ = user_defined.remove(AMZ_STORAGE_CLASS);
}
let sc_parity_drives = {
@@ -5620,10 +5619,10 @@ impl StorageAPI for SetDisks {
// TODO: get content-type
}
if let Some(sc) = user_defined.get(AMZ_STORAGE_CLASS) {
if sc == storageclass::STANDARD {
let _ = user_defined.remove(AMZ_STORAGE_CLASS);
}
if let Some(sc) = user_defined.get(AMZ_STORAGE_CLASS)
&& sc == storageclass::STANDARD
{
let _ = user_defined.remove(AMZ_STORAGE_CLASS);
}
if let Some(checksum) = &opts.want_checksum {
@@ -5925,14 +5924,14 @@ impl StorageAPI for SetDisks {
return Err(Error::InvalidPart(p.part_num, ext_part.etag.clone(), p.etag.clone().unwrap_or_default()));
}
if checksum_type.full_object_requested() {
if let Err(err) = checksum.add_part(&cs, ext_part.actual_size) {
error!(
"complete_multipart_upload checksum add_part failed part_id={}, bucket={}, object={}",
p.part_num, bucket, object
);
return Err(Error::InvalidPart(p.part_num, ext_part.etag.clone(), p.etag.clone().unwrap_or_default()));
}
if checksum_type.full_object_requested()
&& let Err(err) = checksum.add_part(&cs, ext_part.actual_size)
{
error!(
"complete_multipart_upload checksum add_part failed part_id={}, bucket={}, object={}",
p.part_num, bucket, object
);
return Err(Error::InvalidPart(p.part_num, ext_part.etag.clone(), p.etag.clone().unwrap_or_default()));
}
checksum_combined.extend_from_slice(cs.raw.as_slice());
@@ -6112,11 +6111,11 @@ impl StorageAPI for SetDisks {
});
for (i, op_disk) in online_disks.iter().enumerate() {
if let Some(disk) = op_disk {
if disk.is_online().await {
fi = parts_metadatas[i].clone();
break;
}
if let Some(disk) = op_disk
&& disk.is_online().await
{
fi = parts_metadatas[i].clone();
break;
}
}
@@ -6210,16 +6209,15 @@ impl StorageAPI for SetDisks {
let _write_lock_guard = if !opts.no_lock {
let key = rustfs_lock::fast_lock::types::ObjectKey::new(bucket, object);
let mut skip_lock = false;
if let Some(lock_info) = self.fast_lock_manager.get_lock_info(&key) {
if lock_info.owner.as_ref() == self.locker_owner.as_str()
&& matches!(lock_info.mode, rustfs_lock::fast_lock::types::LockMode::Exclusive)
{
debug!(
"Reusing existing exclusive lock for heal operation on {}/{} held by {}",
bucket, object, self.locker_owner
);
skip_lock = true;
}
if let Some(lock_info) = self.fast_lock_manager.get_lock_info(&key)
&& lock_info.owner.as_ref() == self.locker_owner.as_str()
&& matches!(lock_info.mode, rustfs_lock::fast_lock::types::LockMode::Exclusive)
{
debug!(
"Reusing existing exclusive lock for heal operation on {}/{} held by {}",
bucket, object, self.locker_owner
);
skip_lock = true;
}
if skip_lock {
None
@@ -6563,14 +6561,14 @@ async fn disks_with_all_parts(
if err.is_some() {
let part_err = conv_part_err_to_int(err);
for p in 0..latest_meta.parts.len() {
if let Some(vec) = data_errs_by_part.get_mut(&p) {
if index < vec.len() {
info!(
"data_errs_by_part: copy meta errors to part errors: object_name={}, index: {index}, part: {p}, part_err: {part_err}",
object_name
);
vec[index] = part_err;
}
if let Some(vec) = data_errs_by_part.get_mut(&p)
&& index < vec.len()
{
info!(
"data_errs_by_part: copy meta errors to part errors: object_name={}, index: {index}, part: {p}, part_err: {part_err}",
object_name
);
vec[index] = part_err;
}
}
}
@@ -6609,14 +6607,14 @@ async fn disks_with_all_parts(
.await
.err();
if let Some(vec) = data_errs_by_part.get_mut(&0) {
if index < vec.len() {
vec[index] = conv_part_err_to_int(&verify_err.map(|e| e.into()));
info!(
"data_errs_by_part:bitrot check result: object_name={}, index: {index}, result: {}",
object_name, vec[index]
);
}
if let Some(vec) = data_errs_by_part.get_mut(&0)
&& index < vec.len()
{
vec[index] = conv_part_err_to_int(&verify_err.map(|e| e.into()));
info!(
"data_errs_by_part:bitrot check result: object_name={}, index: {index}, result: {}",
object_name, vec[index]
);
}
}
continue;
@@ -6654,32 +6652,32 @@ async fn disks_with_all_parts(
// Update dataErrsByPart for all parts
for p in 0..latest_meta.parts.len() {
if let Some(vec) = data_errs_by_part.get_mut(&p) {
if index < vec.len() {
if verify_err.is_some() {
if let Some(vec) = data_errs_by_part.get_mut(&p)
&& index < vec.len()
{
if verify_err.is_some() {
info!(
"data_errs_by_part: verify_err: object_name={}, index: {index}, part: {p}, verify_err: {verify_err:?}",
object_name
);
vec[index] = conv_part_err_to_int(&verify_err.clone());
} else {
// Fix: verify_resp.results length is based on meta.parts, not latest_meta.parts
// We need to check bounds to avoid panic
if p < verify_resp.results.len() {
info!(
"data_errs_by_part: verify_err: object_name={}, index: {index}, part: {p}, verify_err: {verify_err:?}",
"data_errs_by_part: update data_errs_by_part: object_name={}, index: {}, part: {}, verify_resp.results: {:?}",
object_name, index, p, verify_resp.results[p]
);
vec[index] = verify_resp.results[p];
} else {
debug!(
"data_errs_by_part: verify_resp.results length mismatch: expected at least {}, got {}, object_name={}, index: {index}, part: {p}",
p + 1,
verify_resp.results.len(),
object_name
);
vec[index] = conv_part_err_to_int(&verify_err.clone());
} else {
// Fix: verify_resp.results length is based on meta.parts, not latest_meta.parts
// We need to check bounds to avoid panic
if p < verify_resp.results.len() {
info!(
"data_errs_by_part: update data_errs_by_part: object_name={}, index: {}, part: {}, verify_resp.results: {:?}",
object_name, index, p, verify_resp.results[p]
);
vec[index] = verify_resp.results[p];
} else {
debug!(
"data_errs_by_part: verify_resp.results length mismatch: expected at least {}, got {}, object_name={}, index: {index}, part: {p}",
p + 1,
verify_resp.results.len(),
object_name
);
vec[index] = CHECK_PART_SUCCESS;
}
vec[index] = CHECK_PART_SUCCESS;
}
}
}
@@ -6689,14 +6687,14 @@ async fn disks_with_all_parts(
// Build dataErrsByDisk from dataErrsByPart
for (part, disks) in data_errs_by_part.iter() {
for (disk_idx, disk_err) in disks.iter().enumerate() {
if let Some(vec) = data_errs_by_disk.get_mut(&disk_idx) {
if *part < vec.len() {
vec[*part] = *disk_err;
info!(
"data_errs_by_disk: update data_errs_by_disk: object_name={}, part: {part}, disk_idx: {disk_idx}, disk_err: {disk_err}",
object_name,
);
}
if let Some(vec) = data_errs_by_disk.get_mut(&disk_idx)
&& *part < vec.len()
{
vec[*part] = *disk_err;
info!(
"data_errs_by_disk: update data_errs_by_disk: object_name={}, part: {part}, disk_idx: {disk_idx}, disk_err: {disk_err}",
object_name,
);
}
}
}
@@ -6738,10 +6736,10 @@ pub fn should_heal_object_on_disk(
meta: &FileInfo,
latest_meta: &FileInfo,
) -> (bool, Option<DiskError>) {
if let Some(err) = err {
if err == &DiskError::FileNotFound || err == &DiskError::FileVersionNotFound || err == &DiskError::FileCorrupt {
return (true, Some(err.clone()));
}
if let Some(err) = err
&& (err == &DiskError::FileNotFound || err == &DiskError::FileVersionNotFound || err == &DiskError::FileCorrupt)
{
return (true, Some(err.clone()));
}
if latest_meta.volume != meta.volume
@@ -6906,15 +6904,15 @@ pub fn e_tag_matches(etag: &str, condition: &str) -> bool {
pub fn should_prevent_write(oi: &ObjectInfo, if_none_match: Option<String>, if_match: Option<String>) -> bool {
match &oi.etag {
Some(etag) => {
if let Some(if_none_match) = if_none_match {
if e_tag_matches(etag, &if_none_match) {
return true;
}
if let Some(if_none_match) = if_none_match
&& e_tag_matches(etag, &if_none_match)
{
return true;
}
if let Some(if_match) = if_match {
if !e_tag_matches(etag, &if_match) {
return true;
}
if let Some(if_match) = if_match
&& !e_tag_matches(etag, &if_match)
{
return true;
}
false
}

View File

@@ -491,12 +491,12 @@ impl StorageAPI for Sets {
let cp_src_dst_same = path_join_buf(&[src_bucket, src_object]) == path_join_buf(&[dst_bucket, dst_object]);
if cp_src_dst_same {
if let (Some(src_vid), Some(dst_vid)) = (&src_opts.version_id, &dst_opts.version_id) {
if src_vid == dst_vid {
return src_set
.copy_object(src_bucket, src_object, dst_bucket, dst_object, src_info, src_opts, dst_opts)
.await;
}
if let (Some(src_vid), Some(dst_vid)) = (&src_opts.version_id, &dst_opts.version_id)
&& src_vid == dst_vid
{
return src_set
.copy_object(src_bucket, src_object, dst_bucket, dst_object, src_info, src_opts, dst_opts)
.await;
}
if !dst_opts.versioned && src_opts.version_id.is_none() {
@@ -823,10 +823,10 @@ impl StorageAPI for Sets {
Ok((m, n)) => (m, n),
Err(_) => continue,
};
if let Some(set) = self.disk_set.get(m) {
if let Some(Some(disk)) = set.disks.read().await.get(n) {
let _ = disk.close().await;
}
if let Some(set) = self.disk_set.get(m)
&& let Some(Some(disk)) = set.disks.read().await.get(n)
{
let _ = disk.close().await;
}
if let Some(Some(disk)) = disks.get(index) {
@@ -980,25 +980,24 @@ fn new_heal_format_sets(
let mut current_disks_info = vec![vec![DiskInfo::default(); set_drive_count]; set_count];
for (i, set) in ref_format.erasure.sets.iter().enumerate() {
for j in 0..set.len() {
if let Some(Some(err)) = errs.get(i * set_drive_count + j) {
if *err == DiskError::UnformattedDisk {
let mut fm = FormatV3::new(set_count, set_drive_count);
fm.id = ref_format.id;
fm.format = ref_format.format.clone();
fm.version = ref_format.version.clone();
fm.erasure.this = ref_format.erasure.sets[i][j];
fm.erasure.sets = ref_format.erasure.sets.clone();
fm.erasure.version = ref_format.erasure.version.clone();
fm.erasure.distribution_algo = ref_format.erasure.distribution_algo.clone();
new_formats[i][j] = Some(fm);
}
if let Some(Some(err)) = errs.get(i * set_drive_count + j)
&& *err == DiskError::UnformattedDisk
{
let mut fm = FormatV3::new(set_count, set_drive_count);
fm.id = ref_format.id;
fm.format = ref_format.format.clone();
fm.version = ref_format.version.clone();
fm.erasure.this = ref_format.erasure.sets[i][j];
fm.erasure.sets = ref_format.erasure.sets.clone();
fm.erasure.version = ref_format.erasure.version.clone();
fm.erasure.distribution_algo = ref_format.erasure.distribution_algo.clone();
new_formats[i][j] = Some(fm);
}
if let (Some(format), None) = (&formats[i * set_drive_count + j], &errs[i * set_drive_count + j]) {
if let Some(info) = &format.disk_info {
if !info.endpoint.is_empty() {
current_disks_info[i][j] = info.clone();
}
}
if let (Some(format), None) = (&formats[i * set_drive_count + j], &errs[i * set_drive_count + j])
&& let Some(info) = &format.disk_info
&& !info.endpoint.is_empty()
{
current_disks_info[i][j] = info.clone();
}
}
}

View File

@@ -243,10 +243,10 @@ impl ECStore {
});
// Only set it when the global deployment ID is not yet configured
if let Some(dep_id) = deployment_id {
if get_global_deployment_id().is_none() {
set_global_deployment_id(dep_id);
}
if let Some(dep_id) = deployment_id
&& get_global_deployment_id().is_none()
{
set_global_deployment_id(dep_id);
}
let wait_sec = 5;
@@ -768,10 +768,10 @@ impl ECStore {
def_pool = pinfo.clone();
has_def_pool = true;
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-deletes.html
if is_err_object_not_found(err) {
if let Err(err) = opts.precondition_check(&pinfo.object_info) {
return Err(err.clone());
}
if is_err_object_not_found(err)
&& let Err(err) = opts.precondition_check(&pinfo.object_info)
{
return Err(err.clone());
}
if !is_err_object_not_found(err) && !is_err_version_not_found(err) {
@@ -885,13 +885,14 @@ impl ECStore {
return Ok((obj, res.idx));
}
if let Some(err) = res.err {
if !is_err_object_not_found(&err) && !is_err_version_not_found(&err) {
return Err(err);
}
// TODO: delete marker
if let Some(err) = res.err
&& !is_err_object_not_found(&err)
&& !is_err_version_not_found(&err)
{
return Err(err);
}
// TODO: delete marker
}
let object = decode_dir_object(object);
@@ -918,12 +919,12 @@ impl ECStore {
let mut derrs = Vec::new();
for pe in errs.iter() {
if let Some(err) = &pe.err {
if err == &StorageError::ErasureWriteQuorum {
objs.push(None);
derrs.push(Some(StorageError::ErasureWriteQuorum));
continue;
}
if let Some(err) = &pe.err
&& err == &StorageError::ErasureWriteQuorum
{
objs.push(None);
derrs.push(Some(StorageError::ErasureWriteQuorum));
continue;
}
if let Some(idx) = pe.index {
@@ -1226,14 +1227,14 @@ impl StorageAPI for ECStore {
#[instrument(skip(self))]
async fn make_bucket(&self, bucket: &str, opts: &MakeBucketOptions) -> Result<()> {
if !is_meta_bucketname(bucket) {
if let Err(err) = check_valid_bucket_name_strict(bucket) {
return Err(StorageError::BucketNameInvalid(err.to_string()));
}
// TODO: nslock
if !is_meta_bucketname(bucket)
&& let Err(err) = check_valid_bucket_name_strict(bucket)
{
return Err(StorageError::BucketNameInvalid(err.to_string()));
}
// TODO: nslock
if let Err(err) = self.peer_sys.make_bucket(bucket, opts).await {
let err = to_object_err(err.into(), vec![bucket]);
if !is_err_bucket_exists(&err) {
@@ -1427,12 +1428,12 @@ impl StorageAPI for ECStore {
let pool_idx = self.get_pool_idx_no_lock(src_bucket, &src_object, src_info.size).await?;
if cp_src_dst_same {
if let (Some(src_vid), Some(dst_vid)) = (&src_opts.version_id, &dst_opts.version_id) {
if src_vid == dst_vid {
return self.pools[pool_idx]
.copy_object(src_bucket, &src_object, dst_bucket, &dst_object, src_info, src_opts, dst_opts)
.await;
}
if let (Some(src_vid), Some(dst_vid)) = (&src_opts.version_id, &dst_opts.version_id)
&& src_vid == dst_vid
{
return self.pools[pool_idx]
.copy_object(src_bucket, &src_object, dst_bucket, &dst_object, src_info, src_opts, dst_opts)
.await;
}
if !dst_opts.versioned && src_opts.version_id.is_none() {
@@ -2433,13 +2434,13 @@ fn check_list_multipart_args(
check_list_objs_args(bucket, prefix, key_marker)?;
if let Some(upload_id_marker) = upload_id_marker {
if let Some(key_marker) = key_marker {
if key_marker.ends_with('/') {
return Err(StorageError::InvalidUploadIDKeyCombination(
upload_id_marker.to_string(),
key_marker.to_string(),
));
}
if let Some(key_marker) = key_marker
&& key_marker.ends_with('/')
{
return Err(StorageError::InvalidUploadIDKeyCombination(
upload_id_marker.to_string(),
key_marker.to_string(),
));
}
if let Err(_e) = base64_simd::URL_SAFE_NO_PAD.decode_to_vec(upload_id_marker.as_bytes()) {
@@ -2510,10 +2511,10 @@ pub async fn get_disk_infos(disks: &[Option<DiskStore>]) -> Vec<Option<DiskInfo>
let opts = &DiskInfoOptions::default();
let mut res = vec![None; disks.len()];
for (idx, disk_op) in disks.iter().enumerate() {
if let Some(disk) = disk_op {
if let Ok(info) = disk.disk_info(opts).await {
res[idx] = Some(info);
}
if let Some(disk) = disk_op
&& let Ok(info) = disk.disk_info(opts).await
{
res[idx] = Some(info);
}
}

View File

@@ -144,10 +144,10 @@ impl GetObjectReader {
) -> Result<(Self, usize, i64)> {
let mut rs = rs;
if let Some(part_number) = opts.part_number {
if rs.is_none() {
rs = HTTPRangeSpec::from_object_info(oi, part_number);
}
if let Some(part_number) = opts.part_number
&& rs.is_none()
{
rs = HTTPRangeSpec::from_object_info(oi, part_number);
}
// TODO:Encrypted
@@ -462,32 +462,30 @@ impl ObjectOptions {
pub fn precondition_check(&self, obj_info: &ObjectInfo) -> Result<()> {
let has_valid_mod_time = obj_info.mod_time.is_some_and(|t| t != OffsetDateTime::UNIX_EPOCH);
if let Some(part_number) = self.part_number {
if part_number > 1 && !obj_info.parts.is_empty() {
let part_found = obj_info.parts.iter().any(|pi| pi.number == part_number);
if !part_found {
return Err(Error::InvalidPartNumber(part_number));
}
if let Some(part_number) = self.part_number
&& part_number > 1
&& !obj_info.parts.is_empty()
{
let part_found = obj_info.parts.iter().any(|pi| pi.number == part_number);
if !part_found {
return Err(Error::InvalidPartNumber(part_number));
}
}
if let Some(pre) = &self.http_preconditions {
if let Some(if_none_match) = &pre.if_none_match {
if let Some(etag) = &obj_info.etag {
if is_etag_equal(etag, if_none_match) {
return Err(Error::NotModified);
}
}
if let Some(if_none_match) = &pre.if_none_match
&& let Some(etag) = &obj_info.etag
&& is_etag_equal(etag, if_none_match)
{
return Err(Error::NotModified);
}
if has_valid_mod_time {
if let Some(if_modified_since) = &pre.if_modified_since {
if let Some(mod_time) = &obj_info.mod_time {
if !is_modified_since(mod_time, if_modified_since) {
return Err(Error::NotModified);
}
}
}
if has_valid_mod_time
&& let Some(if_modified_since) = &pre.if_modified_since
&& let Some(mod_time) = &obj_info.mod_time
&& !is_modified_since(mod_time, if_modified_since)
{
return Err(Error::NotModified);
}
if let Some(if_match) = &pre.if_match {
@@ -499,14 +497,13 @@ impl ObjectOptions {
return Err(Error::PreconditionFailed);
}
}
if has_valid_mod_time && pre.if_match.is_none() {
if let Some(if_unmodified_since) = &pre.if_unmodified_since {
if let Some(mod_time) = &obj_info.mod_time {
if is_modified_since(mod_time, if_unmodified_since) {
return Err(Error::PreconditionFailed);
}
}
}
if has_valid_mod_time
&& pre.if_match.is_none()
&& let Some(if_unmodified_since) = &pre.if_unmodified_since
&& let Some(mod_time) = &obj_info.mod_time
&& is_modified_since(mod_time, if_unmodified_since)
{
return Err(Error::PreconditionFailed);
}
}
@@ -698,12 +695,12 @@ impl ObjectInfo {
}
if self.is_compressed() {
if let Some(size_str) = self.user_defined.get(&format!("{RESERVED_METADATA_PREFIX_LOWER}actual-size")) {
if !size_str.is_empty() {
// Todo: deal with error
let size = size_str.parse::<i64>().map_err(|e| std::io::Error::other(e.to_string()))?;
return Ok(size);
}
if let Some(size_str) = self.user_defined.get(&format!("{RESERVED_METADATA_PREFIX_LOWER}actual-size"))
&& !size_str.is_empty()
{
// Todo: deal with error
let size = size_str.parse::<i64>().map_err(|e| std::io::Error::other(e.to_string()))?;
return Ok(size);
}
let mut actual_size = 0;
self.parts.iter().for_each(|part| {
@@ -881,32 +878,31 @@ impl ObjectInfo {
continue;
}
if entry.is_dir() {
if let Some(delimiter) = &delimiter {
if let Some(idx) = {
let remaining = if entry.name.starts_with(prefix) {
&entry.name[prefix.len()..]
} else {
entry.name.as_str()
};
remaining.find(delimiter.as_str())
} {
let idx = prefix.len() + idx + delimiter.len();
if let Some(curr_prefix) = entry.name.get(0..idx) {
if curr_prefix == prev_prefix {
continue;
}
prev_prefix = curr_prefix;
objects.push(ObjectInfo {
is_dir: true,
bucket: bucket.to_owned(),
name: curr_prefix.to_owned(),
..Default::default()
});
}
if entry.is_dir()
&& let Some(delimiter) = &delimiter
&& let Some(idx) = {
let remaining = if entry.name.starts_with(prefix) {
&entry.name[prefix.len()..]
} else {
entry.name.as_str()
};
remaining.find(delimiter.as_str())
}
{
let idx = prefix.len() + idx + delimiter.len();
if let Some(curr_prefix) = entry.name.get(0..idx) {
if curr_prefix == prev_prefix {
continue;
}
prev_prefix = curr_prefix;
objects.push(ObjectInfo {
is_dir: true,
bucket: bucket.to_owned(),
name: curr_prefix.to_owned(),
..Default::default()
});
}
}
}
@@ -966,32 +962,31 @@ impl ObjectInfo {
continue;
}
if entry.is_dir() {
if let Some(delimiter) = &delimiter {
if let Some(idx) = {
let remaining = if entry.name.starts_with(prefix) {
&entry.name[prefix.len()..]
} else {
entry.name.as_str()
};
remaining.find(delimiter.as_str())
} {
let idx = prefix.len() + idx + delimiter.len();
if let Some(curr_prefix) = entry.name.get(0..idx) {
if curr_prefix == prev_prefix {
continue;
}
prev_prefix = curr_prefix;
objects.push(ObjectInfo {
is_dir: true,
bucket: bucket.to_owned(),
name: curr_prefix.to_owned(),
..Default::default()
});
}
if entry.is_dir()
&& let Some(delimiter) = &delimiter
&& let Some(idx) = {
let remaining = if entry.name.starts_with(prefix) {
&entry.name[prefix.len()..]
} else {
entry.name.as_str()
};
remaining.find(delimiter.as_str())
}
{
let idx = prefix.len() + idx + delimiter.len();
if let Some(curr_prefix) = entry.name.get(0..idx) {
if curr_prefix == prev_prefix {
continue;
}
prev_prefix = curr_prefix;
objects.push(ObjectInfo {
is_dir: true,
bucket: bucket.to_owned(),
name: curr_prefix.to_owned(),
..Default::default()
});
}
}
}
@@ -1026,10 +1021,10 @@ impl ObjectInfo {
}
pub fn decrypt_checksums(&self, part: usize, _headers: &HeaderMap) -> Result<(HashMap<String, String>, bool)> {
if part > 0 {
if let Some(checksums) = self.parts.iter().find(|p| p.number == part).and_then(|p| p.checksums.clone()) {
return Ok((checksums, true));
}
if part > 0
&& let Some(checksums) = self.parts.iter().find(|p| p.number == part).and_then(|p| p.checksums.clone())
{
return Ok((checksums, true));
}
// TODO: decrypt checksums

View File

@@ -302,10 +302,10 @@ impl ECStore {
..Default::default()
});
if let Some(err) = list_result.err.clone() {
if err != rustfs_filemeta::Error::Unexpected {
return Err(to_object_err(err.into(), vec![bucket, prefix]));
}
if let Some(err) = list_result.err.clone()
&& err != rustfs_filemeta::Error::Unexpected
{
return Err(to_object_err(err.into(), vec![bucket, prefix]));
}
if let Some(result) = list_result.entries.as_mut() {
@@ -418,10 +418,10 @@ impl ECStore {
},
};
if let Some(err) = list_result.err.clone() {
if err != rustfs_filemeta::Error::Unexpected {
return Err(to_object_err(err.into(), vec![bucket, prefix]));
}
if let Some(err) = list_result.err.clone()
&& err != rustfs_filemeta::Error::Unexpected
{
return Err(to_object_err(err.into(), vec![bucket, prefix]));
}
if let Some(result) = list_result.entries.as_mut() {
@@ -509,10 +509,11 @@ impl ECStore {
let mut o = o.clone();
o.marker = o.marker.filter(|v| v >= &o.prefix);
if let Some(marker) = &o.marker {
if !o.prefix.is_empty() && !marker.starts_with(&o.prefix) {
return Err(Error::Unexpected);
}
if let Some(marker) = &o.marker
&& !o.prefix.is_empty()
&& !marker.starts_with(&o.prefix)
{
return Err(Error::Unexpected);
}
if o.limit == 0 {
@@ -817,10 +818,10 @@ impl ECStore {
let value = tx2.clone();
let resolver = resolver.clone();
async move {
if let Some(entry) = entries.resolve(resolver) {
if let Err(err) = value.send(entry).await {
error!("list_path send fail {:?}", err);
}
if let Some(entry) = entries.resolve(resolver)
&& let Err(err) = value.send(entry).await
{
error!("list_path send fail {:?}", err);
}
}
})
@@ -986,20 +987,21 @@ async fn gather_results(
continue;
}
if let Some(marker) = &opts.marker {
if &entry.name < marker {
continue;
}
if let Some(marker) = &opts.marker
&& &entry.name < marker
{
continue;
}
if !entry.name.starts_with(&opts.prefix) {
continue;
}
if let Some(separator) = &opts.separator {
if !opts.recursive && !entry.is_in_dir(&opts.prefix, separator) {
continue;
}
if let Some(separator) = &opts.separator
&& !opts.recursive
&& !entry.is_in_dir(&opts.prefix, separator)
{
continue;
}
if !opts.incl_deleted && entry.is_object() && entry.is_latest_delete_marker() && !entry.is_object_dir() {
@@ -1200,16 +1202,16 @@ async fn merge_entry_channels(
}
}
if let Some(xl) = has_xl.as_mut() {
if !versions.is_empty() {
xl.versions = merge_file_meta_versions(read_quorum, true, 0, &versions);
if let Some(xl) = has_xl.as_mut()
&& !versions.is_empty()
{
xl.versions = merge_file_meta_versions(read_quorum, true, 0, &versions);
if let Ok(meta) = xl.marshal_msg() {
if let Some(b) = best.as_mut() {
b.metadata = meta;
b.cached = Some(xl.clone());
}
}
if let Ok(meta) = xl.marshal_msg()
&& let Some(b) = best.as_mut()
{
b.metadata = meta;
b.cached = Some(xl.clone());
}
}
}
@@ -1217,11 +1219,11 @@ async fn merge_entry_channels(
to_merge.clear();
}
if let Some(best_entry) = &best {
if best_entry.name > last {
out_channel.send(best_entry.clone()).await.map_err(Error::other)?;
last = best_entry.name.clone();
}
if let Some(best_entry) = &best
&& best_entry.name > last
{
out_channel.send(best_entry.clone()).await.map_err(Error::other)?;
last = best_entry.name.clone();
}
select_from(&mut in_channels, best_idx, &mut top, &mut n_done).await?;
@@ -1307,10 +1309,10 @@ impl SetDisks {
let value = tx2.clone();
let resolver = resolver.clone();
async move {
if let Some(entry) = entries.resolve(resolver) {
if let Err(err) = value.send(entry).await {
error!("list_path send fail {:?}", err);
}
if let Some(entry) = entries.resolve(resolver)
&& let Err(err) = value.send(entry).await
{
error!("list_path send fail {:?}", err);
}
}
})

@@ -635,10 +635,10 @@ fn parse_restore_obj_status(restore_hdr: &str) -> Result<RestoreStatus> {
}
pub fn is_restored_object_on_disk(meta: &HashMap<String, String>) -> bool {
if let Some(restore_hdr) = meta.get(X_AMZ_RESTORE.as_str()) {
if let Ok(restore_status) = parse_restore_obj_status(restore_hdr) {
return restore_status.on_disk();
}
if let Some(restore_hdr) = meta.get(X_AMZ_RESTORE.as_str())
&& let Ok(restore_status) = parse_restore_obj_status(restore_hdr)
{
return restore_status.on_disk();
}
false
}

View File

@@ -575,13 +575,12 @@ impl FileMeta {
let mod_time = version.get_mod_time();
for (idx, exist) in self.versions.iter().enumerate() {
if let Some(ref ex_mt) = exist.header.mod_time {
if let Some(ref in_md) = mod_time {
if ex_mt <= in_md {
self.versions.insert(idx, FileMetaShallowVersion::try_from(version)?);
return Ok(());
}
}
if let Some(ref ex_mt) = exist.header.mod_time
&& let Some(ref in_md) = mod_time
&& ex_mt <= in_md
{
self.versions.insert(idx, FileMetaShallowVersion::try_from(version)?);
return Ok(());
}
}
Err(Error::other("add_version failed"))
@@ -657,58 +656,44 @@ impl FileMeta {
}
if fi.deleted {
if !fi.delete_marker_replication_status().is_empty() {
if let Some(delete_marker) = ventry.delete_marker.as_mut() {
if fi.delete_marker_replication_status() == ReplicationStatusType::Replica {
delete_marker.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replica-status"),
fi.replication_state_internal
.as_ref()
.map(|v| v.replica_status.clone())
.unwrap_or_default()
.as_str()
.as_bytes()
.to_vec(),
);
delete_marker.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replica-timestamp"),
fi.replication_state_internal
.as_ref()
.map(|v| v.replica_timestamp.unwrap_or(OffsetDateTime::UNIX_EPOCH).to_string())
.unwrap_or_default()
.as_bytes()
.to_vec(),
);
} else {
delete_marker.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replication-status"),
fi.replication_state_internal
.as_ref()
.map(|v| v.replication_status_internal.clone().unwrap_or_default())
.unwrap_or_default()
.as_bytes()
.to_vec(),
);
delete_marker.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replication-timestamp"),
fi.replication_state_internal
.as_ref()
.map(|v| v.replication_timestamp.unwrap_or(OffsetDateTime::UNIX_EPOCH).to_string())
.unwrap_or_default()
.as_bytes()
.to_vec(),
);
}
}
}
if !fi.version_purge_status().is_empty() {
if let Some(delete_marker) = ventry.delete_marker.as_mut() {
if !fi.delete_marker_replication_status().is_empty()
&& let Some(delete_marker) = ventry.delete_marker.as_mut()
{
if fi.delete_marker_replication_status() == ReplicationStatusType::Replica {
delete_marker.meta_sys.insert(
VERSION_PURGE_STATUS_KEY.to_string(),
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replica-status"),
fi.replication_state_internal
.as_ref()
.map(|v| v.version_purge_status_internal.clone().unwrap_or_default())
.map(|v| v.replica_status.clone())
.unwrap_or_default()
.as_str()
.as_bytes()
.to_vec(),
);
delete_marker.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replica-timestamp"),
fi.replication_state_internal
.as_ref()
.map(|v| v.replica_timestamp.unwrap_or(OffsetDateTime::UNIX_EPOCH).to_string())
.unwrap_or_default()
.as_bytes()
.to_vec(),
);
} else {
delete_marker.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replication-status"),
fi.replication_state_internal
.as_ref()
.map(|v| v.replication_status_internal.clone().unwrap_or_default())
.unwrap_or_default()
.as_bytes()
.to_vec(),
);
delete_marker.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replication-timestamp"),
fi.replication_state_internal
.as_ref()
.map(|v| v.replication_timestamp.unwrap_or(OffsetDateTime::UNIX_EPOCH).to_string())
.unwrap_or_default()
.as_bytes()
.to_vec(),
@@ -716,6 +701,20 @@ impl FileMeta {
}
}
if !fi.version_purge_status().is_empty()
&& let Some(delete_marker) = ventry.delete_marker.as_mut()
{
delete_marker.meta_sys.insert(
VERSION_PURGE_STATUS_KEY.to_string(),
fi.replication_state_internal
.as_ref()
.map(|v| v.version_purge_status_internal.clone().unwrap_or_default())
.unwrap_or_default()
.as_bytes()
.to_vec(),
);
}
if let Some(delete_marker) = ventry.delete_marker.as_mut() {
for (k, v) in fi
.replication_state_internal
@@ -1917,42 +1916,41 @@ impl MetaObject {
if let Some(status) = self
.meta_sys
.get(&format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITION_STATUS}"))
&& *status == TRANSITION_COMPLETE.as_bytes().to_vec()
{
if *status == TRANSITION_COMPLETE.as_bytes().to_vec() {
let vid = Uuid::parse_str(&fi.tier_free_version_id());
if let Err(err) = vid {
panic!("Invalid Tier Object delete marker versionId {} {}", fi.tier_free_version_id(), err);
}
let vid = vid.unwrap();
let mut free_entry = FileMetaVersion {
version_type: VersionType::Delete,
write_version: 0,
..Default::default()
};
free_entry.delete_marker = Some(MetaDeleteMarker {
version_id: Some(vid),
mod_time: self.mod_time,
meta_sys: HashMap::<String, Vec<u8>>::new(),
});
let delete_marker = free_entry.delete_marker.as_mut().unwrap();
delete_marker
.meta_sys
.insert(format!("{RESERVED_METADATA_PREFIX_LOWER}{FREE_VERSION}"), vec![]);
let tier_key = format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITION_TIER}");
let tier_obj_key = format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITIONED_OBJECTNAME}");
let tier_obj_vid_key = format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITIONED_VERSION_ID}");
let aa = [tier_key, tier_obj_key, tier_obj_vid_key];
for (k, v) in &self.meta_sys {
if aa.contains(k) {
delete_marker.meta_sys.insert(k.clone(), v.clone());
}
}
return (free_entry, true);
let vid = Uuid::parse_str(&fi.tier_free_version_id());
if let Err(err) = vid {
panic!("Invalid Tier Object delete marker versionId {} {}", fi.tier_free_version_id(), err);
}
let vid = vid.unwrap();
let mut free_entry = FileMetaVersion {
version_type: VersionType::Delete,
write_version: 0,
..Default::default()
};
free_entry.delete_marker = Some(MetaDeleteMarker {
version_id: Some(vid),
mod_time: self.mod_time,
meta_sys: HashMap::<String, Vec<u8>>::new(),
});
let delete_marker = free_entry.delete_marker.as_mut().unwrap();
delete_marker
.meta_sys
.insert(format!("{RESERVED_METADATA_PREFIX_LOWER}{FREE_VERSION}"), vec![]);
let tier_key = format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITION_TIER}");
let tier_obj_key = format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITIONED_OBJECTNAME}");
let tier_obj_vid_key = format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITIONED_VERSION_ID}");
let aa = [tier_key, tier_obj_key, tier_obj_vid_key];
for (k, v) in &self.meta_sys {
if aa.contains(k) {
delete_marker.meta_sys.insert(k.clone(), v.clone());
}
}
return (free_entry, true);
}
(FileMetaVersion::default(), false)
}
@@ -3568,15 +3566,15 @@ impl FileMeta {
match version.header.version_type {
VersionType::Object => {
stats.object_versions += 1;
if let Ok(ver) = FileMetaVersion::try_from(version.meta.as_slice()) {
if let Some(obj) = &ver.object {
stats.total_size += obj.size;
if obj.uses_data_dir() {
stats.versions_with_data_dir += 1;
}
if obj.inlinedata() {
stats.versions_with_inline_data += 1;
}
if let Ok(ver) = FileMetaVersion::try_from(version.meta.as_slice())
&& let Some(obj) = &ver.object
{
stats.total_size += obj.size;
if obj.uses_data_dir() {
stats.versions_with_data_dir += 1;
}
if obj.inlinedata() {
stats.versions_with_inline_data += 1;
}
}
}

@@ -442,10 +442,10 @@ impl MetaCacheEntriesSorted {
}
pub fn forward_past(&mut self, marker: Option<String>) {
if let Some(val) = marker {
if let Some(idx) = self.o.0.iter().flatten().position(|v| v.name > val) {
self.o.0 = self.o.0.split_off(idx);
}
if let Some(val) = marker
&& let Some(idx) = self.o.0.iter().flatten().position(|v| v.name > val)
{
self.o.0 = self.o.0.split_off(idx);
}
}
}
@@ -788,22 +788,23 @@ impl<T: Clone + Debug + Send + 'static> Cache<T> {
.duration_since(UNIX_EPOCH)
.expect("Time went backwards")
.as_secs();
if now - self.last_update_ms.load(AtomicOrdering::SeqCst) < self.ttl.as_secs() {
if let Some(v) = v {
return Ok(v);
}
if now - self.last_update_ms.load(AtomicOrdering::SeqCst) < self.ttl.as_secs()
&& let Some(v) = v
{
return Ok(v);
}
if self.opts.no_wait && now - self.last_update_ms.load(AtomicOrdering::SeqCst) < self.ttl.as_secs() * 2 {
if let Some(value) = v {
if self.updating.try_lock().is_ok() {
let this = Arc::clone(&self);
spawn(async move {
let _ = this.update().await;
});
}
return Ok(value);
if self.opts.no_wait
&& now - self.last_update_ms.load(AtomicOrdering::SeqCst) < self.ttl.as_secs() * 2
&& let Some(value) = v
{
if self.updating.try_lock().is_ok() {
let this = Arc::clone(&self);
spawn(async move {
let _ = this.update().await;
});
}
return Ok(value);
}
let _ = self.updating.lock().await;
@@ -811,10 +812,9 @@ impl<T: Clone + Debug + Send + 'static> Cache<T> {
if let (Ok(duration), Some(value)) = (
SystemTime::now().duration_since(UNIX_EPOCH + Duration::from_secs(self.last_update_ms.load(AtomicOrdering::SeqCst))),
v,
) {
if duration < self.ttl {
return Ok(value);
}
) && duration < self.ttl
{
return Ok(value);
}
match self.update().await {

@@ -270,14 +270,12 @@ impl ReplicationState {
return repl_status;
}
if repl_status == ReplicationStatusType::Completed {
if let (Some(replica_timestamp), Some(replication_timestamp)) =
if repl_status == ReplicationStatusType::Completed
&& let (Some(replica_timestamp), Some(replication_timestamp)) =
(self.replica_timestamp, self.replication_timestamp)
{
if replica_timestamp > replication_timestamp {
return self.replica_status.clone();
}
}
&& replica_timestamp > replication_timestamp
{
return self.replica_status.clone();
}
return repl_status;

@@ -246,12 +246,12 @@ where
}
let sts_user = has_sts_user.map(|sts| sts.credentials.access_key.clone());
if let Some(ref sts) = sts_user {
if let Some(plc) = sts_policy_map.get(sts) {
for p in plc.to_slice().iter() {
if !policy_docs_map.contains_key(p) {
let _ = self.api.load_policy_doc(p, &mut policy_docs_map).await;
}
if let Some(ref sts) = sts_user
&& let Some(plc) = sts_policy_map.get(sts)
{
for p in plc.to_slice().iter() {
if !policy_docs_map.contains_key(p) {
let _ = self.api.load_policy_doc(p, &mut policy_docs_map).await;
}
}
}
@@ -635,10 +635,10 @@ where
}
let users = self.cache.users.load();
if let Some(x) = users.get(&cred.access_key) {
if x.credentials.is_service_account() {
return Err(Error::IAMActionNotAllowed);
}
if let Some(x) = users.get(&cred.access_key)
&& x.credentials.is_service_account()
{
return Err(Error::IAMActionNotAllowed);
}
let u = UserIdentity::new(cred);
@@ -789,10 +789,10 @@ where
if !policy_present {
let mut m = HashMap::new();
if let Err(err) = self.api.load_mapped_policy(name, UserType::Reg, true, &mut m).await {
if !is_err_no_such_policy(&err) {
return Err(err);
}
if let Err(err) = self.api.load_mapped_policy(name, UserType::Reg, true, &mut m).await
&& !is_err_no_such_policy(&err)
{
return Err(err);
}
if let Some(p) = m.get(name) {
Cache::add_or_update(&self.cache.group_policies, name, p, OffsetDateTime::now_utc());
@@ -815,10 +815,10 @@ where
Some(p) => p.clone(),
None => {
let mut m = HashMap::new();
if let Err(err) = self.api.load_mapped_policy(name, UserType::Reg, false, &mut m).await {
if !is_err_no_such_policy(&err) {
return Err(err);
}
if let Err(err) = self.api.load_mapped_policy(name, UserType::Reg, false, &mut m).await
&& !is_err_no_such_policy(&err)
{
return Err(err);
}
if let Some(p) = m.get(name) {
Cache::add_or_update(&self.cache.user_policies, name, p, OffsetDateTime::now_utc());
@@ -828,10 +828,10 @@ where
Some(p) => p.clone(),
None => {
let mut m = HashMap::new();
if let Err(err) = self.api.load_mapped_policy(name, UserType::Sts, false, &mut m).await {
if !is_err_no_such_policy(&err) {
return Err(err);
}
if let Err(err) = self.api.load_mapped_policy(name, UserType::Sts, false, &mut m).await
&& !is_err_no_such_policy(&err)
{
return Err(err);
}
if let Some(p) = m.get(name) {
Cache::add_or_update(&self.cache.sts_policies, name, p, OffsetDateTime::now_utc());
@@ -864,10 +864,10 @@ where
Some(p) => p.clone(),
None => {
let mut m = HashMap::new();
if let Err(err) = self.api.load_mapped_policy(group, UserType::Reg, true, &mut m).await {
if !is_err_no_such_policy(&err) {
return Err(err);
}
if let Err(err) = self.api.load_mapped_policy(group, UserType::Reg, true, &mut m).await
&& !is_err_no_such_policy(&err)
{
return Err(err);
}
if let Some(p) = m.get(group) {
Cache::add_or_update(&self.cache.group_policies, group, p, OffsetDateTime::now_utc());
@@ -910,10 +910,10 @@ where
Some(p) => p.clone(),
None => {
let mut m = HashMap::new();
if let Err(err) = self.api.load_mapped_policy(group, UserType::Reg, true, &mut m).await {
if !is_err_no_such_policy(&err) {
return Err(err);
}
if let Err(err) = self.api.load_mapped_policy(group, UserType::Reg, true, &mut m).await
&& !is_err_no_such_policy(&err)
{
return Err(err);
}
if let Some(p) = m.get(group) {
Cache::add_or_update(&self.cache.group_policies, group, p, OffsetDateTime::now_utc());
@@ -937,10 +937,10 @@ where
}
if policy.is_empty() {
if let Err(err) = self.api.delete_mapped_policy(name, user_type, is_group).await {
if !is_err_no_such_policy(&err) {
return Err(err);
}
if let Err(err) = self.api.delete_mapped_policy(name, user_type, is_group).await
&& !is_err_no_such_policy(&err)
{
return Err(err);
}
if is_group {
@@ -1220,10 +1220,10 @@ where
Cache::delete(&self.cache.user_policies, access_key, OffsetDateTime::now_utc());
if let Err(err) = self.api.delete_user_identity(access_key, utype).await {
if !is_err_no_such_user(&err) {
return Err(err);
}
if let Err(err) = self.api.delete_user_identity(access_key, utype).await
&& !is_err_no_such_user(&err)
{
return Err(err);
}
if utype == UserType::Sts {
@@ -1532,16 +1532,16 @@ where
}
if members.is_empty() {
if let Err(err) = self.api.delete_mapped_policy(group, UserType::Reg, true).await {
if !is_err_no_such_policy(&err) {
return Err(err);
}
if let Err(err) = self.api.delete_mapped_policy(group, UserType::Reg, true).await
&& !is_err_no_such_policy(&err)
{
return Err(err);
}
if let Err(err) = self.api.delete_group_info(group).await {
if !is_err_no_such_group(&err) {
return Err(err);
}
if let Err(err) = self.api.delete_group_info(group).await
&& !is_err_no_such_group(&err)
{
return Err(err);
}
Cache::delete(&self.cache.groups, group, OffsetDateTime::now_utc());
@@ -1691,10 +1691,10 @@ where
let member_of = self.cache.user_group_memberships.load();
if let Some(m) = member_of.get(name) {
for group in m.iter() {
if let Err(err) = self.remove_members_from_group(group, vec![name.to_string()], true).await {
if !is_err_no_such_group(&err) {
return Err(err);
}
if let Err(err) = self.remove_members_from_group(group, vec![name.to_string()], true).await
&& !is_err_no_such_group(&err)
{
return Err(err);
}
}
}
@@ -1859,11 +1859,11 @@ fn filter_policies(cache: &Cache, policy_name: &str, bucket_name: &str) -> (Stri
continue;
}
if let Some(p) = cache.policy_docs.load().get(&policy) {
if bucket_name.is_empty() || pollster::block_on(p.policy.match_resource(bucket_name)) {
policies.push(policy);
to_merge.push(p.policy.clone());
}
if let Some(p) = cache.policy_docs.load().get(&policy)
&& (bucket_name.is_empty() || pollster::block_on(p.policy.match_resource(bucket_name)))
{
policies.push(policy);
to_merge.push(p.policy.clone());
}
}

@@ -633,10 +633,10 @@ impl Store for ObjectStore {
if let Some(item) = v.item {
let name = rustfs_utils::path::dir(&item);
if let Err(err) = self.load_group(&name, m).await {
if !is_err_no_such_group(&err) {
return Err(err);
}
if let Err(err) = self.load_group(&name, m).await
&& !is_err_no_such_group(&err)
{
return Err(err);
}
}
}
@@ -936,10 +936,10 @@ impl Store for ObjectStore {
let name = item.trim_end_matches(".json");
info!("load group policy: {}", name);
if let Err(err) = self.load_mapped_policy(name, UserType::Reg, true, &mut items_cache).await {
if !is_err_no_such_policy(&err) {
return Err(Error::other(format!("load group policy failed: {err}")));
}
if let Err(err) = self.load_mapped_policy(name, UserType::Reg, true, &mut items_cache).await
&& !is_err_no_such_policy(&err)
{
return Err(Error::other(format!("load group policy failed: {err}")));
};
}
@@ -955,10 +955,10 @@ impl Store for ObjectStore {
for item in item_name_list.iter() {
let name = rustfs_utils::path::dir(item);
info!("load svc user: {}", name);
if let Err(err) = self.load_user(&name, UserType::Svc, &mut items_cache).await {
if !is_err_no_such_user(&err) {
return Err(Error::other(format!("load svc user failed: {err}")));
}
if let Err(err) = self.load_user(&name, UserType::Svc, &mut items_cache).await
&& !is_err_no_such_user(&err)
{
return Err(Error::other(format!("load svc user failed: {err}")));
};
}
@@ -969,10 +969,9 @@ impl Store for ObjectStore {
if let Err(err) = self
.load_mapped_policy(&parent, UserType::Sts, false, &mut sts_policies_cache)
.await
&& !is_err_no_such_policy(&err)
{
if !is_err_no_such_policy(&err) {
return Err(Error::other(format!("load_mapped_policy failed: {err}")));
}
return Err(Error::other(format!("load_mapped_policy failed: {err}")));
}
}
}

@@ -203,13 +203,13 @@ impl<T: Store> IamSys<T> {
pub async fn set_policy(&self, name: &str, policy: Policy) -> Result<OffsetDateTime> {
let updated_at = self.store.set_policy(name, policy).await?;
if !self.has_watcher() {
if let Some(notification_sys) = get_global_notification_sys() {
let resp = notification_sys.load_policy(name).await;
for r in resp {
if let Some(err) = r.err {
warn!("notify load_policy failed: {}", err);
}
if !self.has_watcher()
&& let Some(notification_sys) = get_global_notification_sys()
{
let resp = notification_sys.load_policy(name).await;
for r in resp {
if let Some(err) = r.err {
warn!("notify load_policy failed: {}", err);
}
}
}
@@ -232,13 +232,14 @@ impl<T: Store> IamSys<T> {
pub async fn delete_user(&self, name: &str, notify: bool) -> Result<()> {
self.store.delete_user(name, UserType::Reg).await?;
if notify && !self.has_watcher() {
if let Some(notification_sys) = get_global_notification_sys() {
let resp = notification_sys.delete_user(name).await;
for r in resp {
if let Some(err) = r.err {
warn!("notify delete_user failed: {}", err);
}
if notify
&& !self.has_watcher()
&& let Some(notification_sys) = get_global_notification_sys()
{
let resp = notification_sys.delete_user(name).await;
for r in resp {
if let Some(err) = r.err {
warn!("notify delete_user failed: {}", err);
}
}
}
@@ -476,13 +477,12 @@ impl<T: Store> IamSys<T> {
let op_pt = claims.get(&iam_policy_claim_name_sa());
let op_sp = claims.get(SESSION_POLICY_NAME);
if let (Some(pt), Some(sp)) = (op_pt, op_sp) {
if pt == EMBEDDED_POLICY_TYPE {
let policy = serde_json::from_slice(
&base64_simd::URL_SAFE_NO_PAD.decode_to_vec(sp.as_str().unwrap_or_default().as_bytes())?,
)?;
return Ok((sa, Some(policy)));
}
if let (Some(pt), Some(sp)) = (op_pt, op_sp)
&& pt == EMBEDDED_POLICY_TYPE
{
let policy =
serde_json::from_slice(&base64_simd::URL_SAFE_NO_PAD.decode_to_vec(sp.as_str().unwrap_or_default().as_bytes())?)?;
return Ok((sa, Some(policy)));
}
Ok((sa, None))
@@ -537,13 +537,12 @@ impl<T: Store> IamSys<T> {
let op_pt = claims.get(&iam_policy_claim_name_sa());
let op_sp = claims.get(SESSION_POLICY_NAME);
if let (Some(pt), Some(sp)) = (op_pt, op_sp) {
if pt == EMBEDDED_POLICY_TYPE {
let policy = serde_json::from_slice(
&base64_simd::URL_SAFE_NO_PAD.decode_to_vec(sp.as_str().unwrap_or_default().as_bytes())?,
)?;
return Ok((sa, Some(policy)));
}
if let (Some(pt), Some(sp)) = (op_pt, op_sp)
&& pt == EMBEDDED_POLICY_TYPE
{
let policy =
serde_json::from_slice(&base64_simd::URL_SAFE_NO_PAD.decode_to_vec(sp.as_str().unwrap_or_default().as_bytes())?)?;
return Ok((sa, Some(policy)));
}
Ok((sa, None))
@@ -572,13 +571,14 @@ impl<T: Store> IamSys<T> {
self.store.delete_user(access_key, UserType::Svc).await?;
if notify && !self.has_watcher() {
if let Some(notification_sys) = get_global_notification_sys() {
let resp = notification_sys.delete_service_account(access_key).await;
for r in resp {
if let Some(err) = r.err {
warn!("notify delete_service_account failed: {}", err);
}
if notify
&& !self.has_watcher()
&& let Some(notification_sys) = get_global_notification_sys()
{
let resp = notification_sys.delete_service_account(access_key).await;
for r in resp {
if let Some(err) = r.err {
warn!("notify delete_service_account failed: {}", err);
}
}
}
@@ -651,10 +651,10 @@ impl<T: Store> IamSys<T> {
}
pub async fn check_key(&self, access_key: &str) -> Result<(Option<UserIdentity>, bool)> {
if let Some(sys_cred) = get_global_action_cred() {
if sys_cred.access_key == access_key {
return Ok((Some(UserIdentity::new(sys_cred)), true));
}
if let Some(sys_cred) = get_global_action_cred()
&& sys_cred.access_key == access_key
{
return Ok((Some(UserIdentity::new(sys_cred)), true));
}
match self.store.get_user(access_key).await {
@@ -725,13 +725,13 @@ impl<T: Store> IamSys<T> {
pub async fn policy_db_set(&self, name: &str, user_type: UserType, is_group: bool, policy: &str) -> Result<OffsetDateTime> {
let updated_at = self.store.policy_db_set(name, user_type, is_group, policy).await?;
if !self.has_watcher() {
if let Some(notification_sys) = get_global_notification_sys() {
let resp = notification_sys.load_policy_mapping(name, user_type.to_u64(), is_group).await;
for r in resp {
if let Some(err) = r.err {
warn!("notify load_policy failed: {}", err);
}
if !self.has_watcher()
&& let Some(notification_sys) = get_global_notification_sys()
{
let resp = notification_sys.load_policy_mapping(name, user_type.to_u64(), is_group).await;
for r in resp {
if let Some(err) = r.err {
warn!("notify load_policy failed: {}", err);
}
}
}

@@ -452,27 +452,25 @@ impl KmsClient for LocalKmsClient {
}
let path = entry.path();
if path.extension().is_some_and(|ext| ext == "key") {
if let Some(stem) = path.file_stem() {
if let Some(key_id) = stem.to_str() {
if let Ok(key_info) = self.describe_key(key_id, None).await {
// Apply filters
if let Some(ref status_filter) = request.status_filter {
if &key_info.status != status_filter {
continue;
}
}
if let Some(ref usage_filter) = request.usage_filter {
if &key_info.usage != usage_filter {
continue;
}
}
keys.push(key_info);
count += 1;
}
}
if path.extension().is_some_and(|ext| ext == "key")
&& let Some(stem) = path.file_stem()
&& let Some(key_id) = stem.to_str()
&& let Ok(key_info) = self.describe_key(key_id, None).await
{
// Apply filters
if let Some(ref status_filter) = request.status_filter
&& &key_info.status != status_filter
{
continue;
}
if let Some(ref usage_filter) = request.usage_filter
&& &key_info.usage != usage_filter
{
continue;
}
keys.push(key_info);
count += 1;
}
}

@@ -279,14 +279,13 @@ impl KmsConfig {
}
// Validate TLS configuration if using HTTPS
if config.address.starts_with("https://") {
if let Some(ref tls) = config.tls {
if !tls.skip_verify {
// In production, we should have proper TLS configuration
if tls.ca_cert_path.is_none() && tls.client_cert_path.is_none() {
tracing::warn!("Using HTTPS without custom TLS configuration - relying on system CA");
}
}
if config.address.starts_with("https://")
&& let Some(ref tls) = config.tls
&& !tls.skip_verify
{
// In production, we should have proper TLS configuration
if tls.ca_cert_path.is_none() && tls.client_cert_path.is_none() {
tracing::warn!("Using HTTPS without custom TLS configuration - relying on system CA");
}
}
}

@@ -74,14 +74,14 @@ impl KmsManager {
// Check cache first if enabled
if self.config.enable_cache {
let cache = self.cache.read().await;
if let Some(cached_key) = cache.get_data_key(&request.key_id).await {
if cached_key.key_spec == request.key_spec {
return Ok(GenerateDataKeyResponse {
key_id: request.key_id.clone(),
plaintext_key: cached_key.plaintext.clone(),
ciphertext_blob: cached_key.ciphertext.clone(),
});
}
if let Some(cached_key) = cache.get_data_key(&request.key_id).await
&& cached_key.key_spec == request.key_spec
{
return Ok(GenerateDataKeyResponse {
key_id: request.key_id.clone(),
plaintext_key: cached_key.plaintext.clone(),
ciphertext_blob: cached_key.ciphertext.clone(),
});
}
}

@@ -104,20 +104,20 @@ mod tests {
let response = client.acquire_exclusive(&request).await;
assert!(response.is_ok());
if let Ok(response) = response {
if response.success {
let lock_info = response.lock_info.unwrap();
if let Ok(response) = response
&& response.success
{
let lock_info = response.lock_info.unwrap();
// Test status check
let status = client.check_status(&lock_info.id).await;
assert!(status.is_ok());
assert!(status.unwrap().is_some());
// Test status check
let status = client.check_status(&lock_info.id).await;
assert!(status.is_ok());
assert!(status.unwrap().is_some());
// Test lock release
let released = client.release(&lock_info.id).await;
assert!(released.is_ok());
assert!(released.unwrap());
}
// Test lock release
let released = client.release(&lock_info.id).await;
assert!(released.is_ok());
assert!(released.unwrap());
}
}
}

@@ -953,10 +953,10 @@ mod tests {
// Wait for all operations to complete
let mut successful_operations = 0;
for handle in handles {
if let Ok(success) = handle.await {
if success {
successful_operations += 1;
}
if let Ok(success) = handle.await
&& success
{
successful_operations += 1;
}
}

@@ -414,10 +414,10 @@ impl Default for FastObjectLockManager {
impl Drop for FastObjectLockManager {
fn drop(&mut self) {
// Note: We can't use async in Drop, so we just abort the cleanup task
if let Ok(handle_guard) = self.cleanup_handle.try_read() {
if let Some(handle) = handle_guard.as_ref() {
handle.abort();
}
if let Ok(handle_guard) = self.cleanup_handle.try_read()
&& let Some(handle) = handle_guard.as_ref()
{
handle.abort();
}
}
}

@@ -69,10 +69,10 @@ impl LockShard {
/// Try fast path only (without fallback to slow path)
pub fn try_fast_path_only(&self, request: &ObjectLockRequest) -> bool {
// Early check to avoid unnecessary lock contention
if let Some(state) = self.objects.read().get(&request.key) {
if !state.atomic_state.is_fast_path_available(request.mode) {
return false;
}
if let Some(state) = self.objects.read().get(&request.key)
&& !state.atomic_state.is_fast_path_available(request.mode)
{
return false;
}
self.try_fast_path(request).is_some()
}
@@ -441,36 +441,36 @@ impl LockShard {
/// Get lock information for monitoring
pub fn get_lock_info(&self, key: &ObjectKey) -> Option<crate::fast_lock::types::ObjectLockInfo> {
let objects = self.objects.read();
if let Some(state) = objects.get(key) {
if let Some(mode) = state.current_mode() {
let (owner, acquired_at, lock_timeout) = match mode {
LockMode::Exclusive => {
let current_owner = state.current_owner.read();
let info = current_owner.clone()?;
(info.owner, info.acquired_at, info.lock_timeout)
}
LockMode::Shared => {
let shared_owners = state.shared_owners.read();
let entry = shared_owners.first()?.clone();
(entry.owner, entry.acquired_at, entry.lock_timeout)
}
};
if let Some(state) = objects.get(key)
&& let Some(mode) = state.current_mode()
{
let (owner, acquired_at, lock_timeout) = match mode {
LockMode::Exclusive => {
let current_owner = state.current_owner.read();
let info = current_owner.clone()?;
(info.owner, info.acquired_at, info.lock_timeout)
}
LockMode::Shared => {
let shared_owners = state.shared_owners.read();
let entry = shared_owners.first()?.clone();
(entry.owner, entry.acquired_at, entry.lock_timeout)
}
};
let priority = *state.priority.read();
let priority = *state.priority.read();
let expires_at = acquired_at
.checked_add(lock_timeout)
.unwrap_or_else(|| acquired_at + crate::fast_lock::DEFAULT_LOCK_TIMEOUT);
let expires_at = acquired_at
.checked_add(lock_timeout)
.unwrap_or_else(|| acquired_at + crate::fast_lock::DEFAULT_LOCK_TIMEOUT);
return Some(crate::fast_lock::types::ObjectLockInfo {
key: key.clone(),
mode,
owner,
acquired_at,
expires_at,
priority,
});
}
return Some(crate::fast_lock::types::ObjectLockInfo {
key: key.clone(),
mode,
owner,
acquired_at,
expires_at,
priority,
});
}
None
}

@@ -165,10 +165,10 @@ impl NamespaceLock {
let mut successful_clients = Vec::new();
for (idx, res) in results {
if let Ok(resp) = res {
if resp.success {
successful_clients.push(idx);
}
if let Ok(resp) = res
&& resp.success
{
successful_clients.push(idx);
}
}

@@ -204,10 +204,10 @@ impl TargetFactory for MQTTTargetFactory {
if !std::path::Path::new(&queue_dir).is_absolute() {
return Err(TargetError::Configuration("MQTT queue directory must be an absolute path".to_string()));
}
if let Some(qos_str) = config.lookup(MQTT_QOS) {
if qos_str == "0" {
warn!("Using queue_dir with QoS 0 may result in event loss");
}
if let Some(qos_str) = config.lookup(MQTT_QOS)
&& qos_str == "0"
{
warn!("Using queue_dir with QoS 0 may result in event loss");
}
}

@@ -345,10 +345,10 @@ impl TargetList {
pub async fn clear_targets_only(&mut self) {
let target_ids_to_clear: Vec<TargetID> = self.targets.keys().cloned().collect();
for id in target_ids_to_clear {
if let Some(target_arc) = self.targets.remove(&id) {
if let Err(e) = target_arc.close().await {
error!("Failed to close target {} during clear: {}", id, e);
}
if let Some(target_arc) = self.targets.remove(&id)
&& let Err(e) = target_arc.close().await
{
error!("Failed to close target {} during clear: {}", id, e);
}
}
self.targets.clear();

@@ -119,12 +119,11 @@ impl TargetRegistry {
format!("{ENV_PREFIX}{NOTIFY_ROUTE_PREFIX}{target_type}{DEFAULT_DELIMITER}{ENABLE_KEY}{DEFAULT_DELIMITER}")
.to_uppercase();
for (key, value) in &all_env {
if EnableState::from_str(value).ok().map(|s| s.is_enabled()).unwrap_or(false) {
if let Some(id) = key.strip_prefix(&enable_prefix) {
if !id.is_empty() {
instance_ids_from_env.insert(id.to_lowercase());
}
}
if EnableState::from_str(value).ok().map(|s| s.is_enabled()).unwrap_or(false)
&& let Some(id) = key.strip_prefix(&enable_prefix)
&& !id.is_empty()
{
instance_ids_from_env.insert(id.to_lowercase());
}
}
@@ -273,10 +272,10 @@ impl TargetRegistry {
for section in sections {
let mut section_map: std::collections::HashMap<String, KVS> = std::collections::HashMap::new();
// Add default item
if let Some(default_kvs) = section_defaults.get(&section) {
if !default_kvs.is_empty() {
section_map.insert(DEFAULT_DELIMITER.to_string(), default_kvs.clone());
}
if let Some(default_kvs) = section_defaults.get(&section)
&& !default_kvs.is_empty()
{
section_map.insert(DEFAULT_DELIMITER.to_string(), default_kvs.clone());
}
// Add successful instance item

@@ -24,30 +24,30 @@ pub fn new_pattern(prefix: Option<&str>, suffix: Option<&str>) -> String {
let mut pattern = String::new();
// Process the prefix part
if let Some(p) = prefix {
if !p.is_empty() {
pattern.push_str(p);
if !p.ends_with('*') {
pattern.push('*');
}
if let Some(p) = prefix
&& !p.is_empty()
{
pattern.push_str(p);
if !p.ends_with('*') {
pattern.push('*');
}
}
// Process the suffix part
if let Some(s) = suffix {
if !s.is_empty() {
let mut s_to_append = s.to_string();
if !s.starts_with('*') {
s_to_append.insert(0, '*');
}
if let Some(s) = suffix
&& !s.is_empty()
{
let mut s_to_append = s.to_string();
if !s.starts_with('*') {
s_to_append.insert(0, '*');
}
// If the pattern is empty (only suffixes are provided), then the pattern is the suffix
// Otherwise, append the suffix to the pattern
if pattern.is_empty() {
pattern = s_to_append;
} else {
pattern.push_str(&s_to_append);
}
// If the pattern is empty (only suffixes are provided), then the pattern is the suffix
// Otherwise, append the suffix to the pattern
if pattern.is_empty() {
pattern = s_to_append;
} else {
pattern.push_str(&s_to_append);
}
}

@@ -86,21 +86,21 @@ impl std::fmt::Debug for OtelGuard {
impl Drop for OtelGuard {
fn drop(&mut self) {
if let Some(provider) = self.tracer_provider.take() {
if let Err(err) = provider.shutdown() {
eprintln!("Tracer shutdown error: {err:?}");
}
if let Some(provider) = self.tracer_provider.take()
&& let Err(err) = provider.shutdown()
{
eprintln!("Tracer shutdown error: {err:?}");
}
if let Some(provider) = self.meter_provider.take() {
if let Err(err) = provider.shutdown() {
eprintln!("Meter shutdown error: {err:?}");
}
if let Some(provider) = self.meter_provider.take()
&& let Err(err) = provider.shutdown()
{
eprintln!("Meter shutdown error: {err:?}");
}
if let Some(provider) = self.logger_provider.take() {
if let Err(err) = provider.shutdown() {
eprintln!("Logger shutdown error: {err:?}");
}
if let Some(provider) = self.logger_provider.take()
&& let Err(err) = provider.shutdown()
{
eprintln!("Logger shutdown error: {err:?}");
}
if let Some(handle) = self.flexi_logger_handles.take() {

@@ -122,10 +122,10 @@ impl Resource {
// Apply condition substitutions
if !conditions.is_empty() {
for key in KeyName::COMMON_KEYS {
if let Some(rvalue) = conditions.get(key.name()) {
if matches!(rvalue.first().map(|c| !c.is_empty()), Some(true)) {
resolved_pattern = resolved_pattern.replace(&key.var_name(), &rvalue[0]);
}
if let Some(rvalue) = conditions.get(key.name())
&& matches!(rvalue.first().map(|c| !c.is_empty()), Some(true))
{
resolved_pattern = resolved_pattern.replace(&key.var_name(), &rvalue[0]);
}
}
}

@@ -387,7 +387,7 @@ impl Checksum {
// Ensure we don't divide by 0
let raw_len = self.checksum_type.raw_byte_len();
if raw_len == 0 || parts.len() % raw_len != 0 {
if raw_len == 0 || !parts.len().is_multiple_of(raw_len) {
checksums = 0;
} else if !parts.is_empty() {
checksums = (parts.len() / raw_len) as i32;
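The hunk above swaps the manual modulo test for `usize::is_multiple_of`, stabilized in Rust 1.87. A small sketch (helper names are made up for illustration) showing the two checks agree, including the `raw_len == 0` guard that both versions keep:

// Both forms reject a parts buffer that is not a whole number of raw checksums.
fn needs_reject_modulo(parts_len: usize, raw_len: usize) -> bool {
    raw_len == 0 || parts_len % raw_len != 0
}

fn needs_reject_multiple_of(parts_len: usize, raw_len: usize) -> bool {
    raw_len == 0 || !parts_len.is_multiple_of(raw_len)
}

fn main() {
    for (parts_len, raw_len) in [(32, 8), (33, 8), (0, 8), (0, 0), (5, 0)] {
        assert_eq!(
            needs_reject_modulo(parts_len, raw_len),
            needs_reject_multiple_of(parts_len, raw_len)
        );
    }
}

`is_multiple_of` is also defined for a zero divisor (`0_usize.is_multiple_of(0)` is `true`), so unlike `%` it cannot panic, although the explicit `raw_len == 0` branch keeps the original control flow here anyway.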
@@ -506,16 +506,16 @@ pub fn get_content_checksum(headers: &HeaderMap) -> Result<Option<Checksum>, std
for header in trailing_headers {
let mut duplicates = false;
for &checksum_type in crate::checksum::BASE_CHECKSUM_TYPES {
if let Some(key) = checksum_type.key() {
if header.eq_ignore_ascii_case(key) {
duplicates = result.is_some();
result = Some(Checksum {
checksum_type: ChecksumType(checksum_type.0 | ChecksumType::TRAILING.0),
encoded: String::new(),
raw: Vec::new(),
want_parts: 0,
});
}
if let Some(key) = checksum_type.key()
&& header.eq_ignore_ascii_case(key)
{
duplicates = result.is_some();
result = Some(Checksum {
checksum_type: ChecksumType(checksum_type.0 | ChecksumType::TRAILING.0),
encoded: String::new(),
raw: Vec::new(),
want_parts: 0,
});
}
}
if duplicates {
@@ -567,13 +567,13 @@ fn get_content_checksum_direct(headers: &HeaderMap) -> (ChecksumType, String) {
checksum_type = ChecksumType(checksum_type.0 | ChecksumType::FULL_OBJECT.0);
}
if checksum_type.is_set() {
if let Some(key) = checksum_type.key() {
if let Some(value) = headers.get(key).and_then(|v| v.to_str().ok()) {
return (checksum_type, value.to_string());
} else {
return (ChecksumType::NONE, String::new());
}
if checksum_type.is_set()
&& let Some(key) = checksum_type.key()
{
if let Some(value) = headers.get(key).and_then(|v| v.to_str().ok()) {
return (checksum_type, value.to_string());
} else {
return (ChecksumType::NONE, String::new());
}
}
return (checksum_type, String::new());
@@ -581,22 +581,22 @@ fn get_content_checksum_direct(headers: &HeaderMap) -> (ChecksumType, String) {
// Check individual checksum headers
for &ct in crate::checksum::BASE_CHECKSUM_TYPES {
if let Some(key) = ct.key() {
if let Some(value) = headers.get(key).and_then(|v| v.to_str().ok()) {
// If already set, invalid
if checksum_type != ChecksumType::NONE {
if let Some(key) = ct.key()
&& let Some(value) = headers.get(key).and_then(|v| v.to_str().ok())
{
// If already set, invalid
if checksum_type != ChecksumType::NONE {
return (ChecksumType::INVALID, String::new());
}
checksum_type = ct;
if headers.get("x-amz-checksum-type").and_then(|v| v.to_str().ok()) == Some("FULL_OBJECT") {
if !checksum_type.can_merge() {
return (ChecksumType::INVALID, String::new());
}
checksum_type = ct;
if headers.get("x-amz-checksum-type").and_then(|v| v.to_str().ok()) == Some("FULL_OBJECT") {
if !checksum_type.can_merge() {
return (ChecksumType::INVALID, String::new());
}
checksum_type = ChecksumType(checksum_type.0 | ChecksumType::FULL_OBJECT.0);
}
return (checksum_type, value.to_string());
checksum_type = ChecksumType(checksum_type.0 | ChecksumType::FULL_OBJECT.0);
}
return (checksum_type, value.to_string());
}
}
@@ -965,10 +965,10 @@ fn gf2_matrix_times(mat: &[u64], mut vec: u64) -> u64 {
let mut mat_iter = mat.iter();
while vec != 0 {
if vec & 1 != 0 {
if let Some(&m) = mat_iter.next() {
sum ^= m;
}
if vec & 1 != 0
&& let Some(&m) = mat_iter.next()
{
sum ^= m;
}
vec >>= 1;
mat_iter.next();

@@ -178,12 +178,11 @@ impl HashReader {
));
}
if let Some(checksum) = existing_hash_reader.checksum() {
if let Some(ref md5) = md5hex {
if checksum != md5 {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "HashReader checksum mismatch"));
}
}
if let Some(checksum) = existing_hash_reader.checksum()
&& let Some(ref md5) = md5hex
&& checksum != md5
{
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "HashReader checksum mismatch"));
}
if existing_hash_reader.size() > 0 && size > 0 && existing_hash_reader.size() != size {
@@ -359,15 +358,15 @@ impl HashReader {
}
if checksum.checksum_type.trailing() {
if let Some(trailer) = self.trailer_s3s.as_ref() {
if let Some(Some(checksum_str)) = trailer.read(|headers| {
if let Some(trailer) = self.trailer_s3s.as_ref()
&& let Some(Some(checksum_str)) = trailer.read(|headers| {
checksum
.checksum_type
.key()
.and_then(|key| headers.get(key).and_then(|value| value.to_str().ok().map(|s| s.to_string())))
}) {
map.insert(checksum.checksum_type.to_string(), checksum_str);
}
})
{
map.insert(checksum.checksum_type.to_string(), checksum_str);
}
return map;
}
@@ -450,18 +449,18 @@ impl AsyncRead for HashReader {
if filled > 0 {
// Update SHA256 hasher
if let Some(hasher) = this.content_sha256_hasher {
if let Err(e) = hasher.write_all(data) {
error!("SHA256 hasher write error, error={:?}", e);
return Poll::Ready(Err(std::io::Error::other(e)));
}
if let Some(hasher) = this.content_sha256_hasher
&& let Err(e) = hasher.write_all(data)
{
error!("SHA256 hasher write error, error={:?}", e);
return Poll::Ready(Err(std::io::Error::other(e)));
}
// Update content hasher
if let Some(hasher) = this.content_hasher {
if let Err(e) = hasher.write_all(data) {
return Poll::Ready(Err(std::io::Error::other(e)));
}
if let Some(hasher) = this.content_hasher
&& let Err(e) = hasher.write_all(data)
{
return Poll::Ready(Err(std::io::Error::other(e)));
}
}
@@ -477,22 +476,22 @@ impl AsyncRead for HashReader {
// check content hasher
if let (Some(hasher), Some(expected_content_hash)) = (this.content_hasher, this.content_hash) {
if expected_content_hash.checksum_type.trailing() {
if let Some(trailer) = this.trailer_s3s.as_ref() {
if let Some(Some(checksum_str)) = trailer.read(|headers| {
expected_content_hash.checksum_type.key().and_then(|key| {
headers.get(key).and_then(|value| value.to_str().ok().map(|s| s.to_string()))
})
}) {
expected_content_hash.encoded = checksum_str;
expected_content_hash.raw = general_purpose::STANDARD
.decode(&expected_content_hash.encoded)
.map_err(|_| std::io::Error::other("Invalid base64 checksum"))?;
if expected_content_hash.checksum_type.trailing()
&& let Some(trailer) = this.trailer_s3s.as_ref()
&& let Some(Some(checksum_str)) = trailer.read(|headers| {
expected_content_hash
.checksum_type
.key()
.and_then(|key| headers.get(key).and_then(|value| value.to_str().ok().map(|s| s.to_string())))
})
{
expected_content_hash.encoded = checksum_str;
expected_content_hash.raw = general_purpose::STANDARD
.decode(&expected_content_hash.encoded)
.map_err(|_| std::io::Error::other("Invalid base64 checksum"))?;
if expected_content_hash.raw.is_empty() {
return Poll::Ready(Err(std::io::Error::other("Content hash mismatch")));
}
}
if expected_content_hash.raw.is_empty() {
return Poll::Ready(Err(std::io::Error::other("Content hash mismatch")));
}
}

@@ -175,10 +175,10 @@ impl SimpleQueryDispatcher {
.clone()
.map(|e| e.as_bytes().first().copied().unwrap_or_default()),
);
if let Some(delimiter) = csv.field_delimiter.as_ref() {
if delimiter.len() == 1 {
file_format = file_format.with_delimiter(delimiter.as_bytes()[0]);
}
if let Some(delimiter) = csv.field_delimiter.as_ref()
&& delimiter.len() == 1
{
file_format = file_format.with_delimiter(delimiter.as_bytes()[0]);
}
// TODO waiting for processing @junxiang Mu
// if csv.file_header_info.is_some() {}

@@ -25,10 +25,10 @@ pub fn get_host_addr(req: &request::Request<Body>) -> String {
} else {
req_host = uri.host().unwrap().to_string();
}
if let Some(host) = host {
if req_host != *host.to_str().unwrap() {
return (*host.to_str().unwrap()).to_string();
}
if let Some(host) = host
&& req_host != *host.to_str().unwrap()
{
return (*host.to_str().unwrap()).to_string();
}
/*if req.uri_ref().unwrap().host().is_some() {
return req.uri_ref().unwrap().host().unwrap();

@@ -97,11 +97,11 @@ pub fn parse_key(s: &str) -> Key {
}
// Number of batch items parsed
if let Some(colon_pos) = name.find(':') {
if let Ok(count) = name[..colon_pos].parse::<usize>() {
item_count = count;
name = name[colon_pos + 1..].to_string();
}
if let Some(colon_pos) = name.find(':')
&& let Ok(count) = name[..colon_pos].parse::<usize>()
{
item_count = count;
name = name[colon_pos + 1..].to_string();
}
// Resolve extension

@@ -225,17 +225,20 @@ where
match tokio::time::timeout(DEFAULT_CONNECTION_TIMEOUT, async {
while !self.connected.load(Ordering::SeqCst) {
if let Some(handle) = self.bg_task_manager.init_cell.get() {
if handle.is_finished() && !self.connected.load(Ordering::SeqCst) {
error!(target_id = %self.id, "MQTT background task exited prematurely before connection was established.");
return Err(TargetError::Network("MQTT background task exited prematurely".to_string()));
}
if let Some(handle) = self.bg_task_manager.init_cell.get()
&& handle.is_finished()
&& !self.connected.load(Ordering::SeqCst)
{
error!(target_id = %self.id, "MQTT background task exited prematurely before connection was established.");
return Err(TargetError::Network("MQTT background task exited prematurely".to_string()));
}
tokio::time::sleep(Duration::from_millis(100)).await;
}
debug!(target_id = %self.id, "MQTT target connected successfully.");
Ok(())
}).await {
})
.await
{
Ok(Ok(_)) => {
info!(target_id = %self.id, "MQTT target initialized and connected.");
Ok(())
@@ -243,9 +246,7 @@ where
Ok(Err(e)) => Err(e),
Err(_) => {
error!(target_id = %self.id, "Timeout waiting for MQTT connection after task spawn.");
Err(TargetError::Network(
"Timeout waiting for MQTT connection".to_string(),
))
Err(TargetError::Network("Timeout waiting for MQTT connection".to_string()))
}
}
}
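The reformatted `match` above wraps a fallible wait loop in `tokio::time::timeout`, which yields three distinct outcomes. A standalone sketch of that idiom, with a stand-in `wait_for_connection` instead of the real MQTT state machine:

use std::time::Duration;

// Illustrative stand-in for the connection wait; always succeeds after a short sleep.
async fn wait_for_connection() -> Result<(), String> {
    tokio::time::sleep(Duration::from_millis(10)).await;
    Ok(())
}

#[tokio::main]
async fn main() {
    match tokio::time::timeout(Duration::from_secs(1), wait_for_connection()).await {
        Ok(Ok(())) => println!("connected"),
        Ok(Err(e)) => println!("connection attempt failed: {e}"),
        Err(_) => println!("timed out before a connection was established"),
    }
}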
@@ -470,11 +471,11 @@ where
debug!(target_id = %self.id, "Checking if MQTT target is active.");
if self.client.lock().await.is_none() && !self.connected.load(Ordering::SeqCst) {
// Check if the background task is running and has not panicked
if let Some(handle) = self.bg_task_manager.init_cell.get() {
if handle.is_finished() {
error!(target_id = %self.id, "MQTT background task has finished, possibly due to an error. Target is not active.");
return Err(TargetError::Network("MQTT background task terminated".to_string()));
}
if let Some(handle) = self.bg_task_manager.init_cell.get()
&& handle.is_finished()
{
error!(target_id = %self.id, "MQTT background task has finished, possibly due to an error. Target is not active.");
return Err(TargetError::Network("MQTT background task terminated".to_string()));
}
debug!(target_id = %self.id, "MQTT client not yet initialized or task not running/connected.");
return Err(TargetError::Configuration(

@@ -292,7 +292,7 @@ pub fn create_multi_cert_resolver(
for (domain, (certs, key)) in cert_key_pairs {
// create a signature
let signing_key = rustls::crypto::ring::sign::any_supported_type(&key)
let signing_key = rustls::crypto::aws_lc_rs::sign::any_supported_type(&key)
.map_err(|e| certs_error(format!("unsupported private key types:{domain}, err:{e:?}")))?;
// create a CertifiedKey
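With the move to the aws-lc-rs backend, the certificate resolver obtains its signing key from `rustls::crypto::aws_lc_rs::sign` instead of the ring module. A minimal sketch of the surrounding flow, assuming rustls 0.23 with its default `aws-lc-rs` feature; `certified_key` is a hypothetical helper, not the crate's API:

use std::sync::Arc;

use rustls::pki_types::{CertificateDer, PrivateKeyDer};
use rustls::sign::CertifiedKey;

// Turn one DER certificate chain plus private key into a CertifiedKey
// using the aws-lc-rs signer, mirroring the call site above.
fn certified_key(
    certs: Vec<CertificateDer<'static>>,
    key: PrivateKeyDer<'static>,
) -> Result<Arc<CertifiedKey>, rustls::Error> {
    let signing_key = rustls::crypto::aws_lc_rs::sign::any_supported_type(&key)?;
    Ok(Arc::new(CertifiedKey::new(certs, signing_key)))
}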

@@ -56,36 +56,36 @@ fn is_xff_header_enabled() -> bool {
///
pub fn get_source_scheme(headers: &HeaderMap) -> Option<String> {
// Retrieve the scheme from X-Forwarded-Proto.
if let Some(proto) = headers.get(X_FORWARDED_PROTO) {
if let Ok(proto_str) = proto.to_str() {
return Some(proto_str.to_lowercase());
}
if let Some(proto) = headers.get(X_FORWARDED_PROTO)
&& let Ok(proto_str) = proto.to_str()
{
return Some(proto_str.to_lowercase());
}
if let Some(proto) = headers.get(X_FORWARDED_SCHEME) {
if let Ok(proto_str) = proto.to_str() {
return Some(proto_str.to_lowercase());
}
if let Some(proto) = headers.get(X_FORWARDED_SCHEME)
&& let Ok(proto_str) = proto.to_str()
{
return Some(proto_str.to_lowercase());
}
if let Some(forwarded) = headers.get(FORWARDED) {
if let Ok(forwarded_str) = forwarded.to_str() {
// match should contain at least two elements if the protocol was
// specified in the Forwarded header. The first element will always be
// the 'for=', which we ignore, subsequently we proceed to look for
// 'proto=' which should precede right after `for=` if not
// we simply ignore the values and return empty. This is in line
// with the approach we took for returning first ip from multiple
// params.
if let Some(for_match) = FOR_REGEX.captures(forwarded_str) {
if for_match.len() > 1 {
let remaining = &for_match[2];
if let Some(proto_match) = PROTO_REGEX.captures(remaining) {
if proto_match.len() > 1 {
return Some(proto_match[2].to_lowercase());
}
}
}
if let Some(forwarded) = headers.get(FORWARDED)
&& let Ok(forwarded_str) = forwarded.to_str()
{
// match should contain at least two elements if the protocol was
// specified in the Forwarded header. The first element will always be
// the 'for=', which we ignore, subsequently we proceed to look for
// 'proto=' which should precede right after `for=` if not
// we simply ignore the values and return empty. This is in line
// with the approach we took for returning first ip from multiple
// params.
if let Some(for_match) = FOR_REGEX.captures(forwarded_str)
&& for_match.len() > 1
{
let remaining = &for_match[2];
if let Some(proto_match) = PROTO_REGEX.captures(remaining)
&& proto_match.len() > 1
{
return Some(proto_match[2].to_lowercase());
}
}
}
@@ -105,17 +105,16 @@ pub fn get_source_scheme(headers: &HeaderMap) -> Option<String> {
pub fn get_source_ip_from_headers(headers: &HeaderMap) -> Option<String> {
let mut addr = None;
if is_xff_header_enabled() {
if let Some(forwarded_for) = headers.get(X_FORWARDED_FOR) {
if let Ok(forwarded_str) = forwarded_for.to_str() {
// Only grab the first (client) address. Note that '192.168.0.1,
// 10.1.1.1' is a valid key for X-Forwarded-For where addresses after
// the first may represent forwarding proxies earlier in the chain.
let first_comma = forwarded_str.find(", ");
let end = first_comma.unwrap_or(forwarded_str.len());
addr = Some(forwarded_str[..end].to_string());
}
}
if is_xff_header_enabled()
&& let Some(forwarded_for) = headers.get(X_FORWARDED_FOR)
&& let Ok(forwarded_str) = forwarded_for.to_str()
{
// Only grab the first (client) address. Note that '192.168.0.1,
// 10.1.1.1' is a valid key for X-Forwarded-For where addresses after
// the first may represent forwarding proxies earlier in the chain.
let first_comma = forwarded_str.find(", ");
let end = first_comma.unwrap_or(forwarded_str.len());
addr = Some(forwarded_str[..end].to_string());
}
if addr.is_none() {
@@ -125,21 +124,21 @@ pub fn get_source_ip_from_headers(headers: &HeaderMap) -> Option<String> {
// request).
addr = Some(real_ip_str.to_string());
}
} else if let Some(forwarded) = headers.get(FORWARDED) {
if let Ok(forwarded_str) = forwarded.to_str() {
// match should contain at least two elements if the protocol was
// specified in the Forwarded header. The first element will always be
// the 'for=' capture, which we ignore. In the case of multiple IP
// addresses (for=8.8.8.8, 8.8.4.4, 172.16.1.20 is valid) we only
// extract the first, which should be the client IP.
if let Some(for_match) = FOR_REGEX.captures(forwarded_str) {
if for_match.len() > 1 {
// IPv6 addresses in Forwarded headers are quoted-strings. We strip
// these quotes.
let ip = for_match[1].trim_matches('"');
addr = Some(ip.to_string());
}
}
} else if let Some(forwarded) = headers.get(FORWARDED)
&& let Ok(forwarded_str) = forwarded.to_str()
{
// match should contain at least two elements if the protocol was
// specified in the Forwarded header. The first element will always be
// the 'for=' capture, which we ignore. In the case of multiple IP
// addresses (for=8.8.8.8, 8.8.4.4, 172.16.1.20 is valid) we only
// extract the first, which should be the client IP.
if let Some(for_match) = FOR_REGEX.captures(forwarded_str)
&& for_match.len() > 1
{
// IPv6 addresses in Forwarded headers are quoted-strings. We strip
// these quotes.
let ip = for_match[1].trim_matches('"');
addr = Some(ip.to_string());
}
}
}
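The X-Forwarded-For branch above keeps only the first address, since later entries name intermediate proxies rather than the client. A standalone sketch of that slicing, with made-up header values:

// Take the client address from an X-Forwarded-For style value.
fn first_forwarded_for(value: &str) -> &str {
    let end = value.find(", ").unwrap_or(value.len());
    &value[..end]
}

fn main() {
    assert_eq!(first_forwarded_for("192.168.0.1, 10.1.1.1"), "192.168.0.1");
    assert_eq!(first_forwarded_for("203.0.113.7"), "203.0.113.7");
}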

@@ -174,16 +174,15 @@ pub async fn get_host_ip(host: Host<&str>) -> std::io::Result<HashSet<IpAddr>> {
match host {
Host::Domain(domain) => {
// Check cache first
if CUSTOM_DNS_RESOLVER.read().unwrap().is_none() {
if let Ok(mut cache) = DNS_CACHE.lock() {
if let Some(entry) = cache.get(domain) {
if !entry.is_expired(DNS_CACHE_TTL) {
return Ok(entry.ips.clone());
}
// Remove expired entry
cache.remove(domain);
}
if CUSTOM_DNS_RESOLVER.read().unwrap().is_none()
&& let Ok(mut cache) = DNS_CACHE.lock()
&& let Some(entry) = cache.get(domain)
{
if !entry.is_expired(DNS_CACHE_TTL) {
return Ok(entry.ips.clone());
}
// Remove expired entry
cache.remove(domain);
}
info!("Cache miss for domain {domain}, querying system resolver.");

@@ -196,13 +196,12 @@ impl ParsedURL {
impl std::fmt::Display for ParsedURL {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
let mut url = self.0.clone();
if let Some(host) = url.host_str().map(|h| h.to_string()) {
if let Some(port) = url.port() {
if (url.scheme() == "http" && port == 80) || (url.scheme() == "https" && port == 443) {
url.set_host(Some(&host)).unwrap();
url.set_port(None).unwrap();
}
}
if let Some(host) = url.host_str().map(|h| h.to_string())
&& let Some(port) = url.port()
&& ((url.scheme() == "http" && port == 80) || (url.scheme() == "https" && port == 443))
{
url.set_host(Some(&host)).unwrap();
url.set_port(None).unwrap();
}
let mut s = url.to_string();
@@ -251,12 +250,12 @@ impl<'de> serde::Deserialize<'de> for ParsedURL {
/// Returns NetError if parsing fails or host is invalid.
///
pub fn parse_url(s: &str) -> Result<ParsedURL, NetError> {
if let Some(scheme_end) = s.find("://") {
if s[scheme_end + 3..].starts_with('/') {
let scheme = &s[..scheme_end];
if !scheme.is_empty() {
return Err(NetError::SchemeWithEmptyHost);
}
if let Some(scheme_end) = s.find("://")
&& s[scheme_end + 3..].starts_with('/')
{
let scheme = &s[..scheme_end];
if !scheme.is_empty() {
return Err(NetError::SchemeWithEmptyHost);
}
}

@@ -367,35 +367,35 @@ async fn _setup_console_tls_config(tls_path: Option<&String>) -> Result<Option<R
debug!("Found TLS directory for console, checking for certificates");
// Make sure to use a modern encryption suite
let _ = rustls::crypto::ring::default_provider().install_default();
let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
// 1. Attempt to load all certificates in the directory (multi-certificate support, for SNI)
if let Ok(cert_key_pairs) = rustfs_utils::load_all_certs_from_directory(tls_path) {
if !cert_key_pairs.is_empty() {
debug!(
"Found {} certificates for console, creating SNI-aware multi-cert resolver",
cert_key_pairs.len()
);
if let Ok(cert_key_pairs) = rustfs_utils::load_all_certs_from_directory(tls_path)
&& !cert_key_pairs.is_empty()
{
debug!(
"Found {} certificates for console, creating SNI-aware multi-cert resolver",
cert_key_pairs.len()
);
// Create an SNI-enabled certificate resolver
let resolver = rustfs_utils::create_multi_cert_resolver(cert_key_pairs)?;
// Create an SNI-enabled certificate resolver
let resolver = rustfs_utils::create_multi_cert_resolver(cert_key_pairs)?;
// Configure the server to enable SNI support
let mut server_config = ServerConfig::builder()
.with_no_client_auth()
.with_cert_resolver(Arc::new(resolver));
// Configure the server to enable SNI support
let mut server_config = ServerConfig::builder()
.with_no_client_auth()
.with_cert_resolver(Arc::new(resolver));
// Configure ALPN protocol priority
server_config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec(), b"http/1.0".to_vec()];
// Configure ALPN protocol priority
server_config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec(), b"http/1.0".to_vec()];
// Log SNI requests
if rustfs_utils::tls_key_log() {
server_config.key_log = Arc::new(rustls::KeyLogFile::new());
}
info!(target: "rustfs::console::tls", "Console TLS enabled with multi-certificate SNI support");
return Ok(Some(RustlsConfig::from_config(Arc::new(server_config))));
// Log SNI requests
if rustfs_utils::tls_key_log() {
server_config.key_log = Arc::new(rustls::KeyLogFile::new());
}
info!(target: "rustfs::console::tls", "Console TLS enabled with multi-certificate SNI support");
return Ok(Some(RustlsConfig::from_config(Arc::new(server_config))));
}
// 2. Revert to the traditional single-certificate mode
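The console TLS setup above now installs the aws-lc-rs provider as the process-wide rustls default instead of ring before building the server config. A minimal sketch of that startup step, assuming rustls 0.23; the `init_crypto_provider` name is illustrative:

use rustls::crypto::CryptoProvider;

fn init_crypto_provider() {
    // install_default only errs when a provider is already installed, which is
    // why the call site above discards the result with `let _ =`.
    let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();

    // From here on, any rustls config built in this process picks up aws-lc-rs.
    let provider = CryptoProvider::get_default().expect("provider installed above");
    assert!(!provider.cipher_suites.is_empty());
}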

@@ -636,50 +636,50 @@ fn extract_metrics_init_params(uri: &Uri) -> MetricsParams {
for param in params {
let mut parts = param.split('=');
if let Some(key) = parts.next() {
if key == "disks" {
if let Some(value) = parts.next() {
mp.disks = value.to_string();
}
if key == "disks"
&& let Some(value) = parts.next()
{
mp.disks = value.to_string();
}
if key == "hosts" {
if let Some(value) = parts.next() {
mp.hosts = value.to_string();
}
if key == "hosts"
&& let Some(value) = parts.next()
{
mp.hosts = value.to_string();
}
if key == "interval" {
if let Some(value) = parts.next() {
mp.tick = value.to_string();
}
if key == "interval"
&& let Some(value) = parts.next()
{
mp.tick = value.to_string();
}
if key == "n" {
if let Some(value) = parts.next() {
mp.n = value.parse::<u64>().unwrap_or(u64::MAX);
}
if key == "n"
&& let Some(value) = parts.next()
{
mp.n = value.parse::<u64>().unwrap_or(u64::MAX);
}
if key == "types" {
if let Some(value) = parts.next() {
mp.types = value.parse::<u32>().unwrap_or_default();
}
if key == "types"
&& let Some(value) = parts.next()
{
mp.types = value.parse::<u32>().unwrap_or_default();
}
if key == "by-disk" {
if let Some(value) = parts.next() {
mp.by_disk = value.to_string();
}
if key == "by-disk"
&& let Some(value) = parts.next()
{
mp.by_disk = value.to_string();
}
if key == "by-host" {
if let Some(value) = parts.next() {
mp.by_host = value.to_string();
}
if key == "by-host"
&& let Some(value) = parts.next()
{
mp.by_host = value.to_string();
}
if key == "by-jobID" {
if let Some(value) = parts.next() {
mp.by_job_id = value.to_string();
}
if key == "by-jobID"
&& let Some(value) = parts.next()
{
mp.by_job_id = value.to_string();
}
if key == "by-depID" {
if let Some(value) = parts.next() {
mp.by_dep_id = value.to_string();
}
if key == "by-depID"
&& let Some(value) = parts.next()
{
mp.by_dep_id = value.to_string();
}
}
}
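The loop above consumes `key=value` pairs one at a time, with one `if` block per recognized key. A trimmed, hypothetical sketch of the same parsing, collapsed into a `match` purely for brevity (the real code keeps a separate block per key and carries more fields):

```rust
#[derive(Debug, Default)]
struct MetricsParams {
    disks: String,
    tick: String,
    n: u64,
}

fn parse_metrics_query(query: &str) -> MetricsParams {
    let mut mp = MetricsParams { n: u64::MAX, ..Default::default() };
    for param in query.split('&') {
        let mut parts = param.split('=');
        if let Some(key) = parts.next()
            && let Some(value) = parts.next()
        {
            match key {
                "disks" => mp.disks = value.to_string(),
                "interval" => mp.tick = value.to_string(),
                "n" => mp.n = value.parse().unwrap_or(u64::MAX),
                _ => {}
            }
        }
    }
    mp
}

fn main() {
    let mp = parse_metrics_query("disks=/data1&interval=5s&n=10");
    assert_eq!(mp.disks, "/data1");
    assert_eq!(mp.tick, "5s");
    assert_eq!(mp.n, 10);
}
```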
@@ -830,10 +830,10 @@ fn extract_heal_init_params(body: &Bytes, uri: &Uri, params: Params<'_, '_>) ->
for param in params {
let mut parts = param.split('=');
if let Some(key) = parts.next() {
if key == "clientToken" {
if let Some(value) = parts.next() {
hip.client_token = value.to_string();
}
if key == "clientToken"
&& let Some(value) = parts.next()
{
hip.client_token = value.to_string();
}
if key == "forceStart" && parts.next().is_some() {
hip.force_start = true;

View File

@@ -277,10 +277,11 @@ impl Operation for UpdateGroupMembers {
} else {
warn!("add group members");
if let Err(err) = iam_store.get_group_description(&args.group).await {
if is_err_no_such_group(&err) && has_space_be(&args.group) {
                        return Err(s3_error!(InvalidArgument, "no such group"));
}
if let Err(err) = iam_store.get_group_description(&args.group).await
&& is_err_no_such_group(&err)
&& has_space_be(&args.group)
{
                    return Err(s3_error!(InvalidArgument, "no such group"));
}
iam_store.add_users_to_group(&args.group, args.members).await.map_err(|e| {

View File

@@ -96,10 +96,10 @@ impl Operation for AddUser {
return Err(s3_error!(InvalidArgument, "access key is empty"));
}
if let Some(sys_cred) = get_global_action_cred() {
if constant_time_eq(&sys_cred.access_key, ak) {
return Err(s3_error!(InvalidArgument, "can't create user with system access key"));
}
if let Some(sys_cred) = get_global_action_cred()
&& constant_time_eq(&sys_cred.access_key, ak)
{
return Err(s3_error!(InvalidArgument, "can't create user with system access key"));
}
let Ok(iam_store) = rustfs_iam::get() else {
@@ -777,10 +777,10 @@ impl Operation for ImportIam {
let groups: HashMap<String, GroupInfo> = serde_json::from_slice(&file_content)
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
for (group_name, group_info) in groups {
if let Err(e) = iam_store.get_group_description(&group_name).await {
if matches!(e, rustfs_iam::error::Error::NoSuchGroup(_)) || has_space_be(&group_name) {
return Err(s3_error!(InvalidArgument, "group not found or has space be"));
}
if let Err(e) = iam_store.get_group_description(&group_name).await
&& (matches!(e, rustfs_iam::error::Error::NoSuchGroup(_)) || has_space_be(&group_name))
{
return Err(s3_error!(InvalidArgument, "group not found or has space be"));
}
if let Err(e) = iam_store.add_users_to_group(&group_name, group_info.members.clone()).await {

View File

@@ -175,10 +175,10 @@ pub async fn check_key_valid(session_token: &str, access_key: &str) -> S3Result<
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("check claims failed1 {e}")))?;
if !ok {
if let Some(u) = u {
if u.credentials.status == "off" {
return Err(s3_error!(InvalidRequest, "ErrAccessKeyDisabled"));
}
if let Some(u) = u
&& u.credentials.status == "off"
{
return Err(s3_error!(InvalidRequest, "ErrAccessKeyDisabled"));
}
return Err(s3_error!(InvalidRequest, "ErrAccessKeyDisabled"));
@@ -200,10 +200,10 @@ pub async fn check_key_valid(session_token: &str, access_key: &str) -> S3Result<
constant_time_eq(&sys_cred.access_key, &cred.access_key) || constant_time_eq(&cred.parent_user, &sys_cred.access_key);
// permitRootAccess
if let Some(claims) = &cred.claims {
if claims.contains_key(SESSION_POLICY_NAME) {
owner = false
}
if let Some(claims) = &cred.claims
&& claims.contains_key(SESSION_POLICY_NAME)
{
owner = false
}
Ok((cred, owner))
@@ -358,10 +358,10 @@ pub fn get_condition_values(
args.insert("authType".to_owned(), vec![auth_type]);
}
if let Some(lc) = region {
if !lc.is_empty() {
args.insert("LocationConstraint".to_owned(), vec![lc.to_string()]);
}
if let Some(lc) = region
&& !lc.is_empty()
{
args.insert("LocationConstraint".to_owned(), vec![lc.to_string()]);
}
let mut clone_header = header.clone();
@@ -411,23 +411,23 @@ pub fn get_condition_values(
}
}
if let Some(grps_val) = claims.get("groups") {
if let Some(grps_is) = grps_val.as_array() {
let grps = grps_is
.iter()
.filter_map(|g| g.as_str().map(|s| s.to_string()))
.collect::<Vec<String>>();
if !grps.is_empty() {
args.insert("groups".to_string(), grps);
}
if let Some(grps_val) = claims.get("groups")
&& let Some(grps_is) = grps_val.as_array()
{
let grps = grps_is
.iter()
.filter_map(|g| g.as_str().map(|s| s.to_string()))
.collect::<Vec<String>>();
if !grps.is_empty() {
args.insert("groups".to_string(), grps);
}
}
}
if let Some(groups) = &cred.groups {
if !args.contains_key("groups") {
args.insert("groups".to_string(), groups.clone());
}
if let Some(groups) = &cred.groups
&& !args.contains_key("groups")
{
args.insert("groups".to_string(), groups.clone());
}
args
@@ -502,10 +502,10 @@ fn determine_auth_type_and_version(header: &HeaderMap) -> (String, String) {
/// # Returns
/// * `bool` - True if request has JWT, false otherwise
fn is_request_jwt(header: &HeaderMap) -> bool {
if let Some(auth) = header.get("authorization") {
if let Ok(auth_str) = auth.to_str() {
return auth_str.starts_with(JWT_ALGORITHM);
}
if let Some(auth) = header.get("authorization")
&& let Ok(auth_str) = auth.to_str()
{
return auth_str.starts_with(JWT_ALGORITHM);
}
false
}
@@ -518,10 +518,10 @@ fn is_request_jwt(header: &HeaderMap) -> bool {
/// # Returns
/// * `bool` - True if request has AWS Signature Version '4', false otherwise
fn is_request_signature_v4(header: &HeaderMap) -> bool {
if let Some(auth) = header.get("authorization") {
if let Ok(auth_str) = auth.to_str() {
return auth_str.starts_with(SIGN_V4_ALGORITHM);
}
if let Some(auth) = header.get("authorization")
&& let Ok(auth_str) = auth.to_str()
{
return auth_str.starts_with(SIGN_V4_ALGORITHM);
}
false
}
@@ -534,10 +534,10 @@ fn is_request_signature_v4(header: &HeaderMap) -> bool {
/// # Returns
/// * `bool` - True if request has AWS Signature Version '2', false otherwise
fn is_request_signature_v2(header: &HeaderMap) -> bool {
if let Some(auth) = header.get("authorization") {
if let Ok(auth_str) = auth.to_str() {
return !auth_str.starts_with(SIGN_V4_ALGORITHM) && auth_str.starts_with(SIGN_V2_ALGORITHM);
}
if let Some(auth) = header.get("authorization")
&& let Ok(auth_str) = auth.to_str()
{
return !auth_str.starts_with(SIGN_V4_ALGORITHM) && auth_str.starts_with(SIGN_V2_ALGORITHM);
}
false
}
@@ -578,40 +578,40 @@ fn is_request_presigned_signature_v2(header: &HeaderMap) -> bool {
/// # Returns
/// * `bool` - True if request has AWS Post policy Signature Version '4', false otherwise
fn is_request_post_policy_signature_v4(header: &HeaderMap) -> bool {
if let Some(content_type) = header.get("content-type") {
if let Ok(ct) = content_type.to_str() {
return ct.contains("multipart/form-data");
}
if let Some(content_type) = header.get("content-type")
&& let Ok(ct) = content_type.to_str()
{
return ct.contains("multipart/form-data");
}
false
}
/// Verify if the request has AWS Streaming Signature Version '4'
fn is_request_sign_streaming_v4(header: &HeaderMap) -> bool {
if let Some(content_sha256) = header.get("x-amz-content-sha256") {
if let Ok(sha256_str) = content_sha256.to_str() {
return sha256_str == STREAMING_CONTENT_SHA256;
}
if let Some(content_sha256) = header.get("x-amz-content-sha256")
&& let Ok(sha256_str) = content_sha256.to_str()
{
return sha256_str == STREAMING_CONTENT_SHA256;
}
false
}
// Verify if the request has AWS Streaming Signature Version '4' with trailer
fn is_request_sign_streaming_trailer_v4(header: &HeaderMap) -> bool {
if let Some(content_sha256) = header.get("x-amz-content-sha256") {
if let Ok(sha256_str) = content_sha256.to_str() {
return sha256_str == STREAMING_CONTENT_SHA256_TRAILER;
}
if let Some(content_sha256) = header.get("x-amz-content-sha256")
&& let Ok(sha256_str) = content_sha256.to_str()
{
return sha256_str == STREAMING_CONTENT_SHA256_TRAILER;
}
false
}
// Verify if the request has AWS Streaming Signature Version '4' with unsigned content and trailer
fn is_request_unsigned_trailer_v4(header: &HeaderMap) -> bool {
if let Some(content_sha256) = header.get("x-amz-content-sha256") {
if let Ok(sha256_str) = content_sha256.to_str() {
return sha256_str == UNSIGNED_PAYLOAD_TRAILER;
}
if let Some(content_sha256) = header.get("x-amz-content-sha256")
&& let Ok(sha256_str) = content_sha256.to_str()
{
return sha256_str == UNSIGNED_PAYLOAD_TRAILER;
}
false
}
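All of the detection helpers in this file share one shape: fetch a single header, convert it to `&str`, and compare it with a marker. A self-contained usage sketch for the SigV4 case; the constant value below is the conventional AWS algorithm tag and is an assumption here, since the real constant is defined outside this diff.

```rust
use http::{header::AUTHORIZATION, HeaderMap, HeaderValue};

// Assumed value: SigV4 Authorization headers conventionally start with this tag.
const SIGN_V4_ALGORITHM: &str = "AWS4-HMAC-SHA256";

fn is_request_signature_v4(header: &HeaderMap) -> bool {
    if let Some(auth) = header.get(AUTHORIZATION)
        && let Ok(auth_str) = auth.to_str()
    {
        return auth_str.starts_with(SIGN_V4_ALGORITHM);
    }
    false
}

fn main() {
    let mut headers = HeaderMap::new();
    headers.insert(
        AUTHORIZATION,
        HeaderValue::from_static("AWS4-HMAC-SHA256 Credential=EXAMPLEKEY/20260101/us-east-1/s3/aws4_request"),
    );
    assert!(is_request_signature_v4(&headers));
}
```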
@@ -634,10 +634,10 @@ pub fn get_query_param<'a>(query: &'a str, param_name: &str) -> Option<&'a str>
for pair in query.split('&') {
let mut parts = pair.splitn(2, '=');
if let (Some(key), Some(value)) = (parts.next(), parts.next()) {
if key.to_lowercase() == param_name {
return Some(value);
}
if let (Some(key), Some(value)) = (parts.next(), parts.next())
&& key.to_lowercase() == param_name
{
return Some(value);
}
}
None

View File

@@ -193,18 +193,16 @@ impl From<ApiError> for S3Error {
impl From<StorageError> for ApiError {
fn from(err: StorageError) -> Self {
// Special handling for Io errors that may contain ChecksumMismatch
if let StorageError::Io(ref io_err) = err {
if let Some(inner) = io_err.get_ref() {
if inner.downcast_ref::<rustfs_rio::ChecksumMismatch>().is_some()
|| inner.downcast_ref::<rustfs_rio::BadDigest>().is_some()
{
return ApiError {
code: S3ErrorCode::BadDigest,
message: ApiError::error_code_to_message(&S3ErrorCode::BadDigest),
source: Some(Box::new(err)),
};
}
}
if let StorageError::Io(ref io_err) = err
&& let Some(inner) = io_err.get_ref()
&& (inner.downcast_ref::<rustfs_rio::ChecksumMismatch>().is_some()
|| inner.downcast_ref::<rustfs_rio::BadDigest>().is_some())
{
return ApiError {
code: S3ErrorCode::BadDigest,
message: ApiError::error_code_to_message(&S3ErrorCode::BadDigest),
source: Some(Box::new(err)),
};
}
let code = match &err {

View File

@@ -735,14 +735,14 @@ impl StorageBackend<super::server::FtpsUser> for FtpsDriver {
match s3_client.list_objects_v2(list_input).await {
Ok(output) => {
if let Some(objects) = output.contents {
if !objects.is_empty() {
debug!("FTPS RMD - bucket '{}' is not empty, cannot delete", bucket);
return Err(Error::new(
ErrorKind::PermanentFileNotAvailable,
format!("Bucket '{}' is not empty", bucket),
));
}
if let Some(objects) = output.contents
&& !objects.is_empty()
{
debug!("FTPS RMD - bucket '{}' is not empty, cannot delete", bucket);
return Err(Error::new(
ErrorKind::PermanentFileNotAvailable,
format!("Bucket '{}' is not empty", bucket),
));
}
}
Err(e) => {

View File

@@ -98,16 +98,16 @@ impl FtpsConfig {
));
}
if let Some(path) = &self.cert_file {
if !tokio::fs::try_exists(path).await.unwrap_or(false) {
return Err(FtpsInitError::InvalidConfig(format!("Certificate file not found: {}", path)));
}
if let Some(path) = &self.cert_file
&& !tokio::fs::try_exists(path).await.unwrap_or(false)
{
return Err(FtpsInitError::InvalidConfig(format!("Certificate file not found: {}", path)));
}
if let Some(path) = &self.key_file {
if !tokio::fs::try_exists(path).await.unwrap_or(false) {
return Err(FtpsInitError::InvalidConfig(format!("Key file not found: {}", path)));
}
if let Some(path) = &self.key_file
&& !tokio::fs::try_exists(path).await.unwrap_or(false)
{
return Err(FtpsInitError::InvalidConfig(format!("Key file not found: {}", path)));
}
// Validate passive ports format

View File

@@ -753,16 +753,16 @@ impl Handler for SftpHandler {
match s3_client.list_objects_v2(list_input).await {
Ok(output) => {
if let Some(objects) = output.contents {
if !objects.is_empty() {
debug!("SFTP REMOVE - bucket '{}' is not empty, cannot delete", bucket);
return Ok(Status {
id,
status_code: StatusCode::Failure,
error_message: format!("Bucket '{}' is not empty", bucket),
language_tag: "en".into(),
});
}
if let Some(objects) = output.contents
&& !objects.is_empty()
{
debug!("SFTP REMOVE - bucket '{}' is not empty, cannot delete", bucket);
return Ok(Status {
id,
status_code: StatusCode::Failure,
error_message: format!("Bucket '{}' is not empty", bucket),
language_tag: "en".into(),
});
}
}
Err(e) => {

View File

@@ -696,10 +696,10 @@ fn compare_keys(stored_key: &str, client_key_base64: &str) -> bool {
return true;
}
if let Ok(stored_bytes) = BASE64.decode(stored_key_data) {
if let Ok(client_bytes) = BASE64.decode(client_key_base64) {
return stored_bytes == client_bytes;
}
if let Ok(stored_bytes) = BASE64.decode(stored_key_data)
&& let Ok(client_bytes) = BASE64.decode(client_key_base64)
{
return stored_bytes == client_bytes;
}
false

View File

@@ -260,12 +260,12 @@ async fn walk_dir(path: PathBuf, cert_name: &str, cert_data: &mut Vec<u8>) {
// Only check direct subdirectories, no deeper recursion
if let Ok(mut sub_rd) = tokio::fs::read_dir(&entry.path()).await {
while let Ok(Some(sub_entry)) = sub_rd.next_entry().await {
if let Ok(sub_ft) = sub_entry.file_type().await {
if sub_ft.is_file() {
load_if_matches(&sub_entry, cert_name, cert_data).await;
}
// Ignore subdirectories and symlinks in subdirs to limit to one level
if let Ok(sub_ft) = sub_entry.file_type().await
&& sub_ft.is_file()
{
load_if_matches(&sub_entry, cert_name, cert_data).await;
}
// Ignore subdirectories and symlinks in subdirs to limit to one level
}
}
} else if ft.is_symlink() {
@@ -277,12 +277,12 @@ async fn walk_dir(path: PathBuf, cert_name: &str, cert_data: &mut Vec<u8>) {
// Treat as directory but only check its direct contents
if let Ok(mut sub_rd) = tokio::fs::read_dir(&entry.path()).await {
while let Ok(Some(sub_entry)) = sub_rd.next_entry().await {
if let Ok(sub_ft) = sub_entry.file_type().await {
if sub_ft.is_file() {
load_if_matches(&sub_entry, cert_name, cert_data).await;
}
// Ignore deeper levels
if let Ok(sub_ft) = sub_entry.file_type().await
&& sub_ft.is_file()
{
load_if_matches(&sub_entry, cert_name, cert_data).await;
}
// Ignore deeper levels
}
}
}

View File

@@ -282,40 +282,35 @@ impl Predicate for CompressionPredicate {
// CompressionLayer before calling this predicate, so we don't need to check them here.
// Check Content-Length header for minimum size threshold
if let Some(content_length) = response.headers().get(http::header::CONTENT_LENGTH) {
if let Ok(length_str) = content_length.to_str() {
if let Ok(length) = length_str.parse::<u64>() {
if length < self.config.min_size {
debug!(
"Skipping compression for small response: size={} bytes, min_size={}",
length, self.config.min_size
);
return false;
}
}
}
if let Some(content_length) = response.headers().get(http::header::CONTENT_LENGTH)
&& let Ok(length_str) = content_length.to_str()
&& let Ok(length) = length_str.parse::<u64>()
&& length < self.config.min_size
{
debug!(
"Skipping compression for small response: size={} bytes, min_size={}",
length, self.config.min_size
);
return false;
}
// Check if the response matches configured extension via Content-Disposition
if let Some(content_disposition) = response.headers().get(http::header::CONTENT_DISPOSITION) {
if let Ok(cd) = content_disposition.to_str() {
if let Some(filename) = CompressionConfig::extract_filename_from_content_disposition(cd) {
if self.config.matches_extension(&filename) {
debug!("Compressing response: filename '{}' matches configured extension", filename);
return true;
}
}
}
if let Some(content_disposition) = response.headers().get(http::header::CONTENT_DISPOSITION)
&& let Ok(cd) = content_disposition.to_str()
&& let Some(filename) = CompressionConfig::extract_filename_from_content_disposition(cd)
&& self.config.matches_extension(&filename)
{
debug!("Compressing response: filename '{}' matches configured extension", filename);
return true;
}
// Check if the response matches configured MIME type
if let Some(content_type) = response.headers().get(http::header::CONTENT_TYPE) {
if let Ok(ct) = content_type.to_str() {
if self.config.matches_mime_type(ct) {
debug!("Compressing response: Content-Type '{}' matches configured MIME pattern", ct);
return true;
}
}
if let Some(content_type) = response.headers().get(http::header::CONTENT_TYPE)
&& let Ok(ct) = content_type.to_str()
&& self.config.matches_mime_type(ct)
{
debug!("Compressing response: Content-Type '{}' matches configured MIME pattern", ct);
return true;
}
// Default: don't compress (whitelist approach)

View File

@@ -139,13 +139,13 @@ pub async fn start_http_server(
};
// If address is IPv6 try to enable dual-stack; on failure, switch to IPv4 socket.
if server_addr.is_ipv6() {
if let Err(e) = socket.set_only_v6(false) {
warn!("Failed to set IPV6_V6ONLY=false, attempting IPv4 fallback: {}", e);
let ipv4_addr = SocketAddr::new(std::net::Ipv4Addr::UNSPECIFIED.into(), server_addr.port());
server_addr = ipv4_addr;
socket = socket2::Socket::new(socket2::Domain::IPV4, socket2::Type::STREAM, Some(socket2::Protocol::TCP))?;
}
if server_addr.is_ipv6()
&& let Err(e) = socket.set_only_v6(false)
{
warn!("Failed to set IPV6_V6ONLY=false, attempting IPv4 fallback: {}", e);
let ipv4_addr = SocketAddr::new(std::net::Ipv4Addr::UNSPECIFIED.into(), server_addr.port());
server_addr = ipv4_addr;
socket = socket2::Socket::new(socket2::Domain::IPV4, socket2::Type::STREAM, Some(socket2::Protocol::TCP))?;
}
// Common setup for both IPv4 and successful dual-stack IPv6
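For reference, the dual-stack bind that this fallback protects can be sketched on its own: open an IPv6 socket, clear `IPV6_V6ONLY` so IPv4 clients are accepted as well, then hand the listener to Tokio. Names and the backlog value are illustrative, and the function must be called from inside a Tokio runtime.

```rust
use std::net::{Ipv6Addr, SocketAddr};

use socket2::{Domain, Protocol, Socket, Type};
use tokio::net::TcpListener;

// Sketch: bind one listener that serves both IPv6 and IPv4 clients.
fn bind_dual_stack(port: u16) -> std::io::Result<TcpListener> {
    let addr = SocketAddr::new(Ipv6Addr::UNSPECIFIED.into(), port);
    let socket = Socket::new(Domain::IPV6, Type::STREAM, Some(Protocol::TCP))?;
    socket.set_only_v6(false)?; // the code above falls back to IPv4 if this fails
    socket.set_reuse_address(true)?;
    socket.set_nonblocking(true)?; // required before handing the socket to Tokio
    socket.bind(&addr.into())?;
    socket.listen(1024)?;
    TcpListener::from_std(socket.into())
}
```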
@@ -434,38 +434,38 @@ async fn setup_tls_acceptor(tls_path: &str) -> Result<Option<TlsAcceptor>> {
debug!("Found TLS directory, checking for certificates");
// Make sure to use a modern encryption suite
let _ = rustls::crypto::ring::default_provider().install_default();
let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
let mtls_verifier = rustfs_utils::build_webpki_client_verifier(tls_path)?;
// 1. Attempt to load all certificates in the directory (multi-certificate support, for SNI)
if let Ok(cert_key_pairs) = rustfs_utils::load_all_certs_from_directory(tls_path) {
if !cert_key_pairs.is_empty() {
debug!("Found {} certificates, creating SNI-aware multi-cert resolver", cert_key_pairs.len());
if let Ok(cert_key_pairs) = rustfs_utils::load_all_certs_from_directory(tls_path)
&& !cert_key_pairs.is_empty()
{
debug!("Found {} certificates, creating SNI-aware multi-cert resolver", cert_key_pairs.len());
// Create an SNI-enabled certificate resolver
let resolver = rustfs_utils::create_multi_cert_resolver(cert_key_pairs)?;
// Create an SNI-enabled certificate resolver
let resolver = rustfs_utils::create_multi_cert_resolver(cert_key_pairs)?;
// Configure the server to enable SNI support
let mut server_config = if let Some(verifier) = mtls_verifier.clone() {
ServerConfig::builder()
.with_client_cert_verifier(verifier)
.with_cert_resolver(Arc::new(resolver))
} else {
ServerConfig::builder()
.with_no_client_auth()
.with_cert_resolver(Arc::new(resolver))
};
// Configure the server to enable SNI support
let mut server_config = if let Some(verifier) = mtls_verifier.clone() {
ServerConfig::builder()
.with_client_cert_verifier(verifier)
.with_cert_resolver(Arc::new(resolver))
} else {
ServerConfig::builder()
.with_no_client_auth()
.with_cert_resolver(Arc::new(resolver))
};
// Configure ALPN protocol priority
server_config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec(), b"http/1.0".to_vec()];
// Configure ALPN protocol priority
server_config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec(), b"http/1.0".to_vec()];
// Log SNI requests
if rustfs_utils::tls_key_log() {
server_config.key_log = Arc::new(rustls::KeyLogFile::new());
}
return Ok(Some(TlsAcceptor::from(Arc::new(server_config))));
// Log SNI requests
if rustfs_utils::tls_key_log() {
server_config.key_log = Arc::new(rustls::KeyLogFile::new());
}
return Ok(Some(TlsAcceptor::from(Arc::new(server_config))));
}
// 2. Revert to the traditional single-certificate mode
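Once a `ServerConfig` like the one above exists, serving TLS is a matter of wrapping each accepted TCP stream. A usage sketch with tokio-rustls; the function and variable names are illustrative:

```rust
use std::sync::Arc;

use tokio::net::{TcpListener, TcpStream};
use tokio_rustls::TlsAcceptor;

// Sketch: accept one TLS connection using a prepared rustls ServerConfig.
async fn accept_one(
    listener: &TcpListener,
    config: Arc<rustls::ServerConfig>,
) -> std::io::Result<tokio_rustls::server::TlsStream<TcpStream>> {
    let acceptor = TlsAcceptor::from(config);
    let (tcp, peer) = listener.accept().await?;
    // The handshake (including SNI-based certificate selection) happens here.
    let tls = acceptor.accept(tcp).await?;
    println!("TLS session established with {peer}");
    Ok(tls)
}
```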
@@ -520,7 +520,8 @@ struct ConnectionContext {
/// 2. Build a complete service stack for this connection, including S3, RPC services, and all middleware.
/// 3. Use Hyper to handle HTTP requests on this connection.
/// 4. Incorporate connections into the management of elegant closures.
#[instrument(skip_all, fields(peer_addr = %socket.peer_addr().map(|a| a.to_string()).unwrap_or_else(|_| "unknown".to_string())))]
#[instrument(skip_all, fields(peer_addr = %socket.peer_addr().map(|a| a.to_string()).unwrap_or_else(|_| "unknown".to_string())
))]
fn process_connection(
socket: TcpStream,
tls_acceptor: Option<Arc<TlsAcceptor>>,

View File

@@ -519,13 +519,13 @@ fn validate_list_object_unordered_with_delimiter(delimiter: Option<&Delimiter>,
return Ok(());
};
if let Ok(params) = from_bytes::<ListObjectUnorderedQuery>(query.as_bytes()) {
if params.allow_unordered.as_deref() == Some("true") {
return Err(S3Error::with_message(
S3ErrorCode::InvalidArgument,
"The allow-unordered parameter cannot be used when delimiter is specified.".to_string(),
));
}
if let Ok(params) = from_bytes::<ListObjectUnorderedQuery>(query.as_bytes())
&& params.allow_unordered.as_deref() == Some("true")
{
return Err(S3Error::with_message(
S3ErrorCode::InvalidArgument,
"The allow-unordered parameter cannot be used when delimiter is specified.".to_string(),
));
}
Ok(())
@@ -735,8 +735,8 @@ impl FS {
let mut checksum_sha256 = input.checksum_sha256;
let mut checksum_crc64nvme = input.checksum_crc64nvme;
if let Some(alg) = &input.checksum_algorithm {
if let Some(Some(checksum_str)) = req.trailing_headers.as_ref().map(|trailer| {
if let Some(alg) = &input.checksum_algorithm
&& let Some(Some(checksum_str)) = req.trailing_headers.as_ref().map(|trailer| {
let key = match alg.as_str() {
ChecksumAlgorithm::CRC32 => rustfs_rio::ChecksumType::CRC32.key(),
ChecksumAlgorithm::CRC32C => rustfs_rio::ChecksumType::CRC32C.key(),
@@ -750,15 +750,15 @@ impl FS {
.get(key.unwrap_or_default())
.and_then(|value| value.to_str().ok().map(|s| s.to_string()))
})
}) {
match alg.as_str() {
ChecksumAlgorithm::CRC32 => checksum_crc32 = checksum_str,
ChecksumAlgorithm::CRC32C => checksum_crc32c = checksum_str,
ChecksumAlgorithm::SHA1 => checksum_sha1 = checksum_str,
ChecksumAlgorithm::SHA256 => checksum_sha256 = checksum_str,
ChecksumAlgorithm::CRC64NVME => checksum_crc64nvme = checksum_str,
_ => (),
}
})
{
match alg.as_str() {
ChecksumAlgorithm::CRC32 => checksum_crc32 = checksum_str,
ChecksumAlgorithm::CRC32C => checksum_crc32c = checksum_str,
ChecksumAlgorithm::SHA1 => checksum_sha1 = checksum_str,
ChecksumAlgorithm::SHA256 => checksum_sha256 = checksum_str,
ChecksumAlgorithm::CRC64NVME => checksum_crc64nvme = checksum_str,
_ => (),
}
}
@@ -977,64 +977,63 @@ impl S3 for FS {
let mut reader = HashReader::new(reader, length, actual_size, None, None, false).map_err(ApiError::from)?;
if let Some(ref sse_alg) = effective_sse {
if is_managed_sse(sse_alg) {
let material =
create_managed_encryption_material(&bucket, &key, sse_alg, effective_kms_key_id.clone(), actual_size).await?;
if let Some(ref sse_alg) = effective_sse
&& is_managed_sse(sse_alg)
{
let material =
create_managed_encryption_material(&bucket, &key, sse_alg, effective_kms_key_id.clone(), actual_size).await?;
let ManagedEncryptionMaterial {
data_key,
headers,
kms_key_id: kms_key_used,
} = material;
let ManagedEncryptionMaterial {
data_key,
headers,
kms_key_id: kms_key_used,
} = material;
let key_bytes = data_key.plaintext_key;
let nonce = data_key.nonce;
let key_bytes = data_key.plaintext_key;
let nonce = data_key.nonce;
src_info.user_defined.extend(headers.into_iter());
effective_kms_key_id = Some(kms_key_used.clone());
src_info.user_defined.extend(headers.into_iter());
effective_kms_key_id = Some(kms_key_used.clone());
let encrypt_reader = EncryptReader::new(reader, key_bytes, nonce);
reader = HashReader::new(Box::new(encrypt_reader), -1, actual_size, None, None, false).map_err(ApiError::from)?;
}
let encrypt_reader = EncryptReader::new(reader, key_bytes, nonce);
reader = HashReader::new(Box::new(encrypt_reader), -1, actual_size, None, None, false).map_err(ApiError::from)?;
}
// Apply SSE-C encryption if customer-provided key is specified
if let (Some(sse_alg), Some(sse_key), Some(sse_md5)) = (&sse_customer_algorithm, &sse_customer_key, &sse_customer_key_md5)
&& sse_alg.as_str() == "AES256"
{
if sse_alg.as_str() == "AES256" {
let key_bytes = BASE64_STANDARD.decode(sse_key.as_str()).map_err(|e| {
error!("Failed to decode SSE-C key: {}", e);
ApiError::from(StorageError::other("Invalid SSE-C key"))
})?;
let key_bytes = BASE64_STANDARD.decode(sse_key.as_str()).map_err(|e| {
error!("Failed to decode SSE-C key: {}", e);
ApiError::from(StorageError::other("Invalid SSE-C key"))
})?;
if key_bytes.len() != 32 {
return Err(ApiError::from(StorageError::other("SSE-C key must be 32 bytes")).into());
}
let computed_md5 = BASE64_STANDARD.encode(md5::compute(&key_bytes).0);
if computed_md5 != sse_md5.as_str() {
return Err(ApiError::from(StorageError::other("SSE-C key MD5 mismatch")).into());
}
// Store original size before encryption
src_info
.user_defined
.insert("x-amz-server-side-encryption-customer-original-size".to_string(), actual_size.to_string());
// SAFETY: The length of `key_bytes` is checked to be 32 bytes above,
// so this conversion cannot fail.
let key_array: [u8; 32] = key_bytes.try_into().expect("key length already checked");
// Generate deterministic nonce from bucket-key
let nonce_source = format!("{bucket}-{key}");
let nonce_hash = md5::compute(nonce_source.as_bytes());
let nonce: [u8; 12] = nonce_hash.0[..12]
.try_into()
.expect("MD5 hash is always 16 bytes; taking first 12 bytes for nonce is safe");
let encrypt_reader = EncryptReader::new(reader, key_array, nonce);
reader = HashReader::new(Box::new(encrypt_reader), -1, actual_size, None, None, false).map_err(ApiError::from)?;
if key_bytes.len() != 32 {
return Err(ApiError::from(StorageError::other("SSE-C key must be 32 bytes")).into());
}
let computed_md5 = BASE64_STANDARD.encode(md5::compute(&key_bytes).0);
if computed_md5 != sse_md5.as_str() {
return Err(ApiError::from(StorageError::other("SSE-C key MD5 mismatch")).into());
}
// Store original size before encryption
src_info
.user_defined
.insert("x-amz-server-side-encryption-customer-original-size".to_string(), actual_size.to_string());
// SAFETY: The length of `key_bytes` is checked to be 32 bytes above,
// so this conversion cannot fail.
let key_array: [u8; 32] = key_bytes.try_into().expect("key length already checked");
// Generate deterministic nonce from bucket-key
let nonce_source = format!("{bucket}-{key}");
let nonce_hash = md5::compute(nonce_source.as_bytes());
let nonce: [u8; 12] = nonce_hash.0[..12]
.try_into()
.expect("MD5 hash is always 16 bytes; taking first 12 bytes for nonce is safe");
let encrypt_reader = EncryptReader::new(reader, key_array, nonce);
reader = HashReader::new(Box::new(encrypt_reader), -1, actual_size, None, None, false).map_err(ApiError::from)?;
}
src_info.put_object_reader = Some(PutObjReader::new(reader));
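The SSE-C branch above derives its 12-byte nonce deterministically from the MD5 digest of `"{bucket}-{key}"`, keeping the first 12 of the 16 digest bytes. A standalone sketch of just that derivation, using the same `md5` crate call that appears in the diff:

```rust
// Derive the deterministic 12-byte SSE-C nonce from the bucket and object key.
fn derive_ssec_nonce(bucket: &str, key: &str) -> [u8; 12] {
    let digest = md5::compute(format!("{bucket}-{key}").as_bytes());
    // MD5 always yields 16 bytes, so taking the first 12 cannot fail.
    digest.0[..12].try_into().expect("digest has 16 bytes")
}

fn main() {
    let a = derive_ssec_nonce("photos", "2026/cat.png");
    let b = derive_ssec_nonce("photos", "2026/cat.png");
    assert_eq!(a, b, "the same bucket/key pair always yields the same nonce");
}
```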
@@ -1246,15 +1245,14 @@ impl S3 for FS {
let restore_object = Uuid::new_v4().to_string();
//if let Some(rreq) = rreq {
if let Some(output_location) = &rreq.output_location {
if let Some(s3) = &output_location.s3 {
if !s3.bucket_name.is_empty() {
header.insert(
X_AMZ_RESTORE_OUTPUT_PATH,
format!("{}{}{}", s3.bucket_name, s3.prefix, restore_object).parse().unwrap(),
);
}
}
if let Some(output_location) = &rreq.output_location
&& let Some(s3) = &output_location.s3
&& !s3.bucket_name.is_empty()
{
header.insert(
X_AMZ_RESTORE_OUTPUT_PATH,
format!("{}{}{}", s3.bucket_name, s3.prefix, restore_object).parse().unwrap(),
);
}
//}
/*send_event(EventArgs {
@@ -1730,24 +1728,23 @@ impl S3 for FS {
};
for dobjs in delete_results.iter() {
if let Some(dobj) = &dobjs.delete_object {
if replicate_deletes
&& (dobj.delete_marker_replication_status() == ReplicationStatusType::Pending
|| dobj.version_purge_status() == VersionPurgeStatusType::Pending)
{
let mut dobj = dobj.clone();
if is_dir_object(dobj.object_name.as_str()) && dobj.version_id.is_none() {
dobj.version_id = Some(Uuid::nil());
}
let deleted_object = DeletedObjectReplicationInfo {
delete_object: dobj,
bucket: bucket.clone(),
event_type: REPLICATE_INCOMING_DELETE.to_string(),
..Default::default()
};
schedule_replication_delete(deleted_object).await;
if let Some(dobj) = &dobjs.delete_object
&& replicate_deletes
&& (dobj.delete_marker_replication_status() == ReplicationStatusType::Pending
|| dobj.version_purge_status() == VersionPurgeStatusType::Pending)
{
let mut dobj = dobj.clone();
if is_dir_object(dobj.object_name.as_str()) && dobj.version_id.is_none() {
dobj.version_id = Some(Uuid::nil());
}
let deleted_object = DeletedObjectReplicationInfo {
delete_object: dobj,
bucket: bucket.clone(),
event_type: REPLICATE_INCOMING_DELETE.to_string(),
..Default::default()
};
schedule_replication_delete(deleted_object).await;
}
}
@@ -1854,96 +1851,98 @@ impl S3 for FS {
let cache_key = ConcurrencyManager::make_cache_key(&bucket, &key, version_id.as_deref());
// Only attempt cache lookup if caching is enabled and for objects without range/part requests
if manager.is_cache_enabled() && part_number.is_none() && range.is_none() {
if let Some(cached) = manager.get_cached_object(&cache_key).await {
let cache_serve_duration = request_start.elapsed();
if manager.is_cache_enabled()
&& part_number.is_none()
&& range.is_none()
&& let Some(cached) = manager.get_cached_object(&cache_key).await
{
let cache_serve_duration = request_start.elapsed();
debug!("Serving object from response cache: {} (latency: {:?})", cache_key, cache_serve_duration);
debug!("Serving object from response cache: {} (latency: {:?})", cache_key, cache_serve_duration);
#[cfg(feature = "metrics")]
{
use metrics::{counter, histogram};
counter!("rustfs.get.object.cache.served.total").increment(1);
histogram!("rustfs.get.object.cache.serve.duration.seconds").record(cache_serve_duration.as_secs_f64());
histogram!("rustfs.get.object.cache.size.bytes").record(cached.body.len() as f64);
}
#[cfg(feature = "metrics")]
{
use metrics::{counter, histogram};
counter!("rustfs.get.object.cache.served.total").increment(1);
histogram!("rustfs.get.object.cache.serve.duration.seconds").record(cache_serve_duration.as_secs_f64());
histogram!("rustfs.get.object.cache.size.bytes").record(cached.body.len() as f64);
}
// Build response from cached data with full metadata
let body_data = cached.body.clone();
let body = Some(StreamingBlob::wrap::<_, Infallible>(futures::stream::once(async move { Ok(body_data) })));
// Build response from cached data with full metadata
let body_data = cached.body.clone();
let body = Some(StreamingBlob::wrap::<_, Infallible>(futures::stream::once(async move { Ok(body_data) })));
// Parse last_modified from RFC3339 string if available
let last_modified = cached
// Parse last_modified from RFC3339 string if available
let last_modified = cached
.last_modified
.as_ref()
.and_then(|s| match OffsetDateTime::parse(s, &Rfc3339) {
Ok(dt) => Some(Timestamp::from(dt)),
Err(e) => {
warn!("Failed to parse cached last_modified '{}': {}", s, e);
None
}
});
// Parse content_type
let content_type = cached.content_type.as_ref().and_then(|ct| ContentType::from_str(ct).ok());
let output = GetObjectOutput {
body,
content_length: Some(cached.content_length),
accept_ranges: Some("bytes".to_string()),
e_tag: cached.e_tag.as_ref().map(|etag| to_s3s_etag(etag)),
last_modified,
content_type,
cache_control: cached.cache_control.clone(),
content_disposition: cached.content_disposition.clone(),
content_encoding: cached.content_encoding.clone(),
content_language: cached.content_language.clone(),
version_id: cached.version_id.clone(),
delete_marker: Some(cached.delete_marker),
tag_count: cached.tag_count,
metadata: if cached.user_metadata.is_empty() {
None
} else {
Some(cached.user_metadata.clone())
},
..Default::default()
};
// CRITICAL: Build ObjectInfo for event notification before calling complete().
// This ensures S3 bucket notifications (s3:GetObject events) include proper
// object metadata for event-driven workflows (Lambda, SNS, SQS).
let event_info = ObjectInfo {
bucket: bucket.clone(),
name: key.clone(),
storage_class: cached.storage_class.clone(),
mod_time: cached
.last_modified
.as_ref()
.and_then(|s| match OffsetDateTime::parse(s, &Rfc3339) {
Ok(dt) => Some(Timestamp::from(dt)),
Err(e) => {
warn!("Failed to parse cached last_modified '{}': {}", s, e);
None
}
});
.and_then(|s| OffsetDateTime::parse(s, &Rfc3339).ok()),
size: cached.content_length,
actual_size: cached.content_length,
is_dir: false,
user_defined: cached.user_metadata.clone(),
version_id: cached.version_id.as_ref().and_then(|v| Uuid::parse_str(v).ok()),
delete_marker: cached.delete_marker,
content_type: cached.content_type.clone(),
content_encoding: cached.content_encoding.clone(),
etag: cached.e_tag.clone(),
..Default::default()
};
// Parse content_type
let content_type = cached.content_type.as_ref().and_then(|ct| ContentType::from_str(ct).ok());
// Set object info and version_id on helper for proper event notification
let version_id_str = req.input.version_id.clone().unwrap_or_default();
helper = helper.object(event_info).version_id(version_id_str);
let output = GetObjectOutput {
body,
content_length: Some(cached.content_length),
accept_ranges: Some("bytes".to_string()),
e_tag: cached.e_tag.as_ref().map(|etag| to_s3s_etag(etag)),
last_modified,
content_type,
cache_control: cached.cache_control.clone(),
content_disposition: cached.content_disposition.clone(),
content_encoding: cached.content_encoding.clone(),
content_language: cached.content_language.clone(),
version_id: cached.version_id.clone(),
delete_marker: Some(cached.delete_marker),
tag_count: cached.tag_count,
metadata: if cached.user_metadata.is_empty() {
None
} else {
Some(cached.user_metadata.clone())
},
..Default::default()
};
// CRITICAL: Build ObjectInfo for event notification before calling complete().
// This ensures S3 bucket notifications (s3:GetObject events) include proper
// object metadata for event-driven workflows (Lambda, SNS, SQS).
let event_info = ObjectInfo {
bucket: bucket.clone(),
name: key.clone(),
storage_class: cached.storage_class.clone(),
mod_time: cached
.last_modified
.as_ref()
.and_then(|s| OffsetDateTime::parse(s, &Rfc3339).ok()),
size: cached.content_length,
actual_size: cached.content_length,
is_dir: false,
user_defined: cached.user_metadata.clone(),
version_id: cached.version_id.as_ref().and_then(|v| Uuid::parse_str(v).ok()),
delete_marker: cached.delete_marker,
content_type: cached.content_type.clone(),
content_encoding: cached.content_encoding.clone(),
etag: cached.e_tag.clone(),
..Default::default()
};
// Set object info and version_id on helper for proper event notification
let version_id_str = req.input.version_id.clone().unwrap_or_default();
helper = helper.object(event_info).version_id(version_id_str);
// Call helper.complete() for cache hits to ensure
// S3 bucket notifications (s3:GetObject events) are triggered.
// This ensures event-driven workflows (Lambda, SNS) work correctly
// for both cache hits and misses.
let result = Ok(S3Response::new(output));
let _ = helper.complete(&result);
return result;
}
// Call helper.complete() for cache hits to ensure
// S3 bucket notifications (s3:GetObject events) are triggered.
// This ensures event-driven workflows (Lambda, SNS) work correctly
// for both cache hits and misses.
let result = Ok(S3Response::new(output));
let _ = helper.complete(&result);
return result;
}
// TODO: getObjectInArchiveFileHandler object = xxx.zip/xxx/xxx.xxx
@@ -1954,10 +1953,10 @@ impl S3 for FS {
let part_number = part_number.map(|v| v as usize);
if let Some(part_num) = part_number {
if part_num == 0 {
return Err(s3_error!(InvalidArgument, "Invalid part number: part number must be greater than 0"));
}
if let Some(part_num) = part_number
&& part_num == 0
{
return Err(s3_error!(InvalidArgument, "Invalid part number: part number must be greater than 0"));
}
let rs = range.map(|v| match v {
@@ -2065,10 +2064,10 @@ impl S3 for FS {
let mut rs = rs;
if let Some(part_number) = part_number {
if rs.is_none() {
rs = HTTPRangeSpec::from_object_info(&info, part_number);
}
if let Some(part_number) = part_number
&& rs.is_none()
{
rs = HTTPRangeSpec::from_object_info(&info, part_number);
}
let mut content_length = info.get_actual_size().map_err(ApiError::from)?;
@@ -2183,24 +2182,23 @@ impl S3 for FS {
}
}
if stored_sse_algorithm.is_none() {
if let Some((key_bytes, nonce, original_size)) =
if stored_sse_algorithm.is_none()
&& let Some((key_bytes, nonce, original_size)) =
decrypt_managed_encryption_key(&bucket, &key, &info.user_defined).await?
{
if info.parts.len() > 1 {
let (reader, plain_size) = decrypt_multipart_managed_stream(final_stream, &info.parts, key_bytes, nonce)
.await
.map_err(ApiError::from)?;
final_stream = reader;
managed_original_size = Some(plain_size);
} else {
let warp_reader = WarpReader::new(final_stream);
let decrypt_reader = DecryptReader::new(warp_reader, key_bytes, nonce);
final_stream = Box::new(decrypt_reader);
managed_original_size = original_size;
}
managed_encryption_applied = true;
{
if info.parts.len() > 1 {
let (reader, plain_size) = decrypt_multipart_managed_stream(final_stream, &info.parts, key_bytes, nonce)
.await
.map_err(ApiError::from)?;
final_stream = reader;
managed_original_size = Some(plain_size);
} else {
let warp_reader = WarpReader::new(final_stream);
let decrypt_reader = DecryptReader::new(warp_reader, key_bytes, nonce);
final_stream = Box::new(decrypt_reader);
managed_original_size = original_size;
}
managed_encryption_applied = true;
}
// For SSE-C encrypted objects, use the original size instead of encrypted size
@@ -2518,10 +2516,10 @@ impl S3 for FS {
let part_number = part_number.map(|v| v as usize);
if let Some(part_num) = part_number {
if part_num == 0 {
return Err(s3_error!(InvalidArgument, "part_number invalid"));
}
if let Some(part_num) = part_number
&& part_num == 0
{
return Err(s3_error!(InvalidArgument, "part_number invalid"));
}
let rs = range.map(|v| match v {
@@ -2558,16 +2556,14 @@ impl S3 for FS {
return Err(S3Error::new(S3ErrorCode::MethodNotAllowed));
}
if let Some(match_etag) = if_none_match {
if let Some(strong_etag) = match_etag.into_etag() {
if info
.etag
.as_ref()
.is_some_and(|etag| ETag::Strong(etag.clone()) == strong_etag)
{
return Err(S3Error::new(S3ErrorCode::NotModified));
}
}
if let Some(match_etag) = if_none_match
&& let Some(strong_etag) = match_etag.into_etag()
&& info
.etag
.as_ref()
.is_some_and(|etag| ETag::Strong(etag.clone()) == strong_etag)
{
return Err(S3Error::new(S3ErrorCode::NotModified));
}
if let Some(modified_since) = if_modified_since {
@@ -2581,22 +2577,21 @@ impl S3 for FS {
}
if let Some(match_etag) = if_match {
if let Some(strong_etag) = match_etag.into_etag() {
if info
if let Some(strong_etag) = match_etag.into_etag()
&& info
.etag
.as_ref()
.is_some_and(|etag| ETag::Strong(etag.clone()) != strong_etag)
{
return Err(S3Error::new(S3ErrorCode::PreconditionFailed));
}
}
} else if let Some(unmodified_since) = if_unmodified_since {
if info.mod_time.is_some_and(|mod_time| {
let give_time: OffsetDateTime = unmodified_since.into();
mod_time > give_time.add(time::Duration::seconds(1))
}) {
{
return Err(S3Error::new(S3ErrorCode::PreconditionFailed));
}
} else if let Some(unmodified_since) = if_unmodified_since
&& info.mod_time.is_some_and(|mod_time| {
let give_time: OffsetDateTime = unmodified_since.into();
mod_time > give_time.add(time::Duration::seconds(1))
})
{
return Err(S3Error::new(S3ErrorCode::PreconditionFailed));
}
let event_info = info.clone();
@@ -3080,10 +3075,10 @@ impl S3 for FS {
let input = req.input;
// Save SSE-C parameters before moving input
if let Some(ref storage_class) = input.storage_class {
if !is_valid_storage_class(storage_class.as_str()) {
return Err(s3_error!(InvalidStorageClass));
}
if let Some(ref storage_class) = input.storage_class
&& !is_valid_storage_class(storage_class.as_str())
{
return Err(s3_error!(InvalidStorageClass));
}
let PutObjectInput {
body,
@@ -3116,27 +3111,23 @@ impl S3 for FS {
match store.get_object_info(&bucket, &key, &ObjectOptions::default()).await {
Ok(info) => {
if !info.delete_marker {
if let Some(ifmatch) = if_match {
if let Some(strong_etag) = ifmatch.into_etag() {
if info
.etag
.as_ref()
.is_some_and(|etag| ETag::Strong(etag.clone()) != strong_etag)
{
return Err(s3_error!(PreconditionFailed));
}
}
if let Some(ifmatch) = if_match
&& let Some(strong_etag) = ifmatch.into_etag()
&& info
.etag
.as_ref()
.is_some_and(|etag| ETag::Strong(etag.clone()) != strong_etag)
{
return Err(s3_error!(PreconditionFailed));
}
if let Some(ifnonematch) = if_none_match {
if let Some(strong_etag) = ifnonematch.into_etag() {
if info
.etag
.as_ref()
.is_some_and(|etag| ETag::Strong(etag.clone()) == strong_etag)
{
return Err(s3_error!(PreconditionFailed));
}
}
if let Some(ifnonematch) = if_none_match
&& let Some(strong_etag) = ifnonematch.into_etag()
&& info
.etag
.as_ref()
.is_some_and(|etag| ETag::Strong(etag.clone()) == strong_etag)
{
return Err(s3_error!(PreconditionFailed));
}
}
}
@@ -3344,30 +3335,27 @@ impl S3 for FS {
}
// Apply managed SSE (SSE-S3 or SSE-KMS) when requested
if sse_customer_algorithm.is_none() {
if let Some(sse_alg) = &effective_sse {
if is_managed_sse(sse_alg) {
let material =
create_managed_encryption_material(&bucket, &key, sse_alg, effective_kms_key_id.clone(), actual_size)
.await?;
if sse_customer_algorithm.is_none()
&& let Some(sse_alg) = &effective_sse
&& is_managed_sse(sse_alg)
{
let material =
create_managed_encryption_material(&bucket, &key, sse_alg, effective_kms_key_id.clone(), actual_size).await?;
let ManagedEncryptionMaterial {
data_key,
headers,
kms_key_id: kms_key_used,
} = material;
let ManagedEncryptionMaterial {
data_key,
headers,
kms_key_id: kms_key_used,
} = material;
let key_bytes = data_key.plaintext_key;
let nonce = data_key.nonce;
let key_bytes = data_key.plaintext_key;
let nonce = data_key.nonce;
metadata.extend(headers);
effective_kms_key_id = Some(kms_key_used.clone());
metadata.extend(headers);
effective_kms_key_id = Some(kms_key_used.clone());
let encrypt_reader = EncryptReader::new(reader, key_bytes, nonce);
reader =
HashReader::new(Box::new(encrypt_reader), -1, actual_size, None, None, false).map_err(ApiError::from)?;
}
}
let encrypt_reader = EncryptReader::new(reader, key_bytes, nonce);
reader = HashReader::new(Box::new(encrypt_reader), -1, actual_size, None, None, false).map_err(ApiError::from)?;
}
let mut reader = PutObjReader::new(reader);
@@ -3428,8 +3416,8 @@ impl S3 for FS {
let mut checksum_sha256 = input.checksum_sha256;
let mut checksum_crc64nvme = input.checksum_crc64nvme;
if let Some(alg) = &input.checksum_algorithm {
if let Some(Some(checksum_str)) = req.trailing_headers.as_ref().map(|trailer| {
if let Some(alg) = &input.checksum_algorithm
&& let Some(Some(checksum_str)) = req.trailing_headers.as_ref().map(|trailer| {
let key = match alg.as_str() {
ChecksumAlgorithm::CRC32 => rustfs_rio::ChecksumType::CRC32.key(),
ChecksumAlgorithm::CRC32C => rustfs_rio::ChecksumType::CRC32C.key(),
@@ -3443,15 +3431,15 @@ impl S3 for FS {
.get(key.unwrap_or_default())
.and_then(|value| value.to_str().ok().map(|s| s.to_string()))
})
}) {
match alg.as_str() {
ChecksumAlgorithm::CRC32 => checksum_crc32 = checksum_str,
ChecksumAlgorithm::CRC32C => checksum_crc32c = checksum_str,
ChecksumAlgorithm::SHA1 => checksum_sha1 = checksum_str,
ChecksumAlgorithm::SHA256 => checksum_sha256 = checksum_str,
ChecksumAlgorithm::CRC64NVME => checksum_crc64nvme = checksum_str,
_ => (),
}
})
{
match alg.as_str() {
ChecksumAlgorithm::CRC32 => checksum_crc32 = checksum_str,
ChecksumAlgorithm::CRC32C => checksum_crc32c = checksum_str,
ChecksumAlgorithm::SHA1 => checksum_sha1 = checksum_str,
ChecksumAlgorithm::SHA256 => checksum_sha256 = checksum_str,
ChecksumAlgorithm::CRC64NVME => checksum_crc64nvme = checksum_str,
_ => (),
}
}
@@ -3495,10 +3483,10 @@ impl S3 for FS {
} = req.input.clone();
// Validate storage class if provided
if let Some(ref storage_class) = storage_class {
if !is_valid_storage_class(storage_class.as_str()) {
return Err(s3_error!(InvalidStorageClass));
}
if let Some(ref storage_class) = storage_class
&& !is_valid_storage_class(storage_class.as_str())
{
return Err(s3_error!(InvalidStorageClass));
}
// mc cp step 3
@@ -3654,10 +3642,10 @@ impl S3 for FS {
let mut body_stream = body.ok_or_else(|| s3_error!(IncompleteBody))?;
if size.is_none() {
if let Some(val) = req.headers.get(AMZ_DECODED_CONTENT_LENGTH) {
if let Some(x) = atoi::atoi::<i64>(val.as_bytes()) {
size = Some(x);
}
if let Some(val) = req.headers.get(AMZ_DECODED_CONTENT_LENGTH)
&& let Some(x) = atoi::atoi::<i64>(val.as_bytes())
{
size = Some(x);
}
if size.is_none() {
@@ -3828,8 +3816,8 @@ impl S3 for FS {
let mut checksum_sha256 = input.checksum_sha256;
let mut checksum_crc64nvme = input.checksum_crc64nvme;
if let Some(alg) = &input.checksum_algorithm {
if let Some(Some(checksum_str)) = req.trailing_headers.as_ref().map(|trailer| {
if let Some(alg) = &input.checksum_algorithm
&& let Some(Some(checksum_str)) = req.trailing_headers.as_ref().map(|trailer| {
let key = match alg.as_str() {
ChecksumAlgorithm::CRC32 => rustfs_rio::ChecksumType::CRC32.key(),
ChecksumAlgorithm::CRC32C => rustfs_rio::ChecksumType::CRC32C.key(),
@@ -3843,15 +3831,15 @@ impl S3 for FS {
.get(key.unwrap_or_default())
.and_then(|value| value.to_str().ok().map(|s| s.to_string()))
})
}) {
match alg.as_str() {
ChecksumAlgorithm::CRC32 => checksum_crc32 = checksum_str,
ChecksumAlgorithm::CRC32C => checksum_crc32c = checksum_str,
ChecksumAlgorithm::SHA1 => checksum_sha1 = checksum_str,
ChecksumAlgorithm::SHA256 => checksum_sha256 = checksum_str,
ChecksumAlgorithm::CRC64NVME => checksum_crc64nvme = checksum_str,
_ => (),
}
})
{
match alg.as_str() {
ChecksumAlgorithm::CRC32 => checksum_crc32 = checksum_str,
ChecksumAlgorithm::CRC32C => checksum_crc32c = checksum_str,
ChecksumAlgorithm::SHA1 => checksum_sha1 = checksum_str,
ChecksumAlgorithm::SHA256 => checksum_sha256 = checksum_str,
ChecksumAlgorithm::CRC64NVME => checksum_crc64nvme = checksum_str,
_ => (),
}
}
@@ -3949,16 +3937,14 @@ impl S3 for FS {
}
}
if let Some(if_none_match) = copy_source_if_none_match {
if let Some(ref etag) = src_info.etag {
if let Some(strong_etag) = if_none_match.into_etag() {
if ETag::Strong(etag.clone()) == strong_etag {
return Err(s3_error!(PreconditionFailed));
}
}
// Weak ETag in If-None-Match is ignored (doesn't match)
}
if let Some(if_none_match) = copy_source_if_none_match
&& let Some(ref etag) = src_info.etag
&& let Some(strong_etag) = if_none_match.into_etag()
&& ETag::Strong(etag.clone()) == strong_etag
{
return Err(s3_error!(PreconditionFailed));
}
// Weak ETag in If-None-Match is ignored (doesn't match)
// TODO: Implement proper time comparison for if_modified_since and if_unmodified_since
// For now, we'll skip these conditions
@@ -4157,10 +4143,10 @@ impl S3 for FS {
let max_uploads = max_uploads.map(|x| x as usize).unwrap_or(MAX_PARTS_COUNT);
if let Some(key_marker) = &key_marker {
if !key_marker.starts_with(prefix.as_str()) {
return Err(s3_error!(NotImplemented, "Invalid key marker"));
}
if let Some(key_marker) = &key_marker
&& !key_marker.starts_with(prefix.as_str())
{
return Err(s3_error!(NotImplemented, "Invalid key marker"));
}
let result = store
@@ -4227,27 +4213,23 @@ impl S3 for FS {
match store.get_object_info(&bucket, &key, &ObjectOptions::default()).await {
Ok(info) => {
if !info.delete_marker {
if let Some(ifmatch) = if_match {
if let Some(strong_etag) = ifmatch.into_etag() {
if info
.etag
.as_ref()
.is_some_and(|etag| ETag::Strong(etag.clone()) != strong_etag)
{
return Err(s3_error!(PreconditionFailed));
}
}
if let Some(ifmatch) = if_match
&& let Some(strong_etag) = ifmatch.into_etag()
&& info
.etag
.as_ref()
.is_some_and(|etag| ETag::Strong(etag.clone()) != strong_etag)
{
return Err(s3_error!(PreconditionFailed));
}
if let Some(ifnonematch) = if_none_match {
if let Some(strong_etag) = ifnonematch.into_etag() {
if info
.etag
.as_ref()
.is_some_and(|etag| ETag::Strong(etag.clone()) == strong_etag)
{
return Err(s3_error!(PreconditionFailed));
}
}
if let Some(ifnonematch) = if_none_match
&& let Some(strong_etag) = ifnonematch.into_etag()
&& info
.etag
.as_ref()
.is_some_and(|etag| ETag::Strong(etag.clone()) == strong_etag)
{
return Err(s3_error!(PreconditionFailed));
}
}
}
@@ -4852,11 +4834,11 @@ impl S3 for FS {
let Some(input_cfg) = lifecycle_configuration else { return Err(s3_error!(InvalidArgument)) };
let rcfg = metadata_sys::get_object_lock_config(&bucket).await;
if let Ok(rcfg) = rcfg {
if let Err(err) = input_cfg.validate(&rcfg.0).await {
//return Err(S3Error::with_message(S3ErrorCode::Custom("BucketLockValidateFailed".into()), err.to_string()));
return Err(S3Error::with_message(S3ErrorCode::Custom("ValidateFailed".into()), err.to_string()));
}
if let Ok(rcfg) = rcfg
&& let Err(err) = input_cfg.validate(&rcfg.0).await
{
//return Err(S3Error::with_message(S3ErrorCode::Custom("BucketLockValidateFailed".into()), err.to_string()));
return Err(S3Error::with_message(S3ErrorCode::Custom("ValidateFailed".into()), err.to_string()));
}
if let Err(err) = validate_transition_tier(&input_cfg).await {
@@ -5735,23 +5717,23 @@ impl S3 for FS {
/// Auxiliary functions: extract prefixes and suffixes
fn extract_prefix_suffix(filter: Option<&NotificationConfigurationFilter>) -> (String, String) {
if let Some(filter) = filter {
if let Some(filter_rules) = &filter.key {
let mut prefix = String::new();
let mut suffix = String::new();
if let Some(rules) = &filter_rules.filter_rules {
for rule in rules {
if let (Some(name), Some(value)) = (rule.name.as_ref(), rule.value.as_ref()) {
match name.as_str() {
"prefix" => prefix = value.clone(),
"suffix" => suffix = value.clone(),
_ => {}
}
if let Some(filter) = filter
&& let Some(filter_rules) = &filter.key
{
let mut prefix = String::new();
let mut suffix = String::new();
if let Some(rules) = &filter_rules.filter_rules {
for rule in rules {
if let (Some(name), Some(value)) = (rule.name.as_ref(), rule.value.as_ref()) {
match name.as_str() {
"prefix" => prefix = value.clone(),
"suffix" => suffix = value.clone(),
_ => {}
}
}
}
return (prefix, suffix);
}
return (prefix, suffix);
}
(String::new(), String::new())
}

View File

@@ -86,10 +86,10 @@ impl OperationHelper {
.req_path(req.uri.path().to_string())
.req_query(extract_req_params(req));
if let Some(req_id) = req.headers.get("x-amz-request-id") {
if let Ok(id_str) = req_id.to_str() {
audit_builder = audit_builder.request_id(id_str);
}
if let Some(req_id) = req.headers.get("x-amz-request-id")
&& let Ok(id_str) = req_id.to_str()
{
audit_builder = audit_builder.request_id(id_str);
}
// initialize event builder
@@ -194,15 +194,15 @@ impl Drop for OperationHelper {
}
// Distribute event notification (only on success)
if self.api_builder.0.status.as_deref() == Some("success") {
if let Some(builder) = self.event_builder.take() {
let event_args = builder.build();
// Avoid generating notifications for copy requests
if !event_args.is_replication_request() {
spawn_background(async move {
notifier_global::notify(event_args).await;
});
}
if self.api_builder.0.status.as_deref() == Some("success")
&& let Some(builder) = self.event_builder.take()
{
let event_args = builder.build();
// Avoid generating notifications for copy requests
if !event_args.is_replication_request() {
spawn_background(async move {
notifier_global::notify(event_args).await;
});
}
}
}

View File

@@ -64,13 +64,12 @@ pub async fn del_opts(
let vid = vid.map(|v| v.as_str().trim().to_owned());
if let Some(ref id) = vid {
if *id != Uuid::nil().to_string()
&& let Err(err) = Uuid::parse_str(id.as_str())
{
error!("del_opts: invalid version id: {} error: {}", id, err);
return Err(StorageError::InvalidVersionID(bucket.to_owned(), object.to_owned(), id.clone()));
}
if let Some(ref id) = vid
&& *id != Uuid::nil().to_string()
&& let Err(err) = Uuid::parse_str(id.as_str())
{
error!("del_opts: invalid version id: {} error: {}", id, err);
return Err(StorageError::InvalidVersionID(bucket.to_owned(), object.to_owned(), id.clone()));
}
let mut opts = put_opts_from_headers(headers, metadata.clone()).map_err(|err| {
@@ -111,12 +110,11 @@ pub async fn get_opts(
let vid = vid.map(|v| v.as_str().trim().to_owned());
if let Some(ref id) = vid {
if *id != Uuid::nil().to_string()
&& let Err(_err) = Uuid::parse_str(id.as_str())
{
return Err(StorageError::InvalidVersionID(bucket.to_owned(), object.to_owned(), id.clone()));
}
if let Some(ref id) = vid
&& *id != Uuid::nil().to_string()
&& let Err(_err) = Uuid::parse_str(id.as_str())
{
return Err(StorageError::InvalidVersionID(bucket.to_owned(), object.to_owned(), id.clone()));
}
let mut opts = get_default_opts(headers, HashMap::new(), false)
@@ -187,12 +185,11 @@ pub async fn put_opts(
let vid = vid.map(|v| v.as_str().trim().to_owned());
if let Some(ref id) = vid {
if *id != Uuid::nil().to_string()
&& let Err(_err) = Uuid::parse_str(id.as_str())
{
return Err(StorageError::InvalidVersionID(bucket.to_owned(), object.to_owned(), id.clone()));
}
if let Some(ref id) = vid
&& *id != Uuid::nil().to_string()
&& let Err(_err) = Uuid::parse_str(id.as_str())
{
return Err(StorageError::InvalidVersionID(bucket.to_owned(), object.to_owned(), id.clone()));
}
let mut opts = put_opts_from_headers(headers, metadata)
@@ -512,12 +509,11 @@ fn skip_content_sha256_cksum(headers: &HeaderMap<HeaderValue>) -> bool {
// such broken clients and content-length > 0.
// For now, we'll assume strict compatibility is disabled
// In a real implementation, you would check a global config
if let Some(content_length) = headers.get("content-length") {
if let Ok(length_str) = content_length.to_str() {
if let Ok(length) = length_str.parse::<i64>() {
return length > 0; // && !global_server_ctxt.strict_s3_compat
}
}
if let Some(content_length) = headers.get("content-length")
&& let Ok(length_str) = content_length.to_str()
&& let Ok(length) = length_str.parse::<i64>()
{
return length > 0; // && !global_server_ctxt.strict_s3_compat
}
false
}
@@ -546,10 +542,10 @@ fn get_content_sha256_cksum(headers: &HeaderMap<HeaderValue>, service_type: Serv
};
// We found 'X-Amz-Content-Sha256' return the captured value.
if let Some(header_value) = content_sha256 {
if let Ok(value) = header_value.to_str() {
return value.to_string();
}
if let Some(header_value) = content_sha256
&& let Ok(value) = header_value.to_str()
{
return value.to_string();
}
// We couldn't find 'X-Amz-Content-Sha256'.

View File

@@ -75,10 +75,10 @@ fn increment_version(version: &str) -> Result<String, Box<dyn std::error::Error>
let (major, minor, patch, pre_release) = parse_version(version)?;
// If there's a pre-release identifier, increment the pre-release version number
if let Some(pre) = pre_release {
if let Some(new_pre) = increment_pre_release(&pre) {
return Ok(format!("{major}.{minor}.{patch}-{new_pre}"));
}
if let Some(pre) = pre_release
&& let Some(new_pre) = increment_pre_release(&pre)
{
return Ok(format!("{major}.{minor}.{patch}-{new_pre}"));
}
// Otherwise increment patch version number
@@ -107,10 +107,10 @@ pub fn parse_version(version: &str) -> VersionParseResult {
fn increment_pre_release(pre_release: &str) -> Option<String> {
// Handle pre-release versions like "alpha.19"
let parts: Vec<&str> = pre_release.split('.').collect();
if parts.len() == 2 {
if let Ok(num) = parts[1].parse::<u32>() {
return Some(format!("{}.{}", parts[0], num + 1));
}
if parts.len() == 2
&& let Ok(num) = parts[1].parse::<u32>()
{
return Some(format!("{}.{}", parts[0], num + 1));
}
// Handle pre-release versions like "alpha19"