diff --git a/crates/obs/src/telemetry.rs b/crates/obs/src/telemetry.rs
index 96b200a6..c034ad10 100644
--- a/crates/obs/src/telemetry.rs
+++ b/crates/obs/src/telemetry.rs
@@ -1,19 +1,19 @@
 use crate::OtelConfig;
-use flexi_logger::{style, Age, Cleanup, Criterion, DeferredNow, FileSpec, LogSpecification, Naming, Record, WriteMode};
+use flexi_logger::{Age, Cleanup, Criterion, DeferredNow, FileSpec, LogSpecification, Naming, Record, WriteMode, style};
 use nu_ansi_term::Color;
 use opentelemetry::trace::TracerProvider;
-use opentelemetry::{global, KeyValue};
+use opentelemetry::{KeyValue, global};
 use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge;
 use opentelemetry_otlp::WithExportConfig;
 use opentelemetry_sdk::logs::SdkLoggerProvider;
 use opentelemetry_sdk::{
+    Resource,
     metrics::{MeterProviderBuilder, PeriodicReader, SdkMeterProvider},
     trace::{RandomIdGenerator, Sampler, SdkTracerProvider},
-    Resource,
 };
 use opentelemetry_semantic_conventions::{
-    attribute::{DEPLOYMENT_ENVIRONMENT_NAME, NETWORK_LOCAL_ADDRESS, SERVICE_VERSION as OTEL_SERVICE_VERSION},
     SCHEMA_URL,
+    attribute::{DEPLOYMENT_ENVIRONMENT_NAME, NETWORK_LOCAL_ADDRESS, SERVICE_VERSION as OTEL_SERVICE_VERSION},
 };
 use rustfs_config::{
     APP_NAME, DEFAULT_LOG_DIR, DEFAULT_LOG_KEEP_FILES, DEFAULT_LOG_LEVEL, ENVIRONMENT, METER_INTERVAL, SAMPLE_RATIO,
@@ -27,7 +27,7 @@ use tracing::info;
 use tracing_error::ErrorLayer;
 use tracing_opentelemetry::{MetricsLayer, OpenTelemetryLayer};
 use tracing_subscriber::fmt::format::FmtSpan;
-use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter, Layer};
+use tracing_subscriber::{EnvFilter, Layer, layer::SubscriberExt, util::SubscriberInitExt};

 /// A guard object that manages the lifecycle of OpenTelemetry components.
 ///
@@ -333,9 +333,7 @@ pub(crate) fn init_telemetry(config: &OtelConfig) -> OtelGuard {
         .unwrap_or_else(|e| {
             eprintln!(
                 "Invalid logger level: {}, using default: {},failed error:{}",
-                logger_level,
-                DEFAULT_LOG_LEVEL,
-                e.to_string()
+                logger_level, DEFAULT_LOG_LEVEL, e
             );
             flexi_logger::Logger::with(log_spec.clone())
         })
diff --git a/ecstore/src/bucket/error.rs b/ecstore/src/bucket/error.rs
index 5a5aae38..6b1afd38 100644
--- a/ecstore/src/bucket/error.rs
+++ b/ecstore/src/bucket/error.rs
@@ -53,8 +53,7 @@ impl From for BucketMetadataError {

 impl From<std::io::Error> for BucketMetadataError {
     fn from(e: std::io::Error) -> Self {
-        e.downcast::<BucketMetadataError>()
-            .unwrap_or_else(|e| BucketMetadataError::other(e))
+        e.downcast::<BucketMetadataError>().unwrap_or_else(BucketMetadataError::other)
     }
 }
diff --git a/ecstore/src/error.rs b/ecstore/src/error.rs
index 7af8138c..ca80059d 100644
--- a/ecstore/src/error.rs
+++ b/ecstore/src/error.rs
@@ -226,9 +226,9 @@ impl From for StorageError {
     }
 }

-impl Into<DiskError> for StorageError {
-    fn into(self) -> DiskError {
-        match self {
+impl From<StorageError> for DiskError {
+    fn from(val: StorageError) -> Self {
+        match val {
             StorageError::Io(io_error) => io_error.into(),
             StorageError::Unexpected => DiskError::Unexpected,
             StorageError::FileNotFound => DiskError::FileNotFound,
@@ -250,7 +250,7 @@ impl Into<DiskError> for StorageError {
             StorageError::VolumeNotFound => DiskError::VolumeNotFound,
             StorageError::VolumeExists => DiskError::VolumeExists,
             StorageError::FileNameTooLong => DiskError::FileNameTooLong,
-            _ => DiskError::other(self),
+            _ => DiskError::other(val),
         }
     }
 }
@@ -292,9 +292,9 @@ impl From for StorageError {
     }
 }

-impl Into<rustfs_filemeta::Error> for StorageError {
-    fn into(self) -> rustfs_filemeta::Error {
-        match self {
+impl From<StorageError> for rustfs_filemeta::Error {
+    fn from(val: StorageError) -> Self {
+        match val {
             StorageError::Unexpected => rustfs_filemeta::Error::Unexpected,
             StorageError::FileNotFound => rustfs_filemeta::Error::FileNotFound,
             StorageError::FileVersionNotFound => rustfs_filemeta::Error::FileVersionNotFound,
@@ -303,7 +303,7 @@ impl Into<rustfs_filemeta::Error> for StorageError {
             StorageError::MethodNotAllowed => rustfs_filemeta::Error::MethodNotAllowed,
             StorageError::VolumeNotFound => rustfs_filemeta::Error::VolumeNotFound,
             StorageError::Io(io_error) => io_error.into(),
-            _ => rustfs_filemeta::Error::other(self),
+            _ => rustfs_filemeta::Error::other(val),
         }
     }
 }
@@ -372,7 +372,7 @@ impl Clone for StorageError {
             StorageError::DecommissionNotStarted => StorageError::DecommissionNotStarted,
             StorageError::DecommissionAlreadyRunning => StorageError::DecommissionAlreadyRunning,
             StorageError::DoneForNow => StorageError::DoneForNow,
-            StorageError::InvalidPart(a, b, c) => StorageError::InvalidPart(a.clone(), b.clone(), c.clone()),
+            StorageError::InvalidPart(a, b, c) => StorageError::InvalidPart(*a, b.clone(), c.clone()),
             StorageError::ErasureReadQuorum => StorageError::ErasureReadQuorum,
             StorageError::ErasureWriteQuorum => StorageError::ErasureWriteQuorum,
             StorageError::NotFirstDisk => StorageError::NotFirstDisk,
@@ -717,7 +717,7 @@ pub fn to_object_err(err: Error, params: Vec<&str>) -> Error {
             let bucket = params.first().cloned().unwrap_or_default().to_owned();
             let object = params.get(1).cloned().map(decode_dir_object).unwrap_or_default();

-            return StorageError::PrefixAccessDenied(bucket, object);
+            StorageError::PrefixAccessDenied(bucket, object)
         }
         _ => err,
diff --git a/ecstore/src/peer.rs b/ecstore/src/peer.rs
index 71911a85..bcb8835f 100644
--- a/ecstore/src/peer.rs
+++ b/ecstore/src/peer.rs
@@ -771,7 +771,7 @@ pub async fn heal_bucket_local(bucket: &str, opts: &HealOpts) -> Result
-        let errs_clone = errs.iter().cloned().collect::<Vec<_>>();
+        let errs_clone = errs.to_vec();
         futures.push(async move {
             if bs_clone.read().await[idx] == DRIVE_STATE_MISSING {
                 info!("bucket not find, will recreate");
diff --git a/ecstore/src/rebalance.rs b/ecstore/src/rebalance.rs
index 11cea68c..9fb39edb 100644
--- a/ecstore/src/rebalance.rs
+++ b/ecstore/src/rebalance.rs
@@ -500,7 +500,7 @@ impl ECStore {
             if get_global_endpoints()
                 .as_ref()
                 .get(idx)
-                .is_none_or(|v| v.endpoints.as_ref().first().map_or(true, |e| e.is_local))
+                .is_none_or(|v| v.endpoints.as_ref().first().is_none_or(|e| e.is_local))
             {
                 warn!("start_rebalance: pool {} is not local, skipping", idx);
                 continue;
diff --git a/ecstore/src/set_disk.rs b/ecstore/src/set_disk.rs
index ddf392a1..4e005707 100644
--- a/ecstore/src/set_disk.rs
+++ b/ecstore/src/set_disk.rs
@@ -2828,22 +2828,17 @@ impl SetDisks {
             return Ok((result, None));
         }
         for (index, (err, disk)) in errs.iter().zip(disks.iter()).enumerate() {
-            if let (Some(err), Some(disk)) = (err, disk) {
-                match err {
-                    DiskError::VolumeNotFound | DiskError::FileNotFound => {
-                        let vol_path = Path::new(bucket).join(object);
-                        let drive_state = match disk.make_volume(vol_path.to_str().unwrap()).await {
-                            Ok(_) => DRIVE_STATE_OK,
-                            Err(merr) => match merr {
-                                DiskError::VolumeExists => DRIVE_STATE_OK,
-                                DiskError::DiskNotFound => DRIVE_STATE_OFFLINE,
-                                _ => DRIVE_STATE_CORRUPT,
-                            },
-                        };
-                        result.after.drives[index].state = drive_state.to_string();
-                    }
-                    _ => {}
-                }
+            if let (Some(DiskError::VolumeNotFound | DiskError::FileNotFound), Some(disk)) = (err, disk) {
+                let vol_path = Path::new(bucket).join(object);
+                let drive_state = match disk.make_volume(vol_path.to_str().unwrap()).await {
+                    Ok(_) => DRIVE_STATE_OK,
+                    Err(merr) => match merr {
+                        DiskError::VolumeExists => DRIVE_STATE_OK,
+                        DiskError::DiskNotFound => DRIVE_STATE_OFFLINE,
+                        _ => DRIVE_STATE_CORRUPT,
+                    },
+                };
+                result.after.drives[index].state = drive_state.to_string();
             }
         }
@@ -5563,7 +5558,7 @@ async fn disks_with_all_parts(
                 if index < vec.len() {
                     if verify_err.is_some() {
                         info!("verify_err");
-                        vec[index] = conv_part_err_to_int(&verify_err.as_ref().map(|e| e.clone().into()));
+                        vec[index] = conv_part_err_to_int(&verify_err.clone());
                     } else {
                         info!("verify_resp, verify_resp.results {}", verify_resp.results[p]);
                         vec[index] = verify_resp.results[p];
@@ -5620,7 +5615,7 @@ pub fn should_heal_object_on_disk(
             }
         }
     }
-    (false, err.as_ref().map(|e| e.clone()))
+    (false, err.clone())
 }

 async fn get_disks_info(disks: &[Option], eps: &[Endpoint]) -> Vec {
diff --git a/ecstore/src/store.rs b/ecstore/src/store.rs
index 1e1c97fb..cee0320f 100644
--- a/ecstore/src/store.rs
+++ b/ecstore/src/store.rs
@@ -1319,7 +1319,7 @@ impl StorageAPI for ECStore {
     async fn make_bucket(&self, bucket: &str, opts: &MakeBucketOptions) -> Result<()> {
         if !is_meta_bucketname(bucket) {
             if let Err(err) = check_valid_bucket_name_strict(bucket) {
-                return Err(StorageError::BucketNameInvalid(err.to_string()).into());
+                return Err(StorageError::BucketNameInvalid(err.to_string()));
             }

             // TODO: nslock
@@ -1390,11 +1390,11 @@ impl StorageAPI for ECStore {
     #[tracing::instrument(skip(self))]
     async fn delete_bucket(&self, bucket: &str, opts: &DeleteBucketOptions) -> Result<()> {
         if is_meta_bucketname(bucket) {
-            return Err(StorageError::BucketNameInvalid(bucket.to_string()).into());
+            return Err(StorageError::BucketNameInvalid(bucket.to_string()));
         }

         if let Err(err) = check_valid_bucket_name(bucket) {
-            return Err(StorageError::BucketNameInvalid(err.to_string()).into());
+            return Err(StorageError::BucketNameInvalid(err.to_string()));
         }

         // TODO: nslock
@@ -2186,7 +2186,7 @@ impl StorageAPI for ECStore {
         for (index, err) in errs.iter().enumerate() {
             match err {
                 Some(err) => {
-                    if is_err_object_not_found(&err) || is_err_version_not_found(&err) {
+                    if is_err_object_not_found(err) || is_err_version_not_found(err) {
                         continue;
                     }
                     return Ok((ress.remove(index), Some(err.clone())));
diff --git a/ecstore/src/store_list_objects.rs b/ecstore/src/store_list_objects.rs
index 2d4c02ca..df105bcd 100644
--- a/ecstore/src/store_list_objects.rs
+++ b/ecstore/src/store_list_objects.rs
@@ -1,24 +1,24 @@
+use crate::StorageAPI;
 use crate::bucket::metadata_sys::get_versioning_config;
 use crate::bucket::versioning::VersioningApi;
-use crate::cache_value::metacache_set::{list_path_raw, ListPathRawOptions};
+use crate::cache_value::metacache_set::{ListPathRawOptions, list_path_raw};
 use crate::disk::error::DiskError;
 use crate::disk::{DiskInfo, DiskStore};
 use crate::error::{
-    is_all_not_found, is_all_volume_not_found, is_err_bucket_not_found, to_object_err, Error, Result, StorageError,
+    Error, Result, StorageError, is_all_not_found, is_all_volume_not_found, is_err_bucket_not_found, to_object_err,
 };
 use crate::peer::is_reserved_or_invalid_bucket;
 use crate::set_disk::SetDisks;
 use crate::store::check_list_objs_args;
 use crate::store_api::{ListObjectVersionsInfo, ListObjectsInfo, ObjectInfo, ObjectOptions};
-use crate::utils::path::{self, base_dir_from_prefix, SLASH_SEPARATOR};
-use crate::StorageAPI;
+use crate::utils::path::{self, SLASH_SEPARATOR, base_dir_from_prefix};
 use crate::{store::ECStore, store_api::ListObjectsV2Info};
 use futures::future::join_all;
 use rand::seq::SliceRandom;
 use rand::thread_rng;
 use rustfs_filemeta::{
-    merge_file_meta_versions, FileInfo, MetaCacheEntries, MetaCacheEntriesSorted, MetaCacheEntriesSortedResult, MetaCacheEntry,
-    MetadataResolutionParams,
+    FileInfo, MetaCacheEntries, MetaCacheEntriesSorted, MetaCacheEntriesSortedResult, MetaCacheEntry, MetadataResolutionParams,
+    merge_file_meta_versions,
 };
 use std::collections::HashMap;
 use std::sync::Arc;
@@ -837,11 +837,7 @@ impl ECStore {
                     if fiter(&fi) {
                         let item = ObjectInfoOrErr {
                             item: Some(ObjectInfo::from_file_info(&fi, &bucket, &fi.name, {
-                                if let Some(v) = &vcf {
-                                    v.versioned(&fi.name)
-                                } else {
-                                    false
-                                }
+                                if let Some(v) = &vcf { v.versioned(&fi.name) } else { false }
                             })),
                             err: None,
                         };
@@ -853,11 +849,7 @@ impl ECStore {
                 } else {
                     let item = ObjectInfoOrErr {
                         item: Some(ObjectInfo::from_file_info(&fi, &bucket, &fi.name, {
-                            if let Some(v) = &vcf {
-                                v.versioned(&fi.name)
-                            } else {
-                                false
-                            }
+                            if let Some(v) = &vcf { v.versioned(&fi.name) } else { false }
                         })),
                         err: None,
                     };
@@ -892,12 +884,8 @@ impl ECStore {
                 if let Some(fiter) = opts.filter {
                     if fiter(fi) {
                         let item = ObjectInfoOrErr {
-                            item: Some(ObjectInfo::from_file_info(&fi, &bucket, &fi.name, {
-                                if let Some(v) = &vcf {
-                                    v.versioned(&fi.name)
-                                } else {
-                                    false
-                                }
+                            item: Some(ObjectInfo::from_file_info(fi, &bucket, &fi.name, {
+                                if let Some(v) = &vcf { v.versioned(&fi.name) } else { false }
                             })),
                             err: None,
                         };
@@ -908,12 +896,8 @@ impl ECStore {
                     }
                 } else {
                     let item = ObjectInfoOrErr {
-                        item: Some(ObjectInfo::from_file_info(&fi, &bucket, &fi.name, {
-                            if let Some(v) = &vcf {
-                                v.versioned(&fi.name)
-                            } else {
-                                false
-                            }
+                        item: Some(ObjectInfo::from_file_info(fi, &bucket, &fi.name, {
+                            if let Some(v) = &vcf { v.versioned(&fi.name) } else { false }
                         })),
                         err: None,
                     };
diff --git a/ecstore/src/utils/ellipses.rs b/ecstore/src/utils/ellipses.rs
index f323fd2a..9c48b0ed 100644
--- a/ecstore/src/utils/ellipses.rs
+++ b/ecstore/src/utils/ellipses.rs
@@ -188,8 +188,8 @@ pub fn parse_ellipses_range(pattern: &str) -> Result> {
     }

     // TODO: Add support for hexadecimals.
-    let start = ellipses_range[0].parse::().map_err(|e| Error::other(e))?;
-    let end = ellipses_range[1].parse::().map_err(|e| Error::other(e))?;
+    let start = ellipses_range[0].parse::().map_err(Error::other)?;
+    let end = ellipses_range[1].parse::().map_err(Error::other)?;

     if start > end {
         return Err(Error::other("Invalid argument:range start cannot be bigger than end"));
diff --git a/ecstore/src/utils/net.rs b/ecstore/src/utils/net.rs
index 831ae617..9544ed2b 100644
--- a/ecstore/src/utils/net.rs
+++ b/ecstore/src/utils/net.rs
@@ -53,10 +53,7 @@ pub fn is_local_host(host: Host<&str>, port: u16, local_port: u16) -> Result
     let local_set: HashSet<IpAddr> = LOCAL_IPS.iter().copied().collect();

     let is_local_host = match host {
         Host::Domain(domain) => {
-            let ips = match (domain, 0).to_socket_addrs().map(|v| v.map(|v| v.ip()).collect::<Vec<_>>()) {
-                Ok(ips) => ips,
-                Err(err) => return Err(err),
-            };
+            let ips = (domain, 0).to_socket_addrs().map(|v| v.map(|v| v.ip()).collect::<Vec<_>>())?;

             ips.iter().any(|ip| local_set.contains(ip))
         }
diff --git a/rustfs/src/admin/router.rs b/rustfs/src/admin/router.rs
index 7a58ca55..dd7544c0 100644
--- a/rustfs/src/admin/router.rs
+++ b/rustfs/src/admin/router.rs
@@ -32,7 +32,7 @@ impl S3Router {

         // warn!("set uri {}", &path);

-        self.router.insert(path, operation).map_err(|e| std::io::Error::other(e))?;
+        self.router.insert(path, operation).map_err(std::io::Error::other)?;

         Ok(())
     }
diff --git a/rustfs/src/error.rs b/rustfs/src/error.rs
index 5c01e427..3c4569a2 100644
--- a/rustfs/src/error.rs
+++ b/rustfs/src/error.rs
@@ -1,9 +1,6 @@
 use ecstore::error::StorageError;
 use s3s::{S3Error, S3ErrorCode};

-pub type Error = ApiError;
-pub type Result = core::result::Result;
-
 #[derive(Debug)]
 pub struct ApiError {
     pub code: S3ErrorCode,
diff --git a/rustfs/src/grpc.rs b/rustfs/src/grpc.rs
index c063edc9..cf2a298b 100644
--- a/rustfs/src/grpc.rs
+++ b/rustfs/src/grpc.rs
@@ -8,7 +8,6 @@ use ecstore::{
         DeleteOptions, DiskAPI, DiskInfoOptions, DiskStore, FileInfoVersions, ReadMultipleReq, ReadOptions, UpdateMetadataOpts,
         error::DiskError,
     },
-    error::StorageError,
     heal::{
         data_usage_cache::DataUsageCache,
         heal_commands::{HealOpts, get_local_background_heal_status},
diff --git a/rustfs/src/main.rs b/rustfs/src/main.rs
index 50e66678..b968d92c 100644
--- a/rustfs/src/main.rs
+++ b/rustfs/src/main.rs
@@ -121,7 +121,7 @@ async fn run(opt: config::Opt) -> Result<()> {
     // Initialize event notifier
     event::init_event_notifier(opt.event_config).await;

-    let server_addr = net::parse_and_resolve_address(opt.address.as_str()).map_err(|err| Error::other(err))?;
+    let server_addr = net::parse_and_resolve_address(opt.address.as_str()).map_err(Error::other)?;
     let server_port = server_addr.port();
     let server_address = server_addr.to_string();
@@ -140,8 +140,8 @@ async fn run(opt: config::Opt) -> Result<()> {
     let local_ip = rustfs_utils::get_local_ip().ok_or(local_addr.ip()).unwrap();

     // For RPC
-    let (endpoint_pools, setup_type) = EndpointServerPools::from_volumes(server_address.clone().as_str(), opt.volumes.clone())
-        .map_err(|err| Error::other(err.to_string()))?;
+    let (endpoint_pools, setup_type) =
+        EndpointServerPools::from_volumes(server_address.clone().as_str(), opt.volumes.clone()).map_err(Error::other)?;

     // Print RustFS-style logging for pool formatting
     for (i, eps) in endpoint_pools.as_ref().iter().enumerate() {
@@ -188,9 +188,7 @@ async fn run(opt: config::Opt) -> Result<()> {
     update_erasure_type(setup_type).await;

     // Initialize the local disk
-    init_local_disks(endpoint_pools.clone())
-        .await
-        .map_err(|err| Error::other(err))?;
+    init_local_disks(endpoint_pools.clone()).await.map_err(Error::other)?;

     // Setup S3 service
     // This project uses the S3S library to implement S3 services
@@ -505,9 +503,8 @@ async fn run(opt: config::Opt) -> Result<()> {
     // init store
     let store = ECStore::new(server_address.clone(), endpoint_pools.clone())
         .await
-        .map_err(|err| {
-            error!("ECStore::new {:?}", &err);
-            err
+        .inspect_err(|err| {
+            error!("ECStore::new {:?}", err);
         })?;

     ecconfig::init();
@@ -519,7 +516,7 @@ async fn run(opt: config::Opt) -> Result<()> {
         ..Default::default()
     })
     .await
-    .map_err(|err| Error::other(err))?;
+    .map_err(Error::other)?;

     let buckets = buckets_list.into_iter().map(|v| v.name).collect();
diff --git a/rustfs/src/storage/ecfs.rs b/rustfs/src/storage/ecfs.rs
index c45de300..5d30cb7f 100644
--- a/rustfs/src/storage/ecfs.rs
+++ b/rustfs/src/storage/ecfs.rs
@@ -4,7 +4,6 @@ use super::options::extract_metadata;
 use super::options::put_opts;
 use crate::auth::get_condition_values;
 use crate::error::ApiError;
-use crate::error::Result;
 use crate::storage::access::ReqInfo;
 use crate::storage::options::copy_dst_opts;
 use crate::storage::options::copy_src_opts;