fix clippy

weisd
2025-06-07 15:39:06 +08:00
parent b570b6aa36
commit 62a7824d0d
15 changed files with 59 additions and 94 deletions
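
The changes are mechanical lint cleanup: rustfmt's 2024-style import ordering, `From` impls in place of hand-written `Into`, redundant closures replaced by function references, and a few API modernizations (`?` instead of a hand-rolled early return, `inspect_err`, `is_none_or`).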

View File

@@ -1,19 +1,19 @@
 use crate::OtelConfig;
-use flexi_logger::{style, Age, Cleanup, Criterion, DeferredNow, FileSpec, LogSpecification, Naming, Record, WriteMode};
+use flexi_logger::{Age, Cleanup, Criterion, DeferredNow, FileSpec, LogSpecification, Naming, Record, WriteMode, style};
 use nu_ansi_term::Color;
 use opentelemetry::trace::TracerProvider;
-use opentelemetry::{global, KeyValue};
+use opentelemetry::{KeyValue, global};
 use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge;
 use opentelemetry_otlp::WithExportConfig;
 use opentelemetry_sdk::logs::SdkLoggerProvider;
 use opentelemetry_sdk::{
+    Resource,
     metrics::{MeterProviderBuilder, PeriodicReader, SdkMeterProvider},
     trace::{RandomIdGenerator, Sampler, SdkTracerProvider},
-    Resource,
 };
 use opentelemetry_semantic_conventions::{
-    attribute::{DEPLOYMENT_ENVIRONMENT_NAME, NETWORK_LOCAL_ADDRESS, SERVICE_VERSION as OTEL_SERVICE_VERSION},
     SCHEMA_URL,
+    attribute::{DEPLOYMENT_ENVIRONMENT_NAME, NETWORK_LOCAL_ADDRESS, SERVICE_VERSION as OTEL_SERVICE_VERSION},
 };
 use rustfs_config::{
     APP_NAME, DEFAULT_LOG_DIR, DEFAULT_LOG_KEEP_FILES, DEFAULT_LOG_LEVEL, ENVIRONMENT, METER_INTERVAL, SAMPLE_RATIO,
@@ -27,7 +27,7 @@ use tracing::info;
 use tracing_error::ErrorLayer;
 use tracing_opentelemetry::{MetricsLayer, OpenTelemetryLayer};
 use tracing_subscriber::fmt::format::FmtSpan;
-use tracing_subscriber::{layer::SubscriberExt, util::SubscriberInitExt, EnvFilter, Layer};
+use tracing_subscriber::{EnvFilter, Layer, layer::SubscriberExt, util::SubscriberInitExt};
 /// A guard object that manages the lifecycle of OpenTelemetry components.
 ///
@@ -333,9 +333,7 @@ pub(crate) fn init_telemetry(config: &OtelConfig) -> OtelGuard {
         .unwrap_or_else(|e| {
             eprintln!(
                 "Invalid logger level: {}, using default: {},failed error:{}",
-                logger_level,
-                DEFAULT_LOG_LEVEL,
-                e.to_string()
+                logger_level, DEFAULT_LOG_LEVEL, e
             );
             flexi_logger::Logger::with(log_spec.clone())
         })
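
Dropping `e.to_string()` silences clippy's `to_string_in_format_args`: a `{}` placeholder already formats through Display, so converting first only allocates a throwaway String. A minimal sketch of the pattern:

    fn main() {
        let e = std::fmt::Error;
        eprintln!("failed error:{}", e.to_string()); // clippy: to_string_in_format_args
        eprintln!("failed error:{}", e);             // same output, no extra allocation
    }

The import shuffles in this file look like rustfmt's 2024 style edition, which sorts uppercase names (`Resource`, `SCHEMA_URL`, `EnvFilter`) ahead of lowercase module paths.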

View File

@@ -53,8 +53,7 @@ impl From<Error> for BucketMetadataError {
 impl From<std::io::Error> for BucketMetadataError {
     fn from(e: std::io::Error) -> Self {
-        e.downcast::<BucketMetadataError>()
-            .unwrap_or_else(|e| BucketMetadataError::other(e))
+        e.downcast::<BucketMetadataError>().unwrap_or_else(BucketMetadataError::other)
     }
 }
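
`|e| BucketMetadataError::other(e)` only forwards its argument, which clippy flags as `redundant_closure`; the function can be passed directly. A sketch with a hypothetical `wrap` helper:

    fn wrap(e: std::num::ParseIntError) -> String { e.to_string() }

    fn main() {
        let a: Result<i32, String> = "x".parse().map_err(|e| wrap(e)); // clippy: redundant_closure
        let b: Result<i32, String> = "x".parse().map_err(wrap);        // same meaning, no closure
        assert_eq!(a, b);
    }

The same fix recurs below with `Error::other` and `std::io::Error::other`.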

View File

@@ -226,9 +226,9 @@ impl From<DiskError> for StorageError {
     }
 }
-impl Into<DiskError> for StorageError {
-    fn into(self) -> DiskError {
-        match self {
+impl From<StorageError> for DiskError {
+    fn from(val: StorageError) -> Self {
+        match val {
             StorageError::Io(io_error) => io_error.into(),
             StorageError::Unexpected => DiskError::Unexpected,
             StorageError::FileNotFound => DiskError::FileNotFound,
@@ -250,7 +250,7 @@ impl Into<DiskError> for StorageError {
             StorageError::VolumeNotFound => DiskError::VolumeNotFound,
             StorageError::VolumeExists => DiskError::VolumeExists,
             StorageError::FileNameTooLong => DiskError::FileNameTooLong,
-            _ => DiskError::other(self),
+            _ => DiskError::other(val),
         }
     }
 }
@@ -292,9 +292,9 @@ impl From<rustfs_filemeta::Error> for StorageError {
     }
 }
-impl Into<rustfs_filemeta::Error> for StorageError {
-    fn into(self) -> rustfs_filemeta::Error {
-        match self {
+impl From<StorageError> for rustfs_filemeta::Error {
+    fn from(val: StorageError) -> Self {
+        match val {
             StorageError::Unexpected => rustfs_filemeta::Error::Unexpected,
             StorageError::FileNotFound => rustfs_filemeta::Error::FileNotFound,
             StorageError::FileVersionNotFound => rustfs_filemeta::Error::FileVersionNotFound,
@@ -303,7 +303,7 @@ impl Into<rustfs_filemeta::Error> for StorageError {
             StorageError::MethodNotAllowed => rustfs_filemeta::Error::MethodNotAllowed,
             StorageError::VolumeNotFound => rustfs_filemeta::Error::VolumeNotFound,
             StorageError::Io(io_error) => io_error.into(),
-            _ => rustfs_filemeta::Error::other(self),
+            _ => rustfs_filemeta::Error::other(val),
         }
     }
 }
@@ -372,7 +372,7 @@ impl Clone for StorageError {
             StorageError::DecommissionNotStarted => StorageError::DecommissionNotStarted,
             StorageError::DecommissionAlreadyRunning => StorageError::DecommissionAlreadyRunning,
             StorageError::DoneForNow => StorageError::DoneForNow,
-            StorageError::InvalidPart(a, b, c) => StorageError::InvalidPart(a.clone(), b.clone(), c.clone()),
+            StorageError::InvalidPart(a, b, c) => StorageError::InvalidPart(*a, b.clone(), c.clone()),
             StorageError::ErasureReadQuorum => StorageError::ErasureReadQuorum,
             StorageError::ErasureWriteQuorum => StorageError::ErasureWriteQuorum,
             StorageError::NotFirstDisk => StorageError::NotFirstDisk,
@@ -717,7 +717,7 @@ pub fn to_object_err(err: Error, params: Vec<&str>) -> Error {
             let bucket = params.first().cloned().unwrap_or_default().to_owned();
             let object = params.get(1).cloned().map(decode_dir_object).unwrap_or_default();
-            return StorageError::PrefixAccessDenied(bucket, object);
+            StorageError::PrefixAccessDenied(bucket, object)
         }
         _ => err,
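
Clippy's `from_over_into` prefers `impl From<A> for B` over `impl Into<B> for A`: the standard library's blanket `impl<T, U: From<T>> Into<U> for T` provides `Into` automatically, while the reverse direction provides nothing. A reduced sketch with hypothetical types:

    struct Celsius(f64);
    struct Fahrenheit(f64);

    impl From<Celsius> for Fahrenheit {
        fn from(c: Celsius) -> Self {
            Fahrenheit(c.0 * 9.0 / 5.0 + 32.0)
        }
    }

    fn main() {
        let f: Fahrenheit = Celsius(100.0).into(); // Into comes from the blanket impl
        println!("{}", f.0);
    }

The `InvalidPart` arm also swaps `a.clone()` for `*a` (clippy's `clone_on_copy`, assuming the first field is a Copy type), and `to_object_err` loses a `return` on the final expression of a match arm (`needless_return`).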

View File

@@ -771,7 +771,7 @@ pub async fn heal_bucket_local(bucket: &str, opts: &HealOpts) -> Result<HealResu
         let bucket = bucket.to_string();
         let bs_clone = before_state.clone();
         let as_clone = after_state.clone();
-        let errs_clone = errs.iter().map(|e| e.clone()).collect::<Vec<_>>();
+        let errs_clone = errs.to_vec();
         futures.push(async move {
             if bs_clone.read().await[idx] == DRIVE_STATE_MISSING {
                 info!("bucket not find, will recreate");

View File

@@ -500,7 +500,7 @@ impl ECStore {
             if get_global_endpoints()
                 .as_ref()
                 .get(idx)
-                .is_none_or(|v| v.endpoints.as_ref().first().map_or(true, |e| e.is_local))
+                .is_none_or(|v| v.endpoints.as_ref().first().is_none_or(|e| e.is_local))
             {
                 warn!("start_rebalance: pool {} is not local, skipping", idx);
                 continue;
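
`map_or(true, f)` on an Option is what `Option::is_none_or` (stable since Rust 1.82) expresses directly, and recent clippy flags the old spelling as `unnecessary_map_or`:

    fn main() {
        let port: Option<u16> = None;
        let a = port.map_or(true, |p| p > 1024); // clippy: unnecessary_map_or
        let b = port.is_none_or(|p| p > 1024);   // same truth table
        assert_eq!(a, b);
    }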

View File

@@ -2828,22 +2828,17 @@ impl SetDisks {
             return Ok((result, None));
         }
         for (index, (err, disk)) in errs.iter().zip(disks.iter()).enumerate() {
-            if let (Some(err), Some(disk)) = (err, disk) {
-                match err {
-                    DiskError::VolumeNotFound | DiskError::FileNotFound => {
-                        let vol_path = Path::new(bucket).join(object);
-                        let drive_state = match disk.make_volume(vol_path.to_str().unwrap()).await {
-                            Ok(_) => DRIVE_STATE_OK,
-                            Err(merr) => match merr {
-                                DiskError::VolumeExists => DRIVE_STATE_OK,
-                                DiskError::DiskNotFound => DRIVE_STATE_OFFLINE,
-                                _ => DRIVE_STATE_CORRUPT,
-                            },
-                        };
-                        result.after.drives[index].state = drive_state.to_string();
-                    }
-                    _ => {}
-                }
+            if let (Some(DiskError::VolumeNotFound | DiskError::FileNotFound), Some(disk)) = (err, disk) {
+                let vol_path = Path::new(bucket).join(object);
+                let drive_state = match disk.make_volume(vol_path.to_str().unwrap()).await {
+                    Ok(_) => DRIVE_STATE_OK,
+                    Err(merr) => match merr {
+                        DiskError::VolumeExists => DRIVE_STATE_OK,
+                        DiskError::DiskNotFound => DRIVE_STATE_OFFLINE,
+                        _ => DRIVE_STATE_CORRUPT,
+                    },
+                };
+                result.after.drives[index].state = drive_state.to_string();
             }
         }
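
Folding the single-arm `match` into the `if let` is clippy's `collapsible_match`; an or-pattern inside the tuple pattern does the filtering in one step. A reduced sketch:

    #[allow(dead_code)]
    enum DriveErr { VolumeNotFound, FileNotFound, Corrupt }

    fn main() {
        let err = Some(DriveErr::FileNotFound);
        // before: clippy::collapsible_match
        if let Some(e) = &err {
            match e {
                DriveErr::VolumeNotFound | DriveErr::FileNotFound => println!("recreate volume"),
                _ => {}
            }
        }
        // after: the or-pattern moves into the outer pattern
        if let Some(DriveErr::VolumeNotFound | DriveErr::FileNotFound) = &err {
            println!("recreate volume");
        }
    }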
@@ -5563,7 +5558,7 @@ async fn disks_with_all_parts(
                 if index < vec.len() {
                     if verify_err.is_some() {
                         info!("verify_err");
-                        vec[index] = conv_part_err_to_int(&verify_err.as_ref().map(|e| e.clone().into()));
+                        vec[index] = conv_part_err_to_int(&verify_err.clone());
                     } else {
                         info!("verify_resp, verify_resp.results {}", verify_resp.results[p]);
                         vec[index] = verify_resp.results[p];
@@ -5620,7 +5615,7 @@ pub fn should_heal_object_on_disk(
             }
         }
     }
-    (false, err.as_ref().map(|e| e.clone()))
+    (false, err.clone())
 }
 async fn get_disks_info(disks: &[Option<DiskStore>], eps: &[Endpoint]) -> Vec<madmin::Disk> {
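
`err.as_ref().map(|e| e.clone())` builds exactly what `err.clone()` returns, so both hunks here are the same `map_clone` family as the `errs_clone` change earlier.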

View File

@@ -1319,7 +1319,7 @@ impl StorageAPI for ECStore {
     async fn make_bucket(&self, bucket: &str, opts: &MakeBucketOptions) -> Result<()> {
         if !is_meta_bucketname(bucket) {
             if let Err(err) = check_valid_bucket_name_strict(bucket) {
-                return Err(StorageError::BucketNameInvalid(err.to_string()).into());
+                return Err(StorageError::BucketNameInvalid(err.to_string()));
             }
         // TODO: nslock
@@ -1390,11 +1390,11 @@ impl StorageAPI for ECStore {
     #[tracing::instrument(skip(self))]
     async fn delete_bucket(&self, bucket: &str, opts: &DeleteBucketOptions) -> Result<()> {
         if is_meta_bucketname(bucket) {
-            return Err(StorageError::BucketNameInvalid(bucket.to_string()).into());
+            return Err(StorageError::BucketNameInvalid(bucket.to_string()));
         }
         if let Err(err) = check_valid_bucket_name(bucket) {
-            return Err(StorageError::BucketNameInvalid(err.to_string()).into());
+            return Err(StorageError::BucketNameInvalid(err.to_string()));
         }
         // TODO: nslock
@@ -2186,7 +2186,7 @@ impl StorageAPI for ECStore {
         for (index, err) in errs.iter().enumerate() {
             match err {
                 Some(err) => {
-                    if is_err_object_not_found(&err) || is_err_version_not_found(&err) {
+                    if is_err_object_not_found(err) || is_err_version_not_found(err) {
                         continue;
                     }
                     return Ok((ress.remove(index), Some(err.clone())));
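
Removing `.into()` is clippy's `useless_conversion`: the expression already has the function's error type (these methods appear to return `Result<_, StorageError>`), so the call converts a type to itself. A reduced sketch:

    fn check(name: &str) -> Result<(), String> {
        let err = String::from("invalid name: ") + name;
        Err(err.into()) // clippy: useless_conversion — `err` is already a String; `Err(err)` suffices
    }

    fn main() {
        assert!(check("..").is_err());
    }

The `&err` → `err` change in the last hunk is `needless_borrow`: the match arm already binds `err` as a reference.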

View File

@@ -1,24 +1,24 @@
+use crate::StorageAPI;
 use crate::bucket::metadata_sys::get_versioning_config;
 use crate::bucket::versioning::VersioningApi;
-use crate::cache_value::metacache_set::{list_path_raw, ListPathRawOptions};
+use crate::cache_value::metacache_set::{ListPathRawOptions, list_path_raw};
 use crate::disk::error::DiskError;
 use crate::disk::{DiskInfo, DiskStore};
 use crate::error::{
-    is_all_not_found, is_all_volume_not_found, is_err_bucket_not_found, to_object_err, Error, Result, StorageError,
+    Error, Result, StorageError, is_all_not_found, is_all_volume_not_found, is_err_bucket_not_found, to_object_err,
 };
 use crate::peer::is_reserved_or_invalid_bucket;
 use crate::set_disk::SetDisks;
 use crate::store::check_list_objs_args;
 use crate::store_api::{ListObjectVersionsInfo, ListObjectsInfo, ObjectInfo, ObjectOptions};
-use crate::utils::path::{self, base_dir_from_prefix, SLASH_SEPARATOR};
-use crate::StorageAPI;
+use crate::utils::path::{self, SLASH_SEPARATOR, base_dir_from_prefix};
 use crate::{store::ECStore, store_api::ListObjectsV2Info};
 use futures::future::join_all;
 use rand::seq::SliceRandom;
 use rand::thread_rng;
 use rustfs_filemeta::{
-    merge_file_meta_versions, FileInfo, MetaCacheEntries, MetaCacheEntriesSorted, MetaCacheEntriesSortedResult, MetaCacheEntry,
-    MetadataResolutionParams,
+    FileInfo, MetaCacheEntries, MetaCacheEntriesSorted, MetaCacheEntriesSortedResult, MetaCacheEntry, MetadataResolutionParams,
+    merge_file_meta_versions,
 };
 use std::collections::HashMap;
 use std::sync::Arc;
@@ -837,11 +837,7 @@ impl ECStore {
                     if fiter(&fi) {
                         let item = ObjectInfoOrErr {
                             item: Some(ObjectInfo::from_file_info(&fi, &bucket, &fi.name, {
-                                if let Some(v) = &vcf {
-                                    v.versioned(&fi.name)
-                                } else {
-                                    false
-                                }
+                                if let Some(v) = &vcf { v.versioned(&fi.name) } else { false }
                             })),
                             err: None,
                         };
@@ -853,11 +849,7 @@ impl ECStore {
                 } else {
                     let item = ObjectInfoOrErr {
                         item: Some(ObjectInfo::from_file_info(&fi, &bucket, &fi.name, {
-                            if let Some(v) = &vcf {
-                                v.versioned(&fi.name)
-                            } else {
-                                false
-                            }
+                            if let Some(v) = &vcf { v.versioned(&fi.name) } else { false }
                         })),
                         err: None,
                     };
@@ -892,12 +884,8 @@ impl ECStore {
                 if let Some(fiter) = opts.filter {
                     if fiter(fi) {
                         let item = ObjectInfoOrErr {
-                            item: Some(ObjectInfo::from_file_info(&fi, &bucket, &fi.name, {
-                                if let Some(v) = &vcf {
-                                    v.versioned(&fi.name)
-                                } else {
-                                    false
-                                }
+                            item: Some(ObjectInfo::from_file_info(fi, &bucket, &fi.name, {
+                                if let Some(v) = &vcf { v.versioned(&fi.name) } else { false }
                             })),
                             err: None,
                         };
@@ -908,12 +896,8 @@ impl ECStore {
             }
         } else {
             let item = ObjectInfoOrErr {
-                item: Some(ObjectInfo::from_file_info(&fi, &bucket, &fi.name, {
-                    if let Some(v) = &vcf {
-                        v.versioned(&fi.name)
-                    } else {
-                        false
-                    }
+                item: Some(ObjectInfo::from_file_info(fi, &bucket, &fi.name, {
+                    if let Some(v) = &vcf { v.versioned(&fi.name) } else { false }
                 })),
                 err: None,
             };
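
The collapsed one-line `if let … { … } else { … }` bodies are rustfmt's single-line layout once a block fits the width limit; the only semantic change in these hunks appears to be `&fi` → `fi` where `fi` is already a reference (clippy's `needless_borrow`).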

View File

@@ -188,8 +188,8 @@ pub fn parse_ellipses_range(pattern: &str) -> Result<Vec<String>> {
     }
     // TODO: Add support for hexadecimals.
-    let start = ellipses_range[0].parse::<usize>().map_err(|e| Error::other(e))?;
-    let end = ellipses_range[1].parse::<usize>().map_err(|e| Error::other(e))?;
+    let start = ellipses_range[0].parse::<usize>().map_err(Error::other)?;
+    let end = ellipses_range[1].parse::<usize>().map_err(Error::other)?;
     if start > end {
         return Err(Error::other("Invalid argument:range start cannot be bigger than end"));

View File

@@ -53,10 +53,7 @@ pub fn is_local_host(host: Host<&str>, port: u16, local_port: u16) -> Result<boo
     let local_set: HashSet<IpAddr> = LOCAL_IPS.iter().copied().collect();
     let is_local_host = match host {
         Host::Domain(domain) => {
-            let ips = match (domain, 0).to_socket_addrs().map(|v| v.map(|v| v.ip()).collect::<Vec<_>>()) {
-                Ok(ips) => ips,
-                Err(err) => return Err(err),
-            };
+            let ips = (domain, 0).to_socket_addrs().map(|v| v.map(|v| v.ip()).collect::<Vec<_>>())?;
             ips.iter().any(|ip| local_set.contains(ip))
         }
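
A `match` whose `Err` arm just returns the error is clippy's `question_mark` lint; `?` performs the identical early return (plus an error conversion through `From` when needed):

    use std::net::{SocketAddr, ToSocketAddrs};

    fn resolve(domain: &str) -> std::io::Result<Vec<SocketAddr>> {
        // before: match … { Ok(ips) => ips, Err(err) => return Err(err) }
        let ips = (domain, 0).to_socket_addrs()?.collect::<Vec<_>>();
        Ok(ips)
    }

    fn main() {
        println!("{:?}", resolve("localhost"));
    }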

View File

@@ -32,7 +32,7 @@ impl<T: Operation> S3Router<T> {
// warn!("set uri {}", &path);
self.router.insert(path, operation).map_err(|e| std::io::Error::other(e))?;
self.router.insert(path, operation).map_err(std::io::Error::other)?;
Ok(())
}

View File

@@ -1,9 +1,6 @@
-use ecstore::error::StorageError;
 use s3s::{S3Error, S3ErrorCode};
-pub type Error = ApiError;
-pub type Result<T> = core::result::Result<T, Error>;
 #[derive(Debug)]
 pub struct ApiError {
     pub code: S3ErrorCode,
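
These deletions look like plain unused-import/dead-code cleanup rather than a specific clippy lint; the hunks below drop the matching `error::StorageError` and `use crate::error::Result;` imports from dependent files.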

View File

@@ -8,7 +8,6 @@ use ecstore::{
         DeleteOptions, DiskAPI, DiskInfoOptions, DiskStore, FileInfoVersions, ReadMultipleReq, ReadOptions, UpdateMetadataOpts,
         error::DiskError,
     },
-    error::StorageError,
     heal::{
         data_usage_cache::DataUsageCache,
         heal_commands::{HealOpts, get_local_background_heal_status},

View File

@@ -121,7 +121,7 @@ async fn run(opt: config::Opt) -> Result<()> {
     // Initialize event notifier
     event::init_event_notifier(opt.event_config).await;
-    let server_addr = net::parse_and_resolve_address(opt.address.as_str()).map_err(|err| Error::other(err))?;
+    let server_addr = net::parse_and_resolve_address(opt.address.as_str()).map_err(Error::other)?;
     let server_port = server_addr.port();
     let server_address = server_addr.to_string();
@@ -140,8 +140,8 @@ async fn run(opt: config::Opt) -> Result<()> {
     let local_ip = rustfs_utils::get_local_ip().ok_or(local_addr.ip()).unwrap();
     // For RPC
-    let (endpoint_pools, setup_type) = EndpointServerPools::from_volumes(server_address.clone().as_str(), opt.volumes.clone())
-        .map_err(|err| Error::other(err.to_string()))?;
+    let (endpoint_pools, setup_type) =
+        EndpointServerPools::from_volumes(server_address.clone().as_str(), opt.volumes.clone()).map_err(Error::other)?;
     // Print RustFS-style logging for pool formatting
     for (i, eps) in endpoint_pools.as_ref().iter().enumerate() {
@@ -188,9 +188,7 @@ async fn run(opt: config::Opt) -> Result<()> {
     update_erasure_type(setup_type).await;
     // Initialize the local disk
-    init_local_disks(endpoint_pools.clone())
-        .await
-        .map_err(|err| Error::other(err))?;
+    init_local_disks(endpoint_pools.clone()).await.map_err(Error::other)?;
     // Setup S3 service
     // This project uses the S3S library to implement S3 services
@@ -505,9 +503,8 @@ async fn run(opt: config::Opt) -> Result<()> {
     // init store
     let store = ECStore::new(server_address.clone(), endpoint_pools.clone())
         .await
-        .map_err(|err| {
-            error!("ECStore::new {:?}", &err);
-            err
+        .inspect_err(|err| {
+            error!("ECStore::new {:?}", err);
         })?;
     ecconfig::init();
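
A `map_err` closure that logs and hands the error back unchanged is what `Result::inspect_err` (stable since Rust 1.76) expresses directly; newer clippy suggests it via the `manual_inspect` lint:

    fn load() -> Result<u32, std::num::ParseIntError> {
        "not a number".parse::<u32>().inspect_err(|err| {
            eprintln!("parse failed: {err:?}"); // side effect only; the error flows on unchanged
        })
    }

    fn main() {
        assert!(load().is_err());
    }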
@@ -519,7 +516,7 @@ async fn run(opt: config::Opt) -> Result<()> {
             ..Default::default()
         })
         .await
-        .map_err(|err| Error::other(err))?;
+        .map_err(Error::other)?;
     let buckets = buckets_list.into_iter().map(|v| v.name).collect();

View File

@@ -4,7 +4,6 @@ use super::options::extract_metadata;
 use super::options::put_opts;
 use crate::auth::get_condition_values;
 use crate::error::ApiError;
-use crate::error::Result;
 use crate::storage::access::ReqInfo;
 use crate::storage::options::copy_dst_opts;
 use crate::storage::options::copy_src_opts;