fix:Apply suggestions from clippy 1.88

This commit is contained in:
houseme
2025-06-27 18:16:29 +08:00
parent 35489ea352
commit 749537664f
108 changed files with 642 additions and 682 deletions

View File

@@ -30,7 +30,7 @@ pub fn Home() -> Element {
#[allow(clippy::redundant_closure)]
let service = use_signal(|| ServiceManager::new());
let conf = RustFSConfig::load().unwrap_or_else(|e| {
ServiceManager::show_error(&format!("加载配置失败:{}", e));
ServiceManager::show_error(&format!("加载配置失败:{e}"));
RustFSConfig::default()
});
@@ -78,7 +78,7 @@ pub fn Home() -> Element {
}
}
Err(e) => {
ServiceManager::show_error(&format!("服务启动失败:{}", e));
ServiceManager::show_error(&format!("服务启动失败:{e}"));
}
}
// Only set loading to false when it's actually done
@@ -104,7 +104,7 @@ pub fn Home() -> Element {
}
}
Err(e) => {
ServiceManager::show_error(&format!("服务停止失败:{}", e));
ServiceManager::show_error(&format!("服务停止失败:{e}"));
}
}
debug!("service_state: {:?}", service_state.read());

View File

@@ -49,7 +49,7 @@ pub fn Setting() -> Element {
let config = config.read().clone();
spawn(async move {
if let Err(e) = service.read().restart(config).await {
ServiceManager::show_error(&format!("发送重启命令失败:{}", e));
ServiceManager::show_error(&format!("发送重启命令失败:{e}"));
}
// reset the status when you're done
loading.set(false);

View File

@@ -288,7 +288,7 @@ mod tests {
for (input, expected) in test_cases {
let result = RustFSConfig::extract_host_port(input);
assert_eq!(result, expected, "Failed for input: {}", input);
assert_eq!(result, expected, "Failed for input: {input}");
}
}
@@ -306,7 +306,7 @@ mod tests {
for input in invalid_cases {
let result = RustFSConfig::extract_host_port(input);
assert_eq!(result, None, "Should be None for input: {}", input);
assert_eq!(result, None, "Should be None for input: {input}");
}
// Special case: empty host but valid port should still work
@@ -437,7 +437,7 @@ mod tests {
#[test]
fn test_debug_format() {
let config = RustFSConfig::default_config();
let debug_str = format!("{:?}", config);
let debug_str = format!("{config:?}");
assert!(debug_str.contains("RustFSConfig"));
assert!(debug_str.contains("address"));
@@ -484,14 +484,14 @@ mod tests {
fn test_very_long_strings() {
let long_string = "a".repeat(1000);
let config = RustFSConfig {
address: format!("{}:9000", long_string),
address: format!("{long_string}:9000"),
host: long_string.clone(),
port: "9000".to_string(),
access_key: long_string.clone(),
secret_key: long_string.clone(),
domain_name: format!("{}.com", long_string),
volume_name: format!("/data/{}", long_string),
console_address: format!("{}:9001", long_string),
domain_name: format!("{long_string}.com"),
volume_name: format!("/data/{long_string}"),
console_address: format!("{long_string}:9001"),
};
assert_eq!(config.host.len(), 1000);

View File

@@ -184,7 +184,7 @@ impl ServiceManager {
let cached_hash = fs::read_to_string(&hash_path).await?;
let expected_hash = RUSTFS_HASH.lock().await;
if cached_hash == *expected_hash {
println!("Use cached rustfs: {:?}", executable_path);
println!("Use cached rustfs: {executable_path:?}");
return Ok(executable_path);
}
}
@@ -235,23 +235,23 @@ impl ServiceManager {
match cmd {
ServiceCommand::Start(config) => {
if let Err(e) = Self::start_service(&config).await {
Self::show_error(&format!("启动服务失败:{}", e));
Self::show_error(&format!("启动服务失败:{e}"));
}
}
ServiceCommand::Stop => {
if let Err(e) = Self::stop_service().await {
Self::show_error(&format!("停止服务失败:{}", e));
Self::show_error(&format!("停止服务失败:{e}"));
}
}
ServiceCommand::Restart(config) => {
if Self::check_service_status().await.is_some() {
if let Err(e) = Self::stop_service().await {
Self::show_error(&format!("重启服务失败:{}", e));
Self::show_error(&format!("重启服务失败:{e}"));
continue;
}
}
if let Err(e) = Self::start_service(&config).await {
Self::show_error(&format!("重启服务失败:{}", e));
Self::show_error(&format!("重启服务失败:{e}"));
}
}
}
@@ -283,7 +283,7 @@ impl ServiceManager {
async fn start_service(config: &RustFSConfig) -> Result<(), Box<dyn Error>> {
// Check if the service is already running
if let Some(existing_pid) = Self::check_service_status().await {
return Err(format!("服务已经在运行PID: {}", existing_pid).into());
return Err(format!("服务已经在运行PID: {existing_pid}").into());
}
// Prepare the service program
@@ -304,7 +304,7 @@ impl ServiceManager {
let ports = vec![main_port, console_port];
for port in ports {
if Self::is_port_in_use(host, port).await {
return Err(format!("端口 {} 已被占用", port).into());
return Err(format!("端口 {port} 已被占用").into());
}
}
@@ -327,7 +327,7 @@ impl ServiceManager {
// Check if the service started successfully
if Self::is_port_in_use(host, main_port).await {
Self::show_info(&format!("服务启动成功!进程 ID: {}", process_pid));
Self::show_info(&format!("服务启动成功!进程 ID: {process_pid}"));
Ok(())
} else {
@@ -387,7 +387,7 @@ impl ServiceManager {
/// println!("{:?}", result);
/// ```
async fn is_port_in_use(host: &str, port: u16) -> bool {
TcpStream::connect(format!("{}:{}", host, port)).await.is_ok()
TcpStream::connect(format!("{host}:{port}")).await.is_ok()
}
/// Show an error message
@@ -674,7 +674,7 @@ mod tests {
message: "Test message".to_string(),
};
let debug_str = format!("{:?}", result);
let debug_str = format!("{result:?}");
assert!(debug_str.contains("ServiceOperationResult"));
assert!(debug_str.contains("success: true"));
assert!(debug_str.contains("Test message"));
@@ -691,8 +691,8 @@ mod tests {
let cloned_manager = service_manager.clone();
// Both should be valid (we can't test much more without async runtime)
assert!(format!("{:?}", service_manager).contains("ServiceManager"));
assert!(format!("{:?}", cloned_manager).contains("ServiceManager"));
assert!(format!("{service_manager:?}").contains("ServiceManager"));
assert!(format!("{cloned_manager:?}").contains("ServiceManager"));
});
}
@@ -710,7 +710,7 @@ mod tests {
for (input, expected) in test_cases {
let result = ServiceManager::extract_port(input);
assert_eq!(result, expected, "Failed for input: {}", input);
assert_eq!(result, expected, "Failed for input: {input}");
}
}
@@ -729,7 +729,7 @@ mod tests {
for input in invalid_cases {
let result = ServiceManager::extract_port(input);
assert_eq!(result, None, "Should be None for input: {}", input);
assert_eq!(result, None, "Should be None for input: {input}");
}
// Special case: empty host but valid port should still work

View File

@@ -94,7 +94,7 @@ mod tests {
// We can't actually build it without creating directories,
// but we can verify the builder pattern works
let debug_str = format!("{:?}", builder);
let debug_str = format!("{builder:?}");
// The actual debug format might be different, so just check it's not empty
assert!(!debug_str.is_empty());
// Check that it contains some expected parts
@@ -112,10 +112,10 @@ mod tests {
let never = Rotation::NEVER;
// Test that rotation types can be created and formatted
assert!(!format!("{:?}", daily).is_empty());
assert!(!format!("{:?}", hourly).is_empty());
assert!(!format!("{:?}", minutely).is_empty());
assert!(!format!("{:?}", never).is_empty());
assert!(!format!("{daily:?}").is_empty());
assert!(!format!("{hourly:?}").is_empty());
assert!(!format!("{minutely:?}").is_empty());
assert!(!format!("{never:?}").is_empty());
}
#[test]
@@ -159,10 +159,10 @@ mod tests {
let error_filter = tracing_subscriber::EnvFilter::new("error");
// Test that filters can be created
assert!(!format!("{:?}", info_filter).is_empty());
assert!(!format!("{:?}", debug_filter).is_empty());
assert!(!format!("{:?}", warn_filter).is_empty());
assert!(!format!("{:?}", error_filter).is_empty());
assert!(!format!("{info_filter:?}").is_empty());
assert!(!format!("{debug_filter:?}").is_empty());
assert!(!format!("{warn_filter:?}").is_empty());
assert!(!format!("{error_filter:?}").is_empty());
}
#[test]
@@ -201,7 +201,7 @@ mod tests {
assert_eq!(suffix, "log");
// Test that these would create valid filenames
let sample_filename = format!("{}.2024-01-01.{}", prefix, suffix);
let sample_filename = format!("{prefix}.2024-01-01.{suffix}");
assert_eq!(sample_filename, "rustfs-cli.2024-01-01.log");
}
@@ -243,7 +243,7 @@ mod tests {
assert_eq!(logs_dir_name, "logs");
// Test path joining
let combined = format!("{}/{}", rustfs_dir_name, logs_dir_name);
let combined = format!("{rustfs_dir_name}/{logs_dir_name}");
assert_eq!(combined, "rustfs/logs");
}

View File

@@ -71,7 +71,7 @@ impl std::fmt::Display for SizeCategory {
SizeCategory::SizeGreaterThan1GiB => "SizeGreaterThan1GiB",
SizeCategory::SizeLastElemMarker => "SizeLastElemMarker",
};
write!(f, "{}", s)
write!(f, "{s}")
}
}
@@ -639,7 +639,7 @@ mod tests {
assert_eq!(elem.n, cloned.n);
// Test Debug trait
let debug_str = format!("{:?}", elem);
let debug_str = format!("{elem:?}");
assert!(debug_str.contains("100"));
assert!(debug_str.contains("200"));
assert!(debug_str.contains("5"));
@@ -755,8 +755,7 @@ mod tests {
assert_eq!(
test_latency.totals[expected_idx].n, 1,
"Failed for timestamp {} (expected index {})",
timestamp, expected_idx
"Failed for timestamp {timestamp} (expected index {expected_idx})"
);
}
}
@@ -796,7 +795,7 @@ mod tests {
n: 789,
};
let debug_str = format!("{:?}", elem);
let debug_str = format!("{elem:?}");
assert!(debug_str.contains("123"));
assert!(debug_str.contains("456"));
assert!(debug_str.contains("789"));

View File

@@ -612,7 +612,7 @@ mod tests {
timeout: Duration::from_secs(5),
retry_interval: Duration::from_millis(100),
};
let debug_str = format!("{:?}", opts);
let debug_str = format!("{opts:?}");
assert!(debug_str.contains("timeout"));
assert!(debug_str.contains("retry_interval"));
}
@@ -850,7 +850,7 @@ mod tests {
let lockers = create_mock_lockers(1);
let mutex = DRWMutex::new("owner1".to_string(), names, lockers);
let debug_str = format!("{:?}", mutex);
let debug_str = format!("{mutex:?}");
assert!(debug_str.contains("DRWMutex"));
assert!(debug_str.contains("owner"));
assert!(debug_str.contains("names"));

View File

@@ -117,8 +117,7 @@ impl Locker for LocalLocker {
async fn lock(&mut self, args: &LockArgs) -> Result<bool> {
if args.resources.len() > MAX_DELETE_LIST {
return Err(Error::other(format!(
"internal error: LocalLocker.lock called with more than {} resources",
MAX_DELETE_LIST
"internal error: LocalLocker.lock called with more than {MAX_DELETE_LIST} resources"
)));
}
@@ -153,8 +152,7 @@ impl Locker for LocalLocker {
async fn unlock(&mut self, args: &LockArgs) -> Result<bool> {
if args.resources.len() > MAX_DELETE_LIST {
return Err(Error::other(format!(
"internal error: LocalLocker.unlock called with more than {} resources",
MAX_DELETE_LIST
"internal error: LocalLocker.unlock called with more than {MAX_DELETE_LIST} resources"
)));
}
@@ -165,9 +163,9 @@ impl Locker for LocalLocker {
Some(lris) => {
if !is_write_lock(lris) {
if err_info.is_empty() {
err_info = format!("unlock attempted on a read locked entity: {}", resource);
err_info = format!("unlock attempted on a read locked entity: {resource}");
} else {
err_info.push_str(&format!(", {}", resource));
err_info.push_str(&format!(", {resource}"));
}
} else {
lris.retain(|lri| {
@@ -249,7 +247,7 @@ impl Locker for LocalLocker {
match self.lock_map.get_mut(resource) {
Some(lris) => {
if is_write_lock(lris) {
return Err(Error::other(format!("runlock attempted on a write locked entity: {}", resource)));
return Err(Error::other(format!("runlock attempted on a write locked entity: {resource}")));
} else {
lris.retain(|lri| {
if lri.uid == args.uid && (args.owner.is_empty() || lri.owner == args.owner) {
@@ -405,10 +403,10 @@ mod test {
};
local_locker.lock(&args).await?;
println!("lock local_locker: {:?} \n", local_locker);
println!("lock local_locker: {local_locker:?} \n");
local_locker.unlock(&args).await?;
println!("unlock local_locker: {:?}", local_locker);
println!("unlock local_locker: {local_locker:?}");
Ok(())
}

View File

@@ -25,7 +25,7 @@ impl Locker for RemoteClient {
let args = serde_json::to_string(args)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(GenerallyLockRequest { args });
let response = client.lock(request).await.map_err(Error::other)?.into_inner();
@@ -42,7 +42,7 @@ impl Locker for RemoteClient {
let args = serde_json::to_string(args)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(GenerallyLockRequest { args });
let response = client.un_lock(request).await.map_err(Error::other)?.into_inner();
@@ -59,7 +59,7 @@ impl Locker for RemoteClient {
let args = serde_json::to_string(args)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(GenerallyLockRequest { args });
let response = client.r_lock(request).await.map_err(Error::other)?.into_inner();
@@ -76,7 +76,7 @@ impl Locker for RemoteClient {
let args = serde_json::to_string(args)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(GenerallyLockRequest { args });
let response = client.r_un_lock(request).await.map_err(Error::other)?.into_inner();
@@ -93,7 +93,7 @@ impl Locker for RemoteClient {
let args = serde_json::to_string(args)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(GenerallyLockRequest { args });
let response = client.refresh(request).await.map_err(Error::other)?.into_inner();
@@ -110,7 +110,7 @@ impl Locker for RemoteClient {
let args = serde_json::to_string(args)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(GenerallyLockRequest { args });
let response = client.force_un_lock(request).await.map_err(Error::other)?.into_inner();

View File

@@ -261,11 +261,11 @@ fn fmt() {
if status.success() {
println!("cargo fmt executed successfully.");
} else {
eprintln!("cargo fmt failed with status: {:?}", status);
eprintln!("cargo fmt failed with status: {status:?}");
}
}
Err(e) => {
eprintln!("Failed to execute cargo fmt: {}", e);
eprintln!("Failed to execute cargo fmt: {e}");
}
}
}

View File

@@ -231,10 +231,10 @@ mod tests {
#[test]
fn test_const_str_concat_functionality() {
// Test const_str::concat macro functionality
let expected_address = format!(":{}", DEFAULT_PORT);
let expected_address = format!(":{DEFAULT_PORT}");
assert_eq!(DEFAULT_ADDRESS, expected_address);
let expected_console_address = format!(":{}", DEFAULT_CONSOLE_PORT);
let expected_console_address = format!(":{DEFAULT_CONSOLE_PORT}");
assert_eq!(DEFAULT_CONSOLE_ADDRESS, expected_console_address);
}
@@ -256,9 +256,9 @@ mod tests {
];
for constant in &string_constants {
assert!(!constant.is_empty(), "String constant should not be empty: {}", constant);
assert!(!constant.starts_with(' '), "String constant should not start with space: {}", constant);
assert!(!constant.ends_with(' '), "String constant should not end with space: {}", constant);
assert!(!constant.is_empty(), "String constant should not be empty: {constant}");
assert!(!constant.starts_with(' '), "String constant should not start with space: {constant}");
assert!(!constant.ends_with(' '), "String constant should not end with space: {constant}");
}
}
@@ -284,8 +284,8 @@ mod tests {
// These are default values, should be changed in production environments
println!("Security Warning: Default credentials detected!");
println!("Access Key: {}", DEFAULT_ACCESS_KEY);
println!("Secret Key: {}", DEFAULT_SECRET_KEY);
println!("Access Key: {DEFAULT_ACCESS_KEY}");
println!("Secret Key: {DEFAULT_SECRET_KEY}");
println!("These should be changed in production environments!");
// Verify that key lengths meet minimum security requirements
@@ -312,11 +312,11 @@ mod tests {
let ports = [DEFAULT_PORT, DEFAULT_CONSOLE_PORT];
let mut unique_ports = std::collections::HashSet::new();
for port in &ports {
assert!(unique_ports.insert(port), "Port {} is duplicated", port);
assert!(unique_ports.insert(port), "Port {port} is duplicated");
}
// Address format consistency
assert_eq!(DEFAULT_ADDRESS, format!(":{}", DEFAULT_PORT));
assert_eq!(DEFAULT_CONSOLE_ADDRESS, format!(":{}", DEFAULT_CONSOLE_PORT));
assert_eq!(DEFAULT_ADDRESS, format!(":{DEFAULT_PORT}"));
assert_eq!(DEFAULT_CONSOLE_ADDRESS, format!(":{DEFAULT_CONSOLE_PORT}"));
}
}

View File

@@ -178,7 +178,7 @@ impl From<uuid::Error> for Error {
impl From<rmp::decode::MarkerReadError> for Error {
fn from(e: rmp::decode::MarkerReadError) -> Self {
let serr = format!("{:?}", e);
let serr = format!("{e:?}");
Error::RmpDecodeMarkerRead(serr)
}
}
@@ -423,7 +423,7 @@ mod tests {
];
for kind in io_error_kinds {
let io_error = IoError::new(kind, format!("test error for {:?}", kind));
let io_error = IoError::new(kind, format!("test error for {kind:?}"));
let filemeta_error: Error = io_error.into();
match filemeta_error {
@@ -434,7 +434,7 @@ mod tests {
assert_eq!(extracted_io_error.kind(), kind);
assert!(extracted_io_error.to_string().contains("test error"));
}
_ => panic!("Expected Io variant for kind {:?}", kind),
_ => panic!("Expected Io variant for kind {kind:?}"),
}
}
}

View File

@@ -72,7 +72,7 @@ impl std::fmt::Display for ErasureAlgo {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
match self {
ErasureAlgo::Invalid => write!(f, "Invalid"),
ErasureAlgo::ReedSolomon => write!(f, "{}", ERASURE_ALGORITHM),
ErasureAlgo::ReedSolomon => write!(f, "{ERASURE_ALGORITHM}"),
}
}
}
@@ -312,53 +312,53 @@ impl FileInfo {
pub fn set_tier_free_version_id(&mut self, version_id: &str) {
self.metadata
.insert(format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TIER_FV_ID), version_id.to_string());
.insert(format!("{RESERVED_METADATA_PREFIX_LOWER}{TIER_FV_ID}"), version_id.to_string());
}
pub fn tier_free_version_id(&self) -> String {
self.metadata[&format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TIER_FV_ID)].clone()
self.metadata[&format!("{RESERVED_METADATA_PREFIX_LOWER}{TIER_FV_ID}")].clone()
}
pub fn set_tier_free_version(&mut self) {
self.metadata
.insert(format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TIER_FV_MARKER), "".to_string());
.insert(format!("{RESERVED_METADATA_PREFIX_LOWER}{TIER_FV_MARKER}"), "".to_string());
}
pub fn set_skip_tier_free_version(&mut self) {
self.metadata
.insert(format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TIER_SKIP_FV_ID), "".to_string());
.insert(format!("{RESERVED_METADATA_PREFIX_LOWER}{TIER_SKIP_FV_ID}"), "".to_string());
}
pub fn skip_tier_free_version(&self) -> bool {
self.metadata
.contains_key(&format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TIER_SKIP_FV_ID))
.contains_key(&format!("{RESERVED_METADATA_PREFIX_LOWER}{TIER_SKIP_FV_ID}"))
}
pub fn tier_free_version(&self) -> bool {
self.metadata
.contains_key(&format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TIER_FV_MARKER))
.contains_key(&format!("{RESERVED_METADATA_PREFIX_LOWER}{TIER_FV_MARKER}"))
}
pub fn set_inline_data(&mut self) {
self.metadata
.insert(format!("{}inline-data", RESERVED_METADATA_PREFIX_LOWER).to_owned(), "true".to_owned());
.insert(format!("{RESERVED_METADATA_PREFIX_LOWER}inline-data").to_owned(), "true".to_owned());
}
pub fn set_data_moved(&mut self) {
self.metadata
.insert(format!("{}data-moved", RESERVED_METADATA_PREFIX_LOWER).to_owned(), "true".to_owned());
.insert(format!("{RESERVED_METADATA_PREFIX_LOWER}data-moved").to_owned(), "true".to_owned());
}
pub fn inline_data(&self) -> bool {
self.metadata
.contains_key(format!("{}inline-data", RESERVED_METADATA_PREFIX_LOWER).as_str())
.contains_key(format!("{RESERVED_METADATA_PREFIX_LOWER}inline-data").as_str())
&& !self.is_remote()
}
/// Check if the object is compressed
pub fn is_compressed(&self) -> bool {
self.metadata
.contains_key(&format!("{}compression", RESERVED_METADATA_PREFIX_LOWER))
.contains_key(&format!("{RESERVED_METADATA_PREFIX_LOWER}compression"))
}
/// Check if the object is remote (transitioned to another tier)

View File

@@ -929,15 +929,13 @@ impl FileMetaVersion {
}
pub fn get_data_dir(&self) -> Option<Uuid> {
self.valid()
.then(|| {
if self.valid() { {
if self.version_type == VersionType::Object {
self.object.as_ref().map(|v| v.data_dir).unwrap_or_default()
} else {
None
}
})
.unwrap_or_default()
} } else { Default::default() }
}
pub fn get_version_id(&self) -> Option<Uuid> {
@@ -1028,7 +1026,7 @@ impl FileMetaVersion {
"v" => {
self.write_version = rmp::decode::read_int(&mut cur)?;
}
name => return Err(Error::other(format!("not suport field name {}", name))),
name => return Err(Error::other(format!("not suport field name {name}"))),
}
}
@@ -1325,7 +1323,7 @@ impl FileMetaVersionHeader {
let mut cur = Cursor::new(buf);
let alen = rmp::decode::read_array_len(&mut cur)?;
if alen != 7 {
return Err(Error::other(format!("version header array len err need 7 got {}", alen)));
return Err(Error::other(format!("version header array len err need 7 got {alen}")));
}
// version_id
@@ -1709,7 +1707,7 @@ impl MetaObject {
}
}
name => return Err(Error::other(format!("not suport field name {}", name))),
name => return Err(Error::other(format!("not suport field name {name}"))),
}
}
@@ -1938,19 +1936,19 @@ impl MetaObject {
pub fn set_transition(&mut self, fi: &FileInfo) {
self.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TRANSITION_STATUS),
format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITION_STATUS}"),
fi.transition_status.as_bytes().to_vec(),
);
self.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TRANSITIONED_OBJECTNAME),
format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITIONED_OBJECTNAME}"),
fi.transitioned_objname.as_bytes().to_vec(),
);
self.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TRANSITIONED_VERSION_ID),
format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITIONED_VERSION_ID}"),
fi.transition_version_id.unwrap().as_bytes().to_vec(),
);
self.meta_sys.insert(
format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TRANSITION_TIER),
format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITION_TIER}"),
fi.transition_tier.as_bytes().to_vec(),
);
}
@@ -1968,12 +1966,12 @@ impl MetaObject {
pub fn inlinedata(&self) -> bool {
self.meta_sys
.contains_key(format!("{}inline-data", RESERVED_METADATA_PREFIX_LOWER).as_str())
.contains_key(format!("{RESERVED_METADATA_PREFIX_LOWER}inline-data").as_str())
}
pub fn reset_inline_data(&mut self) {
self.meta_sys
.remove(format!("{}inline-data", RESERVED_METADATA_PREFIX_LOWER).as_str());
.remove(format!("{RESERVED_METADATA_PREFIX_LOWER}inline-data").as_str());
}
/// Remove restore headers
@@ -2001,7 +1999,7 @@ impl MetaObject {
}
if let Some(status) = self
.meta_sys
.get(&format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TRANSITION_STATUS))
.get(&format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITION_STATUS}"))
{
if *status == TRANSITION_COMPLETE.as_bytes().to_vec() {
let vid = Uuid::parse_str(&fi.tier_free_version_id());
@@ -2027,10 +2025,10 @@ impl MetaObject {
.meta_sys
.as_mut()
.unwrap()
.insert(format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, FREE_VERSION), vec![]);
let tier_key = format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TRANSITION_TIER);
let tier_obj_key = format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TRANSITIONED_OBJECTNAME);
let tier_obj_vid_key = format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, TRANSITIONED_VERSION_ID);
.insert(format!("{RESERVED_METADATA_PREFIX_LOWER}{FREE_VERSION}"), vec![]);
let tier_key = format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITION_TIER}");
let tier_obj_key = format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITIONED_OBJECTNAME}");
let tier_obj_vid_key = format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITIONED_VERSION_ID}");
let aa = [tier_key, tier_obj_key, tier_obj_vid_key];
for (k, v) in &self.meta_sys {
@@ -2192,7 +2190,7 @@ impl MetaDeleteMarker {
self.meta_sys = Some(map);
}
name => return Err(Error::other(format!("not suport field name {}", name))),
name => return Err(Error::other(format!("not suport field name {name}"))),
}
}
@@ -2913,9 +2911,9 @@ mod test {
let serialization_time = start.elapsed();
println!("性能测试结果:");
println!(" 创建时间:{:?}", creation_time);
println!(" 解析时间:{:?}", parsing_time);
println!(" 序列化时间:{:?}", serialization_time);
println!(" 创建时间:{creation_time:?}");
println!(" 解析时间:{parsing_time:?}");
println!(" 序列化时间:{serialization_time:?}");
// 基本性能断言(这些值可能需要根据实际性能调整)
assert!(parsing_time.as_millis() < 100, "解析时间应该小于 100ms");
@@ -2986,7 +2984,7 @@ mod test {
for i in 0..10 {
let fm_clone: Arc<Mutex<FileMeta>> = Arc::clone(&fm);
let handle = tokio::spawn(async move {
let mut fi = crate::fileinfo::FileInfo::new(&format!("test-{}", i), 2, 1);
let mut fi = crate::fileinfo::FileInfo::new(&format!("test-{i}"), 2, 1);
fi.version_id = Some(Uuid::new_v4());
fi.mod_time = Some(OffsetDateTime::now_utc());
@@ -3013,19 +3011,19 @@ mod test {
// 测试空结构体的内存占用
let empty_fm = FileMeta::new();
let empty_size = mem::size_of_val(&empty_fm);
println!("Empty FileMeta size: {} bytes", empty_size);
println!("Empty FileMeta size: {empty_size} bytes");
// 测试包含大量版本的内存占用
let mut large_fm = FileMeta::new();
for i in 0..100 {
let mut fi = crate::fileinfo::FileInfo::new(&format!("test-{}", i), 2, 1);
let mut fi = crate::fileinfo::FileInfo::new(&format!("test-{i}"), 2, 1);
fi.version_id = Some(Uuid::new_v4());
fi.mod_time = Some(OffsetDateTime::now_utc());
large_fm.add_version(fi).unwrap();
}
let large_size = mem::size_of_val(&large_fm);
println!("Large FileMeta size: {} bytes", large_size);
println!("Large FileMeta size: {large_size} bytes");
// 验证内存使用是合理的注意size_of_val 只计算栈上的大小,不包括堆分配)
// 对于包含 Vec 的结构体size_of_val 可能相同,因为 Vec 的容量在堆上
@@ -3041,7 +3039,7 @@ mod test {
// 添加相同时间戳的版本
let same_time = OffsetDateTime::now_utc();
for i in 0..5 {
let mut fi = crate::fileinfo::FileInfo::new(&format!("test-{}", i), 2, 1);
let mut fi = crate::fileinfo::FileInfo::new(&format!("test-{i}"), 2, 1);
fi.version_id = Some(Uuid::new_v4());
fi.mod_time = Some(same_time);
fm.add_version(fi).unwrap();
@@ -3122,7 +3120,7 @@ mod test {
// 测试适量用户元数据
for i in 0..10 {
obj.meta_user
.insert(format!("key-{:04}", i), format!("value-{:04}-{}", i, "x".repeat(10)));
.insert(format!("key-{i:04}"), format!("value-{:04}-{}", i, "x".repeat(10)));
}
// 验证可以序列化元数据
@@ -3146,7 +3144,7 @@ mod test {
// 添加对象版本
for i in 0..object_count {
let mut fi = crate::fileinfo::FileInfo::new(&format!("obj-{}", i), 2, 1);
let mut fi = crate::fileinfo::FileInfo::new(&format!("obj-{i}"), 2, 1);
fi.version_id = Some(Uuid::new_v4());
fi.mod_time = Some(OffsetDateTime::now_utc());
fm.add_version(fi).unwrap();
@@ -3240,11 +3238,11 @@ mod test {
// 创建两组不同的版本
for i in 0..3 {
let mut fi1 = crate::fileinfo::FileInfo::new(&format!("test1-{}", i), 2, 1);
let mut fi1 = crate::fileinfo::FileInfo::new(&format!("test1-{i}"), 2, 1);
fi1.version_id = Some(Uuid::new_v4());
fi1.mod_time = Some(OffsetDateTime::from_unix_timestamp(1000 + i * 10).unwrap());
let mut fi2 = crate::fileinfo::FileInfo::new(&format!("test2-{}", i), 2, 1);
let mut fi2 = crate::fileinfo::FileInfo::new(&format!("test2-{i}"), 2, 1);
fi2.version_id = Some(Uuid::new_v4());
fi2.mod_time = Some(OffsetDateTime::from_unix_timestamp(1005 + i * 10).unwrap());

View File

@@ -463,7 +463,7 @@ impl<W: AsyncWrite + Unpin> MetacacheWriter<W> {
pub async fn init(&mut self) -> Result<()> {
if !self.created {
rmp::encode::write_u8(&mut self.buf, METACACHE_STREAM_VERSION).map_err(|e| Error::other(format!("{:?}", e)))?;
rmp::encode::write_u8(&mut self.buf, METACACHE_STREAM_VERSION).map_err(|e| Error::other(format!("{e:?}")))?;
self.flush().await?;
self.created = true;
}
@@ -491,16 +491,16 @@ impl<W: AsyncWrite + Unpin> MetacacheWriter<W> {
pub async fn write_obj(&mut self, obj: &MetaCacheEntry) -> Result<()> {
self.init().await?;
rmp::encode::write_bool(&mut self.buf, true).map_err(|e| Error::other(format!("{:?}", e)))?;
rmp::encode::write_str(&mut self.buf, &obj.name).map_err(|e| Error::other(format!("{:?}", e)))?;
rmp::encode::write_bin(&mut self.buf, &obj.metadata).map_err(|e| Error::other(format!("{:?}", e)))?;
rmp::encode::write_bool(&mut self.buf, true).map_err(|e| Error::other(format!("{e:?}")))?;
rmp::encode::write_str(&mut self.buf, &obj.name).map_err(|e| Error::other(format!("{e:?}")))?;
rmp::encode::write_bin(&mut self.buf, &obj.metadata).map_err(|e| Error::other(format!("{e:?}")))?;
self.flush().await?;
Ok(())
}
pub async fn close(&mut self) -> Result<()> {
rmp::encode::write_bool(&mut self.buf, false).map_err(|e| Error::other(format!("{:?}", e)))?;
rmp::encode::write_bool(&mut self.buf, false).map_err(|e| Error::other(format!("{e:?}")))?;
self.flush().await?;
Ok(())
}
@@ -559,7 +559,7 @@ impl<R: AsyncRead + Unpin> MetacacheReader<R> {
let ver = match rmp::decode::read_u8(&mut self.read_more(2).await?) {
Ok(res) => res,
Err(err) => {
self.err = Some(Error::other(format!("{:?}", err)));
self.err = Some(Error::other(format!("{err:?}")));
0
}
};
@@ -852,7 +852,7 @@ mod tests {
let mut objs = Vec::new();
for i in 0..10 {
let info = MetaCacheEntry {
name: format!("item{}", i),
name: format!("item{i}"),
metadata: vec![0u8, 10],
cached: None,
reusable: false,

View File

@@ -98,7 +98,7 @@ pub fn create_complex_xlmeta() -> Result<Vec<u8>> {
let mut metadata = HashMap::new();
metadata.insert("Content-Type".to_string(), "application/octet-stream".to_string());
metadata.insert("X-Amz-Meta-Version".to_string(), i.to_string());
metadata.insert("X-Amz-Meta-Test".to_string(), format!("test-value-{}", i));
metadata.insert("X-Amz-Meta-Test".to_string(), format!("test-value-{i}"));
let object_version = MetaObject {
version_id: Some(version_id),

View File

@@ -37,12 +37,12 @@ async fn main() {
let server_addr = match parse_and_resolve_address(":3020") {
Ok(addr) => addr,
Err(e) => {
eprintln!("Failed to parse address: {}", e);
eprintln!("Failed to parse address: {e}");
return;
}
};
let listener = TcpListener::bind(server_addr).await.unwrap();
println!("Server running on {}", server_addr);
println!("Server running on {server_addr}");
// Self-checking after the service is started
tokio::spawn(async move {
@@ -52,7 +52,7 @@ async fn main() {
match is_service_active(server_addr).await {
Ok(true) => println!("Service health check: Successful - Service is running normally"),
Ok(false) => eprintln!("Service Health Check: Failed - Service Not Responded"),
Err(e) => eprintln!("Service health check errors:{}", e),
Err(e) => eprintln!("Service health check errors:{e}"),
}
});
@@ -60,7 +60,7 @@ async fn main() {
tokio::select! {
result = axum::serve(listener, app) => {
if let Err(e) = result {
eprintln!("Server error: {}", e);
eprintln!("Server error: {e}");
}
}
_ = tokio::signal::ctrl_c() => {
@@ -73,9 +73,9 @@ async fn main() {
async fn reset_webhook_count_with_path(axum::extract::Path(reason): axum::extract::Path<String>) -> Response<String> {
// Output the value of the current counter
let current_count = WEBHOOK_COUNT.load(Ordering::SeqCst);
println!("Current webhook count: {}", current_count);
println!("Current webhook count: {current_count}");
println!("Reset webhook count, reason: {}", reason);
println!("Reset webhook count, reason: {reason}");
// Reset the counter to 0
WEBHOOK_COUNT.store(0, Ordering::SeqCst);
println!("Webhook count has been reset to 0.");
@@ -84,8 +84,7 @@ async fn reset_webhook_count_with_path(axum::extract::Path(reason): axum::extrac
.header("Foo", "Bar")
.status(StatusCode::OK)
.body(format!(
"Webhook count reset successfully. Previous count: {}. Reason: {}",
current_count, reason
"Webhook count reset successfully. Previous count: {current_count}. Reason: {reason}"
))
.unwrap()
}
@@ -95,14 +94,14 @@ async fn reset_webhook_count_with_path(axum::extract::Path(reason): axum::extrac
async fn reset_webhook_count(Query(params): Query<ResetParams>, headers: HeaderMap) -> Response<String> {
// Output the value of the current counter
let current_count = WEBHOOK_COUNT.load(Ordering::SeqCst);
println!("Current webhook count: {}", current_count);
println!("Current webhook count: {current_count}");
let reason = params.reason.unwrap_or_else(|| "Reason not provided".to_string());
println!("Reset webhook count, reason: {}", reason);
println!("Reset webhook count, reason: {reason}");
for header in headers {
let (key, value) = header;
println!("Header: {:?}: {:?}", key, value);
println!("Header: {key:?}: {value:?}");
}
println!("Reset webhook count printed headers");
@@ -112,18 +111,18 @@ async fn reset_webhook_count(Query(params): Query<ResetParams>, headers: HeaderM
Response::builder()
.header("Foo", "Bar")
.status(StatusCode::OK)
.body(format!("Webhook count reset successfully current_count:{}", current_count))
.body(format!("Webhook count reset successfully current_count:{current_count}"))
.unwrap()
}
async fn is_service_active(addr: SocketAddr) -> Result<bool, String> {
let socket_addr = tokio::net::lookup_host(addr)
.await
.map_err(|e| format!("Unable to resolve host:{}", e))?
.map_err(|e| format!("Unable to resolve host:{e}"))?
.next()
.ok_or_else(|| "Address not found".to_string())?;
println!("Checking service status:{}", socket_addr);
println!("Checking service status:{socket_addr}");
match tokio::time::timeout(std::time::Duration::from_secs(5), tokio::net::TcpStream::connect(socket_addr)).await {
Ok(Ok(_)) => Ok(true),
@@ -131,7 +130,7 @@ async fn is_service_active(addr: SocketAddr) -> Result<bool, String> {
if e.kind() == std::io::ErrorKind::ConnectionRefused {
Ok(false)
} else {
Err(format!("Connection failed:{}", e))
Err(format!("Connection failed:{e}"))
}
}
Err(_) => Err("Connection timeout".to_string()),
@@ -149,7 +148,7 @@ async fn receive_webhook(Json(payload): Json<Value>) -> StatusCode {
let (year, month, day, hour, minute, second) = convert_seconds_to_date(seconds);
// output result
println!("current time:{:04}-{:02}-{:02} {:02}:{:02}:{:02}", year, month, day, hour, minute, second);
println!("current time:{year:04}-{month:02}-{day:02} {hour:02}:{minute:02}:{second:02}");
println!(
"received a webhook request time:{} content:\n {}",
seconds,

View File

@@ -115,6 +115,6 @@ pub enum NotificationError {
impl From<url::ParseError> for TargetError {
fn from(err: url::ParseError) -> Self {
TargetError::Configuration(format!("URL parse error: {}", err))
TargetError::Configuration(format!("URL parse error: {err}"))
}
}

View File

@@ -413,7 +413,7 @@ impl Event {
owner_identity: Identity {
principal_id: "rustfs".to_string(),
},
arn: format!("arn:rustfs:s3:::{}", bucket),
arn: format!("arn:rustfs:s3:::{bucket}"),
},
object: Object {
key: key.to_string(),

View File

@@ -68,7 +68,7 @@ impl TargetFactory for WebhookTargetFactory {
let endpoint = get(ENV_WEBHOOK_ENDPOINT, WEBHOOK_ENDPOINT)
.ok_or_else(|| TargetError::Configuration("Missing webhook endpoint".to_string()))?;
let endpoint_url = Url::parse(&endpoint)
.map_err(|e| TargetError::Configuration(format!("Invalid endpoint URL: {} (value: '{}')", e, endpoint)))?;
.map_err(|e| TargetError::Configuration(format!("Invalid endpoint URL: {e} (value: '{endpoint}')")))?;
let auth_token = get(ENV_WEBHOOK_AUTH_TOKEN, WEBHOOK_AUTH_TOKEN).unwrap_or_default();
let queue_dir = get(ENV_WEBHOOK_QUEUE_DIR, WEBHOOK_QUEUE_DIR).unwrap_or(DEFAULT_DIR.to_string());
@@ -110,7 +110,7 @@ impl TargetFactory for WebhookTargetFactory {
debug!("endpoint: {}", endpoint);
let parsed_endpoint = endpoint.trim();
Url::parse(parsed_endpoint)
.map_err(|e| TargetError::Configuration(format!("Invalid endpoint URL: {} (value: '{}')", e, parsed_endpoint)))?;
.map_err(|e| TargetError::Configuration(format!("Invalid endpoint URL: {e} (value: '{parsed_endpoint}')")))?;
let client_cert = get(ENV_WEBHOOK_CLIENT_CERT, WEBHOOK_CLIENT_CERT).unwrap_or_default();
let client_key = get(ENV_WEBHOOK_CLIENT_KEY, WEBHOOK_CLIENT_KEY).unwrap_or_default();
@@ -151,7 +151,7 @@ impl TargetFactory for MQTTTargetFactory {
let broker =
get(ENV_MQTT_BROKER, MQTT_BROKER).ok_or_else(|| TargetError::Configuration("Missing MQTT broker".to_string()))?;
let broker_url = Url::parse(&broker)
.map_err(|e| TargetError::Configuration(format!("Invalid broker URL: {} (value: '{}')", e, broker)))?;
.map_err(|e| TargetError::Configuration(format!("Invalid broker URL: {e} (value: '{broker}')")))?;
let topic =
get(ENV_MQTT_TOPIC, MQTT_TOPIC).ok_or_else(|| TargetError::Configuration("Missing MQTT topic".to_string()))?;
@@ -217,7 +217,7 @@ impl TargetFactory for MQTTTargetFactory {
let broker =
get(ENV_MQTT_BROKER, MQTT_BROKER).ok_or_else(|| TargetError::Configuration("Missing MQTT broker".to_string()))?;
let url = Url::parse(&broker)
.map_err(|e| TargetError::Configuration(format!("Invalid broker URL: {} (value: '{}')", e, broker)))?;
.map_err(|e| TargetError::Configuration(format!("Invalid broker URL: {e} (value: '{broker}')")))?;
match url.scheme() {
"tcp" | "ssl" | "ws" | "wss" | "mqtt" | "mqtts" => {}

View File

@@ -472,9 +472,9 @@ impl Drop for NotificationSystem {
pub async fn load_config_from_file(path: &str, system: &NotificationSystem) -> Result<(), NotificationError> {
let config_data = tokio::fs::read(path)
.await
.map_err(|e| NotificationError::Configuration(format!("Failed to read config file: {}", e)))?;
.map_err(|e| NotificationError::Configuration(format!("Failed to read config file: {e}")))?;
let config = Config::unmarshal(config_data.as_slice())
.map_err(|e| NotificationError::Configuration(format!("Failed to parse config: {}", e)))?;
.map_err(|e| NotificationError::Configuration(format!("Failed to parse config: {e}")))?;
system.reload_config(config).await
}

View File

@@ -50,7 +50,7 @@ impl TargetRegistry {
let factory = self
.factories
.get(target_type)
.ok_or_else(|| TargetError::Configuration(format!("Unknown target type: {}", target_type)))?;
.ok_or_else(|| TargetError::Configuration(format!("Unknown target type: {target_type}")))?;
// Validate configuration before creating target
factory.validate_config(&id, config)?;

View File

@@ -62,7 +62,7 @@ impl std::fmt::Display for Key {
if self.compress {
file_name.push_str(COMPRESS_EXT);
}
write!(f, "{}", file_name)
write!(f, "{file_name}")
}
}
@@ -377,7 +377,7 @@ where
match deserializer.next() {
Some(Ok(item)) => items.push(item),
Some(Err(e)) => {
return Err(StoreError::Deserialization(format!("Failed to deserialize item in batch: {}", e)));
return Err(StoreError::Deserialization(format!("Failed to deserialize item in batch: {e}")));
}
None => {
// Reached end of stream sooner than item_count
@@ -393,8 +393,7 @@ where
} else if items.is_empty() {
// No items at all, but file existed
return Err(StoreError::Deserialization(format!(
"No items deserialized for key {} though file existed.",
key
"No items deserialized for key {key} though file existed."
)));
}
break;

View File

@@ -115,7 +115,7 @@ impl MQTTTarget {
error = %e,
"Failed to open store for MQTT target"
);
return Err(TargetError::Storage(format!("{}", e)));
return Err(TargetError::Storage(format!("{e}")));
}
Some(Box::new(store) as Box<dyn Store<Event, Error = StoreError, Key = Key> + Send + Sync>)
} else {
@@ -172,7 +172,7 @@ impl MQTTTarget {
if let Err(e) = new_client.subscribe(&args_clone.topic, args_clone.qos).await {
error!(target_id = %target_id_clone, error = %e, "Failed to subscribe to MQTT topic during init");
return Err(TargetError::Network(format!("MQTT subscribe failed: {}", e)));
return Err(TargetError::Network(format!("MQTT subscribe failed: {e}")));
}
let mut rx_guard = bg_task_manager.initial_cancel_rx.lock().await;
@@ -231,7 +231,7 @@ impl MQTTTarget {
.ok_or_else(|| TargetError::Configuration("MQTT client not initialized".to_string()))?;
let object_name = urlencoding::decode(&event.s3.object.key)
.map_err(|e| TargetError::Encoding(format!("Failed to decode object key: {}", e)))?;
.map_err(|e| TargetError::Encoding(format!("Failed to decode object key: {e}")))?;
let key = format!("{}/{}", event.s3.bucket.name, object_name);
@@ -242,11 +242,11 @@ impl MQTTTarget {
};
let data =
serde_json::to_vec(&log).map_err(|e| TargetError::Serialization(format!("Failed to serialize event: {}", e)))?;
serde_json::to_vec(&log).map_err(|e| TargetError::Serialization(format!("Failed to serialize event: {e}")))?;
// Vec<u8> Convert to String, only for printing logs
let data_string = String::from_utf8(data.clone())
.map_err(|e| TargetError::Encoding(format!("Failed to convert event data to UTF-8: {}", e)))?;
.map_err(|e| TargetError::Encoding(format!("Failed to convert event data to UTF-8: {e}")))?;
debug!("Sending event to mqtt target: {}, event log: {}", self.id, data_string);
client
@@ -258,7 +258,7 @@ impl MQTTTarget {
warn!(target_id = %self.id, error = %e, "Publish failed due to connection issue, marking as not connected.");
TargetError::NotConnected
} else {
TargetError::Request(format!("Failed to publish message: {}", e))
TargetError::Request(format!("Failed to publish message: {e}"))
}
})?;
@@ -476,7 +476,7 @@ impl Target for MQTTTarget {
}
Err(e) => {
error!(target_id = %self.id, error = %e, "Failed to save event to store");
return Err(TargetError::Storage(format!("Failed to save event to store: {}", e)));
return Err(TargetError::Storage(format!("Failed to save event to store: {e}")));
}
}
} else {
@@ -547,7 +547,7 @@ impl Target for MQTTTarget {
error = %e,
"Failed to get event from store"
);
return Err(TargetError::Storage(format!("Failed to get event from store: {}", e)));
return Err(TargetError::Storage(format!("Failed to get event from store: {e}")));
}
};
@@ -571,7 +571,7 @@ impl Target for MQTTTarget {
}
Err(e) => {
error!(target_id = %self.id, error = %e, "Failed to delete event from store after send.");
return Err(TargetError::Storage(format!("Failed to delete event from store: {}", e)));
return Err(TargetError::Storage(format!("Failed to delete event from store: {e}")));
}
}

View File

@@ -111,19 +111,19 @@ impl WebhookTarget {
if !args.client_cert.is_empty() && !args.client_key.is_empty() {
// Add client certificate
let cert = std::fs::read(&args.client_cert)
.map_err(|e| TargetError::Configuration(format!("Failed to read client cert: {}", e)))?;
.map_err(|e| TargetError::Configuration(format!("Failed to read client cert: {e}")))?;
let key = std::fs::read(&args.client_key)
.map_err(|e| TargetError::Configuration(format!("Failed to read client key: {}", e)))?;
.map_err(|e| TargetError::Configuration(format!("Failed to read client key: {e}")))?;
let identity = reqwest::Identity::from_pem(&[cert, key].concat())
.map_err(|e| TargetError::Configuration(format!("Failed to create identity: {}", e)))?;
.map_err(|e| TargetError::Configuration(format!("Failed to create identity: {e}")))?;
client_builder = client_builder.identity(identity);
}
let http_client = Arc::new(
client_builder
.build()
.map_err(|e| TargetError::Configuration(format!("Failed to build HTTP client: {}", e)))?,
.map_err(|e| TargetError::Configuration(format!("Failed to build HTTP client: {e}")))?,
);
// Build storage
@@ -138,7 +138,7 @@ impl WebhookTarget {
if let Err(e) = store.open() {
error!("Failed to open store for Webhook target {}: {}", target_id.id, e);
return Err(TargetError::Storage(format!("{}", e)));
return Err(TargetError::Storage(format!("{e}")));
}
// Make sure that the Store trait implemented by QueueStore matches the expected error type
@@ -154,7 +154,7 @@ impl WebhookTarget {
.endpoint
.port()
.unwrap_or_else(|| if args.endpoint.scheme() == "https" { 443 } else { 80 });
format!("{}:{}", host, port)
format!("{host}:{port}")
};
// Create a cancel channel
@@ -196,7 +196,7 @@ impl WebhookTarget {
async fn send(&self, event: &Event) -> Result<(), TargetError> {
info!("Webhook Sending event to webhook target: {}", self.id);
let object_name = urlencoding::decode(&event.s3.object.key)
.map_err(|e| TargetError::Encoding(format!("Failed to decode object key: {}", e)))?;
.map_err(|e| TargetError::Encoding(format!("Failed to decode object key: {e}")))?;
let key = format!("{}/{}", event.s3.bucket.name, object_name);
@@ -207,11 +207,11 @@ impl WebhookTarget {
};
let data =
serde_json::to_vec(&log).map_err(|e| TargetError::Serialization(format!("Failed to serialize event: {}", e)))?;
serde_json::to_vec(&log).map_err(|e| TargetError::Serialization(format!("Failed to serialize event: {e}")))?;
// Vec<u8> Convert to String
let data_string = String::from_utf8(data.clone())
.map_err(|e| TargetError::Encoding(format!("Failed to convert event data to UTF-8: {}", e)))?;
.map_err(|e| TargetError::Encoding(format!("Failed to convert event data to UTF-8: {e}")))?;
debug!("Sending event to webhook target: {}, event log: {}", self.id, data_string);
// build request
@@ -243,7 +243,7 @@ impl WebhookTarget {
if e.is_timeout() || e.is_connect() {
TargetError::NotConnected
} else {
TargetError::Request(format!("Failed to send request: {}", e))
TargetError::Request(format!("Failed to send request: {e}"))
}
})?;
@@ -275,7 +275,7 @@ impl Target for WebhookTarget {
async fn is_active(&self) -> Result<bool, TargetError> {
let socket_addr = lookup_host(&self.addr)
.await
.map_err(|e| TargetError::Network(format!("Failed to resolve host: {}", e)))?
.map_err(|e| TargetError::Network(format!("Failed to resolve host: {e}")))?
.next()
.ok_or_else(|| TargetError::Network("No address found".to_string()))?;
debug!("is_active socket addr: {},target id:{}", socket_addr, self.id.id);
@@ -289,7 +289,7 @@ impl Target for WebhookTarget {
if e.kind() == std::io::ErrorKind::ConnectionRefused {
Err(TargetError::NotConnected)
} else {
Err(TargetError::Network(format!("Connection failed: {}", e)))
Err(TargetError::Network(format!("Connection failed: {e}")))
}
}
Err(_) => Err(TargetError::Timeout("Connection timed out".to_string())),
@@ -301,7 +301,7 @@ impl Target for WebhookTarget {
// Call the store method directly, no longer need to acquire the lock
store
.put(event)
.map_err(|e| TargetError::Storage(format!("Failed to save event to store: {}", e)))?;
.map_err(|e| TargetError::Storage(format!("Failed to save event to store: {e}")))?;
debug!("Event saved to store for target: {}", self.id);
Ok(())
} else {
@@ -338,7 +338,7 @@ impl Target for WebhookTarget {
Ok(event) => event,
Err(StoreError::NotFound) => return Ok(()),
Err(e) => {
return Err(TargetError::Storage(format!("Failed to get event from store: {}", e)));
return Err(TargetError::Storage(format!("Failed to get event from store: {e}")));
}
};
@@ -355,7 +355,7 @@ impl Target for WebhookTarget {
Ok(_) => debug!("Event deleted from store for target: {}, key:{}, end", self.id, key.to_string()),
Err(e) => {
error!("Failed to delete event from store: {}", e);
return Err(TargetError::Storage(format!("Failed to delete event from store: {}", e)));
return Err(TargetError::Storage(format!("Failed to delete event from store: {e}")));
}
}

View File

@@ -199,7 +199,7 @@ impl FileSinkConfig {
let temp_dir = env::temp_dir().join("rustfs");
if let Err(e) = std::fs::create_dir_all(&temp_dir) {
eprintln!("Failed to create log directory: {}", e);
eprintln!("Failed to create log directory: {e}");
return "rustfs/rustfs.log".to_string();
}
temp_dir

View File

@@ -108,7 +108,7 @@ impl FileSink {
#[async_trait]
impl Sink for FileSink {
async fn write(&self, entry: &UnifiedLogEntry) {
let line = format!("{:?}\n", entry);
let line = format!("{entry:?}\n");
let mut writer = self.writer.lock().await;
if let Err(e) = writer.write_all(line.as_bytes()).await {
@@ -156,7 +156,7 @@ impl Drop for FileSink {
rt.block_on(async {
let mut writer = writer.lock().await;
if let Err(e) = writer.flush().await {
eprintln!("Failed to flush log file {}: {}", path, e);
eprintln!("Failed to flush log file {path}: {e}");
}
});
});

View File

@@ -65,18 +65,18 @@ impl Drop for OtelGuard {
fn drop(&mut self) {
if let Some(provider) = self.tracer_provider.take() {
if let Err(err) = provider.shutdown() {
eprintln!("Tracer shutdown error: {:?}", err);
eprintln!("Tracer shutdown error: {err:?}");
}
}
if let Some(provider) = self.meter_provider.take() {
if let Err(err) = provider.shutdown() {
eprintln!("Meter shutdown error: {:?}", err);
eprintln!("Meter shutdown error: {err:?}");
}
}
if let Some(provider) = self.logger_provider.take() {
if let Err(err) = provider.shutdown() {
eprintln!("Logger shutdown error: {:?}", err);
eprintln!("Logger shutdown error: {err:?}");
}
}
}
@@ -334,8 +334,7 @@ pub(crate) fn init_telemetry(config: &OtelConfig) -> OtelGuard {
let flexi_logger_result = flexi_logger::Logger::try_with_env_or_str(logger_level)
.unwrap_or_else(|e| {
eprintln!(
"Invalid logger level: {}, using default: {}, failed error: {:?}",
logger_level, DEFAULT_LOG_LEVEL, e
"Invalid logger level: {logger_level}, using default: {DEFAULT_LOG_LEVEL}, failed error: {e:?}"
);
flexi_logger::Logger::with(log_spec.clone())
})
@@ -356,19 +355,18 @@ pub(crate) fn init_telemetry(config: &OtelConfig) -> OtelGuard {
// Save the logger handle to keep the logging
flexi_logger_handle = Some(logger);
eprintln!("Flexi logger initialized with file logging to {}/{}.log", log_directory, log_filename);
eprintln!("Flexi logger initialized with file logging to {log_directory}/{log_filename}.log");
// Log logging of log cutting conditions
match (config.log_rotation_time.as_deref(), config.log_rotation_size_mb) {
(Some(time), Some(size)) => eprintln!(
"Log rotation configured for: every {} or when size exceeds {}MB, keeping {} files",
time, size, keep_files
"Log rotation configured for: every {time} or when size exceeds {size}MB, keeping {keep_files} files"
),
(Some(time), None) => eprintln!("Log rotation configured for: every {}, keeping {} files", time, keep_files),
(Some(time), None) => eprintln!("Log rotation configured for: every {time}, keeping {keep_files} files"),
(None, Some(size)) => {
eprintln!("Log rotation configured for: when size exceeds {}MB, keeping {} files", size, keep_files)
eprintln!("Log rotation configured for: when size exceeds {size}MB, keeping {keep_files} files")
}
_ => eprintln!("Log rotation configured for: daily, keeping {} files", keep_files),
_ => eprintln!("Log rotation configured for: daily, keeping {keep_files} files"),
}
} else {
eprintln!("Failed to initialize flexi_logger: {:?}", flexi_logger_result.err());
@@ -389,7 +387,7 @@ fn build_env_filter(logger_level: &str, default_level: Option<&str>) -> EnvFilte
if !matches!(logger_level, "trace" | "debug") {
let directives: SmallVec<[&str; 5]> = smallvec::smallvec!["hyper", "tonic", "h2", "reqwest", "tower"];
for directive in directives {
filter = filter.add_directive(format!("{}=off", directive).parse().unwrap());
filter = filter.add_directive(format!("{directive}=off").parse().unwrap());
}
}

View File

@@ -660,7 +660,7 @@ mod tests {
let json = index.to_json().unwrap();
let json_str = String::from_utf8(json).unwrap();
println!("json_str: {}", json_str);
println!("json_str: {json_str}");
// 验证 JSON 内容
assert!(json_str.contains("\"compressed\": 100"));

View File

@@ -191,7 +191,7 @@ mod tests {
// Extract ETag using our generic system
let extracted_etag = resolve_etag_generic(&mut compress_reader);
println!("📋 Extracted ETag: {:?}", extracted_etag);
println!("📋 Extracted ETag: {extracted_etag:?}");
assert_eq!(extracted_etag, Some("real_world_etag".to_string()));
@@ -206,7 +206,7 @@ mod tests {
let mut compress_reader2 = CompressReader::new(encrypt_reader2, CompressionAlgorithm::Zstd);
let trait_etag = resolve_etag_generic(&mut compress_reader2);
println!("📋 Trait-based ETag: {:?}", trait_etag);
println!("📋 Trait-based ETag: {trait_etag:?}");
assert_eq!(trait_etag, Some("core_etag".to_string()));

View File

@@ -112,7 +112,7 @@ mod tests {
// 读取超限,应该返回错误
let err = match read_full(&mut r, &mut buf).await {
Ok(n) => {
println!("Read {} bytes", n);
println!("Read {n} bytes");
assert_eq!(n, 3);
assert_eq!(&buf[..n], b"abc");
None

View File

@@ -396,7 +396,7 @@ mod tests {
let expected = format!("{:x}", hasher.finalize());
println!("expected: {}", expected);
println!("expected: {expected}");
let reader = Cursor::new(data.clone());
let reader = BufReader::new(reader);
@@ -485,8 +485,7 @@ mod tests {
// 验证 etag注意压缩会改变数据所以这里的 etag 验证可能需要调整)
println!(
"Test completed successfully with compression: {}, encryption: {}",
is_compress, is_encrypt
"Test completed successfully with compression: {is_compress}, encryption: {is_encrypt}"
);
}
@@ -549,7 +548,7 @@ mod tests {
];
for algorithm in algorithms {
println!("\nTesting algorithm: {:?}", algorithm);
println!("\nTesting algorithm: {algorithm:?}");
let reader = BufReader::new(Cursor::new(data.clone()));
let reader = Box::new(WarpReader::new(reader));
@@ -576,7 +575,7 @@ mod tests {
// Verify
assert_eq!(decompressed_data.len(), data.len());
assert_eq!(&decompressed_data, &data);
println!(" ✓ Algorithm {:?} test passed", algorithm);
println!(" ✓ Algorithm {algorithm:?} test passed");
}
}
}

View File

@@ -26,7 +26,7 @@ static HTTP_DEBUG_LOG: bool = false;
#[inline(always)]
fn http_debug_log(args: std::fmt::Arguments) {
if HTTP_DEBUG_LOG {
println!("{}", args);
println!("{args}");
}
}
macro_rules! http_log {
@@ -87,7 +87,7 @@ impl HttpReader {
let resp = request
.send()
.await
.map_err(|e| Error::other(format!("HttpReader HTTP request error: {}", e)))?;
.map_err(|e| Error::other(format!("HttpReader HTTP request error: {e}")))?;
if resp.status().is_success().not() {
return Err(Error::other(format!(
@@ -98,7 +98,7 @@ impl HttpReader {
let stream = resp
.bytes_stream()
.map_err(|e| Error::other(format!("HttpReader stream error: {}", e)));
.map_err(|e| Error::other(format!("HttpReader stream error: {e}")));
Ok(Self {
inner: StreamReader::new(Box::pin(stream)),
@@ -250,8 +250,8 @@ impl HttpWriter {
}
Err(e) => {
// http_log!("[HttpWriter::spawn] HTTP request error: {e}");
let _ = err_tx.send(Error::other(format!("HTTP request failed: {}", e)));
return Err(Error::other(format!("HTTP request failed: {}", e)));
let _ = err_tx.send(Error::other(format!("HTTP request failed: {e}")));
return Err(Error::other(format!("HTTP request failed: {e}")));
}
}
@@ -298,7 +298,7 @@ impl AsyncWrite for HttpWriter {
self.sender
.try_send(Some(Bytes::copy_from_slice(buf)))
.map_err(|e| Error::other(format!("HttpWriter send error: {}", e)))?;
.map_err(|e| Error::other(format!("HttpWriter send error: {e}")))?;
Poll::Ready(Ok(buf.len()))
}
@@ -315,7 +315,7 @@ impl AsyncWrite for HttpWriter {
// http_log!("[HttpWriter::poll_shutdown] url: {}, method: {:?}", url, method);
self.sender
.try_send(None)
.map_err(|e| Error::other(format!("HttpWriter shutdown error: {}", e)))?;
.map_err(|e| Error::other(format!("HttpWriter shutdown error: {e}")))?;
// http_log!(
// "[HttpWriter::poll_shutdown] sent shutdown signal to HTTP request, url: {}, method: {:?}",
// url,
@@ -336,7 +336,7 @@ impl AsyncWrite for HttpWriter {
}
Poll::Ready(Err(e)) => {
// http_log!("[HttpWriter::poll_shutdown] HTTP request failed: {e}, url: {}, method: {:?}", url, method);
return Poll::Ready(Err(Error::other(format!("HTTP request failed: {}", e))));
return Poll::Ready(Err(Error::other(format!("HTTP request failed: {e}"))));
}
Poll::Pending => {
// http_log!("[HttpWriter::poll_shutdown] HTTP request pending, url: {}, method: {:?}", url, method);

View File

@@ -14,17 +14,16 @@ use tracing::{debug, warn};
/// This function loads a public certificate from the specified file.
pub fn load_certs(filename: &str) -> io::Result<Vec<CertificateDer<'static>>> {
// Open certificate file.
let cert_file = fs::File::open(filename).map_err(|e| certs_error(format!("failed to open {}: {}", filename, e)))?;
let cert_file = fs::File::open(filename).map_err(|e| certs_error(format!("failed to open {filename}: {e}")))?;
let mut reader = io::BufReader::new(cert_file);
// Load and return certificate.
let certs = certs(&mut reader)
.collect::<Result<Vec<_>, _>>()
.map_err(|e| certs_error(format!("certificate file {} format error:{:?}", filename, e)))?;
.map_err(|e| certs_error(format!("certificate file {filename} format error:{e:?}")))?;
if certs.is_empty() {
return Err(certs_error(format!(
"No valid certificate was found in the certificate file {}",
filename
"No valid certificate was found in the certificate file {filename}"
)));
}
Ok(certs)
@@ -34,11 +33,11 @@ pub fn load_certs(filename: &str) -> io::Result<Vec<CertificateDer<'static>>> {
/// This function loads a private key from the specified file.
pub fn load_private_key(filename: &str) -> io::Result<PrivateKeyDer<'static>> {
// Open keyfile.
let keyfile = fs::File::open(filename).map_err(|e| certs_error(format!("failed to open {}: {}", filename, e)))?;
let keyfile = fs::File::open(filename).map_err(|e| certs_error(format!("failed to open {filename}: {e}")))?;
let mut reader = io::BufReader::new(keyfile);
// Load and return a single private key.
private_key(&mut reader)?.ok_or_else(|| certs_error(format!("no private key found in {}", filename)))
private_key(&mut reader)?.ok_or_else(|| certs_error(format!("no private key found in {filename}")))
}
/// error function
@@ -58,8 +57,7 @@ pub fn load_all_certs_from_directory(
if !dir.exists() || !dir.is_dir() {
return Err(certs_error(format!(
"The certificate directory does not exist or is not a directory: {}",
dir_path
"The certificate directory does not exist or is not a directory: {dir_path}"
)));
}
@@ -71,10 +69,10 @@ pub fn load_all_certs_from_directory(
debug!("find the root directory certificate: {:?}", root_cert_path);
let root_cert_str = root_cert_path
.to_str()
.ok_or_else(|| certs_error(format!("Invalid UTF-8 in root certificate path: {:?}", root_cert_path)))?;
.ok_or_else(|| certs_error(format!("Invalid UTF-8 in root certificate path: {root_cert_path:?}")))?;
let root_key_str = root_key_path
.to_str()
.ok_or_else(|| certs_error(format!("Invalid UTF-8 in root key path: {:?}", root_key_path)))?;
.ok_or_else(|| certs_error(format!("Invalid UTF-8 in root key path: {root_key_path:?}")))?;
match load_cert_key_pair(root_cert_str, root_key_str) {
Ok((certs, key)) => {
// The root directory certificate is used as the default certificate and is stored using special keys.
@@ -95,7 +93,7 @@ pub fn load_all_certs_from_directory(
let domain_name = path
.file_name()
.and_then(|name| name.to_str())
.ok_or_else(|| certs_error(format!("invalid domain name directory:{:?}", path)))?;
.ok_or_else(|| certs_error(format!("invalid domain name directory:{path:?}")))?;
// find certificate and private key files
let cert_path = path.join(RUSTFS_TLS_CERT); // e.g., rustfs_cert.pem
@@ -117,8 +115,7 @@ pub fn load_all_certs_from_directory(
if cert_key_pairs.is_empty() {
return Err(certs_error(format!(
"No valid certificate/private key pair found in directory {}",
dir_path
"No valid certificate/private key pair found in directory {dir_path}"
)));
}
@@ -165,7 +162,7 @@ pub fn create_multi_cert_resolver(
for (domain, (certs, key)) in cert_key_pairs {
// create a signature
let signing_key = rustls::crypto::aws_lc_rs::sign::any_supported_type(&key)
.map_err(|e| certs_error(format!("unsupported private key types:{}, err:{:?}", domain, e)))?;
.map_err(|e| certs_error(format!("unsupported private key types:{domain}, err:{e:?}")))?;
// create a CertifiedKey
let certified_key = CertifiedKey::new(certs, signing_key);
@@ -175,7 +172,7 @@ pub fn create_multi_cert_resolver(
// add certificate to resolver
resolver
.add(&domain, certified_key)
.map_err(|e| certs_error(format!("failed to add a domain name certificate:{},err: {:?}", domain, e)))?;
.map_err(|e| certs_error(format!("failed to add a domain name certificate:{domain},err: {e:?}")))?;
}
}
@@ -343,10 +340,10 @@ mod tests {
];
for (input, _expected_pattern) in test_cases {
let error1 = certs_error(format!("failed to open test.pem: {}", input));
let error1 = certs_error(format!("failed to open test.pem: {input}"));
assert!(error1.to_string().contains(input));
let error2 = certs_error(format!("failed to open key.pem: {}", input));
let error2 = certs_error(format!("failed to open key.pem: {input}"));
assert!(error2.to_string().contains(input));
}
}
@@ -455,6 +452,6 @@ mod tests {
let error_size = mem::size_of_val(&error);
// Error should not be excessively large
assert!(error_size < 1024, "Error size should be reasonable, got {} bytes", error_size);
assert!(error_size < 1024, "Error size should be reasonable, got {error_size} bytes");
}
}

View File

@@ -44,7 +44,7 @@ impl std::str::FromStr for CompressionAlgorithm {
"brotli" => Ok(CompressionAlgorithm::Brotli),
"snappy" => Ok(CompressionAlgorithm::Snappy),
"none" => Ok(CompressionAlgorithm::None),
_ => Err(std::io::Error::other(format!("Unsupported compression algorithm: {}", s))),
_ => Err(std::io::Error::other(format!("Unsupported compression algorithm: {s}"))),
}
}
}
@@ -243,7 +243,7 @@ mod tests {
println!("Compression results:");
for (name, dur, size) in &times {
println!("{}: {} bytes, {:?}", name, size, dur);
println!("{name}: {size} bytes, {dur:?}");
}
// All should decompress to the original
assert_eq!(decompress_block(&gzip, CompressionAlgorithm::Gzip).unwrap(), data);

View File

@@ -54,7 +54,7 @@ mod tests {
assert!(path.exists(), "The project root directory does not exist:{}", path.display());
println!("The test is passed, the project root directory:{}", path.display());
}
Err(e) => panic!("Failed to get the project root directory:{}", e),
Err(e) => panic!("Failed to get the project root directory:{e}"),
}
}
}

View File

@@ -29,7 +29,7 @@ pub async fn read_full<R: AsyncRead + Send + Sync + Unpin>(mut reader: R, mut bu
}
return Err(std::io::Error::new(
std::io::ErrorKind::UnexpectedEof,
format!("read {} bytes, error: {}", total, e),
format!("read {total} bytes, error: {e}"),
));
}
};
@@ -116,7 +116,7 @@ mod tests {
rev[total - n..total].copy_from_slice(&buf[..n]);
count += 1;
println!("count: {}, total: {}, n: {}", count, total, n);
println!("count: {count}, total: {total}, n: {n}");
}
assert_eq!(total, size);
@@ -167,8 +167,8 @@ mod tests {
for &v in &[1u64, 127, 128, 255, 300, 16384, u32::MAX as u64] {
let n = put_uvarint(&mut buf, v);
let (decoded, m) = uvarint(&buf[..n]);
assert_eq!(decoded, v, "decode mismatch for {}", v);
assert_eq!(m as usize, n, "length mismatch for {}", v);
assert_eq!(decoded, v, "decode mismatch for {v}");
assert_eq!(m as usize, n, "length mismatch for {v}");
}
}

View File

@@ -40,16 +40,16 @@ mod tests {
assert!(ip.is_some(), "Should be able to get local IP address");
if let Some(ip_addr) = ip {
println!("Local IP address: {}", ip_addr);
println!("Local IP address: {ip_addr}");
// Verify that the returned IP address is valid
match ip_addr {
IpAddr::V4(ipv4) => {
assert!(!ipv4.is_unspecified(), "IPv4 should not be unspecified (0.0.0.0)");
println!("Got IPv4 address: {}", ipv4);
println!("Got IPv4 address: {ipv4}");
}
IpAddr::V6(ipv6) => {
assert!(!ipv6.is_unspecified(), "IPv6 should not be unspecified (::)");
println!("Got IPv6 address: {}", ipv6);
println!("Got IPv6 address: {ipv6}");
}
}
}
@@ -63,9 +63,9 @@ mod tests {
// Verify that the returned string can be parsed as a valid IP address
let parsed_ip: Result<IpAddr, _> = ip_string.parse();
assert!(parsed_ip.is_ok(), "Returned string should be a valid IP address: {}", ip_string);
assert!(parsed_ip.is_ok(), "Returned string should be a valid IP address: {ip_string}");
println!("Local IP with default: {}", ip_string);
println!("Local IP with default: {ip_string}");
}
#[test]
@@ -91,22 +91,22 @@ mod tests {
match ip {
IpAddr::V4(ipv4) => {
// Test IPv4 address properties
println!("IPv4 address: {}", ipv4);
println!("IPv4 address: {ipv4}");
assert!(!ipv4.is_multicast(), "Local IP should not be multicast");
assert!(!ipv4.is_broadcast(), "Local IP should not be broadcast");
// Check if it's a private address (usually local IP is private)
let is_private = ipv4.is_private();
let is_loopback = ipv4.is_loopback();
println!("IPv4 is private: {}, is loopback: {}", is_private, is_loopback);
println!("IPv4 is private: {is_private}, is loopback: {is_loopback}");
}
IpAddr::V6(ipv6) => {
// Test IPv6 address properties
println!("IPv6 address: {}", ipv6);
println!("IPv6 address: {ipv6}");
assert!(!ipv6.is_multicast(), "Local IP should not be multicast");
let is_loopback = ipv6.is_loopback();
println!("IPv6 is loopback: {}", is_loopback);
println!("IPv6 is loopback: {is_loopback}");
}
}
}
@@ -126,7 +126,7 @@ mod tests {
let back_to_string = parsed_ip.to_string();
// For standard IP addresses, round-trip conversion should be consistent
println!("Original: {}, Parsed back: {}", ip_string, back_to_string);
println!("Original: {ip_string}, Parsed back: {back_to_string}");
}
#[test]
@@ -186,7 +186,7 @@ mod tests {
// If it's not a loopback address, it should be routable
if !ipv4.is_loopback() {
println!("Got routable IPv4: {}", ipv4);
println!("Got routable IPv4: {ipv4}");
}
}
IpAddr::V6(ipv6) => {
@@ -194,7 +194,7 @@ mod tests {
assert!(!ipv6.is_unspecified(), "Should not be ::");
if !ipv6.is_loopback() {
println!("Got routable IPv6: {}", ipv6);
println!("Got routable IPv6: {ipv6}");
}
}
}

View File

@@ -111,7 +111,7 @@ pub fn get_available_port() -> u16 {
pub fn must_get_local_ips() -> std::io::Result<Vec<IpAddr>> {
match netif::up() {
Ok(up) => Ok(up.map(|x| x.address().to_owned()).collect()),
Err(err) => Err(std::io::Error::other(format!("Unable to get IP addresses of this host: {}", err))),
Err(err) => Err(std::io::Error::other(format!("Unable to get IP addresses of this host: {err}"))),
}
}
@@ -260,7 +260,7 @@ pub fn parse_and_resolve_address(addr_str: &str) -> std::io::Result<SocketAddr>
let port_str = port;
let port: u16 = port_str
.parse()
.map_err(|e| std::io::Error::other(format!("Invalid port format: {}, err:{:?}", addr_str, e)))?;
.map_err(|e| std::io::Error::other(format!("Invalid port format: {addr_str}, err:{e:?}")))?;
let final_port = if port == 0 {
get_available_port() // assume get_available_port is available here
} else {
@@ -342,7 +342,7 @@ mod test {
for (addr, expected) in test_cases {
let result = is_socket_addr(addr);
assert_eq!(expected, result, "addr: '{}', expected: {}, got: {}", addr, expected, result);
assert_eq!(expected, result, "addr: '{addr}', expected: {expected}, got: {result}");
}
}
@@ -353,7 +353,7 @@ mod test {
for addr in valid_cases {
let result = check_local_server_addr(addr);
assert!(result.is_ok(), "Expected '{}' to be valid, but got error: {:?}", addr, result);
assert!(result.is_ok(), "Expected '{addr}' to be valid, but got error: {result:?}");
}
// Test invalid addresses
@@ -368,15 +368,12 @@ mod test {
for (addr, expected_error_pattern) in invalid_cases {
let result = check_local_server_addr(addr);
assert!(result.is_err(), "Expected '{}' to be invalid, but it was accepted: {:?}", addr, result);
assert!(result.is_err(), "Expected '{addr}' to be invalid, but it was accepted: {result:?}");
let error_msg = result.unwrap_err().to_string();
assert!(
error_msg.contains(expected_error_pattern) || error_msg.contains("invalid socket address"),
"Error message '{}' doesn't contain expected pattern '{}' for address '{}'",
error_msg,
expected_error_pattern,
addr
"Error message '{error_msg}' doesn't contain expected pattern '{expected_error_pattern}' for address '{addr}'"
);
}
}

View File

@@ -60,7 +60,7 @@ mod tests {
let temp_dir = tempfile::tempdir().unwrap();
let info = get_info(temp_dir.path()).unwrap();
println!("Disk Info: {:?}", info);
println!("Disk Info: {info:?}");
assert!(info.total > 0);
assert!(info.free > 0);
@@ -98,7 +98,7 @@ mod tests {
let result = same_disk(path1, path2).unwrap();
// Since both temporary directories are created in the same file system,
// they should be on the same disk in most cases
println!("Path1: {}, Path2: {}, Same disk: {}", path1, path2, result);
println!("Path1: {path1}, Path2: {path2}, Same disk: {result}");
// Test passes if the function doesn't panic - the actual result depends on test environment
}

View File

@@ -44,7 +44,7 @@ pub fn retain_slash(s: &str) -> String {
if s.ends_with(SLASH_SEPARATOR) {
s.to_string()
} else {
format!("{}{}", s, SLASH_SEPARATOR)
format!("{s}{SLASH_SEPARATOR}")
}
}
@@ -91,7 +91,7 @@ pub fn path_join_buf(elements: &[&str]) -> String {
let clean_path = cpath.to_string_lossy();
if trailing_slash {
return format!("{}{}", clean_path, SLASH_SEPARATOR);
return format!("{clean_path}{SLASH_SEPARATOR}");
}
clean_path.to_string()
}
@@ -265,9 +265,9 @@ mod tests {
#[test]
fn test_base_dir_from_prefix() {
let a = "da/";
println!("---- in {}", a);
println!("---- in {a}");
let a = base_dir_from_prefix(a);
println!("---- out {}", a);
println!("---- out {a}");
}
#[test]

View File

@@ -7,7 +7,7 @@ pub fn parse_bool(str: &str) -> Result<bool> {
match str {
"1" | "t" | "T" | "true" | "TRUE" | "True" | "on" | "ON" | "On" | "enabled" => Ok(true),
"0" | "f" | "F" | "false" | "FALSE" | "False" | "off" | "OFF" | "Off" | "disabled" => Ok(false),
_ => Err(Error::other(format!("ParseBool: parsing {}", str))),
_ => Err(Error::other(format!("ParseBool: parsing {str}"))),
}
}
@@ -208,8 +208,7 @@ pub fn find_ellipses_patterns(arg: &str) -> Result<ArgPattern> {
Some(caps) => caps,
None => {
return Err(Error::other(format!(
"Invalid ellipsis format in ({}), Ellipsis range must be provided in format {{N...M}} where N and M are positive integers, M must be greater than N, with an allowed minimum range of 4",
arg
"Invalid ellipsis format in ({arg}), Ellipsis range must be provided in format {{N...M}} where N and M are positive integers, M must be greater than N, with an allowed minimum range of 4"
)));
}
};
@@ -248,8 +247,7 @@ pub fn find_ellipses_patterns(arg: &str) -> Result<ArgPattern> {
|| p.suffix.contains(CLOSE_BRACES)
{
return Err(Error::other(format!(
"Invalid ellipsis format in ({}), Ellipsis range must be provided in format {{N...M}} where N and M are positive integers, M must be greater than N, with an allowed minimum range of 4",
arg
"Invalid ellipsis format in ({arg}), Ellipsis range must be provided in format {{N...M}} where N and M are positive integers, M must be greater than N, with an allowed minimum range of 4"
)));
}
}
@@ -300,7 +298,7 @@ pub fn parse_ellipses_range(pattern: &str) -> Result<Vec<String>> {
if ellipses_range[0].starts_with('0') && ellipses_range[0].len() > 1 {
ret.push(format!("{:0width$}", i, width = ellipses_range[1].len()));
} else {
ret.push(format!("{}", i));
ret.push(format!("{i}"));
}
}
@@ -381,7 +379,7 @@ mod tests {
for (i, args, expected) in test_cases {
let ret = has_ellipses(&args);
assert_eq!(ret, expected, "Test{}: Expected {}, got {}", i, expected, ret);
assert_eq!(ret, expected, "Test{i}: Expected {expected}, got {ret}");
}
}

View File

@@ -95,7 +95,7 @@ impl UserAgent {
let cpu_info = if arch == "aarch64" { "Apple" } else { "Intel" };
// Convert to User-Agent format
format!("Macintosh; {} Mac OS X {}_{}_{}", cpu_info, major, minor, patch)
format!("Macintosh; {cpu_info} Mac OS X {major}_{minor}_{patch}")
}
#[cfg(not(target_os = "macos"))]
@@ -145,40 +145,40 @@ mod tests {
fn test_user_agent_format_basis() {
let ua = get_user_agent(ServiceType::Basis);
assert!(ua.starts_with("Mozilla/5.0"));
assert!(ua.contains(&format!("RustFS/{}", VERSION).to_string()));
println!("User-Agent: {}", ua);
assert!(ua.contains(&format!("RustFS/{VERSION}").to_string()));
println!("User-Agent: {ua}");
}
#[test]
fn test_user_agent_format_core() {
let ua = get_user_agent(ServiceType::Core);
assert!(ua.starts_with("Mozilla/5.0"));
assert!(ua.contains(&format!("RustFS/{} (core)", VERSION).to_string()));
println!("User-Agent: {}", ua);
assert!(ua.contains(&format!("RustFS/{VERSION} (core)").to_string()));
println!("User-Agent: {ua}");
}
#[test]
fn test_user_agent_format_event() {
let ua = get_user_agent(ServiceType::Event);
assert!(ua.starts_with("Mozilla/5.0"));
assert!(ua.contains(&format!("RustFS/{} (event)", VERSION).to_string()));
println!("User-Agent: {}", ua);
assert!(ua.contains(&format!("RustFS/{VERSION} (event)").to_string()));
println!("User-Agent: {ua}");
}
#[test]
fn test_user_agent_format_logger() {
let ua = get_user_agent(ServiceType::Logger);
assert!(ua.starts_with("Mozilla/5.0"));
assert!(ua.contains(&format!("RustFS/{} (logger)", VERSION).to_string()));
println!("User-Agent: {}", ua);
assert!(ua.contains(&format!("RustFS/{VERSION} (logger)").to_string()));
println!("User-Agent: {ua}");
}
#[test]
fn test_user_agent_format_custom() {
let ua = get_user_agent(ServiceType::Custom("monitor".to_string()));
assert!(ua.starts_with("Mozilla/5.0"));
assert!(ua.contains(&format!("RustFS/{} (monitor)", VERSION).to_string()));
println!("User-Agent: {}", ua);
assert!(ua.contains(&format!("RustFS/{VERSION} (monitor)").to_string()));
println!("User-Agent: {ua}");
}
#[test]
@@ -189,9 +189,9 @@ mod tests {
let ua_logger = get_user_agent(ServiceType::Logger);
let ua_custom = get_user_agent(ServiceType::Custom("monitor".to_string()));
println!("Core User-Agent: {}", ua_core);
println!("Event User-Agent: {}", ua_event);
println!("Logger User-Agent: {}", ua_logger);
println!("Custom User-Agent: {}", ua_custom);
println!("Core User-Agent: {ua_core}");
println!("Event User-Agent: {ua_event}");
println!("Logger User-Agent: {ua_logger}");
println!("Custom User-Agent: {ua_custom}");
}
}

View File

@@ -347,11 +347,11 @@ mod tests {
fn test_compression_format_debug() {
// Test Debug trait implementation
let format = CompressionFormat::Gzip;
let debug_str = format!("{:?}", format);
let debug_str = format!("{format:?}");
assert_eq!(debug_str, "Gzip");
let unknown_format = CompressionFormat::Unknown;
let unknown_debug_str = format!("{:?}", unknown_format);
let unknown_debug_str = format!("{unknown_format:?}");
assert_eq!(unknown_debug_str, "Unknown");
}
@@ -419,7 +419,7 @@ mod tests {
for format in supported_formats {
let cursor = Cursor::new(sample_content);
let decoder_result = format.get_decoder(cursor);
assert!(decoder_result.is_ok(), "Format {:?} should create decoder successfully", format);
assert!(decoder_result.is_ok(), "Format {format:?} should create decoder successfully");
}
}
@@ -453,7 +453,7 @@ mod tests {
for format in all_formats {
// Verify each format has corresponding Debug implementation
let _debug_str = format!("{:?}", format);
let _debug_str = format!("{format:?}");
// Verify each format has corresponding PartialEq implementation
assert_eq!(format, format);
@@ -480,9 +480,7 @@ mod tests {
assert_eq!(
CompressionFormat::from_extension(ext),
expected_format,
"Extension '{}' should map to {:?}",
ext,
expected_format
"Extension '{ext}' should map to {expected_format:?}"
);
}
}
@@ -502,7 +500,7 @@ mod tests {
for (format, expected_str) in format_strings {
assert_eq!(
format!("{:?}", format),
format!("{format:?}"),
expected_str,
"Format {:?} should have string representation '{}'",
format,
@@ -531,14 +529,13 @@ mod tests {
// Verify enum size is reasonable
let size = mem::size_of::<CompressionFormat>();
assert!(size <= 8, "CompressionFormat should be memory efficient, got {} bytes", size);
assert!(size <= 8, "CompressionFormat should be memory efficient, got {size} bytes");
// Verify Option<CompressionFormat> size
let option_size = mem::size_of::<Option<CompressionFormat>>();
assert!(
option_size <= 16,
"Option<CompressionFormat> should be efficient, got {} bytes",
option_size
"Option<CompressionFormat> should be efficient, got {option_size} bytes"
);
}
@@ -567,8 +564,7 @@ mod tests {
let is_known = format != CompressionFormat::Unknown;
assert_eq!(
is_known, should_be_known,
"Extension '{}' recognition mismatch: expected {}, got {}",
ext, should_be_known, is_known
"Extension '{ext}' recognition mismatch: expected {should_be_known}, got {is_known}"
);
}
}
@@ -601,7 +597,7 @@ mod tests {
for (format, ext) in consistency_tests {
let parsed_format = CompressionFormat::from_extension(ext);
assert_eq!(parsed_format, format, "Extension '{}' should consistently map to {:?}", ext, format);
assert_eq!(parsed_format, format, "Extension '{ext}' should consistently map to {format:?}");
}
}
@@ -770,7 +766,7 @@ mod tests {
for ext in unknown_extensions {
let format = CompressionFormat::from_extension(ext);
assert_eq!(format, CompressionFormat::Unknown, "Extension '{}' should default to Unknown", ext);
assert_eq!(format, CompressionFormat::Unknown, "Extension '{ext}' should default to Unknown");
}
}
@@ -929,7 +925,7 @@ mod tests {
for level in levels {
// 验证每个级别都有对应的 Debug 实现
let _debug_str = format!("{:?}", level);
let _debug_str = format!("{level:?}");
}
}
@@ -955,7 +951,7 @@ mod tests {
let _supported = format.is_supported();
// 验证 Debug 实现
let _debug = format!("{:?}", format);
let _debug = format!("{format:?}");
}
}
}

View File

@@ -80,9 +80,9 @@ mod tests {
let argon2_chacha = ID::Argon2idChaCHa20Poly1305;
let pbkdf2 = ID::Pbkdf2AESGCM;
assert_eq!(format!("{:?}", argon2_aes), "Argon2idAESGCM");
assert_eq!(format!("{:?}", argon2_chacha), "Argon2idChaCHa20Poly1305");
assert_eq!(format!("{:?}", pbkdf2), "Pbkdf2AESGCM");
assert_eq!(format!("{argon2_aes:?}"), "Argon2idAESGCM");
assert_eq!(format!("{argon2_chacha:?}"), "Argon2idChaCHa20Poly1305");
assert_eq!(format!("{pbkdf2:?}"), "Pbkdf2AESGCM");
}
#[test]
@@ -205,13 +205,13 @@ mod tests {
for algorithm in &algorithms {
let result = algorithm.get_key(password, salt);
assert!(result.is_ok(), "Algorithm {:?} should generate valid key", algorithm);
assert!(result.is_ok(), "Algorithm {algorithm:?} should generate valid key");
let key = result.expect("Key generation should succeed for all algorithms");
assert_eq!(key.len(), 32, "Key length should be 32 bytes for {:?}", algorithm);
assert_eq!(key.len(), 32, "Key length should be 32 bytes for {algorithm:?}");
// Verify key is not all zeros (very unlikely with proper implementation)
assert_ne!(key, [0u8; 32], "Key should not be all zeros for {:?}", algorithm);
assert_ne!(key, [0u8; 32], "Key should not be all zeros for {algorithm:?}");
}
}

View File

@@ -103,7 +103,7 @@ fn test_encrypt_decrypt_unicode_data() -> Result<(), crate::Error> {
let data = text.as_bytes();
let encrypted = encrypt_data(PASSWORD, data)?;
let decrypted = decrypt_data(PASSWORD, &encrypted)?;
assert_eq!(data, decrypted.as_slice(), "Unicode data mismatch for: {}", text);
assert_eq!(data, decrypted.as_slice(), "Unicode data mismatch for: {text}");
}
Ok(())
}
@@ -125,7 +125,7 @@ fn test_decrypt_with_corrupted_data() {
corrupted[*corrupt_index] ^= 0xFF; // Flip all bits
let result = decrypt_data(PASSWORD, &corrupted);
assert!(result.is_err(), "{} should cause decryption to fail", description);
assert!(result.is_err(), "{description} should cause decryption to fail");
}
}
@@ -146,7 +146,7 @@ fn test_decrypt_with_truncated_data() {
for &length in &truncation_lengths {
let truncated = &encrypted[..length.min(encrypted.len())];
let result = decrypt_data(PASSWORD, truncated);
assert!(result.is_err(), "Truncated data (length {}) should cause decryption to fail", length);
assert!(result.is_err(), "Truncated data (length {length}) should cause decryption to fail");
}
}
@@ -219,7 +219,7 @@ fn test_password_variations() -> Result<(), crate::Error> {
for password in &password_variations {
let encrypted = encrypt_data(password, data)?;
let decrypted = decrypt_data(password, &encrypted)?;
assert_eq!(data, decrypted.as_slice(), "Failed with password: {:?}", password);
assert_eq!(data, decrypted.as_slice(), "Failed with password: {password:?}");
}
Ok(())
@@ -292,7 +292,7 @@ fn test_concurrent_encryption_safety() -> Result<(), crate::Error> {
thread::spawn(move || {
let encrypted = encrypt_data(&password, &data).expect("Encryption should succeed");
let decrypted = decrypt_data(&password, &encrypted).expect("Decryption should succeed");
assert_eq!(**data, decrypted, "Thread {} failed", i);
assert_eq!(**data, decrypted, "Thread {i} failed");
})
})
.collect();

View File

@@ -74,7 +74,7 @@ fn test_jwt_decode_invalid_token_format() {
for invalid_token in &invalid_tokens {
let result = decode(invalid_token, secret);
assert!(result.is_err(), "Invalid token '{}' should fail to decode", invalid_token);
assert!(result.is_err(), "Invalid token '{invalid_token}' should fail to decode");
}
}
@@ -207,7 +207,7 @@ fn test_jwt_token_structure() {
// Each part should be non-empty
for (i, part) in parts.iter().enumerate() {
assert!(!part.is_empty(), "JWT part {} should not be empty", i);
assert!(!part.is_empty(), "JWT part {i} should not be empty");
}
}

View File

@@ -34,13 +34,13 @@ async fn test_lock_unlock_rpc() -> Result<(), Box<dyn Error>> {
let response = client.lock(request).await?.into_inner();
println!("request ended");
if let Some(error_info) = response.error_info {
panic!("can not get lock: {}", error_info);
panic!("can not get lock: {error_info}");
}
let request = Request::new(GenerallyLockRequest { args });
let response = client.un_lock(request).await?.into_inner();
if let Some(error_info) = response.error_info {
panic!("can not get un_lock: {}", error_info);
panic!("can not get un_lock: {error_info}");
}
Ok(())

View File

@@ -52,9 +52,9 @@ async fn ping() -> Result<(), Box<dyn Error>> {
// Print response
let ping_response_body = flatbuffers::root::<PingBody>(&response.body);
if let Err(e) = ping_response_body {
eprintln!("{}", e);
eprintln!("{e}");
} else {
println!("ping_resp:body(flatbuffer): {:?}", ping_response_body);
println!("ping_resp:body(flatbuffer): {ping_response_body:?}");
}
Ok(())
@@ -93,7 +93,7 @@ async fn list_volumes() -> Result<(), Box<dyn Error>> {
.filter_map(|json_str| serde_json::from_str::<VolumeInfo>(&json_str).ok())
.collect();
println!("{:?}", volume_infos);
println!("{volume_infos:?}");
Ok(())
}
@@ -127,7 +127,7 @@ async fn walk_dir() -> Result<(), Box<dyn Error>> {
println!("{}", resp.error_info.unwrap_or("".to_string()));
}
let entry = serde_json::from_str::<MetaCacheEntry>(&resp.meta_cache_entry)
.map_err(|_e| std::io::Error::other(format!("Unexpected response: {:?}", response)))
.map_err(|_e| std::io::Error::other(format!("Unexpected response: {response:?}")))
.unwrap();
out.write_obj(&entry).await.unwrap();
}
@@ -136,7 +136,7 @@ async fn walk_dir() -> Result<(), Box<dyn Error>> {
break;
}
_ => {
println!("Unexpected response: {:?}", response);
println!("Unexpected response: {response:?}");
let _ = out.close().await;
break;
}
@@ -146,7 +146,7 @@ async fn walk_dir() -> Result<(), Box<dyn Error>> {
let job2 = spawn(async move {
let mut reader = MetacacheReader::new(rd);
while let Ok(Some(entry)) = reader.peek().await {
println!("{:?}", entry);
println!("{entry:?}");
}
});
@@ -168,7 +168,7 @@ async fn read_all() -> Result<(), Box<dyn Error>> {
let volume_infos = response.data;
println!("{}", response.success);
println!("{:?}", volume_infos);
println!("{volume_infos:?}");
Ok(())
}
@@ -187,6 +187,6 @@ async fn storage_info() -> Result<(), Box<dyn Error>> {
let mut buf = Deserializer::new(Cursor::new(info));
let storage_info: madmin::StorageInfo = Deserialize::deserialize(&mut buf).unwrap();
println!("{:?}", storage_info);
println!("{storage_info:?}");
Ok(())
}

View File

@@ -81,7 +81,7 @@ fn bench_encode_analysis(c: &mut Criterion) {
);
}
Err(e) => {
println!("⚠️ Skipping test {} - configuration not supported: {}", test_name, e);
println!("⚠️ Skipping test {test_name} - configuration not supported: {e}");
}
}
group.finish();
@@ -130,7 +130,7 @@ fn bench_decode_analysis(c: &mut Criterion) {
group.finish();
}
Err(e) => {
println!("⚠️ Skipping decode test {} - configuration not supported: {}", test_name, e);
println!("⚠️ Skipping decode test {test_name} - configuration not supported: {e}");
}
}
}
@@ -152,7 +152,7 @@ fn bench_shard_size_analysis(c: &mut Criterion) {
for shard_size in shard_sizes {
let total_size = shard_size * data_shards;
let data = (0..total_size).map(|i| (i % 256) as u8).collect::<Vec<u8>>();
let test_name = format!("{}B_shard_simd", shard_size);
let test_name = format!("{shard_size}B_shard_simd");
group.throughput(Throughput::Bytes(total_size as u64));
@@ -169,7 +169,7 @@ fn bench_shard_size_analysis(c: &mut Criterion) {
});
}
Err(e) => {
println!("⚠️ Skipping shard size test {} - not supported: {}", test_name, e);
println!("⚠️ Skipping shard size test {test_name} - not supported: {e}");
}
}
}
@@ -238,7 +238,7 @@ fn bench_error_recovery_analysis(c: &mut Criterion) {
match erasure.encode_data(&data) {
Ok(encoded_shards) => {
let test_name = format!("{}+{}_{}", data_shards, parity_shards, scenario_name);
let test_name = format!("{data_shards}+{parity_shards}_{scenario_name}");
group.bench_with_input(
BenchmarkId::new("recovery", &test_name),
@@ -261,7 +261,7 @@ fn bench_error_recovery_analysis(c: &mut Criterion) {
);
}
Err(e) => {
println!("⚠️ Skipping recovery test {}: {}", scenario_name, e);
println!("⚠️ Skipping recovery test {scenario_name}: {e}");
}
}
}

View File

@@ -250,7 +250,7 @@ fn bench_shard_size_impact(c: &mut Criterion) {
group.throughput(Throughput::Bytes(total_data_size as u64));
// Test SIMD implementation
group.bench_with_input(BenchmarkId::new("simd", format!("shard_{}B", shard_size)), &data, |b, data| {
group.bench_with_input(BenchmarkId::new("simd", format!("shard_{shard_size}B")), &data, |b, data| {
let erasure = Erasure::new(data_shards, parity_shards, total_data_size);
b.iter(|| {
let shards = erasure.encode_data(black_box(data)).unwrap();
@@ -282,7 +282,7 @@ fn bench_coding_configurations(c: &mut Criterion) {
group.measurement_time(Duration::from_secs(5));
for (data_shards, parity_shards) in configs {
let config_name = format!("{}+{}", data_shards, parity_shards);
let config_name = format!("{data_shards}+{parity_shards}");
group.bench_with_input(BenchmarkId::new("encode", &config_name), &data, |b, data| {
let erasure = Erasure::new(data_shards, parity_shards, data_size);

View File

@@ -102,9 +102,9 @@ async fn is_server_resolvable(endpoint: &Endpoint) -> Result<()> {
// 打印响应
let ping_response_body = flatbuffers::root::<PingBody>(&response.body);
if let Err(e) = ping_response_body {
eprintln!("{}", e);
eprintln!("{e}");
} else {
println!("ping_resp:body(flatbuffer): {:?}", ping_response_body);
println!("ping_resp:body(flatbuffer): {ping_response_body:?}");
}
Ok(())

View File

@@ -168,7 +168,7 @@ mod tests {
assert!(wrapper.is_err());
let error = wrapper.unwrap_err();
println!("error: {:?}", error);
println!("error: {error:?}");
assert_eq!(error, DiskError::DiskNotFound);
}
}

View File

@@ -285,7 +285,7 @@ impl BucketMetadata {
self.bucket_targets_config_json = data.clone();
self.bucket_targets_config_updated_at = updated;
}
_ => return Err(Error::other(format!("config file not found : {}", config_file))),
_ => return Err(Error::other(format!("config file not found : {config_file}"))),
}
Ok(updated)

View File

@@ -32,7 +32,7 @@ impl VersioningApi for VersioningConfiguration {
if let Some(ref excluded_prefixes) = self.excluded_prefixes {
for p in excluded_prefixes.iter() {
if let Some(ref sprefix) = p.prefix {
let pattern = format!("{}*", sprefix);
let pattern = format!("{sprefix}*");
if match_simple(&pattern, prefix) {
return false;
}
@@ -62,7 +62,7 @@ impl VersioningApi for VersioningConfiguration {
if let Some(ref excluded_prefixes) = self.excluded_prefixes {
for p in excluded_prefixes.iter() {
if let Some(ref sprefix) = p.prefix {
let pattern = format!("{}*", sprefix);
let pattern = format!("{sprefix}*");
if match_simple(&pattern, prefix) {
return true;
}

View File

@@ -394,7 +394,7 @@ pub async fn check_replicate_delete(
// use crate::global::*;
fn target_reset_header(arn: &str) -> String {
format!("{}{}-{}", RESERVED_METADATA_PREFIX_LOWER, REPLICATION_RESET, arn)
format!("{RESERVED_METADATA_PREFIX_LOWER}{REPLICATION_RESET}-{arn}")
}
pub async fn get_heal_replicate_object_info(
@@ -491,7 +491,7 @@ pub async fn get_heal_replicate_object_info(
let asz = oi.get_actual_size().unwrap_or(0);
let key = format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, REPLICATION_TIMESTAMP);
let key = format!("{RESERVED_METADATA_PREFIX_LOWER}{REPLICATION_TIMESTAMP}");
let tm: Option<DateTime<Utc>> = user_defined
.get(&key)
.and_then(|v| DateTime::parse_from_rfc3339(v).ok())
@@ -819,7 +819,7 @@ impl ReplicationPool {
// }
fn get_worker_ch(&self, bucket: &str, object: &str, _sz: i64) -> Option<&Sender<Box<dyn ReplicationWorkerOperation>>> {
let h = xxh3_64(format!("{}{}", bucket, object).as_bytes()); // 计算哈希值
let h = xxh3_64(format!("{bucket}{object}").as_bytes()); // 计算哈希值
// need lock;
let workers = &self.workers_sender; // 读锁
@@ -1067,7 +1067,7 @@ impl fmt::Display for VersionPurgeStatusType {
VersionPurgeStatusType::Empty => "",
VersionPurgeStatusType::Unknown => "UNKNOWN",
};
write!(f, "{}", s)
write!(f, "{s}")
}
}
@@ -1307,7 +1307,7 @@ impl fmt::Display for ReplicateDecision {
fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
let mut entries = Vec::new();
for (key, value) in &self.targets_map {
entries.push(format!("{}={}", key, value));
entries.push(format!("{key}={value}"));
}
write!(f, "{}", entries.join(","))
}
@@ -2123,7 +2123,7 @@ async fn replicate_object_with_multipart(
.provider(provider)
.secure(false)
.build()
.map_err(|e| Error::other(format!("build minio client failed: {}", e)))?;
.map_err(|e| Error::other(format!("build minio client failed: {e}")))?;
let ret = minio_cli
.create_multipart_upload_with_versionid(tgt_cli.bucket.clone(), local_obj_info.name.clone(), rep_obj.version_id.clone())
@@ -2168,7 +2168,7 @@ async fn replicate_object_with_multipart(
}
Err(err) => {
error!("upload part {} failed: {}", index + 1, err);
Err(Error::other(format!("upload error: {}", err)))
Err(Error::other(format!("upload error: {err}")))
}
}
}
@@ -2179,7 +2179,7 @@ async fn replicate_object_with_multipart(
},
Err(err) => {
error!("reader error for part {}: {}", index + 1, err);
Err(Error::other(format!("reader error: {}", err)))
Err(Error::other(format!("reader error: {err}")))
}
}
}));
@@ -2196,7 +2196,7 @@ async fn replicate_object_with_multipart(
}
Err(join_err) => {
error!("tokio join error: {}", join_err);
return Err(Error::other(format!("join error: {}", join_err)));
return Err(Error::other(format!("join error: {join_err}")));
}
}
}
@@ -2210,12 +2210,12 @@ async fn replicate_object_with_multipart(
}
Err(err) => {
error!("finish upload failed:{}", err);
return Err(Error::other(format!("finish upload failed:{}", err)));
return Err(Error::other(format!("finish upload failed:{err}")));
}
}
}
Err(err) => {
return Err(Error::other(format!("finish upload failed:{}", err)));
return Err(Error::other(format!("finish upload failed:{err}")));
}
}
Ok(())
@@ -2729,7 +2729,7 @@ pub async fn replicate_object(ri: ReplicateObjectInfo, object_api: Arc<store::EC
// }
}
Err(err) => {
println!("Failed to get replication config: {:?}", err);
println!("Failed to get replication config: {err:?}");
}
}
}

View File

@@ -147,7 +147,7 @@ pub struct BucketRemoteTargetNotFound {
}
pub async fn init_bucket_targets(bucket: &str, meta: Arc<bucket::metadata::BucketMetadata>) {
println!("140 {}", bucket);
println!("140 {bucket}");
if let Some(sys) = GLOBAL_Bucket_Target_Sys.get() {
if let Some(tgts) = meta.bucket_target_config.clone() {
for tgt in tgts.targets {
@@ -282,7 +282,7 @@ impl BucketTargetSys {
let _ = metadata_sys::update(bucket, "bucket-targets.json", json).await;
}
Err(e) => {
println!("序列化失败{}", e);
println!("序列化失败{e}");
}
}
@@ -379,11 +379,11 @@ impl BucketTargetSys {
// .get_bucket_info(bucket, &ecstore::store_api::BucketOptions::default()).await;
match store.get_bucket_info(_bucket, &store_api::BucketOptions::default()).await {
Ok(info) => {
println!("Bucket Info: {:?}", info);
println!("Bucket Info: {info:?}");
info.versionning
}
Err(err) => {
eprintln!("Error: {:?}", err);
eprintln!("Error: {err:?}");
false
}
}
@@ -433,7 +433,7 @@ impl BucketTargetSys {
let url_str = format!("http://{}", tgt.endpoint.clone());
println!("url str is {}", url_str);
println!("url str is {url_str}");
// 转换为 Url 类型
let parsed_url = url::Url::parse(&url_str).unwrap();
@@ -451,7 +451,7 @@ impl BucketTargetSys {
.await
{
Ok(info) => {
println!("Bucket Info: {:?}", info);
println!("Bucket Info: {info:?}");
if !info.versionning {
println!("2222222222 {}", info.versionning);
return Err(SetTargetError::TargetNotVersioned(tgt.target_bucket.to_string()));
@@ -459,7 +459,7 @@ impl BucketTargetSys {
}
Err(err) => {
println!("remote bucket 369 is:{}", tgt.target_bucket);
eprintln!("Error: {:?}", err);
eprintln!("Error: {err:?}");
return Err(SetTargetError::SourceNotVersioned(tgt.target_bucket.to_string()));
}
}
@@ -629,12 +629,12 @@ impl ARN {
pub fn parse(s: &str) -> Result<Self, String> {
// ARN 必须是格式 arn:rustfs:<Type>:<REGION>:<ID>:<remote-bucket>
if !s.starts_with("arn:rustfs:") {
return Err(format!("Invalid ARN {}", s));
return Err(format!("Invalid ARN {s}"));
}
let tokens: Vec<&str> = s.split(':').collect();
if tokens.len() != 6 || tokens[4].is_empty() || tokens[5].is_empty() {
return Err(format!("Invalid ARN {}", s));
return Err(format!("Invalid ARN {s}"));
}
Ok(ARN {

View File

@@ -116,7 +116,7 @@ async fn new_and_save_server_config<S: StorageAPI>(api: Arc<S>) -> Result<Config
}
fn get_config_file() -> String {
format!("{}{}{}", CONFIG_PREFIX, SLASH_SEPARATOR, CONFIG_FILE)
format!("{CONFIG_PREFIX}{SLASH_SEPARATOR}{CONFIG_FILE}")
}
/// Handle the situation where the configuration file does not exist, create and save a new configuration

View File

@@ -48,8 +48,7 @@ fn parse_bitrot_config(s: &str) -> Result<Duration> {
Ok(months) => {
if months < RUSTFS_BITROT_CYCLE_IN_MONTHS {
return Err(Error::other(format!(
"minimum bitrot cycle is {} month(s)",
RUSTFS_BITROT_CYCLE_IN_MONTHS
"minimum bitrot cycle is {RUSTFS_BITROT_CYCLE_IN_MONTHS} month(s)"
)));
}

View File

@@ -201,7 +201,7 @@ pub fn lookup_config(kvs: &KVS, set_drive_count: usize) -> Result<Config> {
}
block.as_u64() as usize
} else {
return Err(Error::other(format!("parse {} format failed", INLINE_BLOCK_ENV)));
return Err(Error::other(format!("parse {INLINE_BLOCK_ENV} format failed")));
}
} else {
DEFAULT_INLINE_BLOCK
@@ -223,8 +223,7 @@ pub fn parse_storage_class(env: &str) -> Result<StorageClass> {
// only two elements allowed in the string - "scheme" and "number of parity drives"
if s.len() != 2 {
return Err(Error::other(format!(
"Invalid storage class format: {}. Expected 'Scheme:Number of parity drives'.",
env
"Invalid storage class format: {env}. Expected 'Scheme:Number of parity drives'."
)));
}
@@ -300,8 +299,7 @@ pub fn validate_parity_inner(ss_parity: usize, rrs_parity: usize, set_drive_coun
if ss_parity > 0 && rrs_parity > 0 && ss_parity < rrs_parity {
return Err(Error::other(format!(
"Standard storage class parity drives {} should be greater than or equal to Reduced redundancy storage class parity drives {}",
ss_parity, rrs_parity
"Standard storage class parity drives {ss_parity} should be greater than or equal to Reduced redundancy storage class parity drives {rrs_parity}"
)));
}
Ok(())

View File

@@ -101,7 +101,7 @@ impl TryFrom<&str> for Endpoint {
is_local = true;
url_parse_from_file_path(value)?
}
_ => return Err(Error::other(format!("invalid URL endpoint format: {}", e))),
_ => return Err(Error::other(format!("invalid URL endpoint format: {e}"))),
},
};
@@ -163,8 +163,8 @@ impl Endpoint {
pub fn host_port(&self) -> String {
match (self.url.host(), self.url.port()) {
(Some(host), Some(port)) => format!("{}:{}", host, port),
(Some(host), None) => format!("{}", host),
(Some(host), Some(port)) => format!("{host}:{port}"),
(Some(host), None) => format!("{host}"),
_ => String::new(),
}
}
@@ -191,7 +191,7 @@ fn url_parse_from_file_path(value: &str) -> Result<Url> {
let file_path = match Path::new(value).absolutize() {
Ok(path) => path,
Err(err) => return Err(Error::other(format!("absolute path failed: {}", err))),
Err(err) => return Err(Error::other(format!("absolute path failed: {err}"))),
};
match Url::from_file_path(file_path) {
@@ -377,12 +377,12 @@ mod test {
fn test_endpoint_display() {
// Test file path display
let file_endpoint = Endpoint::try_from("/tmp/data").unwrap();
let display_str = format!("{}", file_endpoint);
let display_str = format!("{file_endpoint}");
assert_eq!(display_str, "/tmp/data");
// Test URL display
let url_endpoint = Endpoint::try_from("http://example.com:9000/path").unwrap();
let display_str = format!("{}", url_endpoint);
let display_str = format!("{url_endpoint}");
assert_eq!(display_str, "http://example.com:9000/path");
}

View File

@@ -693,7 +693,7 @@ mod tests {
source: io_error,
};
let display_str = format!("{}", context_error);
let display_str = format!("{context_error}");
assert!(display_str.contains("/test/path"));
assert!(display_str.contains("file access denied"));
}
@@ -701,11 +701,11 @@ mod tests {
#[test]
fn test_error_debug_format() {
let error = DiskError::FileNotFound;
let debug_str = format!("{:?}", error);
let debug_str = format!("{error:?}");
assert_eq!(debug_str, "FileNotFound");
let io_error = DiskError::other("test error");
let debug_str = format!("{:?}", io_error);
let debug_str = format!("{io_error:?}");
assert!(debug_str.contains("Io"));
}

View File

@@ -410,9 +410,7 @@ mod tests {
let result = to_file_error(create_io_error(kind));
assert!(
contains_disk_error(result, expected_disk_error.clone()),
"Failed for ErrorKind::{:?} -> DiskError::{:?}",
kind,
expected_disk_error
"Failed for ErrorKind::{kind:?} -> DiskError::{expected_disk_error:?}"
);
}
}
@@ -430,9 +428,7 @@ mod tests {
let result = to_volume_error(create_io_error(kind));
assert!(
contains_disk_error(result, expected_disk_error.clone()),
"Failed for ErrorKind::{:?} -> DiskError::{:?}",
kind,
expected_disk_error
"Failed for ErrorKind::{kind:?} -> DiskError::{expected_disk_error:?}"
);
}
}

View File

@@ -180,7 +180,7 @@ impl FormatV3 {
}
}
Err(Error::other(format!("disk id not found {}", disk_id)))
Err(Error::other(format!("disk id not found {disk_id}")))
}
pub fn check_other(&self, other: &FormatV3) -> Result<()> {
@@ -242,7 +242,7 @@ mod test {
let format = FormatV3::new(1, 4);
let str = serde_json::to_string(&format);
println!("{:?}", str);
println!("{str:?}");
let data = r#"
{
@@ -266,7 +266,7 @@ mod test {
let p = FormatV3::try_from(data);
println!("{:?}", p);
println!("{p:?}");
}
#[test]

View File

@@ -327,7 +327,7 @@ impl LocalDisk {
Ok(md)
}
async fn make_meta_volumes(&self) -> Result<()> {
let buckets = format!("{}/{}", RUSTFS_META_BUCKET, BUCKET_META_PREFIX);
let buckets = format!("{RUSTFS_META_BUCKET}/{BUCKET_META_PREFIX}");
let multipart = format!("{}/{}", RUSTFS_META_BUCKET, "multipart");
let config = format!("{}/{}", RUSTFS_META_BUCKET, "config");
let tmp = format!("{}/{}", RUSTFS_META_BUCKET, "tmp");
@@ -623,7 +623,7 @@ impl LocalDisk {
async fn delete_versions_internal(&self, volume: &str, path: &str, fis: &Vec<FileInfo>) -> Result<()> {
let volume_dir = self.get_bucket_path(volume)?;
let xlpath = self.get_object_path(volume, format!("{}/{}", path, STORAGE_FORMAT_FILE).as_str())?;
let xlpath = self.get_object_path(volume, format!("{path}/{STORAGE_FORMAT_FILE}").as_str())?;
let (data, _) = self.read_all_data_with_dmtime(volume, volume_dir.as_path(), &xlpath).await?;
@@ -652,7 +652,7 @@ impl LocalDisk {
let vid = fi.version_id.unwrap_or_default();
let _ = fm.data.remove(vec![vid, dir]);
let dir_path = self.get_object_path(volume, format!("{}/{}", path, dir).as_str())?;
let dir_path = self.get_object_path(volume, format!("{path}/{dir}").as_str())?;
if let Err(err) = self.move_to_trash(&dir_path, true, false).await {
if !(err == DiskError::FileNotFound || err == DiskError::VolumeNotFound) {
return Err(err);
@@ -674,7 +674,7 @@ impl LocalDisk {
self.write_all_private(
volume,
format!("{}/{}", path, STORAGE_FORMAT_FILE).as_str(),
format!("{path}/{STORAGE_FORMAT_FILE}").as_str(),
buf.into(),
true,
&volume_dir,
@@ -1399,7 +1399,7 @@ impl DiskAPI for LocalDisk {
rename_all(&src_file_path, &dst_file_path, &dst_volume_dir).await?;
self.write_all(dst_volume, format!("{}.meta", dst_path).as_str(), meta)
self.write_all(dst_volume, format!("{dst_path}.meta").as_str(), meta)
.await?;
if let Some(parent) = src_file_path.parent() {
@@ -1938,7 +1938,7 @@ impl DiskAPI for LocalDisk {
let wbuf = xl_meta.marshal_msg()?;
return self
.write_all_meta(volume, format!("{}/{}", path, STORAGE_FORMAT_FILE).as_str(), &wbuf, !opts.no_persistence)
.write_all_meta(volume, format!("{path}/{STORAGE_FORMAT_FILE}").as_str(), &wbuf, !opts.no_persistence)
.await;
}
@@ -1947,7 +1947,7 @@ impl DiskAPI for LocalDisk {
#[tracing::instrument(skip(self))]
async fn write_metadata(&self, _org_volume: &str, volume: &str, path: &str, fi: FileInfo) -> Result<()> {
let p = self.get_object_path(volume, format!("{}/{}", path, STORAGE_FORMAT_FILE).as_str())?;
let p = self.get_object_path(volume, format!("{path}/{STORAGE_FORMAT_FILE}").as_str())?;
let mut meta = FileMeta::new();
if !fi.fresh {
@@ -1963,7 +1963,7 @@ impl DiskAPI for LocalDisk {
let fm_data = meta.marshal_msg()?;
self.write_all(volume, format!("{}/{}", path, STORAGE_FORMAT_FILE).as_str(), fm_data.into())
self.write_all(volume, format!("{path}/{STORAGE_FORMAT_FILE}").as_str(), fm_data.into())
.await?;
Ok(())
@@ -2070,7 +2070,7 @@ impl DiskAPI for LocalDisk {
if !meta.versions.is_empty() {
let buf = meta.marshal_msg()?;
return self
.write_all_meta(volume, format!("{}{}{}", path, SLASH_SEPARATOR, STORAGE_FORMAT_FILE).as_str(), &buf, true)
.write_all_meta(volume, format!("{path}{SLASH_SEPARATOR}{STORAGE_FORMAT_FILE}").as_str(), &buf, true)
.await;
}
@@ -2078,9 +2078,9 @@ impl DiskAPI for LocalDisk {
if let Some(old_data_dir) = opts.old_data_dir {
if opts.undo_write {
let src_path = file_path.join(Path::new(
format!("{}{}{}", old_data_dir, SLASH_SEPARATOR, STORAGE_FORMAT_FILE_BACKUP).as_str(),
format!("{old_data_dir}{SLASH_SEPARATOR}{STORAGE_FORMAT_FILE_BACKUP}").as_str(),
));
let dst_path = file_path.join(Path::new(format!("{}{}{}", path, SLASH_SEPARATOR, STORAGE_FORMAT_FILE).as_str()));
let dst_path = file_path.join(Path::new(format!("{path}{SLASH_SEPARATOR}{STORAGE_FORMAT_FILE}").as_str()));
return rename_all(src_path, dst_path, file_path).await;
}
}
@@ -2250,7 +2250,7 @@ impl DiskAPI for LocalDisk {
let disk = disk_clone.clone();
let vcfg = vcfg.clone();
Box::pin(async move {
if !item.path.ends_with(&format!("{}{}", SLASH_SEPARATOR, STORAGE_FORMAT_FILE)) {
if !item.path.ends_with(&format!("{SLASH_SEPARATOR}{STORAGE_FORMAT_FILE}")) {
return Err(Error::other(ERR_SKIP_FILE).into());
}
let stop_fn = ScannerMetrics::log(ScannerMetric::ScanObject);

View File

@@ -189,7 +189,7 @@ fn get_all_sets<T: AsRef<str>>(set_drive_count: usize, is_ellipses: bool, args:
for args in set_args.iter() {
for arg in args {
if unique_args.contains(arg) {
return Err(Error::other(format!("Input args {} has duplicate ellipses", arg)));
return Err(Error::other(format!("Input args {arg} has duplicate ellipses")));
}
unique_args.insert(arg);
}
@@ -383,7 +383,7 @@ fn get_set_indexes<T: AsRef<str>>(
for &size in total_sizes {
// Check if total_sizes has minimum range upto set_size
if size < SET_SIZES[0] || size < set_drive_count {
return Err(Error::other(format!("Incorrect number of endpoints provided, size {}", size)));
return Err(Error::other(format!("Incorrect number of endpoints provided, size {size}")));
}
}
@@ -655,9 +655,9 @@ mod test {
let mut seq = Vec::new();
for i in start..=number {
if padding_len == 0 {
seq.push(format!("{}", i));
seq.push(format!("{i}"));
} else {
seq.push(format!("{:0width$}", i, width = padding_len));
seq.push(format!("{i:0padding_len$}"));
}
}
seq

View File

@@ -228,7 +228,7 @@ impl PoolEndpointList {
let host = ep.url.host().unwrap();
let host_ip_set = host_ip_cache.entry(host.clone()).or_insert({
get_host_ip(host.clone()).map_err(|e| Error::other(format!("host '{}' cannot resolve: {}", host, e)))?
get_host_ip(host.clone()).map_err(|e| Error::other(format!("host '{host}' cannot resolve: {e}")))?
});
let path = ep.get_file_path();
@@ -236,8 +236,7 @@ impl PoolEndpointList {
Entry::Occupied(mut e) => {
if e.get().intersection(host_ip_set).count() > 0 {
return Err(Error::other(format!(
"same path '{}' can not be served by different port on same address",
path
"same path '{path}' can not be served by different port on same address"
)));
}
e.get_mut().extend(host_ip_set.iter());
@@ -258,8 +257,7 @@ impl PoolEndpointList {
let path = ep.get_file_path();
if local_path_set.contains(path) {
return Err(Error::other(format!(
"path '{}' cannot be served by different address on same server",
path
"path '{path}' cannot be served by different address on same server"
)));
}
local_path_set.insert(path);
@@ -751,67 +749,67 @@ mod test {
}
let non_loop_back_ip = non_loop_back_i_ps[0];
let case1_endpoint1 = format!("http://{}/d1", non_loop_back_ip);
let case1_endpoint2 = format!("http://{}/d2", non_loop_back_ip);
let case1_endpoint1 = format!("http://{non_loop_back_ip}/d1");
let case1_endpoint2 = format!("http://{non_loop_back_ip}/d2");
let args = vec![
format!("http://{}:10000/d1", non_loop_back_ip),
format!("http://{}:10000/d2", non_loop_back_ip),
"http://example.org:10000/d3".to_string(),
"http://example.com:10000/d4".to_string(),
];
let (case1_ur_ls, case1_local_flags) = get_expected_endpoints(args, format!("http://{}:10000/", non_loop_back_ip));
let (case1_ur_ls, case1_local_flags) = get_expected_endpoints(args, format!("http://{non_loop_back_ip}:10000/"));
let case2_endpoint1 = format!("http://{}/d1", non_loop_back_ip);
let case2_endpoint2 = format!("http://{}:9000/d2", non_loop_back_ip);
let case2_endpoint1 = format!("http://{non_loop_back_ip}/d1");
let case2_endpoint2 = format!("http://{non_loop_back_ip}:9000/d2");
let args = vec![
format!("http://{}:10000/d1", non_loop_back_ip),
format!("http://{}:9000/d2", non_loop_back_ip),
"http://example.org:10000/d3".to_string(),
"http://example.com:10000/d4".to_string(),
];
let (case2_ur_ls, case2_local_flags) = get_expected_endpoints(args, format!("http://{}:10000/", non_loop_back_ip));
let (case2_ur_ls, case2_local_flags) = get_expected_endpoints(args, format!("http://{non_loop_back_ip}:10000/"));
let case3_endpoint1 = format!("http://{}/d1", non_loop_back_ip);
let case3_endpoint1 = format!("http://{non_loop_back_ip}/d1");
let args = vec![
format!("http://{}:80/d1", non_loop_back_ip),
"http://example.org:9000/d2".to_string(),
"http://example.com:80/d3".to_string(),
"http://example.net:80/d4".to_string(),
];
let (case3_ur_ls, case3_local_flags) = get_expected_endpoints(args, format!("http://{}:80/", non_loop_back_ip));
let (case3_ur_ls, case3_local_flags) = get_expected_endpoints(args, format!("http://{non_loop_back_ip}:80/"));
let case4_endpoint1 = format!("http://{}/d1", non_loop_back_ip);
let case4_endpoint1 = format!("http://{non_loop_back_ip}/d1");
let args = vec![
format!("http://{}:9000/d1", non_loop_back_ip),
"http://example.org:9000/d2".to_string(),
"http://example.com:9000/d3".to_string(),
"http://example.net:9000/d4".to_string(),
];
let (case4_ur_ls, case4_local_flags) = get_expected_endpoints(args, format!("http://{}:9000/", non_loop_back_ip));
let (case4_ur_ls, case4_local_flags) = get_expected_endpoints(args, format!("http://{non_loop_back_ip}:9000/"));
let case5_endpoint1 = format!("http://{}:9000/d1", non_loop_back_ip);
let case5_endpoint2 = format!("http://{}:9001/d2", non_loop_back_ip);
let case5_endpoint3 = format!("http://{}:9002/d3", non_loop_back_ip);
let case5_endpoint4 = format!("http://{}:9003/d4", non_loop_back_ip);
let case5_endpoint1 = format!("http://{non_loop_back_ip}:9000/d1");
let case5_endpoint2 = format!("http://{non_loop_back_ip}:9001/d2");
let case5_endpoint3 = format!("http://{non_loop_back_ip}:9002/d3");
let case5_endpoint4 = format!("http://{non_loop_back_ip}:9003/d4");
let args = vec![
case5_endpoint1.clone(),
case5_endpoint2.clone(),
case5_endpoint3.clone(),
case5_endpoint4.clone(),
];
let (case5_ur_ls, case5_local_flags) = get_expected_endpoints(args, format!("http://{}:9000/", non_loop_back_ip));
let (case5_ur_ls, case5_local_flags) = get_expected_endpoints(args, format!("http://{non_loop_back_ip}:9000/"));
let case6_endpoint1 = format!("http://{}:9003/d4", non_loop_back_ip);
let case6_endpoint1 = format!("http://{non_loop_back_ip}:9003/d4");
let args = vec![
"http://localhost:9000/d1".to_string(),
"http://localhost:9001/d2".to_string(),
"http://127.0.0.1:9002/d3".to_string(),
case6_endpoint1.clone(),
];
let (case6_ur_ls, case6_local_flags) = get_expected_endpoints(args, format!("http://{}:9003/", non_loop_back_ip));
let (case6_ur_ls, case6_local_flags) = get_expected_endpoints(args, format!("http://{non_loop_back_ip}:9003/"));
let case7_endpoint1 = format!("http://{}:9001/export", non_loop_back_ip);
let case7_endpoint2 = format!("http://{}:9000/export", non_loop_back_ip);
let case7_endpoint1 = format!("http://{non_loop_back_ip}:9001/export");
let case7_endpoint2 = format!("http://{non_loop_back_ip}:9000/export");
let test_cases = [
TestCase {

View File

@@ -128,7 +128,7 @@ impl Erasure {
total += n;
let res = self.encode_data(&buf[..n])?;
if let Err(err) = tx.send(res).await {
return Err(std::io::Error::other(format!("Failed to send encoded data : {}", err)));
return Err(std::io::Error::other(format!("Failed to send encoded data : {err}")));
}
}
Ok(_) => break,

View File

@@ -98,7 +98,7 @@ impl ReedSolomonEncoder {
warn!("Failed to reset SIMD encoder: {:?}, creating new one", e);
// 如果reset失败创建新的encoder
reed_solomon_simd::ReedSolomonEncoder::new(self.data_shards, self.parity_shards, shard_len)
.map_err(|e| io::Error::other(format!("Failed to create SIMD encoder: {:?}", e)))?
.map_err(|e| io::Error::other(format!("Failed to create SIMD encoder: {e:?}")))?
} else {
cached_encoder
}
@@ -106,7 +106,7 @@ impl ReedSolomonEncoder {
None => {
// 第一次使用创建新encoder
reed_solomon_simd::ReedSolomonEncoder::new(self.data_shards, self.parity_shards, shard_len)
.map_err(|e| io::Error::other(format!("Failed to create SIMD encoder: {:?}", e)))?
.map_err(|e| io::Error::other(format!("Failed to create SIMD encoder: {e:?}")))?
}
}
};
@@ -115,13 +115,13 @@ impl ReedSolomonEncoder {
for (i, shard) in shards_vec.iter().enumerate().take(self.data_shards) {
encoder
.add_original_shard(shard)
.map_err(|e| io::Error::other(format!("Failed to add shard {}: {:?}", i, e)))?;
.map_err(|e| io::Error::other(format!("Failed to add shard {i}: {e:?}")))?;
}
// 编码并获取恢复shards
let result = encoder
.encode()
.map_err(|e| io::Error::other(format!("SIMD encoding failed: {:?}", e)))?;
.map_err(|e| io::Error::other(format!("SIMD encoding failed: {e:?}")))?;
// 将恢复shards复制到输出缓冲区
for (i, recovery_shard) in result.recovery_iter().enumerate() {
@@ -176,7 +176,7 @@ impl ReedSolomonEncoder {
warn!("Failed to reset SIMD decoder: {:?}, creating new one", e);
// 如果reset失败创建新的decoder
reed_solomon_simd::ReedSolomonDecoder::new(self.data_shards, self.parity_shards, shard_len)
.map_err(|e| io::Error::other(format!("Failed to create SIMD decoder: {:?}", e)))?
.map_err(|e| io::Error::other(format!("Failed to create SIMD decoder: {e:?}")))?
} else {
cached_decoder
}
@@ -184,7 +184,7 @@ impl ReedSolomonEncoder {
None => {
// 第一次使用创建新decoder
reed_solomon_simd::ReedSolomonDecoder::new(self.data_shards, self.parity_shards, shard_len)
.map_err(|e| io::Error::other(format!("Failed to create SIMD decoder: {:?}", e)))?
.map_err(|e| io::Error::other(format!("Failed to create SIMD decoder: {e:?}")))?
}
}
};
@@ -195,19 +195,19 @@ impl ReedSolomonEncoder {
if i < self.data_shards {
decoder
.add_original_shard(i, shard)
.map_err(|e| io::Error::other(format!("Failed to add original shard for reconstruction: {:?}", e)))?;
.map_err(|e| io::Error::other(format!("Failed to add original shard for reconstruction: {e:?}")))?;
} else {
let recovery_idx = i - self.data_shards;
decoder
.add_recovery_shard(recovery_idx, shard)
.map_err(|e| io::Error::other(format!("Failed to add recovery shard for reconstruction: {:?}", e)))?;
.map_err(|e| io::Error::other(format!("Failed to add recovery shard for reconstruction: {e:?}")))?;
}
}
}
let result = decoder
.decode()
.map_err(|e| io::Error::other(format!("SIMD decode error: {:?}", e)))?;
.map_err(|e| io::Error::other(format!("SIMD decode error: {e:?}")))?;
// Fill in missing data shards from reconstruction result
for (i, shard_opt) in shards.iter_mut().enumerate() {
@@ -596,11 +596,11 @@ mod tests {
fn test_shard_file_offset() {
let erasure = Erasure::new(8, 8, 1024 * 1024);
let offset = erasure.shard_file_offset(0, 86, 86);
println!("offset={}", offset);
println!("offset={offset}");
assert!(offset > 0);
let total_length = erasure.shard_file_size(86);
println!("total_length={}", total_length);
println!("total_length={total_length}");
assert!(total_length > 0);
}
@@ -746,7 +746,7 @@ mod tests {
// Verify that all data shards are zeros
for (i, shard) in encoded_shards.iter().enumerate().take(data_shards) {
assert!(shard.iter().all(|&x| x == 0), "Data shard {} should be all zeros", i);
assert!(shard.iter().all(|&x| x == 0), "Data shard {i} should be all zeros");
}
// Test recovery with some shards missing
@@ -839,7 +839,7 @@ mod tests {
}
}
Err(e) => {
println!("SIMD encoding failed with small shard size: {}", e);
println!("SIMD encoding failed with small shard size: {e}");
// This is expected for very small shard sizes
}
}
@@ -909,19 +909,19 @@ mod tests {
recovered.extend_from_slice(shard.as_ref().unwrap());
}
recovered.truncate(small_data.len());
println!("recovered: {:?}", recovered);
println!("small_data: {:?}", small_data);
println!("recovered: {recovered:?}");
println!("small_data: {small_data:?}");
assert_eq!(&recovered, &small_data);
println!("✅ Data recovery successful with SIMD");
}
Err(e) => {
println!("❌ SIMD decode failed: {}", e);
println!("❌ SIMD decode failed: {e}");
// For very small data, decode failure might be acceptable
}
}
}
Err(e) => {
println!("❌ SIMD encode failed: {}", e);
println!("❌ SIMD encode failed: {e}");
// For very small data or configuration issues, encoding might fail
}
}
@@ -953,7 +953,7 @@ mod tests {
let shards = erasure.encode_data(&data).unwrap();
let encode_duration = start.elapsed();
println!("⏱️ Encoding completed in: {:?}", encode_duration);
println!("⏱️ Encoding completed in: {encode_duration:?}");
println!("📦 Generated {} shards, each shard size: {}KB", shards.len(), shards[0].len() / 1024);
assert_eq!(shards.len(), data_shards + parity_shards);
@@ -977,7 +977,7 @@ mod tests {
erasure.decode_data(&mut shards_opt).unwrap();
let decode_duration = start.elapsed();
println!("⏱️ Decoding completed in: {:?}", decode_duration);
println!("⏱️ Decoding completed in: {decode_duration:?}");
// 验证恢复的数据完整性
let mut recovered = Vec::new();

View File

@@ -39,7 +39,7 @@ impl super::Erasure {
let (mut shards, errs) = reader.read().await;
if errs.iter().filter(|e| e.is_none()).count() < self.data_shards {
return Err(Error::other(format!("can not reconstruct data: not enough data shards {:?}", errs)));
return Err(Error::other(format!("can not reconstruct data: not enough data shards {errs:?}")));
}
if self.parity_shards > 0 {

View File

@@ -806,7 +806,7 @@ pub fn error_resp_to_object_err(err: ErrorResponse, params: Vec<&str>) -> std::i
}
if is_network_or_host_down(&err.to_string(), false) {
return std::io::Error::other(ObjectApiError::BackendDown(format!("{}", err)));
return std::io::Error::other(ObjectApiError::BackendDown(format!("{err}")));
}
let r_err = err;
@@ -1118,7 +1118,7 @@ mod tests {
// For errors with parameters, we only check the variant type
assert_eq!(std::mem::discriminant(&original_error), std::mem::discriminant(&recovered_error));
} else {
panic!("Failed to recover error from code: {:#x}", code);
panic!("Failed to recover error from code: {code:#x}");
}
}
}
@@ -1211,7 +1211,7 @@ mod tests {
assert_eq!(inner_io.kind(), kind);
assert!(inner_io.to_string().contains(message));
}
_ => panic!("Expected StorageError::Io variant for kind: {:?}", kind),
_ => panic!("Expected StorageError::Io variant for kind: {kind:?}"),
}
}
}

View File

@@ -147,8 +147,7 @@ async fn heal_fresh_disk(endpoint: &Endpoint) -> Result<()> {
Some(disk) => disk,
None => {
return Err(Error::other(format!(
"Unexpected error disk must be initialized by now after formatting: {}",
endpoint
"Unexpected error disk must be initialized by now after formatting: {endpoint}"
)));
}
};

View File

@@ -487,7 +487,7 @@ impl CurrentScannerCycle {
Deserialize::deserialize(&mut Deserializer::new(&buf[..])).expect("Deserialization failed");
self.cycle_completed = u;
}
name => return Err(Error::other(format!("not support field name {}", name))),
name => return Err(Error::other(format!("not support field name {name}"))),
}
}

View File

@@ -372,7 +372,7 @@ impl ScannerMetrics {
for (disk, tracker) in paths.iter() {
let path = tracker.get_path().await;
result.push(format!("{}/{}", disk, path));
result.push(format!("{disk}/{path}"));
}
result

View File

@@ -318,7 +318,7 @@ impl HealSequence {
self.count_scanned(heal_type.clone()).await;
if source.no_wait {
let task_str = format!("{:?}", task);
let task_str = format!("{task:?}");
if GLOBAL_BackgroundHealRoutine.tasks_tx.try_send(task).is_ok() {
info!("Task in the queue: {:?}", task_str);
}
@@ -328,7 +328,7 @@ impl HealSequence {
let (resp_tx, mut resp_rx) = mpsc::channel(1);
task.resp_tx = Some(resp_tx);
let task_str = format!("{:?}", task);
let task_str = format!("{task:?}");
if GLOBAL_BackgroundHealRoutine.tasks_tx.try_send(task).is_ok() {
info!("Task in the queue: {:?}", task_str);
} else {
@@ -793,8 +793,7 @@ impl AllHealState {
for (k, v) in self.heal_seq_map.read().await.iter() {
if (has_prefix(k, path_s) || has_prefix(path_s, k)) && !v.has_ended().await {
return Err(Error::other(format!(
"The provided heal sequence path overlaps with an existing heal path: {}",
k
"The provided heal sequence path overlaps with an existing heal path: {k}"
)));
}
}

View File

@@ -121,11 +121,11 @@ impl PoolMeta {
};
let format = LittleEndian::read_u16(&data[0..2]);
if format != POOL_META_FORMAT {
return Err(Error::other(format!("PoolMeta: unknown format: {}", format)));
return Err(Error::other(format!("PoolMeta: unknown format: {format}")));
}
let version = LittleEndian::read_u16(&data[2..4]);
if version != POOL_META_VERSION {
return Err(Error::other(format!("PoolMeta: unknown version: {}", version)));
return Err(Error::other(format!("PoolMeta: unknown version: {version}")));
}
let mut buf = Deserializer::new(Cursor::new(&data[4..]));

View File

@@ -95,7 +95,7 @@ impl fmt::Display for RebalStatus {
RebalStatus::Stopped => "Stopped",
RebalStatus::Failed => "Failed",
};
write!(f, "{}", status)
write!(f, "{status}")
}
}
@@ -172,11 +172,11 @@ impl RebalanceMeta {
// Read header
match u16::from_le_bytes([data[0], data[1]]) {
REBAL_META_FMT => {}
fmt => return Err(Error::other(format!("rebalanceMeta load_with_opts: unknown format: {}", fmt))),
fmt => return Err(Error::other(format!("rebalanceMeta load_with_opts: unknown format: {fmt}"))),
}
match u16::from_le_bytes([data[2], data[3]]) {
REBAL_META_VER => {}
ver => return Err(Error::other(format!("rebalanceMeta load_with_opts: unknown version: {}", ver))),
ver => return Err(Error::other(format!("rebalanceMeta load_with_opts: unknown version: {ver}"))),
}
let meta: Self = rmp_serde::from_read(Cursor::new(&data[4..]))?;
@@ -586,16 +586,16 @@ impl ECStore {
let state = match result {
Ok(_) => {
info!("rebalance_buckets: completed");
msg = format!("Rebalance completed at {:?}", now);
msg = format!("Rebalance completed at {now:?}");
RebalStatus::Completed},
Err(err) => {
info!("rebalance_buckets: error: {:?}", err);
// TODO: check stop
if err.to_string().contains("canceled") {
msg = format!("Rebalance stopped at {:?}", now);
msg = format!("Rebalance stopped at {now:?}");
RebalStatus::Stopped
} else {
msg = format!("Rebalance stopped at {:?} with err {:?}", now, err);
msg = format!("Rebalance stopped at {now:?} with err {err:?}");
RebalStatus::Failed
}
}
@@ -616,7 +616,7 @@ impl ECStore {
}
_ = timer.tick() => {
let now = OffsetDateTime::now_utc();
msg = format!("Saving rebalance metadata at {:?}", now);
msg = format!("Saving rebalance metadata at {now:?}");
}
}

View File

@@ -34,7 +34,7 @@ fn generate_signature(secret: &str, url: &str, method: &Method, timestamp: i64)
let url = path_and_query.to_string();
let data = format!("{}|{}|{}", url, method, timestamp);
let data = format!("{url}|{method}|{timestamp}");
let mut mac = HmacSha256::new_from_slice(secret.as_bytes()).expect("HMAC can take key of any size");
mac.update(data.as_bytes());
let result = mac.finalize();
@@ -369,7 +369,7 @@ mod tests {
// Verify the signature should succeed
let result = verify_rpc_signature(url, &method, &headers);
assert!(result.is_ok(), "Round-trip test failed for {} {}", method, url);
assert!(result.is_ok(), "Round-trip test failed for {method} {url}");
}
}
}

View File

@@ -513,7 +513,7 @@ impl PeerS3Client for RemotePeerS3Client {
let options: String = serde_json::to_string(opts)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(HealBucketRequest {
bucket: bucket.to_string(),
options,
@@ -539,7 +539,7 @@ impl PeerS3Client for RemotePeerS3Client {
let options = serde_json::to_string(opts)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(ListBucketRequest { options });
let response = client.list_bucket(request).await?.into_inner();
if !response.success {
@@ -561,7 +561,7 @@ impl PeerS3Client for RemotePeerS3Client {
let options = serde_json::to_string(opts)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(MakeBucketRequest {
name: bucket.to_string(),
options,
@@ -583,7 +583,7 @@ impl PeerS3Client for RemotePeerS3Client {
let options = serde_json::to_string(opts)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(GetBucketInfoRequest {
bucket: bucket.to_string(),
options,
@@ -604,7 +604,7 @@ impl PeerS3Client for RemotePeerS3Client {
async fn delete_bucket(&self, bucket: &str, _opts: &DeleteBucketOptions) -> Result<()> {
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(DeleteBucketRequest {
bucket: bucket.to_string(),

View File

@@ -154,7 +154,7 @@ impl DiskAPI for RemoteDisk {
info!("make_volume");
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(MakeVolumeRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -174,7 +174,7 @@ impl DiskAPI for RemoteDisk {
info!("make_volumes");
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(MakeVolumesRequest {
disk: self.endpoint.to_string(),
volumes: volumes.iter().map(|s| (*s).to_string()).collect(),
@@ -194,7 +194,7 @@ impl DiskAPI for RemoteDisk {
info!("list_volumes");
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(ListVolumesRequest {
disk: self.endpoint.to_string(),
});
@@ -219,7 +219,7 @@ impl DiskAPI for RemoteDisk {
info!("stat_volume");
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(StatVolumeRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -241,7 +241,7 @@ impl DiskAPI for RemoteDisk {
info!("delete_volume {}/{}", self.endpoint.to_string(), volume);
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(DeleteVolumeRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -321,7 +321,7 @@ impl DiskAPI for RemoteDisk {
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(DeleteVersionRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -357,7 +357,7 @@ impl DiskAPI for RemoteDisk {
}
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(DeleteVersionsRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -391,7 +391,7 @@ impl DiskAPI for RemoteDisk {
let paths = paths.to_owned();
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(DeletePathsRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -413,7 +413,7 @@ impl DiskAPI for RemoteDisk {
let file_info = serde_json::to_string(&fi)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(WriteMetadataRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -438,7 +438,7 @@ impl DiskAPI for RemoteDisk {
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(UpdateMetadataRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -469,7 +469,7 @@ impl DiskAPI for RemoteDisk {
let opts = serde_json::to_string(opts)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(ReadVersionRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -494,7 +494,7 @@ impl DiskAPI for RemoteDisk {
info!("read_xl {}/{}/{}", self.endpoint.to_string(), volume, path);
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(ReadXlRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -526,7 +526,7 @@ impl DiskAPI for RemoteDisk {
let file_info = serde_json::to_string(&fi)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(RenameDataRequest {
disk: self.endpoint.to_string(),
src_volume: src_volume.to_string(),
@@ -552,7 +552,7 @@ impl DiskAPI for RemoteDisk {
info!("list_dir {}/{}", volume, _dir_path);
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(ListDirRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -687,7 +687,7 @@ impl DiskAPI for RemoteDisk {
info!("rename_file");
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(RenameFileRequest {
disk: self.endpoint.to_string(),
src_volume: src_volume.to_string(),
@@ -710,7 +710,7 @@ impl DiskAPI for RemoteDisk {
info!("rename_part {}/{}", src_volume, src_path);
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(RenamePartRequest {
disk: self.endpoint.to_string(),
src_volume: src_volume.to_string(),
@@ -735,7 +735,7 @@ impl DiskAPI for RemoteDisk {
let options = serde_json::to_string(&opt)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(DeleteRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -758,7 +758,7 @@ impl DiskAPI for RemoteDisk {
let file_info = serde_json::to_string(&fi)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(VerifyFileRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -783,7 +783,7 @@ impl DiskAPI for RemoteDisk {
let file_info = serde_json::to_string(&fi)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(CheckPartsRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -808,7 +808,7 @@ impl DiskAPI for RemoteDisk {
let read_multiple_req = serde_json::to_string(&req)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(ReadMultipleRequest {
disk: self.endpoint.to_string(),
read_multiple_req,
@@ -834,7 +834,7 @@ impl DiskAPI for RemoteDisk {
info!("write_all");
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(WriteAllRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -856,7 +856,7 @@ impl DiskAPI for RemoteDisk {
info!("read_all {}/{}", volume, path);
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(ReadAllRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
@@ -877,7 +877,7 @@ impl DiskAPI for RemoteDisk {
let opts = serde_json::to_string(&opts)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(DiskInfoRequest {
disk: self.endpoint.to_string(),
opts,
@@ -906,7 +906,7 @@ impl DiskAPI for RemoteDisk {
let cache = serde_json::to_string(cache)?;
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let (tx, rx) = mpsc::channel(10);
let in_stream = ReceiverStream::new(rx);
@@ -918,7 +918,7 @@ impl DiskAPI for RemoteDisk {
};
tx.send(request)
.await
.map_err(|err| Error::other(format!("can not send request, err: {}", err)))?;
.map_err(|err| Error::other(format!("can not send request, err: {err}")))?;
loop {
match response.next().await {

View File

@@ -121,7 +121,7 @@ impl Node for NodeService {
Err(err) => {
return Ok(tonic::Response::new(HealBucketResponse {
success: false,
error: Some(DiskError::other(format!("decode HealOpts failed: {}", err)).into()),
error: Some(DiskError::other(format!("decode HealOpts failed: {err}")).into()),
}));
}
};
@@ -149,7 +149,7 @@ impl Node for NodeService {
return Ok(tonic::Response::new(ListBucketResponse {
success: false,
bucket_infos: Vec::new(),
error: Some(DiskError::other(format!("decode BucketOptions failed: {}", err)).into()),
error: Some(DiskError::other(format!("decode BucketOptions failed: {err}")).into()),
}));
}
};
@@ -183,7 +183,7 @@ impl Node for NodeService {
Err(err) => {
return Ok(tonic::Response::new(MakeBucketResponse {
success: false,
error: Some(DiskError::other(format!("decode MakeBucketOptions failed: {}", err)).into()),
error: Some(DiskError::other(format!("decode MakeBucketOptions failed: {err}")).into()),
}));
}
};
@@ -209,7 +209,7 @@ impl Node for NodeService {
return Ok(tonic::Response::new(GetBucketInfoResponse {
success: false,
bucket_info: String::new(),
error: Some(DiskError::other(format!("decode BucketOptions failed: {}", err)).into()),
error: Some(DiskError::other(format!("decode BucketOptions failed: {err}")).into()),
}));
}
};
@@ -221,7 +221,7 @@ impl Node for NodeService {
return Ok(tonic::Response::new(GetBucketInfoResponse {
success: false,
bucket_info: String::new(),
error: Some(DiskError::other(format!("encode data failed: {}", err)).into()),
error: Some(DiskError::other(format!("encode data failed: {err}")).into()),
}));
}
};
@@ -323,7 +323,7 @@ impl Node for NodeService {
Err(err) => {
return Ok(tonic::Response::new(DeleteResponse {
success: false,
error: Some(DiskError::other(format!("decode DeleteOptions failed: {}", err)).into()),
error: Some(DiskError::other(format!("decode DeleteOptions failed: {err}")).into()),
}));
}
};
@@ -354,7 +354,7 @@ impl Node for NodeService {
return Ok(tonic::Response::new(VerifyFileResponse {
success: false,
check_parts_resp: "".to_string(),
error: Some(DiskError::other(format!("decode FileInfo failed: {}", err)).into()),
error: Some(DiskError::other(format!("decode FileInfo failed: {err}")).into()),
}));
}
};
@@ -366,7 +366,7 @@ impl Node for NodeService {
return Ok(tonic::Response::new(VerifyFileResponse {
success: false,
check_parts_resp: String::new(),
error: Some(DiskError::other(format!("encode data failed: {}", err)).into()),
error: Some(DiskError::other(format!("encode data failed: {err}")).into()),
}));
}
};
@@ -400,7 +400,7 @@ impl Node for NodeService {
return Ok(tonic::Response::new(CheckPartsResponse {
success: false,
check_parts_resp: "".to_string(),
error: Some(DiskError::other(format!("decode FileInfo failed: {}", err)).into()),
error: Some(DiskError::other(format!("decode FileInfo failed: {err}")).into()),
}));
}
};
@@ -412,7 +412,7 @@ impl Node for NodeService {
return Ok(tonic::Response::new(CheckPartsResponse {
success: false,
check_parts_resp: String::new(),
error: Some(DiskError::other(format!("encode data failed: {}", err)).into()),
error: Some(DiskError::other(format!("encode data failed: {err}")).into()),
}));
}
};
@@ -773,7 +773,7 @@ impl Node for NodeService {
let (rd, mut wr) = tokio::io::duplex(64);
let job1 = spawn(async move {
if let Err(err) = disk.walk_dir(opts, &mut wr).await {
println!("walk_dir err {:?}", err);
println!("walk_dir err {err:?}");
}
});
let job2 = spawn(async move {
@@ -829,7 +829,7 @@ impl Node for NodeService {
break;
}
println!("get err {:?}", err);
println!("get err {err:?}");
let _ = tx
.send(Ok(WalkDirResponse {
@@ -862,7 +862,7 @@ impl Node for NodeService {
return Ok(tonic::Response::new(RenameDataResponse {
success: false,
rename_data_resp: String::new(),
error: Some(DiskError::other(format!("decode FileInfo failed: {}", err)).into()),
error: Some(DiskError::other(format!("decode FileInfo failed: {err}")).into()),
}));
}
};
@@ -877,7 +877,7 @@ impl Node for NodeService {
return Ok(tonic::Response::new(RenameDataResponse {
success: false,
rename_data_resp: String::new(),
error: Some(DiskError::other(format!("encode data failed: {}", err)).into()),
error: Some(DiskError::other(format!("encode data failed: {err}")).into()),
}));
}
};
@@ -987,7 +987,7 @@ impl Node for NodeService {
Err(err) => Ok(tonic::Response::new(StatVolumeResponse {
success: false,
volume_info: String::new(),
error: Some(DiskError::other(format!("encode data failed: {}", err)).into()),
error: Some(DiskError::other(format!("encode data failed: {err}")).into()),
})),
},
Err(err) => Ok(tonic::Response::new(StatVolumeResponse {
@@ -1034,7 +1034,7 @@ impl Node for NodeService {
Err(err) => {
return Ok(tonic::Response::new(UpdateMetadataResponse {
success: false,
error: Some(DiskError::other(format!("decode FileInfo failed: {}", err)).into()),
error: Some(DiskError::other(format!("decode FileInfo failed: {err}")).into()),
}));
}
};
@@ -1043,7 +1043,7 @@ impl Node for NodeService {
Err(err) => {
return Ok(tonic::Response::new(UpdateMetadataResponse {
success: false,
error: Some(DiskError::other(format!("decode UpdateMetadataOpts failed: {}", err)).into()),
error: Some(DiskError::other(format!("decode UpdateMetadataOpts failed: {err}")).into()),
}));
}
};
@@ -1074,7 +1074,7 @@ impl Node for NodeService {
Err(err) => {
return Ok(tonic::Response::new(WriteMetadataResponse {
success: false,
error: Some(DiskError::other(format!("decode FileInfo failed: {}", err)).into()),
error: Some(DiskError::other(format!("decode FileInfo failed: {err}")).into()),
}));
}
};
@@ -1105,7 +1105,7 @@ impl Node for NodeService {
return Ok(tonic::Response::new(ReadVersionResponse {
success: false,
file_info: String::new(),
error: Some(DiskError::other(format!("decode ReadOptions failed: {}", err)).into()),
error: Some(DiskError::other(format!("decode ReadOptions failed: {err}")).into()),
}));
}
};
@@ -1122,7 +1122,7 @@ impl Node for NodeService {
Err(err) => Ok(tonic::Response::new(ReadVersionResponse {
success: false,
file_info: String::new(),
error: Some(DiskError::other(format!("encode data failed: {}", err)).into()),
error: Some(DiskError::other(format!("encode data failed: {err}")).into()),
})),
},
Err(err) => Ok(tonic::Response::new(ReadVersionResponse {
@@ -1153,7 +1153,7 @@ impl Node for NodeService {
Err(err) => Ok(tonic::Response::new(ReadXlResponse {
success: false,
raw_file_info: String::new(),
error: Some(DiskError::other(format!("encode data failed: {}", err)).into()),
error: Some(DiskError::other(format!("encode data failed: {err}")).into()),
})),
},
Err(err) => Ok(tonic::Response::new(ReadXlResponse {
@@ -1180,7 +1180,7 @@ impl Node for NodeService {
return Ok(tonic::Response::new(DeleteVersionResponse {
success: false,
raw_file_info: "".to_string(),
error: Some(DiskError::other(format!("decode FileInfo failed: {}", err)).into()),
error: Some(DiskError::other(format!("decode FileInfo failed: {err}")).into()),
}));
}
};
@@ -1190,7 +1190,7 @@ impl Node for NodeService {
return Ok(tonic::Response::new(DeleteVersionResponse {
success: false,
raw_file_info: "".to_string(),
error: Some(DiskError::other(format!("decode DeleteOptions failed: {}", err)).into()),
error: Some(DiskError::other(format!("decode DeleteOptions failed: {err}")).into()),
}));
}
};
@@ -1207,7 +1207,7 @@ impl Node for NodeService {
Err(err) => Ok(tonic::Response::new(DeleteVersionResponse {
success: false,
raw_file_info: "".to_string(),
error: Some(DiskError::other(format!("encode data failed: {}", err)).into()),
error: Some(DiskError::other(format!("encode data failed: {err}")).into()),
})),
},
Err(err) => Ok(tonic::Response::new(DeleteVersionResponse {
@@ -1236,7 +1236,7 @@ impl Node for NodeService {
return Ok(tonic::Response::new(DeleteVersionsResponse {
success: false,
errors: Vec::new(),
error: Some(DiskError::other(format!("decode FileInfoVersions failed: {}", err)).into()),
error: Some(DiskError::other(format!("decode FileInfoVersions failed: {err}")).into()),
}));
}
};
@@ -1247,7 +1247,7 @@ impl Node for NodeService {
return Ok(tonic::Response::new(DeleteVersionsResponse {
success: false,
errors: Vec::new(),
error: Some(DiskError::other(format!("decode DeleteOptions failed: {}", err)).into()),
error: Some(DiskError::other(format!("decode DeleteOptions failed: {err}")).into()),
}));
}
};
@@ -1291,7 +1291,7 @@ impl Node for NodeService {
return Ok(tonic::Response::new(ReadMultipleResponse {
success: false,
read_multiple_resps: Vec::new(),
error: Some(DiskError::other(format!("decode ReadMultipleReq failed: {}", err)).into()),
error: Some(DiskError::other(format!("decode ReadMultipleReq failed: {err}")).into()),
}));
}
};
@@ -1353,7 +1353,7 @@ impl Node for NodeService {
return Ok(tonic::Response::new(DiskInfoResponse {
success: false,
disk_info: "".to_string(),
error: Some(DiskError::other(format!("decode DiskInfoOptions failed: {}", err)).into()),
error: Some(DiskError::other(format!("decode DiskInfoOptions failed: {err}")).into()),
}));
}
};
@@ -1367,7 +1367,7 @@ impl Node for NodeService {
Err(err) => Ok(tonic::Response::new(DiskInfoResponse {
success: false,
disk_info: "".to_string(),
error: Some(DiskError::other(format!("encode data failed: {}", err)).into()),
error: Some(DiskError::other(format!("encode data failed: {err}")).into()),
})),
},
Err(err) => Ok(tonic::Response::new(DiskInfoResponse {
@@ -1403,7 +1403,7 @@ impl Node for NodeService {
success: false,
update: "".to_string(),
data_usage_cache: "".to_string(),
error: Some(DiskError::other(format!("decode DataUsageCache failed: {}", err)).into()),
error: Some(DiskError::other(format!("decode DataUsageCache failed: {err}")).into()),
}))
.await
.expect("working rx");
@@ -1485,12 +1485,12 @@ impl Node for NodeService {
})),
Err(err) => Ok(tonic::Response::new(GenerallyLockResponse {
success: false,
error_info: Some(format!("can not lock, args: {}, err: {}", args, err)),
error_info: Some(format!("can not lock, args: {args}, err: {err}")),
})),
},
Err(err) => Ok(tonic::Response::new(GenerallyLockResponse {
success: false,
error_info: Some(format!("can not decode args, err: {}", err)),
error_info: Some(format!("can not decode args, err: {err}")),
})),
}
}
@@ -1505,12 +1505,12 @@ impl Node for NodeService {
})),
Err(err) => Ok(tonic::Response::new(GenerallyLockResponse {
success: false,
error_info: Some(format!("can not unlock, args: {}, err: {}", args, err)),
error_info: Some(format!("can not unlock, args: {args}, err: {err}")),
})),
},
Err(err) => Ok(tonic::Response::new(GenerallyLockResponse {
success: false,
error_info: Some(format!("can not decode args, err: {}", err)),
error_info: Some(format!("can not decode args, err: {err}")),
})),
}
}
@@ -1525,12 +1525,12 @@ impl Node for NodeService {
})),
Err(err) => Ok(tonic::Response::new(GenerallyLockResponse {
success: false,
error_info: Some(format!("can not rlock, args: {}, err: {}", args, err)),
error_info: Some(format!("can not rlock, args: {args}, err: {err}")),
})),
},
Err(err) => Ok(tonic::Response::new(GenerallyLockResponse {
success: false,
error_info: Some(format!("can not decode args, err: {}", err)),
error_info: Some(format!("can not decode args, err: {err}")),
})),
}
}
@@ -1545,12 +1545,12 @@ impl Node for NodeService {
})),
Err(err) => Ok(tonic::Response::new(GenerallyLockResponse {
success: false,
error_info: Some(format!("can not runlock, args: {}, err: {}", args, err)),
error_info: Some(format!("can not runlock, args: {args}, err: {err}")),
})),
},
Err(err) => Ok(tonic::Response::new(GenerallyLockResponse {
success: false,
error_info: Some(format!("can not decode args, err: {}", err)),
error_info: Some(format!("can not decode args, err: {err}")),
})),
}
}
@@ -1565,12 +1565,12 @@ impl Node for NodeService {
})),
Err(err) => Ok(tonic::Response::new(GenerallyLockResponse {
success: false,
error_info: Some(format!("can not force_unlock, args: {}, err: {}", args, err)),
error_info: Some(format!("can not force_unlock, args: {args}, err: {err}")),
})),
},
Err(err) => Ok(tonic::Response::new(GenerallyLockResponse {
success: false,
error_info: Some(format!("can not decode args, err: {}", err)),
error_info: Some(format!("can not decode args, err: {err}")),
})),
}
}
@@ -1585,12 +1585,12 @@ impl Node for NodeService {
})),
Err(err) => Ok(tonic::Response::new(GenerallyLockResponse {
success: false,
error_info: Some(format!("can not refresh, args: {}, err: {}", args, err)),
error_info: Some(format!("can not refresh, args: {args}, err: {err}")),
})),
},
Err(err) => Ok(tonic::Response::new(GenerallyLockResponse {
success: false,
error_info: Some(format!("can not decode args, err: {}", err)),
error_info: Some(format!("can not decode args, err: {err}")),
})),
}
}
@@ -3595,7 +3595,7 @@ mod tests {
#[test]
fn test_node_service_debug() {
let service = create_test_node_service();
let debug_str = format!("{:?}", service);
let debug_str = format!("{service:?}");
assert!(debug_str.contains("NodeService"));
}
@@ -3605,8 +3605,8 @@ mod tests {
let service2 = make_server();
// Both services should be created successfully
assert!(format!("{:?}", service1).contains("NodeService"));
assert!(format!("{:?}", service2).contains("NodeService"));
assert!(format!("{service1:?}").contains("NodeService"));
assert!(format!("{service2:?}").contains("NodeService"));
}
#[tokio::test]

View File

@@ -467,7 +467,7 @@ impl SetDisks {
data_dir: &str,
write_quorum: usize,
) -> disk::error::Result<()> {
let file_path = Arc::new(format!("{}/{}", object, data_dir));
let file_path = Arc::new(format!("{object}/{data_dir}"));
let bucket = Arc::new(bucket.to_string());
let futures = disks.iter().map(|disk| {
let file_path = file_path.clone();
@@ -585,7 +585,7 @@ impl SetDisks {
if let Some(err) = reduce_write_quorum_errs(&errs, OBJECT_OP_IGNORED_ERRS, write_quorum) {
warn!("rename_part errs {:?}", &errs);
Self::cleanup_multipart_path(disks, &[dst_object.to_string(), format!("{}.meta", dst_object)]).await;
Self::cleanup_multipart_path(disks, &[dst_object.to_string(), format!("{dst_object}.meta")]).await;
return Err(err);
}
@@ -723,7 +723,7 @@ impl SetDisks {
}
fn get_multipart_sha_dir(bucket: &str, object: &str) -> String {
let path = format!("{}/{}", bucket, object);
let path = format!("{bucket}/{object}");
let mut hasher = Sha256::new();
hasher.update(path);
hex(hasher.finalize())
@@ -1966,7 +1966,7 @@ impl SetDisks {
return Err(to_object_err(read_err.into(), vec![bucket, object]));
}
error!("create_bitrot_reader not enough disks to read: {:?}", &errors);
return Err(Error::other(format!("not enough disks to read: {:?}", errors)));
return Err(Error::other(format!("not enough disks to read: {errors:?}")));
}
// debug!(
@@ -2161,7 +2161,7 @@ impl SetDisks {
_ = list_path_raw(rx, lopts)
.await
.map_err(|err| Error::other(format!("listPathRaw returned {}: bucket: {}, path: {}", err, bucket, path)));
.map_err(|err| Error::other(format!("listPathRaw returned {err}: bucket: {bucket}, path: {path}")));
Ok(())
}
@@ -2677,8 +2677,7 @@ impl SetDisks {
return Ok((
result,
Some(DiskError::other(format!(
"all drives had write errors, unable to heal {}/{}",
bucket, object
"all drives had write errors, unable to heal {bucket}/{object}"
))),
));
}
@@ -2955,7 +2954,7 @@ impl SetDisks {
tags.insert("set", self.set_index.to_string());
tags.insert("pool", self.pool_index.to_string());
tags.insert("merrs", join_errs(errs));
tags.insert("derrs", format!("{:?}", data_errs_by_part));
tags.insert("derrs", format!("{data_errs_by_part:?}"));
if m.is_valid() {
tags.insert("sz", m.size.to_string());
tags.insert(
@@ -3268,7 +3267,7 @@ impl SetDisks {
Ok(info) => info,
Err(err) => {
defer.await;
return Err(Error::other(format!("unable to get disk information before healing it: {}", err)));
return Err(Error::other(format!("unable to get disk information before healing it: {err}")));
}
};
let num_cores = num_cpus::get(); // 使用 num_cpus crate 获取核心数
@@ -4010,7 +4009,7 @@ impl ObjectIO for SetDisks {
return Err(to_object_err(write_err.into(), vec![bucket, object]));
}
return Err(Error::other(format!("not enough disks to write: {:?}", errors)));
return Err(Error::other(format!("not enough disks to write: {errors:?}")));
}
let stream = mem::replace(
@@ -4035,8 +4034,8 @@ impl ObjectIO for SetDisks {
return Err(Error::other("put_object write size < data.size()"));
}
if user_defined.contains_key(&format!("{}compression", RESERVED_METADATA_PREFIX_LOWER)) {
user_defined.insert(format!("{}compression-size", RESERVED_METADATA_PREFIX_LOWER), w_size.to_string());
if user_defined.contains_key(&format!("{RESERVED_METADATA_PREFIX_LOWER}compression")) {
user_defined.insert(format!("{RESERVED_METADATA_PREFIX_LOWER}compression-size"), w_size.to_string());
}
let index_op = data.stream.try_get_index().map(|v| v.clone().into_vec());
@@ -4879,9 +4878,9 @@ impl StorageAPI for SetDisks {
let disks = disks.clone();
let shuffle_disks = Self::shuffle_disks(&disks, &fi.erasure.distribution);
let part_suffix = format!("part.{}", part_id);
let part_suffix = format!("part.{part_id}");
let tmp_part = format!("{}x{}", Uuid::new_v4(), OffsetDateTime::now_utc().unix_timestamp());
let tmp_part_path = Arc::new(format!("{}/{}", tmp_part, part_suffix));
let tmp_part_path = Arc::new(format!("{tmp_part}/{part_suffix}"));
// let mut writers = Vec::with_capacity(disks.len());
// let erasure = Erasure::new(fi.erasure.data_blocks, fi.erasure.parity_blocks, fi.erasure.block_size);
@@ -4979,7 +4978,7 @@ impl StorageAPI for SetDisks {
return Err(to_object_err(write_err.into(), vec![bucket, object]));
}
return Err(Error::other(format!("not enough disks to write: {:?}", errors)));
return Err(Error::other(format!("not enough disks to write: {errors:?}")));
}
let stream = mem::replace(
@@ -5473,11 +5472,11 @@ impl StorageAPI for SetDisks {
fi.metadata.insert("etag".to_owned(), etag);
fi.metadata
.insert(format!("{}actual-size", RESERVED_METADATA_PREFIX_LOWER), object_actual_size.to_string());
.insert(format!("{RESERVED_METADATA_PREFIX_LOWER}actual-size"), object_actual_size.to_string());
if fi.is_compressed() {
fi.metadata
.insert(format!("{}compression-size", RESERVED_METADATA_PREFIX_LOWER), object_size.to_string());
.insert(format!("{RESERVED_METADATA_PREFIX_LOWER}compression-size"), object_size.to_string());
}
if opts.data_movement {

View File

@@ -1446,7 +1446,7 @@ impl StorageAPI for ECStore {
// TODO: replication opts.srdelete_op
// 删除 meta
self.delete_all(RUSTFS_META_BUCKET, format!("{}/{}", BUCKET_META_PREFIX, bucket).as_str())
self.delete_all(RUSTFS_META_BUCKET, format!("{BUCKET_META_PREFIX}/{bucket}").as_str())
.await?;
Ok(())
}
@@ -2096,7 +2096,7 @@ impl StorageAPI for ECStore {
if pool_idx < self.pools.len() && set_idx < self.pools[pool_idx].disk_set.len() {
self.pools[pool_idx].disk_set[set_idx].get_disks(0, 0).await
} else {
Err(Error::other(format!("pool idx {}, set idx {}, not found", pool_idx, set_idx)))
Err(Error::other(format!("pool idx {pool_idx}, set idx {set_idx}, not found")))
}
}
@@ -2458,11 +2458,11 @@ async fn init_local_peer(endpoint_pools: &EndpointServerPools, host: &String, po
if peer_set.is_empty() {
if !host.is_empty() {
*GLOBAL_Local_Node_Name.write().await = format!("{}:{}", host, port);
*GLOBAL_Local_Node_Name.write().await = format!("{host}:{port}");
return;
}
*GLOBAL_Local_Node_Name.write().await = format!("127.0.0.1:{}", port);
*GLOBAL_Local_Node_Name.write().await = format!("127.0.0.1:{port}");
return;
}
@@ -2599,7 +2599,7 @@ fn check_new_multipart_args(bucket: &str, object: &str) -> Result<()> {
fn check_multipart_object_args(bucket: &str, object: &str, upload_id: &str) -> Result<()> {
if let Err(e) = base64_decode(upload_id.as_bytes()) {
return Err(StorageError::MalformedUploadID(format!("{}/{}-{},err:{}", bucket, object, upload_id, e)));
return Err(StorageError::MalformedUploadID(format!("{bucket}/{object}-{upload_id},err:{e}")));
};
check_object_args(bucket, object)
}

View File

@@ -131,7 +131,7 @@ impl GetObjectReader {
let actual_size = if actual_size > 0 {
actual_size as usize
} else {
return Err(Error::other(format!("invalid decompressed size {}", actual_size)));
return Err(Error::other(format!("invalid decompressed size {actual_size}")));
};
let dec_reader = LimitReader::new(dec_reader, actual_size);
@@ -428,13 +428,13 @@ impl Clone for ObjectInfo {
impl ObjectInfo {
pub fn is_compressed(&self) -> bool {
self.user_defined
.contains_key(&format!("{}compression", RESERVED_METADATA_PREFIX_LOWER))
.contains_key(&format!("{RESERVED_METADATA_PREFIX_LOWER}compression"))
}
pub fn is_compressed_ok(&self) -> Result<(CompressionAlgorithm, bool)> {
let scheme = self
.user_defined
.get(&format!("{}compression", RESERVED_METADATA_PREFIX_LOWER))
.get(&format!("{RESERVED_METADATA_PREFIX_LOWER}compression"))
.cloned();
if let Some(scheme) = scheme {
@@ -457,7 +457,7 @@ impl ObjectInfo {
if self.is_compressed() {
if let Some(size_str) = self
.user_defined
.get(&format!("{}actual-size", RESERVED_METADATA_PREFIX_LOWER))
.get(&format!("{RESERVED_METADATA_PREFIX_LOWER}actual-size"))
{
if !size_str.is_empty() {
// Todo: deal with error

View File

@@ -130,7 +130,7 @@ impl ListPathOptions {
pub fn parse_marker(&mut self) {
if let Some(marker) = &self.marker {
let s = marker.clone();
if !s.contains(format!("[rustfs_cache:{}", MARKER_TAG_VERSION).as_str()) {
if !s.contains(format!("[rustfs_cache:{MARKER_TAG_VERSION}").as_str()) {
return;
}
@@ -188,7 +188,7 @@ impl ListPathOptions {
self.pool_idx.unwrap_or_default(),
)
} else {
format!("{}[rustfs_cache:{},return:]", marker, MARKER_TAG_VERSION)
format!("{marker}[rustfs_cache:{MARKER_TAG_VERSION},return:]")
}
}
}

View File

@@ -119,7 +119,7 @@ impl Clone for Error {
match self {
Error::PolicyError(e) => Error::StringError(e.to_string()), // Convert to string since PolicyError may not be cloneable
Error::StringError(s) => Error::StringError(s.clone()),
Error::CryptoError(e) => Error::StringError(format!("crypto: {}", e)), // Convert to string
Error::CryptoError(e) => Error::StringError(format!("crypto: {e}")), // Convert to string
Error::NoSuchUser(s) => Error::NoSuchUser(s.clone()),
Error::NoSuchAccount(s) => Error::NoSuchAccount(s.clone()),
Error::NoSuchServiceAccount(s) => Error::NoSuchServiceAccount(s.clone()),
@@ -137,7 +137,7 @@ impl Clone for Error {
Error::InvalidSecretKeyLength => Error::InvalidSecretKeyLength,
Error::ContainsReservedChars => Error::ContainsReservedChars,
Error::GroupNameContainsReservedChars => Error::GroupNameContainsReservedChars,
Error::JWTError(e) => Error::StringError(format!("jwt err {}", e)), // Convert to string
Error::JWTError(e) => Error::StringError(format!("jwt err {e}")), // Convert to string
Error::NoAccessKey => Error::NoAccessKey,
Error::InvalidToken => Error::InvalidToken,
Error::InvalidAccessKey => Error::InvalidAccessKey,

View File

@@ -64,15 +64,15 @@ fn get_policy_doc_path(name: &str) -> String {
fn get_mapped_policy_path(name: &str, user_type: UserType, is_group: bool) -> String {
if is_group {
return path_join_buf(&[&IAM_CONFIG_POLICY_DB_GROUPS_PREFIX, format!("{}.json", name).as_str()]);
return path_join_buf(&[&IAM_CONFIG_POLICY_DB_GROUPS_PREFIX, format!("{name}.json").as_str()]);
}
match user_type {
UserType::Svc => path_join_buf(&[
&IAM_CONFIG_POLICY_DB_SERVICE_ACCOUNTS_PREFIX,
format!("{}.json", name).as_str(),
format!("{name}.json").as_str(),
]),
UserType::Sts => path_join_buf(&[&IAM_CONFIG_POLICY_DB_STS_USERS_PREFIX, format!("{}.json", name).as_str()]),
_ => path_join_buf(&[&IAM_CONFIG_POLICY_DB_USERS_PREFIX, format!("{}.json", name).as_str()]),
UserType::Sts => path_join_buf(&[&IAM_CONFIG_POLICY_DB_STS_USERS_PREFIX, format!("{name}.json").as_str()]),
_ => path_join_buf(&[&IAM_CONFIG_POLICY_DB_USERS_PREFIX, format!("{name}.json").as_str()]),
}
}
@@ -212,7 +212,7 @@ impl ObjectStore {
Ok(p) => Ok(p),
Err(err) => {
if !is_err_no_such_policy(&err) {
Err(Error::other(format!("load policy doc failed: {}", err)))
Err(Error::other(format!("load policy doc failed: {err}")))
} else {
Ok(PolicyDoc::default())
}
@@ -244,7 +244,7 @@ impl ObjectStore {
Ok(res) => Ok(res),
Err(err) => {
if !is_err_no_such_user(&err) {
Err(Error::other(format!("load user failed: {}", err)))
Err(Error::other(format!("load user failed: {err}")))
} else {
Ok(UserIdentity::default())
}
@@ -295,7 +295,7 @@ impl ObjectStore {
Ok(p) => Ok(p),
Err(err) => {
if !is_err_no_such_policy(&err) {
Err(Error::other(format!("load mapped policy failed: {}", err)))
Err(Error::other(format!("load mapped policy failed: {err}")))
} else {
Ok(MappedPolicy::default())
}
@@ -767,7 +767,7 @@ impl Store for ObjectStore {
let name = rustfs_utils::path::dir(item);
info!("load group: {}", name);
if let Err(err) = self.load_group(&name, &mut items_cache).await {
return Err(Error::other(format!("load group failed: {}", err)));
return Err(Error::other(format!("load group failed: {err}")));
};
}
@@ -828,7 +828,7 @@ impl Store for ObjectStore {
info!("load group policy: {}", name);
if let Err(err) = self.load_mapped_policy(name, UserType::Reg, true, &mut items_cache).await {
if !is_err_no_such_policy(&err) {
return Err(Error::other(format!("load group policy failed: {}", err)));
return Err(Error::other(format!("load group policy failed: {err}")));
}
};
}
@@ -847,7 +847,7 @@ impl Store for ObjectStore {
info!("load svc user: {}", name);
if let Err(err) = self.load_user(&name, UserType::Svc, &mut items_cache).await {
if !is_err_no_such_user(&err) {
return Err(Error::other(format!("load svc user failed: {}", err)));
return Err(Error::other(format!("load svc user failed: {err}")));
}
};
}
@@ -861,7 +861,7 @@ impl Store for ObjectStore {
.await
{
if !is_err_no_such_policy(&err) {
return Err(Error::other(format!("load_mapped_policy failed: {}", err)));
return Err(Error::other(format!("load_mapped_policy failed: {err}")));
}
}
}

View File

@@ -287,7 +287,7 @@ mod tests {
for invalid_token in &invalid_tokens {
let result = extract_claims::<Claims>(invalid_token, "secret");
assert!(result.is_err(), "Should fail with invalid token: {}", invalid_token);
assert!(result.is_err(), "Should fail with invalid token: {invalid_token}");
}
}

View File

@@ -667,12 +667,12 @@ mod tests {
let mem_info = MemInfo::default();
// Test that all structures can be formatted with Debug
assert!(!format!("{:?}", node).is_empty());
assert!(!format!("{:?}", cpu).is_empty());
assert!(!format!("{:?}", partition).is_empty());
assert!(!format!("{:?}", proc_info).is_empty());
assert!(!format!("{:?}", service).is_empty());
assert!(!format!("{:?}", mem_info).is_empty());
assert!(!format!("{node:?}").is_empty());
assert!(!format!("{cpu:?}").is_empty());
assert!(!format!("{partition:?}").is_empty());
assert!(!format!("{proc_info:?}").is_empty());
assert!(!format!("{service:?}").is_empty());
assert!(!format!("{mem_info:?}").is_empty());
}
#[test]

View File

@@ -1066,15 +1066,15 @@ mod tests {
let server_props = ServerProperties::default();
// Test that all structures can be formatted with Debug
assert!(!format!("{:?}", item_state).is_empty());
assert!(!format!("{:?}", disk_metrics).is_empty());
assert!(!format!("{:?}", disk).is_empty());
assert!(!format!("{:?}", healing_disk).is_empty());
assert!(!format!("{:?}", backend_byte).is_empty());
assert!(!format!("{:?}", storage_info).is_empty());
assert!(!format!("{:?}", backend_info).is_empty());
assert!(!format!("{:?}", mem_stats).is_empty());
assert!(!format!("{:?}", server_props).is_empty());
assert!(!format!("{item_state:?}").is_empty());
assert!(!format!("{disk_metrics:?}").is_empty());
assert!(!format!("{disk:?}").is_empty());
assert!(!format!("{healing_disk:?}").is_empty());
assert!(!format!("{backend_byte:?}").is_empty());
assert!(!format!("{storage_info:?}").is_empty());
assert!(!format!("{backend_info:?}").is_empty());
assert!(!format!("{mem_stats:?}").is_empty());
assert!(!format!("{server_props:?}").is_empty());
}
#[test]

View File

@@ -29,7 +29,7 @@ impl TryFrom<&str> for AccountStatus {
match s {
"enabled" => Ok(AccountStatus::Enabled),
"disabled" => Ok(AccountStatus::Disabled),
_ => Err(format!("invalid account status: {}", s)),
_ => Err(format!("invalid account status: {s}")),
}
}
}
@@ -737,10 +737,10 @@ mod tests {
};
// Test that all structures can be formatted with Debug
assert!(!format!("{:?}", account_status).is_empty());
assert!(!format!("{:?}", user_auth_type).is_empty());
assert!(!format!("{:?}", user_info).is_empty());
assert!(!format!("{:?}", service_account).is_empty());
assert!(!format!("{account_status:?}").is_empty());
assert!(!format!("{user_auth_type:?}").is_empty());
assert!(!format!("{user_info:?}").is_empty());
assert!(!format!("{service_account:?}").is_empty());
}
#[test]

View File

@@ -16,22 +16,22 @@ mod test {
fn test_parse_dur() {
let s = String::from("3s");
let dur = parse_duration(&s);
println!("{:?}", dur);
println!("{dur:?}");
assert_eq!(Ok(Duration::from_secs(3)), dur);
let s = String::from("3ms");
let dur = parse_duration(&s);
println!("{:?}", dur);
println!("{dur:?}");
assert_eq!(Ok(Duration::from_millis(3)), dur);
let s = String::from("3m");
let dur = parse_duration(&s);
println!("{:?}", dur);
println!("{dur:?}");
assert_eq!(Ok(Duration::from_secs(3 * 60)), dur);
let s = String::from("3h");
let dur = parse_duration(&s);
println!("{:?}", dur);
println!("{dur:?}");
assert_eq!(Ok(Duration::from_secs(3 * 60 * 60)), dur);
}
}

View File

@@ -103,7 +103,7 @@ impl<'de> Deserialize<'de> for Functions {
let mut inner_data = Functions::default();
while let Some(key) = map.next_key::<&str>()? {
if hash.contains(&key) {
return Err(Error::custom(format!("duplicate condition operator `{}`", key)));
return Err(Error::custom(format!("duplicate condition operator `{key}`")));
}
hash.insert(key);

View File

@@ -149,7 +149,7 @@ where
let got = hash.sum();
let src = hex.as_str();
if src != got.as_str() {
println!("sha256 err src:{},got:{}", src, got);
println!("sha256 err src:{src},got:{got}");
return Poll::Ready(Some(Err(Box::new(ReaderError::SHA256Mismatch(src.to_string(), got)))));
}
}
@@ -161,7 +161,7 @@ where
let src = hex.as_str();
if src != got.as_str() {
// TODO: ERR
println!("md5 err src:{},got:{}", src, got);
println!("md5 err src:{src},got:{got}");
return Poll::Ready(Some(Err(Box::new(ReaderError::ChecksumMismatch(src.to_string(), got)))));
}
}
@@ -435,7 +435,7 @@ mod test {
println!("bytes: {}, {:?}", bytes.len(), bytes);
}
Err(err) => {
println!("err:{:?}", err);
println!("err:{err:?}");
break;
}
},
@@ -483,7 +483,7 @@ mod test {
println!("bytes: {}, {:?}", bytes.len(), bytes);
}
Err(err) => {
println!("err:{:?}", err);
println!("err:{err:?}");
break;
}
},
@@ -533,7 +533,7 @@ mod test {
println!("bytes: {}, {:?}", bytes.len(), bytes);
}
Err(err) => {
println!("err:{:?}", err);
println!("err:{err:?}");
break;
}
},

View File

@@ -217,7 +217,7 @@ impl Operation for AccountInfoHandler {
let policies = iam_store
.policy_db_get(&account_name, &cred.groups)
.await
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("get policy failed: {}", e)))?;
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("get policy failed: {e}")))?;
effective_policy = iam_store.get_combined_policy(&policies).await;
};
@@ -1036,6 +1036,6 @@ mod test {
fn test_decode() {
let b = b"{\"recursive\":false,\"dryRun\":false,\"remove\":false,\"recreate\":false,\"scanMode\":1,\"updateParity\":false,\"nolock\":false}";
let s: HealOpts = serde_urlencoded::from_bytes(b).unwrap();
println!("{:?}", s);
println!("{s:?}");
}
}

View File

@@ -136,7 +136,7 @@ impl Operation for UpdateGroupMembers {
};
let args: GroupAddRemove = serde_json::from_slice(&body)
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("unmarshal body err {}", e)))?;
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("unmarshal body err {e}")))?;
warn!("UpdateGroupMembers args {:?}", args);
@@ -148,7 +148,7 @@ impl Operation for UpdateGroupMembers {
if is_temp {
return Err(S3Error::with_message(
S3ErrorCode::MethodNotAllowed,
format!("can't add temp user {}", member),
format!("can't add temp user {member}"),
));
}
@@ -157,7 +157,7 @@ impl Operation for UpdateGroupMembers {
if cred.access_key == *member {
return Err(S3Error::with_message(
S3ErrorCode::MethodNotAllowed,
format!("can't add root {}", member),
format!("can't add root {member}"),
));
}
Ok(())

View File

@@ -498,7 +498,7 @@ impl Operation for ListServiceAccount {
.collect();
let data = serde_json::to_vec(&ListServiceAccountsResp { accounts })
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("marshal users err {}", e)))?;
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("marshal users err {e}")))?;
let mut header = HeaderMap::new();
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());

View File

@@ -111,7 +111,7 @@ impl Operation for AssumeRoleHandle {
info!("AssumeRole get claims {:?}", &claims);
let mut new_cred = get_new_credentials_with_metadata(&claims, &secret)
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("get new cred failed {}", e)))?;
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("get new cred failed {e}")))?;
new_cred.parent_user = cred.access_key.clone();
@@ -147,13 +147,13 @@ impl Operation for AssumeRoleHandle {
pub fn populate_session_policy(claims: &mut HashMap<String, Value>, policy: &str) -> S3Result<()> {
if !policy.is_empty() {
let session_policy = Policy::parse_config(policy.as_bytes())
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("parse policy err {}", e)))?;
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("parse policy err {e}")))?;
if session_policy.version.is_empty() {
return Err(s3_error!(InvalidRequest, "invalid policy"));
}
let policy_buf = serde_json::to_vec(&session_policy)
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("marshal policy err {}", e)))?;
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("marshal policy err {e}")))?;
if policy_buf.len() > 2048 {
return Err(s3_error!(InvalidRequest, "policy too large"));

View File

@@ -84,7 +84,7 @@ impl Operation for AddTier {
};
let mut args: TierConfig = serde_json::from_slice(&body)
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("unmarshal body err {}", e)))?;
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("unmarshal body err {e}")))?;
match args.tier_type {
TierType::S3 => {
@@ -202,7 +202,7 @@ impl Operation for EditTier {
};
let creds: TierCreds = serde_json::from_slice(&body)
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("unmarshal body err {}", e)))?;
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("unmarshal body err {e}")))?;
debug!("edit tier args {:?}", creds);
@@ -264,7 +264,7 @@ impl Operation for ListTiers {
let tiers = tier_config_mgr.list_tiers();
let data = serde_json::to_vec(&tiers)
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("marshal tiers err {}", e)))?;
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("marshal tiers err {e}")))?;
let mut header = HeaderMap::new();
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
@@ -397,7 +397,7 @@ impl Operation for GetTierInfo {
let info = tier_config_mgr.get(&query.tier.unwrap());
let data = serde_json::to_vec(&info)
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("marshal tier err {}", e)))?;
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("marshal tier err {e}")))?;
let mut header = HeaderMap::new();
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());

View File

@@ -63,7 +63,7 @@ impl Operation for AddUser {
// .map_err(|e| S3Error::with_message(S3ErrorCode::InvalidArgument, format!("decrypt_data err {}", e)))?;
let args: AddOrUpdateUserReq = serde_json::from_slice(&body)
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("unmarshal body err {}", e)))?;
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("unmarshal body err {e}")))?;
if args.secret_key.is_empty() {
return Err(s3_error!(InvalidArgument, "access key is empty"));
@@ -111,7 +111,7 @@ impl Operation for AddUser {
iam_store
.create_user(ak, &args)
.await
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("create_user err {}", e)))?;
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("create_user err {e}")))?;
let mut header = HeaderMap::new();
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
@@ -156,7 +156,7 @@ impl Operation for SetUserStatus {
iam_store
.set_user_status(ak, status)
.await
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("set_user_status err {}", e)))?;
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("set_user_status err {e}")))?;
let mut header = HeaderMap::new();
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
@@ -201,7 +201,7 @@ impl Operation for ListUsers {
};
let data = serde_json::to_vec(&users)
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("marshal users err {}", e)))?;
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("marshal users err {e}")))?;
// let Some(input_cred) = req.credentials else {
// return Err(s3_error!(InvalidRequest, "get cred failed"));
@@ -242,7 +242,7 @@ impl Operation for RemoveUser {
let (is_temp, _) = iam_store
.is_temp_user(ak)
.await
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("is_temp_user err {}", e)))?;
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("is_temp_user err {e}")))?;
if is_temp {
return Err(s3_error!(InvalidArgument, "can't remove temp user"));
@@ -269,7 +269,7 @@ impl Operation for RemoveUser {
iam_store
.delete_user(ak, true)
.await
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("delete_user err {}", e)))?;
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("delete_user err {e}")))?;
let mut header = HeaderMap::new();
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
@@ -332,7 +332,7 @@ impl Operation for GetUserInfo {
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
let data = serde_json::to_vec(&info)
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("marshal user err {}", e)))?;
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, format!("marshal user err {e}")))?;
let mut header = HeaderMap::new();
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());

Some files were not shown because too many files have changed in this diff Show More