refactor: simplify initialization flow and modernize string formatting (#548)

Author: weisd
Date: 2025-09-16 15:44:50 +08:00
Committed by: GitHub
Parent: a12a3bedc3
Commit: ae7e86d7ef

16 changed files with 71 additions and 97 deletions
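Most of the diff below applies Rust's inline format arguments (stabilized in Rust 1.58), which let a bare identifier be captured directly inside the format string; Clippy's uninlined_format_args lint flags the older positional form. A minimal sketch of the pattern, using hypothetical values:

fn main() {
    let e = "connection reset"; // hypothetical error text
    let old = format!("Read metadata failed: {}", e); // positional argument
    let new = format!("Read metadata failed: {e}");   // inline capture
    assert_eq!(old, new);

    // Debug formatting captures the same way: {e:?} instead of {:?} with e.
    // Only bare identifiers can be captured; expressions keep the positional
    // form, which is why lines like format!("test-bucket-{}", i % 10) are
    // left untouched in this commit.
    let i = 37;
    let bucket = format!("test-bucket-{}", i % 10);
    assert_eq!(bucket, "test-bucket-7");
}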

View File

@@ -340,7 +340,7 @@ impl HealTask {
Ok((result, error)) => {
if let Some(e) = error {
// Check if this is a "File not found" error during delete operations
-let error_msg = format!("{}", e);
+let error_msg = format!("{e}");
if error_msg.contains("File not found") || error_msg.contains("not found") {
info!(
"Object {}/{} not found during heal - likely deleted intentionally, treating as successful",
@@ -395,7 +395,7 @@ impl HealTask {
}
Err(e) => {
// Check if this is a "File not found" error during delete operations
-let error_msg = format!("{}", e);
+let error_msg = format!("{e}");
if error_msg.contains("File not found") || error_msg.contains("not found") {
info!(
"Object {}/{} not found during heal - likely deleted intentionally, treating as successful",

View File

@@ -458,7 +458,7 @@ impl LocalDisk {
{
let cache = self.path_cache.read();
for (i, (bucket, key)) in requests.iter().enumerate() {
-let cache_key = format!("{}/{}", bucket, key);
+let cache_key = format!("{bucket}/{key}");
if let Some(cached_path) = cache.get(&cache_key) {
results.push((i, cached_path.clone()));
} else {

View File

@@ -65,7 +65,7 @@ impl OptimizedFileCache {
// Cache miss, read file
let data = tokio::fs::read(&path)
.await
.map_err(|e| Error::other(format!("Read metadata failed: {}", e)))?;
.map_err(|e| Error::other(format!("Read metadata failed: {e}")))?;
let mut meta = FileMeta::default();
meta.unmarshal_msg(&data)?;
@@ -86,7 +86,7 @@ impl OptimizedFileCache {
let data = tokio::fs::read(&path)
.await
.map_err(|e| Error::other(format!("Read file failed: {}", e)))?;
.map_err(|e| Error::other(format!("Read file failed: {e}")))?;
let bytes = Bytes::from(data);
self.file_content_cache.insert(path, bytes.clone()).await;
@@ -295,9 +295,9 @@ mod tests {
let mut paths = Vec::new();
for i in 0..5 {
-let file_path = dir.path().join(format!("test_{}.txt", i));
+let file_path = dir.path().join(format!("test_{i}.txt"));
let mut file = std::fs::File::create(&file_path).unwrap();
writeln!(file, "content {}", i).unwrap();
writeln!(file, "content {i}").unwrap();
paths.push(file_path);
}

View File

@@ -2430,7 +2430,7 @@ impl SetDisks {
.map_err(|e| {
let elapsed = start_time.elapsed();
error!("Failed to acquire write lock for heal operation after {:?}: {:?}", elapsed, e);
DiskError::other(format!("Failed to acquire write lock for heal operation: {:?}", e))
DiskError::other(format!("Failed to acquire write lock for heal operation: {e:?}"))
})?;
let elapsed = start_time.elapsed();
info!("Successfully acquired write lock for object: {} in {:?}", object, elapsed);
@@ -3045,7 +3045,7 @@ impl SetDisks {
.fast_lock_manager
.acquire_write_lock("", object, self.locker_owner.as_str())
.await
-.map_err(|e| DiskError::other(format!("Failed to acquire write lock for heal directory operation: {:?}", e)))?;
+.map_err(|e| DiskError::other(format!("Failed to acquire write lock for heal directory operation: {e:?}")))?;
let disks = {
let disks = self.disks.read().await;
@@ -5594,7 +5594,7 @@ impl StorageAPI for SetDisks {
self.fast_lock_manager
.acquire_write_lock("", object, self.locker_owner.as_str())
.await
.map_err(|e| Error::other(format!("Failed to acquire write lock for heal operation: {:?}", e)))?,
.map_err(|e| Error::other(format!("Failed to acquire write lock for heal operation: {e:?}")))?,
)
} else {
None

View File

@@ -23,7 +23,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("Lock system status: {}", if manager.is_disabled() { "DISABLED" } else { "ENABLED" });
match std::env::var("RUSTFS_ENABLE_LOCKS") {
-Ok(value) => println!("RUSTFS_ENABLE_LOCKS set to: {}", value),
+Ok(value) => println!("RUSTFS_ENABLE_LOCKS set to: {value}"),
Err(_) => println!("RUSTFS_ENABLE_LOCKS not set (defaults to enabled)"),
}
@@ -34,7 +34,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
println!("Lock acquired successfully! Disabled: {}", guard.is_disabled());
}
Err(e) => {
println!("Failed to acquire lock: {:?}", e);
println!("Failed to acquire lock: {e:?}");
}
}

View File

@@ -87,7 +87,7 @@ impl LockClient for LocalClient {
current_owner,
current_mode,
}) => Ok(LockResponse::failure(
format!("Lock conflict: resource held by {} in {:?} mode", current_owner, current_mode),
format!("Lock conflict: resource held by {current_owner} in {current_mode:?} mode"),
std::time::Duration::ZERO,
)),
Err(crate::fast_lock::LockResult::Acquired) => {
@@ -131,7 +131,7 @@ impl LockClient for LocalClient {
current_owner,
current_mode,
}) => Ok(LockResponse::failure(
format!("Lock conflict: resource held by {} in {:?} mode", current_owner, current_mode),
format!("Lock conflict: resource held by {current_owner} in {current_mode:?} mode"),
std::time::Duration::ZERO,
)),
Err(crate::fast_lock::LockResult::Acquired) => {

View File

@@ -409,14 +409,14 @@ mod tests {
// Acquire multiple guards and verify unique IDs
let mut guards = Vec::new();
for i in 0..100 {
let object_name = format!("object_{}", i);
let object_name = format!("object_{i}");
let guard = manager
.acquire_write_lock("bucket", object_name.as_str(), "owner")
.await
.expect("Failed to acquire lock");
let guard_id = guard.guard_id();
-assert!(guard_ids.insert(guard_id), "Guard ID {} is not unique", guard_id);
+assert!(guard_ids.insert(guard_id), "Guard ID {guard_id} is not unique");
guards.push(guard);
}
@@ -501,7 +501,7 @@ mod tests {
let handle = tokio::spawn(async move {
for i in 0..10 {
let object_name = format!("obj_{}_{}", task_id, i);
let object_name = format!("obj_{task_id}_{i}");
// Acquire lock
let mut guard = match manager.acquire_write_lock("bucket", object_name.as_str(), "owner").await {
@@ -535,7 +535,7 @@ mod tests {
let blocked = double_release_blocked.load(Ordering::SeqCst);
// Should have many successful releases and all double releases blocked
-assert!(successes > 150, "Expected many successful releases, got {}", successes);
+assert!(successes > 150, "Expected many successful releases, got {successes}");
assert_eq!(blocked, successes, "All double releases should be blocked");
// Verify no active guards remain
@@ -567,7 +567,7 @@ mod tests {
// Acquire multiple locks for the same object to ensure they're in the same shard
let mut guards = Vec::new();
for i in 0..10 {
let owner_name = format!("owner_{}", i);
let owner_name = format!("owner_{i}");
let guard = manager
.acquire_read_lock("bucket", "shared_object", owner_name.as_str())
.await
@@ -586,7 +586,7 @@ mod tests {
let cleaned = shard.adaptive_cleanup();
// Should clean very little due to active guards
-assert!(cleaned <= 5, "Should be conservative with active guards, cleaned: {}", cleaned);
+assert!(cleaned <= 5, "Should be conservative with active guards, cleaned: {cleaned}");
// Locks should be protected by active guards
let remaining_locks = shard.lock_count();
@@ -809,7 +809,7 @@ mod tests {
let manager_clone = manager.clone();
let handle = tokio::spawn(async move {
let _guard = manager_clone
.acquire_read_lock("bucket", format!("object-{}", i), "background")
.acquire_read_lock("bucket", format!("object-{i}"), "background")
.await;
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
});
@@ -842,7 +842,7 @@ mod tests {
// High priority should generally perform reasonably well
// This is more of a performance validation than a strict requirement
println!("High priority: {:?}, Normal: {:?}", high_priority_duration, normal_duration);
println!("High priority: {high_priority_duration:?}, Normal: {normal_duration:?}");
// Both operations should complete in reasonable time (less than 100ms in test environment)
// This validates that the priority system isn't causing severe degradation
@@ -858,7 +858,7 @@ mod tests {
let mut _guards = Vec::new();
for i in 0..100 {
if let Ok(guard) = manager
.acquire_write_lock("bucket", format!("load-object-{}", i), "loader")
.acquire_write_lock("bucket", format!("load-object-{i}"), "loader")
.await
{
_guards.push(guard);
@@ -909,8 +909,8 @@ mod tests {
// Try to acquire all locks for this "query"
for obj_id in 0..objects_per_query {
let bucket = "databend";
let object = format!("table_partition_{}_{}", query_id, obj_id);
let owner = format!("query_{}", query_id);
let object = format!("table_partition_{query_id}_{obj_id}");
let owner = format!("query_{query_id}");
match manager_clone.acquire_high_priority_read_lock(bucket, object, owner).await {
Ok(guard) => query_locks.push(guard),
@@ -935,8 +935,8 @@ mod tests {
let manager_clone = manager.clone();
let handle = tokio::spawn(async move {
let bucket = "databend";
let object = format!("write_target_{}", write_id);
let owner = format!("writer_{}", write_id);
let object = format!("write_target_{write_id}");
let owner = format!("writer_{write_id}");
match manager_clone.acquire_write_lock(bucket, object, owner).await {
Ok(_guard) => {
@@ -988,7 +988,7 @@ mod tests {
let handle = tokio::spawn(async move {
let bucket = "test";
let object = format!("extreme_load_object_{}", i % 20); // Force some contention
let owner = format!("user_{}", i);
let owner = format!("user_{i}");
// Mix of read and write locks to create realistic contention
let result = if i % 3 == 0 {
@@ -1066,8 +1066,8 @@ mod tests {
};
let bucket = "datacenter-shared";
let object = format!("table_{}_{}_partition_{}", table_id, client_id, table_idx);
let owner = format!("client_{}_query_{}", client_id, query_id);
let object = format!("table_{table_id}_{client_id}_partition_{table_idx}");
let owner = format!("client_{client_id}_query_{query_id}");
// Mix of operations - mostly reads with some writes
let lock_result = if table_idx == 0 && query_id % 7 == 0 {
@@ -1129,10 +1129,10 @@ mod tests {
}
println!("\n=== Multi-Client Datacenter Simulation Results ===");
println!("Total execution time: {:?}", total_time);
println!("Total clients: {}", num_clients);
println!("Queries per client: {}", queries_per_client);
println!("Total queries executed: {}", total_queries);
println!("Total execution time: {total_time:?}");
println!("Total clients: {num_clients}");
println!("Queries per client: {queries_per_client}");
println!("Total queries executed: {total_queries}");
println!(
"Successful queries: {} ({:.1}%)",
successful_queries,
@@ -1179,8 +1179,7 @@ mod tests {
// Performance assertion - should complete in reasonable time
assert!(
total_time < std::time::Duration::from_secs(120),
"Multi-client test took too long: {:?}",
total_time
"Multi-client test took too long: {total_time:?}"
);
}
@@ -1209,8 +1208,8 @@ mod tests {
client_attempts += 1;
let bucket = "hot-data";
let object = format!("popular_table_{}", obj_id);
let owner = format!("thundering_client_{}", client_id);
let object = format!("popular_table_{obj_id}");
let owner = format!("thundering_client_{client_id}");
// Simulate different access patterns
let result = match obj_id % 3 {
@@ -1265,12 +1264,12 @@ mod tests {
let success_rate = total_successes as f64 / total_attempts as f64;
println!("\n=== Thundering Herd Scenario Results ===");
println!("Concurrent clients: {}", num_concurrent_clients);
println!("Hot objects: {}", hot_objects);
println!("Total attempts: {}", total_attempts);
println!("Total successes: {}", total_successes);
println!("Concurrent clients: {num_concurrent_clients}");
println!("Hot objects: {hot_objects}");
println!("Total attempts: {total_attempts}");
println!("Total successes: {total_successes}");
println!("Success rate: {:.1}%", success_rate * 100.0);
println!("Total time: {:?}", total_time);
println!("Total time: {total_time:?}");
println!(
"Average time per operation: {:.1}ms",
total_time.as_millis() as f64 / total_attempts as f64
@@ -1286,8 +1285,7 @@ mod tests {
// Should handle this volume in reasonable time
assert!(
total_time < std::time::Duration::from_secs(180),
"Thundering herd test took too long: {:?}",
total_time
"Thundering herd test took too long: {total_time:?}"
);
}
@@ -1314,7 +1312,7 @@ mod tests {
for op_id in 0..operations_per_transaction {
let bucket = "oltp-data";
let object = format!("record_{}_{}", oltp_id * 10 + tx_id, op_id);
let owner = format!("oltp_{}_{}", oltp_id, tx_id);
let owner = format!("oltp_{oltp_id}_{tx_id}");
// OLTP is mostly writes
let result = manager_clone.acquire_write_lock(bucket, object, owner).await;
@@ -1358,7 +1356,7 @@ mod tests {
table_id % 20, // Some overlap between queries
query_id
);
let owner = format!("olap_{}_{}", olap_id, query_id);
let owner = format!("olap_{olap_id}_{query_id}");
// OLAP is mostly reads with high priority
let result = manager_clone.acquire_critical_read_lock(bucket, object, owner).await;
@@ -1408,7 +1406,7 @@ mod tests {
let olap_success_rate = olap_successes as f64 / olap_attempts as f64;
println!("\n=== Mixed Workload Stress Test Results ===");
println!("Total time: {:?}", total_time);
println!("Total time: {total_time:?}");
println!(
"OLTP: {}/{} transactions succeeded ({:.1}%)",
oltp_successes,

View File

@@ -152,14 +152,14 @@ pub mod performance_comparison {
for i in 0..1000 {
let guard = fast_manager
.acquire_write_lock("bucket", format!("object_{}", i), owner)
.acquire_write_lock("bucket", format!("object_{i}"), owner)
.await
.expect("Failed to acquire fast lock");
guards.push(guard);
}
let fast_duration = start.elapsed();
println!("Fast lock: 1000 acquisitions in {:?}", fast_duration);
println!("Fast lock: 1000 acquisitions in {fast_duration:?}");
// Release all
drop(guards);

View File

@@ -27,7 +27,7 @@ mod tests {
let mut guards = Vec::new();
for i in 0..100 {
let bucket = format!("test-bucket-{}", i % 10); // Reuse some bucket names
let object = format!("test-object-{}", i);
let object = format!("test-object-{i}");
let guard = manager
.acquire_write_lock(bucket.as_str(), object.as_str(), "test-owner")
@@ -53,10 +53,7 @@ mod tests {
0.0
};
-println!(
-    "Pool stats - Hits: {}, Misses: {}, Releases: {}, Pool size: {}",
-    hits, misses, releases, pool_size
-);
+println!("Pool stats - Hits: {hits}, Misses: {misses}, Releases: {releases}, Pool size: {pool_size}");
println!("Hit rate: {:.2}%", hit_rate * 100.0);
// We should see some pool activity
@@ -82,7 +79,7 @@ mod tests {
.expect("Failed to acquire second read lock");
let duration = start.elapsed();
println!("Two read locks on different objects took: {:?}", duration);
println!("Two read locks on different objects took: {duration:?}");
// Should be very fast since no contention
assert!(duration < Duration::from_millis(10), "Read locks should be fast with no contention");
@@ -103,7 +100,7 @@ mod tests {
.expect("Failed to acquire second read lock on same object");
let duration = start.elapsed();
println!("Two read locks on same object took: {:?}", duration);
println!("Two read locks on same object took: {duration:?}");
// Should still be fast since read locks are compatible
assert!(duration < Duration::from_millis(10), "Compatible read locks should be fast");
@@ -132,7 +129,7 @@ mod tests {
.expect("Failed to acquire second read lock");
let second_duration = start.elapsed();
println!("First lock: {:?}, Second lock: {:?}", first_duration, second_duration);
println!("First lock: {first_duration:?}, Second lock: {second_duration:?}");
// Both should be very fast (sub-millisecond typically)
assert!(first_duration < Duration::from_millis(10));
@@ -157,7 +154,7 @@ mod tests {
let result = manager.acquire_locks_batch(batch).await;
let duration = start.elapsed();
println!("Batch operation took: {:?}", duration);
println!("Batch operation took: {duration:?}");
assert!(result.all_acquired, "All locks should be acquired");
assert_eq!(result.successful_locks.len(), 3);

View File

@@ -580,7 +580,7 @@ mod tests {
for env_value in production_envs {
let is_production = env_value.to_lowercase() == "production";
assert!(is_production, "Should detect '{}' as production environment", env_value);
assert!(is_production, "Should detect '{env_value}' as production environment");
}
}
@@ -591,7 +591,7 @@ mod tests {
for env_value in non_production_envs {
let is_production = env_value.to_lowercase() == "production";
assert!(!is_production, "Should not detect '{}' as production environment", env_value);
assert!(!is_production, "Should not detect '{env_value}' as production environment");
}
}
@@ -674,8 +674,7 @@ mod tests {
assert_eq!(
filter_variant, expected_variant,
"Log level '{}' should map to '{}'",
input_level, expected_variant
"Log level '{input_level}' should map to '{expected_variant}'"
);
}
}

View File

@@ -396,7 +396,7 @@ mod tests {
// Test cache stats (note: moka cache might not immediately reflect changes)
let (total, _weighted_size) = resolver.cache_stats().await;
// Cache should have at least the entry we just added (might be 0 due to async nature)
-assert!(total <= 1, "Cache should have at most 1 entry, got {}", total);
+assert!(total <= 1, "Cache should have at most 1 entry, got {total}");
}
#[tokio::test]
@@ -407,12 +407,12 @@ mod tests {
match resolver.resolve("localhost").await {
Ok(ips) => {
assert!(!ips.is_empty());
println!("Resolved localhost to: {:?}", ips);
println!("Resolved localhost to: {ips:?}");
}
Err(e) => {
// In some test environments, even localhost might fail
// This is acceptable as long as our error handling works
println!("DNS resolution failed (might be expected in test environments): {}", e);
println!("DNS resolution failed (might be expected in test environments): {e}");
}
}
}
@@ -428,7 +428,7 @@ mod tests {
assert!(result.is_err());
if let Err(e) = result {
println!("Expected error for invalid domain: {}", e);
println!("Expected error for invalid domain: {e}");
// Should be AllAttemptsFailed since both system and public DNS should fail
assert!(matches!(e, DnsError::AllAttemptsFailed { .. }));
}
@@ -464,10 +464,10 @@ mod tests {
match resolve_domain("localhost").await {
Ok(ips) => {
assert!(!ips.is_empty());
println!("Global resolver resolved localhost to: {:?}", ips);
println!("Global resolver resolved localhost to: {ips:?}");
}
Err(e) => {
println!("Global resolver DNS resolution failed (might be expected in test environments): {}", e);
println!("Global resolver DNS resolution failed (might be expected in test environments): {e}");
}
}
}

View File

@@ -430,7 +430,7 @@ mod test {
match get_host_ip(invalid_host.clone()).await {
Ok(ips) => {
// Depending on DNS resolver behavior, it might return empty set or error
-assert!(ips.is_empty(), "Expected empty IP set for invalid domain, got: {:?}", ips);
+assert!(ips.is_empty(), "Expected empty IP set for invalid domain, got: {ips:?}");
}
Err(_) => {
error!("Expected error for invalid domain");

View File

@@ -241,7 +241,7 @@ pub async fn config_handler(uri: Uri, Host(host): Host, headers: HeaderMap) -> i
if ip.is_ipv6() { format!("[{ip}]") } else { format!("{ip}") }
} else if let Ok(ip) = raw_host.parse::<IpAddr>() {
// Pure IP (no ports)
if ip.is_ipv6() { format!("[{}]", ip) } else { ip.to_string() }
if ip.is_ipv6() { format!("[{ip}]") } else { ip.to_string() }
} else {
// The domain name may not be able to resolve directly to IP, remove the port
raw_host.split(':').next().unwrap_or(raw_host).to_string()
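The branch above exists because an IPv6 literal must be wrapped in square brackets before a port can be appended to it, while IPv4 addresses and hostnames need no wrapping. A small self-contained sketch of the same shape, with assumed addresses:

use std::net::IpAddr;

fn main() {
    // Same shape as the handler: bracket only IPv6.
    let bracket = |ip: IpAddr| if ip.is_ipv6() { format!("[{ip}]") } else { ip.to_string() };
    let v6: IpAddr = "::1".parse().unwrap();
    let v4: IpAddr = "127.0.0.1".parse().unwrap();
    assert_eq!(bracket(v6), "[::1]");
    assert_eq!(bracket(v4), "127.0.0.1");
}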

View File

@@ -1322,7 +1322,7 @@ impl Operation for ProfileHandler {
error!("Failed to build profiler report: {}", e);
return Ok(S3Response::new((
StatusCode::INTERNAL_SERVER_ERROR,
Body::from(format!("Failed to build profile report: {}", e)),
Body::from(format!("Failed to build profile report: {e}")),
)));
}
};
@@ -1353,7 +1353,7 @@ impl Operation for ProfileHandler {
error!("Failed to generate flamegraph: {}", e);
return Ok(S3Response::new((
StatusCode::INTERNAL_SERVER_ERROR,
Body::from(format!("Failed to generate flamegraph: {}", e)),
Body::from(format!("Failed to generate flamegraph: {e}")),
)));
}
};

View File

@@ -261,31 +261,11 @@ async fn run(opt: config::Opt) -> Result<()> {
// Collect bucket names into a vector
let buckets: Vec<String> = buckets_list.into_iter().map(|v| v.name).collect();
-// Parallelize initialization tasks for better network performance
-let bucket_metadata_task = tokio::spawn({
-    let store = store.clone();
-    let buckets = buckets.clone();
-    async move {
-        init_bucket_metadata_sys(store, buckets).await;
-    }
-});
+init_bucket_metadata_sys(store.clone(), buckets.clone()).await;
-let iam_init_task = tokio::spawn({
-    let store = store.clone();
-    async move { init_iam_sys(store).await }
-});
+init_iam_sys(store.clone()).await.map_err(Error::other)?;
-let notification_config_task = tokio::spawn({
-    let buckets = buckets.clone();
-    async move {
-        add_bucket_notification_configuration(buckets).await;
-    }
-});
-// Wait for all parallel initialization tasks to complete
-bucket_metadata_task.await.map_err(Error::other)?;
-iam_init_task.await.map_err(Error::other)??;
-notification_config_task.await.map_err(Error::other)?;
+add_bucket_notification_configuration(buckets.clone()).await;
// Initialize the global notification system
new_global_notification_sys(endpoint_pools.clone()).await.map_err(|err| {

View File

@@ -23,7 +23,7 @@ pub fn init_profiler() -> Result<(), Box<dyn std::error::Error>> {
.frequency(1000)
.blocklist(&["libc", "libgcc", "pthread", "vdso"])
.build()
.map_err(|e| format!("Failed to build profiler guard: {}", e))?;
.map_err(|e| format!("Failed to build profiler guard: {e}"))?;
PROFILER_GUARD
.set(Arc::new(Mutex::new(guard)))