fix: update toolchain to make everything happy (#1134)

Signed-off-by: yihong0618 <zouzou0208@gmail.com>
yihong
2025-12-13 20:32:42 +08:00
committed by GitHub
parent 1229fddb5d
commit 67095c05f9
9 changed files with 53 additions and 70 deletions
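
Nearly every hunk below converts positional format arguments to Rust's inlined format args (stable since 1.58), presumably to satisfy clippy's uninlined_format_args lint under the newly pinned toolchain. A minimal standalone sketch of the pattern (illustrative only, not code from this repository):

    fn main() {
        let bucket = "demo";
        let key = "a/b.txt";
        // Before: positional arguments trail the format string.
        let old = format!("{}/{}", bucket, key);
        // After: identifiers are captured inside the braces; format specs
        // carry over, e.g. {:?} with a trailing arg becomes {name:?}.
        let new = format!("{bucket}/{key}");
        assert_eq!(old, new);
        println!("{new}");
    }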

View File

@@ -127,12 +127,12 @@ async fn test_get_deleted_object_returns_nosuchkey() -> Result<(), Box<dyn std::
info!("Service error code: {:?}", s3_err.meta().code());
// The error should be NoSuchKey
-assert!(s3_err.is_no_such_key(), "Error should be NoSuchKey, got: {:?}", s3_err);
+assert!(s3_err.is_no_such_key(), "Error should be NoSuchKey, got: {s3_err:?}");
info!("✅ Test passed: GetObject on deleted object correctly returns NoSuchKey");
}
other_err => {
panic!("Expected ServiceError with NoSuchKey, but got: {:?}", other_err);
panic!("Expected ServiceError with NoSuchKey, but got: {other_err:?}");
}
}
@@ -182,13 +182,12 @@ async fn test_head_deleted_object_returns_nosuchkey() -> Result<(), Box<dyn std:
let s3_err = service_err.into_err();
assert!(
s3_err.meta().code() == Some("NoSuchKey") || s3_err.meta().code() == Some("NotFound"),
"Error should be NoSuchKey or NotFound, got: {:?}",
s3_err
"Error should be NoSuchKey or NotFound, got: {s3_err:?}"
);
info!("✅ HeadObject correctly returns NoSuchKey/NotFound");
}
other_err => {
panic!("Expected ServiceError but got: {:?}", other_err);
panic!("Expected ServiceError but got: {other_err:?}");
}
}
@@ -220,11 +219,11 @@ async fn test_get_nonexistent_object_returns_nosuchkey() -> Result<(), Box<dyn s
match get_result.unwrap_err() {
SdkError::ServiceError(service_err) => {
let s3_err = service_err.into_err();
-assert!(s3_err.is_no_such_key(), "Error should be NoSuchKey, got: {:?}", s3_err);
+assert!(s3_err.is_no_such_key(), "Error should be NoSuchKey, got: {s3_err:?}");
info!("✅ GetObject correctly returns NoSuchKey for non-existent object");
}
other_err => {
panic!("Expected ServiceError with NoSuchKey, but got: {:?}", other_err);
panic!("Expected ServiceError with NoSuchKey, but got: {other_err:?}");
}
}
@@ -266,15 +265,15 @@ async fn test_multiple_gets_deleted_object() -> Result<(), Box<dyn std::error::E
info!("Attempt {} to get deleted object", i);
let get_result = client.get_object().bucket(BUCKET).key(key).send().await;
-assert!(get_result.is_err(), "Attempt {}: should return error", i);
+assert!(get_result.is_err(), "Attempt {i}: should return error");
match get_result.unwrap_err() {
SdkError::ServiceError(service_err) => {
let s3_err = service_err.into_err();
-assert!(s3_err.is_no_such_key(), "Attempt {}: Error should be NoSuchKey, got: {:?}", i, s3_err);
+assert!(s3_err.is_no_such_key(), "Attempt {i}: Error should be NoSuchKey, got: {s3_err:?}");
}
other_err => {
panic!("Attempt {}: Expected ServiceError but got: {:?}", i, other_err);
panic!("Attempt {i}: Expected ServiceError but got: {other_err:?}");
}
}
}

View File

@@ -256,7 +256,7 @@ mod tests {
let output = result.unwrap();
let body_bytes = output.body.collect().await.unwrap().into_bytes();
-assert_eq!(body_bytes.as_ref(), *content, "Content mismatch for key '{}'", key);
+assert_eq!(body_bytes.as_ref(), *content, "Content mismatch for key '{key}'");
info!("✅ PUT/GET succeeded for key: {}", key);
}
@@ -472,7 +472,7 @@ mod tests {
info!("Testing COPY from '{}' to '{}'", src_key, dest_key);
// COPY object
-let copy_source = format!("{}/{}", bucket, src_key);
+let copy_source = format!("{bucket}/{src_key}");
let result = client
.copy_object()
.bucket(bucket)
@@ -543,7 +543,7 @@ mod tests {
let output = result.unwrap();
let body_bytes = output.body.collect().await.unwrap().into_bytes();
-assert_eq!(body_bytes.as_ref(), *content, "Content mismatch for Unicode key '{}'", key);
+assert_eq!(body_bytes.as_ref(), *content, "Content mismatch for Unicode key '{key}'");
info!("✅ PUT/GET succeeded for Unicode key: {}", key);
}
@@ -610,7 +610,7 @@ mod tests {
let output = result.unwrap();
let body_bytes = output.body.collect().await.unwrap().into_bytes();
-assert_eq!(body_bytes.as_ref(), *content, "Content mismatch for key '{}'", key);
+assert_eq!(body_bytes.as_ref(), *content, "Content mismatch for key '{key}'");
info!("✅ PUT/GET succeeded for key: {}", key);
}
@@ -658,7 +658,7 @@ mod tests {
// Note: The validation happens on the server side, so we expect an error
// For null byte, newline, and carriage return
if key.contains('\0') || key.contains('\n') || key.contains('\r') {
-assert!(result.is_err(), "Control character should be rejected for key: {:?}", key);
+assert!(result.is_err(), "Control character should be rejected for key: {key:?}");
if let Err(e) = result {
info!("✅ Control character correctly rejected: {:?}", e);
}

View File

@@ -46,7 +46,7 @@ fn main() -> Result<(), AnyError> {
};
if !need_compile {
println!("no need to compile protos.{}", need_compile);
println!("no need to compile protos.{need_compile}");
return Ok(());
}

View File

@@ -13,5 +13,5 @@
# limitations under the License.
[toolchain]
channel = "stable"
channel = "1.88"
components = ["rustfmt", "clippy", "rust-src", "rust-analyzer"]

View File

@@ -1328,8 +1328,7 @@ impl Operation for ProfileHandler {
let target_arch = std::env::consts::ARCH;
let target_env = option_env!("CARGO_CFG_TARGET_ENV").unwrap_or("unknown");
let msg = format!(
"CPU profiling is not supported on this platform. target_os={}, target_env={}, target_arch={}, requested_url={}",
target_os, target_env, target_arch, requested_url
"CPU profiling is not supported on this platform. target_os={target_os}, target_env={target_env}, target_arch={target_arch}, requested_url={requested_url}"
);
return Ok(S3Response::new((StatusCode::NOT_IMPLEMENTED, Body::from(msg))));
}

View File

@@ -38,8 +38,7 @@ fn get_platform_info() -> (String, String, String) {
pub async fn dump_cpu_pprof_for(_duration: std::time::Duration) -> Result<std::path::PathBuf, String> {
let (target_os, target_env, target_arch) = get_platform_info();
let msg = format!(
"CPU profiling is not supported on this platform. target_os={}, target_env={}, target_arch={}",
target_os, target_env, target_arch
"CPU profiling is not supported on this platform. target_os={target_os}, target_env={target_env}, target_arch={target_arch}"
);
Err(msg)
}
@@ -48,8 +47,7 @@ pub async fn dump_cpu_pprof_for(_duration: std::time::Duration) -> Result<std::p
pub async fn dump_memory_pprof_now() -> Result<std::path::PathBuf, String> {
let (target_os, target_env, target_arch) = get_platform_info();
let msg = format!(
"Memory profiling is not supported on this platform. target_os={}, target_env={}, target_arch={}",
target_os, target_env, target_arch
"Memory profiling is not supported on this platform. target_os={target_os}, target_env={target_env}, target_arch={target_arch}"
);
Err(msg)
}

View File

@@ -1165,12 +1165,12 @@ impl HotObjectCache {
#[allow(dead_code)]
async fn invalidate_versioned(&self, bucket: &str, key: &str, version_id: Option<&str>) {
// Always invalidate the latest version key
-let base_key = format!("{}/{}", bucket, key);
+let base_key = format!("{bucket}/{key}");
self.invalidate(&base_key).await;
// Also invalidate the specific version if provided
if let Some(vid) = version_id {
-let versioned_key = format!("{}?versionId={}", base_key, vid);
+let versioned_key = format!("{base_key}?versionId={vid}");
self.invalidate(&versioned_key).await;
}
}
@@ -1625,8 +1625,8 @@ impl ConcurrencyManager {
/// Cache key string
pub fn make_cache_key(bucket: &str, key: &str, version_id: Option<&str>) -> String {
match version_id {
Some(vid) => format!("{}/{}?versionId={}", bucket, key, vid),
None => format!("{}/{}", bucket, key),
Some(vid) => format!("{bucket}/{key}?versionId={vid}"),
None => format!("{bucket}/{key}"),
}
}
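
For reference, the key shapes make_cache_key yields, written as a hypothetical check derived only from the function body shown above:

    assert_eq!(make_cache_key("bucket", "obj.txt", Some("v1")), "bucket/obj.txt?versionId=v1");
    assert_eq!(make_cache_key("bucket", "obj.txt", None), "bucket/obj.txt");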
@@ -1728,7 +1728,7 @@ mod tests {
// Fill cache with objects
for i in 0..200 {
let data = vec![0u8; 64 * KI_B];
cache.put(format!("key_{}", i), data).await;
cache.put(format!("key_{i}"), data).await;
}
let stats = cache.stats().await;
@@ -1785,8 +1785,7 @@ mod tests {
let result = get_advanced_buffer_size(32 * KI_B as i64, 256 * KI_B, true);
assert!(
(16 * KI_B..=64 * KI_B).contains(&result),
"Small files should use reduced buffer: {}",
result
"Small files should use reduced buffer: {result}"
);
}

View File

@@ -214,9 +214,7 @@ mod tests {
// Allow widened range due to parallel test execution affecting global counter
assert!(
(64 * KI_B..=MI_B).contains(&buffer_size),
"{}: buffer should be in valid range 64KB-1MB, got {} bytes",
description,
buffer_size
"{description}: buffer should be in valid range 64KB-1MB, got {buffer_size} bytes"
);
}
}
@@ -229,22 +227,20 @@ mod tests {
let min_buffer = get_concurrency_aware_buffer_size(small_file, 64 * KI_B);
assert!(
min_buffer >= 32 * KI_B,
"Buffer should have minimum size of 32KB for tiny files, got {}",
min_buffer
"Buffer should have minimum size of 32KB for tiny files, got {min_buffer}"
);
// Test maximum buffer size (capped at 1MB when base is reasonable)
let huge_file = 10 * 1024 * MI_B as i64; // 10GB file
let max_buffer = get_concurrency_aware_buffer_size(huge_file, MI_B);
assert!(max_buffer <= MI_B, "Buffer should not exceed 1MB cap when requested, got {}", max_buffer);
assert!(max_buffer <= MI_B, "Buffer should not exceed 1MB cap when requested, got {max_buffer}");
// Test buffer size scaling with base - when base is small, result respects the limits
let medium_file = 200 * KI_B as i64; // 200KB file (>100KB so minimum is 64KB)
let buffer = get_concurrency_aware_buffer_size(medium_file, 128 * KI_B);
assert!(
(64 * KI_B..=MI_B).contains(&buffer),
"Buffer should be between 64KB and 1MB, got {}",
buffer
"Buffer should be between 64KB and 1MB, got {buffer}"
);
}
@@ -271,7 +267,7 @@ mod tests {
let elapsed = start.elapsed();
// With 64 permits, 10 concurrent tasks should complete quickly
assert!(elapsed < Duration::from_secs(1), "Should complete within 1 second, took {:?}", elapsed);
assert!(elapsed < Duration::from_secs(1), "Should complete within 1 second, took {elapsed:?}");
}
/// Test Moka cache operations: insert, retrieve, stats, and clear.
@@ -373,7 +369,7 @@ mod tests {
let num_objects = 20; // Total 120MB > 100MB limit
for i in 0..num_objects {
let key = format!("test/object{}", i);
let key = format!("test/object{i}");
let data = vec![i as u8; object_size];
manager.cache_object(key, data).await;
sleep(Duration::from_millis(10)).await; // Give Moka time to process
@@ -407,7 +403,7 @@ mod tests {
// Cache multiple objects
for i in 0..10 {
let key = format!("batch/object{}", i);
let key = format!("batch/object{i}");
let data = vec![i as u8; 100 * KI_B]; // 100KB each
manager.cache_object(key, data).await;
}
@@ -415,14 +411,14 @@ mod tests {
sleep(Duration::from_millis(100)).await;
// Test batch get
let keys: Vec<String> = (0..10).map(|i| format!("batch/object{}", i)).collect();
let keys: Vec<String> = (0..10).map(|i| format!("batch/object{i}")).collect();
let results = manager.get_cached_batch(&keys).await;
assert_eq!(results.len(), 10, "Should return result for each key");
// Verify all objects were retrieved
let hits = results.iter().filter(|r| r.is_some()).count();
assert!(hits >= 8, "Most objects should be cached (got {}/10 hits)", hits);
assert!(hits >= 8, "Most objects should be cached (got {hits}/10 hits)");
// Mix of existing and non-existing keys
let mixed_keys = vec![
@@ -442,7 +438,7 @@ mod tests {
// Prepare objects for warming
let objects: Vec<(String, Vec<u8>)> = (0..5)
.map(|i| (format!("warm/object{}", i), vec![i as u8; 500 * KI_B]))
.map(|i| (format!("warm/object{i}"), vec![i as u8; 500 * KI_B]))
.collect();
// Warm cache
@@ -452,8 +448,8 @@ mod tests {
// Verify all objects are cached
for (key, data) in objects {
let cached = manager.get_cached(&key).await;
-assert!(cached.is_some(), "Warmed object {} should be cached", key);
-assert_eq!(*cached.unwrap(), data, "Cached data for {} should match", key);
+assert!(cached.is_some(), "Warmed object {key} should be cached");
+assert_eq!(*cached.unwrap(), data, "Cached data for {key} should match");
}
let stats = manager.cache_stats().await;
@@ -467,7 +463,7 @@ mod tests {
// Cache objects with different access patterns
for i in 0..5 {
let key = format!("hot/object{}", i);
let key = format!("hot/object{i}");
let data = vec![i as u8; 100 * KI_B];
manager.cache_object(key, data).await;
}
@@ -540,19 +536,15 @@ mod tests {
let small_size = get_advanced_buffer_size(128 * KI_B as i64, base_buffer, false);
assert!(
small_size < base_buffer,
"Small files should use smaller buffers: {} < {}",
small_size,
base_buffer
"Small files should use smaller buffers: {small_size} < {base_buffer}"
);
assert!(small_size >= 16 * KI_B, "Should not go below minimum: {}", small_size);
assert!(small_size >= 16 * KI_B, "Should not go below minimum: {small_size}");
// Test sequential read optimization
let seq_size = get_advanced_buffer_size(32 * MI_B as i64, base_buffer, true);
assert!(
seq_size >= base_buffer,
"Sequential reads should use larger buffers: {} >= {}",
seq_size,
base_buffer
"Sequential reads should use larger buffers: {seq_size} >= {base_buffer}"
);
// Test large file with high concurrency
@@ -560,9 +552,7 @@ mod tests {
let large_concurrent = get_advanced_buffer_size(100 * MI_B as i64, base_buffer, false);
assert!(
large_concurrent <= base_buffer,
"High concurrency should reduce buffer: {} <= {}",
large_concurrent,
base_buffer
"High concurrency should reduce buffer: {large_concurrent} <= {base_buffer}"
);
}
@@ -573,7 +563,7 @@ mod tests {
// Pre-populate cache
for i in 0..20 {
let key = format!("concurrent/object{}", i);
let key = format!("concurrent/object{i}");
let data = vec![i as u8; 100 * KI_B];
manager.cache_object(key, data).await;
}
@@ -602,8 +592,7 @@ mod tests {
// Moka's lock-free design should handle this quickly
assert!(
elapsed < Duration::from_millis(500),
"Concurrent cache access should be fast (took {:?})",
elapsed
"Concurrent cache access should be fast (took {elapsed:?})"
);
}
@@ -637,7 +626,7 @@ mod tests {
// Cache some objects
for i in 0..5 {
let key = format!("hitrate/object{}", i);
let key = format!("hitrate/object{i}");
let data = vec![i as u8; 100 * KI_B];
manager.cache_object(key, data).await;
}
@@ -647,16 +636,16 @@ mod tests {
// Mix of hits and misses
for i in 0..10 {
let key = if i < 5 {
format!("hitrate/object{}", i) // Hit
format!("hitrate/object{i}") // Hit
} else {
format!("hitrate/missing{}", i) // Miss
format!("hitrate/missing{i}") // Miss
};
let _ = manager.get_cached(&key).await;
}
// Hit rate should be around 50%
let hit_rate = manager.cache_hit_rate();
-assert!((40.0..=60.0).contains(&hit_rate), "Hit rate should be ~50%, got {:.1}%", hit_rate);
+assert!((40.0..=60.0).contains(&hit_rate), "Hit rate should be ~50%, got {hit_rate:.1}%");
}
/// Test TTL expiration (Moka automatic cleanup)
@@ -688,7 +677,7 @@ mod tests {
// Pre-populate
for i in 0..50 {
let key = format!("bench/object{}", i);
let key = format!("bench/object{i}");
let data = vec![i as u8; 500 * KI_B];
manager.cache_object(key, data).await;
}
@@ -1224,14 +1213,13 @@ mod tests {
// Average should be around 20ms
assert!(
avg >= Duration::from_millis(15) && avg <= Duration::from_millis(25),
"Average should be around 20ms, got {:?}",
avg
"Average should be around 20ms, got {avg:?}"
);
// Max should be 30ms
assert_eq!(max, Duration::from_millis(30), "Max should be 30ms");
// P95 should be at or near 30ms
assert!(p95 >= Duration::from_millis(25), "P95 should be near 30ms, got {:?}", p95);
assert!(p95 >= Duration::from_millis(25), "P95 should be near 30ms, got {p95:?}");
}
}

View File

@@ -465,7 +465,7 @@ fn validate_object_key(key: &str, operation: &str) -> S3Result<()> {
if key.contains(['\0', '\n', '\r']) {
return Err(S3Error::with_message(
S3ErrorCode::InvalidArgument,
format!("Object key contains invalid control characters: {:?}", key),
format!("Object key contains invalid control characters: {key:?}"),
));
}
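
As an aside, the contains(['\0', '\n', '\r']) call works because char arrays implement the Pattern trait, so a single call screens for all three control characters; a hypothetical illustration:

    assert!("bad\nkey".contains(['\0', '\n', '\r']));
    assert!(!"clean-key".contains(['\0', '\n', '\r']));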
@@ -2152,7 +2152,7 @@ impl S3 for FS {
let mut buf = Vec::with_capacity(response_content_length as usize);
if let Err(e) = tokio::io::AsyncReadExt::read_to_end(&mut final_stream, &mut buf).await {
error!("Failed to read object into memory for caching: {}", e);
-return Err(ApiError::from(StorageError::other(format!("Failed to read object for caching: {}", e))).into());
+return Err(ApiError::from(StorageError::other(format!("Failed to read object for caching: {e}"))).into());
}
// Verify we read the expected amount