cargo fmt

Author: houseme
Date: 2025-05-25 17:46:59 +08:00
Parent: 1e48f8d74e
Commit: 864071d641
12 changed files with 251 additions and 273 deletions
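
A formatting-only pass, plus two small dependency bumps picked up in the lockfile. For reference, a minimal sketch of the standard rustfmt workflow, assuming stock cargo tooling; the exact invocation is not recorded in the commit:

# Reformat every workspace member in place
cargo fmt --all

# CI-style check: exits non-zero if any file would be rewritten,
# without modifying anything
cargo fmt --all -- --check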

Cargo.lock (generated): 96 lines changed
View File

@@ -5678,6 +5678,16 @@ dependencies = [
"objc2-core-foundation",
]
[[package]]
name = "objc2-io-kit"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "71c1c64d6120e51cd86033f67176b1cb66780c2efe34dec55176f77befd93c0a"
dependencies = [
"libc",
"objc2-core-foundation",
]
[[package]]
name = "objc2-metal"
version = "0.2.2"
@@ -8414,15 +8424,16 @@ dependencies = [
[[package]]
name = "sysinfo"
version = "0.34.2"
version = "0.35.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a4b93974b3d3aeaa036504b8eefd4c039dced109171c1ae973f1dc63b2c7e4b2"
checksum = "79251336d17c72d9762b8b54be4befe38d2db56fbbc0241396d70f173c39d47a"
dependencies = [
"libc",
"memchr",
"ntapi",
"objc2-core-foundation",
"windows 0.57.0",
"objc2-io-kit",
"windows 0.61.1",
]
[[package]]
@@ -8729,9 +8740,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"
[[package]]
name = "tokio"
version = "1.45.0"
version = "1.45.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "2513ca694ef9ede0fb23fe71a4ee4107cb102b9dc1930f6d0fd77aae068ae165"
checksum = "75ef51a33ef1da925cea3e4eb122833cb377c61439ca401b770f54902b806779"
dependencies = [
"backtrace",
"bytes",
@@ -9720,16 +9731,6 @@ version = "0.4.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "712e227841d057c1ee1cd2fb22fa7e5a5461ae8e48fa2ca79ec42cfc1931183f"
[[package]]
name = "windows"
version = "0.57.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "12342cb4d8e3b046f3d80effd474a7a02447231330ef77d71daa6fbc40681143"
dependencies = [
"windows-core 0.57.0",
"windows-targets 0.52.6",
]
[[package]]
name = "windows"
version = "0.58.0"
@@ -9741,15 +9742,25 @@ dependencies = [
]
[[package]]
name = "windows-core"
version = "0.57.0"
name = "windows"
version = "0.61.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d2ed2439a290666cd67ecce2b0ffaad89c2a56b976b736e6ece670297897832d"
checksum = "c5ee8f3d025738cb02bad7868bbb5f8a6327501e870bf51f1b455b0a2454a419"
dependencies = [
"windows-implement 0.57.0",
"windows-interface 0.57.0",
"windows-result 0.1.2",
"windows-targets 0.52.6",
"windows-collections",
"windows-core 0.61.0",
"windows-future",
"windows-link",
"windows-numerics",
]
[[package]]
name = "windows-collections"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3beeceb5e5cfd9eb1d76b381630e82c4241ccd0d27f1a39ed41b2760b255c5e8"
dependencies = [
"windows-core 0.61.0",
]
[[package]]
@@ -9779,14 +9790,13 @@ dependencies = [
]
[[package]]
name = "windows-implement"
version = "0.57.0"
name = "windows-future"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9107ddc059d5b6fbfbffdfa7a7fe3e22a226def0b2608f72e9d552763d3e1ad7"
checksum = "7a1d6bbefcb7b60acd19828e1bc965da6fcf18a7e39490c5f8be71e54a19ba32"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.100",
"windows-core 0.61.0",
"windows-link",
]
[[package]]
@@ -9811,17 +9821,6 @@ dependencies = [
"syn 2.0.100",
]
[[package]]
name = "windows-interface"
version = "0.57.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "29bee4b38ea3cde66011baa44dba677c432a78593e202392d1e9070cf2a7fca7"
dependencies = [
"proc-macro2",
"quote",
"syn 2.0.100",
]
[[package]]
name = "windows-interface"
version = "0.58.0"
@@ -9850,6 +9849,16 @@ version = "0.1.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "76840935b766e1b0a05c0066835fb9ec80071d4c09a16f6bd5f7e655e3c14c38"
[[package]]
name = "windows-numerics"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9150af68066c4c5c07ddc0ce30421554771e528bde427614c61038bc2c92c2b1"
dependencies = [
"windows-core 0.61.0",
"windows-link",
]
[[package]]
name = "windows-registry"
version = "0.4.0"
@@ -9861,15 +9870,6 @@ dependencies = [
"windows-targets 0.53.0",
]
[[package]]
name = "windows-result"
version = "0.1.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5e383302e8ec8515204254685643de10811af0ed97ea37210dc26fb0032647f8"
dependencies = [
"windows-targets 0.52.6",
]
[[package]]
name = "windows-result"
version = "0.2.0"

View File

@@ -1,23 +1,23 @@
[workspace]
members = [
"appauth", # Application authentication and authorization
"cli/rustfs-gui", # Graphical user interface client
"common/common", # Shared utilities and data structures
"common/lock", # Distributed locking implementation
"common/protos", # Protocol buffer definitions
"common/workers", # Worker thread pools and task scheduling
"crates/config", # Configuration management
"appauth", # Application authentication and authorization
"cli/rustfs-gui", # Graphical user interface client
"common/common", # Shared utilities and data structures
"common/lock", # Distributed locking implementation
"common/protos", # Protocol buffer definitions
"common/workers", # Worker thread pools and task scheduling
"crates/config", # Configuration management
"crates/event-notifier", # Event notification system
"crates/obs", # Observability utilities
"crates/utils", # Utility functions and helpers
"crypto", # Cryptography and security features
"ecstore", # Erasure coding storage implementation
"e2e_test", # End-to-end test suite
"iam", # Identity and Access Management
"madmin", # Management dashboard and admin API interface
"rustfs", # Core file system implementation
"s3select/api", # S3 Select API interface
"s3select/query", # S3 Select query engine
"crates/obs", # Observability utilities
"crates/utils", # Utility functions and helpers
"crypto", # Cryptography and security features
"ecstore", # Erasure coding storage implementation
"e2e_test", # End-to-end test suite
"iam", # Identity and Access Management
"madmin", # Management dashboard and admin API interface
"rustfs", # Core file system implementation
"s3select/api", # S3 Select API interface
"s3select/query", # S3 Select query engine
"crates/zip",
]
resolver = "2"
@@ -162,7 +162,7 @@ smallvec = { version = "1.15.0", features = ["serde"] }
snafu = "0.8.5"
socket2 = "0.5.9"
strum = { version = "0.27.1", features = ["derive"] }
sysinfo = "0.34.2"
sysinfo = "0.35.1"
tempfile = "3.19.1"
test-case = "3.3.1"
thiserror = "2.0.12"
@@ -173,7 +173,7 @@ time = { version = "0.3.41", features = [
"macros",
"serde",
] }
tokio = { version = "1.45.0", features = ["fs", "rt-multi-thread"] }
tokio = { version = "1.45.1", features = ["fs", "rt-multi-thread"] }
tonic = { version = "0.13.1", features = ["gzip"] }
tonic-build = { version = "0.13.1" }
tokio-rustls = { version = "0.26.2", default-features = false }

View File

@@ -112,14 +112,15 @@ mod tests {
fn test_logging_constants() {
// Test logging related constants
assert_eq!(DEFAULT_LOG_LEVEL, "info");
assert!(["trace", "debug", "info", "warn", "error"].contains(&DEFAULT_LOG_LEVEL),
"Log level should be a valid tracing level");
assert!(
["trace", "debug", "info", "warn", "error"].contains(&DEFAULT_LOG_LEVEL),
"Log level should be a valid tracing level"
);
assert_eq!(USE_STDOUT, true);
assert_eq!(SAMPLE_RATIO, 1.0);
assert!(SAMPLE_RATIO >= 0.0 && SAMPLE_RATIO <= 1.0,
"Sample ratio should be between 0.0 and 1.0");
assert!(SAMPLE_RATIO >= 0.0 && SAMPLE_RATIO <= 1.0, "Sample ratio should be between 0.0 and 1.0");
assert_eq!(METER_INTERVAL, 30);
assert!(METER_INTERVAL > 0, "Meter interval should be positive");
@@ -129,8 +130,10 @@ mod tests {
fn test_environment_constants() {
// Test environment related constants
assert_eq!(ENVIRONMENT, "production");
assert!(["development", "staging", "production", "test"].contains(&ENVIRONMENT),
"Environment should be a standard environment name");
assert!(
["development", "staging", "production", "test"].contains(&ENVIRONMENT),
"Environment should be a standard environment name"
);
}
#[test]
@@ -186,8 +189,7 @@ mod tests {
assert!(DEFAULT_CONSOLE_PORT > 1024, "Console port should be above reserved range");
// u16 type automatically ensures port is in valid range (0-65535)
assert_ne!(DEFAULT_PORT, DEFAULT_CONSOLE_PORT,
"Main port and console port should be different");
assert_ne!(DEFAULT_PORT, DEFAULT_CONSOLE_PORT, "Main port and console port should be different");
}
#[test]
@@ -195,16 +197,22 @@ mod tests {
// Test address related constants
assert_eq!(DEFAULT_ADDRESS, ":9000");
assert!(DEFAULT_ADDRESS.starts_with(':'), "Address should start with colon");
assert!(DEFAULT_ADDRESS.contains(&DEFAULT_PORT.to_string()),
"Address should contain the default port");
assert!(
DEFAULT_ADDRESS.contains(&DEFAULT_PORT.to_string()),
"Address should contain the default port"
);
assert_eq!(DEFAULT_CONSOLE_ADDRESS, ":9002");
assert!(DEFAULT_CONSOLE_ADDRESS.starts_with(':'), "Console address should start with colon");
assert!(DEFAULT_CONSOLE_ADDRESS.contains(&DEFAULT_CONSOLE_PORT.to_string()),
"Console address should contain the console port");
assert!(
DEFAULT_CONSOLE_ADDRESS.contains(&DEFAULT_CONSOLE_PORT.to_string()),
"Console address should contain the console port"
);
assert_ne!(DEFAULT_ADDRESS, DEFAULT_CONSOLE_ADDRESS,
"Main address and console address should be different");
assert_ne!(
DEFAULT_ADDRESS, DEFAULT_CONSOLE_ADDRESS,
"Main address and console address should be different"
);
}
#[test]
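
The reflowed assertions in this and the following files are what rustfmt produces when a macro call overruns the configured line width: each argument moves onto its own line, with the message string last. A plausible rustfmt.toml for this style, assuming a widened line limit; the value below is illustrative, not a file taken from this repository:

# rustfmt.toml (hypothetical sketch)
# The exact width cannot be read off this diff, but it is above the
# default 100: one-liners such as the SAMPLE_RATIO assert above run
# longer than 100 columns and were left intact.
max_width = 120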

View File

@@ -169,7 +169,7 @@ mod tests {
drop(rx); // Close receiver
// Create a test event
use crate::event::{Name, Metadata, Source, Bucket, Object, Identity};
use crate::event::{Bucket, Identity, Metadata, Name, Object, Source};
use std::collections::HashMap;
let identity = Identity::new("test-user".to_string());
@@ -260,7 +260,7 @@ mod tests {
let event_bus_error = Error::EventBusStarted;
match event_bus_error {
Error::EventBusStarted => {}, // correct match
Error::EventBusStarted => {} // correct match
_ => panic!("Pattern matching failed"),
}
}

View File

@@ -152,13 +152,11 @@ mod tests {
match ip_option {
Some(ip) => {
// If get_local_ip returns Some, then get_local_ip_with_default should return the same IP
assert_eq!(ip.to_string(), ip_string,
"Both functions should return the same IP when available");
assert_eq!(ip.to_string(), ip_string, "Both functions should return the same IP when available");
}
None => {
// If get_local_ip returns None, then get_local_ip_with_default should return default value
assert_eq!(ip_string, "127.0.0.1",
"Should return default value when no IP is available");
assert_eq!(ip_string, "127.0.0.1", "Should return default value when no IP is available");
}
}
}

View File

@@ -205,10 +205,7 @@ pub struct ZipEntry {
}
/// Simplified ZIP file processing (temporarily using standard library zip crate)
pub async fn extract_zip_simple<P: AsRef<Path>>(
zip_path: P,
extract_to: P,
) -> io::Result<Vec<ZipEntry>> {
pub async fn extract_zip_simple<P: AsRef<Path>>(zip_path: P, extract_to: P) -> io::Result<Vec<ZipEntry>> {
// Use standard library zip processing, return empty list as placeholder for now
// Actual implementation needs to be improved in future versions
let _zip_path = zip_path.as_ref();
@@ -224,10 +221,7 @@ pub async fn create_zip_simple<P: AsRef<Path>>(
_compression_level: CompressionLevel,
) -> io::Result<()> {
// Return unimplemented error for now
Err(io::Error::new(
io::ErrorKind::Unsupported,
"ZIP creation not yet implemented",
))
Err(io::Error::new(io::ErrorKind::Unsupported, "ZIP creation not yet implemented"))
}
/// Compression utility struct
@@ -312,8 +306,7 @@ mod tests {
use std::io::Cursor;
use tokio::io::AsyncReadExt;
#[test]
#[test]
fn test_compression_format_from_extension() {
// Test supported compression format recognition
assert_eq!(CompressionFormat::from_extension("gz"), CompressionFormat::Gzip);
@@ -414,7 +407,7 @@ mod tests {
assert!(decoder_result.is_err(), "Zip format should return error (not implemented)");
}
#[tokio::test]
#[tokio::test]
async fn test_get_decoder_all_supported_formats() {
// Test that all supported formats can create decoders successfully
let sample_content = b"Hello, compression world!";
@@ -435,7 +428,7 @@ mod tests {
}
}
#[tokio::test]
#[tokio::test]
async fn test_decoder_type_consistency() {
// Test decoder return type consistency
let sample_content = b"Hello, compression world!";
@@ -489,8 +482,13 @@ mod tests {
];
for (ext, expected_format) in extension_mappings {
assert_eq!(CompressionFormat::from_extension(ext), expected_format,
"Extension '{}' should map to {:?}", ext, expected_format);
assert_eq!(
CompressionFormat::from_extension(ext),
expected_format,
"Extension '{}' should map to {:?}",
ext,
expected_format
);
}
}
@@ -508,8 +506,13 @@ mod tests {
];
for (format, expected_str) in format_strings {
assert_eq!(format!("{:?}", format), expected_str,
"Format {:?} should have string representation '{}'", format, expected_str);
assert_eq!(
format!("{:?}", format),
expected_str,
"Format {:?} should have string representation '{}'",
format,
expected_str
);
}
}
@@ -537,7 +540,11 @@ mod tests {
// Verify Option<CompressionFormat> size
let option_size = mem::size_of::<Option<CompressionFormat>>();
assert!(option_size <= 16, "Option<CompressionFormat> should be efficient, got {} bytes", option_size);
assert!(
option_size <= 16,
"Option<CompressionFormat> should be efficient, got {} bytes",
option_size
);
}
#[test]
@@ -548,13 +555,11 @@ mod tests {
("gz", true),
("bz2", true),
("xz", true),
// Edge cases
("", false),
("g", false),
("gzz", false),
("gz2", false),
// Special characters
("gz.", false),
(".gz", false),
@@ -565,13 +570,15 @@ mod tests {
for (ext, should_be_known) in test_cases {
let format = CompressionFormat::from_extension(ext);
let is_known = format != CompressionFormat::Unknown;
assert_eq!(is_known, should_be_known,
assert_eq!(
is_known, should_be_known,
"Extension '{}' recognition mismatch: expected {}, got {}",
ext, should_be_known, is_known);
ext, should_be_known, is_known
);
}
}
#[tokio::test]
#[tokio::test]
async fn test_decoder_trait_bounds() {
// Test decoder trait bounds compliance
let sample_content = b"Hello, compression world!";
@@ -599,12 +606,11 @@ mod tests {
for (format, ext) in consistency_tests {
let parsed_format = CompressionFormat::from_extension(ext);
assert_eq!(parsed_format, format,
"Extension '{}' should consistently map to {:?}", ext, format);
assert_eq!(parsed_format, format, "Extension '{}' should consistently map to {:?}", ext, format);
}
}
#[tokio::test]
#[tokio::test]
async fn test_decompress_with_invalid_format() {
// Test decompression with invalid format
use std::sync::atomic::{AtomicUsize, Ordering};
@@ -616,20 +622,21 @@ mod tests {
let processed_entries_count = Arc::new(AtomicUsize::new(0));
let processed_entries_count_clone = processed_entries_count.clone();
let decompress_result = decompress(
cursor,
CompressionFormat::Unknown,
move |_archive_entry| {
processed_entries_count_clone.fetch_add(1, Ordering::SeqCst);
async move { Ok(()) }
}
).await;
let decompress_result = decompress(cursor, CompressionFormat::Unknown, move |_archive_entry| {
processed_entries_count_clone.fetch_add(1, Ordering::SeqCst);
async move { Ok(()) }
})
.await;
assert!(decompress_result.is_err(), "Decompress with Unknown format should fail");
assert_eq!(processed_entries_count.load(Ordering::SeqCst), 0, "No entries should be processed with invalid format");
assert_eq!(
processed_entries_count.load(Ordering::SeqCst),
0,
"No entries should be processed with invalid format"
);
}
#[tokio::test]
#[tokio::test]
async fn test_decompress_with_zip_format() {
// Test decompression with Zip format (currently not supported)
use std::sync::atomic::{AtomicUsize, Ordering};
@@ -641,17 +648,18 @@ mod tests {
let processed_entries_count = Arc::new(AtomicUsize::new(0));
let processed_entries_count_clone = processed_entries_count.clone();
let decompress_result = decompress(
cursor,
CompressionFormat::Zip,
move |_archive_entry| {
processed_entries_count_clone.fetch_add(1, Ordering::SeqCst);
async move { Ok(()) }
}
).await;
let decompress_result = decompress(cursor, CompressionFormat::Zip, move |_archive_entry| {
processed_entries_count_clone.fetch_add(1, Ordering::SeqCst);
async move { Ok(()) }
})
.await;
assert!(decompress_result.is_err(), "Decompress with Zip format should fail (not implemented)");
assert_eq!(processed_entries_count.load(Ordering::SeqCst), 0, "No entries should be processed with unsupported format");
assert_eq!(
processed_entries_count.load(Ordering::SeqCst),
0,
"No entries should be processed with unsupported format"
);
}
#[tokio::test]
@@ -666,21 +674,18 @@ mod tests {
let callback_invocation_count = Arc::new(AtomicUsize::new(0));
let callback_invocation_count_clone = callback_invocation_count.clone();
let decompress_result = decompress(
cursor,
CompressionFormat::Gzip,
move |_archive_entry| {
let invocation_number = callback_invocation_count_clone.fetch_add(1, Ordering::SeqCst);
async move {
if invocation_number == 0 {
// First invocation returns an error
Err(io::Error::new(io::ErrorKind::Other, "Simulated callback error"))
} else {
Ok(())
}
let decompress_result = decompress(cursor, CompressionFormat::Gzip, move |_archive_entry| {
let invocation_number = callback_invocation_count_clone.fetch_add(1, Ordering::SeqCst);
async move {
if invocation_number == 0 {
// First invocation returns an error
Err(io::Error::new(io::ErrorKind::Other, "Simulated callback error"))
} else {
Ok(())
}
}
).await;
})
.await;
// Since input data is not valid gzip format, it may fail during parsing phase
// This mainly tests the error handling mechanism
@@ -699,14 +704,11 @@ mod tests {
let callback_was_invoked = Arc::new(AtomicBool::new(false));
let callback_was_invoked_clone = callback_was_invoked.clone();
let _decompress_result = decompress(
cursor,
CompressionFormat::Gzip,
move |_archive_entry| {
callback_was_invoked_clone.store(true, Ordering::SeqCst);
async move { Ok(()) }
}
).await;
let _decompress_result = decompress(cursor, CompressionFormat::Gzip, move |_archive_entry| {
callback_was_invoked_clone.store(true, Ordering::SeqCst);
async move { Ok(()) }
})
.await;
// Note: Since test data is not valid gzip format, callback may not be invoked
// This test mainly verifies function signature and basic flow
@@ -767,15 +769,14 @@ mod tests {
assert!(true, "Extension parsing performance test completed");
}
#[test]
#[test]
fn test_format_default_behavior() {
// Test the default behavior of formats
let unknown_extensions = vec!["", "txt", "doc", "pdf", "unknown_ext"];
for ext in unknown_extensions {
let format = CompressionFormat::from_extension(ext);
assert_eq!(format, CompressionFormat::Unknown,
"Extension '{}' should default to Unknown", ext);
assert_eq!(format, CompressionFormat::Unknown, "Extension '{}' should default to Unknown", ext);
}
}
@@ -851,7 +852,7 @@ mod tests {
// Test that unsupported formats return errors
use std::io::Cursor;
let output1 = Vec::new();
let output1 = Vec::new();
let cursor1 = Cursor::new(output1);
let unknown_format = CompressionFormat::Unknown;
@@ -865,7 +866,7 @@ mod tests {
assert!(zip_encoder_result.is_err(), "Zip format should return error (requires special handling)");
}
#[tokio::test]
#[tokio::test]
async fn test_compressor_basic_functionality() {
// Test basic compressor functionality (Note: current implementation returns empty vector as placeholder)
let compressor = Compressor::new(CompressionFormat::Gzip);
@@ -886,8 +887,7 @@ mod tests {
#[tokio::test]
async fn test_compressor_with_level() {
// Test compressor with custom compression level
let compressor = Compressor::new(CompressionFormat::Gzip)
.with_level(CompressionLevel::Best);
let compressor = Compressor::new(CompressionFormat::Gzip).with_level(CompressionLevel::Best);
let sample_text = b"Sample text for compression level testing";
let compression_result = compressor.compress(sample_text).await;

View File

@@ -190,11 +190,7 @@ mod tests {
#[test]
fn test_all_algorithms_produce_valid_keys() {
// Test that all algorithm variants can generate valid keys
let algorithms = [
ID::Argon2idAESGCM,
ID::Argon2idChaCHa20Poly1305,
ID::Pbkdf2AESGCM,
];
let algorithms = [ID::Argon2idAESGCM, ID::Argon2idChaCHa20Poly1305, ID::Pbkdf2AESGCM];
let password = b"test_password_123";
let salt = b"test_salt_16bytes";
@@ -214,20 +210,17 @@ mod tests {
#[test]
fn test_round_trip_conversion() {
// Test round-trip conversion: ID -> u8 -> ID
let original_ids = [
ID::Argon2idAESGCM,
ID::Argon2idChaCHa20Poly1305,
ID::Pbkdf2AESGCM,
];
let original_ids = [ID::Argon2idAESGCM, ID::Argon2idChaCHa20Poly1305, ID::Pbkdf2AESGCM];
for original in &original_ids {
let as_u8 = *original as u8;
let converted_back = ID::try_from(as_u8).unwrap();
assert!(matches!((original, converted_back),
(ID::Argon2idAESGCM, ID::Argon2idAESGCM) |
(ID::Argon2idChaCHa20Poly1305, ID::Argon2idChaCHa20Poly1305) |
(ID::Pbkdf2AESGCM, ID::Pbkdf2AESGCM)
assert!(matches!(
(original, converted_back),
(ID::Argon2idAESGCM, ID::Argon2idAESGCM)
| (ID::Argon2idChaCHa20Poly1305, ID::Argon2idChaCHa20Poly1305)
| (ID::Pbkdf2AESGCM, ID::Pbkdf2AESGCM)
));
}
}

View File

@@ -5666,7 +5666,7 @@ fn get_complete_multipart_md5(parts: &[CompletePart]) -> String {
mod tests {
use super::*;
use crate::disk::error::DiskError;
use crate::store_api::{CompletePart, FileInfo, ErasureInfo};
use crate::store_api::{CompletePart, ErasureInfo, FileInfo};
use common::error::Error;
use time::OffsetDateTime;
@@ -5958,7 +5958,7 @@ mod tests {
erasure: ErasureInfo {
data_blocks: 4,
parity_blocks: 2,
index: 1, // Must be > 0 for is_valid() to return true
index: 1, // Must be > 0 for is_valid() to return true
distribution: vec![1, 2, 3, 4, 5, 6], // Must match data_blocks + parity_blocks
..Default::default()
},
@@ -5970,7 +5970,7 @@ mod tests {
erasure: ErasureInfo {
data_blocks: 6,
parity_blocks: 3,
index: 2, // Must be > 0 for is_valid() to return true
index: 2, // Must be > 0 for is_valid() to return true
distribution: vec![1, 2, 3, 4, 5, 6, 7, 8, 9], // Must match data_blocks + parity_blocks
..Default::default()
},
@@ -5982,7 +5982,7 @@ mod tests {
erasure: ErasureInfo {
data_blocks: 2,
parity_blocks: 1,
index: 1, // Must be > 0 for is_valid() to return true
index: 1, // Must be > 0 for is_valid() to return true
distribution: vec![1, 2, 3], // Must match data_blocks + parity_blocks
..Default::default()
},
@@ -6007,13 +6007,22 @@ mod tests {
assert_eq!(result2[1], -1); // Should be -1 due to error
}
#[test]
#[test]
fn test_conv_part_err_to_int() {
// Test error to integer conversion
assert_eq!(conv_part_err_to_int(&None), CHECK_PART_SUCCESS);
assert_eq!(conv_part_err_to_int(&Some(Error::new(DiskError::DiskNotFound))), CHECK_PART_DISK_NOT_FOUND);
assert_eq!(conv_part_err_to_int(&Some(Error::new(DiskError::VolumeNotFound))), CHECK_PART_VOLUME_NOT_FOUND);
assert_eq!(conv_part_err_to_int(&Some(Error::new(DiskError::FileNotFound))), CHECK_PART_FILE_NOT_FOUND);
assert_eq!(
conv_part_err_to_int(&Some(Error::new(DiskError::DiskNotFound))),
CHECK_PART_DISK_NOT_FOUND
);
assert_eq!(
conv_part_err_to_int(&Some(Error::new(DiskError::VolumeNotFound))),
CHECK_PART_VOLUME_NOT_FOUND
);
assert_eq!(
conv_part_err_to_int(&Some(Error::new(DiskError::FileNotFound))),
CHECK_PART_FILE_NOT_FOUND
);
assert_eq!(conv_part_err_to_int(&Some(Error::new(DiskError::FileCorrupt))), CHECK_PART_FILE_CORRUPT);
// Test unknown error - function returns CHECK_PART_SUCCESS for non-DiskError
@@ -6062,32 +6071,17 @@ mod tests {
assert!(should_heal2);
// Test with no error but part errors
let (should_heal3, _) = should_heal_object_on_disk(
&None,
&[CHECK_PART_FILE_NOT_FOUND],
&latest_meta,
&latest_meta,
);
let (should_heal3, _) = should_heal_object_on_disk(&None, &[CHECK_PART_FILE_NOT_FOUND], &latest_meta, &latest_meta);
assert!(should_heal3);
// Test with no error and no part errors
let (should_heal4, _) = should_heal_object_on_disk(
&None,
&[CHECK_PART_SUCCESS],
&latest_meta,
&latest_meta,
);
let (should_heal4, _) = should_heal_object_on_disk(&None, &[CHECK_PART_SUCCESS], &latest_meta, &latest_meta);
assert!(!should_heal4);
// Test with outdated metadata
let mut old_meta = latest_meta.clone();
old_meta.name = "different-name".to_string();
let (should_heal5, _) = should_heal_object_on_disk(
&None,
&[CHECK_PART_SUCCESS],
&old_meta,
&latest_meta,
);
let (should_heal5, _) = should_heal_object_on_disk(&None, &[CHECK_PART_SUCCESS], &old_meta, &latest_meta);
assert!(should_heal5);
}
@@ -6156,12 +6150,7 @@ mod tests {
#[test]
fn test_join_errs() {
// Test error joining
let errs = vec![
None,
Some(Error::msg("error1")),
Some(Error::msg("error2")),
None,
];
let errs = vec![None, Some(Error::msg("error1")), Some(Error::msg("error2")), None];
let result = join_errs(&errs);
assert!(result.contains("error1"));
@@ -6204,11 +6193,7 @@ mod tests {
// Test disk evaluation based on errors
// This test would need mock DiskStore objects, so we'll test the logic conceptually
let disks = vec![None, None, None]; // Mock empty disks
let errs = vec![
None,
Some(Error::new(DiskError::DiskNotFound)),
None,
];
let errs = vec![None, Some(Error::new(DiskError::DiskNotFound)), None];
let result = SetDisks::eval_disks(&disks, &errs);
assert_eq!(result.len(), 3);
@@ -6219,9 +6204,18 @@ mod tests {
fn test_shuffle_parts_metadata() {
// Test metadata shuffling
let metadata = vec![
FileInfo { name: "file1".to_string(), ..Default::default() },
FileInfo { name: "file2".to_string(), ..Default::default() },
FileInfo { name: "file3".to_string(), ..Default::default() },
FileInfo {
name: "file1".to_string(),
..Default::default()
},
FileInfo {
name: "file2".to_string(),
..Default::default()
},
FileInfo {
name: "file3".to_string(),
..Default::default()
},
];
// Distribution uses 1-based indexing

View File

@@ -2829,13 +2829,11 @@ mod tests {
#[test]
fn test_server_pools_available_space_iter() {
let spaces = ServerPoolsAvailableSpace(vec![
PoolAvailableSpace {
index: 0,
available: 1000,
max_used_pct: 50,
},
]);
let spaces = ServerPoolsAvailableSpace(vec![PoolAvailableSpace {
index: 0,
available: 1000,
max_used_pct: 50,
}]);
let mut count = 0;
for space in spaces.iter() {
@@ -2884,4 +2882,3 @@ mod tests {
assert!(check_put_object_args("test-bucket", "").is_err());
}
}

View File

@@ -221,7 +221,7 @@ mod tests {
let invalid_paths = [
"/this/path/definitely/does/not/exist/anywhere",
"/dev/null/invalid", // /dev/null is a file, not a directory
"", // Empty path
"", // Empty path
];
for invalid_path in &invalid_paths {

View File

@@ -58,7 +58,7 @@ pub fn extract_claims<T: DeserializeOwned>(
#[cfg(test)]
mod tests {
use super::{gen_access_key, gen_secret_key, generate_jwt, extract_claims};
use super::{extract_claims, gen_access_key, gen_secret_key, generate_jwt};
use serde::{Deserialize, Serialize};
#[test]
@@ -89,7 +89,10 @@ mod tests {
let key = gen_access_key(100).unwrap();
for ch in key.chars() {
assert!(ch.is_ascii_alphanumeric(), "Access key should only contain alphanumeric characters");
assert!(ch.is_ascii_uppercase() || ch.is_ascii_digit(), "Access key should only contain uppercase letters and digits");
assert!(
ch.is_ascii_uppercase() || ch.is_ascii_digit(),
"Access key should only contain uppercase letters and digits"
);
}
}
@@ -130,8 +133,10 @@ mod tests {
// Should not contain invalid characters for URL-safe base64
for ch in key.chars() {
assert!(ch.is_ascii_alphanumeric() || ch == '+' || ch == '-' || ch == '_',
"Secret key should be URL-safe base64 compatible");
assert!(
ch.is_ascii_alphanumeric() || ch == '+' || ch == '-' || ch == '_',
"Secret key should be URL-safe base64 compatible"
);
}
}

View File

@@ -2422,39 +2422,32 @@ impl Node for NodeService {
#[cfg(test)]
mod tests {
use super::*;
use protos::proto_gen::node_service::{
BackgroundHealStatusRequest, BackgroundHealStatusResponse, CheckPartsRequest, CheckPartsResponse,
DeleteBucketMetadataRequest, DeleteBucketMetadataResponse, DeleteBucketRequest, DeleteBucketResponse, DeletePathsRequest,
DeletePathsResponse, DeletePolicyRequest, DeletePolicyResponse, DeleteRequest, DeleteResponse,
DeleteServiceAccountRequest, DeleteServiceAccountResponse, DeleteUserRequest, DeleteUserResponse, DeleteVersionRequest,
DeleteVersionResponse, DeleteVersionsRequest, DeleteVersionsResponse, DeleteVolumeRequest, DeleteVolumeResponse,
DiskInfoRequest, DiskInfoResponse, GenerallyLockRequest, GenerallyLockResponse, GetBucketInfoRequest,
GetBucketInfoResponse, GetCpusRequest, GetCpusResponse, GetMemInfoRequest, GetMemInfoResponse, GetNetInfoRequest,
GetNetInfoResponse, GetOsInfoRequest, GetOsInfoResponse, GetPartitionsRequest, GetPartitionsResponse, GetProcInfoRequest,
GetProcInfoResponse, GetSeLinuxInfoRequest, GetSeLinuxInfoResponse, GetSysConfigRequest, GetSysConfigResponse,
GetSysErrorsRequest, GetSysErrorsResponse, HealBucketRequest, HealBucketResponse, ListBucketRequest, ListBucketResponse,
ListDirRequest, ListDirResponse, ListVolumesRequest, ListVolumesResponse, LoadBucketMetadataRequest,
LoadBucketMetadataResponse, LoadGroupRequest, LoadGroupResponse, LoadPolicyMappingRequest, LoadPolicyMappingResponse,
LoadPolicyRequest, LoadPolicyResponse, LoadRebalanceMetaRequest, LoadRebalanceMetaResponse, LoadServiceAccountRequest,
LoadServiceAccountResponse, LoadUserRequest, LoadUserResponse, LocalStorageInfoRequest, LocalStorageInfoResponse,
MakeBucketRequest, MakeBucketResponse, MakeVolumeRequest, MakeVolumeResponse, MakeVolumesRequest, MakeVolumesResponse,
Mss, PingRequest, PingResponse, ReadAllRequest, ReadAllResponse, ReadMultipleRequest, ReadMultipleResponse,
ReadVersionRequest, ReadVersionResponse, ReadXlRequest, ReadXlResponse, ReloadPoolMetaRequest, ReloadPoolMetaResponse,
ReloadSiteReplicationConfigRequest, ReloadSiteReplicationConfigResponse, RenameDataRequest, RenameDataResponse,
RenameFileRequst, RenameFileResponse, RenamePartRequst, RenamePartResponse, ServerInfoRequest, ServerInfoResponse,
SignalServiceRequest, SignalServiceResponse, StatVolumeRequest, StatVolumeResponse, StopRebalanceRequest,
StopRebalanceResponse, UpdateMetadataRequest, UpdateMetadataResponse, VerifyFileRequest, VerifyFileResponse,
WriteAllRequest, WriteAllResponse, WriteMetadataRequest, WriteMetadataResponse,
};
use std::collections::HashMap;
use tonic::Request;
use protos::proto_gen::node_service::{
PingRequest, PingResponse, HealBucketRequest, HealBucketResponse,
ListBucketRequest, ListBucketResponse, MakeBucketRequest, MakeBucketResponse,
GetBucketInfoRequest, GetBucketInfoResponse, DeleteBucketRequest, DeleteBucketResponse,
ReadAllRequest, ReadAllResponse, WriteAllRequest, WriteAllResponse,
DeleteRequest, DeleteResponse, VerifyFileRequest, VerifyFileResponse,
CheckPartsRequest, CheckPartsResponse, RenamePartRequst, RenamePartResponse,
RenameFileRequst, RenameFileResponse, ListDirRequest, ListDirResponse,
RenameDataRequest, RenameDataResponse, MakeVolumesRequest, MakeVolumesResponse,
MakeVolumeRequest, MakeVolumeResponse, ListVolumesRequest, ListVolumesResponse,
StatVolumeRequest, StatVolumeResponse, DeletePathsRequest, DeletePathsResponse,
UpdateMetadataRequest, UpdateMetadataResponse, WriteMetadataRequest, WriteMetadataResponse,
ReadVersionRequest, ReadVersionResponse, ReadXlRequest, ReadXlResponse,
DeleteVersionRequest, DeleteVersionResponse, DeleteVersionsRequest, DeleteVersionsResponse,
ReadMultipleRequest, ReadMultipleResponse, DeleteVolumeRequest, DeleteVolumeResponse,
DiskInfoRequest, DiskInfoResponse, GenerallyLockRequest, GenerallyLockResponse,
LocalStorageInfoRequest, LocalStorageInfoResponse, ServerInfoRequest, ServerInfoResponse,
GetCpusRequest, GetCpusResponse, GetNetInfoRequest, GetNetInfoResponse,
GetPartitionsRequest, GetPartitionsResponse, GetOsInfoRequest, GetOsInfoResponse,
GetSeLinuxInfoRequest, GetSeLinuxInfoResponse, GetSysConfigRequest, GetSysConfigResponse,
GetSysErrorsRequest, GetSysErrorsResponse, GetMemInfoRequest, GetMemInfoResponse,
GetProcInfoRequest, GetProcInfoResponse, BackgroundHealStatusRequest, BackgroundHealStatusResponse,
ReloadPoolMetaRequest, ReloadPoolMetaResponse, StopRebalanceRequest, StopRebalanceResponse,
LoadRebalanceMetaRequest, LoadRebalanceMetaResponse, LoadBucketMetadataRequest, LoadBucketMetadataResponse,
DeleteBucketMetadataRequest, DeleteBucketMetadataResponse, DeletePolicyRequest, DeletePolicyResponse,
LoadPolicyRequest, LoadPolicyResponse, LoadPolicyMappingRequest, LoadPolicyMappingResponse,
DeleteUserRequest, DeleteUserResponse, DeleteServiceAccountRequest, DeleteServiceAccountResponse,
LoadUserRequest, LoadUserResponse, LoadServiceAccountRequest, LoadServiceAccountResponse,
LoadGroupRequest, LoadGroupResponse, ReloadSiteReplicationConfigRequest, ReloadSiteReplicationConfigResponse,
SignalServiceRequest, SignalServiceResponse, Mss,
};
fn create_test_node_service() -> NodeService {
make_server()
@@ -3382,9 +3375,7 @@ mod tests {
async fn test_local_storage_info() {
let service = create_test_node_service();
let request = Request::new(LocalStorageInfoRequest {
metrics: false,
});
let request = Request::new(LocalStorageInfoRequest { metrics: false });
let response = service.local_storage_info(request).await;
assert!(response.is_ok());
@@ -3399,9 +3390,7 @@ mod tests {
async fn test_server_info() {
let service = create_test_node_service();
let request = Request::new(ServerInfoRequest {
metrics: false,
});
let request = Request::new(ServerInfoRequest { metrics: false });
let response = service.server_info(request).await;
assert!(response.is_ok());
@@ -3585,9 +3574,7 @@ mod tests {
async fn test_load_rebalance_meta() {
let service = create_test_node_service();
let request = Request::new(LoadRebalanceMetaRequest {
start_rebalance: false,
});
let request = Request::new(LoadRebalanceMetaRequest { start_rebalance: false });
let response = service.load_rebalance_meta(request).await;
// Should return error because object layer is not initialized, or success if it's implemented
@@ -3598,9 +3585,7 @@ mod tests {
async fn test_load_bucket_metadata_empty_bucket() {
let service = create_test_node_service();
let request = Request::new(LoadBucketMetadataRequest {
bucket: "".to_string(),
});
let request = Request::new(LoadBucketMetadataRequest { bucket: "".to_string() });
let response = service.load_bucket_metadata(request).await;
assert!(response.is_ok());
@@ -3769,9 +3754,7 @@ mod tests {
async fn test_load_group_empty_name() {
let service = create_test_node_service();
let request = Request::new(LoadGroupRequest {
group: "".to_string(),
});
let request = Request::new(LoadGroupRequest { group: "".to_string() });
let response = service.load_group(request).await;
assert!(response.is_ok());
@@ -3797,7 +3780,7 @@ mod tests {
assert!(reload_response.error_info.is_some());
}
// Note: signal_service test is skipped because it contains todo!() and would panic
// Note: signal_service test is skipped because it contains todo!() and would panic
#[test]
fn test_node_service_debug() {