From dd47fcf2a81877446ea5e2d793fce12e922298ee Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?=E5=AE=89=E6=AD=A3=E8=B6=85?= Date: Wed, 29 Oct 2025 13:16:31 +0800 Subject: [PATCH] fix: restore localized samples in tests (#749) * fix: restore required localized examples * style: fix formatting issues --- README.md | 2 +- crates/audit/tests/performance_test.rs | 2 +- crates/crypto/src/encdec/tests.rs | 2 +- crates/e2e_test/src/kms/README.md | 274 +- crates/e2e_test/src/kms/common.rs | 22 +- .../src/kms/multipart_encryption_test.rs | 110 +- crates/e2e_test/src/kms/test_runner.rs | 42 +- crates/ecstore/run_benchmarks.sh | 170 +- crates/ecstore/src/admin_server_info.rs | 8 +- crates/ecstore/src/bucket/metadata.rs | 4 +- crates/ecstore/src/bucket/quota/mod.rs | 6 +- .../ecstore/src/bucket/replication/config.rs | 24 +- crates/ecstore/src/chunk_stream.rs | 30 +- crates/ecstore/src/disk/error_reduce.rs | 8 +- crates/ecstore/src/disk/local.rs | 30 +- crates/ecstore/src/disk/mod.rs | 8 +- crates/ecstore/src/disk/os.rs | 2 +- crates/ecstore/src/erasure.rs | 28 +- crates/ecstore/src/erasure_coding/bitrot.rs | 4 +- crates/ecstore/src/erasure_coding/erasure.rs | 6 +- crates/ecstore/src/pools.rs | 14 +- crates/ecstore/src/rebalance.rs | 10 +- crates/ecstore/src/rpc/peer_s3_client.rs | 2 +- crates/ecstore/src/rpc/remote_disk.rs | 2 +- crates/ecstore/src/set_disk.rs | 34 +- crates/ecstore/src/store.rs | 32 +- crates/ecstore/src/store_utils.rs | 10 +- crates/ecstore/src/tier/warm_backend_s3sdk.rs | 2 +- crates/filemeta/src/filemeta.rs | 356 +-- crates/iam/src/cache.rs | 8 +- crates/notify/src/registry.rs | 2 +- crates/obs/src/metrics/entry/subsystem.rs | 6 +- crates/rio/src/hash_reader.rs | 6 +- crates/utils/src/net.rs | 2 +- deploy/build/rustfs-zh.service | 81 +- deploy/build/rustfs.run-zh.md | 50 +- docs/PERFORMANCE_TESTING.md | 297 +- docs/README.md | 258 +- docs/kms/frontend-api-guide-zh.md | 2524 ++--------------- rustfs/README.md | 2 +- scripts/run_scanner_benchmarks.sh | 126 +- 41 files changed, 1294 insertions(+), 3312 deletions(-) diff --git a/README.md b/README.md index b6e5ae37..82c33139 100644 --- a/README.md +++ b/README.md @@ -25,7 +25,7 @@ English | français | 日本語 | 한국어 | - Português | + Portuguese | Русский

diff --git a/crates/audit/tests/performance_test.rs b/crates/audit/tests/performance_test.rs index 6921d21f..32dc87e0 100644 --- a/crates/audit/tests/performance_test.rs +++ b/crates/audit/tests/performance_test.rs @@ -94,7 +94,7 @@ async fn test_audit_log_dispatch_performance() { let start_result = system.start(config).await; if start_result.is_err() { println!("AuditSystem failed to start: {start_result:?}"); - return; // 或 assert!(false, "AuditSystem failed to start"); + return; // Alternatively: assert!(false, "AuditSystem failed to start"); } use chrono::Utc; diff --git a/crates/crypto/src/encdec/tests.rs b/crates/crypto/src/encdec/tests.rs index 03b4ace0..79e2dcbb 100644 --- a/crates/crypto/src/encdec/tests.rs +++ b/crates/crypto/src/encdec/tests.rs @@ -226,7 +226,7 @@ fn test_password_variations() -> Result<(), crate::Error> { b"12345".as_slice(), // Numeric b"!@#$%^&*()".as_slice(), // Special characters b"\x00\x01\x02\x03".as_slice(), // Binary password - "密码测试".as_bytes(), // Unicode password + "пароль тест".as_bytes(), // Unicode password &[0xFF; 64], // Long binary password ]; diff --git a/crates/e2e_test/src/kms/README.md b/crates/e2e_test/src/kms/README.md index 5293de90..ef25bdaf 100644 --- a/crates/e2e_test/src/kms/README.md +++ b/crates/e2e_test/src/kms/README.md @@ -1,267 +1,253 @@ # KMS End-to-End Tests -本目录包含 RustFS KMS (Key Management Service) 的端到端集成测试,用于验证完整的 KMS 功能流程。 +This directory contains the integration suites used to validate the full RustFS KMS (Key Management Service) workflow. -## 📁 测试文件说明 +## 📁 Test Overview ### `kms_local_test.rs` -本地KMS后端的端到端测试,包含: -- 自动启动和配置本地KMS后端 -- 通过动态配置API配置KMS服务 -- 测试SSE-C(客户端提供密钥)加密流程 -- 验证S3兼容的对象加密/解密操作 -- 密钥生命周期管理测试 +End-to-end coverage for the local KMS backend: +- Auto-start and configure the local backend +- Configure KMS through the dynamic configuration API +- Verify SSE-C (client-provided keys) +- Exercise S3-compatible encryption/decryption +- Validate key lifecycle management ### `kms_vault_test.rs` -Vault KMS后端的端到端测试,包含: -- 自动启动Vault开发服务器 -- 配置Vault transit engine和密钥 -- 通过动态配置API配置KMS服务 -- 测试完整的Vault KMS集成 -- 验证Token认证和加密操作 +End-to-end coverage for the Vault backend: +- Launch a Vault dev server automatically +- Configure the transit engine and encryption keys +- Configure KMS via the dynamic configuration API +- Run the full Vault integration flow +- Validate token authentication and encryption operations ### `kms_comprehensive_test.rs` -**完整的KMS功能测试套件**(当前因AWS SDK API兼容性问题暂时禁用),包含: -- **Bucket加密配置**: SSE-S3和SSE-KMS默认加密设置 -- **完整的SSE加密模式测试**: - - SSE-S3: S3管理的服务端加密 - - SSE-KMS: KMS管理的服务端加密 - - SSE-C: 客户端提供密钥的服务端加密 -- **对象操作测试**: 上传、下载、验证三种SSE模式 -- **分片上传测试**: 多部分上传支持所有SSE模式 -- **对象复制测试**: 不同SSE模式间的复制操作 -- **完整KMS API管理**: - - 密钥生命周期管理(创建、列表、描述、删除、取消删除) - - 直接加密/解密操作 - - 数据密钥生成和操作 - - KMS服务管理(启动、停止、状态查询) +**Full KMS capability suite** (currently disabled because of AWS SDK compatibility issues): +- **Bucket encryption configuration**: SSE-S3 and SSE-KMS defaults +- **All SSE encryption modes**: + - SSE-S3 (S3-managed server-side encryption) + - SSE-KMS (KMS-managed server-side encryption) + - SSE-C (client-provided keys) +- **Object operations**: upload, download, and validation for every SSE mode +- **Multipart uploads**: cover each SSE mode +- **Object replication**: cross-mode replication scenarios +- **Complete KMS API management**: + - Key lifecycle (create, list, describe, delete, cancel delete) + - Direct encrypt/decrypt operations + - Data key generation and handling + - KMS service lifecycle (start, stop, 
status) ### `kms_integration_test.rs` -综合性KMS集成测试,包含: -- 多后端兼容性测试 -- KMS服务生命周期测试 -- 错误处理和恢复测试 -- **注意**: 当前因AWS SDK API兼容性问题暂时禁用 +Broad integration tests that exercise: +- Multiple backends +- KMS lifecycle management +- Error handling and recovery +- **Note**: currently disabled because of AWS SDK compatibility gaps -## 🚀 如何运行测试 +## 🚀 Running Tests -### 前提条件 +### Prerequisites -1. **系统依赖**: +1. **System dependencies** ```bash # macOS brew install vault awscurl - + # Ubuntu/Debian apt-get install vault pip install awscurl ``` -2. **构建RustFS**: +2. **Build RustFS** ```bash - # 在项目根目录 cargo build ``` -### 运行单个测试 +### Run individual suites -#### 本地KMS测试 +#### Local backend ```bash cd crates/e2e_test cargo test test_local_kms_end_to_end -- --nocapture ``` -#### Vault KMS测试 +#### Vault backend ```bash cd crates/e2e_test cargo test test_vault_kms_end_to_end -- --nocapture ``` -#### 高可用性测试 +#### High availability ```bash cd crates/e2e_test cargo test test_vault_kms_high_availability -- --nocapture ``` -#### 完整功能测试(开发中) +#### Comprehensive features (disabled) ```bash cd crates/e2e_test -# 注意:以下测试因AWS SDK API兼容性问题暂时禁用 +# Disabled due to AWS SDK compatibility gaps # cargo test test_comprehensive_kms_functionality -- --nocapture -# cargo test test_sse_modes_compatibility -- --nocapture +# cargo test test_sse_modes_compatibility -- --nocapture # cargo test test_kms_api_comprehensive -- --nocapture ``` -### 运行所有KMS测试 +### Run all KMS suites ```bash cd crates/e2e_test cargo test kms -- --nocapture ``` -### 串行运行(避免端口冲突) +### Run serially (avoid port conflicts) ```bash cd crates/e2e_test cargo test kms -- --nocapture --test-threads=1 ``` -## 🔧 测试配置 +## 🔧 Configuration -### 环境变量 +### Environment variables ```bash -# 可选:自定义端口(默认使用9050) +# Optional: custom RustFS port (default 9050) export RUSTFS_TEST_PORT=9050 -# 可选:自定义Vault端口(默认使用8200) +# Optional: custom Vault port (default 8200) export VAULT_TEST_PORT=8200 -# 可选:启用详细日志 +# Optional: enable verbose logging export RUST_LOG=debug ``` -### 依赖的二进制文件路径 +### Required binaries -测试会自动查找以下二进制文件: -- `../../target/debug/rustfs` - RustFS服务器 -- `vault` - Vault (需要在PATH中) -- `/Users/dandan/Library/Python/3.9/bin/awscurl` - AWS签名工具 +Tests look for: +- `../../target/debug/rustfs` – RustFS server +- `vault` – Vault CLI (must be on PATH) +- `/Users/dandan/Library/Python/3.9/bin/awscurl` – AWS SigV4 helper -## 📋 测试流程说明 +## 📋 Test Flow -### Local KMS测试流程 -1. **环境准备**:创建临时目录,设置KMS密钥存储路径 -2. **启动服务**:启动RustFS服务器,启用KMS功能 -3. **等待就绪**:检查端口监听和S3 API响应 -4. **配置KMS**:通过awscurl发送配置请求到admin API -5. **启动KMS**:激活KMS服务 -6. **功能测试**: - - 创建测试存储桶 - - 测试SSE-C加密(客户端提供密钥) - - 验证对象加密/解密 -7. **清理**:终止进程,清理临时文件 +### Local backend +1. **Prepare environment** – create temporary directories and key storage paths +2. **Start RustFS** – launch the server with KMS enabled +3. **Wait for readiness** – confirm the port listener and S3 API +4. **Configure KMS** – send configuration via awscurl to the admin API +5. **Start KMS** – activate the KMS service +6. **Exercise functionality** + - Create a test bucket + - Run SSE-C encryption with client-provided keys + - Validate encryption/decryption behavior +7. **Cleanup** – stop processes and remove temporary files -### Vault KMS测试流程 -1. **启动Vault**:使用开发模式启动Vault服务器 -2. **配置Vault**: - - 启用transit secrets engine - - 创建加密密钥(rustfs-master-key) -3. **启动RustFS**:启用KMS功能的RustFS服务器 -4. **配置KMS**:通过API配置Vault后端,包含: - - Vault地址和Token认证 - - Transit engine配置 - - 密钥路径设置 -5. **功能测试**:完整的加密/解密流程测试 -6. **清理**:终止所有进程 +### Vault backend +1. 
**Launch Vault** – start the dev-mode server +2. **Configure Vault** + - Enable the transit secrets engine + - Create the `rustfs-master-key` +3. **Start RustFS** – run the server with KMS enabled +4. **Configure KMS** – point RustFS at Vault (address, token, transit config, key path) +5. **Exercise functionality** – complete the encryption/decryption workflow +6. **Cleanup** – stop all services -## 🛠️ 故障排除 +## 🛠️ Troubleshooting -### 常见问题 +### Common issues -**Q: 测试失败 "RustFS server failed to become ready"** -``` -A: 检查端口是否被占用: +**Q: `RustFS server failed to become ready`** +```bash lsof -i :9050 -kill -9 # 如果有进程占用端口 +kill -9 # Free the port if necessary ``` -**Q: Vault服务启动失败** -``` -A: 确保Vault已安装且在PATH中: +**Q: Vault fails to start** +```bash which vault vault version ``` -**Q: awscurl认证失败** -``` -A: 检查awscurl路径是否正确: +**Q: awscurl authentication fails** +```bash ls /Users/dandan/Library/Python/3.9/bin/awscurl -# 或安装到不同路径: +# Or install elsewhere pip install awscurl -which awscurl # 然后更新测试中的路径 +which awscurl # Update the path in tests accordingly ``` -**Q: 测试超时** -``` -A: 增加等待时间或检查日志: +**Q: Tests time out** +```bash RUST_LOG=debug cargo test test_local_kms_end_to_end -- --nocapture ``` -### 调试技巧 +### Debug tips -1. **查看详细日志**: +1. **Enable verbose logs** ```bash RUST_LOG=rustfs_kms=debug,rustfs=info cargo test -- --nocapture ``` -2. **保留临时文件**: - 修改测试代码,注释掉清理部分,检查生成的配置文件 +2. **Keep temporary files** – comment out cleanup logic to inspect generated configs -3. **单步调试**: - 在测试中添加 `std::thread::sleep` 来暂停执行,手动检查服务状态 +3. **Pause execution** – add `std::thread::sleep` for manual inspection during tests -4. **端口检查**: +4. **Monitor ports** ```bash - # 测试运行时检查端口状态 netstat -an | grep 9050 curl http://127.0.0.1:9050/minio/health/ready ``` -## 📊 测试覆盖范围 +## 📊 Coverage -### 功能覆盖 -- ✅ KMS服务动态配置 -- ✅ 本地和Vault后端支持 -- ✅ AWS S3兼容加密接口 -- ✅ 密钥管理和生命周期 -- ✅ 错误处理和恢复 -- ✅ 高可用性场景 +### Functional +- ✅ Dynamic KMS configuration +- ✅ Local and Vault backends +- ✅ AWS S3-compatible encryption APIs +- ✅ Key lifecycle management +- ✅ Error handling and recovery paths +- ✅ High-availability behavior -### 加密模式覆盖 -- ✅ SSE-C (Server-Side Encryption with Customer-Provided Keys) -- ✅ SSE-S3 (Server-Side Encryption with S3-Managed Keys) -- ✅ SSE-KMS (Server-Side Encryption with KMS-Managed Keys) +### Encryption modes +- ✅ SSE-C (customer-provided) +- ✅ SSE-S3 (S3-managed) +- ✅ SSE-KMS (KMS-managed) -### S3操作覆盖 -- ✅ 对象上传/下载 (SSE-C模式) -- 🚧 分片上传 (需要AWS SDK兼容性修复) -- 🚧 对象复制 (需要AWS SDK兼容性修复) -- 🚧 Bucket加密配置 (需要AWS SDK兼容性修复) +### S3 operations +- ✅ Object upload/download (SSE-C) +- 🚧 Multipart uploads (pending AWS SDK fixes) +- 🚧 Object replication (pending AWS SDK fixes) +- 🚧 Bucket encryption defaults (pending AWS SDK fixes) -### KMS API覆盖 -- ✅ 基础密钥管理 (创建、列表) -- 🚧 完整密钥生命周期 (需要AWS SDK兼容性修复) -- 🚧 直接加密/解密操作 (需要AWS SDK兼容性修复) -- 🚧 数据密钥生成和解密 (需要AWS SDK兼容性修复) -- ✅ KMS服务管理 (配置、启动、停止、状态) +### KMS API +- ✅ Basic key management (create/list) +- 🚧 Full key lifecycle (pending AWS SDK fixes) +- 🚧 Direct encrypt/decrypt (pending AWS SDK fixes) +- 🚧 Data key operations (pending AWS SDK fixes) +- ✅ Service lifecycle (configure/start/stop/status) -### 认证方式覆盖 -- ✅ Vault Token认证 -- 🚧 Vault AppRole认证 +### Authentication +- ✅ Vault token auth +- 🚧 Vault AppRole auth -## 🔄 持续集成 +## 🔄 CI Integration -这些测试设计为可在CI/CD环境中运行: +Designed to run inside CI/CD pipelines: ```yaml -# GitHub Actions 示例 - name: Run KMS E2E Tests run: | - # 安装依赖 sudo apt-get update sudo apt-get install -y vault pip install awscurl - - # 构建并测试 + cargo build cd crates/e2e_test cargo test 
kms -- --nocapture --test-threads=1 ``` -## 📚 相关文档 +## 📚 References -- [KMS 配置文档](../../../../docs/kms/README.md) - KMS功能完整文档 -- [动态配置API](../../../../docs/kms/http-api.md) - REST API接口说明 -- [故障排除指南](../../../../docs/kms/troubleshooting.md) - 常见问题解决 +- [KMS configuration guide](../../../../docs/kms/README.md) +- [Dynamic configuration API](../../../../docs/kms/http-api.md) +- [Troubleshooting](../../../../docs/kms/troubleshooting.md) --- -*这些测试确保KMS功能的稳定性和可靠性,为生产环境部署提供信心。* \ No newline at end of file +*These suites ensure KMS stability and reliability, building confidence for production deployments.* diff --git a/crates/e2e_test/src/kms/common.rs b/crates/e2e_test/src/kms/common.rs index daeafb9c..390da7ad 100644 --- a/crates/e2e_test/src/kms/common.rs +++ b/crates/e2e_test/src/kms/common.rs @@ -547,9 +547,9 @@ pub async fn test_multipart_upload_with_config( ) -> Result<(), Box> { let total_size = config.total_size(); - info!("🧪 开始分片上传测试 - {:?}", config.encryption_type); + info!("🧪 Starting multipart upload test - {:?}", config.encryption_type); info!( - " 对象: {}, 分片: {}个, 每片: {}MB, 总计: {}MB", + " Object: {}, parts: {}, part size: {} MB, total: {} MB", config.object_key, config.total_parts, config.part_size / (1024 * 1024), @@ -589,7 +589,7 @@ pub async fn test_multipart_upload_with_config( let create_multipart_output = create_request.send().await?; let upload_id = create_multipart_output.upload_id().unwrap(); - info!("📋 创建分片上传,ID: {}", upload_id); + info!("📋 Created multipart upload, ID: {}", upload_id); // Step 2: Upload parts let mut completed_parts = Vec::new(); @@ -598,7 +598,7 @@ pub async fn test_multipart_upload_with_config( let end = std::cmp::min(start + config.part_size, total_size); let part_data = &test_data[start..end]; - info!("📤 上传分片 {} ({:.2}MB)", part_number, part_data.len() as f64 / (1024.0 * 1024.0)); + info!("📤 Uploading part {} ({:.2} MB)", part_number, part_data.len() as f64 / (1024.0 * 1024.0)); let mut upload_request = s3_client .upload_part() @@ -625,7 +625,7 @@ pub async fn test_multipart_upload_with_config( .build(), ); - debug!("分片 {} 上传完成,ETag: {}", part_number, etag); + debug!("Part {} uploaded with ETag {}", part_number, etag); } // Step 3: Complete multipart upload @@ -633,7 +633,7 @@ pub async fn test_multipart_upload_with_config( .set_parts(Some(completed_parts)) .build(); - info!("🔗 完成分片上传"); + info!("🔗 Completing multipart upload"); let complete_output = s3_client .complete_multipart_upload() .bucket(bucket) @@ -643,10 +643,10 @@ pub async fn test_multipart_upload_with_config( .send() .await?; - debug!("完成分片上传,ETag: {:?}", complete_output.e_tag()); + debug!("Multipart upload finalized with ETag {:?}", complete_output.e_tag()); // Step 4: Download and verify - info!("📥 下载文件并验证"); + info!("📥 Downloading object for verification"); let mut get_request = s3_client.get_object().bucket(bucket).key(&config.object_key); // Add encryption headers for SSE-C GET @@ -680,7 +680,7 @@ pub async fn test_multipart_upload_with_config( assert_eq!(downloaded_data.len(), total_size); assert_eq!(&downloaded_data[..], &test_data[..]); - info!("✅ 分片上传测试通过 - {:?}", config.encryption_type); + info!("✅ Multipart upload test passed - {:?}", config.encryption_type); Ok(()) } @@ -700,7 +700,7 @@ pub async fn test_all_multipart_encryption_types( bucket: &str, base_object_key: &str, ) -> Result<(), Box> { - info!("🧪 测试所有加密类型的分片上传"); + info!("🧪 Testing multipart uploads for every encryption type"); let part_size = 5 * 1024 * 1024; // 5MB per part let total_parts = 2; @@ -718,7 +718,7 
@@ pub async fn test_all_multipart_encryption_types( test_multipart_upload_with_config(s3_client, bucket, &config).await?; } - info!("✅ 所有加密类型的分片上传测试通过"); + info!("✅ Multipart uploads succeeded for every encryption type"); Ok(()) } diff --git a/crates/e2e_test/src/kms/multipart_encryption_test.rs b/crates/e2e_test/src/kms/multipart_encryption_test.rs index d55c2bfe..b744f48a 100644 --- a/crates/e2e_test/src/kms/multipart_encryption_test.rs +++ b/crates/e2e_test/src/kms/multipart_encryption_test.rs @@ -201,7 +201,7 @@ async fn test_step3_multipart_upload_with_sse_s3() -> Result<(), Box = (0..total_size).map(|i| ((i / 1000) % 256) as u8).collect(); info!( @@ -275,43 +275,43 @@ async fn test_step3_multipart_upload_with_sse_s3() -> Result<(), Box Result<(), Box> { init_logging(); - info!("🧪 步骤 4:测试大文件分片上传加密"); + info!("🧪 Step 4: test large-file multipart encryption"); let mut kms_env = LocalKMSTestEnvironment::new().await?; let _default_key_id = kms_env.start_rustfs_for_local_kms().await?; @@ -321,18 +321,18 @@ async fn test_step4_large_multipart_upload_with_encryption() -> Result<(), Box = (0..total_size) .map(|i| { let part_num = i / part_size; @@ -341,9 +341,9 @@ async fn test_step4_large_multipart_upload_with_encryption() -> Result<(), Box Result<(), Box Result<(), Box Result<(), Box Result<(), Box Result<(), Box> { init_logging(); - info!("🧪 步骤 5:测试所有加密类型的分片上传"); + info!("🧪 Step 5: test multipart uploads for every encryption mode"); let mut kms_env = LocalKMSTestEnvironment::new().await?; let _default_key_id = kms_env.start_rustfs_for_local_kms().await?; @@ -450,8 +450,8 @@ async fn test_step5_all_encryption_types_multipart() -> Result<(), Box Result<(), Box Result<(), Box Result<(), Box> { - // 生成测试数据 + // Generate test data let test_data: Vec = (0..total_size).map(|i| ((i * 7) % 256) as u8).collect(); - // 准备 SSE-C 所需的密钥(如果需要) + // Prepare SSE-C keys when required let (sse_c_key, sse_c_md5) = if matches!(encryption_type, EncryptionType::SSEC) { let key = "01234567890123456789012345678901"; let key_b64 = base64::Engine::encode(&base64::engine::general_purpose::STANDARD, key); @@ -510,9 +510,9 @@ async fn test_multipart_encryption_type( (None, None) }; - info!("📋 创建分片上传 - {:?}", encryption_type); + info!("📋 Creating multipart upload - {:?}", encryption_type); - // 创建分片上传 + // Create multipart upload let mut create_request = s3_client.create_multipart_upload().bucket(bucket).key(object_key); create_request = match encryption_type { @@ -526,7 +526,7 @@ async fn test_multipart_encryption_type( let create_multipart_output = create_request.send().await?; let upload_id = create_multipart_output.upload_id().unwrap(); - // 上传分片 + // Upload parts let mut completed_parts = Vec::new(); for part_number in 1..=total_parts { let start = (part_number - 1) * part_size; @@ -541,7 +541,7 @@ async fn test_multipart_encryption_type( .part_number(part_number as i32) .body(aws_sdk_s3::primitives::ByteStream::from(part_data.to_vec())); - // SSE-C 需要在每个 UploadPart 请求中包含密钥 + // SSE-C requires the key on each UploadPart request if matches!(encryption_type, EncryptionType::SSEC) { upload_request = upload_request .sse_customer_algorithm("AES256") @@ -558,10 +558,10 @@ async fn test_multipart_encryption_type( .build(), ); - debug!("{:?} 分片 {} 上传完成", encryption_type, part_number); + debug!("{:?} part {} uploaded", encryption_type, part_number); } - // 完成分片上传 + // Complete the multipart upload let completed_multipart_upload = aws_sdk_s3::types::CompletedMultipartUpload::builder() .set_parts(Some(completed_parts)) 
.build(); @@ -575,10 +575,10 @@ async fn test_multipart_encryption_type( .send() .await?; - // 下载并验证 + // Download and verify let mut get_request = s3_client.get_object().bucket(bucket).key(object_key); - // SSE-C 需要在 GET 请求中包含密钥 + // SSE-C requires the key on GET requests if matches!(encryption_type, EncryptionType::SSEC) { get_request = get_request .sse_customer_algorithm("AES256") @@ -588,7 +588,7 @@ async fn test_multipart_encryption_type( let get_response = get_request.send().await?; - // 验证加密头 + // Verify encryption headers match encryption_type { EncryptionType::SSEKMS => { assert_eq!( @@ -601,11 +601,11 @@ async fn test_multipart_encryption_type( } } - // 验证数据完整性 + // Verify data integrity let downloaded_data = get_response.body.collect().await?.into_bytes(); assert_eq!(downloaded_data.len(), total_size); assert_eq!(&downloaded_data[..], &test_data[..]); - info!("✅ {:?} 分片上传测试通过", encryption_type); + info!("✅ {:?} multipart upload test passed", encryption_type); Ok(()) } diff --git a/crates/e2e_test/src/kms/test_runner.rs b/crates/e2e_test/src/kms/test_runner.rs index 4d6591c4..efa144f6 100644 --- a/crates/e2e_test/src/kms/test_runner.rs +++ b/crates/e2e_test/src/kms/test_runner.rs @@ -346,7 +346,7 @@ impl KMSTestSuite { /// Run the complete test suite pub async fn run_test_suite(&self) -> Vec { init_logging(); - info!("🚀 开始KMS统一测试套件"); + info!("🚀 Starting unified KMS test suite"); let start_time = Instant::now(); let mut results = Vec::new(); @@ -359,17 +359,17 @@ impl KMSTestSuite { .filter(|test| !self.config.include_critical_only || test.is_critical) .collect(); - info!("📊 测试计划: {} 个测试将被执行", tests_to_run.len()); + info!("📊 Test plan: {} test(s) scheduled", tests_to_run.len()); for (i, test) in tests_to_run.iter().enumerate() { info!(" {}. 
{} ({})", i + 1, test.name, test.category.as_str()); } // Execute tests for (i, test_def) in tests_to_run.iter().enumerate() { - info!("🧪 执行测试 {}/{}: {}", i + 1, tests_to_run.len(), test_def.name); - info!(" 📝 描述: {}", test_def.description); - info!(" 🏷️ 分类: {}", test_def.category.as_str()); - info!(" ⏱️ 预计时间: {:?}", test_def.estimated_duration); + info!("🧪 Running test {}/{}: {}", i + 1, tests_to_run.len(), test_def.name); + info!(" 📝 Description: {}", test_def.description); + info!(" 🏷️ Category: {}", test_def.category.as_str()); + info!(" ⏱️ Estimated duration: {:?}", test_def.estimated_duration); let test_start = Instant::now(); let result = self.run_single_test(test_def).await; @@ -377,11 +377,11 @@ impl KMSTestSuite { match result { Ok(_) => { - info!("✅ 测试通过: {} ({:.2}s)", test_def.name, test_duration.as_secs_f64()); + info!("✅ Test passed: {} ({:.2}s)", test_def.name, test_duration.as_secs_f64()); results.push(TestResult::success(test_def.name.clone(), test_def.category.clone(), test_duration)); } Err(e) => { - error!("❌ 测试失败: {} ({:.2}s): {}", test_def.name, test_duration.as_secs_f64(), e); + error!("❌ Test failed: {} ({:.2}s): {}", test_def.name, test_duration.as_secs_f64(), e); results.push(TestResult::failure( test_def.name.clone(), test_def.category.clone(), @@ -393,7 +393,7 @@ impl KMSTestSuite { // Add delay between tests to avoid resource conflicts if i < tests_to_run.len() - 1 { - debug!("⏸️ 等待2秒后执行下一个测试..."); + debug!("⏸️ Waiting two seconds before the next test..."); sleep(Duration::from_secs(2)).await; } } @@ -408,22 +408,22 @@ impl KMSTestSuite { async fn run_single_test(&self, test_def: &TestDefinition) -> Result<(), Box> { // This is a placeholder for test dispatch logic // In a real implementation, this would dispatch to actual test functions - warn!("⚠️ 测试函数 '{}' 在统一运行器中尚未实现,跳过", test_def.name); + warn!("⚠️ Test '{}' is not implemented in the unified runner; skipping", test_def.name); Ok(()) } /// Print comprehensive test summary fn print_test_summary(&self, results: &[TestResult], total_duration: Duration) { - info!("📊 KMS测试套件总结"); - info!("⏱️ 总执行时间: {:.2}秒", total_duration.as_secs_f64()); - info!("📈 总测试数量: {}", results.len()); + info!("📊 KMS test suite summary"); + info!("⏱️ Total duration: {:.2} seconds", total_duration.as_secs_f64()); + info!("📈 Total tests: {}", results.len()); let passed = results.iter().filter(|r| r.success).count(); let failed = results.iter().filter(|r| !r.success).count(); - info!("✅ 通过: {}", passed); - info!("❌ 失败: {}", failed); - info!("📊 成功率: {:.1}%", (passed as f64 / results.len() as f64) * 100.0); + info!("✅ Passed: {}", passed); + info!("❌ Failed: {}", failed); + info!("📊 Success rate: {:.1}%", (passed as f64 / results.len() as f64) * 100.0); // Summary by category let mut category_summary: std::collections::HashMap = std::collections::HashMap::new(); @@ -435,7 +435,7 @@ impl KMSTestSuite { } } - info!("📊 分类汇总:"); + info!("📊 Category summary:"); for (category, (total, passed_count)) in category_summary { info!( " 🏷️ {}: {}/{} ({:.1}%)", @@ -448,7 +448,7 @@ impl KMSTestSuite { // List failed tests if failed > 0 { - warn!("❌ 失败的测试:"); + warn!("❌ Failing tests:"); for result in results.iter().filter(|r| !r.success) { warn!( " - {}: {}", @@ -479,7 +479,7 @@ async fn test_kms_critical_suite() -> Result<(), Box Result<(), Box /dev/null; then - print_error "Cargo 未找到,请确保已安装 Rust" + print_error "Cargo not found; install Rust first" exit 1 fi - # 检查 criterion + # Check criterion support if ! 
cargo --list | grep -q "bench"; then - print_error "未找到基准测试支持,请确保使用的是支持基准测试的 Rust 版本" + print_error "Benchmark support missing; use a Rust toolchain with criterion support" exit 1 fi - print_success "系统要求检查通过" + print_success "System requirements satisfied" } -# 清理之前的测试结果 +# Remove previous benchmark artifacts cleanup() { - print_info "清理之前的测试结果..." + print_info "Cleaning previous benchmark artifacts..." rm -rf target/criterion - print_success "清理完成" + print_success "Cleanup complete" } -# 运行 SIMD 模式基准测试 +# Run SIMD-only benchmarks run_simd_benchmark() { - print_info "🎯 开始运行 SIMD 模式基准测试..." + print_info "🎯 Starting SIMD-only benchmark run..." echo "================================================" cargo bench --bench comparison_benchmark \ -- --save-baseline simd_baseline - print_success "SIMD 模式基准测试完成" + print_success "SIMD-only benchmarks completed" } -# 运行完整的基准测试套件 +# Run the full benchmark suite run_full_benchmark() { - print_info "🚀 开始运行完整基准测试套件..." + print_info "🚀 Starting full benchmark suite..." echo "================================================" - # 运行详细的基准测试 + # Execute detailed benchmarks cargo bench --bench erasure_benchmark - print_success "完整基准测试套件完成" + print_success "Full benchmark suite finished" } -# 运行性能测试 +# Run performance tests run_performance_test() { - print_info "📊 开始运行性能测试..." + print_info "📊 Starting performance tests..." echo "================================================" - print_info "步骤 1: 运行编码基准测试..." + print_info "Step 1: running encoding benchmarks..." cargo bench --bench comparison_benchmark \ -- encode --save-baseline encode_baseline - print_info "步骤 2: 运行解码基准测试..." + print_info "Step 2: running decoding benchmarks..." cargo bench --bench comparison_benchmark \ -- decode --save-baseline decode_baseline - print_success "性能测试完成" + print_success "Performance tests completed" } -# 运行大数据集测试 +# Run large dataset tests run_large_data_test() { - print_info "🗂️ 开始运行大数据集测试..." + print_info "🗂️ Starting large-dataset tests..." echo "================================================" cargo bench --bench erasure_benchmark \ -- large_data --save-baseline large_data_baseline - print_success "大数据集测试完成" + print_success "Large-dataset tests completed" } -# 生成比较报告 +# Generate comparison report generate_comparison_report() { - print_info "📊 生成性能报告..." + print_info "📊 Generating performance report..." if [ -d "target/criterion" ]; then - print_info "基准测试结果已保存到 target/criterion/ 目录" - print_info "你可以打开 target/criterion/report/index.html 查看详细报告" + print_info "Benchmark results saved under target/criterion/" + print_info "Open target/criterion/report/index.html for the HTML report" - # 如果有 python 环境,可以启动简单的 HTTP 服务器查看报告 + # If Python is available, start a simple HTTP server to browse the report if command -v python3 &> /dev/null; then - print_info "你可以运行以下命令启动本地服务器查看报告:" + print_info "Run the following command to serve the report locally:" echo " cd target/criterion && python3 -m http.server 8080" - echo " 然后在浏览器中访问 http://localhost:8080/report/index.html" + echo " Then open http://localhost:8080/report/index.html" fi else - print_warning "未找到基准测试结果目录" + print_warning "Benchmark result directory not found" fi } -# 快速测试模式 +# Quick test mode run_quick_test() { - print_info "🏃 运行快速性能测试..." + print_info "🏃 Running quick performance test..." - print_info "测试 SIMD 编码性能..." + print_info "Testing SIMD encoding performance..." cargo bench --bench comparison_benchmark \ -- encode --quick - print_info "测试 SIMD 解码性能..." + print_info "Testing SIMD decoding performance..." 
cargo bench --bench comparison_benchmark \ -- decode --quick - print_success "快速测试完成" + print_success "Quick test complete" } -# 显示帮助信息 +# Display help show_help() { - echo "Reed-Solomon SIMD 性能基准测试脚本" + echo "Reed-Solomon SIMD performance benchmark script" echo "" - echo "实现模式:" - echo " 🎯 SIMD 模式 - 高性能 SIMD 优化的 reed-solomon-simd 实现" + echo "Modes:" + echo " 🎯 simd High-performance reed-solomon-simd implementation" echo "" - echo "使用方法:" + echo "Usage:" echo " $0 [command]" echo "" - echo "命令:" - echo " quick 运行快速性能测试" - echo " full 运行完整基准测试套件" - echo " performance 运行详细的性能测试" - echo " simd 运行 SIMD 模式测试" - echo " large 运行大数据集测试" - echo " clean 清理测试结果" - echo " help 显示此帮助信息" + echo "Commands:" + echo " quick Run the quick performance test" + echo " full Run the full benchmark suite" + echo " performance Run detailed performance tests" + echo " simd Run the SIMD-only tests" + echo " large Run large-dataset tests" + echo " clean Remove previous results" + echo " help Show this help message" echo "" - echo "示例:" - echo " $0 quick # 快速性能测试" - echo " $0 performance # 详细性能测试" - echo " $0 full # 完整测试套件" - echo " $0 simd # SIMD 模式测试" - echo " $0 large # 大数据集测试" + echo "Examples:" + echo " $0 quick # Quick performance test" + echo " $0 performance # Detailed performance test" + echo " $0 full # Full benchmark suite" + echo " $0 simd # SIMD-only benchmark" + echo " $0 large # Large-dataset benchmark" echo "" - echo "实现特性:" - echo " - 使用 reed-solomon-simd 高性能 SIMD 实现" - echo " - 支持编码器/解码器实例缓存" - echo " - 优化的内存管理和线程安全" - echo " - 跨平台 SIMD 指令支持" + echo "Features:" + echo " - Uses the high-performance reed-solomon-simd implementation" + echo " - Caches encoder/decoder instances" + echo " - Optimized memory management and thread safety" + echo " - Cross-platform SIMD instruction support" } -# 显示测试配置信息 +# Show benchmark configuration show_test_info() { - print_info "📋 测试配置信息:" - echo " - 当前目录: $(pwd)" - echo " - Rust 版本: $(rustc --version)" - echo " - Cargo 版本: $(cargo --version)" - echo " - CPU 架构: $(uname -m)" - echo " - 操作系统: $(uname -s)" + print_info "📋 Benchmark configuration:" + echo " - Working directory: $(pwd)" + echo " - Rust version: $(rustc --version)" + echo " - Cargo version: $(cargo --version)" + echo " - CPU architecture: $(uname -m)" + echo " - Operating system: $(uname -s)" - # 检查 CPU 特性 + # Inspect CPU capabilities if [ -f "/proc/cpuinfo" ]; then - echo " - CPU 型号: $(grep 'model name' /proc/cpuinfo | head -1 | cut -d: -f2 | xargs)" + echo " - CPU model: $(grep 'model name' /proc/cpuinfo | head -1 | cut -d: -f2 | xargs)" if grep -q "avx2" /proc/cpuinfo; then - echo " - SIMD 支持: AVX2 ✅ (将使用高级 SIMD 优化)" + echo " - SIMD support: AVX2 ✅ (using advanced SIMD optimizations)" elif grep -q "sse4" /proc/cpuinfo; then - echo " - SIMD 支持: SSE4 ✅ (将使用 SIMD 优化)" + echo " - SIMD support: SSE4 ✅ (using SIMD optimizations)" else - echo " - SIMD 支持: 基础 SIMD 特性" + echo " - SIMD support: baseline features" fi fi - echo " - 实现: reed-solomon-simd (高性能 SIMD 优化)" - echo " - 特性: 实例缓存、线程安全、跨平台 SIMD" + echo " - Implementation: reed-solomon-simd (SIMD-optimized)" + echo " - Highlights: instance caching, thread safety, cross-platform SIMD" echo "" } -# 主函数 +# Main entry point main() { - print_info "🧪 Reed-Solomon SIMD 实现性能基准测试" + print_info "🧪 Reed-Solomon SIMD benchmark suite" echo "================================================" check_requirements @@ -252,15 +252,15 @@ main() { show_help ;; *) - print_error "未知命令: $1" + print_error "Unknown command: $1" echo "" show_help exit 1 ;; esac - print_success "✨ 
基准测试执行完成!" + print_success "✨ Benchmark run completed!" } -# 启动脚本 +# Launch script main "$@" \ No newline at end of file diff --git a/crates/ecstore/src/admin_server_info.rs b/crates/ecstore/src/admin_server_info.rs index 4ee7d94c..8b9699c5 100644 --- a/crates/ecstore/src/admin_server_info.rs +++ b/crates/ecstore/src/admin_server_info.rs @@ -96,21 +96,21 @@ async fn is_server_resolvable(endpoint: &Endpoint) -> Result<()> { let decoded_payload = flatbuffers::root::(finished_data); assert!(decoded_payload.is_ok()); - // 创建客户端 + // Create the client let mut client = node_service_time_out_client(&addr) .await .map_err(|err| Error::other(err.to_string()))?; - // 构造 PingRequest + // Build the PingRequest let request = Request::new(PingRequest { version: 1, body: bytes::Bytes::copy_from_slice(finished_data), }); - // 发送请求并获取响应 + // Send the request and obtain the response let response: PingResponse = client.ping(request).await?.into_inner(); - // 打印响应 + // Print the response let ping_response_body = flatbuffers::root::(&response.body); if let Err(e) = ping_response_body { eprintln!("{e}"); diff --git a/crates/ecstore/src/bucket/metadata.rs b/crates/ecstore/src/bucket/metadata.rs index f388cd0c..87884300 100644 --- a/crates/ecstore/src/bucket/metadata.rs +++ b/crates/ecstore/src/bucket/metadata.rs @@ -428,8 +428,8 @@ where let sec = t.unix_timestamp() - 62135596800; let nsec = t.nanosecond(); buf[0] = 0xc7; // mext8 - buf[1] = 0x0c; // 长度 - buf[2] = 0x05; // 时间扩展类型 + buf[1] = 0x0c; // Length + buf[2] = 0x05; // Time extension type BigEndian::write_u64(&mut buf[3..], sec as u64); BigEndian::write_u32(&mut buf[11..], nsec); s.serialize_bytes(&buf) diff --git a/crates/ecstore/src/bucket/quota/mod.rs b/crates/ecstore/src/bucket/quota/mod.rs index c2588d87..b9e778fd 100644 --- a/crates/ecstore/src/bucket/quota/mod.rs +++ b/crates/ecstore/src/bucket/quota/mod.rs @@ -16,16 +16,16 @@ use crate::error::Result; use rmp_serde::Serializer as rmpSerializer; use serde::{Deserialize, Serialize}; -// 定义 QuotaType 枚举类型 +// Define the QuotaType enum #[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] pub enum QuotaType { Hard, } -// 定义 BucketQuota 结构体 +// Define the BucketQuota structure #[derive(Debug, Deserialize, Serialize, Default, Clone)] pub struct BucketQuota { - quota: Option, // 使用 Option 来表示可能不存在的字段 + quota: Option, // Use Option to represent optional fields size: u64, diff --git a/crates/ecstore/src/bucket/replication/config.rs b/crates/ecstore/src/bucket/replication/config.rs index 88b3a8ed..4a983498 100644 --- a/crates/ecstore/src/bucket/replication/config.rs +++ b/crates/ecstore/src/bucket/replication/config.rs @@ -46,7 +46,7 @@ pub trait ReplicationConfigurationExt { } impl ReplicationConfigurationExt for ReplicationConfiguration { - /// 检查是否有现有对象复制规则 + /// Check whether any object-replication rules exist fn has_existing_object_replication(&self, arn: &str) -> (bool, bool) { let mut has_arn = false; @@ -117,7 +117,7 @@ impl ReplicationConfigurationExt for ReplicationConfiguration { rules } - /// 获取目标配置 + /// Retrieve the destination configuration fn get_destination(&self) -> Destination { if !self.rules.is_empty() { self.rules[0].destination.clone() @@ -134,7 +134,7 @@ impl ReplicationConfigurationExt for ReplicationConfiguration { } } - /// 判断对象是否应该被复制 + /// Determine whether an object should be replicated fn replicate(&self, obj: &ObjectOpts) -> bool { let rules = self.filter_actionable_rules(obj); @@ -164,16 +164,16 @@ impl ReplicationConfigurationExt for ReplicationConfiguration { } } 
- // 常规对象/元数据复制 + // Regular object/metadata replication return rule.metadata_replicate(obj); } false } - /// 检查是否有活跃的规则 - /// 可选择性地提供前缀 - /// 如果recursive为true,函数还会在前缀下的任何级别有活跃规则时返回true - /// 如果没有指定前缀,recursive实际上为true + /// Check for an active rule + /// Optionally accept a prefix + /// When recursive is true, return true if any level under the prefix has an active rule + /// Without a prefix, recursive behaves as true fn has_active_rules(&self, prefix: &str, recursive: bool) -> bool { if self.rules.is_empty() { return false; @@ -187,13 +187,13 @@ impl ReplicationConfigurationExt for ReplicationConfiguration { if let Some(filter) = &rule.filter { if let Some(filter_prefix) = &filter.prefix { if !prefix.is_empty() && !filter_prefix.is_empty() { - // 传入的前缀必须在规则前缀中 + // The provided prefix must fall within the rule prefix if !recursive && !prefix.starts_with(filter_prefix) { continue; } } - // 如果是递归的,我们可以跳过这个规则,如果它不匹配测试前缀或前缀下的级别不匹配 + // When recursive, skip this rule if it does not match the test prefix or hierarchy if recursive && !rule.prefix().starts_with(prefix) && !prefix.starts_with(rule.prefix()) { continue; } @@ -204,7 +204,7 @@ impl ReplicationConfigurationExt for ReplicationConfiguration { false } - /// 过滤目标ARN,返回配置中不同目标ARN的切片 + /// Filter target ARNs and return a slice of the distinct values in the config fn filter_target_arns(&self, obj: &ObjectOpts) -> Vec { let mut arns = Vec::new(); let mut targets_map: HashSet = HashSet::new(); @@ -216,7 +216,7 @@ impl ReplicationConfigurationExt for ReplicationConfiguration { } if !self.role.is_empty() { - arns.push(self.role.clone()); // 如果存在,使用传统的RoleArn + arns.push(self.role.clone()); // Use the legacy RoleArn when present return arns; } diff --git a/crates/ecstore/src/chunk_stream.rs b/crates/ecstore/src/chunk_stream.rs index 5689d6af..41b3b2d9 100644 --- a/crates/ecstore/src/chunk_stream.rs +++ b/crates/ecstore/src/chunk_stream.rs @@ -39,13 +39,13 @@ // #[allow(clippy::shadow_same)] // necessary for `pin_mut!` // Box::pin(async move { // pin_mut!(body); -// // 上一次没用完的数据 +// // Data left over from the previous call // let mut prev_bytes = Bytes::new(); // let mut read_size = 0; // loop { // let data: Vec = { -// // 读固定大小的数据 +// // Read a fixed-size chunk // match Self::read_data(body.as_mut(), prev_bytes, chunk_size).await { // None => break, // Some(Err(e)) => return Err(e), @@ -72,13 +72,13 @@ // if read_size + prev_bytes.len() >= content_length { // // debug!( -// // "读完了 read_size:{} + prev_bytes.len({}) == content_length {}", +// // "Finished reading: read_size:{} + prev_bytes.len({}) == content_length {}", // // read_size, // // prev_bytes.len(), // // content_length, // // ); -// // 填充 0? +// // Pad with zeros? 
// if !need_padding { // y.yield_ok(prev_bytes).await; // break; @@ -115,7 +115,7 @@ // { // let mut bytes_buffer = Vec::new(); -// // 只执行一次 +// // Run only once // let mut push_data_bytes = |mut bytes: Bytes| { // // debug!("read from body {} split per {}, prev_bytes: {}", bytes.len(), data_size, prev_bytes.len()); @@ -127,11 +127,11 @@ // return Some(bytes); // } -// // 合并上一次数据 +// // Merge with the previous data // if !prev_bytes.is_empty() { // let need_size = data_size.wrapping_sub(prev_bytes.len()); // // debug!( -// // " 上一次有剩余{},从这一次中取{},共:{}", +// // "Previous leftover {}, take {} now, total: {}", // // prev_bytes.len(), // // need_size, // // prev_bytes.len() + need_size @@ -143,7 +143,7 @@ // combined.extend_from_slice(&data); // // debug!( -// // "取到的长度大于所需,取出需要的长度:{},与上一次合并得到:{},bytes 剩余:{}", +// // "Fetched more bytes than needed: {}, merged result {}, remaining bytes {}", // // need_size, // // combined.len(), // // bytes.len(), @@ -156,7 +156,7 @@ // combined.extend_from_slice(&bytes); // // debug!( -// // "取到的长度小于所需,取出需要的长度:{},与上一次合并得到:{},bytes 剩余:{},直接返回", +// // "Fetched fewer bytes than needed: {}, merged result {}, remaining bytes {}, return immediately", // // need_size, // // combined.len(), // // bytes.len(), @@ -166,29 +166,29 @@ // } // } -// // 取到的数据比需要的块大,从 bytes 中截取需要的块大小 +// // If the fetched data exceeds the chunk, slice the required size // if data_size <= bytes.len() { // let n = bytes.len() / data_size; // for _ in 0..n { // let data = bytes.split_to(data_size); -// // println!("bytes_buffer.push: {},剩余:{}", data.len(), bytes.len()); +// // println!("bytes_buffer.push: {}, remaining: {}", data.len(), bytes.len()); // bytes_buffer.push(data); // } // Some(bytes) // } else { -// // 不够 +// // Insufficient data // Some(bytes) // } // }; -// // 剩余数据 +// // Remaining data // let remaining_bytes = 'outer: { -// // // 如果上一次数据足够,跳出 +// // // Exit if the previous data was sufficient // // if let Some(remaining_bytes) = push_data_bytes(prev_bytes) { -// // println!("从剩下的取"); +// // println!("Consuming leftovers"); // // break 'outer remaining_bytes; // // } diff --git a/crates/ecstore/src/disk/error_reduce.rs b/crates/ecstore/src/disk/error_reduce.rs index 956b57c4..d3264334 100644 --- a/crates/ecstore/src/disk/error_reduce.rs +++ b/crates/ecstore/src/disk/error_reduce.rs @@ -49,12 +49,12 @@ pub fn reduce_quorum_errs(errors: &[Option], ignored_errs: &[Error], quor pub fn reduce_errs(errors: &[Option], ignored_errs: &[Error]) -> (usize, Option) { let nil_error = Error::other("nil".to_string()); - // 首先统计 None 的数量(作为 nil 错误) + // First count the number of None values (treated as nil errors) let nil_count = errors.iter().filter(|e| e.is_none()).count(); let err_counts = errors .iter() - .filter_map(|e| e.as_ref()) // 只处理 Some 的错误 + .filter_map(|e| e.as_ref()) // Only process errors stored in Some .fold(std::collections::HashMap::new(), |mut acc, e| { if is_ignored_err(ignored_errs, e) { return acc; @@ -63,13 +63,13 @@ pub fn reduce_errs(errors: &[Option], ignored_errs: &[Error]) -> (usize, acc }); - // 找到最高频率的非 nil 错误 + // Find the most frequent non-nil error let (best_err, best_count) = err_counts .into_iter() .max_by(|(_, c1), (_, c2)| c1.cmp(c2)) .unwrap_or((nil_error.clone(), 0)); - // 比较 nil 错误和最高频率的非 nil 错误, 优先选择 nil 错误 + // Compare nil errors with the top non-nil error and prefer the nil error if nil_count > best_count || (nil_count == best_count && nil_count > 0) { (nil_count, None) } else { diff --git a/crates/ecstore/src/disk/local.rs 
b/crates/ecstore/src/disk/local.rs index cccc1e26..ba96de5f 100644 --- a/crates/ecstore/src/disk/local.rs +++ b/crates/ecstore/src/disk/local.rs @@ -319,8 +319,8 @@ impl LocalDisk { } if cfg!(target_os = "windows") { - // 在 Windows 上,卷名不应该包含保留字符。 - // 这个正则表达式匹配了不允许的字符。 + // Windows volume names must not include reserved characters. + // This regular expression matches disallowed characters. if volname.contains('|') || volname.contains('<') || volname.contains('>') @@ -333,7 +333,7 @@ impl LocalDisk { return false; } } else { - // 对于非 Windows 系统,可能需要其他的验证逻辑。 + // Non-Windows systems may require additional validation rules. } true @@ -563,7 +563,7 @@ impl LocalDisk { // return Ok(()); - // TODO: 异步通知 检测硬盘空间 清空回收站 + // TODO: async notifications for disk space checks and trash cleanup let trash_path = self.get_object_path(super::RUSTFS_META_TMP_DELETED_BUCKET, Uuid::new_v4().to_string().as_str())?; // if let Some(parent) = trash_path.parent() { @@ -846,13 +846,13 @@ impl LocalDisk { } } - // 没有版本了,删除 xl.meta + // Remove xl.meta when no versions remain if fm.versions.is_empty() { self.delete_file(&volume_dir, &xlpath, true, false).await?; return Ok(()); } - // 更新 xl.meta + // Update xl.meta let buf = fm.marshal_msg()?; let volume_dir = self.get_bucket_path(volume)?; @@ -1050,7 +1050,7 @@ impl LocalDisk { let mut dir_objes = HashSet::new(); - // 第一层过滤 + // First-level filtering for item in entries.iter_mut() { let entry = item.clone(); // check limit @@ -1229,7 +1229,7 @@ fn is_root_path(path: impl AsRef) -> bool { path.as_ref().components().count() == 1 && path.as_ref().has_root() } -// 过滤 std::io::ErrorKind::NotFound +// Filter std::io::ErrorKind::NotFound pub async fn read_file_exists(path: impl AsRef) -> Result<(Bytes, Option)> { let p = path.as_ref(); let (data, meta) = match read_file_all(&p).await { @@ -1920,11 +1920,11 @@ impl DiskAPI for LocalDisk { } } - // xl.meta 路径 + // xl.meta path let src_file_path = src_volume_dir.join(Path::new(format!("{}/{}", &src_path, STORAGE_FORMAT_FILE).as_str())); let dst_file_path = dst_volume_dir.join(Path::new(format!("{}/{}", &dst_path, STORAGE_FORMAT_FILE).as_str())); - // data_dir 路径 + // data_dir path let has_data_dir_path = { let has_data_dir = { if !fi.is_remote() { @@ -1952,7 +1952,7 @@ impl DiskAPI for LocalDisk { check_path_length(src_file_path.to_string_lossy().to_string().as_str())?; check_path_length(dst_file_path.to_string_lossy().to_string().as_str())?; - // 读旧 xl.meta + // Read the previous xl.meta let has_dst_buf = match super::fs::read_file(&dst_file_path).await { Ok(res) => Some(res), @@ -2437,7 +2437,7 @@ impl DiskAPI for LocalDisk { async fn delete_volume(&self, volume: &str) -> Result<()> { let p = self.get_bucket_path(volume)?; - // TODO: 不能用递归删除,如果目录下面有文件,返回 errVolumeNotEmpty + // TODO: avoid recursive deletion; return errVolumeNotEmpty when files remain if let Err(err) = fs::remove_dir_all(&p).await { let e: DiskError = to_volume_error(err).into(); @@ -2591,7 +2591,7 @@ mod test { assert!(object_path.to_string_lossy().contains("test-bucket")); assert!(object_path.to_string_lossy().contains("test-object")); - // 清理测试目录 + // Clean up the test directory let _ = fs::remove_dir_all(&test_dir).await; } @@ -2656,7 +2656,7 @@ mod test { disk.delete_volume(vol).await.unwrap(); } - // 清理测试目录 + // Clean up the test directory let _ = fs::remove_dir_all(&test_dir).await; } @@ -2680,7 +2680,7 @@ mod test { assert!(!disk_info.fs_type.is_empty()); assert!(disk_info.total > 0); - // 清理测试目录 + // Clean up the test directory let _ = 
fs::remove_dir_all(&test_dir).await; } diff --git a/crates/ecstore/src/disk/mod.rs b/crates/ecstore/src/disk/mod.rs index 1e8af91c..3716f5eb 100644 --- a/crates/ecstore/src/disk/mod.rs +++ b/crates/ecstore/src/disk/mod.rs @@ -431,7 +431,7 @@ pub trait DiskAPI: Debug + Send + Sync + 'static { async fn stat_volume(&self, volume: &str) -> Result; async fn delete_volume(&self, volume: &str) -> Result<()>; - // 并发边读边写 w <- MetaCacheEntry + // Concurrent read/write pipeline w <- MetaCacheEntry async fn walk_dir(&self, opts: WalkDirOptions, wr: &mut W) -> Result<()>; // Metadata operations @@ -466,7 +466,7 @@ pub trait DiskAPI: Debug + Send + Sync + 'static { ) -> Result; // File operations. - // 读目录下的所有文件、目录 + // Read every file and directory within the folder async fn list_dir(&self, origvolume: &str, volume: &str, dir_path: &str, count: i32) -> Result>; async fn read_file(&self, volume: &str, path: &str) -> Result; async fn read_file_stream(&self, volume: &str, path: &str, offset: usize, length: usize) -> Result; @@ -1000,7 +1000,7 @@ mod tests { // Note: is_online() might return false for local disks without proper initialization // This is expected behavior for test environments - // 清理测试目录 + // Clean up the test directory let _ = fs::remove_dir_all(&test_dir).await; } @@ -1031,7 +1031,7 @@ mod tests { let location = disk.get_disk_location(); assert!(location.valid() || (!location.valid() && endpoint.pool_idx < 0)); - // 清理测试目录 + // Clean up the test directory let _ = fs::remove_dir_all(&test_dir).await; } } diff --git a/crates/ecstore/src/disk/os.rs b/crates/ecstore/src/disk/os.rs index 62670206..097cd61f 100644 --- a/crates/ecstore/src/disk/os.rs +++ b/crates/ecstore/src/disk/os.rs @@ -203,7 +203,7 @@ pub async fn os_mkdir_all(dir_path: impl AsRef, base_dir: impl AsRef } if let Some(parent) = dir_path.as_ref().parent() { - // 不支持递归,直接 create_dir_all 了 + // Without recursion support, fall back to create_dir_all if let Err(e) = super::fs::make_dir_all(&parent).await { if e.kind() == io::ErrorKind::AlreadyExists { return Ok(()); diff --git a/crates/ecstore/src/erasure.rs b/crates/ecstore/src/erasure.rs index 2ad3e270..2939fe13 100644 --- a/crates/ecstore/src/erasure.rs +++ b/crates/ecstore/src/erasure.rs @@ -297,24 +297,24 @@ impl Erasure { pub fn encode_data(self: Arc, data: &[u8]) -> Result> { let (shard_size, total_size) = self.need_size(data.len()); - // 生成一个新的 所需的所有分片数据长度 + // Generate the total length required for all shards let mut data_buffer = BytesMut::with_capacity(total_size); - // 复制源数据 + // Copy the source data data_buffer.extend_from_slice(data); data_buffer.resize(total_size, 0u8); { - // ec encode, 结果会写进 data_buffer + // Perform EC encoding; the results go into data_buffer let data_slices: SmallVec<[&mut [u8]; 16]> = data_buffer.chunks_exact_mut(shard_size).collect(); - // parity 数量大于 0 才 ec + // Only perform EC encoding when parity shards are present if self.parity_shards > 0 { self.encoder.as_ref().unwrap().encode(data_slices).map_err(Error::other)?; } } - // 零拷贝分片,所有 shard 引用 data_buffer + // Zero-copy shards: every shard references data_buffer let mut data_buffer = data_buffer.freeze(); let mut shards = Vec::with_capacity(self.total_shard_count()); for _ in 0..self.total_shard_count() { @@ -333,13 +333,13 @@ impl Erasure { Ok(()) } - // 每个分片长度,所需要的总长度 + // The length per shard and the total required length fn need_size(&self, data_size: usize) -> (usize, usize) { let shard_size = self.shard_size(data_size); (shard_size, shard_size * (self.total_shard_count())) } - // 
算出每个分片大小 + // Compute each shard size pub fn shard_size(&self, data_size: usize) -> usize { data_size.div_ceil(self.data_shards) } @@ -354,7 +354,7 @@ impl Erasure { let last_shard_size = last_block_size.div_ceil(self.data_shards); num_shards * self.shard_size(self.block_size) + last_shard_size - // // 因为写入的时候 ec 需要补全,所以最后一个长度应该也是一样的 + // When writing, EC pads the data so the last shard length should match // if last_block_size != 0 { // num_shards += 1 // } @@ -447,12 +447,12 @@ pub trait ReadAt { } pub struct ShardReader { - readers: Vec>, // 磁盘 - data_block_count: usize, // 总的分片数量 + readers: Vec>, // Disk readers + data_block_count: usize, // Total number of shards parity_block_count: usize, - shard_size: usize, // 每个分片的块大小 一次读取一块 - shard_file_size: usize, // 分片文件总长度 - offset: usize, // 在分片中的 offset + shard_size: usize, // Block size per shard (read one block at a time) + shard_file_size: usize, // Total size of the shard file + offset: usize, // Offset within the shard } impl ShardReader { @@ -470,7 +470,7 @@ impl ShardReader { pub async fn read(&mut self) -> Result>>> { // let mut disks = self.readers; let reader_length = self.readers.len(); - // 需要读取的块长度 + // Length of the block to read let mut read_length = self.shard_size; if self.offset + read_length > self.shard_file_size { read_length = self.shard_file_size - self.offset diff --git a/crates/ecstore/src/erasure_coding/bitrot.rs b/crates/ecstore/src/erasure_coding/bitrot.rs index 587b127c..72d17731 100644 --- a/crates/ecstore/src/erasure_coding/bitrot.rs +++ b/crates/ecstore/src/erasure_coding/bitrot.rs @@ -387,7 +387,7 @@ mod tests { } assert_eq!(n, data.len()); - // 读 + // Read let reader = bitrot_writer.into_inner(); let reader = Cursor::new(reader.into_inner()); let mut bitrot_reader = BitrotReader::new(reader, shard_size, HashAlgorithm::HighwayHash256); @@ -433,7 +433,7 @@ mod tests { let res = bitrot_reader.read(&mut buf).await; if idx == count - 1 { - // 最后一个块,应该返回错误 + // The last chunk should trigger an error assert!(res.is_err()); assert_eq!(res.unwrap_err().kind(), std::io::ErrorKind::InvalidData); break; diff --git a/crates/ecstore/src/erasure_coding/erasure.rs b/crates/ecstore/src/erasure_coding/erasure.rs index 37533815..23541011 100644 --- a/crates/ecstore/src/erasure_coding/erasure.rs +++ b/crates/ecstore/src/erasure_coding/erasure.rs @@ -58,7 +58,7 @@ impl Clone for ReedSolomonEncoder { Self { data_shards: self.data_shards, parity_shards: self.parity_shards, - // 为新实例创建空的缓存,不共享缓存 + // Create an empty cache for the new instance instead of sharing one encoder_cache: std::sync::RwLock::new(None), decoder_cache: std::sync::RwLock::new(None), } @@ -947,7 +947,7 @@ mod tests { let block_size = 1024 * 1024; // 1MB block size let erasure = Erasure::new(data_shards, parity_shards, block_size); - // 创建2MB的测试数据,这样可以测试多个1MB块的处理 + // Build 2 MB of test data so multiple 1 MB chunks are exercised let mut data = Vec::with_capacity(2 * 1024 * 1024); for i in 0..(2 * 1024 * 1024) { data.push((i % 256) as u8); @@ -961,7 +961,7 @@ mod tests { data.len() / 1024 ); - // 编码数据 + // Encode the data let start = std::time::Instant::now(); let shards = erasure.encode_data(&data).unwrap(); let encode_duration = start.elapsed(); diff --git a/crates/ecstore/src/pools.rs b/crates/ecstore/src/pools.rs index bb9cfa36..f503ff9d 100644 --- a/crates/ecstore/src/pools.rs +++ b/crates/ecstore/src/pools.rs @@ -384,7 +384,7 @@ impl PoolMeta { let mut update = false; - // 检查指定的池是否需要从已退役的池中移除。 + // Determine whether the selected pool should be removed from 
the retired list. for k in specified_pools.keys() { if let Some(pi) = remembered_pools.get(k) { if pi.completed { @@ -400,7 +400,7 @@ impl PoolMeta { // ))); } } else { - // 如果之前记住的池不再存在,允许更新,因为可能是添加了一个新池。 + // If the previous pool no longer exists, allow updates because a new pool may have been added. update = true; } } @@ -409,7 +409,7 @@ impl PoolMeta { for (k, pi) in remembered_pools.iter() { if let Some(pos) = specified_pools.get(k) { if *pos != pi.position { - update = true; // 池的顺序发生了变化,允许更新。 + update = true; // Pool order changed, allow the update. } } } @@ -427,12 +427,12 @@ impl PoolMeta { for pool in &self.pools { if let Some(decommission) = &pool.decommission { if decommission.complete || decommission.canceled { - // 不需要恢复的情况: - // - 退役已完成 - // - 退役已取消 + // Recovery is not required when: + // - Decommissioning completed + // - Decommissioning was cancelled continue; } - // 其他情况需要恢复 + // All other scenarios require recovery new_pools.push(pool.clone()); } } diff --git a/crates/ecstore/src/rebalance.rs b/crates/ecstore/src/rebalance.rs index 0de0eb6b..d58d3a88 100644 --- a/crates/ecstore/src/rebalance.rs +++ b/crates/ecstore/src/rebalance.rs @@ -421,15 +421,15 @@ impl ECStore { if let Some(pool_stat) = meta.pool_stats.get_mut(pool_index) { info!("bucket_rebalance_done: buckets {:?}", &pool_stat.buckets); - // 使用 retain 来过滤掉要删除的 bucket + // Use retain to filter out buckets slated for removal let mut found = false; pool_stat.buckets.retain(|b| { if b.as_str() == bucket.as_str() { found = true; pool_stat.rebalanced_buckets.push(b.clone()); - false // 删除这个元素 + false // Remove this element } else { - true // 保留这个元素 + true // Keep this element } }); @@ -946,13 +946,13 @@ impl ECStore { let mut reader = rd.stream; for (i, part) in object_info.parts.iter().enumerate() { - // 每次从 reader 中读取一个 part 上传 + // Read one part from the reader and upload it each time let mut chunk = vec![0u8; part.size]; reader.read_exact(&mut chunk).await?; - // 每次从 reader 中读取一个 part 上传 + // Read one part from the reader and upload it each time let mut data = PutObjReader::from_vec(chunk); let pi = match self diff --git a/crates/ecstore/src/rpc/peer_s3_client.rs b/crates/ecstore/src/rpc/peer_s3_client.rs index c7f36eac..ac0a035c 100644 --- a/crates/ecstore/src/rpc/peer_s3_client.rs +++ b/crates/ecstore/src/rpc/peer_s3_client.rs @@ -536,7 +536,7 @@ impl PeerS3Client for LocalPeerS3Client { } } - // errVolumeNotEmpty 不删除,把已经删除的重新创建 + // For errVolumeNotEmpty, do not delete; recreate only the entries already removed for (idx, err) in errs.into_iter().enumerate() { if err.is_none() && recreate { diff --git a/crates/ecstore/src/rpc/remote_disk.rs b/crates/ecstore/src/rpc/remote_disk.rs index da8a11e1..97d30fda 100644 --- a/crates/ecstore/src/rpc/remote_disk.rs +++ b/crates/ecstore/src/rpc/remote_disk.rs @@ -83,7 +83,7 @@ impl DiskAPI for RemoteDisk { #[tracing::instrument(skip(self))] async fn is_online(&self) -> bool { - // TODO: 连接状态 + // TODO: connection status tracking if node_service_time_out_client(&self.addr).await.is_ok() { return true; } diff --git a/crates/ecstore/src/set_disk.rs b/crates/ecstore/src/set_disk.rs index 823471a4..bc269a85 100644 --- a/crates/ecstore/src/set_disk.rs +++ b/crates/ecstore/src/set_disk.rs @@ -401,7 +401,7 @@ impl SetDisks { let mut futures = Vec::with_capacity(disks.len()); if let Some(ret_err) = reduce_write_quorum_errs(&errs, OBJECT_OP_IGNORED_ERRS, write_quorum) { - // TODO: 并发 + // TODO: add concurrency for (i, err) in errs.iter().enumerate() { if err.is_some() { continue; @@ 
-891,7 +891,7 @@ impl SetDisks { } if let Some(err) = reduce_write_quorum_errs(&errs, OBJECT_OP_IGNORED_ERRS, write_quorum) { - // TODO: 并发 + // TODO: add concurrency for (i, err) in errs.iter().enumerate() { if err.is_some() { continue; @@ -1700,7 +1700,7 @@ impl SetDisks { let disks = rl.clone(); - // 主动释放锁 + // Explicitly release the lock drop(rl); for (i, opdisk) in disks.iter().enumerate() { @@ -1744,7 +1744,7 @@ impl SetDisks { } }; - // check endpoint 是否一致 + // Check that the endpoint matches let _ = new_disk.set_disk_id(Some(fm.erasure.this)).await; @@ -1959,7 +1959,7 @@ impl SetDisks { Ok(()) } - // 打乱顺序 + // Shuffle the order fn shuffle_disks_and_parts_metadata_by_index( disks: &[Option], parts_metadata: &[FileInfo], @@ -1998,7 +1998,7 @@ impl SetDisks { Self::shuffle_disks_and_parts_metadata(disks, parts_metadata, fi) } - // 打乱顺序 + // Shuffle the order fn shuffle_disks_and_parts_metadata( disks: &[Option], parts_metadata: &[FileInfo], @@ -2075,7 +2075,7 @@ impl SetDisks { let vid = opts.version_id.clone().unwrap_or_default(); - // TODO: 优化并发 可用数量中断 + // TODO: optimize concurrency and break once enough slots are available let (parts_metadata, errs) = Self::read_all_fileinfo(&disks, "", bucket, object, vid.as_str(), read_data, false).await?; // warn!("get_object_fileinfo parts_metadata {:?}", &parts_metadata); // warn!("get_object_fileinfo {}/{} errs {:?}", bucket, object, &errs); @@ -3722,7 +3722,7 @@ impl ObjectIO for SetDisks { error!("encode err {:?}", e); return Err(e.into()); } - }; // TODO: 出错,删除临时目录 + }; // TODO: delete temporary directory on error let _ = mem::replace(&mut data.stream, reader); // if let Err(err) = close_bitrot_writers(&mut writers).await { @@ -4050,7 +4050,7 @@ impl StorageAPI for SetDisks { objects: Vec, opts: ObjectOptions, ) -> (Vec, Vec>) { - // 默认返回值 + // Default return value let mut del_objects = vec![DeletedObject::default(); objects.len()]; let mut del_errs = Vec::with_capacity(objects.len()); @@ -4107,7 +4107,7 @@ impl StorageAPI for SetDisks { vr.set_tier_free_version_id(&Uuid::new_v4().to_string()); - // 删除 + // Delete // del_objects[i].object_name.clone_from(&vr.name); // del_objects[i].version_id = vr.version_id.map(|v| v.to_string()); @@ -4200,9 +4200,9 @@ impl StorageAPI for SetDisks { let mut del_obj_errs: Vec>> = vec![vec![None; objects.len()]; disks.len()]; - // 每个磁盘, 删除所有对象 + // For each disk delete all objects for (disk_idx, errors) in results.into_iter().enumerate() { - // 所有对象的删除结果 + // Deletion results for all objects for idx in 0..vers.len() { if errors[idx].is_some() { for fi in vers[idx].versions.iter() { @@ -4964,7 +4964,7 @@ impl StorageAPI for SetDisks { HashReader::new(Box::new(WarpReader::new(Cursor::new(Vec::new()))), 0, 0, None, None, false)?, ); - let (reader, w_size) = Arc::new(erasure).encode(stream, &mut writers, write_quorum).await?; // TODO: 出错,删除临时目录 + let (reader, w_size) = Arc::new(erasure).encode(stream, &mut writers, write_quorum).await?; // TODO: delete temporary directory on error let _ = mem::replace(&mut data.stream, reader); @@ -5453,7 +5453,7 @@ impl StorageAPI for SetDisks { self.delete_all(RUSTFS_META_MULTIPART_BUCKET, &upload_id_path).await } - // complete_multipart_upload 完成 + // complete_multipart_upload finished #[tracing::instrument(skip(self))] async fn complete_multipart_upload( self: Arc, @@ -6567,7 +6567,7 @@ mod tests { // Test that all CHECK_PART constants have expected values assert_eq!(CHECK_PART_UNKNOWN, 0); assert_eq!(CHECK_PART_SUCCESS, 1); - assert_eq!(CHECK_PART_FILE_NOT_FOUND, 4); // 
实际值是 4,不是 2 + assert_eq!(CHECK_PART_FILE_NOT_FOUND, 4); // The actual value is 4, not 2 assert_eq!(CHECK_PART_VOLUME_NOT_FOUND, 3); assert_eq!(CHECK_PART_FILE_CORRUPT, 5); } @@ -6847,7 +6847,7 @@ mod tests { assert_eq!(conv_part_err_to_int(&Some(disk_err)), CHECK_PART_FILE_NOT_FOUND); let other_err = DiskError::other("other error"); - assert_eq!(conv_part_err_to_int(&Some(other_err)), CHECK_PART_UNKNOWN); // other 错误应该返回 UNKNOWN,不是 SUCCESS + assert_eq!(conv_part_err_to_int(&Some(other_err)), CHECK_PART_UNKNOWN); // Other errors should return UNKNOWN, not SUCCESS } #[test] @@ -6919,7 +6919,7 @@ mod tests { let errs = vec![None, Some(DiskError::other("error1")), Some(DiskError::other("error2"))]; let joined = join_errs(&errs); assert!(joined.contains("")); - assert!(joined.contains("io error")); // DiskError::other 显示为 "io error" + assert!(joined.contains("io error")); // DiskError::other is rendered as "io error" // Test with different error types let errs2 = vec![None, Some(DiskError::FileNotFound), Some(DiskError::FileCorrupt)]; diff --git a/crates/ecstore/src/store.rs b/crates/ecstore/src/store.rs index 14a24fcb..9fe7ffe5 100644 --- a/crates/ecstore/src/store.rs +++ b/crates/ecstore/src/store.rs @@ -219,7 +219,7 @@ impl ECStore { disk_map.insert(i, disks); } - // 替换本地磁盘 + // Replace the local disk if !is_dist_erasure().await { let mut global_local_disk_map = GLOBAL_LOCAL_DISK_MAP.write().await; for disk in local_disks { @@ -243,7 +243,7 @@ impl ECStore { decommission_cancelers, }); - // 只有在全局部署ID尚未设置时才设置它 + // Only set it when the global deployment ID is not yet configured if let Some(dep_id) = deployment_id { if get_global_deployment_id().is_none() { set_global_deployment_id(dep_id); @@ -383,7 +383,7 @@ impl ECStore { // Ok(info) // } - // 读所有 + // Read all entries // define in store_list_objects.rs // async fn list_merged(&self, opts: &ListPathOptions, delimiter: &str) -> Result> { // let walk_opts = WalkDirOptions { @@ -425,7 +425,7 @@ impl ECStore { // if !uniq.contains(&entry.name) { // uniq.insert(entry.name.clone()); - // // TODO: 过滤 + // // TODO: filter // if opts.limit > 0 && ress.len() as i32 >= opts.limit { // return Ok(ress); @@ -516,7 +516,7 @@ impl ECStore { } async fn get_available_pool_idx(&self, bucket: &str, object: &str, size: i64) -> Option { - // // 先随机返回一个 + // // Return a random one first let mut server_pools = self.get_server_pools_available_space(bucket, object, size).await; server_pools.filter_max_used(100 - (100_f64 * DISK_RESERVE_FRACTION) as u64); @@ -546,7 +546,7 @@ impl ECStore { let mut n_sets = vec![0; self.pools.len()]; let mut infos = vec![Vec::new(); self.pools.len()]; - // TODO: 并发 + // TODO: add concurrency for (idx, pool) in self.pools.iter().enumerate() { if self.is_suspended(idx).await || self.is_pool_rebalancing(idx).await { continue; @@ -713,7 +713,7 @@ impl ECStore { let mut ress = Vec::new(); - // join_all 结果跟输入顺序一致 + // join_all preserves the input order for (i, res) in results.into_iter().enumerate() { let index = i; @@ -984,7 +984,7 @@ pub async fn all_local_disk() -> Vec { .collect() } -// init_local_disks 初始化本地磁盘,server 启动前必须初始化成功 +// init_local_disks must succeed before the server starts pub async fn init_local_disks(endpoint_pools: EndpointServerPools) -> Result<()> { let opt = &DiskOption { cleanup: true, @@ -1317,7 +1317,7 @@ impl StorageAPI for ECStore { // TODO: replication opts.srdelete_op - // 删除 meta + // Delete the metadata self.delete_all(RUSTFS_META_BUCKET, format!("{BUCKET_META_PREFIX}/{bucket}").as_str()) .await?; Ok(()) @@ 
-1469,7 +1469,7 @@ impl StorageAPI for ECStore { let mut gopts = opts.clone(); gopts.no_lock = true; - // 查询在哪个 pool + // Determine which pool contains it let (mut pinfo, errs) = self .get_pool_info_existing_with_opts(bucket, object, &gopts) .await @@ -1543,7 +1543,7 @@ impl StorageAPI for ECStore { }) .collect(); - // 默认返回值 + // Default return value let mut del_objects = vec![DeletedObject::default(); objects.len()]; let mut del_errs = Vec::with_capacity(objects.len()); @@ -1625,7 +1625,7 @@ impl StorageAPI for ECStore { // // results.push(jh.await.unwrap()); // // } - // // 记录 pool Index 对应的 objects pool_idx -> objects idx + // // Record the mapping pool_idx -> object index // let mut pool_obj_idx_map = HashMap::new(); // let mut orig_index_map = HashMap::new(); @@ -1675,9 +1675,9 @@ impl StorageAPI for ECStore { // if !pool_obj_idx_map.is_empty() { // for (i, sets) in self.pools.iter().enumerate() { - // // 取 pool idx 对应的 objects index + // // Retrieve the object index for a pool idx // if let Some(objs) = pool_obj_idx_map.get(&i) { - // // 取对应 obj,理论上不会 none + // // Fetch the corresponding object (should never be None) // // let objs: Vec = obj_idxs.iter().filter_map(|&idx| objects.get(idx).cloned()).collect(); // if objs.is_empty() { @@ -1686,10 +1686,10 @@ impl StorageAPI for ECStore { // let (pdel_objs, perrs) = sets.delete_objects(bucket, objs.clone(), opts.clone()).await?; - // // 同时存入不可能为 none + // // Insert simultaneously (should never be None) // let org_indexes = orig_index_map.get(&i).unwrap(); - // // perrs 的顺序理论上跟 obj_idxs 顺序一致 + // // perrs should follow the same order as obj_idxs // for (i, err) in perrs.into_iter().enumerate() { // let obj_idx = org_indexes[i]; diff --git a/crates/ecstore/src/store_utils.rs b/crates/ecstore/src/store_utils.rs index f9be4316..ea9e8379 100644 --- a/crates/ecstore/src/store_utils.rs +++ b/crates/ecstore/src/store_utils.rs @@ -37,17 +37,17 @@ pub fn clean_metadata_keys(metadata: &mut HashMap, key_names: &[ } } -// 检查是否为 元数据桶 +// Check whether the bucket is the metadata bucket fn is_meta_bucket(bucket_name: &str) -> bool { bucket_name == RUSTFS_META_BUCKET } -// 检查是否为 保留桶 +// Check whether the bucket is reserved fn is_reserved_bucket(bucket_name: &str) -> bool { bucket_name == "rustfs" } -// 检查桶名是否为保留名或无效名 +// Check whether the bucket name is reserved or invalid pub fn is_reserved_or_invalid_bucket(bucket_entry: &str, strict: bool) -> bool { if bucket_entry.is_empty() { return true; @@ -59,7 +59,7 @@ pub fn is_reserved_or_invalid_bucket(bucket_entry: &str, strict: bool) -> bool { result || is_meta_bucket(bucket_entry) || is_reserved_bucket(bucket_entry) } -// 检查桶名是否有效 +// Check whether the bucket name is valid fn check_bucket_name(bucket_name: &str, strict: bool) -> Result<()> { if bucket_name.trim().is_empty() { return Err(Error::other("Bucket name cannot be empty")); @@ -86,7 +86,7 @@ fn check_bucket_name(bucket_name: &str, strict: bool) -> Result<()> { return Err(Error::other("Bucket name contains invalid characters")); } - // 检查包含 "..", ".-", "-." + // Check for "..", ".-", "-." 
if bucket_name.contains("..") || bucket_name.contains(".-") || bucket_name.contains("-.") { return Err(Error::other("Bucket name contains invalid characters")); } diff --git a/crates/ecstore/src/tier/warm_backend_s3sdk.rs b/crates/ecstore/src/tier/warm_backend_s3sdk.rs index 7d856e69..15ae827b 100644 --- a/crates/ecstore/src/tier/warm_backend_s3sdk.rs +++ b/crates/ecstore/src/tier/warm_backend_s3sdk.rs @@ -81,7 +81,7 @@ impl WarmBackendS3 { creds = Credentials::new( conf.access_key.clone(), // access_key_id conf.secret_key.clone(), // secret_access_key - None, // session_token (可选) + None, // session_token (optional) None, "Static", ); diff --git a/crates/filemeta/src/filemeta.rs b/crates/filemeta/src/filemeta.rs index 7b913e8d..5d10d759 100644 --- a/crates/filemeta/src/filemeta.rs +++ b/crates/filemeta/src/filemeta.rs @@ -2712,21 +2712,25 @@ mod test { #[test] fn test_real_xlmeta_compatibility() { - // 测试真实的 xl.meta 文件格式兼容性 - let data = create_real_xlmeta().expect("创建真实测试数据失败"); + // Test compatibility with real xl.meta formats + let data = create_real_xlmeta().expect("Failed to create realistic test data"); - // 验证文件头 - assert_eq!(&data[0..4], b"XL2 ", "文件头应该是 'XL2 '"); - assert_eq!(&data[4..8], &[1, 0, 3, 0], "版本号应该是 1.3.0"); + // Verify the file header + assert_eq!(&data[0..4], b"XL2 ", "File header should be 'XL2 '"); + assert_eq!(&data[4..8], &[1, 0, 3, 0], "Version number should be 1.3.0"); - // 解析元数据 - let fm = FileMeta::load(&data).expect("解析真实数据失败"); + // Parse metadata + let fm = FileMeta::load(&data).expect("Failed to parse realistic data"); - // 验证基本属性 + // Verify basic properties assert_eq!(fm.meta_ver, XL_META_VERSION); - assert_eq!(fm.versions.len(), 3, "应该有 3 个版本(1 个对象,1 个删除标记,1 个 Legacy)"); + assert_eq!( + fm.versions.len(), + 3, + "Should have three versions (one object, one delete marker, one Legacy)" + ); - // 验证版本类型 + // Verify version types let mut object_count = 0; let mut delete_count = 0; let mut legacy_count = 0; @@ -2736,21 +2740,21 @@ mod test { VersionType::Object => object_count += 1, VersionType::Delete => delete_count += 1, VersionType::Legacy => legacy_count += 1, - VersionType::Invalid => panic!("不应该有无效版本"), + VersionType::Invalid => panic!("No invalid versions should be present"), } } - assert_eq!(object_count, 1, "应该有 1 个对象版本"); - assert_eq!(delete_count, 1, "应该有 1 个删除标记"); - assert_eq!(legacy_count, 1, "应该有 1 个 Legacy 版本"); + assert_eq!(object_count, 1, "Should have one object version"); + assert_eq!(delete_count, 1, "Should have one delete marker"); + assert_eq!(legacy_count, 1, "Should have one Legacy version"); - // 验证兼容性 - assert!(fm.is_compatible_with_meta(), "应该与 xl 格式兼容"); + // Verify compatibility + assert!(fm.is_compatible_with_meta(), "Should be compatible with the xl format"); - // 验证完整性 - fm.validate_integrity().expect("完整性验证失败"); + // Verify integrity + fm.validate_integrity().expect("Integrity validation failed"); - // 验证版本统计 + // Verify version statistics let stats = fm.get_version_stats(); assert_eq!(stats.total_versions, 3); assert_eq!(stats.object_versions, 1); @@ -2760,61 +2764,61 @@ mod test { #[test] fn test_complex_xlmeta_handling() { - // 测试复杂的多版本 xl.meta 文件 - let data = create_complex_xlmeta().expect("创建复杂测试数据失败"); - let fm = FileMeta::load(&data).expect("解析复杂数据失败"); + // Test complex xl.meta files with many versions + let data = create_complex_xlmeta().expect("Failed to create complex test data"); + let fm = FileMeta::load(&data).expect("Failed to parse complex data"); - // 验证版本数量 - assert!(fm.versions.len() >= 10, "应该有至少 10 
个版本"); + // Verify version count + assert!(fm.versions.len() >= 10, "Should have at least 10 versions"); - // 验证版本排序 - assert!(fm.is_sorted_by_mod_time(), "版本应该按修改时间排序"); + // Verify version ordering + assert!(fm.is_sorted_by_mod_time(), "Versions should be sorted by modification time"); - // 验证不同版本类型的存在 + // Verify presence of different version types let stats = fm.get_version_stats(); - assert!(stats.object_versions > 0, "应该有对象版本"); - assert!(stats.delete_markers > 0, "应该有删除标记"); + assert!(stats.object_versions > 0, "Should include object versions"); + assert!(stats.delete_markers > 0, "Should include delete markers"); - // 测试版本合并功能 + // Test version merge functionality let merged = merge_file_meta_versions(1, false, 0, std::slice::from_ref(&fm.versions)); - assert!(!merged.is_empty(), "合并后应该有版本"); + assert!(!merged.is_empty(), "Merged output should contain versions"); } #[test] fn test_inline_data_handling() { - // 测试内联数据处理 - let data = create_xlmeta_with_inline_data().expect("创建内联数据测试失败"); - let fm = FileMeta::load(&data).expect("解析内联数据失败"); + // Test inline data handling + let data = create_xlmeta_with_inline_data().expect("Failed to create inline test data"); + let fm = FileMeta::load(&data).expect("Failed to parse inline data"); - assert_eq!(fm.versions.len(), 1, "应该有 1 个版本"); - assert!(!fm.data.as_slice().is_empty(), "应该包含内联数据"); + assert_eq!(fm.versions.len(), 1, "Should have one version"); + assert!(!fm.data.as_slice().is_empty(), "Should contain inline data"); - // 验证内联数据内容 + // Verify inline data contents let inline_data = fm.data.as_slice(); - assert!(!inline_data.is_empty(), "内联数据不应为空"); + assert!(!inline_data.is_empty(), "Inline data should not be empty"); } #[test] fn test_error_handling_and_recovery() { - // 测试错误处理和恢复 + // Test error handling and recovery let corrupted_data = create_corrupted_xlmeta(); let result = FileMeta::load(&corrupted_data); - assert!(result.is_err(), "损坏的数据应该解析失败"); + assert!(result.is_err(), "Corrupted data should fail to parse"); - // 测试空文件处理 - let empty_data = create_empty_xlmeta().expect("创建空数据失败"); - let fm = FileMeta::load(&empty_data).expect("解析空数据失败"); - assert_eq!(fm.versions.len(), 0, "空文件应该没有版本"); + // Test handling of empty files + let empty_data = create_empty_xlmeta().expect("Failed to create empty test data"); + let fm = FileMeta::load(&empty_data).expect("Failed to parse empty data"); + assert_eq!(fm.versions.len(), 0, "An empty file should have no versions"); } #[test] fn test_version_type_legacy_support() { - // 专门测试 Legacy 版本类型支持 + // Validate support for Legacy version types assert_eq!(VersionType::Legacy.to_u8(), 3); assert_eq!(VersionType::from_u8(3), VersionType::Legacy); - assert!(VersionType::Legacy.valid(), "Legacy 类型应该是有效的"); + assert!(VersionType::Legacy.valid(), "Legacy type should be valid"); - // 测试 Legacy 版本的创建和处理 + // Exercise creation and handling of Legacy versions let legacy_version = FileMetaVersion { version_type: VersionType::Legacy, object: None, @@ -2822,101 +2826,101 @@ mod test { write_version: 1, }; - assert!(legacy_version.is_legacy(), "应该识别为 Legacy 版本"); + assert!(legacy_version.is_legacy(), "Should be recognized as a Legacy version"); } #[test] fn test_signature_calculation() { - // 测试签名计算功能 - let data = create_real_xlmeta().expect("创建测试数据失败"); - let fm = FileMeta::load(&data).expect("解析失败"); + // Test signature calculation + let data = create_real_xlmeta().expect("Failed to create test data"); + let fm = FileMeta::load(&data).expect("Parsing failed"); for version in &fm.versions { let signature = 
version.header.get_signature(); - assert_eq!(signature.len(), 4, "签名应该是 4 字节"); + assert_eq!(signature.len(), 4, "Signature should be 4 bytes"); - // 验证相同版本的签名一致性 + // Verify signature consistency for identical versions let signature2 = version.header.get_signature(); - assert_eq!(signature, signature2, "相同版本的签名应该一致"); + assert_eq!(signature, signature2, "Identical versions should produce identical signatures"); } } #[test] fn test_metadata_validation() { - // 测试元数据验证功能 - let data = create_real_xlmeta().expect("创建测试数据失败"); - let fm = FileMeta::load(&data).expect("解析失败"); + // Test metadata validation + let data = create_real_xlmeta().expect("Failed to create test data"); + let fm = FileMeta::load(&data).expect("Parsing failed"); - // 测试完整性验证 - fm.validate_integrity().expect("完整性验证应该通过"); + // Test integrity validation + fm.validate_integrity().expect("Integrity validation should succeed"); - // 测试兼容性检查 - assert!(fm.is_compatible_with_meta(), "应该与 xl 格式兼容"); + // Test compatibility checks + assert!(fm.is_compatible_with_meta(), "Should be compatible with the xl format"); - // 测试版本排序检查 - assert!(fm.is_sorted_by_mod_time(), "版本应该按时间排序"); + // Test version ordering checks + assert!(fm.is_sorted_by_mod_time(), "Versions should be time-ordered"); } #[test] fn test_round_trip_serialization() { - // 测试序列化和反序列化的往返一致性 - let original_data = create_real_xlmeta().expect("创建原始数据失败"); - let fm = FileMeta::load(&original_data).expect("解析原始数据失败"); + // Test round-trip serialization consistency + let original_data = create_real_xlmeta().expect("Failed to create original test data"); + let fm = FileMeta::load(&original_data).expect("Failed to parse original data"); - // 重新序列化 - let serialized_data = fm.marshal_msg().expect("重新序列化失败"); + // Serialize again + let serialized_data = fm.marshal_msg().expect("Re-serialization failed"); - // 再次解析 - let fm2 = FileMeta::load(&serialized_data).expect("解析序列化数据失败"); + // Parse again + let fm2 = FileMeta::load(&serialized_data).expect("Failed to parse serialized data"); - // 验证一致性 - assert_eq!(fm.versions.len(), fm2.versions.len(), "版本数量应该一致"); - assert_eq!(fm.meta_ver, fm2.meta_ver, "元数据版本应该一致"); + // Verify consistency + assert_eq!(fm.versions.len(), fm2.versions.len(), "Version counts should match"); + assert_eq!(fm.meta_ver, fm2.meta_ver, "Metadata versions should match"); - // 验证版本内容一致性 + // Verify version content consistency for (v1, v2) in fm.versions.iter().zip(fm2.versions.iter()) { - assert_eq!(v1.header.version_type, v2.header.version_type, "版本类型应该一致"); - assert_eq!(v1.header.version_id, v2.header.version_id, "版本 ID 应该一致"); + assert_eq!(v1.header.version_type, v2.header.version_type, "Version types should match"); + assert_eq!(v1.header.version_id, v2.header.version_id, "Version IDs should match"); } } #[test] fn test_performance_with_large_metadata() { - // 测试大型元数据文件的性能 + // Test performance with large metadata files use std::time::Instant; let start = Instant::now(); - let data = create_complex_xlmeta().expect("创建大型测试数据失败"); + let data = create_complex_xlmeta().expect("Failed to create large test data"); let creation_time = start.elapsed(); let start = Instant::now(); - let fm = FileMeta::load(&data).expect("解析大型数据失败"); + let fm = FileMeta::load(&data).expect("Failed to parse large data"); let parsing_time = start.elapsed(); let start = Instant::now(); - let _serialized = fm.marshal_msg().expect("序列化失败"); + let _serialized = fm.marshal_msg().expect("Serialization failed"); let serialization_time = start.elapsed(); - println!("性能测试结果:"); - println!(" 
创建时间:{creation_time:?}"); - println!(" 解析时间:{parsing_time:?}"); - println!(" 序列化时间:{serialization_time:?}"); + println!("Performance results:"); + println!(" Creation time: {creation_time:?}"); + println!(" Parsing time: {parsing_time:?}"); + println!(" Serialization time: {serialization_time:?}"); - // 基本性能断言(这些值可能需要根据实际性能调整) - assert!(parsing_time.as_millis() < 100, "解析时间应该小于 100ms"); - assert!(serialization_time.as_millis() < 100, "序列化时间应该小于 100ms"); + // Basic performance assertions (adjust as needed for real workloads) + assert!(parsing_time.as_millis() < 100, "Parsing time should be under 100 ms"); + assert!(serialization_time.as_millis() < 100, "Serialization time should be under 100 ms"); } #[test] fn test_edge_cases() { - // 测试边界情况 + // Test edge cases - // 1. 测试空版本 ID + // 1. Test empty version IDs let mut fm = FileMeta::new(); let version = FileMetaVersion { version_type: VersionType::Object, object: Some(MetaObject { - version_id: None, // 空版本 ID + version_id: None, // Empty version ID data_dir: None, erasure_algorithm: crate::fileinfo::ErasureAlgo::ReedSolomon, erasure_m: 1, @@ -2939,35 +2943,35 @@ mod test { write_version: 1, }; - let shallow_version = FileMetaShallowVersion::try_from(version).expect("转换失败"); + let shallow_version = FileMetaShallowVersion::try_from(version).expect("Conversion failed"); fm.versions.push(shallow_version); - // 应该能够序列化和反序列化 - let data = fm.marshal_msg().expect("序列化失败"); - let fm2 = FileMeta::load(&data).expect("解析失败"); + // Should support serialization and deserialization + let data = fm.marshal_msg().expect("Serialization failed"); + let fm2 = FileMeta::load(&data).expect("Parsing failed"); assert_eq!(fm2.versions.len(), 1); - // 2. 测试极大的文件大小 + // 2. Test extremely large file sizes let large_object = MetaObject { size: i64::MAX, part_sizes: vec![usize::MAX], ..Default::default() }; - // 应该能够处理大数值 + // Should handle very large numbers assert_eq!(large_object.size, i64::MAX); } #[tokio::test] async fn test_concurrent_operations() { - // 测试并发操作的安全性 + // Test thread safety for concurrent operations use std::sync::Arc; use std::sync::Mutex; let fm = Arc::new(Mutex::new(FileMeta::new())); let mut handles = vec![]; - // 并发添加版本 + // Add versions concurrently for i in 0..10 { let fm_clone: Arc> = Arc::clone(&fm); let handle = tokio::spawn(async move { @@ -2981,7 +2985,7 @@ mod test { handles.push(handle); } - // 等待所有任务完成 + // Wait for all tasks to finish for handle in handles { handle.await.unwrap(); } @@ -2992,15 +2996,15 @@ mod test { #[test] fn test_memory_efficiency() { - // 测试内存使用效率 + // Test memory efficiency use std::mem; - // 测试空结构体的内存占用 + // Measure memory usage for empty structs let empty_fm = FileMeta::new(); let empty_size = mem::size_of_val(&empty_fm); println!("Empty FileMeta size: {empty_size} bytes"); - // 测试包含大量版本的内存占用 + // Measure memory usage with many versions let mut large_fm = FileMeta::new(); for i in 0..100 { let mut fi = crate::fileinfo::FileInfo::new(&format!("test-{i}"), 2, 1); @@ -3012,18 +3016,18 @@ mod test { let large_size = mem::size_of_val(&large_fm); println!("Large FileMeta size: {large_size} bytes"); - // 验证内存使用是合理的(注意:size_of_val 只计算栈上的大小,不包括堆分配) - // 对于包含 Vec 的结构体,size_of_val 可能相同,因为 Vec 的容量在堆上 - println!("版本数量:{}", large_fm.versions.len()); - assert!(!large_fm.versions.is_empty(), "应该有版本数据"); + // Ensure memory usage is reasonable (size_of_val covers only stack allocations) + // For structs containing Vec, size_of_val may match because capacity lives on the heap + println!("Number of versions: {}", 
large_fm.versions.len()); + assert!(!large_fm.versions.is_empty(), "Should contain version data"); } #[test] fn test_version_ordering_edge_cases() { - // 测试版本排序的边界情况 + // Test boundary cases for version ordering let mut fm = FileMeta::new(); - // 添加相同时间戳的版本 + // Add versions with identical timestamps let same_time = OffsetDateTime::now_utc(); for i in 0..5 { let mut fi = crate::fileinfo::FileInfo::new(&format!("test-{i}"), 2, 1); @@ -3032,18 +3036,18 @@ mod test { fm.add_version(fi).unwrap(); } - // 验证排序稳定性 + // Verify stable ordering let original_order: Vec<_> = fm.versions.iter().map(|v| v.header.version_id).collect(); fm.sort_by_mod_time(); let sorted_order: Vec<_> = fm.versions.iter().map(|v| v.header.version_id).collect(); - // 对于相同时间戳,排序应该保持稳定 + // Sorting should remain stable for identical timestamps assert_eq!(original_order.len(), sorted_order.len()); } #[test] fn test_checksum_algorithms() { - // 测试不同的校验和算法 + // Test different checksum algorithms let algorithms = vec![ChecksumAlgo::Invalid, ChecksumAlgo::HighwayHash]; for algo in algorithms { @@ -3052,7 +3056,7 @@ mod test { ..Default::default() }; - // 验证算法的有效性检查 + // Verify checksum validation logic match algo { ChecksumAlgo::Invalid => assert!(!algo.valid()), ChecksumAlgo::HighwayHash => assert!(algo.valid()), @@ -3068,12 +3072,12 @@ mod test { #[test] fn test_erasure_coding_parameters() { - // 测试纠删码参数的各种组合 + // Test combinations of erasure coding parameters let test_cases = vec![ - (1, 1), // 最小配置 - (2, 1), // 常见配置 - (4, 2), // 标准配置 - (8, 4), // 高冗余配置 + (1, 1), // Minimum configuration + (2, 1), // Common configuration + (4, 2), // Standard configuration + (8, 4), // High redundancy configuration ]; for (data_blocks, parity_blocks) in test_cases { @@ -3084,9 +3088,9 @@ mod test { ..Default::default() }; - // 验证参数的合理性 - assert!(obj.erasure_m > 0, "数据块数量必须大于 0"); - assert!(obj.erasure_n > 0, "校验块数量必须大于 0"); + // Verify parameter validity + assert!(obj.erasure_m > 0, "Data block count must be greater than 0"); + assert!(obj.erasure_n > 0, "Parity block count must be greater than 0"); assert_eq!(obj.erasure_dist.len(), data_blocks + parity_blocks); // Verify serialization and deserialization @@ -3101,20 +3105,20 @@ mod test { #[test] fn test_metadata_size_limits() { - // 测试元数据大小限制 + // Test metadata size limits let mut obj = MetaObject::default(); - // 测试适量用户元数据 + // Test moderate amounts of user metadata for i in 0..10 { obj.meta_user .insert(format!("key-{i:04}"), format!("value-{:04}-{}", i, "x".repeat(10))); } - // 验证可以序列化元数据 + // Verify metadata can be serialized let data = obj.marshal_msg().unwrap(); - assert!(data.len() > 100, "序列化后的数据应该有合理大小"); + assert!(data.len() > 100, "Serialized data should have a reasonable size"); - // 验证可以反序列化 + // Verify deserialization succeeds let mut obj2 = MetaObject::default(); obj2.unmarshal_msg(&data).unwrap(); assert_eq!(obj.meta_user.len(), obj2.meta_user.len()); @@ -3122,14 +3126,14 @@ mod test { #[test] fn test_version_statistics_accuracy() { - // 测试版本统计的准确性 + // Test accuracy of version statistics let mut fm = FileMeta::new(); - // 添加不同类型的版本 + // Add different version types let object_count = 3; let delete_count = 2; - // 添加对象版本 + // Add object versions for i in 0..object_count { let mut fi = crate::fileinfo::FileInfo::new(&format!("obj-{i}"), 2, 1); fi.version_id = Some(Uuid::new_v4()); @@ -3137,7 +3141,7 @@ mod test { fm.add_version(fi).unwrap(); } - // 添加删除标记 + // Add delete markers for i in 0..delete_count { let delete_marker = MetaDeleteMarker { version_id: Some(Uuid::new_v4()), 
@@ -3156,13 +3160,13 @@ mod test { fm.versions.push(shallow_version); } - // 验证统计准确性 + // Verify overall statistics let stats = fm.get_version_stats(); assert_eq!(stats.total_versions, object_count + delete_count); assert_eq!(stats.object_versions, object_count); assert_eq!(stats.delete_markers, delete_count); - // 验证详细统计 + // Verify detailed statistics let detailed_stats = fm.get_detailed_version_stats(); assert_eq!(detailed_stats.total_versions, object_count + delete_count); assert_eq!(detailed_stats.object_versions, object_count); @@ -3171,15 +3175,15 @@ mod test { #[test] fn test_cross_platform_compatibility() { - // 测试跨平台兼容性(字节序、路径分隔符等) + // Test cross-platform compatibility (endianness, separators, etc.) let mut fm = FileMeta::new(); - // 使用不同平台风格的路径 + // Use platform-specific path styles let paths = vec![ "unix/style/path", "windows\\style\\path", "mixed/style\\path", - "unicode/路径/测试", + "unicode/path/test", ]; for path in paths { @@ -3189,14 +3193,14 @@ mod test { fm.add_version(fi).unwrap(); } - // 验证序列化和反序列化在不同平台上的一致性 + // Verify serialization/deserialization consistency across platforms let data = fm.marshal_msg().unwrap(); let mut fm2 = FileMeta::default(); fm2.unmarshal_msg(&data).unwrap(); assert_eq!(fm.versions.len(), fm2.versions.len()); - // 验证 UUID 的字节序一致性 + // Verify UUID endianness consistency for (v1, v2) in fm.versions.iter().zip(fm2.versions.iter()) { assert_eq!(v1.header.version_id, v2.header.version_id); } @@ -3204,26 +3208,26 @@ mod test { #[test] fn test_data_integrity_validation() { - // 测试数据完整性验证 + // Test data integrity checks let mut fm = FileMeta::new(); - // 添加一个正常版本 + // Add a normal version let mut fi = crate::fileinfo::FileInfo::new("test", 2, 1); fi.version_id = Some(Uuid::new_v4()); fi.mod_time = Some(OffsetDateTime::now_utc()); fm.add_version(fi).unwrap(); - // 验证正常情况下的完整性 + // Verify integrity under normal conditions assert!(fm.validate_integrity().is_ok()); } #[test] fn test_version_merge_scenarios() { - // 测试版本合并的各种场景 + // Test various version merge scenarios let mut versions1 = vec![]; let mut versions2 = vec![]; - // 创建两组不同的版本 + // Create two distinct sets of versions for i in 0..3 { let mut fi1 = crate::fileinfo::FileInfo::new(&format!("test1-{i}"), 2, 1); fi1.version_id = Some(Uuid::new_v4()); @@ -3240,37 +3244,37 @@ mod test { versions2.push(FileMetaShallowVersion::try_from(version2).unwrap()); } - // 测试简单的合并场景 + // Test a simple merge scenario let merged = merge_file_meta_versions(1, false, 0, &[versions1.clone()]); - assert!(!merged.is_empty(), "单个版本列表的合并结果不应为空"); + assert!(!merged.is_empty(), "Merging a single version list should not be empty"); - // 测试多个版本列表的合并 + // Test merging multiple version lists let merged = merge_file_meta_versions(1, false, 0, &[versions1.clone(), versions2.clone()]); - // 合并结果可能为空,这取决于版本的兼容性,这是正常的 - println!("合并结果数量:{}", merged.len()); + // Merge results may be empty depending on compatibility, which is acceptable + println!("Merge result count: {}", merged.len()); } #[test] fn test_flags_operations() { - // 测试标志位操作 + // Test flag operations let flags = vec![Flags::FreeVersion, Flags::UsesDataDir, Flags::InlineData]; for flag in flags { let flag_value = flag as u8; - assert!(flag_value > 0, "标志位值应该大于 0"); + assert!(flag_value > 0, "Flag value should be greater than 0"); - // 测试标志位组合 + // Test flag combinations let combined = Flags::FreeVersion as u8 | Flags::UsesDataDir as u8; - // 对于位运算,组合值可能不总是大于单个值,这是正常的 - assert!(combined > 0, "组合标志位应该大于 0"); + // For bitwise operations, combined values may not exceed 
individual ones; this is normal + assert!(combined > 0, "Combined flag value should be greater than 0"); } } #[test] fn test_uuid_handling_edge_cases() { - // 测试 UUID 处理的边界情况 + // Test UUID edge cases let test_uuids = vec![ - Uuid::new_v4(), // 随机 UUID + Uuid::new_v4(), // Random UUID ]; for uuid in test_uuids { @@ -3280,7 +3284,7 @@ mod test { ..Default::default() }; - // 验证序列化和反序列化 + // Verify serialization and deserialization let data = obj.marshal_msg().unwrap(); let mut obj2 = MetaObject::default(); obj2.unmarshal_msg(&data).unwrap(); @@ -3289,7 +3293,7 @@ mod test { assert_eq!(obj.data_dir, obj2.data_dir); } - // 单独测试 nil UUID,因为它在序列化时会被转换为 None + // Test nil UUID separately because serialization converts it to None let obj = MetaObject { version_id: Some(Uuid::nil()), data_dir: Some(Uuid::nil()), @@ -3300,24 +3304,24 @@ mod test { let mut obj2 = MetaObject::default(); obj2.unmarshal_msg(&data).unwrap(); - // nil UUID 在序列化时可能被转换为 None,这是预期行为 - // 检查实际的序列化行为 - println!("原始 version_id: {:?}", obj.version_id); - println!("反序列化后 version_id: {:?}", obj2.version_id); - // 只要反序列化成功就认为测试通过 + // nil UUIDs may be converted to None during serialization; this is expected + // Inspect the actual serialization behavior + println!("Original version_id: {:?}", obj.version_id); + println!("Deserialized version_id: {:?}", obj2.version_id); + // Consider the test successful as long as deserialization succeeds } #[test] fn test_part_handling_edge_cases() { - // 测试分片处理的边界情况 + // Test edge cases for shard handling let mut obj = MetaObject::default(); - // 测试空分片列表 + // Test an empty shard list assert!(obj.part_numbers.is_empty()); assert!(obj.part_etags.is_empty()); assert!(obj.part_sizes.is_empty()); - // 测试单个分片 + // Test a single shard obj.part_numbers = vec![1]; obj.part_etags = vec!["etag1".to_string()]; obj.part_sizes = vec![1024]; @@ -3332,7 +3336,7 @@ mod test { assert_eq!(obj.part_sizes, obj2.part_sizes); assert_eq!(obj.part_actual_sizes, obj2.part_actual_sizes); - // 测试多个分片 + // Test multiple shards obj.part_numbers = vec![1, 2, 3]; obj.part_etags = vec!["etag1".to_string(), "etag2".to_string(), "etag3".to_string()]; obj.part_sizes = vec![1024, 2048, 512]; @@ -3350,7 +3354,7 @@ mod test { #[test] fn test_version_header_validation() { - // 测试版本头的验证功能 + // Test version header validation let mut header = FileMetaVersionHeader { version_type: VersionType::Object, mod_time: Some(OffsetDateTime::now_utc()), @@ -3360,27 +3364,27 @@ mod test { }; assert!(header.is_valid()); - // 测试无效的版本类型 + // Test invalid version types header.version_type = VersionType::Invalid; assert!(!header.is_valid()); - // 重置为有效状态 + // Reset to a valid state header.version_type = VersionType::Object; assert!(header.is_valid()); - // 测试无效的纠删码参数 - // 当 ec_m = 0 时,has_ec() 返回 false,所以不会检查纠删码参数 + // Test invalid erasure coding parameters + // When ec_m = 0, has_ec() returns false so parity parameters are skipped header.ec_m = 0; header.ec_n = 1; - assert!(header.is_valid()); // 这是有效的,因为没有启用纠删码 + assert!(header.is_valid()); // Valid because erasure coding is disabled - // 启用纠删码但参数无效 + // Enable erasure coding with invalid parameters header.ec_m = 2; header.ec_n = 0; - // 当 ec_n = 0 时,has_ec() 返回 false,所以不会检查纠删码参数 - assert!(header.is_valid()); // 这实际上是有效的,因为 has_ec() 返回 false + // When ec_n = 0, has_ec() returns false so parity parameters are skipped + assert!(header.is_valid()); // This remains valid because has_ec() returns false - // 重置为有效状态 + // Reset to a valid state header.ec_n = 1; assert!(header.is_valid()); } @@ -3405,7 +3409,7 
@@ mod test { obj.meta_user.insert(key.to_string(), value.to_string()); } - // 验证序列化和反序列化 + // Verify serialization and deserialization let data = obj.marshal_msg().unwrap(); let mut obj2 = MetaObject::default(); obj2.unmarshal_msg(&data).unwrap(); @@ -3451,7 +3455,7 @@ async fn test_read_xl_meta_no_data() { let filepath = "./test_xl.meta"; let mut file = File::create(filepath).await.unwrap(); - // 写入字符串 + // Write string data file.write_all(&buff).await.unwrap(); let mut f = File::open(filepath).await.unwrap(); diff --git a/crates/iam/src/cache.rs b/crates/iam/src/cache.rs index 22fc215b..c1b458aa 100644 --- a/crates/iam/src/cache.rs +++ b/crates/iam/src/cache.rs @@ -127,8 +127,8 @@ impl CacheInner { // todo!() // } - // /// 如果是临时用户,返回 Ok(Some(partent_name))) - // /// 如果不是临时用户,返回 Ok(None) + // /// Return Ok(Some(parent_name)) when the user is temporary. + // /// Return Ok(None) for non-temporary users. // fn is_temp_user(&self, user_name: &str) -> crate::Result> { // let user = self // .get_user(user_name) @@ -141,8 +141,8 @@ impl CacheInner { // } // } - // /// 如果是临时用户,返回 Ok(Some(partent_name))) - // /// 如果不是临时用户,返回 Ok(None) + // /// Return Ok(Some(parent_name)) when the user is a temporary identity. + // /// Return Ok(None) when the user is not temporary. // fn is_service_account(&self, user_name: &str) -> crate::Result> { // let user = self // .get_user(user_name) diff --git a/crates/notify/src/registry.rs b/crates/notify/src/registry.rs index 76f15936..9d649793 100644 --- a/crates/notify/src/registry.rs +++ b/crates/notify/src/registry.rs @@ -150,7 +150,7 @@ impl TargetRegistry { // Case 1: The format is _ // e.g., rest = "ENDPOINT_PRIMARY" -> field_name="ENDPOINT", instance_id="PRIMARY" Some(field) => (field.to_lowercase(), instance_id_part.to_lowercase()), - // Case 2: The format is (无 INSTANCE_ID) + // Case 2: The format is (without INSTANCE_ID) // e.g., rest = "ENABLE" -> field_name="ENABLE", instance_id="" (Universal configuration `_ DEFAULT_DELIMITER`) None => (instance_id_part.to_lowercase(), DEFAULT_DELIMITER.to_string()), }; diff --git a/crates/obs/src/metrics/entry/subsystem.rs b/crates/obs/src/metrics/entry/subsystem.rs index 0b081ae5..56413eb3 100644 --- a/crates/obs/src/metrics/entry/subsystem.rs +++ b/crates/obs/src/metrics/entry/subsystem.rs @@ -123,7 +123,7 @@ impl MetricSubsystem { // Debug related subsystems "/debug/go" => Self::DebugGo, - // 集群相关子系统 + // Cluster-related subsystems "/cluster/health" => Self::ClusterHealth, "/cluster/usage/objects" => Self::ClusterUsageObjects, "/cluster/usage/buckets" => Self::ClusterUsageBuckets, @@ -131,7 +131,7 @@ impl MetricSubsystem { "/cluster/iam" => Self::ClusterIam, "/cluster/config" => Self::ClusterConfig, - // 其他服务相关子系统 + // Other service-related subsystems "/ilm" => Self::Ilm, "/audit" => Self::Audit, "/logger/webhook" => Self::LoggerWebhook, @@ -139,7 +139,7 @@ impl MetricSubsystem { "/notification" => Self::Notification, "/scanner" => Self::Scanner, - // 其他路径作为自定义处理 + // Treat other paths as custom subsystems _ => Self::Custom(path.to_string()), } } diff --git a/crates/rio/src/hash_reader.rs b/crates/rio/src/hash_reader.rs index 03f13864..eb9e773f 100644 --- a/crates/rio/src/hash_reader.rs +++ b/crates/rio/src/hash_reader.rs @@ -727,14 +727,14 @@ mod tests { assert_eq!(final_data.len() as i64, actual_size); assert_eq!(&final_data, &data); } else { - // 如果没有压缩,直接比较解密后的数据 + // Without compression we can compare the decrypted bytes directly assert_eq!(decrypted_data.len() as i64, actual_size); assert_eq!(&decrypted_data, 
&data); } return; } - // 如果不加密,直接处理压缩/解压缩 + // When encryption is disabled, only handle compression/decompression if is_compress { let decompress_reader = DecompressReader::new(WarpReader::new(Cursor::new(compressed_data)), CompressionAlgorithm::Gzip); @@ -749,7 +749,7 @@ mod tests { assert_eq!(&compressed_data, &data); } - // 验证 etag(注意:压缩会改变数据,所以这里的 etag 验证可能需要调整) + // Validate the etag (compression alters the payload, so this may require adjustments) println!("Test completed successfully with compression: {is_compress}, encryption: {is_encrypt}"); } diff --git a/crates/utils/src/net.rs b/crates/utils/src/net.rs index 5769d654..94bb89e3 100644 --- a/crates/utils/src/net.rs +++ b/crates/utils/src/net.rs @@ -620,7 +620,7 @@ mod test { assert!(!is_socket_addr(&long_string)); // Test unicode characters - assert!(!is_socket_addr("测试.example.com")); + assert!(!is_socket_addr("пример.example.com")); // Test special characters assert!(!is_socket_addr("test@example.com:8080")); diff --git a/deploy/build/rustfs-zh.service b/deploy/build/rustfs-zh.service index b6add428..1cc3373b 100644 --- a/deploy/build/rustfs-zh.service +++ b/deploy/build/rustfs-zh.service @@ -1,49 +1,48 @@ [Unit] Description=RustFS Object Storage Server -# 定义服务的描述,说明这是一个 RustFS 对象存储服务器,显示在 systemctl status 中。 +# Describe the RustFS object storage service as shown in `systemctl status`. Documentation=https://rustfs.com/docs/ -# 提供服务的官方文档链接,方便管理员查阅,占位符需替换为实际 URL。 +# Provide a documentation link for operators. After=network-online.target -# 指定服务在 network-online.target(网络就绪)之后启动,确保网络可用。 +# Ensure the service starts only after the network is online. Wants=network-online.target -# 表示服务希望依赖 network-online.target,但不是强依赖,即使网络未就绪也尝试启动。 +# Express a soft dependency on `network-online.target` so we still attempt to start if the network is late. # If you're using a database, you'll need to add the corresponding dependencies -# 如果服务依赖数据库,可以添加数据库相关的依赖项(当前为注释,未启用)。 +# Uncomment these directives when a database is required. # After=postgresql.service -# 示例:若依赖 PostgreSQL,则在 PostgreSQL 服务后启动(当前未启用)。 +# Example: start after PostgreSQL when the dependency is needed. # Requires=postgresql.service -# 示例:若强制依赖 PostgreSQL,则要求其启动成功(当前未启用)。 +# Example: make PostgreSQL a hard requirement. [Service] Type=notify -# 服务类型为 notify,表示服务通过 sd_notify 通知 systemd 其状态(如就绪)。 +# Use the `notify` type so the process reports readiness via `sd_notify`. NotifyAccess=main -# 指定只有主进程可以发送通知给 systemd,避免子进程干扰。 +# Only the main process can send notifications back to systemd. User=rustfs -# 以 rustfs 用户身份运行服务,需预先创建此用户,提升安全性。 +# Run as the dedicated `rustfs` user (create it ahead of time for security). Group=rustfs -# 以 rustfs 组身份运行服务,与 User 配合使用。 +# Use the matching `rustfs` group. # working directory WorkingDirectory=/opt/rustfs -# 设置服务的工作目录为 /opt/rustfs,影响相对路径的解析。 +# Set the working directory so relative paths resolve consistently. -# 定义环境变量配置,用于传递给服务程序。 +# Inline environment variables for authentication. Environment=RUSTFS_ACCESS_KEY=rustfsadmin -# 设置访问密钥为 rustfsadmin,用于 RustFS 的认证。 +# Access key used by RustFS authentication. Environment=RUSTFS_SECRET_KEY=rustfsadmin -# 设置秘密密钥为 rustfsadmin,与访问密钥配套使用。 +# Secret key that pairs with the access key. 
ExecStart=/usr/local/bin/rustfs \ --address 0.0.0.0:9000 \ --volumes /data/rustfs/vol1,/data/rustfs/vol2 \ --console-enable -# 定义启动命令,运行 /usr/local/bin/rustfs,带参数: -# --address 0.0.0.0:9000:服务监听所有接口的 9000 端口。 -# --volumes:指定存储卷路径为 /data/rustfs/vol1 和 /data/rustfs/vol2。 -# --console-enable:启用控制台功能。 +# Launch RustFS with common arguments: +# --address 0.0.0.0:9000 listens on every interface. +# --volumes mounts /data/rustfs/vol1 and /data/rustfs/vol2. +# --console-enable turns on the management console. -# 定义环境变量配置,用于传递给服务程序,推荐使用且简洁 -# rustfs 示例文件 详见: `../config/rustfs-zh.env` +# Optionally load additional environment variables (see ../config/rustfs-zh.env). EnvironmentFile=-/etc/default/rustfs ExecStart=/usr/local/bin/rustfs $RUSTFS_VOLUMES $RUSTFS_OPTS @@ -53,50 +52,50 @@ StandardError=append:/data/deploy/rust/logs/rustfs-err.log # resource constraints LimitNOFILE=1048576 -# 设置文件描述符上限为 1048576,支持高并发连接。 +# Allow up to 1,048,576 file descriptors for high concurrency. LimitNPROC=32768 -# 设置进程数上限为 32768,限制子进程数量。 +# Cap the number of processes at 32,768. TasksMax=infinity -# 允许服务创建无限数量的线程(谨慎使用,可能耗尽资源)。 +# Permit unlimited tasks (use carefully to avoid resource exhaustion). # restart the policy Restart=always -# 服务异常退出时总是重启,提高可用性。 +# Always restart the service on failure to improve availability. RestartSec=10s -# 重启前等待 10 秒,避免频繁重启导致资源浪费。 +# Wait 10 seconds between restart attempts. # graceful exit configuration TimeoutStartSec=30s -# 启动超时时间为 30 秒,若超时则认为启动失败。 +# Treat startups that exceed 30 seconds as failures. TimeoutStopSec=30s -# 停止超时时间为 30 秒,若超时则强制停止。 +# Force-stop the service if it does not exit within 30 seconds. # security settings NoNewPrivileges=true -# 禁止服务提升权限,增强安全性。 +# Disable privilege escalation. ProtectSystem=full -# 保护系统目录(如 /usr、/boot、/etc)为只读,防止服务修改。 +# Mount critical system directories read-only. ProtectHome=true -# 保护用户主目录(如 /home、/root),禁止服务访问。 +# Prevent access to user home directories. PrivateTmp=true -# 为服务提供私有 /tmp 目录,隔离临时文件。 +# Provide a private /tmp namespace. PrivateDevices=true -# 禁止服务访问硬件设备(如 /dev),提升安全性。 +# Deny direct hardware device access. ProtectClock=true -# 保护系统时钟,禁止服务修改时间。 +# Block modifications to the system clock. ProtectKernelTunables=true -# 保护内核参数(/proc/sys),禁止服务修改。 +# Protect /proc/sys kernel tunables. ProtectKernelModules=true -# 禁止服务加载或卸载内核模块。 +# Prevent kernel module load/unload. ProtectControlGroups=true -# 保护控制组(cgroups),禁止服务修改。 +# Block cgroup modifications. RestrictSUIDSGID=true -# 禁止服务使用 SUID/SGID 文件,提升安全性。 +# Disallow SUID/SGID binaries. RestrictRealtime=true -# 禁止服务使用实时调度,防止资源滥用。 +# Disallow real-time scheduling. ReadWritePaths=/data/rustfs -# 允许服务对 /data/rustfs 目录读写,限制其他路径访问。 +# Grant read/write access only to /data/rustfs. [Install] WantedBy=multi-user.target -# 服务在多用户模式下自动启动,配合 systemctl enable 使用。 \ No newline at end of file +# Enable the service in multi-user mode (compatible with `systemctl enable`). \ No newline at end of file diff --git a/deploy/build/rustfs.run-zh.md b/deploy/build/rustfs.run-zh.md index 9d70eaf8..ae4ae56c 100644 --- a/deploy/build/rustfs.run-zh.md +++ b/deploy/build/rustfs.run-zh.md @@ -1,89 +1,89 @@ -# RustFS 服务安装配置教程 +# RustFS Service Installation Guide -## 1. 准备工作 +## 1. 
Preparation -### 1.1 创建系统用户 +### 1.1 Create a system user ```bash -# 创建 rustfs 系统用户和用户组,禁止登录shell +# Create the rustfs system user and group without shell access sudo useradd -r -s /sbin/nologin rustfs ``` -### 1.2 创建必要目录 +### 1.2 Create required directories ```bash -# 创建程序目录 +# Application directory sudo mkdir -p /opt/rustfs -# 创建数据目录 +# Data directories sudo mkdir -p /data/rustfs/{vol1,vol2} -# 创建配置目录 +# Configuration directory sudo mkdir -p /etc/rustfs -# 设置目录权限 +# Assign ownership and permissions sudo chown -R rustfs:rustfs /opt/rustfs /data/rustfs sudo chmod 755 /opt/rustfs /data/rustfs ``` -## 2. 安装 RustFS +## 2. Install RustFS ```bash -# 复制 RustFS 二进制文件 +# Copy the RustFS binary sudo cp rustfs /usr/local/bin/ sudo chmod +x /usr/local/bin/rustfs -# 复制配置文件 +# Copy configuration files sudo cp obs.yaml /etc/rustfs/ sudo chown -R rustfs:rustfs /etc/rustfs ``` -## 3. 配置 Systemd 服务 +## 3. Configure the systemd service ```bash -# 复制服务单元文件 +# Install the service unit sudo cp rustfs.service /etc/systemd/system/ -# 重新加载 systemd 配置 +# Reload systemd units sudo systemctl daemon-reload ``` -## 4. 服务管理 +## 4. Service management -### 4.1 启动服务 +### 4.1 Start the service ```bash sudo systemctl start rustfs ``` -### 4.2 查看服务状态 +### 4.2 Check service status ```bash sudo systemctl status rustfs ``` -### 4.3 启用开机自启 +### 4.3 Enable auto-start on boot ```bash sudo systemctl enable rustfs ``` -### 4.4 查看服务日志 +### 4.4 Inspect logs ```bash -# 查看实时日志 +# Follow live logs sudo journalctl -u rustfs -f -# 查看今天的日志 +# View today's logs sudo journalctl -u rustfs --since today ``` -## 5. 验证安装 +## 5. Validate the installation ```bash -# 检查服务端口 +# Confirm the service port ss -tunlp | grep 9000 -# 测试服务可用性 +# Verify availability curl -I http://localhost:9000 ``` diff --git a/docs/PERFORMANCE_TESTING.md b/docs/PERFORMANCE_TESTING.md index a980cd04..0fff3a51 100644 --- a/docs/PERFORMANCE_TESTING.md +++ b/docs/PERFORMANCE_TESTING.md @@ -1,139 +1,141 @@ -# RustFS 性能测试指南 +# RustFS Performance Testing Guide -本文档提供了对 RustFS 进行性能测试和性能分析的完整方法和工具。 +This document describes the recommended tools and workflows for benchmarking RustFS and analyzing performance bottlenecks. -## 概览 +## Overview -RustFS 提供了多种性能测试和分析工具: +RustFS exposes several complementary tooling options: -1. **性能分析(Profiling)** - 使用内置的 pprof 接口收集 CPU 性能数据 -2. **负载测试(Load Testing)** - 使用多种客户端工具模拟高并发请求 -3. **监控和分析** - 查看性能指标和识别性能瓶颈 +1. **Profiling** – collect CPU samples through the built-in `pprof` endpoints. +2. **Load testing** – drive concurrent requests with dedicated client utilities. +3. **Monitoring and analysis** – inspect collected metrics to locate hotspots. -## 前置条件 +## Prerequisites -### 1. 启用性能分析 +### 1. Enable profiling support -在启动 RustFS 时,需要设置环境变量启用性能分析功能: +Set the profiling environment variable before launching RustFS: ```bash export RUSTFS_ENABLE_PROFILING=true ./rustfs ``` -### 2. 安装依赖工具 +### 2. 
Install required tooling -确保系统中安装了以下工具: +Make sure the following dependencies are available: ```bash -# 基础工具 -curl # HTTP 请求 -jq # JSON 处理 (可选) +# Base tools +curl # HTTP requests +jq # JSON processing (optional) -# 分析工具 -go # Go pprof 工具 (可选,用于 protobuf 格式) -python3 # Python 负载测试脚本 +# Analysis tools +go # Go pprof CLI (optional, required for protobuf output) +python3 # Python load-testing scripts -# macOS 用户 +# macOS users brew install curl jq go python3 -# Ubuntu/Debian 用户 +# Ubuntu/Debian users sudo apt-get install curl jq golang-go python3 ``` -## 性能测试方法 +## Performance Testing Methods -### 方法 1:使用专业脚本(推荐) +### Method 1: Use the dedicated profiling script (recommended) -项目提供了完整的性能分析脚本: +The repository ships with a helper script for common profiling flows: ```bash -# 查看脚本帮助 +# Show command help ./scripts/profile_rustfs.sh help -# 检查性能分析状态 +# Check profiler status ./scripts/profile_rustfs.sh status -# 收集火焰图(30秒) +# Capture a 30 second flame graph ./scripts/profile_rustfs.sh flamegraph -# 收集 protobuf 格式性能数据 +# Download protobuf-formatted samples ./scripts/profile_rustfs.sh protobuf -# 收集两种格式的性能数据 +# Collect both formats ./scripts/profile_rustfs.sh both -# 自定义参数 +# Provide custom arguments ./scripts/profile_rustfs.sh -d 60 -u http://192.168.1.100:9000 both ``` -### 方法 2:使用 Python 综合测试 +### Method 2: Run the Python end-to-end tester -Python 脚本提供了负载测试和性能分析的一体化解决方案: +A Python utility combines background load generation with profiling: ```bash -# 运行综合性能分析 +# Launch the integrated test harness python3 test_load.py ``` -此脚本会: -1. 启动后台负载测试(多线程 S3 操作) -2. 并行收集性能分析数据 -3. 生成火焰图用于分析 +The script will: -### 方法 3:使用简单负载测试 +1. Launch multi-threaded S3 operations as load. +2. Pull profiling samples in parallel. +3. Produce a flame graph for investigation. -对于快速测试,可以使用 bash 脚本: +### Method 3: Simple shell-based load test + +For quick smoke checks, a lightweight bash script is also provided: ```bash -# 运行简单负载测试 +# Execute a lightweight benchmark ./simple_load_test.sh ``` -## 性能分析输出格式 +## Profiling Output Formats -### 1. 火焰图(SVG 格式) +### 1. Flame graph (SVG) -- **用途**: 可视化 CPU 使用情况 -- **文件**: `rustfs_profile_TIMESTAMP.svg` -- **查看方式**: 使用浏览器打开 SVG 文件 -- **分析要点**: - - 宽度表示 CPU 使用时间 - - 高度表示调用栈深度 - - 点击可以放大特定函数 +- **Purpose**: Visualize CPU time distribution. +- **File name**: `rustfs_profile_TIMESTAMP.svg` +- **How to view**: Open the SVG in a browser. +- **Interpretation tips**: + - Width reflects CPU time per function. + - Height illustrates call-stack depth. + - Click to zoom into specific frames. ```bash -# 在浏览器中打开 +# Example: open the file in a browser open profiles/rustfs_profile_20240911_143000.svg ``` -### 2. Protobuf 格式 +### 2. Protobuf samples -- **用途**: 使用 Go pprof 工具进行详细分析 -- **文件**: `rustfs_profile_TIMESTAMP.pb` -- **分析工具**: `go tool pprof` +- **Purpose**: Feed data to the `go tool pprof` command. 
+- **File name**: `rustfs_profile_TIMESTAMP.pb` +- **Tooling**: `go tool pprof` ```bash -# 使用 Go pprof 分析 +# Analyze the protobuf output go tool pprof profiles/rustfs_profile_20240911_143000.pb -# pprof 常用命令 -(pprof) top # 显示 CPU 使用率最高的函数 -(pprof) list func # 显示指定函数的源代码 -(pprof) web # 生成 web 界面(需要 graphviz) -(pprof) png # 生成 PNG 图片 -(pprof) help # 查看所有命令 +# Common pprof commands +(pprof) top # Show hottest call sites +(pprof) list func # Display annotated source for a function +(pprof) web # Launch the web UI (requires graphviz) +(pprof) png # Render a PNG flame chart +(pprof) help # List available commands ``` -## API 接口使用 +## API Usage -### 检查性能分析状态 +### Check profiling status ```bash curl "http://127.0.0.1:9000/rustfs/admin/debug/pprof/status" ``` -返回示例: +Sample response: + ```json { "enabled": "true", @@ -141,186 +143,187 @@ curl "http://127.0.0.1:9000/rustfs/admin/debug/pprof/status" } ``` -### 收集性能数据 +### Capture profiling data ```bash -# 收集 30 秒的火焰图 +# Fetch a 30-second flame graph curl "http://127.0.0.1:9000/rustfs/admin/debug/pprof/profile?seconds=30&format=flamegraph" \ -o profile.svg -# 收集 protobuf 格式数据 +# Fetch protobuf output curl "http://127.0.0.1:9000/rustfs/admin/debug/pprof/profile?seconds=30&format=protobuf" \ -o profile.pb ``` -**参数说明**: -- `seconds`: 收集时长(1-300 秒) -- `format`: 输出格式(`flamegraph`/`svg` 或 `protobuf`/`pb`) +**Parameters** +- `seconds`: Duration between 1 and 300 seconds. +- `format`: Output format (`flamegraph`/`svg` or `protobuf`/`pb`). -## 负载测试场景 +## Load Testing Scenarios -### 1. S3 API 负载测试 +### 1. S3 API workload -使用 Python 脚本进行完整的 S3 操作负载测试: +Use the Python harness to exercise a complete S3 workflow: ```python -# 基本配置 +# Basic configuration tester = S3LoadTester( endpoint="http://127.0.0.1:9000", - access_key="rustfsadmin", + access_key="rustfsadmin", secret_key="rustfsadmin" ) -# 运行负载测试 -# 4 个线程,每个线程执行 10 次操作 +# Execute the load test +# Four threads, ten operations each tester.run_load_test(num_threads=4, operations_per_thread=10) ``` -每次操作包括: -1. 上传 1MB 对象 -2. 下载对象 -3. 删除对象 +Each iteration performs: +1. Upload a 1 MB object. +2. Download the object. +3. Delete the object. -### 2. 自定义负载测试 +### 2. Custom load scenarios ```bash -# 创建测试桶 +# Create a test bucket curl -X PUT "http://127.0.0.1:9000/test-bucket" -# 并发上传测试 +# Concurrent uploads for i in {1..10}; do echo "test data $i" | curl -X PUT "http://127.0.0.1:9000/test-bucket/object-$i" -d @- & done wait -# 并发下载测试 +# Concurrent downloads for i in {1..10}; do curl "http://127.0.0.1:9000/test-bucket/object-$i" > /dev/null & done wait ``` -## 性能分析最佳实践 +## Profiling Best Practices -### 1. 测试环境准备 +### 1. Environment preparation -- 确保 RustFS 已启用性能分析: `RUSTFS_ENABLE_PROFILING=true` -- 使用独立的测试环境,避免其他程序干扰 -- 确保有足够的磁盘空间存储分析文件 +- Confirm that `RUSTFS_ENABLE_PROFILING=true` is set. +- Use an isolated benchmark environment to avoid interference. +- Reserve disk space for generated profile artifacts. -### 2. 数据收集建议 +### 2. Data collection tips -- **预热阶段**: 先运行 5-10 分钟的轻量负载 -- **数据收集**: 在稳定负载下收集 30-60 秒的性能数据 -- **多次采样**: 收集多个样本进行对比分析 +- **Warm-up**: Run a light workload for 5–10 minutes before sampling. +- **Sampling window**: Capture 30–60 seconds under steady load. +- **Multiple samples**: Take several runs to compare results. -### 3. 分析重点 +### 3. Analysis focus areas -在火焰图中重点关注: +When inspecting flame graphs, pay attention to: -1. **宽度最大的函数** - CPU 使用时间最长 -2. **平顶函数** - 可能的性能瓶颈 -3. **深度调用栈** - 可能的递归或复杂逻辑 -4. **意外的系统调用** - I/O 或内存分配问题 +1. **The widest frames** – most CPU time consumed. +2. 
**Flat plateaus** – likely bottlenecks. +3. **Deep call stacks** – recursion or complex logic. +4. **Unexpected syscalls** – I/O stalls or allocation churn. -### 4. 常见性能问题 +### 4. Common issues -- **锁竞争**: 查找 `std::sync` 相关函数 -- **内存分配**: 查找 `alloc` 相关函数 -- **I/O 等待**: 查找文件系统或网络 I/O 函数 -- **序列化开销**: 查找 JSON/XML 解析函数 +- **Lock contention**: Investigate frames under `std::sync`. +- **Memory allocation**: Search for `alloc`-related frames. +- **I/O wait**: Review filesystem or network call stacks. +- **Serialization overhead**: Look for JSON/XML parsing hotspots. -## 故障排除 +## Troubleshooting -### 1. 性能分析未启用 +### 1. Profiling disabled -错误信息:`{"enabled":"false"}` +Error: `{"enabled":"false"}` + +**Fix**: -解决方案: ```bash export RUSTFS_ENABLE_PROFILING=true -# 重启 RustFS +# Restart RustFS ``` -### 2. 连接被拒绝 +### 2. Connection refused -错误信息:`Connection refused` +Error: `Connection refused` -检查项: -- RustFS 是否正在运行 -- 端口是否正确(默认 9000) -- 防火墙设置 +**Checklist**: +- Confirm RustFS is running. +- Ensure the port number is correct (default 9000). +- Verify firewall rules. -### 3. 分析文件过大 +### 3. Oversized profile output -如果生成的分析文件过大: -- 减少收集时间(如 15-30 秒) -- 降低负载测试的并发度 -- 使用 protobuf 格式而非 SVG +If artifacts become too large: +- Shorten the capture window (e.g., 15–30 seconds). +- Reduce load-test concurrency. +- Prefer protobuf output instead of SVG. -## 配置参数 +## Configuration Parameters -### 环境变量 +### Environment variables -| 变量 | 默认值 | 描述 | +| Variable | Default | Description | |------|--------|------| -| `RUSTFS_ENABLE_PROFILING` | `false` | 启用性能分析 | -| `RUSTFS_URL` | `http://127.0.0.1:9000` | RustFS 服务器地址 | -| `PROFILE_DURATION` | `30` | 性能数据收集时长(秒) | -| `OUTPUT_DIR` | `./profiles` | 输出文件目录 | +| `RUSTFS_ENABLE_PROFILING` | `false` | Enable profiling support | +| `RUSTFS_URL` | `http://127.0.0.1:9000` | RustFS endpoint | +| `PROFILE_DURATION` | `30` | Profiling duration in seconds | +| `OUTPUT_DIR` | `./profiles` | Output directory | -### 脚本参数 +### Script arguments ```bash ./scripts/profile_rustfs.sh [OPTIONS] [COMMAND] OPTIONS: -u, --url URL RustFS URL - -d, --duration SECONDS Profile duration + -d, --duration SECONDS Profile duration -o, --output DIR Output directory COMMANDS: - status 检查状态 - flamegraph 收集火焰图 - protobuf 收集 protobuf 数据 - both 收集两种格式(默认) + status Check profiler status + flamegraph Collect a flame graph + protobuf Collect protobuf samples + both Collect both formats (default) ``` -## 输出文件位置 +## Output Locations -- **脚本输出**: `./profiles/` 目录 -- **Python 脚本**: `/tmp/rustfs_profiles/` 目录 -- **文件命名**: `rustfs_profile_TIMESTAMP.{svg|pb}` +- **Script output**: `./profiles/` +- **Python script**: `/tmp/rustfs_profiles/` +- **File naming**: `rustfs_profile_TIMESTAMP.{svg|pb}` -## 示例工作流程 +## Example Workflow -1. **启动 RustFS**: +1. **Launch RustFS** ```bash RUSTFS_ENABLE_PROFILING=true ./rustfs ``` -2. **验证性能分析可用**: +2. **Verify profiling availability** ```bash ./scripts/profile_rustfs.sh status ``` -3. **开始负载测试**: +3. **Start a load test** ```bash python3 test_load.py & ``` -4. **收集性能数据**: +4. **Collect samples** ```bash ./scripts/profile_rustfs.sh -d 60 both ``` -5. **分析结果**: +5. **Inspect the results** ```bash - # 查看火焰图 + # Review the flame graph open profiles/rustfs_profile_*.svg - - # 或使用 pprof 分析 + + # Or analyze the protobuf output go tool pprof profiles/rustfs_profile_*.pb ``` -通过这个完整的性能测试流程,你可以系统地分析 RustFS 的性能特征,识别瓶颈,并进行有针对性的优化。 \ No newline at end of file +Following this workflow helps you understand RustFS performance characteristics, locate bottlenecks, and implement targeted optimizations. 
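
As a rough illustration of the workflow above, the following TypeScript sketch automates the "verify profiling" and "collect samples" steps against the documented `/rustfs/admin/debug/pprof` endpoints. It is a minimal sketch, not part of the patch: the base URL, the Node 18+ built-in `fetch`, and the output file name are assumptions.

```typescript
// Sketch: automate the status check and flame-graph capture described above.
// Assumes Node 18+ (built-in fetch) and a RustFS instance started with
// RUSTFS_ENABLE_PROFILING=true; base URL and output name are illustrative.
import { writeFile } from "node:fs/promises";

const BASE = process.env.RUSTFS_URL ?? "http://127.0.0.1:9000";

async function captureFlamegraph(seconds = 30): Promise<string> {
  // Step 2 of the workflow: confirm the profiler is enabled.
  const status = await fetch(`${BASE}/rustfs/admin/debug/pprof/status`).then(r => r.json());
  if (status.enabled !== "true") {
    throw new Error("profiling disabled; restart RustFS with RUSTFS_ENABLE_PROFILING=true");
  }

  // Step 4: collect a flame graph (1-300 seconds per the API reference).
  const res = await fetch(`${BASE}/rustfs/admin/debug/pprof/profile?seconds=${seconds}&format=flamegraph`);
  if (!res.ok) throw new Error(`profile request failed: ${res.status}`);

  const out = `rustfs_profile_${Date.now()}.svg`;
  await writeFile(out, Buffer.from(await res.arrayBuffer()));
  return out;
}

captureFlamegraph(60).then(f => console.log(`profile written to ${f}`), console.error);
```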
diff --git a/docs/README.md b/docs/README.md index d9b2decb..142e3182 100644 --- a/docs/README.md +++ b/docs/README.md @@ -1,239 +1,239 @@ -# RustFS 文档中心 +# RustFS Documentation Center -欢迎来到 RustFS 分布式文件系统文档中心! +Welcome to the RustFS distributed file system documentation center! -## 📚 文档导航 +## 📚 Documentation Navigation -### 🔐 KMS (密钥管理服务) +### 🔐 KMS (Key Management Service) -RustFS KMS 提供企业级密钥管理和数据加密服务。 +RustFS KMS delivers enterprise-grade key management and data encryption. -| 文档 | 描述 | 适用场景 | +| Document | Description | Audience | |------|------|----------| -| [KMS 使用指南](./kms/README.md) | 完整的 KMS 使用文档,包含快速开始、配置和部署 | 所有用户必读 | -| [HTTP API 接口](./kms/http-api.md) | HTTP REST API 接口文档和使用示例 | 管理员和运维 | -| [编程 API 接口](./kms/api.md) | Rust 库编程接口和代码示例 | 开发者集成 | -| [配置参考](./kms/configuration.md) | 完整的配置选项和环境变量说明 | 系统管理员 | -| [故障排除](./kms/troubleshooting.md) | 常见问题诊断和解决方案 | 运维人员 | -| [安全指南](./kms/security.md) | 安全最佳实践和合规指导 | 安全架构师 | +| [KMS User Guide](./kms/README.md) | Comprehensive KMS guide with quick start, configuration, and deployment steps | Required reading for all users | +| [HTTP API Reference](./kms/http-api.md) | HTTP REST API reference with usage examples | Administrators and operators | +| [Programming API Reference](./kms/api.md) | Rust library APIs and code samples | Developers | +| [Configuration Reference](./kms/configuration.md) | Complete configuration options and environment variables | System administrators | +| [Troubleshooting](./kms/troubleshooting.md) | Diagnosis tips and solutions for common issues | Operations engineers | +| [Security Guide](./kms/security.md) | Security best practices and compliance guidance | Security architects | -## 🚀 快速开始 +## 🚀 Quick Start -### 1. KMS 5分钟快速部署 +### 1. Deploy KMS in 5 Minutes -**生产环境(使用 Vault)** +**Production (Vault backend)** ```bash -# 1. 启用 Vault 功能编译 +# 1. Enable the Vault feature flag cargo build --features vault --release -# 2. 配置环境变量 +# 2. Configure environment variables export RUSTFS_VAULT_ADDRESS=https://vault.company.com:8200 export RUSTFS_VAULT_TOKEN=hvs.CAESIJ... -# 3. 启动服务 +# 3. Launch the service ./target/release/rustfs server ``` -**开发测试(使用本地后端)** +**Development & Testing (Local backend)** ```bash -# 1. 编译测试版本 +# 1. Build a release binary cargo build --release -# 2. 配置本地存储 +# 2. Configure local storage export RUSTFS_KMS_BACKEND=Local export RUSTFS_KMS_LOCAL_KEY_DIR=/tmp/rustfs-keys -# 3. 启动服务 +# 3. Launch the service ./target/release/rustfs server ``` -### 2. S3 兼容加密 +### 2. 
S3-Compatible Encryption ```bash -# 上传加密文件 +# Upload an encrypted object curl -X PUT https://rustfs.company.com/bucket/sensitive.txt \ -H "x-amz-server-side-encryption: AES256" \ --data-binary @sensitive.txt -# 自动解密下载 +# Download with automatic decryption curl https://rustfs.company.com/bucket/sensitive.txt ``` -## 🏗️ 架构概览 +## 🏗️ Architecture Overview -### KMS 三层安全架构 +### Three-Layer KMS Security Architecture ``` ┌─────────────────────────────────────────────────┐ -│ 应用层 │ +│ Application Layer │ │ ┌─────────────┐ ┌─────────────┐ │ -│ │ S3 API │ │ REST API │ │ +│ │ S3 API │ │ REST API │ │ │ └─────────────┘ └─────────────┘ │ ├─────────────────────────────────────────────────┤ -│ 加密层 │ -│ ┌─────────────┐ 加密 ┌─────────────────┐ │ -│ │ 对象数据 │ ◄───► │ 数据密钥 (DEK) │ │ -│ └─────────────┘ └─────────────────┘ │ +│ Encryption Layer │ +│ ┌─────────────┐ Encrypt ┌─────────────────┐ │ +│ │ Object Data │ ◄──────► │ Data Key (DEK) │ │ +│ └─────────────┘ └─────────────────┘ │ ├─────────────────────────────────────────────────┤ -│ 密钥管理层 │ -│ ┌─────────────────┐ 加密 ┌──────────────┐ │ -│ │ 数据密钥 (DEK) │ ◄────│ 主密钥 │ │ -│ └─────────────────┘ │ (Vault/HSM) │ │ -│ └──────────────┘ │ +│ Key Management Layer │ +│ ┌─────────────────┐ Encrypt ┌──────────────┐ │ +│ │ Data Key (DEK) │ ◄───────│ Master Key │ │ +│ └─────────────────┘ │ (Vault/HSM) │ │ +│ └──────────────┘ │ └─────────────────────────────────────────────────┘ ``` -### 核心特性 +### Key Features -- ✅ **多层加密**: Master Key → DEK → Object Data -- ✅ **高性能**: 1MB 流式加密,支持大文件 -- ✅ **多后端**: Vault (生产) + Local (测试) -- ✅ **S3 兼容**: 支持标准 SSE-S3/SSE-KMS 头 -- ✅ **企业级**: 审计、监控、合规支持 +- ✅ **Multi-layer encryption**: Master Key → DEK → Object Data +- ✅ **High performance**: 1 MB streaming encryption with large file support +- ✅ **Multiple backends**: Vault (production) + Local (testing) +- ✅ **S3 compatibility**: Supports standard SSE-S3/SSE-KMS headers +- ✅ **Enterprise-ready**: Auditing, monitoring, and compliance features -## 📖 学习路径 +## 📖 Learning Paths -### 👨‍💻 开发者 +### 👨‍💻 Developers -1. 阅读 [编程 API 接口](./kms/api.md) 了解 Rust 库使用 -2. 查看代码示例学习集成方法 -3. 参考 [故障排除](./kms/troubleshooting.md) 解决问题 +1. Read the [Programming API Reference](./kms/api.md) to learn the Rust library +2. Review the sample code to understand integration patterns +3. Consult [Troubleshooting](./kms/troubleshooting.md) when issues occur -### 👨‍💼 系统管理员 +### 👨‍💼 System Administrators -1. 从 [KMS 使用指南](./kms/README.md) 开始 -2. 学习 [HTTP API 接口](./kms/http-api.md) 进行管理 -3. 详细阅读 [配置参考](./kms/configuration.md) -4. 设置监控和日志 +1. Start with the [KMS User Guide](./kms/README.md) +2. Learn the [HTTP API Reference](./kms/http-api.md) for management tasks +3. Study the [Configuration Reference](./kms/configuration.md) in depth +4. Configure monitoring and logging -### 👨‍🔧 运维工程师 +### 👨‍🔧 Operations Engineers -1. 熟悉 [HTTP API 接口](./kms/http-api.md) 进行日常管理 -2. 掌握 [故障排除](./kms/troubleshooting.md) 技能 -3. 了解 [安全指南](./kms/security.md) 要求 -4. 建立运维流程 +1. Become familiar with the [HTTP API Reference](./kms/http-api.md) for day-to-day work +2. Master the [Troubleshooting](./kms/troubleshooting.md) procedures +3. Understand the requirements in the [Security Guide](./kms/security.md) +4. Establish operational runbooks -### 🔒 安全架构师 +### 🔒 Security Architects -1. 深入学习 [安全指南](./kms/security.md) -2. 评估威胁模型和风险 -3. 制定安全策略 +1. Dive into the [Security Guide](./kms/security.md) +2. Evaluate threat models and risk posture +3. Define security policies -## 🤝 贡献指南 +## 🤝 Contribution Guide -我们欢迎社区贡献! +We welcome community contributions! 
-### 文档贡献 +### Documentation Contributions ```bash -# 1. Fork 项目 +# 1. Fork the repository git clone https://github.com/your-username/rustfs.git -# 2. 创建文档分支 +# 2. Create a documentation branch git checkout -b docs/improve-kms-guide -# 3. 编辑文档 -# 编辑 docs/kms/ 下的 Markdown 文件 +# 3. Edit the documentation +# Update Markdown files under docs/kms/ -# 4. 提交更改 +# 4. Commit the changes git add docs/ git commit -m "docs: improve KMS configuration examples" -# 5. 创建 Pull Request +# 5. Open a Pull Request gh pr create --title "Improve KMS documentation" ``` -### 文档规范 +### Documentation Guidelines -- 使用清晰的标题和结构 -- 提供可运行的代码示例 -- 包含适当的警告和提示 -- 支持多种使用场景 -- 保持内容最新 +- Use clear headings and structure +- Provide runnable code examples +- Include warnings and tips where appropriate +- Support multiple usage scenarios +- Keep the content up to date -## 📞 支持与反馈 +## 📞 Support & Feedback -### 获取帮助 +### Getting Help - **GitHub Issues**: https://github.com/rustfs/rustfs/issues -- **讨论区**: https://github.com/rustfs/rustfs/discussions -- **文档问题**: 在相关文档页面创建 Issue -- **安全问题**: security@rustfs.com +- **Discussion Forum**: https://github.com/rustfs/rustfs/discussions +- **Documentation Questions**: Open an issue on the relevant document +- **Security Concerns**: security@rustfs.com -### 问题报告模板 +### Issue Reporting Template -报告问题时请提供: +When reporting a problem, please provide: ```markdown -**环境信息** -- RustFS 版本: v1.0.0 -- 操作系统: Ubuntu 20.04 -- Rust 版本: 1.75.0 +**Environment** +- RustFS version: v1.0.0 +- Operating system: Ubuntu 20.04 +- Rust version: 1.75.0 -**问题描述** -简要描述遇到的问题... +**Issue Description** +Summarize the problem you encountered... -**重现步骤** -1. 步骤一 -2. 步骤二 -3. 步骤三 +**Reproduction Steps** +1. Step one +2. Step two +3. Step three -**期望行为** -描述期望的正确行为... +**Expected Behavior** +Describe what you expected to happen... -**实际行为** -描述实际发生的情况... +**Actual Behavior** +Describe what actually happened... -**相关日志** +**Relevant Logs** ```bash -# 粘贴相关日志 +# Paste relevant log excerpts ``` -**附加信息** -其他可能有用的信息... +**Additional Information** +Any other details that may help... ``` -## 📈 版本历史 +## 📈 Release History -| 版本 | 发布日期 | 主要特性 | +| Version | Release Date | Highlights | |------|----------|----------| -| v1.0.0 | 2024-01-15 | 🎉 首个正式版本,完整 KMS 功能 | -| v0.9.0 | 2024-01-01 | 🔐 KMS 系统重构,性能优化 | -| v0.8.0 | 2023-12-15 | ⚡ 流式加密,1MB 块大小优化 | +| v1.0.0 | 2024-01-15 | 🎉 First official release with full KMS functionality | +| v0.9.0 | 2024-01-01 | 🔐 KMS system refactor with performance optimizations | +| v0.8.0 | 2023-12-15 | ⚡ Streaming encryption with 1 MB block size tuning | -## 🗺️ 开发路线图 +## 🗺️ Roadmap -### 即将发布 (v1.1.0) +### Coming Soon (v1.1.0) -- [ ] 密钥自动轮转 -- [ ] HSM 集成支持 -- [ ] Web UI 管理界面 -- [ ] 更多合规性支持 (SOC2, HIPAA) +- [ ] Automatic key rotation +- [ ] HSM integration support +- [ ] Web UI management console +- [ ] Additional compliance support (SOC2, HIPAA) -### 长期规划 +### Long-Term Plans -- [ ] 多租户密钥隔离 -- [ ] 密钥导入/导出工具 -- [ ] 性能基准测试套件 +- [ ] Multi-tenant key isolation +- [ ] Key import/export tooling +- [ ] Performance benchmarking suite - [ ] Kubernetes Operator -## 📋 文档反馈 +## 📋 Documentation Feedback -帮助我们改进文档! +Help us improve the documentation! -**这些文档对您有帮助吗?** -- 👍 很有帮助 -- 👌 基本满意 -- 👎 需要改进 +**Was this documentation helpful?** +- 👍 Very helpful +- 👌 Mostly satisfied +- 👎 Needs improvement -**改进建议**: -请在 GitHub Issues 中提出具体的改进建议。 +**Suggestions for improvement:** +Share specific ideas via GitHub Issues. 
--- -**最后更新**: 2024-01-15 -**文档版本**: v1.0.0 +**Last Updated**: 2024-01-15 +**Documentation Version**: v1.0.0 -*感谢使用 RustFS!我们致力于为您提供最好的分布式文件系统解决方案。* \ No newline at end of file +*Thank you for using RustFS! We are committed to delivering the best distributed file system solution.* diff --git a/docs/kms/frontend-api-guide-zh.md b/docs/kms/frontend-api-guide-zh.md index 5106c611..2b919ac0 100644 --- a/docs/kms/frontend-api-guide-zh.md +++ b/docs/kms/frontend-api-guide-zh.md @@ -1,149 +1,145 @@ -# RustFS KMS 前端对接指南 +# RustFS KMS Frontend Integration Guide -本文档专为前端开发者编写,提供了与 RustFS 密钥管理系统(KMS)交互的完整 API 规范。 +This document targets frontend engineers who need to integrate with the RustFS Key Management Service (KMS). It provides a complete API reference, usage notes, and example implementations. -## 📋 目录 +## 📋 Contents -1. [快速开始](#快速开始) -2. [认证和权限](#认证和权限) -3. [完整接口列表](#完整接口列表) -4. [服务管理API](#服务管理api) -5. [密钥管理API](#密钥管理api) -6. [数据加密API](#数据加密api) -7. [Bucket加密配置API](#bucket加密配置api) -8. [监控和缓存API](#监控和缓存api) -9. [通用错误码](#通用错误码) -10. [数据类型定义](#数据类型定义) -11. [实现示例](#实现示例) +1. [Quick Start](#quick-start) +2. [Authentication & Permissions](#authentication--permissions) +3. [API Catalog](#api-catalog) +4. [Service Management APIs](#service-management-apis) +5. [Key Management APIs](#key-management-apis) +6. [Data Encryption APIs](#data-encryption-apis) +7. [Bucket Encryption Configuration APIs](#bucket-encryption-configuration-apis) +8. [Monitoring & Cache APIs](#monitoring--cache-apis) +9. [Common Error Codes](#common-error-codes) +10. [Data Types](#data-types) +11. [Implementation Examples](#implementation-examples) -## 🚀 快速开始 +## Quick Start -### API 基础信息 +### Base configuration -| 配置项 | 值 | -|--------|-----| -| **基础URL** | `http://localhost:9000/rustfs/admin/v3` (本地开发) | -| **生产URL** | `https://your-rustfs-domain.com/rustfs/admin/v3` | -| **请求格式** | `application/json` | -| **响应格式** | `application/json` | -| **认证方式** | AWS SigV4 签名 | -| **字符编码** | UTF-8 | +| Setting | Value | +|---------|-------| +| **Base URL** | `http://localhost:9000/rustfs/admin/v3` (local development) | +| **Production URL** | `https://your-rustfs-domain.com/rustfs/admin/v3` | +| **Request format** | `application/json` | +| **Response format** | `application/json` | +| **Authentication** | AWS Signature Version 4 | +| **Encoding** | UTF-8 | -### 通用请求头 +### Common request headers -| 头部字段 | 必需 | 值 | -|----------|------|-----| +| Header | Required | Value | +|--------|----------|-------| | `Content-Type` | ✅ | `application/json` | | `Authorization` | ✅ | `AWS4-HMAC-SHA256 Credential=...` | -| `X-Amz-Date` | ✅ | ISO8601 格式时间戳 | +| `X-Amz-Date` | ✅ | ISO 8601 timestamp | -## 🔐 认证和权限 +## Authentication & Permissions -### 权限要求 +### Required IAM permissions -调用 KMS API 需要账户具有以下权限: -- `ServerInfoAdminAction` - 管理员操作权限 +Clients must have `ServerInfoAdminAction` to invoke KMS APIs. -### AWS SigV4 签名 +### AWS SigV4 signing -所有请求必须使用 AWS Signature Version 4 进行签名认证。 +All requests must be signed with SigV4. 
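+
+As a rough illustration only, the sketch below builds a signed request with the AWS SDK v3 signing packages (`@aws-sdk/signature-v4`, `@aws-sdk/protocol-http`, `@aws-crypto/sha256-js`). The helper name, endpoint, and credentials are development placeholders, not part of the RustFS API; the signing parameters it relies on are listed right after the example.
+
+```javascript
+import { SignatureV4 } from '@aws-sdk/signature-v4';
+import { HttpRequest } from '@aws-sdk/protocol-http';
+import { Sha256 } from '@aws-crypto/sha256-js';
+
+// Signer for the RustFS admin API (values below are local-development placeholders).
+const signer = new SignatureV4({
+  service: 'execute-api',
+  region: 'us-east-1',
+  credentials: {
+    accessKeyId: 'your-access-key',
+    secretAccessKey: 'your-secret-key',
+  },
+  sha256: Sha256,
+});
+
+// Hypothetical helper: signs and sends a JSON request to /rustfs/admin/v3.
+async function callKmsApi(method, path, body = null) {
+  const request = new HttpRequest({
+    method,
+    protocol: 'http:',
+    hostname: 'localhost',
+    port: 9000,
+    path: `/rustfs/admin/v3${path}`,
+    headers: {
+      'content-type': 'application/json',
+      host: 'localhost:9000', // the host header must be present before signing
+    },
+    body: body ? JSON.stringify(body) : undefined,
+  });
+
+  const signed = await signer.sign(request);
+  const response = await fetch(`http://localhost:9000${signed.path}`, {
+    method: signed.method,
+    headers: signed.headers,
+    body: signed.body,
+  });
+
+  if (!response.ok) {
+    throw new Error(`KMS API error: ${response.status} ${response.statusText}`);
+  }
+  return response.json();
+}
+```
+
+Later sections reuse this `callKmsApi` sketch in their examples.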
-**签名参数**: -- **Access Key ID**: 账户的访问密钥ID -- **Secret Access Key**: 账户的私密访问密钥 -- **Region**: `us-east-1` (固定值) -- **Service**: `execute-api` +- **Access Key ID** – account access key +- **Secret Access Key** – corresponding secret key +- **Region** – `us-east-1` +- **Service** – `execute-api` -## 📋 完整接口列表 +## API Catalog -### 服务管理接口 +### Service management -| 方法 | 接口路径 | 描述 | 状态 | -|------|----------|------|------| -| `POST` | `/kms/configure` | 配置 KMS 服务 | ✅ 可用 | -| `POST` | `/kms/start` | 启动 KMS 服务 | ✅ 可用 | -| `POST` | `/kms/stop` | 停止 KMS 服务 | ✅ 可用 | -| `GET` | `/kms/service-status` | 获取 KMS 服务状态 | ✅ 可用 | -| `POST` | `/kms/reconfigure` | 重新配置 KMS 服务 | ✅ 可用 | +| Method | Path | Description | Status | +|--------|------|-------------|--------| +| `POST` | `/kms/configure` | Configure the KMS service | ✅ Available | +| `POST` | `/kms/start` | Start the service | ✅ Available | +| `POST` | `/kms/stop` | Stop the service | ✅ Available | +| `GET` | `/kms/service-status` | Retrieve service status | ✅ Available | +| `POST` | `/kms/reconfigure` | Reconfigure and restart | ✅ Available | -### 密钥管理接口 +### Key management -| 方法 | 接口路径 | 描述 | 状态 | -|------|----------|------|------| -| `POST` | `/kms/keys` | 创建主密钥 | ✅ 可用 | -| `GET` | `/kms/keys` | 列出密钥 | ✅ 可用 | -| `GET` | `/kms/keys/{key_id}` | 获取密钥详情 | ✅ 可用 | -| `DELETE` | `/kms/keys/delete` | 计划删除密钥 | ✅ 可用 | -| `POST` | `/kms/keys/cancel-deletion` | 取消密钥删除 | ✅ 可用 | +| Method | Path | Description | Status | +|--------|------|-------------|--------| +| `POST` | `/kms/keys` | Create a master key | ✅ Available | +| `GET` | `/kms/keys` | List keys | ✅ Available | +| `GET` | `/kms/keys/{key_id}` | Get key metadata | ✅ Available | +| `DELETE` | `/kms/keys/delete` | Schedule key deletion | ✅ Available | +| `POST` | `/kms/keys/cancel-deletion` | Cancel key deletion | ✅ Available | -### 数据加密接口 +### Data encryption -| 方法 | 接口路径 | 描述 | 状态 | -|------|----------|------|------| -| `POST` | `/kms/generate-data-key` | 生成数据密钥 | ✅ 可用 | -| `POST` | `/kms/decrypt` | 解密数据密钥 | ⚠️ **未实现** | +| Method | Path | Description | Status | +|--------|------|-------------|--------| +| `POST` | `/kms/generate-data-key` | Generate a data key | ✅ Available | +| `POST` | `/kms/decrypt` | Decrypt a data key | ⚠️ Not implemented | -### Bucket加密配置接口 +### Bucket encryption configuration -| 方法 | 接口路径 | 描述 | 状态 | -|------|----------|------|------| -| `GET` | `/api/v1/buckets` | 列出所有buckets | ✅ 可用 | -| `GET` | `/api/v1/bucket-encryption/{bucket}` | 获取bucket加密配置 | ✅ 可用 | -| `PUT` | `/api/v1/bucket-encryption/{bucket}` | 设置bucket加密配置 | ✅ 可用 | -| `DELETE` | `/api/v1/bucket-encryption/{bucket}` | 删除bucket加密配置 | ✅ 可用 | +| Method | Path | Description | Status | +|--------|------|-------------|--------| +| `GET` | `/api/v1/buckets` | List buckets | ✅ Available | +| `GET` | `/api/v1/bucket-encryption/{bucket}` | Get default encryption | ✅ Available | +| `PUT` | `/api/v1/bucket-encryption/{bucket}` | Set default encryption | ✅ Available | +| `DELETE` | `/api/v1/bucket-encryption/{bucket}` | Remove default encryption | ✅ Available | -### 监控和缓存接口 +### Monitoring & cache -| 方法 | 接口路径 | 描述 | 状态 | -|------|----------|------|------| -| `GET` | `/kms/config` | 获取 KMS 配置 | ✅ 可用 | -| `POST` | `/kms/clear-cache` | 清除 KMS 缓存 | ✅ 可用 | +| Method | Path | Description | Status | +|--------|------|-------------|--------| +| `GET` | `/kms/config` | Retrieve KMS configuration | ✅ Available | +| `POST` | `/kms/clear-cache` | Clear the KMS cache | ✅ Available | -### 兼容性接口(旧版本) +### Legacy compatibility endpoints -| 方法 | 
接口路径 | 描述 | 状态 | -|------|----------|------|------| -| `POST` | `/kms/create-key` | 创建密钥(旧版) | ✅ 可用 | -| `GET` | `/kms/describe-key` | 获取密钥详情(旧版) | ✅ 可用 | -| `GET` | `/kms/list-keys` | 列出密钥(旧版) | ✅ 可用 | -| `GET` | `/kms/status` | 获取 KMS 状态(旧版) | ✅ 可用 | +| Method | Path | Description | Status | +|--------|------|-------------|--------| +| `POST` | `/kms/create-key` | Create key (legacy) | ✅ Available | +| `GET` | `/kms/describe-key` | Describe key (legacy) | ✅ Available | +| `GET` | `/kms/list-keys` | List keys (legacy) | ✅ Available | +| `GET` | `/kms/status` | KMS status (legacy) | ✅ Available | -**重要说明**: -- ✅ **可用**:接口已实现且可正常使用 -- ⚠️ **未实现**:接口规范已定义但后端未实现,需要联系后端开发团队 -- 建议优先使用新版接口,旧版接口主要用于向后兼容 +> ✅ **Available** – implemented and usable. +> ⚠️ **Not implemented** – API shape defined but backend missing. +> Prefer the new endpoints; legacy routes exist for backwards compatibility. -## 🔧 服务管理API +## Service Management APIs -### 1. 配置 KMS 服务 +### 1. Configure KMS -**接口**: `POST /kms/configure` +`POST /kms/configure` -**请求参数**: +Parameters: -| 参数名 | 类型 | 必需 | 说明 | -|--------|------|------|------| -| `backend_type` | string | ✅ | 后端类型:`"local"` 或 `"vault"` | -| `key_directory` | string | 条件 | Local后端:密钥存储目录路径 | -| `default_key_id` | string | ✅ | 默认主密钥ID | -| `enable_cache` | boolean | ❌ | 是否启用缓存,默认 `true` | -| `cache_ttl_seconds` | integer | ❌ | 缓存TTL秒数,默认 `600` | -| `timeout_seconds` | integer | ❌ | 操作超时秒数,默认 `30` | -| `retry_attempts` | integer | ❌ | 重试次数,默认 `3` | -| `address` | string | 条件 | Vault后端:Vault服务器地址 | -| `auth_method` | object | 条件 | Vault后端:认证方法配置 | -| `mount_path` | string | 条件 | Vault后端:Transit挂载路径 | -| `kv_mount` | string | 条件 | Vault后端:KV存储挂载路径 | -| `key_path_prefix` | string | 条件 | Vault后端:密钥路径前缀 | +| Name | Type | Required | Description | +|------|------|----------|-------------| +| `backend_type` | string | ✅ | `"local"` or `"vault"` | +| `key_directory` | string | Cond. | Local backend key directory | +| `default_key_id` | string | ✅ | Default master key ID | +| `enable_cache` | boolean | ❌ | Toggle cache (default `true`) | +| `cache_ttl_seconds` | integer | ❌ | Cache TTL (default `600`) | +| `timeout_seconds` | integer | ❌ | Operation timeout (default `30`) | +| `retry_attempts` | integer | ❌ | Retry attempts (default `3`) | +| `address` | string | Cond. | Vault server address | +| `auth_method` | object | Cond. | Vault auth config | +| `mount_path` | string | Cond. | Vault transit mount path | +| `kv_mount` | string | Cond. | Vault KV mount | +| `key_path_prefix` | string | Cond. | Vault key prefix | -**Vault auth_method 对象**: +Vault `auth_method` fields: -| 参数名 | 类型 | 必需 | 说明 | -|--------|------|------|------| -| `token` | string | ✅ | Vault访问令牌 | - -**响应格式**: +| Name | Type | Required | Description | +|------|------|----------|-------------| +| `token` | string | ✅ | Vault token | +Response ```json { "success": boolean, @@ -152,111 +148,27 @@ } ``` -**响应字段说明**: +### 2. Start KMS -| 字段名 | 类型 | 说明 | -|--------|------|------| -| `success` | boolean | 配置是否成功 | -| `message` | string | 配置结果描述信息 | -| `config_id` | string | 配置ID(如果成功) | +`POST /kms/start` -**调用示例**: +Response fields: `success`, `message`, `status` (`Running`, `Stopped`, `Error`). -```javascript -// 配置本地 KMS 后端 -const localConfig = { - backend_type: "local", - key_directory: "/var/lib/rustfs/kms/keys", - default_key_id: "default-master-key", - enable_cache: true, - cache_ttl_seconds: 600 -}; +### 3. 
Stop KMS -const response = await callKMSAPI('POST', '/kms/configure', localConfig); -// 响应: { "success": true, "message": "KMS configured successfully", "config_id": "config-123" } +`POST /kms/stop` -// 配置 Vault KMS 后端 -const vaultConfig = { - backend_type: "vault", - address: "https://vault.example.com:8200", - auth_method: { - token: "s.your-vault-token" - }, - mount_path: "transit", - kv_mount: "secret", - key_path_prefix: "rustfs/kms/keys", - default_key_id: "rustfs-master" -}; +Same response structure as `/kms/start`. -const vaultResponse = await callKMSAPI('POST', '/kms/configure', vaultConfig); -``` +### 4. Service status -### 2. 启动 KMS 服务 - -**接口**: `POST /kms/start` - -**请求参数**: 无 - -**响应格式**: +`GET /kms/service-status` +Response ```json { - "success": boolean, - "message": string, - "status": string -} -``` - -**响应字段说明**: - -| 字段名 | 类型 | 可能值 | 说明 | -|--------|------|--------|------| -| `success` | boolean | `true`, `false` | 启动是否成功 | -| `message` | string | - | 启动结果描述信息 | -| `status` | string | `"Running"`, `"Stopped"`, `"Error"` | 服务当前状态 | - -### 3. 停止 KMS 服务 - -**接口**: `POST /kms/stop` - -**请求参数**: 无 - -**响应格式**: - -```json -{ - "success": boolean, - "message": string, - "status": string -} -``` - -**响应字段说明**: 同启动接口 - -**调用示例**: - -```javascript -// 启动 KMS 服务 -const startResponse = await callKMSAPI('POST', '/kms/start'); -// 响应: { "success": true, "message": "KMS service started successfully", "status": "Running" } - -// 停止 KMS 服务 -const stopResponse = await callKMSAPI('POST', '/kms/stop'); -// 响应: { "success": true, "message": "KMS service stopped successfully", "status": "Stopped" } -``` - -### 4. 获取 KMS 服务状态 - -**接口**: `GET /kms/service-status` - -**请求参数**: 无 - -**响应格式**: - -```json -{ - "status": string, - "backend_type": string, + "status": "Running" | "Stopped" | "NotConfigured" | "Error", + "backend_type": "local" | "vault", "healthy": boolean, "config_summary": { "backend_type": string, @@ -268,1010 +180,115 @@ const stopResponse = await callKMSAPI('POST', '/kms/stop'); } ``` -**响应字段说明**: - -| 字段名 | 类型 | 可能值 | 说明 | -|--------|------|--------|------| -| `status` | string | `"Running"`, `"Stopped"`, `"NotConfigured"`, `"Error"` | 服务状态 | -| `backend_type` | string | `"local"`, `"vault"` | 后端类型 | -| `healthy` | boolean | `true`, `false` | 服务健康状态 | -| `config_summary` | object | - | 配置摘要信息 | - -**调用示例**: - -```javascript -// 获取 KMS 服务状态 -const status = await callKMSAPI('GET', '/kms/service-status'); -console.log('KMS状态:', status); - -/* 响应示例: -{ - "status": "Running", - "backend_type": "vault", - "healthy": true, - "config_summary": { - "backend_type": "vault", - "default_key_id": "rustfs-master", - "timeout_seconds": 30, - "retry_attempts": 3, - "enable_cache": true - } -} -*/ -``` - -### 5. 重新配置 KMS 服务 - -**接口**: `POST /kms/reconfigure` - -**请求参数**: 同配置接口的参数 - -**响应格式**: - -```json -{ - "success": boolean, - "message": string, - "status": string -} -``` - -**调用示例**: - -```javascript -// 重新配置 KMS 服务(会停止当前服务并重新启动) -const newConfig = { - backend_type: "vault", - address: "https://new-vault.example.com:8200", - auth_method: { - token: "s.new-vault-token" - }, - mount_path: "transit", - kv_mount: "secret", - key_path_prefix: "rustfs/kms/keys", - default_key_id: "new-master-key" -}; - -const reconfigureResponse = await callKMSAPI('POST', '/kms/reconfigure', newConfig); -// 响应: { "success": true, "message": "KMS reconfigured and restarted successfully", "status": "Running" } -``` - -## 🔑 密钥管理API - -### 1. 
创建主密钥 - -**接口**: `POST /kms/keys` - -**请求参数**: - -| 参数名 | 类型 | 必需 | 说明 | -|--------|------|------|------| -| `KeyUsage` | string | ✅ | 密钥用途,固定值:`"ENCRYPT_DECRYPT"` | -| `Description` | string | ❌ | 密钥描述,最长256字符 | -| `Tags` | object | ❌ | 密钥标签,键值对格式 | - -**Tags 对象**: 任意键值对,值必须为字符串类型 - -**响应格式**: - -```json -{ - "key_id": string, - "key_metadata": { - "key_id": string, - "description": string, - "enabled": boolean, - "key_usage": string, - "creation_date": string, - "rotation_enabled": boolean, - "deletion_date": string? - } -} -``` - -**响应字段说明**: - -| 字段名 | 类型 | 说明 | -|--------|------|------| -| `key_id` | string | 生成的密钥唯一标识符(UUID格式) | -| `key_metadata.key_id` | string | 密钥ID(与外层相同) | -| `key_metadata.description` | string | 密钥描述 | -| `key_metadata.enabled` | boolean | 密钥是否启用 | -| `key_metadata.key_usage` | string | 密钥用途 | -| `key_metadata.creation_date` | string | 创建时间(ISO8601格式) | -| `key_metadata.rotation_enabled` | boolean | 是否启用轮换 | -| `key_metadata.deletion_date` | string | 删除时间(如果已计划删除) | - -**调用示例**: - -```javascript -// 创建主密钥 -const keyRequest = { - KeyUsage: "ENCRYPT_DECRYPT", - Description: "前端应用主密钥", - Tags: { - owner: "frontend-team", - environment: "production", - project: "user-data-encryption" - } -}; - -const newKey = await callKMSAPI('POST', '/kms/keys', keyRequest); -console.log('创建的密钥ID:', newKey.key_id); - -/* 响应示例: -{ - "key_id": "fa5bac0e-2a2c-4f9a-a09d-2f5b8a59ed85", - "key_metadata": { - "key_id": "fa5bac0e-2a2c-4f9a-a09d-2f5b8a59ed85", - "description": "前端应用主密钥", - "enabled": true, - "key_usage": "ENCRYPT_DECRYPT", - "creation_date": "2024-09-19T07:10:42.012345Z", - "rotation_enabled": false - } -} -*/ -``` - -### 2. 获取密钥详情 - -**接口**: `GET /kms/keys/{key_id}` - -**路径参数**: - -| 参数名 | 类型 | 必需 | 说明 | -|--------|------|------|------| -| `key_id` | string | ✅ | 密钥ID(UUID格式) | - -**响应格式**: - -```json -{ - "key_metadata": { - "key_id": string, - "description": string, - "enabled": boolean, - "key_usage": string, - "creation_date": string, - "rotation_enabled": boolean, - "deletion_date": string? - } -} -``` - -**响应字段说明**: 同创建接口的 key_metadata 字段 - -**调用示例**: - -```javascript -// 获取密钥详情 -const keyId = "fa5bac0e-2a2c-4f9a-a09d-2f5b8a59ed85"; -const keyDetails = await callKMSAPI('GET', `/kms/keys/${keyId}`); -console.log('密钥详情:', keyDetails.key_metadata); - -/* 响应示例: -{ - "key_metadata": { - "key_id": "fa5bac0e-2a2c-4f9a-a09d-2f5b8a59ed85", - "description": "前端应用主密钥", - "enabled": true, - "key_usage": "ENCRYPT_DECRYPT", - "creation_date": "2024-09-19T07:10:42.012345Z", - "rotation_enabled": false, - "deletion_date": null - } -} -*/ -``` - -### 3. 列出密钥 - -**接口**: `GET /kms/keys` - -**查询参数**: - -| 参数名 | 类型 | 必需 | 默认值 | 说明 | -|--------|------|------|--------|------| -| `limit` | integer | ❌ | `50` | 每页返回的密钥数量,最大1000 | -| `marker` | string | ❌ | - | 分页标记,用于获取下一页 | - -**响应格式**: - -```json -{ - "keys": [ - { - "key_id": string, - "description": string - } - ], - "truncated": boolean, - "next_marker": string? 
-} -``` - -**响应字段说明**: - -| 字段名 | 类型 | 说明 | -|--------|------|------| -| `keys` | array | 密钥列表 | -| `keys[].key_id` | string | 密钥ID | -| `keys[].description` | string | 密钥描述 | -| `truncated` | boolean | 是否还有更多数据 | -| `next_marker` | string | 下一页的分页标记 | - -**调用示例**: - -```javascript -// 列出所有密钥(分页) -let allKeys = []; -let marker = null; - -do { - const params = new URLSearchParams({ limit: '50' }); - if (marker) params.append('marker', marker); - - const keysList = await callKMSAPI('GET', `/kms/keys?${params}`); - allKeys.push(...keysList.keys); - marker = keysList.next_marker; -} while (marker); - -console.log('所有密钥:', allKeys); - -/* 响应示例: -{ - "keys": [ - { "key_id": "fa5bac0e-2a2c-4f9a-a09d-2f5b8a59ed85", "description": "前端应用主密钥" }, - { "key_id": "bb2cd4f1-3e4d-4a5b-b6c7-8d9e0f1a2b3c", "description": "用户数据密钥" } - ], - "truncated": false, - "next_marker": null -} -*/ -``` - -### 4. 计划删除密钥 - -**接口**: `DELETE /kms/keys/delete` - -**请求参数**: - -| 参数名 | 类型 | 必需 | 说明 | -|--------|------|------|------| -| `key_id` | string | ✅ | 要删除的密钥ID | -| `pending_window_in_days` | integer | ❌ | 待删除天数,范围 7-30,默认 7 | - -**响应格式**: - -```json -{ - "key_id": string, - "deletion_date": string, - "pending_window_in_days": integer -} -``` - -**响应字段说明**: - -| 字段名 | 类型 | 说明 | -|--------|------|------| -| `key_id` | string | 密钥ID | -| `deletion_date` | string | 计划删除时间(ISO8601格式) | -| `pending_window_in_days` | integer | 待删除天数 | - -**调用示例**: - -```javascript -// 计划删除密钥(7天后删除) -const deleteRequest = { - key_id: "fa5bac0e-2a2c-4f9a-a09d-2f5b8a59ed85", - pending_window_in_days: 7 -}; - -const deleteResponse = await callKMSAPI('DELETE', '/kms/keys/delete', deleteRequest); -console.log('密钥已计划删除:', deleteResponse); - -/* 响应示例: -{ - "key_id": "fa5bac0e-2a2c-4f9a-a09d-2f5b8a59ed85", - "deletion_date": "2024-09-26T07:10:42.012345Z", - "pending_window_in_days": 7 -} -*/ -``` - -### 5. 取消密钥删除 - -**接口**: `POST /kms/keys/cancel-deletion` - -**请求参数**: - -| 参数名 | 类型 | 必需 | 说明 | -|--------|------|------|------| -| `key_id` | string | ✅ | 要取消删除的密钥ID | - -**响应格式**: - -```json -{ - "key_id": string, - "key_metadata": { - "key_id": string, - "description": string, - "enabled": boolean, - "key_usage": string, - "creation_date": string, - "rotation_enabled": boolean, - "deletion_date": null - } -} -``` - -**响应字段说明**: 同创建接口,注意 `deletion_date` 将为 `null` - -**调用示例**: - -```javascript -// 取消密钥删除 -const cancelRequest = { - key_id: "fa5bac0e-2a2c-4f9a-a09d-2f5b8a59ed85" -}; - -const cancelResponse = await callKMSAPI('POST', '/kms/keys/cancel-deletion', cancelRequest); -console.log('密钥删除已取消:', cancelResponse); - -/* 响应示例: -{ - "key_id": "fa5bac0e-2a2c-4f9a-a09d-2f5b8a59ed85", - "key_metadata": { - "key_id": "fa5bac0e-2a2c-4f9a-a09d-2f5b8a59ed85", - "description": "前端应用主密钥", - "enabled": true, - "key_usage": "ENCRYPT_DECRYPT", - "creation_date": "2024-09-19T07:10:42.012345Z", - "rotation_enabled": false, - "deletion_date": null - } -} -*/ -``` - -## 🔒 数据加密API - -### 1. 
生成数据密钥 - -**接口**: `POST /kms/generate-data-key` - -**请求参数**: - -| 参数名 | 类型 | 必需 | 说明 | -|--------|------|------|------| -| `key_id` | string | ✅ | 主密钥ID(UUID格式) | -| `key_spec` | string | ❌ | 数据密钥规格,默认 `"AES_256"` | -| `encryption_context` | object | ❌ | 加密上下文,键值对格式 | - -**key_spec 可能值**: -- `"AES_256"` - 256位AES密钥 -- `"AES_128"` - 128位AES密钥 - -**encryption_context 对象**: 任意键值对,用于加密上下文,键和值都必须是字符串 - -**响应格式**: - -```json -{ - "key_id": string, - "plaintext_key": string, - "ciphertext_blob": string -} -``` - -**响应字段说明**: - -| 字段名 | 类型 | 说明 | -|--------|------|------| -| `key_id` | string | 主密钥ID | -| `plaintext_key` | string | 原始数据密钥(Base64编码) | -| `ciphertext_blob` | string | 加密后的数据密钥(Base64编码) | - -**调用示例**: - -```javascript -// 生成数据密钥用于文件加密 -const dataKeyRequest = { - key_id: "fa5bac0e-2a2c-4f9a-a09d-2f5b8a59ed85", - key_spec: "AES_256", - encryption_context: { - bucket: "user-uploads", - object_key: "documents/report.pdf", - user_id: "user123", - department: "finance" - } -}; +### 5. Reconfigure -const dataKey = await callKMSAPI('POST', '/kms/generate-data-key', dataKeyRequest); -console.log('生成的数据密钥:', dataKey); +`POST /kms/reconfigure` -// 立即使用原始密钥进行数据加密 -const encryptedData = await encryptFileWithKey(fileData, dataKey.plaintext_key); +Accepts the same payload as `/kms/configure` and restarts the service. -// 安全地清理内存中的原始密钥 -dataKey.plaintext_key = null; +## Key Management APIs -// 保存加密后的密钥用于后续解密 -localStorage.setItem('encrypted_key', dataKey.ciphertext_blob); +### 1. Create key -/* 响应示例: -{ - "key_id": "fa5bac0e-2a2c-4f9a-a09d-2f5b8a59ed85", - "plaintext_key": "sQW6qt0yS7CqD6c8hY7GZg==", - "ciphertext_blob": "gAAAAABlLK4xQ8..." -} -*/ -``` +`POST /kms/keys` -### 2. 解密数据密钥 +Parameters: -⚠️ **注意:此接口当前未实现** +| Name | Type | Required | Description | +|------|------|----------|-------------| +| `KeyUsage` | string | ✅ | `"ENCRYPT_DECRYPT"` | +| `Description` | string | ❌ | Description (≤256 chars) | +| `Tags` | object | ❌ | Key/value tag map | -根据代码分析,虽然底层 KMS 服务具有解密功能,但尚未暴露对应的 HTTP API 接口。这是一个重要的功能缺失。 +Response includes `key_id` and `key_metadata` (enabled, usage, creation date, etc.). -**预期接口**: `POST /kms/decrypt` - -**预期请求参数**: - -| 参数名 | 类型 | 必需 | 说明 | -|--------|------|------|------| -| `ciphertext_blob` | string | ✅ | 加密的数据密钥(Base64编码) | -| `encryption_context` | object | ❌ | 解密上下文(必须与加密时相同) | +### 2. Key metadata -**预期响应格式**: +`GET /kms/keys/{key_id}` returns the `key_metadata` object. -```json -{ - "key_id": string, - "plaintext": string -} -``` +### 3. List keys -**预期响应字段说明**: +`GET /kms/keys?limit=&marker=` with pagination support. -| 字段名 | 类型 | 说明 | -|--------|------|------| -| `key_id` | string | 用于加密的主密钥ID | -| `plaintext` | string | 解密后的原始数据密钥(Base64编码) | +### 4. Schedule deletion -**临时解决方案**: +`DELETE /kms/keys/delete` -目前前端需要通过其他方式处理数据密钥解密: - -```javascript -// 临时解决方案:建议联系后端开发团队添加此接口 -console.error('解密数据密钥接口暂未实现,请联系后端开发团队'); +Parameters: `key_id`, optional `pending_window_in_days` (7–30, default 7). -// 或者考虑使用以下替代方案: -// 1. 在服务端完成数据加密/解密,前端只处理已解密的数据 -// 2. 
等待后端团队实现 /kms/decrypt 接口 - -/* 未来的调用示例: -const encryptedKey = localStorage.getItem('encrypted_key'); - -const decryptRequest = { - ciphertext_blob: encryptedKey, - encryption_context: { - bucket: "user-uploads", - object_key: "documents/report.pdf", - user_id: "user123", - department: "finance" - } -}; - -const decryptedKey = await callKMSAPI('POST', '/kms/decrypt', decryptRequest); -console.log('解密成功,主密钥ID:', decryptedKey.key_id); - -// 使用解密的密钥解密文件数据 -const decryptedData = await decryptFileWithKey(encryptedFileData, decryptedKey.plaintext); - -// 立即清理内存中的原始密钥 -decryptedKey.plaintext = null; -*/ -``` - -**建议**: - -1. **联系后端团队**:建议尽快实现 `POST /kms/decrypt` 接口 -2. **API 设计参考**:可参考 AWS KMS 的 Decrypt API 设计 -3. **安全考虑**:确保接口包含适当的认证和授权检查 - -## 🪣 Bucket加密配置API - -### 概述 +### 5. Cancel deletion -Bucket加密配置API提供了对存储桶级别默认加密设置的管理功能。这些API基于AWS S3兼容的bucket加密接口,支持SSE-S3和SSE-KMS两种加密方式。 +`POST /kms/keys/cancel-deletion` -**重要说明**:这些接口使用AWS S3 SDK的标准接口,不是RustFS的自定义KMS接口。 - -### 1. 列出所有buckets - -**接口**: AWS S3 `ListBuckets` 操作 - -**AWS SDK调用方式**: -```javascript -import { ListBucketsCommand } from '@aws-sdk/client-s3'; - -const listBuckets = async (s3Client) => { - const command = new ListBucketsCommand({}); - return await s3Client.send(command); -}; -``` - -**响应格式**: -```json -{ - "Buckets": [ - { - "Name": "my-bucket", - "CreationDate": "2024-09-19T10:30:00.000Z" - } - ], - "Owner": { - "DisplayName": "owner-name", - "ID": "owner-id" - } -} -``` - -**响应字段说明**: - -| 字段名 | 类型 | 说明 | -|--------|------|------| -| `Buckets` | array | Bucket列表 | -| `Buckets[].Name` | string | Bucket名称 | -| `Buckets[].CreationDate` | string | 创建时间(ISO8601格式) | -| `Owner` | object | 所有者信息 | - -### 2. 获取bucket加密配置 - -**接口**: AWS S3 `GetBucketEncryption` 操作 - -**AWS SDK调用方式**: -```javascript -import { GetBucketEncryptionCommand } from '@aws-sdk/client-s3'; - -const getBucketEncryption = async (s3Client, bucketName) => { - const command = new GetBucketEncryptionCommand({ - Bucket: bucketName - }); - return await s3Client.send(command); -}; -``` - -**响应格式**: -```json -{ - "ServerSideEncryptionConfiguration": { - "Rules": [ - { - "ApplyServerSideEncryptionByDefault": { - "SSEAlgorithm": "aws:kms", - "KMSMasterKeyID": "key-id-here" - } - } - ] - } -} -``` - -**响应字段说明**: - -| 字段名 | 类型 | 可能值 | 说明 | -|--------|------|--------|------| -| `ServerSideEncryptionConfiguration` | object | - | 服务端加密配置 | -| `Rules` | array | - | 加密规则列表 | -| `Rules[].ApplyServerSideEncryptionByDefault` | object | - | 默认加密设置 | -| `SSEAlgorithm` | string | `"aws:kms"`, `"AES256"` | 加密算法 | -| `KMSMasterKeyID` | string | - | KMS主密钥ID(仅SSE-KMS时存在) | - -**错误处理**: -- **404错误**: 表示bucket未配置加密,应视为"未配置"状态 -- **403错误**: 权限不足,无法访问bucket加密配置 - -### 3. 
设置bucket加密配置 - -**接口**: AWS S3 `PutBucketEncryption` 操作 - -**AWS SDK调用方式**: - -#### SSE-S3加密: -```javascript -import { PutBucketEncryptionCommand } from '@aws-sdk/client-s3'; - -const putBucketEncryptionSSE_S3 = async (s3Client, bucketName) => { - const command = new PutBucketEncryptionCommand({ - Bucket: bucketName, - ServerSideEncryptionConfiguration: { - Rules: [ - { - ApplyServerSideEncryptionByDefault: { - SSEAlgorithm: 'AES256' - } - } - ] - } - }); - return await s3Client.send(command); -}; -``` - -#### SSE-KMS加密: -```javascript -const putBucketEncryptionSSE_KMS = async (s3Client, bucketName, kmsKeyId) => { - const command = new PutBucketEncryptionCommand({ - Bucket: bucketName, - ServerSideEncryptionConfiguration: { - Rules: [ - { - ApplyServerSideEncryptionByDefault: { - SSEAlgorithm: 'aws:kms', - KMSMasterKeyID: kmsKeyId - } - } - ] - } - }); - return await s3Client.send(command); -}; -``` - -**请求参数**: - -| 参数名 | 类型 | 必需 | 说明 | -|--------|------|------|------| -| `Bucket` | string | ✅ | Bucket名称 | -| `ServerSideEncryptionConfiguration` | object | ✅ | 加密配置对象 | -| `Rules` | array | ✅ | 加密规则数组 | -| `SSEAlgorithm` | string | ✅ | `"AES256"` 或 `"aws:kms"` | -| `KMSMasterKeyID` | string | 条件 | KMS密钥ID(SSE-KMS时必需) | - -**响应**: 成功时返回HTTP 200,无响应体 - -### 4. 删除bucket加密配置 - -**接口**: AWS S3 `DeleteBucketEncryption` 操作 - -**AWS SDK调用方式**: -```javascript -import { DeleteBucketEncryptionCommand } from '@aws-sdk/client-s3'; - -const deleteBucketEncryption = async (s3Client, bucketName) => { - const command = new DeleteBucketEncryptionCommand({ - Bucket: bucketName - }); - return await s3Client.send(command); -}; -``` - -**请求参数**: - -| 参数名 | 类型 | 必需 | 说明 | -|--------|------|------|------| -| `Bucket` | string | ✅ | Bucket名称 | - -**响应**: 成功时返回HTTP 204,无响应体 - -### 前端集成示例 - -#### Vue.js Composable示例 -```javascript -import { ref } from 'vue'; - -export function useBucketEncryption() { - const { listBuckets, getBucketEncryption, putBucketEncryption, deleteBucketEncryption } = useBucket({}); - - const buckets = ref([]); - const loading = ref(false); - const error = ref(null); - - // 加载bucket列表和加密状态 - const loadBucketList = async () => { - loading.value = true; - error.value = null; - - try { - const response = await listBuckets(); - if (response?.Buckets) { - // 并行获取加密配置 - const bucketList = await Promise.all( - response.Buckets.map(async (bucket) => { - try { - const encryptionConfig = await getBucketEncryption(bucket.Name); - - let encryptionStatus = 'Disabled'; - let encryptionType = ''; - let kmsKeyId = ''; - - if (encryptionConfig?.ServerSideEncryptionConfiguration?.Rules?.length > 0) { - const rule = encryptionConfig.ServerSideEncryptionConfiguration.Rules[0]; - if (rule.ApplyServerSideEncryptionByDefault) { - encryptionStatus = 'Enabled'; - const algorithm = rule.ApplyServerSideEncryptionByDefault.SSEAlgorithm; - - if (algorithm === 'aws:kms') { - encryptionType = 'SSE-KMS'; - kmsKeyId = rule.ApplyServerSideEncryptionByDefault.KMSMasterKeyID || ''; - } else if (algorithm === 'AES256') { - encryptionType = 'SSE-S3'; - } - } - } - - return { - name: bucket.Name, - creationDate: bucket.CreationDate, - encryptionStatus, - encryptionType, - kmsKeyId - }; - } catch (encryptionError) { - // 404表示未配置加密 - return { - name: bucket.Name, - creationDate: bucket.CreationDate, - encryptionStatus: 'Disabled', - encryptionType: '', - kmsKeyId: '' - }; - } - }) - ); - - buckets.value = bucketList; - } - } catch (err) { - error.value = err.message; - throw err; - } finally { - loading.value = false; - } - }; - - // 
配置bucket加密 - const configureBucketEncryption = async (bucketName, encryptionType, kmsKeyId = '') => { - const encryptionConfig = { - Rules: [ - { - ApplyServerSideEncryptionByDefault: { - SSEAlgorithm: encryptionType === 'SSE-KMS' ? 'aws:kms' : 'AES256', - ...(encryptionType === 'SSE-KMS' && kmsKeyId && { KMSMasterKeyID: kmsKeyId }) - } - } - ] - }; - - await putBucketEncryption(bucketName, encryptionConfig); - await loadBucketList(); // 刷新列表 - }; - - // 移除bucket加密 - const removeBucketEncryption = async (bucketName) => { - await deleteBucketEncryption(bucketName); - await loadBucketList(); // 刷新列表 - }; - - return { - buckets, - loading, - error, - loadBucketList, - configureBucketEncryption, - removeBucketEncryption - }; -} -``` - -### 与KMS密钥管理的集成 - -结合KMS密钥管理API,可以实现完整的加密密钥生命周期管理: - -```javascript -// 完整的加密管理示例 -export function useEncryptionManagement() { - const { loadBucketList, configureBucketEncryption } = useBucketEncryption(); - const { getKeyList, createKey } = useSSE(); - - // 为bucket设置新的加密配置 - const setupBucketEncryption = async (bucketName, encryptionType, keyName) => { - let kmsKeyId = null; - - if (encryptionType === 'SSE-KMS') { - // 1. 获取现有KMS密钥列表 - const keysList = await getKeyList(); - let targetKey = keysList.keys.find(key => - key.tags?.name === keyName || key.description === keyName - ); - - // 2. 如果密钥不存在,创建新密钥 - if (!targetKey) { - const newKeyResponse = await createKey({ - KeyUsage: 'ENCRYPT_DECRYPT', - Description: `Bucket encryption key for ${bucketName}`, - Tags: { - name: keyName, - bucket: bucketName, - purpose: 'bucket-encryption' - } - }); - kmsKeyId = newKeyResponse.key_id; - } else { - kmsKeyId = targetKey.key_id; - } - } - - // 3. 配置bucket加密 - await configureBucketEncryption(bucketName, encryptionType, kmsKeyId); - - return { success: true, kmsKeyId }; - }; - - return { setupBucketEncryption }; -} -``` - -### 安全最佳实践 - -1. **权限控制**: 确保只有授权用户可以修改bucket加密配置 -2. **加密算法选择**: - - **SSE-S3**: 由S3服务管理密钥,适合一般用途 - - **SSE-KMS**: 使用KMS管理密钥,提供更细粒度的访问控制 -3. **密钥管理**: 使用SSE-KMS时,确保KMS密钥具有适当的访问策略 -4. **审计日志**: 记录所有加密配置变更操作 - -### 错误处理指南 - -| 错误类型 | HTTP状态 | 处理建议 | -|----------|----------|----------| -| `NoSuchBucket` | 404 | Bucket不存在,检查bucket名称 | -| `NoSuchBucketPolicy` | 404 | 未配置加密,视为正常状态 | -| `AccessDenied` | 403 | 权限不足,检查IAM策略 | -| `InvalidRequest` | 400 | 请求参数错误,检查加密配置格式 | -| `KMSKeyNotFound` | 400 | KMS密钥不存在,验证密钥ID | - -## 📊 监控和缓存API - -### 1. 获取 KMS 配置 - -**接口**: `GET /kms/config` - -**请求参数**: 无 - -**响应格式**: - -```json -{ - "backend": string, - "cache_enabled": boolean, - "cache_max_keys": integer, - "cache_ttl_seconds": integer, - "default_key_id": string? -} -``` - -**响应字段说明**: - -| 字段名 | 类型 | 说明 | -|--------|------|------| -| `backend` | string | 后端类型 | -| `cache_enabled` | boolean | 是否启用缓存 | -| `cache_max_keys` | integer | 缓存最大密钥数量 | -| `cache_ttl_seconds` | integer | 缓存TTL(秒) | -| `default_key_id` | string | 默认密钥ID | - -**调用示例**: - -```javascript -// 获取 KMS 配置 -const config = await callKMSAPI('GET', '/kms/config'); -console.log('KMS配置:', config); - -/* 响应示例: -{ - "backend": "vault", - "cache_enabled": true, - "cache_max_keys": 1000, - "cache_ttl_seconds": 300, - "default_key_id": "rustfs-master" -} -*/ -``` - -### 2. 
清除 KMS 缓存 - -**接口**: `POST /kms/clear-cache` - -**请求参数**: 无 - -**响应格式**: - -```json -{ - "status": string, - "message": string -} -``` - -**调用示例**: - -```javascript -// 清除 KMS 缓存 -const clearResult = await callKMSAPI('POST', '/kms/clear-cache'); -console.log('缓存清除结果:', clearResult); - -/* 响应示例: -{ - "status": "success", - "message": "cache cleared successfully" -} -*/ -``` - -### 3. 获取缓存统计信息 - -**接口**: `GET /kms/status` (旧版接口,包含缓存统计) - -**请求参数**: 无 - -**响应格式**: - -```json -{ - "backend_type": string, - "backend_status": string, - "cache_enabled": boolean, - "cache_stats": { - "hit_count": integer, - "miss_count": integer - }?, - "default_key_id": string? -} -``` - -**调用示例**: - -```javascript -// 获取详细的 KMS 状态(包含缓存统计) -const detailedStatus = await callKMSAPI('GET', '/kms/status'); -console.log('详细状态:', detailedStatus); - -/* 响应示例: -{ - "backend_type": "vault", - "backend_status": "healthy", - "cache_enabled": true, - "cache_stats": { - "hit_count": 1250, - "miss_count": 48 - }, - "default_key_id": "rustfs-master" -} -*/ -``` - -## ❌ 通用错误码 - -### HTTP 状态码 - -| 状态码 | 错误类型 | 说明 | -|--------|----------|------| -| `200` | - | 请求成功 | -| `400` | `InvalidRequest` | 请求格式错误或参数无效 | -| `401` | `AccessDenied` | 认证失败 | -| `403` | `AccessDenied` | 权限不足 | -| `404` | `NotFound` | 资源不存在 | -| `409` | `Conflict` | 资源状态冲突 | -| `500` | `InternalError` | 服务器内部错误 | - -### 错误响应格式 +Provide `key_id`; response returns updated metadata with `deletion_date = null`. + +## Data Encryption APIs + +### 1. Generate data key + +`POST /kms/generate-data-key` + +Parameters: `key_id`, optional `key_spec` (`AES_256` or `AES_128`), optional `encryption_context` map. + +Response contains `plaintext_key` (Base64) and `ciphertext_blob` (Base64). + +### 2. Decrypt data key + +`POST /kms/decrypt` + +> ⚠️ Not yet implemented. Expect parameters `ciphertext_blob` and optional `encryption_context`. A future response will expose `key_id` and `plaintext`. + +## Bucket Encryption Configuration APIs + +RustFS exposes S3-compatible endpoints via the AWS SDK. + +### 1. List buckets + +Use `ListBuckets` from the AWS SDK. + +### 2. Get default encryption + +`GetBucketEncryption` returns SSE rules (`SSEAlgorithm`, optional `KMSMasterKeyID`). A 404 indicates no configuration. + +### 3. Set default encryption + +`PutBucketEncryption` supports SSE-S3 (`AES256`) or SSE-KMS (`aws:kms` + key ID). + +### 4. Delete default encryption + +`DeleteBucketEncryption` removes the configuration. + +Example composable and helper utilities are provided in the original Chinese document; port them as needed. + +## Monitoring & Cache APIs + +### 1. Get KMS config + +`GET /kms/config` returns backend, cache settings, and default key ID. + +### 2. Clear cache + +`POST /kms/clear-cache` invalidates cached key metadata. + +### 3. Legacy status + +`GET /kms/status` (legacy) provides cache hit/miss stats. 
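+
+As a small usage sketch (reusing the hypothetical `callKmsApi` helper from the authentication section), a dashboard could combine these endpoints to watch cache effectiveness; the `cache_stats` field names follow the legacy status payload described above:
+
+```javascript
+// Inspect cache settings and hit ratio, flushing the cache if it degrades badly.
+async function inspectKmsCache() {
+  const config = await callKmsApi('GET', '/kms/config');
+  console.log('cache enabled:', config.cache_enabled, 'ttl:', config.cache_ttl_seconds);
+
+  const status = await callKmsApi('GET', '/kms/status');
+  if (status.cache_stats) {
+    const { hit_count, miss_count } = status.cache_stats;
+    const total = hit_count + miss_count;
+    const hitRatio = total > 0 ? (hit_count / total) * 100 : 0;
+    console.log(`cache hit ratio: ${hitRatio.toFixed(1)}%`);
+
+    // Example policy only: clear the cache when the hit ratio is very low.
+    if (total > 100 && hitRatio < 10) {
+      await callKmsApi('POST', '/kms/clear-cache');
+    }
+  }
+}
+```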
+ +## Common Error Codes + +### HTTP status codes + +| Code | Error | Description | +|------|-------|-------------| +| 200 | – | Success | +| 400 | `InvalidRequest` | Bad request or parameters | +| 401 | `AccessDenied` | Authentication failure | +| 403 | `AccessDenied` | Authorization failure | +| 404 | `NotFound` | Resource not found | +| 409 | `Conflict` | Resource conflict | +| 500 | `InternalError` | Server error | + +### Error payload ```json { @@ -1283,1100 +300,73 @@ console.log('详细状态:', detailedStatus); } ``` -### 具体错误码 +### Specific codes -| 错误码 | HTTP状态 | 说明 | 处理建议 | -|--------|----------|------|----------| -| `InvalidRequest` | 400 | 请求参数错误 | 检查请求格式和参数 | -| `AccessDenied` | 401/403 | 认证或授权失败 | 检查访问凭证和权限 | -| `KeyNotFound` | 404 | 密钥不存在 | 验证密钥ID是否正确 | -| `InvalidKeyState` | 400 | 密钥状态无效 | 检查密钥是否已启用 | -| `ServiceNotConfigured` | 409 | KMS服务未配置 | 先配置KMS服务 | -| `ServiceNotRunning` | 409 | KMS服务未运行 | 启动KMS服务 | -| `BackendError` | 500 | 后端存储错误 | 检查后端服务状态 | -| `EncryptionFailed` | 500 | 加密操作失败 | 重试操作或检查密钥状态 | -| `DecryptionFailed` | 500 | 解密操作失败 | 检查密文和加密上下文 | +- `InvalidRequest` – check payload +- `AccessDenied` – verify credentials/permissions +- `KeyNotFound` – key ID incorrect +- `InvalidKeyState` – key disabled or invalid +- `ServiceNotConfigured` – configure KMS first +- `ServiceNotRunning` – start the service +- `BackendError` – backend failure +- `EncryptionFailed` / `DecryptionFailed` – inspect ciphertext/context -## 📊 数据类型定义 +## Data Types -### KeyMetadata 对象 +### `KeyMetadata` -| 字段名 | 类型 | 必需 | 说明 | -|--------|------|------|------| -| `key_id` | string | ✅ | 密钥唯一标识符(UUID格式) | -| `description` | string | ✅ | 密钥描述 | -| `enabled` | boolean | ✅ | 密钥是否启用 | -| `key_usage` | string | ✅ | 密钥用途,值为 `"ENCRYPT_DECRYPT"` | -| `creation_date` | string | ✅ | 创建时间(ISO8601格式) | -| `rotation_enabled` | boolean | ✅ | 是否启用自动轮换 | -| `deletion_date` | string | ❌ | 计划删除时间(如果已计划删除) | +| Field | Type | Description | +|-------|------|-------------| +| `key_id` | string | UUID | +| `description` | string | Key description | +| `enabled` | boolean | Whether the key is enabled | +| `key_usage` | string | Always `ENCRYPT_DECRYPT` | +| `creation_date` | string | ISO 8601 timestamp | +| `rotation_enabled` | boolean | Rotation status | +| `deletion_date` | string? 
| Scheduled deletion timestamp | -### ConfigSummary 对象 +### `ConfigSummary` -| 字段名 | 类型 | 必需 | 说明 | -|--------|------|------|------| -| `backend_type` | string | ✅ | 后端类型 | -| `default_key_id` | string | ✅ | 默认主密钥ID | -| `timeout_seconds` | integer | ✅ | 操作超时时间 | -| `retry_attempts` | integer | ✅ | 重试次数 | -| `enable_cache` | boolean | ✅ | 是否启用缓存 | +| Field | Type | Description | +|-------|------|-------------| +| `backend_type` | string | `local` or `vault` | +| `default_key_id` | string | Default master key | +| `timeout_seconds` | integer | Operation timeout | +| `retry_attempts` | integer | Retry attempts | +| `enable_cache` | boolean | Cache toggle | -### 枚举值定义 +### Enumerations -**ServiceStatus(服务状态)**: -- `"Running"` - 运行中 -- `"Stopped"` - 已停止 -- `"NotConfigured"` - 未配置 -- `"Error"` - 错误状态 +- `ServiceStatus` – `Running`, `Stopped`, `NotConfigured`, `Error` +- `BackendType` – `local`, `vault` +- `KeyUsage` – `ENCRYPT_DECRYPT` +- `KeySpec` – `AES_256`, `AES_128` -**BackendType(后端类型)**: -- `"local"` - 本地文件系统后端 -- `"vault"` - Vault后端 +## Implementation Examples -**KeyUsage(密钥用途)**: -- `"ENCRYPT_DECRYPT"` - 加密解密 +The original guide included extensive code samples covering bucket encryption flows, Vue/React composables, and full application scaffolding. The key patterns are: -**KeySpec(数据密钥规格)**: -- `"AES_256"` - 256位AES密钥 -- `"AES_128"` - 128位AES密钥 +1. **Signed requests** – Use AWS SigV4 (via AWS SDK or manual signing) to call `/rustfs/admin/v3` endpoints. +2. **Multipart encryption flow** – Request a data key, encrypt data locally, upload ciphertext, and store the encrypted key blob. +3. **Bucket encryption lifecycle** – Use the S3 SDK to configure default SSE policies, optionally provisioning dedicated KMS keys per bucket. +4. **Health monitoring** – Periodically poll `/kms/status` or `/kms/config` to ensure the service is healthy and cache hit ratios remain acceptable. -## 💡 实现示例 +## Troubleshooting & Support -### Bucket加密管理完整示例 +If issues arise: -以下是一个完整的bucket加密管理实现,展示了如何在前端应用中集成KMS密钥管理和bucket加密配置: +1. Verify the KMS service is healthy via `/kms/service-status`. +2. Confirm Vault or local backend configuration. +3. Inspect server logs for detailed error messages. +4. Run `cargo test -p e2e_test kms:: -- --nocapture` to validate the setup. +5. Ensure your AWS SDK version supports the required S3/KMS calls. 
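+
+For step 1, a minimal programmatic probe might look like the sketch below (again using the hypothetical `callKmsApi` helper; the response fields mirror the service-status payload documented earlier):
+
+```javascript
+// Returns true when the KMS service reports itself as running and healthy.
+async function checkKmsHealth() {
+  try {
+    const status = await callKmsApi('GET', '/kms/service-status');
+    const healthy = status.healthy && status.status === 'Running';
+    console.log(`backend: ${status.backend_type}, status: ${status.status}, healthy: ${healthy}`);
+    return healthy;
+  } catch (error) {
+    console.error('KMS health check failed:', error.message);
+    return false;
+  }
+}
+
+// Example: poll from an admin dashboard every 30 seconds.
+setInterval(checkKmsHealth, 30_000);
+```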
-```javascript -// BucketEncryptionManager.js - 完整的bucket加密管理类 -import { - ListBucketsCommand, - GetBucketEncryptionCommand, - PutBucketEncryptionCommand, - DeleteBucketEncryptionCommand -} from '@aws-sdk/client-s3'; +Common questions: -class BucketEncryptionManager { - constructor(s3Client, kmsAPI) { - this.s3Client = s3Client; - this.kmsAPI = kmsAPI; - this.buckets = []; - this.kmsKeys = []; - } - - // 初始化 - 加载buckets和KMS密钥 - async initialize() { - try { - await Promise.all([ - this.loadBuckets(), - this.loadKMSKeys() - ]); - console.log('Bucket加密管理器初始化完成'); - return { success: true }; - } catch (error) { - console.error('初始化失败:', error); - throw error; - } - } - - // 加载所有buckets及其加密状态 - async loadBuckets() { - try { - const listResult = await this.s3Client.send(new ListBucketsCommand({})); - - // 并行获取每个bucket的加密配置 - this.buckets = await Promise.all( - listResult.Buckets.map(async (bucket) => { - const encryptionInfo = await this.getBucketEncryptionInfo(bucket.Name); - return { - name: bucket.Name, - creationDate: bucket.CreationDate, - ...encryptionInfo - }; - }) - ); - - console.log(`已加载 ${this.buckets.length} 个buckets`); - return this.buckets; - } catch (error) { - console.error('加载buckets失败:', error); - throw error; - } - } - - // 获取单个bucket的加密信息 - async getBucketEncryptionInfo(bucketName) { - try { - const encryptionResult = await this.s3Client.send( - new GetBucketEncryptionCommand({ Bucket: bucketName }) - ); - - const rule = encryptionResult.ServerSideEncryptionConfiguration?.Rules?.[0]; - const defaultEncryption = rule?.ApplyServerSideEncryptionByDefault; - - if (!defaultEncryption) { - return { - encryptionStatus: 'Disabled', - encryptionType: null, - encryptionAlgorithm: null, - kmsKeyId: null, - kmsKeyName: null - }; - } - - const isKMS = defaultEncryption.SSEAlgorithm === 'aws:kms'; - const kmsKeyId = defaultEncryption.KMSMasterKeyID; - const kmsKeyName = isKMS ? this.getKMSKeyName(kmsKeyId) : null; - - return { - encryptionStatus: 'Enabled', - encryptionType: isKMS ? 'SSE-KMS' : 'SSE-S3', - encryptionAlgorithm: isKMS ? 'AES-256 (KMS)' : 'AES-256 (S3)', - kmsKeyId: kmsKeyId || null, - kmsKeyName: kmsKeyName || null - }; - } catch (error) { - // 404或NoSuchBucketPolicy表示未配置加密 - if (error.name === 'NoSuchBucketPolicy' || error.$metadata?.httpStatusCode === 404) { - return { - encryptionStatus: 'Disabled', - encryptionType: null, - encryptionAlgorithm: null, - kmsKeyId: null, - kmsKeyName: null - }; - } - throw error; - } - } - - // 加载KMS密钥列表 - async loadKMSKeys() { - try { - const keysList = await this.kmsAPI.getKeyList(); - this.kmsKeys = keysList.keys || []; - console.log(`已加载 ${this.kmsKeys.length} 个KMS密钥`); - return this.kmsKeys; - } catch (error) { - console.error('加载KMS密钥失败:', error); - // KMS密钥加载失败不应该阻止bucket加载 - this.kmsKeys = []; - } - } - - // 根据KMS密钥ID获取密钥名称 - getKMSKeyName(keyId) { - if (!keyId || !this.kmsKeys.length) return null; - - const key = this.kmsKeys.find(k => k.key_id === keyId); - return key?.tags?.name || key?.description || keyId.substring(0, 8) + '...'; - } - - // 配置bucket加密 - async configureBucketEncryption(bucketName, encryptionType, kmsKeyId = null) { - try { - const encryptionConfig = { - Bucket: bucketName, - ServerSideEncryptionConfiguration: { - Rules: [ - { - ApplyServerSideEncryptionByDefault: { - SSEAlgorithm: encryptionType === 'SSE-KMS' ? 
'aws:kms' : 'AES256', - ...(encryptionType === 'SSE-KMS' && kmsKeyId && { KMSMasterKeyID: kmsKeyId }) - } - } - ] - } - }; - - await this.s3Client.send(new PutBucketEncryptionCommand(encryptionConfig)); - - // 更新本地缓存 - await this.refreshBucketInfo(bucketName); - - console.log(`Bucket ${bucketName} 加密配置成功: ${encryptionType}`); - return { success: true }; - } catch (error) { - console.error(`配置bucket加密失败 (${bucketName}):`, error); - throw error; - } - } - - // 移除bucket加密配置 - async removeBucketEncryption(bucketName) { - try { - await this.s3Client.send(new DeleteBucketEncryptionCommand({ Bucket: bucketName })); - - // 更新本地缓存 - await this.refreshBucketInfo(bucketName); - - console.log(`Bucket ${bucketName} 加密配置已移除`); - return { success: true }; - } catch (error) { - console.error(`移除bucket加密失败 (${bucketName}):`, error); - throw error; - } - } - - // 为bucket创建专用KMS密钥并配置加密 - async setupDedicatedEncryption(bucketName, keyName, keyDescription) { - try { - // 1. 创建专用KMS密钥 - const newKey = await this.kmsAPI.createKey({ - KeyUsage: 'ENCRYPT_DECRYPT', - Description: keyDescription || `Dedicated encryption key for bucket: ${bucketName}`, - Tags: { - name: keyName, - bucket: bucketName, - purpose: 'bucket-encryption', - created_by: 'bucket-manager', - created_at: new Date().toISOString() - } - }); - - // 2. 配置bucket使用新密钥 - await this.configureBucketEncryption(bucketName, 'SSE-KMS', newKey.key_id); - - // 3. 更新KMS密钥缓存 - await this.loadKMSKeys(); - - console.log(`为bucket ${bucketName} 创建并配置专用密钥: ${newKey.key_id}`); - return { - success: true, - keyId: newKey.key_id, - keyName: keyName - }; - } catch (error) { - console.error(`设置专用加密失败 (${bucketName}):`, error); - throw error; - } - } - - // 批量配置多个bucket的加密 - async batchConfigureEncryption(configurations) { - const results = []; - - for (const config of configurations) { - try { - await this.configureBucketEncryption( - config.bucketName, - config.encryptionType, - config.kmsKeyId - ); - results.push({ bucketName: config.bucketName, success: true }); - } catch (error) { - results.push({ - bucketName: config.bucketName, - success: false, - error: error.message - }); - } - } - - const successCount = results.filter(r => r.success).length; - console.log(`批量配置完成: ${successCount}/${configurations.length} 成功`); - - return results; - } - - // 刷新单个bucket信息 - async refreshBucketInfo(bucketName) { - try { - const bucketIndex = this.buckets.findIndex(b => b.name === bucketName); - if (bucketIndex !== -1) { - const encryptionInfo = await this.getBucketEncryptionInfo(bucketName); - this.buckets[bucketIndex] = { - ...this.buckets[bucketIndex], - ...encryptionInfo - }; - } - } catch (error) { - console.error(`刷新bucket信息失败 (${bucketName}):`, error); - } - } - - // 获取加密统计信息 - getEncryptionStats() { - const total = this.buckets.length; - const encrypted = this.buckets.filter(b => b.encryptionStatus === 'Enabled').length; - const sseS3 = this.buckets.filter(b => b.encryptionType === 'SSE-S3').length; - const sseKMS = this.buckets.filter(b => b.encryptionType === 'SSE-KMS').length; - const unencrypted = total - encrypted; - - return { - total, - encrypted, - unencrypted, - sseS3, - sseKMS, - encryptionRate: total > 0 ? 
(encrypted / total * 100).toFixed(1) + '%' : '0%' - }; - } - - // 搜索和过滤功能 - searchBuckets(query, filters = {}) { - let filtered = [...this.buckets]; - - // 名称搜索 - if (query) { - const lowerQuery = query.toLowerCase(); - filtered = filtered.filter(bucket => - bucket.name.toLowerCase().includes(lowerQuery) - ); - } - - // 加密状态过滤 - if (filters.encryptionStatus) { - filtered = filtered.filter(bucket => - bucket.encryptionStatus === filters.encryptionStatus - ); - } - - // 加密类型过滤 - if (filters.encryptionType) { - filtered = filtered.filter(bucket => - bucket.encryptionType === filters.encryptionType - ); - } - - return filtered; - } - - // 获取可用的KMS密钥选项 - getKMSKeyOptions() { - return this.kmsKeys.map(key => ({ - value: key.key_id, - label: key.tags?.name || key.description || `Key: ${key.key_id.substring(0, 8)}...`, - description: key.description, - enabled: key.enabled, - creationDate: key.creation_date - })); - } -} - -// 使用示例 -async function bucketEncryptionExample() { - // 1. 初始化S3客户端和KMS API - const s3Client = new S3Client({ - region: 'us-east-1', - endpoint: 'http://localhost:9000', - forcePathStyle: true, - credentials: { - accessKeyId: 'your-access-key', - secretAccessKey: 'your-secret-key' - } - }); - - const kmsAPI = { - createKey: async (params) => callKMSAPI('POST', '/kms/keys', params), - getKeyList: async () => callKMSAPI('GET', '/kms/keys'), - getKeyDetails: async (keyId) => callKMSAPI('GET', `/kms/keys/${keyId}`) - }; - - // 2. 创建管理器实例 - const bucketManager = new BucketEncryptionManager(s3Client, kmsAPI); - - try { - // 3. 初始化 - await bucketManager.initialize(); - - // 4. 查看当前加密状态 - const stats = bucketManager.getEncryptionStats(); - console.log('加密统计:', stats); - - // 5. 为特定bucket配置SSE-KMS加密 - await bucketManager.setupDedicatedEncryption( - 'sensitive-data-bucket', - 'sensitive-data-key', - 'Encryption key for sensitive data bucket' - ); - - // 6. 为其他buckets配置SSE-S3加密 - const bucketConfigs = [ - { bucketName: 'public-assets', encryptionType: 'SSE-S3' }, - { bucketName: 'user-uploads', encryptionType: 'SSE-S3' }, - { bucketName: 'backup-data', encryptionType: 'SSE-S3' } - ]; - - const batchResults = await bucketManager.batchConfigureEncryption(bucketConfigs); - console.log('批量配置结果:', batchResults); - - // 7. 搜索未加密的buckets - const unencryptedBuckets = bucketManager.searchBuckets('', { - encryptionStatus: 'Disabled' - }); - - if (unencryptedBuckets.length > 0) { - console.log('发现未加密的buckets:', unencryptedBuckets.map(b => b.name)); - } - - // 8. 
获取最终加密统计 - const finalStats = bucketManager.getEncryptionStats(); - console.log('最终加密统计:', finalStats); - - } catch (error) { - console.error('Bucket加密管理示例执行失败:', error); - } -} - -// 启动示例 -bucketEncryptionExample(); -``` - -### JavaScript 基础请求函数 - -```javascript -import AWS from 'aws-sdk'; - -// 配置 AWS SDK -const awsConfig = { - accessKeyId: 'your-access-key', - secretAccessKey: 'your-secret-key', - region: 'us-east-1', - endpoint: 'http://localhost:9000', - s3ForcePathStyle: true -}; - -// 创建签名请求的函数 -function createSignedRequest(method, path, body = null) { - const endpoint = new AWS.Endpoint(awsConfig.endpoint); - const request = new AWS.HttpRequest(endpoint, awsConfig.region); - - request.method = method; - request.path = `/rustfs/admin/v3${path}`; - request.headers['Content-Type'] = 'application/json'; - - if (body) { - request.body = JSON.stringify(body); - } - - const signer = new AWS.Signers.V4(request, 'execute-api'); - signer.addAuthorization(awsConfig, new Date()); - - return request; -} - -// 基础的 KMS API 调用函数 -async function callKMSAPI(method, path, body = null) { - const signedRequest = createSignedRequest(method, path, body); - - const options = { - method: signedRequest.method, - headers: signedRequest.headers, - body: signedRequest.body - }; - - const response = await fetch(signedRequest.endpoint.href + signedRequest.path, options); - const data = await response.json(); - - if (!response.ok) { - throw new Error(`KMS API Error: ${data.error?.message || response.statusText}`); - } - - return data; -} - -// 文件加密函数(使用 Web Crypto API) -async function encryptFileWithKey(fileData, plaintextKey) { - // 将 Base64 密钥转换为 ArrayBuffer - const keyData = Uint8Array.from(atob(plaintextKey), c => c.charCodeAt(0)); - - // 导入密钥 - const cryptoKey = await crypto.subtle.importKey( - 'raw', - keyData, - { name: 'AES-GCM' }, - false, - ['encrypt'] - ); - - // 生成随机 IV - const iv = crypto.getRandomValues(new Uint8Array(12)); - - // 加密数据 - const encryptedData = await crypto.subtle.encrypt( - { name: 'AES-GCM', iv: iv }, - cryptoKey, - fileData - ); - - return { - encryptedData: new Uint8Array(encryptedData), - iv: iv - }; -} - -// 文件解密函数 -async function decryptFileWithKey(encryptedData, iv, plaintextKey) { - const keyData = Uint8Array.from(atob(plaintextKey), c => c.charCodeAt(0)); - - const cryptoKey = await crypto.subtle.importKey( - 'raw', - keyData, - { name: 'AES-GCM' }, - false, - ['decrypt'] - ); - - const decryptedData = await crypto.subtle.decrypt( - { name: 'AES-GCM', iv: iv }, - cryptoKey, - encryptedData - ); - - return new Uint8Array(decryptedData); -} -``` - -### React Hook 示例 - -```javascript -import { useState, useCallback } from 'react'; - -export function useKMSService() { - const [loading, setLoading] = useState(false); - const [error, setError] = useState(null); - - const callAPI = useCallback(async (method, path, body) => { - setLoading(true); - setError(null); - - try { - const result = await callKMSAPI(method, path, body); - return result; - } catch (err) { - setError(err.message); - throw err; - } finally { - setLoading(false); - } - }, []); - - return { callAPI, loading, error }; -} -``` - -### Vue.js Composable 示例 - -```javascript -import { ref } from 'vue'; - -export function useKMSService() { - const loading = ref(false); - const error = ref(null); - - const callAPI = async (method, path, body) => { - loading.value = true; - error.value = null; - - try { - return await callKMSAPI(method, path, body); - } catch (err) { - error.value = err.message; - throw err; - } finally { 
- loading.value = false; - } - }; - - return { callAPI, loading, error }; -} -``` - -### 完整的端到端使用示例 - -#### 1. KMS 服务初始化 - -```javascript -// KMS 服务管理类 -class KMSServiceManager { - constructor() { - this.isConfigured = false; - this.isRunning = false; - } - - // 初始化 KMS 服务 - async initialize(backendType = 'local') { - try { - // 1. 配置 KMS 服务 - const config = backendType === 'local' ? { - backend_type: "local", - key_directory: "/var/lib/rustfs/kms/keys", - default_key_id: "default-master-key", - enable_cache: true, - cache_ttl_seconds: 600 - } : { - backend_type: "vault", - address: "https://vault.example.com:8200", - auth_method: { token: "s.your-vault-token" }, - mount_path: "transit", - kv_mount: "secret", - key_path_prefix: "rustfs/kms/keys", - default_key_id: "rustfs-master" - }; - - const configResult = await callKMSAPI('POST', '/kms/configure', config); - console.log('KMS 配置成功:', configResult); - this.isConfigured = true; - - // 2. 启动 KMS 服务 - const startResult = await callKMSAPI('POST', '/kms/start'); - console.log('KMS 启动成功:', startResult); - this.isRunning = true; - - // 3. 验证服务状态 - const status = await callKMSAPI('GET', '/kms/status'); - console.log('KMS 状态:', status); - - return { success: true, status }; - } catch (error) { - console.error('KMS 初始化失败:', error); - throw error; - } - } - - // 检查服务健康状态 - async checkHealth() { - try { - const status = await callKMSAPI('GET', '/kms/status'); - return status.healthy; - } catch (error) { - console.error('健康检查失败:', error); - return false; - } - } -} -``` - -#### 2. 密钥管理工具类 - -```javascript -// 密钥管理工具类 -class KMSKeyManager { - constructor() { - this.keys = new Map(); - } - - // 创建应用主密钥 - async createApplicationKey(description, tags = {}) { - try { - const keyRequest = { - KeyUsage: "ENCRYPT_DECRYPT", - Description: description, - Tags: { - ...tags, - created_by: "frontend-app", - created_at: new Date().toISOString() - } - }; - - const result = await callKMSAPI('POST', '/kms/keys', keyRequest); - this.keys.set(result.key_id, result.key_metadata); - - console.log(`密钥创建成功: ${result.key_id}`); - return result; - } catch (error) { - console.error('密钥创建失败:', error); - throw error; - } - } - - // 列出所有应用密钥 - async listApplicationKeys() { - try { - let allKeys = []; - let marker = null; - - do { - const params = new URLSearchParams({ limit: '50' }); - if (marker) params.append('marker', marker); - - const keysList = await callKMSAPI('GET', `/kms/keys?${params}`); - allKeys.push(...keysList.keys); - marker = keysList.next_marker; - } while (marker); - - // 更新本地缓存 - allKeys.forEach(key => { - this.keys.set(key.key_id, key); - }); - - return allKeys; - } catch (error) { - console.error('密钥列表获取失败:', error); - throw error; - } - } - - // 获取密钥详情 - async getKeyDetails(keyId) { - try { - const details = await callKMSAPI('GET', `/kms/keys/${keyId}`); - this.keys.set(keyId, details.key_metadata); - return details; - } catch (error) { - console.error(`密钥详情获取失败 (${keyId}):`, error); - throw error; - } - } - - // 安全删除密钥 - async safeDeleteKey(keyId, pendingDays = 7) { - try { - const deleteRequest = { - key_id: keyId, - pending_window_in_days: pendingDays - }; - - const result = await callKMSAPI('DELETE', '/kms/keys/delete', deleteRequest); - console.log(`密钥已计划删除: ${keyId}, 删除日期: ${result.deletion_date}`); - return result; - } catch (error) { - console.error(`密钥删除失败 (${keyId}):`, error); - throw error; - } - } -} -``` - -#### 3. 
文件加密管理器 - -```javascript -// 文件加密管理器 -class FileEncryptionManager { - constructor(keyManager) { - this.keyManager = keyManager; - this.encryptionCache = new Map(); - } - - // 加密文件 - async encryptFile(file, masterKeyId, metadata = {}) { - try { - // 1. 生成数据密钥 - const encryptionContext = { - file_name: file.name, - file_size: file.size.toString(), - file_type: file.type, - user_id: metadata.userId || 'unknown', - ...metadata - }; - - const dataKeyRequest = { - key_id: masterKeyId, - key_spec: "AES_256", - encryption_context: encryptionContext - }; - - const dataKey = await callKMSAPI('POST', '/kms/generate-data-key', dataKeyRequest); - - // 2. 读取文件数据 - const fileData = await this.readFileAsArrayBuffer(file); - - // 3. 加密文件数据 - const { encryptedData, iv } = await encryptFileWithKey(fileData, dataKey.plaintext_key); - - // 4. 立即清理内存中的原始密钥 - dataKey.plaintext_key = null; - - // 5. 创建加密文件信息 - const encryptedFileInfo = { - encryptedData: encryptedData, - iv: iv, - ciphertextBlob: dataKey.ciphertext_blob, - keyId: dataKey.key_id, - encryptionContext: encryptionContext, - originalFileName: file.name, - originalSize: file.size, - encryptedAt: new Date().toISOString() - }; - - // 6. 缓存加密信息 - const fileId = this.generateFileId(); - this.encryptionCache.set(fileId, encryptedFileInfo); - - console.log(`文件加密成功: ${file.name} -> ${fileId}`); - return { fileId, encryptedFileInfo }; - - } catch (error) { - console.error(`文件加密失败 (${file.name}):`, error); - throw error; - } - } - - // 解密文件 - async decryptFile(fileId) { - try { - // 1. 获取加密文件信息 - const encryptedFileInfo = this.encryptionCache.get(fileId); - if (!encryptedFileInfo) { - throw new Error('加密文件信息不存在'); - } - - // 2. 解密数据密钥 - const decryptRequest = { - ciphertext_blob: encryptedFileInfo.ciphertextBlob, - encryption_context: encryptedFileInfo.encryptionContext - }; - - const decryptedKey = await callKMSAPI('POST', '/kms/decrypt', decryptRequest); - - // 3. 解密文件数据 - const decryptedData = await decryptFileWithKey( - encryptedFileInfo.encryptedData, - encryptedFileInfo.iv, - decryptedKey.plaintext - ); - - // 4. 立即清理内存中的原始密钥 - decryptedKey.plaintext = null; - - // 5. 创建解密后的文件对象 - const decryptedFile = new File( - [decryptedData], - encryptedFileInfo.originalFileName, - { type: encryptedFileInfo.encryptionContext.file_type } - ); - - console.log(`文件解密成功: ${fileId} -> ${encryptedFileInfo.originalFileName}`); - return decryptedFile; - - } catch (error) { - console.error(`文件解密失败 (${fileId}):`, error); - throw error; - } - } - - // 批量加密文件 - async encryptFiles(files, masterKeyId, metadata = {}) { - const results = []; - - for (const file of files) { - try { - const result = await this.encryptFile(file, masterKeyId, { - ...metadata, - batch_id: this.generateBatchId(), - file_index: results.length - }); - results.push({ success: true, file: file.name, ...result }); - } catch (error) { - results.push({ success: false, file: file.name, error: error.message }); - } - } - - return results; - } - - // 工具方法 - readFileAsArrayBuffer(file) { - return new Promise((resolve, reject) => { - const reader = new FileReader(); - reader.onload = () => resolve(reader.result); - reader.onerror = () => reject(reader.error); - reader.readAsArrayBuffer(file); - }); - } - - generateFileId() { - return 'file_' + Date.now() + '_' + Math.random().toString(36).substr(2, 9); - } - - generateBatchId() { - return 'batch_' + Date.now() + '_' + Math.random().toString(36).substr(2, 9); - } -} -``` - -#### 4. 
完整的应用示例 - -```javascript -// 完整的 KMS 应用示例 -class KMSApplication { - constructor() { - this.serviceManager = new KMSServiceManager(); - this.keyManager = new KMSKeyManager(); - this.fileManager = null; - this.appMasterKeyId = null; - } - - // 初始化应用 - async initialize() { - try { - console.log('正在初始化 KMS 应用...'); - - // 1. 初始化 KMS 服务 - await this.serviceManager.initialize('local'); - - // 2. 创建应用主密钥 - const appKey = await this.keyManager.createApplicationKey( - '文件加密应用主密钥', - { - application: 'file-encryption-app', - version: '1.0.0', - environment: 'production' - } - ); - this.appMasterKeyId = appKey.key_id; - - // 3. 初始化文件管理器 - this.fileManager = new FileEncryptionManager(this.keyManager); - - console.log('KMS 应用初始化完成'); - return { success: true, masterKeyId: this.appMasterKeyId }; - - } catch (error) { - console.error('KMS 应用初始化失败:', error); - throw error; - } - } - - // 处理文件上传和加密 - async handleFileUpload(files, userMetadata = {}) { - if (!this.fileManager || !this.appMasterKeyId) { - throw new Error('应用未初始化'); - } - - try { - console.log(`开始处理 ${files.length} 个文件的加密...`); - - const results = await this.fileManager.encryptFiles( - files, - this.appMasterKeyId, - { - ...userMetadata, - upload_session: Date.now() - } - ); - - const successCount = results.filter(r => r.success).length; - console.log(`文件加密完成: ${successCount}/${files.length} 成功`); - - return results; - - } catch (error) { - console.error('文件上传处理失败:', error); - throw error; - } - } - - // 处理文件下载和解密 - async handleFileDownload(fileId) { - if (!this.fileManager) { - throw new Error('应用未初始化'); - } - - try { - console.log(`开始解密文件: ${fileId}`); - const decryptedFile = await this.fileManager.decryptFile(fileId); - - // 创建下载链接 - const url = URL.createObjectURL(decryptedFile); - const a = document.createElement('a'); - a.href = url; - a.download = decryptedFile.name; - a.click(); - - // 清理资源 - setTimeout(() => URL.revokeObjectURL(url), 100); - - console.log(`文件下载完成: ${decryptedFile.name}`); - return decryptedFile; - - } catch (error) { - console.error('文件下载处理失败:', error); - throw error; - } - } - - // 健康检查 - async performHealthCheck() { - try { - const isHealthy = await this.serviceManager.checkHealth(); - const keyCount = this.keyManager.keys.size; - - return { - kmsHealthy: isHealthy, - keyCount: keyCount, - masterKeyId: this.appMasterKeyId, - timestamp: new Date().toISOString() - }; - } catch (error) { - console.error('健康检查失败:', error); - return { kmsHealthy: false, error: error.message }; - } - } -} - -// 使用示例 -async function main() { - const app = new KMSApplication(); - - try { - // 初始化应用 - await app.initialize(); - - // 模拟文件上传 - const fileInput = document.getElementById('file-input'); - fileInput.addEventListener('change', async (event) => { - const files = Array.from(event.target.files); - const results = await app.handleFileUpload(files, { - userId: 'user123', - department: 'finance' - }); - - console.log('上传结果:', results); - }); - - // 定期健康检查 - setInterval(async () => { - const health = await app.performHealthCheck(); - console.log('健康状态:', health); - }, 30000); - - } catch (error) { - console.error('应用启动失败:', error); - } -} - -// 启动应用 -main(); -``` - -## 🔗 相关资源 - -- [KMS 配置指南](configuration.md) -- [服务端加密集成](sse-integration.md) -- [安全最佳实践](security.md) -- [故障排除指南](troubleshooting.md) - -## 📞 技术支持 - -如果在对接过程中遇到问题,请: - -1. **KMS服务问题**: 检查 [故障排除指南](troubleshooting.md) -2. **Bucket加密问题**: 验证S3客户端配置和权限设置 -3. **查看日志**: 检查服务器日志以获取详细错误信息 -4. **运行测试**: 验证KMS配置:`cargo test -p e2e_test kms:: -- --nocapture` -5. 
**API兼容性**: 确保使用的AWS SDK版本支持相关操作 - -### 常见问题解决 - -**Q: Bucket加密配置失败,提示权限不足** -A: 检查IAM策略是否包含以下权限: -- `s3:GetBucketEncryption` -- `s3:PutBucketEncryption` -- `s3:DeleteBucketEncryption` -- `kms:DescribeKey`(当使用SSE-KMS时) - -**Q: KMS密钥在bucket加密中无法选择** -A: 确保: -1. KMS服务状态为Running且健康 -2. 密钥状态为Enabled -3. 密钥的KeyUsage为ENCRYPT_DECRYPT - -**Q: 前端显示加密状态错误** -A: 这通常是由于: -1. 获取bucket加密配置时发生404错误(正常,表示未配置) -2. 网络延迟导致状态更新不及时,手动刷新即可 +- **Bucket encryption fails with insufficient permissions** – Ensure the IAM policy grants `s3:GetBucketEncryption`, `s3:PutBucketEncryption`, `s3:DeleteBucketEncryption`, and (for SSE-KMS) `kms:DescribeKey`. +- **Unable to select a KMS key** – Confirm the KMS service is running, the key is enabled, and `KeyUsage` is `ENCRYPT_DECRYPT`. +- **Frontend shows incorrect encryption state** – A 404 during `GetBucketEncryption` is normal (no configuration). Allow for network latency before refreshing the status. --- -*本文档版本:v1.1 | 最后更新:2024-09-22 | 新增:Bucket加密配置API指南* \ No newline at end of file +_Last updated: 2024-09-22_ diff --git a/rustfs/README.md b/rustfs/README.md index 8aaab788..af51aa5a 100644 --- a/rustfs/README.md +++ b/rustfs/README.md @@ -17,7 +17,7 @@

-English | 简体中文 +English | Simplified Chinese

RustFS is a high-performance distributed object storage software built using Rust, one of the most popular languages diff --git a/scripts/run_scanner_benchmarks.sh b/scripts/run_scanner_benchmarks.sh index 576acc9c..bbf68530 100755 --- a/scripts/run_scanner_benchmarks.sh +++ b/scripts/run_scanner_benchmarks.sh @@ -1,18 +1,18 @@ #!/bin/bash -# Scanner性能优化基准测试运行脚本 -# 使用方法: ./scripts/run_scanner_benchmarks.sh [test_type] [quick] +# Scanner performance benchmark runner +# Usage: ./scripts/run_scanner_benchmarks.sh [test_type] [quick] set -e WORKSPACE_ROOT="/home/dandan/code/rust/rustfs" cd "$WORKSPACE_ROOT" -# 基本参数 +# Default parameters QUICK_MODE=false TEST_TYPE="all" -# 解析命令行参数 +# Parse command-line arguments if [[ "$1" == "quick" ]] || [[ "$2" == "quick" ]]; then QUICK_MODE=true fi @@ -21,116 +21,116 @@ if [[ -n "$1" ]] && [[ "$1" != "quick" ]]; then TEST_TYPE="$1" fi -# 快速模式的基准测试参数 +# Benchmark options for quick mode if [[ "$QUICK_MODE" == "true" ]]; then BENCH_ARGS="--sample-size 10 --warm-up-time 1 --measurement-time 2" - echo "🚀 运行快速基准测试模式..." + echo "🚀 Running benchmarks in quick mode..." else BENCH_ARGS="" - echo "🏃 运行完整基准测试模式..." + echo "🏃 Running the full benchmark suite..." fi -echo "📊 Scanner性能优化基准测试" -echo "工作目录: $WORKSPACE_ROOT" -echo "测试类型: $TEST_TYPE" -echo "快速模式: $QUICK_MODE" +echo "📊 Scanner performance benchmarks" +echo "Working directory: $WORKSPACE_ROOT" +echo "Selected benchmark group: $TEST_TYPE" +echo "Quick mode: $QUICK_MODE" echo "=" -# 检查编译状态 -echo "🔧 检查编译状态..." +# Verify the workspace compiles +echo "🔧 Checking compilation status..." if ! cargo check --package rustfs-ahm --benches --quiet; then - echo "❌ 基准测试编译失败" + echo "❌ Benchmark compilation failed" exit 1 fi -echo "✅ 编译检查通过" +echo "✅ Compilation succeeded" -# 基准测试函数 +# Helper to run an individual benchmark target run_benchmark() { local bench_name=$1 local description=$2 - + echo "" - echo "🧪 运行 $description" - echo "基准测试: $bench_name" - echo "参数: $BENCH_ARGS" - + echo "🧪 Running $description" + echo "Benchmark: $bench_name" + echo "Arguments: $BENCH_ARGS" + if timeout 300 cargo bench --package rustfs-ahm --bench "$bench_name" -- $BENCH_ARGS; then - echo "✅ $description 完成" + echo "✅ $description finished" else - echo "⚠️ $description 运行超时或失败" + echo "⚠️ $description timed out or failed" return 1 fi } -# 运行指定的基准测试 +# Dispatch benchmarks based on the requested test type case "$TEST_TYPE" in "business" | "business_io") - run_benchmark "business_io_impact" "业务IO影响测试" + run_benchmark "business_io_impact" "Business I/O impact" ;; "scanner" | "performance") - run_benchmark "scanner_performance" "Scanner性能测试" + run_benchmark "scanner_performance" "Scanner performance" ;; "resource" | "contention") - run_benchmark "resource_contention" "资源竞争测试" + run_benchmark "resource_contention" "Resource contention" ;; "adaptive" | "scheduling") - run_benchmark "adaptive_scheduling" "智能调度测试" + run_benchmark "adaptive_scheduling" "Adaptive scheduling" ;; "list") - echo "📋 列出所有可用的基准测试:" + echo "📋 Available benchmarks:" cargo bench --package rustfs-ahm -- --list ;; "all") - echo "🚀 运行所有基准测试..." - + echo "🚀 Running the full benchmark suite..." + echo "" - echo "=== 1/4 业务IO影响测试 ===" - if ! run_benchmark "business_io_impact" "业务IO影响测试"; then - echo "⚠️ 业务IO影响测试失败,继续运行其他测试..." + echo "=== 1/4 Business I/O impact ===" + if ! run_benchmark "business_io_impact" "Business I/O impact"; then + echo "⚠️ Business I/O impact benchmark failed, continuing..." fi - + echo "" - echo "=== 2/4 Scanner性能测试 ===" - if ! 
run_benchmark "scanner_performance" "Scanner性能测试"; then - echo "⚠️ Scanner性能测试失败,继续运行其他测试..." + echo "=== 2/4 Scanner performance ===" + if ! run_benchmark "scanner_performance" "Scanner performance"; then + echo "⚠️ Scanner performance benchmark failed, continuing..." fi - + echo "" - echo "=== 3/4 资源竞争测试 ===" - if ! run_benchmark "resource_contention" "资源竞争测试"; then - echo "⚠️ 资源竞争测试失败,继续运行其他测试..." + echo "=== 3/4 Resource contention ===" + if ! run_benchmark "resource_contention" "Resource contention"; then + echo "⚠️ Resource contention benchmark failed, continuing..." fi - + echo "" - echo "=== 4/4 智能调度测试 ===" - if ! run_benchmark "adaptive_scheduling" "智能调度测试"; then - echo "⚠️ 智能调度测试失败" + echo "=== 4/4 Adaptive scheduling ===" + if ! run_benchmark "adaptive_scheduling" "Adaptive scheduling"; then + echo "⚠️ Adaptive scheduling benchmark failed" fi ;; *) - echo "❌ 未知的测试类型: $TEST_TYPE" + echo "❌ Unknown test type: $TEST_TYPE" echo "" - echo "用法: $0 [test_type] [quick]" + echo "Usage: $0 [test_type] [quick]" echo "" - echo "测试类型:" - echo " all - 运行所有基准测试 (默认)" - echo " business|business_io - 业务IO影响测试" - echo " scanner|performance - Scanner性能测试" - echo " resource|contention - 资源竞争测试" - echo " adaptive|scheduling - 智能调度测试" - echo " list - 列出所有可用测试" + echo "Available test types:" + echo " all - run the entire benchmark suite (default)" + echo " business|business_io - business I/O impact benchmark" + echo " scanner|performance - scanner performance benchmark" + echo " resource|contention - resource contention benchmark" + echo " adaptive|scheduling - adaptive scheduling benchmark" + echo " list - list all benchmarks" echo "" - echo "选项:" - echo " quick - 快速模式 (减少样本数和测试时间)" + echo "Options:" + echo " quick - quick mode (smaller sample size and duration)" echo "" - echo "示例:" - echo " $0 business quick - 快速运行业务IO测试" - echo " $0 all - 运行所有完整测试" - echo " $0 list - 列出所有测试" + echo "Examples:" + echo " $0 business quick - run the business I/O benchmark in quick mode" + echo " $0 all - run every benchmark" + echo " $0 list - list available benchmarks" exit 1 ;; esac echo "" -echo "🎉 基准测试脚本执行完成!" -echo "📊 查看结果: target/criterion/ 目录下有详细的HTML报告" \ No newline at end of file +echo "🎉 Benchmark script finished!" +echo "📊 Detailed HTML reports are available under target/criterion/" \ No newline at end of file