Mirror of https://github.com/rustfs/rustfs.git (synced 2026-01-17 17:40:38 +00:00)

Compare commits: 28 commits, 1.0.0-alph… → refactor/l…
| Author | SHA1 | Date |
|---|---|---|
| | 0b80074270 | |
| | 6617372b33 | |
| | 769778e565 | |
| | a7f5c4af46 | |
| | a9d5fbac54 | |
| | 281e68c9bf | |
| | d30c42f85a | |
| | 79012be2c8 | |
| | 325ff62684 | |
| | f0c2ede7a7 | |
| | b9fd66c1cd | |
| | c43b11fb92 | |
| | d737a439d5 | |
| | 0714c7a9ca | |
| | 2ceb65adb4 | |
| | dd47fcf2a8 | |
| | 64ba52bc1e | |
| | d2ced233e5 | |
| | 40660e7b80 | |
| | 2aca1f77af | |
| | 6f3d2885cd | |
| | 6ab7619023 | |
| | ed73e2b782 | |
| | 6a59c0a474 | |
| | c5264f9703 | |
| | b47765b4c0 | |
| | e22b24684f | |
| | 1d069fd351 | |
.github/workflows/ci.yml (vendored): 2 changes

@@ -103,6 +103,8 @@ jobs:
    runs-on: ubuntu-latest
    timeout-minutes: 60
    steps:
      - name: Delete huge unnecessary tools folder
        run: rm -rf /opt/hostedtoolcache
      - name: Checkout repository
        uses: actions/checkout@v5
.rules.md (new file, 702 lines)
# RustFS Project AI Coding Rules

## 🚨🚨🚨 CRITICAL DEVELOPMENT RULES - ZERO TOLERANCE 🚨🚨🚨

### ⛔️ ABSOLUTE PROHIBITION: NEVER COMMIT DIRECTLY TO MASTER/MAIN BRANCH ⛔️

**🔥 THIS IS THE MOST CRITICAL RULE - VIOLATION WILL RESULT IN IMMEDIATE REVERSAL 🔥**

- **🚫 ZERO DIRECT COMMITS TO MAIN/MASTER BRANCH - ABSOLUTELY FORBIDDEN**
- **🚫 ANY DIRECT COMMIT TO MAIN BRANCH MUST BE IMMEDIATELY REVERTED**
- **🚫 NO EXCEPTIONS FOR HOTFIXES, EMERGENCIES, OR URGENT CHANGES**
- **🚫 NO EXCEPTIONS FOR SMALL CHANGES, TYPOS, OR DOCUMENTATION UPDATES**
- **🚫 NO EXCEPTIONS FOR ANYONE - MAINTAINERS, CONTRIBUTORS, OR ADMINS**
### 📋 MANDATORY WORKFLOW - STRICTLY ENFORCED

**EVERY SINGLE CHANGE MUST FOLLOW THIS WORKFLOW:**

1. **Check current branch**: `git branch` (MUST NOT be on main/master)
2. **Switch to main**: `git checkout main`
3. **Pull latest**: `git pull origin main`
4. **Create feature branch**: `git checkout -b feat/your-feature-name`
5. **Make changes ONLY on feature branch**
6. **Test thoroughly before committing**
7. **Commit and push to feature branch**: `git push origin feat/your-feature-name`
8. **Create Pull Request**: Use `gh pr create` (MANDATORY)
9. **Wait for PR approval**: NO self-merging allowed
10. **Merge through GitHub interface**: ONLY after approval

### 🔒 ENFORCEMENT MECHANISMS

- **Branch protection rules**: Main branch is protected
- **Pre-commit hooks**: Will block direct commits to main
- **CI/CD checks**: All PRs must pass before merging
- **Code review requirement**: At least one approval needed
- **Automated reversal**: Direct commits to main will be automatically reverted
## 🎯 Core AI Development Principles

### Five Execution Steps

#### 1. Task Analysis and Planning

- **Clear Objectives**: Deeply understand the task requirements and expected results before starting to code
- **Plan Development**: List the specific files, components, and functions that need modification, and explain the reason for each change
- **Risk Assessment**: Evaluate the impact of changes on existing functionality and develop rollback plans

#### 2. Precise Code Location

- **File Identification**: Determine the specific files and line numbers that need modification
- **Impact Analysis**: Avoid modifying irrelevant files; clearly state the reason for each file modification
- **Minimization Principle**: Unless explicitly required by the task, do not create new abstraction layers or refactor existing code

#### 3. Minimal Code Changes

- **Focus on Core**: Only write code directly required by the task
- **Avoid Redundancy**: Do not add unnecessary logs, comments, tests, or error handling
- **Isolation**: Ensure new code does not interfere with existing functionality; maintain code independence

#### 4. Strict Code Review

- **Correctness Check**: Verify the correctness and completeness of the code logic
- **Style Consistency**: Ensure code conforms to the established project coding style
- **Side Effect Assessment**: Evaluate the impact of changes on downstream systems

#### 5. Clear Delivery Documentation

- **Change Summary**: Explain all modifications and the reasons behind them in detail
- **File List**: List all modified files and their specific changes
- **Risk Statement**: Mark any assumptions or potential risk points

### Core Principles

- **🎯 Precise Execution**: Strictly follow task requirements; do not improvise beyond the task scope
- **⚡ Efficient Development**: Avoid over-design; only do the necessary work
- **🛡️ Safe and Reliable**: Always follow development processes to ensure code quality and system stability
- **🔒 Cautious Modification**: Only modify code when you clearly know what needs to change and are confident in the change

### Additional AI Behavior Rules

1. **Use English for all code comments and documentation** - All comments, variable names, function names, documentation, and user-facing text in code should be in English
2. **Clean up temporary scripts after use** - Any temporary scripts, test files, or helper files created during AI work should be removed after task completion
3. **Only make confident modifications** - Do not make speculative changes or "convenient" modifications outside the task scope. If uncertain about a change, ask for clarification rather than guessing
## Project Overview

RustFS is a high-performance distributed object storage system written in Rust and compatible with the S3 API. The project adopts a modular architecture, supporting erasure-coded storage, multi-tenant management, observability, and other enterprise-level features.

## Core Architecture Principles

### 1. Modular Design

- The project uses a Cargo workspace structure containing multiple independent crates
- Core modules: `rustfs` (main service), `ecstore` (erasure coding storage), `common` (shared components)
- Functional modules: `iam` (identity management), `madmin` (management interface), `crypto` (encryption), etc.
- Tool modules: `cli` (command line tool), `crates/*` (utility libraries)
### 2. Asynchronous Programming Pattern

- Comprehensive use of the `tokio` async runtime
- Prioritize `async/await` syntax
- Use `async-trait` for async methods in traits
- Avoid blocking operations; use `spawn_blocking` when necessary (see the sketch below)
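
A minimal sketch of the last point, assuming a CPU-heavy `checksum` helper that is not part of the codebase: blocking work is moved off the async executor with `tokio::task::spawn_blocking`.

```rust
use tokio::task;

/// Hypothetical CPU-bound helper; stands in for any blocking computation.
fn checksum(data: &[u8]) -> u32 {
    data.iter().fold(0u32, |acc, b| acc.wrapping_add(*b as u32))
}

/// Offload the blocking computation so the async executor stays responsive.
async fn checksum_async(data: Vec<u8>) -> Result<u32, task::JoinError> {
    task::spawn_blocking(move || checksum(&data)).await
}
```
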
### 3. Error Handling Strategy

- **Use modular, type-safe error handling with `thiserror`**
- Each module should define its own error type using the `thiserror::Error` derive macro
- Support error chains and context information through the `#[from]` and `#[source]` attributes
- Use `Result<T>` type aliases for consistency within each module
- Error conversion between modules should use explicit `From` implementations
- Follow the pattern: `pub type Result<T> = core::result::Result<T, Error>`
- Use `#[error("description")]` attributes for clear error messages
- Support error downcasting when needed through `other()` helper methods
- Implement `Clone` for errors when required by the domain logic
## Code Style Guidelines

### 1. Formatting Configuration

```toml
max_width = 130
fn_call_width = 90
single_line_let_else_max_width = 100
```
### 2. **🔧 MANDATORY Code Formatting Rules**

**CRITICAL**: All code must be properly formatted before committing. This project enforces strict formatting standards to maintain code consistency and readability.

#### Pre-commit Requirements (MANDATORY)

Before every commit, you **MUST**:

1. **Format your code**:

   ```bash
   cargo fmt --all
   ```

2. **Verify formatting**:

   ```bash
   cargo fmt --all --check
   ```

3. **Pass clippy checks**:

   ```bash
   cargo clippy --all-targets --all-features -- -D warnings
   ```

4. **Ensure compilation**:

   ```bash
   cargo check --all-targets
   ```

#### Quick Commands

Use these convenient Makefile targets for common tasks:

```bash
# Format all code
make fmt

# Check if code is properly formatted
make fmt-check

# Run clippy checks
make clippy

# Run compilation check
make check

# Run tests
make test

# Run all pre-commit checks (format + clippy + check + test)
make pre-commit

# Setup git hooks (one-time setup)
make setup-hooks
```
### 3. Naming Conventions

- Use `snake_case` for functions, variables, and modules
- Use `PascalCase` for types, traits, and enums
- Constants use `SCREAMING_SNAKE_CASE`
- Global variables use the `GLOBAL_` prefix, e.g., `GLOBAL_Endpoints`
- Use meaningful and descriptive names for variables, functions, and methods
- Avoid meaningless names like `temp`, `data`, `foo`, `bar`, `test123`
- Choose names that clearly express purpose and intent (see the sketch below)
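
A minimal illustration of these conventions; the names below are hypothetical and not taken from the codebase.

```rust
use std::sync::OnceLock;

/// Constant: SCREAMING_SNAKE_CASE.
const DEFAULT_ENDPOINT_COUNT: usize = 1;

/// Global state: `GLOBAL_` prefix (hypothetical, not an actual RustFS global).
static GLOBAL_ENDPOINTS: OnceLock<Vec<String>> = OnceLock::new();

/// Type name: PascalCase.
struct EndpointSummary {
    /// Field name: snake_case.
    endpoint_count: usize,
}

/// Function and variable names: snake_case and descriptive.
fn summarize_endpoints() -> EndpointSummary {
    let endpoint_count = GLOBAL_ENDPOINTS
        .get()
        .map(|endpoints| endpoints.len())
        .unwrap_or(DEFAULT_ENDPOINT_COUNT);
    EndpointSummary { endpoint_count }
}
```
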
### 4. Type Declaration Guidelines

- **Prefer type inference over explicit type declarations** when the type is obvious from context (see the sketch below)
- Let the Rust compiler infer types whenever possible to reduce verbosity and improve maintainability
- Only specify types explicitly when:
  - The type cannot be inferred by the compiler
  - Explicit typing improves code clarity and readability
  - It is required at API boundaries (function signatures, public struct fields)
  - It is needed to resolve ambiguity between multiple possible types
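
A minimal sketch contrasting inference with an explicit annotation; the function is illustrative only.

```rust
use std::collections::HashMap;

fn summarize(sizes: &[u64]) -> u64 {
    // Inferred: the accumulator type is clear from the turbofish on sum().
    let total = sizes.iter().sum::<u64>();

    // Explicit annotation needed: collect() cannot pick the target container on its own.
    let by_index: HashMap<usize, u64> = sizes.iter().copied().enumerate().collect();

    total + by_index.len() as u64
}
```
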
### 5. Documentation Comments

- Public APIs must have documentation comments (see the example below)
- Use `///` for documentation comments
- For complex functions, add `# Examples` and `# Parameters` sections
- Describe error cases in an `# Errors` section
- Always use English for all comments and documentation
- Avoid meaningless comments like "debug 111" or placeholder text
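
A minimal doc-comment sketch following this layout; the function itself is hypothetical.

```rust
/// Parses a bucket name from user input.
///
/// # Parameters
///
/// * `raw` - The raw, untrimmed bucket name supplied by the caller.
///
/// # Errors
///
/// Returns an error string if the trimmed name is empty.
///
/// # Examples
///
/// `parse_bucket_name("  photos  ")` returns `Ok("photos".to_string())`.
pub fn parse_bucket_name(raw: &str) -> Result<String, String> {
    let name = raw.trim();
    if name.is_empty() {
        return Err("bucket name must not be empty".to_string());
    }
    Ok(name.to_string())
}
```
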
### 6. Import Guidelines

- Standard library imports first
- Third-party crate imports in the middle
- Project-internal imports last
- Group `use` statements with blank lines between groups (see the sketch below)
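
An illustrative grouping; the `crate::` paths are hypothetical and only show where project-internal imports would go.

```rust
// Standard library
use std::collections::HashMap;
use std::sync::Arc;

// Third-party crates
use serde::Deserialize;
use tokio::sync::RwLock;

// Project-internal modules (hypothetical paths)
use crate::config::Config;
use crate::storage::ObjectInfo;
```
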
## Asynchronous Programming Guidelines

### 1. Trait Definition

```rust
#[async_trait::async_trait]
pub trait StorageAPI: Send + Sync {
    async fn get_object(&self, bucket: &str, object: &str) -> Result<ObjectInfo>;
}
```
### 2. Error Handling

```rust
// Use the ? operator to propagate errors
async fn example_function() -> Result<()> {
    let data = read_file("path").await?;
    process_data(data).await?;
    Ok(())
}
```
### 3. Concurrency Control

- Use `Arc` and `Mutex`/`RwLock` for shared state management
- Prioritize the async locks from `tokio::sync`
- Avoid holding locks for long periods (see the sketch below)
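
A minimal sketch of shared state behind `Arc<tokio::sync::RwLock<_>>`, where each guard is dropped before any further `.await`; the cache type is hypothetical.

```rust
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;

type SharedCache = Arc<RwLock<HashMap<String, u64>>>;

/// Read a value while holding the lock as briefly as possible.
async fn lookup_size(cache: &SharedCache, key: &str) -> Option<u64> {
    // The read guard is dropped at the end of this expression,
    // so no lock is held across later await points.
    cache.read().await.get(key).copied()
}

/// Update the cache; the write guard lives only for this one statement.
async fn record_size(cache: &SharedCache, key: String, size: u64) {
    cache.write().await.insert(key, size);
}
```
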
## Logging and Tracing Guidelines

### 1. Tracing Usage

```rust
#[tracing::instrument(skip(self, data))]
async fn process_data(&self, data: &[u8]) -> Result<()> {
    info!("Processing {} bytes", data.len());
    // Implementation logic
    Ok(())
}
```

### 2. Log Levels

- `error!`: System errors requiring immediate attention
- `warn!`: Warning information that may affect functionality
- `info!`: Important business information
- `debug!`: Debug information for development use
- `trace!`: Detailed execution paths
### 3. Structured Logging

```rust
info!(
    counter.rustfs_api_requests_total = 1_u64,
    key_request_method = %request.method(),
    key_request_uri_path = %request.uri().path(),
    "API request processed"
);
```
## Error Handling Guidelines

### 1. Error Type Definition

```rust
// Use thiserror for module-specific error types
#[derive(thiserror::Error, Debug)]
pub enum MyError {
    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),

    #[error("Storage error: {0}")]
    Storage(#[from] ecstore::error::StorageError),

    #[error("Custom error: {message}")]
    Custom { message: String },

    #[error("File not found: {path}")]
    FileNotFound { path: String },

    #[error("Invalid configuration: {0}")]
    InvalidConfig(String),
}

// Provide a Result type alias for the module
pub type Result<T> = core::result::Result<T, MyError>;
```
### 2. Error Helper Methods

```rust
impl MyError {
    /// Create an error from any compatible error type
    pub fn other<E>(error: E) -> Self
    where
        E: Into<Box<dyn std::error::Error + Send + Sync>>,
    {
        MyError::Io(std::io::Error::other(error))
    }
}
```
### 3. Error Context and Propagation

```rust
// Use the ? operator for clean error propagation
async fn example_function() -> Result<()> {
    let data = read_file("path").await?;
    process_data(data).await?;
    Ok(())
}

// Add context to errors
fn process_with_context(path: &str) -> Result<()> {
    std::fs::read(path).map_err(|e| MyError::Custom {
        message: format!("Failed to read {}: {}", path, e),
    })?;
    Ok(())
}
```
## Performance Optimization Guidelines

### 1. Memory Management

- Use `Bytes` instead of `Vec<u8>` for zero-copy operations
- Avoid unnecessary cloning; pass by reference instead
- Use `Arc` for sharing large objects (see the sketch below)
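
A minimal sketch of these points using the `bytes` crate; the `ErasureSetLayout` type is hypothetical and only stands in for any large, read-only object.

```rust
use bytes::Bytes;
use std::sync::Arc;

/// Split a payload into header and body without copying: `Bytes::slice`
/// only creates reference-counted views over the same buffer.
fn split_header(payload: &Bytes) -> (Bytes, Bytes) {
    let header_len = payload.len().min(16);
    (payload.slice(..header_len), payload.slice(header_len..))
}

/// Hypothetical large, read-only object.
struct ErasureSetLayout {
    disks: Vec<String>,
}

/// Share the object across tasks by bumping a reference count
/// instead of cloning the data.
fn share_layout(layout: ErasureSetLayout) -> Arc<ErasureSetLayout> {
    Arc::new(layout)
}
```
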
### 2. Concurrency Optimization

```rust
// Use join_all (from the futures crate) for concurrent operations
use futures::future::join_all;

let futures = disks.iter().map(|disk| disk.operation());
let results = join_all(futures).await;
```
### 3. Caching Strategy

- Use `LazyLock` for global caching (see the sketch below)
- Implement an LRU cache to avoid unbounded memory growth
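
A minimal sketch of a global cache behind `std::sync::LazyLock`; the cache contents and names are illustrative only.

```rust
use std::collections::HashMap;
use std::sync::{LazyLock, RwLock};

/// Global cache initialized on first access.
static OBJECT_SIZE_CACHE: LazyLock<RwLock<HashMap<String, u64>>> =
    LazyLock::new(|| RwLock::new(HashMap::new()));

fn cache_object_size(key: &str, size: u64) {
    if let Ok(mut cache) = OBJECT_SIZE_CACHE.write() {
        cache.insert(key.to_string(), size);
    }
}

fn cached_object_size(key: &str) -> Option<u64> {
    OBJECT_SIZE_CACHE.read().ok()?.get(key).copied()
}
```
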
## Testing Guidelines

### 1. Unit Tests

```rust
#[cfg(test)]
mod tests {
    use super::*;
    use test_case::test_case;

    #[tokio::test]
    async fn test_async_function() {
        let result = async_function().await;
        assert!(result.is_ok());
    }

    #[test_case("input1", "expected1")]
    #[test_case("input2", "expected2")]
    fn test_with_cases(input: &str, expected: &str) {
        assert_eq!(function(input), expected);
    }
}
```
### 2. Integration Tests

- Use the `e2e_test` module for end-to-end testing
- Simulate real storage environments

### 3. Test Quality Standards

- Write meaningful test cases that verify actual functionality
- Avoid placeholder or debug content such as "debug 111", "test test", etc.
- Use descriptive test names that clearly indicate what is being tested
- Each test should have a clear purpose and verify specific behavior
- Test data should be realistic and representative of actual use cases
## Cross-Platform Compatibility Guidelines

### 1. CPU Architecture Compatibility

- **Always consider multi-platform and different CPU architecture compatibility** when writing code
- Support the major architectures: x86_64, aarch64 (ARM64), and other target platforms
- Use conditional compilation for architecture-specific code:

```rust
#[cfg(target_arch = "x86_64")]
fn optimized_x86_64_function() { /* x86_64 specific implementation */ }

#[cfg(target_arch = "aarch64")]
fn optimized_aarch64_function() { /* ARM64 specific implementation */ }

#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
fn generic_function() { /* Generic fallback implementation */ }
```
### 2. Platform-Specific Dependencies

- Use feature flags for platform-specific dependencies
- Provide fallback implementations for unsupported platforms
- Test on multiple architectures in the CI/CD pipeline
### 3. Endianness Considerations

- Use explicit byte-order conversion when dealing with binary data
- Prefer `to_le_bytes()` / `from_le_bytes()` for a consistent little-endian format (see the sketch below)
- Use the `byteorder` crate for complex binary format handling
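
A minimal sketch of explicit little-endian encoding and decoding for a length field; the functions are illustrative only.

```rust
/// Encode a length prefix in a fixed little-endian layout,
/// independent of the host CPU's native byte order.
fn encode_len(len: u64) -> [u8; 8] {
    len.to_le_bytes()
}

/// Decode the same prefix; returns None if the slice is too short.
fn decode_len(buf: &[u8]) -> Option<u64> {
    let bytes: [u8; 8] = buf.get(..8)?.try_into().ok()?;
    Some(u64::from_le_bytes(bytes))
}
```
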
### 4. SIMD and Performance Optimizations

- Use portable SIMD libraries such as `wide` or `packed_simd`
- Provide fallback implementations for non-SIMD architectures
- Use runtime feature detection when appropriate (see the sketch below)
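
A minimal sketch of runtime feature detection with a scalar fallback; the function names are hypothetical, and the scalar path stands in for a real vectorized kernel.

```rust
/// Scalar fallback that works on every architecture.
fn sum_scalar(values: &[u32]) -> u64 {
    values.iter().map(|v| *v as u64).sum()
}

/// Pick an implementation at runtime; on non-x86_64 targets this
/// compiles down to the scalar path only.
fn sum_values(values: &[u32]) -> u64 {
    #[cfg(target_arch = "x86_64")]
    {
        if is_x86_feature_detected!("avx2") {
            // A real implementation would dispatch to an AVX2 kernel here;
            // this sketch just reuses the scalar path.
            return sum_scalar(values);
        }
    }
    sum_scalar(values)
}
```
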
## Security Guidelines

### 1. Memory Safety

- Disable `unsafe` code (`workspace.lints.rust.unsafe_code = "deny"`)
- Use `rustls` instead of `openssl`

### 2. Authentication and Authorization

```rust
// Use the IAM system for permission checks
let identity = iam.authenticate(&access_key, &secret_key).await?;
iam.authorize(&identity, &action, &resource).await?;
```
## Configuration Management Guidelines

### 1. Environment Variables

- Use the `RUSTFS_` prefix
- Support both configuration files and environment variables
- Provide reasonable default values (see the sketch below)
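
A minimal sketch of reading a `RUSTFS_`-prefixed variable with a fallback default; the variable name and default value are illustrative assumptions.

```rust
use std::env;

/// Read the listen address from the environment, falling back to a default.
/// `RUSTFS_ADDRESS` and the default value are used here only for illustration.
fn listen_address() -> String {
    env::var("RUSTFS_ADDRESS").unwrap_or_else(|_| "0.0.0.0:9000".to_string())
}
```
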
### 2. Configuration Structure

```rust
#[derive(Debug, Deserialize, Clone)]
pub struct Config {
    pub address: String,
    pub volumes: String,
    #[serde(default)]
    pub console_enable: bool,
}
```
## Dependency Management Guidelines

### 1. Workspace Dependencies

- Manage versions uniformly at the workspace level
- Use `workspace = true` to inherit configuration

### 2. Feature Flags

```toml
[features]
default = ["file"]
gpu = ["dep:nvml-wrapper"]
kafka = ["dep:rdkafka"]
```
## Deployment and Operations Guidelines

### 1. Containerization

- Provide Dockerfile and docker-compose configuration
- Support multi-stage builds to optimize image size

### 2. Observability

- Integrate OpenTelemetry for distributed tracing
- Support Prometheus metrics collection
- Provide Grafana dashboards

### 3. Health Checks

```rust
// Implement a health check endpoint
async fn health_check() -> Result<HealthStatus> {
    // Check component status and aggregate the results;
    // this sketch assumes HealthStatus implements Default.
    Ok(HealthStatus::default())
}
```
## Code Review Checklist

### 1. **Code Formatting and Quality (MANDATORY)**

- [ ] **Code is properly formatted** (`cargo fmt --all --check` passes)
- [ ] **All clippy warnings are resolved** (`cargo clippy --all-targets --all-features -- -D warnings` passes)
- [ ] **Code compiles successfully** (`cargo check --all-targets` passes)
- [ ] **Pre-commit hooks are working** and all checks pass
- [ ] **No formatting-related changes** mixed with functional changes (use separate commits)

### 2. Functionality

- [ ] Are all error cases properly handled?
- [ ] Is there appropriate logging?
- [ ] Is there the necessary test coverage?

### 3. Performance

- [ ] Are unnecessary memory allocations avoided?
- [ ] Are async operations used correctly?
- [ ] Are there potential deadlock risks?

### 4. Security

- [ ] Are input parameters properly validated?
- [ ] Are there appropriate permission checks?
- [ ] Is information leakage avoided?

### 5. Cross-Platform Compatibility

- [ ] Does the code work on different CPU architectures (x86_64, aarch64)?
- [ ] Are platform-specific features properly gated with conditional compilation?
- [ ] Is byte-order handling correct for binary data?
- [ ] Are there appropriate fallback implementations for unsupported platforms?

### 6. Code Commits and Documentation

- [ ] Do the commits comply with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)?
- [ ] Are commit messages concise, with the title line under 72 characters?
- [ ] Are commit titles written in English (not Chinese)?
- [ ] Is the PR description provided in copyable markdown format for easy copying?
## Common Patterns and Best Practices

### 1. Resource Management

```rust
// Use the RAII pattern for resource management
pub struct ResourceGuard {
    resource: Resource,
}

impl Drop for ResourceGuard {
    fn drop(&mut self) {
        // Clean up resources
    }
}
```
### 2. Dependency Injection

```rust
// Use the dependency injection pattern
pub struct Service {
    config: Arc<Config>,
    storage: Arc<dyn StorageAPI>,
}
```
### 3. Graceful Shutdown

```rust
// Implement graceful shutdown
async fn shutdown_gracefully(shutdown_rx: &mut Receiver<()>) {
    tokio::select! {
        _ = shutdown_rx.recv() => {
            info!("Received shutdown signal");
            // Perform cleanup operations
        }
        _ = tokio::time::sleep(SHUTDOWN_TIMEOUT) => {
            warn!("Shutdown timeout reached");
        }
    }
}
```
## Domain-Specific Guidelines

### 1. Storage Operations

- All storage operations must support erasure coding
- Implement read/write quorum mechanisms
- Support data integrity verification

### 2. Network Communication

- Use gRPC for internal service communication
- HTTP/HTTPS support for the S3-compatible API
- Implement connection pooling and retry mechanisms

### 3. Metadata Management

- Use FlatBuffers for serialization
- Support version control and migration
- Implement metadata caching
## Branch Management and Development Workflow

### Branch Management

- **🚨 CRITICAL: NEVER modify code directly on the main or master branch - THIS IS ABSOLUTELY FORBIDDEN 🚨**
- **⚠️ ANY DIRECT COMMITS TO MASTER/MAIN WILL BE REJECTED AND MUST BE REVERTED IMMEDIATELY ⚠️**
- **🔒 ALL CHANGES MUST GO THROUGH PULL REQUESTS - NO DIRECT COMMITS TO MAIN UNDER ANY CIRCUMSTANCES 🔒**
- **Always work on feature branches - NO EXCEPTIONS**
- Always check the .rules.md file before starting to ensure you understand the project guidelines
- **MANDATORY workflow for ALL changes:**
  1. `git checkout main` (switch to the main branch)
  2. `git pull` (get the latest changes)
  3. `git checkout -b feat/your-feature-name` (create and switch to a feature branch)
  4. Make your changes ONLY on the feature branch
  5. Test thoroughly before committing
  6. Commit and push to the feature branch
  7. **Create a pull request for code review - THIS IS THE ONLY WAY TO MERGE TO MAIN**
  8. **Wait for PR approval before merging - NEVER merge your own PRs without review**
- Use descriptive branch names following the pattern: `feat/feature-name`, `fix/issue-name`, `refactor/component-name`, etc.
- **Double-check the current branch before ANY commit: run `git branch` to ensure you are NOT on main/master**
- **Pull Request Requirements:**
  - All changes must be submitted via PR, regardless of size or urgency
  - PRs must include a comprehensive description and testing information
  - PRs must pass all CI/CD checks before merging
  - PRs require at least one approval from code reviewers
  - Even hotfixes and emergency changes must go through the PR process
- **Enforcement:**
  - The main branch should be protected with branch protection rules
  - Direct pushes to main should be blocked by repository settings
  - Any accidental direct commits to main must be immediately reverted via a PR

### Development Workflow
## 🎯 **Core Development Principles**

- **🔴 Every change must be precise - don't modify unless you're confident**
  - Carefully analyze the code logic and ensure complete understanding before making changes
  - When uncertain, prefer asking users or consulting documentation over blind modifications
  - Work in small iterative steps and modify only the necessary parts at a time
  - Evaluate the impact scope before changes to ensure no new issues are introduced

- **🚀 Prefer the `gh` CLI when creating GitHub PRs**
  - Prefer the `gh pr create` command to create Pull Requests
  - Avoid having users manually create PRs through the web interface
  - Provide clear and professional PR titles and descriptions
  - Using `gh` commands ensures better integration and automation
## 📝 **Code Quality Requirements**

- Use English for all code comments, documentation, and variable names
- Write meaningful and descriptive names for variables, functions, and methods
- Avoid meaningless test content like "debug 111" or placeholder values
- Before each change, carefully read the existing code so that you understand its structure and implementation; do not break existing logic or introduce new issues
- Ensure each change provides sufficient test cases to guarantee code correctness
- Do not arbitrarily modify numbers and constants in test cases; carefully analyze their meaning to keep the test cases correct
- When writing or modifying tests, check existing test cases for sound naming and rigorous logic; if they are not compliant, revise them so that the testing is scientific and rigorous
- **Before committing any changes, run `cargo clippy --all-targets --all-features -- -D warnings` to ensure all code passes Clippy checks**
- After completing development, run `git add .` and then `git commit -m "feat: feature description"` or `"fix: issue description"`, ensuring compliance with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)
- **Keep commit messages concise and under 72 characters** for the title line; use the body for detailed explanations if needed
- After completing development, push to the remote repository with `git push`
- After completing a change, summarize it briefly; do not create summary files, and ensure the description complies with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)
- Provide the change description needed for the PR in the conversation, ensuring compliance with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)
- **Always provide PR descriptions in English** after completing any changes, including:
  - A clear and concise title following the Conventional Commits format
  - A detailed description of what was changed and why
  - A list of key changes and improvements
  - Any breaking changes or migration notes, if applicable
  - Testing information and verification steps
- **Provide PR descriptions in copyable markdown format**, enclosed in code blocks for easy one-click copying
## 🚫 AI Documentation Generation Restrictions

### Forbidden Summary Documents

- **It is strictly forbidden to create any form of AI-generated summary document**
- **Do not create documents full of emoji, elaborate formatted tables, and a typical AI style**
- **Do not generate the following types of documents in the project:**
  - Benchmark summary documents (BENCHMARK*.md)
  - Implementation comparison analysis documents (IMPLEMENTATION_COMPARISON*.md)
  - Performance analysis report documents
  - Architecture summary documents
  - Feature comparison documents
  - Any documents with large amounts of emoji and formatted content
- **If documentation is needed, create it only when explicitly requested by the user, and keep the style concise and practical**
- **Documentation should focus on the information actually needed, avoiding excessive formatting and decorative content**
- **Any AI-generated summary documents that are discovered should be deleted immediately**

### Allowed Documentation Types

- README.md (project introduction, kept concise)
- Technical documentation (only create when explicitly needed)
- User manual (only create when explicitly needed)
- API documentation (generated from code)
- Changelog (CHANGELOG.md)

These rules should serve as guiding principles when developing the RustFS project, ensuring code quality, performance, and maintainability.
.vscode/launch.json (vendored): 11 changes

@@ -93,8 +93,15 @@
         "name": "Debug executable target/debug/test",
         "type": "lldb",
         "request": "launch",
-        "program": "${workspaceFolder}/target/debug/deps/lifecycle_integration_test-5eb7590b8f3bea55",
-        "args": [],
+        "program": "${workspaceFolder}/target/debug/deps/lifecycle_integration_test-5915cbfcab491b3b",
+        "args": [
+            "--skip",
+            "test_lifecycle_expiry_basic",
+            "--skip",
+            "test_lifecycle_expiry_deletemarker",
+            //"--skip",
+            //"test_lifecycle_transition_basic",
+        ],
         "cwd": "${workspaceFolder}",
         //"stopAtEntry": false,
         //"preLaunchTask": "cargo build",
CLA.md: 75 changes

@@ -1,39 +1,88 @@

RustFS Individual Contributor License Agreement

Thank you for your interest in contributing documentation and related software code to a project hosted or managed by RustFS. In order to clarify the intellectual property license granted with Contributions from any person or entity, RustFS must have a Contributor License Agreement ("CLA") on file that has been signed by each Contributor, indicating agreement to the license terms below. This version of the Contributor License Agreement allows an individual to submit Contributions to the applicable project. If you are making a submission on behalf of a legal entity, then you should sign the separate Corporate Contributor License Agreement.

You accept and agree to the following terms and conditions for Your present and future Contributions submitted to RustFS. You hereby irrevocably assign and transfer to RustFS all right, title, and interest in and to Your Contributions, including all copyrights and other intellectual property rights therein.

Definitions

"You" (or "Your") shall mean the copyright owner or legal entity authorized by the copyright owner that is making this Agreement with RustFS. For legal entities, the entity making a Contribution and all other entities that control, are controlled by, or are under common control with that entity are considered to be a single Contributor. For the purposes of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.

"Contribution" shall mean any original work of authorship, including any modifications or additions to an existing work, that is intentionally submitted by You to RustFS for inclusion in, or documentation of, any of the products or projects owned or managed by RustFS (the "Work"), including without limitation any Work described in Schedule A. For the purposes of this definition, "submitted" means any form of electronic or written communication sent to RustFS or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, RustFS for the purpose of discussing and improving the Work.

Assignment of Copyright

Subject to the terms and conditions of this Agreement, You hereby irrevocably assign and transfer to RustFS all right, title, and interest in and to Your Contributions, including all copyrights and other intellectual property rights therein, for the entire term of such rights, including all renewals and extensions. You agree to execute all documents and take all actions as may be reasonably necessary to vest in RustFS the ownership of Your Contributions and to assist RustFS in perfecting, maintaining, and enforcing its rights in Your Contributions.

Grant of Patent License

Subject to the terms and conditions of this Agreement, You hereby grant to RustFS and to recipients of documentation and software distributed by RustFS a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by You that are necessarily infringed by Your Contribution(s) alone or by combination of Your Contribution(s) with the Work to which such Contribution(s) was submitted. If any entity institutes patent litigation against You or any other entity (including a cross-claim or counterclaim in a lawsuit) alleging that your Contribution, or the Work to which you have contributed, constitutes direct or contributory patent infringement, then any patent licenses granted to that entity under this Agreement for that Contribution or Work shall terminate as of the date such litigation is filed.

You represent that you are legally entitled to grant the above assignment and license.

You represent that each of Your Contributions is Your original creation (see section 7 for submissions on behalf of others). You represent that Your Contribution submissions include complete details of any third-party license or other restriction (including, but not limited to, related patents and trademarks) of which you are personally aware and which are associated with any part of Your Contributions.

You are not expected to provide support for Your Contributions, except to the extent You desire to provide support. You may provide support for free, for a fee, or not at all. Unless required by applicable law or agreed to in writing, You provide Your Contributions on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE.

Should You wish to submit work that is not Your original creation, You may submit it to RustFS separately from any Contribution, identifying the complete details of its source and of any license or other restriction (including, but not limited to, related patents, trademarks, and license agreements) of which you are personally aware, and conspicuously marking the work as "Submitted on behalf of a third-party: [named here]".

You agree to notify RustFS of any facts or circumstances of which you become aware that would make these representations inaccurate in any respect.

Modification of CLA

RustFS reserves the right to update or modify this CLA in the future. Any updates or modifications to this CLA shall apply only to Contributions made after the effective date of the revised CLA. Contributions made prior to the update shall remain governed by the version of the CLA that was in effect at the time of submission. It is not necessary for all Contributors to re-sign the CLA when the CLA is updated or modified.

Governing Law and Dispute Resolution

This Agreement will be governed by and construed in accordance with the laws of the People's Republic of China excluding that body of laws known as conflict of laws. The parties expressly agree that the United Nations Convention on Contracts for the International Sale of Goods will not apply. Any legal action or proceeding arising under this Agreement will be brought exclusively in the courts located in Beijing, China, and the parties hereby irrevocably consent to the personal jurisdiction and venue therein.

For your reading convenience, this Agreement is written in parallel English and Chinese sections. To the extent there is a conflict between the English and Chinese sections, the English sections shall govern.
CLAUDE.md: 84 changes

@@ -4,23 +4,28 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co

## Project Overview

RustFS is a high-performance distributed object storage software built with Rust, providing S3-compatible APIs and advanced features like data lakes, AI, and big data support. It's designed as an alternative to MinIO with better performance and a more business-friendly Apache 2.0 license.

## Build Commands

### Primary Build Commands

- `cargo build --release` - Build the main RustFS binary
- `./build-rustfs.sh` - Recommended build script that handles console resources and cross-platform compilation
- `./build-rustfs.sh --dev` - Development build with debug symbols
- `make build` or `just build` - Use Make/Just for standardized builds

### Platform-Specific Builds

- `./build-rustfs.sh --platform x86_64-unknown-linux-musl` - Build for the musl target
- `./build-rustfs.sh --platform aarch64-unknown-linux-gnu` - Build for ARM64
- `make build-musl` or `just build-musl` - Build the musl variant
- `make build-cross-all` - Build all supported architectures

### Testing Commands

- `cargo test --workspace --exclude e2e_test` - Run unit tests (excluding e2e tests)
- `cargo nextest run --all --exclude e2e_test` - Use nextest if available (faster)
- `cargo test --all --doc` - Run documentation tests
@@ -28,22 +33,30 @@ RustFS is a high-performance distributed object storage software built with Rust

- `make pre-commit` - Run all quality checks (fmt, clippy, check, test)

### End-to-End Testing

- `cargo test --package e2e_test` - Run all e2e tests
- `./scripts/run_e2e_tests.sh` - Run e2e tests via script
- `./scripts/run_scanner_benchmarks.sh` - Run scanner performance benchmarks

### KMS-Specific Testing (with proxy bypass)

- `NO_PROXY=127.0.0.1,localhost HTTP_PROXY= HTTPS_PROXY= http_proxy= https_proxy= cargo test --package e2e_test test_local_kms_end_to_end -- --nocapture --test-threads=1` - Run the complete KMS end-to-end test
- `NO_PROXY=127.0.0.1,localhost HTTP_PROXY= HTTPS_PROXY= http_proxy= https_proxy= cargo test --package e2e_test kms:: -- --nocapture --test-threads=1` - Run all KMS tests
- `cargo test --package e2e_test test_local_kms_key_isolation -- --nocapture --test-threads=1` - Test KMS key isolation
- `cargo test --package e2e_test test_local_kms_large_file -- --nocapture --test-threads=1` - Test KMS with large files

### Code Quality

- `cargo fmt --all` - Format code
- `cargo clippy --all-targets --all-features -- -D warnings` - Lint code
- `make pre-commit` or `just pre-commit` - Run all quality checks (fmt, clippy, check, test)

### Quick Development Commands

- `make help` or `just help` - Show all available commands with descriptions
- `make help-build` - Show detailed build options and cross-compilation help
- `make help-docker` - Show comprehensive Docker build and deployment options

@@ -52,6 +65,7 @@ RustFS is a high-performance distributed object storage software built with Rust

- `./scripts/probe.sh` - Health check and connectivity testing

### Docker Build Commands

- `make docker-buildx` - Build multi-architecture production images
- `make docker-dev-local` - Build development image for local use
- `./docker-buildx.sh --push` - Build and push production images
@@ -61,6 +75,7 @@ RustFS is a high-performance distributed object storage software built with Rust

### Core Components

**Main Binary (`rustfs/`):**

- Entry point at `rustfs/src/main.rs`
- Core modules: admin, auth, config, server, storage, license management, profiling
- HTTP server with S3-compatible APIs

@@ -68,10 +83,11 @@ RustFS is a high-performance distributed object storage software built with Rust

- Parallel service initialization with DNS resolver, bucket metadata, and IAM

**Key Crates (`crates/`):**

- `ecstore` - Erasure coding storage implementation (core storage layer)
- `iam` - Identity and Access Management
- `kms` - Key Management Service for encryption and key handling
- `madmin` - Management dashboard and admin API interface
- `s3select-api` & `s3select-query` - S3 Select API and query engine
- `config` - Configuration management with notify features
- `crypto` - Cryptography and security features

@@ -94,6 +110,7 @@ RustFS is a high-performance distributed object storage software built with Rust

- `targets` - Target-specific configurations and utilities

### Build System

- Cargo workspace with 25+ crates (including new KMS functionality)
- Custom `build-rustfs.sh` script for advanced build options
- Multi-architecture Docker builds via `docker-buildx.sh`

@@ -103,10 +120,11 @@ RustFS is a high-performance distributed object storage software built with Rust

- Performance benchmarking and audit workflows

### Key Dependencies

- `axum` - HTTP framework for the S3 API server
- `tokio` - Async runtime
- `s3s` - S3 protocol implementation library
- `datafusion` - For S3 Select query processing
- `hyper`/`hyper-util` - HTTP client/server utilities
- `rustls` - TLS implementation
- `serde`/`serde_json` - Serialization
@@ -115,6 +133,7 @@ RustFS is a high-performance distributed object storage software built with Rust

- `tikv-jemallocator` - Memory allocator for Linux GNU builds

### Development Workflow

- Console resources are embedded during build via `rust-embed`
- Protocol buffers generated via custom `gproto` binary
- E2E tests in separate crate (`e2e_test`) with comprehensive KMS testing

@@ -124,14 +143,16 @@ RustFS is a high-performance distributed object storage software built with Rust

- Git hooks setup available via `make setup-hooks` or `just setup-hooks`

### Performance & Observability

- Performance profiling available with `pprof` integration (disabled on Windows)
- Profiling enabled via environment variables in production
- Built-in observability with OpenTelemetry integration
- Background services (scanner, heal) can be controlled via environment variables:
  - `RUSTFS_ENABLE_SCANNER` (default: true)
  - `RUSTFS_ENABLE_HEAL` (default: true)

### Service Architecture

- Service state management with graceful shutdown handling
- Parallel initialization of core systems (DNS, bucket metadata, IAM)
- Event notification system with MQTT and webhook support

@@ -139,6 +160,7 @@ RustFS is a high-performance distributed object storage software built with Rust

- Jemalloc allocator for Linux GNU targets for better performance

## Environment Variables

- `RUSTFS_ENABLE_SCANNER` - Enable/disable background data scanner (default: true)
- `RUSTFS_ENABLE_HEAL` - Enable/disable auto-heal functionality (default: true)
- Various profiling and observability controls

@@ -146,12 +168,14 @@ RustFS is a high-performance distributed object storage software built with Rust

- Test environment configurations in `scripts/dev_rustfs.env`

### KMS Environment Variables

- `NO_PROXY=127.0.0.1,localhost` - Required for KMS E2E tests to bypass proxy
- `HTTP_PROXY=` `HTTPS_PROXY=` `http_proxy=` `https_proxy=` - Clear proxy settings for local KMS testing
## KMS (Key Management Service) Architecture

### KMS Implementation Status

- **Full KMS Integration:** Complete implementation with Local and Vault backends
- **Automatic Configuration:** KMS auto-configures on startup with the `--kms-enable` flag
- **Encryption Support:** Full S3-compatible server-side encryption (SSE-S3, SSE-KMS, SSE-C)

@@ -159,18 +183,21 @@ RustFS is a high-performance distributed object storage software built with Rust

- **Production Ready:** Comprehensive testing including large files and key isolation

### KMS Configuration

- **Local Backend:** `--kms-backend local --kms-key-dir <path> --kms-default-key-id <id>`
- **Vault Backend:** `--kms-backend vault --kms-vault-endpoint <url> --kms-vault-key-name <name>`
- **Auto-startup:** KMS automatically initializes when `--kms-enable` is provided
- **Manual Configuration:** Also supports dynamic configuration via the admin API

### S3 Encryption Support

- **SSE-S3:** Server-side encryption with S3-managed keys (`ServerSideEncryption: AES256`)
- **SSE-KMS:** Server-side encryption with KMS-managed keys (`ServerSideEncryption: aws:kms`)
- **SSE-C:** Server-side encryption with customer-provided keys
- **Response Headers:** All encryption types return correct `server_side_encryption` headers in PUT/GET responses

### KMS Testing Architecture

- **Comprehensive E2E Tests:** Located in `crates/e2e_test/src/kms/`
- **Test Environments:** Automated test environment setup with temporary directories
- **Encryption Coverage:** Tests all three encryption types (SSE-S3, SSE-KMS, SSE-C)

@@ -178,6 +205,7 @@ RustFS is a high-performance distributed object storage software built with Rust

- **Edge Cases:** Key isolation, large file handling, error scenarios

### Key Files for KMS

- `crates/kms/` - Core KMS implementation with Local/Vault backends
- `rustfs/src/main.rs` - KMS auto-initialization in `init_kms_system()`
- `rustfs/src/storage/ecfs.rs` - SSE encryption/decryption in PUT/GET operations

@@ -186,54 +214,62 @@ RustFS is a high-performance distributed object storage software built with Rust

- `crates/rio/src/encrypt_reader.rs` - Streaming encryption for large files
## Code Style and Safety Requirements
|
||||
|
||||
- **Language Requirements:**
  - Communicate with me in Chinese, but **only English can be used in code files**
  - Code comments, function names, variable names, and all text in source files must be in English only
  - No Chinese characters, emojis, or non-ASCII characters are allowed in any source code files
  - This includes comments, strings, documentation, and any other text within code files
- **Safety-Critical Rules:**
  - `unsafe_code = "deny"` enforced at workspace level
  - Never use `unwrap()`, `expect()`, or panic-inducing code except in tests (see the error-handling sketch after this list)
  - Avoid blocking I/O operations in async contexts
  - Use proper error handling with `Result<T, E>` and `Option<T>`
  - Follow Rust's ownership and borrowing rules strictly
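A minimal sketch of what that rule looks like in practice (illustrative code, not taken from the codebase): failures are propagated with `?` rather than unwrapped.

```rust
use std::path::Path;
use tokio::fs;

// Propagate errors to the caller instead of panicking with unwrap()/expect().
async fn read_config(path: &Path) -> std::io::Result<String> {
    // `?` forwards the io::Error; there is no panic path in production code.
    let contents = fs::read_to_string(path).await?;
    Ok(contents)
}
```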
- **Performance Guidelines:**
  - Use `cargo clippy --all-targets --all-features -- -D warnings` to catch issues
  - Prefer `anyhow` for error handling in applications, `thiserror` for libraries (see the sketch after this list)
  - Use appropriate async runtimes and avoid blocking calls
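A hedged sketch of that split, with illustrative names only: a library crate defines a typed error with `thiserror`, while application code wraps it with `anyhow` and attaches context.

```rust
use thiserror::Error;

/// Library-side error (thiserror): callers can match on specific variants.
#[derive(Debug, Error)]
pub enum KeyStoreError {
    #[error("key {0} not found")]
    NotFound(String),
    #[error(transparent)]
    Io(#[from] std::io::Error),
}

/// Application-side usage (anyhow): variants are erased, context is attached.
fn load_key(id: &str) -> anyhow::Result<Vec<u8>> {
    use anyhow::Context;
    let bytes = std::fs::read(format!("keys/{id}.bin")).context("reading key material")?;
    Ok(bytes)
}
```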
- **Testing Standards:**
  - All new features must include comprehensive tests
  - Use `#[cfg(test)]` for test-only code that may use panic macros (see the sketch after this list)
  - E2E tests should cover KMS integration scenarios
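A small illustrative sketch of the `#[cfg(test)]` rule: panic-style assertions stay inside the test module, while the production function keeps returning `Result`.

```rust
pub fn parse_port(s: &str) -> Result<u16, std::num::ParseIntError> {
    s.parse::<u16>()
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn parses_valid_port() {
        // unwrap() is acceptable here because this is test-only code.
        assert_eq!(parse_port("9000").unwrap(), 9000);
    }
}
```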
## Common Development Tasks
|
||||
|
||||
### Running KMS Tests Locally
|
||||
|
||||
1. **Clear proxy settings:** KMS tests require direct localhost connections
|
||||
2. **Use serial execution:** `--test-threads=1` prevents port conflicts
|
||||
3. **Enable output:** `--nocapture` shows detailed test logs
|
||||
4. **Full command:**
   `NO_PROXY=127.0.0.1,localhost HTTP_PROXY= HTTPS_PROXY= http_proxy= https_proxy= cargo test --package e2e_test test_local_kms_end_to_end -- --nocapture --test-threads=1`
|
||||
|
||||
### KMS Development Workflow
|
||||
|
||||
1. **Code changes:** Modify KMS-related code in `crates/kms/` or `rustfs/src/`
|
||||
2. **Compile:** Always run `cargo build` after changes
|
||||
3. **Test specific functionality:** Use targeted test commands for faster iteration
|
||||
4. **Full validation:** Run complete end-to-end tests before commits
|
||||
|
||||
### Debugging KMS Issues
|
||||
|
||||
- **Server startup:** Check that KMS auto-initializes with debug logs (see the tracing setup sketch after this list)
|
||||
- **Encryption failures:** Verify SSE headers are correctly set in both PUT and GET responses
|
||||
- **Test failures:** Use `--nocapture` to see detailed error messages
|
||||
- **Key management:** Test admin API endpoints with proper authentication
|
||||
|
||||
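One generic way to surface those debug logs during local runs is an env-filter based subscriber; this is a `tracing-subscriber` sketch using the workspace's `env-filter` feature, not the project's actual logging setup:

```rust
use tracing_subscriber::EnvFilter;

// Honors RUST_LOG, e.g. `RUST_LOG=debug cargo run -- --kms-enable ...`,
// so KMS initialization messages become visible on startup.
fn init_debug_logging() {
    tracing_subscriber::fmt()
        .with_env_filter(EnvFilter::from_default_env())
        .init();
}
```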
## Important Reminders
|
||||
|
||||
- **Always compile after code changes:** Use `cargo build` to catch errors early
|
||||
- **Don't bypass tests:** All functionality must be properly tested, not worked around
|
||||
- **Use proper error handling:** Never use `unwrap()` or `expect()` in production code (except tests)
|
||||
- **Follow S3 compatibility:** Ensure all encryption types return correct HTTP response headers
|
||||
|
||||
# important-instruction-reminders
|
||||
|
||||
Do what has been asked; nothing more, nothing less.
|
||||
NEVER create files unless they're absolutely necessary for achieving your goal.
|
||||
ALWAYS prefer editing an existing file to creating a new one.
|
||||
NEVER proactively create documentation files (*.md) or README files. Only create documentation files if explicitly requested by the User.
Cargo.lock: 1611 lines changed (generated file; diff suppressed because it is too large)

Cargo.toml: 274 lines changed
@@ -63,104 +63,146 @@ unsafe_code = "deny"
|
||||
all = "warn"
|
||||
|
||||
[workspace.dependencies]
|
||||
# RustFS Internal Crates
|
||||
rustfs = { path = "./rustfs", version = "0.0.5" }
|
||||
rustfs-ahm = { path = "crates/ahm", version = "0.0.5" }
|
||||
rustfs-s3select-api = { path = "crates/s3select-api", version = "0.0.5" }
|
||||
rustfs-appauth = { path = "crates/appauth", version = "0.0.5" }
|
||||
rustfs-audit = { path = "crates/audit", version = "0.0.5" }
|
||||
rustfs-checksums = { path = "crates/checksums", version = "0.0.5" }
|
||||
rustfs-common = { path = "crates/common", version = "0.0.5" }
|
||||
rustfs-config = { path = "./crates/config", version = "0.0.5" }
|
||||
rustfs-crypto = { path = "crates/crypto", version = "0.0.5" }
|
||||
rustfs-ecstore = { path = "crates/ecstore", version = "0.0.5" }
|
||||
rustfs-filemeta = { path = "crates/filemeta", version = "0.0.5" }
|
||||
rustfs-iam = { path = "crates/iam", version = "0.0.5" }
|
||||
rustfs-kms = { path = "crates/kms", version = "0.0.5" }
|
||||
rustfs-lock = { path = "crates/lock", version = "0.0.5" }
|
||||
rustfs-madmin = { path = "crates/madmin", version = "0.0.5" }
|
||||
rustfs-mcp = { path = "crates/mcp", version = "0.0.5" }
|
||||
rustfs-notify = { path = "crates/notify", version = "0.0.5" }
|
||||
rustfs-obs = { path = "crates/obs", version = "0.0.5" }
|
||||
rustfs-policy = { path = "crates/policy", version = "0.0.5" }
|
||||
rustfs-protos = { path = "crates/protos", version = "0.0.5" }
|
||||
rustfs-s3select-query = { path = "crates/s3select-query", version = "0.0.5" }
|
||||
rustfs = { path = "./rustfs", version = "0.0.5" }
|
||||
rustfs-zip = { path = "./crates/zip", version = "0.0.5" }
|
||||
rustfs-config = { path = "./crates/config", version = "0.0.5" }
|
||||
rustfs-obs = { path = "crates/obs", version = "0.0.5" }
|
||||
rustfs-notify = { path = "crates/notify", version = "0.0.5" }
|
||||
rustfs-utils = { path = "crates/utils", version = "0.0.5" }
|
||||
rustfs-rio = { path = "crates/rio", version = "0.0.5" }
|
||||
rustfs-filemeta = { path = "crates/filemeta", version = "0.0.5" }
|
||||
rustfs-s3select-api = { path = "crates/s3select-api", version = "0.0.5" }
|
||||
rustfs-s3select-query = { path = "crates/s3select-query", version = "0.0.5" }
|
||||
rustfs-signer = { path = "crates/signer", version = "0.0.5" }
|
||||
rustfs-checksums = { path = "crates/checksums", version = "0.0.5" }
|
||||
rustfs-workers = { path = "crates/workers", version = "0.0.5" }
|
||||
rustfs-mcp = { path = "crates/mcp", version = "0.0.5" }
|
||||
rustfs-targets = { path = "crates/targets", version = "0.0.5" }
|
||||
rustfs-kms = { path = "crates/kms", version = "0.0.5" }
|
||||
aes-gcm = { version = "0.10.3", features = ["std"] }
|
||||
anyhow = "1.0.100"
|
||||
arc-swap = "1.7.1"
|
||||
argon2 = { version = "0.5.3", features = ["std"] }
|
||||
atoi = "2.0.0"
|
||||
rustfs-utils = { path = "crates/utils", version = "0.0.5" }
|
||||
rustfs-workers = { path = "crates/workers", version = "0.0.5" }
|
||||
rustfs-zip = { path = "./crates/zip", version = "0.0.5" }
|
||||
|
||||
# Async Runtime and Networking
|
||||
async-channel = "2.5.0"
|
||||
async-compression = { version = "0.4.19" }
|
||||
async-recursion = "1.1.1"
|
||||
async-trait = "0.1.89"
|
||||
async-compression = { version = "0.4.19" }
|
||||
atomic_enum = "0.3.0"
|
||||
aws-config = { version = "1.8.8" }
|
||||
aws-credential-types = { version = "1.2.8" }
|
||||
aws-smithy-types = { version = "1.3.3" }
|
||||
aws-sdk-s3 = { version = "1.108.0", default-features = false, features = ["sigv4a", "rustls", "rt-tokio"] }
|
||||
axum = "0.8.6"
|
||||
axum-extra = "0.10.3"
|
||||
axum-extra = "0.12.0"
|
||||
axum-server = { version = "0.7.2", features = ["tls-rustls-no-provider"], default-features = false }
|
||||
base64-simd = "0.8.0"
|
||||
base64 = "0.22.1"
|
||||
brotli = "8.0.2"
|
||||
bytes = { version = "1.10.1", features = ["serde"] }
|
||||
bytesize = "2.1.0"
|
||||
byteorder = "1.5.0"
|
||||
cfg-if = "1.0.4"
|
||||
convert_case = "0.8.0"
|
||||
crc-fast = "1.3.0"
|
||||
chacha20poly1305 = { version = "0.10.1" }
|
||||
chrono = { version = "0.4.42", features = ["serde"] }
|
||||
clap = { version = "4.5.49", features = ["derive", "env"] }
|
||||
const-str = { version = "0.7.0", features = ["std", "proc"] }
|
||||
crc32fast = "1.5.0"
|
||||
crc32c = "0.6.8"
|
||||
crc64fast-nvme = "1.2.0"
|
||||
criterion = { version = "0.7", features = ["html_reports"] }
|
||||
crossbeam-queue = "0.3.12"
|
||||
datafusion = "50.2.0"
|
||||
derive_builder = "0.20.2"
|
||||
enumset = "1.1.10"
|
||||
flatbuffers = "25.9.23"
|
||||
flate2 = "1.1.4"
|
||||
flexi_logger = { version = "0.31.7", features = ["trc", "dont_minimize_extra_stacks", "compress", "kv"] }
|
||||
form_urlencoded = "1.2.2"
|
||||
futures = "0.3.31"
|
||||
futures-core = "0.3.31"
|
||||
futures-util = "0.3.31"
|
||||
hyper = { version = "1.7.0", features = ["http2", "http1", "server"] }
|
||||
hyper-rustls = { version = "0.27.7", default-features = false, features = ["native-tokio", "http1", "tls12", "logging", "http2", "ring", "webpki-roots"] }
|
||||
hyper-util = { version = "0.1.17", features = ["tokio", "server-auto", "server-graceful"] }
|
||||
http = "1.3.1"
|
||||
http-body = "1.0.1"
|
||||
reqwest = { version = "0.12.24", default-features = false, features = ["rustls-tls-webpki-roots", "charset", "http2", "system-proxy", "stream", "json", "blocking"] }
|
||||
socket2 = "0.6.1"
|
||||
tokio = { version = "1.48.0", features = ["fs", "rt-multi-thread"] }
|
||||
tokio-rustls = { version = "0.26.4", default-features = false, features = ["logging", "tls12", "ring"] }
|
||||
tokio-stream = { version = "0.1.17" }
|
||||
tokio-test = "0.4.4"
|
||||
tokio-util = { version = "0.7.17", features = ["io", "compat"] }
|
||||
tonic = { version = "0.14.2", features = ["gzip"] }
|
||||
tonic-prost = { version = "0.14.2" }
|
||||
tonic-prost-build = { version = "0.14.2" }
|
||||
tower = { version = "0.5.2", features = ["timeout"] }
|
||||
tower-http = { version = "0.6.6", features = ["cors"] }
|
||||
|
||||
# Serialization and Data Formats
|
||||
bytes = { version = "1.10.1", features = ["serde"] }
|
||||
bytesize = "2.1.0"
|
||||
byteorder = "1.5.0"
|
||||
flatbuffers = "25.9.23"
|
||||
form_urlencoded = "1.2.2"
|
||||
prost = "0.14.1"
|
||||
quick-xml = "0.38.3"
|
||||
rmcp = { version = "0.8.3" }
|
||||
rmp = { version = "0.8.14" }
|
||||
rmp-serde = { version = "1.3.0" }
|
||||
serde = { version = "1.0.228", features = ["derive"] }
|
||||
serde_json = { version = "1.0.145", features = ["raw_value"] }
|
||||
serde_urlencoded = "0.7.1"
|
||||
schemars = "1.0.4"
|
||||
|
||||
# Cryptography and Security
|
||||
aes-gcm = { version = "0.10.3", features = ["std"] }
|
||||
argon2 = { version = "0.5.3", features = ["std"] }
|
||||
blake3 = { version = "1.8.2" }
|
||||
chacha20poly1305 = { version = "0.10.1" }
|
||||
crc-fast = "1.3.0"
|
||||
crc32c = "0.6.8"
|
||||
crc32fast = "1.5.0"
|
||||
crc64fast-nvme = "1.2.0"
|
||||
hmac = "0.12.1"
|
||||
jsonwebtoken = { version = "10.1.0", features = ["rust_crypto"] }
|
||||
pbkdf2 = "0.12.2"
|
||||
rsa = { version = "0.9.8" }
|
||||
rustls = { version = "0.23.34", features = ["ring", "logging", "std", "tls12"], default-features = false }
|
||||
rustls-pemfile = "2.2.0"
|
||||
rustls-pki-types = "1.13.0"
|
||||
sha1 = "0.10.6"
|
||||
sha2 = "0.10.9"
|
||||
zeroize = { version = "1.8.2", features = ["derive"] }
|
||||
|
||||
# Time and Date
|
||||
chrono = { version = "0.4.42", features = ["serde"] }
|
||||
humantime = "2.3.0"
|
||||
time = { version = "0.3.44", features = ["std", "parsing", "formatting", "macros", "serde"] }
|
||||
|
||||
# Utilities and Tools
|
||||
anyhow = "1.0.100"
|
||||
arc-swap = "1.7.1"
|
||||
astral-tokio-tar = "0.5.6"
|
||||
atoi = "2.0.0"
|
||||
atomic_enum = "0.3.0"
|
||||
aws-config = { version = "1.8.10" }
|
||||
aws-credential-types = { version = "1.2.8" }
|
||||
aws-sdk-s3 = { version = "1.110.0", default-features = false, features = ["sigv4a", "rustls", "rt-tokio"] }
|
||||
aws-smithy-types = { version = "1.3.4" }
|
||||
base64 = "0.22.1"
|
||||
base64-simd = "0.8.0"
|
||||
brotli = "8.0.2"
|
||||
cfg-if = "1.0.4"
|
||||
clap = { version = "4.5.51", features = ["derive", "env"] }
|
||||
const-str = { version = "0.7.0", features = ["std", "proc"] }
|
||||
convert_case = "0.8.0"
|
||||
criterion = { version = "0.7", features = ["html_reports"] }
|
||||
crossbeam-queue = "0.3.12"
|
||||
datafusion = "50.3.0"
|
||||
derive_builder = "0.20.2"
|
||||
enumset = "1.1.10"
|
||||
flate2 = "1.1.5"
|
||||
flexi_logger = { version = "0.31.7", features = ["trc", "dont_minimize_extra_stacks", "compress", "kv"] }
|
||||
glob = "0.3.3"
|
||||
google-cloud-storage = "1.2.0"
|
||||
google-cloud-auth = "1.1.0"
|
||||
hashbrown = { version = "0.16.0", features = ["serde", "rayon"] }
|
||||
hex-simd = "0.8.0"
|
||||
highway = { version = "1.3.0" }
|
||||
hickory-resolver = { version = "0.25.2", features = ["tls-ring"] }
|
||||
hmac = "0.12.1"
|
||||
hyper = "1.7.0"
|
||||
hyper-util = { version = "0.1.17", features = [
|
||||
"tokio",
|
||||
"server-auto",
|
||||
"server-graceful",
|
||||
] }
|
||||
hyper-rustls = { version = "0.27.7", default-features = false, features = ["native-tokio", "http1", "tls12", "logging", "http2", "ring", "webpki-roots"] }
|
||||
http = "1.3.1"
|
||||
http-body = "1.0.1"
|
||||
humantime = "2.3.0"
|
||||
ipnetwork = { version = "0.21.1", features = ["serde"] }
|
||||
jsonwebtoken = { version = "10.0.0", features = ["rust_crypto"] }
|
||||
lazy_static = "1.5.0"
|
||||
libc = "0.2.177"
|
||||
libsystemd = { version = "0.7.2" }
|
||||
local-ip-address = "0.6.5"
|
||||
lz4 = "1.28.1"
|
||||
matchit = "0.8.4"
|
||||
matchit = "0.9.0"
|
||||
md-5 = "0.10.6"
|
||||
md5 = "0.8.0"
|
||||
metrics = "0.24.2"
|
||||
metrics-exporter-opentelemetry = "0.1.2"
|
||||
mime_guess = "2.0.5"
|
||||
moka = { version = "0.12.11", features = ["future"] }
|
||||
netif = "0.1.6"
|
||||
@@ -170,128 +212,72 @@ num_cpus = { version = "1.17.0" }
|
||||
nvml-wrapper = "0.11.0"
|
||||
object_store = "0.12.4"
|
||||
once_cell = "1.21.3"
|
||||
opentelemetry = { version = "0.31.0" }
|
||||
opentelemetry-appender-tracing = { version = "0.31.1", features = [
|
||||
"experimental_use_tracing_span_context",
|
||||
"experimental_metadata_attributes",
|
||||
"spec_unstable_logs_enabled"
|
||||
] }
|
||||
opentelemetry_sdk = { version = "0.31.0" }
|
||||
opentelemetry-stdout = { version = "0.31.0" }
|
||||
opentelemetry-otlp = { version = "0.31.0", default-features = false, features = [
|
||||
"grpc-tonic", "gzip-tonic", "trace", "metrics", "logs", "internal-logs"
|
||||
] }
|
||||
opentelemetry-semantic-conventions = { version = "0.31.0", features = [
|
||||
"semconv_experimental",
|
||||
] }
|
||||
parking_lot = "0.12.5"
|
||||
path-absolutize = "3.1.1"
|
||||
path-clean = "1.0.1"
|
||||
blake3 = { version = "1.8.2" }
|
||||
pbkdf2 = "0.12.2"
|
||||
pin-project-lite = "0.2.16"
|
||||
prost = "0.14.1"
|
||||
pretty_assertions = "1.4.1"
|
||||
quick-xml = "0.38.3"
|
||||
rand = "0.9.2"
|
||||
rayon = "1.11.0"
|
||||
reed-solomon-simd = { version = "3.1.0" }
|
||||
regex = { version = "1.12.2" }
|
||||
reqwest = { version = "0.12.24", default-features = false, features = [
|
||||
"rustls-tls-webpki-roots",
|
||||
"charset",
|
||||
"http2",
|
||||
"system-proxy",
|
||||
"stream",
|
||||
"json",
|
||||
"blocking",
|
||||
] }
|
||||
rmcp = { version = "0.8.1" }
|
||||
rmp = { version = "0.8.14" }
|
||||
rmp-serde = { version = "1.3.0" }
|
||||
rsa = { version = "0.9.8" }
|
||||
rumqttc = { version = "0.25.0" }
|
||||
rust-embed = { version = "8.7.2" }
|
||||
rust-embed = { version = "8.9.0" }
|
||||
rustc-hash = { version = "2.1.1" }
|
||||
rustls = { version = "0.23.32", features = ["ring", "logging", "std", "tls12"], default-features = false }
|
||||
rustls-pki-types = "1.12.0"
|
||||
rustls-pemfile = "2.2.0"
|
||||
s3s = { version = "0.12.0-rc.3", features = ["minio"] }
|
||||
schemars = "1.0.4"
|
||||
serde = { version = "1.0.228", features = ["derive"] }
|
||||
serde_json = { version = "1.0.145", features = ["raw_value"] }
|
||||
serde_urlencoded = "0.7.1"
|
||||
serial_test = "3.2.0"
|
||||
sha1 = "0.10.6"
|
||||
sha2 = "0.10.9"
|
||||
shadow-rs = { version = "1.4.0", default-features = false }
|
||||
siphasher = "1.0.1"
|
||||
smallvec = { version = "1.15.1", features = ["serde"] }
|
||||
smartstring = "1.0.1"
|
||||
snafu = "0.8.9"
|
||||
snap = "1.1.1"
|
||||
socket2 = "0.6.1"
|
||||
starshard = { version = "0.5.0", features = ["rayon", "async", "serde"] }
|
||||
strum = { version = "0.27.2", features = ["derive"] }
|
||||
sysinfo = "0.37.1"
|
||||
sysctl = "0.7.1"
|
||||
tempfile = "3.23.0"
|
||||
sysinfo = "0.37.2"
|
||||
temp-env = "0.3.6"
|
||||
tempfile = "3.23.0"
|
||||
test-case = "3.3.1"
|
||||
thiserror = "2.0.17"
|
||||
time = { version = "0.3.44", features = [
|
||||
"std",
|
||||
"parsing",
|
||||
"formatting",
|
||||
"macros",
|
||||
"serde",
|
||||
] }
|
||||
tokio = { version = "1.48.0", features = ["fs", "rt-multi-thread"] }
|
||||
tokio-rustls = { version = "0.26.4", default-features = false, features = ["logging", "tls12", "ring"] }
|
||||
tokio-stream = { version = "0.1.17" }
|
||||
tokio-tar = "0.3.1"
|
||||
tokio-test = "0.4.4"
|
||||
tokio-util = { version = "0.7.16", features = ["io", "compat"] }
|
||||
tonic = { version = "0.14.2", features = ["gzip"] }
|
||||
tonic-prost = { version = "0.14.2" }
|
||||
tonic-prost-build = { version = "0.14.2" }
|
||||
tower = { version = "0.5.2", features = ["timeout"] }
|
||||
tower-http = { version = "0.6.6", features = ["cors"] }
|
||||
tracing = { version = "0.1.41" }
|
||||
tracing-core = "0.1.34"
|
||||
tracing-error = "0.2.1"
|
||||
tracing-opentelemetry = "0.32.0"
|
||||
tracing-subscriber = { version = "0.3.20", features = ["env-filter", "time"] }
|
||||
transform-stream = "0.3.1"
|
||||
url = "2.5.7"
|
||||
urlencoding = "2.1.3"
|
||||
uuid = { version = "1.18.1", features = [
|
||||
"v4",
|
||||
"fast-rng",
|
||||
"macro-diagnostics",
|
||||
] }
|
||||
uuid = { version = "1.18.1", features = ["v4", "fast-rng", "macro-diagnostics"] }
|
||||
vaultrs = { version = "0.7.4" }
|
||||
walkdir = "2.5.0"
|
||||
wildmatch = { version = "2.5.0", features = ["serde"] }
|
||||
zeroize = { version = "1.8.2", features = ["derive"] }
|
||||
winapi = { version = "0.3.9" }
|
||||
xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }
|
||||
zip = "6.0.0"
|
||||
zstd = "0.13.3"
|
||||
|
||||
# Observability and Metrics
|
||||
opentelemetry = { version = "0.31.0" }
|
||||
opentelemetry-appender-tracing = { version = "0.31.1", features = ["experimental_use_tracing_span_context", "experimental_metadata_attributes", "spec_unstable_logs_enabled"] }
|
||||
opentelemetry-otlp = { version = "0.31.0", default-features = false, features = ["grpc-tonic", "gzip-tonic", "trace", "metrics", "logs", "internal-logs"] }
|
||||
opentelemetry_sdk = { version = "0.31.0" }
|
||||
opentelemetry-semantic-conventions = { version = "0.31.0", features = ["semconv_experimental"] }
|
||||
opentelemetry-stdout = { version = "0.31.0" }
|
||||
|
||||
# Performance Analysis and Memory Profiling
|
||||
# Use tikv-jemallocator as memory allocator and enable performance analysis
|
||||
tikv-jemallocator = { version = "0.6", features = ["profiling", "stats", "unprefixed_malloc_on_supported_platforms", "background_threads"] }
|
||||
# Used to control and obtain statistics for jemalloc at runtime
|
||||
tikv-jemalloc-ctl = { version = "0.6", features = ["use_std", "stats", "profiling"] }
|
||||
# Used to generate pprof-compatible memory profiling data and support symbolization and flame graphs
|
||||
jemalloc_pprof = { version = "0.8.1", features = ["symbolize", "flamegraph"] }
|
||||
# Used to generate CPU performance analysis data and flame diagrams
|
||||
pprof = { version = "0.15.0", features = ["flamegraph", "protobuf-codec"] }
|
||||
mimalloc = "0.1"
|
||||
|
||||
|
||||
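The comments above describe tikv-jemallocator as the workspace's allocator for profiling; a minimal sketch of how a binary crate typically opts in (this is the standard pattern for the crate, not necessarily the exact wiring used by rustfs):

```rust
// Register jemalloc as the global allocator so the profiling/stats features
// enabled on the dependency above apply to this binary.
#[global_allocator]
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;

fn main() {
    // Allocations now go through jemalloc and can be inspected at runtime
    // via tikv-jemalloc-ctl or exported with jemalloc_pprof, as noted above.
    let buf = vec![0u8; 1024 * 1024];
    println!("allocated {} bytes via jemalloc", buf.len());
}
```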
[workspace.metadata.cargo-shear]
|
||||
ignored = ["rustfs", "rust-i18n", "rustfs-mcp", "tokio-test", "rustfs-audit"]
|
||||
|
||||
[profile.wasm-dev]
|
||||
inherits = "dev"
|
||||
opt-level = 1
|
||||
|
||||
[profile.server-dev]
|
||||
inherits = "dev"
|
||||
|
||||
[profile.android-dev]
|
||||
inherits = "dev"
|
||||
ignored = ["rustfs", "rustfs-mcp", "tokio-test"]
|
||||
|
||||
[profile.release]
|
||||
opt-level = 3
|
||||
|
||||
README.md: 16 lines changed
@@ -25,7 +25,7 @@ English | <a href="https://github.com/rustfs/rustfs/blob/main/README_ZH.md">简
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=fr">français</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ja">日本語</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ko">한국어</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=pt">Português</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=pt">Portuguese</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ru">Русский</a>
|
||||
</p>
|
||||
|
||||
@@ -139,10 +139,14 @@ observability. If you want to start redis as well as nginx container, you can sp
|
||||
make help-docker # Show all Docker-related commands
|
||||
```
|
||||
|
||||
4. **Access the Console**: Open your web browser and navigate to `http://localhost:9000` to access the RustFS console,
|
||||
4. **Build with helm chart (Option 4) - Cloud Native environment**
|
||||
|
||||
Following the instructions on [helm chart README](./helm/README.md) to install RustFS on kubernetes cluster.
|
||||
|
||||
5. **Access the Console**: Open your web browser and navigate to `http://localhost:9000` to access the RustFS console,
|
||||
default username and password is `rustfsadmin` .
|
||||
5. **Create a Bucket**: Use the console to create a new bucket for your objects.
|
||||
6. **Upload Objects**: You can upload files directly through the console or use S3-compatible APIs to interact with your
|
||||
6. **Create a Bucket**: Use the console to create a new bucket for your objects.
|
||||
7. **Upload Objects**: You can upload files directly through the console or use S3-compatible APIs to interact with your
|
||||
RustFS instance.
|
||||
|
||||
**NOTE**: If you want to access RustFS instance with `https`, you can refer
|
||||
@@ -194,6 +198,10 @@ top charts.
|
||||
|
||||
<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>
|
||||
|
||||
## Star History
|
||||
|
||||
[](https://www.star-history.com/#rustfs/rustfs&type=date&legend=top-left)
|
||||
|
||||
## License
|
||||
|
||||
[Apache 2.0](https://opensource.org/licenses/Apache-2.0)
|
||||
|
||||
README_ZH.md: 52 lines changed
@@ -87,13 +87,49 @@ RustFS is built with Rust, one of the most popular programming languages in the world
In addition there are grafana, prometheus, jaeger and others, which provide observability for rustfs, plus redis and nginx. Whichever containers you want to start,
you specify the corresponding profile with the `--profile` parameter.

3. **Access the Console**: Open a web browser and navigate to `http://localhost:9000` to access the RustFS console; the default username and password are
`rustfsadmin`.
4. **Create a Bucket**: Use the console to create a new bucket for your objects.
5. **Upload Objects**: You can upload files directly through the console, or use S3-compatible APIs to interact with your RustFS instance.
3. **Build from source (Option 3) - Advanced users**

**NOTE**: If you want to access the RustFS
instance over `https`, see the [TLS configuration docs](https://docs.rustfs.com/zh/integration/tls-configured.html)
For developers who want to build multi-architecture Docker images from source:

```bash
# Build multi-architecture images locally
./docker-buildx.sh --build-arg RELEASE=latest

# Build and push to an image registry
./docker-buildx.sh --push

# Build a specific version
./docker-buildx.sh --release v1.0.0 --push

# Build and push to a custom registry
./docker-buildx.sh --registry your-registry.com --namespace yourname --push
```

The `docker-buildx.sh` script supports:
- **Multi-architecture builds**: `linux/amd64`, `linux/arm64`
- **Automatic version detection**: can use git tags or commit hashes
- **Registry flexibility**: supports Docker Hub, GitHub Container Registry, and more
- **Build optimization**: includes caching and parallel builds

You can also use the Makefile targets for convenience:

```bash
make docker-buildx                           # Build locally
make docker-buildx-push                      # Build and push
make docker-buildx-version VERSION=v1.0.0    # Build a specific version
make help-docker                             # Show all Docker-related commands
```

4. **Deploy with Helm chart (Option 4) - Cloud-native environments**

Follow the instructions in the [helm chart README](./helm/README.md) to install RustFS on a Kubernetes cluster.

5. **Access the Console**: Open a web browser and navigate to `http://localhost:9000` to access the RustFS console; the default username and password are
`rustfsadmin`.
6. **Create a Bucket**: Use the console to create a new bucket for your objects.
7. **Upload Objects**: You can upload files directly through the console, or use S3-compatible APIs to interact with your RustFS instance.

**NOTE**: If you want to access the RustFS instance over `https`, see the [TLS configuration docs](https://docs.rustfs.com/zh/integration/tls-configured.html)

## Documentation

@@ -136,6 +172,10 @@ outstanding people who have made RustFS better.

<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>

## Star History

[](https://www.star-history.com/#rustfs/rustfs&type=date&legend=top-left)

## License

[Apache 2.0](https://opensource.org/licenses/Apache-2.0)
|
||||
@@ -40,3 +40,4 @@ serde_json = { workspace = true }
|
||||
serial_test = { workspace = true }
|
||||
tracing-subscriber = { workspace = true }
|
||||
tempfile = { workspace = true }
|
||||
heed = "0.22.0"
|
||||
|
||||
@@ -49,11 +49,12 @@ pub enum HealType {
|
||||
}
|
||||
|
||||
/// Heal priority
|
||||
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
||||
#[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
||||
pub enum HealPriority {
|
||||
/// Low priority
|
||||
Low = 0,
|
||||
/// Normal priority
|
||||
#[default]
|
||||
Normal = 1,
|
||||
/// High priority
|
||||
High = 2,
|
||||
@@ -61,12 +62,6 @@ pub enum HealPriority {
|
||||
Urgent = 3,
|
||||
}
|
||||
|
||||
impl Default for HealPriority {
|
||||
fn default() -> Self {
|
||||
Self::Normal
|
||||
}
|
||||
}
|
||||
|
||||
/// Heal options
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct HealOptions {
|
||||
|
||||
crates/ahm/tests/lifecycle_cache_test.rs: new file, 508 lines
@@ -0,0 +1,508 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use heed::byteorder::BigEndian;
|
||||
use heed::types::*;
|
||||
use heed::{BoxedError, BytesDecode, BytesEncode, Database, DatabaseFlags, Env, EnvOpenOptions};
|
||||
use rustfs_ahm::scanner::local_scan::{self, LocalObjectRecord, LocalScanOutcome};
|
||||
use rustfs_ecstore::{
|
||||
disk::endpoint::Endpoint,
|
||||
endpoints::{EndpointServerPools, Endpoints, PoolEndpoints},
|
||||
store::ECStore,
|
||||
store_api::{MakeBucketOptions, ObjectIO, ObjectInfo, ObjectOptions, PutObjReader, StorageAPI},
|
||||
};
|
||||
use serial_test::serial;
|
||||
use std::borrow::Cow;
|
||||
use std::sync::Once;
|
||||
use std::sync::OnceLock;
|
||||
use std::{path::PathBuf, sync::Arc};
|
||||
use tokio::fs;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::warn;
|
||||
use tracing::{debug, info};
|
||||
//use heed_traits::Comparator;
|
||||
use time::OffsetDateTime;
|
||||
use uuid::Uuid;
|
||||
|
||||
static GLOBAL_ENV: OnceLock<(Vec<PathBuf>, Arc<ECStore>)> = OnceLock::new();
|
||||
static INIT: Once = Once::new();
|
||||
|
||||
static _LIFECYCLE_EXPIRY_CURRENT_DAYS: i32 = 1;
|
||||
static _LIFECYCLE_EXPIRY_NONCURRENT_DAYS: i32 = 1;
|
||||
static _LIFECYCLE_TRANSITION_CURRENT_DAYS: i32 = 1;
|
||||
static _LIFECYCLE_TRANSITION_NONCURRENT_DAYS: i32 = 1;
|
||||
static GLOBAL_LMDB_ENV: OnceLock<Env> = OnceLock::new();
|
||||
static GLOBAL_LMDB_DB: OnceLock<Database<I64<BigEndian>, LifecycleContentCodec>> = OnceLock::new();
|
||||
|
||||
fn init_tracing() {
|
||||
INIT.call_once(|| {
|
||||
let _ = tracing_subscriber::fmt::try_init();
|
||||
});
|
||||
}
|
||||
|
||||
/// Test helper: Create test environment with ECStore
|
||||
async fn setup_test_env() -> (Vec<PathBuf>, Arc<ECStore>) {
|
||||
init_tracing();
|
||||
|
||||
// Fast path: already initialized, just clone and return
|
||||
if let Some((paths, ecstore)) = GLOBAL_ENV.get() {
|
||||
return (paths.clone(), ecstore.clone());
|
||||
}
|
||||
|
||||
// create temp dir as 4 disks with unique base dir
|
||||
let test_base_dir = format!("/tmp/rustfs_ahm_lifecyclecache_test_{}", uuid::Uuid::new_v4());
|
||||
let temp_dir = std::path::PathBuf::from(&test_base_dir);
|
||||
if temp_dir.exists() {
|
||||
fs::remove_dir_all(&temp_dir).await.ok();
|
||||
}
|
||||
fs::create_dir_all(&temp_dir).await.unwrap();
|
||||
|
||||
// create 4 disk dirs
|
||||
let disk_paths = vec![
|
||||
temp_dir.join("disk1"),
|
||||
temp_dir.join("disk2"),
|
||||
temp_dir.join("disk3"),
|
||||
temp_dir.join("disk4"),
|
||||
];
|
||||
|
||||
for disk_path in &disk_paths {
|
||||
fs::create_dir_all(disk_path).await.unwrap();
|
||||
}
|
||||
|
||||
// create EndpointServerPools
|
||||
let mut endpoints = Vec::new();
|
||||
for (i, disk_path) in disk_paths.iter().enumerate() {
|
||||
let mut endpoint = Endpoint::try_from(disk_path.to_str().unwrap()).unwrap();
|
||||
// set correct index
|
||||
endpoint.set_pool_index(0);
|
||||
endpoint.set_set_index(0);
|
||||
endpoint.set_disk_index(i);
|
||||
endpoints.push(endpoint);
|
||||
}
|
||||
|
||||
let pool_endpoints = PoolEndpoints {
|
||||
legacy: false,
|
||||
set_count: 1,
|
||||
drives_per_set: 4,
|
||||
endpoints: Endpoints::from(endpoints),
|
||||
cmd_line: "test".to_string(),
|
||||
platform: format!("OS: {} | Arch: {}", std::env::consts::OS, std::env::consts::ARCH),
|
||||
};
|
||||
|
||||
let endpoint_pools = EndpointServerPools(vec![pool_endpoints]);
|
||||
|
||||
// format disks (only first time)
|
||||
rustfs_ecstore::store::init_local_disks(endpoint_pools.clone()).await.unwrap();
|
||||
|
||||
// create ECStore with dynamic port 0 (let OS assign) or fixed 9002 if free
|
||||
let port = 9002; // for simplicity
|
||||
let server_addr: std::net::SocketAddr = format!("127.0.0.1:{port}").parse().unwrap();
|
||||
let ecstore = ECStore::new(server_addr, endpoint_pools, CancellationToken::new())
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
// init bucket metadata system
|
||||
let buckets_list = ecstore
|
||||
.list_bucket(&rustfs_ecstore::store_api::BucketOptions {
|
||||
no_metadata: true,
|
||||
..Default::default()
|
||||
})
|
||||
.await
|
||||
.unwrap();
|
||||
let buckets = buckets_list.into_iter().map(|v| v.name).collect();
|
||||
rustfs_ecstore::bucket::metadata_sys::init_bucket_metadata_sys(ecstore.clone(), buckets).await;
|
||||
|
||||
//lmdb env
|
||||
// User home directory
|
||||
/*if let Ok(home_dir) = env::var("HOME").or_else(|_| env::var("USERPROFILE")) {
|
||||
let mut path = PathBuf::from(home_dir);
|
||||
path.push(format!(".{DEFAULT_LOG_FILENAME}"));
|
||||
path.push(DEFAULT_LOG_DIR);
|
||||
if ensure_directory_writable(&path) {
|
||||
//return path;
|
||||
}
|
||||
}*/
|
||||
let test_lmdb_lifecycle_dir = "/tmp/lmdb_lifecycle".to_string();
|
||||
let temp_dir = std::path::PathBuf::from(&test_lmdb_lifecycle_dir);
|
||||
if temp_dir.exists() {
|
||||
fs::remove_dir_all(&temp_dir).await.ok();
|
||||
}
|
||||
fs::create_dir_all(&temp_dir).await.unwrap();
|
||||
let lmdb_env = unsafe { EnvOpenOptions::new().max_dbs(100).open(&test_lmdb_lifecycle_dir).unwrap() };
|
||||
let bucket_name = format!("test-lc-cache-{}", "00000");
|
||||
let mut wtxn = lmdb_env.write_txn().unwrap();
|
||||
let db = match lmdb_env
|
||||
.database_options()
|
||||
.name(&format!("bucket_{}", bucket_name))
|
||||
.types::<I64<BigEndian>, LifecycleContentCodec>()
|
||||
.flags(DatabaseFlags::DUP_SORT)
|
||||
//.dup_sort_comparator::<>()
|
||||
.create(&mut wtxn)
|
||||
{
|
||||
Ok(db) => db,
|
||||
Err(err) => {
|
||||
panic!("lmdb error: {}", err);
|
||||
}
|
||||
};
|
||||
let _ = wtxn.commit();
|
||||
let _ = GLOBAL_LMDB_ENV.set(lmdb_env);
|
||||
let _ = GLOBAL_LMDB_DB.set(db);
|
||||
|
||||
// Store in global once lock
|
||||
let _ = GLOBAL_ENV.set((disk_paths.clone(), ecstore.clone()));
|
||||
|
||||
(disk_paths, ecstore)
|
||||
}
|
||||
|
||||
/// Test helper: Create a test bucket
|
||||
#[allow(dead_code)]
|
||||
async fn create_test_bucket(ecstore: &Arc<ECStore>, bucket_name: &str) {
|
||||
(**ecstore)
|
||||
.make_bucket(bucket_name, &Default::default())
|
||||
.await
|
||||
.expect("Failed to create test bucket");
|
||||
info!("Created test bucket: {}", bucket_name);
|
||||
}
|
||||
|
||||
/// Test helper: Create a test lock bucket
|
||||
async fn create_test_lock_bucket(ecstore: &Arc<ECStore>, bucket_name: &str) {
|
||||
(**ecstore)
|
||||
.make_bucket(
|
||||
bucket_name,
|
||||
&MakeBucketOptions {
|
||||
lock_enabled: true,
|
||||
versioning_enabled: true,
|
||||
..Default::default()
|
||||
},
|
||||
)
|
||||
.await
|
||||
.expect("Failed to create test bucket");
|
||||
info!("Created test bucket: {}", bucket_name);
|
||||
}
|
||||
|
||||
/// Test helper: Upload test object
|
||||
async fn upload_test_object(ecstore: &Arc<ECStore>, bucket: &str, object: &str, data: &[u8]) {
|
||||
let mut reader = PutObjReader::from_vec(data.to_vec());
|
||||
let object_info = (**ecstore)
|
||||
.put_object(bucket, object, &mut reader, &ObjectOptions::default())
|
||||
.await
|
||||
.expect("Failed to upload test object");
|
||||
|
||||
println!("object_info1: {:?}", object_info);
|
||||
|
||||
info!("Uploaded test object: {}/{} ({} bytes)", bucket, object, object_info.size);
|
||||
}
|
||||
|
||||
/// Test helper: Check if object exists
|
||||
async fn object_exists(ecstore: &Arc<ECStore>, bucket: &str, object: &str) -> bool {
|
||||
match (**ecstore).get_object_info(bucket, object, &ObjectOptions::default()).await {
|
||||
Ok(info) => !info.delete_marker,
|
||||
Err(_) => false,
|
||||
}
|
||||
}
|
||||
|
||||
fn ns_to_offset_datetime(ns: i128) -> Option<OffsetDateTime> {
|
||||
OffsetDateTime::from_unix_timestamp_nanos(ns).ok()
|
||||
}
|
||||
|
||||
fn convert_record_to_object_info(record: &LocalObjectRecord) -> ObjectInfo {
|
||||
let usage = &record.usage;
|
||||
|
||||
ObjectInfo {
|
||||
bucket: usage.bucket.clone(),
|
||||
name: usage.object.clone(),
|
||||
size: usage.total_size as i64,
|
||||
delete_marker: !usage.has_live_object && usage.delete_markers_count > 0,
|
||||
mod_time: usage.last_modified_ns.and_then(ns_to_offset_datetime),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn to_object_info(
|
||||
bucket: &str,
|
||||
object: &str,
|
||||
total_size: i64,
|
||||
delete_marker: bool,
|
||||
mod_time: OffsetDateTime,
|
||||
version_id: &str,
|
||||
) -> ObjectInfo {
|
||||
ObjectInfo {
|
||||
bucket: bucket.to_string(),
|
||||
name: object.to_string(),
|
||||
size: total_size,
|
||||
delete_marker,
|
||||
mod_time: Some(mod_time),
|
||||
version_id: Some(Uuid::parse_str(version_id).unwrap()),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
enum LifecycleType {
|
||||
ExpiryCurrent,
|
||||
ExpiryNoncurrent,
|
||||
TransitionCurrent,
|
||||
TransitionNoncurrent,
|
||||
}
|
||||
|
||||
#[derive(Debug, PartialEq, Eq)]
|
||||
pub struct LifecycleContent {
|
||||
ver_no: u8,
|
||||
ver_id: String,
|
||||
mod_time: OffsetDateTime,
|
||||
type_: LifecycleType,
|
||||
object_name: String,
|
||||
}
|
||||
|
||||
pub struct LifecycleContentCodec;
|
||||
|
||||
impl BytesEncode<'_> for LifecycleContentCodec {
|
||||
type EItem = LifecycleContent;
|
||||
|
||||
fn bytes_encode(lcc: &Self::EItem) -> Result<Cow<'_, [u8]>, BoxedError> {
|
||||
let (ver_no_byte, ver_id_bytes, mod_timestamp_bytes, type_byte, object_name_bytes) = match lcc {
|
||||
LifecycleContent {
|
||||
ver_no,
|
||||
ver_id,
|
||||
mod_time,
|
||||
type_: LifecycleType::ExpiryCurrent,
|
||||
object_name,
|
||||
} => (
|
||||
ver_no,
|
||||
ver_id.clone().into_bytes(),
|
||||
mod_time.unix_timestamp().to_be_bytes(),
|
||||
0,
|
||||
object_name.clone().into_bytes(),
|
||||
),
|
||||
LifecycleContent {
|
||||
ver_no,
|
||||
ver_id,
|
||||
mod_time,
|
||||
type_: LifecycleType::ExpiryNoncurrent,
|
||||
object_name,
|
||||
} => (
|
||||
ver_no,
|
||||
ver_id.clone().into_bytes(),
|
||||
mod_time.unix_timestamp().to_be_bytes(),
|
||||
1,
|
||||
object_name.clone().into_bytes(),
|
||||
),
|
||||
LifecycleContent {
|
||||
ver_no,
|
||||
ver_id,
|
||||
mod_time,
|
||||
type_: LifecycleType::TransitionCurrent,
|
||||
object_name,
|
||||
} => (
|
||||
ver_no,
|
||||
ver_id.clone().into_bytes(),
|
||||
mod_time.unix_timestamp().to_be_bytes(),
|
||||
2,
|
||||
object_name.clone().into_bytes(),
|
||||
),
|
||||
LifecycleContent {
|
||||
ver_no,
|
||||
ver_id,
|
||||
mod_time,
|
||||
type_: LifecycleType::TransitionNoncurrent,
|
||||
object_name,
|
||||
} => (
|
||||
ver_no,
|
||||
ver_id.clone().into_bytes(),
|
||||
mod_time.unix_timestamp().to_be_bytes(),
|
||||
3,
|
||||
object_name.clone().into_bytes(),
|
||||
),
|
||||
};
|
||||
|
||||
let mut output = Vec::<u8>::new();
|
||||
output.push(*ver_no_byte);
|
||||
output.extend_from_slice(&ver_id_bytes);
|
||||
output.extend_from_slice(&mod_timestamp_bytes);
|
||||
output.push(type_byte);
|
||||
output.extend_from_slice(&object_name_bytes);
|
||||
Ok(Cow::Owned(output))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> BytesDecode<'a> for LifecycleContentCodec {
|
||||
type DItem = LifecycleContent;
|
||||
|
||||
fn bytes_decode(bytes: &'a [u8]) -> Result<Self::DItem, BoxedError> {
|
||||
use std::mem::size_of;
|
||||
|
||||
let ver_no = match bytes.get(..size_of::<u8>()) {
|
||||
Some(bytes) => bytes.try_into().map(u8::from_be_bytes).unwrap(),
|
||||
None => return Err("invalid LifecycleContent: cannot extract ver_no".into()),
|
||||
};
|
||||
|
||||
let ver_id = match bytes.get(size_of::<u8>()..(36 + 1)) {
|
||||
Some(bytes) => unsafe { std::str::from_utf8_unchecked(bytes).to_string() },
|
||||
None => return Err("invalid LifecycleContent: cannot extract ver_id".into()),
|
||||
};
|
||||
|
||||
let mod_timestamp = match bytes.get((36 + 1)..(size_of::<i64>() + 36 + 1)) {
|
||||
Some(bytes) => bytes.try_into().map(i64::from_be_bytes).unwrap(),
|
||||
None => return Err("invalid LifecycleContent: cannot extract mod_time timestamp".into()),
|
||||
};
|
||||
|
||||
let type_ = match bytes.get(size_of::<i64>() + 36 + 1) {
|
||||
Some(&0) => LifecycleType::ExpiryCurrent,
|
||||
Some(&1) => LifecycleType::ExpiryNoncurrent,
|
||||
Some(&2) => LifecycleType::TransitionCurrent,
|
||||
Some(&3) => LifecycleType::TransitionNoncurrent,
|
||||
Some(_) => return Err("invalid LifecycleContent: invalid LifecycleType".into()),
|
||||
None => return Err("invalid LifecycleContent: cannot extract LifecycleType".into()),
|
||||
};
|
||||
|
||||
let object_name = match bytes.get((size_of::<i64>() + 36 + 1 + 1)..) {
|
||||
Some(bytes) => unsafe { std::str::from_utf8_unchecked(bytes).to_string() },
|
||||
None => return Err("invalid LifecycleContent: cannot extract object_name".into()),
|
||||
};
|
||||
|
||||
Ok(LifecycleContent {
|
||||
ver_no,
|
||||
ver_id,
|
||||
mod_time: OffsetDateTime::from_unix_timestamp(mod_timestamp).unwrap(),
|
||||
type_,
|
||||
object_name,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
mod serial_tests {
|
||||
use super::*;
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||
#[serial]
|
||||
//#[ignore]
|
||||
async fn test_lifecycle_cache_build() {
|
||||
let (_disk_paths, ecstore) = setup_test_env().await;
|
||||
|
||||
// Create test bucket and object
|
||||
let suffix = uuid::Uuid::new_v4().simple().to_string();
|
||||
let bucket_name = format!("test-lc-cache-{}", &suffix[..8]);
|
||||
let object_name = "test/object.txt"; // Match the lifecycle rule prefix "test/"
|
||||
let test_data = b"Hello, this is test data for lifecycle expiry!";
|
||||
|
||||
create_test_lock_bucket(&ecstore, bucket_name.as_str()).await;
|
||||
upload_test_object(&ecstore, bucket_name.as_str(), object_name, test_data).await;
|
||||
|
||||
// Verify object exists initially
|
||||
assert!(object_exists(&ecstore, bucket_name.as_str(), object_name).await);
|
||||
println!("✅ Object exists before lifecycle processing");
|
||||
|
||||
let scan_outcome = match local_scan::scan_and_persist_local_usage(ecstore.clone()).await {
|
||||
Ok(outcome) => outcome,
|
||||
Err(err) => {
|
||||
warn!("Local usage scan failed: {}", err);
|
||||
LocalScanOutcome::default()
|
||||
}
|
||||
};
|
||||
let bucket_objects_map = &scan_outcome.bucket_objects;
|
||||
|
||||
let records = match bucket_objects_map.get(&bucket_name) {
|
||||
Some(records) => records,
|
||||
None => {
|
||||
debug!("No local snapshot entries found for bucket {}; skipping lifecycle/integrity", bucket_name);
|
||||
&vec![]
|
||||
}
|
||||
};
|
||||
|
||||
if let Some(lmdb_env) = GLOBAL_LMDB_ENV.get() {
|
||||
if let Some(lmdb) = GLOBAL_LMDB_DB.get() {
|
||||
let mut wtxn = lmdb_env.write_txn().unwrap();
|
||||
|
||||
/*if let Ok((lc_config, _)) = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket_name.as_str()).await {
|
||||
if let Ok(object_info) = ecstore
|
||||
.get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
|
||||
.await
|
||||
{
|
||||
let event = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::eval_action_from_lifecycle(
|
||||
&lc_config,
|
||||
None,
|
||||
None,
|
||||
&object_info,
|
||||
)
|
||||
.await;
|
||||
|
||||
rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::apply_expiry_on_non_transitioned_objects(
|
||||
ecstore.clone(),
|
||||
&object_info,
|
||||
&event,
|
||||
&rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_audit::LcEventSrc::Scanner,
|
||||
)
|
||||
.await;
|
||||
|
||||
expired = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(2)).await;
|
||||
}
|
||||
}*/
|
||||
|
||||
for record in records {
|
||||
if !record.usage.has_live_object {
|
||||
continue;
|
||||
}
|
||||
|
||||
let object_info = convert_record_to_object_info(record);
|
||||
println!("object_info2: {:?}", object_info);
|
||||
let mod_time = object_info.mod_time.unwrap_or(OffsetDateTime::now_utc());
|
||||
let expiry_time = rustfs_ecstore::bucket::lifecycle::lifecycle::expected_expiry_time(mod_time, 1);
|
||||
|
||||
let version_id = if let Some(version_id) = object_info.version_id {
|
||||
version_id.to_string()
|
||||
} else {
|
||||
"zzzzzzzz-zzzz-zzzz-zzzz-zzzzzzzzzzzz".to_string()
|
||||
};
|
||||
|
||||
lmdb.put(
|
||||
&mut wtxn,
|
||||
&expiry_time.unix_timestamp(),
|
||||
&LifecycleContent {
|
||||
ver_no: 0,
|
||||
ver_id: version_id,
|
||||
mod_time,
|
||||
type_: LifecycleType::TransitionNoncurrent,
|
||||
object_name: object_info.name,
|
||||
},
|
||||
)
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
wtxn.commit().unwrap();
|
||||
|
||||
let mut wtxn = lmdb_env.write_txn().unwrap();
|
||||
let iter = lmdb.iter_mut(&mut wtxn).unwrap();
|
||||
//let _ = unsafe { iter.del_current().unwrap() };
|
||||
for row in iter {
|
||||
if let Ok(ref elm) = row {
|
||||
let LifecycleContent {
|
||||
ver_no,
|
||||
ver_id,
|
||||
mod_time,
|
||||
type_,
|
||||
object_name,
|
||||
} = &elm.1;
|
||||
println!("cache row:{} {} {} {:?} {}", ver_no, ver_id, mod_time, type_, object_name);
|
||||
}
|
||||
println!("row:{:?}", row);
|
||||
}
|
||||
//drop(iter);
|
||||
wtxn.commit().unwrap();
|
||||
}
|
||||
}
|
||||
|
||||
println!("Lifecycle cache test completed");
|
||||
}
|
||||
}
|
||||
@@ -18,9 +18,9 @@ use rustfs_ecstore::{
|
||||
bucket::metadata_sys,
|
||||
disk::endpoint::Endpoint,
|
||||
endpoints::{EndpointServerPools, Endpoints, PoolEndpoints},
|
||||
global::GLOBAL_TierConfigMgr,
|
||||
store::ECStore,
|
||||
store_api::{MakeBucketOptions, ObjectIO, ObjectOptions, PutObjReader, StorageAPI},
|
||||
tier::tier::TierConfigMgr,
|
||||
tier::tier_config::{TierConfig, TierMinIO, TierType},
|
||||
};
|
||||
use serial_test::serial;
|
||||
@@ -28,14 +28,11 @@ use std::sync::Once;
|
||||
use std::sync::OnceLock;
|
||||
use std::{path::PathBuf, sync::Arc, time::Duration};
|
||||
use tokio::fs;
|
||||
use tokio::sync::RwLock;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::warn;
|
||||
use tracing::{debug, info};
|
||||
use tracing::info;
|
||||
|
||||
static GLOBAL_ENV: OnceLock<(Vec<PathBuf>, Arc<ECStore>)> = OnceLock::new();
|
||||
static INIT: Once = Once::new();
|
||||
static GLOBAL_TIER_CONFIG_MGR: OnceLock<Arc<RwLock<TierConfigMgr>>> = OnceLock::new();
|
||||
|
||||
fn init_tracing() {
|
||||
INIT.call_once(|| {
|
||||
@@ -121,13 +118,11 @@ async fn setup_test_env() -> (Vec<PathBuf>, Arc<ECStore>) {
|
||||
// Store in global once lock
|
||||
let _ = GLOBAL_ENV.set((disk_paths.clone(), ecstore.clone()));
|
||||
|
||||
let _ = GLOBAL_TIER_CONFIG_MGR.set(TierConfigMgr::new());
|
||||
|
||||
(disk_paths, ecstore)
|
||||
}
|
||||
|
||||
/// Test helper: Create a test bucket
|
||||
async fn _create_test_bucket(ecstore: &Arc<ECStore>, bucket_name: &str) {
|
||||
async fn create_test_bucket(ecstore: &Arc<ECStore>, bucket_name: &str) {
|
||||
(**ecstore)
|
||||
.make_bucket(bucket_name, &Default::default())
|
||||
.await
|
||||
@@ -220,7 +215,7 @@ async fn set_bucket_lifecycle_transition(bucket_name: &str) -> Result<(), Box<dy
|
||||
</Filter>
|
||||
<Transition>
|
||||
<Days>0</Days>
|
||||
<StorageClass>COLDTIER</StorageClass>
|
||||
<StorageClass>COLDTIER44</StorageClass>
|
||||
</Transition>
|
||||
</Rule>
|
||||
<Rule>
|
||||
@@ -231,7 +226,7 @@ async fn set_bucket_lifecycle_transition(bucket_name: &str) -> Result<(), Box<dy
|
||||
</Filter>
|
||||
<NoncurrentVersionTransition>
|
||||
<NoncurrentDays>0</NoncurrentDays>
|
||||
<StorageClass>COLDTIER</StorageClass>
|
||||
<StorageClass>COLDTIER44</StorageClass>
|
||||
</NoncurrentVersionTransition>
|
||||
</Rule>
|
||||
</LifecycleConfiguration>"#;
|
||||
@@ -243,33 +238,51 @@ async fn set_bucket_lifecycle_transition(bucket_name: &str) -> Result<(), Box<dy
|
||||
|
||||
/// Test helper: Create a test tier
|
||||
#[allow(dead_code)]
|
||||
async fn create_test_tier() {
|
||||
async fn create_test_tier(server: u32) {
|
||||
let args = TierConfig {
|
||||
version: "v1".to_string(),
|
||||
tier_type: TierType::MinIO,
|
||||
name: "COLDTIER".to_string(),
|
||||
name: "COLDTIER44".to_string(),
|
||||
s3: None,
|
||||
aliyun: None,
|
||||
tencent: None,
|
||||
huaweicloud: None,
|
||||
azure: None,
|
||||
gcs: None,
|
||||
r2: None,
|
||||
rustfs: None,
|
||||
minio: Some(TierMinIO {
|
||||
access_key: "minioadmin".to_string(),
|
||||
secret_key: "minioadmin".to_string(),
|
||||
bucket: "mblock2".to_string(),
|
||||
endpoint: "http://127.0.0.1:9020".to_string(),
|
||||
prefix: "mypre3/".to_string(),
|
||||
region: "".to_string(),
|
||||
..Default::default()
|
||||
}),
|
||||
minio: if server == 1 {
|
||||
Some(TierMinIO {
|
||||
access_key: "minioadmin".to_string(),
|
||||
secret_key: "minioadmin".to_string(),
|
||||
bucket: "hello".to_string(),
|
||||
endpoint: "http://39.105.198.204:9000".to_string(),
|
||||
prefix: format!("mypre{}/", uuid::Uuid::new_v4()),
|
||||
region: "".to_string(),
|
||||
..Default::default()
|
||||
})
|
||||
} else {
|
||||
Some(TierMinIO {
|
||||
access_key: "minioadmin".to_string(),
|
||||
secret_key: "minioadmin".to_string(),
|
||||
bucket: "mblock2".to_string(),
|
||||
endpoint: "http://127.0.0.1:9020".to_string(),
|
||||
prefix: format!("mypre{}/", uuid::Uuid::new_v4()),
|
||||
region: "".to_string(),
|
||||
..Default::default()
|
||||
})
|
||||
},
|
||||
};
|
||||
let mut tier_config_mgr = GLOBAL_TIER_CONFIG_MGR.get().unwrap().write().await;
|
||||
let mut tier_config_mgr = GLOBAL_TierConfigMgr.write().await;
|
||||
if let Err(err) = tier_config_mgr.add(args, false).await {
|
||||
warn!("tier_config_mgr add failed, e: {:?}", err);
|
||||
println!("tier_config_mgr add failed, e: {:?}", err);
|
||||
panic!("tier add failed. {err}");
|
||||
}
|
||||
if let Err(e) = tier_config_mgr.save().await {
|
||||
warn!("tier_config_mgr save failed, e: {:?}", e);
|
||||
println!("tier_config_mgr save failed, e: {:?}", e);
|
||||
panic!("tier save failed");
|
||||
}
|
||||
info!("Created test tier: {}", "COLDTIER");
|
||||
println!("Created test tier: COLDTIER44");
|
||||
}
|
||||
|
||||
/// Test helper: Check if object exists
|
||||
@@ -284,9 +297,10 @@ async fn object_exists(ecstore: &Arc<ECStore>, bucket: &str, object: &str) -> bo
|
||||
#[allow(dead_code)]
|
||||
async fn object_is_delete_marker(ecstore: &Arc<ECStore>, bucket: &str, object: &str) -> bool {
|
||||
if let Ok(oi) = (**ecstore).get_object_info(bucket, object, &ObjectOptions::default()).await {
|
||||
debug!("oi: {:?}", oi);
|
||||
println!("oi: {:?}", oi);
|
||||
oi.delete_marker
|
||||
} else {
|
||||
println!("object_is_delete_marker is error");
|
||||
panic!("object_is_delete_marker is error");
|
||||
}
|
||||
}
|
||||
@@ -295,9 +309,10 @@ async fn object_is_delete_marker(ecstore: &Arc<ECStore>, bucket: &str, object: &
|
||||
#[allow(dead_code)]
|
||||
async fn object_is_transitioned(ecstore: &Arc<ECStore>, bucket: &str, object: &str) -> bool {
|
||||
if let Ok(oi) = (**ecstore).get_object_info(bucket, object, &ObjectOptions::default()).await {
|
||||
info!("oi: {:?}", oi);
|
||||
println!("oi: {:?}", oi);
|
||||
!oi.transitioned_object.status.is_empty()
|
||||
} else {
|
||||
println!("object_is_transitioned is error");
|
||||
panic!("object_is_transitioned is error");
|
||||
}
|
||||
}
|
||||
@@ -455,8 +470,9 @@ mod serial_tests {
|
||||
println!("Lifecycle expiry basic test completed");
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
|
||||
#[serial]
|
||||
//#[ignore]
|
||||
async fn test_lifecycle_expiry_deletemarker() {
|
||||
let (_disk_paths, ecstore) = setup_test_env().await;
|
||||
|
||||
@@ -578,12 +594,13 @@ mod serial_tests {
|
||||
println!("Lifecycle expiry basic test completed");
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
|
||||
#[serial]
|
||||
#[ignore]
|
||||
async fn test_lifecycle_transition_basic() {
|
||||
let (_disk_paths, ecstore) = setup_test_env().await;
|
||||
|
||||
//create_test_tier().await;
|
||||
create_test_tier(1).await;
|
||||
|
||||
// Create test bucket and object
|
||||
let suffix = uuid::Uuid::new_v4().simple().to_string();
|
||||
@@ -591,7 +608,8 @@ mod serial_tests {
|
||||
let object_name = "test/object.txt"; // Match the lifecycle rule prefix "test/"
|
||||
let test_data = b"Hello, this is test data for lifecycle expiry!";
|
||||
|
||||
create_test_lock_bucket(&ecstore, bucket_name.as_str()).await;
|
||||
//create_test_lock_bucket(&ecstore, bucket_name.as_str()).await;
|
||||
create_test_bucket(&ecstore, bucket_name.as_str()).await;
|
||||
upload_test_object(&ecstore, bucket_name.as_str(), object_name, test_data).await;
|
||||
|
||||
// Verify object exists initially
|
||||
@@ -599,13 +617,13 @@ mod serial_tests {
|
||||
println!("✅ Object exists before lifecycle processing");
|
||||
|
||||
// Set lifecycle configuration with very short expiry (0 days = immediate expiry)
|
||||
/*set_bucket_lifecycle_transition(bucket_name)
|
||||
set_bucket_lifecycle_transition(bucket_name.as_str())
|
||||
.await
|
||||
.expect("Failed to set lifecycle configuration");
|
||||
println!("✅ Lifecycle configuration set for bucket: {bucket_name}");
|
||||
|
||||
// Verify lifecycle configuration was set
|
||||
match rustfs_ecstore::bucket::metadata_sys::get(bucket_name).await {
|
||||
match rustfs_ecstore::bucket::metadata_sys::get(bucket_name.as_str()).await {
|
||||
Ok(bucket_meta) => {
|
||||
assert!(bucket_meta.lifecycle_config.is_some());
|
||||
println!("✅ Bucket metadata retrieved successfully");
|
||||
@@ -613,7 +631,7 @@ mod serial_tests {
|
||||
Err(e) => {
|
||||
println!("❌ Error retrieving bucket metadata: {e:?}");
|
||||
}
|
||||
}*/
|
||||
}
|
||||
|
||||
// Create scanner with very short intervals for testing
|
||||
let scanner_config = ScannerConfig {
|
||||
@@ -640,12 +658,11 @@ mod serial_tests {
|
||||
tokio::time::sleep(Duration::from_secs(5)).await;
|
||||
|
||||
// Check if object has been expired (deleted)
|
||||
//let check_result = object_is_transitioned(&ecstore, bucket_name, object_name).await;
|
||||
let check_result = object_exists(&ecstore, bucket_name.as_str(), object_name).await;
|
||||
let check_result = object_is_transitioned(&ecstore, &bucket_name, object_name).await;
|
||||
println!("Object exists after lifecycle processing: {check_result}");
|
||||
|
||||
if check_result {
|
||||
println!("✅ Object was not deleted by lifecycle processing");
|
||||
println!("✅ Object was transitioned by lifecycle processing");
|
||||
// Let's try to get object info to see its details
|
||||
match ecstore
|
||||
.get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
|
||||
@@ -663,7 +680,7 @@ mod serial_tests {
|
||||
}
|
||||
}
|
||||
} else {
|
||||
println!("❌ Object was deleted by lifecycle processing");
|
||||
println!("❌ Object was not transitioned by lifecycle processing");
|
||||
}
|
||||
|
||||
assert!(check_result);
|
||||
|
||||
@@ -94,7 +94,7 @@ async fn test_audit_log_dispatch_performance() {
|
||||
let start_result = system.start(config).await;
|
||||
if start_result.is_err() {
|
||||
println!("AuditSystem failed to start: {start_result:?}");
|
||||
return; // 或 assert!(false, "AuditSystem failed to start");
|
||||
return; // Alternatively: assert!(false, "AuditSystem failed to start");
|
||||
}
|
||||
|
||||
use chrono::Utc;
|
||||
|
||||
@@ -85,19 +85,14 @@ impl Display for DriveState {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub enum HealScanMode {
|
||||
Unknown,
|
||||
#[default]
|
||||
Normal,
|
||||
Deep,
|
||||
}
|
||||
|
||||
impl Default for HealScanMode {
|
||||
fn default() -> Self {
|
||||
Self::Normal
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize)]
|
||||
pub struct HealOpts {
|
||||
pub recursive: bool,
|
||||
@@ -175,11 +170,12 @@ pub struct HealChannelResponse {
|
||||
}
|
||||
|
||||
/// Heal priority
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum HealChannelPriority {
|
||||
/// Low priority
|
||||
Low,
|
||||
/// Normal priority
|
||||
#[default]
|
||||
Normal,
|
||||
/// High priority
|
||||
High,
|
||||
@@ -187,12 +183,6 @@ pub enum HealChannelPriority {
|
||||
Critical,
|
||||
}
|
||||
|
||||
impl Default for HealChannelPriority {
|
||||
fn default() -> Self {
|
||||
Self::Normal
|
||||
}
|
||||
}
|
||||
|
||||
/// Heal channel sender
|
||||
pub type HealChannelSender = mpsc::UnboundedSender<HealChannelCommand>;
|
||||
|
||||
|
||||
@@ -21,12 +21,12 @@ pub const APP_NAME: &str = "RustFS";
|
||||
/// Application version
|
||||
/// Default value: 1.0.0
|
||||
/// Environment variable: RUSTFS_VERSION
|
||||
pub const VERSION: &str = "0.0.1";
|
||||
pub const VERSION: &str = "1.0.0";
|
||||
|
||||
/// Default configuration logger level
|
||||
/// Default value: info
|
||||
/// Default value: error
|
||||
/// Environment variable: RUSTFS_LOG_LEVEL
|
||||
pub const DEFAULT_LOG_LEVEL: &str = "info";
|
||||
pub const DEFAULT_LOG_LEVEL: &str = "error";
|
||||
|
||||
/// Default configuration use stdout
|
||||
/// Default value: false
|
||||
@@ -40,22 +40,15 @@ pub const SAMPLE_RATIO: f64 = 1.0;
|
||||
pub const METER_INTERVAL: u64 = 30;
|
||||
|
||||
/// Default configuration service version
|
||||
/// Default value: 0.0.1
|
||||
pub const SERVICE_VERSION: &str = "0.0.1";
|
||||
/// Default value: 1.0.0
|
||||
/// Environment variable: RUSTFS_OBS_SERVICE_VERSION
|
||||
/// Uses the same value as VERSION constant
|
||||
pub const SERVICE_VERSION: &str = "1.0.0";
|
||||
|
||||
/// Default configuration environment
|
||||
/// Default value: production
|
||||
pub const ENVIRONMENT: &str = "production";
|
||||
|
||||
/// maximum number of connections
|
||||
/// This is the maximum number of connections that the server will accept.
|
||||
/// This is used to limit the number of connections to the server.
|
||||
pub const MAX_CONNECTIONS: usize = 100;
|
||||
/// timeout for connections
|
||||
/// This is the timeout for connections to the server.
|
||||
/// This is used to limit the time that a connection can be open.
|
||||
pub const DEFAULT_TIMEOUT_MS: u64 = 3000;
|
||||
|
||||
/// Default Access Key
|
||||
/// Default value: rustfsadmin
|
||||
/// Environment variable: RUSTFS_ACCESS_KEY
|
||||
@@ -145,7 +138,7 @@ pub const DEFAULT_LOG_ROTATION_SIZE_MB: u64 = 100;
|
||||
/// It is used to rotate the logs of the application.
|
||||
/// Default value: hour, eg: day,hour,minute,second
|
||||
/// Environment variable: RUSTFS_OBS_LOG_ROTATION_TIME
|
||||
pub const DEFAULT_LOG_ROTATION_TIME: &str = "day";
|
||||
pub const DEFAULT_LOG_ROTATION_TIME: &str = "hour";
|
||||
|
||||
/// Default log keep files for rustfs
|
||||
/// This is the default log keep files for rustfs.
|
||||
@@ -154,9 +147,18 @@ pub const DEFAULT_LOG_ROTATION_TIME: &str = "day";
|
||||
/// Environment variable: RUSTFS_OBS_LOG_KEEP_FILES
|
||||
pub const DEFAULT_LOG_KEEP_FILES: u16 = 30;
|
||||
|
||||
/// 1 KiB
|
||||
/// Default log local logging enabled for rustfs
|
||||
/// This is the default log local logging enabled for rustfs.
|
||||
/// It is used to enable or disable local logging of the application.
|
||||
/// Default value: false
|
||||
/// Environment variable: RUSTFS_OBS_LOCAL_LOGGING_ENABLED
|
||||
pub const DEFAULT_LOG_LOCAL_LOGGING_ENABLED: bool = false;
|
||||
|
||||
/// Constant representing 1 Kibibyte (1024 bytes)
|
||||
/// Default value: 1024
|
||||
pub const KI_B: usize = 1024;
|
||||
/// 1 MiB
|
||||
/// Constant representing 1 Mebibyte (1024 * 1024 bytes)
|
||||
/// Default value: 1048576
|
||||
pub const MI_B: usize = 1024 * 1024;
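As a quick illustration of how these size constants tend to be consumed (the buffer below is hypothetical, not code from the crate):

```rust
pub const KI_B: usize = 1024;
pub const MI_B: usize = 1024 * 1024;

fn main() {
    // Size a hypothetical 4 MiB read buffer from the shared constants.
    let buffer = vec![0u8; 4 * MI_B];
    println!("buffer holds {} KiB", buffer.len() / KI_B);
}
```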
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -169,16 +171,16 @@ mod tests {
|
||||
assert_eq!(APP_NAME, "RustFS");
|
||||
assert!(!APP_NAME.contains(' '), "App name should not contain spaces");
|
||||
|
||||
assert_eq!(VERSION, "0.0.1");
|
||||
assert_eq!(VERSION, "1.0.0");
|
||||
|
||||
assert_eq!(SERVICE_VERSION, "0.0.1");
|
||||
assert_eq!(SERVICE_VERSION, "1.0.0");
|
||||
assert_eq!(VERSION, SERVICE_VERSION, "Version and service version should be consistent");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_logging_constants() {
|
||||
// Test logging related constants
|
||||
assert_eq!(DEFAULT_LOG_LEVEL, "info");
|
||||
assert_eq!(DEFAULT_LOG_LEVEL, "error");
|
||||
assert!(
|
||||
["trace", "debug", "info", "warn", "error"].contains(&DEFAULT_LOG_LEVEL),
|
||||
"Log level should be a valid tracing level"
|
||||
@@ -199,14 +201,6 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_connection_constants() {
|
||||
// Test connection related constants
|
||||
assert_eq!(MAX_CONNECTIONS, 100);
|
||||
|
||||
assert_eq!(DEFAULT_TIMEOUT_MS, 3000);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_security_constants() {
|
||||
// Test security related constants
|
||||
@@ -309,8 +303,8 @@ mod tests {
|
||||
// assert!(DEFAULT_TIMEOUT_MS < u64::MAX, "Timeout should be reasonable");
|
||||
|
||||
// These are const non-zero values, so zero checks are redundant
|
||||
// assert!(DEFAULT_PORT != 0, "Default port should not be zero");
|
||||
// assert!(DEFAULT_CONSOLE_PORT != 0, "Console port should not be zero");
|
||||
assert_ne!(DEFAULT_PORT, 0, "Default port should not be zero");
|
||||
assert_ne!(DEFAULT_CONSOLE_PORT, 0, "Console port should not be zero");
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
pub(crate) mod app;
|
||||
pub(crate) mod console;
|
||||
pub(crate) mod env;
|
||||
pub(crate) mod profiler;
|
||||
pub(crate) mod runtime;
|
||||
pub(crate) mod targets;
|
||||
pub(crate) mod tls;
|
||||
|
||||
41
crates/config/src/constants/profiler.rs
Normal file
@@ -0,0 +1,41 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub const ENV_ENABLE_PROFILING: &str = "RUSTFS_ENABLE_PROFILING";
|
||||
|
||||
// CPU profiling
|
||||
pub const ENV_CPU_MODE: &str = "RUSTFS_PROF_CPU_MODE"; // off|continuous|periodic
|
||||
pub const ENV_CPU_FREQ: &str = "RUSTFS_PROF_CPU_FREQ";
|
||||
pub const ENV_CPU_INTERVAL_SECS: &str = "RUSTFS_PROF_CPU_INTERVAL_SECS";
|
||||
pub const ENV_CPU_DURATION_SECS: &str = "RUSTFS_PROF_CPU_DURATION_SECS";
|
||||
|
||||
// Memory profiling (jemalloc)
|
||||
pub const ENV_MEM_PERIODIC: &str = "RUSTFS_PROF_MEM_PERIODIC";
|
||||
pub const ENV_MEM_INTERVAL_SECS: &str = "RUSTFS_PROF_MEM_INTERVAL_SECS";
|
||||
|
||||
// Output directory
|
||||
pub const ENV_OUTPUT_DIR: &str = "RUSTFS_PROF_OUTPUT_DIR";
|
||||
|
||||
// Defaults
|
||||
pub const DEFAULT_ENABLE_PROFILING: bool = false;
|
||||
|
||||
pub const DEFAULT_CPU_MODE: &str = "off";
|
||||
pub const DEFAULT_CPU_FREQ: usize = 100;
|
||||
pub const DEFAULT_CPU_INTERVAL_SECS: u64 = 300;
|
||||
pub const DEFAULT_CPU_DURATION_SECS: u64 = 60;
|
||||
|
||||
pub const DEFAULT_MEM_PERIODIC: bool = false;
|
||||
pub const DEFAULT_MEM_INTERVAL_SECS: u64 = 300;
|
||||
|
||||
pub const DEFAULT_OUTPUT_DIR: &str = ".";
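A minimal sketch of how these profiler knobs could be resolved at startup under an "environment variable overrides default" policy; the `ProfilerConfig` struct and `from_env` helper are illustrative only, not part of the crate:

```rust
use std::env;

pub const ENV_ENABLE_PROFILING: &str = "RUSTFS_ENABLE_PROFILING";
pub const ENV_CPU_FREQ: &str = "RUSTFS_PROF_CPU_FREQ";
pub const DEFAULT_ENABLE_PROFILING: bool = false;
pub const DEFAULT_CPU_FREQ: usize = 100;

#[derive(Debug)]
pub struct ProfilerConfig {
    pub enabled: bool,
    pub cpu_freq: usize,
}

impl ProfilerConfig {
    // Read each knob from the environment, falling back to the defaults above.
    pub fn from_env() -> Self {
        let enabled = env::var(ENV_ENABLE_PROFILING)
            .ok()
            .and_then(|v| v.parse::<bool>().ok())
            .unwrap_or(DEFAULT_ENABLE_PROFILING);
        let cpu_freq = env::var(ENV_CPU_FREQ)
            .ok()
            .and_then(|v| v.parse::<usize>().ok())
            .unwrap_or(DEFAULT_CPU_FREQ);
        Self { enabled, cpu_freq }
    }
}

fn main() {
    println!("{:?}", ProfilerConfig::from_env());
}
```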
|
||||
@@ -22,7 +22,10 @@ pub const ENV_THREAD_STACK_SIZE: &str = "RUSTFS_RUNTIME_THREAD_STACK_SIZE";
|
||||
pub const ENV_THREAD_KEEP_ALIVE: &str = "RUSTFS_RUNTIME_THREAD_KEEP_ALIVE";
|
||||
pub const ENV_GLOBAL_QUEUE_INTERVAL: &str = "RUSTFS_RUNTIME_GLOBAL_QUEUE_INTERVAL";
|
||||
pub const ENV_THREAD_NAME: &str = "RUSTFS_RUNTIME_THREAD_NAME";
|
||||
pub const ENV_MAX_IO_EVENTS_PER_TICK: &str = "RUSTFS_RUNTIME_MAX_IO_EVENTS_PER_TICK";
|
||||
pub const ENV_RNG_SEED: &str = "RUSTFS_RUNTIME_RNG_SEED";
|
||||
/// Event polling interval
|
||||
pub const ENV_EVENT_INTERVAL: &str = "RUSTFS_RUNTIME_EVENT_INTERVAL";
|
||||
|
||||
// Default values for Tokio runtime
|
||||
pub const DEFAULT_WORKER_THREADS: usize = 16;
|
||||
@@ -32,4 +35,7 @@ pub const DEFAULT_THREAD_STACK_SIZE: usize = MI_B; // 1 MiB
|
||||
pub const DEFAULT_THREAD_KEEP_ALIVE: u64 = 60; // seconds
|
||||
pub const DEFAULT_GLOBAL_QUEUE_INTERVAL: u32 = 31;
|
||||
pub const DEFAULT_THREAD_NAME: &str = "rustfs-worker";
|
||||
pub const DEFAULT_MAX_IO_EVENTS_PER_TICK: usize = 1024;
|
||||
/// Event polling default (Tokio default 61)
|
||||
pub const DEFAULT_EVENT_INTERVAL: u32 = 61;
|
||||
pub const DEFAULT_RNG_SEED: Option<u64> = None; // None means random
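A hedged sketch of feeding these runtime defaults into Tokio's builder; the builder methods are standard Tokio APIs, but the wiring function itself is illustrative rather than the crate's actual startup code:

```rust
use std::time::Duration;
use tokio::runtime::{Builder, Runtime};

pub const DEFAULT_WORKER_THREADS: usize = 16;
pub const DEFAULT_THREAD_STACK_SIZE: usize = 1024 * 1024; // 1 MiB
pub const DEFAULT_THREAD_KEEP_ALIVE: u64 = 60; // seconds
pub const DEFAULT_GLOBAL_QUEUE_INTERVAL: u32 = 31;
pub const DEFAULT_THREAD_NAME: &str = "rustfs-worker";
pub const DEFAULT_MAX_IO_EVENTS_PER_TICK: usize = 1024;
pub const DEFAULT_EVENT_INTERVAL: u32 = 61;

// Build a multi-threaded runtime from the defaults; env-var overrides omitted for brevity.
fn build_runtime() -> std::io::Result<Runtime> {
    Builder::new_multi_thread()
        .worker_threads(DEFAULT_WORKER_THREADS)
        .thread_stack_size(DEFAULT_THREAD_STACK_SIZE)
        .thread_keep_alive(Duration::from_secs(DEFAULT_THREAD_KEEP_ALIVE))
        .global_queue_interval(DEFAULT_GLOBAL_QUEUE_INTERVAL)
        .thread_name(DEFAULT_THREAD_NAME)
        .max_io_events_per_tick(DEFAULT_MAX_IO_EVENTS_PER_TICK)
        .event_interval(DEFAULT_EVENT_INTERVAL)
        .enable_all()
        .build()
}

fn main() -> std::io::Result<()> {
    let rt = build_runtime()?;
    rt.block_on(async { println!("runtime up") });
    Ok(())
}
```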
|
||||
|
||||
@@ -21,6 +21,8 @@ pub use constants::console::*;
|
||||
#[cfg(feature = "constants")]
|
||||
pub use constants::env::*;
|
||||
#[cfg(feature = "constants")]
|
||||
pub use constants::profiler::*;
|
||||
#[cfg(feature = "constants")]
|
||||
pub use constants::runtime::*;
|
||||
#[cfg(feature = "constants")]
|
||||
pub use constants::targets::*;
|
||||
|
||||
@@ -226,7 +226,7 @@ fn test_password_variations() -> Result<(), crate::Error> {
|
||||
b"12345".as_slice(), // Numeric
|
||||
b"!@#$%^&*()".as_slice(), // Special characters
|
||||
b"\x00\x01\x02\x03".as_slice(), // Binary password
|
||||
"密码测试".as_bytes(), // Unicode password
|
||||
"пароль тест".as_bytes(), // Unicode password
|
||||
&[0xFF; 64], // Long binary password
|
||||
];
|
||||
|
||||
|
||||
@@ -1,267 +1,253 @@
|
||||
# KMS End-to-End Tests
|
||||
|
||||
本目录包含 RustFS KMS (Key Management Service) 的端到端集成测试,用于验证完整的 KMS 功能流程。
|
||||
This directory contains the integration suites used to validate the full RustFS KMS (Key Management Service) workflow.
|
||||
|
||||
## 📁 测试文件说明
|
||||
## 📁 Test Overview
|
||||
|
||||
### `kms_local_test.rs`
|
||||
本地KMS后端的端到端测试,包含:
|
||||
- 自动启动和配置本地KMS后端
|
||||
- 通过动态配置API配置KMS服务
|
||||
- 测试SSE-C(客户端提供密钥)加密流程
|
||||
- 验证S3兼容的对象加密/解密操作
|
||||
- 密钥生命周期管理测试
|
||||
End-to-end coverage for the local KMS backend:
|
||||
- Auto-start and configure the local backend
|
||||
- Configure KMS through the dynamic configuration API
|
||||
- Verify SSE-C (client-provided keys)
|
||||
- Exercise S3-compatible encryption/decryption
|
||||
- Validate key lifecycle management
|
||||
|
||||
### `kms_vault_test.rs`
|
||||
Vault KMS后端的端到端测试,包含:
|
||||
- 自动启动Vault开发服务器
|
||||
- 配置Vault transit engine和密钥
|
||||
- 通过动态配置API配置KMS服务
|
||||
- 测试完整的Vault KMS集成
|
||||
- 验证Token认证和加密操作
|
||||
End-to-end coverage for the Vault backend:
|
||||
- Launch a Vault dev server automatically
|
||||
- Configure the transit engine and encryption keys
|
||||
- Configure KMS via the dynamic configuration API
|
||||
- Run the full Vault integration flow
|
||||
- Validate token authentication and encryption operations
|
||||
|
||||
### `kms_comprehensive_test.rs`
|
||||
**完整的KMS功能测试套件**(当前因AWS SDK API兼容性问题暂时禁用),包含:
|
||||
- **Bucket加密配置**: SSE-S3和SSE-KMS默认加密设置
|
||||
- **完整的SSE加密模式测试**:
|
||||
- SSE-S3: S3管理的服务端加密
|
||||
- SSE-KMS: KMS管理的服务端加密
|
||||
- SSE-C: 客户端提供密钥的服务端加密
|
||||
- **对象操作测试**: 上传、下载、验证三种SSE模式
|
||||
- **分片上传测试**: 多部分上传支持所有SSE模式
|
||||
- **对象复制测试**: 不同SSE模式间的复制操作
|
||||
- **完整KMS API管理**:
|
||||
- 密钥生命周期管理(创建、列表、描述、删除、取消删除)
|
||||
- 直接加密/解密操作
|
||||
- 数据密钥生成和操作
|
||||
- KMS服务管理(启动、停止、状态查询)
|
||||
**Full KMS capability suite** (currently disabled because of AWS SDK compatibility issues):
|
||||
- **Bucket encryption configuration**: SSE-S3 and SSE-KMS defaults
|
||||
- **All SSE encryption modes**:
|
||||
- SSE-S3 (S3-managed server-side encryption)
|
||||
- SSE-KMS (KMS-managed server-side encryption)
|
||||
- SSE-C (client-provided keys)
|
||||
- **Object operations**: upload, download, and validation for every SSE mode
|
||||
- **Multipart uploads**: cover each SSE mode
|
||||
- **Object replication**: cross-mode replication scenarios
|
||||
- **Complete KMS API management**:
|
||||
- Key lifecycle (create, list, describe, delete, cancel delete)
|
||||
- Direct encrypt/decrypt operations
|
||||
- Data key generation and handling
|
||||
- KMS service lifecycle (start, stop, status)
|
||||
|
||||
### `kms_integration_test.rs`
|
||||
综合性KMS集成测试,包含:
|
||||
- 多后端兼容性测试
|
||||
- KMS服务生命周期测试
|
||||
- 错误处理和恢复测试
|
||||
- **注意**: 当前因AWS SDK API兼容性问题暂时禁用
|
||||
Broad integration tests that exercise:
|
||||
- Multiple backends
|
||||
- KMS lifecycle management
|
||||
- Error handling and recovery
|
||||
- **Note**: currently disabled because of AWS SDK compatibility gaps
|
||||
|
||||
## 🚀 如何运行测试
|
||||
## 🚀 Running Tests
|
||||
|
||||
### 前提条件
|
||||
### Prerequisites
|
||||
|
||||
1. **系统依赖**:
|
||||
1. **System dependencies**
|
||||
```bash
|
||||
# macOS
|
||||
brew install vault awscurl
|
||||
|
||||
|
||||
# Ubuntu/Debian
|
||||
apt-get install vault
|
||||
pip install awscurl
|
||||
```
|
||||
|
||||
2. **构建RustFS**:
|
||||
2. **Build RustFS**
|
||||
```bash
|
||||
# From the project root
|
||||
cargo build
|
||||
```
|
||||
|
||||
### 运行单个测试
|
||||
### Run individual suites
|
||||
|
||||
#### 本地KMS测试
|
||||
#### Local backend
|
||||
```bash
|
||||
cd crates/e2e_test
|
||||
cargo test test_local_kms_end_to_end -- --nocapture
|
||||
```
|
||||
|
||||
#### Vault KMS测试
|
||||
#### Vault backend
|
||||
```bash
|
||||
cd crates/e2e_test
|
||||
cargo test test_vault_kms_end_to_end -- --nocapture
|
||||
```
|
||||
|
||||
#### 高可用性测试
|
||||
#### High availability
|
||||
```bash
|
||||
cd crates/e2e_test
|
||||
cargo test test_vault_kms_high_availability -- --nocapture
|
||||
```
|
||||
|
||||
#### 完整功能测试(开发中)
|
||||
#### Comprehensive features (disabled)
|
||||
```bash
|
||||
cd crates/e2e_test
|
||||
# 注意:以下测试因AWS SDK API兼容性问题暂时禁用
|
||||
# Disabled due to AWS SDK compatibility gaps
|
||||
# cargo test test_comprehensive_kms_functionality -- --nocapture
|
||||
# cargo test test_sse_modes_compatibility -- --nocapture
|
||||
|
||||
# cargo test test_kms_api_comprehensive -- --nocapture
|
||||
```
|
||||
|
||||
### 运行所有KMS测试
|
||||
### Run all KMS suites
|
||||
```bash
|
||||
cd crates/e2e_test
|
||||
cargo test kms -- --nocapture
|
||||
```
|
||||
|
||||
### 串行运行(避免端口冲突)
|
||||
### Run serially (avoid port conflicts)
|
||||
```bash
|
||||
cd crates/e2e_test
|
||||
cargo test kms -- --nocapture --test-threads=1
|
||||
```
|
||||
|
||||
## 🔧 测试配置
|
||||
## 🔧 Configuration
|
||||
|
||||
### 环境变量
|
||||
### Environment variables
|
||||
```bash
|
||||
# 可选:自定义端口(默认使用9050)
|
||||
# Optional: custom RustFS port (default 9050)
|
||||
export RUSTFS_TEST_PORT=9050
|
||||
|
||||
# 可选:自定义Vault端口(默认使用8200)
|
||||
# Optional: custom Vault port (default 8200)
|
||||
export VAULT_TEST_PORT=8200
|
||||
|
||||
# 可选:启用详细日志
|
||||
# Optional: enable verbose logging
|
||||
export RUST_LOG=debug
|
||||
```
|
||||
|
||||
### 依赖的二进制文件路径
|
||||
### Required binaries
|
||||
|
||||
测试会自动查找以下二进制文件:
|
||||
- `../../target/debug/rustfs` - RustFS服务器
|
||||
- `vault` - Vault (需要在PATH中)
|
||||
- `/Users/dandan/Library/Python/3.9/bin/awscurl` - AWS签名工具
|
||||
Tests look for:
|
||||
- `../../target/debug/rustfs` – RustFS server
|
||||
- `vault` – Vault CLI (must be on PATH)
|
||||
- `/Users/dandan/Library/Python/3.9/bin/awscurl` – AWS SigV4 helper
|
||||
|
||||
## 📋 测试流程说明
|
||||
## 📋 Test Flow
|
||||
|
||||
### Local KMS测试流程
|
||||
1. **环境准备**:创建临时目录,设置KMS密钥存储路径
|
||||
2. **启动服务**:启动RustFS服务器,启用KMS功能
|
||||
3. **等待就绪**:检查端口监听和S3 API响应
|
||||
4. **配置KMS**:通过awscurl发送配置请求到admin API
|
||||
5. **启动KMS**:激活KMS服务
|
||||
6. **功能测试**:
|
||||
- 创建测试存储桶
|
||||
- 测试SSE-C加密(客户端提供密钥)
|
||||
- 验证对象加密/解密
|
||||
7. **清理**:终止进程,清理临时文件
|
||||
### Local backend
|
||||
1. **Prepare environment** – create temporary directories and key storage paths
|
||||
2. **Start RustFS** – launch the server with KMS enabled
|
||||
3. **Wait for readiness** – confirm the port listener and S3 API
|
||||
4. **Configure KMS** – send configuration via awscurl to the admin API
|
||||
5. **Start KMS** – activate the KMS service
|
||||
6. **Exercise functionality** (a Rust sketch of this step follows the list)
|
||||
- Create a test bucket
|
||||
- Run SSE-C encryption with client-provided keys
|
||||
- Validate encryption/decryption behavior
|
||||
7. **Cleanup** – stop processes and remove temporary files
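Step 6 of the flow above boils down to a small amount of SDK code; a hedged Rust sketch of the SSE-C round trip the suite performs (endpoint, credentials, bucket, and object names are placeholders):

```rust
use aws_sdk_s3::{config::Credentials, config::Region, primitives::ByteStream, Client};
use base64::Engine;

#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Placeholder endpoint/credentials — the real suite reads them from the test environment.
    let creds = Credentials::new("rustfsadmin", "rustfsadmin", None, None, "e2e");
    let config = aws_sdk_s3::Config::builder()
        .endpoint_url("http://127.0.0.1:9050")
        .credentials_provider(creds)
        .region(Region::new("us-east-1"))
        .force_path_style(true)
        .build();
    let client = Client::from_conf(config);

    // SSE-C requires the raw 32-byte key (base64) plus its MD5 digest on every request.
    let key = "01234567890123456789012345678901";
    let key_b64 = base64::engine::general_purpose::STANDARD.encode(key);
    let key_md5 = format!("{:x}", md5::compute(key));

    client
        .put_object()
        .bucket("test-bucket")
        .key("sse-c-object")
        .body(ByteStream::from_static(b"hello sse-c"))
        .sse_customer_algorithm("AES256")
        .sse_customer_key(&key_b64)
        .sse_customer_key_md5(&key_md5)
        .send()
        .await?;

    // The same key material must accompany the GET, otherwise the request is rejected.
    let got = client
        .get_object()
        .bucket("test-bucket")
        .key("sse-c-object")
        .sse_customer_algorithm("AES256")
        .sse_customer_key(&key_b64)
        .sse_customer_key_md5(&key_md5)
        .send()
        .await?;
    let downloaded = got.body.collect().await?.into_bytes();
    assert_eq!(&downloaded[..], b"hello sse-c");
    Ok(())
}
```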
|
||||
|
||||
### Vault KMS测试流程
|
||||
1. **启动Vault**:使用开发模式启动Vault服务器
|
||||
2. **配置Vault**:
|
||||
- 启用transit secrets engine
|
||||
- 创建加密密钥(rustfs-master-key)
|
||||
3. **启动RustFS**:启用KMS功能的RustFS服务器
|
||||
4. **配置KMS**:通过API配置Vault后端,包含:
|
||||
- Vault地址和Token认证
|
||||
- Transit engine配置
|
||||
- 密钥路径设置
|
||||
5. **功能测试**:完整的加密/解密流程测试
|
||||
6. **清理**:终止所有进程
|
||||
### Vault backend
|
||||
1. **Launch Vault** – start the dev-mode server
|
||||
2. **Configure Vault** (see the sketch after this list)
|
||||
- Enable the transit secrets engine
|
||||
- Create the `rustfs-master-key`
|
||||
3. **Start RustFS** – run the server with KMS enabled
|
||||
4. **Configure KMS** – point RustFS at Vault (address, token, transit config, key path)
|
||||
5. **Exercise functionality** – complete the encryption/decryption workflow
|
||||
6. **Cleanup** – stop all services
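A minimal sketch of the Vault preparation in steps 1–2, driven from Rust the way an e2e harness might do it; the dev-mode address and root token are assumptions matching Vault's defaults, not values taken from the suite:

```rust
use std::process::Command;

fn main() -> std::io::Result<()> {
    // Assumes a dev-mode Vault is already listening on the default address with a
    // known root token, e.g. started via `vault server -dev -dev-root-token-id=root`.
    let env = [("VAULT_ADDR", "http://127.0.0.1:8200"), ("VAULT_TOKEN", "root")];

    // Enable the transit secrets engine (failure is ignored if it is already enabled).
    let _ = Command::new("vault")
        .args(["secrets", "enable", "transit"])
        .envs(env)
        .status()?;

    // Create the master key the RustFS KMS backend will reference.
    let status = Command::new("vault")
        .args(["write", "-f", "transit/keys/rustfs-master-key"])
        .envs(env)
        .status()?;
    assert!(status.success(), "failed to create transit key");
    Ok(())
}
```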
|
||||
|
||||
## 🛠️ 故障排除
|
||||
## 🛠️ Troubleshooting
|
||||
|
||||
### 常见问题
|
||||
### Common issues
|
||||
|
||||
**Q: 测试失败 "RustFS server failed to become ready"**
|
||||
```
|
||||
A: 检查端口是否被占用:
|
||||
**Q: `RustFS server failed to become ready`**
|
||||
```bash
|
||||
lsof -i :9050
|
||||
kill -9 <PID> # 如果有进程占用端口
|
||||
kill -9 <PID> # Free the port if necessary
|
||||
```
|
||||
|
||||
**Q: Vault服务启动失败**
|
||||
```
|
||||
A: 确保Vault已安装且在PATH中:
|
||||
**Q: Vault fails to start**
|
||||
```bash
|
||||
which vault
|
||||
vault version
|
||||
```
|
||||
|
||||
**Q: awscurl认证失败**
|
||||
```
|
||||
A: 检查awscurl路径是否正确:
|
||||
**Q: awscurl authentication fails**
|
||||
```bash
|
||||
ls /Users/dandan/Library/Python/3.9/bin/awscurl
|
||||
# 或安装到不同路径:
|
||||
# Or install elsewhere
|
||||
pip install awscurl
|
||||
which awscurl # 然后更新测试中的路径
|
||||
which awscurl # Update the path in tests accordingly
|
||||
```
|
||||
|
||||
**Q: 测试超时**
|
||||
```
|
||||
A: 增加等待时间或检查日志:
|
||||
**Q: Tests time out**
|
||||
```bash
|
||||
RUST_LOG=debug cargo test test_local_kms_end_to_end -- --nocapture
|
||||
```
|
||||
|
||||
### 调试技巧
|
||||
### Debug tips
|
||||
|
||||
1. **查看详细日志**:
|
||||
1. **Enable verbose logs**
|
||||
```bash
|
||||
RUST_LOG=rustfs_kms=debug,rustfs=info cargo test -- --nocapture
|
||||
```
|
||||
|
||||
2. **保留临时文件**:
|
||||
修改测试代码,注释掉清理部分,检查生成的配置文件
|
||||
2. **Keep temporary files** – comment out cleanup logic to inspect generated configs
|
||||
|
||||
3. **单步调试**:
|
||||
在测试中添加 `std::thread::sleep` 来暂停执行,手动检查服务状态
|
||||
3. **Pause execution** – add `std::thread::sleep` for manual inspection during tests
|
||||
|
||||
4. **端口检查**:
|
||||
4. **Monitor ports**
|
||||
```bash
|
||||
# Check port status while the tests are running
|
||||
netstat -an | grep 9050
|
||||
curl http://127.0.0.1:9050/minio/health/ready
|
||||
```
|
||||
|
||||
## 📊 测试覆盖范围
|
||||
## 📊 Coverage
|
||||
|
||||
### 功能覆盖
|
||||
- ✅ KMS服务动态配置
|
||||
- ✅ 本地和Vault后端支持
|
||||
- ✅ AWS S3兼容加密接口
|
||||
- ✅ 密钥管理和生命周期
|
||||
- ✅ 错误处理和恢复
|
||||
- ✅ 高可用性场景
|
||||
### Functional
|
||||
- ✅ Dynamic KMS configuration
|
||||
- ✅ Local and Vault backends
|
||||
- ✅ AWS S3-compatible encryption APIs
|
||||
- ✅ Key lifecycle management
|
||||
- ✅ Error handling and recovery paths
|
||||
- ✅ High-availability behavior
|
||||
|
||||
### 加密模式覆盖
|
||||
- ✅ SSE-C (Server-Side Encryption with Customer-Provided Keys)
|
||||
- ✅ SSE-S3 (Server-Side Encryption with S3-Managed Keys)
|
||||
- ✅ SSE-KMS (Server-Side Encryption with KMS-Managed Keys)
|
||||
### Encryption modes
|
||||
- ✅ SSE-C (customer-provided)
|
||||
- ✅ SSE-S3 (S3-managed)
|
||||
- ✅ SSE-KMS (KMS-managed)
|
||||
|
||||
### S3操作覆盖
|
||||
- ✅ 对象上传/下载 (SSE-C模式)
|
||||
- 🚧 分片上传 (需要AWS SDK兼容性修复)
|
||||
- 🚧 对象复制 (需要AWS SDK兼容性修复)
|
||||
- 🚧 Bucket加密配置 (需要AWS SDK兼容性修复)
|
||||
### S3 operations
|
||||
- ✅ Object upload/download (SSE-C)
|
||||
- 🚧 Multipart uploads (pending AWS SDK fixes)
|
||||
- 🚧 Object replication (pending AWS SDK fixes)
|
||||
- 🚧 Bucket encryption defaults (pending AWS SDK fixes)
|
||||
|
||||
### KMS API覆盖
|
||||
- ✅ 基础密钥管理 (创建、列表)
|
||||
- 🚧 完整密钥生命周期 (需要AWS SDK兼容性修复)
|
||||
- 🚧 直接加密/解密操作 (需要AWS SDK兼容性修复)
|
||||
- 🚧 数据密钥生成和解密 (需要AWS SDK兼容性修复)
|
||||
- ✅ KMS服务管理 (配置、启动、停止、状态)
|
||||
### KMS API
|
||||
- ✅ Basic key management (create/list)
|
||||
- 🚧 Full key lifecycle (pending AWS SDK fixes)
|
||||
- 🚧 Direct encrypt/decrypt (pending AWS SDK fixes)
|
||||
- 🚧 Data key operations (pending AWS SDK fixes)
|
||||
- ✅ Service lifecycle (configure/start/stop/status)
|
||||
|
||||
### 认证方式覆盖
|
||||
- ✅ Vault Token认证
|
||||
- 🚧 Vault AppRole认证
|
||||
### Authentication
|
||||
- ✅ Vault token auth
|
||||
- 🚧 Vault AppRole auth
|
||||
|
||||
## 🔄 持续集成
|
||||
## 🔄 CI Integration
|
||||
|
||||
这些测试设计为可在CI/CD环境中运行:
|
||||
Designed to run inside CI/CD pipelines:
|
||||
|
||||
```yaml
|
||||
# GitHub Actions 示例
|
||||
- name: Run KMS E2E Tests
|
||||
run: |
|
||||
# 安装依赖
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y vault
|
||||
pip install awscurl
|
||||
|
||||
# 构建并测试
|
||||
|
||||
cargo build
|
||||
cd crates/e2e_test
|
||||
cargo test kms -- --nocapture --test-threads=1
|
||||
```
|
||||
|
||||
## 📚 相关文档
|
||||
## 📚 References
|
||||
|
||||
- [KMS 配置文档](../../../../docs/kms/README.md) - KMS功能完整文档
|
||||
- [动态配置API](../../../../docs/kms/http-api.md) - REST API接口说明
|
||||
- [故障排除指南](../../../../docs/kms/troubleshooting.md) - 常见问题解决
|
||||
- [KMS configuration guide](../../../../docs/kms/README.md)
|
||||
- [Dynamic configuration API](../../../../docs/kms/http-api.md)
|
||||
- [Troubleshooting](../../../../docs/kms/troubleshooting.md)
|
||||
|
||||
---
|
||||
|
||||
*这些测试确保KMS功能的稳定性和可靠性,为生产环境部署提供信心。*
|
||||
*These suites ensure KMS stability and reliability, building confidence for production deployments.*
|
||||
|
||||
@@ -547,9 +547,9 @@ pub async fn test_multipart_upload_with_config(
|
||||
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
let total_size = config.total_size();
|
||||
|
||||
info!("🧪 开始分片上传测试 - {:?}", config.encryption_type);
|
||||
info!("🧪 Starting multipart upload test - {:?}", config.encryption_type);
|
||||
info!(
|
||||
" 对象: {}, 分片: {}个, 每片: {}MB, 总计: {}MB",
|
||||
" Object: {}, parts: {}, part size: {} MB, total: {} MB",
|
||||
config.object_key,
|
||||
config.total_parts,
|
||||
config.part_size / (1024 * 1024),
|
||||
@@ -589,7 +589,7 @@ pub async fn test_multipart_upload_with_config(
|
||||
|
||||
let create_multipart_output = create_request.send().await?;
|
||||
let upload_id = create_multipart_output.upload_id().unwrap();
|
||||
info!("📋 创建分片上传,ID: {}", upload_id);
|
||||
info!("📋 Created multipart upload, ID: {}", upload_id);
|
||||
|
||||
// Step 2: Upload parts
|
||||
let mut completed_parts = Vec::new();
|
||||
@@ -598,7 +598,7 @@ pub async fn test_multipart_upload_with_config(
|
||||
let end = std::cmp::min(start + config.part_size, total_size);
|
||||
let part_data = &test_data[start..end];
|
||||
|
||||
info!("📤 上传分片 {} ({:.2}MB)", part_number, part_data.len() as f64 / (1024.0 * 1024.0));
|
||||
info!("📤 Uploading part {} ({:.2} MB)", part_number, part_data.len() as f64 / (1024.0 * 1024.0));
|
||||
|
||||
let mut upload_request = s3_client
|
||||
.upload_part()
|
||||
@@ -625,7 +625,7 @@ pub async fn test_multipart_upload_with_config(
|
||||
.build(),
|
||||
);
|
||||
|
||||
debug!("分片 {} 上传完成,ETag: {}", part_number, etag);
|
||||
debug!("Part {} uploaded with ETag {}", part_number, etag);
|
||||
}
|
||||
|
||||
// Step 3: Complete multipart upload
|
||||
@@ -633,7 +633,7 @@ pub async fn test_multipart_upload_with_config(
|
||||
.set_parts(Some(completed_parts))
|
||||
.build();
|
||||
|
||||
info!("🔗 完成分片上传");
|
||||
info!("🔗 Completing multipart upload");
|
||||
let complete_output = s3_client
|
||||
.complete_multipart_upload()
|
||||
.bucket(bucket)
|
||||
@@ -643,10 +643,10 @@ pub async fn test_multipart_upload_with_config(
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
debug!("完成分片上传,ETag: {:?}", complete_output.e_tag());
|
||||
debug!("Multipart upload finalized with ETag {:?}", complete_output.e_tag());
|
||||
|
||||
// Step 4: Download and verify
|
||||
info!("📥 下载文件并验证");
|
||||
info!("📥 Downloading object for verification");
|
||||
let mut get_request = s3_client.get_object().bucket(bucket).key(&config.object_key);
|
||||
|
||||
// Add encryption headers for SSE-C GET
|
||||
@@ -680,7 +680,7 @@ pub async fn test_multipart_upload_with_config(
|
||||
assert_eq!(downloaded_data.len(), total_size);
|
||||
assert_eq!(&downloaded_data[..], &test_data[..]);
|
||||
|
||||
info!("✅ 分片上传测试通过 - {:?}", config.encryption_type);
|
||||
info!("✅ Multipart upload test passed - {:?}", config.encryption_type);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -700,7 +700,7 @@ pub async fn test_all_multipart_encryption_types(
|
||||
bucket: &str,
|
||||
base_object_key: &str,
|
||||
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
info!("🧪 测试所有加密类型的分片上传");
|
||||
info!("🧪 Testing multipart uploads for every encryption type");
|
||||
|
||||
let part_size = 5 * 1024 * 1024; // 5MB per part
|
||||
let total_parts = 2;
|
||||
@@ -718,7 +718,7 @@ pub async fn test_all_multipart_encryption_types(
|
||||
test_multipart_upload_with_config(s3_client, bucket, &config).await?;
|
||||
}
|
||||
|
||||
info!("✅ 所有加密类型的分片上传测试通过");
|
||||
info!("✅ Multipart uploads succeeded for every encryption type");
|
||||
Ok(())
|
||||
}
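The helpers above all slice the test payload the same way; a compact, standalone sketch of that part-boundary arithmetic with hypothetical sizes (the real helpers take the sizes from the test config):

```rust
fn main() {
    let part_size = 5 * 1024 * 1024; // 5 MiB parts, as in the helper above
    let total_size = 12 * 1024 * 1024; // 12 MiB payload -> parts of 5, 5, and 2 MiB
    let total_parts = (total_size + part_size - 1) / part_size; // ceiling division

    for part_number in 1..=total_parts {
        // Each part covers [start, end); only the last part may be shorter.
        let start = (part_number - 1) * part_size;
        let end = std::cmp::min(start + part_size, total_size);
        println!("part {part_number}: {} bytes", end - start);
    }
}
```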
|
||||
|
||||
|
||||
@@ -33,7 +33,7 @@ use tracing::info;
|
||||
#[serial]
|
||||
async fn test_comprehensive_kms_full_workflow() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
info!("🏁 开始KMS全功能综合测试");
|
||||
info!("🏁 Start the KMS full-featured synthesis test");
|
||||
|
||||
let mut kms_env = LocalKMSTestEnvironment::new().await?;
|
||||
let _default_key_id = kms_env.start_rustfs_for_local_kms().await?;
|
||||
@@ -43,25 +43,25 @@ async fn test_comprehensive_kms_full_workflow() -> Result<(), Box<dyn std::error
|
||||
kms_env.base_env.create_test_bucket(TEST_BUCKET).await?;
|
||||
|
||||
// Phase 1: Test all single encryption types
|
||||
info!("📋 阶段1: 测试所有单文件加密类型");
|
||||
info!("📋 Phase 1: Test all single-file encryption types");
|
||||
test_sse_s3_encryption(&s3_client, TEST_BUCKET).await?;
|
||||
test_sse_kms_encryption(&s3_client, TEST_BUCKET).await?;
|
||||
test_sse_c_encryption(&s3_client, TEST_BUCKET).await?;
|
||||
|
||||
// Phase 2: Test KMS key management APIs
|
||||
info!("📋 阶段2: 测试KMS密钥管理API");
|
||||
info!("📋 Phase 2: Test the KMS Key Management API");
|
||||
test_kms_key_management(&kms_env.base_env.url, &kms_env.base_env.access_key, &kms_env.base_env.secret_key).await?;
|
||||
|
||||
// Phase 3: Test all multipart encryption types
|
||||
info!("📋 阶段3: 测试所有分片上传加密类型");
|
||||
info!("📋 Phase 3: Test all shard upload encryption types");
|
||||
test_all_multipart_encryption_types(&s3_client, TEST_BUCKET, "comprehensive-multipart-test").await?;
|
||||
|
||||
// Phase 4: Mixed workload test
|
||||
info!("📋 阶段4: 混合工作负载测试");
|
||||
info!("📋 Phase 4: Mixed workload testing");
|
||||
test_mixed_encryption_workload(&s3_client, TEST_BUCKET).await?;
|
||||
|
||||
kms_env.base_env.delete_test_bucket(TEST_BUCKET).await?;
|
||||
info!("✅ KMS全功能综合测试通过");
|
||||
info!("✅ KMS fully functional comprehensive test passed");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -70,7 +70,7 @@ async fn test_mixed_encryption_workload(
|
||||
s3_client: &aws_sdk_s3::Client,
|
||||
bucket: &str,
|
||||
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
info!("🔄 测试混合加密工作负载");
|
||||
info!("🔄 Test hybrid crypto workloads");
|
||||
|
||||
// Test configuration: different sizes and encryption types
|
||||
let test_configs = vec![
|
||||
@@ -89,11 +89,11 @@ async fn test_mixed_encryption_workload(
|
||||
];
|
||||
|
||||
for (i, config) in test_configs.iter().enumerate() {
|
||||
info!("🔄 执行混合测试 {}/{}: {:?}", i + 1, test_configs.len(), config.encryption_type);
|
||||
info!("🔄 Perform hybrid testing {}/{}: {:?}", i + 1, test_configs.len(), config.encryption_type);
|
||||
test_multipart_upload_with_config(s3_client, bucket, config).await?;
|
||||
}
|
||||
|
||||
info!("✅ 混合加密工作负载测试通过");
|
||||
info!("✅ Hybrid cryptographic workload tests pass");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -102,7 +102,7 @@ async fn test_mixed_encryption_workload(
|
||||
#[serial]
|
||||
async fn test_comprehensive_stress_test() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
info!("💪 开始KMS压力测试");
|
||||
info!("💪 Start the KMS stress test");
|
||||
|
||||
let mut kms_env = LocalKMSTestEnvironment::new().await?;
|
||||
let _default_key_id = kms_env.start_rustfs_for_local_kms().await?;
|
||||
@@ -120,7 +120,7 @@ async fn test_comprehensive_stress_test() -> Result<(), Box<dyn std::error::Erro
|
||||
|
||||
for config in stress_configs {
|
||||
info!(
|
||||
"💪 执行压力测试: {:?}, 总大小: {}MB",
|
||||
"💪 Perform stress test: {:?}, Total size: {}MB",
|
||||
config.encryption_type,
|
||||
config.total_size() / (1024 * 1024)
|
||||
);
|
||||
@@ -128,7 +128,7 @@ async fn test_comprehensive_stress_test() -> Result<(), Box<dyn std::error::Erro
|
||||
}
|
||||
|
||||
kms_env.base_env.delete_test_bucket(TEST_BUCKET).await?;
|
||||
info!("✅ KMS压力测试通过");
|
||||
info!("✅ KMS stress test passed");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -137,7 +137,7 @@ async fn test_comprehensive_stress_test() -> Result<(), Box<dyn std::error::Erro
|
||||
#[serial]
|
||||
async fn test_comprehensive_key_isolation() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
info!("🔐 开始加密密钥隔离综合测试");
|
||||
info!("🔐 Begin the comprehensive test of encryption key isolation");
|
||||
|
||||
let mut kms_env = LocalKMSTestEnvironment::new().await?;
|
||||
let _default_key_id = kms_env.start_rustfs_for_local_kms().await?;
|
||||
@@ -173,14 +173,14 @@ async fn test_comprehensive_key_isolation() -> Result<(), Box<dyn std::error::Er
|
||||
);
|
||||
|
||||
// Upload with different keys
|
||||
info!("🔐 上传文件用密钥1");
|
||||
info!("🔐 Key 1 for uploading files");
|
||||
test_multipart_upload_with_config(&s3_client, TEST_BUCKET, &config1).await?;
|
||||
|
||||
info!("🔐 上传文件用密钥2");
|
||||
info!("🔐 Key 2 for uploading files");
|
||||
test_multipart_upload_with_config(&s3_client, TEST_BUCKET, &config2).await?;
|
||||
|
||||
// Verify that files cannot be read with wrong keys
|
||||
info!("🔒 验证密钥隔离");
|
||||
info!("🔒 Verify key isolation");
|
||||
let wrong_key = "11111111111111111111111111111111";
|
||||
let wrong_key_b64 = base64::Engine::encode(&base64::engine::general_purpose::STANDARD, wrong_key);
|
||||
let wrong_key_md5 = format!("{:x}", md5::compute(wrong_key));
|
||||
@@ -196,11 +196,11 @@ async fn test_comprehensive_key_isolation() -> Result<(), Box<dyn std::error::Er
|
||||
.send()
|
||||
.await;
|
||||
|
||||
assert!(wrong_read_result.is_err(), "应该无法用错误密钥读取加密文件");
|
||||
info!("✅ 确认密钥隔离正常工作");
|
||||
assert!(wrong_read_result.is_err(), "The encrypted file should not be readable with the wrong key");
|
||||
info!("✅ Confirm that key isolation is working correctly");
|
||||
|
||||
kms_env.base_env.delete_test_bucket(TEST_BUCKET).await?;
|
||||
info!("✅ 加密密钥隔离综合测试通过");
|
||||
info!("✅ Encryption key isolation comprehensive test passed");
|
||||
Ok(())
|
||||
}
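The key-isolation test above hinges on how SSE-C key material is encoded; a self-contained sketch of that derivation (the key value is just an example):

```rust
use base64::Engine;

fn main() {
    // SSE-C expects exactly 32 bytes of raw key material per request.
    let key = "11111111111111111111111111111111";
    assert_eq!(key.len(), 32);

    // The suite sends the key base64-encoded plus an MD5 digest rendered as hex,
    // mirroring the `sse_customer_key` / `sse_customer_key_md5` arguments used above.
    let key_b64 = base64::engine::general_purpose::STANDARD.encode(key);
    let key_md5 = format!("{:x}", md5::compute(key));

    println!("customer key (base64): {key_b64}");
    println!("customer key MD5 (hex): {key_md5}");
}
```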
|
||||
|
||||
@@ -209,7 +209,7 @@ async fn test_comprehensive_key_isolation() -> Result<(), Box<dyn std::error::Er
|
||||
#[serial]
|
||||
async fn test_comprehensive_concurrent_operations() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
info!("⚡ 开始并发加密操作综合测试");
|
||||
info!("⚡ Started comprehensive testing of concurrent encryption operations");
|
||||
|
||||
let mut kms_env = LocalKMSTestEnvironment::new().await?;
|
||||
let _default_key_id = kms_env.start_rustfs_for_local_kms().await?;
|
||||
@@ -228,7 +228,7 @@ async fn test_comprehensive_concurrent_operations() -> Result<(), Box<dyn std::e
|
||||
];
|
||||
|
||||
// Execute uploads concurrently
|
||||
info!("⚡ 开始并发上传");
|
||||
info!("⚡ Start concurrent uploads");
|
||||
let mut tasks = Vec::new();
|
||||
for config in concurrent_configs {
|
||||
let client = s3_client.clone();
|
||||
@@ -243,10 +243,10 @@ async fn test_comprehensive_concurrent_operations() -> Result<(), Box<dyn std::e
|
||||
task.await??;
|
||||
}
|
||||
|
||||
info!("✅ 所有并发操作完成");
|
||||
info!("✅ All concurrent operations are completed");
|
||||
|
||||
kms_env.base_env.delete_test_bucket(TEST_BUCKET).await?;
|
||||
info!("✅ 并发加密操作综合测试通过");
|
||||
info!("✅ The comprehensive test of concurrent encryption operation has passed");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -255,7 +255,7 @@ async fn test_comprehensive_concurrent_operations() -> Result<(), Box<dyn std::e
|
||||
#[serial]
|
||||
async fn test_comprehensive_performance_benchmark() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
info!("📊 开始KMS性能基准测试");
|
||||
info!("📊 Start KMS performance benchmarking");
|
||||
|
||||
let mut kms_env = LocalKMSTestEnvironment::new().await?;
|
||||
let _default_key_id = kms_env.start_rustfs_for_local_kms().await?;
|
||||
@@ -278,7 +278,7 @@ async fn test_comprehensive_performance_benchmark() -> Result<(), Box<dyn std::e
|
||||
];
|
||||
|
||||
for (size_name, config) in perf_configs {
|
||||
info!("📊 测试{}文件性能 ({}MB)", size_name, config.total_size() / (1024 * 1024));
|
||||
info!("📊 Test {} file performance ({}MB)", size_name, config.total_size() / (1024 * 1024));
|
||||
|
||||
let start_time = std::time::Instant::now();
|
||||
test_multipart_upload_with_config(&s3_client, TEST_BUCKET, &config).await?;
|
||||
@@ -286,7 +286,7 @@ async fn test_comprehensive_performance_benchmark() -> Result<(), Box<dyn std::e
|
||||
|
||||
let throughput_mbps = (config.total_size() as f64 / (1024.0 * 1024.0)) / duration.as_secs_f64();
|
||||
info!(
|
||||
"📊 {}文件测试完成: {:.2}秒, 吞吐量: {:.2} MB/s",
|
||||
"📊 {} file test completed: {:.2} seconds, throughput: {:.2} MB/s",
|
||||
size_name,
|
||||
duration.as_secs_f64(),
|
||||
throughput_mbps
|
||||
@@ -294,6 +294,6 @@ async fn test_comprehensive_performance_benchmark() -> Result<(), Box<dyn std::e
|
||||
}
|
||||
|
||||
kms_env.base_env.delete_test_bucket(TEST_BUCKET).await?;
|
||||
info!("✅ KMS性能基准测试通过");
|
||||
info!("✅ KMS performance benchmark passed");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -25,6 +25,7 @@ use super::common::LocalKMSTestEnvironment;
|
||||
use crate::common::{TEST_BUCKET, init_logging};
|
||||
use aws_sdk_s3::types::ServerSideEncryption;
|
||||
use base64::Engine;
|
||||
use md5::compute;
|
||||
use serial_test::serial;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::Semaphore;
|
||||
@@ -71,7 +72,7 @@ async fn test_kms_zero_byte_file_encryption() -> Result<(), Box<dyn std::error::
|
||||
info!("📤 Testing SSE-C with zero-byte file");
|
||||
let test_key = "01234567890123456789012345678901";
|
||||
let test_key_b64 = base64::engine::general_purpose::STANDARD.encode(test_key);
|
||||
let test_key_md5 = format!("{:x}", md5::compute(test_key));
|
||||
let test_key_md5 = format!("{:x}", compute(test_key));
|
||||
let object_key_c = "zero-byte-sse-c";
|
||||
|
||||
let _put_response_c = s3_client
|
||||
@@ -165,7 +166,7 @@ async fn test_kms_single_byte_file_encryption() -> Result<(), Box<dyn std::error
|
||||
info!("📤 Testing SSE-C with single-byte file");
|
||||
let test_key = "01234567890123456789012345678901";
|
||||
let test_key_b64 = base64::engine::general_purpose::STANDARD.encode(test_key);
|
||||
let test_key_md5 = format!("{:x}", md5::compute(test_key));
|
||||
let test_key_md5 = format!("{:x}", compute(test_key));
|
||||
let object_key_c = "single-byte-sse-c";
|
||||
|
||||
s3_client
|
||||
@@ -293,7 +294,7 @@ async fn test_kms_invalid_key_scenarios() -> Result<(), Box<dyn std::error::Erro
|
||||
info!("🔍 Testing invalid SSE-C key length");
|
||||
let invalid_short_key = "short"; // Too short
|
||||
let invalid_key_b64 = base64::engine::general_purpose::STANDARD.encode(invalid_short_key);
|
||||
let invalid_key_md5 = format!("{:x}", md5::compute(invalid_short_key));
|
||||
let invalid_key_md5 = format!("{:x}", compute(invalid_short_key));
|
||||
|
||||
let invalid_key_result = s3_client
|
||||
.put_object()
|
||||
@@ -333,7 +334,7 @@ async fn test_kms_invalid_key_scenarios() -> Result<(), Box<dyn std::error::Erro
|
||||
info!("🔍 Testing access to SSE-C object without key");
|
||||
|
||||
// First upload a valid SSE-C object
|
||||
let valid_key_md5 = format!("{:x}", md5::compute(valid_key));
|
||||
let valid_key_md5 = format!("{:x}", compute(valid_key));
|
||||
s3_client
|
||||
.put_object()
|
||||
.bucket(TEST_BUCKET)
|
||||
@@ -420,7 +421,7 @@ async fn test_kms_concurrent_encryption() -> Result<(), Box<dyn std::error::Erro
|
||||
// SSE-C
|
||||
let key = format!("testkey{i:026}"); // 32-byte key
|
||||
let key_b64 = base64::engine::general_purpose::STANDARD.encode(&key);
|
||||
let key_md5 = format!("{:x}", md5::compute(&key));
|
||||
let key_md5 = format!("{:x}", compute(&key));
|
||||
|
||||
client
|
||||
.put_object()
|
||||
@@ -492,8 +493,8 @@ async fn test_kms_key_validation_security() -> Result<(), Box<dyn std::error::Er
|
||||
|
||||
let key1_b64 = base64::engine::general_purpose::STANDARD.encode(key1);
|
||||
let key2_b64 = base64::engine::general_purpose::STANDARD.encode(key2);
|
||||
let key1_md5 = format!("{:x}", md5::compute(key1));
|
||||
let key2_md5 = format!("{:x}", md5::compute(key2));
|
||||
let key1_md5 = format!("{:x}", compute(key1));
|
||||
let key2_md5 = format!("{:x}", compute(key2));
|
||||
|
||||
// Upload same data with different keys
|
||||
s3_client
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
//! multipart upload behaviour.
|
||||
|
||||
use crate::common::{TEST_BUCKET, init_logging};
|
||||
use md5::compute;
|
||||
use serial_test::serial;
|
||||
use tokio::time::{Duration, sleep};
|
||||
use tracing::{error, info};
|
||||
@@ -132,8 +133,8 @@ async fn test_vault_kms_key_isolation() -> Result<(), Box<dyn std::error::Error
|
||||
let key2 = "98765432109876543210987654321098";
|
||||
let key1_b64 = base64::Engine::encode(&base64::engine::general_purpose::STANDARD, key1);
|
||||
let key2_b64 = base64::Engine::encode(&base64::engine::general_purpose::STANDARD, key2);
|
||||
let key1_md5 = format!("{:x}", md5::compute(key1));
|
||||
let key2_md5 = format!("{:x}", md5::compute(key2));
|
||||
let key1_md5 = format!("{:x}", compute(key1));
|
||||
let key2_md5 = format!("{:x}", compute(key2));
|
||||
|
||||
let data1 = b"Vault data encrypted with key 1";
|
||||
let data2 = b"Vault data encrypted with key 2";
|
||||
|
||||
@@ -13,25 +13,25 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! 分片上传加密功能的分步测试用例
|
||||
//! Step-by-step test cases for multipart upload encryption
|
||||
//!
|
||||
//! 这个测试套件将验证分片上传加密功能的每一个步骤:
|
||||
//! 1. 测试基础的单分片加密(验证加密基础逻辑)
|
||||
//! 2. 测试多分片上传(验证分片拼接逻辑)
|
||||
//! 3. 测试加密元数据的保存和读取
|
||||
//! 4. 测试完整的分片上传加密流程
|
||||
//! This test suite validates every step of the multipart upload encryption feature:
//! 1. Basic single-part encryption (validates the core encryption logic)
//! 2. Multi-part uploads (verifies the part-assembly logic)
//! 3. Saving and reading encryption metadata
//! 4. The complete multipart upload encryption flow
|
||||
|
||||
use super::common::LocalKMSTestEnvironment;
|
||||
use crate::common::{TEST_BUCKET, init_logging};
|
||||
use serial_test::serial;
|
||||
use tracing::{debug, info};
|
||||
|
||||
/// 步骤1:测试基础单文件加密功能(确保SSE-S3在非分片场景下正常工作)
|
||||
/// Step 1: Test basic single-file encryption (ensure SSE-S3 works correctly in non-multipart scenarios)
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_step1_basic_single_file_encryption() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
info!("🧪 步骤1:测试基础单文件加密功能");
|
||||
info!("🧪 Step 1: Test the basic single-file encryption function");
|
||||
|
||||
let mut kms_env = LocalKMSTestEnvironment::new().await?;
|
||||
let _default_key_id = kms_env.start_rustfs_for_local_kms().await?;
|
||||
@@ -40,11 +40,11 @@ async fn test_step1_basic_single_file_encryption() -> Result<(), Box<dyn std::er
|
||||
let s3_client = kms_env.base_env.create_s3_client();
|
||||
kms_env.base_env.create_test_bucket(TEST_BUCKET).await?;
|
||||
|
||||
// 测试小文件加密(应该会内联存储)
|
||||
// Test small file encryption (should be stored inline)
|
||||
let test_data = b"Hello, this is a small test file for SSE-S3!";
|
||||
let object_key = "test-single-file-encrypted";
|
||||
|
||||
info!("📤 上传小文件({}字节),启用SSE-S3加密", test_data.len());
|
||||
info!("📤 Upload a small file ({} bytes) with SSE-S3 encryption enabled", test_data.len());
|
||||
let put_response = s3_client
|
||||
.put_object()
|
||||
.bucket(TEST_BUCKET)
|
||||
@@ -54,41 +54,41 @@ async fn test_step1_basic_single_file_encryption() -> Result<(), Box<dyn std::er
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
debug!("PUT响应ETag: {:?}", put_response.e_tag());
|
||||
debug!("PUT响应SSE: {:?}", put_response.server_side_encryption());
|
||||
debug!("PUT responds to ETags: {:?}", put_response.e_tag());
|
||||
debug!("PUT responds to SSE: {:?}", put_response.server_side_encryption());
|
||||
|
||||
// 验证PUT响应包含正确的加密头
|
||||
// Verify that the PUT response contains the correct encryption header
|
||||
assert_eq!(
|
||||
put_response.server_side_encryption(),
|
||||
Some(&aws_sdk_s3::types::ServerSideEncryption::Aes256)
|
||||
);
|
||||
|
||||
info!("📥 下载文件并验证加密状态");
|
||||
info!("📥 Download the file and verify the encryption status");
|
||||
let get_response = s3_client.get_object().bucket(TEST_BUCKET).key(object_key).send().await?;
|
||||
|
||||
debug!("GET响应SSE: {:?}", get_response.server_side_encryption());
|
||||
debug!("GET responds to SSE: {:?}", get_response.server_side_encryption());
|
||||
|
||||
// 验证GET响应包含正确的加密头
|
||||
// Verify that the GET response contains the correct encryption header
|
||||
assert_eq!(
|
||||
get_response.server_side_encryption(),
|
||||
Some(&aws_sdk_s3::types::ServerSideEncryption::Aes256)
|
||||
);
|
||||
|
||||
// 验证数据完整性
|
||||
// Verify data integrity
|
||||
let downloaded_data = get_response.body.collect().await?.into_bytes();
|
||||
assert_eq!(&downloaded_data[..], test_data);
|
||||
|
||||
kms_env.base_env.delete_test_bucket(TEST_BUCKET).await?;
|
||||
info!("✅ 步骤1通过:基础单文件加密功能正常");
|
||||
info!("✅ Step 1: The basic single file encryption function is normal");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// 步骤2:测试不加密的分片上传(确保分片上传基础功能正常)
|
||||
/// Step 2: Test unencrypted multipart upload (ensure the basic multipart flow works correctly)
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_step2_basic_multipart_upload_without_encryption() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
info!("🧪 步骤2:测试不加密的分片上传");
|
||||
info!("🧪 Step 2: Test unencrypted shard uploads");
|
||||
|
||||
let mut kms_env = LocalKMSTestEnvironment::new().await?;
|
||||
let _default_key_id = kms_env.start_rustfs_for_local_kms().await?;
|
||||
@@ -102,12 +102,16 @@ async fn test_step2_basic_multipart_upload_without_encryption() -> Result<(), Bo
|
||||
let total_parts = 2;
|
||||
let total_size = part_size * total_parts;
|
||||
|
||||
// 生成测试数据(有明显的模式便于验证)
|
||||
// Generate test data (with obvious patterns for easy verification)
|
||||
let test_data: Vec<u8> = (0..total_size).map(|i| (i % 256) as u8).collect();
|
||||
|
||||
info!("🚀 开始分片上传(无加密):{} parts,每个 {}MB", total_parts, part_size / (1024 * 1024));
|
||||
info!(
|
||||
"🚀 Start sharded upload (unencrypted): {} parts, {}MB each",
|
||||
total_parts,
|
||||
part_size / (1024 * 1024)
|
||||
);
|
||||
|
||||
// 步骤1:创建分片上传
|
||||
// Step 1: Create the multipart upload
|
||||
let create_multipart_output = s3_client
|
||||
.create_multipart_upload()
|
||||
.bucket(TEST_BUCKET)
|
||||
@@ -116,16 +120,16 @@ async fn test_step2_basic_multipart_upload_without_encryption() -> Result<(), Bo
|
||||
.await?;
|
||||
|
||||
let upload_id = create_multipart_output.upload_id().unwrap();
|
||||
info!("📋 创建分片上传,ID: {}", upload_id);
|
||||
info!("📋 Create a shard upload with ID: {}", upload_id);
|
||||
|
||||
// 步骤2:上传各个分片
|
||||
// Step 2: Upload each part
|
||||
let mut completed_parts = Vec::new();
|
||||
for part_number in 1..=total_parts {
|
||||
let start = (part_number - 1) * part_size;
|
||||
let end = std::cmp::min(start + part_size, total_size);
|
||||
let part_data = &test_data[start..end];
|
||||
|
||||
info!("📤 上传分片 {} ({} bytes)", part_number, part_data.len());
|
||||
info!("📤 Upload the shard {} ({} bytes)", part_number, part_data.len());
|
||||
|
||||
let upload_part_output = s3_client
|
||||
.upload_part()
|
||||
@@ -145,15 +149,15 @@ async fn test_step2_basic_multipart_upload_without_encryption() -> Result<(), Bo
|
||||
.build(),
|
||||
);
|
||||
|
||||
debug!("分片 {} 上传完成,ETag: {}", part_number, etag);
|
||||
debug!("Fragment {} upload complete,ETag: {}", part_number, etag);
|
||||
}
|
||||
|
||||
// 步骤3:完成分片上传
|
||||
// Step 3: Complete the multipart upload
|
||||
let completed_multipart_upload = aws_sdk_s3::types::CompletedMultipartUpload::builder()
|
||||
.set_parts(Some(completed_parts))
|
||||
.build();
|
||||
|
||||
info!("🔗 完成分片上传");
|
||||
info!("🔗 Complete the shard upload");
|
||||
let complete_output = s3_client
|
||||
.complete_multipart_upload()
|
||||
.bucket(TEST_BUCKET)
|
||||
@@ -163,10 +167,10 @@ async fn test_step2_basic_multipart_upload_without_encryption() -> Result<(), Bo
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
debug!("完成分片上传,ETag: {:?}", complete_output.e_tag());
|
||||
debug!("Complete the shard upload,ETag: {:?}", complete_output.e_tag());
|
||||
|
||||
// 步骤4:下载并验证
|
||||
info!("📥 下载文件并验证数据完整性");
|
||||
// Step 4: Download and verify
|
||||
info!("📥 Download the file and verify data integrity");
|
||||
let get_response = s3_client.get_object().bucket(TEST_BUCKET).key(object_key).send().await?;
|
||||
|
||||
let downloaded_data = get_response.body.collect().await?.into_bytes();
|
||||
@@ -174,16 +178,16 @@ async fn test_step2_basic_multipart_upload_without_encryption() -> Result<(), Bo
|
||||
assert_eq!(&downloaded_data[..], &test_data[..]);
|
||||
|
||||
kms_env.base_env.delete_test_bucket(TEST_BUCKET).await?;
|
||||
info!("✅ 步骤2通过:不加密的分片上传功能正常");
|
||||
info!("✅ Step 2: Unencrypted shard upload functions normally");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// 步骤3:测试分片上传 + SSE-S3加密(重点测试)
|
||||
/// Step 3: Test multipart upload + SSE-S3 encryption (key test)
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_step3_multipart_upload_with_sse_s3() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
info!("🧪 步骤3:测试分片上传 + SSE-S3加密");
|
||||
info!("🧪 Step 3: Test Shard Upload + SSE-S3 Encryption");
|
||||
|
||||
let mut kms_env = LocalKMSTestEnvironment::new().await?;
|
||||
let _default_key_id = kms_env.start_rustfs_for_local_kms().await?;
|
||||
@@ -197,16 +201,16 @@ async fn test_step3_multipart_upload_with_sse_s3() -> Result<(), Box<dyn std::er
|
||||
let total_parts = 2;
|
||||
let total_size = part_size * total_parts;
|
||||
|
||||
// 生成测试数据
|
||||
// Generate test data
|
||||
let test_data: Vec<u8> = (0..total_size).map(|i| ((i / 1000) % 256) as u8).collect();
|
||||
|
||||
info!(
|
||||
"🔐 开始分片上传(SSE-S3加密):{} parts,每个 {}MB",
|
||||
"🔐 Start sharded upload (SSE-S3 encryption): {} parts, {}MB each",
|
||||
total_parts,
|
||||
part_size / (1024 * 1024)
|
||||
);
|
||||
|
||||
// 步骤1:创建分片上传并启用SSE-S3
|
||||
// Step 1: Create the multipart upload and enable SSE-S3
|
||||
let create_multipart_output = s3_client
|
||||
.create_multipart_upload()
|
||||
.bucket(TEST_BUCKET)
|
||||
@@ -216,24 +220,24 @@ async fn test_step3_multipart_upload_with_sse_s3() -> Result<(), Box<dyn std::er
|
||||
.await?;
|
||||
|
||||
let upload_id = create_multipart_output.upload_id().unwrap();
|
||||
info!("📋 创建加密分片上传,ID: {}", upload_id);
|
||||
info!("📋 Create an encrypted shard upload with ID: {}", upload_id);
|
||||
|
||||
// 验证CreateMultipartUpload响应(如果有SSE头的话)
|
||||
// Verify the CreateMultipartUpload response (if there is an SSE header)
|
||||
if let Some(sse) = create_multipart_output.server_side_encryption() {
|
||||
debug!("CreateMultipartUpload包含SSE响应: {:?}", sse);
|
||||
debug!("CreateMultipartUpload Contains SSE responses: {:?}", sse);
|
||||
assert_eq!(sse, &aws_sdk_s3::types::ServerSideEncryption::Aes256);
|
||||
} else {
|
||||
debug!("CreateMultipartUpload不包含SSE响应头(某些实现中正常)");
|
||||
debug!("CreateMultipartUpload does not contain SSE response headers (normal in some implementations)");
|
||||
}
|
||||
|
||||
// 步骤2:上传各个分片
|
||||
// Step 2: Upload each part
|
||||
let mut completed_parts = Vec::new();
|
||||
for part_number in 1..=total_parts {
|
||||
let start = (part_number - 1) * part_size;
|
||||
let end = std::cmp::min(start + part_size, total_size);
|
||||
let part_data = &test_data[start..end];
|
||||
|
||||
info!("🔐 上传加密分片 {} ({} bytes)", part_number, part_data.len());
|
||||
info!("🔐 Upload encrypted shards {} ({} bytes)", part_number, part_data.len());
|
||||
|
||||
let upload_part_output = s3_client
|
||||
.upload_part()
|
||||
@@ -253,15 +257,15 @@ async fn test_step3_multipart_upload_with_sse_s3() -> Result<(), Box<dyn std::er
|
||||
.build(),
|
||||
);
|
||||
|
||||
debug!("加密分片 {} 上传完成,ETag: {}", part_number, etag);
|
||||
debug!("Encrypted shard {} upload complete,ETag: {}", part_number, etag);
|
||||
}
|
||||
|
||||
// 步骤3:完成分片上传
|
||||
// Step 3: Complete the multipart upload
|
||||
let completed_multipart_upload = aws_sdk_s3::types::CompletedMultipartUpload::builder()
|
||||
.set_parts(Some(completed_parts))
|
||||
.build();
|
||||
|
||||
info!("🔗 完成加密分片上传");
|
||||
info!("🔗 Complete the encrypted shard upload");
|
||||
let complete_output = s3_client
|
||||
.complete_multipart_upload()
|
||||
.bucket(TEST_BUCKET)
|
||||
@@ -271,43 +275,43 @@ async fn test_step3_multipart_upload_with_sse_s3() -> Result<(), Box<dyn std::er
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
debug!("完成加密分片上传,ETag: {:?}", complete_output.e_tag());
|
||||
debug!("Encrypted multipart upload completed with ETag {:?}", complete_output.e_tag());
|
||||
|
||||
// 步骤4:HEAD请求检查元数据
|
||||
info!("📋 检查对象元数据");
|
||||
// Step 4: HEAD request to inspect metadata
|
||||
info!("📋 Inspecting object metadata");
|
||||
let head_response = s3_client.head_object().bucket(TEST_BUCKET).key(object_key).send().await?;
|
||||
|
||||
debug!("HEAD响应 SSE: {:?}", head_response.server_side_encryption());
|
||||
debug!("HEAD响应 元数据: {:?}", head_response.metadata());
|
||||
debug!("HEAD response SSE: {:?}", head_response.server_side_encryption());
|
||||
debug!("HEAD response metadata: {:?}", head_response.metadata());
|
||||
|
||||
// 步骤5:GET请求下载并验证
|
||||
info!("📥 下载加密文件并验证");
|
||||
// Step 5: GET request to download and verify
|
||||
info!("📥 Downloading encrypted object for verification");
|
||||
let get_response = s3_client.get_object().bucket(TEST_BUCKET).key(object_key).send().await?;
|
||||
|
||||
debug!("GET响应 SSE: {:?}", get_response.server_side_encryption());
|
||||
debug!("GET response SSE: {:?}", get_response.server_side_encryption());
|
||||
|
||||
// 🎯 关键验证:GET响应必须包含SSE-S3加密头
|
||||
// 🎯 Critical check: GET response must include SSE-S3 headers
|
||||
assert_eq!(
|
||||
get_response.server_side_encryption(),
|
||||
Some(&aws_sdk_s3::types::ServerSideEncryption::Aes256)
|
||||
);
|
||||
|
||||
// 验证数据完整性
|
||||
// Verify data integrity
|
||||
let downloaded_data = get_response.body.collect().await?.into_bytes();
|
||||
assert_eq!(downloaded_data.len(), total_size);
|
||||
assert_eq!(&downloaded_data[..], &test_data[..]);
|
||||
|
||||
kms_env.base_env.delete_test_bucket(TEST_BUCKET).await?;
|
||||
info!("✅ 步骤3通过:分片上传 + SSE-S3加密功能正常");
|
||||
info!("✅ Step 3 passed: multipart upload with SSE-S3 encryption");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// 步骤4:测试更大的分片上传(测试流式加密)
|
||||
/// Step 4: test larger multipart uploads (streaming encryption)
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_step4_large_multipart_upload_with_encryption() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
info!("🧪 步骤4:测试大文件分片上传加密");
|
||||
info!("🧪 Step 4: test large-file multipart encryption");
|
||||
|
||||
let mut kms_env = LocalKMSTestEnvironment::new().await?;
|
||||
let _default_key_id = kms_env.start_rustfs_for_local_kms().await?;
|
||||
@@ -317,18 +321,18 @@ async fn test_step4_large_multipart_upload_with_encryption() -> Result<(), Box<d
|
||||
kms_env.base_env.create_test_bucket(TEST_BUCKET).await?;
|
||||
|
||||
let object_key = "test-large-multipart-encrypted";
|
||||
let part_size = 6 * 1024 * 1024; // 6MB per part (大于1MB加密块大小)
|
||||
let total_parts = 3; // 总共18MB
|
||||
let part_size = 6 * 1024 * 1024; // 6 MB per part (greater than the 1 MB encryption chunk)
|
||||
let total_parts = 3; // 18 MB total
|
||||
let total_size = part_size * total_parts;
|
||||
|
||||
info!(
|
||||
"🗂️ 生成大文件测试数据:{} parts,每个 {}MB,总计 {}MB",
|
||||
"🗂️ Generated large-file test data: {} parts, {} MB each, {} MB total",
|
||||
total_parts,
|
||||
part_size / (1024 * 1024),
|
||||
total_size / (1024 * 1024)
|
||||
);
|
||||
|
||||
// 生成大文件测试数据(使用复杂模式便于验证)
|
||||
// Generate large test data (complex pattern for validation)
|
||||
let test_data: Vec<u8> = (0..total_size)
|
||||
.map(|i| {
|
||||
let part_num = i / part_size;
|
||||
@@ -337,9 +341,9 @@ async fn test_step4_large_multipart_upload_with_encryption() -> Result<(), Box<d
|
||||
})
|
||||
.collect();
|
||||
|
||||
info!("🔐 开始大文件分片上传(SSE-S3加密)");
|
||||
info!("🔐 Starting large-file multipart upload (SSE-S3 encryption)");
|
||||
|
||||
// 创建分片上传
|
||||
// Create multipart upload
|
||||
let create_multipart_output = s3_client
|
||||
.create_multipart_upload()
|
||||
.bucket(TEST_BUCKET)
|
||||
@@ -349,9 +353,9 @@ async fn test_step4_large_multipart_upload_with_encryption() -> Result<(), Box<d
|
||||
.await?;
|
||||
|
||||
let upload_id = create_multipart_output.upload_id().unwrap();
|
||||
info!("📋 创建大文件加密分片上传,ID: {}", upload_id);
|
||||
info!("📋 Created large encrypted multipart upload, ID: {}", upload_id);
|
||||
|
||||
// 上传各个分片
|
||||
// Upload each part
|
||||
let mut completed_parts = Vec::new();
|
||||
for part_number in 1..=total_parts {
|
||||
let start = (part_number - 1) * part_size;
|
||||
@@ -359,7 +363,7 @@ async fn test_step4_large_multipart_upload_with_encryption() -> Result<(), Box<d
|
||||
let part_data = &test_data[start..end];
|
||||
|
||||
info!(
|
||||
"🔐 上传大文件加密分片 {} ({:.2}MB)",
|
||||
"🔐 Uploading encrypted large-file part {} ({:.2} MB)",
|
||||
part_number,
|
||||
part_data.len() as f64 / (1024.0 * 1024.0)
|
||||
);
|
||||
@@ -382,15 +386,15 @@ async fn test_step4_large_multipart_upload_with_encryption() -> Result<(), Box<d
|
||||
.build(),
|
||||
);
|
||||
|
||||
debug!("大文件加密分片 {} 上传完成,ETag: {}", part_number, etag);
|
||||
debug!("Large encrypted part {} uploaded with ETag {}", part_number, etag);
|
||||
}
|
||||
|
||||
// 完成分片上传
|
||||
// Complete the multipart upload
|
||||
let completed_multipart_upload = aws_sdk_s3::types::CompletedMultipartUpload::builder()
|
||||
.set_parts(Some(completed_parts))
|
||||
.build();
|
||||
|
||||
info!("🔗 完成大文件加密分片上传");
|
||||
info!("🔗 Completing large encrypted multipart upload");
|
||||
let complete_output = s3_client
|
||||
.complete_multipart_upload()
|
||||
.bucket(TEST_BUCKET)
|
||||
@@ -400,40 +404,40 @@ async fn test_step4_large_multipart_upload_with_encryption() -> Result<(), Box<d
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
debug!("完成大文件加密分片上传,ETag: {:?}", complete_output.e_tag());
|
||||
debug!("Large encrypted multipart upload completed with ETag {:?}", complete_output.e_tag());
|
||||
|
||||
// 下载并验证
|
||||
info!("📥 下载大文件并验证");
|
||||
// Download and verify
|
||||
info!("📥 Downloading large object for verification");
|
||||
let get_response = s3_client.get_object().bucket(TEST_BUCKET).key(object_key).send().await?;
|
||||
|
||||
// 验证加密头
|
||||
// Verify encryption headers
|
||||
assert_eq!(
|
||||
get_response.server_side_encryption(),
|
||||
Some(&aws_sdk_s3::types::ServerSideEncryption::Aes256)
|
||||
);
|
||||
|
||||
// 验证数据完整性
|
||||
// Verify data integrity
|
||||
let downloaded_data = get_response.body.collect().await?.into_bytes();
|
||||
assert_eq!(downloaded_data.len(), total_size);
|
||||
|
||||
// 逐字节验证数据(对于大文件更严格)
|
||||
// Validate bytes individually (stricter for large files)
|
||||
for (i, (&actual, &expected)) in downloaded_data.iter().zip(test_data.iter()).enumerate() {
|
||||
if actual != expected {
|
||||
panic!("大文件数据在第{i}字节不匹配: 实际={actual}, 期待={expected}");
|
||||
panic!("Large file mismatch at byte {i}: actual={actual}, expected={expected}");
|
||||
}
|
||||
}
|
||||
|
||||
kms_env.base_env.delete_test_bucket(TEST_BUCKET).await?;
|
||||
info!("✅ 步骤4通过:大文件分片上传加密功能正常");
|
||||
info!("✅ Step 4 passed: large-file multipart encryption succeeded");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// 步骤5:测试所有加密类型的分片上传
|
||||
/// Step 5: test multipart uploads for every encryption mode
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_step5_all_encryption_types_multipart() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
info!("🧪 步骤5:测试所有加密类型的分片上传");
|
||||
info!("🧪 Step 5: test multipart uploads for every encryption mode");
|
||||
|
||||
let mut kms_env = LocalKMSTestEnvironment::new().await?;
|
||||
let _default_key_id = kms_env.start_rustfs_for_local_kms().await?;
|
||||
@@ -446,8 +450,8 @@ async fn test_step5_all_encryption_types_multipart() -> Result<(), Box<dyn std::
|
||||
let total_parts = 2;
|
||||
let total_size = part_size * total_parts;
|
||||
|
||||
// 测试SSE-KMS
|
||||
info!("🔐 测试 SSE-KMS 分片上传");
|
||||
// Test SSE-KMS
|
||||
info!("🔐 Testing SSE-KMS multipart upload");
|
||||
test_multipart_encryption_type(
|
||||
&s3_client,
|
||||
TEST_BUCKET,
|
||||
@@ -459,8 +463,8 @@ async fn test_step5_all_encryption_types_multipart() -> Result<(), Box<dyn std::
|
||||
)
|
||||
.await?;
|
||||
|
||||
// 测试SSE-C
|
||||
info!("🔐 测试 SSE-C 分片上传");
|
||||
// Test SSE-C
|
||||
info!("🔐 Testing SSE-C multipart upload");
|
||||
test_multipart_encryption_type(
|
||||
&s3_client,
|
||||
TEST_BUCKET,
|
||||
@@ -473,7 +477,7 @@ async fn test_step5_all_encryption_types_multipart() -> Result<(), Box<dyn std::
|
||||
.await?;
|
||||
|
||||
kms_env.base_env.delete_test_bucket(TEST_BUCKET).await?;
|
||||
info!("✅ 步骤5通过:所有加密类型的分片上传功能正常");
|
||||
info!("✅ Step 5 passed: multipart uploads succeeded for every encryption mode");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -483,7 +487,7 @@ enum EncryptionType {
|
||||
SSEC,
|
||||
}
|
||||
|
||||
/// 辅助函数:测试特定加密类型的分片上传
|
||||
/// Helper: test multipart uploads for a specific encryption type
|
||||
async fn test_multipart_encryption_type(
|
||||
s3_client: &aws_sdk_s3::Client,
|
||||
bucket: &str,
|
||||
@@ -493,10 +497,10 @@ async fn test_multipart_encryption_type(
|
||||
total_parts: usize,
|
||||
encryption_type: EncryptionType,
|
||||
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
// 生成测试数据
|
||||
// Generate test data
|
||||
let test_data: Vec<u8> = (0..total_size).map(|i| ((i * 7) % 256) as u8).collect();
|
||||
|
||||
// 准备SSE-C所需的密钥(如果需要)
|
||||
// Prepare SSE-C keys when required
|
||||
let (sse_c_key, sse_c_md5) = if matches!(encryption_type, EncryptionType::SSEC) {
|
||||
let key = "01234567890123456789012345678901";
|
||||
let key_b64 = base64::Engine::encode(&base64::engine::general_purpose::STANDARD, key);
|
||||
@@ -506,9 +510,9 @@ async fn test_multipart_encryption_type(
|
||||
(None, None)
|
||||
};
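// Hedged sketch (not part of this change): the hunk above elides how the SSE-C key MD5 is
// derived. A typical derivation, assuming the `md5` crate is available, would be:
//
//     let key_md5_b64 = base64::Engine::encode(
//         &base64::engine::general_purpose::STANDARD,
//         md5::compute(key.as_bytes()).0,
//     );
//     (Some(key_b64), Some(key_md5_b64))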
|
||||
|
||||
info!("📋 创建分片上传 - {:?}", encryption_type);
|
||||
info!("📋 Creating multipart upload - {:?}", encryption_type);
|
||||
|
||||
// 创建分片上传
|
||||
// Create multipart upload
|
||||
let mut create_request = s3_client.create_multipart_upload().bucket(bucket).key(object_key);
|
||||
|
||||
create_request = match encryption_type {
|
||||
@@ -522,7 +526,7 @@ async fn test_multipart_encryption_type(
|
||||
let create_multipart_output = create_request.send().await?;
|
||||
let upload_id = create_multipart_output.upload_id().unwrap();
|
||||
|
||||
// 上传分片
|
||||
// Upload parts
|
||||
let mut completed_parts = Vec::new();
|
||||
for part_number in 1..=total_parts {
|
||||
let start = (part_number - 1) * part_size;
|
||||
@@ -537,7 +541,7 @@ async fn test_multipart_encryption_type(
|
||||
.part_number(part_number as i32)
|
||||
.body(aws_sdk_s3::primitives::ByteStream::from(part_data.to_vec()));
|
||||
|
||||
// SSE-C需要在每个UploadPart请求中包含密钥
|
||||
// SSE-C requires the key on each UploadPart request
|
||||
if matches!(encryption_type, EncryptionType::SSEC) {
|
||||
upload_request = upload_request
|
||||
.sse_customer_algorithm("AES256")
|
||||
@@ -554,10 +558,10 @@ async fn test_multipart_encryption_type(
|
||||
.build(),
|
||||
);
|
||||
|
||||
debug!("{:?} 分片 {} 上传完成", encryption_type, part_number);
|
||||
debug!("{:?} part {} uploaded", encryption_type, part_number);
|
||||
}
|
||||
|
||||
// 完成分片上传
|
||||
// Complete the multipart upload
|
||||
let completed_multipart_upload = aws_sdk_s3::types::CompletedMultipartUpload::builder()
|
||||
.set_parts(Some(completed_parts))
|
||||
.build();
|
||||
@@ -571,10 +575,10 @@ async fn test_multipart_encryption_type(
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
// 下载并验证
|
||||
// Download and verify
|
||||
let mut get_request = s3_client.get_object().bucket(bucket).key(object_key);
|
||||
|
||||
// SSE-C需要在GET请求中包含密钥
|
||||
// SSE-C requires the key on GET requests
|
||||
if matches!(encryption_type, EncryptionType::SSEC) {
|
||||
get_request = get_request
|
||||
.sse_customer_algorithm("AES256")
|
||||
@@ -584,7 +588,7 @@ async fn test_multipart_encryption_type(
|
||||
|
||||
let get_response = get_request.send().await?;
|
||||
|
||||
// 验证加密头
|
||||
// Verify encryption headers
|
||||
match encryption_type {
|
||||
EncryptionType::SSEKMS => {
|
||||
assert_eq!(
|
||||
@@ -597,11 +601,11 @@ async fn test_multipart_encryption_type(
|
||||
}
|
||||
}
|
||||
|
||||
// 验证数据完整性
|
||||
// Verify data integrity
|
||||
let downloaded_data = get_response.body.collect().await?.into_bytes();
|
||||
assert_eq!(downloaded_data.len(), total_size);
|
||||
assert_eq!(&downloaded_data[..], &test_data[..]);
|
||||
|
||||
info!("✅ {:?} 分片上传测试通过", encryption_type);
|
||||
info!("✅ {:?} multipart upload test passed", encryption_type);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -346,7 +346,7 @@ impl KMSTestSuite {
|
||||
/// Run the complete test suite
|
||||
pub async fn run_test_suite(&self) -> Vec<TestResult> {
|
||||
init_logging();
|
||||
info!("🚀 开始KMS统一测试套件");
|
||||
info!("🚀 Starting unified KMS test suite");
|
||||
|
||||
let start_time = Instant::now();
|
||||
let mut results = Vec::new();
|
||||
@@ -359,17 +359,17 @@ impl KMSTestSuite {
|
||||
.filter(|test| !self.config.include_critical_only || test.is_critical)
|
||||
.collect();
|
||||
|
||||
info!("📊 测试计划: {} 个测试将被执行", tests_to_run.len());
|
||||
info!("📊 Test plan: {} test(s) scheduled", tests_to_run.len());
|
||||
for (i, test) in tests_to_run.iter().enumerate() {
|
||||
info!(" {}. {} ({})", i + 1, test.name, test.category.as_str());
|
||||
}
|
||||
|
||||
// Execute tests
|
||||
for (i, test_def) in tests_to_run.iter().enumerate() {
|
||||
info!("🧪 执行测试 {}/{}: {}", i + 1, tests_to_run.len(), test_def.name);
|
||||
info!(" 📝 描述: {}", test_def.description);
|
||||
info!(" 🏷️ 分类: {}", test_def.category.as_str());
|
||||
info!(" ⏱️ 预计时间: {:?}", test_def.estimated_duration);
|
||||
info!("🧪 Running test {}/{}: {}", i + 1, tests_to_run.len(), test_def.name);
|
||||
info!(" 📝 Description: {}", test_def.description);
|
||||
info!(" 🏷️ Category: {}", test_def.category.as_str());
|
||||
info!(" ⏱️ Estimated duration: {:?}", test_def.estimated_duration);
|
||||
|
||||
let test_start = Instant::now();
|
||||
let result = self.run_single_test(test_def).await;
|
||||
@@ -377,11 +377,11 @@ impl KMSTestSuite {
|
||||
|
||||
match result {
|
||||
Ok(_) => {
|
||||
info!("✅ 测试通过: {} ({:.2}s)", test_def.name, test_duration.as_secs_f64());
|
||||
info!("✅ Test passed: {} ({:.2}s)", test_def.name, test_duration.as_secs_f64());
|
||||
results.push(TestResult::success(test_def.name.clone(), test_def.category.clone(), test_duration));
|
||||
}
|
||||
Err(e) => {
|
||||
error!("❌ 测试失败: {} ({:.2}s): {}", test_def.name, test_duration.as_secs_f64(), e);
|
||||
error!("❌ Test failed: {} ({:.2}s): {}", test_def.name, test_duration.as_secs_f64(), e);
|
||||
results.push(TestResult::failure(
|
||||
test_def.name.clone(),
|
||||
test_def.category.clone(),
|
||||
@@ -393,7 +393,7 @@ impl KMSTestSuite {
|
||||
|
||||
// Add delay between tests to avoid resource conflicts
|
||||
if i < tests_to_run.len() - 1 {
|
||||
debug!("⏸️ 等待2秒后执行下一个测试...");
|
||||
debug!("⏸️ Waiting two seconds before the next test...");
|
||||
sleep(Duration::from_secs(2)).await;
|
||||
}
|
||||
}
|
||||
@@ -408,22 +408,22 @@ impl KMSTestSuite {
|
||||
async fn run_single_test(&self, test_def: &TestDefinition) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
// This is a placeholder for test dispatch logic
|
||||
// In a real implementation, this would dispatch to actual test functions
|
||||
warn!("⚠️ 测试函数 '{}' 在统一运行器中尚未实现,跳过", test_def.name);
|
||||
warn!("⚠️ Test '{}' is not implemented in the unified runner; skipping", test_def.name);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Print comprehensive test summary
|
||||
fn print_test_summary(&self, results: &[TestResult], total_duration: Duration) {
|
||||
info!("📊 KMS测试套件总结");
|
||||
info!("⏱️ 总执行时间: {:.2}秒", total_duration.as_secs_f64());
|
||||
info!("📈 总测试数量: {}", results.len());
|
||||
info!("📊 KMS test suite summary");
|
||||
info!("⏱️ Total duration: {:.2} seconds", total_duration.as_secs_f64());
|
||||
info!("📈 Total tests: {}", results.len());
|
||||
|
||||
let passed = results.iter().filter(|r| r.success).count();
|
||||
let failed = results.iter().filter(|r| !r.success).count();
|
||||
|
||||
info!("✅ 通过: {}", passed);
|
||||
info!("❌ 失败: {}", failed);
|
||||
info!("📊 成功率: {:.1}%", (passed as f64 / results.len() as f64) * 100.0);
|
||||
info!("✅ Passed: {}", passed);
|
||||
info!("❌ Failed: {}", failed);
|
||||
info!("📊 Success rate: {:.1}%", (passed as f64 / results.len() as f64) * 100.0);
|
||||
|
||||
// Summary by category
|
||||
let mut category_summary: std::collections::HashMap<TestCategory, (usize, usize)> = std::collections::HashMap::new();
|
||||
@@ -435,7 +435,7 @@ impl KMSTestSuite {
|
||||
}
|
||||
}
|
||||
|
||||
info!("📊 分类汇总:");
|
||||
info!("📊 Category summary:");
|
||||
for (category, (total, passed_count)) in category_summary {
|
||||
info!(
|
||||
" 🏷️ {}: {}/{} ({:.1}%)",
|
||||
@@ -448,7 +448,7 @@ impl KMSTestSuite {
|
||||
|
||||
// List failed tests
|
||||
if failed > 0 {
|
||||
warn!("❌ 失败的测试:");
|
||||
warn!("❌ Failing tests:");
|
||||
for result in results.iter().filter(|r| !r.success) {
|
||||
warn!(
|
||||
" - {}: {}",
|
||||
@@ -479,7 +479,7 @@ async fn test_kms_critical_suite() -> Result<(), Box<dyn std::error::Error + Sen
|
||||
return Err(format!("Critical test suite failed: {failed_count} tests failed").into());
|
||||
}
|
||||
|
||||
info!("✅ 所有关键测试通过");
|
||||
info!("✅ All critical tests passed");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -494,13 +494,13 @@ async fn test_kms_full_suite() -> Result<(), Box<dyn std::error::Error + Send +
|
||||
let failed_count = results.iter().filter(|r| !r.success).count();
|
||||
let success_rate = ((total_tests - failed_count) as f64 / total_tests as f64) * 100.0;
|
||||
|
||||
info!("📊 完整测试套件结果: {:.1}% 成功率", success_rate);
|
||||
info!("📊 Full suite success rate: {:.1}%", success_rate);
|
||||
|
||||
// Allow up to 10% failure rate for non-critical tests
|
||||
if success_rate < 90.0 {
|
||||
return Err(format!("Test suite success rate too low: {success_rate:.1}%").into());
|
||||
}
|
||||
|
||||
info!("✅ 完整测试套件通过");
|
||||
info!("✅ Full test suite succeeded");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -102,6 +102,10 @@ aws-smithy-types = { workspace = true }
|
||||
parking_lot = { workspace = true }
|
||||
moka = { workspace = true }
|
||||
base64-simd.workspace = true
|
||||
serde_urlencoded.workspace = true
|
||||
google-cloud-storage = { workspace = true }
|
||||
google-cloud-auth = { workspace = true }
|
||||
aws-config = { workspace = true }
|
||||
|
||||
[target.'cfg(not(windows))'.dependencies]
|
||||
nix = { workspace = true }
|
||||
|
||||
@@ -14,12 +14,12 @@
|
||||
# limitations under the License.
|
||||
|
||||
|
||||
# Reed-Solomon SIMD 性能基准测试脚本
|
||||
# 使用高性能 SIMD 实现进行纠删码性能测试
|
||||
# Reed-Solomon SIMD performance benchmark script
|
||||
# Run erasure-coding benchmarks using the high-performance SIMD implementation
|
||||
|
||||
set -e
|
||||
|
||||
# ANSI 颜色码
|
||||
# ANSI color codes
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
@@ -27,7 +27,7 @@ BLUE='\033[0;34m'
|
||||
PURPLE='\033[0;35m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# 打印带颜色的消息
|
||||
# Print colored messages
|
||||
print_info() {
|
||||
echo -e "${BLUE}ℹ️ $1${NC}"
|
||||
}
|
||||
@@ -44,177 +44,177 @@ print_error() {
|
||||
echo -e "${RED}❌ $1${NC}"
|
||||
}
|
||||
|
||||
# 检查系统要求
|
||||
# Validate system requirements
|
||||
check_requirements() {
|
||||
print_info "检查系统要求..."
|
||||
print_info "Checking system requirements..."
|
||||
|
||||
# 检查 Rust
|
||||
# Check for Rust
|
||||
if ! command -v cargo &> /dev/null; then
|
||||
print_error "Cargo 未找到,请确保已安装 Rust"
|
||||
print_error "Cargo not found; install Rust first"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# 检查 criterion
|
||||
# Check criterion support
|
||||
if ! cargo --list | grep -q "bench"; then
|
||||
print_error "未找到基准测试支持,请确保使用的是支持基准测试的 Rust 版本"
|
||||
print_error "Benchmark support missing; use a Rust toolchain with criterion support"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
print_success "系统要求检查通过"
|
||||
print_success "System requirements satisfied"
|
||||
}
|
||||
|
||||
# 清理之前的测试结果
|
||||
# Remove previous benchmark artifacts
|
||||
cleanup() {
|
||||
print_info "清理之前的测试结果..."
|
||||
print_info "Cleaning previous benchmark artifacts..."
|
||||
rm -rf target/criterion
|
||||
print_success "清理完成"
|
||||
print_success "Cleanup complete"
|
||||
}
|
||||
|
||||
# 运行 SIMD 模式基准测试
|
||||
# Run SIMD-only benchmarks
|
||||
run_simd_benchmark() {
|
||||
print_info "🎯 开始运行 SIMD 模式基准测试..."
|
||||
print_info "🎯 Starting SIMD-only benchmark run..."
|
||||
echo "================================================"
|
||||
|
||||
cargo bench --bench comparison_benchmark \
|
||||
-- --save-baseline simd_baseline
|
||||
|
||||
print_success "SIMD 模式基准测试完成"
|
||||
print_success "SIMD-only benchmarks completed"
|
||||
}
|
||||
|
||||
# 运行完整的基准测试套件
|
||||
# Run the full benchmark suite
|
||||
run_full_benchmark() {
|
||||
print_info "🚀 开始运行完整基准测试套件..."
|
||||
print_info "🚀 Starting full benchmark suite..."
|
||||
echo "================================================"
|
||||
|
||||
# 运行详细的基准测试
|
||||
# Execute detailed benchmarks
|
||||
cargo bench --bench erasure_benchmark
|
||||
|
||||
print_success "完整基准测试套件完成"
|
||||
print_success "Full benchmark suite finished"
|
||||
}
|
||||
|
||||
# 运行性能测试
|
||||
# Run performance tests
|
||||
run_performance_test() {
|
||||
print_info "📊 开始运行性能测试..."
|
||||
print_info "📊 Starting performance tests..."
|
||||
echo "================================================"
|
||||
|
||||
print_info "步骤 1: 运行编码基准测试..."
|
||||
print_info "Step 1: running encoding benchmarks..."
|
||||
cargo bench --bench comparison_benchmark \
|
||||
-- encode --save-baseline encode_baseline
|
||||
|
||||
print_info "步骤 2: 运行解码基准测试..."
|
||||
print_info "Step 2: running decoding benchmarks..."
|
||||
cargo bench --bench comparison_benchmark \
|
||||
-- decode --save-baseline decode_baseline
|
||||
|
||||
print_success "性能测试完成"
|
||||
print_success "Performance tests completed"
|
||||
}
|
||||
|
||||
# 运行大数据集测试
|
||||
# Run large dataset tests
|
||||
run_large_data_test() {
|
||||
print_info "🗂️ 开始运行大数据集测试..."
|
||||
print_info "🗂️ Starting large-dataset tests..."
|
||||
echo "================================================"
|
||||
|
||||
cargo bench --bench erasure_benchmark \
|
||||
-- large_data --save-baseline large_data_baseline
|
||||
|
||||
print_success "大数据集测试完成"
|
||||
print_success "Large-dataset tests completed"
|
||||
}
|
||||
|
||||
# 生成比较报告
|
||||
# Generate comparison report
|
||||
generate_comparison_report() {
|
||||
print_info "📊 生成性能报告..."
|
||||
print_info "📊 Generating performance report..."
|
||||
|
||||
if [ -d "target/criterion" ]; then
|
||||
print_info "基准测试结果已保存到 target/criterion/ 目录"
|
||||
print_info "你可以打开 target/criterion/report/index.html 查看详细报告"
|
||||
print_info "Benchmark results saved under target/criterion/"
|
||||
print_info "Open target/criterion/report/index.html for the HTML report"
|
||||
|
||||
# 如果有 python 环境,可以启动简单的 HTTP 服务器查看报告
|
||||
# If Python is available, start a simple HTTP server to browse the report
|
||||
if command -v python3 &> /dev/null; then
|
||||
print_info "你可以运行以下命令启动本地服务器查看报告:"
|
||||
print_info "Run the following command to serve the report locally:"
|
||||
echo " cd target/criterion && python3 -m http.server 8080"
|
||||
echo " 然后在浏览器中访问 http://localhost:8080/report/index.html"
|
||||
echo " Then open http://localhost:8080/report/index.html"
|
||||
fi
|
||||
else
|
||||
print_warning "未找到基准测试结果目录"
|
||||
print_warning "Benchmark result directory not found"
|
||||
fi
|
||||
}
|
||||
|
||||
# 快速测试模式
|
||||
# Quick test mode
|
||||
run_quick_test() {
|
||||
print_info "🏃 运行快速性能测试..."
|
||||
print_info "🏃 Running quick performance test..."
|
||||
|
||||
print_info "测试 SIMD 编码性能..."
|
||||
print_info "Testing SIMD encoding performance..."
|
||||
cargo bench --bench comparison_benchmark \
|
||||
-- encode --quick
|
||||
|
||||
print_info "测试 SIMD 解码性能..."
|
||||
print_info "Testing SIMD decoding performance..."
|
||||
cargo bench --bench comparison_benchmark \
|
||||
-- decode --quick
|
||||
|
||||
print_success "快速测试完成"
|
||||
print_success "Quick test complete"
|
||||
}
|
||||
|
||||
# 显示帮助信息
|
||||
# Display help
|
||||
show_help() {
|
||||
echo "Reed-Solomon SIMD 性能基准测试脚本"
|
||||
echo "Reed-Solomon SIMD performance benchmark script"
|
||||
echo ""
|
||||
echo "实现模式:"
|
||||
echo " 🎯 SIMD 模式 - 高性能 SIMD 优化的 reed-solomon-simd 实现"
|
||||
echo "Modes:"
|
||||
echo " 🎯 simd High-performance reed-solomon-simd implementation"
|
||||
echo ""
|
||||
echo "使用方法:"
|
||||
echo "Usage:"
|
||||
echo " $0 [command]"
|
||||
echo ""
|
||||
echo "命令:"
|
||||
echo " quick 运行快速性能测试"
|
||||
echo " full 运行完整基准测试套件"
|
||||
echo " performance 运行详细的性能测试"
|
||||
echo " simd 运行 SIMD 模式测试"
|
||||
echo " large 运行大数据集测试"
|
||||
echo " clean 清理测试结果"
|
||||
echo " help 显示此帮助信息"
|
||||
echo "Commands:"
|
||||
echo " quick Run the quick performance test"
|
||||
echo " full Run the full benchmark suite"
|
||||
echo " performance Run detailed performance tests"
|
||||
echo " simd Run the SIMD-only tests"
|
||||
echo " large Run large-dataset tests"
|
||||
echo " clean Remove previous results"
|
||||
echo " help Show this help message"
|
||||
echo ""
|
||||
echo "示例:"
|
||||
echo " $0 quick # 快速性能测试"
|
||||
echo " $0 performance # 详细性能测试"
|
||||
echo " $0 full # 完整测试套件"
|
||||
echo " $0 simd # SIMD 模式测试"
|
||||
echo " $0 large # 大数据集测试"
|
||||
echo "Examples:"
|
||||
echo " $0 quick # Quick performance test"
|
||||
echo " $0 performance # Detailed performance test"
|
||||
echo " $0 full # Full benchmark suite"
|
||||
echo " $0 simd # SIMD-only benchmark"
|
||||
echo " $0 large # Large-dataset benchmark"
|
||||
echo ""
|
||||
echo "实现特性:"
|
||||
echo " - 使用 reed-solomon-simd 高性能 SIMD 实现"
|
||||
echo " - 支持编码器/解码器实例缓存"
|
||||
echo " - 优化的内存管理和线程安全"
|
||||
echo " - 跨平台 SIMD 指令支持"
|
||||
echo "Features:"
|
||||
echo " - Uses the high-performance reed-solomon-simd implementation"
|
||||
echo " - Caches encoder/decoder instances"
|
||||
echo " - Optimized memory management and thread safety"
|
||||
echo " - Cross-platform SIMD instruction support"
|
||||
}
|
||||
|
||||
# 显示测试配置信息
|
||||
# Show benchmark configuration
|
||||
show_test_info() {
|
||||
print_info "📋 测试配置信息:"
|
||||
echo " - 当前目录: $(pwd)"
|
||||
echo " - Rust 版本: $(rustc --version)"
|
||||
echo " - Cargo 版本: $(cargo --version)"
|
||||
echo " - CPU 架构: $(uname -m)"
|
||||
echo " - 操作系统: $(uname -s)"
|
||||
print_info "📋 Benchmark configuration:"
|
||||
echo " - Working directory: $(pwd)"
|
||||
echo " - Rust version: $(rustc --version)"
|
||||
echo " - Cargo version: $(cargo --version)"
|
||||
echo " - CPU architecture: $(uname -m)"
|
||||
echo " - Operating system: $(uname -s)"
|
||||
|
||||
# 检查 CPU 特性
|
||||
# Inspect CPU capabilities
|
||||
if [ -f "/proc/cpuinfo" ]; then
|
||||
echo " - CPU 型号: $(grep 'model name' /proc/cpuinfo | head -1 | cut -d: -f2 | xargs)"
|
||||
echo " - CPU model: $(grep 'model name' /proc/cpuinfo | head -1 | cut -d: -f2 | xargs)"
|
||||
if grep -q "avx2" /proc/cpuinfo; then
|
||||
echo " - SIMD 支持: AVX2 ✅ (将使用高级 SIMD 优化)"
|
||||
echo " - SIMD support: AVX2 ✅ (using advanced SIMD optimizations)"
|
||||
elif grep -q "sse4" /proc/cpuinfo; then
|
||||
echo " - SIMD 支持: SSE4 ✅ (将使用 SIMD 优化)"
|
||||
echo " - SIMD support: SSE4 ✅ (using SIMD optimizations)"
|
||||
else
|
||||
echo " - SIMD 支持: 基础 SIMD 特性"
|
||||
echo " - SIMD support: baseline features"
|
||||
fi
|
||||
fi
|
||||
|
||||
echo " - 实现: reed-solomon-simd (高性能 SIMD 优化)"
|
||||
echo " - 特性: 实例缓存、线程安全、跨平台 SIMD"
|
||||
echo " - Implementation: reed-solomon-simd (SIMD-optimized)"
|
||||
echo " - Highlights: instance caching, thread safety, cross-platform SIMD"
|
||||
echo ""
|
||||
}
|
||||
|
||||
# 主函数
|
||||
# Main entry point
|
||||
main() {
|
||||
print_info "🧪 Reed-Solomon SIMD 实现性能基准测试"
|
||||
print_info "🧪 Reed-Solomon SIMD benchmark suite"
|
||||
echo "================================================"
|
||||
|
||||
check_requirements
|
||||
@@ -252,15 +252,15 @@ main() {
|
||||
show_help
|
||||
;;
|
||||
*)
|
||||
print_error "未知命令: $1"
|
||||
print_error "Unknown command: $1"
|
||||
echo ""
|
||||
show_help
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
|
||||
print_success "✨ 基准测试执行完成!"
|
||||
print_success "✨ Benchmark run completed!"
|
||||
}
|
||||
|
||||
# 启动脚本
|
||||
# Launch script
|
||||
main "$@"
|
||||
@@ -96,21 +96,21 @@ async fn is_server_resolvable(endpoint: &Endpoint) -> Result<()> {
|
||||
let decoded_payload = flatbuffers::root::<PingBody>(finished_data);
|
||||
assert!(decoded_payload.is_ok());
|
||||
|
||||
// 创建客户端
|
||||
// Create the client
|
||||
let mut client = node_service_time_out_client(&addr)
|
||||
.await
|
||||
.map_err(|err| Error::other(err.to_string()))?;
|
||||
|
||||
// 构造 PingRequest
|
||||
// Build the PingRequest
|
||||
let request = Request::new(PingRequest {
|
||||
version: 1,
|
||||
body: bytes::Bytes::copy_from_slice(finished_data),
|
||||
});
|
||||
|
||||
// 发送请求并获取响应
|
||||
// Send the request and obtain the response
|
||||
let response: PingResponse = client.ping(request).await?.into_inner();
|
||||
|
||||
// 打印响应
|
||||
// Print the response
|
||||
let ping_response_body = flatbuffers::root::<PingBody>(&response.body);
|
||||
if let Err(e) = ping_response_body {
|
||||
eprintln!("{e}");
|
||||
|
||||
@@ -18,14 +18,18 @@
|
||||
#![allow(unused_must_use)]
|
||||
#![allow(clippy::all)]
|
||||
|
||||
use crate::error::StorageError;
|
||||
use async_channel::{Receiver as A_Receiver, Sender as A_Sender, bounded};
|
||||
use bytes::BytesMut;
|
||||
use futures::Future;
|
||||
use http::HeaderMap;
|
||||
use lazy_static::lazy_static;
|
||||
use rustfs_common::data_usage::TierStats;
|
||||
use rustfs_common::heal_channel::rep_has_active_rules;
|
||||
use rustfs_common::metrics::{IlmAction, Metrics};
|
||||
use rustfs_filemeta::fileinfo::{NULL_VERSION_ID, RestoreStatusOps, is_restored_object_on_disk};
|
||||
use rustfs_utils::path::encode_dir_object;
|
||||
use rustfs_utils::string::strings_has_prefix_fold;
|
||||
use s3s::Body;
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::any::Any;
|
||||
@@ -62,7 +66,11 @@ use crate::store::ECStore;
|
||||
use crate::store_api::StorageAPI;
|
||||
use crate::store_api::{GetObjectReader, HTTPRangeSpec, ObjectInfo, ObjectOptions, ObjectToDelete};
|
||||
use crate::tier::warm_backend::WarmBackendGetOpts;
|
||||
use s3s::dto::{BucketLifecycleConfiguration, DefaultRetention, ReplicationConfiguration};
|
||||
use s3s::dto::{
|
||||
BucketLifecycleConfiguration, DefaultRetention, ReplicationConfiguration, RestoreRequest, RestoreRequestType, RestoreStatus,
|
||||
ServerSideEncryption, Timestamp,
|
||||
};
|
||||
use s3s::header::{X_AMZ_RESTORE, X_AMZ_SERVER_SIDE_ENCRYPTION, X_AMZ_STORAGE_CLASS};
|
||||
|
||||
pub type TimeFn = Arc<dyn Fn() -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + Sync + 'static>;
|
||||
pub type TraceFn =
|
||||
@@ -71,9 +79,12 @@ pub type ExpiryOpType = Box<dyn ExpiryOp + Send + Sync + 'static>;
|
||||
|
||||
static XXHASH_SEED: u64 = 0;
|
||||
|
||||
const _DISABLED: &str = "Disabled";
|
||||
pub const AMZ_OBJECT_TAGGING: &str = "X-Amz-Tagging";
|
||||
pub const AMZ_TAG_COUNT: &str = "x-amz-tagging-count";
|
||||
pub const AMZ_TAG_DIRECTIVE: &str = "X-Amz-Tagging-Directive";
|
||||
pub const AMZ_ENCRYPTION_AES: &str = "AES256";
|
||||
pub const AMZ_ENCRYPTION_KMS: &str = "aws:kms";
|
||||
|
||||
//pub const ERR_INVALID_STORAGECLASS: &str = "invalid storage class.";
|
||||
pub const ERR_INVALID_STORAGECLASS: &str = "invalid tier.";
|
||||
|
||||
lazy_static! {
|
||||
@@ -762,11 +773,14 @@ pub fn gen_transition_objname(bucket: &str) -> Result<String, Error> {
|
||||
pub async fn transition_object(api: Arc<ECStore>, oi: &ObjectInfo, lae: LcAuditEvent) -> Result<(), Error> {
|
||||
let time_ilm = Metrics::time_ilm(lae.event.action);
|
||||
|
||||
let etag = if let Some(etag) = &oi.etag { etag } else { "" };
|
||||
let etag = etag.to_string();
|
||||
|
||||
let opts = ObjectOptions {
|
||||
transition: TransitionOptions {
|
||||
status: lifecycle::TRANSITION_PENDING.to_string(),
|
||||
tier: lae.event.storage_class,
|
||||
etag: oi.etag.clone().expect("err").to_string(),
|
||||
etag,
|
||||
..Default::default()
|
||||
},
|
||||
//lifecycle_audit_event: lae,
|
||||
@@ -787,9 +801,9 @@ pub fn audit_tier_actions(_api: ECStore, _tier: &str, _bytes: i64) -> TimeFn {
|
||||
pub async fn get_transitioned_object_reader(
|
||||
bucket: &str,
|
||||
object: &str,
|
||||
rs: HTTPRangeSpec,
|
||||
h: HeaderMap,
|
||||
oi: ObjectInfo,
|
||||
rs: &Option<HTTPRangeSpec>,
|
||||
h: &HeaderMap,
|
||||
oi: &ObjectInfo,
|
||||
opts: &ObjectOptions,
|
||||
) -> Result<GetObjectReader, std::io::Error> {
|
||||
let mut tier_config_mgr = GLOBAL_TierConfigMgr.write().await;
|
||||
@@ -815,19 +829,131 @@ pub async fn get_transitioned_object_reader(
|
||||
let reader = tgt_client
|
||||
.get(&oi.transitioned_object.name, &oi.transitioned_object.version_id, gopts)
|
||||
.await?;
|
||||
Ok(get_fn(reader, h))
|
||||
Ok(get_fn(reader, h.clone()))
|
||||
}
|
||||
|
||||
pub fn post_restore_opts(_r: http::Request<Body>, _bucket: &str, _object: &str) -> Result<ObjectOptions, std::io::Error> {
|
||||
todo!();
|
||||
pub async fn post_restore_opts(version_id: &str, bucket: &str, object: &str) -> Result<ObjectOptions, std::io::Error> {
|
||||
let versioned = BucketVersioningSys::prefix_enabled(bucket, object).await;
|
||||
let version_suspended = BucketVersioningSys::prefix_suspended(bucket, object).await;
|
||||
let vid = version_id.trim();
|
||||
if !vid.is_empty() && vid != NULL_VERSION_ID {
|
||||
if let Err(err) = Uuid::parse_str(vid) {
|
||||
return Err(std::io::Error::other(
|
||||
StorageError::InvalidVersionID(bucket.to_string(), object.to_string(), vid.to_string()).to_string(),
|
||||
));
|
||||
}
|
||||
if !versioned && !version_suspended {
|
||||
return Err(std::io::Error::other(
|
||||
StorageError::InvalidArgument(
|
||||
bucket.to_string(),
|
||||
object.to_string(),
|
||||
format!("version-id specified {} but versioning is not enabled on {}", vid, bucket),
|
||||
)
|
||||
.to_string(),
|
||||
));
|
||||
}
|
||||
}
|
||||
Ok(ObjectOptions {
|
||||
versioned,
|
||||
version_suspended,
|
||||
version_id: Some(vid.to_string()),
|
||||
..Default::default()
|
||||
})
|
||||
}
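// Hedged usage sketch (hypothetical call site, not in this diff): a RestoreObject handler
// would typically resolve options from the request's version-id before loading metadata:
//
//     let opts = post_restore_opts(version_id.as_deref().unwrap_or(""), &bucket, &object).await?;
//     let oi = store.get_object_info(&bucket, &object, &opts).await?;
//
// `store.get_object_info` is assumed here for illustration; the real handler wiring may differ.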
|
||||
|
||||
pub fn put_restore_opts(_bucket: &str, _object: &str, _rreq: &RestoreObjectRequest, _oi: &ObjectInfo) -> ObjectOptions {
|
||||
todo!();
|
||||
pub async fn put_restore_opts(
|
||||
bucket: &str,
|
||||
object: &str,
|
||||
rreq: &RestoreRequest,
|
||||
oi: &ObjectInfo,
|
||||
) -> Result<ObjectOptions, std::io::Error> {
|
||||
let mut meta = HashMap::<String, String>::new();
|
||||
/*let mut b = false;
|
||||
let Some(Some(Some(mut sc))) = rreq.output_location.s3.storage_class else { b = true; };
|
||||
if b || sc == "" {
|
||||
//sc = oi.storage_class;
|
||||
sc = oi.transitioned_object.tier;
|
||||
}
|
||||
meta.insert(X_AMZ_STORAGE_CLASS.as_str().to_lowercase(), sc);*/
|
||||
|
||||
if let Some(type_) = &rreq.type_
|
||||
&& type_.as_str() == RestoreRequestType::SELECT
|
||||
{
|
||||
for v in rreq
|
||||
.output_location
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.s3
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
.user_metadata
|
||||
.as_ref()
|
||||
.unwrap()
|
||||
{
|
||||
if !strings_has_prefix_fold(&v.name.clone().unwrap(), "x-amz-meta") {
|
||||
meta.insert(
|
||||
format!("x-amz-meta-{}", v.name.as_ref().unwrap()),
|
||||
v.value.clone().unwrap_or("".to_string()),
|
||||
);
|
||||
continue;
|
||||
}
|
||||
meta.insert(v.name.clone().unwrap(), v.value.clone().unwrap_or("".to_string()));
|
||||
}
|
||||
if let Some(output_location) = rreq.output_location.as_ref() {
|
||||
if let Some(s3) = &output_location.s3 {
|
||||
if let Some(tags) = &s3.tagging {
|
||||
meta.insert(
|
||||
AMZ_OBJECT_TAGGING.to_string(),
|
||||
serde_urlencoded::to_string(tags.tag_set.clone()).unwrap_or("".to_string()),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
if let Some(output_location) = rreq.output_location.as_ref() {
|
||||
if let Some(s3) = &output_location.s3 {
|
||||
if let Some(encryption) = &s3.encryption {
|
||||
if encryption.encryption_type.as_str() != "" {
|
||||
meta.insert(X_AMZ_SERVER_SIDE_ENCRYPTION.as_str().to_string(), AMZ_ENCRYPTION_AES.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
return Ok(ObjectOptions {
|
||||
versioned: BucketVersioningSys::prefix_enabled(bucket, object).await,
|
||||
version_suspended: BucketVersioningSys::prefix_suspended(bucket, object).await,
|
||||
user_defined: meta,
|
||||
..Default::default()
|
||||
});
|
||||
}
|
||||
for (k, v) in &oi.user_defined {
|
||||
meta.insert(k.to_string(), v.clone());
|
||||
}
|
||||
if !oi.user_tags.is_empty() {
|
||||
meta.insert(AMZ_OBJECT_TAGGING.to_string(), oi.user_tags.clone());
|
||||
}
|
||||
let restore_expiry = lifecycle::expected_expiry_time(OffsetDateTime::now_utc(), rreq.days.unwrap_or(1));
|
||||
meta.insert(
|
||||
X_AMZ_RESTORE.as_str().to_string(),
|
||||
RestoreStatus {
|
||||
is_restore_in_progress: Some(false),
|
||||
restore_expiry_date: Some(Timestamp::from(restore_expiry)),
|
||||
}
|
||||
.to_string(),
|
||||
);
|
||||
Ok(ObjectOptions {
|
||||
versioned: BucketVersioningSys::prefix_enabled(bucket, object).await,
|
||||
version_suspended: BucketVersioningSys::prefix_suspended(bucket, object).await,
|
||||
user_defined: meta,
|
||||
version_id: oi.version_id.map(|e| e.to_string()),
|
||||
mod_time: oi.mod_time,
|
||||
//expires: oi.expires,
|
||||
..Default::default()
|
||||
})
|
||||
}
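// Hedged illustration (assumption, not verified against RestoreStatusOps): the X-Amz-Restore
// value stored above is expected to follow the S3 header shape, roughly:
//
//     ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 GMT"
//
// with the expiry derived from `rreq.days` via `expected_expiry_time`. A hypothetical call:
//
//     let opts = put_restore_opts(&bucket, &object, &restore_req, &oi).await?;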
|
||||
|
||||
pub trait LifecycleOps {
|
||||
fn to_lifecycle_opts(&self) -> lifecycle::ObjectOpts;
|
||||
fn is_remote(&self) -> bool;
|
||||
}
|
||||
|
||||
impl LifecycleOps for ObjectInfo {
|
||||
@@ -848,29 +974,54 @@ impl LifecycleOps for ObjectInfo {
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
fn is_remote(&self) -> bool {
|
||||
if self.transitioned_object.status != lifecycle::TRANSITION_COMPLETE {
|
||||
return false;
|
||||
}
|
||||
!is_restored_object_on_disk(&self.user_defined)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Clone)]
|
||||
pub struct S3Location {
|
||||
pub bucketname: String,
|
||||
//pub encryption: Encryption,
|
||||
pub prefix: String,
|
||||
pub storage_class: String,
|
||||
//pub tagging: Tags,
|
||||
pub user_metadata: HashMap<String, String>,
|
||||
pub trait RestoreRequestOps {
|
||||
fn validate(&self, api: Arc<ECStore>) -> Result<(), std::io::Error>;
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Clone)]
|
||||
pub struct OutputLocation(pub S3Location);
|
||||
impl RestoreRequestOps for RestoreRequest {
|
||||
fn validate(&self, api: Arc<ECStore>) -> Result<(), std::io::Error> {
|
||||
/*if self.type_.is_none() && self.select_parameters.is_some() {
|
||||
return Err(std::io::Error::other("Select parameters can only be specified with SELECT request type"));
|
||||
}
|
||||
if let Some(type_) = self.type_ && type_ == RestoreRequestType::SELECT && self.select_parameters.is_none() {
|
||||
return Err(std::io::Error::other("SELECT restore request requires select parameters to be specified"));
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Clone)]
|
||||
pub struct RestoreObjectRequest {
|
||||
pub days: i64,
|
||||
pub ror_type: String,
|
||||
pub tier: String,
|
||||
pub description: String,
|
||||
//pub select_parameters: SelectParameters,
|
||||
pub output_location: OutputLocation,
|
||||
if self.type_.is_none() && self.output_location.is_some() {
|
||||
return Err(std::io::Error::other("OutputLocation required only for SELECT request type"));
|
||||
}
|
||||
if let Some(type_) = self.type_ && type_ == RestoreRequestType::SELECT && self.output_location.is_none() {
|
||||
return Err(std::io::Error::other("OutputLocation required for SELECT requests"));
|
||||
}
|
||||
|
||||
if let Some(type_) = self.type_ && type_ == RestoreRequestType::SELECT && self.days != 0 {
|
||||
return Err(std::io::Error::other("Days cannot be specified with SELECT restore request"));
|
||||
}
|
||||
if self.days == 0 && self.type_.is_none() {
|
||||
return Err(std::io::Error::other("restoration days should be at least 1"));
|
||||
}
|
||||
if self.output_location.is_some() {
|
||||
if _, err := api.get_bucket_info(self.output_location.s3.bucket_name, BucketOptions{}); err != nil {
|
||||
return err
|
||||
}
|
||||
if self.output_location.s3.prefix == "" {
|
||||
return Err(std::io::Error::other("Prefix is a required parameter in OutputLocation"));
|
||||
}
|
||||
if self.output_location.s3.encryption.encryption_type.as_str() != ServerSideEncryption::AES256 {
|
||||
return NotImplemented{}
|
||||
}
|
||||
}*/
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
const _MAX_RESTORE_OBJECT_REQUEST_SIZE: i64 = 2 << 20;
|
||||
@@ -975,11 +1126,10 @@ pub async fn apply_expiry_on_non_transitioned_objects(
|
||||
//debug!("lc_event.action: {:?}", lc_event.action);
|
||||
//debug!("opts: {:?}", opts);
|
||||
let mut dobj = match api.delete_object(&oi.bucket, &encode_dir_object(&oi.name), opts).await {
|
||||
Ok(obj) => obj,
|
||||
Ok(dobj) => dobj,
|
||||
Err(e) => {
|
||||
error!("Failed to delete object {}/{}: {:?}", oi.bucket, oi.name, e);
|
||||
// Return the original object info if deletion fails
|
||||
oi.clone()
|
||||
error!("delete_object error: {:?}", e);
|
||||
return false;
|
||||
}
|
||||
};
|
||||
//debug!("dobj: {:?}", dobj);
|
||||
|
||||
@@ -20,7 +20,7 @@
|
||||
|
||||
use s3s::dto::{
|
||||
BucketLifecycleConfiguration, ExpirationStatus, LifecycleExpiration, LifecycleRule, NoncurrentVersionTransition,
|
||||
ObjectLockConfiguration, ObjectLockEnabled, Transition,
|
||||
ObjectLockConfiguration, ObjectLockEnabled, RestoreRequest, Transition,
|
||||
};
|
||||
use std::cmp::Ordering;
|
||||
use std::env;
|
||||
@@ -32,8 +32,6 @@ use tracing::info;
|
||||
|
||||
use crate::bucket::lifecycle::rule::TransitionOps;
|
||||
|
||||
use super::bucket_lifecycle_ops::RestoreObjectRequest;
|
||||
|
||||
pub const TRANSITION_COMPLETE: &str = "complete";
|
||||
pub const TRANSITION_PENDING: &str = "pending";
|
||||
|
||||
@@ -325,7 +323,7 @@ impl Lifecycle for BucketLifecycleConfiguration {
|
||||
}
|
||||
|
||||
if let Some(days) = expiration.days {
|
||||
let expected_expiry = expected_expiry_time(obj.mod_time.expect("err!"), days /*, date*/);
|
||||
let expected_expiry = expected_expiry_time(obj.mod_time.unwrap(), days /*, date*/);
|
||||
if now.unix_timestamp() >= expected_expiry.unix_timestamp() {
|
||||
events.push(Event {
|
||||
action: IlmAction::DeleteVersionAction,
|
||||
@@ -402,19 +400,21 @@ impl Lifecycle for BucketLifecycleConfiguration {
|
||||
if storage_class.as_str() != "" && !obj.delete_marker && obj.transition_status != TRANSITION_COMPLETE
|
||||
{
|
||||
let due = rule.noncurrent_version_transitions.as_ref().unwrap()[0].next_due(obj);
|
||||
if due.is_some() && (now.unix_timestamp() >= due.unwrap().unix_timestamp()) {
|
||||
events.push(Event {
|
||||
action: IlmAction::TransitionVersionAction,
|
||||
rule_id: rule.id.clone().expect("err!"),
|
||||
due,
|
||||
storage_class: rule.noncurrent_version_transitions.as_ref().unwrap()[0]
|
||||
.storage_class
|
||||
.clone()
|
||||
.unwrap()
|
||||
.as_str()
|
||||
.to_string(),
|
||||
..Default::default()
|
||||
});
|
||||
if let Some(due0) = due {
|
||||
if now.unix_timestamp() == 0 || now.unix_timestamp() > due0.unix_timestamp() {
|
||||
events.push(Event {
|
||||
action: IlmAction::TransitionVersionAction,
|
||||
rule_id: rule.id.clone().expect("err!"),
|
||||
due,
|
||||
storage_class: rule.noncurrent_version_transitions.as_ref().unwrap()[0]
|
||||
.storage_class
|
||||
.clone()
|
||||
.unwrap()
|
||||
.as_str()
|
||||
.to_string(),
|
||||
..Default::default()
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -446,7 +446,7 @@ impl Lifecycle for BucketLifecycleConfiguration {
|
||||
});
|
||||
}
|
||||
} else if let Some(days) = expiration.days {
|
||||
let expected_expiry: OffsetDateTime = expected_expiry_time(obj.mod_time.expect("err!"), days);
|
||||
let expected_expiry: OffsetDateTime = expected_expiry_time(obj.mod_time.unwrap(), days);
|
||||
info!(
|
||||
"eval_inner: expiration check - days={}, obj_time={:?}, expiry_time={:?}, now={:?}, should_expire={}",
|
||||
days,
|
||||
@@ -480,12 +480,12 @@ impl Lifecycle for BucketLifecycleConfiguration {
|
||||
if obj.transition_status != TRANSITION_COMPLETE {
|
||||
if let Some(ref transitions) = rule.transitions {
|
||||
let due = transitions[0].next_due(obj);
|
||||
if let Some(due) = due {
|
||||
if due.unix_timestamp() > 0 && (now.unix_timestamp() >= due.unix_timestamp()) {
|
||||
if let Some(due0) = due {
|
||||
if now.unix_timestamp() == 0 || now.unix_timestamp() > due0.unix_timestamp() {
|
||||
events.push(Event {
|
||||
action: IlmAction::TransitionAction,
|
||||
rule_id: rule.id.clone().expect("err!"),
|
||||
due: Some(due),
|
||||
due,
|
||||
storage_class: transitions[0].storage_class.clone().expect("err!").as_str().to_string(),
|
||||
noncurrent_days: 0,
|
||||
newer_noncurrent_versions: 0,
|
||||
@@ -580,8 +580,10 @@ impl LifecycleCalculate for LifecycleExpiration {
|
||||
if !obj.is_latest || !obj.delete_marker {
|
||||
return None;
|
||||
}
|
||||
|
||||
Some(expected_expiry_time(obj.mod_time.unwrap(), self.days.unwrap()))
|
||||
match self.days {
|
||||
Some(days) => Some(expected_expiry_time(obj.mod_time.unwrap(), days)),
|
||||
None => None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -591,10 +593,16 @@ impl LifecycleCalculate for NoncurrentVersionTransition {
|
||||
if obj.is_latest || self.storage_class.is_none() {
|
||||
return None;
|
||||
}
|
||||
if self.noncurrent_days.is_none() {
|
||||
return obj.successor_mod_time;
|
||||
match self.noncurrent_days {
|
||||
Some(noncurrent_days) => {
|
||||
if let Some(successor_mod_time) = obj.successor_mod_time {
|
||||
Some(expected_expiry_time(successor_mod_time, noncurrent_days))
|
||||
} else {
|
||||
Some(expected_expiry_time(OffsetDateTime::now_utc(), noncurrent_days))
|
||||
}
|
||||
}
|
||||
None => obj.successor_mod_time,
|
||||
}
|
||||
Some(expected_expiry_time(obj.successor_mod_time.unwrap(), self.noncurrent_days.unwrap()))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -609,10 +617,10 @@ impl LifecycleCalculate for Transition {
|
||||
return Some(date.into());
|
||||
}
|
||||
|
||||
if self.days.is_none() {
|
||||
return obj.mod_time;
|
||||
match self.days {
|
||||
Some(days) => Some(expected_expiry_time(obj.mod_time.unwrap(), days)),
|
||||
None => obj.mod_time,
|
||||
}
|
||||
Some(expected_expiry_time(obj.mod_time.unwrap(), self.days.unwrap()))
|
||||
}
|
||||
}
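// Worked example (hedged reading of the logic above): for a Transition rule with
// days = Some(30) and an object last modified on 2024-01-01T00:00:00Z, next_due resolves to
// roughly 30 days later via expected_expiry_time (exact rounding is whatever that helper
// applies); with days = None the object's own mod_time is returned, so the transition is
// considered due immediately.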
|
||||
|
||||
@@ -692,7 +700,7 @@ pub struct TransitionOptions {
|
||||
pub status: String,
|
||||
pub tier: String,
|
||||
pub etag: String,
|
||||
pub restore_request: RestoreObjectRequest,
|
||||
pub restore_request: RestoreRequest,
|
||||
pub restore_expiry: OffsetDateTime,
|
||||
pub expire_restored: bool,
|
||||
}
|
||||
|
||||
@@ -428,8 +428,8 @@ where
|
||||
let sec = t.unix_timestamp() - 62135596800;
|
||||
let nsec = t.nanosecond();
|
||||
buf[0] = 0xc7; // ext8
|
||||
buf[1] = 0x0c; // 长度
|
||||
buf[2] = 0x05; // 时间扩展类型
|
||||
buf[1] = 0x0c; // Length
|
||||
buf[2] = 0x05; // Time extension type
|
||||
BigEndian::write_u64(&mut buf[3..], sec as u64);
|
||||
BigEndian::write_u32(&mut buf[11..], nsec);
|
||||
s.serialize_bytes(&buf)
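// Byte layout of the buffer above (reading of the code; the buffer declaration sits outside
// this hunk and is assumed to be 15 bytes):
//   buf[0]      0xc7                      msgpack "ext 8" marker
//   buf[1]      0x0c                      payload length: 12 bytes
//   buf[2]      0x05                      application-defined time extension type
//   buf[3..11]  sec  (unix_timestamp() - 62135596800) as big-endian u64
//   buf[11..15] nsec as big-endian u32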
|
||||
|
||||
@@ -16,16 +16,16 @@ use crate::error::Result;
|
||||
use rmp_serde::Serializer as rmpSerializer;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
// 定义 QuotaType 枚举类型
|
||||
// Define the QuotaType enum
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
|
||||
pub enum QuotaType {
|
||||
Hard,
|
||||
}
|
||||
|
||||
// 定义 BucketQuota 结构体
|
||||
// Define the BucketQuota structure
|
||||
#[derive(Debug, Deserialize, Serialize, Default, Clone)]
|
||||
pub struct BucketQuota {
|
||||
quota: Option<u64>, // 使用 Option 来表示可能不存在的字段
|
||||
quota: Option<u64>, // Use Option to represent optional fields
|
||||
|
||||
size: u64,
|
||||
|
||||
|
||||
@@ -46,7 +46,7 @@ pub trait ReplicationConfigurationExt {
|
||||
}
|
||||
|
||||
impl ReplicationConfigurationExt for ReplicationConfiguration {
|
||||
/// 检查是否有现有对象复制规则
|
||||
/// Check whether any object-replication rules exist
|
||||
fn has_existing_object_replication(&self, arn: &str) -> (bool, bool) {
|
||||
let mut has_arn = false;
|
||||
|
||||
@@ -117,7 +117,7 @@ impl ReplicationConfigurationExt for ReplicationConfiguration {
|
||||
rules
|
||||
}
|
||||
|
||||
/// 获取目标配置
|
||||
/// Retrieve the destination configuration
|
||||
fn get_destination(&self) -> Destination {
|
||||
if !self.rules.is_empty() {
|
||||
self.rules[0].destination.clone()
|
||||
@@ -134,7 +134,7 @@ impl ReplicationConfigurationExt for ReplicationConfiguration {
|
||||
}
|
||||
}
|
||||
|
||||
/// 判断对象是否应该被复制
|
||||
/// Determine whether an object should be replicated
|
||||
fn replicate(&self, obj: &ObjectOpts) -> bool {
|
||||
let rules = self.filter_actionable_rules(obj);
|
||||
|
||||
@@ -164,16 +164,16 @@ impl ReplicationConfigurationExt for ReplicationConfiguration {
|
||||
}
|
||||
}
|
||||
|
||||
// 常规对象/元数据复制
|
||||
// Regular object/metadata replication
|
||||
return rule.metadata_replicate(obj);
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
/// 检查是否有活跃的规则
|
||||
/// 可选择性地提供前缀
|
||||
/// 如果recursive为true,函数还会在前缀下的任何级别有活跃规则时返回true
|
||||
/// 如果没有指定前缀,recursive实际上为true
|
||||
/// Check for an active rule
|
||||
/// Optionally accept a prefix
|
||||
/// When recursive is true, return true if any level under the prefix has an active rule
|
||||
/// Without a prefix, recursive behaves as true
|
||||
fn has_active_rules(&self, prefix: &str, recursive: bool) -> bool {
|
||||
if self.rules.is_empty() {
|
||||
return false;
|
||||
@@ -187,13 +187,13 @@ impl ReplicationConfigurationExt for ReplicationConfiguration {
|
||||
if let Some(filter) = &rule.filter {
|
||||
if let Some(filter_prefix) = &filter.prefix {
|
||||
if !prefix.is_empty() && !filter_prefix.is_empty() {
|
||||
// 传入的前缀必须在规则前缀中
|
||||
// The provided prefix must fall within the rule prefix
|
||||
if !recursive && !prefix.starts_with(filter_prefix) {
|
||||
continue;
|
||||
}
|
||||
}
|
||||
|
||||
// 如果是递归的,我们可以跳过这个规则,如果它不匹配测试前缀或前缀下的级别不匹配
|
||||
// When recursive, skip this rule if it does not match the test prefix or hierarchy
|
||||
if recursive && !rule.prefix().starts_with(prefix) && !prefix.starts_with(rule.prefix()) {
|
||||
continue;
|
||||
}
|
||||
@@ -204,7 +204,7 @@ impl ReplicationConfigurationExt for ReplicationConfiguration {
|
||||
false
|
||||
}
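// Worked example (hedged, derived from the checks above): with a single enabled rule whose
// prefix is "logs/2024/", has_active_rules("logs/", true) returns true because the rule
// prefix sits under the queried prefix, while has_active_rules("logs/", false) skips the
// rule since "logs/" does not start with "logs/2024/".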
|
||||
|
||||
/// 过滤目标ARN,返回配置中不同目标ARN的切片
|
||||
/// Filter target ARNs and return a slice of the distinct values in the config
|
||||
fn filter_target_arns(&self, obj: &ObjectOpts) -> Vec<String> {
|
||||
let mut arns = Vec::new();
|
||||
let mut targets_map: HashSet<String> = HashSet::new();
|
||||
@@ -216,7 +216,7 @@ impl ReplicationConfigurationExt for ReplicationConfiguration {
|
||||
}
|
||||
|
||||
if !self.role.is_empty() {
|
||||
arns.push(self.role.clone()); // 如果存在,使用传统的RoleArn
|
||||
arns.push(self.role.clone()); // Use the legacy RoleArn when present
|
||||
return arns;
|
||||
}
|
||||
|
||||
|
||||
@@ -108,7 +108,7 @@ pub async fn list_path_raw(rx: CancellationToken, opts: ListPathRawOptions) -> d
|
||||
}
|
||||
|
||||
if cancel_rx_clone.is_cancelled() {
|
||||
// warn!("list_path_raw: cancel_rx_clone.try_recv().await.is_ok()");
|
||||
// warn!("list_path_raw: cancel_rx_clone.is_cancelled()");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
|
||||
@@ -39,13 +39,13 @@
|
||||
// #[allow(clippy::shadow_same)] // necessary for `pin_mut!`
|
||||
// Box::pin(async move {
|
||||
// pin_mut!(body);
|
||||
// // 上一次没用完的数据
|
||||
// // Data left over from the previous call
|
||||
// let mut prev_bytes = Bytes::new();
|
||||
// let mut read_size = 0;
|
||||
|
||||
// loop {
|
||||
// let data: Vec<Bytes> = {
|
||||
// // 读固定大小的数据
|
||||
// // Read a fixed-size chunk
|
||||
// match Self::read_data(body.as_mut(), prev_bytes, chunk_size).await {
|
||||
// None => break,
|
||||
// Some(Err(e)) => return Err(e),
|
||||
@@ -72,13 +72,13 @@
|
||||
|
||||
// if read_size + prev_bytes.len() >= content_length {
|
||||
// // debug!(
|
||||
// // "读完了 read_size:{} + prev_bytes.len({}) == content_length {}",
|
||||
// // "Finished reading: read_size:{} + prev_bytes.len({}) == content_length {}",
|
||||
// // read_size,
|
||||
// // prev_bytes.len(),
|
||||
// // content_length,
|
||||
// // );
|
||||
|
||||
// // 填充 0?
|
||||
// // Pad with zeros?
|
||||
// if !need_padding {
|
||||
// y.yield_ok(prev_bytes).await;
|
||||
// break;
|
||||
@@ -115,7 +115,7 @@
|
||||
// {
|
||||
// let mut bytes_buffer = Vec::new();
|
||||
|
||||
// // 只执行一次
|
||||
// // Run only once
|
||||
// let mut push_data_bytes = |mut bytes: Bytes| {
|
||||
// // debug!("read from body {} split per {}, prev_bytes: {}", bytes.len(), data_size, prev_bytes.len());
|
||||
|
||||
@@ -127,11 +127,11 @@
|
||||
// return Some(bytes);
|
||||
// }
|
||||
|
||||
// // 合并上一次数据
|
||||
// // Merge with the previous data
|
||||
// if !prev_bytes.is_empty() {
|
||||
// let need_size = data_size.wrapping_sub(prev_bytes.len());
|
||||
// // debug!(
|
||||
// // " 上一次有剩余{},从这一次中取{},共:{}",
|
||||
// // "Previous leftover {}, take {} now, total: {}",
|
||||
// // prev_bytes.len(),
|
||||
// // need_size,
|
||||
// // prev_bytes.len() + need_size
|
||||
@@ -143,7 +143,7 @@
|
||||
// combined.extend_from_slice(&data);
|
||||
|
||||
// // debug!(
|
||||
// // "取到的长度大于所需,取出需要的长度:{},与上一次合并得到:{},bytes 剩余:{}",
|
||||
// // "Fetched more bytes than needed: {}, merged result {}, remaining bytes {}",
|
||||
// // need_size,
|
||||
// // combined.len(),
|
||||
// // bytes.len(),
|
||||
@@ -156,7 +156,7 @@
|
||||
// combined.extend_from_slice(&bytes);
|
||||
|
||||
// // debug!(
|
||||
// // "取到的长度小于所需,取出需要的长度:{},与上一次合并得到:{},bytes 剩余:{},直接返回",
|
||||
// // "Fetched fewer bytes than needed: {}, merged result {}, remaining bytes {}, return immediately",
|
||||
// // need_size,
|
||||
// // combined.len(),
|
||||
// // bytes.len(),
|
||||
@@ -166,29 +166,29 @@
|
||||
// }
|
||||
// }
|
||||
|
||||
// // 取到的数据比需要的块大,从 bytes 中截取需要的块大小
|
||||
// // If the fetched data exceeds the chunk, slice the required size
|
||||
// if data_size <= bytes.len() {
|
||||
// let n = bytes.len() / data_size;
|
||||
|
||||
// for _ in 0..n {
|
||||
// let data = bytes.split_to(data_size);
|
||||
|
||||
// // println!("bytes_buffer.push: {},剩余:{}", data.len(), bytes.len());
|
||||
// // println!("bytes_buffer.push: {}, remaining: {}", data.len(), bytes.len());
|
||||
// bytes_buffer.push(data);
|
||||
// }
|
||||
|
||||
// Some(bytes)
|
||||
// } else {
|
||||
// // 不够
|
||||
// // Insufficient data
|
||||
// Some(bytes)
|
||||
// }
|
||||
// };
|
||||
|
||||
// // 剩余数据
|
||||
// // Remaining data
|
||||
// let remaining_bytes = 'outer: {
|
||||
// // // 如果上一次数据足够,跳出
|
||||
// // // Exit if the previous data was sufficient
|
||||
// // if let Some(remaining_bytes) = push_data_bytes(prev_bytes) {
|
||||
// // println!("从剩下的取");
|
||||
// // println!("Consuming leftovers");
|
||||
// // break 'outer remaining_bytes;
|
||||
// // }
|
||||
|
||||
|
||||
@@ -18,28 +18,23 @@
|
||||
#![allow(unused_must_use)]
|
||||
#![allow(clippy::all)]
|
||||
|
||||
use bytes::Bytes;
|
||||
use http::HeaderMap;
|
||||
use std::collections::HashMap;
|
||||
use std::io::Cursor;
|
||||
use tokio::io::BufReader;
|
||||
|
||||
use crate::client::{
|
||||
api_error_response::{err_invalid_argument, http_resp_to_error_response},
|
||||
api_get_object_acl::AccessControlList,
|
||||
api_get_options::GetObjectOptions,
|
||||
transition_api::{ObjectInfo, ReadCloser, ReaderImpl, RequestMetadata, TransitionClient, to_object_info},
|
||||
};
|
||||
use bytes::Bytes;
|
||||
use http::HeaderMap;
|
||||
use s3s::dto::RestoreRequest;
|
||||
use std::collections::HashMap;
|
||||
use std::io::Cursor;
|
||||
use tokio::io::BufReader;
|
||||
|
||||
const TIER_STANDARD: &str = "Standard";
|
||||
const TIER_BULK: &str = "Bulk";
|
||||
const TIER_EXPEDITED: &str = "Expedited";
|
||||
|
||||
#[derive(Debug, Default, serde::Serialize)]
|
||||
pub struct GlacierJobParameters {
|
||||
pub tier: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, serde::Serialize, serde::Deserialize)]
|
||||
pub struct Encryption {
|
||||
pub encryption_type: String,
|
||||
@@ -65,58 +60,6 @@ pub struct S3 {
|
||||
pub user_metadata: MetadataEntry,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, serde::Serialize)]
|
||||
pub struct SelectParameters {
|
||||
pub expression_type: String,
|
||||
pub expression: String,
|
||||
//input_serialization: SelectObjectInputSerialization,
|
||||
//output_serialization: SelectObjectOutputSerialization,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, serde::Serialize)]
|
||||
pub struct OutputLocation(pub S3);
|
||||
|
||||
#[derive(Debug, Default, serde::Serialize)]
|
||||
pub struct RestoreRequest {
|
||||
pub restore_type: String,
|
||||
pub tier: String,
|
||||
pub days: i64,
|
||||
pub glacier_job_parameters: GlacierJobParameters,
|
||||
pub description: String,
|
||||
pub select_parameters: SelectParameters,
|
||||
pub output_location: OutputLocation,
|
||||
}
|
||||
|
||||
impl RestoreRequest {
|
||||
fn set_days(&mut self, v: i64) {
|
||||
self.days = v;
|
||||
}
|
||||
|
||||
fn set_glacier_job_parameters(&mut self, v: GlacierJobParameters) {
|
||||
self.glacier_job_parameters = v;
|
||||
}
|
||||
|
||||
fn set_type(&mut self, v: &str) {
|
||||
self.restore_type = v.to_string();
|
||||
}
|
||||
|
||||
fn set_tier(&mut self, v: &str) {
|
||||
self.tier = v.to_string();
|
||||
}
|
||||
|
||||
fn set_description(&mut self, v: &str) {
|
||||
self.description = v.to_string();
|
||||
}
|
||||
|
||||
fn set_select_parameters(&mut self, v: SelectParameters) {
|
||||
self.select_parameters = v;
|
||||
}
|
||||
|
||||
fn set_output_location(&mut self, v: OutputLocation) {
|
||||
self.output_location = v;
|
||||
}
|
||||
}
|
||||
|
||||
impl TransitionClient {
|
||||
pub async fn restore_object(
|
||||
&self,
|
||||
@@ -125,12 +68,13 @@ impl TransitionClient {
|
||||
version_id: &str,
|
||||
restore_req: &RestoreRequest,
|
||||
) -> Result<(), std::io::Error> {
|
||||
let restore_request = match quick_xml::se::to_string(restore_req) {
|
||||
/*let restore_request = match quick_xml::se::to_string(restore_req) {
|
||||
Ok(buf) => buf,
|
||||
Err(e) => {
|
||||
return Err(std::io::Error::other(e));
|
||||
}
|
||||
};
|
||||
};*/
|
||||
let restore_request = "".to_string();
|
||||
let restore_request_bytes = restore_request.as_bytes().to_vec();
|
||||
|
||||
let mut url_values = HashMap::new();
|
||||
|
||||
@@ -27,7 +27,7 @@ use tracing::{debug, error, info};
|
||||
|
||||
use crate::client::{
|
||||
api_error_response::{http_resp_to_error_response, to_error_response},
|
||||
transition_api::{Document, TransitionClient},
|
||||
transition_api::{CreateBucketConfiguration, LocationConstraint, TransitionClient},
|
||||
};
|
||||
use rustfs_utils::hash::EMPTY_STRING_SHA256_HASH;
|
||||
use s3s::Body;
|
||||
@@ -82,7 +82,7 @@ impl TransitionClient {
|
||||
let req = self.get_bucket_location_request(bucket_name)?;
|
||||
|
||||
let mut resp = self.doit(req).await?;
|
||||
location = process_bucket_location_response(resp, bucket_name).await?;
|
||||
location = process_bucket_location_response(resp, bucket_name, &self.tier_type).await?;
|
||||
{
|
||||
let mut bucket_loc_cache = self.bucket_loc_cache.lock().unwrap();
|
||||
bucket_loc_cache.set(bucket_name, &location);
|
||||
@@ -175,7 +175,11 @@ impl TransitionClient {
|
||||
}
|
||||
}
|
||||
|
||||
async fn process_bucket_location_response(mut resp: http::Response<Body>, bucket_name: &str) -> Result<String, std::io::Error> {
|
||||
async fn process_bucket_location_response(
|
||||
mut resp: http::Response<Body>,
|
||||
bucket_name: &str,
|
||||
tier_type: &str,
|
||||
) -> Result<String, std::io::Error> {
|
||||
//if resp != nil {
|
||||
if resp.status() != StatusCode::OK {
|
||||
let err_resp = http_resp_to_error_response(&resp, vec![], bucket_name, "");
|
||||
@@ -209,9 +213,17 @@ async fn process_bucket_location_response(mut resp: http::Response<Body>, bucket
|
||||
//}
|
||||
|
||||
let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec();
|
||||
let Document(location_constraint) = quick_xml::de::from_str::<Document>(&String::from_utf8(b).unwrap()).unwrap();
|
||||
let mut location = "".to_string();
|
||||
if tier_type == "huaweicloud" {
|
||||
let d = quick_xml::de::from_str::<CreateBucketConfiguration>(&String::from_utf8(b).unwrap()).unwrap();
|
||||
location = d.location_constraint;
|
||||
} else {
|
||||
if let Ok(LocationConstraint { field }) = quick_xml::de::from_str::<LocationConstraint>(&String::from_utf8(b).unwrap()) {
|
||||
location = field;
|
||||
}
|
||||
}
|
||||
//debug!("location: {}", location);
|
||||
|
||||
let mut location = location_constraint;
|
||||
if location == "" {
|
||||
location = "us-east-1".to_string();
|
||||
}
|
||||
|
||||
@@ -21,6 +21,7 @@
|
||||
|
||||
use http::HeaderMap;
|
||||
use s3s::dto::ETag;
|
||||
use std::pin::Pin;
|
||||
use std::{collections::HashMap, io::Cursor, sync::Arc};
|
||||
use tokio::io::BufReader;
|
||||
|
||||
@@ -54,7 +55,7 @@ impl PutObjReader {
|
||||
}
|
||||
}
|
||||
|
||||
pub type ObjReaderFn = Arc<dyn Fn(BufReader<Cursor<Vec<u8>>>, HeaderMap) -> GetObjectReader + 'static>;
|
||||
pub type ObjReaderFn<'a> = Arc<dyn Fn(BufReader<Cursor<Vec<u8>>>, HeaderMap) -> GetObjectReader + Send + Sync + 'a>;
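// Note (reading of the change above): the added Send + Sync bounds let the reader factory be
// stored and invoked across threads and await points, and the 'a lifetime ties the closure to
// the ObjectInfo it captures by reference in new_getobjectreader below.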
|
||||
|
||||
fn part_number_to_rangespec(oi: ObjectInfo, part_number: usize) -> Option<HTTPRangeSpec> {
|
||||
if oi.size == 0 || oi.parts.len() == 0 {
|
||||
@@ -108,19 +109,24 @@ fn get_compressed_offsets(oi: ObjectInfo, offset: i64) -> (i64, i64, i64, i64, u
|
||||
(compressed_offset, part_skip, first_part_idx, decrypt_skip, seq_num)
|
||||
}
|
||||
|
||||
pub fn new_getobjectreader(
|
||||
rs: HTTPRangeSpec,
|
||||
oi: &ObjectInfo,
|
||||
pub fn new_getobjectreader<'a>(
|
||||
rs: &Option<HTTPRangeSpec>,
|
||||
oi: &'a ObjectInfo,
|
||||
opts: &ObjectOptions,
|
||||
h: &HeaderMap,
|
||||
) -> Result<(ObjReaderFn, i64, i64), ErrorResponse> {
|
||||
_h: &HeaderMap,
|
||||
) -> Result<(ObjReaderFn<'a>, i64, i64), ErrorResponse> {
|
||||
//let (_, mut is_encrypted) = crypto.is_encrypted(oi.user_defined)?;
|
||||
let mut is_encrypted = false;
|
||||
let is_compressed = false; //oi.is_compressed_ok();
|
||||
|
||||
let mut rs_ = None;
|
||||
if rs.is_none() && opts.part_number.is_some() && opts.part_number.unwrap() > 0 {
|
||||
rs_ = part_number_to_rangespec(oi.clone(), opts.part_number.unwrap());
|
||||
}
|
||||
|
||||
let mut get_fn: ObjReaderFn;
|
||||
|
||||
let (off, length) = match rs.get_offset_length(oi.size) {
|
||||
let (off, length) = match rs_.unwrap().get_offset_length(oi.size) {
|
||||
Ok(x) => x,
|
||||
Err(err) => {
|
||||
return Err(ErrorResponse {
|
||||
@@ -136,12 +142,11 @@ pub fn new_getobjectreader(
|
||||
};
|
||||
get_fn = Arc::new(move |input_reader: BufReader<Cursor<Vec<u8>>>, _: HeaderMap| {
|
||||
//Box::pin({
|
||||
/*let r = GetObjectReader {
|
||||
let r = GetObjectReader {
|
||||
object_info: oi.clone(),
|
||||
stream: StreamingBlob::new(HashReader::new(input_reader, 10, None, None, 10)),
|
||||
stream: Box::new(input_reader),
|
||||
};
|
||||
r*/
|
||||
todo!();
|
||||
r
|
||||
//})
|
||||
});
|
||||
|
||||
|
||||
@@ -44,7 +44,7 @@ use std::{
use time::Duration;
use time::OffsetDateTime;
use tokio::io::BufReader;
use tracing::{debug, error};
use tracing::{debug, error, warn};
use url::{Url, form_urlencoded};
use uuid::Uuid;

@@ -109,6 +109,7 @@ pub struct TransitionClient {
pub health_status: AtomicI32,
pub trailing_header_support: bool,
pub max_retries: i64,
pub tier_type: String,
}

#[derive(Debug, Default)]
@@ -132,13 +133,13 @@ pub enum BucketLookupType {
}

impl TransitionClient {
pub async fn new(endpoint: &str, opts: Options) -> Result<TransitionClient, std::io::Error> {
let clnt = Self::private_new(endpoint, opts).await?;
pub async fn new(endpoint: &str, opts: Options, tier_type: &str) -> Result<TransitionClient, std::io::Error> {
let clnt = Self::private_new(endpoint, opts, tier_type).await?;

Ok(clnt)
}

async fn private_new(endpoint: &str, opts: Options) -> Result<TransitionClient, std::io::Error> {
async fn private_new(endpoint: &str, opts: Options, tier_type: &str) -> Result<TransitionClient, std::io::Error> {
let endpoint_url = get_endpoint_url(endpoint, opts.secure)?;

//#[cfg(feature = "ring")]
@@ -175,6 +176,7 @@ impl TransitionClient {
health_status: AtomicI32::new(C_UNKNOWN),
trailing_header_support: opts.trailing_headers,
max_retries: opts.max_retries,
tier_type: tier_type.to_string(),
};

{
@@ -283,11 +285,14 @@ impl TransitionClient {
let mut resp = resp.unwrap();
debug!("http_resp: {:?}", resp);

//let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec();
//debug!("http_resp_body: {}", String::from_utf8(b).unwrap());

//if self.is_trace_enabled && !(self.trace_errors_only && resp.status() == StatusCode::OK) {
if resp.status() != StatusCode::OK {
//self.dump_http(&cloned_req, &resp)?;
let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec();
debug!("err_body: {}", String::from_utf8(b).unwrap());
warn!("err_body: {}", String::from_utf8(b).unwrap());
}

Ok(resp)
@@ -330,7 +335,8 @@ impl TransitionClient {
}

let b = resp.body_mut().store_all_unlimited().await.unwrap().to_vec();
let err_response = http_resp_to_error_response(&resp, b.clone(), &metadata.bucket_name, &metadata.object_name);
let mut err_response = http_resp_to_error_response(&resp, b.clone(), &metadata.bucket_name, &metadata.object_name);
err_response.message = format!("remote tier error: {}", err_response.message);

if self.region == "" {
match err_response.code {
@@ -380,9 +386,9 @@ impl TransitionClient {
method: &http::Method,
metadata: &mut RequestMetadata,
) -> Result<http::Request<Body>, std::io::Error> {
let location = metadata.bucket_location.clone();
let mut location = metadata.bucket_location.clone();
if location == "" && metadata.bucket_name != "" {
let location = self.get_bucket_location(&metadata.bucket_name).await?;
location = self.get_bucket_location(&metadata.bucket_name).await?;
}

let is_makebucket = metadata.object_name == "" && method == http::Method::PUT && metadata.query_values.len() == 0;
@@ -624,7 +630,7 @@ pub struct TransitionCore(pub Arc<TransitionClient>);

impl TransitionCore {
pub async fn new(endpoint: &str, opts: Options) -> Result<Self, std::io::Error> {
let client = TransitionClient::new(endpoint, opts).await?;
let client = TransitionClient::new(endpoint, opts, "").await?;
Ok(Self(Arc::new(client)))
}

@@ -997,4 +1003,13 @@ impl tower::Service<Request<Body>> for SendRequest {
}

#[derive(Serialize, Deserialize)]
pub struct Document(pub String);
pub struct LocationConstraint {
#[serde(rename = "$value")]
pub field: String,
}

#[derive(Serialize, Deserialize)]
pub struct CreateBucketConfiguration {
#[serde(rename = "LocationConstraint")]
pub location_constraint: String,
}

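A hedged call-site sketch for the widened constructor; the endpoint and tier name are placeholders, and it assumes `Options` implements `Default`, which is not confirmed by the hunk above:

let opts = Options::default(); // assumption: Options: Default
let client = TransitionClient::new("https://warm-tier.example.com", opts, "s3-warm").await?;
assert_eq!(client.tier_type, "s3-warm");
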
@@ -40,7 +40,6 @@ pub const ENV_ACCESS_KEY: &str = "RUSTFS_ACCESS_KEY";
pub const ENV_SECRET_KEY: &str = "RUSTFS_SECRET_KEY";
pub const ENV_ROOT_USER: &str = "RUSTFS_ROOT_USER";
pub const ENV_ROOT_PASSWORD: &str = "RUSTFS_ROOT_PASSWORD";

pub static RUSTFS_CONFIG_PREFIX: &str = "config";

pub struct ConfigSys {}

@@ -49,12 +49,12 @@ pub fn reduce_quorum_errs(errors: &[Option<Error>], ignored_errs: &[Error], quor
pub fn reduce_errs(errors: &[Option<Error>], ignored_errs: &[Error]) -> (usize, Option<Error>) {
let nil_error = Error::other("nil".to_string());

// 首先统计 None 的数量(作为 nil 错误)
// First count the number of None values (treated as nil errors)
let nil_count = errors.iter().filter(|e| e.is_none()).count();

let err_counts = errors
.iter()
.filter_map(|e| e.as_ref()) // 只处理 Some 的错误
.filter_map(|e| e.as_ref()) // Only process errors stored in Some
.fold(std::collections::HashMap::new(), |mut acc, e| {
if is_ignored_err(ignored_errs, e) {
return acc;
@@ -63,13 +63,13 @@ pub fn reduce_errs(errors: &[Option<Error>], ignored_errs: &[Error]) -> (usize,
acc
});

// 找到最高频率的非 nil 错误
// Find the most frequent non-nil error
let (best_err, best_count) = err_counts
.into_iter()
.max_by(|(_, c1), (_, c2)| c1.cmp(c2))
.unwrap_or((nil_error.clone(), 0));

// 比较 nil 错误和最高频率的非 nil 错误, 优先选择 nil 错误
// Compare nil errors with the top non-nil error and prefer the nil error
if nil_count > best_count || (nil_count == best_count && nil_count > 0) {
(nil_count, None)
} else {

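A small usage sketch of the `reduce_errs` signature shown above, with illustrative inputs:

let errors = vec![None, Some(Error::other("disk offline".to_string())), None];
let (count, err) = reduce_errs(&errors, &[]);
// Two None entries outnumber the single error, so the nil outcome wins.
assert_eq!(count, 2);
assert!(err.is_none());
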
@@ -319,8 +319,8 @@ impl LocalDisk {
}

if cfg!(target_os = "windows") {
// 在 Windows 上,卷名不应该包含保留字符。
// 这个正则表达式匹配了不允许的字符。
// Windows volume names must not include reserved characters.
// This regular expression matches disallowed characters.
if volname.contains('|')
|| volname.contains('<')
|| volname.contains('>')
@@ -333,7 +333,7 @@ impl LocalDisk {
return false;
}
} else {
// 对于非 Windows 系统,可能需要其他的验证逻辑。
// Non-Windows systems may require additional validation rules.
}

true
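The hunk above shows only the first few characters of the Windows check; a condensed, illustrative equivalent of that style of validation might look like this, with the full character list assumed rather than taken from the source:

fn has_windows_reserved(volname: &str) -> bool {
    // '|', '<' and '>' appear in the diff; the remaining characters are assumptions.
    const RESERVED: &[char] = &['|', '<', '>', '"', '\\', ':', '*', '?'];
    volname.chars().any(|c| RESERVED.contains(&c))
}
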
@@ -563,7 +563,7 @@ impl LocalDisk {

// return Ok(());

// TODO: 异步通知 检测硬盘空间 清空回收站
// TODO: async notifications for disk space checks and trash cleanup

let trash_path = self.get_object_path(super::RUSTFS_META_TMP_DELETED_BUCKET, Uuid::new_v4().to_string().as_str())?;
// if let Some(parent) = trash_path.parent() {
@@ -846,13 +846,13 @@ impl LocalDisk {
}
}

// 没有版本了,删除 xl.meta
// Remove xl.meta when no versions remain
if fm.versions.is_empty() {
self.delete_file(&volume_dir, &xlpath, true, false).await?;
return Ok(());
}

// 更新 xl.meta
// Update xl.meta
let buf = fm.marshal_msg()?;

let volume_dir = self.get_bucket_path(volume)?;
@@ -984,7 +984,8 @@ impl LocalDisk {
#[async_recursion::async_recursion]
async fn scan_dir<W>(
&self,
current: &mut String,
mut current: String,
mut prefix: String,
opts: &WalkDirOptions,
out: &mut MetacacheWriter<W>,
objs_returned: &mut i32,
@@ -1022,14 +1023,16 @@ impl LocalDisk {
return Ok(());
}

let mut entries = match self.list_dir("", &opts.bucket, current, -1).await {
// TODO: add lock

let mut entries = match self.list_dir("", &opts.bucket, &current, -1).await {
Ok(res) => res,
Err(e) => {
if e != DiskError::VolumeNotFound && e != Error::FileNotFound {
debug!("scan list_dir {}, err {:?}", &current, &e);
error!("scan list_dir {}, err {:?}", &current, &e);
}

if opts.report_notfound && e == Error::FileNotFound && current == &opts.base_dir {
if opts.report_notfound && e == Error::FileNotFound && current == opts.base_dir {
return Err(DiskError::FileNotFound);
}

@@ -1041,14 +1044,13 @@ impl LocalDisk {
return Ok(());
}

let s = SLASH_SEPARATOR.chars().next().unwrap_or_default();
*current = current.trim_matches(s).to_owned();
current = current.trim_matches('/').to_owned();

let bucket = opts.bucket.as_str();

let mut dir_objes = HashSet::new();

// 第一层过滤
// First-level filtering
for item in entries.iter_mut() {
let entry = item.clone();
// check limit
@@ -1056,11 +1058,9 @@ impl LocalDisk {
return Ok(());
}
// check prefix
if let Some(filter_prefix) = &opts.filter_prefix {
if !entry.starts_with(filter_prefix) {
*item = "".to_owned();
continue;
}
if !prefix.is_empty() && !entry.starts_with(prefix.as_str()) {
*item = "".to_owned();
continue;
}

if let Some(forward) = &forward {
@@ -1085,46 +1085,55 @@ impl LocalDisk {
|
||||
*item = "".to_owned();
|
||||
|
||||
if entry.ends_with(STORAGE_FORMAT_FILE) {
|
||||
//
|
||||
let metadata = self
|
||||
let metadata = match self
|
||||
.read_metadata(self.get_object_path(bucket, format!("{}/{}", ¤t, &entry).as_str())?)
|
||||
.await?;
|
||||
.await
|
||||
{
|
||||
Ok(res) => res,
|
||||
Err(err) => {
|
||||
warn!("scan dir read_metadata error, continue {:?}", err);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
// 用 strip_suffix 只删除一次
|
||||
let entry = entry.strip_suffix(STORAGE_FORMAT_FILE).unwrap_or_default().to_owned();
|
||||
let name = entry.trim_end_matches(SLASH_SEPARATOR);
|
||||
let name = decode_dir_object(format!("{}/{}", ¤t, &name).as_str());
|
||||
|
||||
// if opts.limit > 0
|
||||
// && let Ok(meta) = FileMeta::load(&metadata)
|
||||
// && !meta.all_hidden(true)
|
||||
// {
|
||||
*objs_returned += 1;
|
||||
// }
|
||||
|
||||
out.write_obj(&MetaCacheEntry {
|
||||
name: name.clone(),
|
||||
metadata,
|
||||
..Default::default()
|
||||
})
|
||||
.await?;
|
||||
*objs_returned += 1;
|
||||
|
||||
// warn!("scan list_dir {}, write_obj done, name: {:?}", ¤t, &name);
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
entries.sort();
|
||||
|
||||
let mut entries = entries.as_slice();
|
||||
if let Some(forward) = &forward {
|
||||
for (i, entry) in entries.iter().enumerate() {
|
||||
if entry >= forward || forward.starts_with(entry.as_str()) {
|
||||
entries = &entries[i..];
|
||||
entries.drain(..i);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut dir_stack: Vec<String> = Vec::with_capacity(5);
|
||||
prefix = "".to_owned();
|
||||
|
||||
for entry in entries.iter() {
|
||||
if opts.limit > 0 && *objs_returned >= opts.limit {
|
||||
// warn!("scan list_dir {}, limit reached 2", ¤t);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
@@ -1132,7 +1141,7 @@ impl LocalDisk {
|
||||
continue;
|
||||
}
|
||||
|
||||
let name = path_join_buf(&[current, entry]);
|
||||
let name = path_join_buf(&[current.as_str(), entry.as_str()]);
|
||||
|
||||
if !dir_stack.is_empty() {
|
||||
if let Some(pop) = dir_stack.last().cloned() {
|
||||
@@ -1144,9 +1153,7 @@ impl LocalDisk {
|
||||
.await?;
|
||||
|
||||
if opts.recursive {
|
||||
let mut opts = opts.clone();
|
||||
opts.filter_prefix = None;
|
||||
if let Err(er) = Box::pin(self.scan_dir(&mut pop.clone(), &opts, out, objs_returned)).await {
|
||||
if let Err(er) = Box::pin(self.scan_dir(pop, prefix.clone(), opts, out, objs_returned)).await {
|
||||
error!("scan_dir err {:?}", er);
|
||||
}
|
||||
}
|
||||
@@ -1181,9 +1188,17 @@ impl LocalDisk {
|
||||
meta.metadata = res;
|
||||
|
||||
out.write_obj(&meta).await?;
|
||||
|
||||
// if let Ok(meta) = FileMeta::load(&meta.metadata)
|
||||
// && !meta.all_hidden(true)
|
||||
// {
|
||||
*objs_returned += 1;
|
||||
// }
|
||||
}
|
||||
Err(err) => {
|
||||
if err == Error::DiskNotDir {
|
||||
continue;
|
||||
}
|
||||
if err == Error::FileNotFound || err == Error::IsNotRegular {
|
||||
// NOT an object, append to stack (with slash)
|
||||
// If dirObject, but no metadata (which is unexpected) we skip it.
|
||||
@@ -1198,9 +1213,8 @@ impl LocalDisk {
|
||||
};
|
||||
}
|
||||
|
||||
while let Some(dir) = dir_stack.pop() {
|
||||
while let Some(dir) = dir_stack.last() {
|
||||
if opts.limit > 0 && *objs_returned >= opts.limit {
|
||||
// warn!("scan list_dir {}, limit reached 3", ¤t);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
@@ -1209,19 +1223,15 @@ impl LocalDisk {
|
||||
..Default::default()
|
||||
})
|
||||
.await?;
|
||||
*objs_returned += 1;
|
||||
|
||||
if opts.recursive {
|
||||
let mut dir = dir;
|
||||
let mut opts = opts.clone();
|
||||
opts.filter_prefix = None;
|
||||
if let Err(er) = Box::pin(self.scan_dir(&mut dir, &opts, out, objs_returned)).await {
|
||||
if let Err(er) = Box::pin(self.scan_dir(dir.clone(), prefix.clone(), opts, out, objs_returned)).await {
|
||||
warn!("scan_dir err {:?}", &er);
|
||||
}
|
||||
}
|
||||
dir_stack.pop();
|
||||
}
|
||||
|
||||
// warn!("scan list_dir {}, done", ¤t);
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
@@ -1230,7 +1240,7 @@ fn is_root_path(path: impl AsRef<Path>) -> bool {
|
||||
path.as_ref().components().count() == 1 && path.as_ref().has_root()
|
||||
}
|
||||
|
||||
// 过滤 std::io::ErrorKind::NotFound
|
||||
// Filter std::io::ErrorKind::NotFound
|
||||
pub async fn read_file_exists(path: impl AsRef<Path>) -> Result<(Bytes, Option<Metadata>)> {
|
||||
let p = path.as_ref();
|
||||
let (data, meta) = match read_file_all(&p).await {
|
||||
@@ -1884,8 +1894,14 @@ impl DiskAPI for LocalDisk {
|
||||
}
|
||||
}
|
||||
|
||||
let mut current = opts.base_dir.clone();
|
||||
self.scan_dir(&mut current, &opts, &mut out, &mut objs_returned).await?;
|
||||
self.scan_dir(
|
||||
opts.base_dir.clone(),
|
||||
opts.filter_prefix.clone().unwrap_or_default(),
|
||||
&opts,
|
||||
&mut out,
|
||||
&mut objs_returned,
|
||||
)
|
||||
.await?;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -1915,11 +1931,11 @@ impl DiskAPI for LocalDisk {
|
||||
}
|
||||
}
|
||||
|
||||
// xl.meta 路径
|
||||
// xl.meta path
|
||||
let src_file_path = src_volume_dir.join(Path::new(format!("{}/{}", &src_path, STORAGE_FORMAT_FILE).as_str()));
|
||||
let dst_file_path = dst_volume_dir.join(Path::new(format!("{}/{}", &dst_path, STORAGE_FORMAT_FILE).as_str()));
|
||||
|
||||
// data_dir 路径
|
||||
// data_dir path
|
||||
let has_data_dir_path = {
|
||||
let has_data_dir = {
|
||||
if !fi.is_remote() {
|
||||
@@ -1947,7 +1963,7 @@ impl DiskAPI for LocalDisk {
|
||||
check_path_length(src_file_path.to_string_lossy().to_string().as_str())?;
|
||||
check_path_length(dst_file_path.to_string_lossy().to_string().as_str())?;
|
||||
|
||||
// 读旧 xl.meta
|
||||
// Read the previous xl.meta
|
||||
|
||||
let has_dst_buf = match super::fs::read_file(&dst_file_path).await {
|
||||
Ok(res) => Some(res),
|
||||
@@ -2432,7 +2448,7 @@ impl DiskAPI for LocalDisk {
|
||||
async fn delete_volume(&self, volume: &str) -> Result<()> {
|
||||
let p = self.get_bucket_path(volume)?;
|
||||
|
||||
// TODO: 不能用递归删除,如果目录下面有文件,返回 errVolumeNotEmpty
|
||||
// TODO: avoid recursive deletion; return errVolumeNotEmpty when files remain
|
||||
|
||||
if let Err(err) = fs::remove_dir_all(&p).await {
|
||||
let e: DiskError = to_volume_error(err).into();
|
||||
@@ -2586,7 +2602,7 @@ mod test {
|
||||
assert!(object_path.to_string_lossy().contains("test-bucket"));
|
||||
assert!(object_path.to_string_lossy().contains("test-object"));
|
||||
|
||||
// 清理测试目录
|
||||
// Clean up the test directory
|
||||
let _ = fs::remove_dir_all(&test_dir).await;
|
||||
}
|
||||
|
||||
@@ -2651,7 +2667,7 @@ mod test {
|
||||
disk.delete_volume(vol).await.unwrap();
|
||||
}
|
||||
|
||||
// 清理测试目录
|
||||
// Clean up the test directory
|
||||
let _ = fs::remove_dir_all(&test_dir).await;
|
||||
}
|
||||
|
||||
@@ -2675,7 +2691,7 @@ mod test {
|
||||
assert!(!disk_info.fs_type.is_empty());
|
||||
assert!(disk_info.total > 0);
|
||||
|
||||
// 清理测试目录
|
||||
// Clean up the test directory
|
||||
let _ = fs::remove_dir_all(&test_dir).await;
|
||||
}
|
||||
|
||||
|
||||
@@ -431,7 +431,7 @@ pub trait DiskAPI: Debug + Send + Sync + 'static {
|
||||
async fn stat_volume(&self, volume: &str) -> Result<VolumeInfo>;
|
||||
async fn delete_volume(&self, volume: &str) -> Result<()>;
|
||||
|
||||
// 并发边读边写 w <- MetaCacheEntry
|
||||
// Concurrent read/write pipeline w <- MetaCacheEntry
|
||||
async fn walk_dir<W: AsyncWrite + Unpin + Send>(&self, opts: WalkDirOptions, wr: &mut W) -> Result<()>;
|
||||
|
||||
// Metadata operations
|
||||
@@ -466,7 +466,7 @@ pub trait DiskAPI: Debug + Send + Sync + 'static {
|
||||
) -> Result<RenameDataResp>;
|
||||
|
||||
// File operations.
|
||||
// 读目录下的所有文件、目录
|
||||
// Read every file and directory within the folder
|
||||
async fn list_dir(&self, origvolume: &str, volume: &str, dir_path: &str, count: i32) -> Result<Vec<String>>;
|
||||
async fn read_file(&self, volume: &str, path: &str) -> Result<FileReader>;
|
||||
async fn read_file_stream(&self, volume: &str, path: &str, offset: usize, length: usize) -> Result<FileReader>;
|
||||
@@ -1000,7 +1000,7 @@ mod tests {
|
||||
// Note: is_online() might return false for local disks without proper initialization
|
||||
// This is expected behavior for test environments
|
||||
|
||||
// 清理测试目录
|
||||
// Clean up the test directory
|
||||
let _ = fs::remove_dir_all(&test_dir).await;
|
||||
}
|
||||
|
||||
@@ -1031,7 +1031,7 @@ mod tests {
|
||||
let location = disk.get_disk_location();
|
||||
assert!(location.valid() || (!location.valid() && endpoint.pool_idx < 0));
|
||||
|
||||
// 清理测试目录
|
||||
// Clean up the test directory
|
||||
let _ = fs::remove_dir_all(&test_dir).await;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -203,7 +203,7 @@ pub async fn os_mkdir_all(dir_path: impl AsRef<Path>, base_dir: impl AsRef<Path>
|
||||
}
|
||||
|
||||
if let Some(parent) = dir_path.as_ref().parent() {
|
||||
// 不支持递归,直接 create_dir_all 了
|
||||
// Without recursion support, fall back to create_dir_all
|
||||
if let Err(e) = super::fs::make_dir_all(&parent).await {
|
||||
if e.kind() == io::ErrorKind::AlreadyExists {
|
||||
return Ok(());
|
||||
|
||||
@@ -297,24 +297,24 @@ impl Erasure {
|
||||
pub fn encode_data(self: Arc<Self>, data: &[u8]) -> Result<Vec<Bytes>> {
|
||||
let (shard_size, total_size) = self.need_size(data.len());
|
||||
|
||||
// 生成一个新的 所需的所有分片数据长度
|
||||
// Generate the total length required for all shards
|
||||
let mut data_buffer = BytesMut::with_capacity(total_size);
|
||||
|
||||
// 复制源数据
|
||||
// Copy the source data
|
||||
data_buffer.extend_from_slice(data);
|
||||
data_buffer.resize(total_size, 0u8);
|
||||
|
||||
{
|
||||
// ec encode, 结果会写进 data_buffer
|
||||
// Perform EC encoding; the results go into data_buffer
|
||||
let data_slices: SmallVec<[&mut [u8]; 16]> = data_buffer.chunks_exact_mut(shard_size).collect();
|
||||
|
||||
// parity 数量大于 0 才 ec
|
||||
// Only perform EC encoding when parity shards are present
|
||||
if self.parity_shards > 0 {
|
||||
self.encoder.as_ref().unwrap().encode(data_slices).map_err(Error::other)?;
|
||||
}
|
||||
}
|
||||
|
||||
// 零拷贝分片,所有 shard 引用 data_buffer
|
||||
// Zero-copy shards: every shard references data_buffer
|
||||
let mut data_buffer = data_buffer.freeze();
|
||||
let mut shards = Vec::with_capacity(self.total_shard_count());
|
||||
for _ in 0..self.total_shard_count() {
|
||||
@@ -333,13 +333,13 @@ impl Erasure {
Ok(())
}

// 每个分片长度,所需要的总长度
// The length per shard and the total required length
fn need_size(&self, data_size: usize) -> (usize, usize) {
let shard_size = self.shard_size(data_size);
(shard_size, shard_size * (self.total_shard_count()))
}

// 算出每个分片大小
// Compute each shard size
pub fn shard_size(&self, data_size: usize) -> usize {
data_size.div_ceil(self.data_shards)
}
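A worked example of the two helpers above, assuming total_shard_count() is data_shards + parity_shards:

// data_shards = 4, parity_shards = 2, data_size = 10
// shard_size(10) = 10.div_ceil(4) = 3
// need_size(10)  = (3, 3 * 6)     = (3, 18)
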
@@ -354,7 +354,7 @@ impl Erasure {
|
||||
let last_shard_size = last_block_size.div_ceil(self.data_shards);
|
||||
num_shards * self.shard_size(self.block_size) + last_shard_size
|
||||
|
||||
// // 因为写入的时候 ec 需要补全,所以最后一个长度应该也是一样的
|
||||
// When writing, EC pads the data so the last shard length should match
|
||||
// if last_block_size != 0 {
|
||||
// num_shards += 1
|
||||
// }
|
||||
@@ -447,12 +447,12 @@ pub trait ReadAt {
|
||||
}
|
||||
|
||||
pub struct ShardReader {
|
||||
readers: Vec<Option<BitrotReader>>, // 磁盘
|
||||
data_block_count: usize, // 总的分片数量
|
||||
readers: Vec<Option<BitrotReader>>, // Disk readers
|
||||
data_block_count: usize, // Total number of shards
|
||||
parity_block_count: usize,
|
||||
shard_size: usize, // 每个分片的块大小 一次读取一块
|
||||
shard_file_size: usize, // 分片文件总长度
|
||||
offset: usize, // 在分片中的 offset
|
||||
shard_size: usize, // Block size per shard (read one block at a time)
|
||||
shard_file_size: usize, // Total size of the shard file
|
||||
offset: usize, // Offset within the shard
|
||||
}
|
||||
|
||||
impl ShardReader {
|
||||
@@ -470,7 +470,7 @@ impl ShardReader {
|
||||
pub async fn read(&mut self) -> Result<Vec<Option<Vec<u8>>>> {
|
||||
// let mut disks = self.readers;
|
||||
let reader_length = self.readers.len();
|
||||
// 需要读取的块长度
|
||||
// Length of the block to read
|
||||
let mut read_length = self.shard_size;
|
||||
if self.offset + read_length > self.shard_file_size {
|
||||
read_length = self.shard_file_size - self.offset
|
||||
|
||||
@@ -387,7 +387,7 @@ mod tests {
|
||||
}
|
||||
assert_eq!(n, data.len());
|
||||
|
||||
// 读
|
||||
// Read
|
||||
let reader = bitrot_writer.into_inner();
|
||||
let reader = Cursor::new(reader.into_inner());
|
||||
let mut bitrot_reader = BitrotReader::new(reader, shard_size, HashAlgorithm::HighwayHash256);
|
||||
@@ -433,7 +433,7 @@ mod tests {
|
||||
let res = bitrot_reader.read(&mut buf).await;
|
||||
|
||||
if idx == count - 1 {
|
||||
// 最后一个块,应该返回错误
|
||||
// The last chunk should trigger an error
|
||||
assert!(res.is_err());
|
||||
assert_eq!(res.unwrap_err().kind(), std::io::ErrorKind::InvalidData);
|
||||
break;
|
||||
|
||||
@@ -27,7 +27,7 @@
//!
//! ## Example
//!
//! ```rust
//! ```ignore
//! use rustfs_ecstore::erasure_coding::Erasure;
//!
//! let erasure = Erasure::new(4, 2, 1024); // 4 data shards, 2 parity shards, 1KB block size
@@ -58,7 +58,7 @@ impl Clone for ReedSolomonEncoder {
|
||||
Self {
|
||||
data_shards: self.data_shards,
|
||||
parity_shards: self.parity_shards,
|
||||
// 为新实例创建空的缓存,不共享缓存
|
||||
// Create an empty cache for the new instance instead of sharing one
|
||||
encoder_cache: std::sync::RwLock::new(None),
|
||||
decoder_cache: std::sync::RwLock::new(None),
|
||||
}
|
||||
@@ -83,7 +83,6 @@ impl ReedSolomonEncoder {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// 使用 SIMD 进行编码
|
||||
let simd_result = self.encode_with_simd(&mut shards_vec);
|
||||
|
||||
match simd_result {
|
||||
@@ -176,7 +175,6 @@ impl ReedSolomonEncoder {
|
||||
.find_map(|s| s.as_ref().map(|v| v.len()))
|
||||
.ok_or_else(|| io::Error::other("No valid shards found for reconstruction"))?;
|
||||
|
||||
// 获取或创建decoder
|
||||
let mut decoder = {
|
||||
let mut cache_guard = self
|
||||
.decoder_cache
|
||||
@@ -185,21 +183,17 @@ impl ReedSolomonEncoder {
|
||||
|
||||
match cache_guard.take() {
|
||||
Some(mut cached_decoder) => {
|
||||
// 使用reset方法重置现有decoder
|
||||
if let Err(e) = cached_decoder.reset(self.data_shards, self.parity_shards, shard_len) {
|
||||
warn!("Failed to reset SIMD decoder: {:?}, creating new one", e);
|
||||
// 如果reset失败,创建新的decoder
|
||||
|
||||
reed_solomon_simd::ReedSolomonDecoder::new(self.data_shards, self.parity_shards, shard_len)
|
||||
.map_err(|e| io::Error::other(format!("Failed to create SIMD decoder: {e:?}")))?
|
||||
} else {
|
||||
cached_decoder
|
||||
}
|
||||
}
|
||||
None => {
|
||||
// 第一次使用,创建新decoder
|
||||
reed_solomon_simd::ReedSolomonDecoder::new(self.data_shards, self.parity_shards, shard_len)
|
||||
.map_err(|e| io::Error::other(format!("Failed to create SIMD decoder: {e:?}")))?
|
||||
}
|
||||
None => reed_solomon_simd::ReedSolomonDecoder::new(self.data_shards, self.parity_shards, shard_len)
|
||||
.map_err(|e| io::Error::other(format!("Failed to create SIMD decoder: {e:?}")))?,
|
||||
}
|
||||
};
|
||||
|
||||
@@ -235,8 +229,7 @@ impl ReedSolomonEncoder {
|
||||
}
|
||||
}
|
||||
|
||||
// 将decoder放回缓存(在result被drop后decoder自动重置,可以重用)
|
||||
drop(result); // 显式drop result,确保decoder被重置
|
||||
drop(result);
|
||||
|
||||
*self
|
||||
.decoder_cache
|
||||
@@ -262,7 +255,7 @@ impl ReedSolomonEncoder {
/// - `_buf`: Internal buffer for block operations.
///
/// # Example
/// ```
/// ```ignore
/// use rustfs_ecstore::erasure_coding::Erasure;
/// let erasure = Erasure::new(4, 2, 8);
/// let data = b"hello world";
@@ -954,7 +947,7 @@ mod tests {
|
||||
let block_size = 1024 * 1024; // 1MB block size
|
||||
let erasure = Erasure::new(data_shards, parity_shards, block_size);
|
||||
|
||||
// 创建2MB的测试数据,这样可以测试多个1MB块的处理
|
||||
// Build 2 MB of test data so multiple 1 MB chunks are exercised
|
||||
let mut data = Vec::with_capacity(2 * 1024 * 1024);
|
||||
for i in 0..(2 * 1024 * 1024) {
|
||||
data.push((i % 256) as u8);
|
||||
@@ -968,7 +961,7 @@ mod tests {
|
||||
data.len() / 1024
|
||||
);
|
||||
|
||||
// 编码数据
|
||||
// Encode the data
|
||||
let start = std::time::Instant::now();
|
||||
let shards = erasure.encode_data(&data).unwrap();
|
||||
let encode_duration = start.elapsed();
|
||||
|
||||
@@ -193,6 +193,9 @@ pub enum StorageError {
|
||||
|
||||
#[error("Precondition failed")]
|
||||
PreconditionFailed,
|
||||
|
||||
#[error("Invalid range specified: {0}")]
|
||||
InvalidRangeSpec(String),
|
||||
}
|
||||
|
||||
impl StorageError {
|
||||
@@ -424,6 +427,7 @@ impl Clone for StorageError {
|
||||
StorageError::InsufficientReadQuorum(a, b) => StorageError::InsufficientReadQuorum(a.clone(), b.clone()),
|
||||
StorageError::InsufficientWriteQuorum(a, b) => StorageError::InsufficientWriteQuorum(a.clone(), b.clone()),
|
||||
StorageError::PreconditionFailed => StorageError::PreconditionFailed,
|
||||
StorageError::InvalidRangeSpec(a) => StorageError::InvalidRangeSpec(a.clone()),
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -491,6 +495,7 @@ impl StorageError {
|
||||
StorageError::InsufficientWriteQuorum(_, _) => 0x3A,
|
||||
StorageError::PreconditionFailed => 0x3B,
|
||||
StorageError::EntityTooSmall(_, _, _) => 0x3C,
|
||||
StorageError::InvalidRangeSpec(_) => 0x3D,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -559,6 +564,8 @@ impl StorageError {
|
||||
0x39 => Some(StorageError::InsufficientReadQuorum(Default::default(), Default::default())),
|
||||
0x3A => Some(StorageError::InsufficientWriteQuorum(Default::default(), Default::default())),
|
||||
0x3B => Some(StorageError::PreconditionFailed),
|
||||
0x3C => Some(StorageError::EntityTooSmall(Default::default(), Default::default(), Default::default())),
|
||||
0x3D => Some(StorageError::InvalidRangeSpec(Default::default())),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -384,7 +384,7 @@ impl PoolMeta {
|
||||
|
||||
let mut update = false;
|
||||
|
||||
// 检查指定的池是否需要从已退役的池中移除。
|
||||
// Determine whether the selected pool should be removed from the retired list.
|
||||
for k in specified_pools.keys() {
|
||||
if let Some(pi) = remembered_pools.get(k) {
|
||||
if pi.completed {
|
||||
@@ -400,7 +400,7 @@ impl PoolMeta {
|
||||
// )));
|
||||
}
|
||||
} else {
|
||||
// 如果之前记住的池不再存在,允许更新,因为可能是添加了一个新池。
|
||||
// If the previous pool no longer exists, allow updates because a new pool may have been added.
|
||||
update = true;
|
||||
}
|
||||
}
|
||||
@@ -409,7 +409,7 @@ impl PoolMeta {
|
||||
for (k, pi) in remembered_pools.iter() {
|
||||
if let Some(pos) = specified_pools.get(k) {
|
||||
if *pos != pi.position {
|
||||
update = true; // 池的顺序发生了变化,允许更新。
|
||||
update = true; // Pool order changed, allow the update.
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -427,12 +427,12 @@ impl PoolMeta {
|
||||
for pool in &self.pools {
|
||||
if let Some(decommission) = &pool.decommission {
|
||||
if decommission.complete || decommission.canceled {
|
||||
// 不需要恢复的情况:
|
||||
// - 退役已完成
|
||||
// - 退役已取消
|
||||
// Recovery is not required when:
|
||||
// - Decommissioning completed
|
||||
// - Decommissioning was cancelled
|
||||
continue;
|
||||
}
|
||||
// 其他情况需要恢复
|
||||
// All other scenarios require recovery
|
||||
new_pools.push(pool.clone());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -421,15 +421,15 @@ impl ECStore {
|
||||
if let Some(pool_stat) = meta.pool_stats.get_mut(pool_index) {
|
||||
info!("bucket_rebalance_done: buckets {:?}", &pool_stat.buckets);
|
||||
|
||||
// 使用 retain 来过滤掉要删除的 bucket
|
||||
// Use retain to filter out buckets slated for removal
|
||||
let mut found = false;
|
||||
pool_stat.buckets.retain(|b| {
|
||||
if b.as_str() == bucket.as_str() {
|
||||
found = true;
|
||||
pool_stat.rebalanced_buckets.push(b.clone());
|
||||
false // 删除这个元素
|
||||
false // Remove this element
|
||||
} else {
|
||||
true // 保留这个元素
|
||||
true // Keep this element
|
||||
}
|
||||
});
|
||||
|
||||
@@ -946,13 +946,13 @@ impl ECStore {
|
||||
let mut reader = rd.stream;
|
||||
|
||||
for (i, part) in object_info.parts.iter().enumerate() {
|
||||
// 每次从 reader 中读取一个 part 上传
|
||||
// Read one part from the reader and upload it each time
|
||||
|
||||
let mut chunk = vec![0u8; part.size];
|
||||
|
||||
reader.read_exact(&mut chunk).await?;
|
||||
|
||||
// 每次从 reader 中读取一个 part 上传
|
||||
// Read one part from the reader and upload it each time
|
||||
let mut data = PutObjReader::from_vec(chunk);
|
||||
|
||||
let pi = match self
|
||||
|
||||
@@ -94,11 +94,11 @@ impl S3PeerSys {
|
||||
|
||||
let mut pool_errs = Vec::new();
|
||||
for pool_idx in 0..self.pools_count {
|
||||
let mut per_pool_errs = Vec::new();
|
||||
let mut per_pool_errs = vec![None; self.clients.len()];
|
||||
for (i, client) in self.clients.iter().enumerate() {
|
||||
if let Some(v) = client.get_pools() {
|
||||
if v.contains(&pool_idx) {
|
||||
per_pool_errs.push(errs[i].clone());
|
||||
per_pool_errs[i] = errs[i].clone();
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -129,20 +129,28 @@ impl S3PeerSys {
|
||||
let errs = join_all(futures).await;
|
||||
|
||||
for pool_idx in 0..self.pools_count {
|
||||
let mut per_pool_errs = Vec::new();
|
||||
let mut per_pool_errs = vec![None; self.clients.len()];
|
||||
for (i, client) in self.clients.iter().enumerate() {
|
||||
if let Some(v) = client.get_pools() {
|
||||
if v.contains(&pool_idx) {
|
||||
per_pool_errs.push(errs[i].clone());
|
||||
per_pool_errs[i] = errs[i].clone();
|
||||
}
|
||||
}
|
||||
}
|
||||
let qu = per_pool_errs.len() / 2;
|
||||
if let Some(pool_err) = reduce_write_quorum_errs(&per_pool_errs, BUCKET_OP_IGNORED_ERRS, qu) {
|
||||
tracing::error!("heal_bucket per_pool_errs: {per_pool_errs:?}");
|
||||
tracing::error!("heal_bucket reduce_write_quorum_errs: {pool_err}");
|
||||
return Err(pool_err);
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(err) = reduce_write_quorum_errs(&errs, BUCKET_OP_IGNORED_ERRS, (errs.len() / 2) + 1) {
|
||||
tracing::error!("heal_bucket errs: {errs:?}");
|
||||
tracing::error!("heal_bucket reduce_write_quorum_errs: {err}");
|
||||
return Err(err);
|
||||
}
|
||||
|
||||
for (i, err) in errs.iter().enumerate() {
|
||||
if err.is_none() {
|
||||
return Ok(heal_bucket_results.read().await[i].clone());
|
||||
@@ -157,34 +165,36 @@ impl S3PeerSys {
|
||||
futures.push(cli.make_bucket(bucket, opts));
|
||||
}
|
||||
|
||||
let mut errors = Vec::with_capacity(self.clients.len());
|
||||
let mut errors = vec![None; self.clients.len()];
|
||||
|
||||
let results = join_all(futures).await;
|
||||
for result in results {
|
||||
for (i, result) in results.into_iter().enumerate() {
|
||||
match result {
|
||||
Ok(_) => {
|
||||
errors.push(None);
|
||||
errors[i] = None;
|
||||
}
|
||||
Err(e) => {
|
||||
errors.push(Some(e));
|
||||
errors[i] = Some(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for i in 0..self.pools_count {
|
||||
let mut per_pool_errs = Vec::with_capacity(self.clients.len());
|
||||
let mut per_pool_errs = vec![None; self.clients.len()];
|
||||
for (j, cli) in self.clients.iter().enumerate() {
|
||||
let pools = cli.get_pools();
|
||||
let idx = i;
|
||||
if pools.unwrap_or_default().contains(&idx) {
|
||||
per_pool_errs.push(errors[j].clone());
|
||||
per_pool_errs[j] = errors[j].clone();
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(pool_err) =
|
||||
reduce_write_quorum_errs(&per_pool_errs, BUCKET_OP_IGNORED_ERRS, (per_pool_errs.len() / 2) + 1)
|
||||
{
|
||||
return Err(pool_err);
|
||||
}
|
||||
if let Some(pool_err) =
|
||||
reduce_write_quorum_errs(&per_pool_errs, BUCKET_OP_IGNORED_ERRS, (per_pool_errs.len() / 2) + 1)
|
||||
{
|
||||
tracing::error!("make_bucket per_pool_errs: {per_pool_errs:?}");
|
||||
tracing::error!("make_bucket reduce_write_quorum_errs: {pool_err}");
|
||||
return Err(pool_err);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -196,42 +206,74 @@ impl S3PeerSys {
|
||||
futures.push(cli.list_bucket(opts));
|
||||
}
|
||||
|
||||
let mut errors = Vec::with_capacity(self.clients.len());
|
||||
let mut ress = Vec::with_capacity(self.clients.len());
|
||||
let mut errors = vec![None; self.clients.len()];
|
||||
let mut node_buckets = vec![None; self.clients.len()];
|
||||
|
||||
let results = join_all(futures).await;
|
||||
for result in results {
|
||||
for (i, result) in results.into_iter().enumerate() {
|
||||
match result {
|
||||
Ok(res) => {
|
||||
ress.push(Some(res));
|
||||
errors.push(None);
|
||||
node_buckets[i] = Some(res);
|
||||
errors[i] = None;
|
||||
}
|
||||
Err(e) => {
|
||||
ress.push(None);
|
||||
errors.push(Some(e));
|
||||
}
|
||||
}
|
||||
}
|
||||
// TODO: reduceWriteQuorumErrs
|
||||
// for i in 0..self.pools_count {}
|
||||
|
||||
let mut uniq_map: HashMap<&String, &BucketInfo> = HashMap::new();
|
||||
|
||||
for res in ress.iter() {
|
||||
if res.is_none() {
|
||||
continue;
|
||||
}
|
||||
|
||||
let buckets = res.as_ref().unwrap();
|
||||
|
||||
for bucket in buckets.iter() {
|
||||
if !uniq_map.contains_key(&bucket.name) {
|
||||
uniq_map.insert(&bucket.name, bucket);
|
||||
node_buckets[i] = None;
|
||||
errors[i] = Some(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let buckets: Vec<BucketInfo> = uniq_map.values().map(|&v| v.clone()).collect();
|
||||
let mut result_map: HashMap<&String, BucketInfo> = HashMap::new();
|
||||
for i in 0..self.pools_count {
|
||||
let mut per_pool_errs = vec![None; self.clients.len()];
|
||||
for (j, cli) in self.clients.iter().enumerate() {
|
||||
let pools = cli.get_pools();
|
||||
let idx = i;
|
||||
if pools.unwrap_or_default().contains(&idx) {
|
||||
per_pool_errs[j] = errors[j].clone();
|
||||
}
|
||||
}
|
||||
|
||||
let quorum = per_pool_errs.len() / 2;
|
||||
|
||||
if let Some(pool_err) = reduce_write_quorum_errs(&per_pool_errs, BUCKET_OP_IGNORED_ERRS, quorum) {
|
||||
tracing::error!("list_bucket per_pool_errs: {per_pool_errs:?}");
|
||||
tracing::error!("list_bucket reduce_write_quorum_errs: {pool_err}");
|
||||
return Err(pool_err);
|
||||
}
|
||||
|
||||
let mut bucket_map: HashMap<&String, usize> = HashMap::new();
|
||||
for (j, node_bucket) in node_buckets.iter().enumerate() {
|
||||
if let Some(buckets) = node_bucket.as_ref() {
|
||||
if buckets.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
if !self.clients[j].get_pools().unwrap_or_default().contains(&i) {
|
||||
continue;
|
||||
}
|
||||
|
||||
for bucket in buckets.iter() {
|
||||
if result_map.contains_key(&bucket.name) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// incr bucket_map count create if not exists
|
||||
let count = bucket_map.entry(&bucket.name).or_insert(0usize);
|
||||
*count += 1;
|
||||
|
||||
if *count >= quorum {
|
||||
result_map.insert(&bucket.name, bucket.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// TODO: MRF
|
||||
}
|
||||
|
||||
let mut buckets: Vec<BucketInfo> = result_map.into_values().collect();
|
||||
|
||||
buckets.sort_by_key(|b| b.name.clone());
|
||||
|
||||
Ok(buckets)
|
||||
}
|
||||
@@ -241,22 +283,27 @@ impl S3PeerSys {
|
||||
futures.push(cli.delete_bucket(bucket, opts));
|
||||
}
|
||||
|
||||
let mut errors = Vec::with_capacity(self.clients.len());
|
||||
let mut errors = vec![None; self.clients.len()];
|
||||
|
||||
let results = join_all(futures).await;
|
||||
|
||||
for result in results {
|
||||
for (i, result) in results.into_iter().enumerate() {
|
||||
match result {
|
||||
Ok(_) => {
|
||||
errors.push(None);
|
||||
errors[i] = None;
|
||||
}
|
||||
Err(e) => {
|
||||
errors.push(Some(e));
|
||||
errors[i] = Some(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: reduceWriteQuorumErrs
|
||||
if let Some(err) = reduce_write_quorum_errs(&errors, BUCKET_OP_IGNORED_ERRS, (errors.len() / 2) + 1) {
|
||||
if !Error::is_err_object_not_found(&err) && !opts.no_recreate {
|
||||
let _ = self.make_bucket(bucket, &MakeBucketOptions::default()).await;
|
||||
}
|
||||
return Err(err);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -266,37 +313,44 @@ impl S3PeerSys {
|
||||
futures.push(cli.get_bucket_info(bucket, opts));
|
||||
}
|
||||
|
||||
let mut ress = Vec::with_capacity(self.clients.len());
|
||||
let mut errors = Vec::with_capacity(self.clients.len());
|
||||
let mut ress = vec![None; self.clients.len()];
|
||||
let mut errors = vec![None; self.clients.len()];
|
||||
|
||||
let results = join_all(futures).await;
|
||||
for result in results {
|
||||
for (i, result) in results.into_iter().enumerate() {
|
||||
match result {
|
||||
Ok(res) => {
|
||||
ress.push(Some(res));
|
||||
errors.push(None);
|
||||
ress[i] = Some(res);
|
||||
errors[i] = None;
|
||||
}
|
||||
Err(e) => {
|
||||
ress.push(None);
|
||||
errors.push(Some(e));
|
||||
ress[i] = None;
|
||||
errors[i] = Some(e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for i in 0..self.pools_count {
|
||||
let mut per_pool_errs = Vec::with_capacity(self.clients.len());
|
||||
let mut per_pool_errs = vec![None; self.clients.len()];
|
||||
for (j, cli) in self.clients.iter().enumerate() {
|
||||
let pools = cli.get_pools();
|
||||
let idx = i;
|
||||
if pools.unwrap_or_default().contains(&idx) {
|
||||
per_pool_errs.push(errors[j].as_ref());
|
||||
per_pool_errs[j] = errors[j].clone();
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: reduceWriteQuorumErrs
|
||||
if let Some(pool_err) =
|
||||
reduce_write_quorum_errs(&per_pool_errs, BUCKET_OP_IGNORED_ERRS, (per_pool_errs.len() / 2) + 1)
|
||||
{
|
||||
return Err(pool_err);
|
||||
}
|
||||
}
|
||||
|
||||
ress.iter().find_map(|op| op.clone()).ok_or(Error::VolumeNotFound)
|
||||
ress.into_iter()
|
||||
.filter(|op| op.is_some())
|
||||
.find_map(|op| op.clone())
|
||||
.ok_or(Error::VolumeNotFound)
|
||||
}
|
||||
|
||||
pub fn get_pools(&self) -> Option<Vec<usize>> {
|
||||
@@ -482,7 +536,7 @@ impl PeerS3Client for LocalPeerS3Client {
|
||||
}
|
||||
}
|
||||
|
||||
// errVolumeNotEmpty 不删除,把已经删除的重新创建
|
||||
// For errVolumeNotEmpty, do not delete; recreate only the entries already removed
|
||||
|
||||
for (idx, err) in errs.into_iter().enumerate() {
|
||||
if err.is_none() && recreate {
|
||||
|
||||
@@ -83,7 +83,7 @@ impl DiskAPI for RemoteDisk {
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn is_online(&self) -> bool {
|
||||
// TODO: 连接状态
|
||||
// TODO: connection status tracking
|
||||
if node_service_time_out_client(&self.addr).await.is_ok() {
|
||||
return true;
|
||||
}
|
||||
|
||||
@@ -31,13 +31,15 @@ use crate::disk::{
|
||||
use crate::erasure_coding;
|
||||
use crate::erasure_coding::bitrot_verify;
|
||||
use crate::error::{Error, Result, is_err_version_not_found};
|
||||
use crate::error::{ObjectApiError, is_err_object_not_found};
|
||||
use crate::error::{GenericError, ObjectApiError, is_err_object_not_found};
|
||||
use crate::global::{GLOBAL_LocalNodeName, GLOBAL_TierConfigMgr};
|
||||
use crate::store_api::ListObjectVersionsInfo;
|
||||
use crate::store_api::{ListPartsInfo, ObjectOptions, ObjectToDelete};
|
||||
use crate::store_api::{ObjectInfoOrErr, WalkOptions};
|
||||
use crate::{
|
||||
bucket::lifecycle::bucket_lifecycle_ops::{gen_transition_objname, get_transitioned_object_reader, put_restore_opts},
|
||||
bucket::lifecycle::bucket_lifecycle_ops::{
|
||||
LifecycleOps, gen_transition_objname, get_transitioned_object_reader, put_restore_opts,
|
||||
},
|
||||
cache_value::metacache_set::{ListPathRawOptions, list_path_raw},
|
||||
config::{GLOBAL_STORAGE_CLASS, storageclass},
|
||||
disk::{
|
||||
@@ -73,9 +75,9 @@ use rustfs_filemeta::{
|
||||
use rustfs_lock::fast_lock::types::LockResult;
|
||||
use rustfs_madmin::heal_commands::{HealDriveInfo, HealResultItem};
|
||||
use rustfs_rio::{EtagResolvable, HashReader, HashReaderMut, TryGetIndex as _, WarpReader};
|
||||
use rustfs_utils::http::headers::AMZ_OBJECT_TAGGING;
|
||||
use rustfs_utils::http::RUSTFS_BUCKET_REPLICATION_SSEC_CHECKSUM;
|
||||
use rustfs_utils::http::headers::AMZ_STORAGE_CLASS;
|
||||
use rustfs_utils::http::headers::RESERVED_METADATA_PREFIX_LOWER;
|
||||
use rustfs_utils::http::headers::{AMZ_OBJECT_TAGGING, RESERVED_METADATA_PREFIX, RESERVED_METADATA_PREFIX_LOWER};
|
||||
use rustfs_utils::{
|
||||
HashAlgorithm,
|
||||
crypto::hex,
|
||||
@@ -96,7 +98,7 @@ use std::{
|
||||
};
|
||||
use time::OffsetDateTime;
|
||||
use tokio::{
|
||||
io::AsyncWrite,
|
||||
io::{AsyncReadExt, AsyncWrite, AsyncWriteExt, BufReader},
|
||||
sync::{RwLock, broadcast},
|
||||
};
|
||||
use tokio::{
|
||||
@@ -399,7 +401,7 @@ impl SetDisks {
|
||||
|
||||
let mut futures = Vec::with_capacity(disks.len());
|
||||
if let Some(ret_err) = reduce_write_quorum_errs(&errs, OBJECT_OP_IGNORED_ERRS, write_quorum) {
|
||||
// TODO: 并发
|
||||
// TODO: add concurrency
|
||||
for (i, err) in errs.iter().enumerate() {
|
||||
if err.is_some() {
|
||||
continue;
|
||||
@@ -889,7 +891,7 @@ impl SetDisks {
|
||||
}
|
||||
|
||||
if let Some(err) = reduce_write_quorum_errs(&errs, OBJECT_OP_IGNORED_ERRS, write_quorum) {
|
||||
// TODO: 并发
|
||||
// TODO: add concurrency
|
||||
for (i, err) in errs.iter().enumerate() {
|
||||
if err.is_some() {
|
||||
continue;
|
||||
@@ -1698,7 +1700,7 @@ impl SetDisks {
|
||||
|
||||
let disks = rl.clone();
|
||||
|
||||
// 主动释放锁
|
||||
// Explicitly release the lock
|
||||
drop(rl);
|
||||
|
||||
for (i, opdisk) in disks.iter().enumerate() {
|
||||
@@ -1742,7 +1744,7 @@ impl SetDisks {
|
||||
}
|
||||
};
|
||||
|
||||
// check endpoint 是否一致
|
||||
// Check that the endpoint matches
|
||||
|
||||
let _ = new_disk.set_disk_id(Some(fm.erasure.this)).await;
|
||||
|
||||
@@ -1957,7 +1959,7 @@ impl SetDisks {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// 打乱顺序
|
||||
// Shuffle the order
|
||||
fn shuffle_disks_and_parts_metadata_by_index(
|
||||
disks: &[Option<DiskStore>],
|
||||
parts_metadata: &[FileInfo],
|
||||
@@ -1996,7 +1998,7 @@ impl SetDisks {
|
||||
Self::shuffle_disks_and_parts_metadata(disks, parts_metadata, fi)
|
||||
}
|
||||
|
||||
// 打乱顺序
|
||||
// Shuffle the order
|
||||
fn shuffle_disks_and_parts_metadata(
|
||||
disks: &[Option<DiskStore>],
|
||||
parts_metadata: &[FileInfo],
|
||||
@@ -2073,7 +2075,7 @@ impl SetDisks {
|
||||
|
||||
let vid = opts.version_id.clone().unwrap_or_default();
|
||||
|
||||
// TODO: 优化并发 可用数量中断
|
||||
// TODO: optimize concurrency and break once enough slots are available
|
||||
let (parts_metadata, errs) = Self::read_all_fileinfo(&disks, "", bucket, object, vid.as_str(), read_data, false).await?;
|
||||
// warn!("get_object_fileinfo parts_metadata {:?}", &parts_metadata);
|
||||
// warn!("get_object_fileinfo {}/{} errs {:?}", bucket, object, &errs);
|
||||
@@ -3419,7 +3421,7 @@ impl SetDisks {
|
||||
oi.user_defined.remove(X_AMZ_RESTORE.as_str());
|
||||
|
||||
let version_id = oi.version_id.map(|v| v.to_string());
|
||||
let obj = self
|
||||
let _obj = self
|
||||
.copy_object(
|
||||
bucket,
|
||||
object,
|
||||
@@ -3435,8 +3437,7 @@ impl SetDisks {
|
||||
..Default::default()
|
||||
},
|
||||
)
|
||||
.await;
|
||||
obj?;
|
||||
.await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -3536,7 +3537,10 @@ impl ObjectIO for SetDisks {
|
||||
return Ok(reader);
|
||||
}
|
||||
|
||||
// TODO: remote
|
||||
if object_info.is_remote() {
|
||||
let gr = get_transitioned_object_reader(bucket, object, &range, &h, &object_info, opts).await?;
|
||||
return Ok(gr);
|
||||
}
|
||||
|
||||
let (rd, wd) = tokio::io::duplex(DEFAULT_READ_BUFFER_SIZE);
|
||||
|
||||
@@ -3718,7 +3722,7 @@ impl ObjectIO for SetDisks {
|
||||
error!("encode err {:?}", e);
|
||||
return Err(e.into());
|
||||
}
|
||||
}; // TODO: 出错,删除临时目录
|
||||
}; // TODO: delete temporary directory on error
|
||||
|
||||
let _ = mem::replace(&mut data.stream, reader);
|
||||
// if let Err(err) = close_bitrot_writers(&mut writers).await {
|
||||
@@ -4046,7 +4050,7 @@ impl StorageAPI for SetDisks {
|
||||
objects: Vec<ObjectToDelete>,
|
||||
opts: ObjectOptions,
|
||||
) -> (Vec<DeletedObject>, Vec<Option<Error>>) {
|
||||
// 默认返回值
|
||||
// Default return value
|
||||
let mut del_objects = vec![DeletedObject::default(); objects.len()];
|
||||
|
||||
let mut del_errs = Vec::with_capacity(objects.len());
|
||||
@@ -4103,7 +4107,7 @@ impl StorageAPI for SetDisks {
|
||||
|
||||
vr.set_tier_free_version_id(&Uuid::new_v4().to_string());
|
||||
|
||||
// 删除
|
||||
// Delete
|
||||
// del_objects[i].object_name.clone_from(&vr.name);
|
||||
// del_objects[i].version_id = vr.version_id.map(|v| v.to_string());
|
||||
|
||||
@@ -4196,9 +4200,9 @@ impl StorageAPI for SetDisks {
|
||||
|
||||
let mut del_obj_errs: Vec<Vec<Option<DiskError>>> = vec![vec![None; objects.len()]; disks.len()];
|
||||
|
||||
// 每个磁盘, 删除所有对象
|
||||
// For each disk delete all objects
|
||||
for (disk_idx, errors) in results.into_iter().enumerate() {
|
||||
// 所有对象的删除结果
|
||||
// Deletion results for all objects
|
||||
for idx in 0..vers.len() {
|
||||
if errors[idx].is_some() {
|
||||
for fi in vers[idx].versions.iter() {
|
||||
@@ -4565,7 +4569,7 @@ impl StorageAPI for SetDisks {
|
||||
let tgt_client = match tier_config_mgr.get_driver(&opts.transition.tier).await {
|
||||
Ok(client) => client,
|
||||
Err(err) => {
|
||||
return Err(Error::other(err.to_string()));
|
||||
return Err(Error::other(format!("remote tier error: {}", err)));
|
||||
}
|
||||
};
|
||||
|
||||
@@ -4594,10 +4598,10 @@ impl StorageAPI for SetDisks {
|
||||
// Normalize ETags by removing quotes before comparison (PR #592 compatibility)
|
||||
let transition_etag = rustfs_utils::path::trim_etag(&opts.transition.etag);
|
||||
let stored_etag = rustfs_utils::path::trim_etag(&get_raw_etag(&fi.metadata));
|
||||
if !opts.mod_time.expect("err").unix_timestamp() == fi.mod_time.as_ref().expect("err").unix_timestamp()
|
||||
if opts.mod_time.expect("err").unix_timestamp() != fi.mod_time.as_ref().expect("err").unix_timestamp()
|
||||
|| transition_etag != stored_etag
|
||||
{
|
||||
return Err(to_object_err(Error::from(DiskError::FileNotFound), vec![bucket, object]));
|
||||
return Err(to_object_err(Error::other(DiskError::FileNotFound), vec![bucket, object]));
|
||||
}
|
||||
if fi.transition_status == TRANSITION_COMPLETE {
|
||||
return Ok(());
|
||||
@@ -4699,7 +4703,7 @@ impl StorageAPI for SetDisks {
|
||||
}
|
||||
|
||||
#[tracing::instrument(level = "debug", skip(self))]
|
||||
async fn restore_transitioned_object(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
|
||||
async fn restore_transitioned_object(self: Arc<Self>, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
|
||||
// Acquire write-lock early for the restore operation
|
||||
// if !opts.no_lock {
|
||||
// let guard_opt = self
|
||||
@@ -4711,6 +4715,7 @@ impl StorageAPI for SetDisks {
|
||||
// }
|
||||
// _lock_guard = guard_opt;
|
||||
// }
|
||||
let self_ = self.clone();
|
||||
let set_restore_header_fn = async move |oi: &mut ObjectInfo, rerr: Option<Error>| -> Result<()> {
|
||||
if rerr.is_none() {
|
||||
return Ok(());
|
||||
@@ -4719,54 +4724,79 @@ impl StorageAPI for SetDisks {
|
||||
Err(rerr.unwrap())
|
||||
};
|
||||
let mut oi = ObjectInfo::default();
|
||||
let fi = self.get_object_fileinfo(bucket, object, opts, true).await;
|
||||
let fi = self_.clone().get_object_fileinfo(bucket, object, opts, true).await;
|
||||
if let Err(err) = fi {
|
||||
return set_restore_header_fn(&mut oi, Some(to_object_err(err, vec![bucket, object]))).await;
|
||||
}
|
||||
let (actual_fi, _, _) = fi.unwrap();
|
||||
|
||||
oi = ObjectInfo::from_file_info(&actual_fi, bucket, object, opts.versioned || opts.version_suspended);
|
||||
let ropts = put_restore_opts(bucket, object, &opts.transition.restore_request, &oi);
|
||||
/*if oi.parts.len() == 1 {
|
||||
let mut rs: HTTPRangeSpec;
|
||||
let gr = get_transitioned_object_reader(bucket, object, rs, HeaderMap::new(), oi, opts);
|
||||
//if err != nil {
|
||||
// return set_restore_header_fn(&mut oi, Some(toObjectErr(err, bucket, object)));
|
||||
//}
|
||||
let hash_reader = HashReader::new(gr, gr.obj_info.size, "", "", gr.obj_info.size);
|
||||
let p_reader = PutObjReader::new(StreamingBlob::from(Box::pin(hash_reader)), hash_reader.size());
|
||||
if let Err(err) = self.put_object(bucket, object, &mut p_reader, &ropts).await {
|
||||
return set_restore_header_fn(&mut oi, Some(to_object_err(err, vec![bucket, object])));
|
||||
let ropts = put_restore_opts(bucket, object, &opts.transition.restore_request, &oi).await?;
|
||||
if oi.parts.len() == 1 {
|
||||
let rs: Option<HTTPRangeSpec> = None;
|
||||
let gr = get_transitioned_object_reader(bucket, object, &rs, &HeaderMap::new(), &oi, opts).await;
|
||||
if let Err(err) = gr {
|
||||
return set_restore_header_fn(&mut oi, Some(to_object_err(err.into(), vec![bucket, object]))).await;
|
||||
}
|
||||
let gr = gr.unwrap();
|
||||
let reader = BufReader::new(gr.stream);
|
||||
let hash_reader = HashReader::new(
|
||||
Box::new(WarpReader::new(reader)),
|
||||
gr.object_info.size,
|
||||
gr.object_info.size,
|
||||
None,
|
||||
None,
|
||||
false,
|
||||
)?;
|
||||
let mut p_reader = PutObjReader::new(hash_reader);
|
||||
if let Err(err) = self_.clone().put_object(bucket, object, &mut p_reader, &ropts).await {
|
||||
return set_restore_header_fn(&mut oi, Some(to_object_err(err, vec![bucket, object]))).await;
|
||||
} else {
|
||||
return Ok(());
|
||||
}
|
||||
}
|
||||
|
||||
let res = self.new_multipart_upload(bucket, object, &ropts).await?;
|
||||
let res = self_.clone().new_multipart_upload(bucket, object, &ropts).await?;
|
||||
//if err != nil {
|
||||
// return set_restore_header_fn(&mut oi, err);
|
||||
// return set_restore_header_fn(&mut oi, err).await;
|
||||
//}
|
||||
|
||||
let mut uploaded_parts: Vec<CompletePart> = vec![];
|
||||
let mut rs: HTTPRangeSpec;
|
||||
let gr = get_transitioned_object_reader(bucket, object, rs, HeaderMap::new(), oi, opts).await?;
|
||||
//if err != nil {
|
||||
// return set_restore_header_fn(&mut oi, err);
|
||||
//}
|
||||
let rs: Option<HTTPRangeSpec> = None;
|
||||
let gr = get_transitioned_object_reader(bucket, object, &rs, &HeaderMap::new(), &oi, opts).await;
|
||||
if let Err(err) = gr {
|
||||
return set_restore_header_fn(&mut oi, Some(StorageError::Io(err))).await;
|
||||
}
|
||||
let gr = gr.unwrap();
|
||||
|
||||
for part_info in oi.parts {
|
||||
//let hr = HashReader::new(LimitReader(gr, part_info.size), part_info.size, "", "", part_info.size);
|
||||
let hr = HashReader::new(gr, part_info.size as i64, part_info.size as i64, None, false);
|
||||
//if err != nil {
|
||||
// return set_restore_header_fn(&mut oi, err);
|
||||
//}
|
||||
let mut p_reader = PutObjReader::new(hr, hr.size());
|
||||
let p_info = self.put_object_part(bucket, object, &res.upload_id, part_info.number, &mut p_reader, &ObjectOptions::default()).await?;
|
||||
for part_info in &oi.parts {
|
||||
let reader = BufReader::new(Cursor::new(vec![] /*gr.stream*/));
|
||||
let hash_reader = HashReader::new(
|
||||
Box::new(WarpReader::new(reader)),
|
||||
part_info.size as i64,
|
||||
part_info.size as i64,
|
||||
None,
|
||||
None,
|
||||
false,
|
||||
)?;
|
||||
let mut p_reader = PutObjReader::new(hash_reader);
|
||||
let p_info = self_
|
||||
.clone()
|
||||
.put_object_part(bucket, object, &res.upload_id, part_info.number, &mut p_reader, &ObjectOptions::default())
|
||||
.await?;
|
||||
//if let Err(err) = p_info {
|
||||
// return set_restore_header_fn(&mut oi, err);
|
||||
// return set_restore_header_fn(&mut oi, err).await;
|
||||
//}
|
||||
if p_info.size != part_info.size {
|
||||
return set_restore_header_fn(&mut oi, Some(Error::from(ObjectApiError::InvalidObjectState(GenericError{bucket: bucket.to_string(), object: object.to_string(), ..Default::default()}))));
|
||||
return set_restore_header_fn(
|
||||
&mut oi,
|
||||
Some(Error::other(ObjectApiError::InvalidObjectState(GenericError {
|
||||
bucket: bucket.to_string(),
|
||||
object: object.to_string(),
|
||||
..Default::default()
|
||||
}))),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
uploaded_parts.push(CompletePart {
|
||||
part_num: p_info.part_num,
|
||||
@@ -4778,12 +4808,22 @@ impl StorageAPI for SetDisks {
|
||||
checksum_crc64nvme: None,
|
||||
});
|
||||
}
|
||||
if let Err(err) = self.complete_multipart_upload(bucket, object, &res.upload_id, uploaded_parts, &ObjectOptions {
|
||||
mod_time: oi.mod_time,
|
||||
..Default::default()
|
||||
}).await {
|
||||
set_restore_header_fn(&mut oi, Some(err));
|
||||
}*/
|
||||
if let Err(err) = self_
|
||||
.clone()
|
||||
.complete_multipart_upload(
|
||||
bucket,
|
||||
object,
|
||||
&res.upload_id,
|
||||
uploaded_parts,
|
||||
&ObjectOptions {
|
||||
mod_time: oi.mod_time,
|
||||
..Default::default()
|
||||
},
|
||||
)
|
||||
.await
|
||||
{
|
||||
return set_restore_header_fn(&mut oi, Some(err)).await;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -4924,7 +4964,7 @@ impl StorageAPI for SetDisks {
|
||||
HashReader::new(Box::new(WarpReader::new(Cursor::new(Vec::new()))), 0, 0, None, None, false)?,
|
||||
);
|
||||
|
||||
let (reader, w_size) = Arc::new(erasure).encode(stream, &mut writers, write_quorum).await?; // TODO: 出错,删除临时目录
|
||||
let (reader, w_size) = Arc::new(erasure).encode(stream, &mut writers, write_quorum).await?; // TODO: delete temporary directory on error
|
||||
|
||||
let _ = mem::replace(&mut data.stream, reader);
|
||||
|
||||
@@ -4953,6 +4993,8 @@ impl StorageAPI for SetDisks {
|
||||
}
|
||||
}
|
||||
|
||||
let checksums = data.as_hash_reader().content_crc();
|
||||
|
||||
let part_info = ObjectPartInfo {
|
||||
etag: etag.clone(),
|
||||
number: part_id,
|
||||
@@ -4960,13 +5002,10 @@ impl StorageAPI for SetDisks {
|
||||
mod_time: Some(OffsetDateTime::now_utc()),
|
||||
actual_size,
|
||||
index: index_op,
|
||||
checksums: if checksums.is_empty() { None } else { Some(checksums) },
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// debug!("put_object_part part_info {:?}", part_info);
|
||||
|
||||
// fi.parts = vec![part_info.clone()];
|
||||
|
||||
let part_info_buff = part_info.marshal_msg()?;
|
||||
|
||||
drop(writers); // drop writers to close all files
|
||||
@@ -5317,7 +5356,13 @@ impl StorageAPI for SetDisks {
|
||||
}
|
||||
|
||||
fi.data_dir = Some(Uuid::new_v4());
|
||||
fi.fresh = true;
|
||||
|
||||
if let Some(cssum) = user_defined.get(RUSTFS_BUCKET_REPLICATION_SSEC_CHECKSUM)
|
||||
&& !cssum.is_empty()
|
||||
{
|
||||
fi.checksum = base64_simd::STANDARD.decode_to_vec(cssum).ok().map(Bytes::from);
|
||||
user_defined.remove(RUSTFS_BUCKET_REPLICATION_SSEC_CHECKSUM);
|
||||
}
|
||||
|
||||
let parts_metadata = vec![fi.clone(); disks.len()];
|
||||
|
||||
@@ -5343,10 +5388,10 @@ impl StorageAPI for SetDisks {
|
||||
|
||||
let mod_time = opts.mod_time.unwrap_or(OffsetDateTime::now_utc());
|
||||
|
||||
for fi in parts_metadatas.iter_mut() {
|
||||
fi.metadata = user_defined.clone();
|
||||
fi.mod_time = Some(mod_time);
|
||||
fi.fresh = true;
|
||||
for f in parts_metadatas.iter_mut() {
|
||||
f.metadata = user_defined.clone();
|
||||
f.mod_time = Some(mod_time);
|
||||
f.fresh = true;
|
||||
}
|
||||
|
||||
// fi.mod_time = Some(now);
|
||||
@@ -5408,7 +5453,7 @@ impl StorageAPI for SetDisks {
|
||||
|
||||
self.delete_all(RUSTFS_META_MULTIPART_BUCKET, &upload_id_path).await
|
||||
}
|
||||
// complete_multipart_upload 完成
|
||||
// complete_multipart_upload finished
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn complete_multipart_upload(
|
||||
self: Arc<Self>,
|
||||
@@ -5463,23 +5508,27 @@ impl StorageAPI for SetDisks {
|
||||
return Err(Error::other("part result number err"));
|
||||
}
|
||||
|
||||
let mut checksum_type = rustfs_rio::ChecksumType::NONE;
|
||||
|
||||
if let Some(cs) = fi.metadata.get(rustfs_rio::RUSTFS_MULTIPART_CHECKSUM) {
|
||||
let Some(checksum_type) = fi.metadata.get(rustfs_rio::RUSTFS_MULTIPART_CHECKSUM_TYPE) else {
|
||||
let Some(ct) = fi.metadata.get(rustfs_rio::RUSTFS_MULTIPART_CHECKSUM_TYPE) else {
|
||||
return Err(Error::other("checksum type not found"));
|
||||
};
|
||||
|
||||
if opts.want_checksum.is_some()
|
||||
&& !opts.want_checksum.as_ref().is_some_and(|v| {
|
||||
v.checksum_type
|
||||
.is(rustfs_rio::ChecksumType::from_string_with_obj_type(cs, checksum_type))
|
||||
.is(rustfs_rio::ChecksumType::from_string_with_obj_type(cs, ct))
|
||||
})
|
||||
{
|
||||
return Err(Error::other(format!(
|
||||
"checksum type mismatch, got {:?}, want {:?}",
|
||||
opts.want_checksum.as_ref().unwrap(),
|
||||
rustfs_rio::ChecksumType::from_string_with_obj_type(cs, checksum_type)
|
||||
rustfs_rio::ChecksumType::from_string_with_obj_type(cs, ct)
|
||||
)));
|
||||
}
|
||||
|
||||
checksum_type = rustfs_rio::ChecksumType::from_string_with_obj_type(cs, ct);
|
||||
}
|
||||
|
||||
for (i, part) in object_parts.iter().enumerate() {
|
||||
@@ -5515,6 +5564,12 @@ impl StorageAPI for SetDisks {
|
||||
let mut object_size: usize = 0;
|
||||
let mut object_actual_size: i64 = 0;
|
||||
|
||||
let mut checksum_combined = bytes::BytesMut::new();
|
||||
let mut checksum = rustfs_rio::Checksum {
|
||||
checksum_type,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
for (i, p) in uploaded_parts.iter().enumerate() {
|
||||
let has_part = curr_fi.parts.iter().find(|v| v.number == p.part_num);
|
||||
if has_part.is_none() {
|
||||
@@ -5555,6 +5610,75 @@ impl StorageAPI for SetDisks {
|
||||
));
|
||||
}
|
||||
|
||||
if checksum_type.is_set() {
|
||||
let Some(crc) = ext_part
|
||||
.checksums
|
||||
.as_ref()
|
||||
.and_then(|f| f.get(checksum_type.to_string().as_str()))
|
||||
.cloned()
|
||||
else {
|
||||
error!(
|
||||
"complete_multipart_upload fi.checksum not found type={checksum_type}, part_id={}, bucket={}, object={}",
|
||||
p.part_num, bucket, object
|
||||
);
|
||||
return Err(Error::InvalidPart(p.part_num, ext_part.etag.clone(), p.etag.clone().unwrap_or_default()));
|
||||
};
|
||||
|
||||
let part_crc = match checksum_type {
|
||||
rustfs_rio::ChecksumType::SHA256 => p.checksum_sha256.clone(),
|
||||
rustfs_rio::ChecksumType::SHA1 => p.checksum_sha1.clone(),
|
||||
rustfs_rio::ChecksumType::CRC32 => p.checksum_crc32.clone(),
|
||||
rustfs_rio::ChecksumType::CRC32C => p.checksum_crc32c.clone(),
|
||||
rustfs_rio::ChecksumType::CRC64_NVME => p.checksum_crc64nvme.clone(),
|
||||
_ => {
|
||||
error!(
|
||||
"complete_multipart_upload checksum type={checksum_type}, part_id={}, bucket={}, object={}",
|
||||
p.part_num, bucket, object
|
||||
);
|
||||
return Err(Error::InvalidPart(p.part_num, ext_part.etag.clone(), p.etag.clone().unwrap_or_default()));
|
||||
}
|
||||
};
|
||||
|
||||
if part_crc.clone().unwrap_or_default() != crc {
|
||||
error!("complete_multipart_upload checksum_type={checksum_type:?}, part_crc={part_crc:?}, crc={crc:?}");
|
||||
error!(
|
||||
"complete_multipart_upload checksum mismatch part_id={}, bucket={}, object={}",
|
||||
p.part_num, bucket, object
|
||||
);
|
||||
return Err(Error::InvalidPart(p.part_num, ext_part.etag.clone(), p.etag.clone().unwrap_or_default()));
|
||||
}
|
||||
|
||||
let Some(cs) = rustfs_rio::Checksum::new_with_type(checksum_type, &crc) else {
|
||||
error!(
|
||||
"complete_multipart_upload checksum new_with_type failed part_id={}, bucket={}, object={}",
|
||||
p.part_num, bucket, object
|
||||
);
|
||||
return Err(Error::InvalidPart(p.part_num, ext_part.etag.clone(), p.etag.clone().unwrap_or_default()));
|
||||
};
|
||||
|
||||
if !cs.valid() {
|
||||
error!(
|
||||
"complete_multipart_upload checksum valid failed part_id={}, bucket={}, object={}",
|
||||
p.part_num, bucket, object
|
||||
);
|
||||
return Err(Error::InvalidPart(p.part_num, ext_part.etag.clone(), p.etag.clone().unwrap_or_default()));
|
||||
}
|
||||
|
||||
if checksum_type.full_object_requested() {
|
||||
if let Err(err) = checksum.add_part(&cs, ext_part.actual_size) {
|
||||
error!(
|
||||
"complete_multipart_upload checksum add_part failed part_id={}, bucket={}, object={}",
|
||||
p.part_num, bucket, object
|
||||
);
|
||||
return Err(Error::InvalidPart(p.part_num, ext_part.etag.clone(), p.etag.clone().unwrap_or_default()));
|
||||
}
|
||||
}
|
||||
|
||||
checksum_combined.extend_from_slice(cs.raw.as_slice());
|
||||
}
|
||||
|
||||
// TODO: check min part size
|
||||
|
||||
object_size += ext_part.size;
|
||||
object_actual_size += ext_part.actual_size;
|
||||
|
||||
@@ -5569,6 +5693,52 @@ impl StorageAPI for SetDisks {
|
||||
});
|
||||
}
|
||||
|
||||
if let Some(wtcs) = opts.want_checksum.as_ref() {
|
||||
if checksum_type.full_object_requested() {
|
||||
if wtcs.encoded != checksum.encoded {
|
||||
error!(
|
||||
"complete_multipart_upload checksum mismatch want={}, got={}",
|
||||
wtcs.encoded, checksum.encoded
|
||||
);
|
||||
return Err(Error::other(format!(
|
||||
"complete_multipart_upload checksum mismatch want={}, got={}",
|
||||
wtcs.encoded, checksum.encoded
|
||||
)));
|
||||
}
|
||||
} else if let Err(err) = wtcs.matches(&checksum_combined, uploaded_parts.len() as i32) {
|
||||
error!(
|
||||
"complete_multipart_upload checksum matches failed want={}, got={}",
|
||||
wtcs.encoded, checksum.encoded
|
||||
);
|
||||
return Err(Error::other(format!(
|
||||
"complete_multipart_upload checksum matches failed want={}, got={}",
|
||||
wtcs.encoded, checksum.encoded
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(rc_crc) = opts.user_defined.get(RUSTFS_BUCKET_REPLICATION_SSEC_CHECKSUM) {
|
||||
if let Ok(rc_crc_bytes) = base64_simd::STANDARD.decode_to_vec(rc_crc) {
|
||||
fi.checksum = Some(Bytes::from(rc_crc_bytes));
|
||||
} else {
|
||||
error!("complete_multipart_upload decode rc_crc failed rc_crc={}", rc_crc);
|
||||
}
|
||||
}
|
||||
|
||||
if checksum_type.is_set() {
|
||||
checksum_type
|
||||
.merge(rustfs_rio::ChecksumType::MULTIPART)
|
||||
.merge(rustfs_rio::ChecksumType::INCLUDES_MULTIPART);
|
||||
if !checksum_type.full_object_requested() {
|
||||
checksum = rustfs_rio::Checksum::new_from_data(checksum_type, &checksum_combined)
|
||||
.ok_or_else(|| Error::other("checksum new_from_data failed"))?;
|
||||
}
|
||||
fi.checksum = Some(checksum.to_bytes(&checksum_combined));
|
||||
}
|
||||
|
||||
fi.metadata.remove(rustfs_rio::RUSTFS_MULTIPART_CHECKSUM);
|
||||
fi.metadata.remove(rustfs_rio::RUSTFS_MULTIPART_CHECKSUM_TYPE);
|
||||
|
||||
fi.size = object_size as i64;
|
||||
fi.mod_time = opts.mod_time;
|
||||
if fi.mod_time.is_none() {
|
||||
@@ -5586,11 +5756,22 @@ impl StorageAPI for SetDisks {
|
||||
|
||||
fi.metadata.insert("etag".to_owned(), etag);
|
||||
|
||||
fi.metadata
|
||||
.insert(format!("{RESERVED_METADATA_PREFIX_LOWER}actual-size"), object_actual_size.to_string());
|
||||
|
||||
fi.metadata
|
||||
.insert("x-rustfs-encryption-original-size".to_string(), object_actual_size.to_string());
|
||||
if opts.replication_request {
|
||||
if let Some(actual_size) = opts
|
||||
.user_defined
|
||||
.get(format!("{RESERVED_METADATA_PREFIX_LOWER}Actual-Object-Size").as_str())
|
||||
{
|
||||
fi.metadata
|
||||
.insert(format!("{RESERVED_METADATA_PREFIX}actual-size"), actual_size.clone());
|
||||
fi.metadata
|
||||
.insert("x-rustfs-encryption-original-size".to_string(), actual_size.to_string());
|
||||
}
|
||||
} else {
|
||||
fi.metadata
|
||||
.insert(format!("{RESERVED_METADATA_PREFIX}actual-size"), object_actual_size.to_string());
|
||||
fi.metadata
|
||||
.insert("x-rustfs-encryption-original-size".to_string(), object_actual_size.to_string());
|
||||
}
|
||||
|
||||
if fi.is_compressed() {
|
||||
fi.metadata
|
||||
@@ -5601,9 +5782,6 @@ impl StorageAPI for SetDisks {
|
||||
fi.set_data_moved();
|
||||
}
|
||||
|
||||
// TODO: object_actual_size
|
||||
let _ = object_actual_size;
|
||||
|
||||
for meta in parts_metadatas.iter_mut() {
|
||||
if meta.is_valid() {
|
||||
meta.size = fi.size;
|
||||
@@ -5611,13 +5789,12 @@ impl StorageAPI for SetDisks {
|
||||
meta.parts.clone_from(&fi.parts);
|
||||
meta.metadata = fi.metadata.clone();
|
||||
meta.versioned = opts.versioned || opts.version_suspended;
|
||||
|
||||
// TODO: Checksum
|
||||
meta.checksum = fi.checksum.clone();
|
||||
}
|
||||
}
|
||||
|
||||
let mut parts = Vec::with_capacity(curr_fi.parts.len());
|
||||
// TODO: optimize cleanupMultipartPath
|
||||
|
||||
for p in curr_fi.parts.iter() {
|
||||
parts.push(path_join_buf(&[
|
||||
&upload_id_path,
|
||||
@@ -5632,28 +5809,6 @@ impl StorageAPI for SetDisks {
|
||||
format!("part.{}", p.number).as_str(),
|
||||
]));
|
||||
}
|
||||
|
||||
// let _ = self
|
||||
// .remove_part_meta(
|
||||
// bucket,
|
||||
// object,
|
||||
// upload_id,
|
||||
// curr_fi.data_dir.unwrap_or(Uuid::nil()).to_string().as_str(),
|
||||
// p.number,
|
||||
// )
|
||||
// .await;
|
||||
|
||||
// if !fi.parts.iter().any(|v| v.number == p.number) {
|
||||
// let _ = self
|
||||
// .remove_object_part(
|
||||
// bucket,
|
||||
// object,
|
||||
// upload_id,
|
||||
// curr_fi.data_dir.unwrap_or(Uuid::nil()).to_string().as_str(),
|
||||
// p.number,
|
||||
// )
|
||||
// .await;
|
||||
// }
|
||||
}
|
||||
|
||||
{
|
||||
@@ -5672,9 +5827,6 @@ impl StorageAPI for SetDisks {
|
||||
)
|
||||
.await?;
|
||||
|
||||
// debug!("complete fileinfo {:?}", &fi);
|
||||
|
||||
// TODO: reduce_common_data_dir
|
||||
if let Some(old_dir) = op_old_dir {
|
||||
self.commit_rename_data_dir(&shuffle_disks, bucket, object, &old_dir.to_string(), write_quorum)
|
||||
.await?;
|
||||
@@ -6415,7 +6567,7 @@ mod tests {
|
||||
// Test that all CHECK_PART constants have expected values
|
||||
assert_eq!(CHECK_PART_UNKNOWN, 0);
|
||||
assert_eq!(CHECK_PART_SUCCESS, 1);
|
||||
assert_eq!(CHECK_PART_FILE_NOT_FOUND, 4); // 实际值是 4,不是 2
|
||||
assert_eq!(CHECK_PART_FILE_NOT_FOUND, 4); // The actual value is 4, not 2
|
||||
assert_eq!(CHECK_PART_VOLUME_NOT_FOUND, 3);
|
||||
assert_eq!(CHECK_PART_FILE_CORRUPT, 5);
|
||||
}
|
||||
@@ -6695,7 +6847,7 @@ mod tests {
|
||||
assert_eq!(conv_part_err_to_int(&Some(disk_err)), CHECK_PART_FILE_NOT_FOUND);
|
||||
|
||||
let other_err = DiskError::other("other error");
|
||||
assert_eq!(conv_part_err_to_int(&Some(other_err)), CHECK_PART_UNKNOWN); // other 错误应该返回 UNKNOWN,不是 SUCCESS
|
||||
assert_eq!(conv_part_err_to_int(&Some(other_err)), CHECK_PART_UNKNOWN); // Other errors should return UNKNOWN, not SUCCESS
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -6767,7 +6919,7 @@ mod tests {
|
||||
let errs = vec![None, Some(DiskError::other("error1")), Some(DiskError::other("error2"))];
|
||||
let joined = join_errs(&errs);
|
||||
assert!(joined.contains("<nil>"));
|
||||
assert!(joined.contains("io error")); // DiskError::other 显示为 "io error"
|
||||
assert!(joined.contains("io error")); // DiskError::other is rendered as "io error"
|
||||
|
||||
// Test with different error types
|
||||
let errs2 = vec![None, Some(DiskError::FileNotFound), Some(DiskError::FileCorrupt)];
|
||||
|
||||
@@ -646,7 +646,7 @@ impl StorageAPI for Sets {
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn restore_transitioned_object(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
|
||||
async fn restore_transitioned_object(self: Arc<Self>, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
|
||||
self.get_disks_by_key(object)
|
||||
.restore_transitioned_object(bucket, object, opts)
|
||||
.await
|
||||
|
||||
@@ -219,7 +219,7 @@ impl ECStore {
|
||||
disk_map.insert(i, disks);
|
||||
}
|
||||
|
||||
// 替换本地磁盘
|
||||
// Replace the local disk
|
||||
if !is_dist_erasure().await {
|
||||
let mut global_local_disk_map = GLOBAL_LOCAL_DISK_MAP.write().await;
|
||||
for disk in local_disks {
|
||||
@@ -243,7 +243,7 @@ impl ECStore {
|
||||
decommission_cancelers,
|
||||
});
|
||||
|
||||
// 只有在全局部署ID尚未设置时才设置它
|
||||
// Only set it when the global deployment ID is not yet configured
|
||||
if let Some(dep_id) = deployment_id {
|
||||
if get_global_deployment_id().is_none() {
|
||||
set_global_deployment_id(dep_id);
|
||||
@@ -383,7 +383,7 @@ impl ECStore {
|
||||
// Ok(info)
|
||||
// }
|
||||
|
||||
// 读所有
|
||||
// Read all entries
|
||||
// define in store_list_objects.rs
|
||||
// async fn list_merged(&self, opts: &ListPathOptions, delimiter: &str) -> Result<Vec<ObjectInfo>> {
|
||||
// let walk_opts = WalkDirOptions {
|
||||
@@ -425,7 +425,7 @@ impl ECStore {
|
||||
|
||||
// if !uniq.contains(&entry.name) {
|
||||
// uniq.insert(entry.name.clone());
|
||||
// // TODO: 过滤
|
||||
// // TODO: filter
|
||||
|
||||
// if opts.limit > 0 && ress.len() as i32 >= opts.limit {
|
||||
// return Ok(ress);
|
||||
@@ -516,7 +516,7 @@ impl ECStore {
|
||||
}
|
||||
|
||||
async fn get_available_pool_idx(&self, bucket: &str, object: &str, size: i64) -> Option<usize> {
|
||||
// // 先随机返回一个
|
||||
// // Return a random one first
|
||||
|
||||
let mut server_pools = self.get_server_pools_available_space(bucket, object, size).await;
|
||||
server_pools.filter_max_used(100 - (100_f64 * DISK_RESERVE_FRACTION) as u64);
|
||||
@@ -546,7 +546,7 @@ impl ECStore {
|
||||
let mut n_sets = vec![0; self.pools.len()];
|
||||
let mut infos = vec![Vec::new(); self.pools.len()];
|
||||
|
||||
// TODO: 并发
|
||||
// TODO: add concurrency
|
||||
for (idx, pool) in self.pools.iter().enumerate() {
|
||||
if self.is_suspended(idx).await || self.is_pool_rebalancing(idx).await {
|
||||
continue;
|
||||
@@ -713,7 +713,7 @@ impl ECStore {
|
||||
|
||||
let mut ress = Vec::new();
|
||||
|
||||
// join_all 结果跟输入顺序一致
|
||||
// join_all preserves the input order
|
||||
for (i, res) in results.into_iter().enumerate() {
|
||||
let index = i;
|
||||
|
||||
@@ -984,7 +984,7 @@ pub async fn all_local_disk() -> Vec<DiskStore> {
|
||||
.collect()
|
||||
}
|
||||
|
||||
// init_local_disks 初始化本地磁盘,server 启动前必须初始化成功
|
||||
// init_local_disks must succeed before the server starts
|
||||
pub async fn init_local_disks(endpoint_pools: EndpointServerPools) -> Result<()> {
|
||||
let opt = &DiskOption {
|
||||
cleanup: true,
|
||||
@@ -1317,7 +1317,7 @@ impl StorageAPI for ECStore {
|
||||
|
||||
// TODO: replication opts.srdelete_op
|
||||
|
||||
// 删除 meta
|
||||
// Delete the metadata
|
||||
self.delete_all(RUSTFS_META_BUCKET, format!("{BUCKET_META_PREFIX}/{bucket}").as_str())
|
||||
.await?;
|
||||
Ok(())
|
||||
@@ -1469,7 +1469,7 @@ impl StorageAPI for ECStore {
|
||||
let mut gopts = opts.clone();
|
||||
gopts.no_lock = true;
|
||||
|
||||
// 查询在哪个 pool
|
||||
// Determine which pool contains it
|
||||
let (mut pinfo, errs) = self
|
||||
.get_pool_info_existing_with_opts(bucket, object, &gopts)
|
||||
.await
|
||||
@@ -1543,7 +1543,7 @@ impl StorageAPI for ECStore {
|
||||
})
|
||||
.collect();
|
||||
|
||||
// 默认返回值
|
||||
// Default return value
|
||||
let mut del_objects = vec![DeletedObject::default(); objects.len()];
|
||||
|
||||
let mut del_errs = Vec::with_capacity(objects.len());
|
||||
@@ -1625,7 +1625,7 @@ impl StorageAPI for ECStore {
|
||||
// // results.push(jh.await.unwrap());
|
||||
// // }
|
||||
|
||||
// // 记录 pool Index 对应的 objects pool_idx -> objects idx
|
||||
// // Record the mapping pool_idx -> object index
|
||||
// let mut pool_obj_idx_map = HashMap::new();
|
||||
// let mut orig_index_map = HashMap::new();
|
||||
|
||||
@@ -1675,9 +1675,9 @@ impl StorageAPI for ECStore {
|
||||
|
||||
// if !pool_obj_idx_map.is_empty() {
|
||||
// for (i, sets) in self.pools.iter().enumerate() {
|
||||
// // 取 pool idx 对应的 objects index
|
||||
// // Retrieve the object index for a pool idx
|
||||
// if let Some(objs) = pool_obj_idx_map.get(&i) {
|
||||
// // 取对应 obj,理论上不会 none
|
||||
// // Fetch the corresponding object (should never be None)
|
||||
// // let objs: Vec<ObjectToDelete> = obj_idxs.iter().filter_map(|&idx| objects.get(idx).cloned()).collect();
|
||||
|
||||
// if objs.is_empty() {
|
||||
@@ -1686,10 +1686,10 @@ impl StorageAPI for ECStore {
|
||||
|
||||
// let (pdel_objs, perrs) = sets.delete_objects(bucket, objs.clone(), opts.clone()).await?;
|
||||
|
||||
// // 同时存入不可能为 none
|
||||
// // Insert simultaneously (should never be None)
|
||||
// let org_indexes = orig_index_map.get(&i).unwrap();
|
||||
|
||||
// // perrs 的顺序理论上跟 obj_idxs 顺序一致
|
||||
// // perrs should follow the same order as obj_idxs
|
||||
// for (i, err) in perrs.into_iter().enumerate() {
|
||||
// let obj_idx = org_indexes[i];
|
||||
|
||||
@@ -1864,17 +1864,20 @@ impl StorageAPI for ECStore {
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn restore_transitioned_object(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
|
||||
async fn restore_transitioned_object(self: Arc<Self>, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()> {
|
||||
let object = encode_dir_object(object);
|
||||
if self.single_pool() {
|
||||
return self.pools[0].restore_transitioned_object(bucket, &object, opts).await;
|
||||
return self.pools[0].clone().restore_transitioned_object(bucket, &object, opts).await;
|
||||
}
|
||||
|
||||
//opts.skip_decommissioned = true;
|
||||
//opts.nolock = true;
|
||||
let idx = self.get_pool_idx_existing_with_opts(bucket, object.as_str(), opts).await?;
|
||||
|
||||
self.pools[idx].restore_transitioned_object(bucket, &object, opts).await
|
||||
self.pools[idx]
|
||||
.clone()
|
||||
.restore_transitioned_object(bucket, &object, opts)
|
||||
.await
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
@@ -2598,6 +2601,8 @@ pub async fn has_space_for(dis: &[Option<DiskInfo>], size: i64) -> Result<bool>
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::bucket::metadata_sys::init_bucket_metadata_sys;
|
||||
|
||||
use super::*;
|
||||
|
||||
// Test validation functions
|
||||
@@ -2785,4 +2790,122 @@ mod tests {
|
||||
assert!(check_put_object_args("", "test-object").is_err());
|
||||
assert!(check_put_object_args("test-bucket", "").is_err());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_ecstore_put_and_list_objects() {
|
||||
use crate::disk::endpoint::Endpoint;
|
||||
use crate::endpoints::{EndpointServerPools, Endpoints, PoolEndpoints};
|
||||
use std::path::PathBuf;
|
||||
use tokio::fs;
|
||||
|
||||
let test_base_dir = format!("/tmp/rustfs_test_put_list_{}", Uuid::new_v4());
|
||||
let temp_dir = PathBuf::from(&test_base_dir);
|
||||
|
||||
if temp_dir.exists() {
|
||||
let _ = fs::remove_dir_all(&temp_dir).await;
|
||||
}
|
||||
fs::create_dir_all(&temp_dir).await.expect("Failed to create test directory");
|
||||
|
||||
let disk_paths = vec![
|
||||
temp_dir.join("disk1"),
|
||||
temp_dir.join("disk2"),
|
||||
temp_dir.join("disk3"),
|
||||
temp_dir.join("disk4"),
|
||||
];
|
||||
|
||||
for disk_path in &disk_paths {
|
||||
fs::create_dir_all(disk_path).await.expect("Failed to create disk directory");
|
||||
}
|
||||
|
||||
let mut endpoints = Vec::new();
|
||||
for (i, disk_path) in disk_paths.iter().enumerate() {
|
||||
let disk_str = disk_path.to_str().expect("Invalid disk path");
|
||||
let mut endpoint = Endpoint::try_from(disk_str).expect("Failed to create endpoint");
|
||||
endpoint.set_pool_index(0);
|
||||
endpoint.set_set_index(0);
|
||||
endpoint.set_disk_index(i);
|
||||
endpoints.push(endpoint);
|
||||
}
|
||||
|
||||
let pool_endpoints = PoolEndpoints {
|
||||
legacy: false,
|
||||
set_count: 1,
|
||||
drives_per_set: 4,
|
||||
endpoints: Endpoints::from(endpoints),
|
||||
cmd_line: "test".to_string(),
|
||||
platform: format!("OS: {} | Arch: {}", std::env::consts::OS, std::env::consts::ARCH),
|
||||
};
|
||||
|
||||
let endpoint_pools = EndpointServerPools(vec![pool_endpoints]);
|
||||
|
||||
init_local_disks(endpoint_pools.clone())
|
||||
.await
|
||||
.expect("Failed to initialize local disks");
|
||||
|
||||
let server_addr: SocketAddr = "127.0.0.1:0".parse().expect("Invalid server address");
|
||||
let ecstore = ECStore::new(server_addr, endpoint_pools, CancellationToken::new())
|
||||
.await
|
||||
.expect("Failed to create ECStore");
|
||||
|
||||
init_bucket_metadata_sys(ecstore.clone(), vec![]).await;
|
||||
|
||||
let bucket_name = "test-bucket";
|
||||
ecstore
|
||||
.make_bucket(bucket_name, &MakeBucketOptions::default())
|
||||
.await
|
||||
.expect("Failed to create bucket");
|
||||
|
||||
let test_objects = vec![
|
||||
("object1.txt", b"Hello, World!".to_vec()),
|
||||
("object2.txt", b"Test data for object 2".to_vec()),
|
||||
("folder/object3.txt", b"Object in folder".to_vec()),
|
||||
("folder/subfolder/object4.txt", b"Nested object".to_vec()),
|
||||
];
|
||||
|
||||
for (object_name, data) in &test_objects {
|
||||
let mut reader = PutObjReader::from_vec(data.clone());
|
||||
let object_info = ecstore
|
||||
.put_object(bucket_name, object_name, &mut reader, &ObjectOptions::default())
|
||||
.await
|
||||
.unwrap_or_else(|e| panic!("Failed to put object {}: {}", object_name, e));
|
||||
|
||||
assert_eq!(object_info.size, data.len() as i64, "Object size mismatch for {}", object_name);
|
||||
assert_eq!(object_info.bucket, bucket_name);
|
||||
}
|
||||
|
||||
let list_result = ecstore
|
||||
.clone()
|
||||
.list_objects_v2(bucket_name, "", None, None, 1000, false, None)
|
||||
.await
|
||||
.expect("Failed to list objects");
|
||||
|
||||
assert_eq!(list_result.objects.len(), test_objects.len(), "Number of objects mismatch");
|
||||
|
||||
let mut object_names: Vec<String> = list_result.objects.iter().map(|o| o.name.clone()).collect();
|
||||
object_names.sort();
|
||||
|
||||
let mut expected_names: Vec<String> = test_objects.iter().map(|(n, _)| n.to_string()).collect();
|
||||
expected_names.sort();
|
||||
|
||||
assert_eq!(object_names, expected_names, "Object names mismatch");
|
||||
|
||||
let prefix_result = ecstore
|
||||
.clone()
|
||||
.list_objects_v2(bucket_name, "folder/", None, None, 1000, false, None)
|
||||
.await
|
||||
.expect("Failed to list objects with prefix");
|
||||
|
||||
assert_eq!(prefix_result.objects.len(), 2, "Should find 2 objects with prefix 'folder/'");
|
||||
assert!(prefix_result.objects.iter().all(|o| o.name.starts_with("folder/")));
|
||||
|
||||
let delimiter_result = ecstore
|
||||
.clone()
|
||||
.list_objects_v2(bucket_name, "", None, Some("/".to_string()), 1000, false, None)
|
||||
.await
|
||||
.expect("Failed to list objects with delimiter");
|
||||
|
||||
assert!(!delimiter_result.prefixes.is_empty() || !delimiter_result.objects.is_empty());
|
||||
|
||||
let _ = fs::remove_dir_all(&temp_dir).await;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
use crate::bucket::metadata_sys::get_versioning_config;
|
||||
use crate::bucket::versioning::VersioningApi as _;
|
||||
use crate::config::storageclass;
|
||||
use crate::disk::DiskStore;
|
||||
use crate::error::{Error, Result};
|
||||
use crate::store_utils::clean_metadata;
|
||||
@@ -33,6 +34,7 @@ use rustfs_madmin::heal_commands::HealResultItem;
|
||||
use rustfs_rio::Checksum;
|
||||
use rustfs_rio::{DecompressReader, HashReader, LimitReader, WarpReader};
|
||||
use rustfs_utils::CompressionAlgorithm;
|
||||
use rustfs_utils::http::AMZ_STORAGE_CLASS;
|
||||
use rustfs_utils::http::headers::{AMZ_OBJECT_TAGGING, RESERVED_METADATA_PREFIX_LOWER};
|
||||
use rustfs_utils::path::decode_dir_object;
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -291,7 +293,7 @@ impl HTTPRangeSpec {
|
||||
let suffix_len = if self.start < 0 {
|
||||
self.start
|
||||
.checked_neg()
|
||||
.ok_or_else(|| Error::other("range value invalid: suffix length overflow"))?
|
||||
.ok_or_else(|| Error::InvalidRangeSpec("range value invalid: suffix length overflow".to_string()))?
|
||||
} else {
|
||||
self.start
|
||||
};
|
||||
@@ -304,14 +306,14 @@ impl HTTPRangeSpec {
|
||||
}
|
||||
pub fn get_length(&self, res_size: i64) -> Result<i64> {
|
||||
if res_size < 0 {
|
||||
return Err(Error::other("The requested range is not satisfiable"));
|
||||
return Err(Error::InvalidRangeSpec("The requested range is not satisfiable".to_string()));
|
||||
}
|
||||
|
||||
if self.is_suffix_length {
|
||||
let specified_len = if self.start < 0 {
|
||||
self.start
|
||||
.checked_neg()
|
||||
.ok_or_else(|| Error::other("range value invalid: suffix length overflow"))?
|
||||
.ok_or_else(|| Error::InvalidRangeSpec("range value invalid: suffix length overflow".to_string()))?
|
||||
} else {
|
||||
self.start
|
||||
};
|
||||
@@ -325,7 +327,7 @@ impl HTTPRangeSpec {
|
||||
}
|
||||
|
||||
if self.start >= res_size {
|
||||
return Err(Error::other("The requested range is not satisfiable"));
|
||||
return Err(Error::InvalidRangeSpec("The requested range is not satisfiable".to_string()));
|
||||
}
|
||||
|
||||
if self.end > -1 {
|
||||
@@ -343,7 +345,7 @@ impl HTTPRangeSpec {
|
||||
return Ok(range_length);
|
||||
}
|
||||
|
||||
Err(Error::other(format!(
|
||||
Err(Error::InvalidRangeSpec(format!(
|
||||
"range value invalid: start={}, end={}, expected start <= end and end >= -1",
|
||||
self.start, self.end
|
||||
)))
|
||||
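A quick worked example of the range arithmetic this hunk touches (inferred from the surrounding code, not stated in the diff): for a 100-byte object, a suffix spec with start = -30 resolves to a 30-byte length, a spec with start = 20 and end = 49 presumably yields end - start + 1 = 30 bytes, and start = 120 or an inconsistent start/end pair now surfaces as Error::InvalidRangeSpec ("The requested range is not satisfiable" / "range value invalid ...") instead of the generic Error::other, so callers can tell a malformed range apart from other failures.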
@@ -518,6 +520,7 @@ impl From<s3s::dto::CompletedPart> for CompletePart {
|
||||
pub struct ObjectInfo {
|
||||
pub bucket: String,
|
||||
pub name: String,
|
||||
pub storage_class: Option<String>,
|
||||
pub mod_time: Option<OffsetDateTime>,
|
||||
pub size: i64,
|
||||
// Actual size is the real size of the object uploaded by client.
|
||||
@@ -557,6 +560,7 @@ impl Clone for ObjectInfo {
|
||||
Self {
|
||||
bucket: self.bucket.clone(),
|
||||
name: self.name.clone(),
|
||||
storage_class: self.storage_class.clone(),
|
||||
mod_time: self.mod_time,
|
||||
size: self.size,
|
||||
actual_size: self.actual_size,
|
||||
@@ -689,6 +693,12 @@ impl ObjectInfo {
|
||||
v
|
||||
};
|
||||
|
||||
// Extract storage class from metadata, default to STANDARD if not found
|
||||
let storage_class = metadata
|
||||
.get(AMZ_STORAGE_CLASS)
|
||||
.cloned()
|
||||
.or_else(|| Some(storageclass::STANDARD.to_string()));
|
||||
|
||||
// Convert parts from rustfs_filemeta::ObjectPartInfo to store_api::ObjectPartInfo
|
||||
let parts = fi
|
||||
.parts
|
||||
@@ -727,6 +737,7 @@ impl ObjectInfo {
|
||||
user_defined: metadata,
|
||||
transitioned_object,
|
||||
checksum: fi.checksum.clone(),
|
||||
storage_class,
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
@@ -1325,7 +1336,7 @@ pub trait StorageAPI: ObjectIO + Debug {
|
||||
async fn get_object_tags(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<String>;
|
||||
async fn add_partial(&self, bucket: &str, object: &str, version_id: &str) -> Result<()>;
|
||||
async fn transition_object(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()>;
|
||||
async fn restore_transitioned_object(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()>;
|
||||
async fn restore_transitioned_object(self: Arc<Self>, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<()>;
|
||||
async fn put_object_tags(&self, bucket: &str, object: &str, tags: &str, opts: &ObjectOptions) -> Result<ObjectInfo>;
|
||||
async fn delete_object_tags(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo>;
|
||||
|
||||
@@ -1361,7 +1372,7 @@ impl<R: AsyncRead + Unpin + Send + Sync> RangedDecompressReader<R> {
|
||||
// Validate the range request
|
||||
if offset >= total_size {
|
||||
tracing::debug!("Range offset {} exceeds total size {}", offset, total_size);
|
||||
return Err(Error::other("Range offset exceeds file size"));
|
||||
return Err(Error::InvalidRangeSpec("Range offset exceeds file size".to_string()));
|
||||
}
|
||||
|
||||
// Adjust length if it extends beyond file end
|
||||
|
||||
@@ -952,6 +952,12 @@ async fn gather_results(
|
||||
let mut recv = recv;
|
||||
let mut entries = Vec::new();
|
||||
while let Some(mut entry) = recv.recv().await {
|
||||
#[cfg(windows)]
|
||||
{
|
||||
// normalize windows path separator
|
||||
entry.name = entry.name.replace("\\", "/");
|
||||
}
|
||||
|
||||
if returned {
|
||||
continue;
|
||||
}
|
||||
@@ -1050,6 +1056,10 @@ async fn merge_entry_channels(
|
||||
out_channel: Sender<MetaCacheEntry>,
|
||||
read_quorum: usize,
|
||||
) -> Result<()> {
|
||||
if in_channels.is_empty() {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let mut in_channels = in_channels;
|
||||
if in_channels.len() == 1 {
|
||||
loop {
|
||||
@@ -1086,18 +1096,18 @@ async fn merge_entry_channels(
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let mut best: Option<MetaCacheEntry> = None;
|
||||
let mut best = top[0].clone();
|
||||
let mut best_idx = 0;
|
||||
to_merge.clear();
|
||||
|
||||
// FIXME: top move when select_from call
|
||||
let vtop = top.clone();
|
||||
// let vtop = top.clone();
|
||||
|
||||
for (i, other) in vtop.iter().enumerate() {
|
||||
if let Some(other_entry) = other {
|
||||
// let vtop = top.as_slice();
|
||||
|
||||
for other_idx in 1..top.len() {
|
||||
if let Some(other_entry) = &top[other_idx] {
|
||||
if let Some(best_entry) = &best {
|
||||
let other_idx = i;
|
||||
|
||||
// println!("get other_entry {:?}", other_entry.name);
|
||||
|
||||
if path::clean(&best_entry.name) == path::clean(&other_entry.name) {
|
||||
@@ -1124,21 +1134,20 @@ async fn merge_entry_channels(
|
||||
best_idx = other_idx;
|
||||
continue;
|
||||
}
|
||||
} else if best_entry.name > other_entry.name {
|
||||
}
|
||||
|
||||
if best_entry.name > other_entry.name {
|
||||
to_merge.clear();
|
||||
best = Some(other_entry.clone());
|
||||
best_idx = i;
|
||||
best_idx = other_idx;
|
||||
}
|
||||
} else {
|
||||
best = Some(other_entry.clone());
|
||||
best_idx = i;
|
||||
best_idx = other_idx;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// println!("get best_entry {} {:?}", &best_idx, &best.clone().unwrap_or_default().name);
|
||||
|
||||
// TODO:
|
||||
if !to_merge.is_empty() {
|
||||
if let Some(entry) = &best {
|
||||
let mut versions = Vec::with_capacity(to_merge.len() + 1);
|
||||
@@ -1150,9 +1159,9 @@ async fn merge_entry_channels(
|
||||
}
|
||||
|
||||
for &idx in to_merge.iter() {
|
||||
let has_entry = { top.get(idx).cloned() };
|
||||
let has_entry = top[idx].clone();
|
||||
|
||||
if let Some(Some(entry)) = has_entry {
|
||||
if let Some(entry) = has_entry {
|
||||
let xl2 = match entry.clone().xl_meta() {
|
||||
Ok(res) => res,
|
||||
Err(_) => {
|
||||
@@ -1198,9 +1207,9 @@ async fn merge_entry_channels(
|
||||
out_channel.send(best_entry.clone()).await.map_err(Error::other)?;
|
||||
last = best_entry.name.clone();
|
||||
}
|
||||
top[best_idx] = None; // Replace entry we just sent
|
||||
select_from(&mut in_channels, best_idx, &mut top, &mut n_done).await?;
|
||||
}
|
||||
|
||||
select_from(&mut in_channels, best_idx, &mut top, &mut n_done).await?;
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -37,17 +37,17 @@ pub fn clean_metadata_keys(metadata: &mut HashMap<String, String>, key_names: &[
|
||||
}
|
||||
}
|
||||
|
||||
// 检查是否为 元数据桶
|
||||
// Check whether the bucket is the metadata bucket
|
||||
fn is_meta_bucket(bucket_name: &str) -> bool {
|
||||
bucket_name == RUSTFS_META_BUCKET
|
||||
}
|
||||
|
||||
// 检查是否为 保留桶
|
||||
// Check whether the bucket is reserved
|
||||
fn is_reserved_bucket(bucket_name: &str) -> bool {
|
||||
bucket_name == "rustfs"
|
||||
}
|
||||
|
||||
// 检查桶名是否为保留名或无效名
|
||||
// Check whether the bucket name is reserved or invalid
|
||||
pub fn is_reserved_or_invalid_bucket(bucket_entry: &str, strict: bool) -> bool {
|
||||
if bucket_entry.is_empty() {
|
||||
return true;
|
||||
@@ -59,7 +59,7 @@ pub fn is_reserved_or_invalid_bucket(bucket_entry: &str, strict: bool) -> bool {
|
||||
result || is_meta_bucket(bucket_entry) || is_reserved_bucket(bucket_entry)
|
||||
}
|
||||
|
||||
// 检查桶名是否有效
|
||||
// Check whether the bucket name is valid
|
||||
fn check_bucket_name(bucket_name: &str, strict: bool) -> Result<()> {
|
||||
if bucket_name.trim().is_empty() {
|
||||
return Err(Error::other("Bucket name cannot be empty"));
|
||||
@@ -86,7 +86,7 @@ fn check_bucket_name(bucket_name: &str, strict: bool) -> Result<()> {
|
||||
return Err(Error::other("Bucket name contains invalid characters"));
|
||||
}
|
||||
|
||||
// 检查包含 "..", ".-", "-."
|
||||
// Check for "..", ".-", "-."
|
||||
if bucket_name.contains("..") || bucket_name.contains(".-") || bucket_name.contains("-.") {
|
||||
return Err(Error::other("Bucket name contains invalid characters"));
|
||||
}
|
||||
|
||||
@@ -18,6 +18,13 @@ pub mod tier_config;
|
||||
pub mod tier_gen;
|
||||
pub mod tier_handlers;
|
||||
pub mod warm_backend;
|
||||
pub mod warm_backend_aliyun;
|
||||
pub mod warm_backend_azure;
|
||||
pub mod warm_backend_gcs;
|
||||
pub mod warm_backend_huaweicloud;
|
||||
pub mod warm_backend_minio;
|
||||
pub mod warm_backend_r2;
|
||||
pub mod warm_backend_rustfs;
|
||||
pub mod warm_backend_s3;
|
||||
pub mod warm_backend_s3sdk;
|
||||
pub mod warm_backend_tencent;
|
||||
|
||||
@@ -141,8 +141,8 @@ impl TierConfigMgr {
|
||||
(TierType::Unsupported, false)
|
||||
}
|
||||
|
||||
pub async fn add(&mut self, tier: TierConfig, force: bool) -> std::result::Result<(), AdminError> {
|
||||
let tier_name = &tier.name;
|
||||
pub async fn add(&mut self, tier_config: TierConfig, force: bool) -> std::result::Result<(), AdminError> {
|
||||
let tier_name = &tier_config.name;
|
||||
if tier_name != tier_name.to_uppercase().as_str() {
|
||||
return Err(ERR_TIER_NAME_NOT_UPPERCASE.clone());
|
||||
}
|
||||
@@ -152,7 +152,7 @@ impl TierConfigMgr {
|
||||
return Err(ERR_TIER_ALREADY_EXISTS.clone());
|
||||
}
|
||||
|
||||
let d = new_warm_backend(&tier, true).await?;
|
||||
let d = new_warm_backend(&tier_config, true).await?;
|
||||
|
||||
if !force {
|
||||
let in_use = d.in_use().await;
|
||||
@@ -180,7 +180,7 @@ impl TierConfigMgr {
|
||||
}
|
||||
|
||||
self.driver_cache.insert(tier_name.to_string(), d);
|
||||
self.tiers.insert(tier_name.to_string(), tier);
|
||||
self.tiers.insert(tier_name.to_string(), tier_config);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -260,10 +260,10 @@ impl TierConfigMgr {
|
||||
return Err(ERR_TIER_NOT_FOUND.clone());
|
||||
}
|
||||
|
||||
let mut cfg = self.tiers[tier_name].clone();
|
||||
let mut tier_config = self.tiers[tier_name].clone();
|
||||
match tier_type {
|
||||
TierType::S3 => {
|
||||
let mut s3 = cfg.s3.as_mut().expect("err");
|
||||
let mut s3 = tier_config.s3.as_mut().expect("err");
|
||||
if creds.aws_role {
|
||||
s3.aws_role = true
|
||||
}
|
||||
@@ -277,7 +277,7 @@ impl TierConfigMgr {
|
||||
}
|
||||
}
|
||||
TierType::RustFS => {
|
||||
let mut rustfs = cfg.rustfs.as_mut().expect("err");
|
||||
let mut rustfs = tier_config.rustfs.as_mut().expect("err");
|
||||
if creds.access_key == "" || creds.secret_key == "" {
|
||||
return Err(ERR_TIER_MISSING_CREDENTIALS.clone());
|
||||
}
|
||||
@@ -285,18 +285,65 @@ impl TierConfigMgr {
|
||||
rustfs.secret_key = creds.secret_key;
|
||||
}
|
||||
TierType::MinIO => {
|
||||
let mut minio = cfg.minio.as_mut().expect("err");
|
||||
let mut minio = tier_config.minio.as_mut().expect("err");
|
||||
if creds.access_key == "" || creds.secret_key == "" {
|
||||
return Err(ERR_TIER_MISSING_CREDENTIALS.clone());
|
||||
}
|
||||
minio.access_key = creds.access_key;
|
||||
minio.secret_key = creds.secret_key;
|
||||
}
|
||||
TierType::Aliyun => {
|
||||
let mut aliyun = tier_config.aliyun.as_mut().expect("err");
|
||||
if creds.access_key == "" || creds.secret_key == "" {
|
||||
return Err(ERR_TIER_MISSING_CREDENTIALS.clone());
|
||||
}
|
||||
aliyun.access_key = creds.access_key;
|
||||
aliyun.secret_key = creds.secret_key;
|
||||
}
|
||||
TierType::Tencent => {
|
||||
let mut tencent = tier_config.tencent.as_mut().expect("err");
|
||||
if creds.access_key == "" || creds.secret_key == "" {
|
||||
return Err(ERR_TIER_MISSING_CREDENTIALS.clone());
|
||||
}
|
||||
tencent.access_key = creds.access_key;
|
||||
tencent.secret_key = creds.secret_key;
|
||||
}
|
||||
TierType::Huaweicloud => {
|
||||
let mut huaweicloud = tier_config.huaweicloud.as_mut().expect("err");
|
||||
if creds.access_key == "" || creds.secret_key == "" {
|
||||
return Err(ERR_TIER_MISSING_CREDENTIALS.clone());
|
||||
}
|
||||
huaweicloud.access_key = creds.access_key;
|
||||
huaweicloud.secret_key = creds.secret_key;
|
||||
}
|
||||
TierType::Azure => {
|
||||
let mut azure = tier_config.azure.as_mut().expect("err");
|
||||
if creds.access_key == "" || creds.secret_key == "" {
|
||||
return Err(ERR_TIER_MISSING_CREDENTIALS.clone());
|
||||
}
|
||||
azure.access_key = creds.access_key;
|
||||
azure.secret_key = creds.secret_key;
|
||||
}
|
||||
TierType::GCS => {
|
||||
let mut gcs = tier_config.gcs.as_mut().expect("err");
|
||||
if creds.access_key == "" || creds.secret_key == "" {
|
||||
return Err(ERR_TIER_MISSING_CREDENTIALS.clone());
|
||||
}
|
||||
gcs.creds = creds.access_key; //creds.creds_json
|
||||
}
|
||||
TierType::R2 => {
|
||||
let mut r2 = tier_config.r2.as_mut().expect("err");
|
||||
if creds.access_key == "" || creds.secret_key == "" {
|
||||
return Err(ERR_TIER_MISSING_CREDENTIALS.clone());
|
||||
}
|
||||
r2.access_key = creds.access_key;
|
||||
r2.secret_key = creds.secret_key;
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
|
||||
let d = new_warm_backend(&cfg, true).await?;
|
||||
self.tiers.insert(tier_name.to_string(), cfg);
|
||||
let d = new_warm_backend(&tier_config, true).await?;
|
||||
self.tiers.insert(tier_name.to_string(), tier_config);
|
||||
self.driver_cache.insert(tier_name.to_string(), d);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -26,14 +26,22 @@ pub enum TierType {
|
||||
Unsupported,
|
||||
#[serde(rename = "s3")]
|
||||
S3,
|
||||
#[serde(rename = "azure")]
|
||||
Azure,
|
||||
#[serde(rename = "gcs")]
|
||||
GCS,
|
||||
#[serde(rename = "rustfs")]
|
||||
RustFS,
|
||||
#[serde(rename = "minio")]
|
||||
MinIO,
|
||||
#[serde(rename = "aliyun")]
|
||||
Aliyun,
|
||||
#[serde(rename = "tencent")]
|
||||
Tencent,
|
||||
#[serde(rename = "huaweicloud")]
|
||||
Huaweicloud,
|
||||
#[serde(rename = "azure")]
|
||||
Azure,
|
||||
#[serde(rename = "gcs")]
|
||||
GCS,
|
||||
#[serde(rename = "r2")]
|
||||
R2,
|
||||
}
|
||||
|
||||
impl Display for TierType {
|
||||
@@ -48,6 +56,24 @@ impl Display for TierType {
|
||||
TierType::MinIO => {
|
||||
write!(f, "MinIO")
|
||||
}
|
||||
TierType::Aliyun => {
|
||||
write!(f, "Aliyun")
|
||||
}
|
||||
TierType::Tencent => {
|
||||
write!(f, "Tencent")
|
||||
}
|
||||
TierType::Huaweicloud => {
|
||||
write!(f, "Huaweicloud")
|
||||
}
|
||||
TierType::Azure => {
|
||||
write!(f, "Azure")
|
||||
}
|
||||
TierType::GCS => {
|
||||
write!(f, "GCS")
|
||||
}
|
||||
TierType::R2 => {
|
||||
write!(f, "R2")
|
||||
}
|
||||
_ => {
|
||||
write!(f, "Unsupported")
|
||||
}
|
||||
@@ -61,6 +87,12 @@ impl TierType {
|
||||
"S3" => TierType::S3,
|
||||
"RustFS" => TierType::RustFS,
|
||||
"MinIO" => TierType::MinIO,
|
||||
"Aliyun" => TierType::Aliyun,
|
||||
"Tencent" => TierType::Tencent,
|
||||
"Huaweicloud" => TierType::Huaweicloud,
|
||||
"Azure" => TierType::Azure,
|
||||
"GCS" => TierType::GCS,
|
||||
"R2" => TierType::R2,
|
||||
_ => TierType::Unsupported,
|
||||
}
|
||||
}
|
||||
@@ -70,6 +102,12 @@ impl TierType {
|
||||
TierType::S3 => "s3".to_string(),
|
||||
TierType::RustFS => "rustfs".to_string(),
|
||||
TierType::MinIO => "minio".to_string(),
|
||||
TierType::Aliyun => "aliyun".to_string(),
|
||||
TierType::Tencent => "tencent".to_string(),
|
||||
TierType::Huaweicloud => "huaweicloud".to_string(),
|
||||
TierType::Azure => "azure".to_string(),
|
||||
TierType::GCS => "gcs".to_string(),
|
||||
TierType::R2 => "r2".to_string(),
|
||||
_ => "unsupported".to_string(),
|
||||
}
|
||||
}
|
||||
@@ -86,8 +124,18 @@ pub struct TierConfig {
|
||||
pub name: String,
|
||||
#[serde(rename = "s3", skip_serializing_if = "Option::is_none")]
|
||||
pub s3: Option<TierS3>,
|
||||
//TODO: azure: Option<TierAzure>,
|
||||
//TODO: gcs: Option<TierGCS>,
|
||||
#[serde(rename = "aliyun", skip_serializing_if = "Option::is_none")]
|
||||
pub aliyun: Option<TierAliyun>,
|
||||
#[serde(rename = "tencent", skip_serializing_if = "Option::is_none")]
|
||||
pub tencent: Option<TierTencent>,
|
||||
#[serde(rename = "huaweicloud", skip_serializing_if = "Option::is_none")]
|
||||
pub huaweicloud: Option<TierHuaweicloud>,
|
||||
#[serde(rename = "azure", skip_serializing_if = "Option::is_none")]
|
||||
pub azure: Option<TierAzure>,
|
||||
#[serde(rename = "gcs", skip_serializing_if = "Option::is_none")]
|
||||
pub gcs: Option<TierGCS>,
|
||||
#[serde(rename = "r2", skip_serializing_if = "Option::is_none")]
|
||||
pub r2: Option<TierR2>,
|
||||
#[serde(rename = "rustfs", skip_serializing_if = "Option::is_none")]
|
||||
pub rustfs: Option<TierRustFS>,
|
||||
#[serde(rename = "minio", skip_serializing_if = "Option::is_none")]
|
||||
@@ -97,10 +145,14 @@ pub struct TierConfig {
|
||||
impl Clone for TierConfig {
|
||||
fn clone(&self) -> TierConfig {
|
||||
let mut s3 = None;
|
||||
//az TierAzure
|
||||
//gcs TierGCS
|
||||
let mut r = None;
|
||||
let mut m = None;
|
||||
let mut aliyun = None;
|
||||
let mut tencent = None;
|
||||
let mut huaweicloud = None;
|
||||
let mut azure = None;
|
||||
let mut gcs = None;
|
||||
let mut r2 = None;
|
||||
match self.tier_type {
|
||||
TierType::S3 => {
|
||||
let mut s3_ = self.s3.as_ref().expect("err").clone();
|
||||
@@ -117,6 +169,36 @@ impl Clone for TierConfig {
|
||||
m_.secret_key = "REDACTED".to_string();
|
||||
m = Some(m_);
|
||||
}
|
||||
TierType::Aliyun => {
|
||||
let mut aliyun_ = self.aliyun.as_ref().expect("err").clone();
|
||||
aliyun_.secret_key = "REDACTED".to_string();
|
||||
aliyun = Some(aliyun_);
|
||||
}
|
||||
TierType::Tencent => {
|
||||
let mut tencent_ = self.tencent.as_ref().expect("err").clone();
|
||||
tencent_.secret_key = "REDACTED".to_string();
|
||||
tencent = Some(tencent_);
|
||||
}
|
||||
TierType::Huaweicloud => {
|
||||
let mut huaweicloud_ = self.huaweicloud.as_ref().expect("err").clone();
|
||||
huaweicloud_.secret_key = "REDACTED".to_string();
|
||||
huaweicloud = Some(huaweicloud_);
|
||||
}
|
||||
TierType::Azure => {
|
||||
let mut azure_ = self.azure.as_ref().expect("err").clone();
|
||||
azure_.secret_key = "REDACTED".to_string();
|
||||
azure = Some(azure_);
|
||||
}
|
||||
TierType::GCS => {
|
||||
let mut gcs_ = self.gcs.as_ref().expect("err").clone();
|
||||
gcs_.creds = "REDACTED".to_string();
|
||||
gcs = Some(gcs_);
|
||||
}
|
||||
TierType::R2 => {
|
||||
let mut r2_ = self.r2.as_ref().expect("err").clone();
|
||||
r2_.secret_key = "REDACTED".to_string();
|
||||
r2 = Some(r2_);
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
TierConfig {
|
||||
@@ -126,6 +208,12 @@ impl Clone for TierConfig {
|
||||
s3,
|
||||
rustfs: r,
|
||||
minio: m,
|
||||
aliyun,
|
||||
tencent,
|
||||
huaweicloud,
|
||||
azure,
|
||||
gcs,
|
||||
r2,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -137,6 +225,12 @@ impl TierConfig {
|
||||
TierType::S3 => self.s3.as_ref().expect("err").endpoint.clone(),
|
||||
TierType::RustFS => self.rustfs.as_ref().expect("err").endpoint.clone(),
|
||||
TierType::MinIO => self.minio.as_ref().expect("err").endpoint.clone(),
|
||||
TierType::Aliyun => self.aliyun.as_ref().expect("err").endpoint.clone(),
|
||||
TierType::Tencent => self.tencent.as_ref().expect("err").endpoint.clone(),
|
||||
TierType::Huaweicloud => self.huaweicloud.as_ref().expect("err").endpoint.clone(),
|
||||
TierType::Azure => self.azure.as_ref().expect("err").endpoint.clone(),
|
||||
TierType::GCS => self.gcs.as_ref().expect("err").endpoint.clone(),
|
||||
TierType::R2 => self.r2.as_ref().expect("err").endpoint.clone(),
|
||||
_ => {
|
||||
info!("unexpected tier type {}", self.tier_type);
|
||||
"".to_string()
|
||||
@@ -149,6 +243,12 @@ impl TierConfig {
|
||||
TierType::S3 => self.s3.as_ref().expect("err").bucket.clone(),
|
||||
TierType::RustFS => self.rustfs.as_ref().expect("err").bucket.clone(),
|
||||
TierType::MinIO => self.minio.as_ref().expect("err").bucket.clone(),
|
||||
TierType::Aliyun => self.aliyun.as_ref().expect("err").bucket.clone(),
|
||||
TierType::Tencent => self.tencent.as_ref().expect("err").bucket.clone(),
|
||||
TierType::Huaweicloud => self.huaweicloud.as_ref().expect("err").bucket.clone(),
|
||||
TierType::Azure => self.azure.as_ref().expect("err").bucket.clone(),
|
||||
TierType::GCS => self.gcs.as_ref().expect("err").bucket.clone(),
|
||||
TierType::R2 => self.r2.as_ref().expect("err").bucket.clone(),
|
||||
_ => {
|
||||
info!("unexpected tier type {}", self.tier_type);
|
||||
"".to_string()
|
||||
@@ -161,6 +261,12 @@ impl TierConfig {
|
||||
TierType::S3 => self.s3.as_ref().expect("err").prefix.clone(),
|
||||
TierType::RustFS => self.rustfs.as_ref().expect("err").prefix.clone(),
|
||||
TierType::MinIO => self.minio.as_ref().expect("err").prefix.clone(),
|
||||
TierType::Aliyun => self.aliyun.as_ref().expect("err").prefix.clone(),
|
||||
TierType::Tencent => self.tencent.as_ref().expect("err").prefix.clone(),
|
||||
TierType::Huaweicloud => self.huaweicloud.as_ref().expect("err").prefix.clone(),
|
||||
TierType::Azure => self.azure.as_ref().expect("err").prefix.clone(),
|
||||
TierType::GCS => self.gcs.as_ref().expect("err").prefix.clone(),
|
||||
TierType::R2 => self.r2.as_ref().expect("err").prefix.clone(),
|
||||
_ => {
|
||||
info!("unexpected tier type {}", self.tier_type);
|
||||
"".to_string()
|
||||
@@ -173,6 +279,12 @@ impl TierConfig {
|
||||
TierType::S3 => self.s3.as_ref().expect("err").region.clone(),
|
||||
TierType::RustFS => self.rustfs.as_ref().expect("err").region.clone(),
|
||||
TierType::MinIO => self.minio.as_ref().expect("err").region.clone(),
|
||||
TierType::Aliyun => self.aliyun.as_ref().expect("err").region.clone(),
|
||||
TierType::Tencent => self.tencent.as_ref().expect("err").region.clone(),
|
||||
TierType::Huaweicloud => self.huaweicloud.as_ref().expect("err").region.clone(),
|
||||
TierType::Azure => self.azure.as_ref().expect("err").region.clone(),
|
||||
TierType::GCS => self.gcs.as_ref().expect("err").region.clone(),
|
||||
TierType::R2 => self.r2.as_ref().expect("err").region.clone(),
|
||||
_ => {
|
||||
info!("unexpected tier type {}", self.tier_type);
|
||||
"".to_string()
|
||||
@@ -319,3 +431,152 @@ impl TierMinIO {
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Default, Debug, Clone)]
|
||||
#[serde(default)]
|
||||
pub struct TierAliyun {
|
||||
pub name: String,
|
||||
pub endpoint: String,
|
||||
#[serde(rename = "accessKey")]
|
||||
pub access_key: String,
|
||||
#[serde(rename = "secretKey")]
|
||||
pub secret_key: String,
|
||||
pub bucket: String,
|
||||
pub prefix: String,
|
||||
pub region: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Default, Debug, Clone)]
|
||||
#[serde(default)]
|
||||
pub struct TierTencent {
|
||||
pub name: String,
|
||||
pub endpoint: String,
|
||||
#[serde(rename = "accessKey")]
|
||||
pub access_key: String,
|
||||
#[serde(rename = "secretKey")]
|
||||
pub secret_key: String,
|
||||
pub bucket: String,
|
||||
pub prefix: String,
|
||||
pub region: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Default, Debug, Clone)]
|
||||
#[serde(default)]
|
||||
pub struct TierHuaweicloud {
|
||||
pub name: String,
|
||||
pub endpoint: String,
|
||||
#[serde(rename = "accessKey")]
|
||||
pub access_key: String,
|
||||
#[serde(rename = "secretKey")]
|
||||
pub secret_key: String,
|
||||
pub bucket: String,
|
||||
pub prefix: String,
|
||||
pub region: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Default, Debug, Clone)]
|
||||
#[serde(default)]
|
||||
pub struct ServicePrincipalAuth {
|
||||
pub tenant_id: String,
|
||||
pub client_id: String,
|
||||
pub client_secret: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Default, Debug, Clone)]
|
||||
#[serde(default)]
|
||||
pub struct TierAzure {
|
||||
pub name: String,
|
||||
pub endpoint: String,
|
||||
#[serde(rename = "accessKey")]
|
||||
pub access_key: String,
|
||||
#[serde(rename = "secretKey")]
|
||||
pub secret_key: String,
|
||||
pub bucket: String,
|
||||
pub prefix: String,
|
||||
pub region: String,
|
||||
#[serde(rename = "storageClass")]
|
||||
pub storage_class: String,
|
||||
#[serde(rename = "spAuth")]
|
||||
pub sp_auth: ServicePrincipalAuth,
|
||||
}
|
||||
|
||||
impl TierAzure {
|
||||
pub fn is_sp_enabled(&self) -> bool {
|
||||
!self.sp_auth.tenant_id.is_empty() && !self.sp_auth.client_id.is_empty() && !self.sp_auth.client_secret.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
fn AzureServicePrincipal(tenantID, clientID, clientSecret string) func(az *TierAzure) error {
|
||||
return func(az *TierAzure) error {
|
||||
if tenantID == "" {
|
||||
return errors.New("empty tenant ID unsupported")
|
||||
}
|
||||
if clientID == "" {
|
||||
return errors.New("empty client ID unsupported")
|
||||
}
|
||||
if clientSecret == "" {
|
||||
return errors.New("empty client secret unsupported")
|
||||
}
|
||||
az.SPAuth.TenantID = tenantID
|
||||
az.SPAuth.ClientID = clientID
|
||||
az.SPAuth.ClientSecret = clientSecret
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
fn AzurePrefix(prefix string) func(az *TierAzure) error {
|
||||
return func(az *TierAzure) error {
|
||||
az.Prefix = prefix
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
fn AzureEndpoint(endpoint string) func(az *TierAzure) error {
|
||||
return func(az *TierAzure) error {
|
||||
az.Endpoint = endpoint
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
fn AzureRegion(region string) func(az *TierAzure) error {
|
||||
return func(az *TierAzure) error {
|
||||
az.Region = region
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
fn AzureStorageClass(sc string) func(az *TierAzure) error {
|
||||
return func(az *TierAzure) error {
|
||||
az.StorageClass = sc
|
||||
return nil
|
||||
}
|
||||
}*/
|
||||
|
||||
#[derive(Serialize, Deserialize, Default, Debug, Clone)]
|
||||
#[serde(default)]
|
||||
pub struct TierGCS {
|
||||
pub name: String,
|
||||
pub endpoint: String,
|
||||
#[serde(rename = "creds")]
|
||||
pub creds: String,
|
||||
pub bucket: String,
|
||||
pub prefix: String,
|
||||
pub region: String,
|
||||
#[serde(rename = "storageClass")]
|
||||
pub storage_class: String,
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Default, Debug, Clone)]
|
||||
#[serde(default)]
|
||||
pub struct TierR2 {
|
||||
pub name: String,
|
||||
pub endpoint: String,
|
||||
#[serde(rename = "accessKey")]
|
||||
pub access_key: String,
|
||||
#[serde(rename = "secretKey")]
|
||||
pub secret_key: String,
|
||||
pub bucket: String,
|
||||
pub prefix: String,
|
||||
pub region: String,
|
||||
}
|
||||
|
||||
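For reference, a minimal sketch of how the camelCase serde renames on the new tier structs behave (illustrative only, not part of the commit; assumes serde_json is available in the workspace, and all field values below are made up):
// Illustrative only: with #[serde(rename = "accessKey")] / #[serde(rename = "secretKey")],
// the JSON keys are camelCase while the Rust fields stay snake_case.
let aliyun = TierAliyun {
    name: "COLD".to_string(),
    endpoint: "https://oss-example.example.com".to_string(), // hypothetical endpoint
    access_key: "AK".to_string(),
    secret_key: "SK".to_string(),
    bucket: "archive".to_string(),
    prefix: "tiered/".to_string(),
    region: "cn-hangzhou".to_string(),
};
// serde_json::to_string(&aliyun) emits "accessKey" / "secretKey" keys, and because the
// struct is #[serde(default)], keys missing from stored config deserialize to empty strings.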
@@ -27,9 +27,15 @@ use crate::tier::{
|
||||
tier::ERR_TIER_TYPE_UNSUPPORTED,
|
||||
tier_config::{TierConfig, TierType},
|
||||
tier_handlers::{ERR_TIER_BUCKET_NOT_FOUND, ERR_TIER_PERM_ERR},
|
||||
warm_backend_aliyun::WarmBackendAliyun,
|
||||
warm_backend_azure::WarmBackendAzure,
|
||||
warm_backend_gcs::WarmBackendGCS,
|
||||
warm_backend_huaweicloud::WarmBackendHuaweicloud,
|
||||
warm_backend_minio::WarmBackendMinIO,
|
||||
warm_backend_r2::WarmBackendR2,
|
||||
warm_backend_rustfs::WarmBackendRustFS,
|
||||
warm_backend_s3::WarmBackendS3,
|
||||
warm_backend_tencent::WarmBackendTencent,
|
||||
};
|
||||
use bytes::Bytes;
|
||||
use http::StatusCode;
|
||||
@@ -128,6 +134,78 @@ pub async fn new_warm_backend(tier: &TierConfig, probe: bool) -> Result<WarmBack
|
||||
}
|
||||
d = Some(Box::new(dd.expect("err")));
|
||||
}
|
||||
TierType::Aliyun => {
|
||||
let dd = WarmBackendAliyun::new(tier.aliyun.as_ref().expect("err"), &tier.name).await;
|
||||
if let Err(err) = dd {
|
||||
warn!("{}", err);
|
||||
return Err(AdminError {
|
||||
code: "XRustFSAdminTierInvalidConfig".to_string(),
|
||||
message: format!("Unable to setup remote tier, check tier configuration: {}", err.to_string()),
|
||||
status_code: StatusCode::BAD_REQUEST,
|
||||
});
|
||||
}
|
||||
d = Some(Box::new(dd.expect("err")));
|
||||
}
|
||||
TierType::Tencent => {
|
||||
let dd = WarmBackendTencent::new(tier.tencent.as_ref().expect("err"), &tier.name).await;
|
||||
if let Err(err) = dd {
|
||||
warn!("{}", err);
|
||||
return Err(AdminError {
|
||||
code: "XRustFSAdminTierInvalidConfig".to_string(),
|
||||
message: format!("Unable to setup remote tier, check tier configuration: {}", err.to_string()),
|
||||
status_code: StatusCode::BAD_REQUEST,
|
||||
});
|
||||
}
|
||||
d = Some(Box::new(dd.expect("err")));
|
||||
}
|
||||
TierType::Huaweicloud => {
|
||||
let dd = WarmBackendHuaweicloud::new(tier.huaweicloud.as_ref().expect("err"), &tier.name).await;
|
||||
if let Err(err) = dd {
|
||||
warn!("{}", err);
|
||||
return Err(AdminError {
|
||||
code: "XRustFSAdminTierInvalidConfig".to_string(),
|
||||
message: format!("Unable to setup remote tier, check tier configuration: {}", err.to_string()),
|
||||
status_code: StatusCode::BAD_REQUEST,
|
||||
});
|
||||
}
|
||||
d = Some(Box::new(dd.expect("err")));
|
||||
}
|
||||
TierType::Azure => {
|
||||
let dd = WarmBackendAzure::new(tier.azure.as_ref().expect("err"), &tier.name).await;
|
||||
if let Err(err) = dd {
|
||||
warn!("{}", err);
|
||||
return Err(AdminError {
|
||||
code: "XRustFSAdminTierInvalidConfig".to_string(),
|
||||
message: format!("Unable to setup remote tier, check tier configuration: {}", err.to_string()),
|
||||
status_code: StatusCode::BAD_REQUEST,
|
||||
});
|
||||
}
|
||||
d = Some(Box::new(dd.expect("err")));
|
||||
}
|
||||
TierType::GCS => {
|
||||
let dd = WarmBackendGCS::new(tier.gcs.as_ref().expect("err"), &tier.name).await;
|
||||
if let Err(err) = dd {
|
||||
warn!("{}", err);
|
||||
return Err(AdminError {
|
||||
code: "XRustFSAdminTierInvalidConfig".to_string(),
|
||||
message: format!("Unable to setup remote tier, check tier configuration: {}", err.to_string()),
|
||||
status_code: StatusCode::BAD_REQUEST,
|
||||
});
|
||||
}
|
||||
d = Some(Box::new(dd.expect("err")));
|
||||
}
|
||||
TierType::R2 => {
|
||||
let dd = WarmBackendR2::new(tier.r2.as_ref().expect("err"), &tier.name).await;
|
||||
if let Err(err) = dd {
|
||||
warn!("{}", err);
|
||||
return Err(AdminError {
|
||||
code: "XRustFSAdminTierInvalidConfig".to_string(),
|
||||
message: format!("Unable to setup remote tier, check tier configuration: {}", err.to_string()),
|
||||
status_code: StatusCode::BAD_REQUEST,
|
||||
});
|
||||
}
|
||||
d = Some(Box::new(dd.expect("err")));
|
||||
}
|
||||
_ => {
|
||||
return Err(ERR_TIER_TYPE_UNSUPPORTED.clone());
|
||||
}
|
||||
|

||||
164
crates/ecstore/src/tier/warm_backend_aliyun.rs
Normal file
@@ -0,0 +1,164 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#![allow(unused_imports)]
|
||||
#![allow(unused_variables)]
|
||||
#![allow(unused_mut)]
|
||||
#![allow(unused_assignments)]
|
||||
#![allow(unused_must_use)]
|
||||
#![allow(clippy::all)]
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::client::{
|
||||
admin_handler_utils::AdminError,
|
||||
api_put_object::PutObjectOptions,
|
||||
credentials::{Credentials, SignatureType, Static, Value},
|
||||
transition_api::{BucketLookupType, Options, ReadCloser, ReaderImpl, TransitionClient, TransitionCore},
|
||||
};
|
||||
use crate::tier::{
|
||||
tier_config::TierAliyun,
|
||||
warm_backend::{WarmBackend, WarmBackendGetOpts},
|
||||
warm_backend_s3::WarmBackendS3,
|
||||
};
|
||||
use tracing::warn;
|
||||
|
||||
const MAX_MULTIPART_PUT_OBJECT_SIZE: i64 = 1024 * 1024 * 1024 * 1024 * 5;
|
||||
const MAX_PARTS_COUNT: i64 = 10000;
|
||||
const _MAX_PART_SIZE: i64 = 1024 * 1024 * 1024 * 5;
|
||||
const MIN_PART_SIZE: i64 = 1024 * 1024 * 128;
|
||||
|
||||
pub struct WarmBackendAliyun(WarmBackendS3);
|
||||
|
||||
impl WarmBackendAliyun {
|
||||
pub async fn new(conf: &TierAliyun, tier: &str) -> Result<Self, std::io::Error> {
|
||||
if conf.access_key == "" || conf.secret_key == "" {
|
||||
return Err(std::io::Error::other("both access and secret keys are required"));
|
||||
}
|
||||
|
||||
if conf.bucket == "" {
|
||||
return Err(std::io::Error::other("no bucket name was provided"));
|
||||
}
|
||||
|
||||
let u = match url::Url::parse(&conf.endpoint) {
|
||||
Ok(u) => u,
|
||||
Err(e) => {
|
||||
return Err(std::io::Error::other(e.to_string()));
|
||||
}
|
||||
};
|
||||
|
||||
let creds = Credentials::new(Static(Value {
|
||||
access_key_id: conf.access_key.clone(),
|
||||
secret_access_key: conf.secret_key.clone(),
|
||||
session_token: "".to_string(),
|
||||
signer_type: SignatureType::SignatureV4,
|
||||
..Default::default()
|
||||
}));
|
||||
let opts = Options {
|
||||
creds,
|
||||
secure: u.scheme() == "https",
|
||||
//transport: GLOBAL_RemoteTargetTransport,
|
||||
trailing_headers: true,
|
||||
region: conf.region.clone(),
|
||||
bucket_lookup: BucketLookupType::BucketLookupDNS,
|
||||
..Default::default()
|
||||
};
|
||||
let scheme = u.scheme();
|
||||
let default_port = if scheme == "https" { 443 } else { 80 };
|
||||
let client = TransitionClient::new(
|
||||
&format!("{}:{}", u.host_str().expect("err"), u.port().unwrap_or(default_port)),
|
||||
opts,
|
||||
"aliyun",
|
||||
)
|
||||
.await?;
|
||||
|
||||
let client = Arc::new(client);
|
||||
let core = TransitionCore(Arc::clone(&client));
|
||||
Ok(Self(WarmBackendS3 {
|
||||
client,
|
||||
core,
|
||||
bucket: conf.bucket.clone(),
|
||||
prefix: conf.prefix.strip_suffix("/").unwrap_or(&conf.prefix).to_owned(),
|
||||
storage_class: "".to_string(),
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl WarmBackend for WarmBackendAliyun {
|
||||
async fn put_with_meta(
|
||||
&self,
|
||||
object: &str,
|
||||
r: ReaderImpl,
|
||||
length: i64,
|
||||
meta: HashMap<String, String>,
|
||||
) -> Result<String, std::io::Error> {
|
||||
let part_size = optimal_part_size(length)?;
|
||||
let client = self.0.client.clone();
|
||||
let res = client
|
||||
.put_object(
|
||||
&self.0.bucket,
|
||||
&self.0.get_dest(object),
|
||||
r,
|
||||
length,
|
||||
&PutObjectOptions {
|
||||
storage_class: self.0.storage_class.clone(),
|
||||
part_size: part_size as u64,
|
||||
disable_content_sha256: true,
|
||||
user_metadata: meta,
|
||||
..Default::default()
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
//self.ToObjectError(err, object)
|
||||
Ok(res.version_id)
|
||||
}
|
||||
|
||||
async fn put(&self, object: &str, r: ReaderImpl, length: i64) -> Result<String, std::io::Error> {
|
||||
self.put_with_meta(object, r, length, HashMap::new()).await
|
||||
}
|
||||
|
||||
async fn get(&self, object: &str, rv: &str, opts: WarmBackendGetOpts) -> Result<ReadCloser, std::io::Error> {
|
||||
self.0.get(object, rv, opts).await
|
||||
}
|
||||
|
||||
async fn remove(&self, object: &str, rv: &str) -> Result<(), std::io::Error> {
|
||||
self.0.remove(object, rv).await
|
||||
}
|
||||
|
||||
async fn in_use(&self) -> Result<bool, std::io::Error> {
|
||||
self.0.in_use().await
|
||||
}
|
||||
}
|
||||
|
||||
fn optimal_part_size(object_size: i64) -> Result<i64, std::io::Error> {
|
||||
let mut object_size = object_size;
|
||||
if object_size == -1 {
|
||||
object_size = MAX_MULTIPART_PUT_OBJECT_SIZE;
|
||||
}
|
||||
|
||||
if object_size > MAX_MULTIPART_PUT_OBJECT_SIZE {
|
||||
return Err(std::io::Error::other("entity too large"));
|
||||
}
|
||||
|
||||
let configured_part_size = MIN_PART_SIZE;
|
||||
let mut part_size_flt = object_size as f64 / MAX_PARTS_COUNT as f64;
|
||||
part_size_flt = (part_size_flt as f64 / configured_part_size as f64).ceil() * configured_part_size as f64;
|
||||
|
||||
let part_size = part_size_flt as i64;
|
||||
if part_size == 0 {
|
||||
return Ok(MIN_PART_SIZE);
|
||||
}
|
||||
Ok(part_size)
|
||||
}
|
||||
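The optimal_part_size helper above is duplicated verbatim in every new warm backend in this change. The following is a minimal standalone sketch of the same heuristic with illustrative checks; the constants mirror the diff, but the sample sizes and the main() harness are assumptions for demonstration, not code from the repository.

// Standalone sketch of the part-size heuristic used by the warm backends above.
const MAX_MULTIPART_PUT_OBJECT_SIZE: i64 = 1024 * 1024 * 1024 * 1024 * 5; // 5 TiB
const MAX_PARTS_COUNT: i64 = 10_000;
const MIN_PART_SIZE: i64 = 1024 * 1024 * 128; // 128 MiB

fn optimal_part_size(object_size: i64) -> Result<i64, std::io::Error> {
    // An unknown size (-1) is treated as the 5 TiB multipart maximum.
    let object_size = if object_size == -1 { MAX_MULTIPART_PUT_OBJECT_SIZE } else { object_size };
    if object_size > MAX_MULTIPART_PUT_OBJECT_SIZE {
        return Err(std::io::Error::other("entity too large"));
    }
    // Spread the object over at most MAX_PARTS_COUNT parts, rounding the part
    // size up to a multiple of MIN_PART_SIZE.
    let raw = object_size as f64 / MAX_PARTS_COUNT as f64;
    let part_size = ((raw / MIN_PART_SIZE as f64).ceil() * MIN_PART_SIZE as f64) as i64;
    Ok(if part_size == 0 { MIN_PART_SIZE } else { part_size })
}

fn main() -> std::io::Result<()> {
    // Small and medium objects all land on the 128 MiB floor.
    assert_eq!(optimal_part_size(10 * 1024 * 1024)?, MIN_PART_SIZE);
    assert_eq!(optimal_part_size(1024_i64.pow(4))?, MIN_PART_SIZE); // 1 TiB
    // An unknown length assumes the 5 TiB maximum and needs 640 MiB parts.
    assert_eq!(optimal_part_size(-1)?, 5 * MIN_PART_SIZE);
    // Anything above the multipart ceiling is rejected.
    assert!(optimal_part_size(MAX_MULTIPART_PUT_OBJECT_SIZE + 1).is_err());
    Ok(())
}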
crates/ecstore/src/tier/warm_backend_azure.rs  (new file, 164 lines)
@@ -0,0 +1,164 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#![allow(unused_imports)]
|
||||
#![allow(unused_variables)]
|
||||
#![allow(unused_mut)]
|
||||
#![allow(unused_assignments)]
|
||||
#![allow(unused_must_use)]
|
||||
#![allow(clippy::all)]
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::client::{
|
||||
admin_handler_utils::AdminError,
|
||||
api_put_object::PutObjectOptions,
|
||||
credentials::{Credentials, SignatureType, Static, Value},
|
||||
transition_api::{BucketLookupType, Options, ReadCloser, ReaderImpl, TransitionClient, TransitionCore},
|
||||
};
|
||||
use crate::tier::{
|
||||
tier_config::TierAzure,
|
||||
warm_backend::{WarmBackend, WarmBackendGetOpts},
|
||||
warm_backend_s3::WarmBackendS3,
|
||||
};
|
||||
use tracing::warn;
|
||||
|
||||
const MAX_MULTIPART_PUT_OBJECT_SIZE: i64 = 1024 * 1024 * 1024 * 1024 * 5;
|
||||
const MAX_PARTS_COUNT: i64 = 10000;
|
||||
const _MAX_PART_SIZE: i64 = 1024 * 1024 * 1024 * 5;
|
||||
const MIN_PART_SIZE: i64 = 1024 * 1024 * 128;
|
||||
|
||||
pub struct WarmBackendAzure(WarmBackendS3);
|
||||
|
||||
impl WarmBackendAzure {
|
||||
pub async fn new(conf: &TierAzure, tier: &str) -> Result<Self, std::io::Error> {
|
||||
if conf.access_key == "" || conf.secret_key == "" {
|
||||
return Err(std::io::Error::other("both access and secret keys are required"));
|
||||
}
|
||||
|
||||
if conf.bucket == "" {
|
||||
return Err(std::io::Error::other("no bucket name was provided"));
|
||||
}
|
||||
|
||||
let u = match url::Url::parse(&conf.endpoint) {
|
||||
Ok(u) => u,
|
||||
Err(e) => {
|
||||
return Err(std::io::Error::other(e.to_string()));
|
||||
}
|
||||
};
|
||||
|
||||
let creds = Credentials::new(Static(Value {
|
||||
access_key_id: conf.access_key.clone(),
|
||||
secret_access_key: conf.secret_key.clone(),
|
||||
session_token: "".to_string(),
|
||||
signer_type: SignatureType::SignatureV4,
|
||||
..Default::default()
|
||||
}));
|
||||
let opts = Options {
|
||||
creds,
|
||||
secure: u.scheme() == "https",
|
||||
//transport: GLOBAL_RemoteTargetTransport,
|
||||
trailing_headers: true,
|
||||
region: conf.region.clone(),
|
||||
bucket_lookup: BucketLookupType::BucketLookupDNS,
|
||||
..Default::default()
|
||||
};
|
||||
let scheme = u.scheme();
|
||||
let default_port = if scheme == "https" { 443 } else { 80 };
|
||||
let client = TransitionClient::new(
|
||||
&format!("{}:{}", u.host_str().expect("err"), u.port().unwrap_or(default_port)),
|
||||
opts,
|
||||
"azure",
|
||||
)
|
||||
.await?;
|
||||
|
||||
let client = Arc::new(client);
|
||||
let core = TransitionCore(Arc::clone(&client));
|
||||
Ok(Self(WarmBackendS3 {
|
||||
client,
|
||||
core,
|
||||
bucket: conf.bucket.clone(),
|
||||
prefix: conf.prefix.strip_suffix("/").unwrap_or(&conf.prefix).to_owned(),
|
||||
storage_class: "".to_string(),
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl WarmBackend for WarmBackendAzure {
|
||||
async fn put_with_meta(
|
||||
&self,
|
||||
object: &str,
|
||||
r: ReaderImpl,
|
||||
length: i64,
|
||||
meta: HashMap<String, String>,
|
||||
) -> Result<String, std::io::Error> {
|
||||
let part_size = optimal_part_size(length)?;
|
||||
let client = self.0.client.clone();
|
||||
let res = client
|
||||
.put_object(
|
||||
&self.0.bucket,
|
||||
&self.0.get_dest(object),
|
||||
r,
|
||||
length,
|
||||
&PutObjectOptions {
|
||||
storage_class: self.0.storage_class.clone(),
|
||||
part_size: part_size as u64,
|
||||
disable_content_sha256: true,
|
||||
user_metadata: meta,
|
||||
..Default::default()
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
//self.ToObjectError(err, object)
|
||||
Ok(res.version_id)
|
||||
}
|
||||
|
||||
async fn put(&self, object: &str, r: ReaderImpl, length: i64) -> Result<String, std::io::Error> {
|
||||
self.put_with_meta(object, r, length, HashMap::new()).await
|
||||
}
|
||||
|
||||
async fn get(&self, object: &str, rv: &str, opts: WarmBackendGetOpts) -> Result<ReadCloser, std::io::Error> {
|
||||
self.0.get(object, rv, opts).await
|
||||
}
|
||||
|
||||
async fn remove(&self, object: &str, rv: &str) -> Result<(), std::io::Error> {
|
||||
self.0.remove(object, rv).await
|
||||
}
|
||||
|
||||
async fn in_use(&self) -> Result<bool, std::io::Error> {
|
||||
self.0.in_use().await
|
||||
}
|
||||
}
|
||||
|
||||
fn optimal_part_size(object_size: i64) -> Result<i64, std::io::Error> {
|
||||
let mut object_size = object_size;
|
||||
if object_size == -1 {
|
||||
object_size = MAX_MULTIPART_PUT_OBJECT_SIZE;
|
||||
}
|
||||
|
||||
if object_size > MAX_MULTIPART_PUT_OBJECT_SIZE {
|
||||
return Err(std::io::Error::other("entity too large"));
|
||||
}
|
||||
|
||||
let configured_part_size = MIN_PART_SIZE;
|
||||
let mut part_size_flt = object_size as f64 / MAX_PARTS_COUNT as f64;
|
||||
part_size_flt = (part_size_flt as f64 / configured_part_size as f64).ceil() * configured_part_size as f64;
|
||||
|
||||
let part_size = part_size_flt as i64;
|
||||
if part_size == 0 {
|
||||
return Ok(MIN_PART_SIZE);
|
||||
}
|
||||
Ok(part_size)
|
||||
}
|
||||
crates/ecstore/src/tier/warm_backend_azure2.rs  (new file, 231 lines)
@@ -0,0 +1,231 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#![allow(unused_imports)]
|
||||
#![allow(unused_variables)]
|
||||
#![allow(unused_mut)]
|
||||
#![allow(unused_assignments)]
|
||||
#![allow(unused_must_use)]
|
||||
#![allow(clippy::all)]
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use azure_core::http::{Body, ClientOptions, RequestContent};
|
||||
use azure_storage::StorageCredentials;
|
||||
use azure_storage_blobs::prelude::*;
|
||||
|
||||
use crate::client::{
|
||||
admin_handler_utils::AdminError,
|
||||
api_put_object::PutObjectOptions,
|
||||
transition_api::{Options, ReadCloser, ReaderImpl},
|
||||
};
|
||||
use crate::tier::{
|
||||
tier_config::TierAzure,
|
||||
warm_backend::{WarmBackend, WarmBackendGetOpts},
|
||||
};
|
||||
use tracing::warn;
|
||||
|
||||
const MAX_MULTIPART_PUT_OBJECT_SIZE: i64 = 1024 * 1024 * 1024 * 1024 * 5;
|
||||
const MAX_PARTS_COUNT: i64 = 10000;
|
||||
const _MAX_PART_SIZE: i64 = 1024 * 1024 * 1024 * 5;
|
||||
const MIN_PART_SIZE: i64 = 1024 * 1024 * 128;
|
||||
|
||||
pub struct WarmBackendAzure {
|
||||
pub client: Arc<BlobServiceClient>,
|
||||
pub bucket: String,
|
||||
pub prefix: String,
|
||||
pub storage_class: String,
|
||||
}
|
||||
|
||||
impl WarmBackendAzure {
|
||||
pub async fn new(conf: &TierAzure, tier: &str) -> Result<Self, std::io::Error> {
|
||||
if conf.access_key == "" || conf.secret_key == "" {
|
||||
return Err(std::io::Error::other("both access and secret keys are required"));
|
||||
}
|
||||
|
||||
if conf.bucket == "" {
|
||||
return Err(std::io::Error::other("no bucket name was provided"));
|
||||
}
|
||||
|
||||
let creds = StorageCredentials::access_key(conf.access_key.clone(), conf.secret_key.clone());
|
||||
let client = ClientBuilder::new(conf.access_key.clone(), creds)
|
||||
//.endpoint(conf.endpoint)
|
||||
.blob_service_client();
|
||||
let client = Arc::new(client);
|
||||
Ok(Self {
|
||||
client,
|
||||
bucket: conf.bucket.clone(),
|
||||
prefix: conf.prefix.strip_suffix("/").unwrap_or(&conf.prefix).to_owned(),
|
||||
storage_class: "".to_string(),
|
||||
})
|
||||
}
|
||||
|
||||
/*pub fn tier(&self) -> *blob.AccessTier {
|
||||
if self.storage_class == "" {
|
||||
return None;
|
||||
}
|
||||
for t in blob.PossibleAccessTierValues() {
|
||||
if strings.EqualFold(self.storage_class, t) {
|
||||
return &t
|
||||
}
|
||||
}
|
||||
None
|
||||
}*/
|
||||
|
||||
pub fn get_dest(&self, object: &str) -> String {
|
||||
let mut dest_obj = object.to_string();
|
||||
if self.prefix != "" {
|
||||
dest_obj = format!("{}/{}", &self.prefix, object);
|
||||
}
|
||||
return dest_obj;
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl WarmBackend for WarmBackendAzure {
|
||||
async fn put_with_meta(
|
||||
&self,
|
||||
object: &str,
|
||||
r: ReaderImpl,
|
||||
length: i64,
|
||||
meta: HashMap<String, String>,
|
||||
) -> Result<String, std::io::Error> {
|
||||
let part_size = length;
|
||||
let client = self.client.clone();
|
||||
let container_client = client.container_client(self.bucket.clone());
|
||||
let blob_client = container_client.blob_client(self.get_dest(object));
|
||||
/*let res = blob_client
|
||||
.upload(
|
||||
RequestContent::from(match r {
|
||||
ReaderImpl::Body(content_body) => content_body.to_vec(),
|
||||
ReaderImpl::ObjectBody(mut content_body) => content_body.read_all().await?,
|
||||
}),
|
||||
false,
|
||||
length as u64,
|
||||
None,
|
||||
)
|
||||
.await
|
||||
else {
|
||||
return Err(std::io::Error::other("upload error"));
|
||||
};*/
|
||||
|
||||
let Ok(res) = blob_client
|
||||
.put_block_blob(match r {
|
||||
ReaderImpl::Body(content_body) => content_body.to_vec(),
|
||||
ReaderImpl::ObjectBody(mut content_body) => content_body.read_all().await?,
|
||||
})
|
||||
.content_type("text/plain")
|
||||
.into_future()
|
||||
.await
|
||||
else {
|
||||
return Err(std::io::Error::other("put_block_blob error"));
|
||||
};
|
||||
|
||||
//self.ToObjectError(err, object)
|
||||
Ok(res.request_id.to_string())
|
||||
}
|
||||
|
||||
async fn put(&self, object: &str, r: ReaderImpl, length: i64) -> Result<String, std::io::Error> {
|
||||
self.put_with_meta(object, r, length, HashMap::new()).await
|
||||
}
|
||||
|
||||
async fn get(&self, object: &str, rv: &str, opts: WarmBackendGetOpts) -> Result<ReadCloser, std::io::Error> {
|
||||
let client = self.client.clone();
|
||||
let container_client = client.container_client(self.bucket.clone());
|
||||
let blob_client = container_client.blob_client(self.get_dest(object));
|
||||
blob_client.get();
|
||||
todo!();
|
||||
}
|
||||
|
||||
async fn remove(&self, object: &str, rv: &str) -> Result<(), std::io::Error> {
|
||||
let client = self.client.clone();
|
||||
let container_client = client.container_client(self.bucket.clone());
|
||||
let blob_client = container_client.blob_client(self.get_dest(object));
|
||||
blob_client.delete();
|
||||
todo!();
|
||||
}
|
||||
|
||||
async fn in_use(&self) -> Result<bool, std::io::Error> {
|
||||
/*let result = self.client
|
||||
.list_objects_v2(&self.bucket, &self.prefix, "", "", SLASH_SEPARATOR, 1)
|
||||
.await?;
|
||||
|
||||
Ok(result.common_prefixes.len() > 0 || result.contents.len() > 0)*/
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
|
||||
/*fn azure_to_object_error(err: Error, params: Vec<String>) -> Option<error> {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
bucket := ""
|
||||
object := ""
|
||||
if len(params) >= 1 {
|
||||
bucket = params[0]
|
||||
}
|
||||
if len(params) == 2 {
|
||||
object = params[1]
|
||||
}
|
||||
|
||||
azureErr, ok := err.(*azcore.ResponseError)
|
||||
if !ok {
|
||||
// We don't interpret non Azure errors. As azure errors will
|
||||
// have StatusCode to help to convert to object errors.
|
||||
return err
|
||||
}
|
||||
|
||||
serviceCode := azureErr.ErrorCode
|
||||
statusCode := azureErr.StatusCode
|
||||
|
||||
azureCodesToObjectError(err, serviceCode, statusCode, bucket, object)
|
||||
}*/
|
||||
|
||||
/*fn azure_codes_to_object_error(err: Error, service_code: String, status_code: i32, bucket: String, object: String) -> Option<Error> {
|
||||
switch serviceCode {
|
||||
case "ContainerNotFound", "ContainerBeingDeleted":
|
||||
err = BucketNotFound{Bucket: bucket}
|
||||
case "ContainerAlreadyExists":
|
||||
err = BucketExists{Bucket: bucket}
|
||||
case "InvalidResourceName":
|
||||
err = BucketNameInvalid{Bucket: bucket}
|
||||
case "RequestBodyTooLarge":
|
||||
err = PartTooBig{}
|
||||
case "InvalidMetadata":
|
||||
err = UnsupportedMetadata{}
|
||||
case "BlobAccessTierNotSupportedForAccountType":
|
||||
err = NotImplemented{}
|
||||
case "OutOfRangeInput":
|
||||
err = ObjectNameInvalid{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
}
|
||||
default:
|
||||
switch statusCode {
|
||||
case http.StatusNotFound:
|
||||
if object != "" {
|
||||
err = ObjectNotFound{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
}
|
||||
} else {
|
||||
err = BucketNotFound{Bucket: bucket}
|
||||
}
|
||||
case http.StatusBadRequest:
|
||||
err = BucketNameInvalid{Bucket: bucket}
|
||||
}
|
||||
}
|
||||
return err
|
||||
}*/
|
||||
crates/ecstore/src/tier/warm_backend_gcs.rs  (new file, 248 lines)
@@ -0,0 +1,248 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#![allow(unused_imports)]
|
||||
#![allow(unused_variables)]
|
||||
#![allow(unused_mut)]
|
||||
#![allow(unused_assignments)]
|
||||
#![allow(unused_must_use)]
|
||||
#![allow(clippy::all)]
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use bytes::Bytes;
|
||||
use google_cloud_auth::credentials::Credentials;
|
||||
use google_cloud_auth::credentials::user_account::Builder;
|
||||
use google_cloud_storage as gcs;
|
||||
use google_cloud_storage::client::Storage;
|
||||
use std::convert::TryFrom;
|
||||
|
||||
use crate::client::{
|
||||
admin_handler_utils::AdminError,
|
||||
api_put_object::PutObjectOptions,
|
||||
transition_api::{Options, ReadCloser, ReaderImpl},
|
||||
};
|
||||
use crate::tier::{
|
||||
tier_config::TierGCS,
|
||||
warm_backend::{WarmBackend, WarmBackendGetOpts},
|
||||
};
|
||||
use tracing::warn;
|
||||
|
||||
const MAX_MULTIPART_PUT_OBJECT_SIZE: i64 = 1024 * 1024 * 1024 * 1024 * 5;
|
||||
const MAX_PARTS_COUNT: i64 = 10000;
|
||||
const _MAX_PART_SIZE: i64 = 1024 * 1024 * 1024 * 5;
|
||||
const MIN_PART_SIZE: i64 = 1024 * 1024 * 128;
|
||||
|
||||
pub struct WarmBackendGCS {
|
||||
pub client: Arc<Storage>,
|
||||
pub bucket: String,
|
||||
pub prefix: String,
|
||||
pub storage_class: String,
|
||||
}
|
||||
|
||||
impl WarmBackendGCS {
|
||||
pub async fn new(conf: &TierGCS, tier: &str) -> Result<Self, std::io::Error> {
|
||||
if conf.creds == "" {
|
||||
return Err(std::io::Error::other("both access and secret keys are required"));
|
||||
}
|
||||
|
||||
if conf.bucket == "" {
|
||||
return Err(std::io::Error::other("no bucket name was provided"));
|
||||
}
|
||||
|
||||
let authorized_user = serde_json::from_str(&conf.creds)?;
|
||||
let credentials = Builder::new(authorized_user)
|
||||
//.with_retry_policy(AlwaysRetry.with_attempt_limit(3))
|
||||
//.with_backoff_policy(backoff)
|
||||
.build()
|
||||
.map_err(|e| std::io::Error::other(format!("Invalid credentials JSON: {}", e)))?;
|
||||
|
||||
let Ok(client) = Storage::builder()
|
||||
.with_endpoint(conf.endpoint.clone())
|
||||
.with_credentials(credentials)
|
||||
.build()
|
||||
.await
|
||||
else {
|
||||
return Err(std::io::Error::other("Storage::builder error"));
|
||||
};
|
||||
let client = Arc::new(client);
|
||||
Ok(Self {
|
||||
client,
|
||||
bucket: conf.bucket.clone(),
|
||||
prefix: conf.prefix.strip_suffix("/").unwrap_or(&conf.prefix).to_owned(),
|
||||
storage_class: "".to_string(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_dest(&self, object: &str) -> String {
|
||||
let mut dest_obj = object.to_string();
|
||||
if self.prefix != "" {
|
||||
dest_obj = format!("{}/{}", &self.prefix, object);
|
||||
}
|
||||
return dest_obj;
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl WarmBackend for WarmBackendGCS {
|
||||
async fn put_with_meta(
|
||||
&self,
|
||||
object: &str,
|
||||
r: ReaderImpl,
|
||||
length: i64,
|
||||
meta: HashMap<String, String>,
|
||||
) -> Result<String, std::io::Error> {
|
||||
let d = match r {
|
||||
ReaderImpl::Body(content_body) => content_body.to_vec(),
|
||||
ReaderImpl::ObjectBody(mut content_body) => content_body.read_all().await?,
|
||||
};
|
||||
let Ok(res) = self
|
||||
.client
|
||||
.write_object(&self.bucket, &self.get_dest(object), Bytes::from(d))
|
||||
.send_buffered()
|
||||
.await
|
||||
else {
|
||||
return Err(std::io::Error::other("write_object error"));
|
||||
};
|
||||
//self.ToObjectError(err, object)
|
||||
Ok(res.generation.to_string())
|
||||
}
|
||||
|
||||
async fn put(&self, object: &str, r: ReaderImpl, length: i64) -> Result<String, std::io::Error> {
|
||||
self.put_with_meta(object, r, length, HashMap::new()).await
|
||||
}
|
||||
|
||||
async fn get(&self, object: &str, rv: &str, opts: WarmBackendGetOpts) -> Result<ReadCloser, std::io::Error> {
|
||||
let Ok(mut reader) = self.client.read_object(&self.bucket, &self.get_dest(object)).send().await else {
|
||||
return Err(std::io::Error::other("read_object error"));
|
||||
};
|
||||
let mut contents = Vec::new();
|
||||
while let Ok(Some(chunk)) = reader.next().await.transpose() {
|
||||
contents.extend_from_slice(&chunk);
|
||||
}
|
||||
Ok(ReadCloser::new(std::io::Cursor::new(contents)))
|
||||
}
|
||||
|
||||
async fn remove(&self, object: &str, rv: &str) -> Result<(), std::io::Error> {
|
||||
/*self.client
|
||||
.delete_object()
|
||||
.set_bucket(&self.bucket)
|
||||
.set_object(&self.get_dest(object))
|
||||
//.set_generation(object.generation)
|
||||
.send()
|
||||
.await?;*/
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn in_use(&self) -> Result<bool, std::io::Error> {
|
||||
/*let result = self.client
|
||||
.list_objects_v2(&self.bucket, &self.prefix, "", "", SLASH_SEPARATOR, 1)
|
||||
.await?;
|
||||
|
||||
Ok(result.common_prefixes.len() > 0 || result.contents.len() > 0)*/
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
|
||||
/*fn gcs_to_object_error(err: Error, params: Vec<String>) -> Option<Error> {
|
||||
if err == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
bucket := ""
|
||||
object := ""
|
||||
uploadID := ""
|
||||
if len(params) >= 1 {
|
||||
bucket = params[0]
|
||||
}
|
||||
if len(params) == 2 {
|
||||
object = params[1]
|
||||
}
|
||||
if len(params) == 3 {
|
||||
uploadID = params[2]
|
||||
}
|
||||
|
||||
// in some cases just a plain error is being returned
|
||||
switch err.Error() {
|
||||
case "storage: bucket doesn't exist":
|
||||
err = BucketNotFound{
|
||||
Bucket: bucket,
|
||||
}
|
||||
return err
|
||||
case "storage: object doesn't exist":
|
||||
if uploadID != "" {
|
||||
err = InvalidUploadID{
|
||||
UploadID: uploadID,
|
||||
}
|
||||
} else {
|
||||
err = ObjectNotFound{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
googleAPIErr, ok := err.(*googleapi.Error)
|
||||
if !ok {
|
||||
// We don't interpret non MinIO errors. As minio errors will
|
||||
// have StatusCode to help to convert to object errors.
|
||||
return err
|
||||
}
|
||||
|
||||
if len(googleAPIErr.Errors) == 0 {
|
||||
return err
|
||||
}
|
||||
|
||||
reason := googleAPIErr.Errors[0].Reason
|
||||
message := googleAPIErr.Errors[0].Message
|
||||
|
||||
switch reason {
|
||||
case "required":
|
||||
// Anonymous users does not have storage.xyz access to project 123.
|
||||
fallthrough
|
||||
case "keyInvalid":
|
||||
fallthrough
|
||||
case "forbidden":
|
||||
err = PrefixAccessDenied{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
}
|
||||
case "invalid":
|
||||
err = BucketNameInvalid{
|
||||
Bucket: bucket,
|
||||
}
|
||||
case "notFound":
|
||||
if object != "" {
|
||||
err = ObjectNotFound{
|
||||
Bucket: bucket,
|
||||
Object: object,
|
||||
}
|
||||
break
|
||||
}
|
||||
err = BucketNotFound{Bucket: bucket}
|
||||
case "conflict":
|
||||
if message == "You already own this bucket. Please select another name." {
|
||||
err = BucketAlreadyOwnedByYou{Bucket: bucket}
|
||||
break
|
||||
}
|
||||
if message == "Sorry, that name is not available. Please try a different one." {
|
||||
err = BucketAlreadyExists{Bucket: bucket}
|
||||
break
|
||||
}
|
||||
err = BucketNotEmpty{Bucket: bucket}
|
||||
}
|
||||
|
||||
return err
|
||||
}*/
|
||||
crates/ecstore/src/tier/warm_backend_huaweicloud.rs  (new file, 164 lines)
@@ -0,0 +1,164 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#![allow(unused_imports)]
|
||||
#![allow(unused_variables)]
|
||||
#![allow(unused_mut)]
|
||||
#![allow(unused_assignments)]
|
||||
#![allow(unused_must_use)]
|
||||
#![allow(clippy::all)]
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::client::{
|
||||
admin_handler_utils::AdminError,
|
||||
api_put_object::PutObjectOptions,
|
||||
credentials::{Credentials, SignatureType, Static, Value},
|
||||
transition_api::{BucketLookupType, Options, ReadCloser, ReaderImpl, TransitionClient, TransitionCore},
|
||||
};
|
||||
use crate::tier::{
|
||||
tier_config::TierHuaweicloud,
|
||||
warm_backend::{WarmBackend, WarmBackendGetOpts},
|
||||
warm_backend_s3::WarmBackendS3,
|
||||
};
|
||||
use tracing::warn;
|
||||
|
||||
const MAX_MULTIPART_PUT_OBJECT_SIZE: i64 = 1024 * 1024 * 1024 * 1024 * 5;
|
||||
const MAX_PARTS_COUNT: i64 = 10000;
|
||||
const _MAX_PART_SIZE: i64 = 1024 * 1024 * 1024 * 5;
|
||||
const MIN_PART_SIZE: i64 = 1024 * 1024 * 128;
|
||||
|
||||
pub struct WarmBackendHuaweicloud(WarmBackendS3);
|
||||
|
||||
impl WarmBackendHuaweicloud {
|
||||
pub async fn new(conf: &TierHuaweicloud, tier: &str) -> Result<Self, std::io::Error> {
|
||||
if conf.access_key == "" || conf.secret_key == "" {
|
||||
return Err(std::io::Error::other("both access and secret keys are required"));
|
||||
}
|
||||
|
||||
if conf.bucket == "" {
|
||||
return Err(std::io::Error::other("no bucket name was provided"));
|
||||
}
|
||||
|
||||
let u = match url::Url::parse(&conf.endpoint) {
|
||||
Ok(u) => u,
|
||||
Err(e) => {
|
||||
return Err(std::io::Error::other(e.to_string()));
|
||||
}
|
||||
};
|
||||
|
||||
let creds = Credentials::new(Static(Value {
|
||||
access_key_id: conf.access_key.clone(),
|
||||
secret_access_key: conf.secret_key.clone(),
|
||||
session_token: "".to_string(),
|
||||
signer_type: SignatureType::SignatureV4,
|
||||
..Default::default()
|
||||
}));
|
||||
let opts = Options {
|
||||
creds,
|
||||
secure: u.scheme() == "https",
|
||||
//transport: GLOBAL_RemoteTargetTransport,
|
||||
trailing_headers: true,
|
||||
region: conf.region.clone(),
|
||||
bucket_lookup: BucketLookupType::BucketLookupDNS,
|
||||
..Default::default()
|
||||
};
|
||||
let scheme = u.scheme();
|
||||
let default_port = if scheme == "https" { 443 } else { 80 };
|
||||
let client = TransitionClient::new(
|
||||
&format!("{}:{}", u.host_str().expect("err"), u.port().unwrap_or(default_port)),
|
||||
opts,
|
||||
"huaweicloud",
|
||||
)
|
||||
.await?;
|
||||
|
||||
let client = Arc::new(client);
|
||||
let core = TransitionCore(Arc::clone(&client));
|
||||
Ok(Self(WarmBackendS3 {
|
||||
client,
|
||||
core,
|
||||
bucket: conf.bucket.clone(),
|
||||
prefix: conf.prefix.strip_suffix("/").unwrap_or(&conf.prefix).to_owned(),
|
||||
storage_class: "".to_string(),
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl WarmBackend for WarmBackendHuaweicloud {
|
||||
async fn put_with_meta(
|
||||
&self,
|
||||
object: &str,
|
||||
r: ReaderImpl,
|
||||
length: i64,
|
||||
meta: HashMap<String, String>,
|
||||
) -> Result<String, std::io::Error> {
|
||||
let part_size = optimal_part_size(length)?;
|
||||
let client = self.0.client.clone();
|
||||
let res = client
|
||||
.put_object(
|
||||
&self.0.bucket,
|
||||
&self.0.get_dest(object),
|
||||
r,
|
||||
length,
|
||||
&PutObjectOptions {
|
||||
storage_class: self.0.storage_class.clone(),
|
||||
part_size: part_size as u64,
|
||||
disable_content_sha256: true,
|
||||
user_metadata: meta,
|
||||
..Default::default()
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
//self.ToObjectError(err, object)
|
||||
Ok(res.version_id)
|
||||
}
|
||||
|
||||
async fn put(&self, object: &str, r: ReaderImpl, length: i64) -> Result<String, std::io::Error> {
|
||||
self.put_with_meta(object, r, length, HashMap::new()).await
|
||||
}
|
||||
|
||||
async fn get(&self, object: &str, rv: &str, opts: WarmBackendGetOpts) -> Result<ReadCloser, std::io::Error> {
|
||||
self.0.get(object, rv, opts).await
|
||||
}
|
||||
|
||||
async fn remove(&self, object: &str, rv: &str) -> Result<(), std::io::Error> {
|
||||
self.0.remove(object, rv).await
|
||||
}
|
||||
|
||||
async fn in_use(&self) -> Result<bool, std::io::Error> {
|
||||
self.0.in_use().await
|
||||
}
|
||||
}
|
||||
|
||||
fn optimal_part_size(object_size: i64) -> Result<i64, std::io::Error> {
|
||||
let mut object_size = object_size;
|
||||
if object_size == -1 {
|
||||
object_size = MAX_MULTIPART_PUT_OBJECT_SIZE;
|
||||
}
|
||||
|
||||
if object_size > MAX_MULTIPART_PUT_OBJECT_SIZE {
|
||||
return Err(std::io::Error::other("entity too large"));
|
||||
}
|
||||
|
||||
let configured_part_size = MIN_PART_SIZE;
|
||||
let mut part_size_flt = object_size as f64 / MAX_PARTS_COUNT as f64;
|
||||
part_size_flt = (part_size_flt as f64 / configured_part_size as f64).ceil() * configured_part_size as f64;
|
||||
|
||||
let part_size = part_size_flt as i64;
|
||||
if part_size == 0 {
|
||||
return Ok(MIN_PART_SIZE);
|
||||
}
|
||||
Ok(part_size)
|
||||
}
|
||||
@@ -1,4 +1,3 @@
#![allow(unused_imports)]
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,6 +11,7 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(unused_imports)]
#![allow(unused_variables)]
#![allow(unused_mut)]
#![allow(unused_assignments)]
@@ -70,12 +70,17 @@ impl WarmBackendMinIO
            secure: u.scheme() == "https",
            //transport: GLOBAL_RemoteTargetTransport,
            trailing_headers: true,
            region: conf.region.clone(),
            ..Default::default()
        };
        let scheme = u.scheme();
        let default_port = if scheme == "https" { 443 } else { 80 };
        let client =
            TransitionClient::new(&format!("{}:{}", u.host_str().expect("err"), u.port().unwrap_or(default_port)), opts).await?;
        let client = TransitionClient::new(
            &format!("{}:{}", u.host_str().expect("err"), u.port().unwrap_or(default_port)),
            opts,
            "minio",
        )
        .await?;

        let client = Arc::new(client);
        let core = TransitionCore(Arc::clone(&client));
crates/ecstore/src/tier/warm_backend_r2.rs  (new file, 163 lines)
@@ -0,0 +1,163 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#![allow(unused_imports)]
|
||||
#![allow(unused_variables)]
|
||||
#![allow(unused_mut)]
|
||||
#![allow(unused_assignments)]
|
||||
#![allow(unused_must_use)]
|
||||
#![allow(clippy::all)]
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::client::{
|
||||
admin_handler_utils::AdminError,
|
||||
api_put_object::PutObjectOptions,
|
||||
credentials::{Credentials, SignatureType, Static, Value},
|
||||
transition_api::{Options, ReadCloser, ReaderImpl, TransitionClient, TransitionCore},
|
||||
};
|
||||
use crate::tier::{
|
||||
tier_config::TierR2,
|
||||
warm_backend::{WarmBackend, WarmBackendGetOpts},
|
||||
warm_backend_s3::WarmBackendS3,
|
||||
};
|
||||
use tracing::warn;
|
||||
|
||||
const MAX_MULTIPART_PUT_OBJECT_SIZE: i64 = 1024 * 1024 * 1024 * 1024 * 5;
|
||||
const MAX_PARTS_COUNT: i64 = 10000;
|
||||
const _MAX_PART_SIZE: i64 = 1024 * 1024 * 1024 * 5;
|
||||
const MIN_PART_SIZE: i64 = 1024 * 1024 * 128;
|
||||
|
||||
pub struct WarmBackendR2(WarmBackendS3);
|
||||
|
||||
impl WarmBackendR2 {
|
||||
pub async fn new(conf: &TierR2, tier: &str) -> Result<Self, std::io::Error> {
|
||||
if conf.access_key == "" || conf.secret_key == "" {
|
||||
return Err(std::io::Error::other("both access and secret keys are required"));
|
||||
}
|
||||
|
||||
if conf.bucket == "" {
|
||||
return Err(std::io::Error::other("no bucket name was provided"));
|
||||
}
|
||||
|
||||
let u = match url::Url::parse(&conf.endpoint) {
|
||||
Ok(u) => u,
|
||||
Err(e) => {
|
||||
return Err(std::io::Error::other(e.to_string()));
|
||||
}
|
||||
};
|
||||
|
||||
let creds = Credentials::new(Static(Value {
|
||||
access_key_id: conf.access_key.clone(),
|
||||
secret_access_key: conf.secret_key.clone(),
|
||||
session_token: "".to_string(),
|
||||
signer_type: SignatureType::SignatureV4,
|
||||
..Default::default()
|
||||
}));
|
||||
let opts = Options {
|
||||
creds,
|
||||
secure: u.scheme() == "https",
|
||||
//transport: GLOBAL_RemoteTargetTransport,
|
||||
trailing_headers: true,
|
||||
region: conf.region.clone(),
|
||||
..Default::default()
|
||||
};
|
||||
let scheme = u.scheme();
|
||||
let default_port = if scheme == "https" { 443 } else { 80 };
|
||||
let client = TransitionClient::new(
|
||||
&format!("{}:{}", u.host_str().expect("err"), u.port().unwrap_or(default_port)),
|
||||
opts,
|
||||
"r2",
|
||||
)
|
||||
.await?;
|
||||
|
||||
let client = Arc::new(client);
|
||||
let core = TransitionCore(Arc::clone(&client));
|
||||
Ok(Self(WarmBackendS3 {
|
||||
client,
|
||||
core,
|
||||
bucket: conf.bucket.clone(),
|
||||
prefix: conf.prefix.strip_suffix("/").unwrap_or(&conf.prefix).to_owned(),
|
||||
storage_class: "".to_string(),
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl WarmBackend for WarmBackendR2 {
|
||||
async fn put_with_meta(
|
||||
&self,
|
||||
object: &str,
|
||||
r: ReaderImpl,
|
||||
length: i64,
|
||||
meta: HashMap<String, String>,
|
||||
) -> Result<String, std::io::Error> {
|
||||
let part_size = optimal_part_size(length)?;
|
||||
let client = self.0.client.clone();
|
||||
let res = client
|
||||
.put_object(
|
||||
&self.0.bucket,
|
||||
&self.0.get_dest(object),
|
||||
r,
|
||||
length,
|
||||
&PutObjectOptions {
|
||||
storage_class: self.0.storage_class.clone(),
|
||||
part_size: part_size as u64,
|
||||
disable_content_sha256: true,
|
||||
user_metadata: meta,
|
||||
..Default::default()
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
//self.ToObjectError(err, object)
|
||||
Ok(res.version_id)
|
||||
}
|
||||
|
||||
async fn put(&self, object: &str, r: ReaderImpl, length: i64) -> Result<String, std::io::Error> {
|
||||
self.put_with_meta(object, r, length, HashMap::new()).await
|
||||
}
|
||||
|
||||
async fn get(&self, object: &str, rv: &str, opts: WarmBackendGetOpts) -> Result<ReadCloser, std::io::Error> {
|
||||
self.0.get(object, rv, opts).await
|
||||
}
|
||||
|
||||
async fn remove(&self, object: &str, rv: &str) -> Result<(), std::io::Error> {
|
||||
self.0.remove(object, rv).await
|
||||
}
|
||||
|
||||
async fn in_use(&self) -> Result<bool, std::io::Error> {
|
||||
self.0.in_use().await
|
||||
}
|
||||
}
|
||||
|
||||
fn optimal_part_size(object_size: i64) -> Result<i64, std::io::Error> {
|
||||
let mut object_size = object_size;
|
||||
if object_size == -1 {
|
||||
object_size = MAX_MULTIPART_PUT_OBJECT_SIZE;
|
||||
}
|
||||
|
||||
if object_size > MAX_MULTIPART_PUT_OBJECT_SIZE {
|
||||
return Err(std::io::Error::other("entity too large"));
|
||||
}
|
||||
|
||||
let configured_part_size = MIN_PART_SIZE;
|
||||
let mut part_size_flt = object_size as f64 / MAX_PARTS_COUNT as f64;
|
||||
part_size_flt = (part_size_flt as f64 / configured_part_size as f64).ceil() * configured_part_size as f64;
|
||||
|
||||
let part_size = part_size_flt as i64;
|
||||
if part_size == 0 {
|
||||
return Ok(MIN_PART_SIZE);
|
||||
}
|
||||
Ok(part_size)
|
||||
}
|
||||
@@ -1,4 +1,3 @@
#![allow(unused_imports)]
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,6 +11,7 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(unused_imports)]
#![allow(unused_variables)]
#![allow(unused_mut)]
#![allow(unused_assignments)]
@@ -67,12 +67,17 @@ impl WarmBackendRustFS
            secure: u.scheme() == "https",
            //transport: GLOBAL_RemoteTargetTransport,
            trailing_headers: true,
            region: conf.region.clone(),
            ..Default::default()
        };
        let scheme = u.scheme();
        let default_port = if scheme == "https" { 443 } else { 80 };
        let client =
            TransitionClient::new(&format!("{}:{}", u.host_str().expect("err"), u.port().unwrap_or(default_port)), opts).await?;
        let client = TransitionClient::new(
            &format!("{}:{}", u.host_str().expect("err"), u.port().unwrap_or(default_port)),
            opts,
            "rustfs",
        )
        .await?;

        let client = Arc::new(client);
        let core = TransitionCore(Arc::clone(&client));
@@ -1,4 +1,3 @@
#![allow(unused_imports)]
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
@@ -12,6 +11,7 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(unused_imports)]
#![allow(unused_variables)]
#![allow(unused_mut)]
#![allow(unused_assignments)]
@@ -92,9 +92,10 @@ impl WarmBackendS3 {
            creds,
            secure: u.scheme() == "https",
            //transport: GLOBAL_RemoteTargetTransport,
            region: conf.region.clone(),
            ..Default::default()
        };
        let client = TransitionClient::new(&u.host().expect("err").to_string(), opts).await?;
        let client = TransitionClient::new(&u.host().expect("err").to_string(), opts, "s3").await?;

        let client = Arc::new(client);
        let core = TransitionCore(Arc::clone(&client));
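The three hunks above make the same change to the MinIO, RustFS and S3 warm backends: TransitionClient::new now takes the backend name ("minio", "rustfs", "s3", plus the new tiers added in this branch) as a third argument. A rough sketch of the new call shape follows; TransitionClient and Options here are stand-in stubs, since the real types (with an async constructor and many more fields) live in crates/ecstore and are not reproduced in this diff.

// Illustrative stub only; the real constructor is async and takes a full Options value.
#[derive(Default)]
struct Options {
    secure: bool,
    region: String,
}

struct TransitionClient {
    endpoint: String,
    backend: String,
}

impl TransitionClient {
    fn new(endpoint: &str, _opts: Options, backend: &str) -> Result<Self, std::io::Error> {
        Ok(Self {
            endpoint: endpoint.to_string(),
            backend: backend.to_string(),
        })
    }
}

fn main() -> std::io::Result<()> {
    // After this change every warm backend passes its tier name explicitly.
    let client = TransitionClient::new("warm.example.com:443", Options { secure: true, ..Default::default() }, "minio")?;
    println!("{} -> {}", client.backend, client.endpoint);
    Ok(())
}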
crates/ecstore/src/tier/warm_backend_s3sdk.rs  (new file, 196 lines)
@@ -0,0 +1,196 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#![allow(unused_imports)]
|
||||
#![allow(unused_variables)]
|
||||
#![allow(unused_mut)]
|
||||
#![allow(unused_assignments)]
|
||||
#![allow(unused_must_use)]
|
||||
#![allow(clippy::all)]
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use url::Url;
|
||||
|
||||
use aws_config::meta::region::RegionProviderChain;
|
||||
use aws_sdk_s3::Client;
|
||||
use aws_sdk_s3::config::{Credentials, Region};
|
||||
use aws_sdk_s3::primitives::ByteStream;
|
||||
|
||||
use crate::client::{
|
||||
api_get_options::GetObjectOptions,
|
||||
api_put_object::PutObjectOptions,
|
||||
api_remove::RemoveObjectOptions,
|
||||
transition_api::{ReadCloser, ReaderImpl},
|
||||
};
|
||||
use crate::error::ErrorResponse;
|
||||
use crate::error::error_resp_to_object_err;
|
||||
use crate::tier::{
|
||||
tier_config::TierS3,
|
||||
warm_backend::{WarmBackend, WarmBackendGetOpts},
|
||||
};
|
||||
use rustfs_utils::path::SLASH_SEPARATOR;
|
||||
|
||||
pub struct WarmBackendS3 {
|
||||
pub client: Arc<Client>,
|
||||
pub bucket: String,
|
||||
pub prefix: String,
|
||||
pub storage_class: String,
|
||||
}
|
||||
|
||||
impl WarmBackendS3 {
|
||||
pub async fn new(conf: &TierS3, tier: &str) -> Result<Self, std::io::Error> {
|
||||
let u = match Url::parse(&conf.endpoint) {
|
||||
Ok(u) => u,
|
||||
Err(err) => {
|
||||
return Err(std::io::Error::other(err.to_string()));
|
||||
}
|
||||
};
|
||||
|
||||
if conf.aws_role_web_identity_token_file == "" && conf.aws_role_arn != ""
|
||||
|| conf.aws_role_web_identity_token_file != "" && conf.aws_role_arn == ""
|
||||
{
|
||||
return Err(std::io::Error::other("both the token file and the role ARN are required"));
|
||||
} else if conf.access_key == "" && conf.secret_key != "" || conf.access_key != "" && conf.secret_key == "" {
|
||||
return Err(std::io::Error::other("both the access and secret keys are required"));
|
||||
} else if conf.aws_role
|
||||
&& (conf.aws_role_web_identity_token_file != ""
|
||||
|| conf.aws_role_arn != ""
|
||||
|| conf.access_key != ""
|
||||
|| conf.secret_key != "")
|
||||
{
|
||||
return Err(std::io::Error::other(
|
||||
"AWS Role cannot be activated with static credentials or the web identity token file",
|
||||
));
|
||||
} else if conf.bucket == "" {
|
||||
return Err(std::io::Error::other("no bucket name was provided"));
|
||||
}
|
||||
|
||||
let creds;
|
||||
if conf.access_key != "" && conf.secret_key != "" {
|
||||
creds = Credentials::new(
|
||||
conf.access_key.clone(), // access_key_id
|
||||
conf.secret_key.clone(), // secret_access_key
|
||||
None, // session_token (optional)
|
||||
None,
|
||||
"Static",
|
||||
);
|
||||
} else {
|
||||
return Err(std::io::Error::other("insufficient parameters for S3 backend authentication"));
|
||||
}
|
||||
let region_provider = RegionProviderChain::default_provider().or_else(Region::new(conf.region.clone()));
|
||||
#[allow(deprecated)]
|
||||
let config = aws_config::from_env()
|
||||
.endpoint_url(conf.endpoint.clone())
|
||||
.region(region_provider)
|
||||
.credentials_provider(creds)
|
||||
.load()
|
||||
.await;
|
||||
let client = Client::new(&config);
|
||||
let client = Arc::new(client);
|
||||
Ok(Self {
|
||||
client,
|
||||
bucket: conf.bucket.clone(),
|
||||
prefix: conf.prefix.clone().trim_matches('/').to_string(),
|
||||
storage_class: conf.storage_class.clone(),
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_dest(&self, object: &str) -> String {
|
||||
let mut dest_obj = object.to_string();
|
||||
if self.prefix != "" {
|
||||
dest_obj = format!("{}/{}", &self.prefix, object);
|
||||
}
|
||||
return dest_obj;
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl WarmBackend for WarmBackendS3 {
|
||||
async fn put_with_meta(
|
||||
&self,
|
||||
object: &str,
|
||||
r: ReaderImpl,
|
||||
length: i64,
|
||||
meta: HashMap<String, String>,
|
||||
) -> Result<String, std::io::Error> {
|
||||
let client = self.client.clone();
|
||||
let Ok(res) = client
|
||||
.put_object()
|
||||
.bucket(&self.bucket)
|
||||
.key(&self.get_dest(object))
|
||||
.body(match r {
|
||||
ReaderImpl::Body(content_body) => ByteStream::from(content_body.to_vec()),
|
||||
ReaderImpl::ObjectBody(mut content_body) => ByteStream::from(content_body.read_all().await?),
|
||||
})
|
||||
.send()
|
||||
.await
|
||||
else {
|
||||
return Err(std::io::Error::other("put_object error"));
|
||||
};
|
||||
|
||||
Ok(res.version_id().unwrap_or("").to_string())
|
||||
}
|
||||
|
||||
async fn put(&self, object: &str, r: ReaderImpl, length: i64) -> Result<String, std::io::Error> {
|
||||
self.put_with_meta(object, r, length, HashMap::new()).await
|
||||
}
|
||||
|
||||
async fn get(&self, object: &str, rv: &str, opts: WarmBackendGetOpts) -> Result<ReadCloser, std::io::Error> {
|
||||
let client = self.client.clone();
|
||||
let Ok(res) = client
|
||||
.get_object()
|
||||
.bucket(&self.bucket)
|
||||
.key(&self.get_dest(object))
|
||||
.send()
|
||||
.await
|
||||
else {
|
||||
return Err(std::io::Error::other("get_object error"));
|
||||
};
|
||||
|
||||
Ok(ReadCloser::new(std::io::Cursor::new(
|
||||
res.body.collect().await.map(|data| data.into_bytes().to_vec())?,
|
||||
)))
|
||||
}
|
||||
|
||||
async fn remove(&self, object: &str, rv: &str) -> Result<(), std::io::Error> {
|
||||
let client = self.client.clone();
|
||||
if let Err(_) = client
|
||||
.delete_object()
|
||||
.bucket(&self.bucket)
|
||||
.key(&self.get_dest(object))
|
||||
.send()
|
||||
.await
|
||||
{
|
||||
return Err(std::io::Error::other("delete_object error"));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn in_use(&self) -> Result<bool, std::io::Error> {
|
||||
let client = self.client.clone();
|
||||
let Ok(res) = client
|
||||
.list_objects_v2()
|
||||
.bucket(&self.bucket)
|
||||
//.max_keys(10)
|
||||
//.into_paginator()
|
||||
.send()
|
||||
.await
|
||||
else {
|
||||
return Err(std::io::Error::other("list_objects_v2 error"));
|
||||
};
|
||||
|
||||
Ok(res.common_prefixes.unwrap().len() > 0 || res.contents.unwrap().len() > 0)
|
||||
}
|
||||
}
|
||||
crates/ecstore/src/tier/warm_backend_tencent.rs  (new file, 164 lines)
@@ -0,0 +1,164 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#![allow(unused_imports)]
|
||||
#![allow(unused_variables)]
|
||||
#![allow(unused_mut)]
|
||||
#![allow(unused_assignments)]
|
||||
#![allow(unused_must_use)]
|
||||
#![allow(clippy::all)]
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
use crate::client::{
|
||||
admin_handler_utils::AdminError,
|
||||
api_put_object::PutObjectOptions,
|
||||
credentials::{Credentials, SignatureType, Static, Value},
|
||||
transition_api::{BucketLookupType, Options, ReadCloser, ReaderImpl, TransitionClient, TransitionCore},
|
||||
};
|
||||
use crate::tier::{
|
||||
tier_config::TierTencent,
|
||||
warm_backend::{WarmBackend, WarmBackendGetOpts},
|
||||
warm_backend_s3::WarmBackendS3,
|
||||
};
|
||||
use tracing::warn;
|
||||
|
||||
const MAX_MULTIPART_PUT_OBJECT_SIZE: i64 = 1024 * 1024 * 1024 * 1024 * 5;
|
||||
const MAX_PARTS_COUNT: i64 = 10000;
|
||||
const _MAX_PART_SIZE: i64 = 1024 * 1024 * 1024 * 5;
|
||||
const MIN_PART_SIZE: i64 = 1024 * 1024 * 128;
|
||||
|
||||
pub struct WarmBackendTencent(WarmBackendS3);
|
||||
|
||||
impl WarmBackendTencent {
|
||||
pub async fn new(conf: &TierTencent, tier: &str) -> Result<Self, std::io::Error> {
|
||||
if conf.access_key == "" || conf.secret_key == "" {
|
||||
return Err(std::io::Error::other("both access and secret keys are required"));
|
||||
}
|
||||
|
||||
if conf.bucket == "" {
|
||||
return Err(std::io::Error::other("no bucket name was provided"));
|
||||
}
|
||||
|
||||
let u = match url::Url::parse(&conf.endpoint) {
|
||||
Ok(u) => u,
|
||||
Err(e) => {
|
||||
return Err(std::io::Error::other(e.to_string()));
|
||||
}
|
||||
};
|
||||
|
||||
let creds = Credentials::new(Static(Value {
|
||||
access_key_id: conf.access_key.clone(),
|
||||
secret_access_key: conf.secret_key.clone(),
|
||||
session_token: "".to_string(),
|
||||
signer_type: SignatureType::SignatureV4,
|
||||
..Default::default()
|
||||
}));
|
||||
let opts = Options {
|
||||
creds,
|
||||
secure: u.scheme() == "https",
|
||||
//transport: GLOBAL_RemoteTargetTransport,
|
||||
trailing_headers: true,
|
||||
region: conf.region.clone(),
|
||||
bucket_lookup: BucketLookupType::BucketLookupDNS,
|
||||
..Default::default()
|
||||
};
|
||||
let scheme = u.scheme();
|
||||
let default_port = if scheme == "https" { 443 } else { 80 };
|
||||
let client = TransitionClient::new(
|
||||
&format!("{}:{}", u.host_str().expect("err"), u.port().unwrap_or(default_port)),
|
||||
opts,
|
||||
"tencent",
|
||||
)
|
||||
.await?;
|
||||
|
||||
let client = Arc::new(client);
|
||||
let core = TransitionCore(Arc::clone(&client));
|
||||
Ok(Self(WarmBackendS3 {
|
||||
client,
|
||||
core,
|
||||
bucket: conf.bucket.clone(),
|
||||
prefix: conf.prefix.strip_suffix("/").unwrap_or(&conf.prefix).to_owned(),
|
||||
storage_class: "".to_string(),
|
||||
}))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
impl WarmBackend for WarmBackendTencent {
|
||||
async fn put_with_meta(
|
||||
&self,
|
||||
object: &str,
|
||||
r: ReaderImpl,
|
||||
length: i64,
|
||||
meta: HashMap<String, String>,
|
||||
) -> Result<String, std::io::Error> {
|
||||
let part_size = optimal_part_size(length)?;
|
||||
let client = self.0.client.clone();
|
||||
let res = client
|
||||
.put_object(
|
||||
&self.0.bucket,
|
||||
&self.0.get_dest(object),
|
||||
r,
|
||||
length,
|
||||
&PutObjectOptions {
|
||||
storage_class: self.0.storage_class.clone(),
|
||||
part_size: part_size as u64,
|
||||
disable_content_sha256: true,
|
||||
user_metadata: meta,
|
||||
..Default::default()
|
||||
},
|
||||
)
|
||||
.await?;
|
||||
//self.ToObjectError(err, object)
|
||||
Ok(res.version_id)
|
||||
}
|
||||
|
||||
async fn put(&self, object: &str, r: ReaderImpl, length: i64) -> Result<String, std::io::Error> {
|
||||
self.put_with_meta(object, r, length, HashMap::new()).await
|
||||
}
|
||||
|
||||
async fn get(&self, object: &str, rv: &str, opts: WarmBackendGetOpts) -> Result<ReadCloser, std::io::Error> {
|
||||
self.0.get(object, rv, opts).await
|
||||
}
|
||||
|
||||
async fn remove(&self, object: &str, rv: &str) -> Result<(), std::io::Error> {
|
||||
self.0.remove(object, rv).await
|
||||
}
|
||||
|
||||
async fn in_use(&self) -> Result<bool, std::io::Error> {
|
||||
self.0.in_use().await
|
||||
}
|
||||
}
|
||||
|
||||
fn optimal_part_size(object_size: i64) -> Result<i64, std::io::Error> {
|
||||
let mut object_size = object_size;
|
||||
if object_size == -1 {
|
||||
object_size = MAX_MULTIPART_PUT_OBJECT_SIZE;
|
||||
}
|
||||
|
||||
if object_size > MAX_MULTIPART_PUT_OBJECT_SIZE {
|
||||
return Err(std::io::Error::other("entity too large"));
|
||||
}
|
||||
|
||||
let configured_part_size = MIN_PART_SIZE;
|
||||
let mut part_size_flt = object_size as f64 / MAX_PARTS_COUNT as f64;
|
||||
part_size_flt = (part_size_flt as f64 / configured_part_size as f64).ceil() * configured_part_size as f64;
|
||||
|
||||
let part_size = part_size_flt as i64;
|
||||
if part_size == 0 {
|
||||
return Ok(MIN_PART_SIZE);
|
||||
}
|
||||
Ok(part_size)
|
||||
}
|
||||
@@ -12,16 +12,19 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use super::filemeta::TRANSITION_COMPLETE;
|
||||
use crate::error::{Error, Result};
|
||||
use crate::{ReplicationState, ReplicationStatusType, VersionPurgeStatusType};
|
||||
use bytes::Bytes;
|
||||
use rmp_serde::Serializer;
|
||||
use rustfs_utils::HashAlgorithm;
|
||||
use rustfs_utils::http::headers::{RESERVED_METADATA_PREFIX_LOWER, RUSTFS_HEALING};
|
||||
use s3s::dto::{RestoreStatus, Timestamp};
|
||||
use s3s::header::X_AMZ_RESTORE;
|
||||
use serde::Deserialize;
|
||||
use serde::Serialize;
|
||||
use std::collections::HashMap;
|
||||
use time::OffsetDateTime;
|
||||
use time::{OffsetDateTime, format_description::well_known::Rfc3339};
|
||||
use uuid::Uuid;
|
||||
|
||||
pub const ERASURE_ALGORITHM: &str = "rs-vandermonde";
|
||||
@@ -35,6 +38,8 @@ pub const TIER_FV_ID: &str = "tier-free-versionID";
|
||||
pub const TIER_FV_MARKER: &str = "tier-free-marker";
|
||||
pub const TIER_SKIP_FV_ID: &str = "tier-skip-fvid";
|
||||
|
||||
const ERR_RESTORE_HDR_MALFORMED: &str = "x-amz-restore header malformed";
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Clone, Default)]
|
||||
pub struct ObjectPartInfo {
|
||||
pub etag: String,
|
||||
@@ -394,7 +399,10 @@ impl FileInfo {
|
||||
|
||||
/// Check if the object is remote (transitioned to another tier)
|
||||
pub fn is_remote(&self) -> bool {
|
||||
!self.transition_tier.is_empty()
|
||||
if self.transition_status != TRANSITION_COMPLETE {
|
||||
return false;
|
||||
}
|
||||
!is_restored_object_on_disk(&self.metadata)
|
||||
}
|
||||
|
||||
/// Get the data directory for this object
|
||||
@@ -535,3 +543,101 @@ pub struct FilesInfo {
|
||||
pub files: Vec<FileInfo>,
|
||||
pub is_truncated: bool,
|
||||
}
|
||||
|
||||
pub trait RestoreStatusOps {
|
||||
fn expiry(&self) -> Option<OffsetDateTime>;
|
||||
fn on_going(&self) -> bool;
|
||||
fn on_disk(&self) -> bool;
|
||||
fn to_string(&self) -> String;
|
||||
}
|
||||
|
||||
impl RestoreStatusOps for RestoreStatus {
|
||||
fn expiry(&self) -> Option<OffsetDateTime> {
|
||||
if self.on_going() {
|
||||
return None;
|
||||
}
|
||||
self.restore_expiry_date.clone().map(OffsetDateTime::from)
|
||||
}
|
||||
|
||||
fn on_going(&self) -> bool {
|
||||
if let Some(on_going) = self.is_restore_in_progress {
|
||||
return on_going;
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
fn on_disk(&self) -> bool {
|
||||
let expiry = self.expiry();
|
||||
if let Some(expiry0) = expiry
|
||||
&& OffsetDateTime::now_utc().unix_timestamp() < expiry0.unix_timestamp()
|
||||
{
|
||||
return true;
|
||||
}
|
||||
false
|
||||
}
|
||||
|
||||
fn to_string(&self) -> String {
|
||||
if self.on_going() {
|
||||
return "ongoing-request=\"true\"".to_string();
|
||||
}
|
||||
format!(
|
||||
"ongoing-request=\"false\", expiry-date=\"{}\"",
|
||||
OffsetDateTime::from(self.restore_expiry_date.clone().unwrap())
|
||||
.format(&Rfc3339)
|
||||
.unwrap()
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
fn parse_restore_obj_status(restore_hdr: &str) -> Result<RestoreStatus> {
|
||||
let tokens: Vec<&str> = restore_hdr.splitn(2, ",").collect();
|
||||
let progress_tokens: Vec<&str> = tokens[0].splitn(2, "=").collect();
|
||||
if progress_tokens.len() != 2 {
|
||||
return Err(Error::other(ERR_RESTORE_HDR_MALFORMED));
|
||||
}
|
||||
if progress_tokens[0].trim() != "ongoing-request" {
|
||||
return Err(Error::other(ERR_RESTORE_HDR_MALFORMED));
|
||||
}
|
||||
|
||||
match progress_tokens[1] {
|
||||
"true" | "\"true\"" => {
|
||||
if tokens.len() == 1 {
|
||||
return Ok(RestoreStatus {
|
||||
is_restore_in_progress: Some(true),
|
||||
..Default::default()
|
||||
});
|
||||
}
|
||||
}
|
||||
"false" | "\"false\"" => {
|
||||
if tokens.len() != 2 {
|
||||
return Err(Error::other(ERR_RESTORE_HDR_MALFORMED));
|
||||
}
|
||||
let expiry_tokens: Vec<&str> = tokens[1].splitn(2, "=").collect();
|
||||
if expiry_tokens.len() != 2 {
|
||||
return Err(Error::other(ERR_RESTORE_HDR_MALFORMED));
|
||||
}
|
||||
if expiry_tokens[0].trim() != "expiry-date" {
|
||||
return Err(Error::other(ERR_RESTORE_HDR_MALFORMED));
|
||||
}
|
||||
let expiry = OffsetDateTime::parse(expiry_tokens[1].trim_matches('"'), &Rfc3339).unwrap();
|
||||
/*if err != nil {
|
||||
return Err(Error::other(ERR_RESTORE_HDR_MALFORMED));
|
||||
}*/
|
||||
return Ok(RestoreStatus {
|
||||
is_restore_in_progress: Some(false),
|
||||
restore_expiry_date: Some(Timestamp::from(expiry)),
|
||||
});
|
||||
}
|
||||
_ => (),
|
||||
}
|
||||
Err(Error::other(ERR_RESTORE_HDR_MALFORMED))
|
||||
}
|
||||
|
||||
pub fn is_restored_object_on_disk(meta: &HashMap<String, String>) -> bool {
|
||||
if let Some(restore_hdr) = meta.get(X_AMZ_RESTORE.as_str()) {
|
||||
if let Ok(restore_status) = parse_restore_obj_status(restore_hdr) {
|
||||
return restore_status.on_disk();
|
||||
}
|
||||
}
|
||||
false
|
||||
}
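For context, the `x-amz-restore` header that `parse_restore_obj_status` and `is_restored_object_on_disk` consume takes one of two shapes: `ongoing-request="true"` while the restore is still running, and `ongoing-request="false", expiry-date="<timestamp>"` once a copy is back on disk (the code above parses the timestamp as RFC 3339). A minimal, standard-library-only sketch of that grammar; the function name and return shape here are illustrative, not the crate's API:

```rust
// Illustrative parser for the two x-amz-restore header forms; the return
// shape (ongoing, optional expiry string) is not the crate's RestoreStatus.
fn parse_restore_header(hdr: &str) -> Option<(bool, Option<String>)> {
    let mut parts = hdr.splitn(2, ',');
    let (key, value) = parts.next()?.split_once('=')?;
    if key.trim() != "ongoing-request" {
        return None;
    }
    match value.trim().trim_matches('"') {
        "true" => Some((true, None)),
        "false" => {
            let (k, v) = parts.next()?.split_once('=')?;
            if k.trim() != "expiry-date" {
                return None;
            }
            Some((false, Some(v.trim().trim_matches('"').to_string())))
        }
        _ => None,
    }
}

fn main() {
    assert_eq!(parse_restore_header("ongoing-request=\"true\""), Some((true, None)));
    let done = parse_restore_header("ongoing-request=\"false\", expiry-date=\"2025-01-01T00:00:00Z\"");
    assert_eq!(done, Some((false, Some("2025-01-01T00:00:00Z".to_string()))));
}
```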
|
||||
|
||||
@@ -22,8 +22,9 @@ use byteorder::ByteOrder;
|
||||
use bytes::Bytes;
|
||||
use rustfs_utils::http::AMZ_BUCKET_REPLICATION_STATUS;
|
||||
use rustfs_utils::http::headers::{
|
||||
self, AMZ_META_UNENCRYPTED_CONTENT_LENGTH, AMZ_META_UNENCRYPTED_CONTENT_MD5, AMZ_STORAGE_CLASS, RESERVED_METADATA_PREFIX,
|
||||
RESERVED_METADATA_PREFIX_LOWER, VERSION_PURGE_STATUS_KEY,
|
||||
self, AMZ_META_UNENCRYPTED_CONTENT_LENGTH, AMZ_META_UNENCRYPTED_CONTENT_MD5, AMZ_RESTORE_EXPIRY_DAYS,
|
||||
AMZ_RESTORE_REQUEST_DATE, AMZ_STORAGE_CLASS, RESERVED_METADATA_PREFIX, RESERVED_METADATA_PREFIX_LOWER,
|
||||
VERSION_PURGE_STATUS_KEY,
|
||||
};
|
||||
use s3s::header::X_AMZ_RESTORE;
|
||||
use serde::{Deserialize, Serialize};
|
||||
@@ -68,9 +69,6 @@ pub const TRANSITIONED_OBJECTNAME: &str = "transitioned-object";
|
||||
pub const TRANSITIONED_VERSION_ID: &str = "transitioned-versionID";
|
||||
pub const TRANSITION_TIER: &str = "transition-tier";
|
||||
|
||||
const X_AMZ_RESTORE_EXPIRY_DAYS: &str = "X-Amz-Restore-Expiry-Days";
|
||||
const X_AMZ_RESTORE_REQUEST_DATE: &str = "X-Amz-Restore-Request-Date";
|
||||
|
||||
// type ScanHeaderVersionFn = Box<dyn Fn(usize, &[u8], &[u8]) -> Result<()>>;
|
||||
|
||||
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
|
||||
@@ -693,11 +691,6 @@ impl FileMeta {
|
||||
}
|
||||
}
|
||||
|
||||
// ???
|
||||
if fi.transition_status == TRANSITION_COMPLETE {
|
||||
update_version = false;
|
||||
}
|
||||
|
||||
for (i, ver) in self.versions.iter().enumerate() {
|
||||
if ver.header.version_id != fi.version_id {
|
||||
continue;
|
||||
@@ -1088,13 +1081,24 @@ impl FileMeta {
|
||||
|
||||
/// Count shared data directories
|
||||
pub fn shared_data_dir_count(&self, version_id: Option<Uuid>, data_dir: Option<Uuid>) -> usize {
|
||||
if self.data.entries().unwrap_or_default() > 0
|
||||
&& version_id.is_some()
|
||||
&& self
|
||||
.data
|
||||
.find(version_id.unwrap().to_string().as_str())
|
||||
.unwrap_or_default()
|
||||
.is_some()
|
||||
{
|
||||
return 0;
|
||||
}
|
||||
|
||||
self.versions
|
||||
.iter()
|
||||
.filter(|v| {
|
||||
v.header.version_type == VersionType::Object && v.header.version_id != version_id && v.header.user_data_dir()
|
||||
})
|
||||
.filter_map(|v| FileMetaVersion::decode_data_dir_from_meta(&v.meta).ok().flatten())
|
||||
.filter(|&dir| Some(dir) == data_dir)
|
||||
.filter_map(|v| FileMetaVersion::decode_data_dir_from_meta(&v.meta).ok())
|
||||
.filter(|&dir| dir == data_dir)
|
||||
.count()
|
||||
}
|
||||
|
||||
@@ -1382,7 +1386,7 @@ impl From<FileInfo> for FileMetaVersion {
|
||||
FileMetaVersion {
|
||||
version_type: VersionType::Object,
|
||||
delete_marker: None,
|
||||
object: Some(value.into()),
|
||||
object: Some(MetaObject::from(value)),
|
||||
write_version: 0,
|
||||
}
|
||||
}
|
||||
@@ -1838,8 +1842,8 @@ impl MetaObject {
|
||||
|
||||
pub fn remove_restore_hdrs(&mut self) {
|
||||
self.meta_user.remove(X_AMZ_RESTORE.as_str());
|
||||
self.meta_user.remove(X_AMZ_RESTORE_EXPIRY_DAYS);
|
||||
self.meta_user.remove(X_AMZ_RESTORE_REQUEST_DATE);
|
||||
self.meta_user.remove(AMZ_RESTORE_EXPIRY_DAYS);
|
||||
self.meta_user.remove(AMZ_RESTORE_REQUEST_DATE);
|
||||
}
|
||||
|
||||
pub fn uses_data_dir(&self) -> bool {
|
||||
@@ -2708,21 +2712,25 @@ mod test {
|
||||
|
||||
#[test]
|
||||
fn test_real_xlmeta_compatibility() {
|
||||
// 测试真实的 xl.meta 文件格式兼容性
|
||||
let data = create_real_xlmeta().expect("创建真实测试数据失败");
|
||||
// Test compatibility with real xl.meta formats
|
||||
let data = create_real_xlmeta().expect("Failed to create realistic test data");
|
||||
|
||||
// 验证文件头
|
||||
assert_eq!(&data[0..4], b"XL2 ", "文件头应该是 'XL2 '");
|
||||
assert_eq!(&data[4..8], &[1, 0, 3, 0], "版本号应该是 1.3.0");
|
||||
// Verify the file header
|
||||
assert_eq!(&data[0..4], b"XL2 ", "File header should be 'XL2 '");
|
||||
assert_eq!(&data[4..8], &[1, 0, 3, 0], "Version number should be 1.3.0");
|
||||
|
||||
// 解析元数据
|
||||
let fm = FileMeta::load(&data).expect("解析真实数据失败");
|
||||
// Parse metadata
|
||||
let fm = FileMeta::load(&data).expect("Failed to parse realistic data");
|
||||
|
||||
// 验证基本属性
|
||||
// Verify basic properties
|
||||
assert_eq!(fm.meta_ver, XL_META_VERSION);
|
||||
assert_eq!(fm.versions.len(), 3, "应该有 3 个版本(1 个对象,1 个删除标记,1 个 Legacy)");
|
||||
assert_eq!(
|
||||
fm.versions.len(),
|
||||
3,
|
||||
"Should have three versions (one object, one delete marker, one Legacy)"
|
||||
);
|
||||
|
||||
// 验证版本类型
|
||||
// Verify version types
|
||||
let mut object_count = 0;
|
||||
let mut delete_count = 0;
|
||||
let mut legacy_count = 0;
|
||||
@@ -2732,21 +2740,21 @@ mod test {
|
||||
VersionType::Object => object_count += 1,
|
||||
VersionType::Delete => delete_count += 1,
|
||||
VersionType::Legacy => legacy_count += 1,
|
||||
VersionType::Invalid => panic!("不应该有无效版本"),
|
||||
VersionType::Invalid => panic!("No invalid versions should be present"),
|
||||
}
|
||||
}
|
||||
|
||||
assert_eq!(object_count, 1, "应该有 1 个对象版本");
|
||||
assert_eq!(delete_count, 1, "应该有 1 个删除标记");
|
||||
assert_eq!(legacy_count, 1, "应该有 1 个 Legacy 版本");
|
||||
assert_eq!(object_count, 1, "Should have one object version");
|
||||
assert_eq!(delete_count, 1, "Should have one delete marker");
|
||||
assert_eq!(legacy_count, 1, "Should have one Legacy version");
|
||||
|
||||
// 验证兼容性
|
||||
assert!(fm.is_compatible_with_meta(), "应该与 xl 格式兼容");
|
||||
// Verify compatibility
|
||||
assert!(fm.is_compatible_with_meta(), "Should be compatible with the xl format");
|
||||
|
||||
// 验证完整性
|
||||
fm.validate_integrity().expect("完整性验证失败");
|
||||
// Verify integrity
|
||||
fm.validate_integrity().expect("Integrity validation failed");
|
||||
|
||||
// 验证版本统计
|
||||
// Verify version statistics
|
||||
let stats = fm.get_version_stats();
|
||||
assert_eq!(stats.total_versions, 3);
|
||||
assert_eq!(stats.object_versions, 1);
|
||||
@@ -2756,61 +2764,61 @@ mod test {
|
||||
|
||||
#[test]
|
||||
fn test_complex_xlmeta_handling() {
|
||||
// 测试复杂的多版本 xl.meta 文件
|
||||
let data = create_complex_xlmeta().expect("创建复杂测试数据失败");
|
||||
let fm = FileMeta::load(&data).expect("解析复杂数据失败");
|
||||
// Test complex xl.meta files with many versions
|
||||
let data = create_complex_xlmeta().expect("Failed to create complex test data");
|
||||
let fm = FileMeta::load(&data).expect("Failed to parse complex data");
|
||||
|
||||
// 验证版本数量
|
||||
assert!(fm.versions.len() >= 10, "应该有至少 10 个版本");
|
||||
// Verify version count
|
||||
assert!(fm.versions.len() >= 10, "Should have at least 10 versions");
|
||||
|
||||
// 验证版本排序
|
||||
assert!(fm.is_sorted_by_mod_time(), "版本应该按修改时间排序");
|
||||
// Verify version ordering
|
||||
assert!(fm.is_sorted_by_mod_time(), "Versions should be sorted by modification time");
|
||||
|
||||
// 验证不同版本类型的存在
|
||||
// Verify presence of different version types
|
||||
let stats = fm.get_version_stats();
|
||||
assert!(stats.object_versions > 0, "应该有对象版本");
|
||||
assert!(stats.delete_markers > 0, "应该有删除标记");
|
||||
assert!(stats.object_versions > 0, "Should include object versions");
|
||||
assert!(stats.delete_markers > 0, "Should include delete markers");
|
||||
|
||||
// 测试版本合并功能
|
||||
// Test version merge functionality
|
||||
let merged = merge_file_meta_versions(1, false, 0, std::slice::from_ref(&fm.versions));
|
||||
assert!(!merged.is_empty(), "合并后应该有版本");
|
||||
assert!(!merged.is_empty(), "Merged output should contain versions");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_inline_data_handling() {
|
||||
// 测试内联数据处理
|
||||
let data = create_xlmeta_with_inline_data().expect("创建内联数据测试失败");
|
||||
let fm = FileMeta::load(&data).expect("解析内联数据失败");
|
||||
// Test inline data handling
|
||||
let data = create_xlmeta_with_inline_data().expect("Failed to create inline test data");
|
||||
let fm = FileMeta::load(&data).expect("Failed to parse inline data");
|
||||
|
||||
assert_eq!(fm.versions.len(), 1, "应该有 1 个版本");
|
||||
assert!(!fm.data.as_slice().is_empty(), "应该包含内联数据");
|
||||
assert_eq!(fm.versions.len(), 1, "Should have one version");
|
||||
assert!(!fm.data.as_slice().is_empty(), "Should contain inline data");
|
||||
|
||||
// 验证内联数据内容
|
||||
// Verify inline data contents
|
||||
let inline_data = fm.data.as_slice();
|
||||
assert!(!inline_data.is_empty(), "内联数据不应为空");
|
||||
assert!(!inline_data.is_empty(), "Inline data should not be empty");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_handling_and_recovery() {
|
||||
// 测试错误处理和恢复
|
||||
// Test error handling and recovery
|
||||
let corrupted_data = create_corrupted_xlmeta();
|
||||
let result = FileMeta::load(&corrupted_data);
|
||||
assert!(result.is_err(), "损坏的数据应该解析失败");
|
||||
assert!(result.is_err(), "Corrupted data should fail to parse");
|
||||
|
||||
// 测试空文件处理
|
||||
let empty_data = create_empty_xlmeta().expect("创建空数据失败");
|
||||
let fm = FileMeta::load(&empty_data).expect("解析空数据失败");
|
||||
assert_eq!(fm.versions.len(), 0, "空文件应该没有版本");
|
||||
// Test handling of empty files
|
||||
let empty_data = create_empty_xlmeta().expect("Failed to create empty test data");
|
||||
let fm = FileMeta::load(&empty_data).expect("Failed to parse empty data");
|
||||
assert_eq!(fm.versions.len(), 0, "An empty file should have no versions");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_version_type_legacy_support() {
|
||||
// 专门测试 Legacy 版本类型支持
|
||||
// Validate support for Legacy version types
|
||||
assert_eq!(VersionType::Legacy.to_u8(), 3);
|
||||
assert_eq!(VersionType::from_u8(3), VersionType::Legacy);
|
||||
assert!(VersionType::Legacy.valid(), "Legacy 类型应该是有效的");
|
||||
assert!(VersionType::Legacy.valid(), "Legacy type should be valid");
|
||||
|
||||
// 测试 Legacy 版本的创建和处理
|
||||
// Exercise creation and handling of Legacy versions
|
||||
let legacy_version = FileMetaVersion {
|
||||
version_type: VersionType::Legacy,
|
||||
object: None,
|
||||
@@ -2818,101 +2826,101 @@ mod test {
|
||||
write_version: 1,
|
||||
};
|
||||
|
||||
assert!(legacy_version.is_legacy(), "应该识别为 Legacy 版本");
|
||||
assert!(legacy_version.is_legacy(), "Should be recognized as a Legacy version");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_signature_calculation() {
|
||||
// 测试签名计算功能
|
||||
let data = create_real_xlmeta().expect("创建测试数据失败");
|
||||
let fm = FileMeta::load(&data).expect("解析失败");
|
||||
// Test signature calculation
|
||||
let data = create_real_xlmeta().expect("Failed to create test data");
|
||||
let fm = FileMeta::load(&data).expect("Parsing failed");
|
||||
|
||||
for version in &fm.versions {
|
||||
let signature = version.header.get_signature();
|
||||
assert_eq!(signature.len(), 4, "签名应该是 4 字节");
|
||||
assert_eq!(signature.len(), 4, "Signature should be 4 bytes");
|
||||
|
||||
// 验证相同版本的签名一致性
|
||||
// Verify signature consistency for identical versions
|
||||
let signature2 = version.header.get_signature();
|
||||
assert_eq!(signature, signature2, "相同版本的签名应该一致");
|
||||
assert_eq!(signature, signature2, "Identical versions should produce identical signatures");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_metadata_validation() {
|
||||
// 测试元数据验证功能
|
||||
let data = create_real_xlmeta().expect("创建测试数据失败");
|
||||
let fm = FileMeta::load(&data).expect("解析失败");
|
||||
// Test metadata validation
|
||||
let data = create_real_xlmeta().expect("Failed to create test data");
|
||||
let fm = FileMeta::load(&data).expect("Parsing failed");
|
||||
|
||||
// 测试完整性验证
|
||||
fm.validate_integrity().expect("完整性验证应该通过");
|
||||
// Test integrity validation
|
||||
fm.validate_integrity().expect("Integrity validation should succeed");
|
||||
|
||||
// 测试兼容性检查
|
||||
assert!(fm.is_compatible_with_meta(), "应该与 xl 格式兼容");
|
||||
// Test compatibility checks
|
||||
assert!(fm.is_compatible_with_meta(), "Should be compatible with the xl format");
|
||||
|
||||
// 测试版本排序检查
|
||||
assert!(fm.is_sorted_by_mod_time(), "版本应该按时间排序");
|
||||
// Test version ordering checks
|
||||
assert!(fm.is_sorted_by_mod_time(), "Versions should be time-ordered");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_round_trip_serialization() {
|
||||
// 测试序列化和反序列化的往返一致性
|
||||
let original_data = create_real_xlmeta().expect("创建原始数据失败");
|
||||
let fm = FileMeta::load(&original_data).expect("解析原始数据失败");
|
||||
// Test round-trip serialization consistency
|
||||
let original_data = create_real_xlmeta().expect("Failed to create original test data");
|
||||
let fm = FileMeta::load(&original_data).expect("Failed to parse original data");
|
||||
|
||||
// 重新序列化
|
||||
let serialized_data = fm.marshal_msg().expect("重新序列化失败");
|
||||
// Serialize again
|
||||
let serialized_data = fm.marshal_msg().expect("Re-serialization failed");
|
||||
|
||||
// 再次解析
|
||||
let fm2 = FileMeta::load(&serialized_data).expect("解析序列化数据失败");
|
||||
// Parse again
|
||||
let fm2 = FileMeta::load(&serialized_data).expect("Failed to parse serialized data");
|
||||
|
||||
// 验证一致性
|
||||
assert_eq!(fm.versions.len(), fm2.versions.len(), "版本数量应该一致");
|
||||
assert_eq!(fm.meta_ver, fm2.meta_ver, "元数据版本应该一致");
|
||||
// Verify consistency
|
||||
assert_eq!(fm.versions.len(), fm2.versions.len(), "Version counts should match");
|
||||
assert_eq!(fm.meta_ver, fm2.meta_ver, "Metadata versions should match");
|
||||
|
||||
// 验证版本内容一致性
|
||||
// Verify version content consistency
|
||||
for (v1, v2) in fm.versions.iter().zip(fm2.versions.iter()) {
|
||||
assert_eq!(v1.header.version_type, v2.header.version_type, "版本类型应该一致");
|
||||
assert_eq!(v1.header.version_id, v2.header.version_id, "版本 ID 应该一致");
|
||||
assert_eq!(v1.header.version_type, v2.header.version_type, "Version types should match");
|
||||
assert_eq!(v1.header.version_id, v2.header.version_id, "Version IDs should match");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_performance_with_large_metadata() {
|
||||
// 测试大型元数据文件的性能
|
||||
// Test performance with large metadata files
|
||||
use std::time::Instant;
|
||||
|
||||
let start = Instant::now();
|
||||
let data = create_complex_xlmeta().expect("创建大型测试数据失败");
|
||||
let data = create_complex_xlmeta().expect("Failed to create large test data");
|
||||
let creation_time = start.elapsed();
|
||||
|
||||
let start = Instant::now();
|
||||
let fm = FileMeta::load(&data).expect("解析大型数据失败");
|
||||
let fm = FileMeta::load(&data).expect("Failed to parse large data");
|
||||
let parsing_time = start.elapsed();
|
||||
|
||||
let start = Instant::now();
|
||||
let _serialized = fm.marshal_msg().expect("序列化失败");
|
||||
let _serialized = fm.marshal_msg().expect("Serialization failed");
|
||||
let serialization_time = start.elapsed();
|
||||
|
||||
println!("性能测试结果:");
|
||||
println!(" 创建时间:{creation_time:?}");
|
||||
println!(" 解析时间:{parsing_time:?}");
|
||||
println!(" 序列化时间:{serialization_time:?}");
|
||||
println!("Performance results:");
|
||||
println!(" Creation time: {creation_time:?}");
|
||||
println!(" Parsing time: {parsing_time:?}");
|
||||
println!(" Serialization time: {serialization_time:?}");
|
||||
|
||||
// 基本性能断言(这些值可能需要根据实际性能调整)
|
||||
assert!(parsing_time.as_millis() < 100, "解析时间应该小于 100ms");
|
||||
assert!(serialization_time.as_millis() < 100, "序列化时间应该小于 100ms");
|
||||
// Basic performance assertions (adjust as needed for real workloads)
|
||||
assert!(parsing_time.as_millis() < 100, "Parsing time should be under 100 ms");
|
||||
assert!(serialization_time.as_millis() < 100, "Serialization time should be under 100 ms");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_edge_cases() {
|
||||
// 测试边界情况
|
||||
// Test edge cases
|
||||
|
||||
// 1. 测试空版本 ID
|
||||
// 1. Test empty version IDs
|
||||
let mut fm = FileMeta::new();
|
||||
let version = FileMetaVersion {
|
||||
version_type: VersionType::Object,
|
||||
object: Some(MetaObject {
|
||||
version_id: None, // 空版本 ID
|
||||
version_id: None, // Empty version ID
|
||||
data_dir: None,
|
||||
erasure_algorithm: crate::fileinfo::ErasureAlgo::ReedSolomon,
|
||||
erasure_m: 1,
|
||||
@@ -2935,35 +2943,35 @@ mod test {
|
||||
write_version: 1,
|
||||
};
|
||||
|
||||
let shallow_version = FileMetaShallowVersion::try_from(version).expect("转换失败");
|
||||
let shallow_version = FileMetaShallowVersion::try_from(version).expect("Conversion failed");
|
||||
fm.versions.push(shallow_version);
|
||||
|
||||
// 应该能够序列化和反序列化
|
||||
let data = fm.marshal_msg().expect("序列化失败");
|
||||
let fm2 = FileMeta::load(&data).expect("解析失败");
|
||||
// Should support serialization and deserialization
|
||||
let data = fm.marshal_msg().expect("Serialization failed");
|
||||
let fm2 = FileMeta::load(&data).expect("Parsing failed");
|
||||
assert_eq!(fm2.versions.len(), 1);
|
||||
|
||||
// 2. 测试极大的文件大小
|
||||
// 2. Test extremely large file sizes
|
||||
let large_object = MetaObject {
|
||||
size: i64::MAX,
|
||||
part_sizes: vec![usize::MAX],
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// 应该能够处理大数值
|
||||
// Should handle very large numbers
|
||||
assert_eq!(large_object.size, i64::MAX);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_concurrent_operations() {
|
||||
// 测试并发操作的安全性
|
||||
// Test thread safety for concurrent operations
|
||||
use std::sync::Arc;
|
||||
use std::sync::Mutex;
|
||||
|
||||
let fm = Arc::new(Mutex::new(FileMeta::new()));
|
||||
let mut handles = vec![];
|
||||
|
||||
// 并发添加版本
|
||||
// Add versions concurrently
|
||||
for i in 0..10 {
|
||||
let fm_clone: Arc<Mutex<FileMeta>> = Arc::clone(&fm);
|
||||
let handle = tokio::spawn(async move {
|
||||
@@ -2977,7 +2985,7 @@ mod test {
|
||||
handles.push(handle);
|
||||
}
|
||||
|
||||
// 等待所有任务完成
|
||||
// Wait for all tasks to finish
|
||||
for handle in handles {
|
||||
handle.await.unwrap();
|
||||
}
|
||||
@@ -2988,15 +2996,15 @@ mod test {
|
||||
|
||||
#[test]
|
||||
fn test_memory_efficiency() {
|
||||
// 测试内存使用效率
|
||||
// Test memory efficiency
|
||||
use std::mem;
|
||||
|
||||
// 测试空结构体的内存占用
|
||||
// Measure memory usage for empty structs
|
||||
let empty_fm = FileMeta::new();
|
||||
let empty_size = mem::size_of_val(&empty_fm);
|
||||
println!("Empty FileMeta size: {empty_size} bytes");
|
||||
|
||||
// 测试包含大量版本的内存占用
|
||||
// Measure memory usage with many versions
|
||||
let mut large_fm = FileMeta::new();
|
||||
for i in 0..100 {
|
||||
let mut fi = crate::fileinfo::FileInfo::new(&format!("test-{i}"), 2, 1);
|
||||
@@ -3008,18 +3016,18 @@ mod test {
|
||||
let large_size = mem::size_of_val(&large_fm);
|
||||
println!("Large FileMeta size: {large_size} bytes");
|
||||
|
||||
// 验证内存使用是合理的(注意:size_of_val 只计算栈上的大小,不包括堆分配)
|
||||
// 对于包含 Vec 的结构体,size_of_val 可能相同,因为 Vec 的容量在堆上
|
||||
println!("版本数量:{}", large_fm.versions.len());
|
||||
assert!(!large_fm.versions.is_empty(), "应该有版本数据");
|
||||
// Ensure memory usage is reasonable (size_of_val covers only stack allocations)
|
||||
// For structs containing Vec, size_of_val may match because capacity lives on the heap
|
||||
println!("Number of versions: {}", large_fm.versions.len());
|
||||
assert!(!large_fm.versions.is_empty(), "Should contain version data");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_version_ordering_edge_cases() {
|
||||
// 测试版本排序的边界情况
|
||||
// Test boundary cases for version ordering
|
||||
let mut fm = FileMeta::new();
|
||||
|
||||
// 添加相同时间戳的版本
|
||||
// Add versions with identical timestamps
|
||||
let same_time = OffsetDateTime::now_utc();
|
||||
for i in 0..5 {
|
||||
let mut fi = crate::fileinfo::FileInfo::new(&format!("test-{i}"), 2, 1);
|
||||
@@ -3028,18 +3036,18 @@ mod test {
|
||||
fm.add_version(fi).unwrap();
|
||||
}
|
||||
|
||||
// 验证排序稳定性
|
||||
// Verify stable ordering
|
||||
let original_order: Vec<_> = fm.versions.iter().map(|v| v.header.version_id).collect();
|
||||
fm.sort_by_mod_time();
|
||||
let sorted_order: Vec<_> = fm.versions.iter().map(|v| v.header.version_id).collect();
|
||||
|
||||
// 对于相同时间戳,排序应该保持稳定
|
||||
// Sorting should remain stable for identical timestamps
|
||||
assert_eq!(original_order.len(), sorted_order.len());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_checksum_algorithms() {
|
||||
// 测试不同的校验和算法
|
||||
// Test different checksum algorithms
|
||||
let algorithms = vec![ChecksumAlgo::Invalid, ChecksumAlgo::HighwayHash];
|
||||
|
||||
for algo in algorithms {
|
||||
@@ -3048,7 +3056,7 @@ mod test {
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// 验证算法的有效性检查
|
||||
// Verify checksum validation logic
|
||||
match algo {
|
||||
ChecksumAlgo::Invalid => assert!(!algo.valid()),
|
||||
ChecksumAlgo::HighwayHash => assert!(algo.valid()),
|
||||
@@ -3064,12 +3072,12 @@ mod test {
|
||||
|
||||
#[test]
|
||||
fn test_erasure_coding_parameters() {
|
||||
// 测试纠删码参数的各种组合
|
||||
// Test combinations of erasure coding parameters
|
||||
let test_cases = vec![
|
||||
(1, 1), // 最小配置
|
||||
(2, 1), // 常见配置
|
||||
(4, 2), // 标准配置
|
||||
(8, 4), // 高冗余配置
|
||||
(1, 1), // Minimum configuration
|
||||
(2, 1), // Common configuration
|
||||
(4, 2), // Standard configuration
|
||||
(8, 4), // High redundancy configuration
|
||||
];
|
||||
|
||||
for (data_blocks, parity_blocks) in test_cases {
|
||||
@@ -3080,9 +3088,9 @@ mod test {
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// 验证参数的合理性
|
||||
assert!(obj.erasure_m > 0, "数据块数量必须大于 0");
|
||||
assert!(obj.erasure_n > 0, "校验块数量必须大于 0");
|
||||
// Verify parameter validity
|
||||
assert!(obj.erasure_m > 0, "Data block count must be greater than 0");
|
||||
assert!(obj.erasure_n > 0, "Parity block count must be greater than 0");
|
||||
assert_eq!(obj.erasure_dist.len(), data_blocks + parity_blocks);
|
||||
|
||||
// Verify serialization and deserialization
|
||||
@@ -3097,20 +3105,20 @@ mod test {
|
||||
|
||||
#[test]
|
||||
fn test_metadata_size_limits() {
|
||||
// 测试元数据大小限制
|
||||
// Test metadata size limits
|
||||
let mut obj = MetaObject::default();
|
||||
|
||||
// 测试适量用户元数据
|
||||
// Test moderate amounts of user metadata
|
||||
for i in 0..10 {
|
||||
obj.meta_user
|
||||
.insert(format!("key-{i:04}"), format!("value-{:04}-{}", i, "x".repeat(10)));
|
||||
}
|
||||
|
||||
// 验证可以序列化元数据
|
||||
// Verify metadata can be serialized
|
||||
let data = obj.marshal_msg().unwrap();
|
||||
assert!(data.len() > 100, "序列化后的数据应该有合理大小");
|
||||
assert!(data.len() > 100, "Serialized data should have a reasonable size");
|
||||
|
||||
// 验证可以反序列化
|
||||
// Verify deserialization succeeds
|
||||
let mut obj2 = MetaObject::default();
|
||||
obj2.unmarshal_msg(&data).unwrap();
|
||||
assert_eq!(obj.meta_user.len(), obj2.meta_user.len());
|
||||
@@ -3118,14 +3126,14 @@ mod test {
|
||||
|
||||
#[test]
|
||||
fn test_version_statistics_accuracy() {
|
||||
// 测试版本统计的准确性
|
||||
// Test accuracy of version statistics
|
||||
let mut fm = FileMeta::new();
|
||||
|
||||
// 添加不同类型的版本
|
||||
// Add different version types
|
||||
let object_count = 3;
|
||||
let delete_count = 2;
|
||||
|
||||
// 添加对象版本
|
||||
// Add object versions
|
||||
for i in 0..object_count {
|
||||
let mut fi = crate::fileinfo::FileInfo::new(&format!("obj-{i}"), 2, 1);
|
||||
fi.version_id = Some(Uuid::new_v4());
|
||||
@@ -3133,7 +3141,7 @@ mod test {
|
||||
fm.add_version(fi).unwrap();
|
||||
}
|
||||
|
||||
// 添加删除标记
|
||||
// Add delete markers
|
||||
for i in 0..delete_count {
|
||||
let delete_marker = MetaDeleteMarker {
|
||||
version_id: Some(Uuid::new_v4()),
|
||||
@@ -3152,13 +3160,13 @@ mod test {
|
||||
fm.versions.push(shallow_version);
|
||||
}
|
||||
|
||||
// 验证统计准确性
|
||||
// Verify overall statistics
|
||||
let stats = fm.get_version_stats();
|
||||
assert_eq!(stats.total_versions, object_count + delete_count);
|
||||
assert_eq!(stats.object_versions, object_count);
|
||||
assert_eq!(stats.delete_markers, delete_count);
|
||||
|
||||
// 验证详细统计
|
||||
// Verify detailed statistics
|
||||
let detailed_stats = fm.get_detailed_version_stats();
|
||||
assert_eq!(detailed_stats.total_versions, object_count + delete_count);
|
||||
assert_eq!(detailed_stats.object_versions, object_count);
|
||||
@@ -3167,15 +3175,15 @@ mod test {
|
||||
|
||||
#[test]
|
||||
fn test_cross_platform_compatibility() {
|
||||
// 测试跨平台兼容性(字节序、路径分隔符等)
|
||||
// Test cross-platform compatibility (endianness, separators, etc.)
|
||||
let mut fm = FileMeta::new();
|
||||
|
||||
// 使用不同平台风格的路径
|
||||
// Use platform-specific path styles
|
||||
let paths = vec![
|
||||
"unix/style/path",
|
||||
"windows\\style\\path",
|
||||
"mixed/style\\path",
|
||||
"unicode/路径/测试",
|
||||
"unicode/path/test",
|
||||
];
|
||||
|
||||
for path in paths {
|
||||
@@ -3185,14 +3193,14 @@ mod test {
|
||||
fm.add_version(fi).unwrap();
|
||||
}
|
||||
|
||||
// 验证序列化和反序列化在不同平台上的一致性
|
||||
// Verify serialization/deserialization consistency across platforms
|
||||
let data = fm.marshal_msg().unwrap();
|
||||
let mut fm2 = FileMeta::default();
|
||||
fm2.unmarshal_msg(&data).unwrap();
|
||||
|
||||
assert_eq!(fm.versions.len(), fm2.versions.len());
|
||||
|
||||
// 验证 UUID 的字节序一致性
|
||||
// Verify UUID endianness consistency
|
||||
for (v1, v2) in fm.versions.iter().zip(fm2.versions.iter()) {
|
||||
assert_eq!(v1.header.version_id, v2.header.version_id);
|
||||
}
|
||||
@@ -3200,26 +3208,26 @@ mod test {
|
||||
|
||||
#[test]
|
||||
fn test_data_integrity_validation() {
|
||||
// 测试数据完整性验证
|
||||
// Test data integrity checks
|
||||
let mut fm = FileMeta::new();
|
||||
|
||||
// 添加一个正常版本
|
||||
// Add a normal version
|
||||
let mut fi = crate::fileinfo::FileInfo::new("test", 2, 1);
|
||||
fi.version_id = Some(Uuid::new_v4());
|
||||
fi.mod_time = Some(OffsetDateTime::now_utc());
|
||||
fm.add_version(fi).unwrap();
|
||||
|
||||
// 验证正常情况下的完整性
|
||||
// Verify integrity under normal conditions
|
||||
assert!(fm.validate_integrity().is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_version_merge_scenarios() {
|
||||
// 测试版本合并的各种场景
|
||||
// Test various version merge scenarios
|
||||
let mut versions1 = vec![];
|
||||
let mut versions2 = vec![];
|
||||
|
||||
// 创建两组不同的版本
|
||||
// Create two distinct sets of versions
|
||||
for i in 0..3 {
|
||||
let mut fi1 = crate::fileinfo::FileInfo::new(&format!("test1-{i}"), 2, 1);
|
||||
fi1.version_id = Some(Uuid::new_v4());
|
||||
@@ -3236,37 +3244,37 @@ mod test {
|
||||
versions2.push(FileMetaShallowVersion::try_from(version2).unwrap());
|
||||
}
|
||||
|
||||
// 测试简单的合并场景
|
||||
// Test a simple merge scenario
|
||||
let merged = merge_file_meta_versions(1, false, 0, &[versions1.clone()]);
|
||||
assert!(!merged.is_empty(), "单个版本列表的合并结果不应为空");
|
||||
assert!(!merged.is_empty(), "Merging a single version list should not be empty");
|
||||
|
||||
// 测试多个版本列表的合并
|
||||
// Test merging multiple version lists
|
||||
let merged = merge_file_meta_versions(1, false, 0, &[versions1.clone(), versions2.clone()]);
|
||||
// 合并结果可能为空,这取决于版本的兼容性,这是正常的
|
||||
println!("合并结果数量:{}", merged.len());
|
||||
// Merge results may be empty depending on compatibility, which is acceptable
|
||||
println!("Merge result count: {}", merged.len());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_flags_operations() {
|
||||
// 测试标志位操作
|
||||
// Test flag operations
|
||||
let flags = vec![Flags::FreeVersion, Flags::UsesDataDir, Flags::InlineData];
|
||||
|
||||
for flag in flags {
|
||||
let flag_value = flag as u8;
|
||||
assert!(flag_value > 0, "标志位值应该大于 0");
|
||||
assert!(flag_value > 0, "Flag value should be greater than 0");
|
||||
|
||||
// 测试标志位组合
|
||||
// Test flag combinations
|
||||
let combined = Flags::FreeVersion as u8 | Flags::UsesDataDir as u8;
|
||||
// 对于位运算,组合值可能不总是大于单个值,这是正常的
|
||||
assert!(combined > 0, "组合标志位应该大于 0");
|
||||
// For bitwise operations, combined values may not exceed individual ones; this is normal
|
||||
assert!(combined > 0, "Combined flag value should be greater than 0");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_uuid_handling_edge_cases() {
|
||||
// 测试 UUID 处理的边界情况
|
||||
// Test UUID edge cases
|
||||
let test_uuids = vec![
|
||||
Uuid::new_v4(), // 随机 UUID
|
||||
Uuid::new_v4(), // Random UUID
|
||||
];
|
||||
|
||||
for uuid in test_uuids {
|
||||
@@ -3276,7 +3284,7 @@ mod test {
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// 验证序列化和反序列化
|
||||
// Verify serialization and deserialization
|
||||
let data = obj.marshal_msg().unwrap();
|
||||
let mut obj2 = MetaObject::default();
|
||||
obj2.unmarshal_msg(&data).unwrap();
|
||||
@@ -3285,7 +3293,7 @@ mod test {
|
||||
assert_eq!(obj.data_dir, obj2.data_dir);
|
||||
}
|
||||
|
||||
// 单独测试 nil UUID,因为它在序列化时会被转换为 None
|
||||
// Test nil UUID separately because serialization converts it to None
|
||||
let obj = MetaObject {
|
||||
version_id: Some(Uuid::nil()),
|
||||
data_dir: Some(Uuid::nil()),
|
||||
@@ -3296,24 +3304,24 @@ mod test {
|
||||
let mut obj2 = MetaObject::default();
|
||||
obj2.unmarshal_msg(&data).unwrap();
|
||||
|
||||
// nil UUID 在序列化时可能被转换为 None,这是预期行为
|
||||
// 检查实际的序列化行为
|
||||
println!("原始 version_id: {:?}", obj.version_id);
|
||||
println!("反序列化后 version_id: {:?}", obj2.version_id);
|
||||
// 只要反序列化成功就认为测试通过
|
||||
// nil UUIDs may be converted to None during serialization; this is expected
|
||||
// Inspect the actual serialization behavior
|
||||
println!("Original version_id: {:?}", obj.version_id);
|
||||
println!("Deserialized version_id: {:?}", obj2.version_id);
|
||||
// Consider the test successful as long as deserialization succeeds
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_part_handling_edge_cases() {
|
||||
// 测试分片处理的边界情况
|
||||
// Test edge cases for shard handling
|
||||
let mut obj = MetaObject::default();
|
||||
|
||||
// 测试空分片列表
|
||||
// Test an empty shard list
|
||||
assert!(obj.part_numbers.is_empty());
|
||||
assert!(obj.part_etags.is_empty());
|
||||
assert!(obj.part_sizes.is_empty());
|
||||
|
||||
// 测试单个分片
|
||||
// Test a single shard
|
||||
obj.part_numbers = vec![1];
|
||||
obj.part_etags = vec!["etag1".to_string()];
|
||||
obj.part_sizes = vec![1024];
|
||||
@@ -3328,7 +3336,7 @@ mod test {
|
||||
assert_eq!(obj.part_sizes, obj2.part_sizes);
|
||||
assert_eq!(obj.part_actual_sizes, obj2.part_actual_sizes);
|
||||
|
||||
// 测试多个分片
|
||||
// Test multiple shards
|
||||
obj.part_numbers = vec![1, 2, 3];
|
||||
obj.part_etags = vec!["etag1".to_string(), "etag2".to_string(), "etag3".to_string()];
|
||||
obj.part_sizes = vec![1024, 2048, 512];
|
||||
@@ -3346,7 +3354,7 @@ mod test {
|
||||
|
||||
#[test]
|
||||
fn test_version_header_validation() {
|
||||
// 测试版本头的验证功能
|
||||
// Test version header validation
|
||||
let mut header = FileMetaVersionHeader {
|
||||
version_type: VersionType::Object,
|
||||
mod_time: Some(OffsetDateTime::now_utc()),
|
||||
@@ -3356,27 +3364,27 @@ mod test {
|
||||
};
|
||||
assert!(header.is_valid());
|
||||
|
||||
// 测试无效的版本类型
|
||||
// Test invalid version types
|
||||
header.version_type = VersionType::Invalid;
|
||||
assert!(!header.is_valid());
|
||||
|
||||
// 重置为有效状态
|
||||
// Reset to a valid state
|
||||
header.version_type = VersionType::Object;
|
||||
assert!(header.is_valid());
|
||||
|
||||
// 测试无效的纠删码参数
|
||||
// 当 ec_m = 0 时,has_ec() 返回 false,所以不会检查纠删码参数
|
||||
// Test invalid erasure coding parameters
|
||||
// When ec_m = 0, has_ec() returns false so parity parameters are skipped
|
||||
header.ec_m = 0;
|
||||
header.ec_n = 1;
|
||||
assert!(header.is_valid()); // 这是有效的,因为没有启用纠删码
|
||||
assert!(header.is_valid()); // Valid because erasure coding is disabled
|
||||
|
||||
// 启用纠删码但参数无效
|
||||
// Enable erasure coding with invalid parameters
|
||||
header.ec_m = 2;
|
||||
header.ec_n = 0;
|
||||
// 当 ec_n = 0 时,has_ec() 返回 false,所以不会检查纠删码参数
|
||||
assert!(header.is_valid()); // 这实际上是有效的,因为 has_ec() 返回 false
|
||||
// When ec_n = 0, has_ec() returns false so parity parameters are skipped
|
||||
assert!(header.is_valid()); // This remains valid because has_ec() returns false
|
||||
|
||||
// 重置为有效状态
|
||||
// Reset to a valid state
|
||||
header.ec_n = 1;
|
||||
assert!(header.is_valid());
|
||||
}
|
||||
@@ -3401,7 +3409,7 @@ mod test {
|
||||
obj.meta_user.insert(key.to_string(), value.to_string());
|
||||
}
|
||||
|
||||
// 验证序列化和反序列化
|
||||
// Verify serialization and deserialization
|
||||
let data = obj.marshal_msg().unwrap();
|
||||
let mut obj2 = MetaObject::default();
|
||||
obj2.unmarshal_msg(&data).unwrap();
|
||||
@@ -3447,7 +3455,7 @@ async fn test_read_xl_meta_no_data() {
|
||||
let filepath = "./test_xl.meta";
|
||||
|
||||
let mut file = File::create(filepath).await.unwrap();
|
||||
// 写入字符串
|
||||
// Write string data
|
||||
file.write_all(&buff).await.unwrap();
|
||||
|
||||
let mut f = File::open(filepath).await.unwrap();
|
||||
|
||||
@@ -44,6 +44,20 @@ impl InlineData {
|
||||
if self.0.is_empty() { &self.0 } else { &self.0[1..] }
|
||||
}
|
||||
|
||||
pub fn entries(&self) -> Result<usize> {
|
||||
if self.0.is_empty() || !self.version_ok() {
|
||||
return Ok(0);
|
||||
}
|
||||
|
||||
let buf = self.after_version();
|
||||
|
||||
let mut cur = Cursor::new(buf);
|
||||
|
||||
let fields_len = rmp::decode::read_map_len(&mut cur)?;
|
||||
|
||||
Ok(fields_len as usize)
|
||||
}
|
||||
|
||||
pub fn find(&self, key: &str) -> Result<Option<Vec<u8>>> {
|
||||
if self.0.is_empty() || !self.version_ok() {
|
||||
return Ok(None);
|
||||
|
||||
@@ -34,7 +34,6 @@ time = { workspace = true, features = ["serde-human-readable"] }
|
||||
serde = { workspace = true, features = ["derive", "rc"] }
|
||||
rustfs-ecstore = { workspace = true }
|
||||
rustfs-policy.workspace = true
|
||||
rustfs-config.workspace = true
|
||||
serde_json.workspace = true
|
||||
async-trait.workspace = true
|
||||
thiserror.workspace = true
|
||||
|
||||
@@ -127,8 +127,8 @@ impl CacheInner {
|
||||
// todo!()
|
||||
// }
|
||||
|
||||
// /// 如果是临时用户,返回 Ok(Some(partent_name)))
|
||||
// /// 如果不是临时用户,返回 Ok(None)
|
||||
// /// Return Ok(Some(parent_name)) when the user is temporary.
|
||||
// /// Return Ok(None) for non-temporary users.
|
||||
// fn is_temp_user(&self, user_name: &str) -> crate::Result<Option<&str>> {
|
||||
// let user = self
|
||||
// .get_user(user_name)
|
||||
@@ -141,8 +141,8 @@ impl CacheInner {
|
||||
// }
|
||||
// }
|
||||
|
||||
// /// 如果是临时用户,返回 Ok(Some(partent_name)))
|
||||
// /// 如果不是临时用户,返回 Ok(None)
|
||||
// /// Return Ok(Some(parent_name)) when the user is a temporary identity.
|
||||
// /// Return Ok(None) when the user is not temporary.
|
||||
// fn is_service_account(&self, user_name: &str) -> crate::Result<Option<&str>> {
|
||||
// let user = self
|
||||
// .get_user(user_name)
|
||||
|
||||
@@ -163,6 +163,83 @@ where
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn load_user(&self, access_key: &str) -> Result<()> {
|
||||
let mut users_map: HashMap<String, UserIdentity> = HashMap::new();
|
||||
let mut user_policy_map = HashMap::new();
|
||||
let mut sts_users_map = HashMap::new();
|
||||
let mut sts_policy_map = HashMap::new();
|
||||
let mut policy_docs_map = HashMap::new();
|
||||
|
||||
let _ = self.api.load_user(access_key, UserType::Svc, &mut users_map).await;
|
||||
|
||||
let parent_user = users_map.get(access_key).map(|svc| svc.credentials.parent_user.clone());
|
||||
|
||||
if let Some(parent_user) = parent_user {
|
||||
let _ = self.api.load_user(&parent_user, UserType::Reg, &mut users_map).await;
|
||||
let _ = self
|
||||
.api
|
||||
.load_mapped_policy(&parent_user, UserType::Reg, false, &mut user_policy_map)
|
||||
.await;
|
||||
} else {
|
||||
let _ = self.api.load_user(access_key, UserType::Reg, &mut users_map).await;
|
||||
if users_map.contains_key(access_key) {
|
||||
let _ = self
|
||||
.api
|
||||
.load_mapped_policy(access_key, UserType::Reg, false, &mut user_policy_map)
|
||||
.await;
|
||||
}
|
||||
|
||||
let _ = self.api.load_user(access_key, UserType::Sts, &mut sts_users_map).await;
|
||||
|
||||
let has_sts_user = sts_users_map.get(access_key);
|
||||
|
||||
let sts_parent = has_sts_user.map(|sts| sts.credentials.parent_user.clone());
|
||||
if let Some(parent) = sts_parent {
|
||||
let _ = self
|
||||
.api
|
||||
.load_mapped_policy(&parent, UserType::Sts, false, &mut sts_policy_map)
|
||||
.await;
|
||||
}
|
||||
|
||||
let sts_user = has_sts_user.map(|sts| sts.credentials.access_key.clone());
|
||||
if let Some(ref sts) = sts_user {
|
||||
if let Some(plc) = sts_policy_map.get(sts) {
|
||||
for p in plc.to_slice().iter() {
|
||||
if !policy_docs_map.contains_key(p) {
|
||||
let _ = self.api.load_policy_doc(p, &mut policy_docs_map).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(plc) = user_policy_map.get(access_key) {
|
||||
for p in plc.to_slice().iter() {
|
||||
if !policy_docs_map.contains_key(p) {
|
||||
let _ = self.api.load_policy_doc(p, &mut policy_docs_map).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(user) = users_map.get(access_key) {
|
||||
Cache::add_or_update(&self.cache.users, access_key, user, OffsetDateTime::now_utc());
|
||||
}
|
||||
if let Some(user_policy) = user_policy_map.get(access_key) {
|
||||
Cache::add_or_update(&self.cache.user_policies, access_key, user_policy, OffsetDateTime::now_utc());
|
||||
}
|
||||
if let Some(sts_user) = sts_users_map.get(access_key) {
|
||||
Cache::add_or_update(&self.cache.sts_accounts, access_key, sts_user, OffsetDateTime::now_utc());
|
||||
}
|
||||
if let Some(sts_policy) = sts_policy_map.get(access_key) {
|
||||
Cache::add_or_update(&self.cache.sts_policies, access_key, sts_policy, OffsetDateTime::now_utc());
|
||||
}
|
||||
if let Some(policy_doc) = policy_docs_map.get(access_key) {
|
||||
Cache::add_or_update(&self.cache.policy_docs, access_key, policy_doc, OffsetDateTime::now_utc());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// TODO: Check if exists, whether retry is possible
|
||||
#[tracing::instrument(level = "debug", skip(self))]
|
||||
async fn save_iam_formatter(self: Arc<Self>) -> Result<()> {
|
||||
@@ -653,7 +730,11 @@ where
|
||||
Some(p) => p.clone(),
|
||||
None => {
|
||||
let mut m = HashMap::new();
|
||||
self.api.load_mapped_policy(name, UserType::Reg, false, &mut m).await?;
|
||||
if let Err(err) = self.api.load_mapped_policy(name, UserType::Reg, false, &mut m).await {
|
||||
if !is_err_no_such_policy(&err) {
|
||||
return Err(err);
|
||||
}
|
||||
}
|
||||
if let Some(p) = m.get(name) {
|
||||
Cache::add_or_update(&self.cache.user_policies, name, p, OffsetDateTime::now_utc());
|
||||
p.clone()
|
||||
@@ -662,7 +743,11 @@ where
|
||||
Some(p) => p.clone(),
|
||||
None => {
|
||||
let mut m = HashMap::new();
|
||||
self.api.load_mapped_policy(name, UserType::Sts, false, &mut m).await?;
|
||||
if let Err(err) = self.api.load_mapped_policy(name, UserType::Sts, false, &mut m).await {
|
||||
if !is_err_no_such_policy(&err) {
|
||||
return Err(err);
|
||||
}
|
||||
}
|
||||
if let Some(p) = m.get(name) {
|
||||
Cache::add_or_update(&self.cache.sts_policies, name, p, OffsetDateTime::now_utc());
|
||||
p.clone()
|
||||
@@ -694,7 +779,11 @@ where
|
||||
Some(p) => p.clone(),
|
||||
None => {
|
||||
let mut m = HashMap::new();
|
||||
self.api.load_mapped_policy(group, UserType::Reg, true, &mut m).await?;
|
||||
if let Err(err) = self.api.load_mapped_policy(group, UserType::Reg, true, &mut m).await {
|
||||
if !is_err_no_such_policy(&err) {
|
||||
return Err(err);
|
||||
}
|
||||
}
|
||||
if let Some(p) = m.get(group) {
|
||||
Cache::add_or_update(&self.cache.group_policies, group, p, OffsetDateTime::now_utc());
|
||||
p.clone()
|
||||
@@ -736,7 +825,11 @@ where
|
||||
Some(p) => p.clone(),
|
||||
None => {
|
||||
let mut m = HashMap::new();
|
||||
self.api.load_mapped_policy(group, UserType::Reg, true, &mut m).await?;
|
||||
if let Err(err) = self.api.load_mapped_policy(group, UserType::Reg, true, &mut m).await {
|
||||
if !is_err_no_such_policy(&err) {
|
||||
return Err(err);
|
||||
}
|
||||
}
|
||||
if let Some(p) = m.get(group) {
|
||||
Cache::add_or_update(&self.cache.group_policies, group, p, OffsetDateTime::now_utc());
|
||||
p.clone()
|
||||
@@ -1038,7 +1131,7 @@ where
|
||||
}
|
||||
}
|
||||
|
||||
self.api.delete_mapped_policy(access_key, utype, false).await?;
|
||||
let _ = self.api.delete_mapped_policy(access_key, utype, false).await;
|
||||
|
||||
Cache::delete(&self.cache.user_policies, access_key, OffsetDateTime::now_utc());
|
||||
|
||||
@@ -1246,6 +1339,26 @@ where
|
||||
Ok(self.cache.groups.load().keys().cloned().collect())
|
||||
}
|
||||
|
||||
pub async fn update_groups(&self) -> Result<Vec<String>> {
|
||||
let mut groups_set = HashSet::new();
|
||||
let mut m = HashMap::new();
|
||||
self.api.load_groups(&mut m).await?;
|
||||
for (group, gi) in m.iter() {
|
||||
Cache::add_or_update(&self.cache.groups, group, gi, OffsetDateTime::now_utc());
|
||||
groups_set.insert(group.to_string());
|
||||
}
|
||||
|
||||
let mut m = HashMap::new();
|
||||
|
||||
self.api.load_mapped_policies(UserType::Reg, true, &mut m).await?;
|
||||
for (group, gi) in m.iter() {
|
||||
Cache::add_or_update(&self.cache.group_policies, group, gi, OffsetDateTime::now_utc());
|
||||
groups_set.insert(group.to_string());
|
||||
}
|
||||
|
||||
Ok(groups_set.into_iter().collect())
|
||||
}
|
||||
|
||||
pub async fn remove_members_from_group(
|
||||
&self,
|
||||
name: &str,
|
||||
@@ -1312,9 +1425,17 @@ where
|
||||
}
|
||||
|
||||
if members.is_empty() {
|
||||
self.api.delete_mapped_policy(group, UserType::Reg, true).await?;
|
||||
if let Err(err) = self.api.delete_mapped_policy(group, UserType::Reg, true).await {
|
||||
if !is_err_no_such_policy(&err) {
|
||||
return Err(err);
|
||||
}
|
||||
}
|
||||
|
||||
self.api.delete_group_info(group).await?;
|
||||
if let Err(err) = self.api.delete_group_info(group).await {
|
||||
if !is_err_no_such_group(&err) {
|
||||
return Err(err);
|
||||
}
|
||||
}
|
||||
|
||||
Cache::delete(&self.cache.groups, group, OffsetDateTime::now_utc());
|
||||
Cache::delete(&self.cache.group_policies, group, OffsetDateTime::now_utc());
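Several hunks in this change swap hard failures for a tolerant pattern: not-found errors from `delete_mapped_policy`, `delete_group_info`, and the `load_mapped_policy` lookups are swallowed, while any other error still propagates. A compact illustration of that pattern with made-up error and helper types:

```rust
// Made-up error type and helpers illustrating "ignore not-found, surface
// everything else" as applied to the delete/load calls in these hunks.
#[allow(dead_code)]
#[derive(Debug)]
enum IamError {
    NoSuchPolicy,
    Storage(String),
}

fn is_err_no_such_policy(err: &IamError) -> bool {
    matches!(err, IamError::NoSuchPolicy)
}

fn delete_mapped_policy(exists: bool) -> Result<(), IamError> {
    if exists { Ok(()) } else { Err(IamError::NoSuchPolicy) }
}

fn remove_group_mapping(exists: bool) -> Result<(), IamError> {
    // A mapping that is already gone counts as successfully removed.
    if let Err(err) = delete_mapped_policy(exists) {
        if !is_err_no_such_policy(&err) {
            return Err(err);
        }
    }
    Ok(())
}

fn main() {
    assert!(remove_group_mapping(true).is_ok());
    assert!(remove_group_mapping(false).is_ok());
}
```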
|
||||
|
||||
@@ -13,7 +13,7 @@
|
||||
// limitations under the License.
|
||||
|
||||
use super::{GroupInfo, MappedPolicy, Store, UserType};
|
||||
use crate::error::{Error, Result, is_err_config_not_found};
|
||||
use crate::error::{Error, Result, is_err_config_not_found, is_err_no_such_group};
|
||||
use crate::{
|
||||
cache::{Cache, CacheEntity},
|
||||
error::{is_err_no_such_policy, is_err_no_such_user},
|
||||
@@ -563,7 +563,11 @@ impl Store for ObjectStore {
|
||||
|
||||
if let Some(item) = v.item {
|
||||
let name = rustfs_utils::path::dir(&item);
|
||||
self.load_group(&name, m).await?;
|
||||
if let Err(err) = self.load_group(&name, m).await {
|
||||
if !is_err_no_such_group(&err) {
|
||||
return Err(err);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
let _ = ctx.cancel();
|
||||
|
||||
@@ -626,7 +626,17 @@ impl<T: Store> IamSys<T> {
|
||||
|
||||
Ok((Some(res), ok))
|
||||
}
|
||||
None => Ok((None, false)),
|
||||
None => {
|
||||
let _ = self.store.load_user(access_key).await;
|
||||
|
||||
if let Some(res) = self.store.get_user(access_key).await {
|
||||
let ok = res.credentials.is_valid();
|
||||
|
||||
Ok((Some(res), ok))
|
||||
} else {
|
||||
Ok((None, false))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
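The new `None` arm turns a cache miss into a lazy load: pull the identity through `load_user`, then re-query the cache before giving up. A minimal sketch of that load-on-miss shape with stand-in types (not the crate's `Store` trait or credential checks):

```rust
// Stand-in cache/backend types sketching the load-on-miss lookup; the real
// code goes through Store::load_user and credential validation.
use std::collections::HashMap;

struct UserLookup {
    cache: HashMap<String, String>,
    backend: HashMap<String, String>,
}

impl UserLookup {
    fn get_user(&mut self, key: &str) -> Option<String> {
        if let Some(hit) = self.cache.get(key) {
            return Some(hit.clone());
        }
        // Cache miss: best-effort load from the backend, then re-check the cache.
        if let Some(loaded) = self.backend.get(key).cloned() {
            self.cache.insert(key.to_string(), loaded);
        }
        self.cache.get(key).cloned()
    }
}

fn main() {
    let mut lookup = UserLookup {
        cache: HashMap::new(),
        backend: HashMap::from([("svc-key".to_string(), "credentials".to_string())]),
    };
    assert_eq!(lookup.get_user("svc-key").as_deref(), Some("credentials"));
    assert_eq!(lookup.get_user("missing"), None);
}
```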
|
||||
|
||||
@@ -667,6 +677,10 @@ impl<T: Store> IamSys<T> {
|
||||
self.store.get_group_description(group).await
|
||||
}
|
||||
|
||||
pub async fn list_groups_load(&self) -> Result<Vec<String>> {
|
||||
self.store.update_groups().await
|
||||
}
|
||||
|
||||
pub async fn list_groups(&self) -> Result<Vec<String>> {
|
||||
self.store.list_groups().await
|
||||
}
|
||||
|
||||
@@ -21,21 +21,15 @@ use std::time::Duration;
|
||||
use url::Url;
|
||||
|
||||
/// KMS backend types
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[derive(Debug, Default, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub enum KmsBackend {
|
||||
/// Vault backend (recommended for production)
|
||||
Vault,
|
||||
/// Local file-based backend for development and testing only
|
||||
#[default]
|
||||
Local,
|
||||
}
|
||||
|
||||
impl Default for KmsBackend {
|
||||
fn default() -> Self {
|
||||
// Default to Local backend since Vault requires configuration
|
||||
Self::Local
|
||||
}
|
||||
}
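This hunk replaces the hand-written `impl Default for KmsBackend` with `#[derive(Default)]` plus a `#[default]` attribute on the `Local` variant, a form stable since Rust 1.62. A tiny sketch of the equivalence, with an illustrative enum name:

```rust
// Deriving Default with a #[default] variant is equivalent to the removed
// manual impl; the enum name here is illustrative.
#[derive(Debug, Default, PartialEq)]
enum Backend {
    Vault,
    #[default]
    Local,
}

fn main() {
    assert_eq!(Backend::default(), Backend::Local);
    assert_ne!(Backend::Vault, Backend::default());
}
```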
|
||||
|
||||
/// Main KMS configuration
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct KmsConfig {
|
||||
|
||||
@@ -150,7 +150,7 @@ impl TargetRegistry {
|
||||
// Case 1: The format is <FIELD_NAME>_<INSTANCE_ID>
|
||||
// e.g., rest = "ENDPOINT_PRIMARY" -> field_name="ENDPOINT", instance_id="PRIMARY"
|
||||
Some(field) => (field.to_lowercase(), instance_id_part.to_lowercase()),
|
||||
// Case 2: The format is <FIELD_NAME> (无 INSTANCE_ID)
|
||||
// Case 2: The format is <FIELD_NAME> (without INSTANCE_ID)
|
||||
// e.g., rest = "ENABLE" -> field_name="ENABLE", instance_id="" (Universal configuration `_ DEFAULT_DELIMITER`)
|
||||
None => (instance_id_part.to_lowercase(), DEFAULT_DELIMITER.to_string()),
|
||||
};
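To make the two cases concrete, here is a hedged sketch of the `<FIELD_NAME>_<INSTANCE_ID>` split the comments describe; the split direction and helper name are assumptions for illustration, not the registry's actual code:

```rust
// Assumed helper reproducing the documented cases; the real TargetRegistry
// code may split the environment key differently.
fn split_env_rest(rest: &str) -> (String, String) {
    match rest.rsplit_once('_') {
        // Case 1: "ENDPOINT_PRIMARY" -> ("endpoint", "primary")
        Some((field, instance)) => (field.to_lowercase(), instance.to_lowercase()),
        // Case 2: "ENABLE" -> ("enable", "") i.e. the universal configuration
        None => (rest.to_lowercase(), String::new()),
    }
}

fn main() {
    assert_eq!(split_env_rest("ENDPOINT_PRIMARY"), ("endpoint".to_string(), "primary".to_string()));
    assert_eq!(split_env_rest("ENABLE"), ("enable".to_string(), String::new()));
}
```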
|
||||
|
||||
@@ -31,11 +31,14 @@ workspace = true
|
||||
[features]
|
||||
default = []
|
||||
gpu = ["dep:nvml-wrapper"]
|
||||
full = ["gpu"]
|
||||
|
||||
[dependencies]
|
||||
rustfs-config = { workspace = true, features = ["constants", "observability"] }
|
||||
rustfs-utils = { workspace = true, features = ["ip", "path"] }
|
||||
flexi_logger = { workspace = true }
|
||||
metrics = { workspace = true }
|
||||
metrics-exporter-opentelemetry = { workspace = true }
|
||||
nu-ansi-term = { workspace = true }
|
||||
nvml-wrapper = { workspace = true, optional = true }
|
||||
opentelemetry = { workspace = true }
|
||||
@@ -47,7 +50,6 @@ opentelemetry-semantic-conventions = { workspace = true, features = ["semconv_ex
|
||||
serde = { workspace = true }
|
||||
smallvec = { workspace = true, features = ["serde"] }
|
||||
tracing = { workspace = true, features = ["std", "attributes"] }
|
||||
tracing-core = { workspace = true }
|
||||
tracing-error = { workspace = true }
|
||||
tracing-opentelemetry = { workspace = true }
|
||||
tracing-subscriber = { workspace = true, features = ["registry", "std", "fmt", "env-filter", "tracing-log", "time", "local-time", "json"] }
|
||||
|
||||
@@ -43,7 +43,7 @@ async fn run(service_name: String) {
|
||||
&[opentelemetry::KeyValue::new("operation", "run")],
|
||||
);
|
||||
|
||||
match SystemObserver::init_process_observer(meter).await {
|
||||
match SystemObserver::init_process_observer().await {
|
||||
Ok(_) => info!("Process observer initialized successfully"),
|
||||
Err(e) => error!("Failed to initialize process observer: {:?}", e),
|
||||
}
|
||||
|
||||
@@ -18,21 +18,39 @@ use rustfs_config::observability::{
|
||||
ENV_OBS_METER_INTERVAL, ENV_OBS_SAMPLE_RATIO, ENV_OBS_SERVICE_NAME, ENV_OBS_SERVICE_VERSION, ENV_OBS_USE_STDOUT,
|
||||
};
|
||||
use rustfs_config::{
|
||||
APP_NAME, DEFAULT_LOG_KEEP_FILES, DEFAULT_LOG_LEVEL, DEFAULT_LOG_ROTATION_SIZE_MB, DEFAULT_LOG_ROTATION_TIME,
|
||||
DEFAULT_OBS_LOG_FILENAME, ENVIRONMENT, METER_INTERVAL, SAMPLE_RATIO, SERVICE_VERSION, USE_STDOUT,
|
||||
APP_NAME, DEFAULT_LOG_KEEP_FILES, DEFAULT_LOG_LEVEL, DEFAULT_LOG_LOCAL_LOGGING_ENABLED, DEFAULT_LOG_ROTATION_SIZE_MB,
|
||||
DEFAULT_LOG_ROTATION_TIME, DEFAULT_OBS_LOG_FILENAME, ENVIRONMENT, METER_INTERVAL, SAMPLE_RATIO, SERVICE_VERSION, USE_STDOUT,
|
||||
};
|
||||
use rustfs_utils::dirs::get_log_directory_to_string;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::env;
|
||||
|
||||
/// OpenTelemetry Configuration
|
||||
/// Add service name, service version, environment
|
||||
/// Add interval time for metric collection
|
||||
/// Add sample ratio for trace sampling
|
||||
/// Add endpoint for metric collection
|
||||
/// Add use_stdout for output to stdout
|
||||
/// Add logger level for log level
|
||||
/// Add local_logging_enabled for local logging enabled
|
||||
/// Observability: OpenTelemetry configuration
|
||||
/// # Fields
|
||||
/// * `endpoint`: Endpoint for metric collection
|
||||
/// * `use_stdout`: Output to stdout
|
||||
/// * `sample_ratio`: Trace sampling ratio
|
||||
/// * `meter_interval`: Metric collection interval
|
||||
/// * `service_name`: Service name
|
||||
/// * `service_version`: Service version
|
||||
/// * `environment`: Environment
|
||||
/// * `logger_level`: Logger level
|
||||
/// * `local_logging_enabled`: Local logging enabled
|
||||
/// # Added flexi_logger related configurations
|
||||
/// * `log_directory`: Log file directory
|
||||
/// * `log_filename`: The name of the log file
|
||||
/// * `log_rotation_size_mb`: Log file size cut threshold (MB)
|
||||
/// * `log_rotation_time`: Logs are cut by time (Hour,Day,Minute,Second)
|
||||
/// * `log_keep_files`: Number of log files to be retained
|
||||
/// # Returns
|
||||
/// A new instance of OtelConfig
|
||||
///
|
||||
/// # Example
|
||||
/// ```no_run
|
||||
/// use rustfs_obs::OtelConfig;
|
||||
///
|
||||
/// let config = OtelConfig::new();
|
||||
/// ```
|
||||
#[derive(Debug, Deserialize, Serialize, Clone)]
|
||||
pub struct OtelConfig {
|
||||
pub endpoint: String, // Endpoint for metric collection
|
||||
@@ -102,7 +120,7 @@ impl OtelConfig {
|
||||
local_logging_enabled: env::var(ENV_OBS_LOCAL_LOGGING_ENABLED)
|
||||
.ok()
|
||||
.and_then(|v| v.parse().ok())
|
||||
.or(Some(false)),
|
||||
.or(Some(DEFAULT_LOG_LOCAL_LOGGING_ENABLED)),
|
||||
log_directory: Some(get_log_directory_to_string(ENV_OBS_LOG_DIRECTORY)),
|
||||
log_filename: env::var(ENV_OBS_LOG_FILENAME)
|
||||
.ok()
|
||||
@@ -127,11 +145,28 @@ impl OtelConfig {
|
||||
///
|
||||
/// # Returns
|
||||
/// A new instance of OtelConfig
|
||||
///
|
||||
/// # Example
|
||||
/// ```no_run
|
||||
/// use rustfs_obs::OtelConfig;
|
||||
///
|
||||
/// let config = OtelConfig::new();
|
||||
/// ```
|
||||
pub fn new() -> Self {
|
||||
Self::extract_otel_config_from_env(None)
|
||||
}
|
||||
}
|
||||
|
||||
/// Implement Default trait for OtelConfig
|
||||
/// This allows creating a default instance of OtelConfig using OtelConfig::default()
|
||||
/// which internally calls OtelConfig::new()
|
||||
///
|
||||
/// # Example
|
||||
/// ```no_run
|
||||
/// use rustfs_obs::OtelConfig;
|
||||
///
|
||||
/// let config = OtelConfig::default();
|
||||
/// ```
|
||||
impl Default for OtelConfig {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
@@ -165,6 +200,20 @@ impl AppConfig {
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new instance of AppConfig with specified endpoint
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `endpoint` - An optional string representing the endpoint for metric collection
|
||||
///
|
||||
/// # Returns
|
||||
/// A new instance of AppConfig
|
||||
///
|
||||
/// # Example
|
||||
/// ```no_run
|
||||
/// use rustfs_obs::AppConfig;
|
||||
///
|
||||
/// let config = AppConfig::new_with_endpoint(Some("http://localhost:4317".to_string()));
|
||||
/// ```
|
||||
pub fn new_with_endpoint(endpoint: Option<String>) -> Self {
|
||||
Self {
|
||||
observability: OtelConfig::extract_otel_config_from_env(endpoint),
|
||||
@@ -172,7 +221,16 @@ impl AppConfig {
|
||||
}
|
||||
}
|
||||
|
||||
// implement default for AppConfig
|
||||
/// Implement Default trait for AppConfig
|
||||
/// This allows creating a default instance of AppConfig using AppConfig::default()
|
||||
/// which internally calls AppConfig::new()
|
||||
///
|
||||
/// # Example
|
||||
/// ```no_run
|
||||
/// use rustfs_obs::AppConfig;
|
||||
///
|
||||
/// let config = AppConfig::default();
|
||||
/// ```
|
||||
impl Default for AppConfig {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
|
||||
@@ -12,10 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::AppConfig;
|
||||
use crate::telemetry::{OtelGuard, init_telemetry};
|
||||
use opentelemetry::metrics::Meter;
|
||||
use rustfs_config::APP_NAME;
|
||||
use crate::{AppConfig, SystemObserver};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use tokio::sync::{OnceCell, SetError};
|
||||
use tracing::{error, info};
|
||||
@@ -26,20 +24,11 @@ static GLOBAL_GUARD: OnceCell<Arc<Mutex<OtelGuard>>> = OnceCell::const_new();
|
||||
/// Flag indicating if observability is enabled
|
||||
pub(crate) static IS_OBSERVABILITY_ENABLED: OnceCell<bool> = OnceCell::const_new();
|
||||
|
||||
/// Name of the observability meter
|
||||
pub(crate) static OBSERVABILITY_METER_NAME: OnceCell<String> = OnceCell::const_new();
|
||||
|
||||
/// Check whether Observability is enabled
|
||||
pub fn is_observability_enabled() -> bool {
|
||||
IS_OBSERVABILITY_ENABLED.get().copied().unwrap_or(false)
|
||||
}
|
||||
|
||||
/// Get the global meter for observability
|
||||
pub fn global_meter() -> Meter {
|
||||
let meter_name = OBSERVABILITY_METER_NAME.get().map(|s| s.as_str()).unwrap_or(APP_NAME);
|
||||
opentelemetry::global::meter(meter_name)
|
||||
}
|
||||
|
||||
/// Error type for global guard operations
|
||||
#[derive(Debug, thiserror::Error)]
|
||||
pub enum GlobalError {
|
||||
@@ -75,18 +64,33 @@ pub enum GlobalError {
|
||||
///
|
||||
/// # Example
|
||||
/// ```no_run
|
||||
/// use rustfs_obs::init_obs;
|
||||
/// # use rustfs_obs::init_obs;
|
||||
///
|
||||
/// # #[tokio::main]
|
||||
/// # async fn main() {
|
||||
/// let guard = init_obs(None).await;
|
||||
/// # let guard = init_obs(None).await;
|
||||
/// # }
|
||||
/// ```
|
||||
pub async fn init_obs(endpoint: Option<String>) -> OtelGuard {
|
||||
// Load the configuration file
|
||||
let config = AppConfig::new_with_endpoint(endpoint);
|
||||
|
||||
init_telemetry(&config.observability)
|
||||
let otel_guard = init_telemetry(&config.observability);
|
||||
// Server will be created per connection - this ensures isolation
|
||||
tokio::spawn(async move {
|
||||
// Record the PID-related metrics of the current process
|
||||
let obs_result = SystemObserver::init_process_observer().await;
|
||||
match obs_result {
|
||||
Ok(_) => {
|
||||
info!(target: "rustfs::obs::system::metrics","Process observer initialized successfully");
|
||||
}
|
||||
Err(e) => {
|
||||
error!(target: "rustfs::obs::system::metrics","Failed to initialize process observer: {}", e);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
otel_guard
|
||||
}
|
||||
|
||||
/// Set the global guard for OpenTelemetry
|
||||
@@ -99,14 +103,14 @@ pub async fn init_obs(endpoint: Option<String>) -> OtelGuard {
|
||||
/// * `Err(GuardError)` if setting fails
|
||||
///
|
||||
/// # Example
|
||||
/// ```rust
|
||||
/// use rustfs_obs::{ init_obs, set_global_guard};
|
||||
/// ```no_run
|
||||
/// # use rustfs_obs::{ init_obs, set_global_guard};
|
||||
///
|
||||
/// async fn init() -> Result<(), Box<dyn std::error::Error>> {
|
||||
/// let guard = init_obs(None).await;
|
||||
/// set_global_guard(guard)?;
|
||||
/// Ok(())
|
||||
/// }
|
||||
/// # async fn init() -> Result<(), Box<dyn std::error::Error>> {
|
||||
/// # let guard = init_obs(None).await;
|
||||
/// # set_global_guard(guard)?;
|
||||
/// # Ok(())
|
||||
/// # }
|
||||
/// ```
|
||||
pub fn set_global_guard(guard: OtelGuard) -> Result<(), GlobalError> {
|
||||
info!("Initializing global OpenTelemetry guard");
|
||||
@@ -120,29 +124,20 @@ pub fn set_global_guard(guard: OtelGuard) -> Result<(), GlobalError> {
/// * `Err(GuardError)` if guard not initialized
///
/// # Example
/// ```rust
/// use rustfs_obs::get_global_guard;
/// ```no_run
/// # use rustfs_obs::get_global_guard;
///
/// async fn trace_operation() -> Result<(), Box<dyn std::error::Error>> {
/// let guard = get_global_guard()?;
/// let _lock = guard.lock().unwrap();
/// // Perform traced operation
/// Ok(())
/// }
/// # async fn trace_operation() -> Result<(), Box<dyn std::error::Error>> {
/// # let guard = get_global_guard()?;
/// # let _lock = guard.lock().unwrap();
/// # // Perform traced operation
/// # Ok(())
/// # }
/// ```
pub fn get_global_guard() -> Result<Arc<Mutex<OtelGuard>>, GlobalError> {
    GLOBAL_GUARD.get().cloned().ok_or(GlobalError::NotInitialized)
}

/// Try to get the global guard for OpenTelemetry
///
/// # Returns
/// * `Some(Arc<Mutex<OtelGuard>>)` if guard exists
/// * `None` if guard not initialized
pub fn try_get_global_guard() -> Option<Arc<Mutex<OtelGuard>>> {
    GLOBAL_GUARD.get().cloned()
}

#[cfg(test)]
mod tests {
    use super::*;

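Aside (not part of the diff): `get_global_guard` and `try_get_global_guard` above read a `GLOBAL_GUARD` cell and clone the `Arc`. A minimal, self-contained sketch of that pattern using only `std::sync::OnceLock`; the names `Guard`, `GLOBAL`, `set_guard`, and `get_guard` are placeholders, not the crate's real items:

```rust
use std::sync::{Arc, Mutex, OnceLock};

#[derive(Debug)]
struct Guard; // stand-in for OtelGuard

static GLOBAL: OnceLock<Arc<Mutex<Guard>>> = OnceLock::new();

fn set_guard(guard: Guard) -> Result<(), &'static str> {
    // OnceLock::set fails if a value was already stored
    GLOBAL
        .set(Arc::new(Mutex::new(guard)))
        .map_err(|_| "guard already initialized")
}

fn get_guard() -> Result<Arc<Mutex<Guard>>, &'static str> {
    // mirrors the `GLOBAL_GUARD.get().cloned().ok_or(...)` call in the diff
    GLOBAL.get().cloned().ok_or("guard not initialized")
}

fn main() {
    set_guard(Guard).unwrap();
    let guard = get_guard().unwrap();
    let _lock = guard.lock().unwrap();
    // perform work while the guard is held
}
```

Cloning the `Arc` keeps the cell itself immutable while still letting callers lock the inner guard.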
@@ -17,7 +17,7 @@
//! provides tools for system and service monitoring
//!
//! ## feature mark
//!
//! - `default`: default monitoring function
//! - `gpu`: gpu monitoring function
//! - `full`: includes all functions
//!
@@ -47,6 +47,6 @@ mod metrics;
mod system;
mod telemetry;

pub use config::AppConfig;
pub use config::{AppConfig, OtelConfig};
pub use global::*;
pub use system::SystemObserver;

@@ -123,7 +123,7 @@ impl MetricSubsystem {
            // Debug related subsystems
            "/debug/go" => Self::DebugGo,

            // 集群相关子系统
            // Cluster-related subsystems
            "/cluster/health" => Self::ClusterHealth,
            "/cluster/usage/objects" => Self::ClusterUsageObjects,
            "/cluster/usage/buckets" => Self::ClusterUsageBuckets,
@@ -131,7 +131,7 @@ impl MetricSubsystem {
            "/cluster/iam" => Self::ClusterIam,
            "/cluster/config" => Self::ClusterConfig,

            // 其他服务相关子系统
            // Other service-related subsystems
            "/ilm" => Self::Ilm,
            "/audit" => Self::Audit,
            "/logger/webhook" => Self::LoggerWebhook,
@@ -139,7 +139,7 @@ impl MetricSubsystem {
            "/notification" => Self::Notification,
            "/scanner" => Self::Scanner,

            // 其他路径作为自定义处理
            // Treat other paths as custom subsystems
            _ => Self::Custom(path.to_string()),
        }
    }

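Aside (not part of the diff): the three hunks above all touch the same `match` that maps a metrics path to a `MetricSubsystem` variant, with unknown paths falling through to a `Custom` catch-all. A trimmed, illustrative version of that mapping pattern (variant names invented, not the crate's full set):

```rust
#[derive(Debug, PartialEq)]
enum Subsystem {
    ClusterHealth,
    Audit,
    Custom(String),
}

fn from_path(path: &str) -> Subsystem {
    match path {
        // known paths map to dedicated variants
        "/cluster/health" => Subsystem::ClusterHealth,
        "/audit" => Subsystem::Audit,
        // anything unknown falls through to Custom, as in the diff
        other => Subsystem::Custom(other.to_string()),
    }
}

fn main() {
    assert_eq!(from_path("/audit"), Subsystem::Audit);
    assert_eq!(from_path("/foo"), Subsystem::Custom("/foo".into()));
}
```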
@@ -16,10 +16,10 @@ use crate::GlobalError;
use opentelemetry::KeyValue;
use sysinfo::{Pid, System};

pub const PROCESS_PID: opentelemetry::Key = opentelemetry::Key::from_static_str("process.pid");
pub const PROCESS_EXECUTABLE_NAME: opentelemetry::Key = opentelemetry::Key::from_static_str("process.executable.name");
pub const PROCESS_EXECUTABLE_PATH: opentelemetry::Key = opentelemetry::Key::from_static_str("process.executable.path");
pub const PROCESS_COMMAND: opentelemetry::Key = opentelemetry::Key::from_static_str("process.command");
pub(crate) const PROCESS_PID: opentelemetry::Key = opentelemetry::Key::from_static_str("process.pid");
pub(crate) const PROCESS_EXECUTABLE_NAME: opentelemetry::Key = opentelemetry::Key::from_static_str("process.executable.name");
pub(crate) const PROCESS_EXECUTABLE_PATH: opentelemetry::Key = opentelemetry::Key::from_static_str("process.executable.path");
pub(crate) const PROCESS_COMMAND: opentelemetry::Key = opentelemetry::Key::from_static_str("process.command");

/// Struct to hold process attributes
pub struct ProcessAttributes {

@@ -14,7 +14,6 @@

use crate::GlobalError;
use crate::system::attributes::ProcessAttributes;
use crate::system::gpu::GpuCollector;
use crate::system::metrics::{DIRECTION, INTERFACE, Metrics, STATUS};
use opentelemetry::KeyValue;
use std::time::SystemTime;
@@ -27,7 +26,8 @@ use tokio::time::{Duration, sleep};
pub struct Collector {
    metrics: Metrics,
    attributes: ProcessAttributes,
    gpu_collector: GpuCollector,
    #[cfg(feature = "gpu")]
    gpu_collector: crate::system::gpu::GpuCollector,
    pid: Pid,
    system: System,
    networks: Networks,
@@ -41,12 +41,14 @@ impl Collector {
        let attributes = ProcessAttributes::new(pid, &mut system)?;
        let core_count = System::physical_core_count().ok_or(GlobalError::CoreCountError)?;
        let metrics = Metrics::new(&meter);
        let gpu_collector = GpuCollector::new(pid)?;
        #[cfg(feature = "gpu")]
        let gpu_collector = crate::system::gpu::GpuCollector::new(pid)?;
        let networks = Networks::new_with_refreshed_list();

        Ok(Collector {
            metrics,
            attributes,
            #[cfg(feature = "gpu")]
            gpu_collector,
            pid,
            system,
@@ -163,6 +165,7 @@ impl Collector {
        );

        // GPU Metrics (Optional) Non-MacOS
        #[cfg(feature = "gpu")]
        self.gpu_collector.collect(&self.metrics, &self.attributes)?;

        Ok(())

@@ -12,29 +12,20 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#[cfg(feature = "gpu")]
use crate::GlobalError;
#[cfg(feature = "gpu")]
use crate::system::attributes::ProcessAttributes;
#[cfg(feature = "gpu")]
use crate::system::metrics::Metrics;
#[cfg(feature = "gpu")]
use nvml_wrapper::Nvml;
#[cfg(feature = "gpu")]
use nvml_wrapper::enums::device::UsedGpuMemory;
#[cfg(feature = "gpu")]
use sysinfo::Pid;
#[cfg(feature = "gpu")]
use tracing::warn;

/// `GpuCollector` is responsible for collecting GPU memory usage metrics.
#[cfg(feature = "gpu")]
pub struct GpuCollector {
    nvml: Nvml,
    pid: Pid,
}

#[cfg(feature = "gpu")]
impl GpuCollector {
    pub fn new(pid: Pid) -> Result<Self, GlobalError> {
        let nvml = Nvml::init().map_err(|e| GlobalError::GpuInitError(e.to_string()))?;
@@ -64,21 +55,3 @@ impl GpuCollector {
        Ok(())
    }
}

#[cfg(not(feature = "gpu"))]
pub struct GpuCollector;

#[cfg(not(feature = "gpu"))]
impl GpuCollector {
    pub fn new(_pid: sysinfo::Pid) -> Result<Self, crate::GlobalError> {
        Ok(GpuCollector)
    }

    pub fn collect(
        &self,
        _metrics: &crate::system::metrics::Metrics,
        _attributes: &crate::system::attributes::ProcessAttributes,
    ) -> Result<(), crate::GlobalError> {
        Ok(())
    }
}

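Aside (not part of the diff): the stub `GpuCollector` removed above under `#[cfg(not(feature = "gpu"))]` gave callers the same `new`/`collect` surface whether or not the `gpu` feature was compiled in; the refactor drops the stub and gates the call sites instead. A small sketch of the stub pattern itself, with placeholder names and error types:

```rust
#[cfg(feature = "gpu")]
pub struct Probe {
    // real handle to the GPU library would live here
}

#[cfg(feature = "gpu")]
impl Probe {
    pub fn new() -> Result<Self, String> {
        Ok(Probe {})
    }
    pub fn collect(&self) -> Result<(), String> {
        // query the device and record metrics
        Ok(())
    }
}

// With the feature disabled, the same API exists but does nothing.
#[cfg(not(feature = "gpu"))]
pub struct Probe;

#[cfg(not(feature = "gpu"))]
impl Probe {
    pub fn new() -> Result<Self, String> {
        Ok(Probe)
    }
    pub fn collect(&self) -> Result<(), String> {
        Ok(())
    }
}

fn main() -> Result<(), String> {
    let probe = Probe::new()?;
    probe.collect() // identical call site in both builds
}
```

The trade-off is stub maintenance versus `#[cfg]` attributes at every field and call site, which is what this commit opts for.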
@@ -12,19 +12,21 @@
// See the License for the specific language governing permissions and
// limitations under the License.

pub const PROCESS_CPU_USAGE: &str = "process.cpu.usage";
pub const PROCESS_CPU_UTILIZATION: &str = "process.cpu.utilization";
pub const PROCESS_MEMORY_USAGE: &str = "process.memory.usage";
pub const PROCESS_MEMORY_VIRTUAL: &str = "process.memory.virtual";
pub const PROCESS_DISK_IO: &str = "process.disk.io";
pub const PROCESS_NETWORK_IO: &str = "process.network.io";
pub const PROCESS_NETWORK_IO_PER_INTERFACE: &str = "process.network.io.per_interface";
pub const PROCESS_STATUS: &str = "process.status";
use opentelemetry::metrics::{Gauge, Meter};

pub(crate) const PROCESS_CPU_USAGE: &str = "process.cpu.usage";
pub(crate) const PROCESS_CPU_UTILIZATION: &str = "process.cpu.utilization";
pub(crate) const PROCESS_MEMORY_USAGE: &str = "process.memory.usage";
pub(crate) const PROCESS_MEMORY_VIRTUAL: &str = "process.memory.virtual";
pub(crate) const PROCESS_DISK_IO: &str = "process.disk.io";
pub(crate) const PROCESS_NETWORK_IO: &str = "process.network.io";
pub(crate) const PROCESS_NETWORK_IO_PER_INTERFACE: &str = "process.network.io.per_interface";
pub(crate) const PROCESS_STATUS: &str = "process.status";
#[cfg(feature = "gpu")]
pub const PROCESS_GPU_MEMORY_USAGE: &str = "process.gpu.memory.usage";
pub const DIRECTION: opentelemetry::Key = opentelemetry::Key::from_static_str("direction");
pub const STATUS: opentelemetry::Key = opentelemetry::Key::from_static_str("status");
pub const INTERFACE: opentelemetry::Key = opentelemetry::Key::from_static_str("interface");
pub(crate) const DIRECTION: opentelemetry::Key = opentelemetry::Key::from_static_str("direction");
pub(crate) const STATUS: opentelemetry::Key = opentelemetry::Key::from_static_str("status");
pub(crate) const INTERFACE: opentelemetry::Key = opentelemetry::Key::from_static_str("interface");

/// `Metrics` struct holds the OpenTelemetry metrics for process monitoring.
/// It contains various metrics such as CPU usage, memory usage,
@@ -36,20 +38,20 @@ pub const INTERFACE: opentelemetry::Key = opentelemetry::Key::from_static_str("i
/// The `new` method initializes the metrics using the provided
/// `opentelemetry::metrics::Meter`.
pub struct Metrics {
    pub cpu_usage: opentelemetry::metrics::Gauge<f64>,
    pub cpu_utilization: opentelemetry::metrics::Gauge<f64>,
    pub memory_usage: opentelemetry::metrics::Gauge<i64>,
    pub memory_virtual: opentelemetry::metrics::Gauge<i64>,
    pub disk_io: opentelemetry::metrics::Gauge<i64>,
    pub network_io: opentelemetry::metrics::Gauge<i64>,
    pub network_io_per_interface: opentelemetry::metrics::Gauge<i64>,
    pub process_status: opentelemetry::metrics::Gauge<i64>,
    pub cpu_usage: Gauge<f64>,
    pub cpu_utilization: Gauge<f64>,
    pub memory_usage: Gauge<i64>,
    pub memory_virtual: Gauge<i64>,
    pub disk_io: Gauge<i64>,
    pub network_io: Gauge<i64>,
    pub network_io_per_interface: Gauge<i64>,
    pub process_status: Gauge<i64>,
    #[cfg(feature = "gpu")]
    pub gpu_memory_usage: opentelemetry::metrics::Gauge<u64>,
    pub gpu_memory_usage: Gauge<u64>,
}

impl Metrics {
    pub fn new(meter: &opentelemetry::metrics::Meter) -> Self {
    pub fn new(meter: &Meter) -> Self {
        let cpu_usage = meter
            .f64_gauge(PROCESS_CPU_USAGE)
            .with_description("The percentage of CPU in use.")

@@ -12,12 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::GlobalError;
use crate::{GlobalError, is_observability_enabled};
use opentelemetry::global::meter;

pub(crate) mod attributes;
mod attributes;
mod collector;
pub(crate) mod gpu;
pub(crate) mod metrics;
#[cfg(feature = "gpu")]
mod gpu;
mod metrics;

pub struct SystemObserver {}

@@ -25,10 +27,12 @@ impl SystemObserver {
    /// Initialize the indicator collector for the current process
    /// This function will create a new `Collector` instance and start collecting metrics.
    /// It will run indefinitely until the process is terminated.
    pub async fn init_process_observer(meter: opentelemetry::metrics::Meter) -> Result<(), GlobalError> {
        let pid = sysinfo::get_current_pid().map_err(|e| GlobalError::PidError(e.to_string()))?;
        let mut collector = collector::Collector::new(pid, meter, 30000)?;
        collector.run().await
    pub async fn init_process_observer() -> Result<(), GlobalError> {
        if is_observability_enabled() {
            let meter = meter("system");
            return SystemObserver::init_process_observer_for_pid(meter, 30000).await;
        }
        Ok(())
    }

    /// Initialize the metric collector for the specified PID process

@@ -13,12 +13,13 @@
// limitations under the License.

use crate::config::OtelConfig;
use crate::global::{IS_OBSERVABILITY_ENABLED, OBSERVABILITY_METER_NAME};
use crate::global::IS_OBSERVABILITY_ENABLED;
use flexi_logger::{
    Age, Cleanup, Criterion, DeferredNow, FileSpec, LogSpecification, Naming, Record, WriteMode,
    WriteMode::{AsyncWith, BufferAndFlush},
    style,
};
use metrics::counter;
use nu_ansi_term::Color;
use opentelemetry::trace::TracerProvider;
use opentelemetry::{KeyValue, global};
@@ -35,7 +36,8 @@ use opentelemetry_semantic_conventions::{
    attribute::{DEPLOYMENT_ENVIRONMENT_NAME, NETWORK_LOCAL_ADDRESS, SERVICE_VERSION as OTEL_SERVICE_VERSION},
};
use rustfs_config::{
    APP_NAME, DEFAULT_LOG_KEEP_FILES, DEFAULT_LOG_LEVEL, ENVIRONMENT, METER_INTERVAL, SAMPLE_RATIO, SERVICE_VERSION, USE_STDOUT,
    APP_NAME, DEFAULT_LOG_KEEP_FILES, DEFAULT_LOG_LEVEL, DEFAULT_LOG_LOCAL_LOGGING_ENABLED, ENVIRONMENT, METER_INTERVAL,
    SAMPLE_RATIO, SERVICE_VERSION, USE_STDOUT,
    observability::{
        DEFAULT_OBS_ENVIRONMENT_PRODUCTION, DEFAULT_OBS_LOG_FLUSH_MS, DEFAULT_OBS_LOG_MESSAGE_CAPA, DEFAULT_OBS_LOG_POOL_CAPA,
        ENV_OBS_LOG_DIRECTORY,
@@ -233,6 +235,13 @@ pub(crate) fn init_telemetry(config: &OtelConfig) -> OtelGuard {
        meter_provider
    };

    match metrics_exporter_opentelemetry::Recorder::builder("order-service").install_global() {
        Ok(_) => {}
        Err(e) => {
            eprintln!("Failed to set global metrics recorder: {e:?}");
        }
    }

    // initialize logger provider
    let logger_provider = {
        let mut builder = SdkLoggerProvider::builder().with_resource(res);
@@ -286,7 +295,7 @@ pub(crate) fn init_telemetry(config: &OtelConfig) -> OtelGuard {
    tracing_subscriber::registry()
        .with(filter)
        .with(ErrorLayer::default())
        .with(if config.local_logging_enabled.unwrap_or(false) {
        .with(if config.local_logging_enabled.unwrap_or(DEFAULT_LOG_LOCAL_LOGGING_ENABLED) {
            Some(fmt_layer)
        } else {
            None
@@ -304,10 +313,9 @@ pub(crate) fn init_telemetry(config: &OtelConfig) -> OtelGuard {
            env::var("RUST_LOG").unwrap_or_else(|_| "Not set".to_string())
        );
        IS_OBSERVABILITY_ENABLED.set(true).ok();
        OBSERVABILITY_METER_NAME.set(service_name.to_string()).ok();
        }
    }

    counter!("rustfs.start.total").increment(1);
    return OtelGuard {
        tracer_provider: Some(tracer_provider),
        meter_provider: Some(meter_provider),

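Aside (not part of the diff): `counter!("rustfs.start.total").increment(1)` goes through the `metrics` facade crate; the calls are no-ops until a global recorder is installed, which is what the `metrics_exporter_opentelemetry::Recorder` block earlier in this file's diff does. A minimal sketch of the facade side, with an illustrative metric name:

```rust
use metrics::counter;

fn record_startup() {
    // routed to whatever global recorder is installed; a silent no-op otherwise
    counter!("app.start.total").increment(1);
}

fn main() {
    record_startup();
}
```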
@@ -48,7 +48,6 @@ thiserror.workspace = true
base64.workspace = true
sha1.workspace = true
sha2.workspace = true
base64-simd.workspace = true
crc64fast-nvme.workspace = true
s3s.workspace = true
hex-simd.workspace = true

@@ -78,6 +78,12 @@ impl ChecksumType {
        (self.0 & t.0) == t.0
    }

    /// Merge another checksum type into this one
    pub fn merge(&mut self, other: ChecksumType) -> &mut Self {
        self.0 |= other.0;
        self
    }

    /// Get the base checksum type (without flags)
    pub fn base(self) -> ChecksumType {
        ChecksumType(self.0 & Self::BASE_TYPE_MASK)
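Aside (not part of the diff): `merge`, the `(self.0 & t.0) == t.0` containment check, and `base` above are the standard bit-flag newtype operations. A self-contained sketch with invented flag values and mask; the real `ChecksumType` constants are not shown in this diff:

```rust
#[derive(Clone, Copy, Debug, PartialEq)]
struct Kind(u32);

impl Kind {
    const CRC32: Kind = Kind(0x01);
    const TRAILING: Kind = Kind(0x100); // a flag bit outside the base mask
    const BASE_MASK: u32 = 0xFF;

    fn contains(self, t: Kind) -> bool {
        // every bit set in `t` must also be set in `self`
        (self.0 & t.0) == t.0
    }

    fn merge(&mut self, other: Kind) -> &mut Self {
        self.0 |= other.0;
        self
    }

    fn base(self) -> Kind {
        Kind(self.0 & Self::BASE_MASK)
    }
}

fn main() {
    let mut k = Kind::CRC32;
    k.merge(Kind::TRAILING);
    assert!(k.contains(Kind::CRC32));
    assert!(k.contains(Kind::TRAILING));
    assert_eq!(k.base(), Kind::CRC32); // flag bits are stripped by base()
}
```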
Some files were not shown because too many files have changed in this diff.