Mirror of https://github.com/rustfs/rustfs.git (synced 2026-01-17 01:30:33 +00:00)
Merge pull request #437 from rustfs/feat/add-formatting-rules-and-type-inference
feat: add comprehensive formatting rules and type inference guidelines
163 .cursorrules
@@ -46,7 +46,104 @@ fn_call_width = 90
single_line_let_else_max_width = 100
```

### 2. Naming Conventions
### 2. **🔧 MANDATORY Code Formatting Rules**

**CRITICAL**: All code must be properly formatted before committing. This project enforces strict formatting standards to maintain code consistency and readability.

#### Pre-commit Requirements (MANDATORY)

Before every commit, you **MUST**:

1. **Format your code**:
   ```bash
   cargo fmt --all
   ```

2. **Verify formatting**:
   ```bash
   cargo fmt --all --check
   ```

3. **Pass clippy checks**:
   ```bash
   cargo clippy --all-targets --all-features -- -D warnings
   ```

4. **Ensure compilation**:
   ```bash
   cargo check --all-targets
   ```

#### Quick Commands

Use these convenient Makefile targets for common tasks:

```bash
# Format all code
make fmt

# Check if code is properly formatted
make fmt-check

# Run clippy checks
make clippy

# Run compilation check
make check

# Run tests
make test

# Run all pre-commit checks (format + clippy + check + test)
make pre-commit

# Setup git hooks (one-time setup)
make setup-hooks
```

#### 🔒 Automated Pre-commit Hooks

This project includes a pre-commit hook that automatically runs before each commit to ensure:

- ✅ Code is properly formatted (`cargo fmt --all --check`)
- ✅ No clippy warnings (`cargo clippy --all-targets --all-features -- -D warnings`)
- ✅ Code compiles successfully (`cargo check --all-targets`)

**Setting Up Pre-commit Hooks** (MANDATORY for all developers):

Run this command once after cloning the repository:

```bash
make setup-hooks
```

Or manually:

```bash
chmod +x .git/hooks/pre-commit
```

#### 🚫 Commit Prevention

If your code doesn't meet the formatting requirements, the pre-commit hook will:

1. **Block the commit** and show clear error messages
2. **Provide exact commands** to fix the issues
3. **Guide you through** the resolution process

Example output when formatting fails:

```
❌ Code formatting check failed!
💡 Please run 'cargo fmt --all' to format your code before committing.

🔧 Quick fix:
   cargo fmt --all
   git add .
   git commit
```

### 3. Naming Conventions
- Use `snake_case` for functions, variables, modules
- Use `PascalCase` for types, traits, enums
- Constants use `SCREAMING_SNAKE_CASE`
@@ -55,7 +152,47 @@ single_line_let_else_max_width = 100
- Avoid meaningless names like `temp`, `data`, `foo`, `bar`, `test123`
- Choose names that clearly express the purpose and intent
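
For illustration only, a minimal sketch that combines all three conventions (every name here is invented for the example):

```rust
/// Constants use SCREAMING_SNAKE_CASE.
const MAX_RETRY_COUNT: u64 = 3;

/// Types, traits, and enums use PascalCase.
enum NodeState {
    Online,
    Offline,
}

/// Functions, variables, and modules use snake_case.
fn retry_delay_secs(node_state: &NodeState) -> u64 {
    let base_delay_secs = 5;
    match node_state {
        NodeState::Online => base_delay_secs,
        NodeState::Offline => base_delay_secs * MAX_RETRY_COUNT,
    }
}
```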

### 3. Documentation Comments
### 4. Type Declaration Guidelines
- **Prefer type inference over explicit type declarations** when the type is obvious from context
- Let the Rust compiler infer types whenever possible to reduce verbosity and improve maintainability
- Only specify types explicitly when:
  - The type cannot be inferred by the compiler
  - Explicit typing improves code clarity and readability
  - Required for API boundaries (function signatures, public struct fields)
  - Needed to resolve ambiguity between multiple possible types

**Good examples (prefer these):**
```rust
// Compiler can infer the type
let items = vec![1, 2, 3, 4];
let config = Config::default();
let result = process_data(&input);

// Iterator chains with clear context
let filtered: Vec<_> = items.iter().filter(|&&x| x > 2).collect();
```

**Avoid unnecessary explicit types:**
```rust
// Unnecessary - type is obvious
let items: Vec<i32> = vec![1, 2, 3, 4];
let config: Config = Config::default();
let result: ProcessResult = process_data(&input);
```

**When explicit types are beneficial:**
```rust
// API boundaries - always specify types
pub fn process_data(input: &[u8]) -> Result<ProcessResult, Error> { ... }

// Ambiguous cases - explicit type needed
let value: f64 = "3.14".parse().unwrap();

// Complex generic types - explicit for clarity
let cache: HashMap<String, Arc<Mutex<CacheEntry>>> = HashMap::new();
```

### 5. Documentation Comments
- Public APIs must have documentation comments
- Use `///` for documentation comments
- Complex functions add `# Examples` and `# Parameters` descriptions
@@ -63,7 +200,7 @@ single_line_let_else_max_width = 100
- Always use English for all comments and documentation
- Avoid meaningless comments like "debug 111" or placeholder text
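
As a sketch of the expected shape (the function below is hypothetical, not part of RustFS):

```rust
/// Extracts the port number from a `host:port` address string.
///
/// # Parameters
///
/// * `addr` - The address to parse, e.g. `"127.0.0.1:9000"`.
///
/// # Examples
///
/// ```
/// assert_eq!(parse_port("127.0.0.1:9000"), Some(9000));
/// assert_eq!(parse_port("no-port"), None);
/// ```
pub fn parse_port(addr: &str) -> Option<u16> {
    // rsplit(':') yields segments from the right, so next() is the port part.
    addr.rsplit(':').next()?.parse().ok()
}
```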

### 4. Import Guidelines
### 6. Import Guidelines
- Standard library imports first
- Third-party crate imports in the middle
- Project internal imports last
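
A short sketch of the expected grouping (the internal module paths are illustrative):

```rust
// Standard library imports first
use std::collections::HashMap;
use std::sync::Arc;

// Third-party crate imports in the middle
use serde::Deserialize;
use tokio::sync::Mutex;

// Project internal imports last
use crate::config::Config;
use crate::error::Result;
```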
@@ -306,32 +443,34 @@ async fn health_check() -> Result<HealthStatus> {

## Code Review Checklist

### 1. Functionality
### 1. **Code Formatting and Quality (MANDATORY)**
- [ ] **Code is properly formatted** (`cargo fmt --all --check` passes)
- [ ] **All clippy warnings are resolved** (`cargo clippy --all-targets --all-features -- -D warnings` passes)
- [ ] **Code compiles successfully** (`cargo check --all-targets` passes)
- [ ] **Pre-commit hooks are working** and all checks pass
- [ ] **No formatting-related changes** mixed with functional changes (separate commits)

### 2. Functionality
- [ ] Are all error cases properly handled?
- [ ] Is there appropriate logging?
- [ ] Is there necessary test coverage?

### 2. Performance
### 3. Performance
- [ ] Are unnecessary memory allocations avoided?
- [ ] Are async operations used correctly?
- [ ] Are there potential deadlock risks?

### 3. Security
### 4. Security
- [ ] Are input parameters properly validated?
- [ ] Are there appropriate permission checks?
- [ ] Is information leakage avoided?

### 4. Cross-Platform Compatibility
### 5. Cross-Platform Compatibility
- [ ] Does the code work on different CPU architectures (x86_64, aarch64)?
- [ ] Are platform-specific features properly gated with conditional compilation?
- [ ] Is byte order handling correct for binary data?
- [ ] Are there appropriate fallback implementations for unsupported platforms?

### 5. Maintainability
- [ ] Is the code clear and understandable?
- [ ] Does it follow the project's architectural patterns?
- [ ] Is there appropriate documentation?

### 6. Code Commits and Documentation
- [ ] Does it comply with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)?
- [ ] Are commit messages concise and under 72 characters for the title line?
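
For instance, a commit title that satisfies both checks (illustrative):

```
feat: add type inference guidelines to .cursorrules
```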
184 DEVELOPMENT.md (new file)
@@ -0,0 +1,184 @@
# RustFS Development Guide

## 📋 Code Quality Requirements

### 🔧 Code Formatting Rules

**MANDATORY**: All code must be properly formatted before committing. This project enforces strict formatting standards to maintain code consistency and readability.

#### Pre-commit Requirements

Before every commit, you **MUST**:

1. **Format your code**:
   ```bash
   cargo fmt --all
   ```

2. **Verify formatting**:
   ```bash
   cargo fmt --all --check
   ```

3. **Pass clippy checks**:
   ```bash
   cargo clippy --all-targets --all-features -- -D warnings
   ```

4. **Ensure compilation**:
   ```bash
   cargo check --all-targets
   ```

#### Quick Commands

We provide convenient Makefile targets for common tasks:

```bash
# Format all code
make fmt

# Check if code is properly formatted
make fmt-check

# Run clippy checks
make clippy

# Run compilation check
make check

# Run tests
make test

# Run all pre-commit checks (format + clippy + check + test)
make pre-commit

# Setup git hooks (one-time setup)
make setup-hooks
```

### 🔒 Automated Pre-commit Hooks

This project includes a pre-commit hook that automatically runs before each commit to ensure:

- ✅ Code is properly formatted (`cargo fmt --all --check`)
- ✅ No clippy warnings (`cargo clippy --all-targets --all-features -- -D warnings`)
- ✅ Code compiles successfully (`cargo check --all-targets`)

#### Setting Up Pre-commit Hooks

Run this command once after cloning the repository:

```bash
make setup-hooks
```

Or manually:

```bash
chmod +x .git/hooks/pre-commit
```
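
The hook script itself is not shown in this PR view. As a rough sketch, a hook enforcing the three checks listed above could look like this (illustrative only, not the project's actual `.git/hooks/pre-commit`):

```bash
#!/bin/sh
# Illustrative pre-commit hook running the project's mandatory checks.

echo "📝 Checking code formatting..."
if ! cargo fmt --all --check; then
    echo "❌ Code formatting check failed!"
    echo "💡 Please run 'cargo fmt --all' to format your code before committing."
    exit 1
fi

echo "🔍 Running clippy checks..."
cargo clippy --all-targets --all-features -- -D warnings || exit 1

echo "🔨 Running compilation check..."
cargo check --all-targets || exit 1

echo "✅ All pre-commit checks passed!"
```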

### 📝 Formatting Configuration

The project uses the following rustfmt configuration (defined in `rustfmt.toml`):

```toml
max_width = 130
fn_call_width = 90
single_line_let_else_max_width = 100
```
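
These three limits account for most of the mechanical churn in this PR. As an illustration (borrowing the `AccElem` struct that appears in the diffs below, with field types assumed), a short struct literal now fits on one line under `max_width = 130`, while longer expressions get wrapped:

```rust
struct AccElem {
    total: u64,
    size: u64,
    n: u64,
}

fn example() -> AccElem {
    // Fits well under the 130-column limit, so rustfmt keeps it on one line.
    AccElem { total: 0, size: 0, n: 5 }
}
```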

### 🚫 Commit Prevention

If your code doesn't meet the formatting requirements, the pre-commit hook will:

1. **Block the commit** and show clear error messages
2. **Provide exact commands** to fix the issues
3. **Guide you through** the resolution process

Example output when formatting fails:

```
❌ Code formatting check failed!
💡 Please run 'cargo fmt --all' to format your code before committing.

🔧 Quick fix:
   cargo fmt --all
   git add .
   git commit
```

### 🔄 Development Workflow

1. **Make your changes**
2. **Format your code**: `make fmt` or `cargo fmt --all`
3. **Run pre-commit checks**: `make pre-commit`
4. **Commit your changes**: `git commit -m "your message"`
5. **Push to your branch**: `git push`
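
As a single shell session, the same workflow might look like this (the commit message is a placeholder):

```bash
# Steps 1-3: edit, format, and run every mandatory check
make fmt
make pre-commit

# Steps 4-5: commit and push
git commit -m "fix: correct port parsing edge case"
git push
```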

### 🛠️ IDE Integration

#### VS Code

Install the `rust-analyzer` extension and add to your `settings.json`:

```json
{
  "rust-analyzer.rustfmt.extraArgs": ["--config-path", "./rustfmt.toml"],
  "editor.formatOnSave": true,
  "[rust]": {
    "editor.defaultFormatter": "rust-lang.rust-analyzer"
  }
}
```

#### Other IDEs

Configure your IDE to:
- Use the project's `rustfmt.toml` configuration
- Format on save
- Run clippy checks

### ❗ Important Notes

- **Never bypass formatting checks** - they are there for a reason
- **All CI/CD pipelines** will also enforce these same checks
- **Pull requests** will be automatically rejected if formatting checks fail
- **Consistent formatting** improves code readability and reduces merge conflicts

### 🆘 Troubleshooting

#### Pre-commit hook not running?

```bash
# Check if hook is executable
ls -la .git/hooks/pre-commit

# Make it executable if needed
chmod +x .git/hooks/pre-commit
```

#### Formatting issues?

```bash
# Format all code
cargo fmt --all

# Check specific issues
cargo fmt --all --check --verbose
```

#### Clippy issues?

```bash
# See detailed clippy output
cargo clippy --all-targets --all-features -- -D warnings

# Fix automatically fixable issues
cargo clippy --fix --all-targets --all-features
```

---

Following these guidelines ensures high code quality and smooth collaboration across the RustFS project! 🚀
38 Makefile
@@ -7,6 +7,42 @@ IMAGE_NAME ?= rustfs:v1.0.0
CONTAINER_NAME ?= rustfs-dev
DOCKERFILE_PATH = $(shell pwd)/.docker

# Code quality and formatting targets
.PHONY: fmt
fmt:
    @echo "🔧 Formatting code..."
    cargo fmt --all

.PHONY: fmt-check
fmt-check:
    @echo "📝 Checking code formatting..."
    cargo fmt --all --check

.PHONY: clippy
clippy:
    @echo "🔍 Running clippy checks..."
    cargo clippy --all-targets --all-features -- -D warnings

.PHONY: check
check:
    @echo "🔨 Running compilation check..."
    cargo check --all-targets

.PHONY: test
test:
    @echo "🧪 Running tests..."
    cargo test --all --exclude e2e_test

.PHONY: pre-commit
pre-commit: fmt clippy check test
    @echo "✅ All pre-commit checks passed!"

.PHONY: setup-hooks
setup-hooks:
    @echo "🔧 Setting up git hooks..."
    chmod +x .git/hooks/pre-commit
    @echo "✅ Git hooks setup complete!"

.PHONY: init-devenv
init-devenv:
    $(DOCKER_CLI) build -t $(IMAGE_NAME) -f $(DOCKERFILE_PATH)/Dockerfile.devenv .
@@ -20,7 +56,7 @@ start:

.PHONY: stop
stop:
    $(DOCKER_CLI) stop $(CONTAINER_NAME)

.PHONY: e2e-server
e2e-server:
@@ -295,13 +295,13 @@ mod tests {
    #[test]
    fn test_extract_host_port_invalid() {
        let invalid_cases = vec![
            "127.0.0.1",            // Missing port
            "127.0.0.1:",           // Empty port
            "127.0.0.1:abc",        // Invalid port
            "127.0.0.1:99999",      // Port out of range
            "",                     // Empty string
            "127.0.0.1:9000:extra", // Too many parts
            "invalid",              // No colon
        ];

        for input in invalid_cases {

@@ -624,17 +624,17 @@ mod tests {

        // Test that commands can be created
        match start_cmd {
            ServiceCommand::Start(_) => {},
            ServiceCommand::Start(_) => {}
            _ => panic!("Expected Start command"),
        }

        match stop_cmd {
            ServiceCommand::Stop => {},
            ServiceCommand::Stop => {}
            _ => panic!("Expected Stop command"),
        }

        match restart_cmd {
            ServiceCommand::Restart(_) => {},
            ServiceCommand::Restart(_) => {}
            _ => panic!("Expected Restart command"),
        }
    }
@@ -680,7 +680,7 @@ mod tests {
        assert!(debug_str.contains("Test message"));
    }

    #[test]
    fn test_service_manager_creation() {
        // Test ServiceManager creation in a tokio runtime
        let rt = tokio::runtime::Runtime::new().unwrap();

@@ -714,17 +714,17 @@ mod tests {
        }
    }

    #[test]
    fn test_extract_port_invalid() {
        let invalid_cases = vec![
            "127.0.0.1",       // Missing port
            "127.0.0.1:",      // Empty port
            "127.0.0.1:abc",   // Invalid port
            "127.0.0.1:99999", // Port out of range
            "",                // Empty string
            "invalid",         // No colon
            "host:-1",         // Negative port
            "host:0.5",        // Decimal port
        ];

        for input in invalid_cases {

@@ -746,10 +746,10 @@ mod tests {
        assert_eq!(ServiceManager::extract_port("host:0"), Some(0));
        assert_eq!(ServiceManager::extract_port("host:65535"), Some(65535));
        assert_eq!(ServiceManager::extract_port("host:65536"), None); // Out of range
        // IPv6-like address - extract_port takes the second part after split(':')
        // For "::1:8080", split(':') gives ["", "", "1", "8080"], nth(1) gives ""
        assert_eq!(ServiceManager::extract_port("::1:8080"), None); // Second part is empty
        // For "[::1]:8080", split(':') gives ["[", "", "1]", "8080"], nth(1) gives ""
        assert_eq!(ServiceManager::extract_port("[::1]:8080"), None); // Second part is empty
    }
@@ -844,7 +844,7 @@ mod tests {
            ServiceCommand::Start(config) => {
                assert_eq!(config.address, "127.0.0.1:9000");
                assert_eq!(config.access_key, "admin1");
            },
            }
            _ => panic!("Expected Start command"),
        }

@@ -852,7 +852,7 @@ mod tests {
            ServiceCommand::Restart(config) => {
                assert_eq!(config.address, "192.168.1.100:8080");
                assert_eq!(config.access_key, "admin2");
            },
            }
            _ => panic!("Expected Restart command"),
        }
    }

@@ -58,9 +58,7 @@ mod tests {
    fn ensure_logger_init() {
        INIT.call_once(|| {
            // Initialize a simple test logger to avoid conflicts
            let _ = tracing_subscriber::fmt()
                .with_test_writer()
                .try_init();
            let _ = tracing_subscriber::fmt().with_test_writer().try_init();
        });
    }

@@ -84,7 +82,7 @@ mod tests {
        assert!(logs_dir.to_string_lossy().contains("logs"));
    }

    #[test]
    fn test_rolling_file_appender_builder() {
        ensure_logger_init();

@@ -120,7 +118,7 @@ mod tests {
        assert!(!format!("{:?}", never).is_empty());
    }

    #[test]
    fn test_fmt_layer_configuration() {
        ensure_logger_init();
@@ -258,7 +258,9 @@ mod tests {
    fn test_multiple_error_types() {
        let errors = vec![
            Error::new(io::Error::new(io::ErrorKind::NotFound, "Not found")),
            Error::new(CustomTestError { message: "Custom".to_string() }),
            Error::new(CustomTestError {
                message: "Custom".to_string(),
            }),
            Error::new(AnotherTestError),
            Error::msg("String error"),
        ];

@@ -247,11 +247,7 @@ mod tests {

    #[test]
    fn test_acc_elem_avg_zero_total() {
        let elem = AccElem {
            total: 0,
            size: 0,
            n: 5,
        };
        let elem = AccElem { total: 0, size: 0, n: 5 };

        let avg = elem.avg();
        assert_eq!(avg, Duration::from_secs(0));
@@ -464,7 +460,8 @@ mod tests {
        let mut latency = LastMinuteLatency::default();

        // Test that indices wrap around correctly
        for sec in 0..120 { // Test for 2 minutes
        for sec in 0..120 {
            // Test for 2 minutes
            let acc_elem = AccElem {
                total: sec,
                size: 0,

@@ -482,7 +479,14 @@ mod tests {
        let mut latency = LastMinuteLatency::default();

        // Add data at time 1000
        latency.add_all(1000, &AccElem { total: 10, size: 0, n: 1 });
        latency.add_all(
            1000,
            &AccElem {
                total: 10,
                size: 0,
                n: 1,
            },
        );

        // Forward to time 1030 (30 seconds later)
        latency.forward_to(1030);
@@ -637,9 +641,21 @@ mod tests {
        latency.last_sec = current_time;

        // Add data to multiple slots
        latency.totals[0] = AccElem { total: 10, size: 100, n: 1 };
        latency.totals[1] = AccElem { total: 20, size: 200, n: 2 };
        latency.totals[59] = AccElem { total: 30, size: 300, n: 3 };
        latency.totals[0] = AccElem {
            total: 10,
            size: 100,
            n: 1,
        };
        latency.totals[1] = AccElem {
            total: 20,
            size: 200,
            n: 2,
        };
        latency.totals[59] = AccElem {
            total: 30,
            size: 300,
            n: 3,
        };

        let total = latency.get_total();

@@ -653,29 +669,20 @@ mod tests {
        // Test that window index calculation works correctly
        let _latency = LastMinuteLatency::default();

        let acc_elem = AccElem {
            total: 1,
            size: 1,
            n: 1,
        };
        let acc_elem = AccElem { total: 1, size: 1, n: 1 };

        // Test various timestamps
        let test_cases = [
            (0, 0),
            (1, 1),
            (59, 59),
            (60, 0),
            (61, 1),
            (119, 59),
            (120, 0),
        ];
        let test_cases = [(0, 0), (1, 1), (59, 59), (60, 0), (61, 1), (119, 59), (120, 0)];

        for (timestamp, expected_idx) in test_cases {
            let mut test_latency = LastMinuteLatency::default();
            test_latency.add_all(timestamp, &acc_elem);

            assert_eq!(test_latency.totals[expected_idx].n, 1,
                "Failed for timestamp {} (expected index {})", timestamp, expected_idx);
            assert_eq!(
                test_latency.totals[expected_idx].n, 1,
                "Failed for timestamp {} (expected index {})",
                timestamp, expected_idx
            );
        }
    }
@@ -374,9 +374,9 @@ fn check_quorum_locked(locks: &[String], quorum: usize) -> bool {
#[cfg(test)]
mod tests {
    use super::*;
    use crate::local_locker::LocalLocker;
    use async_trait::async_trait;
    use common::error::{Error, Result};
    use crate::local_locker::LocalLocker;
    use std::collections::HashMap;
    use std::sync::{Arc, Mutex};

@@ -776,11 +776,7 @@ mod tests {

    #[tokio::test]
    async fn test_drw_mutex_multiple_resources() {
        let names = vec![
            "resource1".to_string(),
            "resource2".to_string(),
            "resource3".to_string(),
        ];
        let names = vec!["resource1".to_string(), "resource2".to_string(), "resource3".to_string()];
        let lockers = create_mock_lockers(1);
        let mut mutex = DRWMutex::new("owner1".to_string(), names.clone(), lockers);
@@ -884,8 +880,8 @@ mod tests {
        // Case 1: Even number of lockers
        let locks = vec!["uid1".to_string(), "uid2".to_string(), "uid3".to_string(), "uid4".to_string()];
        let tolerance = 2; // locks.len() / 2 = 4 / 2 = 2
        // locks.len() - tolerance = 4 - 2 = 2, which equals tolerance
        // So the special case applies: un_locks_failed >= tolerance

        // All 4 failed unlocks
        assert!(check_failed_unlocks(&locks, tolerance)); // 4 >= 2 = true

@@ -901,8 +897,8 @@ mod tests {
        // Case 2: Odd number of lockers
        let locks = vec!["uid1".to_string(), "uid2".to_string(), "uid3".to_string()];
        let tolerance = 1; // locks.len() / 2 = 3 / 2 = 1
        // locks.len() - tolerance = 3 - 1 = 2, which does NOT equal tolerance (1)
        // So the normal case applies: un_locks_failed > tolerance

        // 3 failed unlocks
        assert!(check_failed_unlocks(&locks, tolerance)); // 3 > 1 = true
@@ -946,11 +942,36 @@ mod tests {
        }

        let test_cases = vec![
            QuorumTest { locker_count: 1, expected_tolerance: 0, expected_write_quorum: 1, expected_read_quorum: 1 },
            QuorumTest { locker_count: 2, expected_tolerance: 1, expected_write_quorum: 2, expected_read_quorum: 1 },
            QuorumTest { locker_count: 3, expected_tolerance: 1, expected_write_quorum: 2, expected_read_quorum: 2 },
            QuorumTest { locker_count: 4, expected_tolerance: 2, expected_write_quorum: 3, expected_read_quorum: 2 },
            QuorumTest { locker_count: 5, expected_tolerance: 2, expected_write_quorum: 3, expected_read_quorum: 3 },
            QuorumTest {
                locker_count: 1,
                expected_tolerance: 0,
                expected_write_quorum: 1,
                expected_read_quorum: 1,
            },
            QuorumTest {
                locker_count: 2,
                expected_tolerance: 1,
                expected_write_quorum: 2,
                expected_read_quorum: 1,
            },
            QuorumTest {
                locker_count: 3,
                expected_tolerance: 1,
                expected_write_quorum: 2,
                expected_read_quorum: 2,
            },
            QuorumTest {
                locker_count: 4,
                expected_tolerance: 2,
                expected_write_quorum: 3,
                expected_read_quorum: 2,
            },
            QuorumTest {
                locker_count: 5,
                expected_tolerance: 2,
                expected_write_quorum: 3,
                expected_read_quorum: 3,
            },
        ];

        for test_case in test_cases {
@@ -963,12 +984,21 @@ mod tests {
                write_quorum += 1;
            }

            assert_eq!(tolerance, test_case.expected_tolerance,
                "Tolerance mismatch for {} lockers", test_case.locker_count);
            assert_eq!(write_quorum, test_case.expected_write_quorum,
                "Write quorum mismatch for {} lockers", test_case.locker_count);
            assert_eq!(read_quorum, test_case.expected_read_quorum,
                "Read quorum mismatch for {} lockers", test_case.locker_count);
            assert_eq!(
                tolerance, test_case.expected_tolerance,
                "Tolerance mismatch for {} lockers",
                test_case.locker_count
            );
            assert_eq!(
                write_quorum, test_case.expected_write_quorum,
                "Write quorum mismatch for {} lockers",
                test_case.locker_count
            );
            assert_eq!(
                read_quorum, test_case.expected_read_quorum,
                "Read quorum mismatch for {} lockers",
                test_case.locker_count
            );
        }
    }
@@ -998,11 +1028,7 @@ mod tests {

    #[test]
    fn test_drw_mutex_new_with_unsorted_names() {
        let names = vec![
            "zebra".to_string(),
            "alpha".to_string(),
            "beta".to_string(),
        ];
        let names = vec!["zebra".to_string(), "alpha".to_string(), "beta".to_string()];
        let lockers = create_mock_lockers(1);
        let mutex = DRWMutex::new("owner1".to_string(), names, lockers);
@@ -94,7 +94,10 @@ mod tests {

        // Test event config properties
        assert!(!config.event.store_path.is_empty(), "Store path should not be empty");
        assert!(config.event.channel_capacity >= 1000, "Channel capacity should be reasonable for production");
        assert!(
            config.event.channel_capacity >= 1000,
            "Channel capacity should be reasonable for production"
        );

        // Test that store path is a valid path format
        let store_path = &config.event.store_path;

@@ -106,13 +109,13 @@ mod tests {
            match adapter {
                crate::event::adapters::AdapterConfig::Webhook(_) => {
                    // Webhook adapter should be properly configured
                },
                }
                crate::event::adapters::AdapterConfig::Kafka(_) => {
                    // Kafka adapter should be properly configured
                },
                }
                crate::event::adapters::AdapterConfig::Mqtt(_) => {
                    // MQTT adapter should be properly configured
                },
                }
            }
        }
    }

@@ -153,7 +156,10 @@ mod tests {
        // Test that observability config has Debug trait
        let observability_debug = format!("{:?}", config.observability);
        assert!(!observability_debug.is_empty(), "Observability config should have debug output");
        assert!(observability_debug.contains("ObservabilityConfig"), "Debug output should contain type name");
        assert!(
            observability_debug.contains("ObservabilityConfig"),
            "Debug output should contain type name"
        );

        // Test that event config has Debug trait
        let event_debug = format!("{:?}", config.event);
@@ -53,7 +53,10 @@ mod tests {

        // Verify store path is set
        assert!(!config.store_path.is_empty(), "Store path should not be empty");
        assert!(config.store_path.contains("event-notification"), "Store path should contain event-notification");
        assert!(
            config.store_path.contains("event-notification"),
            "Store path should contain event-notification"
        );

        // Verify channel capacity is reasonable
        assert_eq!(config.channel_capacity, 10000, "Channel capacity should be 10000");

@@ -153,7 +156,10 @@ mod tests {
        assert!(!debug_str.is_empty(), "Debug output should not be empty");
        assert!(debug_str.contains("NotifierConfig"), "Debug output should contain struct name");
        assert!(debug_str.contains("store_path"), "Debug output should contain store_path field");
        assert!(debug_str.contains("channel_capacity"), "Debug output should contain channel_capacity field");
        assert!(
            debug_str.contains("channel_capacity"),
            "Debug output should contain channel_capacity field"
        );
        assert!(debug_str.contains("adapters"), "Debug output should contain adapters field");
    }

@@ -217,13 +223,13 @@ mod tests {
            match adapter {
                AdapterConfig::Webhook(_) => {
                    // Webhook adapter should be properly configured
                },
                }
                AdapterConfig::Kafka(_) => {
                    // Kafka adapter should be properly configured
                },
                }
                AdapterConfig::Mqtt(_) => {
                    // MQTT adapter should be properly configured
                },
                }
            }
        }
    }

@@ -320,6 +326,9 @@ mod tests {
        // DEFAULT_CONFIG_FILE is a const, so is_empty() check is redundant
        // assert!(!DEFAULT_CONFIG_FILE.is_empty(), "Config file name should not be empty");
        assert!(!DEFAULT_CONFIG_FILE.contains('/'), "Config file name should not contain path separators");
        assert!(!DEFAULT_CONFIG_FILE.contains('\\'), "Config file name should not contain Windows path separators");
        assert!(
            !DEFAULT_CONFIG_FILE.contains('\\'),
            "Config file name should not contain Windows path separators"
        );
    }
}
@@ -191,7 +191,7 @@ mod tests {
    use std::fs;
    use tempfile::TempDir;

    #[test]
    fn test_certs_error_function() {
        let error_msg = "Test error message";
        let error = certs_error(error_msg.to_string());

@@ -210,7 +210,7 @@ mod tests {
        assert!(error.to_string().contains("failed to open"));
    }

    #[test]
    fn test_load_private_key_file_not_found() {
        let result = load_private_key("non_existent_key.pem");
        assert!(result.is_err());

@@ -233,7 +233,7 @@ mod tests {
        assert!(error.to_string().contains("No valid certificate was found"));
    }

    #[test]
    fn test_load_certs_invalid_format() {
        let temp_dir = TempDir::new().unwrap();
        let cert_path = temp_dir.path().join("invalid.pem");

@@ -259,7 +259,7 @@ mod tests {
        assert!(error.to_string().contains("no private key found"));
    }

    #[test]
    fn test_load_private_key_invalid_format() {
        let temp_dir = TempDir::new().unwrap();
        let key_path = temp_dir.path().join("invalid_key.pem");

@@ -281,7 +281,7 @@ mod tests {
        assert!(error.to_string().contains("does not exist or is not a directory"));
    }

    #[test]
    fn test_load_all_certs_from_directory_empty() {
        let temp_dir = TempDir::new().unwrap();

@@ -315,7 +315,7 @@ mod tests {
        assert!(result.is_err());
    }

    #[test]
    fn test_load_cert_key_pair_missing_key() {
        let temp_dir = TempDir::new().unwrap();
        let cert_path = temp_dir.path().join("test_cert.pem");

@@ -355,12 +355,12 @@ mod tests {
    fn test_path_handling_edge_cases() {
        // Test with various path formats
        let path_cases = vec![
            "",               // Empty path
            ".",              // Current directory
            "..",             // Parent directory
            "/",              // Root directory (Unix)
            "relative/path",  // Relative path
            "/absolute/path", // Absolute path
        ];

        for path in path_cases {

@@ -396,7 +396,10 @@ mod tests {
        // Should fail because no certificates found
        let result = load_all_certs_from_directory(temp_dir.path().to_str().unwrap());
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("No valid certificate/private key pair found"));
        assert!(result
            .unwrap_err()
            .to_string()
            .contains("No valid certificate/private key pair found"));
    }

    #[test]
@@ -409,7 +412,10 @@ mod tests {

        let result = load_all_certs_from_directory(unicode_dir.to_str().unwrap());
        assert!(result.is_err());
        assert!(result.unwrap_err().to_string().contains("No valid certificate/private key pair found"));
        assert!(result
            .unwrap_err()
            .to_string()
            .contains("No valid certificate/private key pair found"));
    }

    #[test]
@@ -420,14 +426,16 @@ mod tests {
        let temp_dir = TempDir::new().unwrap();
        let dir_path = Arc::new(temp_dir.path().to_string_lossy().to_string());

        let handles: Vec<_> = (0..5).map(|_| {
        let handles: Vec<_> = (0..5)
            .map(|_| {
                let path = Arc::clone(&dir_path);
                thread::spawn(move || {
                    let result = load_all_certs_from_directory(&path);
                    // All should fail since directory is empty
                    assert!(result.is_err());
                })
            })
        }).collect();
            .collect();

        for handle in handles {
            handle.join().expect("Thread should complete successfully");
@@ -154,8 +154,12 @@ mod tests {
        let id = ID::Pbkdf2AESGCM;
        let salt = b"same_salt_for_all";

        let key1 = id.get_key(b"password1", salt).expect("Key generation with password1 should succeed");
        let key2 = id.get_key(b"password2", salt).expect("Key generation with password2 should succeed");
        let key1 = id
            .get_key(b"password1", salt)
            .expect("Key generation with password1 should succeed");
        let key2 = id
            .get_key(b"password2", salt)
            .expect("Key generation with password2 should succeed");

        assert_ne!(key1, key2);
    }

@@ -166,8 +170,12 @@ mod tests {
        let id = ID::Pbkdf2AESGCM;
        let password = b"same_password";

        let key1 = id.get_key(password, b"salt1_16_bytes__").expect("Key generation with salt1 should succeed");
        let key2 = id.get_key(password, b"salt2_16_bytes__").expect("Key generation with salt2 should succeed");
        let key1 = id
            .get_key(password, b"salt1_16_bytes__")
            .expect("Key generation with salt1 should succeed");
        let key2 = id
            .get_key(password, b"salt2_16_bytes__")
            .expect("Key generation with salt2 should succeed");

        assert_ne!(key1, key2);
    }

@@ -231,9 +239,15 @@ mod tests {
        let password = b"consistent_password";
        let salt = b"consistent_salt_";

        let key_argon2_aes = ID::Argon2idAESGCM.get_key(password, salt).expect("Argon2id AES key generation should succeed");
        let key_argon2_chacha = ID::Argon2idChaCHa20Poly1305.get_key(password, salt).expect("Argon2id ChaCha key generation should succeed");
        let key_pbkdf2 = ID::Pbkdf2AESGCM.get_key(password, salt).expect("PBKDF2 key generation should succeed");
        let key_argon2_aes = ID::Argon2idAESGCM
            .get_key(password, salt)
            .expect("Argon2id AES key generation should succeed");
        let key_argon2_chacha = ID::Argon2idChaCHa20Poly1305
            .get_key(password, salt)
            .expect("Argon2id ChaCha key generation should succeed");
        let key_pbkdf2 = ID::Pbkdf2AESGCM
            .get_key(password, salt)
            .expect("PBKDF2 key generation should succeed");

        // Different algorithms should produce different keys
        assert_ne!(key_argon2_aes, key_pbkdf2);
@@ -75,10 +75,10 @@ fn test_encrypt_decrypt_with_long_password() -> Result<(), crate::Error> {
fn test_encrypt_decrypt_binary_data() -> Result<(), crate::Error> {
    // Test with various binary patterns
    let binary_patterns = [
        vec![0x00; 100],                                     // All zeros
        vec![0xFF; 100],                                     // All ones
        (0..=255u8).cycle().take(1000).collect::<Vec<u8>>(), // Sequential pattern
        [0xAA, 0x55].repeat(500),                            // Alternating pattern
    ];

    for pattern in &binary_patterns {

@@ -136,11 +136,11 @@ fn test_decrypt_with_truncated_data() {

    // Test truncation at various lengths
    let truncation_lengths = [
        0,                   // Empty data
        10,                  // Very short
        32,                  // Salt length
        44,                  // Just before nonce
        encrypted.len() - 1, // Missing last byte
    ];

    for &length in &truncation_lengths {

@@ -193,8 +193,12 @@ fn test_encrypted_data_structure() -> Result<(), crate::Error> {

    // Should have at least: 32 bytes salt + 1 byte ID + 12 bytes nonce + data + 16 bytes tag
    let min_expected_length = 32 + 1 + 12 + data.len() + 16;
    assert!(encrypted.len() >= min_expected_length,
        "Encrypted data length {} should be at least {}", encrypted.len(), min_expected_length);
    assert!(
        encrypted.len() >= min_expected_length,
        "Encrypted data length {} should be at least {}",
        encrypted.len(),
        min_expected_length
    );

    Ok(())
}

@@ -204,12 +208,12 @@ fn test_password_variations() -> Result<(), crate::Error> {
    let data = b"test data";

    let password_variations = [
        b"a".as_slice(),                // Single character
        b"12345".as_slice(),            // Numeric
        b"!@#$%^&*()".as_slice(),       // Special characters
        b"\x00\x01\x02\x03".as_slice(), // Binary password
        "密码测试".as_bytes(),          // Unicode password
        &[0xFF; 64],                    // Long binary password
    ];

    for password in &password_variations {

@@ -238,8 +242,8 @@ fn test_deterministic_with_same_salt_and_nonce() {
fn test_cross_platform_compatibility() -> Result<(), crate::Error> {
    // Test data that might behave differently on different platforms
    let test_cases = [
        vec![0x00, 0x01, 0x02, 0x03],                              // Low values
        vec![0xFC, 0xFD, 0xFE, 0xFF],                              // High values
        (0..256u16).map(|x| (x % 256) as u8).collect::<Vec<u8>>(), // Full byte range
    ];

@@ -258,8 +262,8 @@ fn test_memory_safety_with_large_passwords() -> Result<(), crate::Error> {

    // Test with very large passwords
    let large_passwords = [
        vec![b'a'; 1024],                                    // 1KB password
        vec![b'x'; 10 * 1024],                               // 10KB password
        (0..=255u8).cycle().take(5000).collect::<Vec<u8>>(), // 5KB varied password
    ];

@@ -280,16 +284,18 @@ fn test_concurrent_encryption_safety() -> Result<(), crate::Error> {
    let data = Arc::new(b"concurrent test data".to_vec());
    let password = Arc::new(b"concurrent_password".to_vec());

    let handles: Vec<_> = (0..10).map(|i| {
    let handles: Vec<_> = (0..10)
        .map(|i| {
            let data = Arc::clone(&data);
            let password = Arc::clone(&password);

            thread::spawn(move || {
                let encrypted = encrypt_data(&password, &data).expect("Encryption should succeed");
                let decrypted = decrypt_data(&password, &encrypted).expect("Decryption should succeed");
                assert_eq!(**data, decrypted, "Thread {} failed", i);
            })
        })
    }).collect();
        .collect();

    for handle in handles {
        handle.join().expect("Thread should complete successfully");
@@ -1,5 +1,5 @@
use time::OffsetDateTime;
use serde_json::json;
use time::OffsetDateTime;

use super::{decode::decode, encode::encode};

@@ -64,11 +64,11 @@ fn test_jwt_decode_invalid_token_format() {

    // Test various invalid token formats
    let invalid_tokens = [
        "",                                                       // Empty token
        "invalid",                                                // Not a JWT format
        "header.payload",                                         // Missing signature
        "header.payload.signature.extra",                         // Too many parts
        "invalid.header.signature",                               // Invalid base64
        "eyJ0eXAiOiJKV1QiLCJhbGciOiJIUzI1NiJ9.invalid.signature", // Invalid payload
    ];

@@ -110,7 +110,10 @@ fn test_jwt_with_future_issued_at() {
    let result = decode(&jwt_token, secret);
    // For now, we just verify the token can be decoded, but in a production system
    // you might want to add custom validation for iat claims
    assert!(result.is_ok(), "Token decoding should succeed, but iat validation should be handled separately");
    assert!(
        result.is_ok(),
        "Token decoding should succeed, but iat validation should be handled separately"
    );
}

#[test]

@@ -135,18 +138,18 @@ fn test_jwt_with_different_secret_lengths() {

    // Test with various secret lengths
    let secrets = [
        b"a".as_slice(),                        // Very short
        b"short_key".as_slice(),                // Short
        b"medium_length_secret_key".as_slice(), // Medium
        b"very_long_secret_key_with_many_characters_for_testing_purposes".as_slice(), // Long
    ];

    for secret in &secrets {
        let jwt_token = encode(secret, &claims)
            .unwrap_or_else(|_| panic!("Failed to encode JWT with secret length {}", secret.len()));
        let jwt_token =
            encode(secret, &claims).unwrap_or_else(|_| panic!("Failed to encode JWT with secret length {}", secret.len()));

        let decoded = decode(&jwt_token, secret)
            .unwrap_or_else(|_| panic!("Failed to decode JWT with secret length {}", secret.len()));
        let decoded =
            decode(&jwt_token, secret).unwrap_or_else(|_| panic!("Failed to decode JWT with secret length {}", secret.len()));

        assert_eq!(decoded.claims, claims);
    }
@@ -189,9 +189,7 @@ async fn apply_dynamic_config<S: StorageAPI>(cfg: &mut Config, api: Arc<S>) -> R
async fn apply_dynamic_config_for_sub_sys<S: StorageAPI>(cfg: &mut Config, api: Arc<S>, subsys: &str) -> Result<()> {
    let set_drive_counts = api.set_drive_counts();
    if subsys == STORAGE_CLASS_SUB_SYS {
        let kvs = cfg
            .get_value(STORAGE_CLASS_SUB_SYS, DEFAULT_KV_KEY)
            .unwrap_or_default();
        let kvs = cfg.get_value(STORAGE_CLASS_SUB_SYS, DEFAULT_KV_KEY).unwrap_or_default();

        for (i, count) in set_drive_counts.iter().enumerate() {
            match storageclass::lookup_config(&kvs, *count) {

@@ -97,7 +97,7 @@ impl FileMeta {
        if buf.len() < 5 {
            return Err(Error::new(io::Error::new(
                io::ErrorKind::UnexpectedEof,
                format!("Buffer too small: {} bytes, need at least 5", buf.len())
                format!("Buffer too small: {} bytes, need at least 5", buf.len()),
            )));
        }
@@ -208,14 +208,7 @@ mod tests {

    #[tokio::test]
    async fn test_http_file_writer_creation() {
        let writer = HttpFileWriter::new(
            "http://localhost:8080",
            "test-disk",
            "test-volume",
            "test-path",
            1024,
            false
        );
        let writer = HttpFileWriter::new("http://localhost:8080", "test-disk", "test-volume", "test-path", 1024, false);

        assert!(writer.is_ok(), "HttpFileWriter creation should succeed");
    }

@@ -228,7 +221,7 @@ mod tests {
            "test/volume",
            "test file with spaces & symbols.txt",
            1024,
            false
            false,
        );

        assert!(writer.is_ok(), "HttpFileWriter creation with special characters should succeed");

@@ -242,7 +235,7 @@ mod tests {
            "test-volume",
            "append-test.txt",
            1024,
            true // append mode
            true, // append mode
        );

        assert!(writer.is_ok(), "HttpFileWriter creation in append mode should succeed");

@@ -256,7 +249,7 @@ mod tests {
            "test-volume",
            "empty-file.txt",
            0, // zero size
            false
            false,
        );

        assert!(writer.is_ok(), "HttpFileWriter creation with zero size should succeed");

@@ -270,7 +263,7 @@ mod tests {
            "test-volume",
            "large-file.txt",
            1024 * 1024 * 100, // 100MB
            false
            false,
        );

        assert!(writer.is_ok(), "HttpFileWriter creation with large size should succeed");
@@ -278,14 +271,7 @@ mod tests {

    #[tokio::test]
    async fn test_http_file_writer_invalid_url() {
        let writer = HttpFileWriter::new(
            "invalid-url",
            "test-disk",
            "test-volume",
            "test-path",
            1024,
            false
        );
        let writer = HttpFileWriter::new("invalid-url", "test-disk", "test-volume", "test-path", 1024, false);

        // This should still succeed at creation time, errors occur during actual I/O
        assert!(writer.is_ok(), "HttpFileWriter creation should succeed even with invalid URL");

@@ -295,14 +281,8 @@ mod tests {
    async fn test_http_file_reader_creation() {
        // Test creation without actually making HTTP requests
        // We'll test the URL construction logic by checking the error messages
        let result = HttpFileReader::new(
            "http://invalid-server:9999",
            "test-disk",
            "test-volume",
            "test-file.txt",
            0,
            1024
        ).await;
        let result =
            HttpFileReader::new("http://invalid-server:9999", "test-disk", "test-volume", "test-file.txt", 0, 1024).await;

        // May succeed or fail depending on network conditions, but should not panic
        // The important thing is that the URL construction logic works

@@ -317,8 +297,9 @@ mod tests {
            "test-volume",
            "test-file.txt",
            100, // offset
            500 // length
        ).await;
            500, // length
        )
        .await;

        // May succeed or fail, but this tests parameter handling
        assert!(result.is_ok() || result.is_err(), "HttpFileReader creation should not panic");

@@ -332,8 +313,9 @@ mod tests {
            "test-volume",
            "test-file.txt",
            0,
            0 // zero length
        ).await;
            0, // zero length
        )
        .await;

        // May succeed or fail, but this tests zero length handling
        assert!(result.is_ok() || result.is_err(), "HttpFileReader creation should not panic");

@@ -347,8 +329,9 @@ mod tests {
            "test/volume",
            "test file with spaces & symbols.txt",
            0,
            1024
        ).await;
            1024,
        )
        .await;

        // May succeed or fail, but this tests URL encoding
        assert!(result.is_ok() || result.is_err(), "HttpFileReader creation should not panic");
@@ -505,11 +488,7 @@ mod tests {
        let etag_reader3 = EtagReader::new(cursor3);

        // Compute ETags concurrently
        let (result1, result2, result3) = tokio::join!(
            etag_reader1.etag(),
            etag_reader2.etag(),
            etag_reader3.etag()
        );
        let (result1, result2, result3) = tokio::join!(etag_reader1.etag(), etag_reader2.etag(), etag_reader3.etag());

        // All ETags should be the same (empty data hash) since no data was read
        assert_eq!(result1, result2);

@@ -533,7 +512,7 @@ mod tests {
            "", // empty volume
            "", // empty path
            0,  // zero size
            false
            false,
        );
        assert!(writer.is_ok(), "HttpFileWriter should handle empty parameters");

@@ -544,8 +523,9 @@ mod tests {
            "", // empty volume
            "", // empty path
            0,  // zero offset
            0 // zero length
        ).await;
            0, // zero length
        )
        .await;
        // May succeed or fail, but parameters should be handled
        assert!(result.is_ok() || result.is_err(), "HttpFileReader creation should not panic");
    }

@@ -555,24 +535,10 @@ mod tests {
        // Test with characters that need URL encoding
        let special_chars = "test file with spaces & symbols + % # ? = @ ! $ ( ) [ ] { } | \\ / : ; , . < > \" '";

        let writer = HttpFileWriter::new(
            "http://localhost:8080",
            special_chars,
            special_chars,
            special_chars,
            1024,
            false
        );
        let writer = HttpFileWriter::new("http://localhost:8080", special_chars, special_chars, special_chars, 1024, false);
        assert!(writer.is_ok(), "HttpFileWriter should handle special characters");

        let result = HttpFileReader::new(
            "http://invalid:9999",
            special_chars,
            special_chars,
            special_chars,
            0,
            1024
        ).await;
        let result = HttpFileReader::new("http://invalid:9999", special_chars, special_chars, special_chars, 0, 1024).await;
        // May succeed or fail, but URL encoding should work
        assert!(result.is_ok() || result.is_err(), "HttpFileReader creation should not panic");
    }
@@ -17,28 +17,24 @@ pub fn get_info(p: impl AsRef<Path>) -> std::io::Result<Info> {
    let reserved = match bfree.checked_sub(bavail) {
        Some(reserved) => reserved,
        None => {
            return Err(Error::other(
                format!(
                    "detected f_bavail space ({}) > f_bfree space ({}), fs corruption at ({}). please run fsck",
                    bavail,
                    bfree,
                    p.as_ref().display()
                ),
            ))
            return Err(Error::other(format!(
                "detected f_bavail space ({}) > f_bfree space ({}), fs corruption at ({}). please run fsck",
                bavail,
                bfree,
                p.as_ref().display()
            )))
        }
    };

    let total = match blocks.checked_sub(reserved) {
        Some(total) => total * bsize,
        None => {
            return Err(Error::other(
                format!(
                    "detected reserved space ({}) > blocks space ({}), fs corruption at ({}). please run fsck",
                    reserved,
                    blocks,
                    p.as_ref().display()
                ),
            ))
            return Err(Error::other(format!(
                "detected reserved space ({}) > blocks space ({}), fs corruption at ({}). please run fsck",
                reserved,
                blocks,
                p.as_ref().display()
            )))
        }
    };

@@ -46,14 +42,12 @@ pub fn get_info(p: impl AsRef<Path>) -> std::io::Result<Info> {
    let used = match total.checked_sub(free) {
        Some(used) => used,
        None => {
            return Err(Error::other(
                format!(
                    "detected free space ({}) > total drive space ({}), fs corruption at ({}). please run fsck",
                    free,
                    total,
                    p.as_ref().display()
                ),
            ))
            return Err(Error::other(format!(
                "detected free space ({}) > total drive space ({}), fs corruption at ({}). please run fsck",
                free,
                total,
                p.as_ref().display()
            )))
        }
    };
@@ -997,10 +997,19 @@ mod tests {
            sqs_arn: Some(vec!["arn:aws:sqs:us-east-1:123456789012:test-queue".to_string()]),
            deployment_id: Some("deployment-123".to_string()),
            buckets: Some(Buckets { count: 5, error: None }),
            objects: Some(Objects { count: 1000, error: None }),
            versions: Some(Versions { count: 1200, error: None }),
            objects: Some(Objects {
                count: 1000,
                error: None,
            }),
            versions: Some(Versions {
                count: 1200,
                error: None,
            }),
            delete_markers: Some(DeleteMarkers { count: 50, error: None }),
            usage: Some(Usage { size: 1000000000, error: None }),
            usage: Some(Usage {
                size: 1000000000,
                error: None,
            }),
            services: Some(Services::default()),
            backend: Some(ErasureBackend::default()),
            servers: Some(vec![ServerProperties::default()]),

@@ -685,10 +685,7 @@ mod tests {
            write: false,
        };

        let full_access = AccountAccess {
            read: true,
            write: true,
        };
        let full_access = AccountAccess { read: true, write: true };

        let no_access = AccountAccess {
            read: false,
@@ -117,11 +117,7 @@ impl Operation for PutFile {
            .map_err(|e| s3_error!(InternalError, "read file err {}", e))?
        };

        let mut body = StreamReader::new(
            req.input
                .into_stream()
                .map_err(std::io::Error::other),
        );
        let mut body = StreamReader::new(req.input.into_stream().map_err(std::io::Error::other));

        tokio::io::copy(&mut body, &mut file)
            .await

@@ -960,9 +960,7 @@ impl S3 for FS {
            }
        };

        let body = Box::new(StreamReader::new(
            body.map(|f| f.map_err(|e| std::io::Error::other(e.to_string()))),
        ));
        let body = Box::new(StreamReader::new(body.map(|f| f.map_err(|e| std::io::Error::other(e.to_string())))));

        let mut reader = PutObjReader::new(body, content_length as usize);

@@ -1076,9 +1074,7 @@ impl S3 for FS {
            }
        };

        let body = Box::new(StreamReader::new(
            body.map(|f| f.map_err(|e| std::io::Error::other(e.to_string()))),
        ));
        let body = Box::new(StreamReader::new(body.map(|f| f.map_err(|e| std::io::Error::other(e.to_string())))));

        // mc cp step 4
        let mut data = PutObjReader::new(body, content_length as usize);
@@ -104,11 +104,8 @@ mod tests {

     #[test]
     fn test_to_s3_error_invalid_argument() {
-        let storage_err = StorageError::InvalidArgument(
-            "test-bucket".to_string(),
-            "test-object".to_string(),
-            "test-version".to_string(),
-        );
+        let storage_err =
+            StorageError::InvalidArgument("test-bucket".to_string(), "test-object".to_string(), "test-version".to_string());
         let err = Error::new(storage_err);
         let s3_err = to_s3_error(err);

@@ -163,10 +160,7 @@ mod tests {

     #[test]
     fn test_to_s3_error_object_name_invalid() {
-        let storage_err = StorageError::ObjectNameInvalid(
-            "test-bucket".to_string(),
-            "invalid-object".to_string(),
-        );
+        let storage_err = StorageError::ObjectNameInvalid("test-bucket".to_string(), "invalid-object".to_string());
         let err = Error::new(storage_err);
         let s3_err = to_s3_error(err);

@@ -193,7 +187,10 @@ mod tests {
         let s3_err = to_s3_error(err);

         assert_eq!(*s3_err.code(), S3ErrorCode::ServiceUnavailable);
-        assert!(s3_err.message().unwrap().contains("Storage reached its minimum free drive threshold"));
+        assert!(s3_err
+            .message()
+            .unwrap()
+            .contains("Storage reached its minimum free drive threshold"));
     }

     #[test]

@@ -208,10 +205,7 @@ mod tests {

     #[test]
     fn test_to_s3_error_prefix_access_denied() {
-        let storage_err = StorageError::PrefixAccessDenied(
-            "test-bucket".to_string(),
-            "test-prefix".to_string(),
-        );
+        let storage_err = StorageError::PrefixAccessDenied("test-bucket".to_string(), "test-prefix".to_string());
         let err = Error::new(storage_err);
         let s3_err = to_s3_error(err);

@@ -223,10 +217,7 @@ mod tests {

     #[test]
     fn test_to_s3_error_invalid_upload_id_key_combination() {
-        let storage_err = StorageError::InvalidUploadIDKeyCombination(
-            "test-bucket".to_string(),
-            "test-object".to_string(),
-        );
+        let storage_err = StorageError::InvalidUploadIDKeyCombination("test-bucket".to_string(), "test-object".to_string());
         let err = Error::new(storage_err);
         let s3_err = to_s3_error(err);

@@ -249,10 +240,7 @@ mod tests {

     #[test]
     fn test_to_s3_error_object_name_too_long() {
-        let storage_err = StorageError::ObjectNameTooLong(
-            "test-bucket".to_string(),
-            "very-long-object-name".to_string(),
-        );
+        let storage_err = StorageError::ObjectNameTooLong("test-bucket".to_string(), "very-long-object-name".to_string());
         let err = Error::new(storage_err);
         let s3_err = to_s3_error(err);

@@ -264,25 +252,22 @@ mod tests {

     #[test]
     fn test_to_s3_error_object_name_prefix_as_slash() {
-        let storage_err = StorageError::ObjectNamePrefixAsSlash(
-            "test-bucket".to_string(),
-            "/invalid-object".to_string(),
-        );
+        let storage_err = StorageError::ObjectNamePrefixAsSlash("test-bucket".to_string(), "/invalid-object".to_string());
         let err = Error::new(storage_err);
         let s3_err = to_s3_error(err);

         assert_eq!(*s3_err.code(), S3ErrorCode::InvalidArgument);
-        assert!(s3_err.message().unwrap().contains("Object name contains forward slash as prefix"));
+        assert!(s3_err
+            .message()
+            .unwrap()
+            .contains("Object name contains forward slash as prefix"));
         assert!(s3_err.message().unwrap().contains("test-bucket"));
         assert!(s3_err.message().unwrap().contains("/invalid-object"));
     }

     #[test]
     fn test_to_s3_error_object_not_found() {
-        let storage_err = StorageError::ObjectNotFound(
-            "test-bucket".to_string(),
-            "missing-object".to_string(),
-        );
+        let storage_err = StorageError::ObjectNotFound("test-bucket".to_string(), "missing-object".to_string());
         let err = Error::new(storage_err);
         let s3_err = to_s3_error(err);

@@ -293,11 +278,8 @@ mod tests {

     #[test]
     fn test_to_s3_error_version_not_found() {
-        let storage_err = StorageError::VersionNotFound(
-            "test-bucket".to_string(),
-            "test-object".to_string(),
-            "missing-version".to_string(),
-        );
+        let storage_err =
+            StorageError::VersionNotFound("test-bucket".to_string(), "test-object".to_string(), "missing-version".to_string());
         let err = Error::new(storage_err);
         let s3_err = to_s3_error(err);

@@ -309,11 +291,8 @@ mod tests {

     #[test]
     fn test_to_s3_error_invalid_upload_id() {
-        let storage_err = StorageError::InvalidUploadID(
-            "test-bucket".to_string(),
-            "test-object".to_string(),
-            "invalid-upload-id".to_string(),
-        );
+        let storage_err =
+            StorageError::InvalidUploadID("test-bucket".to_string(), "test-object".to_string(), "invalid-upload-id".to_string());
         let err = Error::new(storage_err);
         let s3_err = to_s3_error(err);

@@ -361,10 +340,7 @@ mod tests {

     #[test]
     fn test_to_s3_error_object_exists_as_directory() {
-        let storage_err = StorageError::ObjectExistsAsDirectory(
-            "test-bucket".to_string(),
-            "directory-object".to_string(),
-        );
+        let storage_err = StorageError::ObjectExistsAsDirectory("test-bucket".to_string(), "directory-object".to_string());
         let err = Error::new(storage_err);
         let s3_err = to_s3_error(err);

@@ -382,7 +358,10 @@ mod tests {
         let s3_err = to_s3_error(err);

         assert_eq!(*s3_err.code(), S3ErrorCode::SlowDown);
-        assert!(s3_err.message().unwrap().contains("Storage resources are insufficient for the read operation"));
+        assert!(s3_err
+            .message()
+            .unwrap()
+            .contains("Storage resources are insufficient for the read operation"));
     }

     #[test]

@@ -392,7 +371,10 @@ mod tests {
         let s3_err = to_s3_error(err);

         assert_eq!(*s3_err.code(), S3ErrorCode::SlowDown);
-        assert!(s3_err.message().unwrap().contains("Storage resources are insufficient for the write operation"));
+        assert!(s3_err
+            .message()
+            .unwrap()
+            .contains("Storage resources are insufficient for the write operation"));
     }

     #[test]

@@ -428,11 +410,7 @@ mod tests {

     #[test]
     fn test_to_s3_error_invalid_part() {
-        let storage_err = StorageError::InvalidPart(
-            1,
-            "expected-part".to_string(),
-            "got-part".to_string(),
-        );
+        let storage_err = StorageError::InvalidPart(1, "expected-part".to_string(), "got-part".to_string());
         let err = Error::new(storage_err);
         let s3_err = to_s3_error(err);

@@ -477,10 +455,7 @@ mod tests {

     #[test]
     fn test_to_s3_error_with_special_characters() {
-        let storage_err = StorageError::ObjectNameInvalid(
-            "bucket-with-@#$%".to_string(),
-            "object-with-!@#$%^&*()".to_string(),
-        );
+        let storage_err = StorageError::ObjectNameInvalid("bucket-with-@#$%".to_string(), "object-with-!@#$%^&*()".to_string());
        let err = Error::new(storage_err);
        let s3_err = to_s3_error(err);

@@ -630,7 +630,7 @@ mod tests {
             "x-amz-storage-class",
             "x-amz-tagging",
             "expires",
-            "x-amz-replication-status"
+            "x-amz-replication-status",
         ];

         assert_eq!(*SUPPORTED_HEADERS, expected_headers);

@@ -193,12 +193,18 @@ mod tests {
         let valid_parts = ['a', 'A', '0', '9', '_', '#', '@', '$', 'α', '中'];

         for start_char in valid_starts {
-            assert!(dialect.is_identifier_start(start_char),
-                "Character '{}' should be valid identifier start", start_char);
+            assert!(
+                dialect.is_identifier_start(start_char),
+                "Character '{}' should be valid identifier start",
+                start_char
+            );

             for part_char in valid_parts {
-                assert!(dialect.is_identifier_part(part_char),
-                    "Character '{}' should be valid identifier part", part_char);
+                assert!(
+                    dialect.is_identifier_part(part_char),
+                    "Character '{}' should be valid identifier part",
+                    part_char
+                );
             }
         }
     }

@@ -211,8 +217,14 @@ mod tests {
         assert!(!dialect.is_identifier_start('\0'), "Null character should not be valid identifier start");
         assert!(!dialect.is_identifier_part('\0'), "Null character should not be valid identifier part");

-        assert!(!dialect.is_identifier_start('\x01'), "Control character should not be valid identifier start");
-        assert!(!dialect.is_identifier_part('\x01'), "Control character should not be valid identifier part");
+        assert!(
+            !dialect.is_identifier_start('\x01'),
+            "Control character should not be valid identifier start"
+        );
+        assert!(
+            !dialect.is_identifier_part('\x01'),
+            "Control character should not be valid identifier part"
+        );

         assert!(!dialect.is_identifier_start('\x7F'), "DEL character should not be valid identifier start");
         assert!(!dialect.is_identifier_part('\x7F'), "DEL character should not be valid identifier part");

@@ -226,10 +238,12 @@ mod tests {
         let unicode_letters = ['α', 'β', 'γ', 'Α', 'Β', 'Γ', '中', '文', '日', '本', 'ñ', 'ü', 'ç'];

         for ch in unicode_letters {
-            assert!(dialect.is_identifier_start(ch),
-                "Unicode letter '{}' should be valid identifier start", ch);
-            assert!(dialect.is_identifier_part(ch),
-                "Unicode letter '{}' should be valid identifier part", ch);
+            assert!(
+                dialect.is_identifier_start(ch),
+                "Unicode letter '{}' should be valid identifier start",
+                ch
+            );
+            assert!(dialect.is_identifier_part(ch), "Unicode letter '{}' should be valid identifier part", ch);
         }
     }

@@ -239,10 +253,16 @@ mod tests {

         // Test all ASCII digits
         for digit in '0'..='9' {
-            assert!(!dialect.is_identifier_start(digit),
-                "ASCII digit '{}' should not be valid identifier start", digit);
-            assert!(dialect.is_identifier_part(digit),
-                "ASCII digit '{}' should be valid identifier part", digit);
+            assert!(
+                !dialect.is_identifier_start(digit),
+                "ASCII digit '{}' should not be valid identifier start",
+                digit
+            );
+            assert!(
+                dialect.is_identifier_part(digit),
+                "ASCII digit '{}' should be valid identifier part",
+                digit
+            );
         }
     }

@@ -252,14 +272,16 @@ mod tests {

         // Test that all valid identifier starts are also valid identifier parts
         let test_chars = [
-            'a', 'A', 'z', 'Z', '_', '#', '@', 'α', '中', 'ñ',
-            '0', '9', '$', ' ', '.', ',', ';', '(', ')', '=', '+', '-'
+            'a', 'A', 'z', 'Z', '_', '#', '@', 'α', '中', 'ñ', '0', '9', '$', ' ', '.', ',', ';', '(', ')', '=', '+', '-',
         ];

         for ch in test_chars {
             if dialect.is_identifier_start(ch) {
-                assert!(dialect.is_identifier_part(ch),
-                    "Character '{}' that is valid identifier start should also be valid identifier part", ch);
+                assert!(
+                    dialect.is_identifier_part(ch),
+                    "Character '{}' that is valid identifier start should also be valid identifier part",
+                    ch
+                );
             }
         }
     }

@@ -285,7 +307,10 @@ mod tests {
         assert!(!dialect_ref.is_identifier_start('0'), "Trait method should work for invalid start");
         assert!(dialect_ref.is_identifier_part('a'), "Trait method should work for valid part");
         assert!(dialect_ref.is_identifier_part('0'), "Trait method should work for digit part");
-        assert!(dialect_ref.supports_group_by_expr(), "Trait method should return true for GROUP BY support");
+        assert!(
+            dialect_ref.supports_group_by_expr(),
+            "Trait method should return true for GROUP BY support"
+        );
     }

     #[test]

@@ -297,13 +322,22 @@ mod tests {
         let test_chars = ['a', 'A', '0', '_', '#', '@', '$', ' ', '.'];

         for ch in test_chars {
-            assert_eq!(dialect1.is_identifier_start(ch), dialect2.is_identifier_start(ch),
-                "Different instances should behave the same for is_identifier_start");
-            assert_eq!(dialect1.is_identifier_part(ch), dialect2.is_identifier_part(ch),
-                "Different instances should behave the same for is_identifier_part");
+            assert_eq!(
+                dialect1.is_identifier_start(ch),
+                dialect2.is_identifier_start(ch),
+                "Different instances should behave the same for is_identifier_start"
+            );
+            assert_eq!(
+                dialect1.is_identifier_part(ch),
+                dialect2.is_identifier_part(ch),
+                "Different instances should behave the same for is_identifier_part"
+            );
         }

-        assert_eq!(dialect1.supports_group_by_expr(), dialect2.supports_group_by_expr(),
-            "Different instances should behave the same for supports_group_by_expr");
+        assert_eq!(
+            dialect1.supports_group_by_expr(),
+            dialect2.supports_group_by_expr(),
+            "Different instances should behave the same for supports_group_by_expr"
+        );
     }
 }

@@ -90,7 +90,10 @@ mod tests {
         let _builder = CascadeOptimizerBuilder::default();

         // Test that builder can be created successfully
-        assert!(std::mem::size_of::<CascadeOptimizerBuilder>() > 0, "Builder should be created successfully");
+        assert!(
+            std::mem::size_of::<CascadeOptimizerBuilder>() > 0,
+            "Builder should be created successfully"
+        );
     }

     #[test]

@@ -109,7 +112,10 @@ mod tests {

         // Test that we can call builder methods (even if we don't have mock implementations)
         // This tests the builder pattern itself
-        assert!(std::mem::size_of::<CascadeOptimizerBuilder>() > 0, "Builder should be created successfully");
+        assert!(
+            std::mem::size_of::<CascadeOptimizerBuilder>() > 0,
+            "Builder should be created successfully"
+        );
     }

     #[test]

@@ -104,7 +104,7 @@ mod tests {
         assert!(std::mem::size_of::<DefaultParser>() == 0, "Parser should be zero-sized");
     }

-    #[test]
+    #[test]
     fn test_default_parser_simple_select() {
         let parser = DefaultParser::default();
         let sql = "SELECT * FROM S3Object";

@@ -119,11 +119,11 @@ mod tests {
         match &statements[0] {
             ExtStatement::SqlStatement(_) => {
                 // Successfully parsed as SQL statement
-            },
+            }
         }
     }

-    #[test]
+    #[test]
     fn test_default_parser_select_with_columns() {
         let parser = DefaultParser::default();
         let sql = "SELECT id, name, age FROM S3Object";

@@ -137,11 +137,11 @@ mod tests {
         match &statements[0] {
             ExtStatement::SqlStatement(_) => {
                 // Successfully parsed as SQL statement
-            },
+            }
         }
     }

-    #[test]
+    #[test]
     fn test_default_parser_select_with_where() {
         let parser = DefaultParser::default();
         let sql = "SELECT * FROM S3Object WHERE age > 25";

@@ -155,7 +155,7 @@ mod tests {
         match &statements[0] {
             ExtStatement::SqlStatement(_) => {
                 // Successfully parsed as SQL statement
-            },
+            }
         }
     }
 }

@@ -248,7 +248,7 @@ mod tests {
         assert!(result.is_ok(), "ExtParser::new_with_dialect should work");
     }

-    #[test]
+    #[test]
     fn test_ext_parser_complex_query() {
         let sql = "SELECT id, name, age FROM S3Object WHERE age > 25 AND department = 'IT' ORDER BY age DESC LIMIT 10";

@@ -261,11 +261,11 @@ mod tests {
         match &statements[0] {
             ExtStatement::SqlStatement(_) => {
                 // Successfully parsed as SQL statement
-            },
+            }
         }
     }

-    #[test]
+    #[test]
     fn test_ext_parser_aggregate_functions() {
         let sql = "SELECT COUNT(*), AVG(age), MAX(salary) FROM S3Object GROUP BY department";

@@ -278,7 +278,7 @@ mod tests {
         match &statements[0] {
             ExtStatement::SqlStatement(_) => {
                 // Successfully parsed as SQL statement
-            },
+            }
         }
     }
 }

@@ -348,14 +348,14 @@ mod tests {
         assert_eq!(statements.len(), 1, "Should have exactly one statement");
     }

-    #[test]
+    #[test]
     fn test_ext_parser_error_handling() {
         let invalid_sqls = vec![
-            "SELECT FROM",                  // Missing column list
-            "SELECT * FROM",                // Missing table name
-            "SELECT * FROM S3Object WHERE", // Incomplete WHERE clause
-            "SELECT * FROM S3Object GROUP", // Incomplete GROUP BY
-            "SELECT * FROM S3Object ORDER", // Incomplete ORDER BY
+            "SELECT FROM",                  // Missing column list
+            "SELECT * FROM",                // Missing table name
+            "SELECT * FROM S3Object WHERE", // Incomplete WHERE clause
+            "SELECT * FROM S3Object GROUP", // Incomplete GROUP BY
+            "SELECT * FROM S3Object ORDER", // Incomplete ORDER BY
         ];

         for sql in invalid_sqls {

@@ -402,7 +402,7 @@ mod tests {
         assert_eq!(statements.len(), 1, "Should have exactly one statement");
     }

-    #[test]
+    #[test]
     fn test_parser_err_macro() {
         let error: Result<()> = parser_err!("Test error message");
         assert!(error.is_err(), "parser_err! macro should create error");

@@ -410,7 +410,7 @@ mod tests {
         match error {
             Err(ParserError::ParserError(msg)) => {
                 assert_eq!(msg, "Test error message", "Error message should match");
-            },
+            }
             _ => panic!("Expected ParserError::ParserError"),
         }
     }

@@ -428,7 +428,7 @@ mod tests {
             Err(ParserError::ParserError(msg)) => {
                 assert!(msg.contains("Expected test token"), "Error should contain expected message");
                 assert!(msg.contains("found: found token"), "Error should contain found message");
-            },
+            }
             _ => panic!("Expected ParserError::ParserError"),
         }
     }