Mirror of https://github.com/rustfs/rustfs.git
Synced 2026-01-16 17:20:33 +00:00

Merge branch 'main' of https://github.com/rustfs/s3-rustfs into feature/ilm

# Conflicts:
#   Cargo.lock
#   Cargo.toml
#   crates/utils/Cargo.toml
#   crates/utils/src/net.rs
#   ecstore/Cargo.toml
#   ecstore/src/set_disk.rs
#   rustfs/src/storage/ecfs.rs
.cursorrules (178 lines changed)
@@ -3,6 +3,7 @@

## ⚠️ CRITICAL DEVELOPMENT RULES ⚠️

### 🚨 NEVER COMMIT DIRECTLY TO MASTER/MAIN BRANCH 🚨

- **This is the most important rule - NEVER modify code directly on main or master branch**
- **Always work on feature branches and use pull requests for all changes**
- **Any direct commits to master/main branch are strictly forbidden**

@@ -15,23 +16,27 @@
6. Create a pull request for review

## Project Overview

RustFS is a high-performance distributed object storage system written in Rust, compatible with the S3 API. The project adopts a modular architecture, supporting erasure coding storage, multi-tenant management, observability, and other enterprise-level features.

## Core Architecture Principles

### 1. Modular Design

- The project uses a Cargo workspace structure containing multiple independent crates
- Core modules: `rustfs` (main service), `ecstore` (erasure coding storage), `common` (shared components)
- Functional modules: `iam` (identity management), `madmin` (management interface), `crypto` (encryption), etc.
- Tool modules: `cli` (command line tool), `crates/*` (utility libraries)

### 2. Asynchronous Programming Pattern

- Use the `tokio` async runtime throughout
- Prefer `async/await` syntax
- Use `async-trait` for async methods in traits
- Avoid blocking operations; use `spawn_blocking` when necessary (see the sketch below)
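A minimal sketch of the blocking-work rule above. The checksum helper and the input buffer are illustrative, not part of the RustFS codebase; the point is that CPU-heavy or blocking work is moved onto tokio's blocking thread pool instead of stalling the async executor.

```rust
use tokio::task;

/// Hash a large buffer without blocking the async runtime.
async fn hash_buffer(data: Vec<u8>) -> Result<u32, task::JoinError> {
    // spawn_blocking moves the CPU-bound loop to a dedicated blocking thread.
    task::spawn_blocking(move || {
        let mut hash: u32 = 0;
        for byte in &data {
            hash = hash.wrapping_mul(31).wrapping_add(*byte as u32);
        }
        hash
    })
    .await
}

#[tokio::main]
async fn main() {
    let digest = hash_buffer(vec![1, 2, 3]).await.expect("blocking task panicked");
    println!("digest = {digest}");
}
```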
### 3. Error Handling Strategy

- **Use modular, type-safe error handling with `thiserror`**
- Each module should define its own error type using the `thiserror::Error` derive macro
- Support error chains and context information through the `#[from]` and `#[source]` attributes

@@ -54,6 +59,7 @@ RustFS is a high-performance distributed object storage system written in Rust,

## Code Style Guidelines

### 1. Formatting Configuration

```toml
max_width = 130
fn_call_width = 90
@@ -69,21 +75,25 @@ single_line_let_else_max_width = 100

Before every commit, you **MUST**:

1. **Format your code**:

   ```bash
   cargo fmt --all
   ```

2. **Verify formatting**:

   ```bash
   cargo fmt --all --check
   ```

3. **Pass clippy checks**:

   ```bash
   cargo clippy --all-targets --all-features -- -D warnings
   ```

4. **Ensure compilation**:

   ```bash
   cargo check --all-targets
   ```

@@ -158,6 +168,7 @@ Example output when formatting fails:
```

### 3. Naming Conventions

- Use `snake_case` for functions, variables, modules
- Use `PascalCase` for types, traits, enums
- Constants use `SCREAMING_SNAKE_CASE`
@@ -167,6 +178,7 @@ Example output when formatting fails:
- Choose names that clearly express the purpose and intent

### 4. Type Declaration Guidelines

- **Prefer type inference over explicit type declarations** when the type is obvious from context
- Let the Rust compiler infer types whenever possible to reduce verbosity and improve maintainability
- Only specify types explicitly when:
@@ -176,6 +188,7 @@ Example output when formatting fails:
  - Needed to resolve ambiguity between multiple possible types

**Good examples (prefer these):**

```rust
// Compiler can infer the type
let items = vec![1, 2, 3, 4];
@@ -187,6 +200,7 @@ let filtered: Vec<_> = items.iter().filter(|&&x| x > 2).collect();
```

**Avoid unnecessary explicit types:**

```rust
// Unnecessary - type is obvious
let items: Vec<i32> = vec![1, 2, 3, 4];
@@ -195,6 +209,7 @@ let result: ProcessResult = process_data(&input);
```

**When explicit types are beneficial:**

```rust
// API boundaries - always specify types
pub fn process_data(input: &[u8]) -> Result<ProcessResult, Error> { ... }
@@ -207,6 +222,7 @@ let cache: HashMap<String, Arc<Mutex<CacheEntry>>> = HashMap::new();
```

### 5. Documentation Comments

- Public APIs must have documentation comments
- Use `///` for documentation comments
- Complex functions should add `# Examples` and `# Parameters` sections (see the sketch below)
@@ -215,6 +231,7 @@ let cache: HashMap<String, Arc<Mutex<CacheEntry>>> = HashMap::new();
- Avoid meaningless comments like "debug 111" or placeholder text
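A brief illustration of the documentation style described above. The function, its parameters, and the capacity math are hypothetical; a real `# Examples` section would normally hold a fenced doc test, omitted here to keep the block short.

```rust
/// Returns the number of bytes needed to store `count` objects of `size` bytes each.
///
/// # Parameters
///
/// - `count`: number of objects to store
/// - `size`: size of a single object in bytes
pub fn required_capacity(count: u64, size: u64) -> u64 {
    count.saturating_mul(size)
}

fn main() {
    // Usage corresponding to what a rustdoc `# Examples` section would show.
    assert_eq!(required_capacity(4, 1024), 4096);
}
```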
### 6. Import Guidelines

- Standard library imports first
- Third-party crate imports in the middle
- Project internal imports last (see the ordering sketch below)
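A small sketch of the import grouping above; the third-party crate names (`serde`, `tracing`) and the internal module path are placeholders chosen only to show the three groups in order, so this fragment is not meant to compile on its own.

```rust
// 1. Standard library
use std::collections::HashMap;
use std::sync::Arc;

// 2. Third-party crates (illustrative)
use serde::Deserialize;
use tracing::info;

// 3. Project-internal modules (path is illustrative)
use crate::config::Config;
```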
@@ -223,6 +240,7 @@ let cache: HashMap<String, Arc<Mutex<CacheEntry>>> = HashMap::new();

## Asynchronous Programming Guidelines

### 1. Trait Definition

```rust
#[async_trait::async_trait]
pub trait StorageAPI: Send + Sync {
@@ -231,6 +249,7 @@ pub trait StorageAPI: Send + Sync {
```

### 2. Error Handling

```rust
// Use ? operator to propagate errors
async fn example_function() -> Result<()> {
@@ -241,6 +260,7 @@ async fn example_function() -> Result<()> {
```

### 3. Concurrency Control

- Use `Arc` and `Mutex`/`RwLock` for shared state management
- Prioritize async locks from `tokio::sync` (see the sketch below)
- Avoid holding locks for long periods
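A minimal sketch of the shared-state rules above, assuming `tokio` with the `sync` feature; the stats counter is illustrative. The write guard is dropped before any further `.await`, which is the "do not hold locks for long periods" point.

```rust
use std::sync::Arc;
use tokio::sync::RwLock;

#[derive(Default)]
struct Stats {
    requests: u64,
}

async fn record_request(stats: Arc<RwLock<Stats>>) {
    // Take the write lock only for the short critical section.
    let mut guard = stats.write().await;
    guard.requests += 1;
    // Guard is dropped here, before any other await points.
}

#[tokio::main]
async fn main() {
    let stats = Arc::new(RwLock::new(Stats::default()));
    record_request(stats.clone()).await;
    println!("requests = {}", stats.read().await.requests);
}
```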
@@ -248,6 +268,7 @@ async fn example_function() -> Result<()> {

## Logging and Tracing Guidelines

### 1. Tracing Usage

```rust
#[tracing::instrument(skip(self, data))]
async fn process_data(&self, data: &[u8]) -> Result<()> {
@@ -257,6 +278,7 @@ async fn process_data(&self, data: &[u8]) -> Result<()> {
```

### 2. Log Levels

- `error!`: System errors requiring immediate attention
- `warn!`: Warning information that may affect functionality
- `info!`: Important business information
@@ -264,6 +286,7 @@ async fn process_data(&self, data: &[u8]) -> Result<()> {
- `trace!`: Detailed execution paths

### 3. Structured Logging

```rust
info!(
    counter.rustfs_api_requests_total = 1_u64,
@@ -276,22 +299,23 @@ info!(

## Error Handling Guidelines

### 1. Error Type Definition

```rust
// Use thiserror for module-specific error types
#[derive(thiserror::Error, Debug)]
pub enum MyError {
    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),

    #[error("Storage error: {0}")]
    Storage(#[from] ecstore::error::StorageError),

    #[error("Custom error: {message}")]
    Custom { message: String },

    #[error("File not found: {path}")]
    FileNotFound { path: String },

    #[error("Invalid configuration: {0}")]
    InvalidConfig(String),
}
@@ -301,6 +325,7 @@ pub type Result<T> = core::result::Result<T, MyError>;
```

### 2. Error Helper Methods

```rust
impl MyError {
    /// Create error from any compatible error type
@@ -314,6 +339,7 @@ impl MyError {
```

### 3. Error Conversion Between Modules

```rust
// Convert between different module error types
impl From<ecstore::error::StorageError> for MyError {
@@ -340,6 +366,7 @@ impl From<MyError> for ecstore::error::StorageError {
```

### 4. Error Context and Propagation

```rust
// Use ? operator for clean error propagation
async fn example_function() -> Result<()> {
@@ -351,14 +378,15 @@ async fn example_function() -> Result<()> {
// Add context to errors
fn process_with_context(path: &str) -> Result<()> {
    std::fs::read(path)
        .map_err(|e| MyError::Custom {
            message: format!("Failed to read {}: {}", path, e)
        })?;
    Ok(())
}
```

### 5. API Error Conversion (S3 Example)

```rust
// Convert storage errors to API-specific errors
use s3s::{S3Error, S3ErrorCode};
@@ -404,6 +432,7 @@ impl From<ApiError> for S3Error {

### 6. Error Handling Best Practices

#### Pattern Matching and Error Classification

```rust
// Use pattern matching for specific error handling
async fn handle_storage_operation() -> Result<()> {
@@ -415,8 +444,8 @@ async fn handle_storage_operation() -> Result<()> {
        }
        Err(ecstore::error::StorageError::BucketNotFound(bucket)) => {
            error!("Bucket not found: {}", bucket);
            Err(MyError::Custom {
                message: format!("Bucket {} does not exist", bucket)
            })
        }
        Err(e) => {
@@ -428,30 +457,32 @@ async fn handle_storage_operation() -> Result<()> {
```

#### Error Aggregation and Reporting

```rust
// Collect and report multiple errors
pub fn validate_configuration(config: &Config) -> Result<()> {
    let mut errors = Vec::new();

    if config.bucket_name.is_empty() {
        errors.push("Bucket name cannot be empty");
    }

    if config.region.is_empty() {
        errors.push("Region must be specified");
    }

    if !errors.is_empty() {
        return Err(MyError::Custom {
            message: format!("Configuration validation failed: {}", errors.join(", "))
        });
    }

    Ok(())
}
```

#### Contextual Error Information

```rust
// Add operation context to errors
#[tracing::instrument(skip(self))]
@@ -468,11 +499,13 @@ async fn upload_file(&self, bucket: &str, key: &str, data: Vec<u8>) -> Result<()

## Performance Optimization Guidelines

### 1. Memory Management

- Use `Bytes` instead of `Vec<u8>` for zero-copy operations (see the sketch below)
- Avoid unnecessary cloning, use reference passing
- Use `Arc` for sharing large objects
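A small sketch of the zero-copy point above, assuming the `bytes` crate the workspace already depends on; the payload values are illustrative. `Bytes::slice` produces cheap views that share the same reference-counted buffer instead of copying it.

```rust
use bytes::Bytes;

fn main() {
    // One heap allocation for the whole payload.
    let payload = Bytes::from(vec![0u8; 4096]);

    // Cheap, zero-copy views into the same buffer (no memcpy).
    let header = payload.slice(0..16);
    let body = payload.slice(16..);

    println!("header: {} bytes, body: {} bytes", header.len(), body.len());
}
```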
### 2. Concurrency Optimization

```rust
// Use join_all for concurrent operations
let futures = disks.iter().map(|disk| disk.operation());
@@ -480,12 +513,14 @@ let results = join_all(futures).await;
```

### 3. Caching Strategy

- Use `lazy_static` or `OnceCell` for global caching (see the sketch below)
- Implement LRU cache to avoid memory leaks
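A minimal sketch of the global-cache idea above, using the standard library's `OnceLock` (the std counterpart of the `OnceCell` pattern mentioned); the cache shape is illustrative, and a real cache would add the LRU eviction the second bullet requires.

```rust
use std::collections::HashMap;
use std::sync::{Mutex, OnceLock};

// Lazily initialized, process-wide cache (key and value types are illustrative).
static BUCKET_CACHE: OnceLock<Mutex<HashMap<String, u64>>> = OnceLock::new();

fn bucket_cache() -> &'static Mutex<HashMap<String, u64>> {
    BUCKET_CACHE.get_or_init(|| Mutex::new(HashMap::new()))
}

fn main() {
    bucket_cache().lock().unwrap().insert("test-bucket".to_string(), 42);
    let size = bucket_cache().lock().unwrap().get("test-bucket").copied();
    println!("cached size: {:?}", size);
}
```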
## Testing Guidelines

### 1. Unit Tests

```rust
#[cfg(test)]
mod tests {
@@ -507,10 +542,10 @@ mod tests {
    #[test]
    fn test_error_conversion() {
        use ecstore::error::StorageError;

        let storage_err = StorageError::BucketNotFound("test-bucket".to_string());
        let api_err: ApiError = storage_err.into();

        assert_eq!(api_err.code, S3ErrorCode::NoSuchBucket);
        assert!(api_err.message.contains("test-bucket"));
        assert!(api_err.source.is_some());
@@ -520,7 +555,7 @@ mod tests {
    fn test_error_types() {
        let io_err = std::io::Error::new(std::io::ErrorKind::NotFound, "file not found");
        let my_err = MyError::Io(io_err);

        // Test error matching
        match my_err {
            MyError::Io(_) => {}, // Expected
@@ -532,7 +567,7 @@ mod tests {
    fn test_error_context() {
        let result = process_with_context("nonexistent_file.txt");
        assert!(result.is_err());

        let err = result.unwrap_err();
        match err {
            MyError::Custom { message } => {
@@ -546,10 +581,12 @@ mod tests {
```

### 2. Integration Tests

- Use the `e2e_test` module for end-to-end testing
- Simulate real storage environments

### 3. Test Quality Standards

- Write meaningful test cases that verify actual functionality
- Avoid placeholder or debug content like "debug 111", "test test", etc.
- Use descriptive test names that clearly indicate what is being tested
@@ -559,9 +596,11 @@ mod tests {

## Cross-Platform Compatibility Guidelines

### 1. CPU Architecture Compatibility

- **Always consider multi-platform and different CPU architecture compatibility** when writing code
- Support major architectures: x86_64, aarch64 (ARM64), and other target platforms
- Use conditional compilation for architecture-specific code:

```rust
#[cfg(target_arch = "x86_64")]
fn optimized_x86_64_function() { /* x86_64 specific implementation */ }
@@ -574,16 +613,19 @@ fn generic_function() { /* Generic fallback implementation */ }
```

### 2. Platform-Specific Dependencies

- Use feature flags for platform-specific dependencies
- Provide fallback implementations for unsupported platforms
- Test on multiple architectures in the CI/CD pipeline

### 3. Endianness Considerations

- Use explicit byte order conversion when dealing with binary data
- Prefer `to_le_bytes()`, `from_le_bytes()` for consistent little-endian format (see the sketch below)
- Use the `byteorder` crate for complex binary format handling
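A short sketch of the explicit byte-order rule above; the length-prefix framing is an invented example, not a RustFS on-disk format. Encoding and decoding both go through `to_le_bytes`/`from_le_bytes`, so the result is identical on little- and big-endian hosts.

```rust
fn encode_len_prefix(len: u64) -> [u8; 8] {
    // Always serialize in little-endian, regardless of the host CPU.
    len.to_le_bytes()
}

fn decode_len_prefix(buf: [u8; 8]) -> u64 {
    u64::from_le_bytes(buf)
}

fn main() {
    let encoded = encode_len_prefix(0x0102_0304);
    assert_eq!(decode_len_prefix(encoded), 0x0102_0304);
    println!("round-trip ok: {:?}", encoded);
}
```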
### 4. SIMD and Performance Optimizations

- Use portable SIMD libraries like `wide` or `packed_simd`
- Provide fallback implementations for non-SIMD architectures
- Use runtime feature detection when appropriate
@@ -591,10 +633,12 @@ fn generic_function() { /* Generic fallback implementation */ }

## Security Guidelines

### 1. Memory Safety

- Disable `unsafe` code (`workspace.lints.rust.unsafe_code = "deny"`)
- Use `rustls` instead of `openssl`

### 2. Authentication and Authorization

```rust
// Use IAM system for permission checks
let identity = iam.authenticate(&access_key, &secret_key).await?;
@@ -604,11 +648,13 @@ iam.authorize(&identity, &action, &resource).await?;

## Configuration Management Guidelines

### 1. Environment Variables

- Use the `RUSTFS_` prefix (see the sketch below)
- Support both configuration files and environment variables
- Provide reasonable default values
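A minimal sketch of the environment-variable rules above; the variable name `RUSTFS_REGION` and the default value are hypothetical, chosen only to show the prefix plus a reasonable fallback.

```rust
use std::env;

/// Read an optional setting from the environment, falling back to a default.
fn region_from_env() -> String {
    env::var("RUSTFS_REGION").unwrap_or_else(|_| "us-east-1".to_string())
}

fn main() {
    println!("region = {}", region_from_env());
}
```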
### 2. Configuration Structure

```rust
#[derive(Debug, Deserialize, Clone)]
pub struct Config {
@@ -622,10 +668,12 @@ pub struct Config {

## Dependency Management Guidelines

### 1. Workspace Dependencies

- Manage versions uniformly at workspace level
- Use `workspace = true` to inherit configuration

### 2. Feature Flags

```toml
[features]
default = ["file"]
@@ -636,15 +684,18 @@ kafka = ["dep:rdkafka"]

## Deployment and Operations Guidelines

### 1. Containerization

- Provide Dockerfile and docker-compose configuration
- Support multi-stage builds to optimize image size

### 2. Observability

- Integrate OpenTelemetry for distributed tracing
- Support Prometheus metrics collection
- Provide Grafana dashboards

### 3. Health Checks

```rust
// Implement health check endpoint
async fn health_check() -> Result<HealthStatus> {
@@ -655,6 +706,7 @@ async fn health_check() -> Result<HealthStatus> {

## Code Review Checklist

### 1. **Code Formatting and Quality (MANDATORY)**

- [ ] **Code is properly formatted** (`cargo fmt --all --check` passes)
- [ ] **All clippy warnings are resolved** (`cargo clippy --all-targets --all-features -- -D warnings` passes)
- [ ] **Code compiles successfully** (`cargo check --all-targets` passes)
@@ -662,27 +714,32 @@ async fn health_check() -> Result<HealthStatus> {
- [ ] **No formatting-related changes** mixed with functional changes (separate commits)

### 2. Functionality

- [ ] Are all error cases properly handled?
- [ ] Is there appropriate logging?
- [ ] Is there necessary test coverage?

### 3. Performance

- [ ] Are unnecessary memory allocations avoided?
- [ ] Are async operations used correctly?
- [ ] Are there potential deadlock risks?

### 4. Security

- [ ] Are input parameters properly validated?
- [ ] Are there appropriate permission checks?
- [ ] Is information leakage avoided?

### 5. Cross-Platform Compatibility

- [ ] Does the code work on different CPU architectures (x86_64, aarch64)?
- [ ] Are platform-specific features properly gated with conditional compilation?
- [ ] Is byte order handling correct for binary data?
- [ ] Are there appropriate fallback implementations for unsupported platforms?

### 6. Code Commits and Documentation

- [ ] Does it comply with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)?
- [ ] Are commit messages concise and under 72 characters for the title line?
- [ ] Commit titles should be concise and in English, avoid Chinese
@@ -691,6 +748,7 @@ async fn health_check() -> Result<HealthStatus> {

## Common Patterns and Best Practices

### 1. Resource Management

```rust
// Use RAII pattern for resource management
pub struct ResourceGuard {
@@ -705,6 +763,7 @@ impl Drop for ResourceGuard {
```

### 2. Dependency Injection

```rust
// Use dependency injection pattern
pub struct Service {
@@ -714,6 +773,7 @@ pub struct Service {
```

### 3. Graceful Shutdown

```rust
// Implement graceful shutdown
async fn shutdown_gracefully(shutdown_rx: &mut Receiver<()>) {
@@ -732,16 +792,19 @@ async fn shutdown_gracefully(shutdown_rx: &mut Receiver<()>) {

## Domain-Specific Guidelines

### 1. Storage Operations

- All storage operations must support erasure coding
- Implement read/write quorum mechanisms
- Support data integrity verification

### 2. Network Communication

- Use gRPC for internal service communication
- HTTP/HTTPS support for the S3-compatible API
- Implement connection pooling and retry mechanisms

### 3. Metadata Management

- Use FlatBuffers for serialization
- Support version control and migration
- Implement metadata caching
@@ -751,11 +814,12 @@ These rules should serve as guiding principles when developing the RustFS projec

### 4. Code Operations

#### Branch Management

- **🚨 CRITICAL: NEVER modify code directly on main or master branch - THIS IS ABSOLUTELY FORBIDDEN 🚨**
- **⚠️ ANY DIRECT COMMITS TO MASTER/MAIN WILL BE REJECTED AND MUST BE REVERTED IMMEDIATELY ⚠️**
- **Always work on feature branches - NO EXCEPTIONS**
- Always check the .cursorrules file before starting to ensure you understand the project guidelines
- **MANDATORY workflow for ALL changes:**

  1. `git checkout main` (switch to main branch)
  2. `git pull` (get latest changes)
  3. `git checkout -b feat/your-feature-name` (create and switch to feature branch)
@@ -763,28 +827,54 @@ These rules should serve as guiding principles when developing the RustFS projec
  5. Test thoroughly before committing
  6. Commit and push to the feature branch
  7. Create a pull request for code review

- Use descriptive branch names following the pattern: `feat/feature-name`, `fix/issue-name`, `refactor/component-name`, etc.
- **Double-check current branch before ANY commit: `git branch` to ensure you're NOT on main/master**
- Ensure all changes are made on feature branches and merged through pull requests

#### Development Workflow

- Use English for all code comments, documentation, and variable names
- Write meaningful and descriptive names for variables, functions, and methods
- Avoid meaningless test content like "debug 111" or placeholder values
- Before each change, carefully read the existing code to understand its structure and implementation; do not break existing logic or introduce new issues
- Ensure each change provides sufficient test cases to guarantee code correctness
- Do not arbitrarily modify numbers and constants in test cases; carefully analyze their meaning to ensure test case correctness
- When writing or modifying tests, check that existing test cases have sound naming and rigorous logic; if not, revise them so the tests are scientific and rigorous
- **Before committing any changes, run `cargo clippy --all-targets --all-features -- -D warnings` to ensure all code passes Clippy checks**
- After each development task, first `git add .` then `git commit -m "feat: feature description"` or `"fix: issue description"`, ensuring compliance with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)
- **Keep commit messages concise and under 72 characters** for the title line; use the body for detailed explanations if needed
- After each development task, `git push` to the remote repository
- After each change, summarize the work; do not create summary files, provide a brief change description, and ensure compliance with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)
- Provide the change description needed for the PR in the conversation, ensuring compliance with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)
- **Always provide PR descriptions in English** after completing any changes, including:
  - Clear and concise title following Conventional Commits format
  - Detailed description of what was changed and why
  - List of key changes and improvements
  - Any breaking changes or migration notes if applicable
  - Testing information and verification steps
- **Provide PR descriptions in copyable markdown format** enclosed in code blocks for easy one-click copying
## 🚫 AI Documentation Generation Restrictions

### Prohibited: Generated Summary Documents

- **Creating AI-generated summary documents of any kind is strictly prohibited**
- **Do not create documents full of emoji, elaborate formatted tables, and typical AI-style content**
- **Do not generate the following kinds of documents in the project:**
  - Benchmark summary documents (BENCHMARK*.md)
  - Implementation comparison documents (IMPLEMENTATION_COMPARISON*.md)
  - Performance analysis reports
  - Architecture summary documents
  - Feature comparison documents
  - Any document loaded with emoji and heavy formatting
- **If documentation is needed, create it only when the user explicitly asks for it, and keep the style concise and practical**
- **Documentation should focus on the information actually needed and avoid excessive formatting and decorative content**
- **Any AI-generated summary documents that are found should be deleted immediately**

### Allowed Document Types

- README.md (project introduction, kept concise)
- Technical documentation (only when explicitly needed)
- User manuals (only when explicitly needed)
- API documentation (generated from code)
- Changelog (CHANGELOG.md)
@@ -1,4 +1,4 @@
FROM m.daocloud.io/docker.io/library/ubuntu:22.04
FROM ubuntu:22.04

ENV LANG C.UTF-8

@@ -18,10 +18,7 @@ RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.
    && mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip

# install rust
ENV RUSTUP_DIST_SERVER="https://rsproxy.cn"
ENV RUSTUP_UPDATE_ROOT="https://rsproxy.cn/rustup"
RUN curl -o rustup-init.sh --proto '=https' --tlsv1.2 -sSf https://rsproxy.cn/rustup-init.sh \
    && sh rustup-init.sh -y && rm -rf rustup-init.sh
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

COPY .docker/cargo.config.toml /root/.cargo/config.toml
@@ -1,4 +1,4 @@
FROM m.daocloud.io/docker.io/library/rockylinux:9.3 AS builder
FROM rockylinux:9.3 AS builder

ENV LANG C.UTF-8

@@ -25,10 +25,7 @@ RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.
    && rm -rf Linux.flatc.binary.g++-13.zip

# install rust
ENV RUSTUP_DIST_SERVER="https://rsproxy.cn"
ENV RUSTUP_UPDATE_ROOT="https://rsproxy.cn/rustup"
RUN curl -o rustup-init.sh --proto '=https' --tlsv1.2 -sSf https://rsproxy.cn/rustup-init.sh \
    && sh rustup-init.sh -y && rm -rf rustup-init.sh
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

COPY .docker/cargo.config.toml /root/.cargo/config.toml
@@ -1,4 +1,4 @@
FROM m.daocloud.io/docker.io/library/ubuntu:22.04
FROM ubuntu:22.04

ENV LANG C.UTF-8

@@ -18,10 +18,7 @@ RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.
    && mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip

# install rust
ENV RUSTUP_DIST_SERVER="https://rsproxy.cn"
ENV RUSTUP_UPDATE_ROOT="https://rsproxy.cn/rustup"
RUN curl -o rustup-init.sh --proto '=https' --tlsv1.2 -sSf https://rsproxy.cn/rustup-init.sh \
    && sh rustup-init.sh -y && rm -rf rustup-init.sh
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

COPY .docker/cargo.config.toml /root/.cargo/config.toml
@@ -1,13 +1,5 @@
[source.crates-io]
registry = "https://github.com/rust-lang/crates.io-index"
replace-with = 'rsproxy-sparse'

[source.rsproxy]
registry = "https://rsproxy.cn/crates.io-index"
[registries.rsproxy]
index = "https://rsproxy.cn/crates.io-index"
[source.rsproxy-sparse]
registry = "sparse+https://rsproxy.cn/index/"

[net]
git-fetch-with-cli = true
.docker/mqtt/config/emqx.conf (new file, 37 lines)
@@ -0,0 +1,37 @@
# Node configuration
node.name = "emqx@127.0.0.1"
node.cookie = "aBcDeFgHiJkLmNoPqRsTuVwXyZ012345"
node.data_dir = "/opt/emqx/data"

# Logging configuration
log.console = {level = info, enable = true}
log.file = {path = "/opt/emqx/log/emqx.log", enable = true, level = info}

# MQTT TCP listener
listeners.tcp.default = {bind = "0.0.0.0:1883", max_connections = 1000000, enable = true}

# MQTT SSL listener
listeners.ssl.default = {bind = "0.0.0.0:8883", enable = false}

# MQTT WebSocket listener
listeners.ws.default = {bind = "0.0.0.0:8083", enable = true}

# MQTT WebSocket SSL listener
listeners.wss.default = {bind = "0.0.0.0:8084", enable = false}

# Management dashboard
dashboard.listeners.http = {bind = "0.0.0.0:18083", enable = true}

# HTTP API
management.listeners.http = {bind = "0.0.0.0:8081", enable = true}

# Authentication configuration
authentication = [
  {enable = true, mechanism = password_based, backend = built_in_database, user_id_type = username}
]

# Authorization configuration
authorization.sources = [{type = built_in_database, enable = true}]

# Persistent message storage
message.storage.backend = built_in_database
.docker/mqtt/config/vm.args (new file, 9 lines)
@@ -0,0 +1,9 @@
-name emqx@127.0.0.1
-setcookie aBcDeFgHiJkLmNoPqRsTuVwXyZ012345
+P 2097152
+t 1048576
+zdbbl 32768
-kernel inet_dist_listen_min 6000
-kernel inet_dist_listen_max 6100
-smp enable
-mnesia dir "/opt/emqx/data/mnesia"
.docker/mqtt/docker-compose-more.yml (new file, 60 lines)
@@ -0,0 +1,60 @@
services:
|
||||
emqx:
|
||||
image: emqx/emqx:latest
|
||||
container_name: emqx
|
||||
restart: unless-stopped
|
||||
environment:
|
||||
- EMQX_NODE__NAME=emqx@127.0.0.1
|
||||
- EMQX_NODE__COOKIE=aBcDeFgHiJkLmNoPqRsTuVwXyZ012345
|
||||
- EMQX_NODE__DATA_DIR=/opt/emqx/data
|
||||
- EMQX_LOG__CONSOLE__LEVEL=info
|
||||
- EMQX_LOG__CONSOLE__ENABLE=true
|
||||
- EMQX_LOG__FILE__PATH=/opt/emqx/log/emqx.log
|
||||
- EMQX_LOG__FILE__LEVEL=info
|
||||
- EMQX_LOG__FILE__ENABLE=true
|
||||
- EMQX_LISTENERS__TCP__DEFAULT__BIND=0.0.0.0:1883
|
||||
- EMQX_LISTENERS__TCP__DEFAULT__MAX_CONNECTIONS=1000000
|
||||
- EMQX_LISTENERS__TCP__DEFAULT__ENABLE=true
|
||||
- EMQX_LISTENERS__SSL__DEFAULT__BIND=0.0.0.0:8883
|
||||
- EMQX_LISTENERS__SSL__DEFAULT__ENABLE=false
|
||||
- EMQX_LISTENERS__WS__DEFAULT__BIND=0.0.0.0:8083
|
||||
- EMQX_LISTENERS__WS__DEFAULT__ENABLE=true
|
||||
- EMQX_LISTENERS__WSS__DEFAULT__BIND=0.0.0.0:8084
|
||||
- EMQX_LISTENERS__WSS__DEFAULT__ENABLE=false
|
||||
- EMQX_DASHBOARD__LISTENERS__HTTP__BIND=0.0.0.0:18083
|
||||
- EMQX_DASHBOARD__LISTENERS__HTTP__ENABLE=true
|
||||
- EMQX_MANAGEMENT__LISTENERS__HTTP__BIND=0.0.0.0:8081
|
||||
- EMQX_MANAGEMENT__LISTENERS__HTTP__ENABLE=true
|
||||
- EMQX_AUTHENTICATION__1__ENABLE=true
|
||||
- EMQX_AUTHENTICATION__1__MECHANISM=password_based
|
||||
- EMQX_AUTHENTICATION__1__BACKEND=built_in_database
|
||||
- EMQX_AUTHENTICATION__1__USER_ID_TYPE=username
|
||||
- EMQX_AUTHORIZATION__SOURCES__1__TYPE=built_in_database
|
||||
- EMQX_AUTHORIZATION__SOURCES__1__ENABLE=true
|
||||
ports:
|
||||
- "1883:1883" # MQTT TCP
|
||||
- "8883:8883" # MQTT SSL
|
||||
- "8083:8083" # MQTT WebSocket
|
||||
- "8084:8084" # MQTT WebSocket SSL
|
||||
- "18083:18083" # Web 管理控制台
|
||||
- "8081:8081" # HTTP API
|
||||
volumes:
|
||||
- ./data:/opt/emqx/data
|
||||
- ./log:/opt/emqx/log
|
||||
- ./config:/opt/emqx/etc
|
||||
networks:
|
||||
- mqtt-net
|
||||
healthcheck:
|
||||
test: [ "CMD", "/opt/emqx/bin/emqx_ctl", "status" ]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
logging:
|
||||
driver: "json-file"
|
||||
options:
|
||||
max-size: "100m"
|
||||
max-file: "3"
|
||||
|
||||
networks:
|
||||
mqtt-net:
|
||||
driver: bridge
|
||||
.docker/mqtt/docker-compose.yml (new file, 15 lines)
@@ -0,0 +1,15 @@
services:
  emqx:
    image: emqx/emqx:latest
    container_name: emqx
    ports:
      - "1883:1883"
      - "8083:8083"
      - "8084:8084"
      - "8883:8883"
      - "18083:18083"
    restart: unless-stopped

networks:
  default:
    driver: bridge
.github/actions/setup/action.yml (7 lines changed)
@@ -13,9 +13,9 @@ inputs:
description: "Cache key for shared cache"
|
||||
cache-save-if:
|
||||
required: true
|
||||
default: true
|
||||
default: ${{ github.ref == 'refs/heads/main' }}
|
||||
description: "Cache save condition"
|
||||
run-os:
|
||||
runs-on:
|
||||
required: true
|
||||
default: "ubuntu-latest"
|
||||
description: "Running system"
|
||||
@@ -24,7 +24,7 @@ runs:
|
||||
using: "composite"
|
||||
steps:
|
||||
- name: Install system dependencies
|
||||
if: inputs.run-os == 'ubuntu-latest'
|
||||
if: inputs.runs-on == 'ubuntu-latest'
|
||||
shell: bash
|
||||
run: |
|
||||
sudo apt update
|
||||
@@ -45,7 +45,6 @@ runs:
|
||||
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
cache-on-failure: true
|
||||
cache-all-crates: true
|
||||
shared-key: ${{ inputs.cache-shared-key }}
|
||||
save-if: ${{ inputs.cache-save-if }}
|
||||
|
||||
.github/workflows/build.yml (186 lines changed)
@@ -17,42 +17,114 @@ jobs:
|
||||
matrix:
|
||||
os: [ ubuntu-latest, macos-latest, windows-latest ]
|
||||
variant:
|
||||
- { profile: release, target: x86_64-unknown-linux-musl, glibc: "default" }
|
||||
- { profile: release, target: x86_64-unknown-linux-gnu, glibc: "default" }
|
||||
- {
|
||||
profile: release,
|
||||
target: x86_64-unknown-linux-musl,
|
||||
glibc: "default",
|
||||
}
|
||||
- {
|
||||
profile: release,
|
||||
target: x86_64-unknown-linux-gnu,
|
||||
glibc: "default",
|
||||
}
|
||||
- { profile: release, target: aarch64-apple-darwin, glibc: "default" }
|
||||
#- { profile: release, target: aarch64-unknown-linux-gnu, glibc: "default" }
|
||||
- { profile: release, target: aarch64-unknown-linux-musl, glibc: "default" }
|
||||
- {
|
||||
profile: release,
|
||||
target: aarch64-unknown-linux-musl,
|
||||
glibc: "default",
|
||||
}
|
||||
#- { profile: release, target: x86_64-pc-windows-msvc, glibc: "default" }
|
||||
exclude:
|
||||
# Linux targets on non-Linux systems
|
||||
- os: macos-latest
|
||||
variant: { profile: release, target: x86_64-unknown-linux-gnu, glibc: "default" }
|
||||
variant:
|
||||
{
|
||||
profile: release,
|
||||
target: x86_64-unknown-linux-gnu,
|
||||
glibc: "default",
|
||||
}
|
||||
- os: macos-latest
|
||||
variant: { profile: release, target: x86_64-unknown-linux-musl, glibc: "default" }
|
||||
variant:
|
||||
{
|
||||
profile: release,
|
||||
target: x86_64-unknown-linux-musl,
|
||||
glibc: "default",
|
||||
}
|
||||
- os: macos-latest
|
||||
variant: { profile: release, target: aarch64-unknown-linux-gnu, glibc: "default" }
|
||||
variant:
|
||||
{
|
||||
profile: release,
|
||||
target: aarch64-unknown-linux-gnu,
|
||||
glibc: "default",
|
||||
}
|
||||
- os: macos-latest
|
||||
variant: { profile: release, target: aarch64-unknown-linux-musl, glibc: "default" }
|
||||
variant:
|
||||
{
|
||||
profile: release,
|
||||
target: aarch64-unknown-linux-musl,
|
||||
glibc: "default",
|
||||
}
|
||||
- os: windows-latest
|
||||
variant: { profile: release, target: x86_64-unknown-linux-gnu, glibc: "default" }
|
||||
variant:
|
||||
{
|
||||
profile: release,
|
||||
target: x86_64-unknown-linux-gnu,
|
||||
glibc: "default",
|
||||
}
|
||||
- os: windows-latest
|
||||
variant: { profile: release, target: x86_64-unknown-linux-musl, glibc: "default" }
|
||||
variant:
|
||||
{
|
||||
profile: release,
|
||||
target: x86_64-unknown-linux-musl,
|
||||
glibc: "default",
|
||||
}
|
||||
- os: windows-latest
|
||||
variant: { profile: release, target: aarch64-unknown-linux-gnu, glibc: "default" }
|
||||
variant:
|
||||
{
|
||||
profile: release,
|
||||
target: aarch64-unknown-linux-gnu,
|
||||
glibc: "default",
|
||||
}
|
||||
- os: windows-latest
|
||||
variant: { profile: release, target: aarch64-unknown-linux-musl, glibc: "default" }
|
||||
variant:
|
||||
{
|
||||
profile: release,
|
||||
target: aarch64-unknown-linux-musl,
|
||||
glibc: "default",
|
||||
}
|
||||
|
||||
# Apple targets on non-macOS systems
|
||||
- os: ubuntu-latest
|
||||
variant: { profile: release, target: aarch64-apple-darwin, glibc: "default" }
|
||||
variant:
|
||||
{
|
||||
profile: release,
|
||||
target: aarch64-apple-darwin,
|
||||
glibc: "default",
|
||||
}
|
||||
- os: windows-latest
|
||||
variant: { profile: release, target: aarch64-apple-darwin, glibc: "default" }
|
||||
variant:
|
||||
{
|
||||
profile: release,
|
||||
target: aarch64-apple-darwin,
|
||||
glibc: "default",
|
||||
}
|
||||
|
||||
# Windows targets on non-Windows systems
|
||||
- os: ubuntu-latest
|
||||
variant: { profile: release, target: x86_64-pc-windows-msvc, glibc: "default" }
|
||||
variant:
|
||||
{
|
||||
profile: release,
|
||||
target: x86_64-pc-windows-msvc,
|
||||
glibc: "default",
|
||||
}
|
||||
- os: macos-latest
|
||||
variant: { profile: release, target: x86_64-pc-windows-msvc, glibc: "default" }
|
||||
variant:
|
||||
{
|
||||
profile: release,
|
||||
target: x86_64-pc-windows-msvc,
|
||||
glibc: "default",
|
||||
}
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
@@ -89,7 +161,7 @@ jobs:
|
||||
if: steps.cache-protoc.outputs.cache-hit != 'true'
|
||||
uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
version: '31.1'
|
||||
version: "31.1"
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Setup Flatc
|
||||
@@ -180,14 +252,14 @@ jobs:
|
||||
if [[ "$GLIBC" != "default" ]]; then
|
||||
BIN_NAME="${BIN_NAME}.glibc${GLIBC}"
|
||||
fi
|
||||
|
||||
|
||||
# Windows systems use exe suffix, and other systems do not have suffix
|
||||
if [[ "${{ matrix.variant.target }}" == *"windows"* ]]; then
|
||||
BIN_NAME="${BIN_NAME}.exe"
|
||||
else
|
||||
BIN_NAME="${BIN_NAME}.bin"
|
||||
fi
|
||||
|
||||
|
||||
echo "Binary name will be: $BIN_NAME"
|
||||
|
||||
echo "::group::Building rustfs"
|
||||
@@ -265,17 +337,56 @@ jobs:
|
||||
path: ${{ steps.package.outputs.artifact_name }}.zip
|
||||
retention-days: 7
|
||||
|
||||
# Install ossutil2 tool for OSS upload
|
||||
- name: Install ossutil2
|
||||
if: startsWith(github.ref, 'refs/tags/') || github.ref == 'refs/heads/main'
|
||||
shell: bash
|
||||
run: |
|
||||
echo "::group::Installing ossutil2"
|
||||
# Download and install ossutil based on platform
|
||||
if [ "${{ runner.os }}" = "Linux" ]; then
|
||||
curl -o ossutil.zip https://gosspublic.alicdn.com/ossutil/v2/2.1.1/ossutil-2.1.1-linux-amd64.zip
|
||||
unzip -o ossutil.zip
|
||||
chmod 755 ossutil-2.1.1-linux-amd64/ossutil
|
||||
sudo mv ossutil-2.1.1-linux-amd64/ossutil /usr/local/bin/
|
||||
rm -rf ossutil.zip ossutil-2.1.1-linux-amd64
|
||||
elif [ "${{ runner.os }}" = "macOS" ]; then
|
||||
if [ "$(uname -m)" = "arm64" ]; then
|
||||
curl -o ossutil.zip https://gosspublic.alicdn.com/ossutil/v2/2.1.1/ossutil-2.1.1-mac-arm64.zip
|
||||
else
|
||||
curl -o ossutil.zip https://gosspublic.alicdn.com/ossutil/v2/2.1.1/ossutil-2.1.1-mac-amd64.zip
|
||||
fi
|
||||
unzip -o ossutil.zip
|
||||
chmod 755 ossutil-*/ossutil
|
||||
sudo mv ossutil-*/ossutil /usr/local/bin/
|
||||
rm -rf ossutil.zip ossutil-*
|
||||
elif [ "${{ runner.os }}" = "Windows" ]; then
|
||||
curl -o ossutil.zip https://gosspublic.alicdn.com/ossutil/v2/2.1.1/ossutil-2.1.1-windows-amd64.zip
|
||||
unzip -o ossutil.zip
|
||||
mv ossutil-*/ossutil.exe /usr/bin/ossutil.exe
|
||||
rm -rf ossutil.zip ossutil-*
|
||||
fi
|
||||
echo "ossutil2 installation completed"
|
||||
|
||||
# Set the OSS configuration
|
||||
ossutil config set Region oss-cn-beijing
|
||||
ossutil config set endpoint oss-cn-beijing.aliyuncs.com
|
||||
ossutil config set accessKeyID ${{ secrets.ALICLOUDOSS_KEY_ID }}
|
||||
ossutil config set accessKeySecret ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
|
||||
|
||||
- name: Upload to Aliyun OSS
|
||||
if: startsWith(github.ref, 'refs/tags/') || github.ref == 'refs/heads/main'
|
||||
uses: JohnGuan/oss-upload-action@main
|
||||
with:
|
||||
key-id: ${{ secrets.ALICLOUDOSS_KEY_ID }}
|
||||
key-secret: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
|
||||
region: oss-cn-beijing
|
||||
bucket: rustfs-artifacts
|
||||
assets: |
|
||||
${{ steps.package.outputs.artifact_name }}.zip:/artifacts/rustfs/${{ steps.package.outputs.artifact_name }}.zip
|
||||
${{ steps.package.outputs.artifact_name }}.zip:/artifacts/rustfs/${{ steps.package.outputs.artifact_name }}.latest.zip
|
||||
shell: bash
|
||||
env:
|
||||
OSSUTIL_ACCESS_KEY_ID: ${{ secrets.ALICLOUDOSS_KEY_ID }}
|
||||
OSSUTIL_ACCESS_KEY_SECRET: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
|
||||
OSSUTIL_ENDPOINT: https://oss-cn-beijing.aliyuncs.com
|
||||
run: |
|
||||
echo "::group::Uploading files to OSS"
|
||||
# Upload the artifact file to two different paths
|
||||
ossutil cp "${{ steps.package.outputs.artifact_name }}.zip" "oss://rustfs-artifacts/artifacts/rustfs/${{ steps.package.outputs.artifact_name }}.zip" --force
|
||||
ossutil cp "${{ steps.package.outputs.artifact_name }}.zip" "oss://rustfs-artifacts/artifacts/rustfs/${{ steps.package.outputs.artifact_name }}.latest.zip" --force
|
||||
echo "Successfully uploaded artifacts to OSS"
|
||||
|
||||
# Determine whether to perform GUI construction based on conditions
|
||||
- name: Prepare for GUI build
|
||||
@@ -393,16 +504,17 @@ jobs:
|
||||
# Upload GUI to Alibaba Cloud OSS
|
||||
- name: Upload GUI to Aliyun OSS
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
uses: JohnGuan/oss-upload-action@main
|
||||
with:
|
||||
key-id: ${{ secrets.ALICLOUDOSS_KEY_ID }}
|
||||
key-secret: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
|
||||
region: oss-cn-beijing
|
||||
bucket: rustfs-artifacts
|
||||
assets: |
|
||||
${{ steps.build_gui.outputs.gui_artifact_name }}.zip:/artifacts/rustfs/${{ steps.build_gui.outputs.gui_artifact_name }}.zip
|
||||
${{ steps.build_gui.outputs.gui_artifact_name }}.zip:/artifacts/rustfs/${{ steps.build_gui.outputs.gui_artifact_name }}.latest.zip
|
||||
|
||||
shell: bash
|
||||
env:
|
||||
OSSUTIL_ACCESS_KEY_ID: ${{ secrets.ALICLOUDOSS_KEY_ID }}
|
||||
OSSUTIL_ACCESS_KEY_SECRET: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
|
||||
OSSUTIL_ENDPOINT: https://oss-cn-beijing.aliyuncs.com
|
||||
run: |
|
||||
echo "::group::Uploading GUI files to OSS"
|
||||
# Upload the GUI artifact file to two different paths
|
||||
ossutil cp "${{ steps.build_gui.outputs.gui_artifact_name }}.zip" "oss://rustfs-artifacts/artifacts/rustfs/${{ steps.build_gui.outputs.gui_artifact_name }}.zip" --force
|
||||
ossutil cp "${{ steps.build_gui.outputs.gui_artifact_name }}.zip" "oss://rustfs-artifacts/artifacts/rustfs/${{ steps.build_gui.outputs.gui_artifact_name }}.latest.zip" --force
|
||||
echo "Successfully uploaded GUI artifacts to OSS"
|
||||
|
||||
merge:
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
.github/workflows/ci.yml (116 lines changed)
@@ -11,9 +11,6 @@ on:
|
||||
- cron: '0 0 * * 0' # at midnight of each sunday
|
||||
workflow_dispatch:
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
jobs:
|
||||
skip-check:
|
||||
permissions:
|
||||
@@ -30,103 +27,52 @@ jobs:
|
||||
cancel_others: true
|
||||
paths_ignore: '["*.md"]'
|
||||
|
||||
# Quality checks for pull requests
|
||||
pr-checks:
|
||||
name: Pull Request Quality Checks
|
||||
if: github.event_name == 'pull_request'
|
||||
develop:
|
||||
needs: skip-check
|
||||
if: needs.skip-check.outputs.should_skip != 'true'
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: ./.github/actions/setup
|
||||
|
||||
- name: Test
|
||||
run: cargo test --all --exclude e2e_test
|
||||
|
||||
- name: Format
|
||||
run: cargo fmt --all --check
|
||||
|
||||
- name: Lint
|
||||
run: cargo clippy --all-targets --all-features -- -D warnings
|
||||
|
||||
s3s-e2e:
|
||||
name: E2E (s3s-e2e)
|
||||
needs: skip-check
|
||||
if: needs.skip-check.outputs.should_skip != 'true'
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- uses: actions/checkout@v4.2.2
|
||||
- uses: ./.github/actions/setup
|
||||
|
||||
# - name: Format Check
|
||||
# run: cargo fmt --all --check
|
||||
|
||||
- name: Lint Check
|
||||
run: cargo check --all-targets
|
||||
|
||||
- name: Clippy Check
|
||||
run: cargo clippy --all-targets --all-features -- -D warnings
|
||||
|
||||
- name: Unit Tests
|
||||
run: cargo test --all --exclude e2e_test
|
||||
|
||||
- name: Format Code
|
||||
run: cargo fmt --all
|
||||
|
||||
s3s-e2e:
|
||||
name: E2E (s3s-e2e)
|
||||
needs:
|
||||
- skip-check
|
||||
- develop
|
||||
if: needs.skip-check.outputs.should_skip != 'true'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4.2.2
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- uses: Swatinem/rust-cache@v2
|
||||
- name: Install s3s-e2e
|
||||
uses: taiki-e/cache-cargo-install-action@v2
|
||||
with:
|
||||
cache-on-failure: true
|
||||
cache-all-crates: true
|
||||
|
||||
- name: Install system dependencies
|
||||
run: |
|
||||
sudo apt update
|
||||
sudo apt install -y musl-tools build-essential lld libdbus-1-dev libwayland-dev libwebkit2gtk-4.1-dev libxdo-dev
|
||||
|
||||
- name: Test
|
||||
run: cargo test --all --exclude e2e_test
|
||||
tool: s3s-e2e
|
||||
git: https://github.com/Nugine/s3s.git
|
||||
rev: b7714bfaa17ddfa9b23ea01774a1e7bbdbfc2ca3
|
||||
|
||||
- name: Build debug
|
||||
run: |
|
||||
touch rustfs/build.rs
|
||||
cargo build -p rustfs --bins
|
||||
|
||||
- name: Pack artifacts
|
||||
run: |
|
||||
mkdir -p ./target/artifacts
|
||||
cp target/debug/rustfs ./target/artifacts/rustfs-debug
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: rustfs
|
||||
path: ./target/artifacts/*
|
||||
|
||||
- name: Install s3s-e2e
|
||||
run: |
|
||||
cargo install s3s-e2e --git https://github.com/Nugine/s3s.git
|
||||
s3s-e2e --version
|
||||
|
||||
- uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: rustfs
|
||||
path: ./target/artifacts
|
||||
|
||||
- name: Run s3s-e2e
|
||||
timeout-minutes: 10
|
||||
run: |
|
||||
./scripts/e2e-run.sh ./target/artifacts/rustfs-debug /tmp/rustfs
|
||||
s3s-e2e --version
|
||||
./scripts/e2e-run.sh ./target/debug/rustfs /tmp/rustfs
|
||||
|
||||
- uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: s3s-e2e.logs
|
||||
path: /tmp/rustfs.log
|
||||
|
||||
|
||||
|
||||
develop:
|
||||
needs: skip-check
|
||||
if: needs.skip-check.outputs.should_skip != 'true'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4.2.2
|
||||
- uses: ./.github/actions/setup
|
||||
|
||||
- name: Format
|
||||
run: cargo fmt --all --check
|
||||
|
||||
- name: Lint
|
||||
run: cargo check --all-targets
|
||||
|
||||
- name: Clippy
|
||||
run: cargo clippy --all-targets --all-features -- -D warnings
|
||||
path: /tmp/rustfs.log
|
||||
.github/workflows/docker.yml (new file, 227 lines)
@@ -0,0 +1,227 @@
|
||||
name: Build and Push Docker Images
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- main
|
||||
tags:
|
||||
- "v*"
|
||||
pull_request:
|
||||
branches:
|
||||
- main
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
push_to_registry:
|
||||
description: "Push images to registry"
|
||||
required: false
|
||||
default: "true"
|
||||
type: boolean
|
||||
|
||||
env:
|
||||
REGISTRY_IMAGE_DOCKERHUB: rustfs/rustfs
|
||||
REGISTRY_IMAGE_GHCR: ghcr.io/${{ github.repository }}
|
||||
|
||||
jobs:
|
||||
# Skip duplicate job runs
|
||||
skip-check:
|
||||
permissions:
|
||||
actions: write
|
||||
contents: read
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
should_skip: ${{ steps.skip_check.outputs.should_skip }}
|
||||
steps:
|
||||
- id: skip_check
|
||||
uses: fkirc/skip-duplicate-actions@v5
|
||||
with:
|
||||
concurrent_skipping: "same_content_newer"
|
||||
cancel_others: true
|
||||
paths_ignore: '["*.md", "docs/**"]'
|
||||
|
||||
# Build RustFS binary for different platforms
|
||||
build-binary:
|
||||
needs: skip-check
|
||||
if: needs.skip-check.outputs.should_skip != 'true'
|
||||
strategy:
|
||||
matrix:
|
||||
include:
|
||||
- target: x86_64-unknown-linux-musl
|
||||
os: ubuntu-latest
|
||||
arch: amd64
|
||||
use_cross: false
|
||||
- target: aarch64-unknown-linux-gnu
|
||||
os: ubuntu-latest
|
||||
arch: arm64
|
||||
use_cross: true
|
||||
runs-on: ${{ matrix.os }}
|
||||
timeout-minutes: 120
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Setup Rust toolchain
|
||||
uses: actions-rust-lang/setup-rust-toolchain@v1
|
||||
with:
|
||||
target: ${{ matrix.target }}
|
||||
components: rustfmt, clippy
|
||||
|
||||
- name: Install cross-compilation dependencies (native build)
|
||||
if: matrix.use_cross == false
|
||||
run: |
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y musl-tools
|
||||
|
||||
- name: Install cross tool (cross compilation)
|
||||
if: matrix.use_cross == true
|
||||
uses: taiki-e/install-action@v2
|
||||
with:
|
||||
tool: cross
|
||||
|
||||
- name: Install protoc
|
||||
uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
version: "31.1"
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Install flatc
|
||||
uses: Nugine/setup-flatc@v1
|
||||
with:
|
||||
version: "25.2.10"
|
||||
|
||||
- name: Cache cargo dependencies
|
||||
uses: actions/cache@v3
|
||||
with:
|
||||
path: |
|
||||
~/.cargo/registry
|
||||
~/.cargo/git
|
||||
target
|
||||
key: ${{ runner.os }}-cargo-${{ matrix.target }}-${{ hashFiles('**/Cargo.lock') }}
|
||||
restore-keys: |
|
||||
${{ runner.os }}-cargo-${{ matrix.target }}-
|
||||
${{ runner.os }}-cargo-
|
||||
|
||||
- name: Generate protobuf code
|
||||
run: cargo run --bin gproto
|
||||
|
||||
- name: Build RustFS binary (native)
|
||||
if: matrix.use_cross == false
|
||||
run: |
|
||||
cargo build --release --target ${{ matrix.target }} --bin rustfs
|
||||
|
||||
- name: Build RustFS binary (cross)
|
||||
if: matrix.use_cross == true
|
||||
run: |
|
||||
cross build --release --target ${{ matrix.target }} --bin rustfs
|
||||
|
||||
- name: Upload binary artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: rustfs-${{ matrix.arch }}
|
||||
path: target/${{ matrix.target }}/release/rustfs
|
||||
retention-days: 1
|
||||
|
||||
# Build and push multi-arch Docker images
|
||||
build-images:
|
||||
needs: [skip-check, build-binary]
|
||||
if: needs.skip-check.outputs.should_skip != 'true'
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
strategy:
|
||||
matrix:
|
||||
image-type: [production, ubuntu, rockylinux, devenv]
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
- name: Download binary artifacts
  uses: actions/download-artifact@v4
  with:
    path: ./artifacts

- name: Setup binary files
  run: |
    mkdir -p target/x86_64-unknown-linux-musl/release
    mkdir -p target/aarch64-unknown-linux-gnu/release
    cp artifacts/rustfs-amd64/rustfs target/x86_64-unknown-linux-musl/release/
    cp artifacts/rustfs-arm64/rustfs target/aarch64-unknown-linux-gnu/release/
    chmod +x target/*/release/rustfs

- name: Set up Docker Buildx
  uses: docker/setup-buildx-action@v3

- name: Set up QEMU
  uses: docker/setup-qemu-action@v3

- name: Login to Docker Hub
  if: github.event_name != 'pull_request' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/'))
  uses: docker/login-action@v3
  with:
    username: ${{ secrets.DOCKERHUB_USERNAME }}
    password: ${{ secrets.DOCKERHUB_TOKEN }}

- name: Login to GitHub Container Registry
  if: github.event_name != 'pull_request' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/'))
  uses: docker/login-action@v3
  with:
    registry: ghcr.io
    username: ${{ github.actor }}
    password: ${{ secrets.GITHUB_TOKEN }}

- name: Set Dockerfile and context
  id: dockerfile
  run: |
    case "${{ matrix.image-type }}" in
      production)
        echo "dockerfile=Dockerfile" >> $GITHUB_OUTPUT
        echo "context=." >> $GITHUB_OUTPUT
        echo "suffix=" >> $GITHUB_OUTPUT
        ;;
      ubuntu)
        echo "dockerfile=.docker/Dockerfile.ubuntu22.04" >> $GITHUB_OUTPUT
        echo "context=." >> $GITHUB_OUTPUT
        echo "suffix=-ubuntu22.04" >> $GITHUB_OUTPUT
        ;;
      rockylinux)
        echo "dockerfile=.docker/Dockerfile.rockylinux9.3" >> $GITHUB_OUTPUT
        echo "context=." >> $GITHUB_OUTPUT
        echo "suffix=-rockylinux9.3" >> $GITHUB_OUTPUT
        ;;
      devenv)
        echo "dockerfile=.docker/Dockerfile.devenv" >> $GITHUB_OUTPUT
        echo "context=." >> $GITHUB_OUTPUT
        echo "suffix=-devenv" >> $GITHUB_OUTPUT
        ;;
    esac

- name: Extract metadata
  id: meta
  uses: docker/metadata-action@v5
  with:
    images: |
      ${{ env.REGISTRY_IMAGE_DOCKERHUB }}
      ${{ env.REGISTRY_IMAGE_GHCR }}
    tags: |
      type=ref,event=branch,suffix=${{ steps.dockerfile.outputs.suffix }}
      type=ref,event=pr,suffix=${{ steps.dockerfile.outputs.suffix }}
      type=semver,pattern={{version}},suffix=${{ steps.dockerfile.outputs.suffix }}
      type=semver,pattern={{major}}.{{minor}},suffix=${{ steps.dockerfile.outputs.suffix }}
      type=semver,pattern={{major}},suffix=${{ steps.dockerfile.outputs.suffix }}
      type=raw,value=latest,suffix=${{ steps.dockerfile.outputs.suffix }},enable={{is_default_branch}}
    flavor: |
      latest=false

- name: Build and push multi-arch Docker image
  uses: docker/build-push-action@v5
  with:
    context: ${{ steps.dockerfile.outputs.context }}
    file: ${{ steps.dockerfile.outputs.dockerfile }}
    platforms: linux/amd64,linux/arm64
    push: ${{ (github.event_name != 'pull_request' && (github.ref == 'refs/heads/main' || startsWith(github.ref, 'refs/tags/'))) || github.event.inputs.push_to_registry == 'true' }}
    tags: ${{ steps.meta.outputs.tags }}
    labels: ${{ steps.meta.outputs.labels }}
    cache-from: type=gha,scope=${{ matrix.image-type }}
    cache-to: type=gha,mode=max,scope=${{ matrix.image-type }}
    build-args: |
      BUILDTIME=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.created'] }}
      VERSION=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.version'] }}
      REVISION=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }}
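Once that build-and-push step runs on main or on a tag, the pushed manifest list should advertise both architectures. A quick, hedged way to confirm (assuming the Docker Hub repository is `rustfs/rustfs`, the name used by the Makefile targets later in this commit):

```bash
# Inspect the pushed manifest; both linux/amd64 and linux/arm64 entries should be listed.
docker buildx imagetools inspect rustfs/rustfs:latest
```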
.gitignore (2 changes, vendored)
@@ -17,4 +17,4 @@ deploy/certs/*
.rustfs.sys
.cargo
profile.json
.docker/openobserve-otel/data
.docker/openobserve-otel/data
Cargo.toml (39 changes)
@@ -3,13 +3,16 @@ members = [
    "appauth",                # Application authentication and authorization
    "cli/rustfs-gui",         # Graphical user interface client
    "common/common",          # Shared utilities and data structures
    "crates/filemeta",        # File metadata management
    "common/lock",            # Distributed locking implementation
    "common/protos",          # Protocol buffer definitions
    "common/workers",         # Worker thread pools and task scheduling
    "crates/config",          # Configuration management
    "crates/event-notifier",  # Event notification system
    "crates/notify",          # Notification system for events
    "crates/obs",             # Observability utilities
    "crates/rio",             # Rust I/O utilities and abstractions
    "crates/utils",           # Utility functions and helpers
    "crates/zip",             # ZIP file handling and compression
    "crypto",                 # Cryptography and security features
    "ecstore",                # Erasure coding storage implementation
    "e2e_test",               # End-to-end test suite
@@ -18,10 +21,8 @@ members = [
    "rustfs",                 # Core file system implementation
    "s3select/api",           # S3 Select API interface
    "s3select/query",         # S3 Select query engine
    "crates/zip",
    "crates/filemeta",
    "crates/rio",
    "reader",

]
resolver = "2"

@@ -54,12 +55,10 @@ rustfs = { path = "./rustfs", version = "0.0.1" }
rustfs-zip = { path = "./crates/zip", version = "0.0.1" }
rustfs-config = { path = "./crates/config", version = "0.0.1" }
rustfs-obs = { path = "crates/obs", version = "0.0.1" }
rustfs-event-notifier = { path = "crates/event-notifier", version = "0.0.1" }
rustfs-notify = { path = "crates/notify", version = "0.0.1" }
rustfs-utils = { path = "crates/utils", version = "0.0.1" }
rustfs-rio = { path = "crates/rio", version = "0.0.1" }
rustfs-filemeta = { path = "crates/filemeta", version = "0.0.1" }
rustfs-disk = { path = "crates/disk", version = "0.0.1" }
rustfs-error = { path = "crates/error", version = "0.0.1" }
workers = { path = "./common/workers", version = "0.0.1" }
reader = { path = "./reader", version = "0.0.1" }
aes-gcm = { version = "0.10.3", features = ["std"] }
@@ -75,24 +74,25 @@ axum-extra = "0.10.1"
axum-server = { version = "0.7.2", features = ["tls-rustls"] }
backon = "1.5.1"
base64-simd = "0.8.0"
base64 = "0.22.1"
blake2 = "0.10.6"
bytes = "1.10.1"
bytes = { version = "1.10.1", features = ["serde"] }
bytesize = "2.0.1"
byteorder = "1.5.0"
cfg-if = "1.0.0"
chacha20poly1305 = { version = "0.10.1" }
chrono = { version = "0.4.41", features = ["serde"] }
clap = { version = "4.5.40", features = ["derive", "env"] }
config = "0.15.11"
const-str = { version = "0.6.2", features = ["std", "proc"] }
crc32fast = "1.4.2"
dashmap = "6.1.0"
datafusion = "46.0.1"
derive_builder = "0.20.2"
dotenvy = "0.15.7"
dioxus = { version = "0.6.3", features = ["router"] }
dirs = "6.0.0"
flatbuffers = "25.2.10"
flexi_logger = { version = "0.30.2", features = ["trc","dont_minimize_extra_stacks"] }
form_urlencoded = "1.2.1"
futures = "0.3.31"
futures-core = "0.3.31"
futures-util = "0.3.31"
@@ -100,6 +100,7 @@ glob = "0.3.2"
hex = "0.4.3"
hex-simd = "0.8.0"
highway = { version = "1.3.0" }
hmac = "0.12.1"
hyper = "1.6.0"
hyper-util = { version = "0.1.14", features = [
    "tokio",
@@ -158,12 +159,17 @@ pin-project-lite = "0.2.16"
prost = "0.13.5"
prost-build = "0.13.5"
protobuf = "3.7"
quick-xml = "0.37.5"
rand = "0.9.1"
brotli = "8.0.1"
flate2 = "1.1.1"
zstd = "0.13.3"
lz4 = "1.28.1"
rdkafka = { version = "0.37.0", features = ["tokio"] }
reed-solomon-erasure = { version = "6.0.0", features = ["simd-accel"] }

reed-solomon-simd = { version = "3.0.0" }
regex = { version = "1.11.1" }
reqwest = { version = "0.12.19", default-features = false, features = [
reqwest = { version = "0.12.20", default-features = false, features = [
    "rustls-tls",
    "charset",
    "http2",
@@ -181,6 +187,7 @@ rmp-serde = "1.3.0"
rsa = "0.9.8"
rumqttc = { version = "0.24" }
rust-embed = { version = "8.7.2" }
rust-i18n = { version = "3.1.4" }
rustfs-rsc = "2025.506.1"
rustls = { version = "0.23.27" }
rustls-pki-types = "1.12.0"
@@ -193,7 +200,6 @@ serde = { version = "1.0.219", features = ["derive"] }
serde_json = "1.0.140"
serde-xml-rs = "0.8.1"
serde_urlencoded = "0.7.1"
serde_with = "3.12.0"
sha1 = "0.10.6"
sha2 = "0.10.9"
hmac = "0.12.1"
@@ -201,6 +207,7 @@ std-next = "0.1.8"
siphasher = "1.0.1"
smallvec = { version = "1.15.1", features = ["serde"] }
snafu = "0.8.6"
snap = "1.1.1"
socket2 = "0.5.10"
strum = { version = "0.27.1", features = ["derive"] }
sysinfo = "0.35.2"
@@ -214,13 +221,14 @@ time = { version = "0.3.41", features = [
    "macros",
    "serde",
] }

tokio = { version = "1.45.1", features = ["fs", "rt-multi-thread"] }
tonic = { version = "0.13.1", features = ["gzip"] }
tonic-build = { version = "0.13.1" }
tokio-rustls = { version = "0.26.2", default-features = false }
tokio-stream = { version = "0.1.17" }
tokio-tar = "0.3.1"
tokio-util = { version = "0.7.15", features = ["io", "compat"] }
tonic = { version = "0.13.1", features = ["gzip"] }
tonic-build = { version = "0.13.1" }
async-channel = "2.3.1"
tower = { version = "0.5.2", features = ["timeout"] }
tower-http = { version = "0.6.6", features = ["cors"] }
@@ -238,6 +246,7 @@ uuid = { version = "1.17.0", features = [
    "fast-rng",
    "macro-diagnostics",
] }
wildmatch = { version = "2.4.0", features = ["serde"] }
winapi = { version = "0.3.9" }
xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }

Dockerfile (32 changes)
@@ -1,17 +1,37 @@
FROM alpine:latest

# RUN apk add --no-cache <package-name>
# Install runtime dependencies
RUN apk add --no-cache \
    ca-certificates \
    tzdata \
    && rm -rf /var/cache/apk/*

# Create rustfs user and group
RUN addgroup -g 1000 rustfs && \
    adduser -D -s /bin/sh -u 1000 -G rustfs rustfs

WORKDIR /app

RUN mkdir -p /data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3
# Create data directories
RUN mkdir -p /data/rustfs{0,1,2,3} && \
    chown -R rustfs:rustfs /data /app

COPY ./target/x86_64-unknown-linux-musl/release/rustfs /app/rustfs
# Copy binary based on target architecture
COPY --chown=rustfs:rustfs \
    target/*/release/rustfs \
    /app/rustfs

RUN chmod +x /app/rustfs

EXPOSE 9000
EXPOSE 9001
# Switch to non-root user
USER rustfs

# Expose ports
EXPOSE 9000 9001

CMD ["/app/rustfs"]
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --no-verbose --tries=1 --spider http://localhost:9000/health || exit 1

# Set default command
CMD ["/app/rustfs"]

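For a local smoke test of the updated Alpine image, a minimal sketch (assuming a musl binary has already been built so the `COPY target/*/release/rustfs` line finds it; the `rustfs:local` tag is just a placeholder):

```bash
# Build the musl binary the Dockerfile expects, then build and run the image locally.
cargo build --target x86_64-unknown-linux-musl --bin rustfs -r
docker build -t rustfs:local .
docker run --rm -p 9000:9000 -p 9001:9001 rustfs:local
```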
Dockerfile.multi-stage (new file, 121 lines)
@@ -0,0 +1,121 @@
|
||||
# Multi-stage Dockerfile for RustFS
|
||||
# Supports cross-compilation for amd64 and arm64 architectures
|
||||
ARG TARGETPLATFORM
|
||||
ARG BUILDPLATFORM
|
||||
|
||||
# Build stage
|
||||
FROM --platform=$BUILDPLATFORM rust:1.85-bookworm AS builder
|
||||
|
||||
# Install required build dependencies
|
||||
RUN apt-get update && apt-get install -y \
|
||||
wget \
|
||||
git \
|
||||
curl \
|
||||
unzip \
|
||||
gcc \
|
||||
pkg-config \
|
||||
libssl-dev \
|
||||
lld \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install cross-compilation tools for ARM64
|
||||
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
|
||||
apt-get update && \
|
||||
apt-get install -y gcc-aarch64-linux-gnu && \
|
||||
rm -rf /var/lib/apt/lists/*; \
|
||||
fi
|
||||
|
||||
# Install protoc
|
||||
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
|
||||
&& unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
|
||||
&& mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
|
||||
&& mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3
|
||||
|
||||
# Install flatc
|
||||
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
|
||||
&& unzip Linux.flatc.binary.g++-13.zip \
|
||||
&& mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip
|
||||
|
||||
# Set up Rust targets based on platform
|
||||
RUN case "$TARGETPLATFORM" in \
|
||||
"linux/amd64") rustup target add x86_64-unknown-linux-gnu ;; \
|
||||
"linux/arm64") rustup target add aarch64-unknown-linux-gnu ;; \
|
||||
*) echo "Unsupported platform: $TARGETPLATFORM" && exit 1 ;; \
|
||||
esac
|
||||
|
||||
# Set up environment for cross-compilation
|
||||
ENV CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc
|
||||
ENV CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc
|
||||
ENV CXX_aarch64_unknown_linux_gnu=aarch64-linux-gnu-g++
|
||||
|
||||
WORKDIR /usr/src/rustfs
|
||||
|
||||
# Copy Cargo files for dependency caching
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
COPY */Cargo.toml ./*/
|
||||
|
||||
# Create dummy main.rs files for dependency compilation
|
||||
RUN find . -name "Cargo.toml" -not -path "./Cargo.toml" | \
|
||||
xargs -I {} dirname {} | \
|
||||
xargs -I {} sh -c 'mkdir -p {}/src && echo "fn main() {}" > {}/src/main.rs'
|
||||
|
||||
# Build dependencies only (cache layer)
|
||||
RUN case "$TARGETPLATFORM" in \
|
||||
"linux/amd64") cargo build --release --target x86_64-unknown-linux-gnu ;; \
|
||||
"linux/arm64") cargo build --release --target aarch64-unknown-linux-gnu ;; \
|
||||
esac
|
||||
|
||||
# Copy source code
|
||||
COPY . .
|
||||
|
||||
# Generate protobuf code
|
||||
RUN cargo run --bin gproto
|
||||
|
||||
# Build the actual application
|
||||
RUN case "$TARGETPLATFORM" in \
|
||||
"linux/amd64") \
|
||||
cargo build --release --target x86_64-unknown-linux-gnu --bin rustfs && \
|
||||
cp target/x86_64-unknown-linux-gnu/release/rustfs /usr/local/bin/rustfs \
|
||||
;; \
|
||||
"linux/arm64") \
|
||||
cargo build --release --target aarch64-unknown-linux-gnu --bin rustfs && \
|
||||
cp target/aarch64-unknown-linux-gnu/release/rustfs /usr/local/bin/rustfs \
|
||||
;; \
|
||||
esac
|
||||
|
||||
# Runtime stage - Ubuntu minimal for better compatibility
|
||||
FROM ubuntu:22.04
|
||||
|
||||
# Install runtime dependencies
|
||||
RUN apt-get update && apt-get install -y \
|
||||
ca-certificates \
|
||||
tzdata \
|
||||
wget \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Create rustfs user and group
|
||||
RUN groupadd -g 1000 rustfs && \
|
||||
useradd -d /app -g rustfs -u 1000 -s /bin/bash rustfs
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Create data directories
|
||||
RUN mkdir -p /data/rustfs{0,1,2,3} && \
|
||||
chown -R rustfs:rustfs /data /app
|
||||
|
||||
# Copy binary from builder stage
|
||||
COPY --from=builder /usr/local/bin/rustfs /app/rustfs
|
||||
RUN chmod +x /app/rustfs && chown rustfs:rustfs /app/rustfs
|
||||
|
||||
# Switch to non-root user
|
||||
USER rustfs
|
||||
|
||||
# Expose ports
|
||||
EXPOSE 9000 9001
|
||||
|
||||
# Health check
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
CMD wget --no-verbose --tries=1 --spider http://localhost:9000/health || exit 1
|
||||
|
||||
# Set default command
|
||||
CMD ["/app/rustfs"]
|
||||
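The new multi-stage Dockerfile is meant to be driven by Buildx so that `TARGETPLATFORM` is populated per platform; a hedged invocation sketch (standard Buildx flags, not copied from this commit, and the tag is a placeholder):

```bash
# Cross-build both architectures from source with the multi-stage Dockerfile.
# Drop --push to keep the result local instead of publishing it.
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  -f Dockerfile.multi-stage \
  -t rustfs/rustfs:latest \
  --push .
```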
Makefile (118 changes)
@@ -79,3 +79,121 @@ build: BUILD_CMD = /root/.cargo/bin/cargo build --release --bin rustfs --target-
|
||||
build:
|
||||
$(DOCKER_CLI) build -t $(ROCKYLINUX_BUILD_IMAGE_NAME) -f $(DOCKERFILE_PATH)/Dockerfile.$(BUILD_OS) .
|
||||
$(DOCKER_CLI) run --rm --name $(ROCKYLINUX_BUILD_CONTAINER_NAME) -v $(shell pwd):/root/s3-rustfs -it $(ROCKYLINUX_BUILD_IMAGE_NAME) $(BUILD_CMD)
|
||||
|
||||
.PHONY: build-musl
|
||||
build-musl:
|
||||
@echo "🔨 Building rustfs for x86_64-unknown-linux-musl..."
|
||||
cargo build --target x86_64-unknown-linux-musl --bin rustfs -r
|
||||
|
||||
.PHONY: build-gnu
|
||||
build-gnu:
|
||||
@echo "🔨 Building rustfs for x86_64-unknown-linux-gnu..."
|
||||
cargo build --target x86_64-unknown-linux-gnu --bin rustfs -r
|
||||
|
||||
.PHONY: deploy-dev
|
||||
deploy-dev: build-musl
|
||||
@echo "🚀 Deploying to dev server: $${IP}"
|
||||
./scripts/dev_deploy.sh $${IP}
|
||||
|
||||
# Multi-architecture Docker build targets
|
||||
.PHONY: docker-build-multiarch
|
||||
docker-build-multiarch:
|
||||
@echo "🏗️ Building multi-architecture Docker images..."
|
||||
./scripts/build-docker-multiarch.sh
|
||||
|
||||
.PHONY: docker-build-multiarch-push
|
||||
docker-build-multiarch-push:
|
||||
@echo "🚀 Building and pushing multi-architecture Docker images..."
|
||||
./scripts/build-docker-multiarch.sh --push
|
||||
|
||||
.PHONY: docker-build-multiarch-version
|
||||
docker-build-multiarch-version:
|
||||
@if [ -z "$(VERSION)" ]; then \
|
||||
echo "❌ 错误: 请指定版本, 例如: make docker-build-multiarch-version VERSION=v1.0.0"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "🏗️ Building multi-architecture Docker images (version: $(VERSION))..."
|
||||
./scripts/build-docker-multiarch.sh --version $(VERSION)
|
||||
|
||||
.PHONY: docker-push-multiarch-version
|
||||
docker-push-multiarch-version:
|
||||
@if [ -z "$(VERSION)" ]; then \
|
||||
echo "❌ 错误: 请指定版本, 例如: make docker-push-multiarch-version VERSION=v1.0.0"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "🚀 Building and pushing multi-architecture Docker images (version: $(VERSION))..."
|
||||
./scripts/build-docker-multiarch.sh --version $(VERSION) --push
|
||||
|
||||
.PHONY: docker-build-ubuntu
|
||||
docker-build-ubuntu:
|
||||
@echo "🏗️ Building multi-architecture Ubuntu Docker images..."
|
||||
./scripts/build-docker-multiarch.sh --type ubuntu
|
||||
|
||||
.PHONY: docker-build-rockylinux
|
||||
docker-build-rockylinux:
|
||||
@echo "🏗️ Building multi-architecture RockyLinux Docker images..."
|
||||
./scripts/build-docker-multiarch.sh --type rockylinux
|
||||
|
||||
.PHONY: docker-build-devenv
|
||||
docker-build-devenv:
|
||||
@echo "🏗️ Building multi-architecture development environment Docker images..."
|
||||
./scripts/build-docker-multiarch.sh --type devenv
|
||||
|
||||
.PHONY: docker-build-all-types
|
||||
docker-build-all-types:
|
||||
@echo "🏗️ Building all multi-architecture Docker image types..."
|
||||
./scripts/build-docker-multiarch.sh --type production
|
||||
./scripts/build-docker-multiarch.sh --type ubuntu
|
||||
./scripts/build-docker-multiarch.sh --type rockylinux
|
||||
./scripts/build-docker-multiarch.sh --type devenv
|
||||
|
||||
.PHONY: docker-inspect-multiarch
|
||||
docker-inspect-multiarch:
|
||||
@if [ -z "$(IMAGE)" ]; then \
|
||||
echo "❌ 错误: 请指定镜像, 例如: make docker-inspect-multiarch IMAGE=rustfs/rustfs:latest"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "🔍 Inspecting multi-architecture image: $(IMAGE)"
|
||||
docker buildx imagetools inspect $(IMAGE)
|
||||
|
||||
.PHONY: build-cross-all
|
||||
build-cross-all:
|
||||
@echo "🔧 Building all target architectures..."
|
||||
@if ! command -v cross &> /dev/null; then \
|
||||
echo "📦 Installing cross..."; \
|
||||
cargo install cross; \
|
||||
fi
|
||||
@echo "🔨 Generating protobuf code..."
|
||||
cargo run --bin gproto || true
|
||||
@echo "🔨 Building x86_64-unknown-linux-musl..."
|
||||
cargo build --release --target x86_64-unknown-linux-musl --bin rustfs
|
||||
@echo "🔨 Building aarch64-unknown-linux-gnu..."
|
||||
cross build --release --target aarch64-unknown-linux-gnu --bin rustfs
|
||||
@echo "✅ All architectures built successfully!"
|
||||
|
||||
.PHONY: help-docker
|
||||
help-docker:
|
||||
@echo "🐳 Docker 多架构构建帮助:"
|
||||
@echo ""
|
||||
@echo "基本构建:"
|
||||
@echo " make docker-build-multiarch # 构建多架构镜像(不推送)"
|
||||
@echo " make docker-build-multiarch-push # 构建并推送多架构镜像"
|
||||
@echo ""
|
||||
@echo "版本构建:"
|
||||
@echo " make docker-build-multiarch-version VERSION=v1.0.0 # 构建指定版本"
|
||||
@echo " make docker-push-multiarch-version VERSION=v1.0.0 # 构建并推送指定版本"
|
||||
@echo ""
|
||||
@echo "镜像类型:"
|
||||
@echo " make docker-build-ubuntu # 构建 Ubuntu 镜像"
|
||||
@echo " make docker-build-rockylinux # 构建 RockyLinux 镜像"
|
||||
@echo " make docker-build-devenv # 构建开发环境镜像"
|
||||
@echo " make docker-build-all-types # 构建所有类型镜像"
|
||||
@echo ""
|
||||
@echo "辅助工具:"
|
||||
@echo " make build-cross-all # 构建所有架构的二进制文件"
|
||||
@echo " make docker-inspect-multiarch IMAGE=xxx # 检查镜像的架构支持"
|
||||
@echo ""
|
||||
@echo "环境变量 (在推送时需要设置):"
|
||||
@echo " DOCKERHUB_USERNAME Docker Hub 用户名"
|
||||
@echo " DOCKERHUB_TOKEN Docker Hub 访问令牌"
|
||||
@echo " GITHUB_TOKEN GitHub 访问令牌"
|
||||
|
||||
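Taken together, the new Makefile targets compose into a typical release flow; a short usage sketch based on the help text above (version, image name, and credentials are placeholders):

```bash
# Build all architecture binaries, then build, push, and inspect versioned multi-arch images.
export DOCKERHUB_USERNAME="<your-user>" DOCKERHUB_TOKEN="<your-token>" GITHUB_TOKEN="<your-token>"
make build-cross-all
make docker-push-multiarch-version VERSION=v1.0.0
make docker-inspect-multiarch IMAGE=rustfs/rustfs:latest
```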
README.md (31 changes)
@@ -73,9 +73,9 @@ export RUSTFS_OBS_ENDPOINT="http://localhost:4317"
./rustfs /data/rustfs
```

### Observability Stack
## Observability Stack Otel and OpenObserve

#### Deployment
### OpenTelemetry Collector 和 Jaeger、Grafana、Prometheus、Loki

1. Navigate to the observability directory:
   ```bash
@@ -92,3 +92,30 @@ export RUSTFS_OBS_ENDPOINT="http://localhost:4317"
   - Grafana: `http://localhost:3000` (credentials: `admin`/`admin`)
   - Jaeger: `http://localhost:16686`
   - Prometheus: `http://localhost:9090`

#### Configure observability

```
OpenTelemetry Collector address(endpoint): http://localhost:4317
```

---

### OpenObserve and OpenTelemetry Collector

1. Navigate to the OpenObserve and OpenTelemetry directory:
   ```bash
   cd .docker/openobserve-otel
   ```
2. Start the OpenObserve and OpenTelemetry Collector services:
   ```bash
   docker compose -f docker-compose.yml up -d
   ```
3. Access the OpenObserve UI:
   OpenObserve UI: `http://localhost:5080`
   - Default credentials:
     - Username: `root@rustfs.com`
     - Password: `rustfs123`
   - Exposed ports:
     - 5080: HTTP API and UI
     - 5081: OTLP gRPC

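The README additions describe one loop end to end; a condensed sketch of the documented steps (paths, ports, and the endpoint all come from the text above):

```bash
# Start OpenObserve + OpenTelemetry Collector, then point RustFS at the collector.
cd .docker/openobserve-otel
docker compose -f docker-compose.yml up -d
export RUSTFS_OBS_ENDPOINT="http://localhost:4317"
./rustfs /data/rustfs
```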
README_ZH.md (28 changes)
@@ -72,9 +72,9 @@ export RUSTFS_OBS_ENDPOINT="http://localhost:4317"
|
||||
./rustfs /data/rustfs
|
||||
```
|
||||
|
||||
### 可观测性系统
|
||||
## 可观测性系统 Otel 和 OpenObserve
|
||||
|
||||
#### 部署
|
||||
### OpenTelemetry Collector 和 Jaeger、Grafana、Prometheus、Loki
|
||||
|
||||
1. 进入可观测性目录:
|
||||
```bash
|
||||
@@ -96,4 +96,26 @@ export RUSTFS_OBS_ENDPOINT="http://localhost:4317"
|
||||
|
||||
```
|
||||
OpenTelemetry Collector 地址(endpoint): http://localhost:4317
|
||||
```
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
### OpenObserve 和 OpenTelemetry Collector
|
||||
|
||||
1. 进入 OpenObserve 和 OpenTelemetry 目录:
|
||||
```bash
|
||||
cd .docker/openobserve-otel
|
||||
```
|
||||
2. 启动 OpenObserve 和 OpenTelemetry Collector 服务:
|
||||
```bash
|
||||
docker compose -f docker-compose.yml up -d
|
||||
```
|
||||
3. 访问 OpenObserve UI:
|
||||
OpenObserve UI: `http://localhost:5080`
|
||||
- 默认凭据:
|
||||
- 用户名:`root@rustfs.com`
|
||||
- 密码:`rustfs123`
|
||||
- 开放端口:
|
||||
- 5080:HTTP API 和 UI
|
||||
- 5081:OTLP gRPC
|
||||
|
||||
|
||||
@@ -15,6 +15,7 @@ keyring = { workspace = true }
lazy_static = { workspace = true }
rfd = { workspace = true }
rust-embed = { workspace = true, features = ["interpolate-folder-path"] }
rust-i18n = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
sha2 = { workspace = true }

@@ -11,15 +11,15 @@ pub struct Error {
|
||||
pub struct PingRequest {
|
||||
#[prost(uint64, tag = "1")]
|
||||
pub version: u64,
|
||||
#[prost(bytes = "vec", tag = "2")]
|
||||
pub body: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "2")]
|
||||
pub body: ::prost::bytes::Bytes,
|
||||
}
|
||||
#[derive(Clone, PartialEq, ::prost::Message)]
|
||||
pub struct PingResponse {
|
||||
#[prost(uint64, tag = "1")]
|
||||
pub version: u64,
|
||||
#[prost(bytes = "vec", tag = "2")]
|
||||
pub body: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "2")]
|
||||
pub body: ::prost::bytes::Bytes,
|
||||
}
|
||||
#[derive(Clone, PartialEq, ::prost::Message)]
|
||||
pub struct HealBucketRequest {
|
||||
@@ -105,8 +105,8 @@ pub struct ReadAllRequest {
|
||||
pub struct ReadAllResponse {
|
||||
#[prost(bool, tag = "1")]
|
||||
pub success: bool,
|
||||
#[prost(bytes = "vec", tag = "2")]
|
||||
pub data: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "2")]
|
||||
pub data: ::prost::bytes::Bytes,
|
||||
#[prost(message, optional, tag = "3")]
|
||||
pub error: ::core::option::Option<Error>,
|
||||
}
|
||||
@@ -119,8 +119,8 @@ pub struct WriteAllRequest {
|
||||
pub volume: ::prost::alloc::string::String,
|
||||
#[prost(string, tag = "3")]
|
||||
pub path: ::prost::alloc::string::String,
|
||||
#[prost(bytes = "vec", tag = "4")]
|
||||
pub data: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "4")]
|
||||
pub data: ::prost::bytes::Bytes,
|
||||
}
|
||||
#[derive(Clone, PartialEq, ::prost::Message)]
|
||||
pub struct WriteAllResponse {
|
||||
@@ -202,8 +202,8 @@ pub struct RenamePartRequest {
|
||||
pub dst_volume: ::prost::alloc::string::String,
|
||||
#[prost(string, tag = "5")]
|
||||
pub dst_path: ::prost::alloc::string::String,
|
||||
#[prost(bytes = "vec", tag = "6")]
|
||||
pub meta: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "6")]
|
||||
pub meta: ::prost::bytes::Bytes,
|
||||
}
|
||||
#[derive(Clone, PartialEq, ::prost::Message)]
|
||||
pub struct RenamePartResponse {
|
||||
@@ -243,8 +243,8 @@ pub struct WriteRequest {
|
||||
pub path: ::prost::alloc::string::String,
|
||||
#[prost(bool, tag = "4")]
|
||||
pub is_append: bool,
|
||||
#[prost(bytes = "vec", tag = "5")]
|
||||
pub data: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "5")]
|
||||
pub data: ::prost::bytes::Bytes,
|
||||
}
|
||||
#[derive(Clone, PartialEq, ::prost::Message)]
|
||||
pub struct WriteResponse {
|
||||
@@ -271,8 +271,8 @@ pub struct ReadAtRequest {
|
||||
pub struct ReadAtResponse {
|
||||
#[prost(bool, tag = "1")]
|
||||
pub success: bool,
|
||||
#[prost(bytes = "vec", tag = "2")]
|
||||
pub data: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "2")]
|
||||
pub data: ::prost::bytes::Bytes,
|
||||
#[prost(int64, tag = "3")]
|
||||
pub read_size: i64,
|
||||
#[prost(message, optional, tag = "4")]
|
||||
@@ -300,8 +300,8 @@ pub struct WalkDirRequest {
|
||||
/// indicate which one in the disks
|
||||
#[prost(string, tag = "1")]
|
||||
pub disk: ::prost::alloc::string::String,
|
||||
#[prost(bytes = "vec", tag = "2")]
|
||||
pub walk_dir_options: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "2")]
|
||||
pub walk_dir_options: ::prost::bytes::Bytes,
|
||||
}
|
||||
#[derive(Clone, PartialEq, ::prost::Message)]
|
||||
pub struct WalkDirResponse {
|
||||
@@ -633,8 +633,8 @@ pub struct LocalStorageInfoRequest {
|
||||
pub struct LocalStorageInfoResponse {
|
||||
#[prost(bool, tag = "1")]
|
||||
pub success: bool,
|
||||
#[prost(bytes = "vec", tag = "2")]
|
||||
pub storage_info: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "2")]
|
||||
pub storage_info: ::prost::bytes::Bytes,
|
||||
#[prost(string, optional, tag = "3")]
|
||||
pub error_info: ::core::option::Option<::prost::alloc::string::String>,
|
||||
}
|
||||
@@ -647,8 +647,8 @@ pub struct ServerInfoRequest {
|
||||
pub struct ServerInfoResponse {
|
||||
#[prost(bool, tag = "1")]
|
||||
pub success: bool,
|
||||
#[prost(bytes = "vec", tag = "2")]
|
||||
pub server_properties: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "2")]
|
||||
pub server_properties: ::prost::bytes::Bytes,
|
||||
#[prost(string, optional, tag = "3")]
|
||||
pub error_info: ::core::option::Option<::prost::alloc::string::String>,
|
||||
}
|
||||
@@ -658,8 +658,8 @@ pub struct GetCpusRequest {}
|
||||
pub struct GetCpusResponse {
|
||||
#[prost(bool, tag = "1")]
|
||||
pub success: bool,
|
||||
#[prost(bytes = "vec", tag = "2")]
|
||||
pub cpus: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "2")]
|
||||
pub cpus: ::prost::bytes::Bytes,
|
||||
#[prost(string, optional, tag = "3")]
|
||||
pub error_info: ::core::option::Option<::prost::alloc::string::String>,
|
||||
}
|
||||
@@ -669,8 +669,8 @@ pub struct GetNetInfoRequest {}
|
||||
pub struct GetNetInfoResponse {
|
||||
#[prost(bool, tag = "1")]
|
||||
pub success: bool,
|
||||
#[prost(bytes = "vec", tag = "2")]
|
||||
pub net_info: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "2")]
|
||||
pub net_info: ::prost::bytes::Bytes,
|
||||
#[prost(string, optional, tag = "3")]
|
||||
pub error_info: ::core::option::Option<::prost::alloc::string::String>,
|
||||
}
|
||||
@@ -680,8 +680,8 @@ pub struct GetPartitionsRequest {}
|
||||
pub struct GetPartitionsResponse {
|
||||
#[prost(bool, tag = "1")]
|
||||
pub success: bool,
|
||||
#[prost(bytes = "vec", tag = "2")]
|
||||
pub partitions: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "2")]
|
||||
pub partitions: ::prost::bytes::Bytes,
|
||||
#[prost(string, optional, tag = "3")]
|
||||
pub error_info: ::core::option::Option<::prost::alloc::string::String>,
|
||||
}
|
||||
@@ -691,8 +691,8 @@ pub struct GetOsInfoRequest {}
|
||||
pub struct GetOsInfoResponse {
|
||||
#[prost(bool, tag = "1")]
|
||||
pub success: bool,
|
||||
#[prost(bytes = "vec", tag = "2")]
|
||||
pub os_info: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "2")]
|
||||
pub os_info: ::prost::bytes::Bytes,
|
||||
#[prost(string, optional, tag = "3")]
|
||||
pub error_info: ::core::option::Option<::prost::alloc::string::String>,
|
||||
}
|
||||
@@ -702,8 +702,8 @@ pub struct GetSeLinuxInfoRequest {}
|
||||
pub struct GetSeLinuxInfoResponse {
|
||||
#[prost(bool, tag = "1")]
|
||||
pub success: bool,
|
||||
#[prost(bytes = "vec", tag = "2")]
|
||||
pub sys_services: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "2")]
|
||||
pub sys_services: ::prost::bytes::Bytes,
|
||||
#[prost(string, optional, tag = "3")]
|
||||
pub error_info: ::core::option::Option<::prost::alloc::string::String>,
|
||||
}
|
||||
@@ -713,8 +713,8 @@ pub struct GetSysConfigRequest {}
|
||||
pub struct GetSysConfigResponse {
|
||||
#[prost(bool, tag = "1")]
|
||||
pub success: bool,
|
||||
#[prost(bytes = "vec", tag = "2")]
|
||||
pub sys_config: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "2")]
|
||||
pub sys_config: ::prost::bytes::Bytes,
|
||||
#[prost(string, optional, tag = "3")]
|
||||
pub error_info: ::core::option::Option<::prost::alloc::string::String>,
|
||||
}
|
||||
@@ -724,8 +724,8 @@ pub struct GetSysErrorsRequest {}
|
||||
pub struct GetSysErrorsResponse {
|
||||
#[prost(bool, tag = "1")]
|
||||
pub success: bool,
|
||||
#[prost(bytes = "vec", tag = "2")]
|
||||
pub sys_errors: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "2")]
|
||||
pub sys_errors: ::prost::bytes::Bytes,
|
||||
#[prost(string, optional, tag = "3")]
|
||||
pub error_info: ::core::option::Option<::prost::alloc::string::String>,
|
||||
}
|
||||
@@ -735,24 +735,24 @@ pub struct GetMemInfoRequest {}
|
||||
pub struct GetMemInfoResponse {
|
||||
#[prost(bool, tag = "1")]
|
||||
pub success: bool,
|
||||
#[prost(bytes = "vec", tag = "2")]
|
||||
pub mem_info: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "2")]
|
||||
pub mem_info: ::prost::bytes::Bytes,
|
||||
#[prost(string, optional, tag = "3")]
|
||||
pub error_info: ::core::option::Option<::prost::alloc::string::String>,
|
||||
}
|
||||
#[derive(Clone, PartialEq, ::prost::Message)]
|
||||
pub struct GetMetricsRequest {
|
||||
#[prost(bytes = "vec", tag = "1")]
|
||||
pub metric_type: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "vec", tag = "2")]
|
||||
pub opts: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "1")]
|
||||
pub metric_type: ::prost::bytes::Bytes,
|
||||
#[prost(bytes = "bytes", tag = "2")]
|
||||
pub opts: ::prost::bytes::Bytes,
|
||||
}
|
||||
#[derive(Clone, PartialEq, ::prost::Message)]
|
||||
pub struct GetMetricsResponse {
|
||||
#[prost(bool, tag = "1")]
|
||||
pub success: bool,
|
||||
#[prost(bytes = "vec", tag = "2")]
|
||||
pub realtime_metrics: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "2")]
|
||||
pub realtime_metrics: ::prost::bytes::Bytes,
|
||||
#[prost(string, optional, tag = "3")]
|
||||
pub error_info: ::core::option::Option<::prost::alloc::string::String>,
|
||||
}
|
||||
@@ -762,8 +762,8 @@ pub struct GetProcInfoRequest {}
|
||||
pub struct GetProcInfoResponse {
|
||||
#[prost(bool, tag = "1")]
|
||||
pub success: bool,
|
||||
#[prost(bytes = "vec", tag = "2")]
|
||||
pub proc_info: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "2")]
|
||||
pub proc_info: ::prost::bytes::Bytes,
|
||||
#[prost(string, optional, tag = "3")]
|
||||
pub error_info: ::core::option::Option<::prost::alloc::string::String>,
|
||||
}
|
||||
@@ -786,7 +786,7 @@ pub struct DownloadProfileDataResponse {
|
||||
#[prost(bool, tag = "1")]
|
||||
pub success: bool,
|
||||
#[prost(map = "string, bytes", tag = "2")]
|
||||
pub data: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::alloc::vec::Vec<u8>>,
|
||||
pub data: ::std::collections::HashMap<::prost::alloc::string::String, ::prost::bytes::Bytes>,
|
||||
#[prost(string, optional, tag = "3")]
|
||||
pub error_info: ::core::option::Option<::prost::alloc::string::String>,
|
||||
}
|
||||
@@ -799,8 +799,8 @@ pub struct GetBucketStatsDataRequest {
|
||||
pub struct GetBucketStatsDataResponse {
|
||||
#[prost(bool, tag = "1")]
|
||||
pub success: bool,
|
||||
#[prost(bytes = "vec", tag = "2")]
|
||||
pub bucket_stats: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "2")]
|
||||
pub bucket_stats: ::prost::bytes::Bytes,
|
||||
#[prost(string, optional, tag = "3")]
|
||||
pub error_info: ::core::option::Option<::prost::alloc::string::String>,
|
||||
}
|
||||
@@ -810,8 +810,8 @@ pub struct GetSrMetricsDataRequest {}
|
||||
pub struct GetSrMetricsDataResponse {
|
||||
#[prost(bool, tag = "1")]
|
||||
pub success: bool,
|
||||
#[prost(bytes = "vec", tag = "2")]
|
||||
pub sr_metrics_summary: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "2")]
|
||||
pub sr_metrics_summary: ::prost::bytes::Bytes,
|
||||
#[prost(string, optional, tag = "3")]
|
||||
pub error_info: ::core::option::Option<::prost::alloc::string::String>,
|
||||
}
|
||||
@@ -821,8 +821,8 @@ pub struct GetAllBucketStatsRequest {}
|
||||
pub struct GetAllBucketStatsResponse {
|
||||
#[prost(bool, tag = "1")]
|
||||
pub success: bool,
|
||||
#[prost(bytes = "vec", tag = "2")]
|
||||
pub bucket_stats_map: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "2")]
|
||||
pub bucket_stats_map: ::prost::bytes::Bytes,
|
||||
#[prost(string, optional, tag = "3")]
|
||||
pub error_info: ::core::option::Option<::prost::alloc::string::String>,
|
||||
}
|
||||
@@ -979,36 +979,36 @@ pub struct BackgroundHealStatusRequest {}
|
||||
pub struct BackgroundHealStatusResponse {
|
||||
#[prost(bool, tag = "1")]
|
||||
pub success: bool,
|
||||
#[prost(bytes = "vec", tag = "2")]
|
||||
pub bg_heal_state: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "2")]
|
||||
pub bg_heal_state: ::prost::bytes::Bytes,
|
||||
#[prost(string, optional, tag = "3")]
|
||||
pub error_info: ::core::option::Option<::prost::alloc::string::String>,
|
||||
}
|
||||
#[derive(Clone, PartialEq, ::prost::Message)]
|
||||
pub struct GetMetacacheListingRequest {
|
||||
#[prost(bytes = "vec", tag = "1")]
|
||||
pub opts: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "1")]
|
||||
pub opts: ::prost::bytes::Bytes,
|
||||
}
|
||||
#[derive(Clone, PartialEq, ::prost::Message)]
|
||||
pub struct GetMetacacheListingResponse {
|
||||
#[prost(bool, tag = "1")]
|
||||
pub success: bool,
|
||||
#[prost(bytes = "vec", tag = "2")]
|
||||
pub metacache: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "2")]
|
||||
pub metacache: ::prost::bytes::Bytes,
|
||||
#[prost(string, optional, tag = "3")]
|
||||
pub error_info: ::core::option::Option<::prost::alloc::string::String>,
|
||||
}
|
||||
#[derive(Clone, PartialEq, ::prost::Message)]
|
||||
pub struct UpdateMetacacheListingRequest {
|
||||
#[prost(bytes = "vec", tag = "1")]
|
||||
pub metacache: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "1")]
|
||||
pub metacache: ::prost::bytes::Bytes,
|
||||
}
|
||||
#[derive(Clone, PartialEq, ::prost::Message)]
|
||||
pub struct UpdateMetacacheListingResponse {
|
||||
#[prost(bool, tag = "1")]
|
||||
pub success: bool,
|
||||
#[prost(bytes = "vec", tag = "2")]
|
||||
pub metacache: ::prost::alloc::vec::Vec<u8>,
|
||||
#[prost(bytes = "bytes", tag = "2")]
|
||||
pub metacache: ::prost::bytes::Bytes,
|
||||
#[prost(string, optional, tag = "3")]
|
||||
pub error_info: ::core::option::Option<::prost::alloc::string::String>,
|
||||
}
|
||||
|
||||
@@ -43,6 +43,7 @@ fn main() -> Result<(), AnyError> {
        // .file_descriptor_set_path(descriptor_set_path)
        .protoc_arg("--experimental_allow_proto3_optional")
        .compile_well_known_types(true)
        .bytes(["."])
        .emit_rerun_if_changed(false)
        .compile_protos(proto_files, &[proto_dir.clone()])
        .map_err(|e| format!("Failed to generate protobuf file: {e}."))?;

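The added `.bytes(["."])` call configures prost-build to emit `prost::bytes::Bytes` instead of `Vec<u8>` for every `bytes` field (the `"."` path matches all messages), which is exactly what the regenerated structs above reflect. To refresh the generated code after such a change, the repository's own generator binary is used elsewhere in this commit (Makefile and Dockerfile.multi-stage); a minimal sketch:

```bash
# Regenerate the protobuf/tonic code with the new Bytes mapping, then re-check the workspace.
cargo run --bin gproto
cargo check --all-targets
```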
@@ -7,11 +7,16 @@ rust-version.workspace = true
version.workspace = true

[dependencies]
config = { workspace = true }
const-str = { workspace = true }
const-str = { workspace = true, optional = true }
serde = { workspace = true }
serde_json = { workspace = true }

[lints]
workspace = true

[features]
default = []
constants = ["dep:const-str"]
observability = []

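With `const-str` now optional, it is only compiled when the new `constants` feature is enabled via `dep:const-str`. A hedged check, assuming this hunk is the `crates/config` manifest that the workspace Cargo.toml aliases as `rustfs-config`:

```bash
# Default build should skip const-str; enabling the feature pulls it back in.
cargo check -p rustfs-config
cargo check -p rustfs-config --features constants
```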
@@ -1,200 +0,0 @@
|
||||
use crate::ObservabilityConfig;
|
||||
use crate::event::config::NotifierConfig;
|
||||
|
||||
/// RustFs configuration
|
||||
pub struct RustFsConfig {
|
||||
pub observability: ObservabilityConfig,
|
||||
pub event: NotifierConfig,
|
||||
}
|
||||
|
||||
impl RustFsConfig {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
observability: ObservabilityConfig::new(),
|
||||
event: NotifierConfig::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for RustFsConfig {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_rustfs_config_new() {
|
||||
let config = RustFsConfig::new();
|
||||
|
||||
// Verify that observability config is properly initialized
|
||||
assert!(!config.observability.sinks.is_empty(), "Observability sinks should not be empty");
|
||||
assert!(config.observability.logger.is_some(), "Logger config should be present");
|
||||
|
||||
// Verify that event config is properly initialized
|
||||
assert!(!config.event.store_path.is_empty(), "Event store path should not be empty");
|
||||
assert!(config.event.channel_capacity > 0, "Channel capacity should be positive");
|
||||
assert!(!config.event.adapters.is_empty(), "Event adapters should not be empty");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rustfs_config_default() {
|
||||
let config = RustFsConfig::default();
|
||||
|
||||
// Default should be equivalent to new()
|
||||
let new_config = RustFsConfig::new();
|
||||
|
||||
// Compare observability config
|
||||
assert_eq!(config.observability.sinks.len(), new_config.observability.sinks.len());
|
||||
assert_eq!(config.observability.logger.is_some(), new_config.observability.logger.is_some());
|
||||
|
||||
// Compare event config
|
||||
assert_eq!(config.event.store_path, new_config.event.store_path);
|
||||
assert_eq!(config.event.channel_capacity, new_config.event.channel_capacity);
|
||||
assert_eq!(config.event.adapters.len(), new_config.event.adapters.len());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rustfs_config_components_independence() {
|
||||
let mut config = RustFsConfig::new();
|
||||
|
||||
// Modify observability config
|
||||
config.observability.sinks.clear();
|
||||
|
||||
// Event config should remain unchanged
|
||||
assert!(!config.event.adapters.is_empty(), "Event adapters should remain unchanged");
|
||||
assert!(config.event.channel_capacity > 0, "Channel capacity should remain unchanged");
|
||||
|
||||
// Create new config to verify independence
|
||||
let new_config = RustFsConfig::new();
|
||||
assert!(!new_config.observability.sinks.is_empty(), "New config should have default sinks");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rustfs_config_observability_integration() {
|
||||
let config = RustFsConfig::new();
|
||||
|
||||
// Test observability config properties
|
||||
assert!(config.observability.otel.endpoint.is_empty() || !config.observability.otel.endpoint.is_empty());
|
||||
assert!(config.observability.otel.use_stdout.is_some());
|
||||
assert!(config.observability.otel.sample_ratio.is_some());
|
||||
assert!(config.observability.otel.meter_interval.is_some());
|
||||
assert!(config.observability.otel.service_name.is_some());
|
||||
assert!(config.observability.otel.service_version.is_some());
|
||||
assert!(config.observability.otel.environment.is_some());
|
||||
assert!(config.observability.otel.logger_level.is_some());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rustfs_config_event_integration() {
|
||||
let config = RustFsConfig::new();
|
||||
|
||||
// Test event config properties
|
||||
assert!(!config.event.store_path.is_empty(), "Store path should not be empty");
|
||||
assert!(
|
||||
config.event.channel_capacity >= 1000,
|
||||
"Channel capacity should be reasonable for production"
|
||||
);
|
||||
|
||||
// Test that store path is a valid path format
|
||||
let store_path = &config.event.store_path;
|
||||
assert!(!store_path.contains('\0'), "Store path should not contain null characters");
|
||||
|
||||
// Test adapters configuration
|
||||
for adapter in &config.event.adapters {
|
||||
// Each adapter should have a valid configuration
|
||||
match adapter {
|
||||
crate::event::adapters::AdapterConfig::Webhook(_) => {
|
||||
// Webhook adapter should be properly configured
|
||||
}
|
||||
crate::event::adapters::AdapterConfig::Kafka(_) => {
|
||||
// Kafka adapter should be properly configured
|
||||
}
|
||||
crate::event::adapters::AdapterConfig::Mqtt(_) => {
|
||||
// MQTT adapter should be properly configured
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rustfs_config_memory_usage() {
|
||||
// Test that config doesn't use excessive memory
|
||||
let config = RustFsConfig::new();
|
||||
|
||||
// Basic memory usage checks
|
||||
assert!(std::mem::size_of_val(&config) < 10000, "Config should not use excessive memory");
|
||||
|
||||
// Test that strings are not excessively long
|
||||
assert!(config.event.store_path.len() < 1000, "Store path should not be excessively long");
|
||||
|
||||
// Test that collections are reasonably sized
|
||||
assert!(config.observability.sinks.len() < 100, "Sinks collection should be reasonably sized");
|
||||
assert!(config.event.adapters.len() < 100, "Adapters collection should be reasonably sized");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rustfs_config_serialization_compatibility() {
|
||||
let config = RustFsConfig::new();
|
||||
|
||||
// Test that observability config can be serialized (it has Serialize trait)
|
||||
let observability_json = serde_json::to_string(&config.observability);
|
||||
assert!(observability_json.is_ok(), "Observability config should be serializable");
|
||||
|
||||
// Test that event config can be serialized (it has Serialize trait)
|
||||
let event_json = serde_json::to_string(&config.event);
|
||||
assert!(event_json.is_ok(), "Event config should be serializable");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rustfs_config_debug_format() {
|
||||
let config = RustFsConfig::new();
|
||||
|
||||
// Test that observability config has Debug trait
|
||||
let observability_debug = format!("{:?}", config.observability);
|
||||
assert!(!observability_debug.is_empty(), "Observability config should have debug output");
|
||||
assert!(
|
||||
observability_debug.contains("ObservabilityConfig"),
|
||||
"Debug output should contain type name"
|
||||
);
|
||||
|
||||
// Test that event config has Debug trait
|
||||
let event_debug = format!("{:?}", config.event);
|
||||
assert!(!event_debug.is_empty(), "Event config should have debug output");
|
||||
assert!(event_debug.contains("NotifierConfig"), "Debug output should contain type name");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rustfs_config_clone_behavior() {
|
||||
let config = RustFsConfig::new();
|
||||
|
||||
// Test that observability config can be cloned
|
||||
let observability_clone = config.observability.clone();
|
||||
assert_eq!(observability_clone.sinks.len(), config.observability.sinks.len());
|
||||
|
||||
// Test that event config can be cloned
|
||||
let event_clone = config.event.clone();
|
||||
assert_eq!(event_clone.store_path, config.event.store_path);
|
||||
assert_eq!(event_clone.channel_capacity, config.event.channel_capacity);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rustfs_config_environment_independence() {
|
||||
// Test that config creation doesn't depend on specific environment variables
|
||||
// This test ensures the config can be created in any environment
|
||||
|
||||
let config1 = RustFsConfig::new();
|
||||
let config2 = RustFsConfig::new();
|
||||
|
||||
// Both configs should have the same structure
|
||||
assert_eq!(config1.observability.sinks.len(), config2.observability.sinks.len());
|
||||
assert_eq!(config1.event.adapters.len(), config2.event.adapters.len());
|
||||
|
||||
// Store paths should be consistent
|
||||
assert_eq!(config1.event.store_path, config2.event.store_path);
|
||||
assert_eq!(config1.event.channel_capacity, config2.event.channel_capacity);
|
||||
}
|
||||
}
|
||||
@@ -1,27 +0,0 @@
|
||||
use crate::event::kafka::KafkaAdapter;
|
||||
use crate::event::mqtt::MqttAdapter;
|
||||
use crate::event::webhook::WebhookAdapter;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Configuration for the notification system.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(tag = "type")]
|
||||
pub enum AdapterConfig {
|
||||
Webhook(WebhookAdapter),
|
||||
Kafka(KafkaAdapter),
|
||||
Mqtt(MqttAdapter),
|
||||
}
|
||||
|
||||
impl AdapterConfig {
|
||||
/// create a new configuration with default values
|
||||
pub fn new() -> Self {
|
||||
Self::Webhook(WebhookAdapter::new())
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for AdapterConfig {
|
||||
/// create a new configuration with default values
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
@@ -1,334 +0,0 @@
|
||||
use crate::event::adapters::AdapterConfig;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::env;
|
||||
|
||||
#[allow(dead_code)]
|
||||
const DEFAULT_CONFIG_FILE: &str = "event";
|
||||
|
||||
/// Configuration for the notification system.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct NotifierConfig {
|
||||
#[serde(default = "default_store_path")]
|
||||
pub store_path: String,
|
||||
#[serde(default = "default_channel_capacity")]
|
||||
pub channel_capacity: usize,
|
||||
pub adapters: Vec<AdapterConfig>,
|
||||
}
|
||||
|
||||
impl Default for NotifierConfig {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl NotifierConfig {
|
||||
/// create a new configuration with default values
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
store_path: default_store_path(),
|
||||
channel_capacity: default_channel_capacity(),
|
||||
adapters: vec![AdapterConfig::new()],
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Provide temporary directories as default storage paths
|
||||
fn default_store_path() -> String {
|
||||
env::temp_dir().join("event-notification").to_string_lossy().to_string()
|
||||
}
|
||||
|
||||
/// Provides the recommended default channel capacity for high concurrency systems
|
||||
fn default_channel_capacity() -> usize {
|
||||
10000 // Reasonable default values for high concurrency systems
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::path::Path;
|
||||
|
||||
#[test]
|
||||
fn test_notifier_config_new() {
|
||||
let config = NotifierConfig::new();
|
||||
|
||||
// Verify store path is set
|
||||
assert!(!config.store_path.is_empty(), "Store path should not be empty");
|
||||
assert!(
|
||||
config.store_path.contains("event-notification"),
|
||||
"Store path should contain event-notification"
|
||||
);
|
||||
|
||||
// Verify channel capacity is reasonable
|
||||
assert_eq!(config.channel_capacity, 10000, "Channel capacity should be 10000");
|
||||
assert!(config.channel_capacity > 0, "Channel capacity should be positive");
|
||||
|
||||
// Verify adapters are initialized
|
||||
assert!(!config.adapters.is_empty(), "Adapters should not be empty");
|
||||
assert_eq!(config.adapters.len(), 1, "Should have exactly one default adapter");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_notifier_config_default() {
|
||||
let config = NotifierConfig::default();
|
||||
let new_config = NotifierConfig::new();
|
||||
|
||||
// Default should be equivalent to new()
|
||||
assert_eq!(config.store_path, new_config.store_path);
|
||||
assert_eq!(config.channel_capacity, new_config.channel_capacity);
|
||||
assert_eq!(config.adapters.len(), new_config.adapters.len());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_default_store_path() {
|
||||
let store_path = default_store_path();
|
||||
|
||||
// Verify store path properties
|
||||
assert!(!store_path.is_empty(), "Store path should not be empty");
|
||||
assert!(store_path.contains("event-notification"), "Store path should contain event-notification");
|
||||
|
||||
// Verify it's a valid path format
|
||||
let path = Path::new(&store_path);
|
||||
assert!(path.is_absolute() || path.is_relative(), "Store path should be a valid path");
|
||||
|
||||
// Verify it doesn't contain invalid characters
|
||||
assert!(!store_path.contains('\0'), "Store path should not contain null characters");
|
||||
|
||||
// Verify it's based on temp directory
|
||||
let temp_dir = env::temp_dir();
|
||||
let expected_path = temp_dir.join("event-notification");
|
||||
assert_eq!(store_path, expected_path.to_string_lossy().to_string());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_default_channel_capacity() {
|
||||
let capacity = default_channel_capacity();
|
||||
|
||||
// Verify capacity is reasonable
|
||||
assert_eq!(capacity, 10000, "Default capacity should be 10000");
|
||||
assert!(capacity > 0, "Capacity should be positive");
|
||||
assert!(capacity >= 1000, "Capacity should be at least 1000 for production use");
|
||||
assert!(capacity <= 1_000_000, "Capacity should not be excessively large");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_notifier_config_serialization() {
|
||||
let config = NotifierConfig::new();
|
||||
|
||||
// Test serialization to JSON
|
||||
let json_result = serde_json::to_string(&config);
|
||||
assert!(json_result.is_ok(), "Config should be serializable to JSON");
|
||||
|
||||
let json_str = json_result.unwrap();
|
||||
assert!(!json_str.is_empty(), "Serialized JSON should not be empty");
|
||||
assert!(json_str.contains("store_path"), "JSON should contain store_path");
|
||||
assert!(json_str.contains("channel_capacity"), "JSON should contain channel_capacity");
|
||||
assert!(json_str.contains("adapters"), "JSON should contain adapters");
|
||||
|
||||
// Test deserialization from JSON
|
||||
let deserialized_result: Result<NotifierConfig, _> = serde_json::from_str(&json_str);
|
||||
assert!(deserialized_result.is_ok(), "Config should be deserializable from JSON");
|
||||
|
||||
let deserialized_config = deserialized_result.unwrap();
|
||||
assert_eq!(deserialized_config.store_path, config.store_path);
|
||||
assert_eq!(deserialized_config.channel_capacity, config.channel_capacity);
|
||||
assert_eq!(deserialized_config.adapters.len(), config.adapters.len());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_notifier_config_serialization_with_defaults() {
|
||||
// Test serialization with minimal JSON (using serde defaults)
|
||||
let minimal_json = r#"{"adapters": []}"#;
|
||||
|
||||
let deserialized_result: Result<NotifierConfig, _> = serde_json::from_str(minimal_json);
|
||||
assert!(deserialized_result.is_ok(), "Config should deserialize with defaults");
|
||||
|
||||
let config = deserialized_result.unwrap();
|
||||
assert_eq!(config.store_path, default_store_path(), "Should use default store path");
|
||||
assert_eq!(config.channel_capacity, default_channel_capacity(), "Should use default channel capacity");
|
||||
assert!(config.adapters.is_empty(), "Should have empty adapters as specified");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_notifier_config_debug_format() {
|
||||
let config = NotifierConfig::new();
|
||||
|
||||
let debug_str = format!("{:?}", config);
|
||||
assert!(!debug_str.is_empty(), "Debug output should not be empty");
|
||||
assert!(debug_str.contains("NotifierConfig"), "Debug output should contain struct name");
|
||||
assert!(debug_str.contains("store_path"), "Debug output should contain store_path field");
|
||||
assert!(
|
||||
debug_str.contains("channel_capacity"),
|
||||
"Debug output should contain channel_capacity field"
|
||||
);
|
||||
assert!(debug_str.contains("adapters"), "Debug output should contain adapters field");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_notifier_config_clone() {
|
||||
let config = NotifierConfig::new();
|
||||
let cloned_config = config.clone();
|
||||
|
||||
// Test that clone creates an independent copy
|
||||
assert_eq!(cloned_config.store_path, config.store_path);
|
||||
assert_eq!(cloned_config.channel_capacity, config.channel_capacity);
|
||||
assert_eq!(cloned_config.adapters.len(), config.adapters.len());
|
||||
|
||||
// Verify they are independent (modifying one doesn't affect the other)
|
||||
let mut modified_config = config.clone();
|
||||
modified_config.channel_capacity = 5000;
|
||||
assert_ne!(modified_config.channel_capacity, config.channel_capacity);
|
||||
assert_eq!(cloned_config.channel_capacity, config.channel_capacity);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_notifier_config_modification() {
|
||||
let mut config = NotifierConfig::new();
|
||||
|
||||
// Test modifying store path
|
||||
let original_store_path = config.store_path.clone();
|
||||
config.store_path = "/custom/path".to_string();
|
||||
assert_ne!(config.store_path, original_store_path);
|
||||
assert_eq!(config.store_path, "/custom/path");
|
||||
|
||||
// Test modifying channel capacity
|
||||
let original_capacity = config.channel_capacity;
|
||||
config.channel_capacity = 5000;
|
||||
assert_ne!(config.channel_capacity, original_capacity);
|
||||
assert_eq!(config.channel_capacity, 5000);
|
||||
|
||||
// Test modifying adapters
|
||||
let original_adapters_len = config.adapters.len();
|
||||
config.adapters.push(AdapterConfig::new());
|
||||
assert_eq!(config.adapters.len(), original_adapters_len + 1);
|
||||
|
||||
// Test clearing adapters
|
||||
config.adapters.clear();
|
||||
assert!(config.adapters.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_notifier_config_adapters() {
|
||||
let config = NotifierConfig::new();
|
||||
|
||||
// Test default adapter configuration
|
||||
assert_eq!(config.adapters.len(), 1, "Should have exactly one default adapter");
|
||||
|
||||
// Test that we can add more adapters
|
||||
let mut config_mut = config.clone();
|
||||
config_mut.adapters.push(AdapterConfig::new());
|
||||
assert_eq!(config_mut.adapters.len(), 2, "Should be able to add more adapters");
|
||||
|
||||
// Test adapter types
|
||||
for adapter in &config.adapters {
|
||||
match adapter {
|
||||
AdapterConfig::Webhook(_) => {
|
||||
// Webhook adapter should be properly configured
|
||||
}
|
||||
AdapterConfig::Kafka(_) => {
|
||||
// Kafka adapter should be properly configured
|
||||
}
|
||||
AdapterConfig::Mqtt(_) => {
|
||||
// MQTT adapter should be properly configured
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_notifier_config_edge_cases() {
|
||||
// Test with empty adapters
|
||||
let mut config = NotifierConfig::new();
|
||||
config.adapters.clear();
|
||||
assert!(config.adapters.is_empty(), "Adapters should be empty after clearing");
|
||||
|
||||
// Test serialization with empty adapters
|
||||
let json_result = serde_json::to_string(&config);
|
||||
assert!(json_result.is_ok(), "Config with empty adapters should be serializable");
|
||||
|
||||
// Test with very large channel capacity
|
||||
config.channel_capacity = 1_000_000;
|
||||
assert_eq!(config.channel_capacity, 1_000_000);
|
||||
|
||||
// Test with minimum channel capacity
|
||||
config.channel_capacity = 1;
|
||||
assert_eq!(config.channel_capacity, 1);
|
||||
|
||||
// Test with empty store path
|
||||
config.store_path = String::new();
|
||||
assert!(config.store_path.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_notifier_config_memory_efficiency() {
|
||||
let config = NotifierConfig::new();
|
||||
|
||||
// Test that config doesn't use excessive memory
|
||||
let config_size = std::mem::size_of_val(&config);
|
||||
assert!(config_size < 5000, "Config should not use excessive memory");
|
||||
|
||||
// Test that store path is not excessively long
|
||||
assert!(config.store_path.len() < 1000, "Store path should not be excessively long");
|
||||
|
||||
// Test that adapters collection is reasonably sized
|
||||
assert!(config.adapters.len() < 100, "Adapters collection should be reasonably sized");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_notifier_config_consistency() {
|
||||
// Create multiple configs and ensure they're consistent
|
||||
let config1 = NotifierConfig::new();
|
||||
let config2 = NotifierConfig::new();
|
||||
|
||||
// Both configs should have the same default values
|
||||
assert_eq!(config1.store_path, config2.store_path);
|
||||
assert_eq!(config1.channel_capacity, config2.channel_capacity);
|
||||
assert_eq!(config1.adapters.len(), config2.adapters.len());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_notifier_config_path_validation() {
|
||||
let config = NotifierConfig::new();
|
||||
|
||||
// Test that store path is a valid path
|
||||
let path = Path::new(&config.store_path);
|
||||
|
||||
// Path should be valid
|
||||
assert!(path.components().count() > 0, "Path should have components");
|
||||
|
||||
// Path should not contain invalid characters for most filesystems
|
||||
assert!(!config.store_path.contains('\0'), "Path should not contain null characters");
|
||||
assert!(!config.store_path.contains('\x01'), "Path should not contain control characters");
|
||||
|
||||
// Path should be reasonable length
|
||||
assert!(config.store_path.len() < 260, "Path should be shorter than Windows MAX_PATH");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_notifier_config_production_readiness() {
|
||||
let config = NotifierConfig::new();
|
||||
|
||||
// Test production readiness criteria
|
||||
assert!(config.channel_capacity >= 1000, "Channel capacity should be sufficient for production");
|
||||
assert!(!config.store_path.is_empty(), "Store path should be configured");
|
||||
assert!(!config.adapters.is_empty(), "At least one adapter should be configured");
|
||||
|
||||
// Test that configuration is reasonable for high-load scenarios
|
||||
assert!(config.channel_capacity <= 10_000_000, "Channel capacity should not be excessive");
|
||||
|
||||
// Test that store path is in a reasonable location (temp directory)
|
||||
assert!(config.store_path.contains("event-notification"), "Store path should be identifiable");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_default_config_file_constant() {
|
||||
// Test that the constant is properly defined
|
||||
assert_eq!(DEFAULT_CONFIG_FILE, "event");
|
||||
// DEFAULT_CONFIG_FILE is a const, so is_empty() check is redundant
|
||||
// assert!(!DEFAULT_CONFIG_FILE.is_empty(), "Config file name should not be empty");
|
||||
assert!(!DEFAULT_CONFIG_FILE.contains('/'), "Config file name should not contain path separators");
|
||||
assert!(
|
||||
!DEFAULT_CONFIG_FILE.contains('\\'),
|
||||
"Config file name should not contain Windows path separators"
|
||||
);
|
||||
}
|
||||
}
|
||||
@@ -1,29 +0,0 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Configuration for the Kafka adapter.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct KafkaAdapter {
|
||||
pub brokers: String,
|
||||
pub topic: String,
|
||||
pub max_retries: u32,
|
||||
pub timeout: u64,
|
||||
}
|
||||
|
||||
impl KafkaAdapter {
|
||||
/// create a new configuration with default values
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
brokers: "localhost:9092".to_string(),
|
||||
topic: "kafka_topic".to_string(),
|
||||
max_retries: 3,
|
||||
timeout: 1000,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for KafkaAdapter {
|
||||
/// create a new configuration with default values
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
@@ -1,5 +0,0 @@
|
||||
pub(crate) mod adapters;
pub(crate) mod config;
pub(crate) mod kafka;
pub(crate) mod mqtt;
pub(crate) mod webhook;
|
||||
@@ -1,31 +0,0 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
/// Configuration for the MQTT adapter.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct MqttAdapter {
|
||||
pub broker: String,
|
||||
pub port: u16,
|
||||
pub client_id: String,
|
||||
pub topic: String,
|
||||
pub max_retries: u32,
|
||||
}
|
||||
|
||||
impl MqttAdapter {
|
||||
/// create a new configuration with default values
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
broker: "localhost".to_string(),
|
||||
port: 1883,
|
||||
client_id: "mqtt_client".to_string(),
|
||||
topic: "mqtt_topic".to_string(),
|
||||
max_retries: 3,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for MqttAdapter {
|
||||
/// create a new configuration with default values
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
@@ -1,51 +0,0 @@
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Configuration for the Webhook adapter.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct WebhookAdapter {
|
||||
pub endpoint: String,
|
||||
pub auth_token: Option<String>,
|
||||
pub custom_headers: Option<HashMap<String, String>>,
|
||||
pub max_retries: u32,
|
||||
pub timeout: u64,
|
||||
}
|
||||
|
||||
impl WebhookAdapter {
|
||||
/// verify that the configuration is valid
|
||||
pub fn validate(&self) -> Result<(), String> {
|
||||
// verify that endpoint cannot be empty
|
||||
if self.endpoint.trim().is_empty() {
|
||||
return Err("Webhook endpoint cannot be empty".to_string());
|
||||
}
|
||||
|
||||
// verification timeout must be reasonable
|
||||
if self.timeout == 0 {
|
||||
return Err("Webhook timeout must be greater than 0".to_string());
|
||||
}
|
||||
|
||||
// Verify that the maximum number of retry is reasonable
|
||||
if self.max_retries > 10 {
|
||||
return Err("Maximum retry count cannot exceed 10".to_string());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Get the default configuration
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
endpoint: "".to_string(),
|
||||
auth_token: None,
|
||||
custom_headers: Some(HashMap::new()),
|
||||
max_retries: 3,
|
||||
timeout: 1000,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for WebhookAdapter {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
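A quick illustration of the validation rules above; this is a minimal sketch that assumes the `WebhookAdapter` type from this file is in scope:

```rust
fn main() {
    let mut cfg = WebhookAdapter::new();
    // The default endpoint is empty, so validation must fail.
    assert!(cfg.validate().is_err());

    cfg.endpoint = "http://127.0.0.1:3020/webhook".to_string();
    assert!(cfg.validate().is_ok());

    // More than 10 retries is rejected as unreasonable.
    cfg.max_retries = 11;
    assert!(cfg.validate().is_err());
}
```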
|
||||
@@ -1,11 +1,7 @@
|
||||
use crate::observability::config::ObservabilityConfig;
|
||||
|
||||
mod config;
|
||||
mod constants;
|
||||
mod event;
|
||||
mod observability;
|
||||
|
||||
pub use config::RustFsConfig;
|
||||
#[cfg(feature = "constants")]
|
||||
pub mod constants;
|
||||
#[cfg(feature = "constants")]
|
||||
pub use constants::app::*;
|
||||
|
||||
pub use event::config::NotifierConfig;
|
||||
#[cfg(feature = "observability")]
|
||||
pub mod observability;
|
||||
|
||||
@@ -1,43 +0,0 @@
|
||||
[package]
|
||||
name = "rustfs-event-notifier"
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
rust-version.workspace = true
|
||||
version.workspace = true
|
||||
|
||||
[features]
|
||||
default = ["webhook"]
|
||||
webhook = ["dep:reqwest"]
|
||||
mqtt = ["rumqttc"]
|
||||
kafka = ["dep:rdkafka"]
|
||||
|
||||
[dependencies]
|
||||
async-trait = { workspace = true }
|
||||
config = { workspace = true }
|
||||
reqwest = { workspace = true, optional = true }
|
||||
rumqttc = { workspace = true, optional = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
serde_with = { workspace = true }
|
||||
smallvec = { workspace = true, features = ["serde"] }
|
||||
strum = { workspace = true, features = ["derive"] }
|
||||
tracing = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
tokio = { workspace = true, features = ["sync", "net", "macros", "signal", "rt-multi-thread"] }
|
||||
tokio-util = { workspace = true }
|
||||
uuid = { workspace = true, features = ["v4", "serde"] }
|
||||
|
||||
# Only enable kafka features and related dependencies on Linux
|
||||
[target.'cfg(target_os = "linux")'.dependencies]
|
||||
rdkafka = { workspace = true, features = ["tokio"], optional = true }
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { workspace = true, features = ["test-util"] }
|
||||
tracing-subscriber = { workspace = true }
|
||||
http = { workspace = true }
|
||||
axum = { workspace = true }
|
||||
dotenvy = { workspace = true }
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
@@ -1,28 +0,0 @@
|
||||
## ===== global configuration =====
|
||||
#NOTIFIER__STORE_PATH=/var/log/event-notification
|
||||
#NOTIFIER__CHANNEL_CAPACITY=5000
|
||||
#
|
||||
## ===== adapter configuration array format =====
|
||||
## webhook adapter index 0
|
||||
#NOTIFIER__ADAPTERS_0__type=Webhook
|
||||
#NOTIFIER__ADAPTERS_0__endpoint=http://127.0.0.1:3020/webhook
|
||||
#NOTIFIER__ADAPTERS_0__auth_token=your-auth-token
|
||||
#NOTIFIER__ADAPTERS_0__max_retries=3
|
||||
#NOTIFIER__ADAPTERS_0__timeout=50
|
||||
#NOTIFIER__ADAPTERS_0__custom_headers__x_custom_server=server-value
|
||||
#NOTIFIER__ADAPTERS_0__custom_headers__x_custom_client=client-value
|
||||
#
|
||||
## kafka adapter index 1
|
||||
#NOTIFIER__ADAPTERS_1__type=Kafka
|
||||
#NOTIFIER__ADAPTERS_1__brokers=localhost:9092
|
||||
#NOTIFIER__ADAPTERS_1__topic=notifications
|
||||
#NOTIFIER__ADAPTERS_1__max_retries=3
|
||||
#NOTIFIER__ADAPTERS_1__timeout=60
|
||||
#
|
||||
## mqtt adapter index 2
|
||||
#NOTIFIER__ADAPTERS_2__type=Mqtt
|
||||
#NOTIFIER__ADAPTERS_2__broker=mqtt.example.com
|
||||
#NOTIFIER__ADAPTERS_2__port=1883
|
||||
#NOTIFIER__ADAPTERS_2__client_id=event-notifier
|
||||
#NOTIFIER__ADAPTERS_2__topic=events
|
||||
#NOTIFIER__ADAPTERS_2__max_retries=3
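A minimal sketch of how these keys can be picked up at runtime: `dotenvy` (already a dev-dependency of this crate) loads the `.env` file, after which the double-underscore names are visible through the process environment. The snippet only reads two of the global keys and makes no claims about how the indexed adapter keys are parsed.

```rust
fn main() {
    // Ignore the error if no .env file is present.
    dotenvy::dotenv().ok();

    if let Ok(path) = std::env::var("NOTIFIER__STORE_PATH") {
        println!("store path override: {path}");
    }
    if let Ok(capacity) = std::env::var("NOTIFIER__CHANNEL_CAPACITY") {
        println!("channel capacity override: {capacity}");
    }
}
```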
|
||||
@@ -1,28 +0,0 @@
|
||||
## ===== global configuration =====
#NOTIFIER__STORE_PATH=/var/log/event-notification
#NOTIFIER__CHANNEL_CAPACITY=5000
#
## ===== adapter configuration (array format) =====
## Webhook adapter (index 0)
#NOTIFIER__ADAPTERS_0__type=Webhook
#NOTIFIER__ADAPTERS_0__endpoint=http://127.0.0.1:3020/webhook
#NOTIFIER__ADAPTERS_0__auth_token=your-auth-token
#NOTIFIER__ADAPTERS_0__max_retries=3
#NOTIFIER__ADAPTERS_0__timeout=50
#NOTIFIER__ADAPTERS_0__custom_headers__x_custom_server=value
#NOTIFIER__ADAPTERS_0__custom_headers__x_custom_client=value
#
## Kafka adapter (index 1)
#NOTIFIER__ADAPTERS_1__type=Kafka
#NOTIFIER__ADAPTERS_1__brokers=localhost:9092
#NOTIFIER__ADAPTERS_1__topic=notifications
#NOTIFIER__ADAPTERS_1__max_retries=3
#NOTIFIER__ADAPTERS_1__timeout=60
#
## MQTT adapter (index 2)
#NOTIFIER__ADAPTERS_2__type=Mqtt
#NOTIFIER__ADAPTERS_2__broker=mqtt.example.com
#NOTIFIER__ADAPTERS_2__port=1883
#NOTIFIER__ADAPTERS_2__client_id=event-notifier
#NOTIFIER__ADAPTERS_2__topic=events
#NOTIFIER__ADAPTERS_2__max_retries=3
|
||||
@@ -1,29 +0,0 @@
|
||||
# config.toml
|
||||
store_path = "/var/log/event-notifier"
|
||||
channel_capacity = 5000
|
||||
|
||||
[[adapters]]
|
||||
type = "Webhook"
|
||||
endpoint = "http://127.0.0.1:3020/webhook"
|
||||
auth_token = "your-auth-token"
|
||||
max_retries = 3
|
||||
timeout = 50
|
||||
|
||||
[adapters.custom_headers]
|
||||
custom_server = "value_server"
|
||||
custom_client = "value_client"
|
||||
|
||||
[[adapters]]
|
||||
type = "Kafka"
|
||||
brokers = "localhost:9092"
|
||||
topic = "notifications"
|
||||
max_retries = 3
|
||||
timeout = 60
|
||||
|
||||
[[adapters]]
|
||||
type = "Mqtt"
|
||||
broker = "mqtt.example.com"
|
||||
port = 1883
|
||||
client_id = "event-notifier"
|
||||
topic = "events"
|
||||
max_retries = 3
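For reference, a minimal sketch of deserializing a file in this shape into the `NotifierConfig` type defined in `config.rs` further down, mirroring what `event_load_config` does. The file name is assumed, `NotifierConfig` is assumed to be imported, and error handling is elided with `unwrap`:

```rust
use config::{Config, File, FileFormat};

fn main() {
    // Assumes a config.toml like the one above sits in the working directory.
    let raw = Config::builder()
        .add_source(File::with_name("config").format(FileFormat::Toml).required(true))
        .build()
        .unwrap();

    let notifier: NotifierConfig = raw.try_deserialize().unwrap();
    assert_eq!(notifier.channel_capacity, 5000);
    assert_eq!(notifier.adapters.len(), 3);
}
```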
|
||||
@@ -1,133 +0,0 @@
|
||||
use rustfs_event_notifier::{
|
||||
AdapterConfig, Bucket, Error as NotifierError, Event, Identity, Metadata, Name, NotifierConfig, Object, Source, WebhookConfig,
|
||||
};
|
||||
use std::collections::HashMap;
|
||||
use tokio::signal;
|
||||
use tracing::Level;
|
||||
use tracing_subscriber::FmtSubscriber;
|
||||
|
||||
async fn setup_notification_system() -> Result<(), NotifierError> {
|
||||
let config = NotifierConfig {
|
||||
store_path: "./deploy/logs/event_store".into(),
|
||||
channel_capacity: 100,
|
||||
adapters: vec![AdapterConfig::Webhook(WebhookConfig {
|
||||
endpoint: "http://127.0.0.1:3020/webhook".into(),
|
||||
auth_token: Some("your-auth-token".into()),
|
||||
custom_headers: Some(HashMap::new()),
|
||||
max_retries: 3,
|
||||
timeout: 30,
|
||||
})],
|
||||
};
|
||||
|
||||
rustfs_event_notifier::initialize(config).await?;
|
||||
|
||||
// wait for the system to be ready
|
||||
for _ in 0..50 {
|
||||
// wait up to 5 seconds
|
||||
if rustfs_event_notifier::is_ready() {
|
||||
return Ok(());
|
||||
}
|
||||
tokio::time::sleep(tokio::time::Duration::from_millis(100)).await;
|
||||
}
|
||||
|
||||
Err(NotifierError::custom("notification system initialization timed out"))
|
||||
}
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn std::error::Error>> {
|
||||
// initialize logging
|
||||
// tracing_subscriber::fmt::init();
|
||||
|
||||
let subscriber = FmtSubscriber::builder()
|
||||
.with_max_level(Level::DEBUG) // set to debug or lower level
|
||||
.with_target(false) // simplify output
|
||||
.finish();
|
||||
tracing::subscriber::set_global_default(subscriber).expect("failed to set up log subscriber");
|
||||
|
||||
// set up notification system
|
||||
if let Err(e) = setup_notification_system().await {
|
||||
eprintln!("unable to initialize notification system:{}", e);
|
||||
return Err(e.into());
|
||||
}
|
||||
|
||||
// create a shutdown signal processing
|
||||
let (shutdown_tx, mut shutdown_rx) = tokio::sync::oneshot::channel();
|
||||
|
||||
// start signal processing task
|
||||
tokio::spawn(async move {
|
||||
let _ = signal::ctrl_c().await;
|
||||
println!("Received the shutdown signal and prepared to exit...");
|
||||
let _ = shutdown_tx.send(());
|
||||
});
|
||||
|
||||
// main application logic
|
||||
tokio::select! {
|
||||
_ = async {
|
||||
loop {
|
||||
// application logic
|
||||
// create an s3 metadata object
|
||||
let metadata = Metadata {
|
||||
schema_version: "1.0".to_string(),
|
||||
configuration_id: "test-config".to_string(),
|
||||
bucket: Bucket {
|
||||
name: "my-bucket".to_string(),
|
||||
owner_identity: Identity {
|
||||
principal_id: "owner123".to_string(),
|
||||
},
|
||||
arn: "arn:aws:s3:::my-bucket".to_string(),
|
||||
},
|
||||
object: Object {
|
||||
key: "test.txt".to_string(),
|
||||
size: Some(1024),
|
||||
etag: Some("abc123".to_string()),
|
||||
content_type: Some("text/plain".to_string()),
|
||||
user_metadata: None,
|
||||
version_id: None,
|
||||
sequencer: "1234567890".to_string(),
|
||||
},
|
||||
};
|
||||
|
||||
// create source object
|
||||
let source = Source {
|
||||
host: "localhost".to_string(),
|
||||
port: "80".to_string(),
|
||||
user_agent: "curl/7.68.0".to_string(),
|
||||
};
|
||||
|
||||
// create events using builder mode
|
||||
let event = Event::builder()
|
||||
.event_time("2023-10-01T12:00:00.000Z")
|
||||
.event_name(Name::ObjectCreatedPut)
|
||||
.user_identity(Identity {
|
||||
principal_id: "user123".to_string(),
|
||||
})
|
||||
.s3(metadata)
|
||||
.source(source)
|
||||
.channels(vec!["webhook".to_string()])
|
||||
.build()
|
||||
.expect("failed to create event");
|
||||
|
||||
if let Err(e) = rustfs_event_notifier::send_event(event).await {
|
||||
eprintln!("send event failed:{}", e);
|
||||
}
|
||||
|
||||
tokio::time::sleep(tokio::time::Duration::from_secs(60)).await;
|
||||
}
|
||||
} => {},
|
||||
|
||||
_ = &mut shutdown_rx => {
|
||||
println!("close the app");
|
||||
}
|
||||
}
|
||||
|
||||
// gracefully shut down the notification system
|
||||
println!("turn off the notification system");
|
||||
if let Err(e) = rustfs_event_notifier::shutdown().await {
|
||||
eprintln!("An error occurred while shutting down the notification system:{}", e);
|
||||
} else {
|
||||
println!("the notification system has been closed safely");
|
||||
}
|
||||
|
||||
println!("the application has been closed safely");
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,110 +0,0 @@
|
||||
use rustfs_event_notifier::NotifierSystem;
|
||||
use rustfs_event_notifier::create_adapters;
|
||||
use rustfs_event_notifier::{AdapterConfig, NotifierConfig, WebhookConfig};
|
||||
use rustfs_event_notifier::{Bucket, Event, Identity, Metadata, Name, Object, Source};
|
||||
use std::collections::HashMap;
|
||||
use std::error;
|
||||
use std::sync::Arc;
|
||||
use tokio::signal;
|
||||
use tracing::Level;
|
||||
use tracing_subscriber::FmtSubscriber;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), Box<dyn error::Error>> {
|
||||
let subscriber = FmtSubscriber::builder()
|
||||
.with_max_level(Level::DEBUG) // set to debug or lower level
|
||||
.with_target(false) // simplify output
|
||||
.finish();
|
||||
tracing::subscriber::set_global_default(subscriber).expect("failed to set up log subscriber");
|
||||
|
||||
let config = NotifierConfig {
|
||||
store_path: "./events".to_string(),
|
||||
channel_capacity: 100,
|
||||
adapters: vec![AdapterConfig::Webhook(WebhookConfig {
|
||||
endpoint: "http://127.0.0.1:3020/webhook".to_string(),
|
||||
auth_token: Some("secret-token".to_string()),
|
||||
custom_headers: Some(HashMap::from([("X-Custom".to_string(), "value".to_string())])),
|
||||
max_retries: 3,
|
||||
timeout: 10,
|
||||
})],
|
||||
};
|
||||
|
||||
// event_load_config
|
||||
// loading configuration from environment variables
|
||||
let _config = NotifierConfig::event_load_config(Some("./crates/event-notifier/examples/event.toml".to_string()));
|
||||
tracing::info!("event_load_config config: {:?} \n", _config);
|
||||
dotenvy::dotenv()?;
|
||||
let _config = NotifierConfig::event_load_config(None);
|
||||
tracing::info!("event_load_config config: {:?} \n", _config);
|
||||
let system = Arc::new(tokio::sync::Mutex::new(NotifierSystem::new(config.clone()).await?));
|
||||
let adapters = create_adapters(&config.adapters)?;
|
||||
|
||||
// create an s3 metadata object
|
||||
let metadata = Metadata {
|
||||
schema_version: "1.0".to_string(),
|
||||
configuration_id: "test-config".to_string(),
|
||||
bucket: Bucket {
|
||||
name: "my-bucket".to_string(),
|
||||
owner_identity: Identity {
|
||||
principal_id: "owner123".to_string(),
|
||||
},
|
||||
arn: "arn:aws:s3:::my-bucket".to_string(),
|
||||
},
|
||||
object: Object {
|
||||
key: "test.txt".to_string(),
|
||||
size: Some(1024),
|
||||
etag: Some("abc123".to_string()),
|
||||
content_type: Some("text/plain".to_string()),
|
||||
user_metadata: None,
|
||||
version_id: None,
|
||||
sequencer: "1234567890".to_string(),
|
||||
},
|
||||
};
|
||||
|
||||
// create source object
|
||||
let source = Source {
|
||||
host: "localhost".to_string(),
|
||||
port: "80".to_string(),
|
||||
user_agent: "curl/7.68.0".to_string(),
|
||||
};
|
||||
|
||||
// create events using builder mode
|
||||
let event = Event::builder()
|
||||
.event_time("2023-10-01T12:00:00.000Z")
|
||||
.event_name(Name::ObjectCreatedPut)
|
||||
.user_identity(Identity {
|
||||
principal_id: "user123".to_string(),
|
||||
})
|
||||
.s3(metadata)
|
||||
.source(source)
|
||||
.channels(vec!["webhook".to_string()])
|
||||
.build()
|
||||
.expect("failed to create event");
|
||||
|
||||
{
|
||||
let system = system.lock().await;
|
||||
system.send_event(event).await?;
|
||||
}
|
||||
|
||||
let system_clone = Arc::clone(&system);
|
||||
let system_handle = tokio::spawn(async move {
|
||||
let mut system = system_clone.lock().await;
|
||||
system.start(adapters).await
|
||||
});
|
||||
|
||||
signal::ctrl_c().await?;
|
||||
tracing::info!("Received shutdown signal");
|
||||
let result = {
|
||||
let mut system = system.lock().await;
|
||||
system.shutdown().await
|
||||
};
|
||||
|
||||
if let Err(e) = result {
|
||||
tracing::error!("Failed to shut down the notification system: {}", e);
|
||||
} else {
|
||||
tracing::info!("Notification system shut down successfully");
|
||||
}
|
||||
|
||||
system_handle.await??;
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,94 +0,0 @@
|
||||
use axum::{Router, extract::Json, http::StatusCode, routing::post};
|
||||
use serde_json::Value;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
// build the application router
|
||||
let app = Router::new().route("/webhook", post(receive_webhook));
|
||||
// start the HTTP server
|
||||
let listener = tokio::net::TcpListener::bind("0.0.0.0:3020").await.unwrap();
|
||||
println!("Server running on http://0.0.0.0:3020");
|
||||
|
||||
// handle the shutdown signal
|
||||
tokio::select! {
|
||||
result = axum::serve(listener, app) => {
|
||||
if let Err(e) = result {
|
||||
eprintln!("Server error: {}", e);
|
||||
}
|
||||
}
|
||||
_ = tokio::signal::ctrl_c() => {
|
||||
println!("Shutting down server...");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn receive_webhook(Json(payload): Json<Value>) -> StatusCode {
|
||||
let start = SystemTime::now();
|
||||
let since_the_epoch = start.duration_since(UNIX_EPOCH).expect("Time went backwards");
|
||||
|
||||
// get the number of seconds since the unix era
|
||||
let seconds = since_the_epoch.as_secs();
|
||||
|
||||
// Manually calculate year, month, day, hour, minute, and second
|
||||
let (year, month, day, hour, minute, second) = convert_seconds_to_date(seconds);
|
||||
|
||||
// output result
|
||||
println!("current time:{:04}-{:02}-{:02} {:02}:{:02}:{:02}", year, month, day, hour, minute, second);
|
||||
println!(
|
||||
"received a webhook request time:{} content:\n {}",
|
||||
seconds,
|
||||
serde_json::to_string_pretty(&payload).unwrap()
|
||||
);
|
||||
StatusCode::OK
|
||||
}
|
||||
|
||||
fn convert_seconds_to_date(seconds: u64) -> (u32, u32, u32, u32, u32, u32) {
|
||||
// assume that the time zone is utc
|
||||
let seconds_per_minute = 60;
|
||||
let seconds_per_hour = 3600;
|
||||
let seconds_per_day = 86400;
|
||||
|
||||
// Calculate the year, month, day, hour, minute, and second corresponding to the number of seconds
|
||||
let mut total_seconds = seconds;
|
||||
let mut year = 1970;
|
||||
let mut month = 1;
|
||||
let mut day = 1;
|
||||
let mut hour = 0;
|
||||
let mut minute = 0;
|
||||
let mut second = 0;
|
||||
|
||||
// calculate year
|
||||
while total_seconds >= 31536000 {
|
||||
year += 1;
|
||||
total_seconds -= 31536000; // simplified: leap years are not considered
|
||||
}
|
||||
|
||||
// calculate month
|
||||
let days_in_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31];
|
||||
for m in &days_in_month {
|
||||
if total_seconds >= m * seconds_per_day {
|
||||
month += 1;
|
||||
total_seconds -= m * seconds_per_day;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// calculate the number of days
|
||||
day += total_seconds / seconds_per_day;
|
||||
total_seconds %= seconds_per_day;
|
||||
|
||||
// calculate hours
|
||||
hour += total_seconds / seconds_per_hour;
|
||||
total_seconds %= seconds_per_hour;
|
||||
|
||||
// calculate minutes
|
||||
minute += total_seconds / seconds_per_minute;
|
||||
total_seconds %= seconds_per_minute;
|
||||
|
||||
// calculate the number of seconds
|
||||
second += total_seconds;
|
||||
|
||||
(year as u32, month as u32, day as u32, hour as u32, minute as u32, second as u32)
|
||||
}
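A quick sanity check of the simplified conversion above (no leap years, so dates after February 1972 drift); a minimal sketch assuming `convert_seconds_to_date` is in scope:

```rust
#[test]
fn convert_seconds_to_date_smoke() {
    // 86_399 seconds into 1970 is still Jan 1, one second before midnight.
    assert_eq!(convert_seconds_to_date(86_399), (1970, 1, 1, 23, 59, 59));
    // Exactly one "simplified" 365-day year later.
    assert_eq!(convert_seconds_to_date(31_536_000), (1971, 1, 1, 0, 0, 0));
}
```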
|
||||
@@ -1,69 +0,0 @@
|
||||
use crate::ChannelAdapter;
|
||||
use crate::Error;
|
||||
use crate::Event;
|
||||
use crate::KafkaConfig;
|
||||
use async_trait::async_trait;
|
||||
use rdkafka::error::KafkaError;
|
||||
use rdkafka::producer::{FutureProducer, FutureRecord};
|
||||
use rdkafka::types::RDKafkaErrorCode;
|
||||
use rdkafka::util::Timeout;
|
||||
use std::time::Duration;
|
||||
use tokio::time::sleep;
|
||||
|
||||
/// Kafka adapter for sending events to a Kafka topic.
|
||||
pub struct KafkaAdapter {
|
||||
producer: FutureProducer,
|
||||
topic: String,
|
||||
max_retries: u32,
|
||||
}
|
||||
|
||||
impl KafkaAdapter {
|
||||
/// Creates a new Kafka adapter.
|
||||
pub fn new(config: &KafkaConfig) -> Result<Self, Error> {
|
||||
// Create a Kafka producer with the provided configuration.
|
||||
let producer = rdkafka::config::ClientConfig::new()
|
||||
.set("bootstrap.servers", &config.brokers)
|
||||
.set("message.timeout.ms", config.timeout.to_string())
|
||||
.create()?;
|
||||
|
||||
Ok(Self {
|
||||
producer,
|
||||
topic: config.topic.clone(),
|
||||
max_retries: config.max_retries,
|
||||
})
|
||||
}
|
||||
/// Sends an event to the Kafka topic with retry logic.
|
||||
async fn send_with_retry(&self, event: &Event) -> Result<(), Error> {
|
||||
let event_id = event.id.to_string();
|
||||
let payload = serde_json::to_string(&event)?;
|
||||
|
||||
for attempt in 0..self.max_retries {
|
||||
let record = FutureRecord::to(&self.topic).key(&event_id).payload(&payload);
|
||||
|
||||
match self.producer.send(record, Timeout::Never).await {
|
||||
Ok(_) => return Ok(()),
|
||||
Err((KafkaError::MessageProduction(RDKafkaErrorCode::QueueFull), _)) => {
|
||||
tracing::warn!("Kafka attempt {} failed: Queue full. Retrying...", attempt + 1);
|
||||
sleep(Duration::from_secs(2u64.pow(attempt))).await;
|
||||
}
|
||||
Err((e, _)) => {
|
||||
tracing::error!("Kafka send error: {}", e);
|
||||
return Err(Error::Kafka(e));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Err(Error::Custom("Exceeded maximum retry attempts for Kafka message".to_string()))
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl ChannelAdapter for KafkaAdapter {
|
||||
fn name(&self) -> String {
|
||||
"kafka".to_string()
|
||||
}
|
||||
|
||||
async fn send(&self, event: &Event) -> Result<(), Error> {
|
||||
self.send_with_retry(event).await
|
||||
}
|
||||
}
|
||||
@@ -1,54 +0,0 @@
|
||||
use crate::AdapterConfig;
|
||||
use crate::Error;
|
||||
use crate::Event;
|
||||
use async_trait::async_trait;
|
||||
use std::sync::Arc;
|
||||
|
||||
#[cfg(all(feature = "kafka", target_os = "linux"))]
|
||||
pub(crate) mod kafka;
|
||||
#[cfg(feature = "mqtt")]
|
||||
pub(crate) mod mqtt;
|
||||
#[cfg(feature = "webhook")]
|
||||
pub(crate) mod webhook;
|
||||
|
||||
/// The `ChannelAdapter` trait defines the interface for all channel adapters.
|
||||
#[async_trait]
|
||||
pub trait ChannelAdapter: Send + Sync + 'static {
|
||||
/// Returns the adapter's name, used to match an event's channels.
|
||||
fn name(&self) -> String;
|
||||
/// Sends an event to the channel.
|
||||
async fn send(&self, event: &Event) -> Result<(), Error>;
|
||||
}
|
||||
|
||||
/// Creates channel adapters based on the provided configuration.
|
||||
pub fn create_adapters(configs: &[AdapterConfig]) -> Result<Vec<Arc<dyn ChannelAdapter>>, Error> {
|
||||
let mut adapters: Vec<Arc<dyn ChannelAdapter>> = Vec::new();
|
||||
|
||||
for config in configs {
|
||||
match config {
|
||||
#[cfg(feature = "webhook")]
|
||||
AdapterConfig::Webhook(webhook_config) => {
|
||||
webhook_config.validate().map_err(Error::ConfigError)?;
|
||||
adapters.push(Arc::new(webhook::WebhookAdapter::new(webhook_config.clone())));
|
||||
}
|
||||
#[cfg(all(feature = "kafka", target_os = "linux"))]
|
||||
AdapterConfig::Kafka(kafka_config) => {
|
||||
adapters.push(Arc::new(kafka::KafkaAdapter::new(kafka_config)?));
|
||||
}
|
||||
#[cfg(feature = "mqtt")]
|
||||
AdapterConfig::Mqtt(mqtt_config) => {
|
||||
let (mqtt, mut event_loop) = mqtt::MqttAdapter::new(mqtt_config);
|
||||
tokio::spawn(async move { while event_loop.poll().await.is_ok() {} });
|
||||
adapters.push(Arc::new(mqtt));
|
||||
}
|
||||
#[cfg(not(feature = "webhook"))]
|
||||
AdapterConfig::Webhook(_) => return Err(Error::FeatureDisabled("webhook")),
|
||||
#[cfg(any(not(feature = "kafka"), not(target_os = "linux")))]
|
||||
AdapterConfig::Kafka(_) => return Err(Error::FeatureDisabled("kafka")),
|
||||
#[cfg(not(feature = "mqtt"))]
|
||||
AdapterConfig::Mqtt(_) => return Err(Error::FeatureDisabled("mqtt")),
|
||||
}
|
||||
}
|
||||
|
||||
Ok(adapters)
|
||||
}
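Since `create_adapters` only wires up the built-in variants, here is a minimal sketch of what a custom adapter could look like against the `ChannelAdapter` trait above. The `LogAdapter` type is hypothetical and not part of the crate; it simply logs the event and succeeds:

```rust
use crate::{ChannelAdapter, Error, Event};
use async_trait::async_trait;

/// Hypothetical adapter that only logs events.
struct LogAdapter;

#[async_trait]
impl ChannelAdapter for LogAdapter {
    fn name(&self) -> String {
        "log".to_string()
    }

    async fn send(&self, event: &Event) -> Result<(), Error> {
        tracing::info!("event {} delivered to log adapter", event.id);
        Ok(())
    }
}
```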
|
||||
@@ -1,58 +0,0 @@
|
||||
use crate::ChannelAdapter;
|
||||
use crate::Error;
|
||||
use crate::Event;
|
||||
use crate::MqttConfig;
|
||||
use async_trait::async_trait;
|
||||
use rumqttc::{AsyncClient, MqttOptions, QoS};
|
||||
use std::time::Duration;
|
||||
use tokio::time::sleep;
|
||||
|
||||
/// MQTT adapter for sending events to an MQTT broker.
|
||||
pub struct MqttAdapter {
|
||||
client: AsyncClient,
|
||||
topic: String,
|
||||
max_retries: u32,
|
||||
}
|
||||
|
||||
impl MqttAdapter {
|
||||
/// Creates a new MQTT adapter.
|
||||
pub fn new(config: &MqttConfig) -> (Self, rumqttc::EventLoop) {
|
||||
let mqtt_options = MqttOptions::new(&config.client_id, &config.broker, config.port);
|
||||
let (client, event_loop) = rumqttc::AsyncClient::new(mqtt_options, 10);
|
||||
(
|
||||
Self {
|
||||
client,
|
||||
topic: config.topic.clone(),
|
||||
max_retries: config.max_retries,
|
||||
},
|
||||
event_loop,
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl ChannelAdapter for MqttAdapter {
|
||||
fn name(&self) -> String {
|
||||
"mqtt".to_string()
|
||||
}
|
||||
|
||||
async fn send(&self, event: &Event) -> Result<(), Error> {
|
||||
let payload = serde_json::to_string(event).map_err(Error::Serde)?;
|
||||
let mut attempt = 0;
|
||||
loop {
|
||||
match self
|
||||
.client
|
||||
.publish(&self.topic, QoS::AtLeastOnce, false, payload.clone())
|
||||
.await
|
||||
{
|
||||
Ok(()) => return Ok(()),
|
||||
Err(e) if attempt < self.max_retries => {
|
||||
attempt += 1;
|
||||
tracing::warn!("MQTT attempt {} failed: {}. Retrying...", attempt, e);
|
||||
sleep(Duration::from_secs(2u64.pow(attempt))).await;
|
||||
}
|
||||
Err(e) => return Err(Error::Mqtt(e)),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
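Note that `MqttAdapter::new` hands back the `rumqttc::EventLoop`, and the caller has to keep polling it or publishes never leave the client. A minimal sketch of that wiring, which is exactly what `create_adapters` in `mod.rs` does (`mqtt_config` is an assumed `MqttConfig` value):

```rust
// Drive the event loop in a background task so publish() can make progress.
let (adapter, mut event_loop) = MqttAdapter::new(&mqtt_config);
tokio::spawn(async move { while event_loop.poll().await.is_ok() {} });
// `adapter` can now be handed to the event bus like any other ChannelAdapter.
```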
|
||||
@@ -1,64 +0,0 @@
|
||||
use crate::ChannelAdapter;
|
||||
use crate::Error;
|
||||
use crate::Event;
|
||||
use crate::WebhookConfig;
|
||||
use async_trait::async_trait;
|
||||
use reqwest::{Client, RequestBuilder};
|
||||
use std::time::Duration;
|
||||
use tokio::time::sleep;
|
||||
|
||||
/// Webhook adapter for sending events to a webhook endpoint.
|
||||
pub struct WebhookAdapter {
|
||||
config: WebhookConfig,
|
||||
client: Client,
|
||||
}
|
||||
|
||||
impl WebhookAdapter {
|
||||
/// Creates a new Webhook adapter.
|
||||
pub fn new(config: WebhookConfig) -> Self {
|
||||
let client = Client::builder()
|
||||
.timeout(Duration::from_secs(config.timeout))
|
||||
.build()
|
||||
.expect("Failed to build reqwest client");
|
||||
Self { config, client }
|
||||
}
|
||||
/// Builds the request to send the event.
|
||||
fn build_request(&self, event: &Event) -> RequestBuilder {
|
||||
let mut request = self.client.post(&self.config.endpoint).json(event);
|
||||
if let Some(token) = &self.config.auth_token {
|
||||
request = request.header("Authorization", format!("Bearer {}", token));
|
||||
}
|
||||
if let Some(headers) = &self.config.custom_headers {
|
||||
for (key, value) in headers {
|
||||
request = request.header(key, value);
|
||||
}
|
||||
}
|
||||
request
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl ChannelAdapter for WebhookAdapter {
|
||||
fn name(&self) -> String {
|
||||
"webhook".to_string()
|
||||
}
|
||||
|
||||
async fn send(&self, event: &Event) -> Result<(), Error> {
|
||||
let mut attempt = 0;
|
||||
tracing::info!("Attempting to send webhook request: {:?}", event);
|
||||
loop {
|
||||
match self.build_request(event).send().await {
|
||||
Ok(response) => {
|
||||
response.error_for_status().map_err(Error::Http)?;
|
||||
return Ok(());
|
||||
}
|
||||
Err(e) if attempt < self.config.max_retries => {
|
||||
attempt += 1;
|
||||
tracing::warn!("Webhook attempt {} failed: {}. Retrying...", attempt, e);
|
||||
sleep(Duration::from_secs(2u64.pow(attempt))).await;
|
||||
}
|
||||
Err(e) => return Err(Error::Http(e)),
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,102 +0,0 @@
|
||||
use crate::ChannelAdapter;
|
||||
use crate::Error;
|
||||
use crate::EventStore;
|
||||
use crate::{Event, Log};
|
||||
use std::sync::Arc;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::time::Duration;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::instrument;
|
||||
|
||||
/// Handles incoming events from the producer.
|
||||
///
|
||||
/// This function is responsible for receiving events from the producer and sending them to the appropriate adapters.
|
||||
/// It also handles the shutdown process and saves any pending logs to the event store.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn event_bus(
|
||||
mut rx: mpsc::Receiver<Event>,
|
||||
adapters: Vec<Arc<dyn ChannelAdapter>>,
|
||||
store: Arc<EventStore>,
|
||||
shutdown: CancellationToken,
|
||||
shutdown_complete: Option<tokio::sync::oneshot::Sender<()>>,
|
||||
) -> Result<(), Error> {
|
||||
let mut current_log = Log {
|
||||
event_name: crate::event::Name::Everything,
|
||||
key: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs().to_string(),
|
||||
records: Vec::new(),
|
||||
};
|
||||
|
||||
let mut unprocessed_events = Vec::new();
|
||||
loop {
|
||||
tokio::select! {
|
||||
Some(event) = rx.recv() => {
|
||||
current_log.records.push(event.clone());
|
||||
let mut send_tasks = Vec::new();
|
||||
for adapter in &adapters {
|
||||
if event.channels.contains(&adapter.name()) {
|
||||
let adapter = adapter.clone();
|
||||
let event = event.clone();
|
||||
send_tasks.push(tokio::spawn(async move {
|
||||
if let Err(e) = adapter.send(&event).await {
|
||||
tracing::error!("Failed to send event to {}: {}", adapter.name(), e);
|
||||
Err(e)
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}));
|
||||
}
|
||||
}
|
||||
for task in send_tasks {
|
||||
if task.await?.is_err() {
|
||||
// If sending fails, add the event to the unprocessed list
|
||||
let failed_event = event.clone();
|
||||
unprocessed_events.push(failed_event);
|
||||
}
|
||||
}
|
||||
|
||||
// Clear the current log because we only care about unprocessed events
|
||||
current_log.records.clear();
|
||||
}
|
||||
_ = shutdown.cancelled() => {
|
||||
tracing::info!("Shutting down event bus, saving pending logs...");
|
||||
// Check if there are still unprocessed messages in the channel
|
||||
while let Ok(Some(event)) = tokio::time::timeout(
|
||||
Duration::from_millis(100),
|
||||
rx.recv()
|
||||
).await {
|
||||
unprocessed_events.push(event);
|
||||
}
|
||||
|
||||
// save only if there are unprocessed events
|
||||
if !unprocessed_events.is_empty() {
|
||||
tracing::info!("Save {} unhandled events", unprocessed_events.len());
|
||||
// create and save logging
|
||||
let shutdown_log = Log {
|
||||
event_name: crate::event::Name::Everything,
|
||||
key: format!("shutdown_{}", SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs()),
|
||||
records: unprocessed_events,
|
||||
};
|
||||
|
||||
store.save_logs(&[shutdown_log]).await?;
|
||||
} else {
|
||||
tracing::info!("no unhandled events need to be saved");
|
||||
}
|
||||
tracing::debug!("shutdown_complete is Some: {}", shutdown_complete.is_some());
|
||||
|
||||
if let Some(complete_sender) = shutdown_complete {
|
||||
// send a completion signal
|
||||
let result = complete_sender.send(());
|
||||
match result {
|
||||
Ok(_) => tracing::info!("Event bus shutdown signal sent"),
|
||||
Err(e) => tracing::error!("Failed to send event bus shutdown signal: {:?}", e),
|
||||
}
|
||||
tracing::info!("Shutting down event bus");
|
||||
}
|
||||
tracing::info!("Event bus shutdown complete");
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
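A minimal wiring sketch for `event_bus`, intended to run inside an async fn whose error type can absorb both `JoinError` and the crate `Error`; the `config`, `adapters`, and `store` values are assumed to come from elsewhere in the crate:

```rust
// Wire the event bus to a channel and a cancellation token.
let (tx, rx) = tokio::sync::mpsc::channel::<Event>(config.channel_capacity);
let shutdown = tokio_util::sync::CancellationToken::new();
let (done_tx, done_rx) = tokio::sync::oneshot::channel();

let bus = tokio::spawn(event_bus(rx, adapters, store.clone(), shutdown.clone(), Some(done_tx)));

// ... producers send events through `tx` ...

// Request shutdown and wait for the bus to flush pending logs.
shutdown.cancel();
let _ = done_rx.await;
bus.await??;
```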
|
||||
@@ -1,166 +0,0 @@
|
||||
use config::{Config, File, FileFormat};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::env;
|
||||
|
||||
/// Configuration for the Webhook adapter.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct WebhookConfig {
|
||||
pub endpoint: String,
|
||||
pub auth_token: Option<String>,
|
||||
pub custom_headers: Option<HashMap<String, String>>,
|
||||
pub max_retries: u32,
|
||||
pub timeout: u64,
|
||||
}
|
||||
|
||||
impl WebhookConfig {
|
||||
/// verify that the configuration is valid
|
||||
pub fn validate(&self) -> Result<(), String> {
|
||||
// verify that endpoint cannot be empty
|
||||
if self.endpoint.trim().is_empty() {
|
||||
return Err("Webhook endpoint cannot be empty".to_string());
|
||||
}
|
||||
|
||||
// verification timeout must be reasonable
|
||||
if self.timeout == 0 {
|
||||
return Err("Webhook timeout must be greater than 0".to_string());
|
||||
}
|
||||
|
||||
// Verify that the maximum number of retry is reasonable
|
||||
if self.max_retries > 10 {
|
||||
return Err("Maximum retry count cannot exceed 10".to_string());
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Configuration for the Kafka adapter.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct KafkaConfig {
|
||||
pub brokers: String,
|
||||
pub topic: String,
|
||||
pub max_retries: u32,
|
||||
pub timeout: u64,
|
||||
}
|
||||
|
||||
/// Configuration for the MQTT adapter.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct MqttConfig {
|
||||
pub broker: String,
|
||||
pub port: u16,
|
||||
pub client_id: String,
|
||||
pub topic: String,
|
||||
pub max_retries: u32,
|
||||
}
|
||||
|
||||
/// Configuration for a single notification adapter.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(tag = "type")]
|
||||
pub enum AdapterConfig {
|
||||
Webhook(WebhookConfig),
|
||||
Kafka(KafkaConfig),
|
||||
Mqtt(MqttConfig),
|
||||
}
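Because of `#[serde(tag = "type")]`, the `type` key in a TOML, JSON, or environment source selects the variant. A minimal sketch with `serde_json`, assuming `AdapterConfig` is in scope:

```rust
let json = r#"{
    "type": "Webhook",
    "endpoint": "http://127.0.0.1:3020/webhook",
    "auth_token": null,
    "custom_headers": null,
    "max_retries": 3,
    "timeout": 30
}"#;
let adapter: AdapterConfig = serde_json::from_str(json).unwrap();
assert!(matches!(adapter, AdapterConfig::Webhook(_)));
```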
|
||||
|
||||
/// Configuration for the notification system.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct NotifierConfig {
|
||||
#[serde(default = "default_store_path")]
|
||||
pub store_path: String,
|
||||
#[serde(default = "default_channel_capacity")]
|
||||
pub channel_capacity: usize,
|
||||
pub adapters: Vec<AdapterConfig>,
|
||||
}
|
||||
|
||||
impl Default for NotifierConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
store_path: default_store_path(),
|
||||
channel_capacity: default_channel_capacity(),
|
||||
adapters: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl NotifierConfig {
|
||||
/// create a new configuration with default values
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
/// Loads the configuration file.
/// Supports TOML and YAML formats, read in priority order.
|
||||
///
|
||||
/// # Parameters
|
||||
/// - `config_dir`: Configuration file path
|
||||
///
|
||||
/// # Returns
|
||||
/// Configuration information
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use rustfs_event_notifier::NotifierConfig;
|
||||
///
|
||||
/// let config = NotifierConfig::event_load_config(None);
|
||||
/// ```
|
||||
pub fn event_load_config(config_dir: Option<String>) -> NotifierConfig {
|
||||
let config_dir = if let Some(path) = config_dir {
|
||||
// If a path is provided, check if it's empty
|
||||
if path.is_empty() {
|
||||
// If empty, use the default config file name
|
||||
DEFAULT_CONFIG_FILE.to_string()
|
||||
} else {
|
||||
// Use the provided path
|
||||
let path = std::path::Path::new(&path);
|
||||
if path.extension().is_some() {
|
||||
// If the path has an extension, strip it; Config::builder probes the supported extensions itself
|
||||
path.with_extension("").to_string_lossy().into_owned()
|
||||
} else {
|
||||
// Otherwise use the provided path as the config file base name
|
||||
path.to_string_lossy().into_owned()
|
||||
}
|
||||
}
|
||||
} else {
|
||||
// If no path provided, use current directory + default config file
|
||||
match env::current_dir() {
|
||||
Ok(dir) => dir.join(DEFAULT_CONFIG_FILE).to_string_lossy().into_owned(),
|
||||
Err(_) => {
|
||||
eprintln!("Warning: Failed to get current directory, using default config file");
|
||||
DEFAULT_CONFIG_FILE.to_string()
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Log using proper logging instead of println when possible
|
||||
println!("Using config file base: {}", config_dir);
|
||||
|
||||
let app_config = Config::builder()
|
||||
.add_source(File::with_name(config_dir.as_str()).format(FileFormat::Toml).required(false))
|
||||
.add_source(File::with_name(config_dir.as_str()).format(FileFormat::Yaml).required(false))
|
||||
.build()
|
||||
.unwrap_or_default();
|
||||
match app_config.try_deserialize::<NotifierConfig>() {
|
||||
Ok(app_config) => {
|
||||
println!("Parsed AppConfig: {:?} \n", app_config);
|
||||
app_config
|
||||
}
|
||||
Err(e) => {
|
||||
println!("Failed to deserialize config: {}", e);
|
||||
NotifierConfig::default()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
const DEFAULT_CONFIG_FILE: &str = "event";
|
||||
|
||||
/// Provides a temporary directory as the default storage path
|
||||
fn default_store_path() -> String {
|
||||
std::env::temp_dir().join("event-notification").to_string_lossy().to_string()
|
||||
}
|
||||
|
||||
/// Provides the recommended default channel capacity for high concurrency systems
|
||||
fn default_channel_capacity() -> usize {
|
||||
10000 // Reasonable default values for high concurrency systems
|
||||
}
|
||||
@@ -1,418 +0,0 @@
|
||||
use config::ConfigError;
|
||||
use thiserror::Error;
|
||||
use tokio::sync::mpsc::error;
|
||||
use tokio::task::JoinError;
|
||||
|
||||
/// The `Error` enum represents all possible errors that can occur in the application.
|
||||
/// It implements the `std::error::Error` trait and provides a way to convert various error types into a single error type.
|
||||
#[derive(Error, Debug)]
|
||||
pub enum Error {
|
||||
#[error("Join error: {0}")]
|
||||
JoinError(#[from] JoinError),
|
||||
#[error("IO error: {0}")]
|
||||
Io(#[from] std::io::Error),
|
||||
#[error("Serialization error: {0}")]
|
||||
Serde(#[from] serde_json::Error),
|
||||
#[error("HTTP error: {0}")]
|
||||
Http(#[from] reqwest::Error),
|
||||
#[cfg(all(feature = "kafka", target_os = "linux"))]
|
||||
#[error("Kafka error: {0}")]
|
||||
Kafka(#[from] rdkafka::error::KafkaError),
|
||||
#[cfg(feature = "mqtt")]
|
||||
#[error("MQTT error: {0}")]
|
||||
Mqtt(#[from] rumqttc::ClientError),
|
||||
#[error("Channel send error: {0}")]
|
||||
ChannelSend(#[from] Box<error::SendError<crate::event::Event>>),
|
||||
#[error("Feature disabled: {0}")]
|
||||
FeatureDisabled(&'static str),
|
||||
#[error("Event bus already started")]
|
||||
EventBusStarted,
|
||||
#[error("necessary fields are missing:{0}")]
|
||||
MissingField(&'static str),
|
||||
#[error("field verification failed:{0}")]
|
||||
ValidationError(&'static str),
|
||||
#[error("Custom error: {0}")]
|
||||
Custom(String),
|
||||
#[error("Configuration error: {0}")]
|
||||
ConfigError(String),
|
||||
#[error("Configuration loading error: {0}")]
|
||||
Config(#[from] ConfigError),
|
||||
}
|
||||
|
||||
impl Error {
|
||||
pub fn custom(msg: &str) -> Error {
|
||||
Self::Custom(msg.to_string())
|
||||
}
|
||||
}
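The `#[from]` attributes above are what make `?` work across library boundaries. A minimal sketch under stated assumptions (the helper functions and the file path are hypothetical; only conversions already declared on `Error` are used):

```rust
// serde_json and std::io errors convert into the crate Error via #[from].
fn serialize_event(event: &crate::event::Event) -> Result<String, Error> {
    let payload = serde_json::to_string(event)?; // serde_json::Error -> Error::Serde
    Ok(payload)
}

fn persist_payload(payload: &str, path: &str) -> Result<(), Error> {
    std::fs::write(path, payload)?; // std::io::Error -> Error::Io
    Ok(())
}
```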
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::error::Error as StdError;
|
||||
use std::io;
|
||||
use tokio::sync::mpsc;
|
||||
|
||||
#[test]
|
||||
fn test_error_display() {
|
||||
// Test error message display
|
||||
let custom_error = Error::custom("test message");
|
||||
assert_eq!(custom_error.to_string(), "Custom error: test message");
|
||||
|
||||
let feature_error = Error::FeatureDisabled("test feature");
|
||||
assert_eq!(feature_error.to_string(), "Feature disabled: test feature");
|
||||
|
||||
let event_bus_error = Error::EventBusStarted;
|
||||
assert_eq!(event_bus_error.to_string(), "Event bus already started");
|
||||
|
||||
let missing_field_error = Error::MissingField("required_field");
|
||||
assert_eq!(missing_field_error.to_string(), "necessary fields are missing:required_field");
|
||||
|
||||
let validation_error = Error::ValidationError("invalid format");
|
||||
assert_eq!(validation_error.to_string(), "field verification failed:invalid format");
|
||||
|
||||
let config_error = Error::ConfigError("invalid config".to_string());
|
||||
assert_eq!(config_error.to_string(), "Configuration error: invalid config");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_debug() {
|
||||
// Test Debug trait implementation
|
||||
let custom_error = Error::custom("debug test");
|
||||
let debug_str = format!("{:?}", custom_error);
|
||||
assert!(debug_str.contains("Custom"));
|
||||
assert!(debug_str.contains("debug test"));
|
||||
|
||||
let feature_error = Error::FeatureDisabled("debug feature");
|
||||
let debug_str = format!("{:?}", feature_error);
|
||||
assert!(debug_str.contains("FeatureDisabled"));
|
||||
assert!(debug_str.contains("debug feature"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_custom_error_creation() {
|
||||
// Test custom error creation
|
||||
let error = Error::custom("test custom error");
|
||||
match error {
|
||||
Error::Custom(msg) => assert_eq!(msg, "test custom error"),
|
||||
_ => panic!("Expected Custom error variant"),
|
||||
}
|
||||
|
||||
// Test empty string
|
||||
let empty_error = Error::custom("");
|
||||
match empty_error {
|
||||
Error::Custom(msg) => assert_eq!(msg, ""),
|
||||
_ => panic!("Expected Custom error variant"),
|
||||
}
|
||||
|
||||
// Test special characters
|
||||
let special_error = Error::custom("Test Chinese 中文 & special chars: !@#$%");
|
||||
match special_error {
|
||||
Error::Custom(msg) => assert_eq!(msg, "Test Chinese 中文 & special chars: !@#$%"),
|
||||
_ => panic!("Expected Custom error variant"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_io_error_conversion() {
|
||||
// Test IO error conversion
|
||||
let io_error = io::Error::new(io::ErrorKind::NotFound, "file not found");
|
||||
let converted_error: Error = io_error.into();
|
||||
|
||||
match converted_error {
|
||||
Error::Io(err) => {
|
||||
assert_eq!(err.kind(), io::ErrorKind::NotFound);
|
||||
assert_eq!(err.to_string(), "file not found");
|
||||
}
|
||||
_ => panic!("Expected Io error variant"),
|
||||
}
|
||||
|
||||
// Test different types of IO errors
|
||||
let permission_error = io::Error::new(io::ErrorKind::PermissionDenied, "access denied");
|
||||
let converted: Error = permission_error.into();
|
||||
assert!(matches!(converted, Error::Io(_)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serde_error_conversion() {
|
||||
// Test serialization error conversion
|
||||
let invalid_json = r#"{"invalid": json}"#;
|
||||
let serde_error = serde_json::from_str::<serde_json::Value>(invalid_json).unwrap_err();
|
||||
let converted_error: Error = serde_error.into();
|
||||
|
||||
match converted_error {
|
||||
Error::Serde(_) => {
|
||||
// Verify error type is correct
|
||||
assert!(converted_error.to_string().contains("Serialization error"));
|
||||
}
|
||||
_ => panic!("Expected Serde error variant"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_config_error_conversion() {
|
||||
// Test configuration error conversion
|
||||
let config_error = ConfigError::Message("invalid configuration".to_string());
|
||||
let converted_error: Error = config_error.into();
|
||||
|
||||
match converted_error {
|
||||
Error::Config(_) => {
|
||||
assert!(converted_error.to_string().contains("Configuration loading error"));
|
||||
}
|
||||
_ => panic!("Expected Config error variant"),
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_channel_send_error_conversion() {
|
||||
// Test channel send error conversion
|
||||
let (tx, rx) = mpsc::channel::<crate::event::Event>(1);
|
||||
drop(rx); // Close receiver
|
||||
|
||||
// Create a test event
|
||||
use crate::event::{Bucket, Identity, Metadata, Name, Object, Source};
|
||||
use std::collections::HashMap;
|
||||
|
||||
let identity = Identity::new("test-user".to_string());
|
||||
let bucket = Bucket::new("test-bucket".to_string(), identity.clone(), "arn:aws:s3:::test-bucket".to_string());
|
||||
let object = Object::new(
|
||||
"test-key".to_string(),
|
||||
Some(1024),
|
||||
Some("etag123".to_string()),
|
||||
Some("text/plain".to_string()),
|
||||
Some(HashMap::new()),
|
||||
None,
|
||||
"sequencer123".to_string(),
|
||||
);
|
||||
let metadata = Metadata::create("1.0".to_string(), "config1".to_string(), bucket, object);
|
||||
let source = Source::new("localhost".to_string(), "8080".to_string(), "test-agent".to_string());
|
||||
|
||||
let test_event = crate::event::Event::builder()
|
||||
.event_name(Name::ObjectCreatedPut)
|
||||
.s3(metadata)
|
||||
.source(source)
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
let send_result = tx.send(test_event).await;
|
||||
assert!(send_result.is_err());
|
||||
|
||||
let send_error = send_result.unwrap_err();
|
||||
let boxed_error = Box::new(send_error);
|
||||
let converted_error: Error = boxed_error.into();
|
||||
|
||||
match converted_error {
|
||||
Error::ChannelSend(_) => {
|
||||
assert!(converted_error.to_string().contains("Channel send error"));
|
||||
}
|
||||
_ => panic!("Expected ChannelSend error variant"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_source_chain() {
|
||||
// Test the error source chain
|
||||
let io_error = io::Error::new(io::ErrorKind::InvalidData, "invalid data");
|
||||
let converted_error: Error = io_error.into();
|
||||
|
||||
// Verify the error source
|
||||
assert!(converted_error.source().is_some());
|
||||
let source = converted_error.source().unwrap();
|
||||
assert_eq!(source.to_string(), "invalid data");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_variants_exhaustive() {
|
||||
// Test creating all error variants
|
||||
let errors = vec![
|
||||
Error::FeatureDisabled("test"),
|
||||
Error::EventBusStarted,
|
||||
Error::MissingField("field"),
|
||||
Error::ValidationError("validation"),
|
||||
Error::Custom("custom".to_string()),
|
||||
Error::ConfigError("config".to_string()),
|
||||
];
|
||||
|
||||
for error in errors {
|
||||
// Verify every error displays correctly
|
||||
let error_str = error.to_string();
|
||||
assert!(!error_str.is_empty());
|
||||
|
||||
// Verify every error can be debug-formatted
|
||||
let debug_str = format!("{:?}", error);
|
||||
assert!(!debug_str.is_empty());
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_equality_and_matching() {
|
||||
// Test pattern matching on errors
|
||||
let custom_error = Error::custom("test");
|
||||
match custom_error {
|
||||
Error::Custom(msg) => assert_eq!(msg, "test"),
|
||||
_ => panic!("Pattern matching failed"),
|
||||
}
|
||||
|
||||
let feature_error = Error::FeatureDisabled("feature");
|
||||
match feature_error {
|
||||
Error::FeatureDisabled(feature) => assert_eq!(feature, "feature"),
|
||||
_ => panic!("Pattern matching failed"),
|
||||
}
|
||||
|
||||
let event_bus_error = Error::EventBusStarted;
|
||||
match event_bus_error {
|
||||
Error::EventBusStarted => {} // matches as expected
|
||||
_ => panic!("Pattern matching failed"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_message_formatting() {
|
||||
// Test error message formatting
|
||||
let test_cases = vec![
|
||||
(Error::FeatureDisabled("kafka"), "Feature disabled: kafka"),
|
||||
(Error::MissingField("bucket_name"), "necessary fields are missing:bucket_name"),
|
||||
(Error::ValidationError("invalid email"), "field verification failed:invalid email"),
|
||||
(Error::ConfigError("missing file".to_string()), "Configuration error: missing file"),
|
||||
];
|
||||
|
||||
for (error, expected_message) in test_cases {
|
||||
assert_eq!(error.to_string(), expected_message);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_memory_efficiency() {
|
||||
// Test the memory footprint of the error type
|
||||
use std::mem;
|
||||
|
||||
let size = mem::size_of::<Error>();
|
||||
// The error type should stay reasonably compact; given the variants it wraps, 96 bytes is acceptable
|
||||
assert!(size <= 128, "Error size should be reasonable, got {} bytes", size);
|
||||
|
||||
// Test the size of Option<Error>
|
||||
let option_size = mem::size_of::<Option<Error>>();
|
||||
assert!(option_size <= 136, "Option<Error> should be efficient, got {} bytes", option_size);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_thread_safety() {
|
||||
// Test that the error type is Send and Sync
|
||||
fn assert_send<T: Send>() {}
|
||||
fn assert_sync<T: Sync>() {}
|
||||
|
||||
assert_send::<Error>();
|
||||
assert_sync::<Error>();
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_custom_error_edge_cases() {
|
||||
// Test edge cases for custom errors
|
||||
let long_message = "a".repeat(1000);
|
||||
let long_error = Error::custom(&long_message);
|
||||
match long_error {
|
||||
Error::Custom(msg) => assert_eq!(msg.len(), 1000),
|
||||
_ => panic!("Expected Custom error variant"),
|
||||
}
|
||||
|
||||
// Test a message containing newlines
|
||||
let multiline_error = Error::custom("line1\nline2\nline3");
|
||||
match multiline_error {
|
||||
Error::Custom(msg) => assert!(msg.contains('\n')),
|
||||
_ => panic!("Expected Custom error variant"),
|
||||
}
|
||||
|
||||
// Test a message containing Unicode characters
|
||||
let unicode_error = Error::custom("🚀 Unicode test 测试 🎉");
|
||||
match unicode_error {
|
||||
Error::Custom(msg) => assert!(msg.contains('🚀')),
|
||||
_ => panic!("Expected Custom error variant"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_conversion_consistency() {
|
||||
// Test the consistency of error conversion
|
||||
let original_io_error = io::Error::new(io::ErrorKind::TimedOut, "timeout");
|
||||
let error_message = original_io_error.to_string();
|
||||
let converted: Error = original_io_error.into();
|
||||
|
||||
// Verify the converted error contains the original error message
|
||||
assert!(converted.to_string().contains(&error_message));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_downcast() {
|
||||
// Test error downcasting
|
||||
let io_error = io::Error::other("test error");
|
||||
let converted: Error = io_error.into();
|
||||
|
||||
// Verify the source error can be retrieved
|
||||
if let Error::Io(ref inner) = converted {
|
||||
assert_eq!(inner.to_string(), "test error");
|
||||
assert_eq!(inner.kind(), io::ErrorKind::Other);
|
||||
} else {
|
||||
panic!("Expected Io error variant");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_chain_depth() {
|
||||
// Test the depth of the error source chain
|
||||
let root_cause = io::Error::other("root cause");
|
||||
let converted: Error = root_cause.into();
|
||||
|
||||
let mut depth = 0;
|
||||
let mut current_error: &dyn StdError = &converted;
|
||||
|
||||
while let Some(source) = current_error.source() {
|
||||
depth += 1;
|
||||
current_error = source;
|
||||
// guard against an infinite loop
|
||||
if depth > 10 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
assert!(depth > 0, "Error should have at least one source");
|
||||
assert!(depth <= 3, "Error chain should not be too deep");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_static_str_lifetime() {
|
||||
// Test the 'static string lifetime
|
||||
fn create_feature_error() -> Error {
|
||||
Error::FeatureDisabled("static_feature")
|
||||
}
|
||||
|
||||
let error = create_feature_error();
|
||||
match error {
|
||||
Error::FeatureDisabled(feature) => assert_eq!(feature, "static_feature"),
|
||||
_ => panic!("Expected FeatureDisabled error variant"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_formatting_consistency() {
|
||||
// Test the consistency of error formatting
|
||||
let errors = vec![
|
||||
Error::FeatureDisabled("test"),
|
||||
Error::MissingField("field"),
|
||||
Error::ValidationError("validation"),
|
||||
Error::Custom("custom".to_string()),
|
||||
];
|
||||
|
||||
for error in errors {
|
||||
let display_str = error.to_string();
|
||||
let debug_str = format!("{:?}", error);
|
||||
|
||||
// Neither the Display nor the Debug output should be empty
|
||||
assert!(!display_str.is_empty());
|
||||
assert!(!debug_str.is_empty());
|
||||
|
||||
// Debug output usually carries more detail, but that is not guaranteed
|
||||
// here we only verify that both have content
|
||||
assert!(!debug_str.is_empty());
|
||||
assert!(!display_str.is_empty());
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,616 +0,0 @@
|
||||
use crate::Error;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_with::{DeserializeFromStr, SerializeDisplay};
|
||||
use smallvec::{SmallVec, smallvec};
|
||||
use std::borrow::Cow;
|
||||
use std::collections::HashMap;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
use strum::{Display, EnumString};
|
||||
use uuid::Uuid;
|
||||
|
||||
/// A struct representing the identity of the user
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
pub struct Identity {
|
||||
#[serde(rename = "principalId")]
|
||||
pub principal_id: String,
|
||||
}
|
||||
|
||||
impl Identity {
|
||||
/// Create a new Identity instance
|
||||
pub fn new(principal_id: String) -> Self {
|
||||
Self { principal_id }
|
||||
}
|
||||
|
||||
/// Set the principal ID
|
||||
pub fn set_principal_id(&mut self, principal_id: String) {
|
||||
self.principal_id = principal_id;
|
||||
}
|
||||
}
|
||||
|
||||
/// A struct representing the bucket information
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
pub struct Bucket {
|
||||
pub name: String,
|
||||
#[serde(rename = "ownerIdentity")]
|
||||
pub owner_identity: Identity,
|
||||
pub arn: String,
|
||||
}
|
||||
|
||||
impl Bucket {
|
||||
/// Create a new Bucket instance
|
||||
pub fn new(name: String, owner_identity: Identity, arn: String) -> Self {
|
||||
Self {
|
||||
name,
|
||||
owner_identity,
|
||||
arn,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the name of the bucket
|
||||
pub fn set_name(&mut self, name: String) {
|
||||
self.name = name;
|
||||
}
|
||||
|
||||
/// Set the ARN of the bucket
|
||||
pub fn set_arn(&mut self, arn: String) {
|
||||
self.arn = arn;
|
||||
}
|
||||
|
||||
/// Set the owner identity of the bucket
|
||||
pub fn set_owner_identity(&mut self, owner_identity: Identity) {
|
||||
self.owner_identity = owner_identity;
|
||||
}
|
||||
}
|
||||
|
||||
/// A struct representing the object information
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
pub struct Object {
|
||||
pub key: String,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none")]
|
||||
pub size: Option<i64>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none", rename = "eTag")]
|
||||
pub etag: Option<String>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none", rename = "contentType")]
|
||||
pub content_type: Option<String>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none", rename = "userMetadata")]
|
||||
pub user_metadata: Option<HashMap<String, String>>,
|
||||
#[serde(default, skip_serializing_if = "Option::is_none", rename = "versionId")]
|
||||
pub version_id: Option<String>,
|
||||
pub sequencer: String,
|
||||
}
|
||||
|
||||
impl Object {
|
||||
/// Create a new Object instance
|
||||
pub fn new(
|
||||
key: String,
|
||||
size: Option<i64>,
|
||||
etag: Option<String>,
|
||||
content_type: Option<String>,
|
||||
user_metadata: Option<HashMap<String, String>>,
|
||||
version_id: Option<String>,
|
||||
sequencer: String,
|
||||
) -> Self {
|
||||
Self {
|
||||
key,
|
||||
size,
|
||||
etag,
|
||||
content_type,
|
||||
user_metadata,
|
||||
version_id,
|
||||
sequencer,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the key
|
||||
pub fn set_key(&mut self, key: String) {
|
||||
self.key = key;
|
||||
}
|
||||
|
||||
/// Set the size
|
||||
pub fn set_size(&mut self, size: Option<i64>) {
|
||||
self.size = size;
|
||||
}
|
||||
|
||||
/// Set the etag
|
||||
pub fn set_etag(&mut self, etag: Option<String>) {
|
||||
self.etag = etag;
|
||||
}
|
||||
|
||||
/// Set the content type
|
||||
pub fn set_content_type(&mut self, content_type: Option<String>) {
|
||||
self.content_type = content_type;
|
||||
}
|
||||
|
||||
/// Set the user metadata
|
||||
pub fn set_user_metadata(&mut self, user_metadata: Option<HashMap<String, String>>) {
|
||||
self.user_metadata = user_metadata;
|
||||
}
|
||||
|
||||
/// Set the version ID
|
||||
pub fn set_version_id(&mut self, version_id: Option<String>) {
|
||||
self.version_id = version_id;
|
||||
}
|
||||
|
||||
/// Set the sequencer
|
||||
pub fn set_sequencer(&mut self, sequencer: String) {
|
||||
self.sequencer = sequencer;
|
||||
}
|
||||
}
|
||||
|
||||
/// A struct representing the metadata of the event
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
pub struct Metadata {
|
||||
#[serde(rename = "s3SchemaVersion")]
|
||||
pub schema_version: String,
|
||||
#[serde(rename = "configurationId")]
|
||||
pub configuration_id: String,
|
||||
pub bucket: Bucket,
|
||||
pub object: Object,
|
||||
}
|
||||
|
||||
impl Default for Metadata {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
impl Metadata {
|
||||
/// Create a new Metadata instance with default values
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
schema_version: "1.0".to_string(),
|
||||
configuration_id: "default".to_string(),
|
||||
bucket: Bucket::new(
|
||||
"default".to_string(),
|
||||
Identity::new("default".to_string()),
|
||||
"arn:aws:s3:::default".to_string(),
|
||||
),
|
||||
object: Object::new("default".to_string(), None, None, None, None, None, "default".to_string()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new Metadata instance
|
||||
pub fn create(schema_version: String, configuration_id: String, bucket: Bucket, object: Object) -> Self {
|
||||
Self {
|
||||
schema_version,
|
||||
configuration_id,
|
||||
bucket,
|
||||
object,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the schema version
|
||||
pub fn set_schema_version(&mut self, schema_version: String) {
|
||||
self.schema_version = schema_version;
|
||||
}
|
||||
|
||||
/// Set the configuration ID
|
||||
pub fn set_configuration_id(&mut self, configuration_id: String) {
|
||||
self.configuration_id = configuration_id;
|
||||
}
|
||||
|
||||
/// Set the bucket
|
||||
pub fn set_bucket(&mut self, bucket: Bucket) {
|
||||
self.bucket = bucket;
|
||||
}
|
||||
|
||||
/// Set the object
|
||||
pub fn set_object(&mut self, object: Object) {
|
||||
self.object = object;
|
||||
}
|
||||
}
|
||||
|
||||
/// A struct representing the source of the event
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
pub struct Source {
|
||||
pub host: String,
|
||||
pub port: String,
|
||||
#[serde(rename = "userAgent")]
|
||||
pub user_agent: String,
|
||||
}
|
||||
|
||||
impl Source {
|
||||
/// Create a new Source instance
|
||||
pub fn new(host: String, port: String, user_agent: String) -> Self {
|
||||
Self { host, port, user_agent }
|
||||
}
|
||||
|
||||
/// Set the host
|
||||
pub fn set_host(&mut self, host: String) {
|
||||
self.host = host;
|
||||
}
|
||||
|
||||
/// Set the port
|
||||
pub fn set_port(&mut self, port: String) {
|
||||
self.port = port;
|
||||
}
|
||||
|
||||
/// Set the user agent
|
||||
pub fn set_user_agent(&mut self, user_agent: String) {
|
||||
self.user_agent = user_agent;
|
||||
}
|
||||
}
|
||||
|
||||
/// Builder for creating an Event.
|
||||
///
|
||||
/// This struct is used to build an Event object with various parameters.
|
||||
/// It provides methods to set each parameter and a build method to create the Event.
|
||||
#[derive(Default, Clone)]
|
||||
pub struct EventBuilder {
|
||||
event_version: Option<String>,
|
||||
event_source: Option<String>,
|
||||
aws_region: Option<String>,
|
||||
event_time: Option<String>,
|
||||
event_name: Option<Name>,
|
||||
user_identity: Option<Identity>,
|
||||
request_parameters: Option<HashMap<String, String>>,
|
||||
response_elements: Option<HashMap<String, String>>,
|
||||
s3: Option<Metadata>,
|
||||
source: Option<Source>,
|
||||
channels: Option<SmallVec<[String; 2]>>,
|
||||
}
|
||||
|
||||
impl EventBuilder {
|
||||
/// Create a builder pre-filled with default values
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
event_version: Some(Cow::Borrowed("2.0").to_string()),
|
||||
event_source: Some(Cow::Borrowed("aws:s3").to_string()),
|
||||
aws_region: Some("us-east-1".to_string()),
|
||||
event_time: Some(SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs().to_string()),
|
||||
event_name: None,
|
||||
user_identity: Some(Identity {
|
||||
principal_id: "anonymous".to_string(),
|
||||
}),
|
||||
request_parameters: Some(HashMap::new()),
|
||||
response_elements: Some(HashMap::new()),
|
||||
s3: None,
|
||||
source: None,
|
||||
channels: Some(Vec::new().into()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Validate and set the event version; empty values are ignored
|
||||
pub fn event_version(mut self, event_version: impl Into<String>) -> Self {
|
||||
let event_version = event_version.into();
|
||||
if !event_version.is_empty() {
|
||||
self.event_version = Some(event_version);
|
||||
}
|
||||
self
|
||||
}
|
||||
|
||||
/// Validate and set the event source; empty values are ignored
|
||||
pub fn event_source(mut self, event_source: impl Into<String>) -> Self {
|
||||
let event_source = event_source.into();
|
||||
if !event_source.is_empty() {
|
||||
self.event_source = Some(event_source);
|
||||
}
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the AWS region
|
||||
pub fn aws_region(mut self, aws_region: impl Into<String>) -> Self {
|
||||
self.aws_region = Some(aws_region.into());
|
||||
self
|
||||
}
|
||||
|
||||
/// set event time
|
||||
pub fn event_time(mut self, event_time: impl Into<String>) -> Self {
|
||||
self.event_time = Some(event_time.into());
|
||||
self
|
||||
}
|
||||
|
||||
/// set event name
|
||||
pub fn event_name(mut self, event_name: Name) -> Self {
|
||||
self.event_name = Some(event_name);
|
||||
self
|
||||
}
|
||||
|
||||
/// set user identity
|
||||
pub fn user_identity(mut self, user_identity: Identity) -> Self {
|
||||
self.user_identity = Some(user_identity);
|
||||
self
|
||||
}
|
||||
|
||||
/// set request parameters
|
||||
pub fn request_parameters(mut self, request_parameters: HashMap<String, String>) -> Self {
|
||||
self.request_parameters = Some(request_parameters);
|
||||
self
|
||||
}
|
||||
|
||||
/// set response elements
|
||||
pub fn response_elements(mut self, response_elements: HashMap<String, String>) -> Self {
|
||||
self.response_elements = Some(response_elements);
|
||||
self
|
||||
}
|
||||
|
||||
/// setting up s3 metadata
|
||||
pub fn s3(mut self, s3: Metadata) -> Self {
|
||||
self.s3 = Some(s3);
|
||||
self
|
||||
}
|
||||
|
||||
/// set event source information
|
||||
pub fn source(mut self, source: Source) -> Self {
|
||||
self.source = Some(source);
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the channels the event will be dispatched to
|
||||
pub fn channels(mut self, channels: Vec<String>) -> Self {
|
||||
self.channels = Some(channels.into());
|
||||
self
|
||||
}
|
||||
|
||||
/// Create a preconfigured builder for common object event scenarios
|
||||
pub fn for_object_creation(s3: Metadata, source: Source) -> Self {
|
||||
Self::new().event_name(Name::ObjectCreatedPut).s3(s3).source(source)
|
||||
}
|
||||
|
||||
/// Create a preconfigured builder for object deletion events
|
||||
pub fn for_object_removal(s3: Metadata, source: Source) -> Self {
|
||||
Self::new().event_name(Name::ObjectRemovedDelete).s3(s3).source(source)
|
||||
}
|
||||
|
||||
/// build event instance
|
||||
///
|
||||
/// Verify the required fields and create a complete Event object
|
||||
pub fn build(self) -> Result<Event, Error> {
|
||||
let event_version = self.event_version.ok_or(Error::MissingField("event_version"))?;
|
||||
|
||||
let event_source = self.event_source.ok_or(Error::MissingField("event_source"))?;
|
||||
|
||||
let aws_region = self.aws_region.ok_or(Error::MissingField("aws_region"))?;
|
||||
|
||||
let event_time = self.event_time.ok_or(Error::MissingField("event_time"))?;
|
||||
|
||||
let event_name = self.event_name.ok_or(Error::MissingField("event_name"))?;
|
||||
|
||||
let user_identity = self.user_identity.ok_or(Error::MissingField("user_identity"))?;
|
||||
|
||||
let request_parameters = self.request_parameters.unwrap_or_default();
|
||||
let response_elements = self.response_elements.unwrap_or_default();
|
||||
|
||||
let s3 = self.s3.ok_or(Error::MissingField("s3"))?;
|
||||
|
||||
let source = self.source.ok_or(Error::MissingField("source"))?;
|
||||
|
||||
let channels = self.channels.unwrap_or_else(|| smallvec![]);
|
||||
|
||||
Ok(Event {
|
||||
event_version,
|
||||
event_source,
|
||||
aws_region,
|
||||
event_time,
|
||||
event_name,
|
||||
user_identity,
|
||||
request_parameters,
|
||||
response_elements,
|
||||
s3,
|
||||
source,
|
||||
id: Uuid::new_v4(),
|
||||
timestamp: SystemTime::now(),
|
||||
channels,
|
||||
})
|
||||
}
|
||||
}
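A hedged usage sketch for the builder above, using the crate re-exports shown in lib.rs further down; only `event_name`, `s3` and `source` must be supplied, everything else falls back to the defaults set in `new()`:

```rust
use rustfs_event_notifier::{Error, Event, EventBuilder, Identity, Metadata, Name, Source};

fn demo_build_event() -> Result<Event, Error> {
    // Metadata::new() and the builder defaults fill in everything except
    // event_name, s3 and source, which build() treats as required.
    EventBuilder::new()
        .event_name(Name::ObjectCreatedPut)
        .s3(Metadata::new())
        .source(Source::new("localhost".into(), "9000".into(), "demo/1.0".into()))
        .user_identity(Identity::new("user123".into()))
        .channels(vec!["webhook".to_string()])
        .build()
}
```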
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
pub struct Event {
|
||||
#[serde(rename = "eventVersion")]
|
||||
pub event_version: String,
|
||||
#[serde(rename = "eventSource")]
|
||||
pub event_source: String,
|
||||
#[serde(rename = "awsRegion")]
|
||||
pub aws_region: String,
|
||||
#[serde(rename = "eventTime")]
|
||||
pub event_time: String,
|
||||
#[serde(rename = "eventName")]
|
||||
pub event_name: Name,
|
||||
#[serde(rename = "userIdentity")]
|
||||
pub user_identity: Identity,
|
||||
#[serde(rename = "requestParameters")]
|
||||
pub request_parameters: HashMap<String, String>,
|
||||
#[serde(rename = "responseElements")]
|
||||
pub response_elements: HashMap<String, String>,
|
||||
pub s3: Metadata,
|
||||
pub source: Source,
|
||||
pub id: Uuid,
|
||||
pub timestamp: SystemTime,
|
||||
pub channels: SmallVec<[String; 2]>,
|
||||
}
|
||||
|
||||
impl Event {
|
||||
/// create a new event builder
|
||||
///
|
||||
/// Returns an EventBuilder instance pre-filled with default values
|
||||
pub fn builder() -> EventBuilder {
|
||||
EventBuilder::new()
|
||||
}
|
||||
|
||||
/// Quickly create Event instances with necessary fields
|
||||
///
|
||||
/// suitable for common s3 event scenarios
|
||||
pub fn create(event_name: Name, s3: Metadata, source: Source, channels: Vec<String>) -> Self {
|
||||
Self::builder()
|
||||
.event_name(event_name)
|
||||
.s3(s3)
|
||||
.source(source)
|
||||
.channels(channels)
|
||||
.build()
|
||||
.expect("Failed to create event, missing necessary parameters")
|
||||
}
|
||||
|
||||
/// a convenient way to create a preconfigured builder
|
||||
pub fn for_object_creation(s3: Metadata, source: Source) -> EventBuilder {
|
||||
EventBuilder::for_object_creation(s3, source)
|
||||
}
|
||||
|
||||
/// a convenient way to create a preconfigured builder
|
||||
pub fn for_object_removal(s3: Metadata, source: Source) -> EventBuilder {
|
||||
EventBuilder::for_object_removal(s3, source)
|
||||
}
|
||||
|
||||
/// Determine whether an event belongs to a specific type
|
||||
pub fn is_type(&self, event_type: Name) -> bool {
|
||||
let mask = event_type.mask();
|
||||
(self.event_name.mask() & mask) != 0
|
||||
}
|
||||
|
||||
/// Determine whether an event needs to be sent to a specific channel
|
||||
pub fn is_for_channel(&self, channel: &str) -> bool {
|
||||
self.channels.iter().any(|c| c == channel)
|
||||
}
|
||||
}
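The convenience constructors and `is_for_channel` compose as below (a sketch; the channel names are illustrative):

```rust
use rustfs_event_notifier::{Event, Metadata, Source};

fn demo_routing() {
    let s3 = Metadata::new();
    let source = Source::new("localhost".into(), "9000".into(), "demo/1.0".into());

    // for_object_creation pre-sets Name::ObjectCreatedPut; build() fills the rest.
    let event = Event::for_object_creation(s3, source)
        .channels(vec!["webhook".to_string()])
        .build()
        .expect("required fields are pre-filled by the builder");

    assert!(event.is_for_channel("webhook"));
    assert!(!event.is_for_channel("mqtt"));
}
```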
|
||||
|
||||
#[derive(Serialize, Deserialize, Clone, Debug)]
|
||||
pub struct Log {
|
||||
#[serde(rename = "eventName")]
|
||||
pub event_name: Name,
|
||||
pub key: String,
|
||||
pub records: Vec<Event>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq, SerializeDisplay, DeserializeFromStr, Display, EnumString)]
|
||||
#[strum(serialize_all = "SCREAMING_SNAKE_CASE")]
|
||||
pub enum Name {
|
||||
ObjectAccessedGet,
|
||||
ObjectAccessedGetRetention,
|
||||
ObjectAccessedGetLegalHold,
|
||||
ObjectAccessedHead,
|
||||
ObjectAccessedAttributes,
|
||||
ObjectCreatedCompleteMultipartUpload,
|
||||
ObjectCreatedCopy,
|
||||
ObjectCreatedPost,
|
||||
ObjectCreatedPut,
|
||||
ObjectCreatedPutRetention,
|
||||
ObjectCreatedPutLegalHold,
|
||||
ObjectCreatedPutTagging,
|
||||
ObjectCreatedDeleteTagging,
|
||||
ObjectRemovedDelete,
|
||||
ObjectRemovedDeleteMarkerCreated,
|
||||
ObjectRemovedDeleteAllVersions,
|
||||
ObjectRemovedNoOp,
|
||||
BucketCreated,
|
||||
BucketRemoved,
|
||||
ObjectReplicationFailed,
|
||||
ObjectReplicationComplete,
|
||||
ObjectReplicationMissedThreshold,
|
||||
ObjectReplicationReplicatedAfterThreshold,
|
||||
ObjectReplicationNotTracked,
|
||||
ObjectRestorePost,
|
||||
ObjectRestoreCompleted,
|
||||
ObjectTransitionFailed,
|
||||
ObjectTransitionComplete,
|
||||
ObjectManyVersions,
|
||||
ObjectLargeVersions,
|
||||
PrefixManyFolders,
|
||||
IlmDelMarkerExpirationDelete,
|
||||
ObjectAccessedAll,
|
||||
ObjectCreatedAll,
|
||||
ObjectRemovedAll,
|
||||
ObjectReplicationAll,
|
||||
ObjectRestoreAll,
|
||||
ObjectTransitionAll,
|
||||
ObjectScannerAll,
|
||||
Everything,
|
||||
}
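Because the enum derives `SerializeDisplay`/`DeserializeFromStr` together with strum's `serialize_all = "SCREAMING_SNAKE_CASE"`, each variant (de)serializes through its upper-snake string form. A small sketch of what that implies, assuming `Name` is in scope:

```rust
use std::str::FromStr;

fn demo_name_strings() {
    // Display comes from strum: variants render as SCREAMING_SNAKE_CASE.
    assert_eq!(Name::ObjectCreatedPut.to_string(), "OBJECT_CREATED_PUT");

    // FromStr (also from strum) accepts the same spelling.
    let parsed = Name::from_str("OBJECT_REMOVED_DELETE").expect("known variant");
    assert_eq!(parsed, Name::ObjectRemovedDelete);
}
```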
|
||||
|
||||
impl Name {
|
||||
pub fn expand(&self) -> Vec<Name> {
|
||||
match self {
|
||||
Name::ObjectAccessedAll => vec![
|
||||
Name::ObjectAccessedGet,
|
||||
Name::ObjectAccessedHead,
|
||||
Name::ObjectAccessedGetRetention,
|
||||
Name::ObjectAccessedGetLegalHold,
|
||||
Name::ObjectAccessedAttributes,
|
||||
],
|
||||
Name::ObjectCreatedAll => vec![
|
||||
Name::ObjectCreatedCompleteMultipartUpload,
|
||||
Name::ObjectCreatedCopy,
|
||||
Name::ObjectCreatedPost,
|
||||
Name::ObjectCreatedPut,
|
||||
Name::ObjectCreatedPutRetention,
|
||||
Name::ObjectCreatedPutLegalHold,
|
||||
Name::ObjectCreatedPutTagging,
|
||||
Name::ObjectCreatedDeleteTagging,
|
||||
],
|
||||
Name::ObjectRemovedAll => vec![
|
||||
Name::ObjectRemovedDelete,
|
||||
Name::ObjectRemovedDeleteMarkerCreated,
|
||||
Name::ObjectRemovedNoOp,
|
||||
Name::ObjectRemovedDeleteAllVersions,
|
||||
],
|
||||
Name::ObjectReplicationAll => vec![
|
||||
Name::ObjectReplicationFailed,
|
||||
Name::ObjectReplicationComplete,
|
||||
Name::ObjectReplicationNotTracked,
|
||||
Name::ObjectReplicationMissedThreshold,
|
||||
Name::ObjectReplicationReplicatedAfterThreshold,
|
||||
],
|
||||
Name::ObjectRestoreAll => vec![Name::ObjectRestorePost, Name::ObjectRestoreCompleted],
|
||||
Name::ObjectTransitionAll => {
|
||||
vec![Name::ObjectTransitionFailed, Name::ObjectTransitionComplete]
|
||||
}
|
||||
Name::ObjectScannerAll => vec![Name::ObjectManyVersions, Name::ObjectLargeVersions, Name::PrefixManyFolders],
|
||||
Name::Everything => (1..=Name::IlmDelMarkerExpirationDelete as u32)
|
||||
.map(|i| Name::from_repr(i).unwrap())
|
||||
.collect(),
|
||||
_ => vec![*self],
|
||||
}
|
||||
}
|
||||
|
||||
pub fn mask(&self) -> u64 {
|
||||
if (*self as u32) < Name::ObjectAccessedAll as u32 {
|
||||
1 << (*self as u32 - 1)
|
||||
} else {
|
||||
self.expand().iter().fold(0, |acc, n| acc | (1 << (*n as u32 - 1)))
|
||||
}
|
||||
}
|
||||
|
||||
fn from_repr(discriminant: u32) -> Option<Self> {
|
||||
match discriminant {
|
||||
1 => Some(Name::ObjectAccessedGet),
|
||||
2 => Some(Name::ObjectAccessedGetRetention),
|
||||
3 => Some(Name::ObjectAccessedGetLegalHold),
|
||||
4 => Some(Name::ObjectAccessedHead),
|
||||
5 => Some(Name::ObjectAccessedAttributes),
|
||||
6 => Some(Name::ObjectCreatedCompleteMultipartUpload),
|
||||
7 => Some(Name::ObjectCreatedCopy),
|
||||
8 => Some(Name::ObjectCreatedPost),
|
||||
9 => Some(Name::ObjectCreatedPut),
|
||||
10 => Some(Name::ObjectCreatedPutRetention),
|
||||
11 => Some(Name::ObjectCreatedPutLegalHold),
|
||||
12 => Some(Name::ObjectCreatedPutTagging),
|
||||
13 => Some(Name::ObjectCreatedDeleteTagging),
|
||||
14 => Some(Name::ObjectRemovedDelete),
|
||||
15 => Some(Name::ObjectRemovedDeleteMarkerCreated),
|
||||
16 => Some(Name::ObjectRemovedDeleteAllVersions),
|
||||
17 => Some(Name::ObjectRemovedNoOp),
|
||||
18 => Some(Name::BucketCreated),
|
||||
19 => Some(Name::BucketRemoved),
|
||||
20 => Some(Name::ObjectReplicationFailed),
|
||||
21 => Some(Name::ObjectReplicationComplete),
|
||||
22 => Some(Name::ObjectReplicationMissedThreshold),
|
||||
23 => Some(Name::ObjectReplicationReplicatedAfterThreshold),
|
||||
24 => Some(Name::ObjectReplicationNotTracked),
|
||||
25 => Some(Name::ObjectRestorePost),
|
||||
26 => Some(Name::ObjectRestoreCompleted),
|
||||
27 => Some(Name::ObjectTransitionFailed),
|
||||
28 => Some(Name::ObjectTransitionComplete),
|
||||
29 => Some(Name::ObjectManyVersions),
|
||||
30 => Some(Name::ObjectLargeVersions),
|
||||
31 => Some(Name::PrefixManyFolders),
|
||||
32 => Some(Name::IlmDelMarkerExpirationDelete),
|
||||
33 => Some(Name::ObjectAccessedAll),
|
||||
34 => Some(Name::ObjectCreatedAll),
|
||||
35 => Some(Name::ObjectRemovedAll),
|
||||
36 => Some(Name::ObjectReplicationAll),
|
||||
37 => Some(Name::ObjectRestoreAll),
|
||||
38 => Some(Name::ObjectTransitionAll),
|
||||
39 => Some(Name::ObjectScannerAll),
|
||||
40 => Some(Name::Everything),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
}
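The compound variants expand to their concrete members, which is what `mask()` and `Event::is_type` build on. A brief sketch, assuming `Name` is in scope:

```rust
fn demo_expand() {
    // ObjectCreatedAll is shorthand for every ObjectCreated* member.
    let members = Name::ObjectCreatedAll.expand();
    assert!(members.contains(&Name::ObjectCreatedPut));
    assert!(members.contains(&Name::ObjectCreatedCopy));
    assert_eq!(members.len(), 8);

    // A concrete name expands to itself.
    assert_eq!(Name::ObjectRemovedDelete.expand(), vec![Name::ObjectRemovedDelete]);
}
```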
|
||||
@@ -1,234 +0,0 @@
|
||||
use crate::{Error, Event, NotifierConfig, NotifierSystem, create_adapters};
|
||||
use std::sync::{Arc, atomic};
|
||||
use tokio::sync::{Mutex, OnceCell};
|
||||
use tracing::instrument;
|
||||
|
||||
static GLOBAL_SYSTEM: OnceCell<Arc<Mutex<NotifierSystem>>> = OnceCell::const_new();
|
||||
static INITIALIZED: atomic::AtomicBool = atomic::AtomicBool::new(false);
|
||||
static READY: atomic::AtomicBool = atomic::AtomicBool::new(false);
|
||||
static INIT_LOCK: Mutex<()> = Mutex::const_new(());
|
||||
|
||||
/// Initializes the global notification system.
|
||||
///
|
||||
/// This function performs the following steps:
|
||||
/// 1. Checks if the system is already initialized.
|
||||
/// 2. Creates a new `NotificationSystem` instance.
|
||||
/// 3. Creates adapters based on the provided configuration.
|
||||
/// 4. Starts the notification system with the created adapters.
|
||||
/// 5. Sets the global system instance.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns an error if:
|
||||
/// - The system is already initialized.
|
||||
/// - Creating the `NotificationSystem` fails.
|
||||
/// - Creating adapters fails.
|
||||
/// - Starting the notification system fails.
|
||||
/// - Setting the global system instance fails.
|
||||
pub async fn initialize(config: NotifierConfig) -> Result<(), Error> {
|
||||
let _lock = INIT_LOCK.lock().await;
|
||||
|
||||
// Check if the system is already initialized.
|
||||
if INITIALIZED.load(atomic::Ordering::SeqCst) {
|
||||
return Err(Error::custom("Notification system has already been initialized"));
|
||||
}
|
||||
|
||||
// Check if the system is already ready.
|
||||
if READY.load(atomic::Ordering::SeqCst) {
|
||||
return Err(Error::custom("Notification system is already ready"));
|
||||
}
|
||||
|
||||
// Check if the system is shutting down.
|
||||
if let Some(system) = GLOBAL_SYSTEM.get() {
|
||||
let system_guard = system.lock().await;
|
||||
if system_guard.shutdown_cancelled() {
|
||||
return Err(Error::custom("Notification system is shutting down"));
|
||||
}
|
||||
}
|
||||
|
||||
// Ensure at least one adapter is configured
|
||||
if config.adapters.is_empty() {
|
||||
return Err(Error::custom("No adapters configured"));
|
||||
}
|
||||
|
||||
// Attempt to initialize, and reset the INITIALIZED flag if it fails.
|
||||
let result: Result<(), Error> = async {
|
||||
let system = NotifierSystem::new(config.clone()).await.map_err(|e| {
|
||||
tracing::error!("Failed to create NotificationSystem: {:?}", e);
|
||||
e
|
||||
})?;
|
||||
let adapters = create_adapters(&config.adapters).map_err(|e| {
|
||||
tracing::error!("Failed to create adapters: {:?}", e);
|
||||
e
|
||||
})?;
|
||||
tracing::info!("adapters len:{:?}", adapters.len());
|
||||
let system_clone = Arc::new(Mutex::new(system));
|
||||
let adapters_clone = adapters.clone();
|
||||
|
||||
GLOBAL_SYSTEM.set(system_clone.clone()).map_err(|_| {
|
||||
let err = Error::custom("Unable to set up global notification system");
|
||||
tracing::error!("{:?}", err);
|
||||
err
|
||||
})?;
|
||||
|
||||
tokio::spawn(async move {
|
||||
if let Err(e) = system_clone.lock().await.start(adapters_clone).await {
|
||||
tracing::error!("Notification system failed to start: {}", e);
|
||||
}
|
||||
tracing::info!("Notification system started in background");
|
||||
});
|
||||
tracing::info!("system start success,start set READY value");
|
||||
|
||||
READY.store(true, atomic::Ordering::SeqCst);
|
||||
tracing::info!("Notification system is ready to process events");
|
||||
|
||||
Ok(())
|
||||
}
|
||||
.await;
|
||||
|
||||
if result.is_err() {
|
||||
INITIALIZED.store(false, atomic::Ordering::SeqCst);
|
||||
READY.store(false, atomic::Ordering::SeqCst);
|
||||
return result;
|
||||
}
|
||||
|
||||
INITIALIZED.store(true, atomic::Ordering::SeqCst);
|
||||
Ok(())
|
||||
}
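A typical call sequence for this module, sketched under the assumption that the supplied `NotifierConfig` carries at least one adapter (otherwise `initialize` returns an error, as above):

```rust
use rustfs_event_notifier::{
    initialize, is_ready, send_event, shutdown, Error, Event, Metadata, Name, NotifierConfig, Source,
};

async fn demo_lifecycle(config: NotifierConfig) -> Result<(), Error> {
    // The config must carry at least one adapter, otherwise initialize() bails out.
    initialize(config).await?;
    assert!(is_ready());

    // Build and dispatch a minimal event to the "webhook" channel.
    let event = Event::create(
        Name::ObjectCreatedPut,
        Metadata::new(),
        Source::new("localhost".into(), "9000".into(), "demo/1.0".into()),
        vec!["webhook".to_string()],
    );
    send_event(event).await?;

    // Flip the READY/INITIALIZED flags back and stop the event bus.
    shutdown().await
}
```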
|
||||
|
||||
/// Checks if the notification system is initialized.
|
||||
pub fn is_initialized() -> bool {
|
||||
INITIALIZED.load(atomic::Ordering::SeqCst)
|
||||
}
|
||||
|
||||
/// Checks if the notification system is ready.
|
||||
pub fn is_ready() -> bool {
|
||||
READY.load(atomic::Ordering::SeqCst)
|
||||
}
|
||||
|
||||
/// Sends an event to the notification system.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns an error if:
|
||||
/// - The system is not initialized.
|
||||
/// - The system is not ready.
|
||||
/// - Sending the event fails.
|
||||
#[instrument(fields(event))]
|
||||
pub async fn send_event(event: Event) -> Result<(), Error> {
|
||||
if !READY.load(atomic::Ordering::SeqCst) {
|
||||
return Err(Error::custom("Notification system not ready, please wait for initialization to complete"));
|
||||
}
|
||||
|
||||
let system = get_system().await?;
|
||||
let system_guard = system.lock().await;
|
||||
system_guard.send_event(event).await
|
||||
}
|
||||
|
||||
/// Shuts down the notification system.
|
||||
#[instrument]
|
||||
pub async fn shutdown() -> Result<(), Error> {
|
||||
if let Some(system) = GLOBAL_SYSTEM.get() {
|
||||
tracing::info!("Shutting down notification system start");
|
||||
let result = {
|
||||
let mut system_guard = system.lock().await;
|
||||
system_guard.shutdown().await
|
||||
};
|
||||
if let Err(e) = &result {
|
||||
tracing::error!("Notification system shutdown failed: {}", e);
|
||||
} else {
|
||||
tracing::info!("Event bus shutdown completed");
|
||||
}
|
||||
|
||||
tracing::info!(
|
||||
"Shutdown method called set static value start, READY: {}, INITIALIZED: {}",
|
||||
READY.load(atomic::Ordering::SeqCst),
|
||||
INITIALIZED.load(atomic::Ordering::SeqCst)
|
||||
);
|
||||
READY.store(false, atomic::Ordering::SeqCst);
|
||||
INITIALIZED.store(false, atomic::Ordering::SeqCst);
|
||||
tracing::info!(
|
||||
"Shutdown method called set static value end, READY: {}, INITIALIZED: {}",
|
||||
READY.load(atomic::Ordering::SeqCst),
|
||||
INITIALIZED.load(atomic::Ordering::SeqCst)
|
||||
);
|
||||
result
|
||||
} else {
|
||||
Err(Error::custom("Notification system not initialized"))
|
||||
}
|
||||
}
|
||||
|
||||
/// Retrieves the global notification system instance.
|
||||
///
|
||||
/// # Errors
|
||||
///
|
||||
/// Returns an error if the system is not initialized.
|
||||
async fn get_system() -> Result<Arc<Mutex<NotifierSystem>>, Error> {
|
||||
GLOBAL_SYSTEM
|
||||
.get()
|
||||
.cloned()
|
||||
.ok_or_else(|| Error::custom("Notification system not initialized"))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::NotifierConfig;
|
||||
|
||||
fn init_tracing() {
|
||||
// Use try_init to avoid panic if already initialized
|
||||
let _ = tracing_subscriber::fmt::try_init();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_initialize_success() {
|
||||
init_tracing();
|
||||
let config = NotifierConfig::default(); // assume there is a default configuration
|
||||
let result = initialize(config).await;
|
||||
assert!(result.is_err(), "Initialization should not succeed");
|
||||
assert!(!is_initialized(), "System should not be marked as initialized");
|
||||
assert!(!is_ready(), "System should not be marked as ready");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_initialize_twice() {
|
||||
init_tracing();
|
||||
let config = NotifierConfig::default();
|
||||
let _ = initialize(config.clone()).await; // first initialization
|
||||
let result = initialize(config).await; // second initialization
|
||||
assert!(result.is_err(), "Initialization should succeed");
|
||||
assert!(result.is_err(), "Re-initialization should fail");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_initialize_failure_resets_state() {
|
||||
init_tracing();
|
||||
// Test with empty adapters to force failure
|
||||
let config = NotifierConfig {
|
||||
adapters: Vec::new(),
|
||||
..Default::default()
|
||||
};
|
||||
let result = initialize(config).await;
|
||||
assert!(result.is_err(), "Initialization should fail with empty adapters");
|
||||
assert!(!is_initialized(), "System should not be marked as initialized after failure");
|
||||
assert!(!is_ready(), "System should not be marked as ready after failure");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_is_initialized_and_is_ready() {
|
||||
init_tracing();
|
||||
// Initially, the system should not be initialized or ready
|
||||
assert!(!is_initialized(), "System should not be initialized initially");
|
||||
assert!(!is_ready(), "System should not be ready initially");
|
||||
|
||||
// Test with empty adapters to ensure failure
|
||||
let config = NotifierConfig {
|
||||
adapters: Vec::new(),
|
||||
..Default::default()
|
||||
};
|
||||
let result = initialize(config).await;
|
||||
assert!(result.is_err(), "Initialization should fail with empty adapters");
|
||||
assert!(!is_initialized(), "System should not be initialized after failed init");
|
||||
assert!(!is_ready(), "System should not be ready after failed init");
|
||||
}
|
||||
}
|
||||
@@ -1,31 +0,0 @@
mod adapter;
mod bus;
mod config;
mod error;
mod event;
mod global;
mod notifier;
mod store;

pub use adapter::ChannelAdapter;
pub use adapter::create_adapters;
#[cfg(all(feature = "kafka", target_os = "linux"))]
pub use adapter::kafka::KafkaAdapter;
#[cfg(feature = "mqtt")]
pub use adapter::mqtt::MqttAdapter;
#[cfg(feature = "webhook")]
pub use adapter::webhook::WebhookAdapter;
pub use bus::event_bus;
#[cfg(all(feature = "kafka", target_os = "linux"))]
pub use config::KafkaConfig;
#[cfg(feature = "mqtt")]
pub use config::MqttConfig;
#[cfg(feature = "webhook")]
pub use config::WebhookConfig;
pub use config::{AdapterConfig, NotifierConfig};
pub use error::Error;

pub use event::{Bucket, Event, EventBuilder, Identity, Log, Metadata, Name, Object, Source};
pub use global::{initialize, is_initialized, is_ready, send_event, shutdown};
pub use notifier::NotifierSystem;
pub use store::EventStore;
|
||||
@@ -1,136 +0,0 @@
|
||||
use crate::{ChannelAdapter, Error, Event, EventStore, NotifierConfig, event_bus};
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::mpsc;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::instrument;
|
||||
|
||||
/// The `NotificationSystem` struct represents the notification system.
|
||||
/// It manages the event bus and the adapters.
|
||||
/// It is responsible for sending and receiving events.
|
||||
/// It also handles the shutdown process.
|
||||
pub struct NotifierSystem {
|
||||
tx: mpsc::Sender<Event>,
|
||||
rx: Option<mpsc::Receiver<Event>>,
|
||||
store: Arc<EventStore>,
|
||||
shutdown: CancellationToken,
|
||||
shutdown_complete: Option<tokio::sync::oneshot::Sender<()>>,
|
||||
shutdown_receiver: Option<tokio::sync::oneshot::Receiver<()>>,
|
||||
}
|
||||
|
||||
impl NotifierSystem {
|
||||
/// Creates a new `NotificationSystem` instance.
|
||||
#[instrument(skip(config))]
|
||||
pub async fn new(config: NotifierConfig) -> Result<Self, Error> {
|
||||
let (tx, rx) = mpsc::channel::<Event>(config.channel_capacity);
|
||||
let store = Arc::new(EventStore::new(&config.store_path).await?);
|
||||
let shutdown = CancellationToken::new();
|
||||
|
||||
let restored_logs = store.load_logs().await?;
|
||||
for log in restored_logs {
|
||||
for event in log.records {
|
||||
// The send call can fail with a SendError; wrap it in the crate's error type
|
||||
tx.send(event).await.map_err(|e| Error::ChannelSend(Box::new(e)))?;
|
||||
}
|
||||
}
|
||||
// Initialize shutdown_complete to Some(tx)
|
||||
let (complete_tx, complete_rx) = tokio::sync::oneshot::channel();
|
||||
Ok(Self {
|
||||
tx,
|
||||
rx: Some(rx),
|
||||
store,
|
||||
shutdown,
|
||||
shutdown_complete: Some(complete_tx),
|
||||
shutdown_receiver: Some(complete_rx),
|
||||
})
|
||||
}
|
||||
|
||||
/// Starts the notification system.
|
||||
/// It initializes the event bus and the producer.
|
||||
#[instrument(skip_all)]
|
||||
pub async fn start(&mut self, adapters: Vec<Arc<dyn ChannelAdapter>>) -> Result<(), Error> {
|
||||
if self.shutdown.is_cancelled() {
|
||||
let error = Error::custom("System is shutting down");
|
||||
self.handle_error("start", &error);
|
||||
return Err(error);
|
||||
}
|
||||
self.log(tracing::Level::INFO, "start", "Starting the notification system");
|
||||
let rx = self.rx.take().ok_or_else(|| Error::EventBusStarted)?;
|
||||
let shutdown_clone = self.shutdown.clone();
|
||||
let store_clone = self.store.clone();
|
||||
let shutdown_complete = self.shutdown_complete.take();
|
||||
|
||||
tokio::spawn(async move {
|
||||
if let Err(e) = event_bus(rx, adapters, store_clone, shutdown_clone, shutdown_complete).await {
|
||||
tracing::error!("Event bus failed: {}", e);
|
||||
}
|
||||
});
|
||||
self.log(tracing::Level::INFO, "start", "Notification system started successfully");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Sends an event to the notification system.
|
||||
/// This method is used to send events to the event bus.
|
||||
#[instrument(skip(self))]
|
||||
pub async fn send_event(&self, event: Event) -> Result<(), Error> {
|
||||
self.log(tracing::Level::DEBUG, "send_event", &format!("Sending event: {:?}", event));
|
||||
if self.shutdown.is_cancelled() {
|
||||
let error = Error::custom("System is shutting down");
|
||||
self.handle_error("send_event", &error);
|
||||
return Err(error);
|
||||
}
|
||||
if let Err(e) = self.tx.send(event).await {
|
||||
let error = Error::ChannelSend(Box::new(e));
|
||||
self.handle_error("send_event", &error);
|
||||
return Err(error);
|
||||
}
|
||||
self.log(tracing::Level::INFO, "send_event", "Event sent successfully");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Shuts down the notification system.
|
||||
/// This method is used to cancel the event bus and producer tasks.
|
||||
#[instrument(skip(self))]
|
||||
pub async fn shutdown(&mut self) -> Result<(), Error> {
|
||||
tracing::info!("Shutting down the notification system");
|
||||
self.shutdown.cancel();
|
||||
// wait for the event bus to be completely closed
|
||||
if let Some(receiver) = self.shutdown_receiver.take() {
|
||||
match receiver.await {
|
||||
Ok(_) => {
|
||||
tracing::info!("Event bus shutdown completed successfully");
|
||||
Ok(())
|
||||
}
|
||||
Err(e) => {
|
||||
let error = Error::custom(format!("Failed to receive shutdown completion: {}", e).as_str());
|
||||
self.handle_error("shutdown", &error);
|
||||
Err(error)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
tracing::warn!("Shutdown receiver not available, the event bus might still be running");
|
||||
Err(Error::custom("Shutdown receiver not available"))
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns true once shutdown has been requested
|
||||
pub fn shutdown_cancelled(&self) -> bool {
|
||||
self.shutdown.is_cancelled()
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub fn handle_error(&self, context: &str, error: &Error) {
|
||||
self.log(tracing::Level::ERROR, context, &format!("{:?}", error));
|
||||
// TODO Can be extended to record to files or send to monitoring systems
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
fn log(&self, level: tracing::Level, context: &str, message: &str) {
|
||||
match level {
|
||||
tracing::Level::ERROR => tracing::error!("[{}] {}", context, message),
|
||||
tracing::Level::WARN => tracing::warn!("[{}] {}", context, message),
|
||||
tracing::Level::INFO => tracing::info!("[{}] {}", context, message),
|
||||
tracing::Level::DEBUG => tracing::debug!("[{}] {}", context, message),
|
||||
tracing::Level::TRACE => tracing::trace!("[{}] {}", context, message),
|
||||
}
|
||||
}
|
||||
}
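The shutdown path above is a small handshake: the bus task owns the `oneshot::Sender` and fires it when it exits, while `shutdown()` cancels the token and awaits the matching receiver. A stripped-down sketch of the same pattern, not tied to this struct:

```rust
use tokio::sync::oneshot;
use tokio_util::sync::CancellationToken;

async fn demo_shutdown_handshake() {
    let token = CancellationToken::new();
    let (done_tx, done_rx) = oneshot::channel::<()>();

    let worker_token = token.clone();
    tokio::spawn(async move {
        // Stand-in for the event bus loop: run until cancelled, then signal completion.
        worker_token.cancelled().await;
        let _ = done_tx.send(());
    });

    // Shutdown side: request cancellation, then wait for the worker to confirm.
    token.cancel();
    done_rx.await.expect("worker dropped the completion sender");
}
```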
|
||||
@@ -1,60 +0,0 @@
|
||||
use crate::Error;
|
||||
use crate::Log;
|
||||
use std::sync::Arc;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
use tokio::fs::{File, OpenOptions, create_dir_all};
|
||||
use tokio::io::{AsyncBufReadExt, AsyncWriteExt, BufReader, BufWriter};
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::instrument;
|
||||
|
||||
/// `EventStore` is a struct that manages the storage of event logs.
|
||||
pub struct EventStore {
|
||||
path: String,
|
||||
lock: Arc<RwLock<()>>,
|
||||
}
|
||||
|
||||
impl EventStore {
|
||||
pub async fn new(path: &str) -> Result<Self, Error> {
|
||||
create_dir_all(path).await?;
|
||||
Ok(Self {
|
||||
path: path.to_string(),
|
||||
lock: Arc::new(RwLock::new(())),
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(skip(self))]
|
||||
pub async fn save_logs(&self, logs: &[Log]) -> Result<(), Error> {
|
||||
let _guard = self.lock.write().await;
|
||||
let file_path = format!(
|
||||
"{}/events_{}.jsonl",
|
||||
self.path,
|
||||
SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs()
|
||||
);
|
||||
let file = OpenOptions::new().create(true).append(true).open(&file_path).await?;
|
||||
let mut writer = BufWriter::new(file);
|
||||
for log in logs {
|
||||
let line = serde_json::to_string(log)?;
|
||||
writer.write_all(line.as_bytes()).await?;
|
||||
writer.write_all(b"\n").await?;
|
||||
}
|
||||
writer.flush().await?;
|
||||
tracing::info!("Saved logs to {} end", file_path);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn load_logs(&self) -> Result<Vec<Log>, Error> {
|
||||
let _guard = self.lock.read().await;
|
||||
let mut logs = Vec::new();
|
||||
let mut entries = tokio::fs::read_dir(&self.path).await?;
|
||||
while let Some(entry) = entries.next_entry().await? {
|
||||
let file = File::open(entry.path()).await?;
|
||||
let reader = BufReader::new(file);
|
||||
let mut lines = reader.lines();
|
||||
while let Some(line) = lines.next_line().await? {
|
||||
let log: Log = serde_json::from_str(&line)?;
|
||||
logs.push(log);
|
||||
}
|
||||
}
|
||||
Ok(logs)
|
||||
}
|
||||
}
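The store persists one JSON document per line (JSONL) into timestamped files and replays every file in the directory on load. A hedged round-trip sketch, assuming a fresh writable directory and the crate re-exports for `EventStore`, `Log` and `Error`:

```rust
use rustfs_event_notifier::{Error, EventStore, Log};

async fn demo_event_store(logs: Vec<Log>) -> Result<(), Error> {
    let store = EventStore::new("./demo_events").await?;

    // Each Log becomes a single JSON line in a timestamped .jsonl file.
    store.save_logs(&logs).await?;

    // load_logs() walks every file in the directory and parses it line by line.
    let restored = store.load_logs().await?;
    assert_eq!(restored.len(), logs.len());
    Ok(())
}
```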
|
||||
@@ -1,159 +0,0 @@
|
||||
use rustfs_event_notifier::{AdapterConfig, NotifierSystem, WebhookConfig};
|
||||
use rustfs_event_notifier::{Bucket, Event, EventBuilder, Identity, Metadata, Name, Object, Source};
|
||||
use rustfs_event_notifier::{ChannelAdapter, WebhookAdapter};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_webhook_adapter() {
|
||||
let adapter = WebhookAdapter::new(WebhookConfig {
|
||||
endpoint: "http://localhost:8080/webhook".to_string(),
|
||||
auth_token: None,
|
||||
custom_headers: None,
|
||||
max_retries: 1,
|
||||
timeout: 5,
|
||||
});
|
||||
|
||||
// create an s3 metadata object
|
||||
let metadata = Metadata {
|
||||
schema_version: "1.0".to_string(),
|
||||
configuration_id: "test-config".to_string(),
|
||||
bucket: Bucket {
|
||||
name: "my-bucket".to_string(),
|
||||
owner_identity: Identity {
|
||||
principal_id: "owner123".to_string(),
|
||||
},
|
||||
arn: "arn:aws:s3:::my-bucket".to_string(),
|
||||
},
|
||||
object: Object {
|
||||
key: "test.txt".to_string(),
|
||||
size: Some(1024),
|
||||
etag: Some("abc123".to_string()),
|
||||
content_type: Some("text/plain".to_string()),
|
||||
user_metadata: None,
|
||||
version_id: None,
|
||||
sequencer: "1234567890".to_string(),
|
||||
},
|
||||
};
|
||||
|
||||
// create source object
|
||||
let source = Source {
|
||||
host: "localhost".to_string(),
|
||||
port: "80".to_string(),
|
||||
user_agent: "curl/7.68.0".to_string(),
|
||||
};
|
||||
|
||||
// Create events using builder mode
|
||||
let event = Event::builder()
|
||||
.event_version("2.0")
|
||||
.event_source("aws:s3")
|
||||
.aws_region("us-east-1")
|
||||
.event_time("2023-10-01T12:00:00.000Z")
|
||||
.event_name(Name::ObjectCreatedPut)
|
||||
.user_identity(Identity {
|
||||
principal_id: "user123".to_string(),
|
||||
})
|
||||
.request_parameters(HashMap::new())
|
||||
.response_elements(HashMap::new())
|
||||
.s3(metadata)
|
||||
.source(source)
|
||||
.channels(vec!["webhook".to_string()])
|
||||
.build()
|
||||
.expect("failed to create event");
|
||||
|
||||
let result = adapter.send(&event).await;
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_notification_system() {
|
||||
let config = rustfs_event_notifier::NotifierConfig {
|
||||
store_path: "./test_events".to_string(),
|
||||
channel_capacity: 100,
|
||||
adapters: vec![AdapterConfig::Webhook(WebhookConfig {
|
||||
endpoint: "http://localhost:8080/webhook".to_string(),
|
||||
auth_token: None,
|
||||
custom_headers: None,
|
||||
max_retries: 1,
|
||||
timeout: 5,
|
||||
})],
|
||||
};
|
||||
let system = Arc::new(tokio::sync::Mutex::new(NotifierSystem::new(config.clone()).await.unwrap()));
|
||||
let adapters: Vec<Arc<dyn ChannelAdapter>> = vec![Arc::new(WebhookAdapter::new(WebhookConfig {
|
||||
endpoint: "http://localhost:8080/webhook".to_string(),
|
||||
auth_token: None,
|
||||
custom_headers: None,
|
||||
max_retries: 1,
|
||||
timeout: 5,
|
||||
}))];
|
||||
|
||||
// create an s3 metadata object
|
||||
let metadata = Metadata {
|
||||
schema_version: "1.0".to_string(),
|
||||
configuration_id: "test-config".to_string(),
|
||||
bucket: Bucket {
|
||||
name: "my-bucket".to_string(),
|
||||
owner_identity: Identity {
|
||||
principal_id: "owner123".to_string(),
|
||||
},
|
||||
arn: "arn:aws:s3:::my-bucket".to_string(),
|
||||
},
|
||||
object: Object {
|
||||
key: "test.txt".to_string(),
|
||||
size: Some(1024),
|
||||
etag: Some("abc123".to_string()),
|
||||
content_type: Some("text/plain".to_string()),
|
||||
user_metadata: None,
|
||||
version_id: None,
|
||||
sequencer: "1234567890".to_string(),
|
||||
},
|
||||
};
|
||||
|
||||
// create source object
|
||||
let source = Source {
|
||||
host: "localhost".to_string(),
|
||||
port: "80".to_string(),
|
||||
user_agent: "curl/7.68.0".to_string(),
|
||||
};
|
||||
|
||||
// create a preconfigured builder with objects
|
||||
let event = EventBuilder::for_object_creation(metadata, source)
|
||||
.user_identity(Identity {
|
||||
principal_id: "user123".to_string(),
|
||||
})
|
||||
.event_time("2023-10-01T12:00:00.000Z")
|
||||
.channels(vec!["webhook".to_string()])
|
||||
.build()
|
||||
.expect("failed to create event");
|
||||
|
||||
{
|
||||
let system_lock = system.lock().await;
|
||||
system_lock.send_event(event).await.unwrap();
|
||||
}
|
||||
|
||||
let system_clone = Arc::clone(&system);
|
||||
let system_handle = tokio::spawn(async move {
|
||||
let mut system = system_clone.lock().await;
|
||||
system.start(adapters).await
|
||||
});
|
||||
|
||||
// set 10 seconds timeout
|
||||
match tokio::time::timeout(std::time::Duration::from_secs(10), system_handle).await {
|
||||
Ok(result) => {
|
||||
println!("System started successfully");
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
Err(_) => {
|
||||
println!("System operation timed out, forcing shutdown");
|
||||
// create a new task to handle the timeout
|
||||
let system = Arc::clone(&system);
|
||||
tokio::spawn(async move {
|
||||
if let Ok(mut guard) = system.try_lock() {
|
||||
guard.shutdown().await.unwrap();
|
||||
}
|
||||
});
|
||||
// give the system some time to clean up resources
|
||||
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -15,7 +15,7 @@ time.workspace = true
|
||||
uuid = { workspace = true, features = ["v4", "fast-rng", "serde"] }
|
||||
tokio = { workspace = true, features = ["io-util", "macros", "sync"] }
|
||||
xxhash-rust = { version = "0.8.15", features = ["xxh64"] }
|
||||
|
||||
bytes.workspace = true
|
||||
rustfs-utils = {workspace = true, features= ["hash"]}
|
||||
byteorder = "1.5.0"
|
||||
tracing.workspace = true
|
||||
|
||||
@@ -111,7 +111,20 @@ impl Clone for Error {
|
||||
|
||||
impl From<std::io::Error> for Error {
|
||||
fn from(e: std::io::Error) -> Self {
|
||||
Error::Io(e)
|
||||
match e.kind() {
|
||||
std::io::ErrorKind::UnexpectedEof => Error::Unexpected,
|
||||
_ => Error::Io(e),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Error> for std::io::Error {
|
||||
fn from(e: Error) -> Self {
|
||||
match e {
|
||||
Error::Unexpected => std::io::Error::new(std::io::ErrorKind::UnexpectedEof, "Unexpected EOF"),
|
||||
Error::Io(e) => e,
|
||||
_ => std::io::Error::other(e.to_string()),
|
||||
}
|
||||
}
|
||||
}
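The two `From` impls in this hunk make the EOF case round-trip between the io and on-disk-format error domains. A small sketch of the intended behavior, assuming the `Error` enum of this module is in scope:

```rust
use std::io::{Error as IoError, ErrorKind};

fn demo_error_roundtrip() {
    // io UnexpectedEof is normalized to the dedicated variant...
    let e: Error = IoError::new(ErrorKind::UnexpectedEof, "eof").into();
    assert!(matches!(e, Error::Unexpected));

    // ...and converting back restores an UnexpectedEof io error.
    let io_back: IoError = e.into();
    assert_eq!(io_back.kind(), ErrorKind::UnexpectedEof);
}
```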
|
||||
|
||||
@@ -414,6 +427,9 @@ mod tests {
|
||||
let filemeta_error: Error = io_error.into();
|
||||
|
||||
match filemeta_error {
|
||||
Error::Unexpected => {
|
||||
assert_eq!(kind, ErrorKind::UnexpectedEof);
|
||||
}
|
||||
Error::Io(extracted_io_error) => {
|
||||
assert_eq!(extracted_io_error.kind(), kind);
|
||||
assert!(extracted_io_error.to_string().contains("test error"));
|
||||
|
||||
@@ -1,5 +1,7 @@
|
||||
use crate::error::{Error, Result};
|
||||
use crate::headers::RESERVED_METADATA_PREFIX_LOWER;
|
||||
use crate::headers::RUSTFS_HEALING;
|
||||
use bytes::Bytes;
|
||||
use rmp_serde::Serializer;
|
||||
use rustfs_utils::HashAlgorithm;
|
||||
use serde::Deserialize;
|
||||
@@ -8,9 +10,6 @@ use std::collections::HashMap;
|
||||
use time::OffsetDateTime;
|
||||
use uuid::Uuid;
|
||||
|
||||
use crate::headers::RESERVED_METADATA_PREFIX;
|
||||
use crate::headers::RUSTFS_HEALING;
|
||||
|
||||
pub const ERASURE_ALGORITHM: &str = "rs-vandermonde";
|
||||
pub const BLOCK_SIZE_V2: usize = 1024 * 1024; // 1M
|
||||
|
||||
@@ -27,10 +26,10 @@ pub struct ObjectPartInfo {
|
||||
pub etag: String,
|
||||
pub number: usize,
|
||||
pub size: usize,
|
||||
pub actual_size: usize, // Original data size
|
||||
pub actual_size: i64, // Original data size
|
||||
pub mod_time: Option<OffsetDateTime>,
|
||||
// Index holds the index of the part in the erasure coding
|
||||
pub index: Option<Vec<u8>>,
|
||||
pub index: Option<Bytes>,
|
||||
// Checksums holds checksums of the part
|
||||
pub checksums: Option<HashMap<String, String>>,
|
||||
}
|
||||
@@ -40,7 +39,7 @@ pub struct ObjectPartInfo {
|
||||
pub struct ChecksumInfo {
|
||||
pub part_number: usize,
|
||||
pub algorithm: HashAlgorithm,
|
||||
pub hash: Vec<u8>,
|
||||
pub hash: Bytes,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, PartialEq, Eq, PartialOrd, Default, Clone)]
|
||||
@@ -121,15 +120,21 @@ impl ErasureInfo {
|
||||
}
|
||||
/// Calculate the total erasure file size for a given original size.
|
||||
// Returns the final erasure size from the original size
|
||||
pub fn shard_file_size(&self, total_length: usize) -> usize {
|
||||
pub fn shard_file_size(&self, total_length: i64) -> i64 {
|
||||
if total_length == 0 {
|
||||
return 0;
|
||||
}
|
||||
|
||||
if total_length < 0 {
|
||||
return total_length;
|
||||
}
|
||||
|
||||
let total_length = total_length as usize;
|
||||
|
||||
let num_shards = total_length / self.block_size;
|
||||
let last_block_size = total_length % self.block_size;
|
||||
let last_shard_size = calc_shard_size(last_block_size, self.data_blocks);
|
||||
num_shards * self.shard_size() + last_shard_size
|
||||
(num_shards * self.shard_size() + last_shard_size) as i64
|
||||
}
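To make the arithmetic concrete, a standalone sketch of the same formula with assumed parameters (block_size = 1 MiB, 4 data blocks); `ceil_div` stands in for the crate's `calc_shard_size` helper, which is an assumption about its behavior:

```rust
// Assumed helper mirroring calc_shard_size: ceiling division across the data blocks.
fn ceil_div(n: i64, d: i64) -> i64 {
    (n + d - 1) / d
}

// Sketch of the shard_file_size formula with block_size = 1 MiB and 4 data blocks.
// e.g. 2_621_440 bytes (2.5 MiB) -> 2 full shards of 262_144 each,
// plus ceil(524_288 / 4) = 131_072 for the tail, total 655_360.
fn demo_shard_file_size(total_length: i64) -> i64 {
    let block_size: i64 = 1024 * 1024;
    let data_blocks: i64 = 4;
    if total_length <= 0 {
        return total_length; // 0 and negative lengths pass through, as above
    }
    let shard_size = ceil_div(block_size, data_blocks); // size of one full shard
    let num_shards = total_length / block_size;         // number of complete blocks
    let last_shard = ceil_div(total_length % block_size, data_blocks);
    num_shards * shard_size + last_shard
}
```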
|
||||
|
||||
/// Check if this ErasureInfo equals another ErasureInfo
|
||||
@@ -158,7 +163,7 @@ pub struct FileInfo {
|
||||
pub expire_restored: bool,
|
||||
pub data_dir: Option<Uuid>,
|
||||
pub mod_time: Option<OffsetDateTime>,
|
||||
pub size: usize,
|
||||
pub size: i64,
|
||||
// File mode bits
|
||||
pub mode: Option<u32>,
|
||||
// WrittenByVersion is the unix time stamp of the version that created this version of the object
|
||||
@@ -170,13 +175,13 @@ pub struct FileInfo {
|
||||
pub mark_deleted: bool,
|
||||
// ReplicationState - Internal replication state to be passed back in ObjectInfo
|
||||
// pub replication_state: Option<ReplicationState>, // TODO: implement ReplicationState
|
||||
pub data: Option<Vec<u8>>,
|
||||
pub data: Option<Bytes>,
|
||||
pub num_versions: usize,
|
||||
pub successor_mod_time: Option<OffsetDateTime>,
|
||||
pub fresh: bool,
|
||||
pub idx: usize,
|
||||
// Combined checksum when object was uploaded
|
||||
pub checksum: Option<Vec<u8>>,
|
||||
pub checksum: Option<Bytes>,
|
||||
pub versioned: bool,
|
||||
}
|
||||
|
||||
@@ -261,7 +266,8 @@ impl FileInfo {
|
||||
etag: String,
|
||||
part_size: usize,
|
||||
mod_time: Option<OffsetDateTime>,
|
||||
actual_size: usize,
|
||||
actual_size: i64,
|
||||
index: Option<Bytes>,
|
||||
) {
|
||||
let part = ObjectPartInfo {
|
||||
etag,
|
||||
@@ -269,7 +275,7 @@ impl FileInfo {
|
||||
size: part_size,
|
||||
mod_time,
|
||||
actual_size,
|
||||
index: None,
|
||||
index,
|
||||
checksums: None,
|
||||
};
|
||||
|
||||
@@ -341,6 +347,12 @@ impl FileInfo {
|
||||
self.metadata
|
||||
.insert(format!("{}inline-data", RESERVED_METADATA_PREFIX_LOWER).to_owned(), "true".to_owned());
|
||||
}
|
||||
|
||||
pub fn set_data_moved(&mut self) {
|
||||
self.metadata
|
||||
.insert(format!("{}data-moved", RESERVED_METADATA_PREFIX_LOWER).to_owned(), "true".to_owned());
|
||||
}
|
||||
|
||||
pub fn inline_data(&self) -> bool {
|
||||
self.metadata
|
||||
.contains_key(format!("{}inline-data", RESERVED_METADATA_PREFIX_LOWER).as_str())
|
||||
@@ -350,7 +362,7 @@ impl FileInfo {
|
||||
/// Check if the object is compressed
|
||||
pub fn is_compressed(&self) -> bool {
|
||||
self.metadata
|
||||
.contains_key(&format!("{}compression", RESERVED_METADATA_PREFIX))
|
||||
.contains_key(&format!("{}compression", RESERVED_METADATA_PREFIX_LOWER))
|
||||
}
|
||||
|
||||
/// Check if the object is remote (transitioned to another tier)
|
||||
@@ -464,7 +476,7 @@ impl FileInfoVersions {
|
||||
}
|
||||
|
||||
/// Calculate the total size of all versions for this object
|
||||
pub fn size(&self) -> usize {
|
||||
pub fn size(&self) -> i64 {
|
||||
self.versions.iter().map(|v| v.size).sum()
|
||||
}
|
||||
}
|
||||
|
||||
@@ -6,10 +6,12 @@ use crate::headers::{
|
||||
RESERVED_METADATA_PREFIX_LOWER, VERSION_PURGE_STATUS_KEY,
|
||||
};
|
||||
use byteorder::ByteOrder;
|
||||
use bytes::Bytes;
|
||||
use rmp::Marker;
|
||||
use s3s::header::X_AMZ_RESTORE;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::cmp::Ordering;
|
||||
use std::convert::TryFrom;
|
||||
use std::hash::Hasher;
|
||||
use std::io::{Read, Write};
|
||||
use std::{collections::HashMap, io::Cursor};
|
||||
@@ -433,7 +435,7 @@ impl FileMeta {
|
||||
|
||||
if let Some(ref data) = fi.data {
|
||||
let key = vid.unwrap_or_default().to_string();
|
||||
self.data.replace(&key, data.clone())?;
|
||||
self.data.replace(&key, data.to_vec())?;
|
||||
}
|
||||
|
||||
let version = FileMetaVersion::from(fi);
|
||||
@@ -629,7 +631,10 @@ impl FileMeta {
|
||||
}
|
||||
|
||||
if read_data {
|
||||
fi.data = self.data.find(fi.version_id.unwrap_or_default().to_string().as_str())?;
|
||||
fi.data = self
|
||||
.data
|
||||
.find(fi.version_id.unwrap_or_default().to_string().as_str())?
|
||||
.map(bytes::Bytes::from);
|
||||
}
|
||||
|
||||
fi.num_versions = self.versions.len();
|
||||
@@ -1462,9 +1467,9 @@ pub struct MetaObject {
|
||||
pub part_numbers: Vec<usize>, // Part Numbers
|
||||
pub part_etags: Vec<String>, // Part ETags
|
||||
pub part_sizes: Vec<usize>, // Part Sizes
|
||||
pub part_actual_sizes: Vec<usize>, // Part ActualSizes (compression)
|
||||
pub part_indices: Vec<Vec<u8>>, // Part Indexes (compression)
|
||||
pub size: usize, // Object version size
|
||||
pub part_actual_sizes: Vec<i64>, // Part ActualSizes (compression)
|
||||
pub part_indices: Vec<Bytes>, // Part Indexes (compression)
|
||||
pub size: i64, // Object version size
|
||||
pub mod_time: Option<OffsetDateTime>, // Object version modified time
|
||||
pub meta_sys: HashMap<String, Vec<u8>>, // Object version internal metadata
|
||||
pub meta_user: HashMap<String, String>, // Object version metadata set by user
|
||||
@@ -1621,7 +1626,7 @@ impl MetaObject {
|
||||
let mut buf = vec![0u8; blen as usize];
|
||||
cur.read_exact(&mut buf)?;
|
||||
|
||||
indices.push(buf);
|
||||
indices.push(Bytes::from(buf));
|
||||
}
|
||||
|
||||
self.part_indices = indices;
|
||||
@@ -1893,13 +1898,16 @@ impl MetaObject {
|
||||
}
|
||||
|
||||
for (k, v) in &self.meta_sys {
|
||||
if k == AMZ_STORAGE_CLASS && v == b"STANDARD" {
|
||||
continue;
|
||||
}
|
||||
|
||||
if k.starts_with(RESERVED_METADATA_PREFIX)
|
||||
|| k.starts_with(RESERVED_METADATA_PREFIX_LOWER)
|
||||
|| k == VERSION_PURGE_STATUS_KEY
|
||||
{
|
||||
continue;
|
||||
metadata.insert(k.to_owned(), String::from_utf8(v.to_owned()).unwrap_or_default());
|
||||
}
|
||||
metadata.insert(k.to_owned(), String::from_utf8(v.to_owned()).unwrap_or_default());
|
||||
}
|
||||
|
||||
// todo: ReplicationState,Delete
|
||||
@@ -2616,7 +2624,6 @@ pub async fn read_xl_meta_no_data<R: AsyncRead + Unpin>(reader: &mut R, size: us
|
||||
}
|
||||
#[cfg(test)]
|
||||
mod test {
|
||||
|
||||
use super::*;
|
||||
use crate::test_data::*;
|
||||
|
||||
@@ -2736,7 +2743,7 @@ mod test {
|
||||
|
||||
// 验证基本属性
|
||||
assert_eq!(fm.meta_ver, XL_META_VERSION);
|
||||
assert_eq!(fm.versions.len(), 3, "应该有3个版本(1个对象,1个删除标记,1个Legacy)");
|
||||
assert_eq!(fm.versions.len(), 3, "应该有 3 个版本(1 个对象,1 个删除标记,1 个 Legacy)");
|
||||
|
||||
// 验证版本类型
|
||||
let mut object_count = 0;
|
||||
@@ -2752,9 +2759,9 @@ mod test {
|
||||
}
|
||||
}
|
||||
|
||||
assert_eq!(object_count, 1, "应该有1个对象版本");
|
||||
assert_eq!(delete_count, 1, "应该有1个删除标记");
|
||||
assert_eq!(legacy_count, 1, "应该有1个Legacy版本");
|
||||
assert_eq!(object_count, 1, "应该有 1 个对象版本");
|
||||
assert_eq!(delete_count, 1, "应该有 1 个删除标记");
|
||||
assert_eq!(legacy_count, 1, "应该有 1 个 Legacy 版本");
|
||||
|
||||
// 验证兼容性
|
||||
assert!(fm.is_compatible_with_meta(), "应该与 xl 格式兼容");
|
||||
@@ -2777,7 +2784,7 @@ mod test {
|
||||
let fm = FileMeta::load(&data).expect("解析复杂数据失败");
|
||||
|
||||
// 验证版本数量
|
||||
assert!(fm.versions.len() >= 10, "应该有至少10个版本");
|
||||
assert!(fm.versions.len() >= 10, "应该有至少 10 个版本");
|
||||
|
||||
// 验证版本排序
|
||||
assert!(fm.is_sorted_by_mod_time(), "版本应该按修改时间排序");
|
||||
@@ -2798,7 +2805,7 @@ mod test {
|
||||
let data = create_xlmeta_with_inline_data().expect("创建内联数据测试失败");
|
||||
let fm = FileMeta::load(&data).expect("解析内联数据失败");
|
||||
|
||||
assert_eq!(fm.versions.len(), 1, "应该有1个版本");
|
||||
assert_eq!(fm.versions.len(), 1, "应该有 1 个版本");
|
||||
assert!(!fm.data.as_slice().is_empty(), "应该包含内联数据");
|
||||
|
||||
// 验证内联数据内容
|
||||
@@ -2845,7 +2852,7 @@ mod test {
|
||||
|
||||
for version in &fm.versions {
|
||||
let signature = version.header.get_signature();
|
||||
assert_eq!(signature.len(), 4, "签名应该是4字节");
|
||||
assert_eq!(signature.len(), 4, "签名应该是 4 字节");
|
||||
|
||||
// 验证相同版本的签名一致性
|
||||
let signature2 = version.header.get_signature();
|
||||
@@ -2888,7 +2895,7 @@ mod test {
|
||||
// 验证版本内容一致性
|
||||
for (v1, v2) in fm.versions.iter().zip(fm2.versions.iter()) {
|
||||
assert_eq!(v1.header.version_type, v2.header.version_type, "版本类型应该一致");
|
||||
assert_eq!(v1.header.version_id, v2.header.version_id, "版本ID应该一致");
|
||||
assert_eq!(v1.header.version_id, v2.header.version_id, "版本 ID 应该一致");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -2909,26 +2916,26 @@ mod test {
|
||||
let _serialized = fm.marshal_msg().expect("序列化失败");
|
||||
let serialization_time = start.elapsed();
|
||||
|
||||
println!("性能测试结果:");
|
||||
println!(" 创建时间: {:?}", creation_time);
|
||||
println!(" 解析时间: {:?}", parsing_time);
|
||||
println!(" 序列化时间: {:?}", serialization_time);
|
||||
println!("性能测试结果:");
|
||||
println!(" 创建时间:{:?}", creation_time);
|
||||
println!(" 解析时间:{:?}", parsing_time);
|
||||
println!(" 序列化时间:{:?}", serialization_time);
|
||||
|
||||
// 基本性能断言(这些值可能需要根据实际性能调整)
|
||||
assert!(parsing_time.as_millis() < 100, "解析时间应该小于100ms");
|
||||
assert!(serialization_time.as_millis() < 100, "序列化时间应该小于100ms");
|
||||
assert!(parsing_time.as_millis() < 100, "解析时间应该小于 100ms");
|
||||
assert!(serialization_time.as_millis() < 100, "序列化时间应该小于 100ms");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_edge_cases() {
|
||||
// 测试边界情况
|
||||
|
||||
// 1. 测试空版本ID
|
||||
// 1. 测试空版本 ID
|
||||
let mut fm = FileMeta::new();
|
||||
let version = FileMetaVersion {
|
||||
version_type: VersionType::Object,
|
||||
object: Some(MetaObject {
|
||||
version_id: None, // 空版本ID
|
||||
version_id: None, // 空版本 ID
|
||||
data_dir: None,
|
||||
erasure_algorithm: crate::fileinfo::ErasureAlgo::ReedSolomon,
|
||||
erasure_m: 1,
|
||||
@@ -2961,13 +2968,13 @@ mod test {
|
||||
|
||||
// 2. 测试极大的文件大小
|
||||
let large_object = MetaObject {
|
||||
size: usize::MAX,
|
||||
size: i64::MAX,
|
||||
part_sizes: vec![usize::MAX],
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// 应该能够处理大数值
|
||||
assert_eq!(large_object.size, usize::MAX);
|
||||
assert_eq!(large_object.size, i64::MAX);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
@@ -3024,9 +3031,9 @@ mod test {
|
||||
let large_size = mem::size_of_val(&large_fm);
|
||||
println!("Large FileMeta size: {} bytes", large_size);
|
||||
|
||||
// 验证内存使用是合理的(注意:size_of_val只计算栈上的大小,不包括堆分配)
|
||||
// 对于包含Vec的结构体,size_of_val可能相同,因为Vec的容量在堆上
|
||||
println!("版本数量: {}", large_fm.versions.len());
|
||||
// 验证内存使用是合理的(注意:size_of_val 只计算栈上的大小,不包括堆分配)
|
||||
// 对于包含 Vec 的结构体,size_of_val 可能相同,因为 Vec 的容量在堆上
|
||||
println!("版本数量:{}", large_fm.versions.len());
|
||||
assert!(!large_fm.versions.is_empty(), "应该有版本数据");
|
||||
}
|
||||
|
||||
@@ -3097,8 +3104,8 @@ mod test {
|
||||
};
|
||||
|
||||
// 验证参数的合理性
|
||||
assert!(obj.erasure_m > 0, "数据块数量必须大于0");
|
||||
assert!(obj.erasure_n > 0, "校验块数量必须大于0");
|
||||
assert!(obj.erasure_m > 0, "数据块数量必须大于 0");
|
||||
assert!(obj.erasure_n > 0, "校验块数量必须大于 0");
|
||||
assert_eq!(obj.erasure_dist.len(), data_blocks + parity_blocks);
|
||||
|
||||
// 验证序列化和反序列化
|
||||
@@ -3259,7 +3266,7 @@ mod test {
|
||||
// 测试多个版本列表的合并
|
||||
let merged = merge_file_meta_versions(1, false, 0, &[versions1.clone(), versions2.clone()]);
|
||||
// 合并结果可能为空,这取决于版本的兼容性,这是正常的
|
||||
println!("合并结果数量: {}", merged.len());
|
||||
println!("合并结果数量:{}", merged.len());
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -3269,12 +3276,12 @@ mod test {
|
||||
|
||||
for flag in flags {
|
||||
let flag_value = flag as u8;
|
||||
assert!(flag_value > 0, "标志位值应该大于0");
|
||||
assert!(flag_value > 0, "标志位值应该大于 0");
|
||||
|
||||
// 测试标志位组合
|
||||
let combined = Flags::FreeVersion as u8 | Flags::UsesDataDir as u8;
|
||||
// 对于位运算,组合值可能不总是大于单个值,这是正常的
|
||||
assert!(combined > 0, "组合标志位应该大于0");
|
||||
assert!(combined > 0, "组合标志位应该大于 0");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -3410,7 +3417,7 @@ mod test {
|
||||
("tabs", "col1\tcol2\tcol3"),
|
||||
("quotes", "\"quoted\" and 'single'"),
|
||||
("backslashes", "path\\to\\file"),
|
||||
("mixed", "Mixed: 中文, English, 123, !@#$%"),
|
||||
("mixed", "Mixed: 中文,English, 123, !@#$%"),
|
||||
];
|
||||
|
||||
for (key, value) in special_cases {
|
||||
@@ -3432,7 +3439,7 @@ mod test {
|
||||
("tabs", "col1\tcol2\tcol3"),
|
||||
("quotes", "\"quoted\" and 'single'"),
|
||||
("backslashes", "path\\to\\file"),
|
||||
("mixed", "Mixed: 中文, English, 123, !@#$%"),
|
||||
("mixed", "Mixed: 中文,English, 123, !@#$%"),
|
||||
] {
|
||||
assert_eq!(obj2.meta_user.get(key), Some(&expected_value.to_string()));
|
||||
}
|
||||
@@ -3529,7 +3536,7 @@ pub struct DetailedVersionStats {
|
||||
pub free_versions: usize,
|
||||
pub versions_with_data_dir: usize,
|
||||
pub versions_with_inline_data: usize,
|
||||
pub total_size: usize,
|
||||
pub total_size: i64,
|
||||
pub latest_mod_time: Option<OffsetDateTime>,
|
||||
}
|
||||
|
||||
|
||||
@@ -19,3 +19,5 @@ pub const X_RUSTFS_DATA_MOV: &str = "X-Rustfs-Internal-data-mov";
|
||||
pub const AMZ_OBJECT_TAGGING: &str = "X-Amz-Tagging";
|
||||
pub const AMZ_BUCKET_REPLICATION_STATUS: &str = "X-Amz-Replication-Status";
|
||||
pub const AMZ_DECODED_CONTENT_LENGTH: &str = "X-Amz-Decoded-Content-Length";
|
||||
|
||||
pub const RUSTFS_DATA_MOVE: &str = "X-Rustfs-Internal-data-mov";
|
||||
|
||||
@@ -91,7 +91,7 @@ pub fn create_complex_xlmeta() -> Result<Vec<u8>> {
|
||||
let mut fm = FileMeta::new();
|
||||
|
||||
// Create an object with 10 versions
|
||||
for i in 0..10 {
|
||||
for i in 0i64..10i64 {
|
||||
let version_id = Uuid::new_v4();
|
||||
let data_dir = if i % 3 == 0 { Some(Uuid::new_v4()) } else { None };
|
||||
|
||||
@@ -113,9 +113,9 @@ pub fn create_complex_xlmeta() -> Result<Vec<u8>> {
|
||||
part_numbers: vec![1],
|
||||
part_etags: vec![format!("etag-{:08x}", i)],
|
||||
part_sizes: vec![1024 * (i + 1) as usize],
|
||||
part_actual_sizes: vec![1024 * (i + 1) as usize],
|
||||
part_actual_sizes: vec![1024 * (i + 1)],
|
||||
part_indices: Vec::new(),
|
||||
size: 1024 * (i + 1) as usize,
|
||||
size: 1024 * (i + 1),
|
||||
mod_time: Some(OffsetDateTime::from_unix_timestamp(1705312200 + i * 60)?),
|
||||
meta_sys: HashMap::new(),
|
||||
meta_user: metadata,
|
||||
@@ -221,7 +221,7 @@ pub fn create_xlmeta_with_inline_data() -> Result<Vec<u8>> {
|
||||
part_sizes: vec![inline_data.len()],
|
||||
part_actual_sizes: Vec::new(),
|
||||
part_indices: Vec::new(),
|
||||
size: inline_data.len(),
|
||||
size: inline_data.len() as i64,
|
||||
mod_time: Some(OffsetDateTime::now_utc()),
|
||||
meta_sys: HashMap::new(),
|
||||
meta_user: HashMap::new(),
|
||||
|
||||
crates/notify/Cargo.toml (new file, 41 lines)
@@ -0,0 +1,41 @@
|
||||
[package]
|
||||
name = "rustfs-notify"
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
rust-version.workspace = true
|
||||
version.workspace = true
|
||||
|
||||
[dependencies]
|
||||
rustfs-utils = { workspace = true, features = ["path", "sys"] }
|
||||
async-trait = { workspace = true }
|
||||
chrono = { workspace = true, features = ["serde"] }
|
||||
const-str = { workspace = true }
|
||||
dashmap = { workspace = true }
|
||||
ecstore = { workspace = true }
|
||||
form_urlencoded = { workspace = true }
|
||||
once_cell = { workspace = true }
|
||||
quick-xml = { workspace = true, features = ["serialize", "async-tokio"] }
|
||||
reqwest = { workspace = true }
|
||||
rumqttc = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
snap = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
tokio = { workspace = true, features = ["rt-multi-thread", "sync", "time"] }
|
||||
tracing = { workspace = true }
|
||||
tracing-subscriber = { workspace = true, features = ["env-filter"] }
|
||||
uuid = { workspace = true, features = ["v4", "serde"] }
|
||||
url = { workspace = true }
|
||||
urlencoding = { workspace = true }
|
||||
wildmatch = { workspace = true, features = ["serde"] }
|
||||
|
||||
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { workspace = true, features = ["test-util"] }
|
||||
reqwest = { workspace = true, default-features = false, features = ["rustls-tls", "charset", "http2", "system-proxy", "stream", "json", "blocking"] }
|
||||
axum = { workspace = true }
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
crates/notify/examples/full_demo.rs (new file, 171 lines)
@@ -0,0 +1,171 @@
|
||||
use ecstore::config::{Config, ENABLE_KEY, ENABLE_ON, KV, KVS};
|
||||
use rustfs_notify::arn::TargetID;
|
||||
use rustfs_notify::factory::{
|
||||
DEFAULT_TARGET, MQTT_BROKER, MQTT_PASSWORD, MQTT_QOS, MQTT_QUEUE_DIR, MQTT_QUEUE_LIMIT, MQTT_TOPIC, MQTT_USERNAME,
|
||||
NOTIFY_MQTT_SUB_SYS, NOTIFY_WEBHOOK_SUB_SYS, WEBHOOK_AUTH_TOKEN, WEBHOOK_ENDPOINT, WEBHOOK_QUEUE_DIR, WEBHOOK_QUEUE_LIMIT,
|
||||
};
|
||||
use rustfs_notify::store::DEFAULT_LIMIT;
|
||||
use rustfs_notify::{BucketNotificationConfig, Event, EventName, LogLevel, NotificationError, init_logger};
|
||||
use rustfs_notify::{initialize, notification_system};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tracing::info;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), NotificationError> {
|
||||
init_logger(LogLevel::Debug);
|
||||
|
||||
let system = match notification_system() {
|
||||
Some(sys) => sys,
|
||||
None => {
|
||||
let config = Config::new();
|
||||
initialize(config).await?;
|
||||
notification_system().expect("Failed to initialize notification system")
|
||||
}
|
||||
};
|
||||
|
||||
// --- Initial configuration (Webhook and MQTT) ---
|
||||
let mut config = Config::new();
|
||||
let current_root = rustfs_utils::dirs::get_project_root().expect("failed to get project root");
|
||||
println!("Current project root: {}", current_root.display());
|
||||
|
||||
let webhook_kvs_vec = vec![
|
||||
KV {
|
||||
key: ENABLE_KEY.to_string(),
|
||||
value: ENABLE_ON.to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: WEBHOOK_ENDPOINT.to_string(),
|
||||
value: "http://127.0.0.1:3020/webhook".to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: WEBHOOK_AUTH_TOKEN.to_string(),
|
||||
value: "secret-token".to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: WEBHOOK_QUEUE_DIR.to_string(),
|
||||
value: current_root
|
||||
.clone()
|
||||
.join("../../deploy/logs/notify/webhook")
|
||||
.to_str()
|
||||
.unwrap()
|
||||
.to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: WEBHOOK_QUEUE_LIMIT.to_string(),
|
||||
value: DEFAULT_LIMIT.to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
];
|
||||
let webhook_kvs = KVS(webhook_kvs_vec);
|
||||
|
||||
let mut webhook_targets = std::collections::HashMap::new();
|
||||
webhook_targets.insert(DEFAULT_TARGET.to_string(), webhook_kvs);
|
||||
config.0.insert(NOTIFY_WEBHOOK_SUB_SYS.to_string(), webhook_targets);
|
||||
|
||||
// MQTT target configuration
|
||||
let mqtt_kvs_vec = vec![
|
||||
KV {
|
||||
key: ENABLE_KEY.to_string(),
|
||||
value: ENABLE_ON.to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: MQTT_BROKER.to_string(),
|
||||
value: "mqtt://localhost:1883".to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: MQTT_TOPIC.to_string(),
|
||||
value: "rustfs/events".to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: MQTT_QOS.to_string(),
|
||||
value: "1".to_string(), // AtLeastOnce
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: MQTT_USERNAME.to_string(),
|
||||
value: "test".to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: MQTT_PASSWORD.to_string(),
|
||||
value: "123456".to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: MQTT_QUEUE_DIR.to_string(),
|
||||
value: current_root
|
||||
.join("../../deploy/logs/notify/mqtt")
|
||||
.to_str()
|
||||
.unwrap()
|
||||
.to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: MQTT_QUEUE_LIMIT.to_string(),
|
||||
value: DEFAULT_LIMIT.to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
];
|
||||
|
||||
let mqtt_kvs = KVS(mqtt_kvs_vec);
|
||||
let mut mqtt_targets = std::collections::HashMap::new();
|
||||
mqtt_targets.insert(DEFAULT_TARGET.to_string(), mqtt_kvs);
|
||||
config.0.insert(NOTIFY_MQTT_SUB_SYS.to_string(), mqtt_targets);
|
||||
|
||||
// Load the configuration and initialize the system
|
||||
*system.config.write().await = config;
|
||||
system.init().await?;
|
||||
info!("✅ System initialized with Webhook and MQTT targets.");
|
||||
|
||||
// --- Query the currently active targets ---
|
||||
let active_targets = system.get_active_targets().await;
|
||||
info!("\n---> Currently active targets: {:?}", active_targets);
|
||||
assert_eq!(active_targets.len(), 2);
|
||||
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
|
||||
// --- Precisely remove one target (e.g. MQTT) ---
|
||||
info!("\n---> Removing MQTT target...");
|
||||
let mqtt_target_id = TargetID::new(DEFAULT_TARGET.to_string(), "mqtt".to_string());
|
||||
system.remove_target(&mqtt_target_id, NOTIFY_MQTT_SUB_SYS).await?;
|
||||
info!("✅ MQTT target removed.");
|
||||
|
||||
// --- Query the active targets again ---
|
||||
let active_targets_after_removal = system.get_active_targets().await;
|
||||
info!("\n---> Active targets after removal: {:?}", active_targets_after_removal);
|
||||
assert_eq!(active_targets_after_removal.len(), 1);
|
||||
assert_eq!(active_targets_after_removal[0].id, DEFAULT_TARGET.to_string());
|
||||
|
||||
// --- Send events for verification ---
|
||||
// Configure rules pointing to the Webhook target and to the already-removed MQTT target
|
||||
let mut bucket_config = BucketNotificationConfig::new("us-east-1");
|
||||
bucket_config.add_rule(
|
||||
&[EventName::ObjectCreatedPut],
|
||||
"*".to_string(),
|
||||
TargetID::new(DEFAULT_TARGET.to_string(), "webhook".to_string()),
|
||||
);
|
||||
bucket_config.add_rule(
|
||||
&[EventName::ObjectCreatedPut],
|
||||
"*".to_string(),
|
||||
TargetID::new(DEFAULT_TARGET.to_string(), "mqtt".to_string()), // This rule will match, but the Target cannot be found
|
||||
);
|
||||
system.load_bucket_notification_config("my-bucket", &bucket_config).await?;
|
||||
|
||||
info!("\n---> Sending an event...");
|
||||
let event = Arc::new(Event::new_test_event("my-bucket", "document.pdf", EventName::ObjectCreatedPut));
|
||||
system.send_event(event).await;
|
||||
info!("✅ Event sent. Only the Webhook target should receive it. Check logs for warnings about the missing MQTT target.");
|
||||
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
|
||||
info!("\nDemo completed successfully");
|
||||
Ok(())
|
||||
}
|
||||
crates/notify/examples/full_demo_one.rs (new file, 174 lines)
@@ -0,0 +1,174 @@
|
||||
use ecstore::config::{Config, ENABLE_KEY, ENABLE_ON, KV, KVS};
|
||||
use std::sync::Arc;
|
||||
// Uses the global accessor functions
|
||||
use rustfs_notify::arn::TargetID;
|
||||
use rustfs_notify::factory::{
|
||||
DEFAULT_TARGET, MQTT_BROKER, MQTT_PASSWORD, MQTT_QOS, MQTT_QUEUE_DIR, MQTT_QUEUE_LIMIT, MQTT_TOPIC, MQTT_USERNAME,
|
||||
NOTIFY_MQTT_SUB_SYS, NOTIFY_WEBHOOK_SUB_SYS, WEBHOOK_AUTH_TOKEN, WEBHOOK_ENDPOINT, WEBHOOK_QUEUE_DIR, WEBHOOK_QUEUE_LIMIT,
|
||||
};
|
||||
use rustfs_notify::store::DEFAULT_LIMIT;
|
||||
use rustfs_notify::{BucketNotificationConfig, Event, EventName, LogLevel, NotificationError, init_logger};
|
||||
use rustfs_notify::{initialize, notification_system};
|
||||
use std::time::Duration;
|
||||
use tracing::info;
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() -> Result<(), NotificationError> {
|
||||
init_logger(LogLevel::Debug);
|
||||
|
||||
// Get global NotificationSystem instance
|
||||
let system = match notification_system() {
|
||||
Some(sys) => sys,
|
||||
None => {
|
||||
let config = Config::new();
|
||||
initialize(config).await?;
|
||||
notification_system().expect("Failed to initialize notification system")
|
||||
}
|
||||
};
|
||||
|
||||
// --- Initial configuration ---
|
||||
let mut config = Config::new();
|
||||
let current_root = rustfs_utils::dirs::get_project_root().expect("failed to get project root");
|
||||
// Webhook target
|
||||
let webhook_kvs_vec = vec![
|
||||
KV {
|
||||
key: ENABLE_KEY.to_string(),
|
||||
value: ENABLE_ON.to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: WEBHOOK_ENDPOINT.to_string(),
|
||||
value: "http://127.0.0.1:3020/webhook".to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: WEBHOOK_AUTH_TOKEN.to_string(),
|
||||
value: "secret-token".to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: WEBHOOK_QUEUE_DIR.to_string(),
|
||||
value: current_root
|
||||
.clone()
|
||||
.join("../../deploy/logs/notify/webhook")
|
||||
.to_str()
|
||||
.unwrap()
|
||||
.to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: WEBHOOK_QUEUE_LIMIT.to_string(),
|
||||
value: DEFAULT_LIMIT.to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
];
|
||||
let webhook_kvs = KVS(webhook_kvs_vec);
|
||||
|
||||
let mut webhook_targets = std::collections::HashMap::new();
|
||||
webhook_targets.insert(DEFAULT_TARGET.to_string(), webhook_kvs);
|
||||
config.0.insert(NOTIFY_WEBHOOK_SUB_SYS.to_string(), webhook_targets);
|
||||
|
||||
// Load the initial configuration and initialize the system
|
||||
*system.config.write().await = config;
|
||||
system.init().await?;
|
||||
info!("✅ System initialized with Webhook target.");
|
||||
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
|
||||
// --- Dynamically update system configuration: Add an MQTT Target ---
|
||||
info!("\n---> Dynamically adding MQTT target...");
|
||||
|
||||
let mqtt_kvs_vec = vec![
|
||||
KV {
|
||||
key: ENABLE_KEY.to_string(),
|
||||
value: ENABLE_ON.to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: MQTT_BROKER.to_string(),
|
||||
value: "mqtt://localhost:1883".to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: MQTT_TOPIC.to_string(),
|
||||
value: "rustfs/events".to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: MQTT_QOS.to_string(),
|
||||
value: "1".to_string(), // AtLeastOnce
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: MQTT_USERNAME.to_string(),
|
||||
value: "test".to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: MQTT_PASSWORD.to_string(),
|
||||
value: "123456".to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: MQTT_QUEUE_DIR.to_string(),
|
||||
value: current_root
|
||||
.join("../../deploy/logs/notify/mqtt")
|
||||
.to_str()
|
||||
.unwrap()
|
||||
.to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: MQTT_QUEUE_LIMIT.to_string(),
|
||||
value: DEFAULT_LIMIT.to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
];
|
||||
|
||||
let mqtt_kvs = KVS(mqtt_kvs_vec);
|
||||
// let mut mqtt_targets = std::collections::HashMap::new();
|
||||
// mqtt_targets.insert(DEFAULT_TARGET.to_string(), mqtt_kvs.clone());
|
||||
|
||||
system
|
||||
.set_target_config(NOTIFY_MQTT_SUB_SYS, DEFAULT_TARGET, mqtt_kvs)
|
||||
.await?;
|
||||
info!("✅ MQTT target added and system reloaded.");
|
||||
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
|
||||
// --- Loading and managing Bucket configurations ---
|
||||
info!("\n---> Loading bucket notification config...");
|
||||
let mut bucket_config = BucketNotificationConfig::new("us-east-1");
|
||||
bucket_config.add_rule(
|
||||
&[EventName::ObjectCreatedPut],
|
||||
"*".to_string(),
|
||||
TargetID::new(DEFAULT_TARGET.to_string(), "webhook".to_string()),
|
||||
);
|
||||
bucket_config.add_rule(
|
||||
&[EventName::ObjectCreatedPut],
|
||||
"*".to_string(),
|
||||
TargetID::new(DEFAULT_TARGET.to_string(), "mqtt".to_string()),
|
||||
);
|
||||
system.load_bucket_notification_config("my-bucket", &bucket_config).await?;
|
||||
info!("✅ Bucket 'my-bucket' config loaded.");
|
||||
|
||||
// --- Send events ---
|
||||
info!("\n---> Sending an event...");
|
||||
let event = Arc::new(Event::new_test_event("my-bucket", "document.pdf", EventName::ObjectCreatedPut));
|
||||
system.send_event(event).await;
|
||||
info!("✅ Event sent. Both Webhook and MQTT targets should receive it.");
|
||||
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
|
||||
// --- Dynamically remove configuration ---
|
||||
info!("\n---> Dynamically removing Webhook target...");
|
||||
system.remove_target_config(NOTIFY_WEBHOOK_SUB_SYS, DEFAULT_TARGET).await?;
|
||||
info!("✅ Webhook target removed and system reloaded.");
|
||||
|
||||
info!("\n---> Removing bucket notification config...");
|
||||
system.remove_bucket_notification_config("my-bucket").await;
|
||||
info!("✅ Bucket 'my-bucket' config removed.");
|
||||
|
||||
info!("\nDemo completed successfully");
|
||||
Ok(())
|
||||
}
|
||||
crates/notify/examples/webhook.rs (new file, 201 lines)
@@ -0,0 +1,201 @@
|
||||
use axum::routing::get;
|
||||
use axum::{
|
||||
Router,
|
||||
extract::Json,
|
||||
http::{HeaderMap, Response, StatusCode},
|
||||
routing::post,
|
||||
};
|
||||
use serde_json::Value;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use axum::extract::Query;
|
||||
use serde::Deserialize;
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct ResetParams {
|
||||
reason: Option<String>,
|
||||
}
|
||||
|
||||
// Define a global variable and count the number of data received
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
|
||||
static WEBHOOK_COUNT: AtomicU64 = AtomicU64::new(0);
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
// Build an application
|
||||
let app = Router::new()
|
||||
.route("/webhook", post(receive_webhook))
|
||||
.route("/webhook/reset/{reason}", get(reset_webhook_count_with_path))
|
||||
.route("/webhook/reset", get(reset_webhook_count))
|
||||
.route("/webhook", get(receive_webhook));
|
||||
// Start the server
|
||||
let addr = "0.0.0.0:3020";
|
||||
let listener = tokio::net::TcpListener::bind(addr).await.unwrap();
|
||||
println!("Server running on {}", addr);
|
||||
|
||||
// Self-checking after the service is started
|
||||
tokio::spawn(async move {
|
||||
// Give the server some time to start
|
||||
tokio::time::sleep(std::time::Duration::from_secs(1)).await;
|
||||
|
||||
match is_service_active(addr).await {
|
||||
Ok(true) => println!("Service health check: Successful - Service is running normally"),
|
||||
Ok(false) => eprintln!("Service health check: Failed - service did not respond"),
Err(e) => eprintln!("Service health check error: {}", e),
|
||||
}
|
||||
});
|
||||
|
||||
// Create a shutdown signal processing
|
||||
tokio::select! {
|
||||
result = axum::serve(listener, app) => {
|
||||
if let Err(e) = result {
|
||||
eprintln!("Server error: {}", e);
|
||||
}
|
||||
}
|
||||
_ = tokio::signal::ctrl_c() => {
|
||||
println!("Shutting down server...");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a method to reset the value of WEBHOOK_COUNT
|
||||
async fn reset_webhook_count_with_path(axum::extract::Path(reason): axum::extract::Path<String>) -> Response<String> {
|
||||
// Output the value of the current counter
|
||||
let current_count = WEBHOOK_COUNT.load(Ordering::SeqCst);
|
||||
println!("Current webhook count: {}", current_count);
|
||||
|
||||
println!("Reset webhook count, reason: {}", reason);
|
||||
// Reset the counter to 0
|
||||
WEBHOOK_COUNT.store(0, Ordering::SeqCst);
|
||||
println!("Webhook count has been reset to 0.");
|
||||
|
||||
Response::builder()
|
||||
.header("Foo", "Bar")
|
||||
.status(StatusCode::OK)
|
||||
.body(format!(
|
||||
"Webhook count reset successfully. Previous count: {}. Reason: {}",
|
||||
current_count, reason
|
||||
))
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
/// Create a method to reset the value of WEBHOOK_COUNT
|
||||
/// You can reset the counter by calling this method
|
||||
async fn reset_webhook_count(Query(params): Query<ResetParams>, headers: HeaderMap) -> Response<String> {
|
||||
// Output the value of the current counter
|
||||
let current_count = WEBHOOK_COUNT.load(Ordering::SeqCst);
|
||||
println!("Current webhook count: {}", current_count);
|
||||
|
||||
let reason = params.reason.unwrap_or_else(|| "Reason not provided".to_string());
|
||||
println!("Reset webhook count, reason: {}", reason);
|
||||
|
||||
for header in headers {
|
||||
let (key, value) = header;
|
||||
println!("Header: {:?}: {:?}", key, value);
|
||||
}
|
||||
|
||||
println!("Reset webhook count printed headers");
|
||||
// Reset the counter to 0
|
||||
WEBHOOK_COUNT.store(0, Ordering::SeqCst);
|
||||
println!("Webhook count has been reset to 0.");
|
||||
Response::builder()
|
||||
.header("Foo", "Bar")
|
||||
.status(StatusCode::OK)
|
||||
.body(format!("Webhook count reset successfully current_count:{}", current_count))
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
async fn is_service_active(addr: &str) -> Result<bool, String> {
|
||||
let socket_addr = tokio::net::lookup_host(addr)
|
||||
.await
|
||||
.map_err(|e| format!("Unable to resolve host: {}", e))?
|
||||
.next()
|
||||
.ok_or_else(|| "Address not found".to_string())?;
|
||||
|
||||
println!("Checking service status:{}", socket_addr);
|
||||
|
||||
match tokio::time::timeout(std::time::Duration::from_secs(5), tokio::net::TcpStream::connect(socket_addr)).await {
|
||||
Ok(Ok(_)) => Ok(true),
|
||||
Ok(Err(e)) => {
|
||||
if e.kind() == std::io::ErrorKind::ConnectionRefused {
|
||||
Ok(false)
|
||||
} else {
|
||||
Err(format!("Connection failed:{}", e))
|
||||
}
|
||||
}
|
||||
Err(_) => Err("Connection timeout".to_string()),
|
||||
}
|
||||
}
|
||||
|
||||
async fn receive_webhook(Json(payload): Json<Value>) -> StatusCode {
|
||||
let start = SystemTime::now();
|
||||
let since_the_epoch = start.duration_since(UNIX_EPOCH).expect("Time went backwards");
|
||||
|
||||
// get the number of seconds since the unix era
|
||||
let seconds = since_the_epoch.as_secs();
|
||||
|
||||
// Manually calculate year, month, day, hour, minute, and second
|
||||
let (year, month, day, hour, minute, second) = convert_seconds_to_date(seconds);
|
||||
|
||||
// output result
|
||||
println!("current time:{:04}-{:02}-{:02} {:02}:{:02}:{:02}", year, month, day, hour, minute, second);
|
||||
println!(
|
||||
"received a webhook request time:{} content:\n {}",
|
||||
seconds,
|
||||
serde_json::to_string_pretty(&payload).unwrap()
|
||||
);
|
||||
WEBHOOK_COUNT.fetch_add(1, Ordering::SeqCst);
|
||||
println!("Total webhook requests received: {}", WEBHOOK_COUNT.load(Ordering::SeqCst));
|
||||
StatusCode::OK
|
||||
}
|
||||
|
||||
fn convert_seconds_to_date(seconds: u64) -> (u32, u32, u32, u32, u32, u32) {
|
||||
// assume that the time zone is utc
|
||||
let seconds_per_minute = 60;
|
||||
let seconds_per_hour = 3600;
|
||||
let seconds_per_day = 86400;
|
||||
|
||||
// Calculate the year, month, day, hour, minute, and second corresponding to the number of seconds
|
||||
let mut total_seconds = seconds;
|
||||
let mut year = 1970;
|
||||
let mut month = 1;
|
||||
let mut day = 1;
|
||||
let mut hour = 0;
|
||||
let mut minute = 0;
|
||||
let mut second = 0;
|
||||
|
||||
// calculate year
|
||||
while total_seconds >= 31536000 {
|
||||
year += 1;
|
||||
total_seconds -= 31536000; // simplified processing no leap year considered
|
||||
}
|
||||
|
||||
// calculate month
|
||||
let days_in_month = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31];
|
||||
for m in &days_in_month {
|
||||
if total_seconds >= m * seconds_per_day {
|
||||
month += 1;
|
||||
total_seconds -= m * seconds_per_day;
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// calculate the number of days
|
||||
day += total_seconds / seconds_per_day;
|
||||
total_seconds %= seconds_per_day;
|
||||
|
||||
// calculate hours
|
||||
hour += total_seconds / seconds_per_hour;
|
||||
total_seconds %= seconds_per_hour;
|
||||
|
||||
// calculate minutes
|
||||
minute += total_seconds / seconds_per_minute;
|
||||
total_seconds %= seconds_per_minute;
|
||||
|
||||
// calculate the number of seconds
|
||||
second += total_seconds;
|
||||
|
||||
(year as u32, month as u32, day as u32, hour as u32, minute as u32, second as u32)
|
||||
}
|
||||
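Note that `convert_seconds_to_date` above ignores leap years, so the printed date drifts from the real calendar date over time. A minimal sketch of an alternative using chrono (already listed in the crate's dependencies; the helper name `format_unix_seconds` is illustrative, not part of this diff):

```rust
use chrono::{TimeZone, Utc};

/// Illustrative helper: format a Unix timestamp without the leap-year drift
/// of the manual conversion above.
fn format_unix_seconds(seconds: u64) -> String {
    match Utc.timestamp_opt(seconds as i64, 0).single() {
        Some(dt) => dt.format("%Y-%m-%d %H:%M:%S").to_string(),
        None => format!("invalid timestamp: {}", seconds),
    }
}
```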
crates/notify/src/arn.rs (new file, 236 lines)
@@ -0,0 +1,236 @@
|
||||
use crate::TargetError;
|
||||
use const_str::concat;
|
||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||
use std::fmt;
|
||||
use std::str::FromStr;
|
||||
use thiserror::Error;
|
||||
|
||||
pub(crate) const DEFAULT_ARN_PARTITION: &str = "rustfs";
|
||||
|
||||
pub(crate) const DEFAULT_ARN_SERVICE: &str = "sqs";
|
||||
|
||||
/// Default ARN prefix for SQS
|
||||
/// "arn:rustfs:sqs:"
|
||||
const ARN_PREFIX: &str = concat!("arn:", DEFAULT_ARN_PARTITION, ":", DEFAULT_ARN_SERVICE, ":");
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
pub enum TargetIDError {
|
||||
#[error("Invalid TargetID format '{0}', expect 'ID:Name'")]
|
||||
InvalidFormat(String),
|
||||
}
|
||||
|
||||
/// Target ID, used to identify notification targets
|
||||
#[derive(Debug, Clone, Eq, PartialEq, Hash, PartialOrd, Ord)]
|
||||
pub struct TargetID {
|
||||
pub id: String,
|
||||
pub name: String,
|
||||
}
|
||||
|
||||
impl TargetID {
|
||||
pub fn new(id: String, name: String) -> Self {
|
||||
Self { id, name }
|
||||
}
|
||||
|
||||
/// Convert to string representation
|
||||
pub fn to_id_string(&self) -> String {
|
||||
format!("{}:{}", self.id, self.name)
|
||||
}
|
||||
|
||||
/// Create an ARN
|
||||
pub fn to_arn(&self, region: &str) -> ARN {
|
||||
ARN {
|
||||
target_id: self.clone(),
|
||||
region: region.to_string(),
|
||||
service: DEFAULT_ARN_SERVICE.to_string(), // Default Service
|
||||
partition: DEFAULT_ARN_PARTITION.to_string(), // Default partition
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for TargetID {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}:{}", self.id, self.name)
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStr for TargetID {
|
||||
type Err = TargetIDError;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
let parts: Vec<&str> = s.splitn(2, ':').collect();
|
||||
if parts.len() == 2 {
|
||||
Ok(TargetID {
|
||||
id: parts[0].to_string(),
|
||||
name: parts[1].to_string(),
|
||||
})
|
||||
} else {
|
||||
Err(TargetIDError::InvalidFormat(s.to_string()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for TargetID {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
serializer.serialize_str(&self.to_id_string())
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for TargetID {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
let s = String::deserialize(deserializer)?;
|
||||
TargetID::from_str(&s).map_err(serde::de::Error::custom)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Error)]
|
||||
pub enum ArnError {
|
||||
#[error("Invalid ARN format '{0}'")]
|
||||
InvalidFormat(String),
|
||||
#[error("ARN component missing")]
|
||||
MissingComponents,
|
||||
}
|
||||
|
||||
/// ARN - AWS resource name representation
|
||||
#[derive(Debug, Clone, Eq, PartialEq)]
|
||||
pub struct ARN {
|
||||
pub target_id: TargetID,
|
||||
pub region: String,
|
||||
// Service types, such as "sqs", "sns", "lambda", etc. This defaults to "sqs" to match the Go example.
|
||||
pub service: String,
|
||||
// Partitions, such as "aws", "aws-cn", or custom values such as "rustfs"
|
||||
pub partition: String,
|
||||
}
|
||||
|
||||
impl ARN {
|
||||
pub fn new(target_id: TargetID, region: String) -> Self {
|
||||
ARN {
|
||||
target_id,
|
||||
region,
|
||||
service: DEFAULT_ARN_SERVICE.to_string(), // Default is sqs
|
||||
partition: DEFAULT_ARN_PARTITION.to_string(), // Default is rustfs partition
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the string representation of ARN
|
||||
/// Returns the ARN string in the format "{ARN_PREFIX}{region}:{target_id}" (ARN_PREFIX already ends with ':')
|
||||
#[allow(clippy::inherent_to_string)]
|
||||
pub fn to_arn_string(&self) -> String {
|
||||
if self.target_id.id.is_empty() && self.target_id.name.is_empty() && self.region.is_empty() {
|
||||
return String::new();
|
||||
}
|
||||
format!("{}:{}:{}", ARN_PREFIX, self.region, self.target_id.to_id_string())
|
||||
}
|
||||
|
||||
/// Parsing ARN from string
|
||||
pub fn parse(s: &str) -> Result<Self, TargetError> {
|
||||
if !s.starts_with(ARN_PREFIX) {
|
||||
return Err(TargetError::InvalidARN(s.to_string()));
|
||||
}
|
||||
|
||||
let tokens: Vec<&str> = s.split(':').collect();
|
||||
if tokens.len() != 6 {
|
||||
return Err(TargetError::InvalidARN(s.to_string()));
|
||||
}
|
||||
|
||||
if tokens[4].is_empty() || tokens[5].is_empty() {
|
||||
return Err(TargetError::InvalidARN(s.to_string()));
|
||||
}
|
||||
|
||||
Ok(ARN {
|
||||
region: tokens[3].to_string(),
|
||||
target_id: TargetID {
|
||||
id: tokens[4].to_string(),
|
||||
name: tokens[5].to_string(),
|
||||
},
|
||||
service: tokens[2].to_string(), // Service Type
|
||||
partition: tokens[1].to_string(), // Partition
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for ARN {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
if self.target_id.id.is_empty() && self.target_id.name.is_empty() && self.region.is_empty() {
|
||||
// Returns an empty string if all parts are empty
|
||||
return Ok(());
|
||||
}
|
||||
write!(
|
||||
f,
|
||||
"arn:{}:{}:{}:{}:{}",
|
||||
self.partition, self.service, self.region, self.target_id.id, self.target_id.name
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
impl FromStr for ARN {
|
||||
type Err = ArnError;
|
||||
|
||||
fn from_str(s: &str) -> Result<Self, Self::Err> {
|
||||
let parts: Vec<&str> = s.split(':').collect();
|
||||
if parts.len() < 6 {
|
||||
return Err(ArnError::InvalidFormat(s.to_string()));
|
||||
}
|
||||
|
||||
if parts[0] != "arn" {
|
||||
return Err(ArnError::InvalidFormat(s.to_string()));
|
||||
}
|
||||
|
||||
let partition = parts[1].to_string();
|
||||
let service = parts[2].to_string();
|
||||
let region = parts[3].to_string();
|
||||
let id = parts[4].to_string();
|
||||
let name = parts[5..].join(":"); // The name section may contain colons, although this is not usually the case in SQS ARN
|
||||
|
||||
if id.is_empty() || name.is_empty() {
|
||||
return Err(ArnError::MissingComponents);
|
||||
}
|
||||
|
||||
Ok(ARN {
|
||||
target_id: TargetID { id, name },
|
||||
region,
|
||||
service,
|
||||
partition,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Serialization implementation
|
||||
impl Serialize for ARN {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
serializer.serialize_str(&self.to_arn_string())
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for ARN {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
// deserializer.deserialize_str(ARNVisitor)
|
||||
let s = String::deserialize(deserializer)?;
|
||||
if s.is_empty() {
|
||||
// Handle an empty ARN string, for example, creating an empty or default Arn instance
|
||||
// Or return an error based on business logic
|
||||
// Here we create an empty TargetID and region Arn
|
||||
return Ok(ARN {
|
||||
target_id: TargetID {
|
||||
id: String::new(),
|
||||
name: String::new(),
|
||||
},
|
||||
region: String::new(),
|
||||
service: DEFAULT_ARN_SERVICE.to_string(),
|
||||
partition: DEFAULT_ARN_PARTITION.to_string(),
|
||||
});
|
||||
}
|
||||
ARN::from_str(&s).map_err(serde::de::Error::custom)
|
||||
}
|
||||
}
|
||||
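A brief sketch (not part of this diff) of how the `TargetID` and `ARN` string forms above compose; the import path and the concrete values are illustrative:

```rust
use rustfs_notify::arn::{ARN, TargetID};
use std::str::FromStr;

fn arn_round_trip_sketch() {
    // Illustrative values: target "1:webhook" in region "us-east-1".
    let target = TargetID::new("1".to_string(), "webhook".to_string());
    assert_eq!(target.to_id_string(), "1:webhook");

    // Display renders "arn:{partition}:{service}:{region}:{id}:{name}".
    let arn = target.to_arn("us-east-1");
    assert_eq!(arn.to_string(), "arn:rustfs:sqs:us-east-1:1:webhook");

    // FromStr parses the same six-token form back into its components.
    let parsed = ARN::from_str("arn:rustfs:sqs:us-east-1:1:webhook").unwrap();
    assert_eq!(parsed.target_id, target);
}
```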
crates/notify/src/error.rs (new file, 120 lines)
@@ -0,0 +1,120 @@
|
||||
use crate::arn::TargetID;
|
||||
use std::io;
|
||||
use thiserror::Error;
|
||||
|
||||
/// Error types for the store
|
||||
#[derive(Debug, Error)]
|
||||
pub enum StoreError {
|
||||
#[error("I/O error: {0}")]
|
||||
Io(#[from] io::Error),
|
||||
|
||||
#[error("Serialization error: {0}")]
|
||||
Serialization(String),
|
||||
|
||||
#[error("Deserialization error: {0}")]
|
||||
Deserialization(String),
|
||||
|
||||
#[error("Compression error: {0}")]
|
||||
Compression(String),
|
||||
|
||||
#[error("Entry limit exceeded")]
|
||||
LimitExceeded,
|
||||
|
||||
#[error("Entry not found")]
|
||||
NotFound,
|
||||
|
||||
#[error("Invalid entry: {0}")]
|
||||
Internal(String), // Added internal error type
|
||||
}
|
||||
|
||||
/// Error types for targets
|
||||
#[derive(Debug, Error)]
|
||||
pub enum TargetError {
|
||||
#[error("Storage error: {0}")]
|
||||
Storage(String),
|
||||
|
||||
#[error("Network error: {0}")]
|
||||
Network(String),
|
||||
|
||||
#[error("Request error: {0}")]
|
||||
Request(String),
|
||||
|
||||
#[error("Timeout error: {0}")]
|
||||
Timeout(String),
|
||||
|
||||
#[error("Authentication error: {0}")]
|
||||
Authentication(String),
|
||||
|
||||
#[error("Configuration error: {0}")]
|
||||
Configuration(String),
|
||||
|
||||
#[error("Encoding error: {0}")]
|
||||
Encoding(String),
|
||||
|
||||
#[error("Serialization error: {0}")]
|
||||
Serialization(String),
|
||||
|
||||
#[error("Target not connected")]
|
||||
NotConnected,
|
||||
|
||||
#[error("Target initialization failed: {0}")]
|
||||
Initialization(String),
|
||||
|
||||
#[error("Invalid ARN: {0}")]
|
||||
InvalidARN(String),
|
||||
|
||||
#[error("Unknown error: {0}")]
|
||||
Unknown(String),
|
||||
|
||||
#[error("Target is disabled")]
|
||||
Disabled,
|
||||
}
|
||||
|
||||
/// Error types for the notification system
|
||||
#[derive(Debug, Error)]
|
||||
pub enum NotificationError {
|
||||
#[error("Target error: {0}")]
|
||||
Target(#[from] TargetError),
|
||||
|
||||
#[error("Configuration error: {0}")]
|
||||
Configuration(String),
|
||||
|
||||
#[error("ARN not found: {0}")]
|
||||
ARNNotFound(String),
|
||||
|
||||
#[error("Invalid ARN: {0}")]
|
||||
InvalidARN(String),
|
||||
|
||||
#[error("Bucket notification error: {0}")]
|
||||
BucketNotification(String),
|
||||
|
||||
#[error("Rule configuration error: {0}")]
|
||||
RuleConfiguration(String),
|
||||
|
||||
#[error("System initialization error: {0}")]
|
||||
Initialization(String),
|
||||
|
||||
#[error("Notification system has already been initialized")]
|
||||
AlreadyInitialized,
|
||||
|
||||
#[error("I/O error: {0}")]
|
||||
Io(std::io::Error),
|
||||
|
||||
#[error("Failed to read configuration: {0}")]
|
||||
ReadConfig(String),
|
||||
|
||||
#[error("Failed to save configuration: {0}")]
|
||||
SaveConfig(String),
|
||||
|
||||
#[error("Target '{0}' not found")]
|
||||
TargetNotFound(TargetID),
|
||||
|
||||
#[error("Server not initialized")]
|
||||
ServerNotInitialized,
|
||||
}
|
||||
|
||||
impl From<url::ParseError> for TargetError {
|
||||
fn from(err: url::ParseError) -> Self {
|
||||
TargetError::Configuration(format!("URL parse error: {}", err))
|
||||
}
|
||||
}
|
||||
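A small sketch (illustrative function names, not part of this diff) of how the `#[from]` attribute above lets a `TargetError` propagate into a `NotificationError` with `?`, assuming both error types are re-exported at the crate root as the `use crate::TargetError` imports elsewhere suggest:

```rust
use rustfs_notify::{NotificationError, TargetError};

fn connect_target() -> Result<(), TargetError> {
    // Illustrative failure.
    Err(TargetError::NotConnected)
}

fn init_system() -> Result<(), NotificationError> {
    // `?` converts the TargetError into NotificationError::Target via #[from].
    connect_target()?;
    Ok(())
}
```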
crates/notify/src/event.rs (new file, 543 lines)
@@ -0,0 +1,543 @@
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::fmt;
|
||||
use url::form_urlencoded;
|
||||
|
||||
/// Error returned when parsing an event name string fails.
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct ParseEventNameError(String);
|
||||
|
||||
impl fmt::Display for ParseEventNameError {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "Invalid event name:{}", self.0)
|
||||
}
|
||||
}
|
||||
|
||||
impl std::error::Error for ParseEventNameError {}
|
||||
|
||||
/// Represents the type of event that occurs on the object.
|
||||
/// Based on AWS S3 event type and includes RustFS extension.
|
||||
#[derive(Debug, Clone, Copy, Serialize, Deserialize, PartialEq, Eq, Hash)]
|
||||
pub enum EventName {
|
||||
// Single event type (values are 1-32 for compatible mask logic)
|
||||
ObjectAccessedGet = 1,
|
||||
ObjectAccessedGetRetention = 2,
|
||||
ObjectAccessedGetLegalHold = 3,
|
||||
ObjectAccessedHead = 4,
|
||||
ObjectAccessedAttributes = 5,
|
||||
ObjectCreatedCompleteMultipartUpload = 6,
|
||||
ObjectCreatedCopy = 7,
|
||||
ObjectCreatedPost = 8,
|
||||
ObjectCreatedPut = 9,
|
||||
ObjectCreatedPutRetention = 10,
|
||||
ObjectCreatedPutLegalHold = 11,
|
||||
ObjectCreatedPutTagging = 12,
|
||||
ObjectCreatedDeleteTagging = 13,
|
||||
ObjectRemovedDelete = 14,
|
||||
ObjectRemovedDeleteMarkerCreated = 15,
|
||||
ObjectRemovedDeleteAllVersions = 16,
|
||||
ObjectRemovedNoOP = 17,
|
||||
BucketCreated = 18,
|
||||
BucketRemoved = 19,
|
||||
ObjectReplicationFailed = 20,
|
||||
ObjectReplicationComplete = 21,
|
||||
ObjectReplicationMissedThreshold = 22,
|
||||
ObjectReplicationReplicatedAfterThreshold = 23,
|
||||
ObjectReplicationNotTracked = 24,
|
||||
ObjectRestorePost = 25,
|
||||
ObjectRestoreCompleted = 26,
|
||||
ObjectTransitionFailed = 27,
|
||||
ObjectTransitionComplete = 28,
|
||||
ScannerManyVersions = 29,                // corresponds to ObjectManyVersions in Go
ScannerLargeVersions = 30,               // corresponds to ObjectLargeVersions in Go
ScannerBigPrefix = 31,                   // corresponds to PrefixManyFolders in Go
LifecycleDelMarkerExpirationDelete = 32, // corresponds to ILMDelMarkerExpirationDelete in Go
|
||||
|
||||
// Compound "All" event type (no sequential value for mask)
|
||||
ObjectAccessedAll,
|
||||
ObjectCreatedAll,
|
||||
ObjectRemovedAll,
|
||||
ObjectReplicationAll,
|
||||
ObjectRestoreAll,
|
||||
ObjectTransitionAll,
|
||||
ObjectScannerAll, // New, from Go
|
||||
Everything, // New, from Go
|
||||
}
|
||||
|
||||
// Single event type sequential array for Everything.expand()
|
||||
const SINGLE_EVENT_NAMES_IN_ORDER: [EventName; 32] = [
|
||||
EventName::ObjectAccessedGet,
|
||||
EventName::ObjectAccessedGetRetention,
|
||||
EventName::ObjectAccessedGetLegalHold,
|
||||
EventName::ObjectAccessedHead,
|
||||
EventName::ObjectAccessedAttributes,
|
||||
EventName::ObjectCreatedCompleteMultipartUpload,
|
||||
EventName::ObjectCreatedCopy,
|
||||
EventName::ObjectCreatedPost,
|
||||
EventName::ObjectCreatedPut,
|
||||
EventName::ObjectCreatedPutRetention,
|
||||
EventName::ObjectCreatedPutLegalHold,
|
||||
EventName::ObjectCreatedPutTagging,
|
||||
EventName::ObjectCreatedDeleteTagging,
|
||||
EventName::ObjectRemovedDelete,
|
||||
EventName::ObjectRemovedDeleteMarkerCreated,
|
||||
EventName::ObjectRemovedDeleteAllVersions,
|
||||
EventName::ObjectRemovedNoOP,
|
||||
EventName::BucketCreated,
|
||||
EventName::BucketRemoved,
|
||||
EventName::ObjectReplicationFailed,
|
||||
EventName::ObjectReplicationComplete,
|
||||
EventName::ObjectReplicationMissedThreshold,
|
||||
EventName::ObjectReplicationReplicatedAfterThreshold,
|
||||
EventName::ObjectReplicationNotTracked,
|
||||
EventName::ObjectRestorePost,
|
||||
EventName::ObjectRestoreCompleted,
|
||||
EventName::ObjectTransitionFailed,
|
||||
EventName::ObjectTransitionComplete,
|
||||
EventName::ScannerManyVersions,
|
||||
EventName::ScannerLargeVersions,
|
||||
EventName::ScannerBigPrefix,
|
||||
EventName::LifecycleDelMarkerExpirationDelete,
|
||||
];
|
||||
|
||||
const LAST_SINGLE_TYPE_VALUE: u32 = EventName::LifecycleDelMarkerExpirationDelete as u32;
|
||||
|
||||
impl EventName {
|
||||
/// Parses a string into an `EventName`.
|
||||
pub fn parse(s: &str) -> Result<Self, ParseEventNameError> {
|
||||
match s {
|
||||
"s3:BucketCreated:*" => Ok(EventName::BucketCreated),
|
||||
"s3:BucketRemoved:*" => Ok(EventName::BucketRemoved),
|
||||
"s3:ObjectAccessed:*" => Ok(EventName::ObjectAccessedAll),
|
||||
"s3:ObjectAccessed:Get" => Ok(EventName::ObjectAccessedGet),
|
||||
"s3:ObjectAccessed:GetRetention" => Ok(EventName::ObjectAccessedGetRetention),
|
||||
"s3:ObjectAccessed:GetLegalHold" => Ok(EventName::ObjectAccessedGetLegalHold),
|
||||
"s3:ObjectAccessed:Head" => Ok(EventName::ObjectAccessedHead),
|
||||
"s3:ObjectAccessed:Attributes" => Ok(EventName::ObjectAccessedAttributes),
|
||||
"s3:ObjectCreated:*" => Ok(EventName::ObjectCreatedAll),
|
||||
"s3:ObjectCreated:CompleteMultipartUpload" => Ok(EventName::ObjectCreatedCompleteMultipartUpload),
|
||||
"s3:ObjectCreated:Copy" => Ok(EventName::ObjectCreatedCopy),
|
||||
"s3:ObjectCreated:Post" => Ok(EventName::ObjectCreatedPost),
|
||||
"s3:ObjectCreated:Put" => Ok(EventName::ObjectCreatedPut),
|
||||
"s3:ObjectCreated:PutRetention" => Ok(EventName::ObjectCreatedPutRetention),
|
||||
"s3:ObjectCreated:PutLegalHold" => Ok(EventName::ObjectCreatedPutLegalHold),
|
||||
"s3:ObjectCreated:PutTagging" => Ok(EventName::ObjectCreatedPutTagging),
|
||||
"s3:ObjectCreated:DeleteTagging" => Ok(EventName::ObjectCreatedDeleteTagging),
|
||||
"s3:ObjectRemoved:*" => Ok(EventName::ObjectRemovedAll),
|
||||
"s3:ObjectRemoved:Delete" => Ok(EventName::ObjectRemovedDelete),
|
||||
"s3:ObjectRemoved:DeleteMarkerCreated" => Ok(EventName::ObjectRemovedDeleteMarkerCreated),
|
||||
"s3:ObjectRemoved:NoOP" => Ok(EventName::ObjectRemovedNoOP),
|
||||
"s3:ObjectRemoved:DeleteAllVersions" => Ok(EventName::ObjectRemovedDeleteAllVersions),
|
||||
"s3:LifecycleDelMarkerExpiration:Delete" => Ok(EventName::LifecycleDelMarkerExpirationDelete),
|
||||
"s3:Replication:*" => Ok(EventName::ObjectReplicationAll),
|
||||
"s3:Replication:OperationFailedReplication" => Ok(EventName::ObjectReplicationFailed),
|
||||
"s3:Replication:OperationCompletedReplication" => Ok(EventName::ObjectReplicationComplete),
|
||||
"s3:Replication:OperationMissedThreshold" => Ok(EventName::ObjectReplicationMissedThreshold),
|
||||
"s3:Replication:OperationReplicatedAfterThreshold" => Ok(EventName::ObjectReplicationReplicatedAfterThreshold),
|
||||
"s3:Replication:OperationNotTracked" => Ok(EventName::ObjectReplicationNotTracked),
|
||||
"s3:ObjectRestore:*" => Ok(EventName::ObjectRestoreAll),
|
||||
"s3:ObjectRestore:Post" => Ok(EventName::ObjectRestorePost),
|
||||
"s3:ObjectRestore:Completed" => Ok(EventName::ObjectRestoreCompleted),
|
||||
"s3:ObjectTransition:Failed" => Ok(EventName::ObjectTransitionFailed),
|
||||
"s3:ObjectTransition:Complete" => Ok(EventName::ObjectTransitionComplete),
|
||||
"s3:ObjectTransition:*" => Ok(EventName::ObjectTransitionAll),
|
||||
"s3:Scanner:ManyVersions" => Ok(EventName::ScannerManyVersions),
|
||||
"s3:Scanner:LargeVersions" => Ok(EventName::ScannerLargeVersions),
|
||||
"s3:Scanner:BigPrefix" => Ok(EventName::ScannerBigPrefix),
|
||||
// ObjectScannerAll and Everything cannot be parsed from strings, because the Go version also does not define their string representation.
|
||||
_ => Err(ParseEventNameError(s.to_string())),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a string representation of the event type.
|
||||
pub fn as_str(&self) -> &'static str {
|
||||
match self {
|
||||
EventName::BucketCreated => "s3:BucketCreated:*",
|
||||
EventName::BucketRemoved => "s3:BucketRemoved:*",
|
||||
EventName::ObjectAccessedAll => "s3:ObjectAccessed:*",
|
||||
EventName::ObjectAccessedGet => "s3:ObjectAccessed:Get",
|
||||
EventName::ObjectAccessedGetRetention => "s3:ObjectAccessed:GetRetention",
|
||||
EventName::ObjectAccessedGetLegalHold => "s3:ObjectAccessed:GetLegalHold",
|
||||
EventName::ObjectAccessedHead => "s3:ObjectAccessed:Head",
|
||||
EventName::ObjectAccessedAttributes => "s3:ObjectAccessed:Attributes",
|
||||
EventName::ObjectCreatedAll => "s3:ObjectCreated:*",
|
||||
EventName::ObjectCreatedCompleteMultipartUpload => "s3:ObjectCreated:CompleteMultipartUpload",
|
||||
EventName::ObjectCreatedCopy => "s3:ObjectCreated:Copy",
|
||||
EventName::ObjectCreatedPost => "s3:ObjectCreated:Post",
|
||||
EventName::ObjectCreatedPut => "s3:ObjectCreated:Put",
|
||||
EventName::ObjectCreatedPutTagging => "s3:ObjectCreated:PutTagging",
|
||||
EventName::ObjectCreatedDeleteTagging => "s3:ObjectCreated:DeleteTagging",
|
||||
EventName::ObjectCreatedPutRetention => "s3:ObjectCreated:PutRetention",
|
||||
EventName::ObjectCreatedPutLegalHold => "s3:ObjectCreated:PutLegalHold",
|
||||
EventName::ObjectRemovedAll => "s3:ObjectRemoved:*",
|
||||
EventName::ObjectRemovedDelete => "s3:ObjectRemoved:Delete",
|
||||
EventName::ObjectRemovedDeleteMarkerCreated => "s3:ObjectRemoved:DeleteMarkerCreated",
|
||||
EventName::ObjectRemovedNoOP => "s3:ObjectRemoved:NoOP",
|
||||
EventName::ObjectRemovedDeleteAllVersions => "s3:ObjectRemoved:DeleteAllVersions",
|
||||
EventName::LifecycleDelMarkerExpirationDelete => "s3:LifecycleDelMarkerExpiration:Delete",
|
||||
EventName::ObjectReplicationAll => "s3:Replication:*",
|
||||
EventName::ObjectReplicationFailed => "s3:Replication:OperationFailedReplication",
|
||||
EventName::ObjectReplicationComplete => "s3:Replication:OperationCompletedReplication",
|
||||
EventName::ObjectReplicationNotTracked => "s3:Replication:OperationNotTracked",
|
||||
EventName::ObjectReplicationMissedThreshold => "s3:Replication:OperationMissedThreshold",
|
||||
EventName::ObjectReplicationReplicatedAfterThreshold => "s3:Replication:OperationReplicatedAfterThreshold",
|
||||
EventName::ObjectRestoreAll => "s3:ObjectRestore:*",
|
||||
EventName::ObjectRestorePost => "s3:ObjectRestore:Post",
|
||||
EventName::ObjectRestoreCompleted => "s3:ObjectRestore:Completed",
|
||||
EventName::ObjectTransitionAll => "s3:ObjectTransition:*",
|
||||
EventName::ObjectTransitionFailed => "s3:ObjectTransition:Failed",
|
||||
EventName::ObjectTransitionComplete => "s3:ObjectTransition:Complete",
|
||||
EventName::ScannerManyVersions => "s3:Scanner:ManyVersions",
|
||||
EventName::ScannerLargeVersions => "s3:Scanner:LargeVersions",
|
||||
EventName::ScannerBigPrefix => "s3:Scanner:BigPrefix",
|
||||
// Go's String() returns "" for ObjectScannerAll and Everything
|
||||
EventName::ObjectScannerAll => "s3:Scanner:*", // Follow the pattern in Go Expand
|
||||
EventName::Everything => "", // Go String() returns "" to unprocessed
|
||||
}
|
||||
}
|
||||
|
||||
/// Expands a compound "All" event type into its single event types.
|
||||
pub fn expand(&self) -> Vec<Self> {
|
||||
match self {
|
||||
EventName::ObjectAccessedAll => vec![
|
||||
EventName::ObjectAccessedGet,
|
||||
EventName::ObjectAccessedHead,
|
||||
EventName::ObjectAccessedGetRetention,
|
||||
EventName::ObjectAccessedGetLegalHold,
|
||||
EventName::ObjectAccessedAttributes,
|
||||
],
|
||||
EventName::ObjectCreatedAll => vec![
|
||||
EventName::ObjectCreatedCompleteMultipartUpload,
|
||||
EventName::ObjectCreatedCopy,
|
||||
EventName::ObjectCreatedPost,
|
||||
EventName::ObjectCreatedPut,
|
||||
EventName::ObjectCreatedPutRetention,
|
||||
EventName::ObjectCreatedPutLegalHold,
|
||||
EventName::ObjectCreatedPutTagging,
|
||||
EventName::ObjectCreatedDeleteTagging,
|
||||
],
|
||||
EventName::ObjectRemovedAll => vec![
|
||||
EventName::ObjectRemovedDelete,
|
||||
EventName::ObjectRemovedDeleteMarkerCreated,
|
||||
EventName::ObjectRemovedNoOP,
|
||||
EventName::ObjectRemovedDeleteAllVersions,
|
||||
],
|
||||
EventName::ObjectReplicationAll => vec![
|
||||
EventName::ObjectReplicationFailed,
|
||||
EventName::ObjectReplicationComplete,
|
||||
EventName::ObjectReplicationNotTracked,
|
||||
EventName::ObjectReplicationMissedThreshold,
|
||||
EventName::ObjectReplicationReplicatedAfterThreshold,
|
||||
],
|
||||
EventName::ObjectRestoreAll => vec![EventName::ObjectRestorePost, EventName::ObjectRestoreCompleted],
|
||||
EventName::ObjectTransitionAll => vec![EventName::ObjectTransitionFailed, EventName::ObjectTransitionComplete],
|
||||
EventName::ObjectScannerAll => vec![
|
||||
// New
|
||||
EventName::ScannerManyVersions,
|
||||
EventName::ScannerLargeVersions,
|
||||
EventName::ScannerBigPrefix,
|
||||
],
|
||||
EventName::Everything => {
|
||||
// New
|
||||
SINGLE_EVENT_NAMES_IN_ORDER.to_vec()
|
||||
}
|
||||
// A single type returns to itself directly
|
||||
_ => vec![*self],
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the bitmask for this event type.
/// Compound "All" types are expanded and their bits OR-ed together.
|
||||
pub fn mask(&self) -> u64 {
|
||||
let value = *self as u32;
|
||||
if value > 0 && value <= LAST_SINGLE_TYPE_VALUE {
|
||||
// It's a single type
|
||||
1u64 << (value - 1)
|
||||
} else {
|
||||
// It's a compound type
|
||||
let mut mask = 0u64;
|
||||
for n in self.expand() {
|
||||
mask |= n.mask(); // Recursively call mask
|
||||
}
|
||||
mask
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for EventName {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
write!(f, "{}", self.as_str())
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert to `EventName` according to string
|
||||
impl From<&str> for EventName {
|
||||
fn from(event_str: &str) -> Self {
|
||||
EventName::parse(event_str).unwrap_or_else(|e| panic!("{}", e))
|
||||
}
|
||||
}
|
||||
|
||||
/// Represents the identity of the user who triggered the event
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Identity {
|
||||
/// The principal ID of the user
|
||||
pub principal_id: String,
|
||||
}
|
||||
|
||||
/// Represents the bucket that the object is in
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Bucket {
|
||||
/// The name of the bucket
|
||||
pub name: String,
|
||||
/// The owner identity of the bucket
|
||||
pub owner_identity: Identity,
|
||||
/// The Amazon Resource Name (ARN) of the bucket
|
||||
pub arn: String,
|
||||
}
|
||||
|
||||
/// Represents the object that the event occurred on
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
|
||||
pub struct Object {
|
||||
/// The key (name) of the object
|
||||
pub key: String,
|
||||
/// The size of the object in bytes
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub size: Option<i64>,
|
||||
/// The entity tag (ETag) of the object
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub etag: Option<String>,
|
||||
/// The content type of the object
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub content_type: Option<String>,
|
||||
/// User-defined metadata associated with the object
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub user_metadata: Option<HashMap<String, String>>,
|
||||
/// The version ID of the object (if versioning is enabled)
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
pub version_id: Option<String>,
|
||||
/// A unique identifier for the event
|
||||
pub sequencer: String,
|
||||
}
|
||||
|
||||
/// Metadata about the event
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Metadata {
|
||||
/// The schema version of the event
|
||||
#[serde(rename = "s3SchemaVersion")]
|
||||
pub schema_version: String,
|
||||
/// The ID of the configuration that triggered the event
|
||||
pub configuration_id: String,
|
||||
/// Information about the bucket
|
||||
pub bucket: Bucket,
|
||||
/// Information about the object
|
||||
pub object: Object,
|
||||
}
|
||||
|
||||
/// Information about the source of the event
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct Source {
|
||||
/// The host where the event originated
|
||||
pub host: String,
|
||||
/// The port on the host
|
||||
pub port: String,
|
||||
/// The user agent that caused the event
|
||||
#[serde(rename = "userAgent")]
|
||||
pub user_agent: String,
|
||||
}
|
||||
|
||||
/// Represents a storage event
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(rename_all = "camelCase")]
|
||||
pub struct Event {
|
||||
/// The version of the event
|
||||
pub event_version: String,
|
||||
/// The source of the event
|
||||
pub event_source: String,
|
||||
/// The AWS region where the event occurred
|
||||
pub aws_region: String,
|
||||
/// The time when the event occurred
|
||||
pub event_time: DateTime<Utc>,
|
||||
/// The name of the event
|
||||
pub event_name: EventName,
|
||||
/// The identity of the user who triggered the event
|
||||
pub user_identity: Identity,
|
||||
/// Parameters from the request that caused the event
|
||||
pub request_parameters: HashMap<String, String>,
|
||||
/// Elements from the response
|
||||
pub response_elements: HashMap<String, String>,
|
||||
/// Metadata about the event
|
||||
pub s3: Metadata,
|
||||
/// Information about the source of the event
|
||||
pub source: Source,
|
||||
}
|
||||
|
||||
impl Event {
|
||||
/// Creates a test event for a given bucket and object
|
||||
pub fn new_test_event(bucket: &str, key: &str, event_name: EventName) -> Self {
|
||||
let mut user_metadata = HashMap::new();
|
||||
user_metadata.insert("x-amz-meta-test".to_string(), "value".to_string());
|
||||
user_metadata.insert("x-amz-storage-storage-options".to_string(), "value".to_string());
|
||||
user_metadata.insert("x-amz-meta-".to_string(), "value".to_string());
|
||||
user_metadata.insert("x-rustfs-meta-".to_string(), "rustfs-value".to_string());
|
||||
user_metadata.insert("x-request-id".to_string(), "request-id-123".to_string());
|
||||
user_metadata.insert("x-bucket".to_string(), "bucket".to_string());
|
||||
user_metadata.insert("x-object".to_string(), "object".to_string());
|
||||
user_metadata.insert("x-rustfs-origin-endpoint".to_string(), "http://127.0.0.1".to_string());
|
||||
user_metadata.insert("x-rustfs-user-metadata".to_string(), "metadata".to_string());
|
||||
user_metadata.insert("x-rustfs-deployment-id".to_string(), "deployment-id-123".to_string());
|
||||
user_metadata.insert("x-rustfs-origin-endpoint-code".to_string(), "http://127.0.0.1".to_string());
|
||||
user_metadata.insert("x-rustfs-bucket-name".to_string(), "bucket".to_string());
|
||||
user_metadata.insert("x-rustfs-object-key".to_string(), key.to_string());
|
||||
user_metadata.insert("x-rustfs-object-size".to_string(), "1024".to_string());
|
||||
user_metadata.insert("x-rustfs-object-etag".to_string(), "etag123".to_string());
|
||||
user_metadata.insert("x-rustfs-object-version-id".to_string(), "1".to_string());
|
||||
user_metadata.insert("x-request-time".to_string(), Utc::now().to_rfc3339());
|
||||
|
||||
Event {
|
||||
event_version: "2.1".to_string(),
|
||||
event_source: "rustfs:s3".to_string(),
|
||||
aws_region: "us-east-1".to_string(),
|
||||
event_time: Utc::now(),
|
||||
event_name,
|
||||
user_identity: Identity {
|
||||
principal_id: "rustfs".to_string(),
|
||||
},
|
||||
request_parameters: HashMap::new(),
|
||||
response_elements: HashMap::new(),
|
||||
s3: Metadata {
|
||||
schema_version: "1.0".to_string(),
|
||||
configuration_id: "test-config".to_string(),
|
||||
bucket: Bucket {
|
||||
name: bucket.to_string(),
|
||||
owner_identity: Identity {
|
||||
principal_id: "rustfs".to_string(),
|
||||
},
|
||||
arn: format!("arn:rustfs:s3:::{}", bucket),
|
||||
},
|
||||
object: Object {
|
||||
key: key.to_string(),
|
||||
size: Some(1024),
|
||||
etag: Some("etag123".to_string()),
|
||||
content_type: Some("application/octet-stream".to_string()),
|
||||
user_metadata: Some(user_metadata),
|
||||
version_id: Some("1".to_string()),
|
||||
sequencer: "0055AED6DCD90281E5".to_string(),
|
||||
},
|
||||
},
|
||||
source: Source {
|
||||
host: "127.0.0.1".to_string(),
|
||||
port: "9000".to_string(),
|
||||
user_agent: "RustFS (linux; amd64) rustfs-rs/0.1".to_string(),
|
||||
},
|
||||
}
|
||||
}
|
||||
/// Return event mask
|
||||
pub fn mask(&self) -> u64 {
|
||||
self.event_name.mask()
|
||||
}
|
||||
|
||||
pub fn new(args: EventArgs) -> Self {
|
||||
let event_time = Utc::now().naive_local();
|
||||
let unique_id = match args.object.mod_time {
|
||||
Some(t) => format!("{:X}", t.unix_timestamp_nanos()),
|
||||
None => format!("{:X}", event_time.and_utc().timestamp_nanos_opt().unwrap_or(0)),
|
||||
};
|
||||
|
||||
let mut resp_elements = args.resp_elements.clone();
|
||||
initialize_response_elements(&mut resp_elements, &["x-amz-request-id", "x-amz-id-2"]);
|
||||
|
||||
// URL encoding of object keys
|
||||
let key_name = form_urlencoded::byte_serialize(args.object.name.as_bytes()).collect::<String>();
|
||||
let principal_id = args.req_params.get("principalId").unwrap_or(&String::new()).to_string();
|
||||
|
||||
let mut s3_metadata = Metadata {
|
||||
schema_version: "1.0".to_string(),
|
||||
configuration_id: "Config".to_string(), // or from args
|
||||
bucket: Bucket {
|
||||
name: args.bucket_name.clone(),
|
||||
owner_identity: Identity {
|
||||
principal_id: principal_id.clone(),
|
||||
},
|
||||
arn: format!("arn:aws:s3:::{}", args.bucket_name),
|
||||
},
|
||||
object: Object {
|
||||
key: key_name,
|
||||
version_id: Some(args.object.version_id.unwrap().to_string()),
|
||||
sequencer: unique_id,
|
||||
..Default::default()
|
||||
},
|
||||
};
|
||||
|
||||
let is_removed_event = matches!(
|
||||
args.event_name,
|
||||
EventName::ObjectRemovedDelete | EventName::ObjectRemovedDeleteMarkerCreated
|
||||
);
|
||||
|
||||
if !is_removed_event {
|
||||
s3_metadata.object.size = Some(args.object.size);
|
||||
s3_metadata.object.etag = args.object.etag.clone();
|
||||
s3_metadata.object.content_type = args.object.content_type.clone();
|
||||
// Filter out internal reserved metadata
|
||||
let mut user_metadata = HashMap::new();
|
||||
for (k, v) in &args.object.user_defined.unwrap_or_default() {
|
||||
if !k.to_lowercase().starts_with("x-amz-meta-internal-") {
|
||||
user_metadata.insert(k.clone(), v.clone());
|
||||
}
|
||||
}
|
||||
s3_metadata.object.user_metadata = Some(user_metadata);
|
||||
}
|
||||
|
||||
Self {
|
||||
event_version: "2.1".to_string(),
|
||||
event_source: "rustfs:s3".to_string(),
|
||||
aws_region: args.req_params.get("region").cloned().unwrap_or_default(),
|
||||
event_time: event_time.and_utc(),
|
||||
event_name: args.event_name,
|
||||
user_identity: Identity { principal_id },
|
||||
request_parameters: args.req_params,
|
||||
response_elements: resp_elements,
|
||||
s3: s3_metadata,
|
||||
source: Source {
|
||||
host: args.host,
|
||||
port: "".to_string(),
|
||||
user_agent: args.user_agent,
|
||||
},
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn initialize_response_elements(elements: &mut HashMap<String, String>, keys: &[&str]) {
|
||||
for key in keys {
|
||||
elements.entry(key.to_string()).or_default();
|
||||
}
|
||||
}
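// Effect of the helper above (a minimal sketch): it only guarantees that the listed
// keys exist, inserting an empty string for a missing key and leaving any value that
// is already present untouched.
//
//     let mut elements = HashMap::new();
//     elements.insert("x-amz-request-id".to_string(), "req-1".to_string());
//     initialize_response_elements(&mut elements, &["x-amz-request-id", "x-amz-id-2"]);
//     assert_eq!(elements["x-amz-request-id"], "req-1");
//     assert_eq!(elements["x-amz-id-2"], "");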
|
||||
|
||||
/// Represents a log of events for sending to targets
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct EventLog {
|
||||
/// The event name
|
||||
pub event_name: EventName,
|
||||
/// The object key
|
||||
pub key: String,
|
||||
/// The list of events
|
||||
pub records: Vec<Event>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct EventArgs {
|
||||
pub event_name: EventName,
|
||||
pub bucket_name: String,
|
||||
pub object: ecstore::store_api::ObjectInfo,
|
||||
pub req_params: HashMap<String, String>,
|
||||
pub resp_elements: HashMap<String, String>,
|
||||
pub host: String,
|
||||
pub user_agent: String,
|
||||
}
|
||||
|
||||
impl EventArgs {
/// Returns true when the request originated from replication,
/// so that no duplicate notification is emitted for replica writes.
pub fn is_replication_request(&self) -> bool {
self.req_params.contains_key("x-rustfs-source-replication-request")
}
}
|
||||
303
crates/notify/src/factory.rs
Normal file
@@ -0,0 +1,303 @@
|
||||
use crate::store::DEFAULT_LIMIT;
|
||||
use crate::{
|
||||
error::TargetError,
|
||||
target::{Target, mqtt::MQTTArgs, webhook::WebhookArgs},
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use ecstore::config::{ENABLE_KEY, ENABLE_ON, KVS};
|
||||
use rumqttc::QoS;
|
||||
use std::time::Duration;
|
||||
use tracing::warn;
|
||||
use url::Url;
|
||||
|
||||
// --- Configuration Constants ---
|
||||
|
||||
// General
|
||||
|
||||
pub const DEFAULT_TARGET: &str = "1";
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub const NOTIFY_KAFKA_SUB_SYS: &str = "notify_kafka";
|
||||
#[allow(dead_code)]
|
||||
pub const NOTIFY_MQTT_SUB_SYS: &str = "notify_mqtt";
|
||||
#[allow(dead_code)]
|
||||
pub const NOTIFY_MY_SQL_SUB_SYS: &str = "notify_mysql";
|
||||
#[allow(dead_code)]
|
||||
pub const NOTIFY_NATS_SUB_SYS: &str = "notify_nats";
|
||||
#[allow(dead_code)]
|
||||
pub const NOTIFY_NSQ_SUB_SYS: &str = "notify_nsq";
|
||||
#[allow(dead_code)]
|
||||
pub const NOTIFY_ES_SUB_SYS: &str = "notify_elasticsearch";
|
||||
#[allow(dead_code)]
|
||||
pub const NOTIFY_AMQP_SUB_SYS: &str = "notify_amqp";
|
||||
#[allow(dead_code)]
|
||||
pub const NOTIFY_POSTGRES_SUB_SYS: &str = "notify_postgres";
|
||||
#[allow(dead_code)]
|
||||
pub const NOTIFY_REDIS_SUB_SYS: &str = "notify_redis";
|
||||
pub const NOTIFY_WEBHOOK_SUB_SYS: &str = "notify_webhook";
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub const NOTIFY_SUB_SYSTEMS: &[&str] = &[NOTIFY_MQTT_SUB_SYS, NOTIFY_WEBHOOK_SUB_SYS];
|
||||
|
||||
// Webhook Keys
|
||||
pub const WEBHOOK_ENDPOINT: &str = "endpoint";
|
||||
pub const WEBHOOK_AUTH_TOKEN: &str = "auth_token";
|
||||
pub const WEBHOOK_QUEUE_LIMIT: &str = "queue_limit";
|
||||
pub const WEBHOOK_QUEUE_DIR: &str = "queue_dir";
|
||||
pub const WEBHOOK_CLIENT_CERT: &str = "client_cert";
|
||||
pub const WEBHOOK_CLIENT_KEY: &str = "client_key";
|
||||
|
||||
// Webhook Environment Variables
|
||||
const ENV_WEBHOOK_ENABLE: &str = "RUSTFS_NOTIFY_WEBHOOK_ENABLE";
|
||||
const ENV_WEBHOOK_ENDPOINT: &str = "RUSTFS_NOTIFY_WEBHOOK_ENDPOINT";
|
||||
const ENV_WEBHOOK_AUTH_TOKEN: &str = "RUSTFS_NOTIFY_WEBHOOK_AUTH_TOKEN";
|
||||
const ENV_WEBHOOK_QUEUE_LIMIT: &str = "RUSTFS_NOTIFY_WEBHOOK_QUEUE_LIMIT";
|
||||
const ENV_WEBHOOK_QUEUE_DIR: &str = "RUSTFS_NOTIFY_WEBHOOK_QUEUE_DIR";
|
||||
const ENV_WEBHOOK_CLIENT_CERT: &str = "RUSTFS_NOTIFY_WEBHOOK_CLIENT_CERT";
|
||||
const ENV_WEBHOOK_CLIENT_KEY: &str = "RUSTFS_NOTIFY_WEBHOOK_CLIENT_KEY";
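// Example: enabling the default webhook target purely through environment variables
// (the endpoint value is illustrative; each of these can also be supplied through the
// `notify_webhook` KVS configuration instead):
//
//     RUSTFS_NOTIFY_WEBHOOK_ENABLE=true
//     RUSTFS_NOTIFY_WEBHOOK_ENDPOINT=https://example.com/rustfs-events
//     RUSTFS_NOTIFY_WEBHOOK_QUEUE_LIMIT=10000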
|
||||
|
||||
// MQTT Keys
|
||||
pub const MQTT_BROKER: &str = "broker";
|
||||
pub const MQTT_TOPIC: &str = "topic";
|
||||
pub const MQTT_QOS: &str = "qos";
|
||||
pub const MQTT_USERNAME: &str = "username";
|
||||
pub const MQTT_PASSWORD: &str = "password";
|
||||
pub const MQTT_RECONNECT_INTERVAL: &str = "reconnect_interval";
|
||||
pub const MQTT_KEEP_ALIVE_INTERVAL: &str = "keep_alive_interval";
|
||||
pub const MQTT_QUEUE_DIR: &str = "queue_dir";
|
||||
pub const MQTT_QUEUE_LIMIT: &str = "queue_limit";
|
||||
|
||||
// MQTT Environment Variables
|
||||
const ENV_MQTT_ENABLE: &str = "RUSTFS_NOTIFY_MQTT_ENABLE";
|
||||
const ENV_MQTT_BROKER: &str = "RUSTFS_NOTIFY_MQTT_BROKER";
|
||||
const ENV_MQTT_TOPIC: &str = "RUSTFS_NOTIFY_MQTT_TOPIC";
|
||||
const ENV_MQTT_QOS: &str = "RUSTFS_NOTIFY_MQTT_QOS";
|
||||
const ENV_MQTT_USERNAME: &str = "RUSTFS_NOTIFY_MQTT_USERNAME";
|
||||
const ENV_MQTT_PASSWORD: &str = "RUSTFS_NOTIFY_MQTT_PASSWORD";
|
||||
const ENV_MQTT_RECONNECT_INTERVAL: &str = "RUSTFS_NOTIFY_MQTT_RECONNECT_INTERVAL";
|
||||
const ENV_MQTT_KEEP_ALIVE_INTERVAL: &str = "RUSTFS_NOTIFY_MQTT_KEEP_ALIVE_INTERVAL";
|
||||
const ENV_MQTT_QUEUE_DIR: &str = "RUSTFS_NOTIFY_MQTT_QUEUE_DIR";
|
||||
const ENV_MQTT_QUEUE_LIMIT: &str = "RUSTFS_NOTIFY_MQTT_QUEUE_LIMIT";
|
||||
|
||||
/// Helper function to read a value from environment variables or the KVS configuration.
///
/// The environment variable takes priority: for the default target the base key is used,
/// for any other target the upper-cased id is appended (e.g. `BASE_ENV_KEY_<ID>`); if the
/// variable is not set, the value is looked up in the KVS configuration.
|
||||
fn get_config_value(id: &str, base_env_key: &str, config_key: &str, config: &KVS) -> Option<String> {
|
||||
let env_key = if id != DEFAULT_TARGET {
|
||||
format!("{}_{}", base_env_key, id.to_uppercase().replace('-', "_"))
|
||||
} else {
|
||||
base_env_key.to_string()
|
||||
};
|
||||
|
||||
match std::env::var(&env_key) {
|
||||
Ok(val) => Some(val),
|
||||
Err(_) => config.lookup(config_key),
|
||||
}
|
||||
}
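// Example of the lookup priority (a minimal sketch; the id "primary" and the resulting
// value are illustrative): for a non-default target id the environment key is suffixed
// with the upper-cased id, otherwise the base key is used as-is.
//
//     // id = "primary"            -> checks RUSTFS_NOTIFY_WEBHOOK_ENDPOINT_PRIMARY first
//     // id = DEFAULT_TARGET ("1") -> checks RUSTFS_NOTIFY_WEBHOOK_ENDPOINT first
//     let endpoint = get_config_value("primary", ENV_WEBHOOK_ENDPOINT, WEBHOOK_ENDPOINT, config);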
|
||||
|
||||
/// Trait for creating targets from configuration
|
||||
#[async_trait]
|
||||
pub trait TargetFactory: Send + Sync {
|
||||
/// Creates a target from configuration
|
||||
async fn create_target(&self, id: String, config: &KVS) -> Result<Box<dyn Target + Send + Sync>, TargetError>;
|
||||
|
||||
/// Validates target configuration
|
||||
fn validate_config(&self, id: &str, config: &KVS) -> Result<(), TargetError>;
|
||||
}
|
||||
|
||||
/// Factory for creating Webhook targets
|
||||
pub struct WebhookTargetFactory;
|
||||
|
||||
#[async_trait]
|
||||
impl TargetFactory for WebhookTargetFactory {
|
||||
async fn create_target(&self, id: String, config: &KVS) -> Result<Box<dyn Target + Send + Sync>, TargetError> {
|
||||
let get = |base_env_key: &str, config_key: &str| get_config_value(&id, base_env_key, config_key, config);
|
||||
|
||||
let enable = get(ENV_WEBHOOK_ENABLE, ENABLE_KEY)
|
||||
.map(|v| v.eq_ignore_ascii_case(ENABLE_ON) || v.eq_ignore_ascii_case("true"))
|
||||
.unwrap_or(false);
|
||||
|
||||
if !enable {
|
||||
return Err(TargetError::Configuration("Target is disabled".to_string()));
|
||||
}
|
||||
|
||||
let endpoint = get(ENV_WEBHOOK_ENDPOINT, WEBHOOK_ENDPOINT)
|
||||
.ok_or_else(|| TargetError::Configuration("Missing webhook endpoint".to_string()))?;
|
||||
let endpoint_url =
|
||||
Url::parse(&endpoint).map_err(|e| TargetError::Configuration(format!("Invalid endpoint URL: {}", e)))?;
|
||||
|
||||
let auth_token = get(ENV_WEBHOOK_AUTH_TOKEN, WEBHOOK_AUTH_TOKEN).unwrap_or_default();
|
||||
let queue_dir = get(ENV_WEBHOOK_QUEUE_DIR, WEBHOOK_QUEUE_DIR).unwrap_or_default();
|
||||
|
||||
let queue_limit = get(ENV_WEBHOOK_QUEUE_LIMIT, WEBHOOK_QUEUE_LIMIT)
|
||||
.and_then(|v| v.parse::<u64>().ok())
|
||||
.unwrap_or(DEFAULT_LIMIT);
|
||||
|
||||
let client_cert = get(ENV_WEBHOOK_CLIENT_CERT, WEBHOOK_CLIENT_CERT).unwrap_or_default();
|
||||
let client_key = get(ENV_WEBHOOK_CLIENT_KEY, WEBHOOK_CLIENT_KEY).unwrap_or_default();
|
||||
|
||||
let args = WebhookArgs {
|
||||
enable,
|
||||
endpoint: endpoint_url,
|
||||
auth_token,
|
||||
queue_dir,
|
||||
queue_limit,
|
||||
client_cert,
|
||||
client_key,
|
||||
};
|
||||
|
||||
let target = crate::target::webhook::WebhookTarget::new(id, args)?;
|
||||
Ok(Box::new(target))
|
||||
}
|
||||
|
||||
fn validate_config(&self, id: &str, config: &KVS) -> Result<(), TargetError> {
|
||||
let get = |base_env_key: &str, config_key: &str| get_config_value(id, base_env_key, config_key, config);
|
||||
|
||||
let enable = get(ENV_WEBHOOK_ENABLE, ENABLE_KEY)
|
||||
.map(|v| v.eq_ignore_ascii_case(ENABLE_ON) || v.eq_ignore_ascii_case("true"))
|
||||
.unwrap_or(false);
|
||||
|
||||
if !enable {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let endpoint = get(ENV_WEBHOOK_ENDPOINT, WEBHOOK_ENDPOINT)
|
||||
.ok_or_else(|| TargetError::Configuration("Missing webhook endpoint".to_string()))?;
|
||||
Url::parse(&endpoint).map_err(|e| TargetError::Configuration(format!("Invalid endpoint URL: {}", e)))?;
|
||||
|
||||
let client_cert = get(ENV_WEBHOOK_CLIENT_CERT, WEBHOOK_CLIENT_CERT).unwrap_or_default();
|
||||
let client_key = get(ENV_WEBHOOK_CLIENT_KEY, WEBHOOK_CLIENT_KEY).unwrap_or_default();
|
||||
|
||||
if client_cert.is_empty() != client_key.is_empty() {
|
||||
return Err(TargetError::Configuration(
|
||||
"Both client_cert and client_key must be specified together".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let queue_dir = get(ENV_WEBHOOK_QUEUE_DIR, WEBHOOK_QUEUE_DIR).unwrap_or_default();
|
||||
if !queue_dir.is_empty() && !std::path::Path::new(&queue_dir).is_absolute() {
|
||||
return Err(TargetError::Configuration("Webhook queue directory must be an absolute path".to_string()));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Factory for creating MQTT targets
|
||||
pub struct MQTTTargetFactory;
|
||||
|
||||
#[async_trait]
|
||||
impl TargetFactory for MQTTTargetFactory {
|
||||
async fn create_target(&self, id: String, config: &KVS) -> Result<Box<dyn Target + Send + Sync>, TargetError> {
|
||||
let get = |base_env_key: &str, config_key: &str| get_config_value(&id, base_env_key, config_key, config);
|
||||
|
||||
let enable = get(ENV_MQTT_ENABLE, ENABLE_KEY)
|
||||
.map(|v| v.eq_ignore_ascii_case(ENABLE_ON) || v.eq_ignore_ascii_case("true"))
|
||||
.unwrap_or(false);
|
||||
|
||||
if !enable {
|
||||
return Err(TargetError::Configuration("Target is disabled".to_string()));
|
||||
}
|
||||
|
||||
let broker =
|
||||
get(ENV_MQTT_BROKER, MQTT_BROKER).ok_or_else(|| TargetError::Configuration("Missing MQTT broker".to_string()))?;
|
||||
let broker_url = Url::parse(&broker).map_err(|e| TargetError::Configuration(format!("Invalid broker URL: {}", e)))?;
|
||||
|
||||
let topic =
|
||||
get(ENV_MQTT_TOPIC, MQTT_TOPIC).ok_or_else(|| TargetError::Configuration("Missing MQTT topic".to_string()))?;
|
||||
|
||||
let qos = get(ENV_MQTT_QOS, MQTT_QOS)
|
||||
.and_then(|v| v.parse::<u8>().ok())
|
||||
.map(|q| match q {
|
||||
0 => QoS::AtMostOnce,
|
||||
1 => QoS::AtLeastOnce,
|
||||
2 => QoS::ExactlyOnce,
|
||||
_ => QoS::AtLeastOnce,
|
||||
})
|
||||
.unwrap_or(QoS::AtLeastOnce);
|
||||
|
||||
let username = get(ENV_MQTT_USERNAME, MQTT_USERNAME).unwrap_or_default();
|
||||
let password = get(ENV_MQTT_PASSWORD, MQTT_PASSWORD).unwrap_or_default();
|
||||
|
||||
let reconnect_interval = get(ENV_MQTT_RECONNECT_INTERVAL, MQTT_RECONNECT_INTERVAL)
|
||||
.and_then(|v| v.parse::<u64>().ok())
|
||||
.map(Duration::from_secs)
|
||||
.unwrap_or_else(|| Duration::from_secs(5));
|
||||
|
||||
let keep_alive = get(ENV_MQTT_KEEP_ALIVE_INTERVAL, MQTT_KEEP_ALIVE_INTERVAL)
|
||||
.and_then(|v| v.parse::<u64>().ok())
|
||||
.map(Duration::from_secs)
|
||||
.unwrap_or_else(|| Duration::from_secs(30));
|
||||
|
||||
let queue_dir = get(ENV_MQTT_QUEUE_DIR, MQTT_QUEUE_DIR).unwrap_or_default();
|
||||
let queue_limit = get(ENV_MQTT_QUEUE_LIMIT, MQTT_QUEUE_LIMIT)
|
||||
.and_then(|v| v.parse::<u64>().ok())
|
||||
.unwrap_or(DEFAULT_LIMIT);
|
||||
|
||||
let args = MQTTArgs {
|
||||
enable,
|
||||
broker: broker_url,
|
||||
topic,
|
||||
qos,
|
||||
username,
|
||||
password,
|
||||
max_reconnect_interval: reconnect_interval,
|
||||
keep_alive,
|
||||
queue_dir,
|
||||
queue_limit,
|
||||
};
|
||||
|
||||
let target = crate::target::mqtt::MQTTTarget::new(id, args)?;
|
||||
Ok(Box::new(target))
|
||||
}
|
||||
|
||||
fn validate_config(&self, id: &str, config: &KVS) -> Result<(), TargetError> {
|
||||
let get = |base_env_key: &str, config_key: &str| get_config_value(id, base_env_key, config_key, config);
|
||||
|
||||
let enable = get(ENV_MQTT_ENABLE, ENABLE_KEY)
|
||||
.map(|v| v.eq_ignore_ascii_case(ENABLE_ON) || v.eq_ignore_ascii_case("true"))
|
||||
.unwrap_or(false);
|
||||
|
||||
if !enable {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let broker =
|
||||
get(ENV_MQTT_BROKER, MQTT_BROKER).ok_or_else(|| TargetError::Configuration("Missing MQTT broker".to_string()))?;
|
||||
let url = Url::parse(&broker).map_err(|e| TargetError::Configuration(format!("Invalid broker URL: {}", e)))?;
|
||||
|
||||
match url.scheme() {
|
||||
"tcp" | "ssl" | "ws" | "wss" | "mqtt" | "mqtts" => {}
|
||||
_ => {
|
||||
return Err(TargetError::Configuration("Unsupported broker URL scheme".to_string()));
|
||||
}
|
||||
}
|
||||
|
||||
if get(ENV_MQTT_TOPIC, MQTT_TOPIC).is_none() {
|
||||
return Err(TargetError::Configuration("Missing MQTT topic".to_string()));
|
||||
}
|
||||
|
||||
if let Some(qos_str) = get(ENV_MQTT_QOS, MQTT_QOS) {
|
||||
let qos = qos_str
|
||||
.parse::<u8>()
|
||||
.map_err(|_| TargetError::Configuration("Invalid QoS value".to_string()))?;
|
||||
if qos > 2 {
|
||||
return Err(TargetError::Configuration("QoS must be 0, 1, or 2".to_string()));
|
||||
}
|
||||
}
|
||||
|
||||
let queue_dir = get(ENV_MQTT_QUEUE_DIR, MQTT_QUEUE_DIR).unwrap_or_default();
|
||||
if !queue_dir.is_empty() {
|
||||
if !std::path::Path::new(&queue_dir).is_absolute() {
|
||||
return Err(TargetError::Configuration("MQTT queue directory must be an absolute path".to_string()));
|
||||
}
|
||||
if let Some(qos_str) = get(ENV_MQTT_QOS, MQTT_QOS) {
|
||||
if qos_str == "0" {
|
||||
warn!("Using queue_dir with QoS 0 may result in event loss");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
58
crates/notify/src/global.rs
Normal file
@@ -0,0 +1,58 @@
|
||||
use crate::{Event, EventArgs, NotificationError, NotificationSystem};
|
||||
use ecstore::config::Config;
|
||||
use once_cell::sync::Lazy;
|
||||
use std::sync::{Arc, OnceLock};
|
||||
|
||||
static NOTIFICATION_SYSTEM: OnceLock<Arc<NotificationSystem>> = OnceLock::new();
|
||||
// Create a globally unique Notifier instance
|
||||
pub static GLOBAL_NOTIFIER: Lazy<Notifier> = Lazy::new(|| Notifier {});
|
||||
|
||||
/// Initialize the global notification system with the given configuration.
|
||||
/// This function should only be called once throughout the application life cycle.
|
||||
pub async fn initialize(config: Config) -> Result<(), NotificationError> {
|
||||
// `new` is synchronous and responsible for creating instances
|
||||
let system = NotificationSystem::new(config);
|
||||
// `init` is asynchronous and responsible for performing I/O-intensive initialization
|
||||
system.init().await?;
|
||||
|
||||
match NOTIFICATION_SYSTEM.set(Arc::new(system)) {
|
||||
Ok(_) => Ok(()),
|
||||
Err(_) => Err(NotificationError::AlreadyInitialized),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a handle to the global NotificationSystem instance.
|
||||
/// Return None if the system has not been initialized.
|
||||
pub fn notification_system() -> Option<Arc<NotificationSystem>> {
|
||||
NOTIFICATION_SYSTEM.get().cloned()
|
||||
}
|
||||
|
||||
/// Stateless facade for sending event notifications.
/// It holds no state of its own and resolves the global NotificationSystem instance on each call.
pub struct Notifier {}
|
||||
|
||||
impl Notifier {
|
||||
/// Notify an event asynchronously.
|
||||
/// This is the only entry point for all event notifications in the system.
|
||||
pub async fn notify(&self, args: EventArgs) {
// Resolve the global NotificationSystem (service-locator style);
// if it has not been initialized yet, log an error and bail out.
let notification_sys = match notification_system() {
Some(sys) => sys,
None => {
tracing::error!("Notification system is not initialized.");
return;
}
};
|
||||
|
||||
// Avoid generating notifications for replica creation events
|
||||
if args.is_replication_request() {
|
||||
return;
|
||||
}
|
||||
|
||||
// Create an event and send it
|
||||
let event = Arc::new(Event::new(args));
|
||||
notification_sys.send_event(event).await;
|
||||
}
|
||||
}
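// Typical wiring (a minimal sketch; error handling and the construction of `EventArgs`
// are elided):
//
//     rustfs_notify::initialize(config).await?;                    // once, at startup
//     rustfs_notify::global::GLOBAL_NOTIFIER.notify(args).await;   // per request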
|
||||
481
crates/notify/src/integration.rs
Normal file
@@ -0,0 +1,481 @@
|
||||
use crate::arn::TargetID;
|
||||
use crate::store::{Key, Store};
|
||||
use crate::{
|
||||
Event, EventName, StoreError, Target, error::NotificationError, notifier::EventNotifier, registry::TargetRegistry,
|
||||
rules::BucketNotificationConfig, stream,
|
||||
};
|
||||
use ecstore::config::{Config, KVS};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::time::{Duration, Instant};
|
||||
use tokio::sync::{RwLock, Semaphore, mpsc};
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
/// Monitoring metrics for the notification system
|
||||
pub struct NotificationMetrics {
|
||||
/// The number of events currently being processed
|
||||
processing_events: AtomicUsize,
|
||||
/// Number of events that have been successfully processed
|
||||
processed_events: AtomicUsize,
|
||||
/// Number of events that failed to be processed
|
||||
failed_events: AtomicUsize,
|
||||
/// System startup time
|
||||
start_time: Instant,
|
||||
}
|
||||
|
||||
impl Default for NotificationMetrics {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl NotificationMetrics {
|
||||
pub fn new() -> Self {
|
||||
NotificationMetrics {
|
||||
processing_events: AtomicUsize::new(0),
|
||||
processed_events: AtomicUsize::new(0),
|
||||
failed_events: AtomicUsize::new(0),
|
||||
start_time: Instant::now(),
|
||||
}
|
||||
}
|
||||
|
||||
// Provide public methods to increase count
|
||||
pub fn increment_processing(&self) {
|
||||
self.processing_events.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
pub fn increment_processed(&self) {
|
||||
self.processing_events.fetch_sub(1, Ordering::Relaxed);
|
||||
self.processed_events.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
pub fn increment_failed(&self) {
|
||||
self.processing_events.fetch_sub(1, Ordering::Relaxed);
|
||||
self.failed_events.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
// Provide public methods to get count
|
||||
pub fn processing_count(&self) -> usize {
|
||||
self.processing_events.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
pub fn processed_count(&self) -> usize {
|
||||
self.processed_events.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
pub fn failed_count(&self) -> usize {
|
||||
self.failed_events.load(Ordering::Relaxed)
|
||||
}
|
||||
|
||||
pub fn uptime(&self) -> Duration {
|
||||
self.start_time.elapsed()
|
||||
}
|
||||
}
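// Intended usage of the counters above (a minimal sketch; `deliver` is a hypothetical
// delivery function): every event first bumps the in-flight counter, then exactly one
// of the processed/failed counters, so `processing_count` reflects in-flight work.
//
//     metrics.increment_processing();
//     match deliver(&event).await {
//         Ok(_) => metrics.increment_processed(),
//         Err(_) => metrics.increment_failed(),
//     }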
|
||||
|
||||
/// The notification system that integrates all components
|
||||
pub struct NotificationSystem {
|
||||
/// The event notifier
|
||||
pub notifier: Arc<EventNotifier>,
|
||||
/// The target registry
|
||||
pub registry: Arc<TargetRegistry>,
|
||||
/// The current configuration
|
||||
pub config: Arc<RwLock<Config>>,
|
||||
/// Cancel sender for managing stream processing tasks
|
||||
stream_cancellers: Arc<RwLock<HashMap<TargetID, mpsc::Sender<()>>>>,
|
||||
/// Semaphore bounding how many events are processed concurrently
|
||||
concurrency_limiter: Arc<Semaphore>,
|
||||
/// Monitoring indicators
|
||||
metrics: Arc<NotificationMetrics>,
|
||||
}
|
||||
|
||||
impl NotificationSystem {
|
||||
/// Creates a new NotificationSystem
|
||||
pub fn new(config: Config) -> Self {
|
||||
NotificationSystem {
|
||||
notifier: Arc::new(EventNotifier::new()),
|
||||
registry: Arc::new(TargetRegistry::new()),
|
||||
config: Arc::new(RwLock::new(config)),
|
||||
stream_cancellers: Arc::new(RwLock::new(HashMap::new())),
|
||||
concurrency_limiter: Arc::new(Semaphore::new(
|
||||
std::env::var("RUSTFS_TARGET_STREAM_CONCURRENCY")
|
||||
.ok()
|
||||
.and_then(|s| s.parse().ok())
|
||||
.unwrap_or(20),
|
||||
)), // Maximum number of concurrently processed events (default 20, overridable via RUSTFS_TARGET_STREAM_CONCURRENCY)
|
||||
metrics: Arc::new(NotificationMetrics::new()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Initializes the notification system
|
||||
pub async fn init(&self) -> Result<(), NotificationError> {
|
||||
info!("Initialize notification system...");
|
||||
|
||||
let config = self.config.read().await;
|
||||
debug!("Initializing notification system with config: {:?}", *config);
|
||||
let targets: Vec<Box<dyn Target + Send + Sync>> = self.registry.create_targets_from_config(&config).await?;
|
||||
|
||||
info!("{} notification targets were created", targets.len());
|
||||
|
||||
// Initiate event stream processing for each storage enabled target
|
||||
let mut cancellers = HashMap::new();
|
||||
for target in &targets {
|
||||
let target_id = target.id();
|
||||
info!("Initializing target: {}", target.id());
|
||||
// Initialize the target
|
||||
if let Err(e) = target.init().await {
|
||||
error!("Target {} Initialization failed:{}", target.id(), e);
|
||||
continue;
|
||||
}
|
||||
debug!("Target {} initialized successfully,enabled:{}", target_id, target.is_enabled());
|
||||
// Check if the target is enabled and has storage
|
||||
if target.is_enabled() {
|
||||
if let Some(store) = target.store() {
|
||||
info!("Start event stream processing for target {}", target.id());
|
||||
|
||||
// Clone the target's store and the target itself so they can be moved into the stream task
|
||||
let store_clone = store.boxed_clone();
|
||||
let target_box = target.clone_dyn();
|
||||
let target_arc = Arc::from(target_box);
|
||||
|
||||
// Add a reference to the monitoring metrics
|
||||
let metrics = self.metrics.clone();
|
||||
let semaphore = self.concurrency_limiter.clone();
|
||||
|
||||
// Encapsulated enhanced version of start_event_stream
|
||||
let cancel_tx = self.enhanced_start_event_stream(store_clone, target_arc, metrics, semaphore);
|
||||
|
||||
// Start event stream processing and save cancel sender
|
||||
let target_id_clone = target_id.clone();
|
||||
cancellers.insert(target_id, cancel_tx);
|
||||
info!("Event stream processing for target {} is started successfully", target_id_clone);
|
||||
} else {
|
||||
info!("Target {} No storage is configured, event stream processing is skipped", target_id);
|
||||
}
|
||||
} else {
|
||||
info!("Target {} is not enabled, event stream processing is skipped", target_id);
|
||||
}
|
||||
}
|
||||
|
||||
// Update canceler collection
|
||||
*self.stream_cancellers.write().await = cancellers;
|
||||
// Initialize the bucket target
|
||||
self.notifier.init_bucket_targets(targets).await?;
|
||||
info!("Notification system initialized");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Returns the `TargetID`s of all currently active (initialized) targets.
///
/// # Returns
/// A `Vec` containing the `TargetID` of every active target.
|
||||
pub async fn get_active_targets(&self) -> Vec<TargetID> {
|
||||
self.notifier.target_list().read().await.keys()
|
||||
}
|
||||
|
||||
/// Checks if there are active subscribers for the given bucket and event name.
|
||||
pub async fn has_subscriber(&self, bucket: &str, event_name: &EventName) -> bool {
|
||||
self.notifier.has_subscriber(bucket, event_name).await
|
||||
}
|
||||
|
||||
async fn update_config_and_reload<F>(&self, mut modifier: F) -> Result<(), NotificationError>
|
||||
where
|
||||
F: FnMut(&mut Config) -> bool, // The closure returns a boolean value indicating whether the configuration has been changed
|
||||
{
|
||||
let Some(store) = ecstore::global::new_object_layer_fn() else {
|
||||
return Err(NotificationError::ServerNotInitialized);
|
||||
};
|
||||
|
||||
let mut new_config = ecstore::config::com::read_config_without_migrate(store.clone())
|
||||
.await
|
||||
.map_err(|e| NotificationError::ReadConfig(e.to_string()))?;
|
||||
|
||||
if !modifier(&mut new_config) {
|
||||
// If the closure reports no change, return early without saving or reloading
|
||||
info!("Configuration not changed, skipping save and reload.");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if let Err(e) = ecstore::config::com::save_server_config(store, &new_config).await {
|
||||
error!("Failed to save config: {}", e);
|
||||
return Err(NotificationError::SaveConfig(e.to_string()));
|
||||
}
|
||||
|
||||
info!("Configuration updated. Reloading system...");
|
||||
self.reload_config(new_config).await
|
||||
}
|
||||
|
||||
/// Removes a target and its related resources, identified by its `TargetID`.
///
/// This process includes:
/// 1. Stopping the event stream associated with the target (if present).
/// 2. Removing the target instance from the notifier's active list.
/// 3. Removing the target's entry from the system configuration.
///
/// # Parameters
/// * `target_id` - The unique identifier of the target to remove.
///
/// # Returns
/// `Ok(())` on success.
|
||||
pub async fn remove_target(&self, target_id: &TargetID, target_type: &str) -> Result<(), NotificationError> {
|
||||
info!("Attempting to remove target: {}", target_id);
|
||||
|
||||
self.update_config_and_reload(|config| {
|
||||
let mut changed = false;
|
||||
if let Some(targets_of_type) = config.0.get_mut(target_type) {
|
||||
if targets_of_type.remove(&target_id.name).is_some() {
|
||||
info!("Removed target {} from configuration", target_id);
|
||||
changed = true;
|
||||
}
|
||||
if targets_of_type.is_empty() {
|
||||
config.0.remove(target_type);
|
||||
}
|
||||
}
|
||||
if !changed {
|
||||
warn!("Target {} not found in configuration", target_id);
|
||||
}
|
||||
changed
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
/// Set or update a Target configuration.
|
||||
/// If the configuration is changed, the entire notification system will be automatically reloaded to apply the changes.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `target_type` - Target type, such as "notify_webhook" or "notify_mqtt".
|
||||
/// * `target_name` - A unique name for a Target, such as "1".
|
||||
/// * `kvs` - The full configuration of the Target.
|
||||
///
|
||||
/// # Returns
|
||||
/// Result<(), NotificationError>
|
||||
/// If the target configuration is successfully set, it returns Ok(()).
|
||||
/// If the target configuration is invalid, it returns Err(NotificationError::Configuration).
|
||||
pub async fn set_target_config(&self, target_type: &str, target_name: &str, kvs: KVS) -> Result<(), NotificationError> {
|
||||
info!("Setting config for target {} of type {}", target_name, target_type);
|
||||
self.update_config_and_reload(|config| {
|
||||
config
|
||||
.0
|
||||
.entry(target_type.to_string())
|
||||
.or_default()
|
||||
.insert(target_name.to_string(), kvs.clone());
|
||||
true // The configuration is always modified
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
/// Removes all notification configurations for a bucket.
|
||||
pub async fn remove_bucket_notification_config(&self, bucket_name: &str) {
|
||||
self.notifier.remove_rules_map(bucket_name).await;
|
||||
}
|
||||
|
||||
/// Removes a Target configuration.
|
||||
/// If the configuration is successfully removed, the entire notification system will be automatically reloaded.
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `target_type` - Target type, such as "notify_webhook" or "notify_mqtt".
|
||||
/// * `target_name` - A unique name for a Target, such as "1".
|
||||
///
|
||||
/// # Returns
|
||||
/// Result<(), NotificationError>
|
||||
///
|
||||
/// If the target configuration is successfully removed, it returns Ok(()).
|
||||
/// If the target configuration does not exist, it returns Ok(()) without making any changes.
|
||||
pub async fn remove_target_config(&self, target_type: &str, target_name: &str) -> Result<(), NotificationError> {
|
||||
info!("Removing config for target {} of type {}", target_name, target_type);
|
||||
self.update_config_and_reload(|config| {
|
||||
let mut changed = false;
|
||||
if let Some(targets) = config.0.get_mut(target_type) {
|
||||
if targets.remove(target_name).is_some() {
|
||||
changed = true;
|
||||
}
|
||||
if targets.is_empty() {
|
||||
config.0.remove(target_type);
|
||||
}
|
||||
}
|
||||
if !changed {
|
||||
info!("Target {} of type {} not found, no changes made.", target_name, target_type);
|
||||
}
|
||||
changed
|
||||
})
|
||||
.await
|
||||
}
|
||||
|
||||
/// Enhanced event stream startup function, including monitoring and concurrency control
|
||||
fn enhanced_start_event_stream(
|
||||
&self,
|
||||
store: Box<dyn Store<Event, Error = StoreError, Key = Key> + Send>,
|
||||
target: Arc<dyn Target + Send + Sync>,
|
||||
metrics: Arc<NotificationMetrics>,
|
||||
semaphore: Arc<Semaphore>,
|
||||
) -> mpsc::Sender<()> {
|
||||
// Event Stream Processing Using Batch Version
|
||||
stream::start_event_stream_with_batching(store, target, metrics, semaphore)
|
||||
}
|
||||
|
||||
/// Update configuration
|
||||
async fn update_config(&self, new_config: Config) {
|
||||
let mut config = self.config.write().await;
|
||||
*config = new_config;
|
||||
}
|
||||
|
||||
/// Reloads the configuration
|
||||
pub async fn reload_config(&self, new_config: Config) -> Result<(), NotificationError> {
|
||||
info!("Reload notification configuration starts");
|
||||
|
||||
// Stop all existing streaming services
|
||||
let mut cancellers = self.stream_cancellers.write().await;
|
||||
for (target_id, cancel_tx) in cancellers.drain() {
|
||||
info!("Stop event stream processing for target {}", target_id);
|
||||
let _ = cancel_tx.send(()).await;
|
||||
}
|
||||
|
||||
// Update the config
|
||||
self.update_config(new_config.clone()).await;
|
||||
|
||||
// Create a new target from configuration
|
||||
let targets: Vec<Box<dyn Target + Send + Sync>> = self
|
||||
.registry
|
||||
.create_targets_from_config(&new_config)
|
||||
.await
|
||||
.map_err(NotificationError::Target)?;
|
||||
|
||||
info!("{} notification targets were created from the new configuration", targets.len());
|
||||
|
||||
// Start new event stream processing for each storage enabled target
|
||||
let mut new_cancellers = HashMap::new();
|
||||
for target in &targets {
|
||||
let target_id = target.id();
|
||||
|
||||
// Initialize the target
|
||||
if let Err(e) = target.init().await {
|
||||
error!("Target {} Initialization failed:{}", target_id, e);
|
||||
continue;
|
||||
}
|
||||
// Check if the target is enabled and has storage
|
||||
if target.is_enabled() {
|
||||
if let Some(store) = target.store() {
|
||||
info!("Start new event stream processing for target {}", target_id);
|
||||
|
||||
// Clone the target's store and the target itself so they can be moved into the stream task
|
||||
let store_clone = store.boxed_clone();
|
||||
let target_box = target.clone_dyn();
|
||||
let target_arc = Arc::from(target_box);
|
||||
|
||||
// Add a reference to the monitoring metrics
|
||||
let metrics = self.metrics.clone();
|
||||
let semaphore = self.concurrency_limiter.clone();
|
||||
|
||||
// Encapsulated enhanced version of start_event_stream
|
||||
let cancel_tx = self.enhanced_start_event_stream(store_clone, target_arc, metrics, semaphore);
|
||||
|
||||
// Start event stream processing and save cancel sender
|
||||
// let cancel_tx = start_event_stream(store_clone, target_clone);
|
||||
let target_id_clone = target_id.clone();
|
||||
new_cancellers.insert(target_id, cancel_tx);
|
||||
info!("Event stream processing of target {} is restarted successfully", target_id_clone);
|
||||
} else {
|
||||
info!("Target {} No storage is configured, event stream processing is skipped", target_id);
|
||||
}
|
||||
} else {
|
||||
info!("Target {} disabled, event stream processing is skipped", target_id);
|
||||
}
|
||||
}
|
||||
|
||||
// Update canceler collection
|
||||
*cancellers = new_cancellers;
|
||||
|
||||
// Initialize the bucket target
|
||||
self.notifier.init_bucket_targets(targets).await?;
|
||||
info!("Configuration reloaded end");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Loads the bucket notification configuration
|
||||
pub async fn load_bucket_notification_config(
|
||||
&self,
|
||||
bucket_name: &str,
|
||||
config: &BucketNotificationConfig,
|
||||
) -> Result<(), NotificationError> {
|
||||
let arn_list = self.notifier.get_arn_list(&config.region).await;
|
||||
if arn_list.is_empty() {
|
||||
return Err(NotificationError::Configuration("No targets configured".to_string()));
|
||||
}
|
||||
info!("Available ARNs: {:?}", arn_list);
|
||||
// Validate the configuration against the available ARNs
|
||||
if let Err(e) = config.validate(&config.region, &arn_list) {
|
||||
debug!("Bucket notification config validation region:{} failed: {}", &config.region, e);
|
||||
if !e.to_string().contains("ARN not found") {
|
||||
return Err(NotificationError::BucketNotification(e.to_string()));
|
||||
} else {
|
||||
error!("{}", e);
|
||||
}
|
||||
}
|
||||
|
||||
// let rules_map = config.to_rules_map();
|
||||
let rules_map = config.get_rules_map();
|
||||
self.notifier.add_rules_map(bucket_name, rules_map.clone()).await;
|
||||
info!("Loaded notification config for bucket: {}", bucket_name);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Sends an event
|
||||
pub async fn send_event(&self, event: Arc<Event>) {
|
||||
self.notifier.send(event).await;
|
||||
}
|
||||
|
||||
/// Obtain system status information
|
||||
pub fn get_status(&self) -> HashMap<String, String> {
|
||||
let mut status = HashMap::new();
|
||||
|
||||
status.insert("uptime_seconds".to_string(), self.metrics.uptime().as_secs().to_string());
|
||||
status.insert("processing_events".to_string(), self.metrics.processing_count().to_string());
|
||||
status.insert("processed_events".to_string(), self.metrics.processed_count().to_string());
|
||||
status.insert("failed_events".to_string(), self.metrics.failed_count().to_string());
|
||||
|
||||
status
|
||||
}
|
||||
|
||||
// Add a method to shut down the system
|
||||
pub async fn shutdown(&self) {
|
||||
info!("Turn off the notification system");
|
||||
|
||||
// Get the number of active targets
|
||||
let active_targets = self.stream_cancellers.read().await.len();
|
||||
info!("Stops {} active event stream processing tasks", active_targets);
|
||||
|
||||
let mut cancellers = self.stream_cancellers.write().await;
|
||||
for (target_id, cancel_tx) in cancellers.drain() {
|
||||
info!("Stop event stream processing for target {}", target_id);
|
||||
let _ = cancel_tx.send(()).await;
|
||||
}
|
||||
// Wait for a short while to make sure the task has a chance to complete
|
||||
tokio::time::sleep(Duration::from_millis(500)).await;
|
||||
|
||||
info!("Notify the system to be shut down completed");
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for NotificationSystem {
|
||||
fn drop(&mut self) {
|
||||
// Drop cannot run asynchronous operations; just log the final status.
info!("NotificationSystem instance is being dropped");
info!("Notification system status at shutdown:");
let status = self.get_status();
for (key, value) in status {
info!("key: {}, value: {}", key, value);
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Loads configuration from a file
|
||||
pub async fn load_config_from_file(path: &str, system: &NotificationSystem) -> Result<(), NotificationError> {
|
||||
let config_data = tokio::fs::read(path)
|
||||
.await
|
||||
.map_err(|e| NotificationError::Configuration(format!("Failed to read config file: {}", e)))?;
|
||||
|
||||
let config = Config::unmarshal(config_data.as_slice())
|
||||
.map_err(|e| NotificationError::Configuration(format!("Failed to parse config: {}", e)))?;
|
||||
system.reload_config(config).await
|
||||
}
|
||||
71
crates/notify/src/lib.rs
Normal file
@@ -0,0 +1,71 @@
|
||||
//! RustFS Notify - A flexible and extensible event notification system for object storage.
|
||||
//!
|
||||
//! This library provides a Rust implementation of a bucket notification system for object storage.
//! It supports sending events to various targets (like Webhook and MQTT) and includes features
//! such as event persistence and retry on failure.
|
||||
|
||||
pub mod arn;
|
||||
pub mod error;
|
||||
pub mod event;
|
||||
pub mod factory;
|
||||
pub mod global;
|
||||
pub mod integration;
|
||||
pub mod notifier;
|
||||
pub mod registry;
|
||||
pub mod rules;
|
||||
pub mod store;
|
||||
pub mod stream;
|
||||
pub mod target;
|
||||
|
||||
// Re-exports
|
||||
pub use error::{NotificationError, StoreError, TargetError};
|
||||
pub use event::{Event, EventArgs, EventLog, EventName};
|
||||
pub use global::{initialize, notification_system};
|
||||
pub use integration::NotificationSystem;
|
||||
pub use rules::BucketNotificationConfig;
|
||||
use std::io::IsTerminal;
|
||||
pub use target::Target;
|
||||
|
||||
use tracing_subscriber::{EnvFilter, fmt, prelude::*, util::SubscriberInitExt};
|
||||
|
||||
/// Initialize the tracing log system
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// rustfs_notify::init_logger(rustfs_notify::LogLevel::Info);
|
||||
/// ```
|
||||
pub fn init_logger(level: LogLevel) {
|
||||
let filter = EnvFilter::default().add_directive(level.into());
|
||||
tracing_subscriber::registry()
|
||||
.with(filter)
|
||||
.with(
|
||||
fmt::layer()
|
||||
.with_target(true)
|
||||
.with_ansi(std::io::stdout().is_terminal())
|
||||
.with_thread_names(true)
|
||||
.with_thread_ids(true)
|
||||
.with_file(true)
|
||||
.with_line_number(true),
|
||||
)
|
||||
.init();
|
||||
}
|
||||
|
||||
/// Log level definition
|
||||
pub enum LogLevel {
|
||||
Debug,
|
||||
Info,
|
||||
Warn,
|
||||
Error,
|
||||
}
|
||||
|
||||
impl From<LogLevel> for tracing_subscriber::filter::Directive {
|
||||
fn from(level: LogLevel) -> Self {
|
||||
match level {
|
||||
LogLevel::Debug => "debug".parse().unwrap(),
|
||||
LogLevel::Info => "info".parse().unwrap(),
|
||||
LogLevel::Warn => "warn".parse().unwrap(),
|
||||
LogLevel::Error => "error".parse().unwrap(),
|
||||
}
|
||||
}
|
||||
}
|
||||
260
crates/notify/src/notifier.rs
Normal file
@@ -0,0 +1,260 @@
|
||||
use crate::arn::TargetID;
|
||||
use crate::{EventName, error::NotificationError, event::Event, rules::RulesMap, target::Target};
|
||||
use dashmap::DashMap;
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::{debug, error, info, instrument, warn};
|
||||
|
||||
/// Manages event notification to targets based on rules
|
||||
pub struct EventNotifier {
|
||||
target_list: Arc<RwLock<TargetList>>,
|
||||
bucket_rules_map: Arc<DashMap<String, RulesMap>>,
|
||||
}
|
||||
|
||||
impl Default for EventNotifier {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl EventNotifier {
|
||||
/// Creates a new EventNotifier
|
||||
pub fn new() -> Self {
|
||||
EventNotifier {
|
||||
target_list: Arc::new(RwLock::new(TargetList::new())),
|
||||
bucket_rules_map: Arc::new(DashMap::new()),
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a reference to the target list
|
||||
/// This method provides access to the target list for external use.
|
||||
///
|
||||
pub fn target_list(&self) -> Arc<RwLock<TargetList>> {
|
||||
Arc::clone(&self.target_list)
|
||||
}
|
||||
|
||||
/// Removes all notification rules for a bucket
|
||||
///
|
||||
/// # Arguments
|
||||
/// * `bucket_name` - The name of the bucket for which to remove rules
|
||||
///
|
||||
/// This method removes all rules associated with the specified bucket name.
|
||||
/// It will log a message indicating the removal of rules.
|
||||
pub async fn remove_rules_map(&self, bucket_name: &str) {
|
||||
if self.bucket_rules_map.remove(bucket_name).is_some() {
|
||||
info!("Removed all notification rules for bucket: {}", bucket_name);
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns a list of ARNs for the registered targets
|
||||
pub async fn get_arn_list(&self, region: &str) -> Vec<String> {
|
||||
let target_list_guard = self.target_list.read().await;
|
||||
target_list_guard
|
||||
.keys()
|
||||
.iter()
|
||||
.map(|target_id| target_id.to_arn(region).to_arn_string())
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Adds a rules map for a bucket
|
||||
pub async fn add_rules_map(&self, bucket_name: &str, rules_map: RulesMap) {
|
||||
if rules_map.is_empty() {
|
||||
self.bucket_rules_map.remove(bucket_name);
|
||||
} else {
|
||||
self.bucket_rules_map.insert(bucket_name.to_string(), rules_map);
|
||||
}
|
||||
info!("Added rules for bucket: {}", bucket_name);
|
||||
}
|
||||
|
||||
/// Removes notification rules for a bucket
|
||||
pub async fn remove_notification(&self, bucket_name: &str) {
|
||||
self.bucket_rules_map.remove(bucket_name);
|
||||
info!("Removed notification rules for bucket: {}", bucket_name);
|
||||
}
|
||||
|
||||
/// Removes all targets
|
||||
pub async fn remove_all_bucket_targets(&self) {
|
||||
let mut target_list_guard = self.target_list.write().await;
|
||||
// The logic for sending cancel signals via stream_cancel_senders would be removed.
|
||||
// TargetList::clear_targets_only already handles calling target.close().
|
||||
target_list_guard.clear_targets_only().await; // Modified clear to not re-cancel
|
||||
info!("Removed all targets and their streams");
|
||||
}
|
||||
|
||||
/// Checks if there are active subscribers for the given bucket and event name.
|
||||
///
|
||||
/// # Parameters
|
||||
/// * `bucket_name` - bucket name.
|
||||
/// * `event_name` - Event name.
|
||||
///
|
||||
/// # Return value
|
||||
/// Return `true` if at least one matching notification rule exists.
|
||||
pub async fn has_subscriber(&self, bucket_name: &str, event_name: &EventName) -> bool {
|
||||
// Check whether any rules exist for this bucket
|
||||
if let Some(rules_map) = self.bucket_rules_map.get(bucket_name) {
|
||||
// A composite event (such as ObjectCreatedAll) is expanded to multiple single events.
|
||||
// We need to check whether any of these single events have the rules configured.
|
||||
rules_map.has_subscriber(event_name)
|
||||
} else {
|
||||
// If no bucket is found, no subscribers
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Sends an event to the appropriate targets based on the bucket rules
|
||||
#[instrument(skip(self, event))]
|
||||
pub async fn send(&self, event: Arc<Event>) {
|
||||
let bucket_name = &event.s3.bucket.name;
|
||||
let object_key = &event.s3.object.key;
|
||||
let event_name = event.event_name;
|
||||
if let Some(rules) = self.bucket_rules_map.get(bucket_name) {
|
||||
let target_ids = rules.match_rules(event_name, object_key);
|
||||
if target_ids.is_empty() {
|
||||
debug!("No matching targets for event in bucket: {}", bucket_name);
|
||||
return;
|
||||
}
|
||||
let target_ids_len = target_ids.len();
|
||||
let mut handles = vec![];
|
||||
|
||||
// Use scope to limit the borrow scope of target_list
|
||||
{
|
||||
let target_list_guard = self.target_list.read().await;
|
||||
info!("Sending event to targets: {:?}", target_ids);
|
||||
for target_id in target_ids {
|
||||
// `get` now returns Option<Arc<dyn Target + Send + Sync>>
|
||||
if let Some(target_arc) = target_list_guard.get(&target_id) {
|
||||
// Clone an Arc<Box<dyn Target>> (which is where target_list is stored) to move into an asynchronous task
|
||||
// target_arc is already Arc, clone it for the async task
|
||||
let cloned_target_for_task = target_arc.clone();
|
||||
let event_clone = event.clone();
|
||||
let target_name_for_task = cloned_target_for_task.name(); // Get the name before generating the task
|
||||
debug!("Preparing to send event to target: {}", target_name_for_task);
|
||||
// Use cloned data in closures to avoid borrowing conflicts
|
||||
let handle = tokio::spawn(async move {
|
||||
if let Err(e) = cloned_target_for_task.save(event_clone).await {
|
||||
error!("Failed to send event to target {}: {}", target_name_for_task, e);
|
||||
} else {
|
||||
debug!("Successfully saved event to target {}", target_name_for_task);
|
||||
}
|
||||
});
|
||||
handles.push(handle);
|
||||
} else {
|
||||
warn!("Target ID {:?} found in rules but not in target list.", target_id);
|
||||
}
|
||||
}
|
||||
// target_list is automatically released here
|
||||
}
|
||||
|
||||
// Wait for all tasks to be completed
|
||||
for handle in handles {
|
||||
if let Err(e) = handle.await {
|
||||
error!("Task for sending/saving event failed: {}", e);
|
||||
}
|
||||
}
|
||||
info!("Event processing initiated for {} targets for bucket: {}", target_ids_len, bucket_name);
|
||||
} else {
|
||||
debug!("No rules found for bucket: {}", bucket_name);
|
||||
}
|
||||
}
|
||||
|
||||
/// Initializes the targets for buckets
|
||||
#[instrument(skip(self, targets_to_init))]
|
||||
pub async fn init_bucket_targets(
|
||||
&self,
|
||||
targets_to_init: Vec<Box<dyn Target + Send + Sync>>,
|
||||
) -> Result<(), NotificationError> {
|
||||
// Currently active, simpler logic
|
||||
let mut target_list_guard = self.target_list.write().await; //Gets a write lock for the TargetList
|
||||
for target_boxed in targets_to_init {
|
||||
// Traverse the incoming Box<dyn Target >
|
||||
debug!("init bucket target: {}", target_boxed.name());
|
||||
// TargetList::add method expectations Arc<dyn Target + Send + Sync>
|
||||
// Therefore, you need to convert Box<dyn Target + Send + Sync> to Arc<dyn Target + Send + Sync>
|
||||
let target_arc: Arc<dyn Target + Send + Sync> = Arc::from(target_boxed);
|
||||
target_list_guard.add(target_arc)?; // Add Arc<dyn Target> to the list
|
||||
}
|
||||
info!("Initialized bucket targets, list size: {}", target_list_guard.len());
|
||||
Ok(()) // Make sure to return a Result
|
||||
}
|
||||
}
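// Typical flow (a minimal sketch): targets are registered once, per-bucket rules are
// installed, and every event is then fanned out to the targets whose rules match its
// event name and object key.
//
//     notifier.init_bucket_targets(targets).await?;
//     notifier.add_rules_map("my-bucket", rules_map).await;
//     notifier.send(Arc::new(event)).await;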
|
||||
|
||||
/// A thread-safe list of targets
|
||||
pub struct TargetList {
|
||||
targets: HashMap<TargetID, Arc<dyn Target + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl Default for TargetList {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl TargetList {
|
||||
/// Creates a new TargetList
|
||||
pub fn new() -> Self {
|
||||
TargetList { targets: HashMap::new() }
|
||||
}
|
||||
|
||||
/// Adds a target to the list
|
||||
pub fn add(&mut self, target: Arc<dyn Target + Send + Sync>) -> Result<(), NotificationError> {
|
||||
let id = target.id();
|
||||
if self.targets.contains_key(&id) {
|
||||
// Potentially update or log a warning/error if replacing an existing target.
|
||||
warn!("Target with ID {} already exists in TargetList. It will be overwritten.", id);
|
||||
}
|
||||
self.targets.insert(id, target);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Removes a target by ID. Note: This does not stop its associated event stream.
|
||||
/// Stream cancellation should be handled by EventNotifier.
|
||||
pub async fn remove_target_only(&mut self, id: &TargetID) -> Option<Arc<dyn Target + Send + Sync>> {
|
||||
if let Some(target_arc) = self.targets.remove(id) {
|
||||
if let Err(e) = target_arc.close().await {
|
||||
// Target's own close logic
|
||||
error!("Failed to close target {} during removal: {}", id, e);
|
||||
}
|
||||
Some(target_arc)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Clears all targets from the list. Note: This does not stop their associated event streams.
|
||||
/// Stream cancellation should be handled by EventNotifier.
|
||||
pub async fn clear_targets_only(&mut self) {
|
||||
let target_ids_to_clear: Vec<TargetID> = self.targets.keys().cloned().collect();
|
||||
for id in target_ids_to_clear {
|
||||
if let Some(target_arc) = self.targets.remove(&id) {
|
||||
if let Err(e) = target_arc.close().await {
|
||||
error!("Failed to close target {} during clear: {}", id, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
self.targets.clear();
|
||||
}
|
||||
|
||||
/// Returns a target by ID
|
||||
pub fn get(&self, id: &TargetID) -> Option<Arc<dyn Target + Send + Sync>> {
|
||||
self.targets.get(id).cloned()
|
||||
}
|
||||
|
||||
/// Returns all target IDs
|
||||
pub fn keys(&self) -> Vec<TargetID> {
|
||||
self.targets.keys().cloned().collect()
|
||||
}
|
||||
|
||||
/// Returns the number of targets
|
||||
pub fn len(&self) -> usize {
|
||||
self.targets.len()
|
||||
}
|
||||
|
||||
// is_empty can be derived from len()
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.targets.is_empty()
|
||||
}
|
||||
}
|
||||
96
crates/notify/src/registry.rs
Normal file
@@ -0,0 +1,96 @@
|
||||
use crate::target::ChannelTargetType;
|
||||
use crate::{
|
||||
error::TargetError,
|
||||
factory::{MQTTTargetFactory, TargetFactory, WebhookTargetFactory},
|
||||
target::Target,
|
||||
};
|
||||
use ecstore::config::{Config, ENABLE_KEY, ENABLE_OFF, ENABLE_ON, KVS};
|
||||
use std::collections::HashMap;
|
||||
use tracing::{error, info};
|
||||
|
||||
/// Registry for managing target factories
|
||||
pub struct TargetRegistry {
|
||||
factories: HashMap<String, Box<dyn TargetFactory>>,
|
||||
}
|
||||
|
||||
impl Default for TargetRegistry {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl TargetRegistry {
|
||||
/// Creates a new TargetRegistry with built-in factories
|
||||
pub fn new() -> Self {
|
||||
let mut registry = TargetRegistry {
|
||||
factories: HashMap::new(),
|
||||
};
|
||||
|
||||
// Register built-in factories
|
||||
registry.register(ChannelTargetType::Webhook.as_str(), Box::new(WebhookTargetFactory));
|
||||
registry.register(ChannelTargetType::Mqtt.as_str(), Box::new(MQTTTargetFactory));
|
||||
|
||||
registry
|
||||
}
|
||||
|
||||
/// Registers a new factory for a target type
|
||||
pub fn register(&mut self, target_type: &str, factory: Box<dyn TargetFactory>) {
|
||||
self.factories.insert(target_type.to_string(), factory);
|
||||
}
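// Registering an additional factory (a minimal sketch; `NatsTargetFactory` is
// hypothetical and not part of this crate):
//
//     let mut registry = TargetRegistry::new();
//     registry.register("nats", Box::new(NatsTargetFactory));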
|
||||
|
||||
/// Creates a target from configuration
|
||||
pub async fn create_target(
|
||||
&self,
|
||||
target_type: &str,
|
||||
id: String,
|
||||
config: &KVS,
|
||||
) -> Result<Box<dyn Target + Send + Sync>, TargetError> {
|
||||
let factory = self
|
||||
.factories
|
||||
.get(target_type)
|
||||
.ok_or_else(|| TargetError::Configuration(format!("Unknown target type: {}", target_type)))?;
|
||||
|
||||
// Validate configuration before creating target
|
||||
factory.validate_config(&id, config)?;
|
||||
|
||||
// Create target
|
||||
factory.create_target(id, config).await
|
||||
}
|
||||
|
||||
/// Creates all targets from a configuration
|
||||
pub async fn create_targets_from_config(&self, config: &Config) -> Result<Vec<Box<dyn Target + Send + Sync>>, TargetError> {
|
||||
let mut targets: Vec<Box<dyn Target + Send + Sync>> = Vec::new();
|
||||
|
||||
// Iterate through configuration sections
|
||||
for (section, subsections) in &config.0 {
|
||||
// Only process notification sections
|
||||
if !section.starts_with("notify_") {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Extract target type from section name
|
||||
let target_type = section.trim_start_matches("notify_");
|
||||
|
||||
// Iterate through subsections (each representing a target instance)
|
||||
for (target_id, target_config) in subsections {
|
||||
// Skip disabled targets
|
||||
if target_config.lookup(ENABLE_KEY).unwrap_or_else(|| ENABLE_OFF.to_string()) != ENABLE_ON {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Create target
|
||||
match self.create_target(target_type, target_id.clone(), target_config).await {
|
||||
Ok(target) => {
|
||||
info!("Created target: {}/{}", target_type, target_id);
|
||||
targets.push(target);
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to create target {}/{}: {}", target_type, target_id, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(targets)
|
||||
}
|
||||
}
|
||||
115
crates/notify/src/rules/config.rs
Normal file
@@ -0,0 +1,115 @@
|
||||
use super::rules_map::RulesMap;
|
||||
// Keep for existing structure if any, or remove if not used
|
||||
use super::xml_config::ParseConfigError as BucketNotificationConfigError;
|
||||
use crate::EventName;
|
||||
use crate::arn::TargetID;
|
||||
use crate::rules::NotificationConfiguration;
|
||||
use crate::rules::pattern_rules;
|
||||
use crate::rules::target_id_set;
|
||||
use std::collections::HashMap;
|
||||
use std::io::Read;
|
||||
|
||||
/// Configuration for bucket notifications.
|
||||
/// This struct now holds the parsed and validated rules in the new RulesMap format.
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct BucketNotificationConfig {
|
||||
pub region: String, // Region where this config is applicable
|
||||
pub rules: RulesMap, // The new, more detailed RulesMap
|
||||
}
|
||||
|
||||
impl BucketNotificationConfig {
|
||||
pub fn new(region: &str) -> Self {
|
||||
BucketNotificationConfig {
|
||||
region: region.to_string(),
|
||||
rules: RulesMap::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Adds a rule to the configuration.
|
||||
/// This method allows adding a rule with a specific event and target ID.
|
||||
pub fn add_rule(
|
||||
&mut self,
|
||||
event_names: &[EventName], // Assuming event_names is a list of event names
|
||||
pattern: String, // The object key pattern for the rule
|
||||
target_id: TargetID, // The target ID for the notification
|
||||
) {
|
||||
self.rules.add_rule_config(event_names, pattern, target_id);
|
||||
}
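// Building a configuration programmatically (a minimal sketch; the pattern and the
// `target_id` value are illustrative, and `ObjectCreatedAll` is used here only as an
// example event name):
//
//     let mut cfg = BucketNotificationConfig::new("us-east-1");
//     cfg.add_rule(&[EventName::ObjectCreatedAll], "uploads/*".to_string(), target_id);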
|
||||
|
||||
/// Parses notification configuration from XML.
|
||||
/// `arn_list` is a list of valid ARN strings for validation.
|
||||
pub fn from_xml<R: Read + std::io::BufRead>(
|
||||
reader: R,
|
||||
current_region: &str,
|
||||
arn_list: &[String],
|
||||
) -> Result<Self, BucketNotificationConfigError> {
|
||||
let mut parsed_config = NotificationConfiguration::from_reader(reader)?;
|
||||
|
||||
// Set defaults (region in ARNs if empty, xmlns) before validation
|
||||
parsed_config.set_defaults(current_region);
|
||||
|
||||
// Validate the parsed configuration
|
||||
parsed_config.validate(current_region, arn_list)?;
|
||||
|
||||
let mut rules_map = RulesMap::new();
|
||||
for queue_conf in parsed_config.queue_list {
|
||||
// The ARN in queue_conf should now have its region set if it was originally empty.
|
||||
// Ensure TargetID can be cloned or extracted correctly.
|
||||
let target_id = queue_conf.arn.target_id.clone();
|
||||
let pattern_str = queue_conf.filter.filter_rule_list.pattern();
|
||||
rules_map.add_rule_config(&queue_conf.events, pattern_str, target_id);
|
||||
}
|
||||
|
||||
Ok(BucketNotificationConfig {
|
||||
region: current_region.to_string(), // Config is for the current_region
|
||||
rules: rules_map,
|
||||
})
|
||||
}
|
||||
|
||||
/// Validates the *current* BucketNotificationConfig.
|
||||
/// This might be redundant if construction always implies validation.
|
||||
/// However, Go's Config has a Validate method.
|
||||
/// The primary validation now happens during `from_xml` via `NotificationConfiguration::validate`.
|
||||
/// This method could re-check against an updated arn_list or region if needed.
|
||||
pub fn validate(&self, current_region: &str, arn_list: &[String]) -> Result<(), BucketNotificationConfigError> {
|
||||
if self.region != current_region {
|
||||
return Err(BucketNotificationConfigError::RegionMismatch {
|
||||
config_region: self.region.clone(),
|
||||
current_region: current_region.to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
// Iterate through the rules in self.rules and validate their TargetIDs against arn_list
|
||||
// This requires RulesMap to expose its internal structure or provide an iterator
|
||||
for (_event_name, pattern_rules) in self.rules.inner().iter() {
|
||||
for (_pattern, target_id_set) in pattern_rules.inner().iter() {
|
||||
// Assuming PatternRules has inner()
|
||||
for target_id in target_id_set {
|
||||
// Construct the ARN string for this target_id and self.region
|
||||
let arn_to_check = target_id.to_arn(&self.region); // Assuming TargetID has to_arn
|
||||
if !arn_list.contains(&arn_to_check.to_arn_string()) {
|
||||
return Err(BucketNotificationConfigError::ArnNotFound(arn_to_check.to_arn_string()));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Expose the RulesMap for the notifier
|
||||
pub fn get_rules_map(&self) -> &RulesMap {
|
||||
&self.rules
|
||||
}
|
||||
|
||||
/// Sets the region for the configuration
|
||||
pub fn set_region(&mut self, region: &str) {
|
||||
self.region = region.to_string();
|
||||
}
|
||||
}
|
||||
|
||||
// Add a helper to PatternRules if not already present
|
||||
impl pattern_rules::PatternRules {
|
||||
pub fn inner(&self) -> &HashMap<String, target_id_set::TargetIdSet> {
|
||||
&self.rules
|
||||
}
|
||||
}
|
||||
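For orientation, a minimal sketch of how this configuration might be built programmatically and handed to a notifier. The `EventName::ObjectCreatedAll` variant name is taken from the comments in `rules_map.rs` and is an assumption; the real variant may be spelled differently, and the `TargetID` is assumed to be constructed elsewhere.

```rust
// Hedged sketch, not part of the diff.
fn build_config_sketch(target: TargetID) -> BucketNotificationConfig {
    let mut config = BucketNotificationConfig::new("us-east-1");
    // Compound event names are expanded inside RulesMap::add_rule_config.
    config.add_rule(&[EventName::ObjectCreatedAll], "uploads/*".to_string(), target);

    // The notifier only needs the rules map.
    assert!(!config.get_rules_map().is_empty());
    config
}
```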
19
crates/notify/src/rules/mod.rs
Normal file
@@ -0,0 +1,19 @@
pub mod pattern;
pub mod pattern_rules;
pub mod rules_map;
pub mod target_id_set;
pub mod xml_config; // XML structure definition and parsing

pub mod config; // Definition and parsing of BucketNotificationConfig

// Re-export key types from submodules so callers can reach them as `crate::rules::TypeName`.
pub use config::BucketNotificationConfig;
// BucketNotificationConfigError is currently an alias for xml_config::ParseConfigError;
// adjust this re-export if config.rs grows its own error type.
pub use xml_config::ParseConfigError as BucketNotificationConfigError;

pub use pattern_rules::PatternRules;
pub use rules_map::RulesMap;
pub use target_id_set::TargetIdSet;
pub use xml_config::{NotificationConfiguration, ParseConfigError};
96
crates/notify/src/rules/pattern.rs
Normal file
@@ -0,0 +1,96 @@
use wildmatch::WildMatch;

/// Creates a new pattern string based on a prefix and a suffix.
///
/// The rule is similar to event.NewPattern in the Go version:
/// - If a prefix is provided and does not end with '*', a '*' is appended.
/// - If a suffix is provided and does not start with '*', a '*' is prepended.
/// - "**" is replaced with "*".
pub fn new_pattern(prefix: Option<&str>, suffix: Option<&str>) -> String {
    let mut pattern = String::new();

    // Process the prefix part
    if let Some(p) = prefix {
        if !p.is_empty() {
            pattern.push_str(p);
            if !p.ends_with('*') {
                pattern.push('*');
            }
        }
    }

    // Process the suffix part
    if let Some(s) = suffix {
        if !s.is_empty() {
            let mut s_to_append = s.to_string();
            if !s.starts_with('*') {
                s_to_append.insert(0, '*');
            }

            // If the pattern is empty (only a suffix was provided), the pattern is the suffix;
            // otherwise, append the suffix to the pattern.
            if pattern.is_empty() {
                pattern = s_to_append;
            } else {
                pattern.push_str(&s_to_append);
            }
        }
    }

    // Replace "**" with "*"
    pattern = pattern.replace("**", "*");

    pattern
}

/// Simple matching of an object name against a pattern.
pub fn match_simple(pattern_str: &str, object_name: &str) -> bool {
    if pattern_str == "*" {
        // AWS S3 docs: a single asterisk (*) in the rule matches all objects.
        return true;
    }
    // WildMatch considers an empty pattern to not match anything, which is usually desired.
    // An empty pattern_str means no specific filter, so the behavior is a matter of interpretation;
    // Go's wildcard.MatchSimple might treat an empty pattern differently.
    // Given that Go's NewRulesMap defaults to "*", an empty pattern from a Filter is unlikely
    // to mean "match all", so treat it as "no match" unless it is explicitly "*".
    if pattern_str.is_empty() {
        return false;
    }
    WildMatch::new(pattern_str).matches(object_name)
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_new_pattern() {
        assert_eq!(new_pattern(Some("images/"), Some(".jpg")), "images/*.jpg");
        assert_eq!(new_pattern(Some("images/"), None), "images/*");
        assert_eq!(new_pattern(None, Some(".jpg")), "*.jpg");
        assert_eq!(new_pattern(Some("foo"), Some("bar")), "foo*bar"); // foo* + *bar -> foo**bar -> foo*bar
        assert_eq!(new_pattern(Some("foo*"), Some("bar")), "foo*bar");
        assert_eq!(new_pattern(Some("foo"), Some("*bar")), "foo*bar");
        assert_eq!(new_pattern(Some("foo*"), Some("*bar")), "foo*bar");
        assert_eq!(new_pattern(Some("*"), Some("*")), "*"); // * + * -> ** -> *
        assert_eq!(new_pattern(Some("a"), Some("")), "a*");
        assert_eq!(new_pattern(Some(""), Some("b")), "*b");
        assert_eq!(new_pattern(None, None), "");
        assert_eq!(new_pattern(Some("prefix"), Some("suffix")), "prefix*suffix");
        assert_eq!(new_pattern(Some("prefix/"), Some("/suffix")), "prefix/*/suffix"); // prefix/* + */suffix -> prefix/**/suffix -> prefix/*/suffix
    }

    #[test]
    fn test_match_simple() {
        assert!(match_simple("foo*", "foobar"));
        assert!(!match_simple("foo*", "barfoo"));
        assert!(match_simple("*.jpg", "photo.jpg"));
        assert!(!match_simple("*.jpg", "photo.png"));
        assert!(match_simple("*", "anything.anything"));
        assert!(match_simple("foo*bar", "foobazbar"));
        assert!(!match_simple("foo*bar", "foobar_baz"));
        assert!(match_simple("a*b*c", "axbyc"));
        assert!(!match_simple("a*b*c", "axbc"));
    }
}
75
crates/notify/src/rules/pattern_rules.rs
Normal file
@@ -0,0 +1,75 @@
use super::pattern;
use super::target_id_set::TargetIdSet;
use crate::arn::TargetID;
use std::collections::HashMap;

/// PatternRules - maps object name patterns to sets of TargetIDs.
/// Corresponds to `event.Rules` (map[string]TargetIDSet) in the Go code.
#[derive(Debug, Clone, Default)]
pub struct PatternRules {
    pub(crate) rules: HashMap<String, TargetIdSet>,
}

impl PatternRules {
    pub fn new() -> Self {
        Default::default()
    }

    /// Adds a rule: a pattern and a target ID.
    /// If the pattern already exists, target_id is added to the existing TargetIdSet.
    pub fn add(&mut self, pattern: String, target_id: TargetID) {
        self.rules.entry(pattern).or_default().insert(target_id);
    }

    /// Checks if any rule matches the given object name.
    pub fn match_simple(&self, object_name: &str) -> bool {
        self.rules.keys().any(|p| pattern::match_simple(p, object_name))
    }

    /// Returns all TargetIDs whose pattern matches the object name.
    pub fn match_targets(&self, object_name: &str) -> TargetIdSet {
        let mut matched_targets = TargetIdSet::new();
        for (pattern_str, target_set) in &self.rules {
            if pattern::match_simple(pattern_str, object_name) {
                matched_targets.extend(target_set.iter().cloned());
            }
        }
        matched_targets
    }

    pub fn is_empty(&self) -> bool {
        self.rules.is_empty()
    }

    /// Merges with another PatternRules and returns the result.
    /// Corresponds to Go's `Rules.Union`.
    pub fn union(&self, other: &Self) -> Self {
        let mut new_rules = self.clone();
        for (pattern, their_targets) in &other.rules {
            let our_targets = new_rules.rules.entry(pattern.clone()).or_default();
            our_targets.extend(their_targets.iter().cloned());
        }
        new_rules
    }

    /// Computes the difference from another PatternRules.
    /// Corresponds to Go's `Rules.Difference`.
    pub fn difference(&self, other: &Self) -> Self {
        let mut result_rules = HashMap::new();
        for (pattern, self_targets) in &self.rules {
            match other.rules.get(pattern) {
                Some(other_targets) => {
                    let diff_targets: TargetIdSet = self_targets.difference(other_targets).cloned().collect();
                    if !diff_targets.is_empty() {
                        result_rules.insert(pattern.clone(), diff_targets);
                    }
                }
                None => {
                    // If other has no entry for this pattern, all of self_targets are retained
                    result_rules.insert(pattern.clone(), self_targets.clone());
                }
            }
        }
        PatternRules { rules: result_rules }
    }
}
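As a sanity check on the set semantics above, here is a small hedged sketch, written as if it lived in a test module of this file; the TargetIDs are taken as parameters because their constructor is not shown in this diff.

```rust
// Hedged sketch of union/difference semantics; not part of the diff.
fn set_semantics_sketch(tid_a: TargetID, tid_b: TargetID) {
    let mut a = PatternRules::new();
    a.add("images/*".to_string(), tid_a.clone());
    a.add("logs/*".to_string(), tid_b.clone());

    let mut b = PatternRules::new();
    b.add("logs/*".to_string(), tid_b);

    // union keeps both patterns with their combined target sets
    let merged = a.union(&b);
    assert!(merged.match_simple("images/cat.png") && merged.match_simple("logs/app.log"));

    // difference drops "logs/*" entirely (its target set becomes empty) and keeps "images/*"
    let remaining = a.difference(&b);
    assert!(remaining.match_simple("images/cat.png"));
    assert!(!remaining.match_simple("logs/app.log"));
}
```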
174
crates/notify/src/rules/rules_map.rs
Normal file
@@ -0,0 +1,174 @@
use super::pattern_rules::PatternRules;
use super::target_id_set::TargetIdSet;
use crate::arn::TargetID;
use crate::event::EventName;
use std::collections::HashMap;

/// RulesMap - rule mapping organized by event name.
/// Corresponds to `event.RulesMap` (map[Name]Rules) in the Go code.
#[derive(Debug, Clone, Default)]
pub struct RulesMap {
    map: HashMap<EventName, PatternRules>,
    /// A bitmask representing the union of all event types in this map.
    /// Used for quick checks in `has_subscriber`.
    total_events_mask: u64,
}

impl RulesMap {
    /// Creates a new, empty RulesMap.
    pub fn new() -> Self {
        Default::default()
    }

    /// Adds a rule configuration to the map.
    ///
    /// Compound event names (such as `s3:ObjectCreated:*`) are expanded into their
    /// concrete event types, and the rule is added for each of them.
    ///
    /// # Parameters
    /// * `event_names` - List of event names associated with this rule.
    /// * `pattern` - Matching pattern for object keys. If empty, it defaults to `*` (match all).
    /// * `target_id` - The target ID of the notification.
    pub fn add_rule_config(&mut self, event_names: &[EventName], pattern: String, target_id: TargetID) {
        let effective_pattern = if pattern.is_empty() {
            "*".to_string() // Match all by default
        } else {
            pattern
        };

        for event_name_spec in event_names {
            // Expand compound event types, for example ObjectCreatedAll -> [ObjectCreatedPut, ObjectCreatedPost, ...]
            for expanded_event_name in event_name_spec.expand() {
                self.map
                    .entry(expanded_event_name)
                    .or_default()
                    .add(effective_pattern.clone(), target_id.clone());
                // Update total_events_mask to include this event type
                self.total_events_mask |= expanded_event_name.mask();
            }
        }
    }

    /// Merges another RulesMap into this one.
    /// Corresponds to Go's `RulesMap.Add(rulesMap2 RulesMap)`.
    pub fn add_map(&mut self, other_map: &Self) {
        for (event_name, other_pattern_rules) in &other_map.map {
            let self_pattern_rules = self.map.entry(*event_name).or_default();
            // PatternRules::union returns a new PatternRules; replace the existing entry with the merge
            let merged_rules = self_pattern_rules.union(other_pattern_rules);
            *self_pattern_rules = merged_rules;
        }
        // The two masks can simply be OR-ed together.
        self.total_events_mask |= other_map.total_events_mask;
    }

    /// Removes the rules defined in another RulesMap from this one.
    ///
    /// After removal, `total_events_mask` is recalculated to keep it accurate.
    pub fn remove_map(&mut self, other_map: &Self) {
        let mut events_to_remove = Vec::new();
        for (event_name, self_pattern_rules) in &mut self.map {
            if let Some(other_pattern_rules) = other_map.map.get(event_name) {
                *self_pattern_rules = self_pattern_rules.difference(other_pattern_rules);
                if self_pattern_rules.is_empty() {
                    events_to_remove.push(*event_name);
                }
            }
        }
        for event_name in events_to_remove {
            self.map.remove(&event_name);
        }
        // After removing rules, recalculate total_events_mask.
        self.recalculate_mask();
    }

    /// Checks whether any configured rule exists for a given event type.
    ///
    /// This is an O(1) bitmask check.
    /// `event_name` may be a compound type, such as `ObjectCreatedAll`.
    pub fn has_subscriber(&self, event_name: &EventName) -> bool {
        // event_name.mask() handles compound events correctly
        (self.total_events_mask & event_name.mask()) != 0
    }

    /// Matches the given event and object key against the rules and returns all matching target IDs.
    ///
    /// # Notice
    /// `event_name` should be a specific, non-compound event type, since it is taken from an
    /// `Event` that actually occurred. Compound events (such as ObjectCreatedAll) were already
    /// expanded into single events by `add_rule_config`, so lookups use the single event name;
    /// a caller holding a compound event should expand it first.
    pub fn match_rules(&self, event_name: EventName, object_key: &str) -> TargetIdSet {
        // Use the bitmask to quickly determine whether any rule can match
        if (self.total_events_mask & event_name.mask()) == 0 {
            return TargetIdSet::new(); // No matching rules
        }

        // Go's RulesMap[eventName] yields empty Rules when the key is absent;
        // Rust's HashMap::get returns Option, so map a missing entry to an empty set.
        self.map
            .get(&event_name)
            .map_or_else(TargetIdSet::new, |pr| pr.match_targets(object_key))
    }

    /// Checks if the RulesMap is empty.
    pub fn is_empty(&self) -> bool {
        self.map.is_empty()
    }

    /// Returns a reference to the internal rules, for scenarios such as BucketNotificationConfig::validate.
    pub fn inner(&self) -> &HashMap<EventName, PatternRules> {
        &self.map
    }

    /// Private helper that recalculates `total_events_mask` from the current `map`.
    /// Called after removal operations to keep the mask accurate.
    fn recalculate_mask(&mut self) {
        let mut new_mask = 0u64;
        for event_name in self.map.keys() {
            new_mask |= event_name.mask();
        }
        self.total_events_mask = new_mask;
    }

    /// Removes a single rule.
    #[allow(dead_code)]
    pub fn remove_rule(&mut self, event_name: &EventName, pattern: &str) {
        if let Some(pattern_rules) = self.map.get_mut(event_name) {
            pattern_rules.rules.remove(pattern);
            if pattern_rules.is_empty() {
                self.map.remove(event_name);
            }
        }
        self.recalculate_mask(); // Recompute the mask after removal
    }

    /// Removes the rules for several event names at once.
    #[allow(dead_code)]
    pub fn remove_rules(&mut self, event_names: &[EventName]) {
        for event_name in event_names {
            self.map.remove(event_name);
        }
        self.recalculate_mask(); // Recompute the mask once after the batch
    }

    /// Adds a single rule, updating only the relevant bit of the mask.
    #[allow(dead_code)]
    pub fn update_rule(&mut self, event_name: EventName, pattern: String, target_id: TargetID) {
        self.map.entry(event_name).or_default().add(pattern, target_id);
        self.total_events_mask |= event_name.mask(); // Update only the relevant bitmask
    }
}
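A short hedged sketch of the expand-then-match flow. The concrete `EventName` variants (`ObjectCreatedAll`, `ObjectCreatedPut`) are assumptions based on the comments above and may be named differently in `crate::event`.

```rust
// Hedged sketch; not part of the diff.
fn rules_map_sketch(target: TargetID) {
    let mut map = RulesMap::new();
    // The compound "ObjectCreated:*" event is expanded into its concrete variants.
    map.add_rule_config(&[EventName::ObjectCreatedAll], "images/*".to_string(), target);

    // O(1) bitmask check before doing any pattern matching.
    assert!(map.has_subscriber(&EventName::ObjectCreatedPut));

    // match_rules takes the concrete event name taken from the actual Event.
    let targets = map.match_rules(EventName::ObjectCreatedPut, "images/cat.png");
    assert!(!targets.is_empty());
}
```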
15
crates/notify/src/rules/target_id_set.rs
Normal file
@@ -0,0 +1,15 @@
use crate::arn::TargetID;
use std::collections::HashSet;

/// TargetIdSet - a set of TargetIDs.
pub type TargetIdSet = HashSet<TargetID>;

/// Go-style constructor for a TargetIdSet (could become a trait if needed).
#[allow(dead_code)]
pub(crate) fn new_target_id_set(target_ids: Vec<TargetID>) -> TargetIdSet {
    target_ids.into_iter().collect()
}

// HashSet already provides clone, union, difference and similar operations.
// The Go version's methods return a new set, while HashSet's methods return iterators
// or mutate in place; add wrapper functions if an exact match of Go's API style is needed.
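If those Go-style wrappers are ever wanted, a minimal sketch could look like the following. These helpers are hypothetical and not present in the crate; they only wrap the standard `HashSet` operations.

```rust
// Hypothetical Go-style helpers; TargetIdSet is just a HashSet<TargetID>, so these
// wrap the standard set operations and return new sets, as the Go methods do.
#[allow(dead_code)]
pub(crate) fn union(a: &TargetIdSet, b: &TargetIdSet) -> TargetIdSet {
    a.union(b).cloned().collect()
}

#[allow(dead_code)]
pub(crate) fn difference(a: &TargetIdSet, b: &TargetIdSet) -> TargetIdSet {
    a.difference(b).cloned().collect()
}
```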
274
crates/notify/src/rules/xml_config.rs
Normal file
@@ -0,0 +1,274 @@
use super::pattern;
use crate::arn::{ARN, ArnError, TargetIDError};
use crate::event::EventName;
use serde::{Deserialize, Serialize};
use std::collections::HashSet;
use std::io::Read;
use thiserror::Error;

#[derive(Debug, Error)]
pub enum ParseConfigError {
    #[error("XML parsing error: {0}")]
    XmlError(#[from] quick_xml::errors::serialize::DeError),
    #[error("Invalid filter value: {0}")]
    InvalidFilterValue(String),
    #[error("Invalid filter name: {0}, only 'prefix' or 'suffix' is allowed")]
    InvalidFilterName(String),
    #[error("There can only be one 'prefix' in the filter rule")]
    DuplicatePrefixFilter,
    #[error("There can only be one 'suffix' in the filter rule")]
    DuplicateSuffixFilter,
    #[error("Missing event name")]
    MissingEventName,
    #[error("Duplicate event name: {0}")]
    DuplicateEventName(String), // EventName is an enum; its textual form is stored here
    #[error("Duplicate queue configuration: ID={0:?}, ARN={1}")]
    DuplicateQueueConfiguration(Option<String>, String),
    #[error("Unsupported configuration types (e.g. Lambda, Topic)")]
    UnsupportedConfiguration,
    #[error("ARN not found: {0}")]
    ArnNotFound(String),
    #[error("Unknown region: {0}")]
    UnknownRegion(String),
    #[error("ARN parsing error: {0}")]
    ArnParseError(#[from] ArnError),
    #[error("TargetID parsing error: {0}")]
    TargetIDParseError(#[from] TargetIDError),
    #[error("IO error: {0}")]
    IoError(#[from] std::io::Error),
    #[error("Region mismatch: configured region {config_region}, current region {current_region}")]
    RegionMismatch { config_region: String, current_region: String },
    #[error("ARN {0} not found in the provided list")]
    ArnValidation(String),
}
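Since the variants above carry `#[from]` conversions, callers can bubble errors up with `?` without manual mapping; a minimal hypothetical helper (not part of this file) illustrating that:

```rust
// Hedged sketch; not part of the diff. std::io::Error converts into
// ParseConfigError::IoError via #[from], and from_reader (defined later in this
// file) already returns ParseConfigError, so neither `?` needs a map_err.
fn read_config_from_path(path: &std::path::Path) -> Result<NotificationConfiguration, ParseConfigError> {
    let file = std::fs::File::open(path)?;
    let reader = std::io::BufReader::new(file);
    NotificationConfiguration::from_reader(reader)
}
```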
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
|
||||
pub struct FilterRule {
|
||||
#[serde(rename = "Name")]
|
||||
pub name: String,
|
||||
#[serde(rename = "Value")]
|
||||
pub value: String,
|
||||
}
|
||||
|
||||
impl FilterRule {
|
||||
fn validate(&self) -> Result<(), ParseConfigError> {
|
||||
if self.name != "prefix" && self.name != "suffix" {
|
||||
return Err(ParseConfigError::InvalidFilterName(self.name.clone()));
|
||||
}
|
||||
// ValidateFilterRuleValue from Go:
|
||||
// no "." or ".." path segments, <= 1024 chars, valid UTF-8, no '\'.
|
||||
for segment in self.value.split('/') {
|
||||
if segment == "." || segment == ".." {
|
||||
return Err(ParseConfigError::InvalidFilterValue(self.value.clone()));
|
||||
}
|
||||
}
|
||||
if self.value.len() > 1024 || self.value.contains('\\') || std::str::from_utf8(self.value.as_bytes()).is_err() {
|
||||
return Err(ParseConfigError::InvalidFilterValue(self.value.clone()));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, Default, PartialEq, Eq)]
|
||||
pub struct FilterRuleList {
|
||||
#[serde(rename = "FilterRule", default, skip_serializing_if = "Vec::is_empty")]
|
||||
pub rules: Vec<FilterRule>,
|
||||
}
|
||||
|
||||
impl FilterRuleList {
|
||||
pub fn validate(&self) -> Result<(), ParseConfigError> {
|
||||
let mut has_prefix = false;
|
||||
let mut has_suffix = false;
|
||||
for rule in &self.rules {
|
||||
rule.validate()?;
|
||||
if rule.name == "prefix" {
|
||||
if has_prefix {
|
||||
return Err(ParseConfigError::DuplicatePrefixFilter);
|
||||
}
|
||||
has_prefix = true;
|
||||
} else if rule.name == "suffix" {
|
||||
if has_suffix {
|
||||
return Err(ParseConfigError::DuplicateSuffixFilter);
|
||||
}
|
||||
has_suffix = true;
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn pattern(&self) -> String {
|
||||
let mut prefix_val: Option<&str> = None;
|
||||
let mut suffix_val: Option<&str> = None;
|
||||
|
||||
for rule in &self.rules {
|
||||
if rule.name == "prefix" {
|
||||
prefix_val = Some(&rule.value);
|
||||
} else if rule.name == "suffix" {
|
||||
suffix_val = Some(&rule.value);
|
||||
}
|
||||
}
|
||||
pattern::new_pattern(prefix_val, suffix_val)
|
||||
}
|
||||
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.rules.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, Default, PartialEq, Eq)]
|
||||
pub struct S3KeyFilter {
|
||||
#[serde(rename = "FilterRuleList", default, skip_serializing_if = "FilterRuleList::is_empty")]
|
||||
pub filter_rule_list: FilterRuleList,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq)]
|
||||
pub struct QueueConfig {
|
||||
#[serde(rename = "Id", skip_serializing_if = "Option::is_none")]
|
||||
pub id: Option<String>,
|
||||
#[serde(rename = "Queue")] // This is ARN in XML
|
||||
pub arn: ARN,
|
||||
#[serde(rename = "Event", default)] // XML has multiple <Event> tags
|
||||
pub events: Vec<EventName>, // EventName needs to handle XML (de)serialization if not string
|
||||
#[serde(rename = "Filter", default, skip_serializing_if = "s3key_filter_is_empty")]
|
||||
pub filter: S3KeyFilter,
|
||||
}
|
||||
|
||||
fn s3key_filter_is_empty(f: &S3KeyFilter) -> bool {
|
||||
f.filter_rule_list.is_empty()
|
||||
}
|
||||
|
||||
impl QueueConfig {
|
||||
pub fn validate(&self, region: &str, arn_list: &[String]) -> Result<(), ParseConfigError> {
|
||||
if self.events.is_empty() {
|
||||
return Err(ParseConfigError::MissingEventName);
|
||||
}
|
||||
let mut event_set = HashSet::new();
|
||||
for event in &self.events {
|
||||
// EventName::to_string() or similar for uniqueness check
|
||||
if !event_set.insert(event.to_string()) {
|
||||
return Err(ParseConfigError::DuplicateEventName(event.to_string()));
|
||||
}
|
||||
}
|
||||
self.filter.filter_rule_list.validate()?;
|
||||
|
||||
// Validate ARN (similar to Go's Queue.Validate)
|
||||
// The Go code checks targetList.Exists(q.ARN.TargetID)
|
||||
// Here we check against a provided arn_list
|
||||
let _config_arn_str = self.arn.to_arn_string();
|
||||
if !self.arn.region.is_empty() && self.arn.region != region {
|
||||
return Err(ParseConfigError::UnknownRegion(self.arn.region.clone()));
|
||||
}
|
||||
|
||||
// Construct the ARN string that would be in arn_list
|
||||
// The arn_list contains ARNs like "arn:rustfs:sqs:REGION:ID:NAME"
|
||||
// We need to ensure self.arn (potentially with region adjusted) is in arn_list
|
||||
let effective_arn = ARN {
|
||||
target_id: self.arn.target_id.clone(),
|
||||
region: if self.arn.region.is_empty() {
|
||||
region.to_string()
|
||||
} else {
|
||||
self.arn.region.clone()
|
||||
},
|
||||
service: self.arn.service.clone(), // or default "sqs"
|
||||
partition: self.arn.partition.clone(), // or default "rustfs"
|
||||
};
|
||||
|
||||
if !arn_list.contains(&effective_arn.to_arn_string()) {
|
||||
return Err(ParseConfigError::ArnNotFound(effective_arn.to_arn_string()));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Sets the region if it's not already set in the ARN.
|
||||
pub fn set_region_if_empty(&mut self, region: &str) {
|
||||
if self.arn.region.is_empty() {
|
||||
self.arn.region = region.to_string();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Corresponding to the `lambda` structure in the Go code.
|
||||
/// Used to parse <CloudFunction> ARN from inside the <CloudFunctionConfiguration> tag.
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Default)]
|
||||
pub struct LambdaConfigDetail {
|
||||
#[serde(rename = "CloudFunction")]
|
||||
pub arn: String,
|
||||
// According to AWS S3 documentation, <CloudFunctionConfiguration> usually also contains Id, Event, Filter
|
||||
// But in order to strictly correspond to the Go `lambda` structure provided, only ARN is included here.
|
||||
// If full support is required, additional fields can be added.
|
||||
// For example:
|
||||
// #[serde(rename = "Id", skip_serializing_if = "Option::is_none")]
|
||||
// pub id: Option<String>,
|
||||
// #[serde(rename = "Event", default, skip_serializing_if = "Vec::is_empty")]
|
||||
// pub events: Vec<EventName>,
|
||||
// #[serde(rename = "Filter", default, skip_serializing_if = "S3KeyFilterIsEmpty")]
|
||||
// pub filter: S3KeyFilter,
|
||||
}
|
||||
|
||||
/// Corresponding to the `topic` structure in the Go code.
|
||||
/// Used to parse <Topic> ARN from inside the <TopicConfiguration> tag.
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, PartialEq, Eq, Default)]
|
||||
pub struct TopicConfigDetail {
|
||||
#[serde(rename = "Topic")]
|
||||
pub arn: String,
|
||||
// Similar to LambdaConfigDetail, it can be extended to include fields such as Id, Event, Filter, etc.
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, Default, PartialEq, Eq)]
|
||||
#[serde(rename = "NotificationConfiguration")]
|
||||
pub struct NotificationConfiguration {
|
||||
#[serde(rename = "xmlns", skip_serializing_if = "Option::is_none")]
|
||||
pub xmlns: Option<String>,
|
||||
#[serde(rename = "QueueConfiguration", default, skip_serializing_if = "Vec::is_empty")]
|
||||
pub queue_list: Vec<QueueConfig>,
|
||||
#[serde(
|
||||
rename = "CloudFunctionConfiguration", // Tags for each lambda configuration item in XML
|
||||
default,
|
||||
skip_serializing_if = "Vec::is_empty"
|
||||
)]
|
||||
pub lambda_list: Vec<LambdaConfigDetail>, // Modify: Use a new structure
|
||||
#[serde(
|
||||
rename = "TopicConfiguration", // Tags for each topic configuration item in XML
|
||||
default,
|
||||
skip_serializing_if = "Vec::is_empty"
|
||||
)]
|
||||
pub topic_list: Vec<TopicConfigDetail>, // Modify: Use a new structure
|
||||
}
|
||||
|
||||
impl NotificationConfiguration {
|
||||
pub fn from_reader<R: Read + std::io::BufRead>(reader: R) -> Result<Self, ParseConfigError> {
|
||||
let config: NotificationConfiguration = quick_xml::de::from_reader(reader)?;
|
||||
Ok(config)
|
||||
}
|
||||
|
||||
pub fn validate(&self, current_region: &str, arn_list: &[String]) -> Result<(), ParseConfigError> {
|
||||
// Verification logic remains the same: if lambda_list or topic_list is not empty, it is considered an unsupported configuration
|
||||
if !self.lambda_list.is_empty() || !self.topic_list.is_empty() {
|
||||
return Err(ParseConfigError::UnsupportedConfiguration);
|
||||
}
|
||||
|
||||
let mut unique_queues = HashSet::new();
|
||||
for queue_config in &self.queue_list {
|
||||
queue_config.validate(current_region, arn_list)?;
|
||||
let queue_key = (
|
||||
queue_config.id.clone(),
|
||||
queue_config.arn.to_arn_string(), // Assuming that the ARN structure implements Display or ToString
|
||||
);
|
||||
if !unique_queues.insert(queue_key.clone()) {
|
||||
return Err(ParseConfigError::DuplicateQueueConfiguration(queue_key.0, queue_key.1));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn set_defaults(&mut self, region: &str) {
|
||||
for queue_config in &mut self.queue_list {
|
||||
queue_config.set_region_if_empty(region);
|
||||
}
|
||||
if self.xmlns.is_none() {
|
||||
self.xmlns = Some("http://s3.amazonaws.com/doc/2006-03-01/".to_string());
|
||||
}
|
||||
        // Note: if LambdaConfigDetail and TopicConfigDetail gain region-like fields in the future,
        // their defaults may also need to be set here. As currently defined, they only contain ARN strings.
}
|
||||
}
|
||||
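For reference, a hedged sketch of the kind of document `NotificationConfiguration::from_reader` is meant to accept. The element nesting mirrors the serde renames above (`Queue`, `Event`, `Filter`, `FilterRuleList`, `FilterRule`); the ARN and event strings are illustrative and depend on how `crate::arn` and `crate::event` deserialize.

```rust
// Hedged sketch; not part of the diff. Illustrative XML shape only.
fn example_config_xml() -> &'static str {
    r#"<NotificationConfiguration xmlns="http://s3.amazonaws.com/doc/2006-03-01/">
  <QueueConfiguration>
    <Id>rule-1</Id>
    <Queue>arn:rustfs:sqs:us-east-1:1:webhook</Queue>
    <Event>s3:ObjectCreated:*</Event>
    <Filter>
      <FilterRuleList>
        <FilterRule><Name>prefix</Name><Value>images/</Value></FilterRule>
      </FilterRuleList>
    </Filter>
  </QueueConfiguration>
</NotificationConfiguration>"#
}
```

Parsing would then go through `NotificationConfiguration::from_reader(std::io::Cursor::new(example_config_xml()))`, followed by `set_defaults` and `validate`, exactly as `BucketNotificationConfig::from_xml` does.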
483
crates/notify/src/store.rs
Normal file
@@ -0,0 +1,483 @@
use crate::error::StoreError;
|
||||
use serde::{Serialize, de::DeserializeOwned};
|
||||
use snap::raw::{Decoder, Encoder};
|
||||
use std::sync::{Arc, RwLock};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
marker::PhantomData,
|
||||
path::PathBuf,
|
||||
time::{SystemTime, UNIX_EPOCH},
|
||||
};
|
||||
use tracing::{debug, warn};
|
||||
use uuid::Uuid;
|
||||
|
||||
pub const DEFAULT_LIMIT: u64 = 100000; // Default store limit
|
||||
pub const DEFAULT_EXT: &str = ".unknown"; // Default file extension
|
||||
pub const COMPRESS_EXT: &str = ".snappy"; // Extension for compressed files
|
||||
|
||||
/// STORE_EXTENSION - file extension of an event file in store
|
||||
pub const STORE_EXTENSION: &str = ".event";
|
||||
|
||||
/// Represents a key for an entry in the store
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct Key {
|
||||
/// The name of the key (UUID)
|
||||
pub name: String,
|
||||
/// The file extension for the entry
|
||||
pub extension: String,
|
||||
/// The number of items in the entry (for batch storage)
|
||||
pub item_count: usize,
|
||||
/// Whether the entry is compressed
|
||||
pub compress: bool,
|
||||
}
|
||||
|
||||
impl Key {
|
||||
/// Converts the key to a string (filename)
|
||||
pub fn to_key_string(&self) -> String {
|
||||
let name_part = if self.item_count > 1 {
|
||||
format!("{}:{}", self.item_count, self.name)
|
||||
} else {
|
||||
self.name.clone()
|
||||
};
|
||||
|
||||
let mut file_name = name_part;
|
||||
if !self.extension.is_empty() {
|
||||
file_name.push_str(&self.extension);
|
||||
}
|
||||
|
||||
if self.compress {
|
||||
file_name.push_str(COMPRESS_EXT);
|
||||
}
|
||||
file_name
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Key {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let name_part = if self.item_count > 1 {
|
||||
format!("{}:{}", self.item_count, self.name)
|
||||
} else {
|
||||
self.name.clone()
|
||||
};
|
||||
|
||||
let mut file_name = name_part;
|
||||
if !self.extension.is_empty() {
|
||||
file_name.push_str(&self.extension);
|
||||
}
|
||||
|
||||
if self.compress {
|
||||
file_name.push_str(COMPRESS_EXT);
|
||||
}
|
||||
write!(f, "{}", file_name)
|
||||
}
|
||||
}
|
||||
|
||||
/// Parses a string into a Key
|
||||
pub fn parse_key(s: &str) -> Key {
|
||||
debug!("Parsing key: {}", s);
|
||||
|
||||
let mut name = s.to_string();
|
||||
let mut extension = String::new();
|
||||
let mut item_count = 1;
|
||||
let mut compress = false;
|
||||
|
||||
// Check for compressed suffixes
|
||||
if name.ends_with(COMPRESS_EXT) {
|
||||
compress = true;
|
||||
name = name[..name.len() - COMPRESS_EXT.len()].to_string();
|
||||
}
|
||||
|
||||
// Number of batch items parsed
|
||||
if let Some(colon_pos) = name.find(':') {
|
||||
if let Ok(count) = name[..colon_pos].parse::<usize>() {
|
||||
item_count = count;
|
||||
name = name[colon_pos + 1..].to_string();
|
||||
}
|
||||
}
|
||||
|
||||
// Resolve extension
|
||||
if let Some(dot_pos) = name.rfind('.') {
|
||||
extension = name[dot_pos..].to_string();
|
||||
name = name[..dot_pos].to_string();
|
||||
}
|
||||
|
||||
debug!(
|
||||
"Parsed key - name: {}, extension: {}, item_count: {}, compress: {}",
|
||||
name, extension, item_count, compress
|
||||
);
|
||||
|
||||
Key {
|
||||
name,
|
||||
extension,
|
||||
item_count,
|
||||
compress,
|
||||
}
|
||||
}
|
||||
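A quick hedged illustration of the filename format that `Key` produces and `parse_key` reads back (the UUID value is made up for the example):

```rust
// Hedged sketch; not part of the diff.
fn key_roundtrip_sketch() {
    let key = Key {
        name: "0b0d55f3-0000-4000-8000-000000000000".to_string(),
        extension: STORE_EXTENSION.to_string(), // ".event"
        item_count: 5,
        compress: true,
    };
    // Batch of 5, compressed: "<count>:<uuid>.event.snappy"
    let s = key.to_key_string();
    assert_eq!(s, "5:0b0d55f3-0000-4000-8000-000000000000.event.snappy");

    let parsed = parse_key(&s);
    assert_eq!(parsed.item_count, 5);
    assert_eq!(parsed.extension, ".event");
    assert!(parsed.compress);
}
```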
|
||||
/// Trait for a store that can store and retrieve items of type T
|
||||
pub trait Store<T>: Send + Sync {
|
||||
/// The error type for the store
|
||||
type Error;
|
||||
/// The key type for the store
|
||||
type Key;
|
||||
|
||||
/// Opens the store
|
||||
fn open(&self) -> Result<(), Self::Error>;
|
||||
|
||||
/// Stores a single item
|
||||
fn put(&self, item: Arc<T>) -> Result<Self::Key, Self::Error>;
|
||||
|
||||
/// Stores multiple items in a single batch
|
||||
fn put_multiple(&self, items: Vec<T>) -> Result<Self::Key, Self::Error>;
|
||||
|
||||
/// Retrieves a single item by key
|
||||
fn get(&self, key: &Self::Key) -> Result<T, Self::Error>;
|
||||
|
||||
/// Retrieves multiple items by key
|
||||
fn get_multiple(&self, key: &Self::Key) -> Result<Vec<T>, Self::Error>;
|
||||
|
||||
/// Deletes an item by key
|
||||
fn del(&self, key: &Self::Key) -> Result<(), Self::Error>;
|
||||
|
||||
/// Lists all keys in the store
|
||||
fn list(&self) -> Vec<Self::Key>;
|
||||
|
||||
/// Returns the number of items in the store
|
||||
fn len(&self) -> usize;
|
||||
|
||||
/// Returns true if the store is empty
|
||||
fn is_empty(&self) -> bool;
|
||||
|
||||
/// Clones the store into a boxed trait object
|
||||
fn boxed_clone(&self) -> Box<dyn Store<T, Error = Self::Error, Key = Self::Key> + Send + Sync>;
|
||||
}
|
||||
|
||||
/// A store that uses the filesystem to persist events in a queue
|
||||
pub struct QueueStore<T> {
|
||||
entry_limit: u64,
|
||||
directory: PathBuf,
|
||||
file_ext: String,
|
||||
entries: Arc<RwLock<HashMap<String, i64>>>, // key -> modtime as unix nano
|
||||
_phantom: PhantomData<T>,
|
||||
}
|
||||
|
||||
impl<T> Clone for QueueStore<T> {
|
||||
fn clone(&self) -> Self {
|
||||
QueueStore {
|
||||
entry_limit: self.entry_limit,
|
||||
directory: self.directory.clone(),
|
||||
file_ext: self.file_ext.clone(),
|
||||
entries: Arc::clone(&self.entries),
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<T: Serialize + DeserializeOwned + Send + Sync> QueueStore<T> {
|
||||
/// Creates a new QueueStore
|
||||
pub fn new(directory: impl Into<PathBuf>, limit: u64, ext: &str) -> Self {
|
||||
let file_ext = if ext.is_empty() { DEFAULT_EXT } else { ext };
|
||||
|
||||
QueueStore {
|
||||
directory: directory.into(),
|
||||
entry_limit: if limit == 0 { DEFAULT_LIMIT } else { limit },
|
||||
file_ext: file_ext.to_string(),
|
||||
entries: Arc::new(RwLock::new(HashMap::with_capacity(limit as usize))),
|
||||
_phantom: PhantomData,
|
||||
}
|
||||
}
|
||||
|
||||
/// Returns the full path for a key
|
||||
fn file_path(&self, key: &Key) -> PathBuf {
|
||||
self.directory.join(key.to_string())
|
||||
}
|
||||
|
||||
/// Reads a file for the given key
|
||||
fn read_file(&self, key: &Key) -> Result<Vec<u8>, StoreError> {
|
||||
let path = self.file_path(key);
|
||||
debug!("Reading file for key: {},path: {}", key.to_string(), path.display());
|
||||
let data = std::fs::read(&path).map_err(|e| {
|
||||
if e.kind() == std::io::ErrorKind::NotFound {
|
||||
StoreError::NotFound
|
||||
} else {
|
||||
StoreError::Io(e)
|
||||
}
|
||||
})?;
|
||||
|
||||
if data.is_empty() {
|
||||
return Err(StoreError::NotFound);
|
||||
}
|
||||
|
||||
if key.compress {
|
||||
let mut decoder = Decoder::new();
|
||||
decoder
|
||||
.decompress_vec(&data)
|
||||
.map_err(|e| StoreError::Compression(e.to_string()))
|
||||
} else {
|
||||
Ok(data)
|
||||
}
|
||||
}
|
||||
|
||||
/// Writes data to a file for the given key
|
||||
fn write_file(&self, key: &Key, data: &[u8]) -> Result<(), StoreError> {
|
||||
let path = self.file_path(key);
|
||||
// Create directory if it doesn't exist
|
||||
if let Some(parent) = path.parent() {
|
||||
std::fs::create_dir_all(parent).map_err(StoreError::Io)?;
|
||||
}
|
||||
|
||||
let data = if key.compress {
|
||||
let mut encoder = Encoder::new();
|
||||
encoder
|
||||
.compress_vec(data)
|
||||
.map_err(|e| StoreError::Compression(e.to_string()))?
|
||||
} else {
|
||||
data.to_vec()
|
||||
};
|
||||
|
||||
std::fs::write(&path, &data).map_err(StoreError::Io)?;
|
||||
let modified = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_nanos() as i64;
|
||||
let mut entries = self
|
||||
.entries
|
||||
.write()
|
||||
.map_err(|_| StoreError::Internal("Failed to acquire write lock on entries".to_string()))?;
|
||||
entries.insert(key.to_string(), modified);
|
||||
debug!("Wrote event to store: {}", key.to_string());
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
impl<T> Store<T> for QueueStore<T>
|
||||
where
|
||||
T: Serialize + DeserializeOwned + Clone + Send + Sync + 'static,
|
||||
{
|
||||
type Error = StoreError;
|
||||
type Key = Key;
|
||||
|
||||
fn open(&self) -> Result<(), Self::Error> {
|
||||
std::fs::create_dir_all(&self.directory).map_err(StoreError::Io)?;
|
||||
|
||||
let entries = std::fs::read_dir(&self.directory).map_err(StoreError::Io)?;
|
||||
// Get the write lock to update the internal state
|
||||
let mut entries_map = self
|
||||
.entries
|
||||
.write()
|
||||
.map_err(|_| StoreError::Internal("Failed to acquire write lock on entries".to_string()))?;
|
||||
for entry in entries {
|
||||
let entry = entry.map_err(StoreError::Io)?;
|
||||
let metadata = entry.metadata().map_err(StoreError::Io)?;
|
||||
if metadata.is_file() {
|
||||
let modified = metadata.modified().map_err(StoreError::Io)?;
|
||||
let unix_nano = modified.duration_since(UNIX_EPOCH).unwrap_or_default().as_nanos() as i64;
|
||||
|
||||
let file_name = entry.file_name().to_string_lossy().to_string();
|
||||
entries_map.insert(file_name, unix_nano);
|
||||
}
|
||||
}
|
||||
|
||||
debug!("Opened store at: {:?}", self.directory);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn put(&self, item: Arc<T>) -> Result<Self::Key, Self::Error> {
|
||||
// Check storage limits
|
||||
{
|
||||
let entries = self
|
||||
.entries
|
||||
.read()
|
||||
.map_err(|_| StoreError::Internal("Failed to acquire read lock on entries".to_string()))?;
|
||||
|
||||
if entries.len() as u64 >= self.entry_limit {
|
||||
return Err(StoreError::LimitExceeded);
|
||||
}
|
||||
}
|
||||
|
||||
let uuid = Uuid::new_v4();
|
||||
let key = Key {
|
||||
name: uuid.to_string(),
|
||||
extension: self.file_ext.clone(),
|
||||
item_count: 1,
|
||||
compress: true,
|
||||
};
|
||||
|
||||
let data = serde_json::to_vec(&item).map_err(|e| StoreError::Serialization(e.to_string()))?;
|
||||
self.write_file(&key, &data)?;
|
||||
|
||||
Ok(key)
|
||||
}
|
||||
|
||||
fn put_multiple(&self, items: Vec<T>) -> Result<Self::Key, Self::Error> {
|
||||
// Check storage limits
|
||||
{
|
||||
let entries = self
|
||||
.entries
|
||||
.read()
|
||||
.map_err(|_| StoreError::Internal("Failed to acquire read lock on entries".to_string()))?;
|
||||
|
||||
if entries.len() as u64 >= self.entry_limit {
|
||||
return Err(StoreError::LimitExceeded);
|
||||
}
|
||||
}
|
||||
if items.is_empty() {
|
||||
// Or return an error, or a special key?
|
||||
return Err(StoreError::Internal("Cannot put_multiple with empty items list".to_string()));
|
||||
}
|
||||
let uuid = Uuid::new_v4();
|
||||
let key = Key {
|
||||
name: uuid.to_string(),
|
||||
extension: self.file_ext.clone(),
|
||||
item_count: items.len(),
|
||||
compress: true,
|
||||
};
|
||||
|
||||
// Serialize all items into a single Vec<u8>
|
||||
// This current approach for get_multiple/put_multiple assumes items are concatenated JSON objects.
|
||||
// This might be problematic for deserialization if not handled carefully.
|
||||
// A better approach for multiple items might be to store them as a JSON array `Vec<T>`.
|
||||
// For now, sticking to current logic of concatenating.
|
||||
let mut buffer = Vec::new();
|
||||
for item in items {
|
||||
// If items are Vec<Event>, and Event is large, this could be inefficient.
|
||||
// The current get_multiple deserializes one by one.
|
||||
let item_data = serde_json::to_vec(&item).map_err(|e| StoreError::Serialization(e.to_string()))?;
|
||||
buffer.extend_from_slice(&item_data);
|
||||
// If using JSON array: buffer = serde_json::to_vec(&items)?
|
||||
}
|
||||
|
||||
self.write_file(&key, &buffer)?;
|
||||
|
||||
Ok(key)
|
||||
}
|
||||
|
||||
fn get(&self, key: &Self::Key) -> Result<T, Self::Error> {
|
||||
if key.item_count != 1 {
|
||||
return Err(StoreError::Internal(format!(
|
||||
"get() called on a batch key ({} items), use get_multiple()",
|
||||
key.item_count
|
||||
)));
|
||||
}
|
||||
let items = self.get_multiple(key)?;
|
||||
items.into_iter().next().ok_or(StoreError::NotFound)
|
||||
}
|
||||
|
||||
fn get_multiple(&self, key: &Self::Key) -> Result<Vec<T>, Self::Error> {
|
||||
debug!("Reading items from store for key: {}", key.to_string());
|
||||
let data = self.read_file(key)?;
|
||||
if data.is_empty() {
|
||||
return Err(StoreError::Deserialization("Cannot deserialize empty data".to_string()));
|
||||
}
|
||||
let mut items = Vec::with_capacity(key.item_count);
|
||||
|
||||
// let mut deserializer = serde_json::Deserializer::from_slice(&data);
|
||||
// while let Ok(item) = serde::Deserialize::deserialize(&mut deserializer) {
|
||||
// items.push(item);
|
||||
// }
|
||||
|
||||
// This deserialization logic assumes multiple JSON objects are simply concatenated in the file.
|
||||
// This is fragile. It's better to store a JSON array `[item1, item2, ...]`
|
||||
// or use a streaming deserializer that can handle multiple top-level objects if that's the format.
|
||||
// For now, assuming serde_json::Deserializer::from_slice can handle this if input is well-formed.
|
||||
let mut deserializer = serde_json::Deserializer::from_slice(&data).into_iter::<T>();
|
||||
|
||||
for _ in 0..key.item_count {
|
||||
match deserializer.next() {
|
||||
Some(Ok(item)) => items.push(item),
|
||||
Some(Err(e)) => {
|
||||
return Err(StoreError::Deserialization(format!("Failed to deserialize item in batch: {}", e)));
|
||||
}
|
||||
None => {
|
||||
// Reached end of stream sooner than item_count
|
||||
if items.len() < key.item_count && !items.is_empty() {
|
||||
// Partial read
|
||||
warn!(
|
||||
"Expected {} items for key {}, but only found {}. Possible data corruption or incorrect item_count.",
|
||||
key.item_count,
|
||||
key.to_string(),
|
||||
items.len()
|
||||
);
|
||||
// Depending on strictness, this could be an error.
|
||||
} else if items.is_empty() {
|
||||
// No items at all, but file existed
|
||||
return Err(StoreError::Deserialization(format!(
|
||||
"No items deserialized for key {} though file existed.",
|
||||
key
|
||||
)));
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if items.is_empty() && key.item_count > 0 {
|
||||
return Err(StoreError::Deserialization("No items found".to_string()));
|
||||
}
|
||||
|
||||
Ok(items)
|
||||
}
|
||||
|
||||
fn del(&self, key: &Self::Key) -> Result<(), Self::Error> {
|
||||
let path = self.file_path(key);
|
||||
std::fs::remove_file(&path).map_err(|e| {
|
||||
if e.kind() == std::io::ErrorKind::NotFound {
|
||||
// If file not found, still try to remove from entries map in case of inconsistency
|
||||
warn!(
|
||||
"File not found for key {} during del, but proceeding to remove from entries map.",
|
||||
key.to_string()
|
||||
);
|
||||
StoreError::NotFound
|
||||
} else {
|
||||
StoreError::Io(e)
|
||||
}
|
||||
})?;
|
||||
|
||||
// Get the write lock to update the internal state
|
||||
let mut entries = self
|
||||
.entries
|
||||
.write()
|
||||
.map_err(|_| StoreError::Internal("Failed to acquire write lock on entries".to_string()))?;
|
||||
|
||||
if entries.remove(&key.to_string()).is_none() {
|
||||
// Key was not in the map, could be an inconsistency or already deleted.
|
||||
// This is not necessarily an error if the file deletion succeeded or was NotFound.
|
||||
debug!("Key {} not found in entries map during del, might have been already removed.", key);
|
||||
}
|
||||
debug!("Deleted event from store: {}", key.to_string());
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn list(&self) -> Vec<Self::Key> {
|
||||
// Get the read lock to read the internal state
|
||||
let entries = match self.entries.read() {
|
||||
Ok(entries) => entries,
|
||||
Err(_) => {
|
||||
debug!("Failed to acquire read lock on entries for listing");
|
||||
return Vec::new();
|
||||
}
|
||||
};
|
||||
|
||||
let mut entries_vec: Vec<_> = entries.iter().collect();
|
||||
// Sort by modtime (value in HashMap) to process oldest first
|
||||
entries_vec.sort_by(|a, b| a.1.cmp(b.1)); // Oldest first
|
||||
|
||||
entries_vec.into_iter().map(|(k, _)| parse_key(k)).collect()
|
||||
}
|
||||
|
||||
fn len(&self) -> usize {
|
||||
// Get the read lock to read the internal state
|
||||
match self.entries.read() {
|
||||
Ok(entries) => entries.len(),
|
||||
Err(_) => {
|
||||
debug!("Failed to acquire read lock on entries for len");
|
||||
0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn is_empty(&self) -> bool {
|
||||
self.len() == 0
|
||||
}
|
||||
|
||||
fn boxed_clone(&self) -> Box<dyn Store<T, Error = Self::Error, Key = Self::Key> + Send + Sync> {
|
||||
Box::new(self.clone()) as Box<dyn Store<T, Error = Self::Error, Key = Self::Key> + Send + Sync>
|
||||
}
|
||||
}
|
||||
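A hedged end-to-end sketch of the queue store, written against the trait methods above; the payload type and directory path are illustrative.

```rust
// Hedged sketch; not part of the diff. Shows the open / put / list / get / del cycle.
fn queue_store_sketch() -> Result<(), StoreError> {
    #[derive(serde::Serialize, serde::Deserialize, Clone)]
    struct Payload {
        message: String,
    }

    // limit = 0 falls back to DEFAULT_LIMIT.
    let store: QueueStore<Payload> = QueueStore::new("/tmp/rustfs-notify-queue", 0, STORE_EXTENSION);
    store.open()?; // creates the directory and indexes existing entries

    let key = store.put(Arc::new(Payload { message: "hello".into() }))?;
    assert_eq!(store.list().len(), 1);

    let restored = store.get(&key)?;
    assert_eq!(restored.message, "hello");

    store.del(&key)?;
    assert!(store.is_empty());
    Ok(())
}
```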
295
crates/notify/src/stream.rs
Normal file
@@ -0,0 +1,295 @@
use crate::{
|
||||
Event, StoreError,
|
||||
error::TargetError,
|
||||
integration::NotificationMetrics,
|
||||
store::{Key, Store},
|
||||
target::Target,
|
||||
};
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, Instant};
|
||||
use tokio::sync::{Semaphore, mpsc};
|
||||
use tokio::time::sleep;
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
/// Streams events from the store to the target
|
||||
pub async fn stream_events(
|
||||
store: &mut (dyn Store<Event, Error = StoreError, Key = Key> + Send),
|
||||
target: &dyn Target,
|
||||
mut cancel_rx: mpsc::Receiver<()>,
|
||||
) {
|
||||
info!("Starting event stream for target: {}", target.name());
|
||||
|
||||
// Retry configuration
|
||||
const MAX_RETRIES: usize = 5;
|
||||
const RETRY_DELAY: Duration = Duration::from_secs(5);
|
||||
|
||||
loop {
|
||||
// Check for cancellation signal
|
||||
if cancel_rx.try_recv().is_ok() {
|
||||
info!("Cancellation received for target: {}", target.name());
|
||||
return;
|
||||
}
|
||||
|
||||
// Get list of events in the store
|
||||
let keys = store.list();
|
||||
if keys.is_empty() {
|
||||
// No events, wait before checking again
|
||||
sleep(Duration::from_secs(1)).await;
|
||||
continue;
|
||||
}
|
||||
|
||||
// Process each event
|
||||
for key in keys {
|
||||
// Check for cancellation before processing each event
|
||||
if cancel_rx.try_recv().is_ok() {
|
||||
info!("Cancellation received during processing for target: {}", target.name());
|
||||
return;
|
||||
}
|
||||
|
||||
let mut retry_count = 0;
|
||||
let mut success = false;
|
||||
|
||||
// Retry logic
|
||||
while retry_count < MAX_RETRIES && !success {
|
||||
match target.send_from_store(key.clone()).await {
|
||||
Ok(_) => {
|
||||
info!("Successfully sent event for target: {}", target.name());
|
||||
success = true;
|
||||
}
|
||||
Err(e) => {
|
||||
// Handle specific errors
|
||||
match &e {
|
||||
TargetError::NotConnected => {
|
||||
warn!("Target {} not connected, retrying...", target.name());
|
||||
retry_count += 1;
|
||||
sleep(RETRY_DELAY).await;
|
||||
}
|
||||
TargetError::Timeout(_) => {
|
||||
warn!("Timeout for target {}, retrying...", target.name());
|
||||
retry_count += 1;
|
||||
                            sleep(Duration::from_secs((retry_count * 5) as u64)).await; // Linear backoff: 5s, 10s, 15s, ...
}
|
||||
_ => {
|
||||
// Permanent error, skip this event
|
||||
error!("Permanent error for target {}: {}", target.name(), e);
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
            // Log events that could not be delivered after the maximum number of retries
if retry_count >= MAX_RETRIES && !success {
|
||||
warn!("Max retries exceeded for event {}, target: {}, skipping", key.to_string(), target.name());
|
||||
}
|
||||
}
|
||||
|
||||
// Small delay before next iteration
|
||||
sleep(Duration::from_millis(100)).await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Starts the event streaming process for a target
|
||||
pub fn start_event_stream(
|
||||
mut store: Box<dyn Store<Event, Error = StoreError, Key = Key> + Send>,
|
||||
target: Arc<dyn Target + Send + Sync>,
|
||||
) -> mpsc::Sender<()> {
|
||||
let (cancel_tx, cancel_rx) = mpsc::channel(1);
|
||||
|
||||
tokio::spawn(async move {
|
||||
stream_events(&mut *store, &*target, cancel_rx).await;
|
||||
info!("Event stream stopped for target: {}", target.name());
|
||||
});
|
||||
|
||||
cancel_tx
|
||||
}
|
||||
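For orientation, a hedged sketch of wiring a store and a target into the stream and shutting it down later; the concrete store/target construction is elided because it depends on the target implementation (webhook, MQTT, ...).

```rust
// Hedged sketch; not part of the diff.
async fn run_stream_sketch(
    store: Box<dyn Store<Event, Error = StoreError, Key = Key> + Send>,
    target: Arc<dyn Target + Send + Sync>,
) {
    let cancel_tx = start_event_stream(store, target);

    // ... later, on shutdown, signal the background task to stop:
    let _ = cancel_tx.send(()).await;
}
```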
|
||||
/// Start event stream with batch processing
|
||||
pub fn start_event_stream_with_batching(
|
||||
mut store: Box<dyn Store<Event, Error = StoreError, Key = Key> + Send>,
|
||||
target: Arc<dyn Target + Send + Sync>,
|
||||
metrics: Arc<NotificationMetrics>,
|
||||
semaphore: Arc<Semaphore>,
|
||||
) -> mpsc::Sender<()> {
|
||||
let (cancel_tx, cancel_rx) = mpsc::channel(1);
|
||||
debug!("Starting event stream with batching for target: {}", target.name());
|
||||
tokio::spawn(async move {
|
||||
stream_events_with_batching(&mut *store, &*target, cancel_rx, metrics, semaphore).await;
|
||||
info!("Event stream stopped for target: {}", target.name());
|
||||
});
|
||||
|
||||
cancel_tx
|
||||
}
|
||||
|
||||
/// Event stream processing with batch processing
|
||||
pub async fn stream_events_with_batching(
|
||||
store: &mut (dyn Store<Event, Error = StoreError, Key = Key> + Send),
|
||||
target: &dyn Target,
|
||||
mut cancel_rx: mpsc::Receiver<()>,
|
||||
metrics: Arc<NotificationMetrics>,
|
||||
semaphore: Arc<Semaphore>,
|
||||
) {
|
||||
info!("Starting event stream with batching for target: {}", target.name());
|
||||
|
||||
// Configuration parameters
|
||||
const DEFAULT_BATCH_SIZE: usize = 1;
|
||||
let batch_size = std::env::var("RUSTFS_EVENT_BATCH_SIZE")
|
||||
.ok()
|
||||
.and_then(|s| s.parse::<usize>().ok())
|
||||
.unwrap_or(DEFAULT_BATCH_SIZE);
|
||||
const BATCH_TIMEOUT: Duration = Duration::from_secs(5);
|
||||
const MAX_RETRIES: usize = 5;
|
||||
const BASE_RETRY_DELAY: Duration = Duration::from_secs(2);
|
||||
|
||||
let mut batch = Vec::with_capacity(batch_size);
|
||||
let mut batch_keys = Vec::with_capacity(batch_size);
|
||||
let mut last_flush = Instant::now();
|
||||
|
||||
loop {
|
||||
// Check the cancel signal
|
||||
if cancel_rx.try_recv().is_ok() {
|
||||
info!("Cancellation received for target: {}", target.name());
|
||||
return;
|
||||
}
|
||||
|
||||
// Get a list of events in storage
|
||||
let keys = store.list();
|
||||
debug!("Found {} keys in store for target: {}", keys.len(), target.name());
|
||||
if keys.is_empty() {
|
||||
// If there is data in the batch and timeout, refresh the batch
|
||||
if !batch.is_empty() && last_flush.elapsed() >= BATCH_TIMEOUT {
|
||||
process_batch(&mut batch, &mut batch_keys, target, MAX_RETRIES, BASE_RETRY_DELAY, &metrics, &semaphore).await;
|
||||
last_flush = Instant::now();
|
||||
}
|
||||
|
||||
// No event, wait before checking
|
||||
tokio::time::sleep(Duration::from_millis(500)).await;
|
||||
continue;
|
||||
}
|
||||
|
||||
// Handle each event
|
||||
for key in keys {
|
||||
// Check the cancel signal again
|
||||
if cancel_rx.try_recv().is_ok() {
|
||||
info!("Cancellation received during processing for target: {}", target.name());
|
||||
|
||||
// Processing collected batches before exiting
|
||||
if !batch.is_empty() {
|
||||
process_batch(&mut batch, &mut batch_keys, target, MAX_RETRIES, BASE_RETRY_DELAY, &metrics, &semaphore).await;
|
||||
}
|
||||
return;
|
||||
}
|
||||
|
||||
// Try to get events from storage
|
||||
match store.get(&key) {
|
||||
Ok(event) => {
|
||||
// Add to batch
|
||||
batch.push(event);
|
||||
batch_keys.push(key);
|
||||
metrics.increment_processing();
|
||||
|
||||
// If the batch is full or enough time has passed since the last refresh, the batch will be processed
|
||||
if batch.len() >= batch_size || last_flush.elapsed() >= BATCH_TIMEOUT {
|
||||
process_batch(&mut batch, &mut batch_keys, target, MAX_RETRIES, BASE_RETRY_DELAY, &metrics, &semaphore)
|
||||
.await;
|
||||
last_flush = Instant::now();
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to target: {}, get event {} from store: {}", target.name(), key.to_string(), e);
|
||||
// Consider deleting unreadable events to prevent infinite loops from trying to read
|
||||
match store.del(&key) {
|
||||
Ok(_) => {
|
||||
info!("Deleted corrupted event {} from store", key.to_string());
|
||||
}
|
||||
Err(del_err) => {
|
||||
error!("Failed to delete corrupted event {}: {}", key.to_string(), del_err);
|
||||
}
|
||||
}
|
||||
|
||||
metrics.increment_failed();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// A small delay will be conducted to check the next round
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Processing event batches
|
||||
async fn process_batch(
|
||||
batch: &mut Vec<Event>,
|
||||
batch_keys: &mut Vec<Key>,
|
||||
target: &dyn Target,
|
||||
max_retries: usize,
|
||||
base_delay: Duration,
|
||||
metrics: &Arc<NotificationMetrics>,
|
||||
semaphore: &Arc<Semaphore>,
|
||||
) {
|
||||
debug!("Processing batch of {} events for target: {}", batch.len(), target.name());
|
||||
if batch.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
// Obtain semaphore permission to limit concurrency
|
||||
let permit = match semaphore.clone().acquire_owned().await {
|
||||
Ok(permit) => permit,
|
||||
Err(e) => {
|
||||
error!("Failed to acquire semaphore permit: {}", e);
|
||||
return;
|
||||
}
|
||||
};
|
||||
|
||||
// Handle every event in the batch
|
||||
for (_event, key) in batch.iter().zip(batch_keys.iter()) {
|
||||
let mut retry_count = 0;
|
||||
let mut success = false;
|
||||
|
||||
// Retry logic
|
||||
while retry_count < max_retries && !success {
|
||||
match target.send_from_store(key.clone()).await {
|
||||
Ok(_) => {
|
||||
info!("Successfully sent event for target: {}, Key: {}", target.name(), key.to_string());
|
||||
success = true;
|
||||
metrics.increment_processed();
|
||||
}
|
||||
Err(e) => {
|
||||
// Different retry strategies are adopted according to the error type
|
||||
match &e {
|
||||
TargetError::NotConnected => {
|
||||
warn!("Target {} not connected, retrying...", target.name());
|
||||
retry_count += 1;
|
||||
tokio::time::sleep(base_delay * (1 << retry_count)).await; // Exponential backoff
|
||||
}
|
||||
TargetError::Timeout(_) => {
|
||||
warn!("Timeout for target {}, retrying...", target.name());
|
||||
retry_count += 1;
|
||||
tokio::time::sleep(base_delay * (1 << retry_count)).await;
|
||||
}
|
||||
_ => {
|
||||
// Permanent error, skip this event
|
||||
error!("Permanent error for target {}: {}", target.name(), e);
|
||||
metrics.increment_failed();
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
        // Handle the case where the maximum number of retries has been exhausted
if retry_count >= max_retries && !success {
|
||||
warn!("Max retries exceeded for event {}, target: {}, skipping", key.to_string(), target.name());
|
||||
metrics.increment_failed();
|
||||
}
|
||||
}
|
||||
|
||||
// Clear processed batches
|
||||
batch.clear();
|
||||
batch_keys.clear();
|
||||
|
||||
// Release semaphore permission (via drop)
|
||||
drop(permit);
|
||||
}
|
||||
97
crates/notify/src/target/mod.rs
Normal file
@@ -0,0 +1,97 @@
use crate::arn::TargetID;
use crate::store::{Key, Store};
use crate::{Event, StoreError, TargetError};
use async_trait::async_trait;
use std::sync::Arc;

pub mod mqtt;
pub mod webhook;

/// Trait for notification targets
#[async_trait]
pub trait Target: Send + Sync + 'static {
    /// Returns the ID of the target
    fn id(&self) -> TargetID;

    /// Returns the name of the target
    fn name(&self) -> String {
        self.id().to_string()
    }

    /// Checks if the target is active and reachable
    async fn is_active(&self) -> Result<bool, TargetError>;

    /// Saves an event (either sends it immediately or stores it for later)
    async fn save(&self, event: Arc<Event>) -> Result<(), TargetError>;

    /// Sends an event from the store
    async fn send_from_store(&self, key: Key) -> Result<(), TargetError>;

    /// Closes the target and releases resources
    async fn close(&self) -> Result<(), TargetError>;

    /// Returns the store associated with the target (if any)
    fn store(&self) -> Option<&(dyn Store<Event, Error = StoreError, Key = Key> + Send + Sync)>;

    /// Creates a boxed clone of the target
    fn clone_dyn(&self) -> Box<dyn Target + Send + Sync>;

    /// Initializes the target, e.g. establishing a connection
    async fn init(&self) -> Result<(), TargetError> {
        // The default implementation is a no-op
        Ok(())
    }

    /// Checks if the target is enabled
    fn is_enabled(&self) -> bool;
}

/// The `ChannelTargetType` enum represents the different types of channel Target
/// used in the notification system.
///
/// It includes:
/// - `Webhook`: Represents a webhook target for sending notifications via HTTP requests.
/// - `Kafka`: Represents a Kafka target for sending notifications to a Kafka topic.
/// - `Mqtt`: Represents an MQTT target for sending notifications via the MQTT protocol.
///
/// Each variant has an associated string representation that can be used for serialization
/// or logging purposes.
/// The `as_str` method returns the string representation of the target type,
/// and the `Display` implementation allows for easy formatting of the target type as a string.
///
/// Example usage:
/// ```rust
/// use rustfs_notify::target::ChannelTargetType;
///
/// let target_type = ChannelTargetType::Webhook;
/// assert_eq!(target_type.as_str(), "webhook");
/// println!("Target type: {}", target_type);
/// ```
///
/// Example output:
/// Target type: webhook
pub enum ChannelTargetType {
    Webhook,
    Kafka,
    Mqtt,
}

impl ChannelTargetType {
    pub fn as_str(&self) -> &'static str {
        match self {
            ChannelTargetType::Webhook => "webhook",
            ChannelTargetType::Kafka => "kafka",
            ChannelTargetType::Mqtt => "mqtt",
        }
    }
}

impl std::fmt::Display for ChannelTargetType {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            ChannelTargetType::Webhook => write!(f, "webhook"),
            ChannelTargetType::Kafka => write!(f, "kafka"),
            ChannelTargetType::Mqtt => write!(f, "mqtt"),
        }
    }
}
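For reference, a minimal sketch of what an implementation of this trait can look like. This hypothetical `NoopTarget` is not part of the change; the real implementations follow in `mqtt.rs` and `webhook.rs` below.

```rust
use std::sync::Arc;

use async_trait::async_trait;
use crate::arn::TargetID;
use crate::store::{Key, Store};
use crate::target::Target;
use crate::{Event, StoreError, TargetError};

/// A hypothetical target that accepts every event and does nothing with it.
#[derive(Clone)]
pub struct NoopTarget {
    id: TargetID,
    enabled: bool,
}

impl NoopTarget {
    pub fn new(name: String) -> Self {
        Self {
            id: TargetID::new(name, "noop".to_string()),
            enabled: true,
        }
    }
}

#[async_trait]
impl Target for NoopTarget {
    fn id(&self) -> TargetID {
        self.id.clone()
    }

    async fn is_active(&self) -> Result<bool, TargetError> {
        Ok(self.enabled) // nothing to connect to
    }

    async fn save(&self, _event: Arc<Event>) -> Result<(), TargetError> {
        Ok(()) // silently drop the event
    }

    async fn send_from_store(&self, _key: Key) -> Result<(), TargetError> {
        Ok(()) // nothing is ever queued
    }

    async fn close(&self) -> Result<(), TargetError> {
        Ok(())
    }

    fn store(&self) -> Option<&(dyn Store<Event, Error = StoreError, Key = Key> + Send + Sync)> {
        None // this target keeps no persistent queue
    }

    fn clone_dyn(&self) -> Box<dyn Target + Send + Sync> {
        Box::new(self.clone())
    }

    fn is_enabled(&self) -> bool {
        self.enabled
    }
}
```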
630 crates/notify/src/target/mqtt.rs Normal file
@@ -0,0 +1,630 @@
|
||||
use crate::store::{Key, STORE_EXTENSION};
|
||||
use crate::target::ChannelTargetType;
|
||||
use crate::{
|
||||
StoreError, Target,
|
||||
arn::TargetID,
|
||||
error::TargetError,
|
||||
event::{Event, EventLog},
|
||||
store::Store,
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use rumqttc::{AsyncClient, EventLoop, MqttOptions, Outgoing, Packet, QoS};
|
||||
use rumqttc::{ConnectionError, mqttbytes::Error as MqttBytesError};
|
||||
use std::sync::Arc;
|
||||
use std::{
|
||||
path::PathBuf,
|
||||
sync::atomic::{AtomicBool, Ordering},
|
||||
time::Duration,
|
||||
};
|
||||
use tokio::sync::{Mutex, OnceCell, mpsc};
|
||||
use tracing::{debug, error, info, instrument, trace, warn};
|
||||
use url::Url;
|
||||
use urlencoding;
|
||||
|
||||
const DEFAULT_CONNECTION_TIMEOUT: Duration = Duration::from_secs(15);
|
||||
const EVENT_LOOP_POLL_TIMEOUT: Duration = Duration::from_secs(10); // For initial connection check in task
|
||||
|
||||
/// Arguments for configuring an MQTT target
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct MQTTArgs {
|
||||
/// Whether the target is enabled
|
||||
pub enable: bool,
|
||||
/// The broker URL
|
||||
pub broker: Url,
|
||||
/// The topic to publish to
|
||||
pub topic: String,
|
||||
/// The quality of service level
|
||||
pub qos: QoS,
|
||||
/// The username for the broker
|
||||
pub username: String,
|
||||
/// The password for the broker
|
||||
pub password: String,
|
||||
/// The maximum interval for reconnection attempts (Note: rumqttc has internal strategy)
|
||||
pub max_reconnect_interval: Duration,
|
||||
/// The keep alive interval
|
||||
pub keep_alive: Duration,
|
||||
/// The directory to store events in case of failure
|
||||
pub queue_dir: String,
|
||||
/// The maximum number of events to store
|
||||
pub queue_limit: u64,
|
||||
}
|
||||
|
||||
impl MQTTArgs {
|
||||
pub fn validate(&self) -> Result<(), TargetError> {
|
||||
if !self.enable {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
match self.broker.scheme() {
|
||||
"ws" | "wss" | "tcp" | "ssl" | "tls" | "tcps" | "mqtt" | "mqtts" => {}
|
||||
_ => {
|
||||
return Err(TargetError::Configuration("unknown protocol in broker address".to_string()));
|
||||
}
|
||||
}
|
||||
|
||||
if !self.queue_dir.is_empty() {
|
||||
let path = std::path::Path::new(&self.queue_dir);
|
||||
if !path.is_absolute() {
|
||||
return Err(TargetError::Configuration("mqtt queueDir path should be absolute".to_string()));
|
||||
}
|
||||
|
||||
if self.qos == QoS::AtMostOnce {
|
||||
return Err(TargetError::Configuration(
|
||||
"QoS should be AtLeastOnce (1) or ExactlyOnce (2) if queueDir is set".to_string(),
|
||||
));
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
struct BgTaskManager {
|
||||
init_cell: OnceCell<tokio::task::JoinHandle<()>>,
|
||||
cancel_tx: mpsc::Sender<()>,
|
||||
initial_cancel_rx: Mutex<Option<mpsc::Receiver<()>>>,
|
||||
}
|
||||
|
||||
/// A target that sends events to an MQTT broker
|
||||
pub struct MQTTTarget {
|
||||
id: TargetID,
|
||||
args: MQTTArgs,
|
||||
client: Arc<Mutex<Option<AsyncClient>>>,
|
||||
store: Option<Box<dyn Store<Event, Error = StoreError, Key = Key> + Send + Sync>>,
|
||||
connected: Arc<AtomicBool>,
|
||||
bg_task_manager: Arc<BgTaskManager>,
|
||||
}
|
||||
|
||||
impl MQTTTarget {
|
||||
/// Creates a new MQTTTarget
|
||||
#[instrument(skip(args), fields(target_id_as_string = %id))]
|
||||
pub fn new(id: String, args: MQTTArgs) -> Result<Self, TargetError> {
|
||||
args.validate()?;
|
||||
let target_id = TargetID::new(id.clone(), ChannelTargetType::Mqtt.as_str().to_string());
|
||||
let queue_store = if !args.queue_dir.is_empty() {
|
||||
let base_path = PathBuf::from(&args.queue_dir);
|
||||
let unique_dir_name =
|
||||
format!("rustfs-{}-{}-{}", ChannelTargetType::Mqtt.as_str(), target_id.name, target_id.id).replace(":", "_");
|
||||
// Ensure the directory name is valid for filesystem
|
||||
let specific_queue_path = base_path.join(unique_dir_name);
|
||||
debug!(target_id = %target_id, path = %specific_queue_path.display(), "Initializing queue store for MQTT target");
|
||||
let store = crate::store::QueueStore::<Event>::new(specific_queue_path, args.queue_limit, STORE_EXTENSION);
|
||||
if let Err(e) = store.open() {
|
||||
error!(
|
||||
target_id = %target_id,
|
||||
error = %e,
|
||||
"Failed to open store for MQTT target"
|
||||
);
|
||||
return Err(TargetError::Storage(format!("{}", e)));
|
||||
}
|
||||
Some(Box::new(store) as Box<dyn Store<Event, Error = StoreError, Key = Key> + Send + Sync>)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
let (cancel_tx, cancel_rx) = mpsc::channel(1);
|
||||
let bg_task_manager = Arc::new(BgTaskManager {
|
||||
init_cell: OnceCell::new(),
|
||||
cancel_tx,
|
||||
initial_cancel_rx: Mutex::new(Some(cancel_rx)),
|
||||
});
|
||||
|
||||
info!(target_id = %target_id, "MQTT target created");
|
||||
Ok(MQTTTarget {
|
||||
id: target_id,
|
||||
args,
|
||||
client: Arc::new(Mutex::new(None)),
|
||||
store: queue_store,
|
||||
connected: Arc::new(AtomicBool::new(false)),
|
||||
bg_task_manager,
|
||||
})
|
||||
}
|
||||
|
||||
#[instrument(skip(self), fields(target_id = %self.id))]
|
||||
async fn init(&self) -> Result<(), TargetError> {
|
||||
if self.connected.load(Ordering::SeqCst) {
|
||||
debug!(target_id = %self.id, "Already connected.");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let bg_task_manager = Arc::clone(&self.bg_task_manager);
|
||||
let client_arc = Arc::clone(&self.client);
|
||||
let connected_arc = Arc::clone(&self.connected);
|
||||
let target_id_clone = self.id.clone();
|
||||
let args_clone = self.args.clone();
|
||||
|
||||
let _ = bg_task_manager
|
||||
.init_cell
|
||||
.get_or_try_init(|| async {
|
||||
debug!(target_id = %target_id_clone, "Initializing MQTT background task.");
|
||||
let host = args_clone.broker.host_str().unwrap_or("localhost");
|
||||
let port = args_clone.broker.port().unwrap_or(1883);
|
||||
let mut mqtt_options = MqttOptions::new(format!("rustfs_notify_{}", uuid::Uuid::new_v4()), host, port);
|
||||
mqtt_options
|
||||
.set_keep_alive(args_clone.keep_alive)
|
||||
.set_max_packet_size(100 * 1024 * 1024, 100 * 1024 * 1024); // 100MB
|
||||
|
||||
if !args_clone.username.is_empty() {
|
||||
mqtt_options.set_credentials(args_clone.username.clone(), args_clone.password.clone());
|
||||
}
|
||||
|
||||
let (new_client, eventloop) = AsyncClient::new(mqtt_options, 10);
|
||||
|
||||
if let Err(e) = new_client.subscribe(&args_clone.topic, args_clone.qos).await {
|
||||
error!(target_id = %target_id_clone, error = %e, "Failed to subscribe to MQTT topic during init");
|
||||
return Err(TargetError::Network(format!("MQTT subscribe failed: {}", e)));
|
||||
}
|
||||
|
||||
let mut rx_guard = bg_task_manager.initial_cancel_rx.lock().await;
|
||||
let cancel_rx = rx_guard.take().ok_or_else(|| {
|
||||
error!(target_id = %target_id_clone, "MQTT cancel receiver already taken for task.");
|
||||
TargetError::Configuration("MQTT cancel receiver already taken for task".to_string())
|
||||
})?;
|
||||
drop(rx_guard);
|
||||
|
||||
*client_arc.lock().await = Some(new_client.clone());
|
||||
|
||||
info!(target_id = %target_id_clone, "Spawning MQTT event loop task.");
|
||||
let task_handle =
|
||||
tokio::spawn(run_mqtt_event_loop(eventloop, connected_arc.clone(), target_id_clone.clone(), cancel_rx));
|
||||
Ok(task_handle)
|
||||
})
|
||||
.await
|
||||
.map_err(|e: TargetError| {
|
||||
error!(target_id = %self.id, error = %e, "Failed to initialize MQTT background task");
|
||||
e
|
||||
})?;
|
||||
debug!(target_id = %self.id, "MQTT background task initialized successfully.");
|
||||
|
||||
match tokio::time::timeout(DEFAULT_CONNECTION_TIMEOUT, async {
|
||||
while !self.connected.load(Ordering::SeqCst) {
|
||||
if let Some(handle) = self.bg_task_manager.init_cell.get() {
|
||||
if handle.is_finished() && !self.connected.load(Ordering::SeqCst) {
|
||||
error!(target_id = %self.id, "MQTT background task exited prematurely before connection was established.");
|
||||
return Err(TargetError::Network("MQTT background task exited prematurely".to_string()));
|
||||
}
|
||||
}
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
}
|
||||
debug!(target_id = %self.id, "MQTT target connected successfully.");
|
||||
Ok(())
|
||||
}).await {
|
||||
Ok(Ok(_)) => {
|
||||
info!(target_id = %self.id, "MQTT target initialized and connected.");
|
||||
Ok(())
|
||||
}
|
||||
Ok(Err(e)) => Err(e),
|
||||
Err(_) => {
|
||||
error!(target_id = %self.id, "Timeout waiting for MQTT connection after task spawn.");
|
||||
Err(TargetError::Network(
|
||||
"Timeout waiting for MQTT connection".to_string(),
|
||||
))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip(self, event), fields(target_id = %self.id))]
|
||||
async fn send(&self, event: &Event) -> Result<(), TargetError> {
|
||||
let client_guard = self.client.lock().await;
|
||||
let client = client_guard
|
||||
.as_ref()
|
||||
.ok_or_else(|| TargetError::Configuration("MQTT client not initialized".to_string()))?;
|
||||
|
||||
let object_name = urlencoding::decode(&event.s3.object.key)
|
||||
.map_err(|e| TargetError::Encoding(format!("Failed to decode object key: {}", e)))?;
|
||||
|
||||
let key = format!("{}/{}", event.s3.bucket.name, object_name);
|
||||
|
||||
let log = EventLog {
|
||||
event_name: event.event_name,
|
||||
key,
|
||||
records: vec![event.clone()],
|
||||
};
|
||||
|
||||
let data =
|
||||
serde_json::to_vec(&log).map_err(|e| TargetError::Serialization(format!("Failed to serialize event: {}", e)))?;
|
||||
|
||||
// Vec<u8> Convert to String, only for printing logs
|
||||
let data_string = String::from_utf8(data.clone())
|
||||
.map_err(|e| TargetError::Encoding(format!("Failed to convert event data to UTF-8: {}", e)))?;
|
||||
debug!("Sending event to mqtt target: {}, event log: {}", self.id, data_string);
|
||||
|
||||
client
|
||||
.publish(&self.args.topic, self.args.qos, false, data)
|
||||
.await
|
||||
.map_err(|e| {
|
||||
if e.to_string().contains("Connection") || e.to_string().contains("Timeout") {
|
||||
self.connected.store(false, Ordering::SeqCst);
|
||||
warn!(target_id = %self.id, error = %e, "Publish failed due to connection issue, marking as not connected.");
|
||||
TargetError::NotConnected
|
||||
} else {
|
||||
TargetError::Request(format!("Failed to publish message: {}", e))
|
||||
}
|
||||
})?;
|
||||
|
||||
debug!(target_id = %self.id, topic = %self.args.topic, "Event published to MQTT topic");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub fn clone_target(&self) -> Box<dyn Target + Send + Sync> {
|
||||
Box::new(MQTTTarget {
|
||||
id: self.id.clone(),
|
||||
args: self.args.clone(),
|
||||
client: self.client.clone(),
|
||||
store: self.store.as_ref().map(|s| s.boxed_clone()),
|
||||
connected: self.connected.clone(),
|
||||
bg_task_manager: self.bg_task_manager.clone(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
async fn run_mqtt_event_loop(
|
||||
mut eventloop: EventLoop,
|
||||
connected_status: Arc<AtomicBool>,
|
||||
target_id: TargetID,
|
||||
mut cancel_rx: mpsc::Receiver<()>,
|
||||
) {
|
||||
info!(target_id = %target_id, "MQTT event loop task started.");
|
||||
let mut initial_connection_established = false;
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
biased;
|
||||
_ = cancel_rx.recv() => {
|
||||
info!(target_id = %target_id, "MQTT event loop task received cancellation signal. Shutting down.");
|
||||
break;
|
||||
}
|
||||
polled_event_result = async {
|
||||
if !initial_connection_established || !connected_status.load(Ordering::SeqCst) {
|
||||
match tokio::time::timeout(EVENT_LOOP_POLL_TIMEOUT, eventloop.poll()).await {
|
||||
Ok(Ok(event)) => Ok(event),
|
||||
Ok(Err(e)) => Err(e),
|
||||
Err(_) => {
|
||||
debug!(target_id = %target_id, "MQTT poll timed out (EVENT_LOOP_POLL_TIMEOUT) while not connected or status pending.");
|
||||
Err(rumqttc::ConnectionError::NetworkTimeout)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
eventloop.poll().await
|
||||
}
|
||||
} => {
|
||||
match polled_event_result {
|
||||
Ok(notification) => {
|
||||
trace!(target_id = %target_id, event = ?notification, "Received MQTT event");
|
||||
match notification {
|
||||
rumqttc::Event::Incoming(Packet::ConnAck(_conn_ack)) => {
|
||||
info!(target_id = %target_id, "MQTT connected (ConnAck).");
|
||||
connected_status.store(true, Ordering::SeqCst);
|
||||
initial_connection_established = true;
|
||||
}
|
||||
rumqttc::Event::Incoming(Packet::Publish(publish)) => {
|
||||
debug!(target_id = %target_id, topic = %publish.topic, payload_len = publish.payload.len(), "Received message on subscribed topic.");
|
||||
}
|
||||
rumqttc::Event::Incoming(Packet::Disconnect) => {
|
||||
info!(target_id = %target_id, "Received Disconnect packet from broker. MQTT connection lost.");
|
||||
connected_status.store(false, Ordering::SeqCst);
|
||||
}
|
||||
rumqttc::Event::Incoming(Packet::PingResp) => {
|
||||
trace!(target_id = %target_id, "Received PingResp from broker. Connection is alive.");
|
||||
}
|
||||
rumqttc::Event::Incoming(Packet::SubAck(suback)) => {
|
||||
trace!(target_id = %target_id, "Received SubAck for pkid: {}", suback.pkid);
|
||||
}
|
||||
rumqttc::Event::Incoming(Packet::PubAck(puback)) => {
|
||||
trace!(target_id = %target_id, "Received PubAck for pkid: {}", puback.pkid);
|
||||
}
|
||||
// Process other incoming packet types as needed (PubRec, PubRel, PubComp, UnsubAck)
|
||||
rumqttc::Event::Outgoing(Outgoing::Disconnect) => {
|
||||
info!(target_id = %target_id, "MQTT outgoing disconnect initiated by client.");
|
||||
connected_status.store(false, Ordering::SeqCst);
|
||||
}
|
||||
rumqttc::Event::Outgoing(Outgoing::PingReq) => {
|
||||
trace!(target_id = %target_id, "Client sent PingReq to broker.");
|
||||
}
|
||||
// Other Outgoing events (Subscribe, Unsubscribe, Publish) usually do not need to handle connection status here,
|
||||
// Because they are actions initiated by the client.
|
||||
_ => {
|
||||
// Log other unspecified MQTT events that are not handled, which helps debug
|
||||
trace!(target_id = %target_id, "Unhandled or generic MQTT event: {:?}", notification);
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
connected_status.store(false, Ordering::SeqCst);
|
||||
error!(target_id = %target_id, error = %e, "Error from MQTT event loop poll");
|
||||
|
||||
if matches!(e, rumqttc::ConnectionError::NetworkTimeout) && (!initial_connection_established || !connected_status.load(Ordering::SeqCst)) {
|
||||
warn!(target_id = %target_id, "Timeout during initial poll or pending state, will retry.");
|
||||
continue;
|
||||
}
|
||||
|
||||
if matches!(e,
|
||||
ConnectionError::Io(_) |
|
||||
ConnectionError::NetworkTimeout |
|
||||
ConnectionError::ConnectionRefused(_) |
|
||||
ConnectionError::Tls(_)
|
||||
) {
|
||||
warn!(target_id = %target_id, error = %e, "MQTT connection error. Relying on rumqttc for reconnection if applicable.");
|
||||
}
|
||||
// Here you can decide whether to break loops based on the error type.
|
||||
// For example, for some unrecoverable errors.
|
||||
if is_fatal_mqtt_error(&e) {
|
||||
error!(target_id = %target_id, error = %e, "Fatal MQTT error, terminating event loop.");
|
||||
break;
|
||||
}
|
||||
// rumqttc's eventloop.poll() may return Err and terminate after some errors,
// or it may handle reconnection internally; the `continue` here makes select! wait again.
// If the error is transient and rumqttc is reconnecting, poll() should eventually succeed or return a different error.
|
||||
// Sleep briefly to avoid busy cycles in case of rapid failure.
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
connected_status.store(false, Ordering::SeqCst);
|
||||
info!(target_id = %target_id, "MQTT event loop task finished.");
|
||||
}
|
||||
|
||||
/// Checks whether the given MQTT connection error should be considered fatal;
/// for fatal errors, the event loop should terminate.
fn is_fatal_mqtt_error(err: &ConnectionError) -> bool {
    match err {
        // If all client requests have been processed (for example, the AsyncClient was dropped), the event loop can end.
        ConnectionError::RequestsDone => true,

        // Check the underlying MQTT state error
        ConnectionError::MqttState(state_err) => {
            // The type of state_err is &rumqttc::StateError
            match state_err {
                // If the StateError is caused by deserialization issues, check the underlying MqttBytesError
                rumqttc::StateError::Deserialization(mqtt_bytes_err) => {
                    // The type of mqtt_bytes_err is &rumqttc::mqttbytes::Error
                    matches!(
                        mqtt_bytes_err,
                        MqttBytesError::InvalidProtocol // Invalid protocol
                            | MqttBytesError::InvalidProtocolLevel(_) // Invalid protocol level
                            | MqttBytesError::IncorrectPacketFormat // Incorrect packet format
                            | MqttBytesError::InvalidPacketType(_) // Invalid packet type
                            | MqttBytesError::MalformedPacket // Malformed packet
                            | MqttBytesError::PayloadTooLong // Payload too long
                            | MqttBytesError::PayloadSizeLimitExceeded(_) // Payload size limit exceeded
                            | MqttBytesError::TopicNotUtf8 // Topic is not UTF-8 (serious protocol violation)
                    )
                }
                // Other StateError variants that are fatal
                rumqttc::StateError::InvalidState // The internal state machine is in an invalid state
                | rumqttc::StateError::WrongPacket // Protocol violation: unexpected packet received
                | rumqttc::StateError::Unsolicited(_) // Protocol violation: unsolicited ACK received
                | rumqttc::StateError::OutgoingPacketTooLarge { .. } // Attempted to send an oversized packet
                | rumqttc::StateError::EmptySubscription // Protocol violation (if it occurs at this stage)
                => true,

                // Other StateErrors (such as Io, AwaitPingResp, CollisionTimeout) are not considered fatal here.
                // They may be handled internally by rumqttc or surface later as other ConnectionError variants.
                _ => false,
            }
        }

        // Other ConnectionError variants (such as Io, Tls, NetworkTimeout, ConnectionRefused, NotConnAck, etc.)
        // are usually transient, or rumqttc's internal reconnect logic will handle them.
        _ => false,
    }
}
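A small sanity-check sketch for the classification above (not in the diff); it only exercises `ConnectionError` variants that this file already references:

```rust
#[cfg(test)]
mod fatal_error_tests {
    use super::*;
    use rumqttc::ConnectionError;

    #[test]
    fn requests_done_is_fatal() {
        // Client-side request exhaustion should terminate the event loop.
        assert!(is_fatal_mqtt_error(&ConnectionError::RequestsDone));
    }

    #[test]
    fn network_timeout_is_not_fatal() {
        // Transient network errors should be retried, not treated as fatal.
        assert!(!is_fatal_mqtt_error(&ConnectionError::NetworkTimeout));
    }
}
```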
#[async_trait]
|
||||
impl Target for MQTTTarget {
|
||||
fn id(&self) -> TargetID {
|
||||
self.id.clone()
|
||||
}
|
||||
|
||||
#[instrument(skip(self), fields(target_id = %self.id))]
|
||||
async fn is_active(&self) -> Result<bool, TargetError> {
|
||||
debug!(target_id = %self.id, "Checking if MQTT target is active.");
|
||||
if self.client.lock().await.is_none() && !self.connected.load(Ordering::SeqCst) {
|
||||
// Check if the background task is running and has not panicked
|
||||
if let Some(handle) = self.bg_task_manager.init_cell.get() {
|
||||
if handle.is_finished() {
|
||||
error!(target_id = %self.id, "MQTT background task has finished, possibly due to an error. Target is not active.");
|
||||
return Err(TargetError::Network("MQTT background task terminated".to_string()));
|
||||
}
|
||||
}
|
||||
debug!(target_id = %self.id, "MQTT client not yet initialized or task not running/connected.");
|
||||
return Err(TargetError::Configuration(
|
||||
"MQTT client not available or not initialized/connected".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if self.connected.load(Ordering::SeqCst) {
|
||||
debug!(target_id = %self.id, "MQTT target is active (connected flag is true).");
|
||||
Ok(true)
|
||||
} else {
|
||||
debug!(target_id = %self.id, "MQTT target is not connected (connected flag is false).");
|
||||
Err(TargetError::NotConnected)
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip(self, event), fields(target_id = %self.id))]
|
||||
async fn save(&self, event: Arc<Event>) -> Result<(), TargetError> {
|
||||
if let Some(store) = &self.store {
|
||||
debug!(target_id = %self.id, "Event saved to store start");
|
||||
// If store is configured, ONLY put the event into the store.
|
||||
// Do NOT send it directly here.
|
||||
match store.put(event.clone()) {
|
||||
Ok(_) => {
|
||||
debug!(target_id = %self.id, "Event saved to store for MQTT target successfully.");
|
||||
Ok(())
|
||||
}
|
||||
Err(e) => {
|
||||
error!(target_id = %self.id, error = %e, "Failed to save event to store");
|
||||
return Err(TargetError::Storage(format!("Failed to save event to store: {}", e)));
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if !self.is_enabled() {
|
||||
return Err(TargetError::Disabled);
|
||||
}
|
||||
|
||||
if !self.connected.load(Ordering::SeqCst) {
|
||||
warn!(target_id = %self.id, "Attempting to send directly but not connected; trying to init.");
|
||||
// Call the struct's init method, not the trait's default
|
||||
match MQTTTarget::init(self).await {
|
||||
Ok(_) => debug!(target_id = %self.id, "MQTT target initialized successfully."),
|
||||
Err(e) => {
|
||||
error!(target_id = %self.id, error = %e, "Failed to initialize MQTT target.");
|
||||
return Err(TargetError::NotConnected);
|
||||
}
|
||||
}
|
||||
if !self.connected.load(Ordering::SeqCst) {
|
||||
error!(target_id = %self.id, "Cannot save (send directly) as target is not active after init attempt.");
|
||||
return Err(TargetError::NotConnected);
|
||||
}
|
||||
}
|
||||
self.send(&event).await
|
||||
}
|
||||
}
|
||||
|
||||
#[instrument(skip(self), fields(target_id = %self.id))]
|
||||
async fn send_from_store(&self, key: Key) -> Result<(), TargetError> {
|
||||
debug!(target_id = %self.id, ?key, "Attempting to send event from store with key.");
|
||||
|
||||
if !self.is_enabled() {
|
||||
return Err(TargetError::Disabled);
|
||||
}
|
||||
|
||||
if !self.connected.load(Ordering::SeqCst) {
|
||||
warn!(target_id = %self.id, "Not connected; trying to init before sending from store.");
|
||||
match MQTTTarget::init(self).await {
|
||||
Ok(_) => debug!(target_id = %self.id, "MQTT target initialized successfully."),
|
||||
Err(e) => {
|
||||
error!(target_id = %self.id, error = %e, "Failed to initialize MQTT target.");
|
||||
return Err(TargetError::NotConnected);
|
||||
}
|
||||
}
|
||||
if !self.connected.load(Ordering::SeqCst) {
|
||||
error!(target_id = %self.id, "Cannot send from store as target is not active after init attempt.");
|
||||
return Err(TargetError::NotConnected);
|
||||
}
|
||||
}
|
||||
|
||||
let store = self
|
||||
.store
|
||||
.as_ref()
|
||||
.ok_or_else(|| TargetError::Configuration("No store configured".to_string()))?;
|
||||
|
||||
let event = match store.get(&key) {
|
||||
Ok(event) => {
|
||||
debug!(target_id = %self.id, ?key, "Retrieved event from store for sending.");
|
||||
event
|
||||
}
|
||||
Err(StoreError::NotFound) => {
|
||||
// Assuming NotFound takes the key
|
||||
debug!(target_id = %self.id, ?key, "Event not found in store for sending.");
|
||||
return Ok(());
|
||||
}
|
||||
Err(e) => {
|
||||
error!(
|
||||
target_id = %self.id,
|
||||
error = %e,
|
||||
"Failed to get event from store"
|
||||
);
|
||||
return Err(TargetError::Storage(format!("Failed to get event from store: {}", e)));
|
||||
}
|
||||
};
|
||||
|
||||
debug!(target_id = %self.id, ?key, "Sending event from store.");
|
||||
if let Err(e) = self.send(&event).await {
|
||||
if matches!(e, TargetError::NotConnected) {
|
||||
warn!(target_id = %self.id, "Failed to send event from store: Not connected. Event remains in store.");
|
||||
return Err(TargetError::NotConnected);
|
||||
}
|
||||
error!(target_id = %self.id, error = %e, "Failed to send event from store with an unexpected error.");
|
||||
return Err(e);
|
||||
}
|
||||
debug!(target_id = %self.id, ?key, "Event sent from store successfully. deleting from store. ");
|
||||
|
||||
match store.del(&key) {
|
||||
Ok(_) => {
|
||||
debug!(target_id = %self.id, ?key, "Event deleted from store after successful send.")
|
||||
}
|
||||
Err(StoreError::NotFound) => {
|
||||
debug!(target_id = %self.id, ?key, "Event already deleted from store.");
|
||||
}
|
||||
Err(e) => {
|
||||
error!(target_id = %self.id, error = %e, "Failed to delete event from store after send.");
|
||||
return Err(TargetError::Storage(format!("Failed to delete event from store: {}", e)));
|
||||
}
|
||||
}
|
||||
|
||||
debug!(target_id = %self.id, ?key, "Event deleted from store.");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn close(&self) -> Result<(), TargetError> {
|
||||
info!(target_id = %self.id, "Attempting to close MQTT target.");
|
||||
|
||||
if let Err(e) = self.bg_task_manager.cancel_tx.send(()).await {
|
||||
warn!(target_id = %self.id, error = %e, "Failed to send cancel signal to MQTT background task. It might have already exited.");
|
||||
}
|
||||
|
||||
// Wait for the task to finish if it was initialized
|
||||
if let Some(_task_handle) = self.bg_task_manager.init_cell.get() {
|
||||
debug!(target_id = %self.id, "Waiting for MQTT background task to complete...");
|
||||
// It's tricky to await here if close is called from a sync context or Drop
|
||||
// For async close, this is fine. Consider a timeout.
|
||||
// let _ = tokio::time::timeout(Duration::from_secs(5), task_handle.await).await;
|
||||
// If task_handle.await is directly used, ensure it's not awaited multiple times if close can be called multiple times.
|
||||
// For now, we rely on the signal and the task's self-termination.
|
||||
}
|
||||
|
||||
if let Some(client_instance) = self.client.lock().await.take() {
|
||||
info!(target_id = %self.id, "Disconnecting MQTT client.");
|
||||
if let Err(e) = client_instance.disconnect().await {
|
||||
warn!(target_id = %self.id, error = %e, "Error during MQTT client disconnect.");
|
||||
}
|
||||
}
|
||||
|
||||
self.connected.store(false, Ordering::SeqCst);
|
||||
info!(target_id = %self.id, "MQTT target close method finished.");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn store(&self) -> Option<&(dyn Store<Event, Error = StoreError, Key = Key> + Send + Sync)> {
|
||||
self.store.as_deref()
|
||||
}
|
||||
|
||||
fn clone_dyn(&self) -> Box<dyn Target + Send + Sync> {
|
||||
self.clone_target()
|
||||
}
|
||||
|
||||
async fn init(&self) -> Result<(), TargetError> {
|
||||
if !self.is_enabled() {
|
||||
debug!(target_id = %self.id, "Target is disabled, skipping init.");
|
||||
return Ok(());
|
||||
}
|
||||
// Call the internal init logic
|
||||
MQTTTarget::init(self).await
|
||||
}
|
||||
|
||||
fn is_enabled(&self) -> bool {
|
||||
self.args.enable
|
||||
}
|
||||
}
|
||||
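Putting the MQTT target together, a hedged configuration sketch (field values, the queue path, and the `rustfs_notify` crate path are illustrative assumptions, not values taken from this diff):

```rust
use std::time::Duration;

use rumqttc::QoS;
use rustfs_notify::target::mqtt::{MQTTArgs, MQTTTarget};
use url::Url;

// Builds a hypothetical MQTT target; `MQTTTarget::new` validates the arguments.
fn build_mqtt_target() -> Result<MQTTTarget, Box<dyn std::error::Error>> {
    let args = MQTTArgs {
        enable: true,
        broker: Url::parse("tcp://localhost:1883")?,
        topic: "rustfs/events".to_string(),
        qos: QoS::AtLeastOnce, // a queue_dir requires at least QoS 1
        username: String::new(),
        password: String::new(),
        max_reconnect_interval: Duration::from_secs(5),
        keep_alive: Duration::from_secs(30),
        queue_dir: "/var/lib/rustfs/notify".to_string(), // must be an absolute path when set
        queue_limit: 10_000,
    };
    Ok(MQTTTarget::new("primary".to_string(), args)?)
}
```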
398 crates/notify/src/target/webhook.rs Normal file
@@ -0,0 +1,398 @@
|
||||
use crate::store::STORE_EXTENSION;
|
||||
use crate::target::ChannelTargetType;
|
||||
use crate::{
|
||||
StoreError, Target,
|
||||
arn::TargetID,
|
||||
error::TargetError,
|
||||
event::{Event, EventLog},
|
||||
store::{Key, Store},
|
||||
};
|
||||
use async_trait::async_trait;
|
||||
use reqwest::{Client, StatusCode, Url};
|
||||
use std::{
|
||||
path::PathBuf,
|
||||
sync::{
|
||||
Arc,
|
||||
atomic::{AtomicBool, Ordering},
|
||||
},
|
||||
time::Duration,
|
||||
};
|
||||
use tokio::net::lookup_host;
|
||||
use tokio::sync::mpsc;
|
||||
use tracing::{debug, error, info, instrument};
|
||||
use urlencoding;
|
||||
|
||||
/// Arguments for configuring a Webhook target
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct WebhookArgs {
|
||||
/// Whether the target is enabled
|
||||
pub enable: bool,
|
||||
/// The endpoint URL to send events to
|
||||
pub endpoint: Url,
|
||||
/// The authorization token for the endpoint
|
||||
pub auth_token: String,
|
||||
/// The directory to store events in case of failure
|
||||
pub queue_dir: String,
|
||||
/// The maximum number of events to store
|
||||
pub queue_limit: u64,
|
||||
/// The client certificate for TLS (PEM format)
|
||||
pub client_cert: String,
|
||||
/// The client key for TLS (PEM format)
|
||||
pub client_key: String,
|
||||
}
|
||||
|
||||
// Validation method for WebhookArgs
|
||||
impl WebhookArgs {
|
||||
pub fn validate(&self) -> Result<(), TargetError> {
|
||||
if !self.enable {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if self.endpoint.as_str().is_empty() {
|
||||
return Err(TargetError::Configuration("endpoint empty".to_string()));
|
||||
}
|
||||
|
||||
if !self.queue_dir.is_empty() {
|
||||
let path = std::path::Path::new(&self.queue_dir);
|
||||
if !path.is_absolute() {
|
||||
return Err(TargetError::Configuration("webhook queueDir path should be absolute".to_string()));
|
||||
}
|
||||
}
|
||||
|
||||
if !self.client_cert.is_empty() && self.client_key.is_empty()
|
||||
|| self.client_cert.is_empty() && !self.client_key.is_empty()
|
||||
{
|
||||
return Err(TargetError::Configuration("cert and key must be specified as a pair".to_string()));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// A target that sends events to a webhook
|
||||
pub struct WebhookTarget {
|
||||
id: TargetID,
|
||||
args: WebhookArgs,
|
||||
http_client: Arc<Client>,
|
||||
// Add Send + Sync constraints to ensure thread safety
|
||||
store: Option<Box<dyn Store<Event, Error = StoreError, Key = Key> + Send + Sync>>,
|
||||
initialized: AtomicBool,
|
||||
addr: String,
|
||||
cancel_sender: mpsc::Sender<()>,
|
||||
}
|
||||
|
||||
impl WebhookTarget {
|
||||
/// Clones the WebhookTarget, creating a new instance with the same configuration
|
||||
pub fn clone_box(&self) -> Box<dyn Target + Send + Sync> {
|
||||
Box::new(WebhookTarget {
|
||||
id: self.id.clone(),
|
||||
args: self.args.clone(),
|
||||
http_client: Arc::clone(&self.http_client),
|
||||
store: self.store.as_ref().map(|s| s.boxed_clone()),
|
||||
initialized: AtomicBool::new(self.initialized.load(Ordering::SeqCst)),
|
||||
addr: self.addr.clone(),
|
||||
cancel_sender: self.cancel_sender.clone(),
|
||||
})
|
||||
}
|
||||
|
||||
/// Creates a new WebhookTarget
|
||||
#[instrument(skip(args), fields(target_id = %id))]
|
||||
pub fn new(id: String, args: WebhookArgs) -> Result<Self, TargetError> {
|
||||
// First verify the parameters
|
||||
args.validate()?;
|
||||
// Create a TargetID
|
||||
let target_id = TargetID::new(id, ChannelTargetType::Webhook.as_str().to_string());
|
||||
// Build HTTP client
|
||||
let mut client_builder = Client::builder()
|
||||
.timeout(Duration::from_secs(30))
|
||||
.user_agent(rustfs_utils::sys::get_user_agent(rustfs_utils::sys::ServiceType::Basis));
|
||||
|
||||
// Supplementary certificate processing logic
|
||||
if !args.client_cert.is_empty() && !args.client_key.is_empty() {
|
||||
// Add client certificate
|
||||
let cert = std::fs::read(&args.client_cert)
|
||||
.map_err(|e| TargetError::Configuration(format!("Failed to read client cert: {}", e)))?;
|
||||
let key = std::fs::read(&args.client_key)
|
||||
.map_err(|e| TargetError::Configuration(format!("Failed to read client key: {}", e)))?;
|
||||
|
||||
let identity = reqwest::Identity::from_pem(&[cert, key].concat())
|
||||
.map_err(|e| TargetError::Configuration(format!("Failed to create identity: {}", e)))?;
|
||||
client_builder = client_builder.identity(identity);
|
||||
}
|
||||
|
||||
let http_client = Arc::new(
|
||||
client_builder
|
||||
.build()
|
||||
.map_err(|e| TargetError::Configuration(format!("Failed to build HTTP client: {}", e)))?,
|
||||
);
|
||||
|
||||
// Build storage
|
||||
let queue_store = if !args.queue_dir.is_empty() {
|
||||
let queue_dir = PathBuf::from(&args.queue_dir).join(format!(
|
||||
"rustfs-{}-{}-{}",
|
||||
ChannelTargetType::Webhook.as_str(),
|
||||
target_id.name,
|
||||
target_id.id
|
||||
));
|
||||
let store = super::super::store::QueueStore::<Event>::new(queue_dir, args.queue_limit, STORE_EXTENSION);
|
||||
|
||||
if let Err(e) = store.open() {
|
||||
error!("Failed to open store for Webhook target {}: {}", target_id.id, e);
|
||||
return Err(TargetError::Storage(format!("{}", e)));
|
||||
}
|
||||
|
||||
// Make sure that the Store trait implemented by QueueStore matches the expected error type
|
||||
Some(Box::new(store) as Box<dyn Store<Event, Error = StoreError, Key = Key> + Send + Sync>)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// resolved address
|
||||
let addr = {
|
||||
let host = args.endpoint.host_str().unwrap_or("localhost");
|
||||
let port = args
|
||||
.endpoint
|
||||
.port()
|
||||
.unwrap_or_else(|| if args.endpoint.scheme() == "https" { 443 } else { 80 });
|
||||
format!("{}:{}", host, port)
|
||||
};
|
||||
|
||||
// Create a cancel channel
|
||||
let (cancel_sender, _) = mpsc::channel(1);
|
||||
info!(target_id = %target_id.id, "Webhook target created");
|
||||
Ok(WebhookTarget {
|
||||
id: target_id,
|
||||
args,
|
||||
http_client,
|
||||
store: queue_store,
|
||||
initialized: AtomicBool::new(false),
|
||||
addr,
|
||||
cancel_sender,
|
||||
})
|
||||
}
|
||||
|
||||
async fn init(&self) -> Result<(), TargetError> {
|
||||
// Use a CAS operation to ensure thread-safe initialization
|
||||
if !self.initialized.load(Ordering::SeqCst) {
|
||||
// Check the connection
|
||||
match self.is_active().await {
|
||||
Ok(true) => {
|
||||
info!("Webhook target {} is active", self.id);
|
||||
}
|
||||
Ok(false) => {
|
||||
return Err(TargetError::NotConnected);
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to check if Webhook target {} is active: {}", self.id, e);
|
||||
return Err(e);
|
||||
}
|
||||
}
|
||||
self.initialized.store(true, Ordering::SeqCst);
|
||||
info!("Webhook target {} initialized", self.id);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn send(&self, event: &Event) -> Result<(), TargetError> {
|
||||
info!("Webhook Sending event to webhook target: {}", self.id);
|
||||
let object_name = urlencoding::decode(&event.s3.object.key)
|
||||
.map_err(|e| TargetError::Encoding(format!("Failed to decode object key: {}", e)))?;
|
||||
|
||||
let key = format!("{}/{}", event.s3.bucket.name, object_name);
|
||||
|
||||
let log = EventLog {
|
||||
event_name: event.event_name,
|
||||
key,
|
||||
records: vec![event.clone()],
|
||||
};
|
||||
|
||||
let data =
|
||||
serde_json::to_vec(&log).map_err(|e| TargetError::Serialization(format!("Failed to serialize event: {}", e)))?;
|
||||
|
||||
// Convert the Vec<u8> to a String
|
||||
let data_string = String::from_utf8(data.clone())
|
||||
.map_err(|e| TargetError::Encoding(format!("Failed to convert event data to UTF-8: {}", e)))?;
|
||||
debug!("Sending event to webhook target: {}, event log: {}", self.id, data_string);
|
||||
|
||||
// Build the request
|
||||
let mut req_builder = self
|
||||
.http_client
|
||||
.post(self.args.endpoint.as_str())
|
||||
.header("Content-Type", "application/json");
|
||||
|
||||
if !self.args.auth_token.is_empty() {
|
||||
// Split auth_token string to check if the authentication type is included
|
||||
let tokens: Vec<&str> = self.args.auth_token.split_whitespace().collect();
|
||||
match tokens.len() {
|
||||
2 => {
|
||||
// Already include authentication type and token, such as "Bearer token123"
|
||||
req_builder = req_builder.header("Authorization", &self.args.auth_token);
|
||||
}
|
||||
1 => {
|
||||
// Only tokens, need to add "Bearer" prefix
|
||||
req_builder = req_builder.header("Authorization", format!("Bearer {}", self.args.auth_token));
|
||||
}
|
||||
_ => {
|
||||
// Empty string or other situations, no authentication header is added
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Send a request
|
||||
let resp = req_builder.body(data).send().await.map_err(|e| {
|
||||
if e.is_timeout() || e.is_connect() {
|
||||
TargetError::NotConnected
|
||||
} else {
|
||||
TargetError::Request(format!("Failed to send request: {}", e))
|
||||
}
|
||||
})?;
|
||||
|
||||
let status = resp.status();
|
||||
if status.is_success() {
|
||||
debug!("Event sent to webhook target: {}", self.id);
|
||||
Ok(())
|
||||
} else if status == StatusCode::FORBIDDEN {
|
||||
Err(TargetError::Authentication(format!(
|
||||
"{} returned '{}', please check if your auth token is correctly set",
|
||||
self.args.endpoint, status
|
||||
)))
|
||||
} else {
|
||||
Err(TargetError::Request(format!(
|
||||
"{} returned '{}', please check your endpoint configuration",
|
||||
self.args.endpoint, status
|
||||
)))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Target for WebhookTarget {
|
||||
fn id(&self) -> TargetID {
|
||||
self.id.clone()
|
||||
}
|
||||
|
||||
// Make sure Future is Send
|
||||
async fn is_active(&self) -> Result<bool, TargetError> {
|
||||
let socket_addr = lookup_host(&self.addr)
|
||||
.await
|
||||
.map_err(|e| TargetError::Network(format!("Failed to resolve host: {}", e)))?
|
||||
.next()
|
||||
.ok_or_else(|| TargetError::Network("No address found".to_string()))?;
|
||||
debug!("is_active socket addr: {},target id:{}", socket_addr, self.id.id);
|
||||
match tokio::time::timeout(Duration::from_secs(5), tokio::net::TcpStream::connect(socket_addr)).await {
|
||||
Ok(Ok(_)) => {
|
||||
debug!("Connection to {} is active", self.addr);
|
||||
Ok(true)
|
||||
}
|
||||
Ok(Err(e)) => {
|
||||
debug!("Connection to {} failed: {}", self.addr, e);
|
||||
if e.kind() == std::io::ErrorKind::ConnectionRefused {
|
||||
Err(TargetError::NotConnected)
|
||||
} else {
|
||||
Err(TargetError::Network(format!("Connection failed: {}", e)))
|
||||
}
|
||||
}
|
||||
Err(_) => Err(TargetError::Timeout("Connection timed out".to_string())),
|
||||
}
|
||||
}
|
||||
|
||||
async fn save(&self, event: Arc<Event>) -> Result<(), TargetError> {
|
||||
if let Some(store) = &self.store {
|
||||
// Call the store method directly, no longer need to acquire the lock
|
||||
store
|
||||
.put(event)
|
||||
.map_err(|e| TargetError::Storage(format!("Failed to save event to store: {}", e)))?;
|
||||
debug!("Event saved to store for target: {}", self.id);
|
||||
Ok(())
|
||||
} else {
|
||||
match self.init().await {
|
||||
Ok(_) => (),
|
||||
Err(e) => {
|
||||
error!("Failed to initialize Webhook target {}: {}", self.id.id, e);
|
||||
return Err(TargetError::NotConnected);
|
||||
}
|
||||
}
|
||||
self.send(&event).await
|
||||
}
|
||||
}
|
||||
|
||||
async fn send_from_store(&self, key: Key) -> Result<(), TargetError> {
|
||||
debug!("Sending event from store for target: {}", self.id);
|
||||
match self.init().await {
|
||||
Ok(_) => {
|
||||
debug!("Event sent to store for target: {}", self.name());
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to initialize Webhook target {}: {}", self.id.id, e);
|
||||
return Err(TargetError::NotConnected);
|
||||
}
|
||||
}
|
||||
|
||||
let store = self
|
||||
.store
|
||||
.as_ref()
|
||||
.ok_or_else(|| TargetError::Configuration("No store configured".to_string()))?;
|
||||
|
||||
// Get events directly from the store, no longer need to acquire locks
|
||||
let event = match store.get(&key) {
|
||||
Ok(event) => event,
|
||||
Err(StoreError::NotFound) => return Ok(()),
|
||||
Err(e) => {
|
||||
return Err(TargetError::Storage(format!("Failed to get event from store: {}", e)));
|
||||
}
|
||||
};
|
||||
|
||||
if let Err(e) = self.send(&event).await {
|
||||
if let TargetError::NotConnected = e {
|
||||
return Err(TargetError::NotConnected);
|
||||
}
|
||||
return Err(e);
|
||||
}
|
||||
|
||||
// Use the immutable reference of the store to delete the event content corresponding to the key
|
||||
debug!("Deleting event from store for target: {}, key:{}, start", self.id, key.to_string());
|
||||
match store.del(&key) {
|
||||
Ok(_) => debug!("Event deleted from store for target: {}, key:{}, end", self.id, key.to_string()),
|
||||
Err(e) => {
|
||||
error!("Failed to delete event from store: {}", e);
|
||||
return Err(TargetError::Storage(format!("Failed to delete event from store: {}", e)));
|
||||
}
|
||||
}
|
||||
|
||||
debug!("Event sent from store and deleted for target: {}", self.id);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn close(&self) -> Result<(), TargetError> {
|
||||
// Send cancel signal to background tasks
|
||||
let _ = self.cancel_sender.try_send(());
|
||||
info!("Webhook target closed: {}", self.id);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
fn store(&self) -> Option<&(dyn Store<Event, Error = StoreError, Key = Key> + Send + Sync)> {
|
||||
// Returns the reference to the internal store
|
||||
self.store.as_deref()
|
||||
}
|
||||
|
||||
fn clone_dyn(&self) -> Box<dyn Target + Send + Sync> {
|
||||
self.clone_box()
|
||||
}
|
||||
|
||||
// The existing init method can meet the needs well, but we need to make sure it complies with the Target trait
|
||||
// We can use the existing init method, but adjust the return value to match the trait requirement
|
||||
async fn init(&self) -> Result<(), TargetError> {
|
||||
// If the target is disabled, return to success directly
|
||||
if !self.is_enabled() {
|
||||
debug!("Webhook target {} is disabled, skipping initialization", self.id);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Use existing initialization logic
|
||||
WebhookTarget::init(self).await
|
||||
}
|
||||
|
||||
fn is_enabled(&self) -> bool {
|
||||
self.args.enable
|
||||
}
|
||||
}
|
||||
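Analogously for the webhook target, a hedged construction sketch (the endpoint, token, and crate path are illustrative assumptions):

```rust
use reqwest::Url;
use rustfs_notify::target::webhook::{WebhookArgs, WebhookTarget};

// Builds a hypothetical webhook target; `WebhookTarget::new` validates the arguments.
fn build_webhook_target() -> Result<WebhookTarget, Box<dyn std::error::Error>> {
    let args = WebhookArgs {
        enable: true,
        endpoint: Url::parse("https://hooks.example.com/rustfs")?,
        auth_token: "Bearer secret-token".to_string(), // a bare token also works; "Bearer " is prepended
        queue_dir: String::new(), // empty: failed events are not persisted
        queue_limit: 0,
        client_cert: String::new(), // cert and key must be provided as a pair
        client_key: String::new(),
    };
    Ok(WebhookTarget::new("audit-hook".to_string(), args)?)
}
```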
@@ -17,10 +17,11 @@ webhook = ["dep:reqwest"]
|
||||
kafka = ["dep:rdkafka"]
|
||||
|
||||
[dependencies]
|
||||
rustfs-config = { workspace = true }
|
||||
rustfs-config = { workspace = true, features = ["constants"] }
|
||||
async-trait = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
flexi_logger = { workspace = true, features = ["trc", "kv"] }
|
||||
lazy_static = { workspace = true }
|
||||
nu-ansi-term = { workspace = true }
|
||||
nvml-wrapper = { workspace = true, optional = true }
|
||||
opentelemetry = { workspace = true }
|
||||
|
||||
@@ -3,11 +3,12 @@ endpoint = "http://localhost:4317" # Default is "http://localhost:4317" if not s
|
||||
use_stdout = false # Output with stdout, true output, false no output
|
||||
sample_ratio = 1
|
||||
meter_interval = 30
|
||||
service_name = "rustfs_obs"
|
||||
service_name = "rustfs"
|
||||
service_version = "0.1.0"
|
||||
environments = "develop"
|
||||
logger_level = "debug"
|
||||
local_logging_enabled = true
|
||||
local_logging_enabled = true # Default is false if not specified
|
||||
|
||||
|
||||
#[[sinks]]
|
||||
#type = "Kafka"
|
||||
|
||||
@@ -34,6 +34,7 @@ mod config;
|
||||
mod entry;
|
||||
mod global;
|
||||
mod logger;
|
||||
mod metrics;
|
||||
mod sinks;
|
||||
mod system;
|
||||
mod telemetry;
|
||||
|
||||
32 crates/obs/src/metrics/audit.rs Normal file
@@ -0,0 +1,32 @@
|
||||
/// audit related metric descriptors
|
||||
///
|
||||
/// This module contains the metric descriptors for the audit subsystem.
|
||||
use crate::metrics::{MetricDescriptor, MetricName, new_counter_md, new_gauge_md, subsystems};
|
||||
|
||||
const TARGET_ID: &str = "target_id";
|
||||
|
||||
lazy_static::lazy_static! {
|
||||
pub static ref AUDIT_FAILED_MESSAGES_MD: MetricDescriptor =
|
||||
new_counter_md(
|
||||
MetricName::AuditFailedMessages,
|
||||
"Total number of messages that failed to send since start",
|
||||
&[TARGET_ID],
|
||||
subsystems::AUDIT
|
||||
);
|
||||
|
||||
pub static ref AUDIT_TARGET_QUEUE_LENGTH_MD: MetricDescriptor =
|
||||
new_gauge_md(
|
||||
MetricName::AuditTargetQueueLength,
|
||||
"Number of unsent messages in queue for target",
|
||||
&[TARGET_ID],
|
||||
subsystems::AUDIT
|
||||
);
|
||||
|
||||
pub static ref AUDIT_TOTAL_MESSAGES_MD: MetricDescriptor =
|
||||
new_counter_md(
|
||||
MetricName::AuditTotalMessages,
|
||||
"Total number of messages sent since start",
|
||||
&[TARGET_ID],
|
||||
subsystems::AUDIT
|
||||
);
|
||||
}
|
||||
68 crates/obs/src/metrics/bucket.rs Normal file
@@ -0,0 +1,68 @@
|
||||
/// bucket level s3 metric descriptor
|
||||
use crate::metrics::{MetricDescriptor, MetricName, new_counter_md, new_gauge_md, new_histogram_md, subsystems};
|
||||
|
||||
lazy_static::lazy_static! {
|
||||
pub static ref BUCKET_API_TRAFFIC_SENT_BYTES_MD: MetricDescriptor =
|
||||
new_counter_md(
|
||||
MetricName::ApiTrafficSentBytes,
|
||||
"Total number of bytes received for a bucket",
|
||||
&["bucket", "type"],
|
||||
subsystems::BUCKET_API
|
||||
);
|
||||
|
||||
pub static ref BUCKET_API_TRAFFIC_RECV_BYTES_MD: MetricDescriptor =
|
||||
new_counter_md(
|
||||
MetricName::ApiTrafficRecvBytes,
|
||||
"Total number of bytes sent for a bucket",
|
||||
&["bucket", "type"],
|
||||
subsystems::BUCKET_API
|
||||
);
|
||||
|
||||
pub static ref BUCKET_API_REQUESTS_IN_FLIGHT_MD: MetricDescriptor =
|
||||
new_gauge_md(
|
||||
MetricName::ApiRequestsInFlightTotal,
|
||||
"Total number of requests currently in flight for a bucket",
|
||||
&["bucket", "name", "type"],
|
||||
subsystems::BUCKET_API
|
||||
);
|
||||
|
||||
pub static ref BUCKET_API_REQUESTS_TOTAL_MD: MetricDescriptor =
|
||||
new_counter_md(
|
||||
MetricName::ApiRequestsTotal,
|
||||
"Total number of requests for a bucket",
|
||||
&["bucket", "name", "type"],
|
||||
subsystems::BUCKET_API
|
||||
);
|
||||
|
||||
pub static ref BUCKET_API_REQUESTS_CANCELED_MD: MetricDescriptor =
|
||||
new_counter_md(
|
||||
MetricName::ApiRequestsCanceledTotal,
|
||||
"Total number of requests canceled by the client for a bucket",
|
||||
&["bucket", "name", "type"],
|
||||
subsystems::BUCKET_API
|
||||
);
|
||||
|
||||
pub static ref BUCKET_API_REQUESTS_4XX_ERRORS_MD: MetricDescriptor =
|
||||
new_counter_md(
|
||||
MetricName::ApiRequests4xxErrorsTotal,
|
||||
"Total number of requests with 4xx errors for a bucket",
|
||||
&["bucket", "name", "type"],
|
||||
subsystems::BUCKET_API
|
||||
);
|
||||
|
||||
pub static ref BUCKET_API_REQUESTS_5XX_ERRORS_MD: MetricDescriptor =
|
||||
new_counter_md(
|
||||
MetricName::ApiRequests5xxErrorsTotal,
|
||||
"Total number of requests with 5xx errors for a bucket",
|
||||
&["bucket", "name", "type"],
|
||||
subsystems::BUCKET_API
|
||||
);
|
||||
|
||||
pub static ref BUCKET_API_REQUESTS_TTFB_SECONDS_DISTRIBUTION_MD: MetricDescriptor =
|
||||
new_histogram_md(
|
||||
MetricName::ApiRequestsTTFBSecondsDistribution,
|
||||
"Distribution of time to first byte across API calls for a bucket",
|
||||
&["bucket", "name", "le", "type"],
|
||||
subsystems::BUCKET_API
|
||||
);
|
||||
}
|
||||
168 crates/obs/src/metrics/bucket_replication.rs Normal file
@@ -0,0 +1,168 @@
|
||||
/// Bucket replication metric descriptors
use crate::metrics::{MetricDescriptor, MetricName, new_counter_md, new_gauge_md, subsystems};

/// The label for the bucket
pub const BUCKET_L: &str = "bucket";
|
||||
/// Replication operation
|
||||
pub const OPERATION_L: &str = "operation";
|
||||
/// Replication target ARN
|
||||
pub const TARGET_ARN_L: &str = "targetArn";
|
||||
/// Replication range
|
||||
pub const RANGE_L: &str = "range";
|
||||
|
||||
lazy_static::lazy_static! {
|
||||
pub static ref BUCKET_REPL_LAST_HR_FAILED_BYTES_MD: MetricDescriptor =
|
||||
new_gauge_md(
|
||||
MetricName::LastHourFailedBytes,
|
||||
"Total number of bytes failed at least once to replicate in the last hour on a bucket",
|
||||
&[BUCKET_L],
|
||||
subsystems::BUCKET_REPLICATION
|
||||
);
|
||||
|
||||
pub static ref BUCKET_REPL_LAST_HR_FAILED_COUNT_MD: MetricDescriptor =
|
||||
new_gauge_md(
|
||||
MetricName::LastHourFailedCount,
|
||||
"Total number of objects which failed replication in the last hour on a bucket",
|
||||
&[BUCKET_L],
|
||||
subsystems::BUCKET_REPLICATION
|
||||
);
|
||||
|
||||
pub static ref BUCKET_REPL_LAST_MIN_FAILED_BYTES_MD: MetricDescriptor =
|
||||
new_gauge_md(
|
||||
MetricName::LastMinFailedBytes,
|
||||
"Total number of bytes failed at least once to replicate in the last full minute on a bucket",
|
||||
&[BUCKET_L],
|
||||
subsystems::BUCKET_REPLICATION
|
||||
);
|
||||
|
||||
pub static ref BUCKET_REPL_LAST_MIN_FAILED_COUNT_MD: MetricDescriptor =
|
||||
new_gauge_md(
|
||||
MetricName::LastMinFailedCount,
|
||||
"Total number of objects which failed replication in the last full minute on a bucket",
|
||||
&[BUCKET_L],
|
||||
subsystems::BUCKET_REPLICATION
|
||||
);
|
||||
|
||||
pub static ref BUCKET_REPL_LATENCY_MS_MD: MetricDescriptor =
|
||||
new_gauge_md(
|
||||
MetricName::LatencyMilliSec,
|
||||
"Replication latency on a bucket in milliseconds",
|
||||
&[BUCKET_L, OPERATION_L, RANGE_L, TARGET_ARN_L],
|
||||
subsystems::BUCKET_REPLICATION
|
||||
);
|
||||
|
||||
pub static ref BUCKET_REPL_PROXIED_DELETE_TAGGING_REQUESTS_TOTAL_MD: MetricDescriptor =
|
||||
new_counter_md(
|
||||
MetricName::ProxiedDeleteTaggingRequestsTotal,
|
||||
"Number of DELETE tagging requests proxied to replication target",
|
||||
&[BUCKET_L],
|
||||
subsystems::BUCKET_REPLICATION
|
||||
);
|
||||
|
||||
pub static ref BUCKET_REPL_PROXIED_GET_REQUESTS_FAILURES_MD: MetricDescriptor =
|
||||
new_counter_md(
|
||||
MetricName::ProxiedGetRequestsFailures,
|
||||
"Number of failures in GET requests proxied to replication target",
|
||||
&[BUCKET_L],
|
||||
subsystems::BUCKET_REPLICATION
|
||||
);
|
||||
|
||||
pub static ref BUCKET_REPL_PROXIED_GET_REQUESTS_TOTAL_MD: MetricDescriptor =
|
||||
new_counter_md(
|
||||
MetricName::ProxiedGetRequestsTotal,
|
||||
"Number of GET requests proxied to replication target",
|
||||
&[BUCKET_L],
|
||||
subsystems::BUCKET_REPLICATION
|
||||
);
|
||||
|
||||
// TODO - add a metric for the number of PUT requests proxied to replication target
|
||||
pub static ref BUCKET_REPL_PROXIED_GET_TAGGING_REQUESTS_FAILURES_MD: MetricDescriptor =
|
||||
new_counter_md(
|
||||
MetricName::ProxiedGetTaggingRequestFailures,
|
||||
"Number of failures in GET tagging requests proxied to replication target",
|
||||
&[BUCKET_L],
|
||||
subsystems::BUCKET_REPLICATION
|
||||
);
|
||||
|
||||
pub static ref BUCKET_REPL_PROXIED_GET_TAGGING_REQUESTS_TOTAL_MD: MetricDescriptor =
|
||||
new_counter_md(
|
||||
MetricName::ProxiedGetTaggingRequestsTotal,
|
||||
"Number of GET tagging requests proxied to replication target",
|
||||
&[BUCKET_L],
|
||||
subsystems::BUCKET_REPLICATION
|
||||
);
|
||||
|
||||
pub static ref BUCKET_REPL_PROXIED_HEAD_REQUESTS_FAILURES_MD: MetricDescriptor =
|
||||
new_counter_md(
|
||||
MetricName::ProxiedHeadRequestsFailures,
|
||||
"Number of failures in HEAD requests proxied to replication target",
|
||||
&[BUCKET_L],
|
||||
subsystems::BUCKET_REPLICATION
|
||||
);
|
||||
|
||||
pub static ref BUCKET_REPL_PROXIED_HEAD_REQUESTS_TOTAL_MD: MetricDescriptor =
|
||||
new_counter_md(
|
||||
MetricName::ProxiedHeadRequestsTotal,
|
||||
"Number of HEAD requests proxied to replication target",
|
||||
&[BUCKET_L],
|
||||
subsystems::BUCKET_REPLICATION
|
||||
);
|
||||
|
||||
// TODO - add a metric for the number of PUT requests proxied to replication target
|
||||
pub static ref BUCKET_REPL_PROXIED_PUT_TAGGING_REQUESTS_FAILURES_MD: MetricDescriptor =
|
||||
new_counter_md(
|
||||
MetricName::ProxiedPutTaggingRequestFailures,
|
||||
"Number of failures in PUT tagging requests proxied to replication target",
|
||||
&[BUCKET_L],
|
||||
subsystems::BUCKET_REPLICATION
|
||||
);
|
||||
|
||||
pub static ref BUCKET_REPL_PROXIED_PUT_TAGGING_REQUESTS_TOTAL_MD: MetricDescriptor =
|
||||
new_counter_md(
|
||||
MetricName::ProxiedPutTaggingRequestsTotal,
|
||||
"Number of PUT tagging requests proxied to replication target",
|
||||
&[BUCKET_L],
|
||||
subsystems::BUCKET_REPLICATION
|
||||
);
|
||||
|
||||
pub static ref BUCKET_REPL_SENT_BYTES_MD: MetricDescriptor =
|
||||
new_counter_md(
|
||||
MetricName::SentBytes,
|
||||
"Total number of bytes replicated to the target",
|
||||
&[BUCKET_L],
|
||||
subsystems::BUCKET_REPLICATION
|
||||
);
|
||||
|
||||
pub static ref BUCKET_REPL_SENT_COUNT_MD: MetricDescriptor =
|
||||
new_counter_md(
|
||||
MetricName::SentCount,
|
||||
"Total number of objects replicated to the target",
|
||||
&[BUCKET_L],
|
||||
subsystems::BUCKET_REPLICATION
|
||||
);
|
||||
|
||||
pub static ref BUCKET_REPL_TOTAL_FAILED_BYTES_MD: MetricDescriptor =
|
||||
new_counter_md(
|
||||
MetricName::TotalFailedBytes,
|
||||
"Total number of bytes failed at least once to replicate since server start",
|
||||
&[BUCKET_L],
|
||||
subsystems::BUCKET_REPLICATION
|
||||
);
|
||||
|
||||
pub static ref BUCKET_REPL_TOTAL_FAILED_COUNT_MD: MetricDescriptor =
|
||||
new_counter_md(
|
||||
MetricName::TotalFailedCount,
|
||||
"Total number of objects which failed replication since server start",
|
||||
&[BUCKET_L],
|
||||
subsystems::BUCKET_REPLICATION
|
||||
);
|
||||
|
||||
// TODO - add a metric for the number of DELETE requests proxied to replication target
|
||||
pub static ref BUCKET_REPL_PROXIED_DELETE_TAGGING_REQUESTS_FAILURES_MD: MetricDescriptor =
|
||||
new_counter_md(
|
||||
MetricName::ProxiedDeleteTaggingRequestFailures,
|
||||
"Number of failures in DELETE tagging requests proxied to replication target",
|
||||
&[BUCKET_L],
|
||||
subsystems::BUCKET_REPLICATION
|
||||
);
|
||||
}
|
||||
20 crates/obs/src/metrics/cluster_config.rs Normal file
@@ -0,0 +1,20 @@
|
||||
/// Metric descriptors related to cluster configuration
|
||||
use crate::metrics::{MetricDescriptor, MetricName, new_gauge_md, subsystems};
|
||||
|
||||
lazy_static::lazy_static! {
|
||||
pub static ref CONFIG_RRS_PARITY_MD: MetricDescriptor =
|
||||
new_gauge_md(
|
||||
MetricName::ConfigRRSParity,
|
||||
"Reduced redundancy storage class parity",
|
||||
&[],
|
||||
subsystems::CLUSTER_CONFIG
|
||||
);
|
||||
|
||||
pub static ref CONFIG_STANDARD_PARITY_MD: MetricDescriptor =
|
||||
new_gauge_md(
|
||||
MetricName::ConfigStandardParity,
|
||||
"Standard storage class parity",
|
||||
&[],
|
||||
subsystems::CLUSTER_CONFIG
|
||||
);
|
||||
}
|
||||
97 crates/obs/src/metrics/cluster_erasure_set.rs Normal file
@@ -0,0 +1,97 @@
|
||||
/// Erasure code set related metric descriptors
|
||||
use crate::metrics::{MetricDescriptor, MetricName, new_gauge_md, subsystems};
|
||||
|
||||
/// The label for the pool ID
|
||||
pub const POOL_ID_L: &str = "pool_id";
|
||||
/// The label for the set ID
|
||||
pub const SET_ID_L: &str = "set_id";
|
||||
|
||||
lazy_static::lazy_static! {
|
||||
pub static ref ERASURE_SET_OVERALL_WRITE_QUORUM_MD: MetricDescriptor =
|
||||
new_gauge_md(
|
||||
MetricName::ErasureSetOverallWriteQuorum,
|
||||
"Overall write quorum across pools and sets",
|
||||
&[],
|
||||
subsystems::CLUSTER_ERASURE_SET
|
||||
);
|
||||
|
||||
pub static ref ERASURE_SET_OVERALL_HEALTH_MD: MetricDescriptor =
|
||||
new_gauge_md(
|
||||
MetricName::ErasureSetOverallHealth,
|
||||
"Overall health across pools and sets (1=healthy, 0=unhealthy)",
|
||||
&[],
|
||||
subsystems::CLUSTER_ERASURE_SET
|
||||
);
|
||||
|
||||
pub static ref ERASURE_SET_READ_QUORUM_MD: MetricDescriptor =
|
||||
new_gauge_md(
|
||||
MetricName::ErasureSetReadQuorum,
|
||||
"Read quorum for the erasure set in a pool",
|
||||
&[POOL_ID_L, SET_ID_L],
|
||||
subsystems::CLUSTER_ERASURE_SET
|
||||
);
|
||||
|
||||
pub static ref ERASURE_SET_WRITE_QUORUM_MD: MetricDescriptor =
|
||||
new_gauge_md(
|
||||
MetricName::ErasureSetWriteQuorum,
|
||||
"Write quorum for the erasure set in a pool",
|
||||
&[POOL_ID_L, SET_ID_L],
|
||||
subsystems::CLUSTER_ERASURE_SET
|
||||
);
|
||||
|
||||
pub static ref ERASURE_SET_ONLINE_DRIVES_COUNT_MD: MetricDescriptor =
|
||||
new_gauge_md(
|
||||
MetricName::ErasureSetOnlineDrivesCount,
|
||||
"Count of online drives in the erasure set in a pool",
|
||||
&[POOL_ID_L, SET_ID_L],
|
||||
subsystems::CLUSTER_ERASURE_SET
|
||||
);
|
||||
|
||||
pub static ref ERASURE_SET_HEALING_DRIVES_COUNT_MD: MetricDescriptor =
|
||||
new_gauge_md(
|
||||
MetricName::ErasureSetHealingDrivesCount,
|
||||
"Count of healing drives in the erasure set in a pool",
|
||||
&[POOL_ID_L, SET_ID_L],
|
||||
subsystems::CLUSTER_ERASURE_SET
|
||||
);
|
||||
|
||||
pub static ref ERASURE_SET_HEALTH_MD: MetricDescriptor =
|
||||
new_gauge_md(
|
||||
MetricName::ErasureSetHealth,
|
||||
"Health of the erasure set in a pool (1=healthy, 0=unhealthy)",
|
||||
&[POOL_ID_L, SET_ID_L],
|
||||
subsystems::CLUSTER_ERASURE_SET
|
||||
);
|
||||
|
||||
pub static ref ERASURE_SET_READ_TOLERANCE_MD: MetricDescriptor =
|
||||
new_gauge_md(
|
||||
MetricName::ErasureSetReadTolerance,
|
||||
"No of drive failures that can be tolerated without disrupting read operations",
|
||||
&[POOL_ID_L, SET_ID_L],
|
||||
subsystems::CLUSTER_ERASURE_SET
|
||||
);
|
||||
|
||||
pub static ref ERASURE_SET_WRITE_TOLERANCE_MD: MetricDescriptor =
|
||||
new_gauge_md(
|
||||
MetricName::ErasureSetWriteTolerance,
|
||||
"No of drive failures that can be tolerated without disrupting write operations",
|
||||
&[POOL_ID_L, SET_ID_L],
|
||||
subsystems::CLUSTER_ERASURE_SET
|
||||
);
|
||||
|
||||
pub static ref ERASURE_SET_READ_HEALTH_MD: MetricDescriptor =
|
||||
new_gauge_md(
|
||||
MetricName::ErasureSetReadHealth,
|
||||
"Health of the erasure set in a pool for read operations (1=healthy, 0=unhealthy)",
|
||||
&[POOL_ID_L, SET_ID_L],
|
||||
subsystems::CLUSTER_ERASURE_SET
|
||||
);
|
||||
|
||||
pub static ref ERASURE_SET_WRITE_HEALTH_MD: MetricDescriptor =
|
||||
new_gauge_md(
|
||||
MetricName::ErasureSetWriteHealth,
|
||||
"Health of the erasure set in a pool for write operations (1=healthy, 0=unhealthy)",
|
||||
&[POOL_ID_L, SET_ID_L],
|
||||
subsystems::CLUSTER_ERASURE_SET
|
||||
);
|
||||
}
|
||||
28 crates/obs/src/metrics/cluster_health.rs Normal file
@@ -0,0 +1,28 @@
/// Cluster health-related metric descriptors
use crate::metrics::{MetricDescriptor, MetricName, new_gauge_md, subsystems};

lazy_static::lazy_static! {
    pub static ref HEALTH_DRIVES_OFFLINE_COUNT_MD: MetricDescriptor =
        new_gauge_md(
            MetricName::HealthDrivesOfflineCount,
            "Count of offline drives in the cluster",
            &[],
            subsystems::CLUSTER_HEALTH
        );

    pub static ref HEALTH_DRIVES_ONLINE_COUNT_MD: MetricDescriptor =
        new_gauge_md(
            MetricName::HealthDrivesOnlineCount,
            "Count of online drives in the cluster",
            &[],
            subsystems::CLUSTER_HEALTH
        );

    pub static ref HEALTH_DRIVES_COUNT_MD: MetricDescriptor =
        new_gauge_md(
            MetricName::HealthDrivesCount,
            "Count of all drives in the cluster",
            &[],
            subsystems::CLUSTER_HEALTH
        );
}
84 crates/obs/src/metrics/cluster_iam.rs Normal file
@@ -0,0 +1,84 @@
/// IAM related metric descriptors
use crate::metrics::{MetricDescriptor, MetricName, new_counter_md, subsystems};

lazy_static::lazy_static! {
    pub static ref LAST_SYNC_DURATION_MILLIS_MD: MetricDescriptor =
        new_counter_md(
            MetricName::LastSyncDurationMillis,
            "Last successful IAM data sync duration in milliseconds",
            &[],
            subsystems::CLUSTER_IAM
        );

    pub static ref PLUGIN_AUTHN_SERVICE_FAILED_REQUESTS_MINUTE_MD: MetricDescriptor =
        new_counter_md(
            MetricName::PluginAuthnServiceFailedRequestsMinute,
            "When plugin authentication is configured, returns failed requests count in the last full minute",
            &[],
            subsystems::CLUSTER_IAM
        );

    pub static ref PLUGIN_AUTHN_SERVICE_LAST_FAIL_SECONDS_MD: MetricDescriptor =
        new_counter_md(
            MetricName::PluginAuthnServiceLastFailSeconds,
            "When plugin authentication is configured, returns time (in seconds) since the last failed request to the service",
            &[],
            subsystems::CLUSTER_IAM
        );

    pub static ref PLUGIN_AUTHN_SERVICE_LAST_SUCC_SECONDS_MD: MetricDescriptor =
        new_counter_md(
            MetricName::PluginAuthnServiceLastSuccSeconds,
            "When plugin authentication is configured, returns time (in seconds) since the last successful request to the service",
            &[],
            subsystems::CLUSTER_IAM
        );

    pub static ref PLUGIN_AUTHN_SERVICE_SUCC_AVG_RTT_MS_MINUTE_MD: MetricDescriptor =
        new_counter_md(
            MetricName::PluginAuthnServiceSuccAvgRttMsMinute,
            "When plugin authentication is configured, returns average round-trip-time of successful requests in the last full minute",
            &[],
            subsystems::CLUSTER_IAM
        );

    pub static ref PLUGIN_AUTHN_SERVICE_SUCC_MAX_RTT_MS_MINUTE_MD: MetricDescriptor =
        new_counter_md(
            MetricName::PluginAuthnServiceSuccMaxRttMsMinute,
            "When plugin authentication is configured, returns maximum round-trip-time of successful requests in the last full minute",
            &[],
            subsystems::CLUSTER_IAM
        );

    pub static ref PLUGIN_AUTHN_SERVICE_TOTAL_REQUESTS_MINUTE_MD: MetricDescriptor =
        new_counter_md(
            MetricName::PluginAuthnServiceTotalRequestsMinute,
            "When plugin authentication is configured, returns total requests count in the last full minute",
            &[],
            subsystems::CLUSTER_IAM
        );

    pub static ref SINCE_LAST_SYNC_MILLIS_MD: MetricDescriptor =
        new_counter_md(
            MetricName::SinceLastSyncMillis,
            "Time (in milliseconds) since last successful IAM data sync.",
            &[],
            subsystems::CLUSTER_IAM
        );

    pub static ref SYNC_FAILURES_MD: MetricDescriptor =
        new_counter_md(
            MetricName::SyncFailures,
            "Number of failed IAM data syncs since server start.",
            &[],
            subsystems::CLUSTER_IAM
        );

    pub static ref SYNC_SUCCESSES_MD: MetricDescriptor =
        new_counter_md(
            MetricName::SyncSuccesses,
            "Number of successful IAM data syncs since server start.",
            &[],
            subsystems::CLUSTER_IAM
        );
}
36 crates/obs/src/metrics/cluster_notification.rs Normal file
@@ -0,0 +1,36 @@
/// Notification-related metric descriptors
use crate::metrics::{MetricDescriptor, MetricName, new_counter_md, subsystems};

lazy_static::lazy_static! {
    pub static ref NOTIFICATION_CURRENT_SEND_IN_PROGRESS_MD: MetricDescriptor =
        new_counter_md(
            MetricName::NotificationCurrentSendInProgress,
            "Number of concurrent async Send calls active to all targets",
            &[],
            subsystems::NOTIFICATION
        );

    pub static ref NOTIFICATION_EVENTS_ERRORS_TOTAL_MD: MetricDescriptor =
        new_counter_md(
            MetricName::NotificationEventsErrorsTotal,
            "Events that were failed to be sent to the targets",
            &[],
            subsystems::NOTIFICATION
        );

    pub static ref NOTIFICATION_EVENTS_SENT_TOTAL_MD: MetricDescriptor =
        new_counter_md(
            MetricName::NotificationEventsSentTotal,
            "Total number of events sent to the targets",
            &[],
            subsystems::NOTIFICATION
        );

    pub static ref NOTIFICATION_EVENTS_SKIPPED_TOTAL_MD: MetricDescriptor =
        new_counter_md(
            MetricName::NotificationEventsSkippedTotal,
            "Events that were skipped to be sent to the targets due to the in-memory queue being full",
            &[],
            subsystems::NOTIFICATION
        );
}
131 crates/obs/src/metrics/cluster_usage.rs Normal file
@@ -0,0 +1,131 @@
/// Descriptors of metrics related to cluster object and bucket usage
use crate::metrics::{MetricDescriptor, MetricName, new_gauge_md, subsystems};

/// Bucket label
pub const BUCKET_LABEL: &str = "bucket";
/// Range label
pub const RANGE_LABEL: &str = "range";

lazy_static::lazy_static! {
    pub static ref USAGE_SINCE_LAST_UPDATE_SECONDS_MD: MetricDescriptor =
        new_gauge_md(
            MetricName::UsageSinceLastUpdateSeconds,
            "Time since last update of usage metrics in seconds",
            &[],
            subsystems::CLUSTER_USAGE_OBJECTS
        );

    pub static ref USAGE_TOTAL_BYTES_MD: MetricDescriptor =
        new_gauge_md(
            MetricName::UsageTotalBytes,
            "Total cluster usage in bytes",
            &[],
            subsystems::CLUSTER_USAGE_OBJECTS
        );

    pub static ref USAGE_OBJECTS_COUNT_MD: MetricDescriptor =
        new_gauge_md(
            MetricName::UsageObjectsCount,
            "Total cluster objects count",
            &[],
            subsystems::CLUSTER_USAGE_OBJECTS
        );

    pub static ref USAGE_VERSIONS_COUNT_MD: MetricDescriptor =
        new_gauge_md(
            MetricName::UsageVersionsCount,
            "Total cluster object versions (including delete markers) count",
            &[],
            subsystems::CLUSTER_USAGE_OBJECTS
        );

    pub static ref USAGE_DELETE_MARKERS_COUNT_MD: MetricDescriptor =
        new_gauge_md(
            MetricName::UsageDeleteMarkersCount,
            "Total cluster delete markers count",
            &[],
            subsystems::CLUSTER_USAGE_OBJECTS
        );

    pub static ref USAGE_BUCKETS_COUNT_MD: MetricDescriptor =
        new_gauge_md(
            MetricName::UsageBucketsCount,
            "Total cluster buckets count",
            &[],
            subsystems::CLUSTER_USAGE_OBJECTS
        );

    pub static ref USAGE_OBJECTS_DISTRIBUTION_MD: MetricDescriptor =
        new_gauge_md(
            MetricName::UsageSizeDistribution,
            "Cluster object size distribution",
            &[RANGE_LABEL],
            subsystems::CLUSTER_USAGE_OBJECTS
        );

    pub static ref USAGE_VERSIONS_DISTRIBUTION_MD: MetricDescriptor =
        new_gauge_md(
            MetricName::UsageVersionCountDistribution,
            "Cluster object version count distribution",
            &[RANGE_LABEL],
            subsystems::CLUSTER_USAGE_OBJECTS
        );
}

lazy_static::lazy_static! {
    pub static ref USAGE_BUCKET_TOTAL_BYTES_MD: MetricDescriptor =
        new_gauge_md(
            MetricName::UsageBucketTotalBytes,
            "Total bucket size in bytes",
            &[BUCKET_LABEL],
            subsystems::CLUSTER_USAGE_BUCKETS
        );

    pub static ref USAGE_BUCKET_OBJECTS_TOTAL_MD: MetricDescriptor =
        new_gauge_md(
            MetricName::UsageBucketObjectsCount,
            "Total objects count in bucket",
            &[BUCKET_LABEL],
            subsystems::CLUSTER_USAGE_BUCKETS
        );

    pub static ref USAGE_BUCKET_VERSIONS_COUNT_MD: MetricDescriptor =
        new_gauge_md(
            MetricName::UsageBucketVersionsCount,
            "Total object versions (including delete markers) count in bucket",
            &[BUCKET_LABEL],
            subsystems::CLUSTER_USAGE_BUCKETS
        );

    pub static ref USAGE_BUCKET_DELETE_MARKERS_COUNT_MD: MetricDescriptor =
        new_gauge_md(
            MetricName::UsageBucketDeleteMarkersCount,
            "Total delete markers count in bucket",
            &[BUCKET_LABEL],
            subsystems::CLUSTER_USAGE_BUCKETS
        );

    pub static ref USAGE_BUCKET_QUOTA_TOTAL_BYTES_MD: MetricDescriptor =
        new_gauge_md(
            MetricName::UsageBucketQuotaTotalBytes,
            "Total bucket quota in bytes",
            &[BUCKET_LABEL],
            subsystems::CLUSTER_USAGE_BUCKETS
        );

    pub static ref USAGE_BUCKET_OBJECT_SIZE_DISTRIBUTION_MD: MetricDescriptor =
        new_gauge_md(
            MetricName::UsageBucketObjectSizeDistribution,
            "Bucket object size distribution",
            &[RANGE_LABEL, BUCKET_LABEL],
            subsystems::CLUSTER_USAGE_BUCKETS
        );

    pub static ref USAGE_BUCKET_OBJECT_VERSION_COUNT_DISTRIBUTION_MD: MetricDescriptor =
        new_gauge_md(
            MetricName::UsageBucketObjectVersionCountDistribution,
            "Bucket object version count distribution",
            &[RANGE_LABEL, BUCKET_LABEL],
            subsystems::CLUSTER_USAGE_BUCKETS
        );
}
67 crates/obs/src/metrics/entry/descriptor.rs Normal file
@@ -0,0 +1,67 @@
use crate::metrics::{MetricName, MetricNamespace, MetricSubsystem, MetricType};
use std::collections::HashSet;

/// MetricDescriptor - Metric descriptors
#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct MetricDescriptor {
    pub name: MetricName,
    pub metric_type: MetricType,
    pub help: String,
    pub variable_labels: Vec<String>,
    pub namespace: MetricNamespace,
    pub subsystem: MetricSubsystem,

    // Internal management values
    label_set: Option<HashSet<String>>,
}

impl MetricDescriptor {
    /// Create a new metric descriptor
    pub fn new(
        name: MetricName,
        metric_type: MetricType,
        help: String,
        variable_labels: Vec<String>,
        namespace: MetricNamespace,
        subsystem: impl Into<MetricSubsystem>, // accepts anything convertible into a MetricSubsystem
    ) -> Self {
        Self {
            name,
            metric_type,
            help,
            variable_labels,
            namespace,
            subsystem: subsystem.into(),
            label_set: None,
        }
    }

    /// Get the full metric name, including the prefix and formatted subsystem path
    #[allow(dead_code)]
    pub fn get_full_metric_name(&self) -> String {
        let prefix = self.metric_type.as_prom();
        let namespace = self.namespace.as_str();
        let formatted_subsystem = self.subsystem.as_str();

        format!("{}{}_{}_{}", prefix, namespace, formatted_subsystem, self.name.as_str())
    }

    /// Check whether the label is in the label set
    #[allow(dead_code)]
    pub fn has_label(&mut self, label: &str) -> bool {
        self.get_label_set().contains(label)
    }

    /// Get the label set, creating it if it doesn't exist yet
    pub fn get_label_set(&mut self) -> &HashSet<String> {
        if self.label_set.is_none() {
            let mut set = HashSet::with_capacity(self.variable_labels.len());
            for label in &self.variable_labels {
                set.insert(label.clone());
            }
            self.label_set = Some(set);
        }
        self.label_set.as_ref().unwrap()
    }
}
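A minimal sketch of how the pieces above compose (illustrative only, not part of this diff). The full name is "<type prefix><namespace>_<subsystem>_<metric name>"; the sketch assumes a MetricSubsystem whose string form is "api_requests", since the subsystem module itself is not shown here:

    // Hypothetical composition sketch, mirroring what get_full_metric_name() does.
    let md = MetricDescriptor::new(
        MetricName::TtfbDistribution,         // as_str() -> "seconds_distribution"
        MetricType::Histogram,                // as_prom() -> "histogram."
        "Time to first byte".to_string(),
        vec!["api".to_string(), "le".to_string()],
        MetricNamespace::RustFS,              // as_str() -> "rustfs"
        MetricSubsystem::new("api_requests"), // assumption: renders as "api_requests"
    );
    assert_eq!(md.get_full_metric_name(), "histogram.rustfs_api_requests_seconds_distribution");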
666 crates/obs/src/metrics/entry/metric_name.rs Normal file
@@ -0,0 +1,666 @@
/// The metric name is the individual name of the metric
#[allow(dead_code)]
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum MetricName {
    // The generic metric names
    AuthTotal,
    CanceledTotal,
    ErrorsTotal,
    HeaderTotal,
    HealTotal,
    HitsTotal,
    InflightTotal,
    InvalidTotal,
    LimitTotal,
    MissedTotal,
    WaitingTotal,
    IncomingTotal,
    ObjectTotal,
    VersionTotal,
    DeleteMarkerTotal,
    OfflineTotal,
    OnlineTotal,
    OpenTotal,
    ReadTotal,
    TimestampTotal,
    WriteTotal,
    Total,
    FreeInodes,

    // Failure statistics metrics
    LastMinFailedCount,
    LastMinFailedBytes,
    LastHourFailedCount,
    LastHourFailedBytes,
    TotalFailedCount,
    TotalFailedBytes,

    // Worker metrics
    CurrActiveWorkers,
    AvgActiveWorkers,
    MaxActiveWorkers,
    RecentBacklogCount,
    CurrInQueueCount,
    CurrInQueueBytes,
    ReceivedCount,
    SentCount,
    CurrTransferRate,
    AvgTransferRate,
    MaxTransferRate,
    CredentialErrors,

    // Link latency metrics
    CurrLinkLatency,
    AvgLinkLatency,
    MaxLinkLatency,

    // Link status metrics
    LinkOnline,
    LinkOfflineDuration,
    LinkDowntimeTotalDuration,

    // Queue metrics
    AvgInQueueCount,
    AvgInQueueBytes,
    MaxInQueueCount,
    MaxInQueueBytes,

    // Proxy request metrics
    ProxiedGetRequestsTotal,
    ProxiedHeadRequestsTotal,
    ProxiedPutTaggingRequestsTotal,
    ProxiedGetTaggingRequestsTotal,
    ProxiedDeleteTaggingRequestsTotal,
    ProxiedGetRequestsFailures,
    ProxiedHeadRequestsFailures,
    ProxiedPutTaggingRequestFailures,
    ProxiedGetTaggingRequestFailures,
    ProxiedDeleteTaggingRequestFailures,

    // Byte-related metrics
    FreeBytes,
    ReadBytes,
    RcharBytes,
    ReceivedBytes,
    LatencyMilliSec,
    SentBytes,
    TotalBytes,
    UsedBytes,
    WriteBytes,
    WcharBytes,

    // Latency metrics
    LatencyMicroSec,
    LatencyNanoSec,

    // Information metrics
    CommitInfo,
    UsageInfo,
    VersionInfo,

    // Distribution metrics
    SizeDistribution,
    VersionDistribution,
    TtfbDistribution,
    TtlbDistribution,

    // Time metrics
    LastActivityTime,
    StartTime,
    UpTime,
    Memory,
    Vmemory,
    Cpu,

    // Expiration and transition metrics
    ExpiryMissedTasks,
    ExpiryMissedFreeVersions,
    ExpiryMissedTierJournalTasks,
    ExpiryNumWorkers,
    TransitionMissedTasks,
    TransitionedBytes,
    TransitionedObjects,
    TransitionedVersions,

    // Tier request metrics
    TierRequestsSuccess,
    TierRequestsFailure,

    // KMS metrics
    KmsOnline,
    KmsRequestsSuccess,
    KmsRequestsError,
    KmsRequestsFail,
    KmsUptime,

    // Webhook metrics
    WebhookOnline,

    // API rejection metrics
    ApiRejectedAuthTotal,
    ApiRejectedHeaderTotal,
    ApiRejectedTimestampTotal,
    ApiRejectedInvalidTotal,

    // API request metrics
    ApiRequestsWaitingTotal,
    ApiRequestsIncomingTotal,
    ApiRequestsInFlightTotal,
    ApiRequestsTotal,
    ApiRequestsErrorsTotal,
    ApiRequests5xxErrorsTotal,
    ApiRequests4xxErrorsTotal,
    ApiRequestsCanceledTotal,

    // API distribution metrics
    ApiRequestsTTFBSecondsDistribution,

    // API traffic metrics
    ApiTrafficSentBytes,
    ApiTrafficRecvBytes,

    // Audit metrics
    AuditFailedMessages,
    AuditTargetQueueLength,
    AuditTotalMessages,

    // Metrics related to cluster configurations
    ConfigRRSParity,
    ConfigStandardParity,

    // Erasure coding set related metrics
    ErasureSetOverallWriteQuorum,
    ErasureSetOverallHealth,
    ErasureSetReadQuorum,
    ErasureSetWriteQuorum,
    ErasureSetOnlineDrivesCount,
    ErasureSetHealingDrivesCount,
    ErasureSetHealth,
    ErasureSetReadTolerance,
    ErasureSetWriteTolerance,
    ErasureSetReadHealth,
    ErasureSetWriteHealth,

    // Cluster health-related metrics
    HealthDrivesOfflineCount,
    HealthDrivesOnlineCount,
    HealthDrivesCount,

    // IAM-related metrics
    LastSyncDurationMillis,
    PluginAuthnServiceFailedRequestsMinute,
    PluginAuthnServiceLastFailSeconds,
    PluginAuthnServiceLastSuccSeconds,
    PluginAuthnServiceSuccAvgRttMsMinute,
    PluginAuthnServiceSuccMaxRttMsMinute,
    PluginAuthnServiceTotalRequestsMinute,
    SinceLastSyncMillis,
    SyncFailures,
    SyncSuccesses,

    // Notification metrics
    NotificationCurrentSendInProgress,
    NotificationEventsErrorsTotal,
    NotificationEventsSentTotal,
    NotificationEventsSkippedTotal,

    // Metrics related to the usage of cluster objects
    UsageSinceLastUpdateSeconds,
    UsageTotalBytes,
    UsageObjectsCount,
    UsageVersionsCount,
    UsageDeleteMarkersCount,
    UsageBucketsCount,
    UsageSizeDistribution,
    UsageVersionCountDistribution,

    // Metrics related to bucket usage
    UsageBucketQuotaTotalBytes,
    UsageBucketTotalBytes,
    UsageBucketObjectsCount,
    UsageBucketVersionsCount,
    UsageBucketDeleteMarkersCount,
    UsageBucketObjectSizeDistribution,
    UsageBucketObjectVersionCountDistribution,

    // ILM-related metrics
    IlmExpiryPendingTasks,
    IlmTransitionActiveTasks,
    IlmTransitionPendingTasks,
    IlmTransitionMissedImmediateTasks,
    IlmVersionsScanned,

    // Webhook logs
    WebhookQueueLength,
    WebhookTotalMessages,
    WebhookFailedMessages,

    // Replication-related metrics
    ReplicationAverageActiveWorkers,
    ReplicationAverageQueuedBytes,
    ReplicationAverageQueuedCount,
    ReplicationAverageDataTransferRate,
    ReplicationCurrentActiveWorkers,
    ReplicationCurrentDataTransferRate,
    ReplicationLastMinuteQueuedBytes,
    ReplicationLastMinuteQueuedCount,
    ReplicationMaxActiveWorkers,
    ReplicationMaxQueuedBytes,
    ReplicationMaxQueuedCount,
    ReplicationMaxDataTransferRate,
    ReplicationRecentBacklogCount,

    // Scanner-related metrics
    ScannerBucketScansFinished,
    ScannerBucketScansStarted,
    ScannerDirectoriesScanned,
    ScannerObjectsScanned,
    ScannerVersionsScanned,
    ScannerLastActivitySeconds,

    // CPU system-related metrics
    SysCPUAvgIdle,
    SysCPUAvgIOWait,
    SysCPULoad,
    SysCPULoadPerc,
    SysCPUNice,
    SysCPUSteal,
    SysCPUSystem,
    SysCPUUser,

    // Drive-related metrics
    DriveUsedBytes,
    DriveFreeBytes,
    DriveTotalBytes,
    DriveUsedInodes,
    DriveFreeInodes,
    DriveTotalInodes,
    DriveTimeoutErrorsTotal,
    DriveIOErrorsTotal,
    DriveAvailabilityErrorsTotal,
    DriveWaitingIO,
    DriveAPILatencyMicros,
    DriveHealth,

    DriveOfflineCount,
    DriveOnlineCount,
    DriveCount,

    // iostat related metrics
    DriveReadsPerSec,
    DriveReadsKBPerSec,
    DriveReadsAwait,
    DriveWritesPerSec,
    DriveWritesKBPerSec,
    DriveWritesAwait,
    DrivePercUtil,

    // Memory-related metrics
    MemTotal,
    MemUsed,
    MemUsedPerc,
    MemFree,
    MemBuffers,
    MemCache,
    MemShared,
    MemAvailable,

    // Network-related metrics
    InternodeErrorsTotal,
    InternodeDialErrorsTotal,
    InternodeDialAvgTimeNanos,
    InternodeSentBytesTotal,
    InternodeRecvBytesTotal,

    // Process-related metrics
    ProcessLocksReadTotal,
    ProcessLocksWriteTotal,
    ProcessCPUTotalSeconds,
    ProcessGoRoutineTotal,
    ProcessIORCharBytes,
    ProcessIOReadBytes,
    ProcessIOWCharBytes,
    ProcessIOWriteBytes,
    ProcessStartTimeSeconds,
    ProcessUptimeSeconds,
    ProcessFileDescriptorLimitTotal,
    ProcessFileDescriptorOpenTotal,
    ProcessSyscallReadTotal,
    ProcessSyscallWriteTotal,
    ProcessResidentMemoryBytes,
    ProcessVirtualMemoryBytes,
    ProcessVirtualMemoryMaxBytes,

    // Custom metrics
    Custom(String),
}

impl MetricName {
    #[allow(dead_code)]
    pub fn as_str(&self) -> String {
        match self {
            Self::AuthTotal => "auth_total".to_string(),
            Self::CanceledTotal => "canceled_total".to_string(),
            Self::ErrorsTotal => "errors_total".to_string(),
            Self::HeaderTotal => "header_total".to_string(),
            Self::HealTotal => "heal_total".to_string(),
            Self::HitsTotal => "hits_total".to_string(),
            Self::InflightTotal => "inflight_total".to_string(),
            Self::InvalidTotal => "invalid_total".to_string(),
            Self::LimitTotal => "limit_total".to_string(),
            Self::MissedTotal => "missed_total".to_string(),
            Self::WaitingTotal => "waiting_total".to_string(),
            Self::IncomingTotal => "incoming_total".to_string(),
            Self::ObjectTotal => "object_total".to_string(),
            Self::VersionTotal => "version_total".to_string(),
            Self::DeleteMarkerTotal => "deletemarker_total".to_string(),
            Self::OfflineTotal => "offline_total".to_string(),
            Self::OnlineTotal => "online_total".to_string(),
            Self::OpenTotal => "open_total".to_string(),
            Self::ReadTotal => "read_total".to_string(),
            Self::TimestampTotal => "timestamp_total".to_string(),
            Self::WriteTotal => "write_total".to_string(),
            Self::Total => "total".to_string(),
            Self::FreeInodes => "free_inodes".to_string(),

            Self::LastMinFailedCount => "last_minute_failed_count".to_string(),
            Self::LastMinFailedBytes => "last_minute_failed_bytes".to_string(),
            Self::LastHourFailedCount => "last_hour_failed_count".to_string(),
            Self::LastHourFailedBytes => "last_hour_failed_bytes".to_string(),
            Self::TotalFailedCount => "total_failed_count".to_string(),
            Self::TotalFailedBytes => "total_failed_bytes".to_string(),

            Self::CurrActiveWorkers => "current_active_workers".to_string(),
            Self::AvgActiveWorkers => "average_active_workers".to_string(),
            Self::MaxActiveWorkers => "max_active_workers".to_string(),
            Self::RecentBacklogCount => "recent_backlog_count".to_string(),
            Self::CurrInQueueCount => "last_minute_queued_count".to_string(),
            Self::CurrInQueueBytes => "last_minute_queued_bytes".to_string(),
            Self::ReceivedCount => "received_count".to_string(),
            Self::SentCount => "sent_count".to_string(),
            Self::CurrTransferRate => "current_transfer_rate".to_string(),
            Self::AvgTransferRate => "average_transfer_rate".to_string(),
            Self::MaxTransferRate => "max_transfer_rate".to_string(),
            Self::CredentialErrors => "credential_errors".to_string(),

            Self::CurrLinkLatency => "current_link_latency_ms".to_string(),
            Self::AvgLinkLatency => "average_link_latency_ms".to_string(),
            Self::MaxLinkLatency => "max_link_latency_ms".to_string(),

            Self::LinkOnline => "link_online".to_string(),
            Self::LinkOfflineDuration => "link_offline_duration_seconds".to_string(),
            Self::LinkDowntimeTotalDuration => "link_downtime_duration_seconds".to_string(),

            Self::AvgInQueueCount => "average_queued_count".to_string(),
            Self::AvgInQueueBytes => "average_queued_bytes".to_string(),
            Self::MaxInQueueCount => "max_queued_count".to_string(),
            Self::MaxInQueueBytes => "max_queued_bytes".to_string(),

            Self::ProxiedGetRequestsTotal => "proxied_get_requests_total".to_string(),
            Self::ProxiedHeadRequestsTotal => "proxied_head_requests_total".to_string(),
            Self::ProxiedPutTaggingRequestsTotal => "proxied_put_tagging_requests_total".to_string(),
            Self::ProxiedGetTaggingRequestsTotal => "proxied_get_tagging_requests_total".to_string(),
            Self::ProxiedDeleteTaggingRequestsTotal => "proxied_delete_tagging_requests_total".to_string(),
            Self::ProxiedGetRequestsFailures => "proxied_get_requests_failures".to_string(),
            Self::ProxiedHeadRequestsFailures => "proxied_head_requests_failures".to_string(),
            Self::ProxiedPutTaggingRequestFailures => "proxied_put_tagging_requests_failures".to_string(),
            Self::ProxiedGetTaggingRequestFailures => "proxied_get_tagging_requests_failures".to_string(),
            Self::ProxiedDeleteTaggingRequestFailures => "proxied_delete_tagging_requests_failures".to_string(),

            Self::FreeBytes => "free_bytes".to_string(),
            Self::ReadBytes => "read_bytes".to_string(),
            Self::RcharBytes => "rchar_bytes".to_string(),
            Self::ReceivedBytes => "received_bytes".to_string(),
            Self::LatencyMilliSec => "latency_ms".to_string(),
            Self::SentBytes => "sent_bytes".to_string(),
            Self::TotalBytes => "total_bytes".to_string(),
            Self::UsedBytes => "used_bytes".to_string(),
            Self::WriteBytes => "write_bytes".to_string(),
            Self::WcharBytes => "wchar_bytes".to_string(),

            Self::LatencyMicroSec => "latency_us".to_string(),
            Self::LatencyNanoSec => "latency_ns".to_string(),

            Self::CommitInfo => "commit_info".to_string(),
            Self::UsageInfo => "usage_info".to_string(),
            Self::VersionInfo => "version_info".to_string(),

            Self::SizeDistribution => "size_distribution".to_string(),
            Self::VersionDistribution => "version_distribution".to_string(),
            Self::TtfbDistribution => "seconds_distribution".to_string(),
            Self::TtlbDistribution => "ttlb_seconds_distribution".to_string(),

            Self::LastActivityTime => "last_activity_nano_seconds".to_string(),
            Self::StartTime => "starttime_seconds".to_string(),
            Self::UpTime => "uptime_seconds".to_string(),
            Self::Memory => "resident_memory_bytes".to_string(),
            Self::Vmemory => "virtual_memory_bytes".to_string(),
            Self::Cpu => "cpu_total_seconds".to_string(),

            Self::ExpiryMissedTasks => "expiry_missed_tasks".to_string(),
            Self::ExpiryMissedFreeVersions => "expiry_missed_freeversions".to_string(),
            Self::ExpiryMissedTierJournalTasks => "expiry_missed_tierjournal_tasks".to_string(),
            Self::ExpiryNumWorkers => "expiry_num_workers".to_string(),
            Self::TransitionMissedTasks => "transition_missed_immediate_tasks".to_string(),

            Self::TransitionedBytes => "transitioned_bytes".to_string(),
            Self::TransitionedObjects => "transitioned_objects".to_string(),
            Self::TransitionedVersions => "transitioned_versions".to_string(),

            Self::TierRequestsSuccess => "requests_success".to_string(),
            Self::TierRequestsFailure => "requests_failure".to_string(),

            Self::KmsOnline => "online".to_string(),
            Self::KmsRequestsSuccess => "request_success".to_string(),
            Self::KmsRequestsError => "request_error".to_string(),
            Self::KmsRequestsFail => "request_failure".to_string(),
            Self::KmsUptime => "uptime".to_string(),

            Self::WebhookOnline => "online".to_string(),

            Self::ApiRejectedAuthTotal => "rejected_auth_total".to_string(),
            Self::ApiRejectedHeaderTotal => "rejected_header_total".to_string(),
            Self::ApiRejectedTimestampTotal => "rejected_timestamp_total".to_string(),
            Self::ApiRejectedInvalidTotal => "rejected_invalid_total".to_string(),

            Self::ApiRequestsWaitingTotal => "waiting_total".to_string(),
            Self::ApiRequestsIncomingTotal => "incoming_total".to_string(),
            Self::ApiRequestsInFlightTotal => "inflight_total".to_string(),
            Self::ApiRequestsTotal => "total".to_string(),
            Self::ApiRequestsErrorsTotal => "errors_total".to_string(),
            Self::ApiRequests5xxErrorsTotal => "5xx_errors_total".to_string(),
            Self::ApiRequests4xxErrorsTotal => "4xx_errors_total".to_string(),
            Self::ApiRequestsCanceledTotal => "canceled_total".to_string(),

            Self::ApiRequestsTTFBSecondsDistribution => "ttfb_seconds_distribution".to_string(),

            Self::ApiTrafficSentBytes => "traffic_sent_bytes".to_string(),
            Self::ApiTrafficRecvBytes => "traffic_received_bytes".to_string(),

            Self::AuditFailedMessages => "failed_messages".to_string(),
            Self::AuditTargetQueueLength => "target_queue_length".to_string(),
            Self::AuditTotalMessages => "total_messages".to_string(),

            // Metrics related to cluster configurations
            Self::ConfigRRSParity => "rrs_parity".to_string(),
            Self::ConfigStandardParity => "standard_parity".to_string(),

            // Erasure coding set related metrics
            Self::ErasureSetOverallWriteQuorum => "overall_write_quorum".to_string(),
            Self::ErasureSetOverallHealth => "overall_health".to_string(),
            Self::ErasureSetReadQuorum => "read_quorum".to_string(),
            Self::ErasureSetWriteQuorum => "write_quorum".to_string(),
            Self::ErasureSetOnlineDrivesCount => "online_drives_count".to_string(),
            Self::ErasureSetHealingDrivesCount => "healing_drives_count".to_string(),
            Self::ErasureSetHealth => "health".to_string(),
            Self::ErasureSetReadTolerance => "read_tolerance".to_string(),
            Self::ErasureSetWriteTolerance => "write_tolerance".to_string(),
            Self::ErasureSetReadHealth => "read_health".to_string(),
            Self::ErasureSetWriteHealth => "write_health".to_string(),

            // Cluster health-related metrics
            Self::HealthDrivesOfflineCount => "drives_offline_count".to_string(),
            Self::HealthDrivesOnlineCount => "drives_online_count".to_string(),
            Self::HealthDrivesCount => "drives_count".to_string(),

            // IAM-related metrics
            Self::LastSyncDurationMillis => "last_sync_duration_millis".to_string(),
            Self::PluginAuthnServiceFailedRequestsMinute => "plugin_authn_service_failed_requests_minute".to_string(),
            Self::PluginAuthnServiceLastFailSeconds => "plugin_authn_service_last_fail_seconds".to_string(),
            Self::PluginAuthnServiceLastSuccSeconds => "plugin_authn_service_last_succ_seconds".to_string(),
            Self::PluginAuthnServiceSuccAvgRttMsMinute => "plugin_authn_service_succ_avg_rtt_ms_minute".to_string(),
            Self::PluginAuthnServiceSuccMaxRttMsMinute => "plugin_authn_service_succ_max_rtt_ms_minute".to_string(),
            Self::PluginAuthnServiceTotalRequestsMinute => "plugin_authn_service_total_requests_minute".to_string(),
            Self::SinceLastSyncMillis => "since_last_sync_millis".to_string(),
            Self::SyncFailures => "sync_failures".to_string(),
            Self::SyncSuccesses => "sync_successes".to_string(),

            // Notification metrics
            Self::NotificationCurrentSendInProgress => "current_send_in_progress".to_string(),
            Self::NotificationEventsErrorsTotal => "events_errors_total".to_string(),
            Self::NotificationEventsSentTotal => "events_sent_total".to_string(),
            Self::NotificationEventsSkippedTotal => "events_skipped_total".to_string(),

            // Metrics related to the usage of cluster objects
            Self::UsageSinceLastUpdateSeconds => "since_last_update_seconds".to_string(),
            Self::UsageTotalBytes => "total_bytes".to_string(),
            Self::UsageObjectsCount => "count".to_string(),
            Self::UsageVersionsCount => "versions_count".to_string(),
            Self::UsageDeleteMarkersCount => "delete_markers_count".to_string(),
            Self::UsageBucketsCount => "buckets_count".to_string(),
            Self::UsageSizeDistribution => "size_distribution".to_string(),
            Self::UsageVersionCountDistribution => "version_count_distribution".to_string(),

            // Metrics related to bucket usage
            Self::UsageBucketQuotaTotalBytes => "quota_total_bytes".to_string(),
            Self::UsageBucketTotalBytes => "total_bytes".to_string(),
            Self::UsageBucketObjectsCount => "objects_count".to_string(),
            Self::UsageBucketVersionsCount => "versions_count".to_string(),
            Self::UsageBucketDeleteMarkersCount => "delete_markers_count".to_string(),
            Self::UsageBucketObjectSizeDistribution => "object_size_distribution".to_string(),
            Self::UsageBucketObjectVersionCountDistribution => "object_version_count_distribution".to_string(),

            // ILM-related metrics
            Self::IlmExpiryPendingTasks => "expiry_pending_tasks".to_string(),
            Self::IlmTransitionActiveTasks => "transition_active_tasks".to_string(),
            Self::IlmTransitionPendingTasks => "transition_pending_tasks".to_string(),
            Self::IlmTransitionMissedImmediateTasks => "transition_missed_immediate_tasks".to_string(),
            Self::IlmVersionsScanned => "versions_scanned".to_string(),

            // Webhook logs
            Self::WebhookQueueLength => "queue_length".to_string(),
            Self::WebhookTotalMessages => "total_messages".to_string(),
            Self::WebhookFailedMessages => "failed_messages".to_string(),

            // Replication-related metrics
            Self::ReplicationAverageActiveWorkers => "average_active_workers".to_string(),
            Self::ReplicationAverageQueuedBytes => "average_queued_bytes".to_string(),
            Self::ReplicationAverageQueuedCount => "average_queued_count".to_string(),
            Self::ReplicationAverageDataTransferRate => "average_data_transfer_rate".to_string(),
            Self::ReplicationCurrentActiveWorkers => "current_active_workers".to_string(),
            Self::ReplicationCurrentDataTransferRate => "current_data_transfer_rate".to_string(),
            Self::ReplicationLastMinuteQueuedBytes => "last_minute_queued_bytes".to_string(),
            Self::ReplicationLastMinuteQueuedCount => "last_minute_queued_count".to_string(),
            Self::ReplicationMaxActiveWorkers => "max_active_workers".to_string(),
            Self::ReplicationMaxQueuedBytes => "max_queued_bytes".to_string(),
            Self::ReplicationMaxQueuedCount => "max_queued_count".to_string(),
            Self::ReplicationMaxDataTransferRate => "max_data_transfer_rate".to_string(),
            Self::ReplicationRecentBacklogCount => "recent_backlog_count".to_string(),

            // Scanner-related metrics
            Self::ScannerBucketScansFinished => "bucket_scans_finished".to_string(),
            Self::ScannerBucketScansStarted => "bucket_scans_started".to_string(),
            Self::ScannerDirectoriesScanned => "directories_scanned".to_string(),
            Self::ScannerObjectsScanned => "objects_scanned".to_string(),
            Self::ScannerVersionsScanned => "versions_scanned".to_string(),
            Self::ScannerLastActivitySeconds => "last_activity_seconds".to_string(),

            // CPU system-related metrics
            Self::SysCPUAvgIdle => "avg_idle".to_string(),
            Self::SysCPUAvgIOWait => "avg_iowait".to_string(),
            Self::SysCPULoad => "load".to_string(),
            Self::SysCPULoadPerc => "load_perc".to_string(),
            Self::SysCPUNice => "nice".to_string(),
            Self::SysCPUSteal => "steal".to_string(),
            Self::SysCPUSystem => "system".to_string(),
            Self::SysCPUUser => "user".to_string(),

            // Drive-related metrics
            Self::DriveUsedBytes => "used_bytes".to_string(),
            Self::DriveFreeBytes => "free_bytes".to_string(),
            Self::DriveTotalBytes => "total_bytes".to_string(),
            Self::DriveUsedInodes => "used_inodes".to_string(),
            Self::DriveFreeInodes => "free_inodes".to_string(),
            Self::DriveTotalInodes => "total_inodes".to_string(),
            Self::DriveTimeoutErrorsTotal => "timeout_errors_total".to_string(),
            Self::DriveIOErrorsTotal => "io_errors_total".to_string(),
            Self::DriveAvailabilityErrorsTotal => "availability_errors_total".to_string(),
            Self::DriveWaitingIO => "waiting_io".to_string(),
            Self::DriveAPILatencyMicros => "api_latency_micros".to_string(),
            Self::DriveHealth => "health".to_string(),

            Self::DriveOfflineCount => "offline_count".to_string(),
            Self::DriveOnlineCount => "online_count".to_string(),
            Self::DriveCount => "count".to_string(),

            // iostat related metrics
            Self::DriveReadsPerSec => "reads_per_sec".to_string(),
            Self::DriveReadsKBPerSec => "reads_kb_per_sec".to_string(),
            Self::DriveReadsAwait => "reads_await".to_string(),
            Self::DriveWritesPerSec => "writes_per_sec".to_string(),
            Self::DriveWritesKBPerSec => "writes_kb_per_sec".to_string(),
            Self::DriveWritesAwait => "writes_await".to_string(),
            Self::DrivePercUtil => "perc_util".to_string(),

            // Memory-related metrics
            Self::MemTotal => "total".to_string(),
            Self::MemUsed => "used".to_string(),
            Self::MemUsedPerc => "used_perc".to_string(),
            Self::MemFree => "free".to_string(),
            Self::MemBuffers => "buffers".to_string(),
            Self::MemCache => "cache".to_string(),
            Self::MemShared => "shared".to_string(),
            Self::MemAvailable => "available".to_string(),

            // Network-related metrics
            Self::InternodeErrorsTotal => "errors_total".to_string(),
            Self::InternodeDialErrorsTotal => "dial_errors_total".to_string(),
            Self::InternodeDialAvgTimeNanos => "dial_avg_time_nanos".to_string(),
            Self::InternodeSentBytesTotal => "sent_bytes_total".to_string(),
            Self::InternodeRecvBytesTotal => "recv_bytes_total".to_string(),

            // Process-related metrics
            Self::ProcessLocksReadTotal => "locks_read_total".to_string(),
            Self::ProcessLocksWriteTotal => "locks_write_total".to_string(),
            Self::ProcessCPUTotalSeconds => "cpu_total_seconds".to_string(),
            Self::ProcessGoRoutineTotal => "go_routine_total".to_string(),
            Self::ProcessIORCharBytes => "io_rchar_bytes".to_string(),
            Self::ProcessIOReadBytes => "io_read_bytes".to_string(),
            Self::ProcessIOWCharBytes => "io_wchar_bytes".to_string(),
            Self::ProcessIOWriteBytes => "io_write_bytes".to_string(),
            Self::ProcessStartTimeSeconds => "start_time_seconds".to_string(),
            Self::ProcessUptimeSeconds => "uptime_seconds".to_string(),
            Self::ProcessFileDescriptorLimitTotal => "file_descriptor_limit_total".to_string(),
            Self::ProcessFileDescriptorOpenTotal => "file_descriptor_open_total".to_string(),
            Self::ProcessSyscallReadTotal => "syscall_read_total".to_string(),
            Self::ProcessSyscallWriteTotal => "syscall_write_total".to_string(),
            Self::ProcessResidentMemoryBytes => "resident_memory_bytes".to_string(),
            Self::ProcessVirtualMemoryBytes => "virtual_memory_bytes".to_string(),
            Self::ProcessVirtualMemoryMaxBytes => "virtual_memory_max_bytes".to_string(),

            Self::Custom(name) => name.clone(),
        }
    }
}

impl From<String> for MetricName {
    fn from(s: String) -> Self {
        Self::Custom(s)
    }
}

impl From<&str> for MetricName {
    fn from(s: &str) -> Self {
        Self::Custom(s.to_string())
    }
}
31 crates/obs/src/metrics/entry/metric_type.rs Normal file
@@ -0,0 +1,31 @@
/// MetricType - Indicates the type of metric
#[allow(dead_code)]
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum MetricType {
    Counter,
    Gauge,
    Histogram,
}

impl MetricType {
    /// Convert the metric type to a string representation
    #[allow(dead_code)]
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Counter => "counter",
            Self::Gauge => "gauge",
            Self::Histogram => "histogram",
        }
    }

    /// Convert the metric type to the Prometheus value type prefix
    /// In a Rust implementation, this might return the corresponding Prometheus Rust client type
    #[allow(dead_code)]
    pub fn as_prom(&self) -> &'static str {
        match self {
            Self::Counter => "counter.",
            Self::Gauge => "gauge.",
            Self::Histogram => "histogram.",
        }
    }
}
115 crates/obs/src/metrics/entry/mod.rs Normal file
@@ -0,0 +1,115 @@
use crate::metrics::{MetricDescriptor, MetricName, MetricNamespace, MetricSubsystem, MetricType};

pub(crate) mod descriptor;
pub(crate) mod metric_name;
pub(crate) mod metric_type;
pub(crate) mod namespace;
mod path_utils;
pub(crate) mod subsystem;

/// Create a new counter metric descriptor
pub fn new_counter_md(
    name: impl Into<MetricName>,
    help: impl Into<String>,
    labels: &[&str],
    subsystem: impl Into<MetricSubsystem>,
) -> MetricDescriptor {
    MetricDescriptor::new(
        name.into(),
        MetricType::Counter,
        help.into(),
        labels.iter().map(|&s| s.to_string()).collect(),
        MetricNamespace::RustFS,
        subsystem,
    )
}

/// Create a new gauge metric descriptor
pub fn new_gauge_md(
    name: impl Into<MetricName>,
    help: impl Into<String>,
    labels: &[&str],
    subsystem: impl Into<MetricSubsystem>,
) -> MetricDescriptor {
    MetricDescriptor::new(
        name.into(),
        MetricType::Gauge,
        help.into(),
        labels.iter().map(|&s| s.to_string()).collect(),
        MetricNamespace::RustFS,
        subsystem,
    )
}

/// Create a new histogram metric descriptor
#[allow(dead_code)]
pub fn new_histogram_md(
    name: impl Into<MetricName>,
    help: impl Into<String>,
    labels: &[&str],
    subsystem: impl Into<MetricSubsystem>,
) -> MetricDescriptor {
    MetricDescriptor::new(
        name.into(),
        MetricType::Histogram,
        help.into(),
        labels.iter().map(|&s| s.to_string()).collect(),
        MetricNamespace::RustFS,
        subsystem,
    )
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::metrics::subsystems;

    #[test]
    fn test_new_histogram_md() {
        // Create a histogram metric descriptor
        let histogram_md = new_histogram_md(
            MetricName::TtfbDistribution,
            "test the response time distribution",
            &["api", "method", "le"],
            subsystems::API_REQUESTS,
        );

        // Verify that the metric type is correct
        assert_eq!(histogram_md.metric_type, MetricType::Histogram);

        // Verify that the metric name is correct
        assert_eq!(histogram_md.name.as_str(), "seconds_distribution");

        // Verify that the help information is correct
        assert_eq!(histogram_md.help, "test the response time distribution");

        // Verify that the labels are correct
        assert_eq!(histogram_md.variable_labels.len(), 3);
        assert!(histogram_md.variable_labels.contains(&"api".to_string()));
        assert!(histogram_md.variable_labels.contains(&"method".to_string()));
        assert!(histogram_md.variable_labels.contains(&"le".to_string()));

        // Verify that the namespace is correct
        assert_eq!(histogram_md.namespace, MetricNamespace::RustFS);

        // Verify that the subsystem is correct
        assert_eq!(histogram_md.subsystem, MetricSubsystem::ApiRequests);

        // Verify that the generated full metric name is formatted correctly
        assert_eq!(histogram_md.get_full_metric_name(), "histogram.rustfs_api_requests_seconds_distribution");

        // Test using a custom subsystem
        let custom_histogram_md = new_histogram_md(
            "custom_latency_distribution",
            "custom latency distribution",
            &["endpoint", "le"],
            MetricSubsystem::new("/custom/path-metrics"),
        );

        // Verify the custom name and subsystem
        assert_eq!(
            custom_histogram_md.get_full_metric_name(),
            "histogram.rustfs_custom_path_metrics_custom_latency_distribution"
        );
    }
}
14 crates/obs/src/metrics/entry/namespace.rs Normal file
@@ -0,0 +1,14 @@
/// The metric namespace, which represents the top-level grouping of the metric
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum MetricNamespace {
    RustFS,
}

impl MetricNamespace {
    #[allow(dead_code)]
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::RustFS => "rustfs",
        }
    }
}
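Taken together, the descriptor helpers in entry/mod.rs are what the lazy_static blocks in the earlier files call. A rough usage sketch (illustrative only, not part of this diff; it assumes the subsystems::CLUSTER_HEALTH constant from the subsystem module, which is not shown here, renders as "cluster_health"):

    use crate::metrics::{new_gauge_md, subsystems, MetricName};

    // Build one of the cluster-health gauges the same way the lazy_static block does
    // and render its Prometheus-style name.
    let md = new_gauge_md(
        MetricName::HealthDrivesOfflineCount,
        "Count of offline drives in the cluster",
        &[],
        subsystems::CLUSTER_HEALTH, // assumption: renders as "cluster_health"
    );
    // Expected shape: "gauge.rustfs_cluster_health_drives_offline_count"
    println!("{}", md.get_full_metric_name());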