Compare commits
29 Commits
1.0.0-alph
...
1.0.0-alph
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
c2d782bed1 | ||
|
|
e00f5be746 | ||
|
|
e23297f695 | ||
|
|
d6840a6e04 | ||
|
|
3557a52dc4 | ||
|
|
fd2aab2bd9 | ||
|
|
f1c50fcb74 | ||
|
|
bdcba3460e | ||
|
|
8857f31b07 | ||
|
|
5b85bf7a00 | ||
|
|
46bd75c0f8 | ||
|
|
5fc5dd0fd9 | ||
|
|
adc07e5209 | ||
|
|
357cced49c | ||
|
|
a104c33974 | ||
|
|
516e00f15f | ||
|
|
a64c3c28b8 | ||
|
|
e9c9a2d1f2 | ||
|
|
3ebab98d2d | ||
|
|
10c949af62 | ||
|
|
4a3325276d | ||
|
|
c5f6c66f72 | ||
|
|
c7c149975b | ||
|
|
d552210b59 | ||
|
|
581607da6a | ||
|
|
e95107f7d6 | ||
|
|
a693cb52f3 | ||
|
|
2c7366038e | ||
|
|
1cc6dfde87 |
58
.copilot-rules.md
Normal file
@@ -0,0 +1,58 @@
|
||||
# GitHub Copilot Rules for RustFS Project
|
||||
|
||||
## Core Rules Reference
|
||||
|
||||
This project follows the comprehensive AI coding rules defined in `.rules.md`. Please refer to that file for the complete set of development guidelines, coding standards, and best practices.
|
||||
|
||||
## Copilot-Specific Configuration
|
||||
|
||||
When using GitHub Copilot for this project, ensure you:
|
||||
|
||||
1. **Review the unified rules**: Always check `.rules.md` for the latest project guidelines
|
||||
2. **Follow branch protection**: Never attempt to commit directly to main/master branch
|
||||
3. **Use English**: All code comments, documentation, and variable names must be in English
|
||||
4. **Clean code practices**: Only make modifications you're confident about
|
||||
5. **Test thoroughly**: Ensure all changes pass formatting, linting, and testing requirements
|
||||
|
||||
## Quick Reference
|
||||
|
||||
### Critical Rules
|
||||
- 🚫 **NEVER commit directly to main/master branch**
|
||||
- ✅ **ALWAYS work on feature branches**
|
||||
- 📝 **ALWAYS use English for code and documentation**
|
||||
- 🧹 **ALWAYS clean up temporary files after use**
|
||||
- 🎯 **ONLY make confident, necessary modifications**
|
||||
|
||||
### Pre-commit Checklist
|
||||
```bash
|
||||
# Before committing, always run:
|
||||
cargo fmt --all
|
||||
cargo clippy --all-targets --all-features -- -D warnings
|
||||
cargo check --all-targets
|
||||
cargo test
|
||||
```
|
||||
|
||||
### Branch Workflow
|
||||
```bash
|
||||
git checkout main
|
||||
git pull origin main
|
||||
git checkout -b feat/your-feature-name
|
||||
# Make your changes
|
||||
git add .
|
||||
git commit -m "feat: your feature description"
|
||||
git push origin feat/your-feature-name
|
||||
gh pr create
|
||||
```
|
||||
|
||||
## Important Notes
|
||||
|
||||
- This file serves as an entry point for GitHub Copilot
|
||||
- All detailed rules and guidelines are maintained in `.rules.md`
|
||||
- Updates to coding standards should be made in `.rules.md` to ensure consistency across all AI tools
|
||||
- When in doubt, always refer to `.rules.md` for authoritative guidance
|
||||
|
||||
## See Also
|
||||
|
||||
- [.rules.md](./.rules.md) - Complete AI coding rules and guidelines
|
||||
- [CONTRIBUTING.md](./CONTRIBUTING.md) - Contribution guidelines
|
||||
- [README.md](./README.md) - Project overview and setup instructions
|
||||
3
.github/workflows/audit.yml
vendored
@@ -31,6 +31,9 @@ on:
|
||||
- cron: '0 0 * * 0' # Weekly on Sunday at midnight UTC
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
|
||||
3
.github/workflows/build.yml
vendored
@@ -70,6 +70,9 @@ on:
|
||||
default: true
|
||||
type: boolean
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
RUST_BACKTRACE: 1
|
||||
|
||||
3
.github/workflows/ci.yml
vendored
@@ -59,6 +59,9 @@ on:
|
||||
- cron: "0 0 * * 0" # Weekly on Sunday at midnight UTC
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
RUST_BACKTRACE: 1
|
||||
|
||||
24
.github/workflows/docker.yml
vendored
@@ -58,6 +58,10 @@ on:
|
||||
type: boolean
|
||||
|
||||
env:
|
||||
CONCLUSION: ${{ github.event.workflow_run.conclusion }}
|
||||
HEAD_BRANCH: ${{ github.event.workflow_run.head_branch }}
|
||||
HEAD_SHA: ${{ github.event.workflow_run.head_sha }}
|
||||
TRIGGERING_EVENT: ${{ github.event.workflow_run.event }}
|
||||
DOCKERHUB_USERNAME: rustfs
|
||||
CARGO_TERM_COLOR: always
|
||||
REGISTRY_DOCKERHUB: rustfs/rustfs
|
||||
@@ -102,27 +106,27 @@ jobs:
|
||||
|
||||
# Check if the triggering workflow was successful
|
||||
# If the workflow succeeded, it means ALL builds (including Linux x86_64 and aarch64) succeeded
|
||||
if [[ "${{ github.event.workflow_run.conclusion }}" == "success" ]]; then
|
||||
if [[ "$CONCLUSION" == "success" ]]; then
|
||||
echo "✅ Build workflow succeeded, all builds including Linux are successful"
|
||||
should_build=true
|
||||
should_push=true
|
||||
else
|
||||
echo "❌ Build workflow failed (conclusion: ${{ github.event.workflow_run.conclusion }}), skipping Docker build"
|
||||
echo "❌ Build workflow failed (conclusion: $CONCLUSION), skipping Docker build"
|
||||
should_build=false
|
||||
fi
|
||||
|
||||
# Extract version info from commit message or use commit SHA
|
||||
# Use Git to generate consistent short SHA (ensures uniqueness like build.yml)
|
||||
short_sha=$(git rev-parse --short "${{ github.event.workflow_run.head_sha }}")
|
||||
short_sha=$(git rev-parse --short "$HEAD_SHA")
|
||||
|
||||
# Determine build type based on triggering workflow event and ref
|
||||
triggering_event="${{ github.event.workflow_run.event }}"
|
||||
head_branch="${{ github.event.workflow_run.head_branch }}"
|
||||
triggering_event="$TRIGGERING_EVENT"
|
||||
head_branch="$HEAD_BRANCH"
|
||||
|
||||
echo "🔍 Analyzing triggering workflow:"
|
||||
echo " 📋 Event: $triggering_event"
|
||||
echo " 🌿 Head branch: $head_branch"
|
||||
echo " 📎 Head SHA: ${{ github.event.workflow_run.head_sha }}"
|
||||
echo " 📎 Head SHA: $HEAD_SHA"
|
||||
|
||||
# Check if this was triggered by a tag push
|
||||
if [[ "$triggering_event" == "push" ]]; then
|
||||
@@ -174,10 +178,10 @@ jobs:
|
||||
fi
|
||||
|
||||
echo "🔄 Build triggered by workflow_run:"
|
||||
echo " 📋 Conclusion: ${{ github.event.workflow_run.conclusion }}"
|
||||
echo " 🌿 Branch: ${{ github.event.workflow_run.head_branch }}"
|
||||
echo " 📎 SHA: ${{ github.event.workflow_run.head_sha }}"
|
||||
echo " 🎯 Event: ${{ github.event.workflow_run.event }}"
|
||||
echo " 📋 Conclusion: $CONCLUSION"
|
||||
echo " 🌿 Branch: $HEAD_BRANCH"
|
||||
echo " 📎 SHA: $HEAD_SHA"
|
||||
echo " 🎯 Event: $TRIGGERING_EVENT"
|
||||
|
||||
elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
|
||||
# Manual trigger
|
||||
|
||||
8
.github/workflows/issue-translator.yml
vendored
@@ -15,9 +15,13 @@
|
||||
name: "issue-translator"
|
||||
on:
|
||||
issue_comment:
|
||||
types: [created]
|
||||
types: [ created ]
|
||||
issues:
|
||||
types: [opened]
|
||||
types: [ opened ]
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
issues: write
|
||||
|
||||
jobs:
|
||||
build:
|
||||
|
||||
3
.github/workflows/performance.yml
vendored
@@ -30,6 +30,9 @@ on:
|
||||
default: "120"
|
||||
type: string
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
RUST_BACKTRACE: 1
|
||||
|
||||
702
.rules.md
Normal file
@@ -0,0 +1,702 @@
|
||||
# RustFS Project AI Coding Rules
|
||||
|
||||
## 🚨🚨🚨 CRITICAL DEVELOPMENT RULES - ZERO TOLERANCE 🚨🚨🚨
|
||||
|
||||
### ⛔️ ABSOLUTE PROHIBITION: NEVER COMMIT DIRECTLY TO MASTER/MAIN BRANCH ⛔️
|
||||
|
||||
**🔥 THIS IS THE MOST CRITICAL RULE - VIOLATION WILL RESULT IN IMMEDIATE REVERSAL 🔥**
|
||||
|
||||
- **🚫 ZERO DIRECT COMMITS TO MAIN/MASTER BRANCH - ABSOLUTELY FORBIDDEN**
|
||||
- **🚫 ANY DIRECT COMMIT TO MAIN BRANCH MUST BE IMMEDIATELY REVERTED**
|
||||
- **🚫 NO EXCEPTIONS FOR HOTFIXES, EMERGENCIES, OR URGENT CHANGES**
|
||||
- **🚫 NO EXCEPTIONS FOR SMALL CHANGES, TYPOS, OR DOCUMENTATION UPDATES**
|
||||
- **🚫 NO EXCEPTIONS FOR ANYONE - MAINTAINERS, CONTRIBUTORS, OR ADMINS**
|
||||
|
||||
### 📋 MANDATORY WORKFLOW - STRICTLY ENFORCED
|
||||
|
||||
**EVERY SINGLE CHANGE MUST FOLLOW THIS WORKFLOW:**
|
||||
|
||||
1. **Check current branch**: `git branch` (MUST NOT be on main/master)
|
||||
2. **Switch to main**: `git checkout main`
|
||||
3. **Pull latest**: `git pull origin main`
|
||||
4. **Create feature branch**: `git checkout -b feat/your-feature-name`
|
||||
5. **Make changes ONLY on feature branch**
|
||||
6. **Test thoroughly before committing**
|
||||
7. **Commit and push to feature branch**: `git push origin feat/your-feature-name`
|
||||
8. **Create Pull Request**: Use `gh pr create` (MANDATORY)
|
||||
9. **Wait for PR approval**: NO self-merging allowed
|
||||
10. **Merge through GitHub interface**: ONLY after approval
|
||||
|
||||
### 🔒 ENFORCEMENT MECHANISMS
|
||||
|
||||
- **Branch protection rules**: Main branch is protected
|
||||
- **Pre-commit hooks**: Will block direct commits to main
|
||||
- **CI/CD checks**: All PRs must pass before merging
|
||||
- **Code review requirement**: At least one approval needed
|
||||
- **Automated reversal**: Direct commits to main will be automatically reverted
|
||||
|
||||
## 🎯 Core AI Development Principles
|
||||
|
||||
### Five Execution Steps
|
||||
|
||||
#### 1. Task Analysis and Planning
|
||||
- **Clear Objectives**: Deeply understand task requirements and expected results before starting coding
|
||||
- **Plan Development**: List specific files, components, and functions that need modification, explaining the reasons for changes
|
||||
- **Risk Assessment**: Evaluate the impact of changes on existing functionality, develop rollback plans
|
||||
|
||||
#### 2. Precise Code Location
|
||||
- **File Identification**: Determine specific files and line numbers that need modification
|
||||
- **Impact Analysis**: Avoid modifying irrelevant files, clearly state the reason for each file modification
|
||||
- **Minimization Principle**: Unless explicitly required by the task, do not create new abstraction layers or refactor existing code
|
||||
|
||||
#### 3. Minimal Code Changes
|
||||
- **Focus on Core**: Only write code directly required by the task
|
||||
- **Avoid Redundancy**: Do not add unnecessary logs, comments, tests, or error handling
|
||||
- **Isolation**: Ensure new code does not interfere with existing functionality, maintain code independence
|
||||
|
||||
#### 4. Strict Code Review
|
||||
- **Correctness Check**: Verify the correctness and completeness of code logic
|
||||
- **Style Consistency**: Ensure code conforms to established project coding style
|
||||
- **Side Effect Assessment**: Evaluate the impact of changes on downstream systems
|
||||
|
||||
#### 5. Clear Delivery Documentation
|
||||
- **Change Summary**: Detailed explanation of all modifications and reasons
|
||||
- **File List**: List all modified files and their specific changes
|
||||
- **Risk Statement**: Mark any assumptions or potential risk points
|
||||
|
||||
### Core Principles
|
||||
- **🎯 Precise Execution**: Strictly follow task requirements, no arbitrary innovation
|
||||
- **⚡ Efficient Development**: Avoid over-design, only do necessary work
|
||||
- **🛡️ Safe and Reliable**: Always follow development processes, ensure code quality and system stability
|
||||
- **🔒 Cautious Modification**: Only modify when clearly knowing what needs to be changed and having confidence
|
||||
|
||||
### Additional AI Behavior Rules
|
||||
|
||||
1. **Use English for all code comments and documentation** - All comments, variable names, function names, documentation, and user-facing text in code should be in English
|
||||
2. **Clean up temporary scripts after use** - Any temporary scripts, test files, or helper files created during AI work should be removed after task completion
|
||||
3. **Only make confident modifications** - Do not make speculative changes or "convenient" modifications outside the task scope. If uncertain about a change, ask for clarification rather than guessing
|
||||
|
||||
## Project Overview
|
||||
|
||||
RustFS is a high-performance distributed object storage system written in Rust, compatible with S3 API. The project adopts a modular architecture, supporting erasure coding storage, multi-tenant management, observability, and other enterprise-level features.
|
||||
|
||||
## Core Architecture Principles
|
||||
|
||||
### 1. Modular Design
|
||||
|
||||
- Project uses Cargo workspace structure, containing multiple independent crates
|
||||
- Core modules: `rustfs` (main service), `ecstore` (erasure coding storage), `common` (shared components)
|
||||
- Functional modules: `iam` (identity management), `madmin` (management interface), `crypto` (encryption), etc.
|
||||
- Tool modules: `cli` (command line tool), `crates/*` (utility libraries)
|
||||
|
||||
### 2. Asynchronous Programming Pattern
|
||||
|
||||
- Comprehensive use of `tokio` async runtime
|
||||
- Prioritize `async/await` syntax
|
||||
- Use `async-trait` for async methods in traits
|
||||
- Avoid blocking operations, use `spawn_blocking` when necessary
|
||||
|
||||
### 3. Error Handling Strategy
|
||||
|
||||
- **Use modular, type-safe error handling with `thiserror`**
|
||||
- Each module should define its own error type using `thiserror::Error` derive macro
|
||||
- Support error chains and context information through `#[from]` and `#[source]` attributes
|
||||
- Use `Result<T>` type aliases for consistency within each module
|
||||
- Error conversion between modules should use explicit `From` implementations
|
||||
- Follow the pattern: `pub type Result<T> = core::result::Result<T, Error>`
|
||||
- Use `#[error("description")]` attributes for clear error messages
|
||||
- Support error downcasting when needed through `other()` helper methods
|
||||
- Implement `Clone` for errors when required by the domain logic
|
||||
|
||||
## Code Style Guidelines
|
||||
|
||||
### 1. Formatting Configuration
|
||||
|
||||
```toml
|
||||
max_width = 130
|
||||
fn_call_width = 90
|
||||
single_line_let_else_max_width = 100
|
||||
```
|
||||
|
||||
### 2. **🔧 MANDATORY Code Formatting Rules**
|
||||
|
||||
**CRITICAL**: All code must be properly formatted before committing. This project enforces strict formatting standards to maintain code consistency and readability.
|
||||
|
||||
#### Pre-commit Requirements (MANDATORY)
|
||||
|
||||
Before every commit, you **MUST**:
|
||||
|
||||
1. **Format your code**:
|
||||
```bash
|
||||
cargo fmt --all
|
||||
```
|
||||
|
||||
2. **Verify formatting**:
|
||||
```bash
|
||||
cargo fmt --all --check
|
||||
```
|
||||
|
||||
3. **Pass clippy checks**:
|
||||
```bash
|
||||
cargo clippy --all-targets --all-features -- -D warnings
|
||||
```
|
||||
|
||||
4. **Ensure compilation**:
|
||||
```bash
|
||||
cargo check --all-targets
|
||||
```
|
||||
|
||||
#### Quick Commands
|
||||
|
||||
Use these convenient Makefile targets for common tasks:
|
||||
|
||||
```bash
|
||||
# Format all code
|
||||
make fmt
|
||||
|
||||
# Check if code is properly formatted
|
||||
make fmt-check
|
||||
|
||||
# Run clippy checks
|
||||
make clippy
|
||||
|
||||
# Run compilation check
|
||||
make check
|
||||
|
||||
# Run tests
|
||||
make test
|
||||
|
||||
# Run all pre-commit checks (format + clippy + check + test)
|
||||
make pre-commit
|
||||
|
||||
# Setup git hooks (one-time setup)
|
||||
make setup-hooks
|
||||
```
|
||||
|
||||
### 3. Naming Conventions
|
||||
|
||||
- Use `snake_case` for functions, variables, modules
|
||||
- Use `PascalCase` for types, traits, enums
|
||||
- Constants use `SCREAMING_SNAKE_CASE`
|
||||
- Global variables prefix `GLOBAL_`, e.g., `GLOBAL_Endpoints`
|
||||
- Use meaningful and descriptive names for variables, functions, and methods
|
||||
- Avoid meaningless names like `temp`, `data`, `foo`, `bar`, `test123`
|
||||
- Choose names that clearly express the purpose and intent
|
||||
|
||||
### 4. Type Declaration Guidelines
|
||||
|
||||
- **Prefer type inference over explicit type declarations** when the type is obvious from context
|
||||
- Let the Rust compiler infer types whenever possible to reduce verbosity and improve maintainability
|
||||
- Only specify types explicitly when:
|
||||
- The type cannot be inferred by the compiler
|
||||
- Explicit typing improves code clarity and readability
|
||||
- Required for API boundaries (function signatures, public struct fields)
|
||||
- Needed to resolve ambiguity between multiple possible types
|
||||
|
||||
### 5. Documentation Comments
|
||||
|
||||
- Public APIs must have documentation comments
|
||||
- Use `///` for documentation comments
|
||||
- Complex functions add `# Examples` and `# Parameters` descriptions
|
||||
- Error cases use `# Errors` descriptions
|
||||
- Always use English for all comments and documentation
|
||||
- Avoid meaningless comments like "debug 111" or placeholder text
|
||||
|
||||
### 6. Import Guidelines
|
||||
|
||||
- Standard library imports first
|
||||
- Third-party crate imports in the middle
|
||||
- Project internal imports last
|
||||
- Group `use` statements with blank lines between groups
|
||||
|
||||
## Asynchronous Programming Guidelines
|
||||
|
||||
### 1. Trait Definition
|
||||
|
||||
```rust
|
||||
#[async_trait::async_trait]
|
||||
pub trait StorageAPI: Send + Sync {
|
||||
async fn get_object(&self, bucket: &str, object: &str) -> Result<ObjectInfo>;
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Error Handling
|
||||
|
||||
```rust
|
||||
// Use ? operator to propagate errors
|
||||
async fn example_function() -> Result<()> {
|
||||
let data = read_file("path").await?;
|
||||
process_data(data).await?;
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Concurrency Control
|
||||
|
||||
- Use `Arc` and `Mutex`/`RwLock` for shared state management
|
||||
- Prioritize async locks from `tokio::sync`
|
||||
- Avoid holding locks for long periods
|
||||
|
||||
## Logging and Tracing Guidelines
|
||||
|
||||
### 1. Tracing Usage
|
||||
|
||||
```rust
|
||||
#[tracing::instrument(skip(self, data))]
|
||||
async fn process_data(&self, data: &[u8]) -> Result<()> {
|
||||
info!("Processing {} bytes", data.len());
|
||||
// Implementation logic
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Log Levels
|
||||
|
||||
- `error!`: System errors requiring immediate attention
|
||||
- `warn!`: Warning information that may affect functionality
|
||||
- `info!`: Important business information
|
||||
- `debug!`: Debug information for development use
|
||||
- `trace!`: Detailed execution paths
|
||||
|
||||
### 3. Structured Logging
|
||||
|
||||
```rust
|
||||
info!(
|
||||
counter.rustfs_api_requests_total = 1_u64,
|
||||
key_request_method = %request.method(),
|
||||
key_request_uri_path = %request.uri().path(),
|
||||
"API request processed"
|
||||
);
|
||||
```
|
||||
|
||||
## Error Handling Guidelines
|
||||
|
||||
### 1. Error Type Definition
|
||||
|
||||
```rust
|
||||
// Use thiserror for module-specific error types
|
||||
#[derive(thiserror::Error, Debug)]
|
||||
pub enum MyError {
|
||||
#[error("IO error: {0}")]
|
||||
Io(#[from] std::io::Error),
|
||||
|
||||
#[error("Storage error: {0}")]
|
||||
Storage(#[from] ecstore::error::StorageError),
|
||||
|
||||
#[error("Custom error: {message}")]
|
||||
Custom { message: String },
|
||||
|
||||
#[error("File not found: {path}")]
|
||||
FileNotFound { path: String },
|
||||
|
||||
#[error("Invalid configuration: {0}")]
|
||||
InvalidConfig(String),
|
||||
}
|
||||
|
||||
// Provide Result type alias for the module
|
||||
pub type Result<T> = core::result::Result<T, MyError>;
|
||||
```
|
||||
|
||||
### 2. Error Helper Methods
|
||||
|
||||
```rust
|
||||
impl MyError {
|
||||
/// Create error from any compatible error type
|
||||
pub fn other<E>(error: E) -> Self
|
||||
where
|
||||
E: Into<Box<dyn std::error::Error + Send + Sync>>,
|
||||
{
|
||||
MyError::Io(std::io::Error::other(error))
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Error Context and Propagation
|
||||
|
||||
```rust
|
||||
// Use ? operator for clean error propagation
|
||||
async fn example_function() -> Result<()> {
|
||||
let data = read_file("path").await?;
|
||||
process_data(data).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Add context to errors
|
||||
fn process_with_context(path: &str) -> Result<()> {
|
||||
std::fs::read(path)
|
||||
.map_err(|e| MyError::Custom {
|
||||
message: format!("Failed to read {}: {}", path, e)
|
||||
})?;
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
|
||||
## Performance Optimization Guidelines
|
||||
|
||||
### 1. Memory Management
|
||||
|
||||
- Use `Bytes` instead of `Vec<u8>` for zero-copy operations
|
||||
- Avoid unnecessary cloning, use reference passing
|
||||
- Use `Arc` for sharing large objects
|
||||
|
||||
### 2. Concurrency Optimization
|
||||
|
||||
```rust
|
||||
// Use join_all for concurrent operations
|
||||
let futures = disks.iter().map(|disk| disk.operation());
|
||||
let results = join_all(futures).await;
|
||||
```
|
||||
|
||||
### 3. Caching Strategy
|
||||
|
||||
- Use `LazyLock` for global caching
|
||||
- Implement LRU cache to avoid memory leaks
|
||||
|
||||
## Testing Guidelines
|
||||
|
||||
### 1. Unit Tests
|
||||
|
||||
```rust
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use test_case::test_case;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_async_function() {
|
||||
let result = async_function().await;
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
|
||||
#[test_case("input1", "expected1")]
|
||||
#[test_case("input2", "expected2")]
|
||||
fn test_with_cases(input: &str, expected: &str) {
|
||||
assert_eq!(function(input), expected);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Integration Tests
|
||||
|
||||
- Use `e2e_test` module for end-to-end testing
|
||||
- Simulate real storage environments
|
||||
|
||||
### 3. Test Quality Standards
|
||||
|
||||
- Write meaningful test cases that verify actual functionality
|
||||
- Avoid placeholder or debug content like "debug 111", "test test", etc.
|
||||
- Use descriptive test names that clearly indicate what is being tested
|
||||
- Each test should have a clear purpose and verify specific behavior
|
||||
- Test data should be realistic and representative of actual use cases
|
||||
|
||||
## Cross-Platform Compatibility Guidelines
|
||||
|
||||
### 1. CPU Architecture Compatibility
|
||||
|
||||
- **Always consider multi-platform and different CPU architecture compatibility** when writing code
|
||||
- Support major architectures: x86_64, aarch64 (ARM64), and other target platforms
|
||||
- Use conditional compilation for architecture-specific code:
|
||||
|
||||
```rust
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
fn optimized_x86_64_function() { /* x86_64 specific implementation */ }
|
||||
|
||||
#[cfg(target_arch = "aarch64")]
|
||||
fn optimized_aarch64_function() { /* ARM64 specific implementation */ }
|
||||
|
||||
#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
|
||||
fn generic_function() { /* Generic fallback implementation */ }
|
||||
```
|
||||
|
||||
### 2. Platform-Specific Dependencies
|
||||
|
||||
- Use feature flags for platform-specific dependencies
|
||||
- Provide fallback implementations for unsupported platforms
|
||||
- Test on multiple architectures in CI/CD pipeline
|
||||
|
||||
### 3. Endianness Considerations
|
||||
|
||||
- Use explicit byte order conversion when dealing with binary data
|
||||
- Prefer `to_le_bytes()`, `from_le_bytes()` for consistent little-endian format
|
||||
- Use `byteorder` crate for complex binary format handling
|
||||
|
||||
### 4. SIMD and Performance Optimizations
|
||||
|
||||
- Use portable SIMD libraries like `wide` or `packed_simd`
|
||||
- Provide fallback implementations for non-SIMD architectures
|
||||
- Use runtime feature detection when appropriate
|
||||
|
||||
## Security Guidelines
|
||||
|
||||
### 1. Memory Safety
|
||||
|
||||
- Disable `unsafe` code (workspace.lints.rust.unsafe_code = "deny")
|
||||
- Use `rustls` instead of `openssl`
|
||||
|
||||
### 2. Authentication and Authorization
|
||||
|
||||
```rust
|
||||
// Use IAM system for permission checks
|
||||
let identity = iam.authenticate(&access_key, &secret_key).await?;
|
||||
iam.authorize(&identity, &action, &resource).await?;
|
||||
```
|
||||
|
||||
## Configuration Management Guidelines
|
||||
|
||||
### 1. Environment Variables
|
||||
|
||||
- Use `RUSTFS_` prefix
|
||||
- Support both configuration files and environment variables
|
||||
- Provide reasonable default values
|
||||
|
||||
### 2. Configuration Structure
|
||||
|
||||
```rust
|
||||
#[derive(Debug, Deserialize, Clone)]
|
||||
pub struct Config {
|
||||
pub address: String,
|
||||
pub volumes: String,
|
||||
#[serde(default)]
|
||||
pub console_enable: bool,
|
||||
}
|
||||
```
|
||||
|
||||
## Dependency Management Guidelines
|
||||
|
||||
### 1. Workspace Dependencies
|
||||
|
||||
- Manage versions uniformly at workspace level
|
||||
- Use `workspace = true` to inherit configuration
|
||||
|
||||
### 2. Feature Flags
|
||||
|
||||
```rust
|
||||
[features]
|
||||
default = ["file"]
|
||||
gpu = ["dep:nvml-wrapper"]
|
||||
kafka = ["dep:rdkafka"]
|
||||
```
|
||||
|
||||
## Deployment and Operations Guidelines
|
||||
|
||||
### 1. Containerization
|
||||
|
||||
- Provide Dockerfile and docker-compose configuration
|
||||
- Support multi-stage builds to optimize image size
|
||||
|
||||
### 2. Observability
|
||||
|
||||
- Integrate OpenTelemetry for distributed tracing
|
||||
- Support Prometheus metrics collection
|
||||
- Provide Grafana dashboards
|
||||
|
||||
### 3. Health Checks
|
||||
|
||||
```rust
|
||||
// Implement health check endpoint
|
||||
async fn health_check() -> Result<HealthStatus> {
|
||||
// Check component status
|
||||
}
|
||||
```
|
||||
|
||||
## Code Review Checklist
|
||||
|
||||
### 1. **Code Formatting and Quality (MANDATORY)**
|
||||
|
||||
- [ ] **Code is properly formatted** (`cargo fmt --all --check` passes)
|
||||
- [ ] **All clippy warnings are resolved** (`cargo clippy --all-targets --all-features -- -D warnings` passes)
|
||||
- [ ] **Code compiles successfully** (`cargo check --all-targets` passes)
|
||||
- [ ] **Pre-commit hooks are working** and all checks pass
|
||||
- [ ] **No formatting-related changes** mixed with functional changes (separate commits)
|
||||
|
||||
### 2. Functionality
|
||||
|
||||
- [ ] Are all error cases properly handled?
|
||||
- [ ] Is there appropriate logging?
|
||||
- [ ] Is there necessary test coverage?
|
||||
|
||||
### 3. Performance
|
||||
|
||||
- [ ] Are unnecessary memory allocations avoided?
|
||||
- [ ] Are async operations used correctly?
|
||||
- [ ] Are there potential deadlock risks?
|
||||
|
||||
### 4. Security
|
||||
|
||||
- [ ] Are input parameters properly validated?
|
||||
- [ ] Are there appropriate permission checks?
|
||||
- [ ] Is information leakage avoided?
|
||||
|
||||
### 5. Cross-Platform Compatibility
|
||||
|
||||
- [ ] Does the code work on different CPU architectures (x86_64, aarch64)?
|
||||
- [ ] Are platform-specific features properly gated with conditional compilation?
|
||||
- [ ] Is byte order handling correct for binary data?
|
||||
- [ ] Are there appropriate fallback implementations for unsupported platforms?
|
||||
|
||||
### 6. Code Commits and Documentation
|
||||
|
||||
- [ ] Does it comply with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)?
|
||||
- [ ] Are commit messages concise and under 72 characters for the title line?
|
||||
- [ ] Commit titles should be concise and in English, avoid Chinese
|
||||
- [ ] Is PR description provided in copyable markdown format for easy copying?
|
||||
|
||||
## Common Patterns and Best Practices
|
||||
|
||||
### 1. Resource Management
|
||||
|
||||
```rust
|
||||
// Use RAII pattern for resource management
|
||||
pub struct ResourceGuard {
|
||||
resource: Resource,
|
||||
}
|
||||
|
||||
impl Drop for ResourceGuard {
|
||||
fn drop(&mut self) {
|
||||
// Clean up resources
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Dependency Injection
|
||||
|
||||
```rust
|
||||
// Use dependency injection pattern
|
||||
pub struct Service {
|
||||
config: Arc<Config>,
|
||||
storage: Arc<dyn StorageAPI>,
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Graceful Shutdown
|
||||
|
||||
```rust
|
||||
// Implement graceful shutdown
|
||||
async fn shutdown_gracefully(shutdown_rx: &mut Receiver<()>) {
|
||||
tokio::select! {
|
||||
_ = shutdown_rx.recv() => {
|
||||
info!("Received shutdown signal");
|
||||
// Perform cleanup operations
|
||||
}
|
||||
_ = tokio::time::sleep(SHUTDOWN_TIMEOUT) => {
|
||||
warn!("Shutdown timeout reached");
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Domain-Specific Guidelines
|
||||
|
||||
### 1. Storage Operations
|
||||
|
||||
- All storage operations must support erasure coding
|
||||
- Implement read/write quorum mechanisms
|
||||
- Support data integrity verification
|
||||
|
||||
### 2. Network Communication
|
||||
|
||||
- Use gRPC for internal service communication
|
||||
- HTTP/HTTPS support for S3-compatible API
|
||||
- Implement connection pooling and retry mechanisms
|
||||
|
||||
### 3. Metadata Management
|
||||
|
||||
- Use FlatBuffers for serialization
|
||||
- Support version control and migration
|
||||
- Implement metadata caching
|
||||
|
||||
## Branch Management and Development Workflow
|
||||
|
||||
### Branch Management
|
||||
|
||||
- **🚨 CRITICAL: NEVER modify code directly on main or master branch - THIS IS ABSOLUTELY FORBIDDEN 🚨**
|
||||
- **⚠️ ANY DIRECT COMMITS TO MASTER/MAIN WILL BE REJECTED AND MUST BE REVERTED IMMEDIATELY ⚠️**
|
||||
- **🔒 ALL CHANGES MUST GO THROUGH PULL REQUESTS - NO DIRECT COMMITS TO MAIN UNDER ANY CIRCUMSTANCES 🔒**
|
||||
- **Always work on feature branches - NO EXCEPTIONS**
|
||||
- Always check the .rules.md file before starting to ensure you understand the project guidelines
|
||||
- **MANDATORY workflow for ALL changes:**
|
||||
1. `git checkout main` (switch to main branch)
|
||||
2. `git pull` (get latest changes)
|
||||
3. `git checkout -b feat/your-feature-name` (create and switch to feature branch)
|
||||
4. Make your changes ONLY on the feature branch
|
||||
5. Test thoroughly before committing
|
||||
6. Commit and push to the feature branch
|
||||
7. **Create a pull request for code review - THIS IS THE ONLY WAY TO MERGE TO MAIN**
|
||||
8. **Wait for PR approval before merging - NEVER merge your own PRs without review**
|
||||
- Use descriptive branch names following the pattern: `feat/feature-name`, `fix/issue-name`, `refactor/component-name`, etc.
|
||||
- **Double-check current branch before ANY commit: `git branch` to ensure you're NOT on main/master**
|
||||
- **Pull Request Requirements:**
|
||||
- All changes must be submitted via PR regardless of size or urgency
|
||||
- PRs must include comprehensive description and testing information
|
||||
- PRs must pass all CI/CD checks before merging
|
||||
- PRs require at least one approval from code reviewers
|
||||
- Even hotfixes and emergency changes must go through PR process
|
||||
- **Enforcement:**
|
||||
- Main branch should be protected with branch protection rules
|
||||
- Direct pushes to main should be blocked by repository settings
|
||||
- Any accidental direct commits to main must be immediately reverted via PR
|
||||
|
||||
### Development Workflow
|
||||
|
||||
## 🎯 **Core Development Principles**
|
||||
|
||||
- **🔴 Every change must be precise - don't modify unless you're confident**
|
||||
- Carefully analyze code logic and ensure complete understanding before making changes
|
||||
- When uncertain, prefer asking users or consulting documentation over blind modifications
|
||||
- Use small iterative steps, modify only necessary parts at a time
|
||||
- Evaluate impact scope before changes to ensure no new issues are introduced
|
||||
|
||||
- **🚀 GitHub PR creation prioritizes gh command usage**
|
||||
- Prefer using `gh pr create` command to create Pull Requests
|
||||
- Avoid having users manually create PRs through web interface
|
||||
- Provide clear and professional PR titles and descriptions
|
||||
- Using `gh` commands ensures better integration and automation
|
||||
|
||||
## 📝 **Code Quality Requirements**
|
||||
|
||||
- Use English for all code comments, documentation, and variable names
|
||||
- Write meaningful and descriptive names for variables, functions, and methods
|
||||
- Avoid meaningless test content like "debug 111" or placeholder values
|
||||
- Before each change, carefully read the existing code to ensure you understand the code structure and implementation, do not break existing logic implementation, do not introduce new issues
|
||||
- Ensure each change provides sufficient test cases to guarantee code correctness
|
||||
- Do not arbitrarily modify numbers and constants in test cases, carefully analyze their meaning to ensure test case correctness
|
||||
- When writing or modifying tests, check existing test cases to ensure they have scientific naming and rigorous logic testing, if not compliant, modify test cases to ensure scientific and rigorous testing
|
||||
- **Before committing any changes, run `cargo clippy --all-targets --all-features -- -D warnings` to ensure all code passes Clippy checks**
|
||||
- After each development completion, first git add . then git commit -m "feat: feature description" or "fix: issue description", ensure compliance with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)
|
||||
- **Keep commit messages concise and under 72 characters** for the title line, use body for detailed explanations if needed
|
||||
- After each development completion, first git push to remote repository
|
||||
- After each change completion, summarize the changes, do not create summary files, provide a brief change description, ensure compliance with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)
|
||||
- Provide change descriptions needed for PR in the conversation, ensure compliance with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)
|
||||
- **Always provide PR descriptions in English** after completing any changes, including:
|
||||
- Clear and concise title following Conventional Commits format
|
||||
- Detailed description of what was changed and why
|
||||
- List of key changes and improvements
|
||||
- Any breaking changes or migration notes if applicable
|
||||
- Testing information and verification steps
|
||||
- **Provide PR descriptions in copyable markdown format** enclosed in code blocks for easy one-click copying
|
||||
|
||||
## 🚫 AI Documentation Generation Restrictions
|
||||
|
||||
### Forbidden Summary Documents
|
||||
|
||||
- **Strictly forbidden to create any form of AI-generated summary documents**
|
||||
- **Do not create documents containing large amounts of emoji, detailed formatting tables and typical AI style**
|
||||
- **Do not generate the following types of documents in the project:**
|
||||
- Benchmark summary documents (BENCHMARK*.md)
|
||||
- Implementation comparison analysis documents (IMPLEMENTATION_COMPARISON*.md)
|
||||
- Performance analysis report documents
|
||||
- Architecture summary documents
|
||||
- Feature comparison documents
|
||||
- Any documents with large amounts of emoji and formatted content
|
||||
- **If documentation is needed, only create when explicitly requested by the user, and maintain a concise and practical style**
|
||||
- **Documentation should focus on actually needed information, avoiding excessive formatting and decorative content**
|
||||
- **Any discovered AI-generated summary documents should be immediately deleted**
|
||||
|
||||
### Allowed Documentation Types
|
||||
|
||||
- README.md (project introduction, keep concise)
|
||||
- Technical documentation (only create when explicitly needed)
|
||||
- User manual (only create when explicitly needed)
|
||||
- API documentation (generated from code)
|
||||
- Changelog (CHANGELOG.md)
|
||||
|
||||
These rules should serve as guiding principles when developing the RustFS project, ensuring code quality, performance, and maintainability.
|
||||
68
CLAUDE.md
Normal file
@@ -0,0 +1,68 @@
|
||||
# Claude AI Rules for RustFS Project
|
||||
|
||||
## Core Rules Reference
|
||||
|
||||
This project follows the comprehensive AI coding rules defined in `.rules.md`. Please refer to that file for the complete set of development guidelines, coding standards, and best practices.
|
||||
|
||||
## Claude-Specific Configuration
|
||||
|
||||
When using Claude for this project, ensure you:
|
||||
|
||||
1. **Review the unified rules**: Always check `.rules.md` for the latest project guidelines
|
||||
2. **Follow branch protection**: Never attempt to commit directly to main/master branch
|
||||
3. **Use English**: All code comments, documentation, and variable names must be in English
|
||||
4. **Clean code practices**: Only make modifications you're confident about
|
||||
5. **Test thoroughly**: Ensure all changes pass formatting, linting, and testing requirements
|
||||
6. **Clean up after yourself**: Remove any temporary scripts or test files created during the session
|
||||
|
||||
## Quick Reference
|
||||
|
||||
### Critical Rules
|
||||
- 🚫 **NEVER commit directly to main/master branch**
|
||||
- ✅ **ALWAYS work on feature branches**
|
||||
- 📝 **ALWAYS use English for code and documentation**
|
||||
- 🧹 **ALWAYS clean up temporary files after use**
|
||||
- 🎯 **ONLY make confident, necessary modifications**
|
||||
|
||||
### Pre-commit Checklist
|
||||
```bash
|
||||
# Before committing, always run:
|
||||
cargo fmt --all
|
||||
cargo clippy --all-targets --all-features -- -D warnings
|
||||
cargo check --all-targets
|
||||
cargo test
|
||||
```
|
||||
|
||||
### Branch Workflow
|
||||
```bash
|
||||
git checkout main
|
||||
git pull origin main
|
||||
git checkout -b feat/your-feature-name
|
||||
# Make your changes
|
||||
git add .
|
||||
git commit -m "feat: your feature description"
|
||||
git push origin feat/your-feature-name
|
||||
gh pr create
|
||||
```
|
||||
|
||||
## Claude-Specific Best Practices
|
||||
|
||||
1. **Task Analysis**: Always thoroughly analyze the task before starting implementation
|
||||
2. **Minimal Changes**: Make only the necessary changes to accomplish the task
|
||||
3. **Clear Communication**: Provide clear explanations of changes and their rationale
|
||||
4. **Error Prevention**: Verify code correctness before suggesting changes
|
||||
5. **Documentation**: Ensure all code changes are properly documented in English
|
||||
|
||||
## Important Notes
|
||||
|
||||
- This file serves as an entry point for Claude AI
|
||||
- All detailed rules and guidelines are maintained in `.rules.md`
|
||||
- Updates to coding standards should be made in `.rules.md` to ensure consistency across all AI tools
|
||||
- When in doubt, always refer to `.rules.md` for authoritative guidance
|
||||
- Claude should prioritize code quality, safety, and maintainability over speed
|
||||
|
||||
## See Also
|
||||
|
||||
- [.rules.md](./.rules.md) - Complete AI coding rules and guidelines
|
||||
- [CONTRIBUTING.md](./CONTRIBUTING.md) - Contribution guidelines
|
||||
- [README.md](./README.md) - Project overview and setup instructions
|
||||
4221
Cargo.lock
generated
45
Cargo.toml
@@ -15,8 +15,8 @@
|
||||
[workspace]
|
||||
members = [
|
||||
"rustfs", # Core file system implementation
|
||||
"cli/rustfs-gui", # Graphical user interface client
|
||||
"crates/appauth", # Application authentication and authorization
|
||||
"crates/audit-logger", # Audit logging system for file operations
|
||||
"crates/common", # Shared utilities and data structures
|
||||
"crates/config", # Configuration management
|
||||
"crates/crypto", # Cryptography and security features
|
||||
@@ -30,6 +30,7 @@ members = [
|
||||
"crates/obs", # Observability utilities
|
||||
"crates/protos", # Protocol buffer definitions
|
||||
"crates/rio", # Rust I/O utilities and abstractions
|
||||
"crates/targets", # Target-specific configurations and utilities
|
||||
"crates/s3select-api", # S3 Select API interface
|
||||
"crates/s3select-query", # S3 Select query engine
|
||||
"crates/signer", # client signer
|
||||
@@ -37,7 +38,7 @@ members = [
|
||||
"crates/utils", # Utility functions and helpers
|
||||
"crates/workers", # Worker thread pools and task scheduling
|
||||
"crates/zip", # ZIP file handling and compression
|
||||
"crates/ahm",
|
||||
"crates/ahm", # Asynchronous Hash Map for concurrent data structures
|
||||
"crates/mcp", # MCP server for S3 operations
|
||||
]
|
||||
resolver = "2"
|
||||
@@ -59,15 +60,11 @@ unsafe_code = "deny"
|
||||
[workspace.lints.clippy]
|
||||
all = "warn"
|
||||
|
||||
[patch.crates-io]
|
||||
rustfs-utils = { path = "crates/utils" }
|
||||
rustfs-filemeta = { path = "crates/filemeta" }
|
||||
rustfs-rio = { path = "crates/rio" }
|
||||
|
||||
[workspace.dependencies]
|
||||
rustfs-ahm = { path = "crates/ahm", version = "0.0.5" }
|
||||
rustfs-s3select-api = { path = "crates/s3select-api", version = "0.0.5" }
|
||||
rustfs-appauth = { path = "crates/appauth", version = "0.0.5" }
|
||||
rustfs-audit-logger = { path = "crates/audit-logger", version = "0.0.5" }
|
||||
rustfs-common = { path = "crates/common", version = "0.0.5" }
|
||||
rustfs-crypto = { path = "crates/crypto", version = "0.0.5" }
|
||||
rustfs-ecstore = { path = "crates/ecstore", version = "0.0.5" }
|
||||
@@ -89,6 +86,7 @@ rustfs-signer = { path = "crates/signer", version = "0.0.5" }
|
||||
rustfs-checksums = { path = "crates/checksums", version = "0.0.5" }
|
||||
rustfs-workers = { path = "crates/workers", version = "0.0.5" }
|
||||
rustfs-mcp = { path = "crates/mcp", version = "0.0.5" }
|
||||
rustfs-targets = { path = "crates/targets", version = "0.0.5" }
|
||||
aes-gcm = { version = "0.10.3", features = ["std"] }
|
||||
anyhow = "1.0.99"
|
||||
arc-swap = "1.7.1"
|
||||
@@ -96,15 +94,15 @@ argon2 = { version = "0.5.3", features = ["std"] }
|
||||
atoi = "2.0.0"
|
||||
async-channel = "2.5.0"
|
||||
async-recursion = "1.1.1"
|
||||
async-trait = "0.1.88"
|
||||
async-trait = "0.1.89"
|
||||
async-compression = { version = "0.4.19" }
|
||||
atomic_enum = "0.3.0"
|
||||
aws-config = { version = "1.8.4" }
|
||||
aws-config = { version = "1.8.5" }
|
||||
aws-sdk-s3 = "1.101.0"
|
||||
axum = "0.8.4"
|
||||
base64-simd = "0.8.0"
|
||||
base64 = "0.22.1"
|
||||
brotli = "8.0.1"
|
||||
brotli = "8.0.2"
|
||||
bytes = { version = "1.10.1", features = ["serde"] }
|
||||
bytesize = "2.0.1"
|
||||
byteorder = "1.5.0"
|
||||
@@ -112,16 +110,14 @@ cfg-if = "1.0.1"
|
||||
crc-fast = "1.4.0"
|
||||
chacha20poly1305 = { version = "0.10.1" }
|
||||
chrono = { version = "0.4.41", features = ["serde"] }
|
||||
clap = { version = "4.5.44", features = ["derive", "env"] }
|
||||
clap = { version = "4.5.45", features = ["derive", "env"] }
|
||||
const-str = { version = "0.6.4", features = ["std", "proc"] }
|
||||
crc32fast = "1.5.0"
|
||||
criterion = { version = "0.7", features = ["html_reports"] }
|
||||
dashmap = "6.1.0"
|
||||
datafusion = "46.0.1"
|
||||
derive_builder = "0.20.2"
|
||||
dioxus = { version = "0.6.3", features = ["router"] }
|
||||
dirs = "6.0.0"
|
||||
enumset = "1.1.7"
|
||||
enumset = "1.1.9"
|
||||
flatbuffers = "25.2.10"
|
||||
flate2 = "1.1.2"
|
||||
flexi_logger = { version = "0.31.2", features = ["trc", "dont_minimize_extra_stacks"] }
|
||||
@@ -134,7 +130,7 @@ hex = "0.4.3"
|
||||
hex-simd = "0.8.0"
|
||||
highway = { version = "1.3.0" }
|
||||
hmac = "0.12.1"
|
||||
hyper = "1.6.0"
|
||||
hyper = "1.7.0"
|
||||
hyper-util = { version = "0.1.16", features = [
|
||||
"tokio",
|
||||
"server-auto",
|
||||
@@ -146,11 +142,6 @@ http-body = "1.0.1"
|
||||
humantime = "2.2.0"
|
||||
ipnetwork = { version = "0.21.1", features = ["serde"] }
|
||||
jsonwebtoken = "9.3.1"
|
||||
keyring = { version = "3.6.3", features = [
|
||||
"apple-native",
|
||||
"windows-native",
|
||||
"sync-secret-service",
|
||||
] }
|
||||
lazy_static = "1.5.0"
|
||||
libsystemd = { version = "0.7.2" }
|
||||
local-ip-address = "0.6.5"
|
||||
@@ -193,7 +184,7 @@ rand = "0.9.2"
|
||||
rdkafka = { version = "0.38.0", features = ["tokio"] }
|
||||
reed-solomon-simd = { version = "3.0.1" }
|
||||
regex = { version = "1.11.1" }
|
||||
reqwest = { version = "0.12.22", default-features = false, features = [
|
||||
reqwest = { version = "0.12.23", default-features = false, features = [
|
||||
"rustls-tls",
|
||||
"charset",
|
||||
"http2",
|
||||
@@ -202,17 +193,12 @@ reqwest = { version = "0.12.22", default-features = false, features = [
|
||||
"json",
|
||||
"blocking",
|
||||
] }
|
||||
rfd = { version = "0.15.4", default-features = false, features = [
|
||||
"xdg-portal",
|
||||
"tokio",
|
||||
] }
|
||||
rmcp = { version = "0.5.0" }
|
||||
rmp = "0.8.14"
|
||||
rmp-serde = "1.3.0"
|
||||
rsa = "0.9.8"
|
||||
rumqttc = { version = "0.24" }
|
||||
rust-embed = { version = "8.7.2" }
|
||||
rust-i18n = { version = "3.1.5" }
|
||||
rustfs-rsc = "2025.506.1"
|
||||
rustls = { version = "0.23.31" }
|
||||
rustls-pki-types = "1.12.0"
|
||||
@@ -220,7 +206,7 @@ rustls-pemfile = "2.2.0"
|
||||
s3s = { version = "0.12.0-minio-preview.3" }
|
||||
schemars = "1.0.4"
|
||||
serde = { version = "1.0.219", features = ["derive"] }
|
||||
serde_json = { version = "1.0.142", features = ["raw_value"] }
|
||||
serde_json = { version = "1.0.143", features = ["raw_value"] }
|
||||
serde_urlencoded = "0.7.1"
|
||||
serial_test = "3.2.0"
|
||||
sha1 = "0.10.6"
|
||||
@@ -237,7 +223,7 @@ sysctl = "0.6.0"
|
||||
tempfile = "3.20.0"
|
||||
temp-env = "0.3.6"
|
||||
test-case = "3.3.1"
|
||||
thiserror = "2.0.14"
|
||||
thiserror = "2.0.15"
|
||||
time = { version = "0.3.41", features = [
|
||||
"std",
|
||||
"parsing",
|
||||
@@ -257,7 +243,6 @@ tonic-prost-build = { version = "0.14.1" }
|
||||
tower = { version = "0.5.2", features = ["timeout"] }
|
||||
tower-http = { version = "0.6.6", features = ["cors"] }
|
||||
tracing = "0.1.41"
|
||||
tracing-appender = "0.2.3"
|
||||
tracing-core = "0.1.34"
|
||||
tracing-error = "0.2.1"
|
||||
tracing-opentelemetry = "0.31.0"
|
||||
@@ -278,7 +263,7 @@ zstd = "0.13.3"
|
||||
|
||||
|
||||
[workspace.metadata.cargo-shear]
|
||||
ignored = ["rustfs", "rust-i18n", "rustfs-mcp"]
|
||||
ignored = ["rustfs", "rust-i18n", "rustfs-mcp", "rustfs-audit-logger", "tokio-test"]
|
||||
|
||||
[profile.wasm-dev]
|
||||
inherits = "dev"
|
||||
|
||||
106
Dockerfile
@@ -1,48 +1,47 @@
|
||||
# Multi-stage build for RustFS production image
|
||||
|
||||
# Build stage: Download and extract RustFS binary
|
||||
FROM alpine:3.22 AS build
|
||||
|
||||
# Build arguments for platform and release
|
||||
ARG TARGETARCH
|
||||
ARG RELEASE=latest
|
||||
|
||||
# Install minimal dependencies for downloading and extracting
|
||||
RUN apk add --no-cache ca-certificates curl unzip
|
||||
|
||||
# Create build directory
|
||||
WORKDIR /build
|
||||
|
||||
# Set architecture-specific variables
|
||||
RUN if [ "$TARGETARCH" = "amd64" ]; then \
|
||||
echo "x86_64-musl" > /tmp/arch; \
|
||||
elif [ "$TARGETARCH" = "arm64" ]; then \
|
||||
echo "aarch64-musl" > /tmp/arch; \
|
||||
RUN set -eux; \
|
||||
case "$TARGETARCH" in \
|
||||
amd64) ARCH_SUBSTR="x86_64-musl" ;; \
|
||||
arm64) ARCH_SUBSTR="aarch64-musl" ;; \
|
||||
*) echo "Unsupported TARGETARCH=$TARGETARCH" >&2; exit 1 ;; \
|
||||
esac; \
|
||||
if [ "$RELEASE" = "latest" ]; then \
|
||||
TAG="$(curl -fsSL https://api.github.com/repos/rustfs/rustfs/releases \
|
||||
| grep -o '"tag_name": "[^"]*"' | cut -d'"' -f4 | head -n 1)"; \
|
||||
else \
|
||||
echo "unsupported" > /tmp/arch; \
|
||||
fi
|
||||
RUN ARCH=$(cat /tmp/arch) && \
|
||||
if [ "$ARCH" = "unsupported" ]; then \
|
||||
echo "Unsupported architecture: $TARGETARCH" && exit 1; \
|
||||
fi && \
|
||||
if [ "${RELEASE}" = "latest" ]; then \
|
||||
VERSION="latest"; \
|
||||
else \
|
||||
VERSION="v${RELEASE#v}"; \
|
||||
fi && \
|
||||
BASE_URL="https://dl.rustfs.com/artifacts/rustfs/release" && \
|
||||
PACKAGE_NAME="rustfs-linux-${ARCH}-${VERSION}.zip" && \
|
||||
DOWNLOAD_URL="${BASE_URL}/${PACKAGE_NAME}" && \
|
||||
echo "Downloading ${PACKAGE_NAME} from ${DOWNLOAD_URL}" >&2 && \
|
||||
curl -f -L "${DOWNLOAD_URL}" -o rustfs.zip && \
|
||||
unzip rustfs.zip -d /build && \
|
||||
chmod +x /build/rustfs && \
|
||||
rm rustfs.zip || { echo "Failed to download or extract ${PACKAGE_NAME}" >&2; exit 1; }
|
||||
TAG="$RELEASE"; \
|
||||
fi; \
|
||||
echo "Using tag: $TAG (arch pattern: $ARCH_SUBSTR)"; \
|
||||
# Find download URL in assets list for this tag that contains arch substring and ends with .zip
|
||||
URL="$(curl -fsSL "https://api.github.com/repos/rustfs/rustfs/releases/tags/$TAG" \
|
||||
| grep -o "\"browser_download_url\": \"[^\"]*${ARCH_SUBSTR}[^\"]*\\.zip\"" \
|
||||
| cut -d'"' -f4 | head -n 1)"; \
|
||||
if [ -z "$URL" ]; then echo "Failed to locate release asset for $ARCH_SUBSTR at tag $TAG" >&2; exit 1; fi; \
|
||||
echo "Downloading: $URL"; \
|
||||
curl -fL "$URL" -o rustfs.zip; \
|
||||
unzip -q rustfs.zip -d /build; \
|
||||
# If binary is not in root directory, try to locate and move from zip to /build/rustfs
|
||||
if [ ! -x /build/rustfs ]; then \
|
||||
BIN_PATH="$(unzip -Z -1 rustfs.zip | grep -E '(^|/)rustfs$' | head -n 1 || true)"; \
|
||||
if [ -n "$BIN_PATH" ]; then \
|
||||
mkdir -p /build/.tmp && unzip -q rustfs.zip "$BIN_PATH" -d /build/.tmp && \
|
||||
mv "/build/.tmp/$BIN_PATH" /build/rustfs; \
|
||||
fi; \
|
||||
fi; \
|
||||
[ -x /build/rustfs ] || { echo "rustfs binary not found in asset" >&2; exit 1; }; \
|
||||
chmod +x /build/rustfs; \
|
||||
rm -rf rustfs.zip /build/.tmp || true
|
||||
|
||||
# Runtime stage: Configure runtime environment
|
||||
FROM alpine:3.22.1
|
||||
|
||||
# Build arguments and labels
|
||||
FROM alpine:3.22
|
||||
|
||||
ARG RELEASE=latest
|
||||
ARG BUILD_DATE
|
||||
ARG VCS_REF
|
||||
@@ -50,7 +49,7 @@ ARG VCS_REF
|
||||
LABEL name="RustFS" \
|
||||
vendor="RustFS Team" \
|
||||
maintainer="RustFS Team <dev@rustfs.com>" \
|
||||
version="${RELEASE}" \
|
||||
version="v${RELEASE#v}" \
|
||||
release="${RELEASE}" \
|
||||
build-date="${BUILD_DATE}" \
|
||||
vcs-ref="${VCS_REF}" \
|
||||
@@ -59,43 +58,28 @@ LABEL name="RustFS" \
|
||||
url="https://rustfs.com" \
|
||||
license="Apache-2.0"
|
||||
|
||||
# Install runtime dependencies
|
||||
RUN echo "https://dl-cdn.alpinelinux.org/alpine/v3.20/community" >> /etc/apk/repositories && \
|
||||
apk update && \
|
||||
apk add --no-cache ca-certificates bash gosu coreutils shadow && \
|
||||
addgroup -g 1000 rustfs && \
|
||||
adduser -u 1000 -G rustfs -s /bin/bash -D rustfs
|
||||
RUN apk add --no-cache ca-certificates coreutils
|
||||
|
||||
# Copy CA certificates and RustFS binary from build stage
|
||||
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
|
||||
COPY --from=build /build/rustfs /usr/bin/rustfs
|
||||
|
||||
# Copy entry point script
|
||||
COPY entrypoint.sh /entrypoint.sh
|
||||
|
||||
# Set permissions
|
||||
RUN chmod +x /usr/bin/rustfs /entrypoint.sh && \
|
||||
mkdir -p /data /logs && \
|
||||
chown rustfs:rustfs /data /logs && \
|
||||
chmod 700 /data /logs
|
||||
chmod 0750 /data /logs
|
||||
|
||||
# Environment variables (credentials should be set via environment or secrets)
|
||||
ENV RUSTFS_ADDRESS=:9000 \
|
||||
RUSTFS_ACCESS_KEY=rustfsadmin \
|
||||
RUSTFS_SECRET_KEY=rustfsadmin \
|
||||
RUSTFS_CONSOLE_ENABLE=true \
|
||||
RUSTFS_VOLUMES=/data \
|
||||
RUST_LOG=warn \
|
||||
RUSTFS_OBS_LOG_DIRECTORY=/logs \
|
||||
RUSTFS_SINKS_FILE_PATH=/logs
|
||||
ENV RUSTFS_ADDRESS=":9000" \
|
||||
RUSTFS_ACCESS_KEY="rustfsadmin" \
|
||||
RUSTFS_SECRET_KEY="rustfsadmin" \
|
||||
RUSTFS_CONSOLE_ENABLE="true" \
|
||||
RUSTFS_VOLUMES="/data" \
|
||||
RUST_LOG="warn" \
|
||||
RUSTFS_OBS_LOG_DIRECTORY="/logs" \
|
||||
RUSTFS_SINKS_FILE_PATH="/logs"
|
||||
|
||||
# Expose port
|
||||
EXPOSE 9000
|
||||
|
||||
# Volumes for data and logs
|
||||
VOLUME ["/data", "/logs"]
|
||||
|
||||
# Set entry point
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
||||
CMD ["/usr/bin/rustfs"]
|
||||
|
||||
CMD ["rustfs"]
|
||||
|
||||
@@ -1,80 +1,88 @@
|
||||
# syntax=docker/dockerfile:1.6
|
||||
# Multi-stage Dockerfile for RustFS - LOCAL DEVELOPMENT ONLY
|
||||
#
|
||||
# ⚠️ IMPORTANT: This Dockerfile is for local development and testing only.
|
||||
# ⚠️ It builds RustFS from source code and is NOT used in CI/CD pipelines.
|
||||
# ⚠️ CI/CD pipeline uses pre-built binaries from Dockerfile instead.
|
||||
# IMPORTANT: This Dockerfile builds RustFS from source for local development and testing.
|
||||
# CI/CD uses the production Dockerfile with prebuilt binaries instead.
|
||||
#
|
||||
# Usage for local development:
|
||||
# Example:
|
||||
# docker build -f Dockerfile.source -t rustfs:dev-local .
|
||||
# docker run --rm -p 9000:9000 rustfs:dev-local
|
||||
#
|
||||
# Supports cross-compilation for amd64 and arm64 architectures
|
||||
# Supports cross-compilation for amd64 and arm64 via TARGETPLATFORM.
|
||||
|
||||
ARG TARGETPLATFORM
|
||||
ARG BUILDPLATFORM
|
||||
|
||||
# -----------------------------
|
||||
# Build stage
|
||||
FROM --platform=$BUILDPLATFORM rust:1.88-bookworm AS builder
|
||||
# -----------------------------
|
||||
FROM rust:1.88-bookworm AS builder
|
||||
|
||||
# Re-declare build arguments after FROM (required for multi-stage builds)
|
||||
# Re-declare args after FROM
|
||||
ARG TARGETPLATFORM
|
||||
ARG BUILDPLATFORM
|
||||
|
||||
# Debug: Print platform information
|
||||
RUN echo "🐳 Build Info: BUILDPLATFORM=$BUILDPLATFORM, TARGETPLATFORM=$TARGETPLATFORM"
|
||||
# Debug: print platforms
|
||||
RUN echo "Build info -> BUILDPLATFORM=${BUILDPLATFORM}, TARGETPLATFORM=${TARGETPLATFORM}"
|
||||
|
||||
# Install required build dependencies
|
||||
RUN apt-get update && apt-get install -y \
|
||||
wget \
|
||||
git \
|
||||
# Install build toolchain and headers
|
||||
# Use distro packages for protoc/flatc to avoid host-arch mismatch
|
||||
RUN set -eux; \
|
||||
export DEBIAN_FRONTEND=noninteractive; \
|
||||
apt-get update; \
|
||||
apt-get install -y --no-install-recommends \
|
||||
build-essential \
|
||||
ca-certificates \
|
||||
curl \
|
||||
unzip \
|
||||
gcc \
|
||||
git \
|
||||
pkg-config \
|
||||
libssl-dev \
|
||||
lld \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
protobuf-compiler \
|
||||
flatbuffers-compiler; \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Note: sccache removed for simpler builds
|
||||
|
||||
# Install cross-compilation tools for ARM64
|
||||
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
|
||||
apt-get update && \
|
||||
apt-get install -y gcc-aarch64-linux-gnu && \
|
||||
rm -rf /var/lib/apt/lists/*; \
|
||||
# Optional: cross toolchain for aarch64 (only when targeting linux/arm64)
|
||||
RUN set -eux; \
|
||||
if [ "${TARGETPLATFORM:-linux/amd64}" = "linux/arm64" ]; then \
|
||||
export DEBIAN_FRONTEND=noninteractive; \
|
||||
apt-get update; \
|
||||
apt-get install -y --no-install-recommends gcc-aarch64-linux-gnu; \
|
||||
rm -rf /var/lib/apt/lists/*; \
|
||||
fi
|
||||
|
||||
# Install protoc
|
||||
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
|
||||
&& unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
|
||||
&& mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
|
||||
&& mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3
|
||||
|
||||
# Install flatc
|
||||
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
|
||||
&& unzip Linux.flatc.binary.g++-13.zip \
|
||||
&& mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip
|
||||
|
||||
# Set up Rust targets based on platform
|
||||
RUN set -e && \
|
||||
PLATFORM="${TARGETPLATFORM:-linux/amd64}" && \
|
||||
echo "🎯 Setting up Rust target for platform: $PLATFORM" && \
|
||||
case "$PLATFORM" in \
|
||||
"linux/amd64") rustup target add x86_64-unknown-linux-gnu ;; \
|
||||
"linux/arm64") rustup target add aarch64-unknown-linux-gnu ;; \
|
||||
*) echo "❌ Unsupported platform: $PLATFORM" && exit 1 ;; \
|
||||
# Add Rust targets based on TARGETPLATFORM
|
||||
RUN set -eux; \
|
||||
case "${TARGETPLATFORM:-linux/amd64}" in \
|
||||
linux/amd64) rustup target add x86_64-unknown-linux-gnu ;; \
|
||||
linux/arm64) rustup target add aarch64-unknown-linux-gnu ;; \
|
||||
*) echo "Unsupported TARGETPLATFORM=${TARGETPLATFORM}" >&2; exit 1 ;; \
|
||||
esac
|
||||
|
||||
# Set up environment for cross-compilation
|
||||
# Cross-compilation environment (used only when targeting aarch64)
|
||||
ENV CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc
|
||||
ENV CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc
|
||||
ENV CXX_aarch64_unknown_linux_gnu=aarch64-linux-gnu-g++
|
||||
|
||||
WORKDIR /usr/src/rustfs
|
||||
|
||||
# Copy all source code
|
||||
# Layered copy to maximize caching:
|
||||
# 1) top-level manifests
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
# 2) workspace member manifests (adjust if workspace layout changes)
|
||||
COPY rustfs/Cargo.toml rustfs/Cargo.toml
|
||||
COPY crates/*/Cargo.toml crates/
|
||||
COPY cli/rustfs-gui/Cargo.toml cli/rustfs-gui/Cargo.toml
|
||||
|
||||
# Pre-fetch dependencies for better caching
|
||||
RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
||||
--mount=type=cache,target=/usr/local/cargo/git \
|
||||
cargo fetch --locked || true
|
||||
|
||||
# 3) copy full sources (this is the main cache invalidation point)
|
||||
COPY . .
|
||||
|
||||
# Configure cargo for optimized builds
|
||||
# Cargo build configuration for lean release artifacts
|
||||
ENV CARGO_NET_GIT_FETCH_WITH_CLI=true \
|
||||
CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse \
|
||||
CARGO_INCREMENTAL=0 \
|
||||
@@ -82,75 +90,92 @@ ENV CARGO_NET_GIT_FETCH_WITH_CLI=true \
|
||||
CARGO_PROFILE_RELEASE_SPLIT_DEBUGINFO=off \
|
||||
CARGO_PROFILE_RELEASE_STRIP=symbols
|
||||
|
||||
# Generate protobuf code
|
||||
RUN cargo run --bin gproto
|
||||
# Generate protobuf/flatbuffers code (uses protoc/flatc from distro)
|
||||
RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
||||
--mount=type=cache,target=/usr/local/cargo/git \
|
||||
--mount=type=cache,target=/usr/src/rustfs/target \
|
||||
cargo run --bin gproto
|
||||
|
||||
# Build the actual application with optimizations
|
||||
RUN case "$TARGETPLATFORM" in \
|
||||
"linux/amd64") \
|
||||
echo "🔨 Building for amd64..." && \
|
||||
rustup target add x86_64-unknown-linux-gnu && \
|
||||
cargo build --release --target x86_64-unknown-linux-gnu --bin rustfs -j $(nproc) && \
|
||||
cp target/x86_64-unknown-linux-gnu/release/rustfs /usr/local/bin/rustfs \
|
||||
;; \
|
||||
"linux/arm64") \
|
||||
echo "🔨 Building for arm64..." && \
|
||||
rustup target add aarch64-unknown-linux-gnu && \
|
||||
cargo build --release --target aarch64-unknown-linux-gnu --bin rustfs -j $(nproc) && \
|
||||
cp target/aarch64-unknown-linux-gnu/release/rustfs /usr/local/bin/rustfs \
|
||||
;; \
|
||||
*) \
|
||||
echo "❌ Unsupported platform: $TARGETPLATFORM" && exit 1 \
|
||||
;; \
|
||||
# Build RustFS (target depends on TARGETPLATFORM)
|
||||
RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
||||
--mount=type=cache,target=/usr/local/cargo/git \
|
||||
--mount=type=cache,target=/usr/src/rustfs/target \
|
||||
set -eux; \
|
||||
case "${TARGETPLATFORM:-linux/amd64}" in \
|
||||
linux/amd64) \
|
||||
echo "Building for x86_64-unknown-linux-gnu"; \
|
||||
cargo build --release --locked --target x86_64-unknown-linux-gnu --bin rustfs -j "$(nproc)"; \
|
||||
install -m 0755 target/x86_64-unknown-linux-gnu/release/rustfs /usr/local/bin/rustfs \
|
||||
;; \
|
||||
linux/arm64) \
|
||||
echo "Building for aarch64-unknown-linux-gnu"; \
|
||||
cargo build --release --locked --target aarch64-unknown-linux-gnu --bin rustfs -j "$(nproc)"; \
|
||||
install -m 0755 target/aarch64-unknown-linux-gnu/release/rustfs /usr/local/bin/rustfs \
|
||||
;; \
|
||||
*) \
|
||||
echo "Unsupported TARGETPLATFORM=${TARGETPLATFORM}" >&2; exit 1 \
|
||||
;; \
|
||||
esac
|
||||
|
||||
# Runtime stage - Ubuntu minimal for better compatibility
|
||||
# -----------------------------
|
||||
# Runtime stage (Ubuntu minimal)
|
||||
# -----------------------------
|
||||
FROM ubuntu:22.04
|
||||
|
||||
# Install runtime dependencies
|
||||
RUN apt-get update && apt-get install -y \
|
||||
ARG BUILD_DATE
|
||||
ARG VCS_REF
|
||||
|
||||
LABEL name="RustFS (dev-local)" \
|
||||
maintainer="RustFS Team" \
|
||||
build-date="${BUILD_DATE}" \
|
||||
vcs-ref="${VCS_REF}" \
|
||||
description="RustFS - local development image built from source (NOT for production)."
|
||||
|
||||
# Minimal runtime deps: certificates + tzdata + coreutils (for chroot --userspec)
|
||||
RUN set -eux; \
|
||||
export DEBIAN_FRONTEND=noninteractive; \
|
||||
apt-get update; \
|
||||
apt-get install -y --no-install-recommends \
|
||||
ca-certificates \
|
||||
tzdata \
|
||||
wget \
|
||||
coreutils \
|
||||
passwd \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
coreutils; \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Create rustfs user and group
|
||||
RUN groupadd -g 1000 rustfs && \
|
||||
useradd -d /app -g rustfs -u 1000 -s /bin/bash rustfs
|
||||
# Create a conventional runtime user/group (final switch happens in entrypoint via chroot --userspec)
|
||||
RUN set -eux; \
|
||||
groupadd -g 1000 rustfs; \
|
||||
useradd -u 1000 -g rustfs -M -s /usr/sbin/nologin rustfs
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Create data directories
|
||||
RUN mkdir -p /data/rustfs{0,1,2,3} && \
|
||||
chown -R rustfs:rustfs /data /app
|
||||
# Prepare data/log directories with sane defaults
|
||||
RUN set -eux; \
|
||||
mkdir -p /data /logs; \
|
||||
chown -R rustfs:rustfs /data /logs /app; \
|
||||
chmod 0750 /data /logs
|
||||
|
||||
# Copy binary from builder stage
|
||||
COPY --from=builder /usr/local/bin/rustfs /app/rustfs
|
||||
RUN chmod +x /app/rustfs && chown rustfs:rustfs /app/rustfs
|
||||
|
||||
# Copy entrypoint script
|
||||
# Copy the freshly built binary and the entrypoint
|
||||
COPY --from=builder /usr/local/bin/rustfs /usr/bin/rustfs
|
||||
COPY entrypoint.sh /entrypoint.sh
|
||||
RUN chmod +x /entrypoint.sh
|
||||
RUN chmod +x /usr/bin/rustfs /entrypoint.sh
|
||||
|
||||
# Switch to non-root user
|
||||
USER rustfs
|
||||
# Default environment (override in docker run/compose as needed)
|
||||
ENV RUSTFS_ADDRESS=":9000" \
|
||||
RUSTFS_ACCESS_KEY="rustfsadmin" \
|
||||
RUSTFS_SECRET_KEY="rustfsadmin" \
|
||||
RUSTFS_CONSOLE_ENABLE="true" \
|
||||
RUSTFS_VOLUMES="/data" \
|
||||
RUST_LOG="warn" \
|
||||
RUSTFS_OBS_LOG_DIRECTORY="/logs" \
|
||||
RUSTFS_SINKS_FILE_PATH="/logs" \
|
||||
RUSTFS_USERNAME="rustfs" \
|
||||
RUSTFS_GROUPNAME="rustfs" \
|
||||
RUSTFS_UID="1000" \
|
||||
RUSTFS_GID="1000"
|
||||
|
||||
# Expose ports
|
||||
EXPOSE 9000
|
||||
VOLUME ["/data", "/logs"]
|
||||
|
||||
# Environment variables
|
||||
ENV RUSTFS_ACCESS_KEY=rustfsadmin \
|
||||
RUSTFS_SECRET_KEY=rustfsadmin \
|
||||
RUSTFS_ADDRESS=":9000" \
|
||||
RUSTFS_CONSOLE_ENABLE=true \
|
||||
RUSTFS_VOLUMES=/data \
|
||||
RUST_LOG=warn
|
||||
|
||||
# Volume for data
|
||||
VOLUME ["/data"]
|
||||
|
||||
# Set entrypoint and default command
|
||||
# Keep root here; entrypoint will drop privileges using chroot --userspec
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
||||
CMD ["/app/rustfs"]
|
||||
CMD ["/usr/bin/rustfs"]
|
||||
|
||||
159
Makefile
@@ -1,5 +1,5 @@
|
||||
###########
|
||||
# 远程开发,需要 VSCode 安装 Dev Containers, Remote SSH, Remote Explorer
|
||||
# Remote development requires VSCode with Dev Containers, Remote SSH, Remote Explorer
|
||||
# https://code.visualstudio.com/docs/remote/containers
|
||||
###########
|
||||
DOCKER_CLI ?= docker
|
||||
@@ -23,7 +23,7 @@ fmt-check:
|
||||
.PHONY: clippy
|
||||
clippy:
|
||||
@echo "🔍 Running clippy checks..."
|
||||
cargo clippy --fix --allow-dirty
|
||||
cargo clippy --fix --allow-dirty
|
||||
cargo clippy --all-targets --all-features -- -D warnings
|
||||
|
||||
.PHONY: check
|
||||
@@ -34,7 +34,12 @@ check:
|
||||
.PHONY: test
|
||||
test:
|
||||
@echo "🧪 Running tests..."
|
||||
cargo nextest run --all --exclude e2e_test
|
||||
@if command -v cargo-nextest >/dev/null 2>&1; then \
|
||||
cargo nextest run --all --exclude e2e_test; \
|
||||
else \
|
||||
echo "ℹ️ cargo-nextest not found; falling back to 'cargo test'"; \
|
||||
cargo test --workspace --exclude e2e_test -- --nocapture; \
|
||||
fi
|
||||
cargo test --all --doc
|
||||
|
||||
.PHONY: pre-commit
|
||||
@@ -126,7 +131,7 @@ docker-buildx-push:
|
||||
.PHONY: docker-buildx-version
|
||||
docker-buildx-version:
|
||||
@if [ -z "$(VERSION)" ]; then \
|
||||
echo "❌ 错误: 请指定版本, 例如: make docker-buildx-version VERSION=v1.0.0"; \
|
||||
echo "❌ Error: Please specify version, example: make docker-buildx-version VERSION=v1.0.0"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "🏗️ Building multi-architecture production Docker images (version: $(VERSION))..."
|
||||
@@ -135,7 +140,7 @@ docker-buildx-version:
|
||||
.PHONY: docker-buildx-push-version
|
||||
docker-buildx-push-version:
|
||||
@if [ -z "$(VERSION)" ]; then \
|
||||
echo "❌ 错误: 请指定版本, 例如: make docker-buildx-push-version VERSION=v1.0.0"; \
|
||||
echo "❌ Error: Please specify version, example: make docker-buildx-push-version VERSION=v1.0.0"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "🚀 Building and pushing multi-architecture production Docker images (version: $(VERSION))..."
|
||||
@@ -168,11 +173,11 @@ docker-dev-local:
|
||||
.PHONY: docker-dev-push
|
||||
docker-dev-push:
|
||||
@if [ -z "$(REGISTRY)" ]; then \
|
||||
echo "❌ 错误: 请指定镜像仓库, 例如: make docker-dev-push REGISTRY=ghcr.io/username"; \
|
||||
echo "❌ Error: Please specify registry, example: make docker-dev-push REGISTRY=ghcr.io/username"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "🚀 Building and pushing multi-architecture development Docker images..."
|
||||
@echo "💡 推送到仓库: $(REGISTRY)"
|
||||
@echo "💡 Pushing to registry: $(REGISTRY)"
|
||||
$(DOCKER_CLI) buildx build \
|
||||
--platform linux/amd64,linux/arm64 \
|
||||
--file $(DOCKERFILE_SOURCE) \
|
||||
@@ -210,7 +215,9 @@ docker-build-production:
|
||||
docker-build-source:
|
||||
@echo "🏗️ Building single-architecture source Docker image..."
|
||||
@echo "💡 Consider using 'make docker-dev-local' for multi-arch support"
|
||||
$(DOCKER_CLI) build -f $(DOCKERFILE_SOURCE) -t rustfs:source .
|
||||
DOCKER_BUILDKIT=1 $(DOCKER_CLI) build \
|
||||
--build-arg BUILDKIT_INLINE_CACHE=1 \
|
||||
-f $(DOCKERFILE_SOURCE) -t rustfs:source .
|
||||
|
||||
# ========================================================================================
|
||||
# Development Environment
|
||||
@@ -249,7 +256,7 @@ dev-env-restart: dev-env-stop dev-env-start
|
||||
.PHONY: docker-inspect-multiarch
|
||||
docker-inspect-multiarch:
|
||||
@if [ -z "$(IMAGE)" ]; then \
|
||||
echo "❌ 错误: 请指定镜像, 例如: make docker-inspect-multiarch IMAGE=rustfs/rustfs:latest"; \
|
||||
echo "❌ Error: Please specify image, example: make docker-inspect-multiarch IMAGE=rustfs/rustfs:latest"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "🔍 Inspecting multi-architecture image: $(IMAGE)"
|
||||
@@ -277,93 +284,93 @@ build-cross-all:
|
||||
|
||||
.PHONY: help-build
|
||||
help-build:
|
||||
@echo "🔨 RustFS 构建帮助:"
|
||||
@echo "🔨 RustFS Build Help:"
|
||||
@echo ""
|
||||
@echo "🚀 本地构建 (推荐使用):"
|
||||
@echo " make build # 构建 RustFS 二进制文件 (默认包含 console)"
|
||||
@echo " make build-dev # 开发模式构建"
|
||||
@echo " make build-musl # 构建 x86_64 musl 版本"
|
||||
@echo " make build-gnu # 构建 x86_64 GNU 版本"
|
||||
@echo " make build-musl-arm64 # 构建 aarch64 musl 版本"
|
||||
@echo " make build-gnu-arm64 # 构建 aarch64 GNU 版本"
|
||||
@echo "🚀 Local Build (Recommended):"
|
||||
@echo " make build # Build RustFS binary (includes console by default)"
|
||||
@echo " make build-dev # Development mode build"
|
||||
@echo " make build-musl # Build x86_64 musl version"
|
||||
@echo " make build-gnu # Build x86_64 GNU version"
|
||||
@echo " make build-musl-arm64 # Build aarch64 musl version"
|
||||
@echo " make build-gnu-arm64 # Build aarch64 GNU version"
|
||||
@echo ""
|
||||
@echo "🐳 Docker 构建:"
|
||||
@echo " make build-docker # 使用 Docker 容器构建"
|
||||
@echo " make build-docker BUILD_OS=ubuntu22.04 # 指定构建系统"
|
||||
@echo "🐳 Docker Build:"
|
||||
@echo " make build-docker # Build using Docker container"
|
||||
@echo " make build-docker BUILD_OS=ubuntu22.04 # Specify build system"
|
||||
@echo ""
|
||||
@echo "🏗️ 跨架构构建:"
|
||||
@echo " make build-cross-all # 构建所有架构的二进制文件"
|
||||
@echo "🏗️ Cross-architecture Build:"
|
||||
@echo " make build-cross-all # Build binaries for all architectures"
|
||||
@echo ""
|
||||
@echo "🔧 直接使用 build-rustfs.sh 脚本:"
|
||||
@echo " ./build-rustfs.sh --help # 查看脚本帮助"
|
||||
@echo " ./build-rustfs.sh --no-console # 构建时跳过 console 资源"
|
||||
@echo " ./build-rustfs.sh --force-console-update # 强制更新 console 资源"
|
||||
@echo " ./build-rustfs.sh --dev # 开发模式构建"
|
||||
@echo " ./build-rustfs.sh --sign # 签名二进制文件"
|
||||
@echo " ./build-rustfs.sh --platform x86_64-unknown-linux-gnu # 指定目标平台"
|
||||
@echo " ./build-rustfs.sh --skip-verification # 跳过二进制验证"
|
||||
@echo "🔧 Direct usage of build-rustfs.sh script:"
|
||||
@echo " ./build-rustfs.sh --help # View script help"
|
||||
@echo " ./build-rustfs.sh --no-console # Build without console resources"
|
||||
@echo " ./build-rustfs.sh --force-console-update # Force update console resources"
|
||||
@echo " ./build-rustfs.sh --dev # Development mode build"
|
||||
@echo " ./build-rustfs.sh --sign # Sign binary files"
|
||||
@echo " ./build-rustfs.sh --platform x86_64-unknown-linux-gnu # Specify target platform"
|
||||
@echo " ./build-rustfs.sh --skip-verification # Skip binary verification"
|
||||
@echo ""
|
||||
@echo "💡 build-rustfs.sh 脚本提供了更多选项、智能检测和二进制验证功能"
|
||||
@echo "💡 build-rustfs.sh script provides more options, smart detection and binary verification"
|
||||
|
||||
.PHONY: help-docker
|
||||
help-docker:
|
||||
@echo "🐳 Docker 多架构构建帮助:"
|
||||
@echo "🐳 Docker Multi-architecture Build Help:"
|
||||
@echo ""
|
||||
@echo "🚀 生产镜像构建 (推荐使用 docker-buildx.sh):"
|
||||
@echo " make docker-buildx # 构建生产多架构镜像(不推送)"
|
||||
@echo " make docker-buildx-push # 构建并推送生产多架构镜像"
|
||||
@echo " make docker-buildx-version VERSION=v1.0.0 # 构建指定版本"
|
||||
@echo " make docker-buildx-push-version VERSION=v1.0.0 # 构建并推送指定版本"
|
||||
@echo "🚀 Production Image Build (Recommended to use docker-buildx.sh):"
|
||||
@echo " make docker-buildx # Build production multi-arch image (no push)"
|
||||
@echo " make docker-buildx-push # Build and push production multi-arch image"
|
||||
@echo " make docker-buildx-version VERSION=v1.0.0 # Build specific version"
|
||||
@echo " make docker-buildx-push-version VERSION=v1.0.0 # Build and push specific version"
|
||||
@echo ""
|
||||
@echo "🔧 开发/源码镜像构建 (本地开发测试):"
|
||||
@echo " make docker-dev # 构建开发多架构镜像(无法本地加载)"
|
||||
@echo " make docker-dev-local # 构建开发单架构镜像(本地加载)"
|
||||
@echo " make docker-dev-push REGISTRY=xxx # 构建并推送开发镜像"
|
||||
@echo "🔧 Development/Source Image Build (Local development testing):"
|
||||
@echo " make docker-dev # Build dev multi-arch image (cannot load locally)"
|
||||
@echo " make docker-dev-local # Build dev single-arch image (local load)"
|
||||
@echo " make docker-dev-push REGISTRY=xxx # Build and push dev image"
|
||||
@echo ""
|
||||
@echo "🏗️ 本地生产镜像构建 (替代方案):"
|
||||
@echo " make docker-buildx-production-local # 本地构建生产单架构镜像"
|
||||
@echo "🏗️ Local Production Image Build (Alternative):"
|
||||
@echo " make docker-buildx-production-local # Build production single-arch image locally"
|
||||
@echo ""
|
||||
@echo "📦 单架构构建 (传统方式):"
|
||||
@echo " make docker-build-production # 构建单架构生产镜像"
|
||||
@echo " make docker-build-source # 构建单架构源码镜像"
|
||||
@echo "📦 Single-architecture Build (Traditional way):"
|
||||
@echo " make docker-build-production # Build single-arch production image"
|
||||
@echo " make docker-build-source # Build single-arch source image"
|
||||
@echo ""
|
||||
@echo "🚀 开发环境管理:"
|
||||
@echo " make dev-env-start # 启动开发容器环境"
|
||||
@echo " make dev-env-stop # 停止开发容器环境"
|
||||
@echo " make dev-env-restart # 重启开发容器环境"
|
||||
@echo "🚀 Development Environment Management:"
|
||||
@echo " make dev-env-start # Start development container environment"
|
||||
@echo " make dev-env-stop # Stop development container environment"
|
||||
@echo " make dev-env-restart # Restart development container environment"
|
||||
@echo ""
|
||||
@echo "🔧 辅助工具:"
|
||||
@echo " make build-cross-all # 构建所有架构的二进制文件"
|
||||
@echo " make docker-inspect-multiarch IMAGE=xxx # 检查镜像的架构支持"
|
||||
@echo "🔧 Auxiliary Tools:"
|
||||
@echo " make build-cross-all # Build binaries for all architectures"
|
||||
@echo " make docker-inspect-multiarch IMAGE=xxx # Check image architecture support"
|
||||
@echo ""
|
||||
@echo "📋 环境变量:"
|
||||
@echo " REGISTRY 镜像仓库地址 (推送时需要)"
|
||||
@echo " DOCKERHUB_USERNAME Docker Hub 用户名"
|
||||
@echo " DOCKERHUB_TOKEN Docker Hub 访问令牌"
|
||||
@echo " GITHUB_TOKEN GitHub 访问令牌"
|
||||
@echo "📋 Environment Variables:"
|
||||
@echo " REGISTRY Image registry address (required for push)"
|
||||
@echo " DOCKERHUB_USERNAME Docker Hub username"
|
||||
@echo " DOCKERHUB_TOKEN Docker Hub access token"
|
||||
@echo " GITHUB_TOKEN GitHub access token"
|
||||
@echo ""
|
||||
@echo "💡 建议:"
|
||||
@echo " - 生产用途: 使用 docker-buildx* 命令 (基于预编译二进制)"
|
||||
@echo " - 本地开发: 使用 docker-dev* 命令 (从源码构建)"
|
||||
@echo " - 开发环境: 使用 dev-env-* 命令管理开发容器"
|
||||
@echo "💡 Suggestions:"
|
||||
@echo " - Production use: Use docker-buildx* commands (based on precompiled binaries)"
|
||||
@echo " - Local development: Use docker-dev* commands (build from source)"
|
||||
@echo " - Development environment: Use dev-env-* commands to manage dev containers"
|
||||
|
||||
.PHONY: help
|
||||
help:
|
||||
@echo "🦀 RustFS Makefile 帮助:"
|
||||
@echo "🦀 RustFS Makefile Help:"
|
||||
@echo ""
|
||||
@echo "📋 主要命令分类:"
|
||||
@echo " make help-build # 显示构建相关帮助"
|
||||
@echo " make help-docker # 显示 Docker 相关帮助"
|
||||
@echo "📋 Main Command Categories:"
|
||||
@echo " make help-build # Show build-related help"
|
||||
@echo " make help-docker # Show Docker-related help"
|
||||
@echo ""
|
||||
@echo "🔧 代码质量:"
|
||||
@echo " make fmt # 格式化代码"
|
||||
@echo " make clippy # 运行 clippy 检查"
|
||||
@echo " make test # 运行测试"
|
||||
@echo " make pre-commit # 运行所有预提交检查"
|
||||
@echo "🔧 Code Quality:"
|
||||
@echo " make fmt # Format code"
|
||||
@echo " make clippy # Run clippy checks"
|
||||
@echo " make test # Run tests"
|
||||
@echo " make pre-commit # Run all pre-commit checks"
|
||||
@echo ""
|
||||
@echo "🚀 快速开始:"
|
||||
@echo " make build # 构建 RustFS 二进制"
|
||||
@echo " make docker-dev-local # 构建开发 Docker 镜像(本地)"
|
||||
@echo " make dev-env-start # 启动开发环境"
|
||||
@echo "🚀 Quick Start:"
|
||||
@echo " make build # Build RustFS binary"
|
||||
@echo " make docker-dev-local # Build development Docker image (local)"
|
||||
@echo " make dev-env-start # Start development environment"
|
||||
@echo ""
|
||||
@echo "💡 更多帮助请使用 'make help-build' 或 'make help-docker'"
|
||||
@echo "💡 For more help use 'make help-build' or 'make help-docker'"
|
||||
|
||||
10
README.md
@@ -81,14 +81,14 @@ To get started with RustFS, follow these steps:
|
||||
2. **Docker Quick Start (Option 2)**
|
||||
|
||||
```bash
|
||||
# Latest stable release
|
||||
docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs:latest
|
||||
# create data and logs directories
|
||||
mkdir -p data logs
|
||||
|
||||
# Development version (main branch)
|
||||
docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs:main-latest
|
||||
# using latest alpha version
|
||||
docker run -d -p 9000:9000 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:alpha
|
||||
|
||||
# Specific version
|
||||
docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs:v1.0.0
|
||||
docker run -d -p 9000:9000 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:1.0.0.alpha.45
|
||||
```
|
||||
|
||||
3. **Build from Source (Option 3) - Advanced Users**
|
||||
|
||||
@@ -1,46 +0,0 @@
|
||||
# Copyright 2024 RustFS Team
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
[package]
|
||||
name = "rustfs-gui"
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
rust-version.workspace = true
|
||||
version.workspace = true
|
||||
|
||||
[dependencies]
|
||||
chrono = { workspace = true }
|
||||
dioxus = { workspace = true, features = ["router"] }
|
||||
dirs = { workspace = true }
|
||||
hex = { workspace = true }
|
||||
keyring = { workspace = true }
|
||||
rfd = { workspace = true }
|
||||
rust-embed = { workspace = true, features = ["interpolate-folder-path"] }
|
||||
rust-i18n = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
sha2 = { workspace = true }
|
||||
tokio = { workspace = true, features = ["io-util", "net", "process", "sync"] }
|
||||
tracing-subscriber = { workspace = true, features = ["fmt", "env-filter", "tracing-log", "time", "local-time", "json"] }
|
||||
tracing-appender = { workspace = true }
|
||||
|
||||
[features]
|
||||
default = ["desktop"]
|
||||
web = ["dioxus/web"]
|
||||
desktop = ["dioxus/desktop"]
|
||||
mobile = ["dioxus/mobile"]
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
@@ -1,52 +0,0 @@
|
||||
[application]
|
||||
|
||||
# App (Project) Name
|
||||
name = "rustfs-gui"
|
||||
|
||||
# The static resource path
|
||||
asset_dir = "public"
|
||||
|
||||
[web.app]
|
||||
|
||||
# HTML title tag content
|
||||
title = "rustfs-gui"
|
||||
|
||||
# include `assets` in web platform
|
||||
[web.resource]
|
||||
|
||||
# Additional CSS style files
|
||||
style = []
|
||||
|
||||
# Additional JavaScript files
|
||||
script = []
|
||||
|
||||
[web.resource.dev]
|
||||
|
||||
# Javascript code file
|
||||
# serve: [dev-server] only
|
||||
script = []
|
||||
|
||||
[bundle]
|
||||
identifier = "com.rustfs.cli.gui"
|
||||
|
||||
publisher = "RustFsGUI"
|
||||
|
||||
category = "Utility"
|
||||
|
||||
copyright = "Copyright 2025 rustfs.com"
|
||||
|
||||
icon = [
|
||||
"assets/icons/icon.icns",
|
||||
"assets/icons/icon.ico",
|
||||
"assets/icons/icon.png",
|
||||
"assets/icons/rustfs-icon.png",
|
||||
]
|
||||
#[bundle.macos]
|
||||
#provider_short_name = "RustFs"
|
||||
[bundle.windows]
|
||||
tsp = true
|
||||
icon_path = "assets/icons/icon.ico"
|
||||
allow_downgrades = true
|
||||
[bundle.windows.webview_install_mode]
|
||||
[bundle.windows.webview_install_mode.EmbedBootstrapper]
|
||||
silent = true
|
||||
@@ -1,34 +0,0 @@
|
||||
## Rustfs GUI
|
||||
|
||||
### Tailwind
|
||||
|
||||
1. Install npm: https://docs.npmjs.com/downloading-and-installing-node-js-and-npm
|
||||
2. Install the Tailwind CSS CLI: https://tailwindcss.com/docs/installation
|
||||
3. Run the following command in the root of the project to start the Tailwind CSS compiler:
|
||||
|
||||
```bash
|
||||
npx tailwindcss -i ./input.css -o ./assets/tailwind.css --watch
|
||||
```
|
||||
|
||||
### Dioxus CLI
|
||||
|
||||
#### Install the stable version (recommended)
|
||||
|
||||
```shell
|
||||
cargo install dioxus-cli
|
||||
```
|
||||
|
||||
### Serving Your App
|
||||
|
||||
Run the following command in the root of your project to start developing with the default platform:
|
||||
|
||||
```bash
|
||||
dx serve
|
||||
```
|
||||
|
||||
To run for a different platform, use the `--platform platform` flag. E.g.
|
||||
|
||||
```bash
|
||||
dx serve --platform desktop
|
||||
```
|
||||
|
||||
|
Before Width: | Height: | Size: 80 KiB |
|
Before Width: | Height: | Size: 23 KiB |
|
Before Width: | Height: | Size: 80 KiB |
|
Before Width: | Height: | Size: 80 KiB |
|
Before Width: | Height: | Size: 23 KiB |
|
Before Width: | Height: | Size: 4.5 KiB |
|
Before Width: | Height: | Size: 9.9 KiB |
|
Before Width: | Height: | Size: 498 B |
|
Before Width: | Height: | Size: 969 B |
|
Before Width: | Height: | Size: 9.9 KiB |
|
Before Width: | Height: | Size: 23 KiB |
|
Before Width: | Height: | Size: 969 B |
|
Before Width: | Height: | Size: 2.0 KiB |
|
Before Width: | Height: | Size: 23 KiB |
|
Before Width: | Height: | Size: 47 KiB |
|
Before Width: | Height: | Size: 23 KiB |
@@ -1,48 +0,0 @@
|
||||
/**
|
||||
* Copyright 2024 RustFS Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
window.switchTab = function (tabId) {
|
||||
// Hide everything
|
||||
document.querySelectorAll('.tab-content').forEach(content => {
|
||||
content.classList.add('hidden');
|
||||
});
|
||||
|
||||
// Reset all label styles
|
||||
document.querySelectorAll('.tab-btn').forEach(btn => {
|
||||
btn.classList.remove('border-b-2', 'border-black');
|
||||
btn.classList.add('text-gray-500');
|
||||
});
|
||||
|
||||
// Displays the selected content
|
||||
const activeContent = document.getElementById(tabId);
|
||||
if (activeContent) {
|
||||
activeContent.classList.remove('hidden');
|
||||
}
|
||||
|
||||
// Updates the selected label style
|
||||
const activeBtn = document.querySelector(`[data-tab="${tabId}"]`);
|
||||
if (activeBtn) {
|
||||
activeBtn.classList.add('border-b-2', 'border-black');
|
||||
activeBtn.classList.remove('text-gray-500');
|
||||
}
|
||||
};
|
||||
|
||||
window.togglePassword = function (button) {
|
||||
const input = button.parentElement.querySelector('input[type="password"], input[type="text"]');
|
||||
if (input) {
|
||||
input.type = input.type === 'password' ? 'text' : 'password';
|
||||
}
|
||||
};
|
||||
|
Before Width: | Height: | Size: 23 KiB |
|
Before Width: | Height: | Size: 34 KiB |
@@ -1,15 +0,0 @@
|
||||
<svg width="1558" height="260" viewBox="0 0 1558 260" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<g clip-path="url(#clip0_0_3)">
|
||||
<path d="M1288.5 112.905H1159.75V58.4404H1262L1270 0L1074 0V260H1159.75V162.997H1296.95L1288.5 112.905Z" fill="#0196D0"/>
|
||||
<path d="M1058.62 58.4404V0H789V58.4404H881.133V260H966.885V58.4404H1058.62Z" fill="#0196D0"/>
|
||||
<path d="M521 179.102V0L454.973 15V161C454.973 181.124 452.084 193.146 443.5 202C434.916 211.257 419.318 214.5 400.5 214.5C381.022 214.5 366.744 210.854 357.5 202C348.916 193.548 346.357 175.721 346.357 156V0L280 15V175.48C280 208.08 290.234 229.412 309.712 241.486C329.19 253.56 358.903 260 400.5 260C440.447 260 470.159 253.56 490.297 241.486C510.766 229.412 521 208.483 521 179.102Z" fill="#0196D0"/>
|
||||
<path d="M172.84 84.2813C172.84 97.7982 168.249 107.737 158.41 113.303C149.883 118.471 137.092 121.254 120.693 122.049V162.997C129.876 163.792 138.076 166.177 144.307 176.514L184.647 260H265L225.316 180.489C213.181 155.046 201.374 149.48 178.744 143.517C212.197 138.349 241.386 118.471 241.386 73.1499C241.386 53.2722 233.843 30.2141 218.756 17.8899C203.998 5.56575 183.991 0 159.394 0H120.693V48.5015H127.58C142.23 48.5015 153.6 51.4169 161.689 57.2477C169.233 62.8135 172.84 71.5596 172.84 84.2813ZM120.693 122.049C119.163 122.049 117.741 122.049 116.43 122.049H68.5457V48.5015H120.693V0H0V260H70.5137V162.997H110.526C113.806 162.997 117.741 162.997 120.693 162.997V122.049Z" fill="#0196D0"/>
|
||||
<path d="M774 179.297C774 160.829 766.671 144.669 752.013 131.972C738.127 119.66 712.025 110.169 673.708 103.5C662.136 101.191 651.722 99.6523 643.235 97.3437C586.532 84.6467 594.632 52.7118 650.564 52.7118C680.651 52.7118 709.582 61.946 738.127 66.9478C742.37 67.7174 743.913 68.1021 744.298 68.1021L750.47 12.697C720.383 3.46282 684.895 0 654.036 0C616.619 0 587.689 6.54088 567.245 19.2379C546.801 31.9349 536 57.7137 536 82.3382C536 103.5 543.715 119.66 559.916 131.972C575.731 143.515 604.276 152.749 645.55 160.059C658.279 162.368 668.694 163.907 676.794 166.215C685.023 168.524 691.066 170.704 694.924 172.756C702.253 176.604 706.11 182.375 706.11 188.531C706.11 196.611 701.481 202.767 692.224 207C664.836 220.081 587.689 212.001 556.83 198.15L543.715 247.784C547.186 248.169 552.972 249.323 559.916 250.477C616.619 259.327 690.681 270.869 741.212 238.935C762.814 225.468 774 206.23 774 179.297Z" fill="#0196D0"/>
|
||||
<path d="M1558 179.568C1558 160.383 1550.42 144.268 1535.67 131.99C1521.32 119.968 1494.34 110.631 1454.74 103.981C1442.38 101.679 1432.01 99.3764 1422.84 97.8416C1422.44 97.8416 1422.04 97.8416 1422.04 97.4579V112.422L1361.04 75.2038L1422.04 38.3692V52.9496C1424.7 52.9496 1427.49 52.9496 1430.41 52.9496C1461.51 52.9496 1491.42 62.5419 1521.32 67.5299C1525.31 67.9136 1526.9 67.9136 1527.3 67.9136L1533.68 12.6619C1502.98 3.83692 1465.9 0 1434 0C1395.33 0 1365.43 6.52277 1345.09 19.5683C1323.16 32.6139 1312 57.9376 1312 82.8776C1312 103.981 1320.37 120.096 1336.72 131.607C1353.46 143.885 1382.97 153.093 1425.23 160.383C1434 161.535 1441.18 162.686 1447.56 164.22L1448.36 150.791L1507.36 190.312L1445.57 224.844L1445.96 212.949C1409.68 215.635 1357.45 209.112 1333.53 197.985L1320.37 247.482C1323.56 248.249 1329.54 248.633 1336.72 250.551C1395.33 259.376 1471.88 270.887 1524.11 238.657C1546.84 225.611 1558 205.659 1558 179.568Z" fill="#0196D0"/>
|
||||
</g>
|
||||
<defs>
|
||||
<clipPath id="clip0_0_3">
|
||||
<rect width="1558" height="260" fill="white"/>
|
||||
</clipPath>
|
||||
</defs>
|
||||
</svg>
|
||||
|
Before Width: | Height: | Size: 3.4 KiB |
@@ -1,33 +0,0 @@
|
||||
/**
|
||||
* Copyright 2024 RustFS Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#navbar {
|
||||
display: flex;
|
||||
flex-direction: row;
|
||||
}
|
||||
|
||||
#navbar a {
|
||||
color: #ffffff;
|
||||
margin-right: 20px;
|
||||
text-decoration: none;
|
||||
transition: color 0.2s ease;
|
||||
}
|
||||
|
||||
#navbar a:hover {
|
||||
cursor: pointer;
|
||||
color: #ffffff;
|
||||
/ / #91a4d2;
|
||||
}
|
||||
@@ -1,972 +0,0 @@
|
||||
/**
|
||||
* Copyright 2024 RustFS Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
*, ::before, ::after {
|
||||
--tw-border-spacing-x: 0;
|
||||
--tw-border-spacing-y: 0;
|
||||
--tw-translate-x: 0;
|
||||
--tw-translate-y: 0;
|
||||
--tw-rotate: 0;
|
||||
--tw-skew-x: 0;
|
||||
--tw-skew-y: 0;
|
||||
--tw-scale-x: 1;
|
||||
--tw-scale-y: 1;
|
||||
--tw-pan-x: ;
|
||||
--tw-pan-y: ;
|
||||
--tw-pinch-zoom: ;
|
||||
--tw-scroll-snap-strictness: proximity;
|
||||
--tw-gradient-from-position: ;
|
||||
--tw-gradient-via-position: ;
|
||||
--tw-gradient-to-position: ;
|
||||
--tw-ordinal: ;
|
||||
--tw-slashed-zero: ;
|
||||
--tw-numeric-figure: ;
|
||||
--tw-numeric-spacing: ;
|
||||
--tw-numeric-fraction: ;
|
||||
--tw-ring-inset: ;
|
||||
--tw-ring-offset-width: 0px;
|
||||
--tw-ring-offset-color: #fff;
|
||||
--tw-ring-color: rgb(59 130 246 / 0.5);
|
||||
--tw-ring-offset-shadow: 0 0 #0000;
|
||||
--tw-ring-shadow: 0 0 #0000;
|
||||
--tw-shadow: 0 0 #0000;
|
||||
--tw-shadow-colored: 0 0 #0000;
|
||||
--tw-blur: ;
|
||||
--tw-brightness: ;
|
||||
--tw-contrast: ;
|
||||
--tw-grayscale: ;
|
||||
--tw-hue-rotate: ;
|
||||
--tw-invert: ;
|
||||
--tw-saturate: ;
|
||||
--tw-sepia: ;
|
||||
--tw-drop-shadow: ;
|
||||
--tw-backdrop-blur: ;
|
||||
--tw-backdrop-brightness: ;
|
||||
--tw-backdrop-contrast: ;
|
||||
--tw-backdrop-grayscale: ;
|
||||
--tw-backdrop-hue-rotate: ;
|
||||
--tw-backdrop-invert: ;
|
||||
--tw-backdrop-opacity: ;
|
||||
--tw-backdrop-saturate: ;
|
||||
--tw-backdrop-sepia: ;
|
||||
--tw-contain-size: ;
|
||||
--tw-contain-layout: ;
|
||||
--tw-contain-paint: ;
|
||||
--tw-contain-style: ;
|
||||
}
|
||||
|
||||
::backdrop {
|
||||
--tw-border-spacing-x: 0;
|
||||
--tw-border-spacing-y: 0;
|
||||
--tw-translate-x: 0;
|
||||
--tw-translate-y: 0;
|
||||
--tw-rotate: 0;
|
||||
--tw-skew-x: 0;
|
||||
--tw-skew-y: 0;
|
||||
--tw-scale-x: 1;
|
||||
--tw-scale-y: 1;
|
||||
--tw-pan-x: ;
|
||||
--tw-pan-y: ;
|
||||
--tw-pinch-zoom: ;
|
||||
--tw-scroll-snap-strictness: proximity;
|
||||
--tw-gradient-from-position: ;
|
||||
--tw-gradient-via-position: ;
|
||||
--tw-gradient-to-position: ;
|
||||
--tw-ordinal: ;
|
||||
--tw-slashed-zero: ;
|
||||
--tw-numeric-figure: ;
|
||||
--tw-numeric-spacing: ;
|
||||
--tw-numeric-fraction: ;
|
||||
--tw-ring-inset: ;
|
||||
--tw-ring-offset-width: 0px;
|
||||
--tw-ring-offset-color: #fff;
|
||||
--tw-ring-color: rgb(59 130 246 / 0.5);
|
||||
--tw-ring-offset-shadow: 0 0 #0000;
|
||||
--tw-ring-shadow: 0 0 #0000;
|
||||
--tw-shadow: 0 0 #0000;
|
||||
--tw-shadow-colored: 0 0 #0000;
|
||||
--tw-blur: ;
|
||||
--tw-brightness: ;
|
||||
--tw-contrast: ;
|
||||
--tw-grayscale: ;
|
||||
--tw-hue-rotate: ;
|
||||
--tw-invert: ;
|
||||
--tw-saturate: ;
|
||||
--tw-sepia: ;
|
||||
--tw-drop-shadow: ;
|
||||
--tw-backdrop-blur: ;
|
||||
--tw-backdrop-brightness: ;
|
||||
--tw-backdrop-contrast: ;
|
||||
--tw-backdrop-grayscale: ;
|
||||
--tw-backdrop-hue-rotate: ;
|
||||
--tw-backdrop-invert: ;
|
||||
--tw-backdrop-opacity: ;
|
||||
--tw-backdrop-saturate: ;
|
||||
--tw-backdrop-sepia: ;
|
||||
--tw-contain-size: ;
|
||||
--tw-contain-layout: ;
|
||||
--tw-contain-paint: ;
|
||||
--tw-contain-style: ;
|
||||
}
|
||||
|
||||
/*
|
||||
! tailwindcss v3.4.17 | MIT License | https://tailwindcss.com
|
||||
*/
|
||||
|
||||
/*
|
||||
1. Prevent padding and border from affecting element width. (https://github.com/mozdevs/cssremedy/issues/4)
|
||||
2. Allow adding a border to an element by just adding a border-width. (https://github.com/tailwindcss/tailwindcss/pull/116)
|
||||
*/
|
||||
|
||||
*,
|
||||
::before,
|
||||
::after {
|
||||
box-sizing: border-box;
|
||||
/* 1 */
|
||||
border-width: 0;
|
||||
/* 2 */
|
||||
border-style: solid;
|
||||
/* 2 */
|
||||
border-color: #e5e7eb;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
::before,
|
||||
::after {
|
||||
--tw-content: '';
|
||||
}
|
||||
|
||||
/*
|
||||
1. Use a consistent sensible line-height in all browsers.
|
||||
2. Prevent adjustments of font size after orientation changes in iOS.
|
||||
3. Use a more readable tab size.
|
||||
4. Use the user's configured `sans` font-family by default.
|
||||
5. Use the user's configured `sans` font-feature-settings by default.
|
||||
6. Use the user's configured `sans` font-variation-settings by default.
|
||||
7. Disable tap highlights on iOS
|
||||
*/
|
||||
|
||||
html,
|
||||
:host {
|
||||
line-height: 1.5;
|
||||
/* 1 */
|
||||
-webkit-text-size-adjust: 100%;
|
||||
/* 2 */
|
||||
-moz-tab-size: 4;
|
||||
/* 3 */
|
||||
-o-tab-size: 4;
|
||||
tab-size: 4;
|
||||
/* 3 */
|
||||
font-family: ui-sans-serif, system-ui, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";
|
||||
/* 4 */
|
||||
font-feature-settings: normal;
|
||||
/* 5 */
|
||||
font-variation-settings: normal;
|
||||
/* 6 */
|
||||
-webkit-tap-highlight-color: transparent;
|
||||
/* 7 */
|
||||
}
|
||||
|
||||
/*
|
||||
1. Remove the margin in all browsers.
|
||||
2. Inherit line-height from `html` so users can set them as a class directly on the `html` element.
|
||||
*/
|
||||
|
||||
body {
|
||||
margin: 0;
|
||||
/* 1 */
|
||||
line-height: inherit;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
/*
|
||||
1. Add the correct height in Firefox.
|
||||
2. Correct the inheritance of border color in Firefox. (https://bugzilla.mozilla.org/show_bug.cgi?id=190655)
|
||||
3. Ensure horizontal rules are visible by default.
|
||||
*/
|
||||
|
||||
hr {
|
||||
height: 0;
|
||||
/* 1 */
|
||||
color: inherit;
|
||||
/* 2 */
|
||||
border-top-width: 1px;
|
||||
/* 3 */
|
||||
}
|
||||
|
||||
/*
|
||||
Add the correct text decoration in Chrome, Edge, and Safari.
|
||||
*/
|
||||
|
||||
abbr:where([title]) {
|
||||
-webkit-text-decoration: underline dotted;
|
||||
text-decoration: underline dotted;
|
||||
}
|
||||
|
||||
/*
|
||||
Remove the default font size and weight for headings.
|
||||
*/
|
||||
|
||||
h1,
|
||||
h2,
|
||||
h3,
|
||||
h4,
|
||||
h5,
|
||||
h6 {
|
||||
font-size: inherit;
|
||||
font-weight: inherit;
|
||||
}
|
||||
|
||||
/*
|
||||
Reset links to optimize for opt-in styling instead of opt-out.
|
||||
*/
|
||||
|
||||
a {
|
||||
color: inherit;
|
||||
text-decoration: inherit;
|
||||
}
|
||||
|
||||
/*
|
||||
Add the correct font weight in Edge and Safari.
|
||||
*/
|
||||
|
||||
b,
|
||||
strong {
|
||||
font-weight: bolder;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Use the user's configured `mono` font-family by default.
|
||||
2. Use the user's configured `mono` font-feature-settings by default.
|
||||
3. Use the user's configured `mono` font-variation-settings by default.
|
||||
4. Correct the odd `em` font sizing in all browsers.
|
||||
*/
|
||||
|
||||
code,
|
||||
kbd,
|
||||
samp,
|
||||
pre {
|
||||
font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
|
||||
/* 1 */
|
||||
font-feature-settings: normal;
|
||||
/* 2 */
|
||||
font-variation-settings: normal;
|
||||
/* 3 */
|
||||
font-size: 1em;
|
||||
/* 4 */
|
||||
}
|
||||
|
||||
/*
|
||||
Add the correct font size in all browsers.
|
||||
*/
|
||||
|
||||
small {
|
||||
font-size: 80%;
|
||||
}
|
||||
|
||||
/*
|
||||
Prevent `sub` and `sup` elements from affecting the line height in all browsers.
|
||||
*/
|
||||
|
||||
sub,
|
||||
sup {
|
||||
font-size: 75%;
|
||||
line-height: 0;
|
||||
position: relative;
|
||||
vertical-align: baseline;
|
||||
}
|
||||
|
||||
sub {
|
||||
bottom: -0.25em;
|
||||
}
|
||||
|
||||
sup {
|
||||
top: -0.5em;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Remove text indentation from table contents in Chrome and Safari. (https://bugs.chromium.org/p/chromium/issues/detail?id=999088, https://bugs.webkit.org/show_bug.cgi?id=201297)
|
||||
2. Correct table border color inheritance in all Chrome and Safari. (https://bugs.chromium.org/p/chromium/issues/detail?id=935729, https://bugs.webkit.org/show_bug.cgi?id=195016)
|
||||
3. Remove gaps between table borders by default.
|
||||
*/
|
||||
|
||||
table {
|
||||
text-indent: 0;
|
||||
/* 1 */
|
||||
border-color: inherit;
|
||||
/* 2 */
|
||||
border-collapse: collapse;
|
||||
/* 3 */
|
||||
}
|
||||
|
||||
/*
|
||||
1. Change the font styles in all browsers.
|
||||
2. Remove the margin in Firefox and Safari.
|
||||
3. Remove default padding in all browsers.
|
||||
*/
|
||||
|
||||
button,
|
||||
input,
|
||||
optgroup,
|
||||
select,
|
||||
textarea {
|
||||
font-family: inherit;
|
||||
/* 1 */
|
||||
font-feature-settings: inherit;
|
||||
/* 1 */
|
||||
font-variation-settings: inherit;
|
||||
/* 1 */
|
||||
font-size: 100%;
|
||||
/* 1 */
|
||||
font-weight: inherit;
|
||||
/* 1 */
|
||||
line-height: inherit;
|
||||
/* 1 */
|
||||
letter-spacing: inherit;
|
||||
/* 1 */
|
||||
color: inherit;
|
||||
/* 1 */
|
||||
margin: 0;
|
||||
/* 2 */
|
||||
padding: 0;
|
||||
/* 3 */
|
||||
}
|
||||
|
||||
/*
|
||||
Remove the inheritance of text transform in Edge and Firefox.
|
||||
*/
|
||||
|
||||
button,
|
||||
select {
|
||||
text-transform: none;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Correct the inability to style clickable types in iOS and Safari.
|
||||
2. Remove default button styles.
|
||||
*/
|
||||
|
||||
button,
|
||||
input:where([type='button']),
|
||||
input:where([type='reset']),
|
||||
input:where([type='submit']) {
|
||||
-webkit-appearance: button;
|
||||
/* 1 */
|
||||
background-color: transparent;
|
||||
/* 2 */
|
||||
background-image: none;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
/*
|
||||
Use the modern Firefox focus style for all focusable elements.
|
||||
*/
|
||||
|
||||
:-moz-focusring {
|
||||
outline: auto;
|
||||
}
|
||||
|
||||
/*
|
||||
Remove the additional `:invalid` styles in Firefox. (https://github.com/mozilla/gecko-dev/blob/2f9eacd9d3d995c937b4251a5557d95d494c9be1/layout/style/res/forms.css#L728-L737)
|
||||
*/
|
||||
|
||||
:-moz-ui-invalid {
|
||||
box-shadow: none;
|
||||
}
|
||||
|
||||
/*
|
||||
Add the correct vertical alignment in Chrome and Firefox.
|
||||
*/
|
||||
|
||||
progress {
|
||||
vertical-align: baseline;
|
||||
}
|
||||
|
||||
/*
|
||||
Correct the cursor style of increment and decrement buttons in Safari.
|
||||
*/
|
||||
|
||||
::-webkit-inner-spin-button,
|
||||
::-webkit-outer-spin-button {
|
||||
height: auto;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Correct the odd appearance in Chrome and Safari.
|
||||
2. Correct the outline style in Safari.
|
||||
*/
|
||||
|
||||
[type='search'] {
|
||||
-webkit-appearance: textfield;
|
||||
/* 1 */
|
||||
outline-offset: -2px;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
/*
|
||||
Remove the inner padding in Chrome and Safari on macOS.
|
||||
*/
|
||||
|
||||
::-webkit-search-decoration {
|
||||
-webkit-appearance: none;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Correct the inability to style clickable types in iOS and Safari.
|
||||
2. Change font properties to `inherit` in Safari.
|
||||
*/
|
||||
|
||||
::-webkit-file-upload-button {
|
||||
-webkit-appearance: button;
|
||||
/* 1 */
|
||||
font: inherit;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
/*
|
||||
Add the correct display in Chrome and Safari.
|
||||
*/
|
||||
|
||||
summary {
|
||||
display: list-item;
|
||||
}
|
||||
|
||||
/*
|
||||
Removes the default spacing and border for appropriate elements.
|
||||
*/
|
||||
|
||||
blockquote,
|
||||
dl,
|
||||
dd,
|
||||
h1,
|
||||
h2,
|
||||
h3,
|
||||
h4,
|
||||
h5,
|
||||
h6,
|
||||
hr,
|
||||
figure,
|
||||
p,
|
||||
pre {
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
fieldset {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
legend {
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
ol,
|
||||
ul,
|
||||
menu {
|
||||
list-style: none;
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
/*
|
||||
Reset default styling for dialogs.
|
||||
*/
|
||||
|
||||
dialog {
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
/*
|
||||
Prevent resizing textareas horizontally by default.
|
||||
*/
|
||||
|
||||
textarea {
|
||||
resize: vertical;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Reset the default placeholder opacity in Firefox. (https://github.com/tailwindlabs/tailwindcss/issues/3300)
|
||||
2. Set the default placeholder color to the user's configured gray 400 color.
|
||||
*/
|
||||
|
||||
input::-moz-placeholder, textarea::-moz-placeholder {
|
||||
opacity: 1;
|
||||
/* 1 */
|
||||
color: #9ca3af;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
input::placeholder,
|
||||
textarea::placeholder {
|
||||
opacity: 1;
|
||||
/* 1 */
|
||||
color: #9ca3af;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
/*
|
||||
Set the default cursor for buttons.
|
||||
*/
|
||||
|
||||
button,
|
||||
[role="button"] {
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
/*
|
||||
Make sure disabled buttons don't get the pointer cursor.
|
||||
*/
|
||||
|
||||
:disabled {
|
||||
cursor: default;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Make replaced elements `display: block` by default. (https://github.com/mozdevs/cssremedy/issues/14)
|
||||
2. Add `vertical-align: middle` to align replaced elements more sensibly by default. (https://github.com/jensimmons/cssremedy/issues/14#issuecomment-634934210)
|
||||
This can trigger a poorly considered lint error in some tools but is included by design.
|
||||
*/
|
||||
|
||||
img,
|
||||
svg,
|
||||
video,
|
||||
canvas,
|
||||
audio,
|
||||
iframe,
|
||||
embed,
|
||||
object {
|
||||
display: block;
|
||||
/* 1 */
|
||||
vertical-align: middle;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
/*
|
||||
Constrain images and videos to the parent width and preserve their intrinsic aspect ratio. (https://github.com/mozdevs/cssremedy/issues/14)
|
||||
*/
|
||||
|
||||
img,
|
||||
video {
|
||||
max-width: 100%;
|
||||
height: auto;
|
||||
}
|
||||
|
||||
/* Make elements with the HTML hidden attribute stay hidden by default */
|
||||
|
||||
[hidden]:where(:not([hidden="until-found"])) {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.static {
|
||||
position: static;
|
||||
}
|
||||
|
||||
.absolute {
|
||||
position: absolute;
|
||||
}
|
||||
|
||||
.relative {
|
||||
position: relative;
|
||||
}
|
||||
|
||||
.right-2 {
|
||||
right: 0.5rem;
|
||||
}
|
||||
|
||||
.right-6 {
|
||||
right: 1.5rem;
|
||||
}
|
||||
|
||||
.top-1\/2 {
|
||||
top: 50%;
|
||||
}
|
||||
|
||||
.top-4 {
|
||||
top: 1rem;
|
||||
}
|
||||
|
||||
.z-10 {
|
||||
z-index: 10;
|
||||
}
|
||||
|
||||
.mb-2 {
|
||||
margin-bottom: 0.5rem;
|
||||
}
|
||||
|
||||
.mb-4 {
|
||||
margin-bottom: 1rem;
|
||||
}
|
||||
|
||||
.mb-6 {
|
||||
margin-bottom: 1.5rem;
|
||||
}
|
||||
|
||||
.mb-8 {
|
||||
margin-bottom: 2rem;
|
||||
}
|
||||
|
||||
.ml-2 {
|
||||
margin-left: 0.5rem;
|
||||
}
|
||||
|
||||
.flex {
|
||||
display: flex;
|
||||
}
|
||||
|
||||
.hidden {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.h-16 {
|
||||
height: 4rem;
|
||||
}
|
||||
|
||||
.h-24 {
|
||||
height: 6rem;
|
||||
}
|
||||
|
||||
.h-4 {
|
||||
height: 1rem;
|
||||
}
|
||||
|
||||
.h-5 {
|
||||
height: 1.25rem;
|
||||
}
|
||||
|
||||
.h-6 {
|
||||
height: 1.5rem;
|
||||
}
|
||||
|
||||
.min-h-screen {
|
||||
min-height: 100vh;
|
||||
}
|
||||
|
||||
.w-16 {
|
||||
width: 4rem;
|
||||
}
|
||||
|
||||
.w-20 {
|
||||
width: 5rem;
|
||||
}
|
||||
|
||||
.w-24 {
|
||||
width: 6rem;
|
||||
}
|
||||
|
||||
.w-4 {
|
||||
width: 1rem;
|
||||
}
|
||||
|
||||
.w-48 {
|
||||
width: 12rem;
|
||||
}
|
||||
|
||||
.w-5 {
|
||||
width: 1.25rem;
|
||||
}
|
||||
|
||||
.w-6 {
|
||||
width: 1.5rem;
|
||||
}
|
||||
|
||||
.w-full {
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.flex-1 {
|
||||
flex: 1 1 0%;
|
||||
}
|
||||
|
||||
.-translate-y-1\/2 {
|
||||
--tw-translate-y: -50%;
|
||||
transform: translate(var(--tw-translate-x), var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y));
|
||||
}
|
||||
|
||||
.transform {
|
||||
transform: translate(var(--tw-translate-x), var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y));
|
||||
}
|
||||
|
||||
@keyframes spin {
|
||||
to {
|
||||
transform: rotate(360deg);
|
||||
}
|
||||
}
|
||||
|
||||
.animate-spin {
|
||||
animation: spin 1s linear infinite;
|
||||
}
|
||||
|
||||
.flex-col {
|
||||
flex-direction: column;
|
||||
}
|
||||
|
||||
.items-center {
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
.justify-center {
|
||||
justify-content: center;
|
||||
}
|
||||
|
||||
.space-x-2 > :not([hidden]) ~ :not([hidden]) {
|
||||
--tw-space-x-reverse: 0;
|
||||
margin-right: calc(0.5rem * var(--tw-space-x-reverse));
|
||||
margin-left: calc(0.5rem * calc(1 - var(--tw-space-x-reverse)));
|
||||
}
|
||||
|
||||
.space-x-4 > :not([hidden]) ~ :not([hidden]) {
|
||||
--tw-space-x-reverse: 0;
|
||||
margin-right: calc(1rem * var(--tw-space-x-reverse));
|
||||
margin-left: calc(1rem * calc(1 - var(--tw-space-x-reverse)));
|
||||
}
|
||||
|
||||
.space-x-8 > :not([hidden]) ~ :not([hidden]) {
|
||||
--tw-space-x-reverse: 0;
|
||||
margin-right: calc(2rem * var(--tw-space-x-reverse));
|
||||
margin-left: calc(2rem * calc(1 - var(--tw-space-x-reverse)));
|
||||
}
|
||||
|
||||
.space-y-4 > :not([hidden]) ~ :not([hidden]) {
|
||||
--tw-space-y-reverse: 0;
|
||||
margin-top: calc(1rem * calc(1 - var(--tw-space-y-reverse)));
|
||||
margin-bottom: calc(1rem * var(--tw-space-y-reverse));
|
||||
}
|
||||
|
||||
.space-y-6 > :not([hidden]) ~ :not([hidden]) {
|
||||
--tw-space-y-reverse: 0;
|
||||
margin-top: calc(1.5rem * calc(1 - var(--tw-space-y-reverse)));
|
||||
margin-bottom: calc(1.5rem * var(--tw-space-y-reverse));
|
||||
}
|
||||
|
||||
.rounded {
|
||||
border-radius: 0.25rem;
|
||||
}
|
||||
|
||||
.rounded-full {
|
||||
border-radius: 9999px;
|
||||
}
|
||||
|
||||
.rounded-lg {
|
||||
border-radius: 0.5rem;
|
||||
}
|
||||
|
||||
.rounded-md {
|
||||
border-radius: 0.375rem;
|
||||
}
|
||||
|
||||
.border {
|
||||
border-width: 1px;
|
||||
}
|
||||
|
||||
.border-b {
|
||||
border-bottom-width: 1px;
|
||||
}
|
||||
|
||||
.border-b-2 {
|
||||
border-bottom-width: 2px;
|
||||
}
|
||||
|
||||
.border-black {
|
||||
--tw-border-opacity: 1;
|
||||
border-color: rgb(0 0 0 / var(--tw-border-opacity, 1));
|
||||
}
|
||||
|
||||
.border-gray-200 {
|
||||
--tw-border-opacity: 1;
|
||||
border-color: rgb(229 231 235 / var(--tw-border-opacity, 1));
|
||||
}
|
||||
|
||||
.bg-\[\#111827\] {
|
||||
--tw-bg-opacity: 1;
|
||||
background-color: rgb(17 24 39 / var(--tw-bg-opacity, 1));
|
||||
}
|
||||
|
||||
.bg-gray-100 {
|
||||
--tw-bg-opacity: 1;
|
||||
background-color: rgb(243 244 246 / var(--tw-bg-opacity, 1));
|
||||
}
|
||||
|
||||
.bg-gray-900 {
|
||||
--tw-bg-opacity: 1;
|
||||
background-color: rgb(17 24 39 / var(--tw-bg-opacity, 1));
|
||||
}
|
||||
|
||||
.bg-red-500 {
|
||||
--tw-bg-opacity: 1;
|
||||
background-color: rgb(239 68 68 / var(--tw-bg-opacity, 1));
|
||||
}
|
||||
|
||||
.bg-white {
|
||||
--tw-bg-opacity: 1;
|
||||
background-color: rgb(255 255 255 / var(--tw-bg-opacity, 1));
|
||||
}
|
||||
|
||||
.p-2 {
|
||||
padding: 0.5rem;
|
||||
}
|
||||
|
||||
.p-4 {
|
||||
padding: 1rem;
|
||||
}
|
||||
|
||||
.p-8 {
|
||||
padding: 2rem;
|
||||
}
|
||||
|
||||
.px-1 {
|
||||
padding-left: 0.25rem;
|
||||
padding-right: 0.25rem;
|
||||
}
|
||||
|
||||
.px-3 {
|
||||
padding-left: 0.75rem;
|
||||
padding-right: 0.75rem;
|
||||
}
|
||||
|
||||
.px-4 {
|
||||
padding-left: 1rem;
|
||||
padding-right: 1rem;
|
||||
}
|
||||
|
||||
.py-0\.5 {
|
||||
padding-top: 0.125rem;
|
||||
padding-bottom: 0.125rem;
|
||||
}
|
||||
|
||||
.py-2 {
|
||||
padding-top: 0.5rem;
|
||||
padding-bottom: 0.5rem;
|
||||
}
|
||||
|
||||
.py-4 {
|
||||
padding-top: 1rem;
|
||||
padding-bottom: 1rem;
|
||||
}
|
||||
|
||||
.py-6 {
|
||||
padding-top: 1.5rem;
|
||||
padding-bottom: 1.5rem;
|
||||
}
|
||||
|
||||
.pr-10 {
|
||||
padding-right: 2.5rem;
|
||||
}
|
||||
|
||||
.text-2xl {
|
||||
font-size: 1.5rem;
|
||||
line-height: 2rem;
|
||||
}
|
||||
|
||||
.text-base {
|
||||
font-size: 1rem;
|
||||
line-height: 1.5rem;
|
||||
}
|
||||
|
||||
.text-sm {
|
||||
font-size: 0.875rem;
|
||||
line-height: 1.25rem;
|
||||
}
|
||||
|
||||
.font-medium {
|
||||
font-weight: 500;
|
||||
}
|
||||
|
||||
.font-semibold {
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.text-blue-500 {
|
||||
--tw-text-opacity: 1;
|
||||
color: rgb(59 130 246 / var(--tw-text-opacity, 1));
|
||||
}
|
||||
|
||||
.text-blue-600 {
|
||||
--tw-text-opacity: 1;
|
||||
color: rgb(37 99 235 / var(--tw-text-opacity, 1));
|
||||
}
|
||||
|
||||
.text-gray-400 {
|
||||
--tw-text-opacity: 1;
|
||||
color: rgb(156 163 175 / var(--tw-text-opacity, 1));
|
||||
}
|
||||
|
||||
.text-gray-500 {
|
||||
--tw-text-opacity: 1;
|
||||
color: rgb(107 114 128 / var(--tw-text-opacity, 1));
|
||||
}
|
||||
|
||||
.text-gray-600 {
|
||||
--tw-text-opacity: 1;
|
||||
color: rgb(75 85 99 / var(--tw-text-opacity, 1));
|
||||
}
|
||||
|
||||
.text-white {
|
||||
--tw-text-opacity: 1;
|
||||
color: rgb(255 255 255 / var(--tw-text-opacity, 1));
|
||||
}
|
||||
|
||||
.opacity-25 {
|
||||
opacity: 0.25;
|
||||
}
|
||||
|
||||
.opacity-75 {
|
||||
opacity: 0.75;
|
||||
}
|
||||
|
||||
.filter {
|
||||
filter: var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow);
|
||||
}
|
||||
|
||||
.hover\:bg-\[\#1f2937\]:hover {
|
||||
--tw-bg-opacity: 1;
|
||||
background-color: rgb(31 41 55 / var(--tw-bg-opacity, 1));
|
||||
}
|
||||
|
||||
.hover\:bg-gray-100:hover {
|
||||
--tw-bg-opacity: 1;
|
||||
background-color: rgb(243 244 246 / var(--tw-bg-opacity, 1));
|
||||
}
|
||||
|
||||
.hover\:bg-red-600:hover {
|
||||
--tw-bg-opacity: 1;
|
||||
background-color: rgb(220 38 38 / var(--tw-bg-opacity, 1));
|
||||
}
|
||||
|
||||
.hover\:text-gray-700:hover {
|
||||
--tw-text-opacity: 1;
|
||||
color: rgb(55 65 81 / var(--tw-text-opacity, 1));
|
||||
}
|
||||
|
||||
.hover\:text-gray-900:hover {
|
||||
--tw-text-opacity: 1;
|
||||
color: rgb(17 24 39 / var(--tw-text-opacity, 1));
|
||||
}
|
||||
|
||||
.focus\:outline-none:focus {
|
||||
outline: 2px solid transparent;
|
||||
outline-offset: 2px;
|
||||
}
|
||||
|
||||
.focus\:ring-2:focus {
|
||||
--tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
|
||||
--tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color);
|
||||
box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
|
||||
}
|
||||
|
||||
.focus\:ring-blue-500:focus {
|
||||
--tw-ring-opacity: 1;
|
||||
--tw-ring-color: rgb(59 130 246 / var(--tw-ring-opacity, 1));
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
rustfs bin path, do not delete
|
||||
@@ -1,19 +0,0 @@
|
||||
/**
|
||||
* Copyright 2024 RustFS Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
@tailwind base;
|
||||
@tailwind components;
|
||||
@tailwind utilities;
|
||||
@@ -1,330 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::components::navbar::LoadingSpinner;
|
||||
use crate::route::Route;
|
||||
use crate::utils::{RustFSConfig, ServiceManager};
|
||||
use chrono::Datelike;
|
||||
use dioxus::logger::tracing::debug;
|
||||
use dioxus::prelude::*;
|
||||
use std::time::Duration;
|
||||
|
||||
/// Application logo, bundled at build time by Dioxus' `asset!` macro.
const HEADER_LOGO: Asset = asset!("/assets/rustfs-logo.svg");
/// Compiled Tailwind stylesheet; linked into the document head by `Home`.
const TAILWIND_CSS: Asset = asset!("/assets/tailwind.css");
|
||||
|
||||
/// UI-facing state of the RustFS background service.
///
/// Variant names describe the *action the toggle button offers next*, not the
/// current process state: `Start` means the service is stopped (the button
/// will start it), `Stop` means it is running (the button will stop it).
/// `Copy`/`Eq` are derived in addition to the original traits: the enum is a
/// fieldless two-variant type, so both are free and backward compatible.
#[derive(PartialEq, Eq, Debug, Clone, Copy)]
enum ServiceState {
    /// Service is not running; the UI offers "Start service".
    Start,
    /// Service is running; the UI offers "Stop service".
    Stop,
}
|
||||
|
||||
/// Define the Home component
|
||||
/// The Home component is the main component of the application
|
||||
/// It is responsible for starting and stopping the service
|
||||
/// It also displays the service status and provides a button to toggle the service
|
||||
/// The Home component also displays the footer of the application
|
||||
/// The footer contains links to the official site, documentation, GitHub, and license
|
||||
/// The footer also displays the version of the application
|
||||
/// The Home component also contains a button to change the theme of the application
|
||||
/// The Home component also contains a button to go to the settings page
|
||||
#[component]
|
||||
pub fn Home() -> Element {
|
||||
#[allow(clippy::redundant_closure)]
|
||||
let service = use_signal(|| ServiceManager::new());
|
||||
let conf = RustFSConfig::load().unwrap_or_else(|e| {
|
||||
ServiceManager::show_error(&format!("load config failed: {e}"));
|
||||
RustFSConfig::default()
|
||||
});
|
||||
|
||||
debug!("loaded configurations: {:?}", conf);
|
||||
let config = use_signal(|| conf.clone());
|
||||
|
||||
use dioxus_router::prelude::Link;
|
||||
use document::{Meta, Stylesheet, Title};
|
||||
let mut service_state = use_signal(|| ServiceState::Start);
|
||||
// Create a periodic check on the effect of the service status
|
||||
use_effect(move || {
|
||||
spawn(async move {
|
||||
loop {
|
||||
if let Some(pid) = ServiceManager::check_service_status().await {
|
||||
debug!("service_running true pid: {:?}", pid);
|
||||
service_state.set(ServiceState::Stop);
|
||||
} else {
|
||||
debug!("service_running true pid: 0");
|
||||
service_state.set(ServiceState::Start);
|
||||
}
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
}
|
||||
});
|
||||
});
|
||||
debug!("project start service_state: {:?}", service_state.read());
|
||||
// Use 'use_signal' to manage service status
|
||||
let mut loading = use_signal(|| false);
|
||||
let mut start_service = move |_| {
|
||||
let service = service;
|
||||
let config = config.read().clone();
|
||||
let mut service_state = service_state;
|
||||
// set the loading status
|
||||
loading.set(true);
|
||||
debug!("stop loading_state: {:?}", loading.read());
|
||||
spawn(async move {
|
||||
match service.read().start(config).await {
|
||||
Ok(result) => {
|
||||
if result.success {
|
||||
let duration = result.end_time - result.start_time;
|
||||
debug!("The service starts successfully and takes a long time:{}ms", duration.num_milliseconds());
|
||||
service_state.set(ServiceState::Stop);
|
||||
} else {
|
||||
ServiceManager::show_error(&result.message);
|
||||
service_state.set(ServiceState::Start);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
ServiceManager::show_error(&format!("start service failed: {e}"));
|
||||
}
|
||||
}
|
||||
// Only set loading to false when it's actually done
|
||||
loading.set(false);
|
||||
debug!("start loading_state: {:?}", loading.read());
|
||||
});
|
||||
};
|
||||
|
||||
let mut stop_service = move |_| {
|
||||
let service = service;
|
||||
let mut service_state = service_state;
|
||||
// set the loading status
|
||||
loading.set(true);
|
||||
spawn(async move {
|
||||
match service.read().stop().await {
|
||||
Ok(result) => {
|
||||
if result.success {
|
||||
let duration = result.end_time - result.start_time;
|
||||
debug!("The service stops successfully and takes a long time:{}ms", duration.num_milliseconds());
|
||||
service_state.set(ServiceState::Start);
|
||||
} else {
|
||||
ServiceManager::show_error(&result.message);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
ServiceManager::show_error(&format!("stop service failed: {e}"));
|
||||
}
|
||||
}
|
||||
debug!("service_state: {:?}", service_state.read());
|
||||
// Only set loading to false when it's actually done
|
||||
loading.set(false);
|
||||
debug!("stop loading_state: {:?}", loading.read());
|
||||
});
|
||||
};
|
||||
|
||||
// Toggle the state when the button is clicked
|
||||
let toggle_service = {
|
||||
let mut service_state = service_state;
|
||||
debug!("toggle_service service_state: {:?}", service_state.read());
|
||||
move |_| {
|
||||
if service_state.read().eq(&ServiceState::Stop) {
|
||||
// If the service status is started, you need to run a command to stop the service
|
||||
stop_service(());
|
||||
service_state.set(ServiceState::Start);
|
||||
} else {
|
||||
start_service(());
|
||||
service_state.set(ServiceState::Stop);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Define dynamic styles based on state
|
||||
let button_class = if service_state.read().eq(&ServiceState::Start) {
|
||||
"bg-[#111827] hover:bg-[#1f2937] text-white px-4 py-2 rounded-md flex items-center space-x-2"
|
||||
} else {
|
||||
"bg-red-500 hover:bg-red-600 text-white px-4 py-2 rounded-md flex items-center space-x-2"
|
||||
};
|
||||
|
||||
rsx! {
|
||||
// The Stylesheet component inserts a style link into the head of the document
|
||||
Stylesheet {href: TAILWIND_CSS,}
|
||||
Title { "RustFS APP" }
|
||||
Meta {
|
||||
name: "description",
|
||||
// TODO: translate to english
|
||||
content: "RustFS RustFS 用热门安全的 Rust 语言开发,兼容 S3 协议。适用于 AI/ML 及海量数据存储、大数据、互联网、工业和保密存储等全部场景。近乎免费使用。遵循 Apache 2 协议,支持国产保密设备和系统。",
|
||||
}
|
||||
div { class: "min-h-screen flex flex-col items-center bg-white",
|
||||
div { class: "absolute top-4 right-6 flex space-x-2",
|
||||
// change theme
|
||||
button { class: "p-2 hover:bg-gray-100 rounded-lg", ChangeThemeButton {} }
|
||||
// setting button
|
||||
Link {
|
||||
class: "p-2 hover:bg-gray-100 rounded-lg",
|
||||
to: Route::SettingViews {},
|
||||
SettingButton {}
|
||||
}
|
||||
}
|
||||
main { class: "flex-1 flex flex-col items-center justify-center space-y-6 p-4",
|
||||
div { class: "w-24 h-24 bg-gray-900 rounded-full flex items-center justify-center",
|
||||
img { alt: "Logo", class: "w-16 h-16", src: HEADER_LOGO }
|
||||
}
|
||||
div { class: "text-gray-600",
|
||||
"Service is running on "
|
||||
span { class: "text-blue-600", " 127.0.0.1:9000 " }
|
||||
}
|
||||
LoadingSpinner {
|
||||
loading: loading.read().to_owned(),
|
||||
text: "processing...",
|
||||
}
|
||||
button { class: button_class, onclick: toggle_service,
|
||||
svg {
|
||||
class: "h-4 w-4",
|
||||
fill: "none",
|
||||
stroke: "currentColor",
|
||||
view_box: "0 0 24 24",
|
||||
xmlns: "http://www.w3.org/2000/svg",
|
||||
if service_state.read().eq(&ServiceState::Start) {
|
||||
path {
|
||||
d: "M14.752 11.168l-3.197-2.132A1 1 0 0010 9.87v4.263a1 1 0 001.555.832l3.197-2.132a1 1 0 000-1.664z",
|
||||
stroke_linecap: "round",
|
||||
stroke_linejoin: "round",
|
||||
stroke_width: "2",
|
||||
}
|
||||
path {
|
||||
d: "M21 12a9 9 0 11-18 0 9 9 0 0118 0z",
|
||||
stroke_linecap: "round",
|
||||
stroke_linejoin: "round",
|
||||
stroke_width: "2",
|
||||
}
|
||||
} else {
|
||||
path {
|
||||
stroke_linecap: "round",
|
||||
stroke_linejoin: "round",
|
||||
stroke_width: "2",
|
||||
d: "M21 12a9 9 0 11-18 0 9 9 0 0118 0z",
|
||||
}
|
||||
path {
|
||||
stroke_linecap: "round",
|
||||
stroke_linejoin: "round",
|
||||
stroke_width: "2",
|
||||
d: "M9 10h6v4H9z",
|
||||
}
|
||||
}
|
||||
}
|
||||
span { id: "serviceStatus",
|
||||
if service_state.read().eq(&ServiceState::Start) {
|
||||
"Start service"
|
||||
} else {
|
||||
"Stop service"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Footer { version: "v1.0.0".to_string() }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[component]
|
||||
pub fn Footer(version: String) -> Element {
|
||||
let now = chrono::Local::now();
|
||||
let year = now.naive_local().year();
|
||||
rsx! {
|
||||
footer { class: "w-full py-6 flex flex-col items-center space-y-4 mb-6",
|
||||
nav { class: "flex space-x-4 text-gray-600",
|
||||
a { class: "hover:text-gray-900", href: "https://rustfs.com", "Official Site" }
|
||||
a {
|
||||
class: "hover:text-gray-900",
|
||||
href: "https://rustfs.com/docs",
|
||||
"Documentation"
|
||||
}
|
||||
a {
|
||||
class: "hover:text-gray-900",
|
||||
href: "https://github.com/rustfs/rustfs",
|
||||
"GitHub"
|
||||
}
|
||||
a {
|
||||
class: "hover:text-gray-900",
|
||||
href: "https://rustfs.com/docs/license/",
|
||||
"License"
|
||||
}
|
||||
a { class: "hover:text-gray-900", href: "#", "Sponsors" }
|
||||
}
|
||||
div { class: "text-gray-500 text-sm", " © rustfs.com {year}, All rights reserved." }
|
||||
div { class: "text-gray-400 text-sm mb-8", " version {version} " }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Browser-style "back" button.
///
/// The quoted `"onclick"` attribute injects a literal JavaScript snippet
/// (`window.history.back()`) into the rendered element, so navigation is
/// handled by the webview's history rather than by a Dioxus event handler.
#[component]
pub fn GoBackButtons() -> Element {
    rsx! {
        button {
            class: "p-2 hover:bg-gray-100 rounded-lg",
            "onclick": "window.history.back()",
            "Back to the Past"
        }
    }
}
|
||||
|
||||
/// Browser-style "forward" button.
///
/// Mirrors `GoBackButtons`: the quoted `"onclick"` attribute injects the
/// literal JavaScript `window.history.forward()` into the rendered element.
#[component]
pub fn GoForwardButtons() -> Element {
    rsx! {
        button {
            class: "p-2 hover:bg-gray-100 rounded-lg",
            "onclick": "window.history.forward()",
            "Back to the Future"
        }
    }
}
|
||||
|
||||
/// Icon for the theme-toggle control (an outlined chip/display glyph).
///
/// Renders only the SVG; the caller (`Home`) wraps it in a clickable button.
/// NOTE(review): no click handler is attached anywhere visible here — the
/// theme switch itself appears unimplemented; confirm before relying on it.
#[component]
pub fn ChangeThemeButton() -> Element {
    rsx! {
        svg {
            class: "h-6 w-6 text-gray-600",
            fill: "none",
            stroke: "currentColor",
            view_box: "0 0 24 24",
            xmlns: "http://www.w3.org/2000/svg",
            path {
                d: "M9 3v2m6-2v2M9 19v2m6-2v2M5 9H3m2 6H3m18-6h-2m2 6h-2M7 19h10a2 2 0 002-2V7a2 2 0 00-2-2H7a2 2 0 00-2 2v10a2 2 0 002 2zM9 9h6v6H9V9z",
                stroke_linecap: "round",
                stroke_linejoin: "round",
                stroke_width: "2",
            }
        }
    }
}
|
||||
|
||||
/// Gear icon for the settings shortcut.
///
/// Renders only the SVG; `Home` wraps it in a router `Link` that navigates to
/// `Route::SettingViews`.
#[component]
pub fn SettingButton() -> Element {
    rsx! {
        svg {
            class: "h-6 w-6 text-gray-600",
            fill: "none",
            stroke: "currentColor",
            view_box: "0 0 24 24",
            xmlns: "http://www.w3.org/2000/svg",
            // Outer gear outline.
            path {
                d: "M10.325 4.317c.426-1.756 2.924-1.756 3.35 0a1.724 1.724 0 002.573 1.066c1.543-.94 3.31.826 2.37 2.37a1.724 1.724 0 001.065 2.572c1.756.426 1.756 2.924 0 3.35a1.724 1.724 0 00-1.066 2.573c.94 1.543-.826 3.31-2.37 2.37a1.724 1.724 0 00-2.572 1.065c-.426 1.756-2.924 1.756-3.35 0a1.724 1.724 0 00-2.573-1.066c-1.543.94-3.31-.826-2.37-2.37a1.724 1.724 0 00-1.065-2.572c-1.756-.426-1.756-2.924 0-3.35a1.724 1.724 0 001.066-2.573c-.94-1.543.826-3.31 2.37-2.37.996.608 2.296.07 2.572-1.065z",
                stroke_linecap: "round",
                stroke_linejoin: "round",
                stroke_width: "2",
            }
            // Inner hub circle.
            path {
                d: "M15 12a3 3 0 11-6 0 3 3 0 016 0z",
                stroke_linecap: "round",
                stroke_linejoin: "round",
                stroke_width: "2",
            }
        }
    }
}
|
||||
@@ -1,20 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod home;
|
||||
pub use home::Home;
|
||||
mod navbar;
|
||||
pub use navbar::Navbar;
|
||||
mod setting;
|
||||
pub use setting::Setting;
|
||||
@@ -1,74 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::route::Route;
|
||||
use dioxus::logger::tracing::debug;
|
||||
use dioxus::prelude::*;
|
||||
|
||||
/// Navbar stylesheet, bundled at build time via `asset!` and linked by `Navbar`.
const NAVBAR_CSS: Asset = asset!("/assets/styling/navbar.css");
|
||||
|
||||
/// Layout component: links the navbar stylesheet and renders child routes.
///
/// The nav links themselves are kept invisible (both the `hidden` class and an
/// inline `display: none`); only the `Outlet` content is shown.
#[component]
pub fn Navbar() -> Element {
    rsx! {
        document::Link { rel: "stylesheet", href: NAVBAR_CSS }

        div { id: "navbar", class: "hidden", style: "display: none;",
            Link { to: Route::HomeViews {}, "Home" }
            Link { to: Route::SettingViews {}, "Setting" }
        }

        // Render whichever child route of `Route` is currently active.
        Outlet::<Route> {}
    }
}
|
||||
|
||||
/// Props for `LoadingSpinner`.
#[derive(Props, PartialEq, Debug, Clone)]
pub struct LoadingSpinnerProps {
    // When false the spinner renders nothing at all.
    #[props(default = true)]
    loading: bool,
    // Label rendered next to the spinner. NOTE(review): the default text is
    // Chinese ("processing...") while callers pass English — confirm whether
    // the default should be localized; it is a runtime value, left unchanged.
    #[props(default = "正在处理中...")]
    text: &'static str,
}
|
||||
|
||||
/// Small inline spinner with a text label.
///
/// Returns an empty element while `props.loading` is false, so callers can
/// render it unconditionally and drive visibility through the prop alone.
#[component]
pub fn LoadingSpinner(props: LoadingSpinnerProps) -> Element {
    debug!("loading: {}", props.loading);
    if !props.loading {
        debug!("LoadingSpinner false loading: {}", props.loading);
        // Nothing to show — short-circuit with an empty fragment.
        return rsx! {};
    }
    rsx! {
        div { class: "flex items-center justify-center z-10",
            // Rotating ring: a faint full circle plus a brighter arc, spun by
            // the Tailwind `animate-spin` utility.
            svg {
                class: "animate-spin h-5 w-5 text-blue-500",
                xmlns: "http://www.w3.org/2000/svg",
                fill: "none",
                view_box: "0 0 24 24",
                circle {
                    class: "opacity-25",
                    cx: "12",
                    cy: "12",
                    r: "10",
                    stroke: "currentColor",
                    stroke_width: "4",
                }
                path {
                    class: "opacity-75",
                    fill: "currentColor",
                    d: "M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z",
                }
            }
            span { class: "ml-2 text-gray-600", "{props.text}" }
        }
    }
}
|
||||
@@ -1,216 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::components::navbar::LoadingSpinner;
|
||||
use dioxus::logger::tracing::{debug, error};
|
||||
use dioxus::prelude::*;
|
||||
|
||||
// Page-local JS helpers used by the settings markup below
// (`switchTab(...)` and `togglePassword(...)` handlers).
const SETTINGS_JS: Asset = asset!("/assets/js/sts.js");
// Compiled Tailwind stylesheet for the page.
const TAILWIND_CSS: Asset = asset!("/assets/tailwind.css");
|
||||
/// Settings page: lets the user edit the RustFS service configuration
/// (service address, storage path, credentials) and then save the values
/// and restart the managed service.
#[component]
pub fn Setting() -> Element {
    use crate::utils::{RustFSConfig, ServiceManager};
    use document::{Meta, Script, Stylesheet, Title};

    // Closure kept (instead of fn-pointer) for `use_signal`; lint silenced deliberately.
    #[allow(clippy::redundant_closure)]
    let service = use_signal(|| ServiceManager::new());
    // Fall back to the built-in defaults when loading the stored config fails.
    let conf = RustFSConfig::load().unwrap_or_else(|e| {
        error!("load config error: {}", e);
        RustFSConfig::default_config()
    });
    debug!("conf address: {:?}", conf.clone().address);

    // One signal per editable field so each input can update independently;
    // `config` holds the aggregate that gets written back on save.
    let config = use_signal(|| conf.clone());
    let address_state = use_signal(|| conf.address.to_string());
    let mut host_state = use_signal(|| conf.host.to_string());
    let mut port_state = use_signal(|| conf.port.to_string());
    let mut access_key_state = use_signal(|| conf.access_key.to_string());
    let mut secret_key_state = use_signal(|| conf.secret_key.to_string());
    let mut volume_name_state = use_signal(|| conf.volume_name.to_string());
    let loading = use_signal(|| false);

    // Click handler: copy the field signals into `config`, then restart the
    // service asynchronously while the spinner is shown.
    let save_and_restart = {
        let host_state = host_state;
        let port_state = port_state;
        let access_key_state = access_key_state;
        let secret_key_state = secret_key_state;
        let volume_name_state = volume_name_state;
        let mut loading = loading;
        // NOTE: this logs when the closure is *created* (component render),
        // not when the button is clicked.
        debug!("save_and_restart access_key:{}", access_key_state.read());
        move |_| {
            // set the loading status
            loading.set(true);
            let mut config = config;
            config.write().address = format!("{}:{}", host_state.read(), port_state.read());
            config.write().host = host_state.read().to_string();
            config.write().port = port_state.read().to_string();
            config.write().access_key = access_key_state.read().to_string();
            config.write().secret_key = secret_key_state.read().to_string();
            config.write().volume_name = volume_name_state.read().to_string();
            // restart service
            let service = service;
            let config = config.read().clone();
            spawn(async move {
                if let Err(e) = service.read().restart(config).await {
                    // Error text is user-facing Chinese ("failed to send restart command").
                    ServiceManager::show_error(&format!("发送重启命令失败:{e}"));
                }
                // reset the status when you're done
                loading.set(false);
            });
        }
    };

    rsx! {
        Title { "Settings - RustFS App" }
        Meta { name: "description", content: "Settings - RustFS App." }
        // The Stylesheet component inserts a style link into the head of the document
        Stylesheet { href: TAILWIND_CSS }
        Script { src: SETTINGS_JS }
        div { class: "bg-white p-8",
            h1 { class: "text-2xl font-semibold mb-6", "Settings" }
            // Tab bar; actual switching is done by `switchTab` from SETTINGS_JS.
            div { class: "border-b border-gray-200 mb-6",
                nav { class: "flex space-x-8",
                    button {
                        class: "tab-btn px-1 py-4 text-sm font-medium border-b-2 border-black",
                        "data-tab": "service",
                        "onclick": "switchTab('service')",
                        "Service "
                    }
                    button {
                        class: "tab-btn px-1 py-4 text-sm font-medium text-gray-500 hover:text-gray-700",
                        "data-tab": "user",
                        "onclick": "switchTab('user')",
                        "User "
                    }
                    // Logs tab is present but hidden for now.
                    button {
                        class: "tab-btn px-1 py-4 text-sm font-medium text-gray-500 hover:text-gray-700 hidden",
                        "data-tab": "logs",
                        "onclick": "switchTab('logs')",
                        "Logs "
                    }
                }
            }
            div { id: "tabContent",
                // --- Service tab: address and storage path ---
                div { class: "tab-content", id: "service",
                    div { class: "mb-8",
                        h2 { class: "text-base font-medium mb-2", "Service address" }
                        p { class: "text-gray-600 mb-4",
                            " The service address is the IP address and port number of the service. the default address is "
                            code { class: "bg-gray-100 px-1 py-0.5 rounded", {address_state} }
                            ". "
                        }
                        div { class: "flex space-x-2",
                            input {
                                class: "border rounded px-3 py-2 w-48 focus:outline-none focus:ring-2 focus:ring-blue-500",
                                r#type: "text",
                                value: host_state,
                                oninput: move |evt| host_state.set(evt.value().clone()),
                            }
                            span { class: "flex items-center", ":" }
                            input {
                                class: "border rounded px-3 py-2 w-20 focus:outline-none focus:ring-2 focus:ring-blue-500",
                                r#type: "text",
                                value: port_state,
                                oninput: move |evt| port_state.set(evt.value().clone()),
                            }
                        }
                    }
                    div { class: "mb-8",
                        h2 { class: "text-base font-medium mb-2", "Storage path" }
                        p { class: "text-gray-600 mb-4",
                            "Update the storage path of the service. the default path is {volume_name_state}."
                        }
                        input {
                            class: "border rounded px-3 py-2 w-full focus:outline-none focus:ring-2 focus:ring-blue-500",
                            r#type: "text",
                            value: volume_name_state,
                            oninput: move |evt| volume_name_state.set(evt.value().clone()),
                        }
                    }
                }
                // --- User tab: access key and secret key ---
                div { class: "tab-content hidden", id: "user",
                    div { class: "mb-8",
                        h2 { class: "text-base font-medium mb-2", "User" }
                        p { class: "text-gray-600 mb-4",
                            "The user is the owner of the service. the default user is "
                            code { class: "bg-gray-100 px-1 py-0.5 rounded", {access_key_state} }
                        }
                        input {
                            class: "border rounded px-3 py-2 w-full focus:outline-none focus:ring-2 focus:ring-blue-500",
                            r#type: "text",
                            value: access_key_state,
                            oninput: move |evt| access_key_state.set(evt.value().clone()),
                        }
                    }
                    div { class: "mb-8",
                        h2 { class: "text-base font-medium mb-2", "Password" }
                        p { class: "text-gray-600 mb-4",
                            "The password is the password of the user. the default password is "
                            code { class: "bg-gray-100 px-1 py-0.5 rounded", {secret_key_state} }
                        }
                        div { class: "relative",
                            input {
                                class: "border rounded px-3 py-2 w-full pr-10 focus:outline-none focus:ring-2 focus:ring-blue-500",
                                r#type: "password",
                                value: secret_key_state,
                                oninput: move |evt| secret_key_state.set(evt.value().clone()),
                            }
                            // Eye icon toggling password visibility via SETTINGS_JS.
                            button {
                                class: "absolute right-2 top-1/2 transform -translate-y-1/2 text-gray-500 hover:text-gray-700",
                                "onclick": "togglePassword(this)",
                                svg {
                                    class: "h-5 w-5",
                                    fill: "currentColor",
                                    view_box: "0 0 20 20",
                                    xmlns: "http://www.w3.org/2000/svg",
                                    path { d: "M10 12a2 2 0 100-4 2 2 0 000 4z" }
                                    path {
                                        clip_rule: "evenodd",
                                        d: "M.458 10C1.732 5.943 5.522 3 10 3s8.268 2.943 9.542 7c-1.274 4.057-5.064 7-9.542 7S1.732 14.057.458 10zM14 10a4 4 0 11-8 0 4 4 0 018 0z",
                                        fill_rule: "evenodd",
                                    }
                                }
                            }
                        }
                    }
                }
                // --- Logs tab (hidden): read-only placeholder path ---
                div { class: "tab-content hidden", id: "logs",
                    div { class: "mb-8",
                        h2 { class: "text-base font-medium mb-2", "Logs storage path" }
                        p { class: "text-gray-600 mb-4",
                            "The logs storage path is the path where the logs are stored. the default path is /var/log/rustfs. "
                        }
                        input {
                            class: "border rounded px-3 py-2 w-full focus:outline-none focus:ring-2 focus:ring-blue-500",
                            r#type: "text",
                            value: "/var/logs/rustfs",
                        }
                    }
                }
            }
            div { class: "flex space-x-4",
                button {
                    class: "bg-[#111827] text-white px-4 py-2 rounded hover:bg-[#1f2937]",
                    onclick: save_and_restart,
                    " Save and restart "
                }
                GoBackButton { "Back" }
            }
            // Overlay spinner while the restart is in flight
            // (caption is user-facing Chinese: "service processing...").
            LoadingSpinner {
                loading: loading.read().to_owned(),
                text: "服务处理中...",
            }
        }
    }
}
|
||||
@@ -1,23 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod components;
|
||||
mod route;
|
||||
mod utils;
|
||||
mod views;
|
||||
|
||||
/// Application entry point: installs logging, then hands control to Dioxus.
fn main() {
    // Keep the guard alive for the whole program so logging keeps working
    // until exit (presumably a non-blocking writer guard — dropped on return).
    let _worker_guard = utils::init_logger();
    dioxus::launch(views::App);
}
|
||||
@@ -1,17 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod router;
|
||||
|
||||
pub use router::Route;
|
||||
@@ -1,564 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use keyring::Entry;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::error::Error;
|
||||
|
||||
/// Configuration for the RustFS service.
///
/// All fields are kept as `String` (including `port`) because they round-trip
/// through JSON in the system keyring and are bound to text inputs in the UI.
///
/// # Fields
/// * `address` - The `host:port` address of the RustFS service
/// * `host` - The host part of `address`
/// * `port` - The port part of `address` (stored as text)
/// * `access_key` - The access key of the RustFS service
/// * `secret_key` - The secret key of the RustFS service
/// * `domain_name` - The domain name of the RustFS service
/// * `volume_name` - The data volume path of the RustFS service
/// * `console_address` - The console `host:port` address of the RustFS service
///
/// # Example
/// ```
/// let config = RustFSConfig {
///     address: "127.0.0.1:9000".to_string(),
///     host: "127.0.0.1".to_string(),
///     port: "9000".to_string(),
///     access_key: "rustfsadmin".to_string(),
///     secret_key: "rustfsadmin".to_string(),
///     domain_name: "demo.rustfs.com".to_string(),
///     volume_name: "data".to_string(),
///     console_address: "127.0.0.1:9001".to_string(),
/// };
/// println!("{:?}", config);
/// assert_eq!(config.address, "127.0.0.1:9000");
/// ```
#[derive(Debug, Clone, Default, Deserialize, Serialize, Ord, PartialOrd, Eq, PartialEq)]
pub struct RustFSConfig {
    pub address: String,
    pub host: String,
    pub port: String,
    pub access_key: String,
    pub secret_key: String,
    pub domain_name: String,
    pub volume_name: String,
    pub console_address: String,
}
|
||||
|
||||
impl RustFSConfig {
    /// Keyring service name under which the config is stored.
    const SERVICE_NAME: &'static str = "rustfs-service";
    /// Keyring entry key for the serialized config JSON.
    const SERVICE_KEY: &'static str = "rustfs_key";
    /// default domain name
    const DEFAULT_DOMAIN_NAME_VALUE: &'static str = "demo.rustfs.com";
    /// default address value
    const DEFAULT_ADDRESS_VALUE: &'static str = "127.0.0.1:9000";
    /// default port value
    const DEFAULT_PORT_VALUE: &'static str = "9000";
    /// default host value
    const DEFAULT_HOST_VALUE: &'static str = "127.0.0.1";
    /// default access key value
    const DEFAULT_ACCESS_KEY_VALUE: &'static str = "rustfsadmin";
    /// default secret key value
    const DEFAULT_SECRET_KEY_VALUE: &'static str = "rustfsadmin";
    /// default console address value
    const DEFAULT_CONSOLE_ADDRESS_VALUE: &'static str = "127.0.0.1:9001";

    /// Get the default volume name: `$HOME/rustfs/data` when the home
    /// directory is available, otherwise the relative path `"data"`.
    ///
    /// # Returns
    /// * The default volume name
    ///
    /// # Example
    /// ```
    /// let volume_name = RustFSConfig::default_volume_name();
    /// ```
    pub fn default_volume_name() -> String {
        dirs::home_dir()
            .map(|home| home.join("rustfs").join("data"))
            // `to_str` can fail on non-UTF-8 paths; fall back to "data".
            .and_then(|path| path.to_str().map(String::from))
            .unwrap_or_else(|| "data".to_string())
    }

    /// Create a configuration populated with all default values.
    ///
    /// # Returns
    /// * The default configuration
    ///
    /// # Example
    /// ```
    /// let config = RustFSConfig::default_config();
    /// println!("{:?}", config);
    /// assert_eq!(config.address, "127.0.0.1:9000");
    /// ```
    pub fn default_config() -> Self {
        Self {
            address: Self::DEFAULT_ADDRESS_VALUE.to_string(),
            host: Self::DEFAULT_HOST_VALUE.to_string(),
            port: Self::DEFAULT_PORT_VALUE.to_string(),
            access_key: Self::DEFAULT_ACCESS_KEY_VALUE.to_string(),
            secret_key: Self::DEFAULT_SECRET_KEY_VALUE.to_string(),
            domain_name: Self::DEFAULT_DOMAIN_NAME_VALUE.to_string(),
            volume_name: Self::default_volume_name(),
            console_address: Self::DEFAULT_CONSOLE_ADDRESS_VALUE.to_string(),
        }
    }

    /// Load the configuration from the keyring, merged over the defaults.
    ///
    /// A stored field only overrides the default when it is non-empty AND
    /// differs from the default value; failures to read or parse the stored
    /// entry are silently ignored and the defaults are kept.
    ///
    /// # Errors
    /// * If the keyring entry cannot be created
    /// * If the stored address cannot be split into host and port
    ///
    /// # Example
    /// ```
    /// let config = RustFSConfig::load().unwrap();
    /// println!("{:?}", config);
    /// assert_eq!(config.address, "127.0.0.1:9000");
    /// ```
    pub fn load() -> Result<Self, Box<dyn Error>> {
        let mut config = Self::default_config();

        // Try to get the configuration of the storage from the keyring
        let entry = Entry::new(Self::SERVICE_NAME, Self::SERVICE_KEY)?;
        if let Ok(stored_json) = entry.get_password() {
            if let Ok(stored_config) = serde_json::from_str::<RustFSConfig>(&stored_json) {
                // update fields that are not empty and non default
                if !stored_config.address.is_empty() && stored_config.address != Self::DEFAULT_ADDRESS_VALUE {
                    config.address = stored_config.address;
                    // Keep host/port consistent with the combined address.
                    // (Error text is user-facing Chinese: "cannot extract host and port from address".)
                    let (host, port) = Self::extract_host_port(config.address.as_str())
                        .ok_or_else(|| format!("无法从地址 '{}' 中提取主机和端口", config.address))?;
                    config.host = host.to_string();
                    config.port = port.to_string();
                }
                if !stored_config.access_key.is_empty() && stored_config.access_key != Self::DEFAULT_ACCESS_KEY_VALUE {
                    config.access_key = stored_config.access_key;
                }
                if !stored_config.secret_key.is_empty() && stored_config.secret_key != Self::DEFAULT_SECRET_KEY_VALUE {
                    config.secret_key = stored_config.secret_key;
                }
                if !stored_config.domain_name.is_empty() && stored_config.domain_name != Self::DEFAULT_DOMAIN_NAME_VALUE {
                    config.domain_name = stored_config.domain_name;
                }
                // The stored volume_name is updated only if it is not empty and different from the default
                if !stored_config.volume_name.is_empty() && stored_config.volume_name != Self::default_volume_name() {
                    config.volume_name = stored_config.volume_name;
                }
                if !stored_config.console_address.is_empty()
                    && stored_config.console_address != Self::DEFAULT_CONSOLE_ADDRESS_VALUE
                {
                    config.console_address = stored_config.console_address;
                }
            }
        }

        Ok(config)
    }

    /// Auxiliary method: Extract the host and port from the address string.
    ///
    /// # Arguments
    /// * `address` - The address string in the form `host:port`
    ///
    /// # Returns
    /// * `Some((host, port))` - The host slice and parsed port
    /// * `None` - If the address is not exactly `host:port` or the port is
    ///   not a valid `u16`
    ///
    /// # Example
    /// ```
    /// let (host, port) = RustFSConfig::extract_host_port("127.0.0.1:9000").unwrap();
    /// assert_eq!(host, "127.0.0.1");
    /// assert_eq!(port, 9000);
    /// ```
    pub fn extract_host_port(address: &str) -> Option<(&str, u16)> {
        // Exactly one ':' is required; note this rejects IPv6 literals.
        let parts: Vec<&str> = address.split(':').collect();
        if parts.len() == 2 {
            if let Ok(port) = parts[1].parse::<u16>() {
                return Some((parts[0], port));
            }
        }
        None
    }

    /// Save the configuration to the keyring as a JSON string.
    ///
    /// # Errors
    /// * If the configuration cannot be serialized
    /// * If the configuration cannot be saved to the keyring
    ///
    /// # Example
    /// ```
    /// let config = RustFSConfig::default_config();
    /// config.save().unwrap();
    /// ```
    pub fn save(&self) -> Result<(), Box<dyn Error>> {
        let entry = Entry::new(Self::SERVICE_NAME, Self::SERVICE_KEY)?;
        let json = serde_json::to_string(self)?;
        entry.set_password(&json)?;
        Ok(())
    }

    /// Clear the stored configuration from the system keyring.
    ///
    /// # Returns
    /// `Ok(())` if the configuration was successfully cleared, or an error if the operation failed.
    ///
    /// # Example
    /// ```
    /// RustFSConfig::clear().unwrap();
    /// ```
    #[allow(dead_code)]
    pub fn clear() -> Result<(), Box<dyn Error>> {
        let entry = Entry::new(Self::SERVICE_NAME, Self::SERVICE_KEY)?;
        entry.delete_credential()?;
        Ok(())
    }
}
|
||||
|
||||
// Unit tests for `RustFSConfig`: construction, defaults, host/port parsing,
// serde round-trips, and derived-trait behavior. Keyring-backed methods are
// intentionally not covered here (see the note at the bottom).
#[cfg(test)]
mod tests {
    use super::*;

    // `Default` (derived) yields all-empty strings, unlike `default_config()`.
    #[test]
    fn test_rustfs_config_default() {
        let config = RustFSConfig::default();
        assert!(config.address.is_empty());
        assert!(config.host.is_empty());
        assert!(config.port.is_empty());
        assert!(config.access_key.is_empty());
        assert!(config.secret_key.is_empty());
        assert!(config.domain_name.is_empty());
        assert!(config.volume_name.is_empty());
        assert!(config.console_address.is_empty());
    }

    // Struct-literal construction preserves every field verbatim.
    #[test]
    fn test_rustfs_config_creation() {
        let config = RustFSConfig {
            address: "192.168.1.100:9000".to_string(),
            host: "192.168.1.100".to_string(),
            port: "9000".to_string(),
            access_key: "testuser".to_string(),
            secret_key: "testpass".to_string(),
            domain_name: "test.rustfs.com".to_string(),
            volume_name: "/data/rustfs".to_string(),
            console_address: "192.168.1.100:9001".to_string(),
        };

        assert_eq!(config.address, "192.168.1.100:9000");
        assert_eq!(config.host, "192.168.1.100");
        assert_eq!(config.port, "9000");
        assert_eq!(config.access_key, "testuser");
        assert_eq!(config.secret_key, "testpass");
        assert_eq!(config.domain_name, "test.rustfs.com");
        assert_eq!(config.volume_name, "/data/rustfs");
        assert_eq!(config.console_address, "192.168.1.100:9001");
    }

    #[test]
    fn test_default_volume_name() {
        let volume_name = RustFSConfig::default_volume_name();
        assert!(!volume_name.is_empty());
        // Should either be the home directory path or fallback to "data"
        assert!(volume_name.contains("rustfs") || volume_name == "data");
    }

    // `default_config()` must agree with the associated DEFAULT_* constants.
    #[test]
    fn test_default_config() {
        let config = RustFSConfig::default_config();
        assert_eq!(config.address, RustFSConfig::DEFAULT_ADDRESS_VALUE);
        assert_eq!(config.host, RustFSConfig::DEFAULT_HOST_VALUE);
        assert_eq!(config.port, RustFSConfig::DEFAULT_PORT_VALUE);
        assert_eq!(config.access_key, RustFSConfig::DEFAULT_ACCESS_KEY_VALUE);
        assert_eq!(config.secret_key, RustFSConfig::DEFAULT_SECRET_KEY_VALUE);
        assert_eq!(config.domain_name, RustFSConfig::DEFAULT_DOMAIN_NAME_VALUE);
        assert_eq!(config.console_address, RustFSConfig::DEFAULT_CONSOLE_ADDRESS_VALUE);
        assert!(!config.volume_name.is_empty());
    }

    // Well-formed `host:port` strings parse into (host, u16 port).
    #[test]
    fn test_extract_host_port_valid() {
        let test_cases = vec![
            ("127.0.0.1:9000", Some(("127.0.0.1", 9000))),
            ("localhost:8080", Some(("localhost", 8080))),
            ("192.168.1.100:3000", Some(("192.168.1.100", 3000))),
            ("0.0.0.0:80", Some(("0.0.0.0", 80))),
            ("example.com:443", Some(("example.com", 443))),
        ];

        for (input, expected) in test_cases {
            let result = RustFSConfig::extract_host_port(input);
            assert_eq!(result, expected, "Failed for input: {input}");
        }
    }

    // Malformed addresses must be rejected with `None`.
    #[test]
    fn test_extract_host_port_invalid() {
        let invalid_cases = vec![
            "127.0.0.1",            // Missing port
            "127.0.0.1:",           // Empty port
            "127.0.0.1:abc",        // Invalid port
            "127.0.0.1:99999",      // Port out of range
            "",                     // Empty string
            "127.0.0.1:9000:extra", // Too many parts
            "invalid",              // No colon
        ];

        for input in invalid_cases {
            let result = RustFSConfig::extract_host_port(input);
            assert_eq!(result, None, "Should be None for input: {input}");
        }

        // Special case: empty host but valid port should still work
        let result = RustFSConfig::extract_host_port(":9000");
        assert_eq!(result, Some(("", 9000)));
    }

    #[test]
    fn test_extract_host_port_edge_cases() {
        // Test edge cases for port numbers
        assert_eq!(RustFSConfig::extract_host_port("host:0"), Some(("host", 0)));
        assert_eq!(RustFSConfig::extract_host_port("host:65535"), Some(("host", 65535)));
        assert_eq!(RustFSConfig::extract_host_port("host:65536"), None); // Out of range
    }

    // serde_json serialization includes the field values verbatim.
    #[test]
    fn test_serialization() {
        let config = RustFSConfig {
            address: "127.0.0.1:9000".to_string(),
            host: "127.0.0.1".to_string(),
            port: "9000".to_string(),
            access_key: "admin".to_string(),
            secret_key: "password".to_string(),
            domain_name: "test.com".to_string(),
            volume_name: "/data".to_string(),
            console_address: "127.0.0.1:9001".to_string(),
        };

        let json = serde_json::to_string(&config).unwrap();
        assert!(json.contains("127.0.0.1:9000"));
        assert!(json.contains("admin"));
        assert!(json.contains("test.com"));
    }

    #[test]
    fn test_deserialization() {
        let json = r#"{
            "address": "192.168.1.100:9000",
            "host": "192.168.1.100",
            "port": "9000",
            "access_key": "testuser",
            "secret_key": "testpass",
            "domain_name": "example.com",
            "volume_name": "/opt/data",
            "console_address": "192.168.1.100:9001"
        }"#;

        let config: RustFSConfig = serde_json::from_str(json).unwrap();
        assert_eq!(config.address, "192.168.1.100:9000");
        assert_eq!(config.host, "192.168.1.100");
        assert_eq!(config.port, "9000");
        assert_eq!(config.access_key, "testuser");
        assert_eq!(config.secret_key, "testpass");
        assert_eq!(config.domain_name, "example.com");
        assert_eq!(config.volume_name, "/opt/data");
        assert_eq!(config.console_address, "192.168.1.100:9001");
    }

    // serialize -> deserialize must be the identity.
    #[test]
    fn test_serialization_deserialization_roundtrip() {
        let original_config = RustFSConfig {
            address: "10.0.0.1:8080".to_string(),
            host: "10.0.0.1".to_string(),
            port: "8080".to_string(),
            access_key: "roundtrip_user".to_string(),
            secret_key: "roundtrip_pass".to_string(),
            domain_name: "roundtrip.test".to_string(),
            volume_name: "/tmp/roundtrip".to_string(),
            console_address: "10.0.0.1:8081".to_string(),
        };

        let json = serde_json::to_string(&original_config).unwrap();
        let deserialized_config: RustFSConfig = serde_json::from_str(&json).unwrap();

        assert_eq!(original_config, deserialized_config);
    }

    // Derived Eq/Ord: equal field-for-field configs compare equal; ordering
    // follows field declaration order lexicographically.
    #[test]
    fn test_config_ordering() {
        let config1 = RustFSConfig {
            address: "127.0.0.1:9000".to_string(),
            host: "127.0.0.1".to_string(),
            port: "9000".to_string(),
            access_key: "admin".to_string(),
            secret_key: "password".to_string(),
            domain_name: "test.com".to_string(),
            volume_name: "/data".to_string(),
            console_address: "127.0.0.1:9001".to_string(),
        };

        let config2 = RustFSConfig {
            address: "127.0.0.1:9000".to_string(),
            host: "127.0.0.1".to_string(),
            port: "9000".to_string(),
            access_key: "admin".to_string(),
            secret_key: "password".to_string(),
            domain_name: "test.com".to_string(),
            volume_name: "/data".to_string(),
            console_address: "127.0.0.1:9001".to_string(),
        };

        let config3 = RustFSConfig {
            address: "127.0.0.1:9001".to_string(), // Different port
            host: "127.0.0.1".to_string(),
            port: "9001".to_string(),
            access_key: "admin".to_string(),
            secret_key: "password".to_string(),
            domain_name: "test.com".to_string(),
            volume_name: "/data".to_string(),
            console_address: "127.0.0.1:9002".to_string(),
        };

        assert_eq!(config1, config2);
        assert_ne!(config1, config3);
        assert!(config1 < config3); // Lexicographic ordering
    }

    #[test]
    fn test_clone() {
        let original = RustFSConfig::default_config();
        let cloned = original.clone();

        assert_eq!(original, cloned);
        assert_eq!(original.address, cloned.address);
        assert_eq!(original.access_key, cloned.access_key);
    }

    #[test]
    fn test_debug_format() {
        let config = RustFSConfig::default_config();
        let debug_str = format!("{config:?}");

        assert!(debug_str.contains("RustFSConfig"));
        assert!(debug_str.contains("address"));
        assert!(debug_str.contains("127.0.0.1:9000"));
    }

    // Pin the associated-constant values so accidental edits are caught.
    #[test]
    fn test_constants() {
        assert_eq!(RustFSConfig::SERVICE_NAME, "rustfs-service");
        assert_eq!(RustFSConfig::SERVICE_KEY, "rustfs_key");
        assert_eq!(RustFSConfig::DEFAULT_DOMAIN_NAME_VALUE, "demo.rustfs.com");
        assert_eq!(RustFSConfig::DEFAULT_ADDRESS_VALUE, "127.0.0.1:9000");
        assert_eq!(RustFSConfig::DEFAULT_PORT_VALUE, "9000");
        assert_eq!(RustFSConfig::DEFAULT_HOST_VALUE, "127.0.0.1");
        assert_eq!(RustFSConfig::DEFAULT_ACCESS_KEY_VALUE, "rustfsadmin");
        assert_eq!(RustFSConfig::DEFAULT_SECRET_KEY_VALUE, "rustfsadmin");
        assert_eq!(RustFSConfig::DEFAULT_CONSOLE_ADDRESS_VALUE, "127.0.0.1:9001");
    }

    #[test]
    fn test_empty_strings() {
        let config = RustFSConfig {
            address: "".to_string(),
            host: "".to_string(),
            port: "".to_string(),
            access_key: "".to_string(),
            secret_key: "".to_string(),
            domain_name: "".to_string(),
            volume_name: "".to_string(),
            console_address: "".to_string(),
        };

        assert!(config.address.is_empty());
        assert!(config.host.is_empty());
        assert!(config.port.is_empty());
        assert!(config.access_key.is_empty());
        assert!(config.secret_key.is_empty());
        assert!(config.domain_name.is_empty());
        assert!(config.volume_name.is_empty());
        assert!(config.console_address.is_empty());
    }

    // Large inputs are stored without truncation.
    #[test]
    fn test_very_long_strings() {
        let long_string = "a".repeat(1000);
        let config = RustFSConfig {
            address: format!("{long_string}:9000"),
            host: long_string.clone(),
            port: "9000".to_string(),
            access_key: long_string.clone(),
            secret_key: long_string.clone(),
            domain_name: format!("{long_string}.com"),
            volume_name: format!("/data/{long_string}"),
            console_address: format!("{long_string}:9001"),
        };

        assert_eq!(config.host.len(), 1000);
        assert_eq!(config.access_key.len(), 1000);
        assert_eq!(config.secret_key.len(), 1000);
    }

    #[test]
    fn test_special_characters() {
        let config = RustFSConfig {
            address: "127.0.0.1:9000".to_string(),
            host: "127.0.0.1".to_string(),
            port: "9000".to_string(),
            access_key: "user@domain.com".to_string(),
            secret_key: "p@ssw0rd!#$%".to_string(),
            domain_name: "test-domain.example.com".to_string(),
            volume_name: "/data/rust-fs/storage".to_string(),
            console_address: "127.0.0.1:9001".to_string(),
        };

        assert!(config.access_key.contains("@"));
        assert!(config.secret_key.contains("!#$%"));
        assert!(config.domain_name.contains("-"));
        assert!(config.volume_name.contains("/"));
    }

    // Non-ASCII (CJK) values survive storage unchanged.
    #[test]
    fn test_unicode_strings() {
        let config = RustFSConfig {
            address: "127.0.0.1:9000".to_string(),
            host: "127.0.0.1".to_string(),
            port: "9000".to_string(),
            access_key: "用户名".to_string(),
            secret_key: "密码 123".to_string(),
            domain_name: "测试.com".to_string(),
            volume_name: "/数据/存储".to_string(),
            console_address: "127.0.0.1:9001".to_string(),
        };

        assert_eq!(config.access_key, "用户名");
        assert_eq!(config.secret_key, "密码 123");
        assert_eq!(config.domain_name, "测试.com");
        assert_eq!(config.volume_name, "/数据/存储");
    }

    #[test]
    fn test_memory_efficiency() {
        // Test that the structure doesn't use excessive memory
        assert!(std::mem::size_of::<RustFSConfig>() < 1000);
    }

    // Note: Keyring-related tests (load, save, clear) are not included here
    // because they require actual keyring access and would be integration tests
    // rather than unit tests. They should be tested separately in an integration
    // test environment where keyring access can be properly mocked or controlled.
}
|
||||
@@ -1,899 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::utils::RustFSConfig;
|
||||
use dioxus::logger::tracing::{debug, error, info};
|
||||
use rust_embed::RustEmbed;
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::error::Error;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::Command as StdCommand;
|
||||
use std::sync::LazyLock;
|
||||
use std::time::Duration;
|
||||
use tokio::fs;
|
||||
use tokio::fs::File;
|
||||
use tokio::io::AsyncWriteExt;
|
||||
use tokio::net::TcpStream;
|
||||
use tokio::sync::{Mutex, mpsc};
|
||||
|
||||
#[derive(RustEmbed)]
|
||||
#[folder = "$CARGO_MANIFEST_DIR/embedded-rustfs/"]
|
||||
struct Asset;
|
||||
|
||||
// Use `LazyLock` to cache the checksum of embedded resources
|
||||
static RUSTFS_HASH: LazyLock<Mutex<String>> = LazyLock::new(|| {
|
||||
let rustfs_file = if cfg!(windows) { "rustfs.exe" } else { "rustfs" };
|
||||
let rustfs_data = Asset::get(rustfs_file).expect("RustFs binary not embedded");
|
||||
let hash = hex::encode(Sha256::digest(&rustfs_data.data));
|
||||
Mutex::new(hash)
|
||||
});
|
||||
|
||||
/// Service command
|
||||
/// This enum represents the commands that can be sent to the service manager
|
||||
/// to start, stop, or restart the service
|
||||
/// The `Start` variant contains the configuration for the service
|
||||
/// The `Restart` variant contains the configuration for the service
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let config = RustFSConfig {
|
||||
/// address: "127.0.0.1:9000".to_string(),
|
||||
/// host: "127.0.0.1".to_string(),
|
||||
/// port: "9000".to_string(),
|
||||
/// access_key: "rustfsadmin".to_string(),
|
||||
/// secret_key: "rustfsadmin".to_string(),
|
||||
/// domain_name: "demo.rustfs.com".to_string(),
|
||||
/// volume_name: "data".to_string(),
|
||||
/// console_address: "127.0.0.1:9001".to_string(),
|
||||
/// };
|
||||
///
|
||||
/// let command = ServiceCommand::Start(config);
|
||||
/// println!("{:?}", command);
|
||||
///
|
||||
/// assert_eq!(command, ServiceCommand::Start(config));
|
||||
/// ```
|
||||
pub enum ServiceCommand {
|
||||
Start(RustFSConfig),
|
||||
Stop,
|
||||
Restart(RustFSConfig),
|
||||
}
|
||||
|
||||
/// Service operation result
|
||||
/// This struct represents the result of a service operation
|
||||
/// It contains information about the success of the operation,
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use chrono::Local;
|
||||
///
|
||||
/// let result = ServiceOperationResult {
|
||||
/// success: true,
|
||||
/// start_time: chrono::Local::now(),
|
||||
/// end_time: chrono::Local::now(),
|
||||
/// message: "服务启动成功".to_string(),
|
||||
/// };
|
||||
///
|
||||
/// println!("{:?}", result);
|
||||
/// assert_eq!(result.success, true);
|
||||
/// ```
|
||||
#[derive(Debug)]
|
||||
pub struct ServiceOperationResult {
|
||||
pub success: bool,
|
||||
pub start_time: chrono::DateTime<chrono::Local>,
|
||||
pub end_time: chrono::DateTime<chrono::Local>,
|
||||
pub message: String,
|
||||
}
|
||||
|
||||
/// Service manager
|
||||
/// This struct represents a service manager that can be used to start, stop, or restart a service
|
||||
/// It contains a command sender that can be used to send commands to the service manager
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let service_manager = ServiceManager::new();
|
||||
/// println!("{:?}", service_manager);
|
||||
/// ```
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ServiceManager {
|
||||
command_tx: mpsc::Sender<ServiceCommand>,
|
||||
// process: Arc<Mutex<Option<Child>>>,
|
||||
// pid: Arc<Mutex<Option<u32>>>, // Add PID storage
|
||||
// current_config: Arc<Mutex<Option<RustFSConfig>>>, // Add configuration storage
|
||||
}
|
||||
|
||||
impl ServiceManager {
|
||||
/// check if the service is running and return a pid
|
||||
/// This function is platform dependent
|
||||
/// On Unix systems, it uses the `ps` command to check for the service
|
||||
/// On Windows systems, it uses the `wmic` command to check for the service
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let pid = check_service_status().await;
|
||||
/// println!("{:?}", pid);
|
||||
/// ```
|
||||
pub async fn check_service_status() -> Option<u32> {
|
||||
#[cfg(unix)]
|
||||
{
|
||||
// use the ps command on a unix system
|
||||
if let Ok(output) = StdCommand::new("ps").arg("-ef").output() {
|
||||
let output_str = String::from_utf8_lossy(&output.stdout);
|
||||
for line in output_str.lines() {
|
||||
// match contains `rustfs/bin/rustfs` of the line
|
||||
if line.contains("rustfs/bin/rustfs") && !line.contains("grep") {
|
||||
if let Some(pid_str) = line.split_whitespace().nth(1) {
|
||||
if let Ok(pid) = pid_str.parse::<u32>() {
|
||||
return Some(pid);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
{
|
||||
if let Ok(output) = StdCommand::new("wmic")
|
||||
.arg("process")
|
||||
.arg("where")
|
||||
.arg("caption='rustfs.exe'")
|
||||
.arg("get")
|
||||
.arg("processid")
|
||||
.output()
|
||||
{
|
||||
let output_str = String::from_utf8_lossy(&output.stdout);
|
||||
for line in output_str.lines() {
|
||||
if let Ok(pid) = line.trim().parse::<u32>() {
|
||||
return Some(pid);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// Prepare the service
|
||||
/// This function downloads the service executable if it doesn't exist
|
||||
/// It also creates the necessary directories for the service
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let executable_path = prepare_service().await;
|
||||
/// println!("{:?}", executable_path);
|
||||
/// ```
|
||||
async fn prepare_service() -> Result<PathBuf, Box<dyn Error>> {
|
||||
// get the user directory
|
||||
let home_dir = dirs::home_dir().ok_or("无法获取用户目录")?;
|
||||
let rustfs_dir = home_dir.join("rustfs");
|
||||
let bin_dir = rustfs_dir.join("bin");
|
||||
let data_dir = rustfs_dir.join("data");
|
||||
let logs_dir = rustfs_dir.join("logs");
|
||||
|
||||
// create the necessary directories
|
||||
for dir in [&bin_dir, &data_dir, &logs_dir] {
|
||||
if !dir.exists() {
|
||||
tokio::fs::create_dir_all(dir).await?;
|
||||
}
|
||||
}
|
||||
|
||||
let rustfs_file = if cfg!(windows) { "rustfs.exe" } else { "rustfs" };
|
||||
let executable_path = bin_dir.join(rustfs_file);
|
||||
let hash_path = bin_dir.join("embedded_rustfs.sha256");
|
||||
|
||||
if executable_path.exists() && hash_path.exists() {
|
||||
let cached_hash = fs::read_to_string(&hash_path).await?;
|
||||
let expected_hash = RUSTFS_HASH.lock().await;
|
||||
if cached_hash == *expected_hash {
|
||||
println!("Use cached rustfs: {executable_path:?}");
|
||||
return Ok(executable_path);
|
||||
}
|
||||
}
|
||||
|
||||
// Extract and write files
|
||||
let rustfs_data = Asset::get(rustfs_file).expect("RustFS binary not embedded");
|
||||
let mut file = File::create(&executable_path).await?;
|
||||
file.write_all(&rustfs_data.data).await?;
|
||||
let expected_hash = hex::encode(Sha256::digest(&rustfs_data.data));
|
||||
fs::write(&hash_path, expected_hash).await?;
|
||||
|
||||
// set execution permissions on unix systems
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
let mut perms = std::fs::metadata(&executable_path)?.permissions();
|
||||
perms.set_mode(0o755);
|
||||
std::fs::set_permissions(&executable_path, perms)?;
|
||||
}
|
||||
|
||||
Ok(executable_path)
|
||||
}
|
||||
|
||||
/// Helper function: Extracts the port from the address string
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let address = "127.0.0.1:9000";
|
||||
/// let port = extract_port(address);
|
||||
/// println!("{:?}", port);
|
||||
/// ```
|
||||
fn extract_port(address: &str) -> Option<u16> {
|
||||
address.split(':').nth(1)?.parse().ok()
|
||||
}
|
||||
|
||||
/// Create a new instance of the service manager
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let service_manager = ServiceManager::new();
|
||||
/// println!("{:?}", service_manager);
|
||||
/// ```
|
||||
pub(crate) fn new() -> Self {
|
||||
let (command_tx, mut command_rx) = mpsc::channel(10);
|
||||
// Start the control loop
|
||||
tokio::spawn(async move {
|
||||
while let Some(cmd) = command_rx.recv().await {
|
||||
match cmd {
|
||||
ServiceCommand::Start(config) => {
|
||||
if let Err(e) = Self::start_service(&config).await {
|
||||
Self::show_error(&format!("启动服务失败:{e}"));
|
||||
}
|
||||
}
|
||||
ServiceCommand::Stop => {
|
||||
if let Err(e) = Self::stop_service().await {
|
||||
Self::show_error(&format!("停止服务失败:{e}"));
|
||||
}
|
||||
}
|
||||
ServiceCommand::Restart(config) => {
|
||||
if Self::check_service_status().await.is_some() {
|
||||
if let Err(e) = Self::stop_service().await {
|
||||
Self::show_error(&format!("重启服务失败:{e}"));
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if let Err(e) = Self::start_service(&config).await {
|
||||
Self::show_error(&format!("重启服务失败:{e}"));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
ServiceManager { command_tx }
|
||||
}
|
||||
|
||||
/// Start the service
|
||||
/// This function starts the service with the given configuration
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let config = RustFSConfig {
|
||||
/// address: "127.0.0.1:9000".to_string(),
|
||||
/// host: "127.0.0.1".to_string(),
|
||||
/// port: "9000".to_string(),
|
||||
/// access_key: "rustfsadmin".to_string(),
|
||||
/// secret_key: "rustfsadmin".to_string(),
|
||||
/// domain_name: "demo.rustfs.com".to_string(),
|
||||
/// volume_name: "data".to_string(),
|
||||
/// console_address: "127.0.0.1:9001".to_string(),
|
||||
/// };
|
||||
///
|
||||
/// let result = start_service(&config).await;
|
||||
/// println!("{:?}", result);
|
||||
/// ```
|
||||
async fn start_service(config: &RustFSConfig) -> Result<(), Box<dyn Error>> {
|
||||
// Check if the service is already running
|
||||
if let Some(existing_pid) = Self::check_service_status().await {
|
||||
return Err(format!("服务已经在运行,PID: {existing_pid}").into());
|
||||
}
|
||||
|
||||
// Prepare the service program
|
||||
let executable_path = Self::prepare_service().await?;
|
||||
// Check the data catalog
|
||||
let volume_name_path = Path::new(&config.volume_name);
|
||||
if !volume_name_path.exists() {
|
||||
tokio::fs::create_dir_all(&config.volume_name).await?;
|
||||
}
|
||||
|
||||
// Extract the port from the configuration
|
||||
let main_port = Self::extract_port(&config.address).ok_or("无法解析主服务端口")?;
|
||||
let console_port = Self::extract_port(&config.console_address).ok_or("无法解析控制台端口")?;
|
||||
|
||||
let host = config.address.split(':').next().ok_or("无法解析主机地址")?;
|
||||
|
||||
// Check the port
|
||||
let ports = vec![main_port, console_port];
|
||||
for port in ports {
|
||||
if Self::is_port_in_use(host, port).await {
|
||||
return Err(format!("端口 {port} 已被占用").into());
|
||||
}
|
||||
}
|
||||
|
||||
// Start the service
|
||||
let mut child = tokio::process::Command::new(executable_path)
|
||||
.arg("--address")
|
||||
.arg(&config.address)
|
||||
.arg("--access-key")
|
||||
.arg(&config.access_key)
|
||||
.arg("--secret-key")
|
||||
.arg(&config.secret_key)
|
||||
.arg("--console-address")
|
||||
.arg(&config.console_address)
|
||||
.arg(config.volume_name.clone())
|
||||
.spawn()?;
|
||||
|
||||
let process_pid = child.id().unwrap();
|
||||
// Wait for the service to start
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
|
||||
// Check if the service started successfully
|
||||
if Self::is_port_in_use(host, main_port).await {
|
||||
Self::show_info(&format!("服务启动成功!进程 ID: {process_pid}"));
|
||||
|
||||
Ok(())
|
||||
} else {
|
||||
child.kill().await?;
|
||||
Err("服务启动失败".into())
|
||||
}
|
||||
}
|
||||
|
||||
/// Stop the service
|
||||
/// This function stops the service
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let result = stop_service().await;
|
||||
/// println!("{:?}", result);
|
||||
/// ```
|
||||
async fn stop_service() -> Result<(), Box<dyn Error>> {
|
||||
let existing_pid = Self::check_service_status().await;
|
||||
debug!("existing_pid: {:?}", existing_pid);
|
||||
if let Some(service_pid) = existing_pid {
|
||||
// An attempt was made to terminate the process
|
||||
#[cfg(unix)]
|
||||
{
|
||||
StdCommand::new("kill").arg("-9").arg(service_pid.to_string()).output()?;
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
{
|
||||
StdCommand::new("taskkill")
|
||||
.arg("/F")
|
||||
.arg("/PID")
|
||||
.arg(service_pid.to_string())
|
||||
.output()?;
|
||||
}
|
||||
|
||||
// Verify that the service is indeed stopped
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
if Self::check_service_status().await.is_some() {
|
||||
return Err("服务停止失败".into());
|
||||
}
|
||||
Self::show_info("服务已成功停止");
|
||||
|
||||
Ok(())
|
||||
} else {
|
||||
Err("服务未运行".into())
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if the port is in use
|
||||
/// This function checks if the given port is in use on the given host
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let host = "127.0.0.1";
|
||||
/// let port = 9000;
|
||||
/// let result = is_port_in_use(host, port).await;
|
||||
/// println!("{:?}", result);
|
||||
/// ```
|
||||
async fn is_port_in_use(host: &str, port: u16) -> bool {
|
||||
TcpStream::connect(format!("{host}:{port}")).await.is_ok()
|
||||
}
|
||||
|
||||
/// Show an error message
|
||||
/// This function shows an error message dialog
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// show_error("This is an error message");
|
||||
/// ```
|
||||
pub(crate) fn show_error(message: &str) {
|
||||
rfd::MessageDialog::new()
|
||||
.set_title("错误")
|
||||
.set_description(message)
|
||||
.set_level(rfd::MessageLevel::Error)
|
||||
.show();
|
||||
}
|
||||
|
||||
/// Show an information message
|
||||
/// This function shows an information message dialog
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// show_info("This is an information message");
|
||||
/// ```
|
||||
pub(crate) fn show_info(message: &str) {
|
||||
rfd::MessageDialog::new()
|
||||
.set_title("成功")
|
||||
.set_description(message)
|
||||
.set_level(rfd::MessageLevel::Info)
|
||||
.show();
|
||||
}
|
||||
|
||||
/// Start the service
|
||||
/// This function sends a `Start` command to the service manager
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let config = RustFSConfig {
|
||||
/// address: "127.0.0.1:9000".to_string(),
|
||||
/// host: "127.0.0.1".to_string(),
|
||||
/// port: "9000".to_string(),
|
||||
/// access_key: "rustfsadmin".to_string(),
|
||||
/// secret_key: "rustfsadmin".to_string(),
|
||||
/// domain_name: "demo.rustfs.com".to_string(),
|
||||
/// volume_name: "data".to_string(),
|
||||
/// console_address: "127.0.0.1:9001".to_string(),
|
||||
/// };
|
||||
///
|
||||
/// let service_manager = ServiceManager::new();
|
||||
/// let result = service_manager.start(config).await;
|
||||
/// println!("{:?}", result);
|
||||
/// ```
|
||||
///
|
||||
/// # Errors
|
||||
/// This function returns an error if the service fails to start
|
||||
///
|
||||
/// # Panics
|
||||
/// This function panics if the port number is invalid
|
||||
///
|
||||
/// # Safety
|
||||
/// This function is not marked as unsafe
|
||||
///
|
||||
/// # Performance
|
||||
/// This function is not optimized for performance
|
||||
///
|
||||
/// # Design
|
||||
/// This function is designed to be simple and easy to use
|
||||
///
|
||||
/// # Security
|
||||
/// This function does not have any security implications
|
||||
pub async fn start(&self, config: RustFSConfig) -> Result<ServiceOperationResult, Box<dyn Error>> {
|
||||
let start_time = chrono::Local::now();
|
||||
self.command_tx.send(ServiceCommand::Start(config.clone())).await?;
|
||||
|
||||
let host = &config.host;
|
||||
let port = config.port.parse::<u16>().expect("无效的端口号");
|
||||
// wait for the service to actually start
|
||||
let mut retries = 0;
|
||||
while retries < 30 {
|
||||
// wait up to 30 seconds
|
||||
if Self::check_service_status().await.is_some() && Self::is_port_in_use(host, port).await {
|
||||
let end_time = chrono::Local::now();
|
||||
return Ok(ServiceOperationResult {
|
||||
success: true,
|
||||
start_time,
|
||||
end_time,
|
||||
message: "服务启动成功".to_string(),
|
||||
});
|
||||
}
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
retries += 1;
|
||||
}
|
||||
|
||||
Err("服务启动超时".into())
|
||||
}
|
||||
|
||||
/// Stop the service
|
||||
/// This function sends a `Stop` command to the service manager
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let service_manager = ServiceManager::new();
|
||||
/// let result = service_manager.stop().await;
|
||||
/// println!("{:?}", result);
|
||||
/// ```
|
||||
///
|
||||
/// # Errors
|
||||
/// This function returns an error if the service fails to stop
|
||||
///
|
||||
/// # Panics
|
||||
/// This function panics if the port number is invalid
|
||||
///
|
||||
/// # Safety
|
||||
/// This function is not marked as unsafe
|
||||
///
|
||||
/// # Performance
|
||||
/// This function is not optimized for performance
|
||||
///
|
||||
/// # Design
|
||||
/// This function is designed to be simple and easy to use
|
||||
///
|
||||
/// # Security
|
||||
/// This function does not have any security implications
|
||||
pub async fn stop(&self) -> Result<ServiceOperationResult, Box<dyn Error>> {
|
||||
let start_time = chrono::Local::now();
|
||||
self.command_tx.send(ServiceCommand::Stop).await?;
|
||||
|
||||
// Wait for the service to actually stop
|
||||
let mut retries = 0;
|
||||
while retries < 15 {
|
||||
// Wait up to 15 seconds
|
||||
if Self::check_service_status().await.is_none() {
|
||||
let end_time = chrono::Local::now();
|
||||
return Ok(ServiceOperationResult {
|
||||
success: true,
|
||||
start_time,
|
||||
end_time,
|
||||
message: "服务停止成功".to_string(),
|
||||
});
|
||||
}
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
retries += 1;
|
||||
}
|
||||
|
||||
Err("服务停止超时".into())
|
||||
}
|
||||
|
||||
/// Restart the service
|
||||
/// This function sends a `Restart` command to the service manager
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let config = RustFSConfig {
|
||||
/// address: "127.0.0.1:9000".to_string(),
|
||||
/// host: "127.0.0.1".to_string(),
|
||||
/// port: "9000".to_string(),
|
||||
/// access_key: "rustfsadmin".to_string(),
|
||||
/// secret_key: "rustfsadmin".to_string(),
|
||||
/// domain_name: "demo.rustfs.com".to_string(),
|
||||
/// volume_name: "data".to_string(),
|
||||
/// console_address: "127.0.0.1:9001".to_string(),
|
||||
/// };
|
||||
///
|
||||
/// let service_manager = ServiceManager::new();
|
||||
/// let result = service_manager.restart(config).await;
|
||||
/// println!("{:?}", result);
|
||||
/// ```
|
||||
///
|
||||
/// # Errors
|
||||
/// This function returns an error if the service fails to restart
|
||||
///
|
||||
/// # Panics
|
||||
/// This function panics if the port number is invalid
|
||||
///
|
||||
/// # Safety
|
||||
/// This function is not marked as unsafe
|
||||
///
|
||||
/// # Performance
|
||||
/// This function is not optimized for performance
|
||||
///
|
||||
/// # Design
|
||||
/// This function is designed to be simple and easy to use
|
||||
///
|
||||
/// # Security
|
||||
/// This function does not have any security implications
|
||||
pub async fn restart(&self, config: RustFSConfig) -> Result<ServiceOperationResult, Box<dyn Error>> {
|
||||
let start_time = chrono::Local::now();
|
||||
self.command_tx.send(ServiceCommand::Restart(config.clone())).await?;
|
||||
|
||||
let host = &config.host;
|
||||
let port = config.port.parse::<u16>().expect("无效的端口号");
|
||||
|
||||
// wait for the service to restart
|
||||
let mut retries = 0;
|
||||
while retries < 45 {
|
||||
// Longer waiting time is given as both the stop and start processes are involved
|
||||
if Self::check_service_status().await.is_some() && Self::is_port_in_use(host, port).await {
|
||||
match config.save() {
|
||||
Ok(_) => info!("save config success"),
|
||||
Err(e) => {
|
||||
error!("save config error: {}", e);
|
||||
self.command_tx.send(ServiceCommand::Stop).await?;
|
||||
Self::show_error("保存配置失败");
|
||||
return Err("保存配置失败".into());
|
||||
}
|
||||
}
|
||||
let end_time = chrono::Local::now();
|
||||
return Ok(ServiceOperationResult {
|
||||
success: true,
|
||||
start_time,
|
||||
end_time,
|
||||
message: "服务重启成功".to_string(),
|
||||
});
|
||||
}
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
retries += 1;
|
||||
}
|
||||
Err("服务重启超时".into())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::time::Duration;
|
||||
|
||||
#[test]
|
||||
fn test_service_command_creation() {
|
||||
let config = RustFSConfig::default_config();
|
||||
|
||||
let start_cmd = ServiceCommand::Start(config.clone());
|
||||
let stop_cmd = ServiceCommand::Stop;
|
||||
let restart_cmd = ServiceCommand::Restart(config);
|
||||
|
||||
// Test that commands can be created
|
||||
match start_cmd {
|
||||
ServiceCommand::Start(_) => {}
|
||||
_ => panic!("Expected Start command"),
|
||||
}
|
||||
|
||||
match stop_cmd {
|
||||
ServiceCommand::Stop => {}
|
||||
_ => panic!("Expected Stop command"),
|
||||
}
|
||||
|
||||
match restart_cmd {
|
||||
ServiceCommand::Restart(_) => {}
|
||||
_ => panic!("Expected Restart command"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_service_operation_result_creation() {
|
||||
let start_time = chrono::Local::now();
|
||||
let end_time = chrono::Local::now();
|
||||
|
||||
let success_result = ServiceOperationResult {
|
||||
success: true,
|
||||
start_time,
|
||||
end_time,
|
||||
message: "Operation successful".to_string(),
|
||||
};
|
||||
|
||||
let failure_result = ServiceOperationResult {
|
||||
success: false,
|
||||
start_time,
|
||||
end_time,
|
||||
message: "Operation failed".to_string(),
|
||||
};
|
||||
|
||||
assert!(success_result.success);
|
||||
assert_eq!(success_result.message, "Operation successful");
|
||||
|
||||
assert!(!failure_result.success);
|
||||
assert_eq!(failure_result.message, "Operation failed");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_service_operation_result_debug() {
|
||||
let result = ServiceOperationResult {
|
||||
success: true,
|
||||
start_time: chrono::Local::now(),
|
||||
end_time: chrono::Local::now(),
|
||||
message: "Test message".to_string(),
|
||||
};
|
||||
|
||||
let debug_str = format!("{result:?}");
|
||||
assert!(debug_str.contains("ServiceOperationResult"));
|
||||
assert!(debug_str.contains("success: true"));
|
||||
assert!(debug_str.contains("Test message"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_service_manager_creation() {
|
||||
// Test ServiceManager creation in a tokio runtime
|
||||
let rt = tokio::runtime::Runtime::new().unwrap();
|
||||
rt.block_on(async {
|
||||
let service_manager = ServiceManager::new();
|
||||
|
||||
// Test that ServiceManager can be created and cloned
|
||||
let cloned_manager = service_manager.clone();
|
||||
|
||||
// Both should be valid (we can't test much more without async runtime)
|
||||
assert!(format!("{service_manager:?}").contains("ServiceManager"));
|
||||
assert!(format!("{cloned_manager:?}").contains("ServiceManager"));
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_port_valid() {
|
||||
let test_cases = vec![
|
||||
("127.0.0.1:9000", Some(9000)),
|
||||
("localhost:8080", Some(8080)),
|
||||
("192.168.1.100:3000", Some(3000)),
|
||||
("0.0.0.0:80", Some(80)),
|
||||
("example.com:443", Some(443)),
|
||||
("host:65535", Some(65535)),
|
||||
("host:1", Some(1)),
|
||||
];
|
||||
|
||||
for (input, expected) in test_cases {
|
||||
let result = ServiceManager::extract_port(input);
|
||||
assert_eq!(result, expected, "Failed for input: {input}");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_port_invalid() {
|
||||
let invalid_cases = vec![
|
||||
"127.0.0.1", // Missing port
|
||||
"127.0.0.1:", // Empty port
|
||||
"127.0.0.1:abc", // Invalid port
|
||||
"127.0.0.1:99999", // Port out of range
|
||||
"", // Empty string
|
||||
"invalid", // No colon
|
||||
"host:-1", // Negative port
|
||||
"host:0.5", // Decimal port
|
||||
];
|
||||
|
||||
for input in invalid_cases {
|
||||
let result = ServiceManager::extract_port(input);
|
||||
assert_eq!(result, None, "Should be None for input: {input}");
|
||||
}
|
||||
|
||||
// Special case: empty host but valid port should still work
|
||||
assert_eq!(ServiceManager::extract_port(":9000"), Some(9000));
|
||||
|
||||
// Special case: multiple colons - extract_port takes the second part
|
||||
// For "127.0.0.1:9000:extra", it takes "9000" which is valid
|
||||
assert_eq!(ServiceManager::extract_port("127.0.0.1:9000:extra"), Some(9000));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_port_edge_cases() {
|
||||
// Test edge cases for port numbers
|
||||
assert_eq!(ServiceManager::extract_port("host:0"), Some(0));
|
||||
assert_eq!(ServiceManager::extract_port("host:65535"), Some(65535));
|
||||
assert_eq!(ServiceManager::extract_port("host:65536"), None); // Out of range
|
||||
// IPv6-like address - extract_port takes the second part after split(':')
|
||||
// For "::1:8080", split(':') gives ["", "", "1", "8080"], nth(1) gives ""
|
||||
assert_eq!(ServiceManager::extract_port("::1:8080"), None); // Second part is empty
|
||||
// For "[::1]:8080", split(':') gives ["[", "", "1]", "8080"], nth(1) gives ""
|
||||
assert_eq!(ServiceManager::extract_port("[::1]:8080"), None); // Second part is empty
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_show_error() {
|
||||
// Test that show_error function exists and can be called
|
||||
// We can't actually test the dialog in a test environment
|
||||
// so we just verify the function signature
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_show_info() {
|
||||
// Test that show_info function exists and can be called
|
||||
// We can't actually test the dialog in a test environment
|
||||
// so we just verify the function signature
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_service_operation_result_timing() {
|
||||
let start_time = chrono::Local::now();
|
||||
std::thread::sleep(Duration::from_millis(10)); // Small delay
|
||||
let end_time = chrono::Local::now();
|
||||
|
||||
let result = ServiceOperationResult {
|
||||
success: true,
|
||||
start_time,
|
||||
end_time,
|
||||
message: "Timing test".to_string(),
|
||||
};
|
||||
|
||||
// End time should be after start time
|
||||
assert!(result.end_time >= result.start_time);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_service_operation_result_with_unicode() {
|
||||
let result = ServiceOperationResult {
|
||||
success: true,
|
||||
start_time: chrono::Local::now(),
|
||||
end_time: chrono::Local::now(),
|
||||
message: "操作成功 🎉".to_string(),
|
||||
};
|
||||
|
||||
assert_eq!(result.message, "操作成功 🎉");
|
||||
assert!(result.success);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_service_operation_result_with_long_message() {
|
||||
let long_message = "A".repeat(10000);
|
||||
let result = ServiceOperationResult {
|
||||
success: false,
|
||||
start_time: chrono::Local::now(),
|
||||
end_time: chrono::Local::now(),
|
||||
message: long_message.clone(),
|
||||
};
|
||||
|
||||
assert_eq!(result.message.len(), 10000);
|
||||
assert_eq!(result.message, long_message);
|
||||
assert!(!result.success);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_service_command_with_different_configs() {
|
||||
let config1 = RustFSConfig {
|
||||
address: "127.0.0.1:9000".to_string(),
|
||||
host: "127.0.0.1".to_string(),
|
||||
port: "9000".to_string(),
|
||||
access_key: "admin1".to_string(),
|
||||
secret_key: "pass1".to_string(),
|
||||
domain_name: "test1.com".to_string(),
|
||||
volume_name: "/data1".to_string(),
|
||||
console_address: "127.0.0.1:9001".to_string(),
|
||||
};
|
||||
|
||||
let config2 = RustFSConfig {
|
||||
address: "192.168.1.100:8080".to_string(),
|
||||
host: "192.168.1.100".to_string(),
|
||||
port: "8080".to_string(),
|
||||
access_key: "admin2".to_string(),
|
||||
secret_key: "pass2".to_string(),
|
||||
domain_name: "test2.com".to_string(),
|
||||
volume_name: "/data2".to_string(),
|
||||
console_address: "192.168.1.100:8081".to_string(),
|
||||
};
|
||||
|
||||
let start_cmd1 = ServiceCommand::Start(config1);
|
||||
let restart_cmd2 = ServiceCommand::Restart(config2);
|
||||
|
||||
// Test that different configs can be used
|
||||
match start_cmd1 {
|
||||
ServiceCommand::Start(config) => {
|
||||
assert_eq!(config.address, "127.0.0.1:9000");
|
||||
assert_eq!(config.access_key, "admin1");
|
||||
}
|
||||
_ => panic!("Expected Start command"),
|
||||
}
|
||||
|
||||
match restart_cmd2 {
|
||||
ServiceCommand::Restart(config) => {
|
||||
assert_eq!(config.address, "192.168.1.100:8080");
|
||||
assert_eq!(config.access_key, "admin2");
|
||||
}
|
||||
_ => panic!("Expected Restart command"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_memory_efficiency() {
|
||||
// Test that structures don't use excessive memory
|
||||
assert!(std::mem::size_of::<ServiceCommand>() < 2000);
|
||||
assert!(std::mem::size_of::<ServiceOperationResult>() < 1000);
|
||||
assert!(std::mem::size_of::<ServiceManager>() < 1000);
|
||||
}
|
||||
|
||||
// Note: The following methods are not tested here because they require:
|
||||
// - Async runtime (tokio)
|
||||
// - File system access
|
||||
// - Network access
|
||||
// - Process management
|
||||
// - External dependencies (embedded assets)
|
||||
//
|
||||
// These should be tested in integration tests:
|
||||
// - check_service_status()
|
||||
// - prepare_service()
|
||||
// - start_service()
|
||||
// - stop_service()
|
||||
// - is_port_in_use()
|
||||
// - ServiceManager::start()
|
||||
// - ServiceManager::stop()
|
||||
// - ServiceManager::restart()
|
||||
//
|
||||
// The RUSTFS_HASH lazy_static is also not tested here as it depends
|
||||
// on embedded assets that may not be available in unit test environment.
|
||||
}
|
||||
@@ -1,300 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use dioxus::logger::tracing::debug;
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
use tracing_appender::rolling::{RollingFileAppender, Rotation};
|
||||
use tracing_subscriber::fmt;
|
||||
use tracing_subscriber::layer::SubscriberExt;
|
||||
use tracing_subscriber::util::SubscriberInitExt;
|
||||
|
||||
/// Initialize the logger with a rolling file appender
|
||||
/// that rotates log files daily
|
||||
pub fn init_logger() -> WorkerGuard {
|
||||
// configuring rolling logs rolling by day
|
||||
let home_dir = dirs::home_dir().expect("无法获取用户目录");
|
||||
let rustfs_dir = home_dir.join("rustfs");
|
||||
let logs_dir = rustfs_dir.join("logs");
|
||||
let file_appender = RollingFileAppender::builder()
|
||||
.rotation(Rotation::DAILY) // rotate log files once every hour
|
||||
.filename_prefix("rustfs-cli") // log file names will be prefixed with `myapp.`
|
||||
.filename_suffix("log") // log file names will be suffixed with `.log`
|
||||
.build(logs_dir) // try to build an appender that stores log files in `/ var/ log`
|
||||
.expect("initializing rolling file appender failed");
|
||||
// non-blocking writer for improved performance
|
||||
let (non_blocking_file, worker_guard) = tracing_appender::non_blocking(file_appender);
|
||||
|
||||
// console output layer
|
||||
let console_layer = fmt::layer()
|
||||
.with_writer(std::io::stdout)
|
||||
.with_ansi(true)
|
||||
.with_line_number(true); // enable colors in the console
|
||||
|
||||
// file output layer
|
||||
let file_layer = fmt::layer()
|
||||
.with_writer(non_blocking_file)
|
||||
.with_ansi(false)
|
||||
.with_thread_names(true)
|
||||
.with_target(true)
|
||||
.with_thread_ids(true)
|
||||
.with_level(true)
|
||||
.with_line_number(true); // disable colors in the file
|
||||
|
||||
// Combine all tiers and initialize global subscribers
|
||||
tracing_subscriber::registry()
|
||||
.with(console_layer)
|
||||
.with(file_layer)
|
||||
.with(tracing_subscriber::EnvFilter::new("info")) // filter the log level by environment variables
|
||||
.init();
|
||||
debug!("Logger initialized");
|
||||
worker_guard
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::Once;

    static INIT: Once = Once::new();

    /// Install a lightweight test subscriber exactly once so individual
    /// tests do not race to initialize the global logger.
    fn ensure_logger_init() {
        INIT.call_once(|| {
            let _ = tracing_subscriber::fmt().with_test_writer().try_init();
        });
    }

    #[test]
    fn test_logger_initialization_components() {
        ensure_logger_init();

        // Exercise the same building blocks init_logger() uses, without
        // installing the global subscriber a second time.
        let maybe_home = dirs::home_dir();
        assert!(maybe_home.is_some(), "Should be able to get home directory");

        let rustfs_dir = maybe_home.unwrap().join("rustfs");
        let logs_dir = rustfs_dir.join("logs");

        // Both paths should embed the expected directory names.
        assert!(rustfs_dir.to_string_lossy().contains("rustfs"));
        assert!(logs_dir.to_string_lossy().contains("logs"));
    }

    #[test]
    fn test_rolling_file_appender_builder() {
        ensure_logger_init();

        // The builder can be fully configured without touching the
        // filesystem; only `build()` would require real directories.
        let builder = RollingFileAppender::builder()
            .rotation(Rotation::DAILY)
            .filename_prefix("test-rustfs-cli")
            .filename_suffix("log");

        let rendered = format!("{builder:?}");
        assert!(!rendered.is_empty());
        // The exact Debug representation is unspecified, so accept any of
        // the plausible type-name spellings.
        let looks_right =
            rendered.contains("Builder") || rendered.contains("builder") || rendered.contains("RollingFileAppender");
        assert!(looks_right);
    }

    #[test]
    fn test_rotation_types() {
        ensure_logger_init();

        // Every rotation variant should produce a non-empty Debug string.
        for rotation in [Rotation::DAILY, Rotation::HOURLY, Rotation::MINUTELY, Rotation::NEVER] {
            assert!(!format!("{rotation:?}").is_empty());
        }
    }

    #[test]
    fn test_fmt_layer_configuration() {
        ensure_logger_init();

        // The concrete layer types are too complex to inspect directly, so
        // this documents the intended configuration flags instead.

        // Console layer: colored output with line numbers.
        let console = (true, true); // (ansi, line numbers)
        assert!(console.0);
        assert!(console.1);

        // File layer: no ANSI codes, everything else enabled.
        let file_ansi = false;
        assert!(!file_ansi);
        // thread names, target, thread ids, level, line number
        for enabled in [true, true, true, true, true] {
            assert!(enabled);
        }
    }

    #[test]
    fn test_env_filter_creation() {
        ensure_logger_init();

        // EnvFilter should accept every standard level directive.
        for level in ["info", "debug", "warn", "error"] {
            let filter = tracing_subscriber::EnvFilter::new(level);
            assert!(!format!("{filter:?}").is_empty());
        }
    }

    #[test]
    fn test_path_construction() {
        ensure_logger_init();

        // Mirror the path logic from init_logger and verify each component.
        if let Some(home) = dirs::home_dir() {
            let rustfs_dir = home.join("rustfs");
            let logs_dir = rustfs_dir.join("logs");

            assert!(rustfs_dir.ends_with("rustfs"));
            assert!(logs_dir.ends_with("logs"));
            assert!(logs_dir.parent().unwrap().ends_with("rustfs"));

            // String representations should contain both directory names.
            let rustfs_text = rustfs_dir.to_string_lossy();
            let logs_text = logs_dir.to_string_lossy();
            assert!(rustfs_text.contains("rustfs"));
            assert!(logs_text.contains("rustfs"));
            assert!(logs_text.contains("logs"));
        }
    }

    #[test]
    fn test_filename_patterns() {
        ensure_logger_init();

        // The prefix/suffix pair must combine into a valid rotated filename.
        let (prefix, suffix) = ("rustfs-cli", "log");
        assert_eq!(prefix, "rustfs-cli");
        assert_eq!(suffix, "log");
        assert_eq!(format!("{prefix}.2024-01-01.{suffix}"), "rustfs-cli.2024-01-01.log");
    }

    #[test]
    fn test_worker_guard_type() {
        ensure_logger_init();

        // A real guard requires the full appender setup, so only
        // sanity-check the type itself.
        assert!(std::mem::size_of::<WorkerGuard>() > 0, "WorkerGuard should have non-zero size");
    }

    #[test]
    fn test_logger_configuration_constants() {
        ensure_logger_init();

        // Pin the configuration values that init_logger hard-codes.
        let (level, prefix, suffix) = ("info", "rustfs-cli", "log");
        let rotation = Rotation::DAILY;

        assert_eq!(level, "info");
        assert_eq!(prefix, "rustfs-cli");
        assert_eq!(suffix, "log");
        assert!(matches!(rotation, Rotation::DAILY));
    }

    #[test]
    fn test_directory_names() {
        ensure_logger_init();

        // Directory names used by the logger setup.
        let names = ["rustfs", "logs"];
        assert_eq!(names[0], "rustfs");
        assert_eq!(names[1], "logs");

        // Joining them must yield the relative layout used on disk.
        assert_eq!(format!("{}/{}", names[0], names[1]), "rustfs/logs");
    }

    #[test]
    fn test_layer_settings() {
        ensure_logger_init();

        // Console layer flags: (ansi, line numbers).
        let console = (true, true);
        assert!(console.0);
        assert!(console.1);

        // File layer: ANSI disabled, all diagnostic flags enabled.
        let file_ansi = false;
        assert!(!file_ansi);
        // thread names, target, thread ids, level, line number
        for enabled in [true, true, true, true, true] {
            assert!(enabled);
        }
    }

    // Note: the actual init_logger() function is deliberately not tested
    // here because:
    // 1. It installs a global tracing subscriber, which can only happen once
    // 2. It requires file system access to create directories
    // 3. Its side effects would interfere with other tests
    // 4. It returns a WorkerGuard that must be kept alive
    //
    // It should instead be covered by integration tests, where file system
    // access, global state, observable logging behavior, and the
    // WorkerGuard lifecycle can all be properly managed.
}
|
||||
@@ -1,21 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Internal modules of the CLI crate.
mod config;
mod helper;
mod logger;

// Public re-exports forming the crate's API surface.
pub use config::RustFSConfig;
pub use helper::ServiceManager;
pub use logger::init_logger;
|
||||
@@ -1,38 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::route::Route;
|
||||
use dioxus::logger::tracing::info;
|
||||
use dioxus::prelude::*;
|
||||
|
||||
// Static assets bundled by Dioxus at build time via the `asset!` macro.
const FAVICON: Asset = asset!("/assets/favicon.ico");
const TAILWIND_CSS: Asset = asset!("/assets/tailwind.css");

/// The main application component
/// This is the root component of the application
/// It contains the global resources and the router
/// for the application
#[component]
pub fn App() -> Element {
    // Scoped import of the document helpers used inside the rsx! body.
    use document::{Link, Title};
    info!("App rendered");
    rsx! {
        // Global app resources
        Link { rel: "icon", href: FAVICON }
        Link { rel: "stylesheet", href: TAILWIND_CSS }
        Title { "RustFS" }
        Router::<Route> {}
    }
}
|
||||
@@ -1,23 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::components::Home;
|
||||
use dioxus::prelude::*;
|
||||
|
||||
/// Top-level view for the home route; simply renders the [`Home`] component.
#[component]
pub fn HomeViews() -> Element {
    rsx! {
        Home {}
    }
}
|
||||
@@ -1,21 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// View modules: one per top-level route.
mod app;
mod home;
mod setting;

// Re-export the route components for use by the router definition.
pub use app::App;
pub use home::HomeViews;
pub use setting::SettingViews;
|
||||
@@ -1,24 +0,0 @@
|
||||
/**
|
||||
* Copyright 2024 RustFS Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
// Tailwind CSS configuration for the Dioxus frontend.
// `content` lists every file scanned for class names so that unused
// styles can be purged from the generated stylesheet.
module.exports = {
  mode: "all",
  content: ["./src/**/*.{rs,html,css}", "./dist/**/*.html"],
  theme: {
    extend: {},
  },
  plugins: [],
};
|
||||
@@ -1258,7 +1258,7 @@ impl Scanner {
|
||||
objects_with_issues += 1;
|
||||
warn!("Object {} has no versions", entry.name);
|
||||
|
||||
// 对象元数据损坏,提交元数据heal任务
|
||||
// 对象元数据损坏,提交元数据 heal 任务
|
||||
let enable_healing = self.config.read().await.enable_healing;
|
||||
if enable_healing {
|
||||
if let Some(heal_manager) = &self.heal_manager {
|
||||
@@ -1296,7 +1296,7 @@ impl Scanner {
|
||||
objects_with_issues += 1;
|
||||
warn!("Failed to parse metadata for object {}", entry.name);
|
||||
|
||||
// 对象元数据解析失败,提交元数据heal任务
|
||||
// 对象元数据解析失败,提交元数据 heal 任务
|
||||
let enable_healing = self.config.read().await.enable_healing;
|
||||
if enable_healing {
|
||||
if let Some(heal_manager) = &self.heal_manager {
|
||||
|
||||
@@ -1,3 +1,17 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use rustfs_ahm::heal::{
|
||||
manager::{HealConfig, HealManager},
|
||||
storage::{ECStoreHealStorage, HealStorageAPI},
|
||||
|
||||
@@ -1,3 +1,17 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use rustfs_ahm::scanner::{Scanner, data_scanner::ScannerConfig};
|
||||
use rustfs_ecstore::{
|
||||
bucket::metadata::BUCKET_LIFECYCLE_CONFIG,
|
||||
|
||||
44
crates/audit-logger/Cargo.toml
Normal file
@@ -0,0 +1,44 @@
|
||||
# Copyright 2024 RustFS Team
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
[package]
|
||||
name = "rustfs-audit-logger"
|
||||
edition.workspace = true
|
||||
license.workspace = true
|
||||
repository.workspace = true
|
||||
rust-version.workspace = true
|
||||
version.workspace = true
|
||||
homepage.workspace = true
|
||||
description = "Audit logging system for RustFS, providing detailed logging of file operations and system events."
|
||||
documentation = "https://docs.rs/rustfs-audit-logger/latest/rustfs_audit_logger/"
|
||||
keywords = ["audit", "logging", "file-operations", "system-events", "RustFS"]
|
||||
categories = ["web-programming", "development-tools::profiling", "asynchronous", "api-bindings", "development-tools::debugging"]
|
||||
|
||||
[dependencies]
|
||||
rustfs-targets = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
reqwest = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
tracing = { workspace = true, features = ["std", "attributes"] }
|
||||
tracing-core = { workspace = true }
|
||||
tokio = { workspace = true, features = ["sync", "fs", "rt-multi-thread", "rt", "time", "macros"] }
|
||||
url = { workspace = true }
|
||||
uuid = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
figment = { version = "0.10", features = ["json", "env"] }
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
34
crates/audit-logger/examples/config.json
Normal file
@@ -0,0 +1,34 @@
|
||||
{
|
||||
"console": {
|
||||
"enabled": true
|
||||
},
|
||||
"logger_webhook": {
|
||||
"default": {
|
||||
"enabled": true,
|
||||
"endpoint": "http://localhost:3000/logs",
|
||||
"auth_token": "secret-token-for-logs",
|
||||
"batch_size": 5,
|
||||
"queue_size": 1000,
|
||||
"max_retry": 3,
|
||||
"retry_interval": "2s"
|
||||
}
|
||||
},
|
||||
"audit_webhook": {
|
||||
"splunk": {
|
||||
"enabled": true,
|
||||
"endpoint": "http://localhost:3000/audit",
|
||||
"auth_token": "secret-token-for-audit",
|
||||
"batch_size": 10
|
||||
}
|
||||
},
|
||||
"audit_kafka": {
|
||||
"default": {
|
||||
"enabled": false,
|
||||
"brokers": [
|
||||
"kafka1:9092",
|
||||
"kafka2:9092"
|
||||
],
|
||||
"topic": "minio-audit-events"
|
||||
}
|
||||
}
|
||||
}
|
||||
17
crates/audit-logger/examples/main.rs
Normal file
@@ -0,0 +1,17 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/// Minimal example binary for the audit-logger crate; prints a banner
/// and exits.
fn main() {
    println!("Audit Logger Example");
}
|
||||
90
crates/audit-logger/src/entry/args.rs
Normal file
@@ -0,0 +1,90 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
use crate::entry::ObjectVersion;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Args - defines the arguments for API operations
|
||||
/// Args is used to define the arguments for API operations.
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use rustfs_audit_logger::Args;
|
||||
/// use std::collections::HashMap;
|
||||
///
|
||||
/// let args = Args::new()
|
||||
/// .set_bucket(Some("my-bucket".to_string()))
|
||||
/// .set_object(Some("my-object".to_string()))
|
||||
/// .set_version_id(Some("123".to_string()))
|
||||
/// .set_metadata(Some(HashMap::new()));
|
||||
/// ```
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Default, Eq, PartialEq)]
pub struct Args {
    /// Bucket targeted by the operation, if any.
    #[serde(rename = "bucket", skip_serializing_if = "Option::is_none")]
    pub bucket: Option<String>,
    /// Object key targeted by the operation, if any.
    #[serde(rename = "object", skip_serializing_if = "Option::is_none")]
    pub object: Option<String>,
    /// Specific object version involved, if any.
    #[serde(rename = "versionId", skip_serializing_if = "Option::is_none")]
    pub version_id: Option<String>,
    /// Multiple object/version pairs for batch operations.
    #[serde(rename = "objects", skip_serializing_if = "Option::is_none")]
    pub objects: Option<Vec<ObjectVersion>>,
    /// Free-form key/value metadata attached to the operation.
    #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")]
    pub metadata: Option<HashMap<String, String>>,
}
|
||||
|
||||
impl Args {
|
||||
/// Create a new Args object
|
||||
pub fn new() -> Self {
|
||||
Args {
|
||||
bucket: None,
|
||||
object: None,
|
||||
version_id: None,
|
||||
objects: None,
|
||||
metadata: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the bucket
|
||||
pub fn set_bucket(mut self, bucket: Option<String>) -> Self {
|
||||
self.bucket = bucket;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the object
|
||||
pub fn set_object(mut self, object: Option<String>) -> Self {
|
||||
self.object = object;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the version ID
|
||||
pub fn set_version_id(mut self, version_id: Option<String>) -> Self {
|
||||
self.version_id = version_id;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the objects
|
||||
pub fn set_objects(mut self, objects: Option<Vec<ObjectVersion>>) -> Self {
|
||||
self.objects = objects;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the metadata
|
||||
pub fn set_metadata(mut self, metadata: Option<HashMap<String, String>>) -> Self {
|
||||
self.metadata = metadata;
|
||||
self
|
||||
}
|
||||
}
|
||||
469
crates/audit-logger/src/entry/audit.rs
Normal file
@@ -0,0 +1,469 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
use crate::{BaseLogEntry, LogRecord, ObjectVersion};
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// API details structure
|
||||
/// ApiDetails is used to define the details of an API operation
|
||||
///
|
||||
/// The `ApiDetails` structure contains the following fields:
|
||||
/// - `name` - the name of the API operation
|
||||
/// - `bucket` - the bucket name
|
||||
/// - `object` - the object name
|
||||
/// - `objects` - the list of objects
|
||||
/// - `status` - the status of the API operation
|
||||
/// - `status_code` - the status code of the API operation
|
||||
/// - `input_bytes` - the input bytes
|
||||
/// - `output_bytes` - the output bytes
|
||||
/// - `header_bytes` - the header bytes
|
||||
/// - `time_to_first_byte` - the time to first byte
|
||||
/// - `time_to_first_byte_in_ns` - the time to first byte in nanoseconds
|
||||
/// - `time_to_response` - the time to response
|
||||
/// - `time_to_response_in_ns` - the time to response in nanoseconds
|
||||
///
|
||||
/// The `ApiDetails` structure contains the following methods:
|
||||
/// - `new` - create a new `ApiDetails` with default values
|
||||
/// - `set_name` - set the name
|
||||
/// - `set_bucket` - set the bucket
|
||||
/// - `set_object` - set the object
|
||||
/// - `set_objects` - set the objects
|
||||
/// - `set_status` - set the status
|
||||
/// - `set_status_code` - set the status code
|
||||
/// - `set_input_bytes` - set the input bytes
|
||||
/// - `set_output_bytes` - set the output bytes
|
||||
/// - `set_header_bytes` - set the header bytes
|
||||
/// - `set_time_to_first_byte` - set the time to first byte
|
||||
/// - `set_time_to_first_byte_in_ns` - set the time to first byte in nanoseconds
|
||||
/// - `set_time_to_response` - set the time to response
|
||||
/// - `set_time_to_response_in_ns` - set the time to response in nanoseconds
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use rustfs_audit_logger::ApiDetails;
|
||||
/// use rustfs_audit_logger::ObjectVersion;
|
||||
///
|
||||
/// let api = ApiDetails::new()
|
||||
/// .set_name(Some("GET".to_string()))
|
||||
/// .set_bucket(Some("my-bucket".to_string()))
|
||||
/// .set_object(Some("my-object".to_string()))
|
||||
/// .set_objects(vec![ObjectVersion::new_with_object_name("my-object".to_string())])
|
||||
/// .set_status(Some("OK".to_string()))
|
||||
/// .set_status_code(Some(200))
|
||||
/// .set_input_bytes(100)
|
||||
/// .set_output_bytes(200)
|
||||
/// .set_header_bytes(Some(50))
|
||||
/// .set_time_to_first_byte(Some("100ms".to_string()))
|
||||
/// .set_time_to_first_byte_in_ns(Some("100000000ns".to_string()))
|
||||
/// .set_time_to_response(Some("200ms".to_string()))
|
||||
/// .set_time_to_response_in_ns(Some("200000000ns".to_string()));
|
||||
/// ```
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, Default, PartialEq, Eq)]
pub struct ApiDetails {
    /// Name of the API operation.
    #[serde(rename = "name", skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    /// Bucket targeted by the operation, if any.
    #[serde(rename = "bucket", skip_serializing_if = "Option::is_none")]
    pub bucket: Option<String>,
    /// Object key targeted by the operation, if any.
    #[serde(rename = "object", skip_serializing_if = "Option::is_none")]
    pub object: Option<String>,
    /// Object/version pairs for multi-object operations.
    #[serde(rename = "objects", skip_serializing_if = "Vec::is_empty", default)]
    pub objects: Vec<ObjectVersion>,
    /// Status text of the API operation.
    #[serde(rename = "status", skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
    /// HTTP status code of the API operation.
    #[serde(rename = "statusCode", skip_serializing_if = "Option::is_none")]
    pub status_code: Option<i32>,
    /// Bytes received (serialized as `rx`).
    #[serde(rename = "rx")]
    pub input_bytes: i64,
    /// Bytes sent (serialized as `tx`).
    #[serde(rename = "tx")]
    pub output_bytes: i64,
    /// Bytes sent as response headers, if recorded.
    #[serde(rename = "txHeaders", skip_serializing_if = "Option::is_none")]
    pub header_bytes: Option<i64>,
    /// Human-readable time to first byte (e.g. "100ms").
    #[serde(rename = "timeToFirstByte", skip_serializing_if = "Option::is_none")]
    pub time_to_first_byte: Option<String>,
    /// Time to first byte expressed in nanoseconds.
    #[serde(rename = "timeToFirstByteInNS", skip_serializing_if = "Option::is_none")]
    pub time_to_first_byte_in_ns: Option<String>,
    /// Human-readable time to full response.
    #[serde(rename = "timeToResponse", skip_serializing_if = "Option::is_none")]
    pub time_to_response: Option<String>,
    /// Time to full response expressed in nanoseconds.
    #[serde(rename = "timeToResponseInNS", skip_serializing_if = "Option::is_none")]
    pub time_to_response_in_ns: Option<String>,
}
|
||||
|
||||
impl ApiDetails {
|
||||
/// Create a new `ApiDetails` with default values
|
||||
pub fn new() -> Self {
|
||||
ApiDetails {
|
||||
name: None,
|
||||
bucket: None,
|
||||
object: None,
|
||||
objects: Vec::new(),
|
||||
status: None,
|
||||
status_code: None,
|
||||
input_bytes: 0,
|
||||
output_bytes: 0,
|
||||
header_bytes: None,
|
||||
time_to_first_byte: None,
|
||||
time_to_first_byte_in_ns: None,
|
||||
time_to_response: None,
|
||||
time_to_response_in_ns: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the name
|
||||
pub fn set_name(mut self, name: Option<String>) -> Self {
|
||||
self.name = name;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the bucket
|
||||
pub fn set_bucket(mut self, bucket: Option<String>) -> Self {
|
||||
self.bucket = bucket;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the object
|
||||
pub fn set_object(mut self, object: Option<String>) -> Self {
|
||||
self.object = object;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the objects
|
||||
pub fn set_objects(mut self, objects: Vec<ObjectVersion>) -> Self {
|
||||
self.objects = objects;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the status
|
||||
pub fn set_status(mut self, status: Option<String>) -> Self {
|
||||
self.status = status;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the status code
|
||||
pub fn set_status_code(mut self, status_code: Option<i32>) -> Self {
|
||||
self.status_code = status_code;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the input bytes
|
||||
pub fn set_input_bytes(mut self, input_bytes: i64) -> Self {
|
||||
self.input_bytes = input_bytes;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the output bytes
|
||||
pub fn set_output_bytes(mut self, output_bytes: i64) -> Self {
|
||||
self.output_bytes = output_bytes;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the header bytes
|
||||
pub fn set_header_bytes(mut self, header_bytes: Option<i64>) -> Self {
|
||||
self.header_bytes = header_bytes;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the time to first byte
|
||||
pub fn set_time_to_first_byte(mut self, time_to_first_byte: Option<String>) -> Self {
|
||||
self.time_to_first_byte = time_to_first_byte;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the time to first byte in nanoseconds
|
||||
pub fn set_time_to_first_byte_in_ns(mut self, time_to_first_byte_in_ns: Option<String>) -> Self {
|
||||
self.time_to_first_byte_in_ns = time_to_first_byte_in_ns;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the time to response
|
||||
pub fn set_time_to_response(mut self, time_to_response: Option<String>) -> Self {
|
||||
self.time_to_response = time_to_response;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the time to response in nanoseconds
|
||||
pub fn set_time_to_response_in_ns(mut self, time_to_response_in_ns: Option<String>) -> Self {
|
||||
self.time_to_response_in_ns = time_to_response_in_ns;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// Entry - audit entry logs
/// AuditLogEntry is used to define the structure of an audit log entry
///
/// The common fields (time, requestID, message, tags) live in the flattened
/// `base`. The serde renames map the Rust field names onto the JSON wire
/// format (e.g. `deployment_id` -> "deploymentid"); every `Option` field is
/// omitted from the serialized output when `None`.
///
/// Instances are built with `new`/`new_with_values` and the consuming
/// `set_*` builder methods defined on this type.
///
/// # Example
/// ```
/// use rustfs_audit_logger::AuditLogEntry;
/// use rustfs_audit_logger::ApiDetails;
/// use std::collections::HashMap;
///
/// let entry = AuditLogEntry::new()
///     .set_version("1.0".to_string())
///     .set_deployment_id(Some("123".to_string()))
///     .set_event("event".to_string())
///     .set_entry_type(Some("type".to_string()))
///     .set_api(ApiDetails::new())
///     .set_remote_host(Some("remote-host".to_string()))
///     .set_user_agent(Some("user-agent".to_string()))
///     .set_req_path(Some("req-path".to_string()))
///     .set_req_host(Some("req-host".to_string()))
///     .set_req_claims(Some(HashMap::new()))
///     .set_req_query(Some(HashMap::new()))
///     .set_req_header(Some(HashMap::new()))
///     .set_resp_header(Some(HashMap::new()))
///     .set_access_key(Some("access-key".to_string()))
///     .set_parent_user(Some("parent-user".to_string()))
///     .set_error(Some("error".to_string()));
/// ```
#[derive(Debug, Serialize, Deserialize, Clone, Default)]
pub struct AuditLogEntry {
    /// Common log fields, flattened into this entry's JSON object
    #[serde(flatten)]
    pub base: BaseLogEntry,
    /// Version of the audit log entry format
    pub version: String,
    #[serde(rename = "deploymentid", skip_serializing_if = "Option::is_none")]
    pub deployment_id: Option<String>,
    /// Name of the event that produced this entry
    pub event: String,
    // Class of audit message - S3, admin ops, bucket management
    #[serde(rename = "type", skip_serializing_if = "Option::is_none")]
    pub entry_type: Option<String>,
    /// Details of the API call being audited
    pub api: ApiDetails,
    #[serde(rename = "remotehost", skip_serializing_if = "Option::is_none")]
    pub remote_host: Option<String>,
    #[serde(rename = "userAgent", skip_serializing_if = "Option::is_none")]
    pub user_agent: Option<String>,
    #[serde(rename = "requestPath", skip_serializing_if = "Option::is_none")]
    pub req_path: Option<String>,
    #[serde(rename = "requestHost", skip_serializing_if = "Option::is_none")]
    pub req_host: Option<String>,
    #[serde(rename = "requestClaims", skip_serializing_if = "Option::is_none")]
    pub req_claims: Option<HashMap<String, Value>>,
    #[serde(rename = "requestQuery", skip_serializing_if = "Option::is_none")]
    pub req_query: Option<HashMap<String, String>>,
    #[serde(rename = "requestHeader", skip_serializing_if = "Option::is_none")]
    pub req_header: Option<HashMap<String, String>>,
    #[serde(rename = "responseHeader", skip_serializing_if = "Option::is_none")]
    pub resp_header: Option<HashMap<String, String>>,
    #[serde(rename = "accessKey", skip_serializing_if = "Option::is_none")]
    pub access_key: Option<String>,
    #[serde(rename = "parentUser", skip_serializing_if = "Option::is_none")]
    pub parent_user: Option<String>,
    #[serde(rename = "error", skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
}
|
||||
|
||||
impl AuditLogEntry {
|
||||
/// Create a new `AuditEntry` with default values
|
||||
pub fn new() -> Self {
|
||||
AuditLogEntry {
|
||||
base: BaseLogEntry::new(),
|
||||
version: String::new(),
|
||||
deployment_id: None,
|
||||
event: String::new(),
|
||||
entry_type: None,
|
||||
api: ApiDetails::new(),
|
||||
remote_host: None,
|
||||
user_agent: None,
|
||||
req_path: None,
|
||||
req_host: None,
|
||||
req_claims: None,
|
||||
req_query: None,
|
||||
req_header: None,
|
||||
resp_header: None,
|
||||
access_key: None,
|
||||
parent_user: None,
|
||||
error: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new `AuditEntry` with version, time, event and api details
|
||||
pub fn new_with_values(version: String, time: DateTime<Utc>, event: String, api: ApiDetails) -> Self {
|
||||
let mut base = BaseLogEntry::new();
|
||||
base.timestamp = time;
|
||||
|
||||
AuditLogEntry {
|
||||
base,
|
||||
version,
|
||||
deployment_id: None,
|
||||
event,
|
||||
entry_type: None,
|
||||
api,
|
||||
remote_host: None,
|
||||
user_agent: None,
|
||||
req_path: None,
|
||||
req_host: None,
|
||||
req_claims: None,
|
||||
req_query: None,
|
||||
req_header: None,
|
||||
resp_header: None,
|
||||
access_key: None,
|
||||
parent_user: None,
|
||||
error: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the base log entry
|
||||
pub fn with_base(mut self, base: BaseLogEntry) -> Self {
|
||||
self.base = base;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the version
|
||||
pub fn set_version(mut self, version: String) -> Self {
|
||||
self.version = version;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the deployment ID
|
||||
pub fn set_deployment_id(mut self, deployment_id: Option<String>) -> Self {
|
||||
self.deployment_id = deployment_id;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the event
|
||||
pub fn set_event(mut self, event: String) -> Self {
|
||||
self.event = event;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the entry type
|
||||
pub fn set_entry_type(mut self, entry_type: Option<String>) -> Self {
|
||||
self.entry_type = entry_type;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the API details
|
||||
pub fn set_api(mut self, api: ApiDetails) -> Self {
|
||||
self.api = api;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the remote host
|
||||
pub fn set_remote_host(mut self, remote_host: Option<String>) -> Self {
|
||||
self.remote_host = remote_host;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the user agent
|
||||
pub fn set_user_agent(mut self, user_agent: Option<String>) -> Self {
|
||||
self.user_agent = user_agent;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the request path
|
||||
pub fn set_req_path(mut self, req_path: Option<String>) -> Self {
|
||||
self.req_path = req_path;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the request host
|
||||
pub fn set_req_host(mut self, req_host: Option<String>) -> Self {
|
||||
self.req_host = req_host;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the request claims
|
||||
pub fn set_req_claims(mut self, req_claims: Option<HashMap<String, Value>>) -> Self {
|
||||
self.req_claims = req_claims;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the request query
|
||||
pub fn set_req_query(mut self, req_query: Option<HashMap<String, String>>) -> Self {
|
||||
self.req_query = req_query;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the request header
|
||||
pub fn set_req_header(mut self, req_header: Option<HashMap<String, String>>) -> Self {
|
||||
self.req_header = req_header;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the response header
|
||||
pub fn set_resp_header(mut self, resp_header: Option<HashMap<String, String>>) -> Self {
|
||||
self.resp_header = resp_header;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the access key
|
||||
pub fn set_access_key(mut self, access_key: Option<String>) -> Self {
|
||||
self.access_key = access_key;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the parent user
|
||||
pub fn set_parent_user(mut self, parent_user: Option<String>) -> Self {
|
||||
self.parent_user = parent_user;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the error
|
||||
pub fn set_error(mut self, error: Option<String>) -> Self {
|
||||
self.error = error;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl LogRecord for AuditLogEntry {
|
||||
fn to_json(&self) -> String {
|
||||
serde_json::to_string(self).unwrap_or_else(|_| String::from("{}"))
|
||||
}
|
||||
|
||||
fn get_timestamp(&self) -> DateTime<Utc> {
|
||||
self.base.timestamp
|
||||
}
|
||||
}
|
||||
108
crates/audit-logger/src/entry/base.rs
Normal file
@@ -0,0 +1,108 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Base log entry structure shared by all log types
///
/// `BaseLogEntry` carries the fields common to the server, audit and console
/// entry types; those types embed it with `#[serde(flatten)]`, so these
/// fields appear at the top level of the serialized JSON object.
///
/// Serialization details:
/// - `timestamp` is serialized under the key "time"
/// - `request_id` is serialized under the key "requestID", omitted when `None`
/// - `message` and `tags` are omitted when `None`
///
/// # Example
/// ```
/// use rustfs_audit_logger::BaseLogEntry;
/// use chrono::{DateTime, Utc};
/// use std::collections::HashMap;
///
/// let timestamp = Utc::now();
/// let request = Some("req-123".to_string());
/// let message = Some("This is a log message".to_string());
/// let tags = Some(HashMap::new());
///
/// let entry = BaseLogEntry::new()
///     .timestamp(timestamp)
///     .request_id(request)
///     .message(message)
///     .tags(tags);
/// ```
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq, Default)]
pub struct BaseLogEntry {
    #[serde(rename = "time")]
    pub timestamp: DateTime<Utc>,

    #[serde(rename = "requestID", skip_serializing_if = "Option::is_none")]
    pub request_id: Option<String>,

    #[serde(rename = "message", skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,

    #[serde(rename = "tags", skip_serializing_if = "Option::is_none")]
    pub tags: Option<HashMap<String, Value>>,
}
|
||||
|
||||
impl BaseLogEntry {
|
||||
/// Create a new BaseLogEntry with default values
|
||||
pub fn new() -> Self {
|
||||
BaseLogEntry {
|
||||
timestamp: Utc::now(),
|
||||
request_id: None,
|
||||
message: None,
|
||||
tags: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the message
|
||||
pub fn message(mut self, message: Option<String>) -> Self {
|
||||
self.message = message;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the request ID
|
||||
pub fn request_id(mut self, request_id: Option<String>) -> Self {
|
||||
self.request_id = request_id;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the tags
|
||||
pub fn tags(mut self, tags: Option<HashMap<String, Value>>) -> Self {
|
||||
self.tags = tags;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the timestamp
|
||||
pub fn timestamp(mut self, timestamp: DateTime<Utc>) -> Self {
|
||||
self.timestamp = timestamp;
|
||||
self
|
||||
}
|
||||
}
|
||||
159
crates/audit-logger/src/entry/mod.rs
Normal file
@@ -0,0 +1,159 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#![allow(dead_code)]
|
||||
pub(crate) mod args;
|
||||
pub(crate) mod audit;
|
||||
pub(crate) mod base;
|
||||
pub(crate) mod unified;
|
||||
|
||||
use serde::de::Error;
|
||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||
use tracing_core::Level;
|
||||
|
||||
/// ObjectVersion is used across multiple modules
///
/// Identifies an object by name plus an optional version ID. Serialized as
/// `{"name": ..., "versionId": ...}`, with "versionId" omitted when `None`.
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
pub struct ObjectVersion {
    #[serde(rename = "name")]
    pub object_name: String,
    #[serde(rename = "versionId", skip_serializing_if = "Option::is_none")]
    pub version_id: Option<String>,
}
|
||||
|
||||
impl ObjectVersion {
|
||||
/// Create a new ObjectVersion object
|
||||
pub fn new() -> Self {
|
||||
ObjectVersion {
|
||||
object_name: String::new(),
|
||||
version_id: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new ObjectVersion with object name
|
||||
pub fn new_with_object_name(object_name: String) -> Self {
|
||||
ObjectVersion {
|
||||
object_name,
|
||||
version_id: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the object name
|
||||
pub fn set_object_name(mut self, object_name: String) -> Self {
|
||||
self.object_name = object_name;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the version ID
|
||||
pub fn set_version_id(mut self, version_id: Option<String>) -> Self {
|
||||
self.version_id = version_id;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// `Default` matches `new()`: empty object name, no version ID
impl Default for ObjectVersion {
    fn default() -> Self {
        Self::new()
    }
}
|
||||
|
||||
/// Log kind/level enum
///
/// Serialized as the uppercase level name ("INFO", "WARNING", "ERROR",
/// "FATAL"); `Info` is the default variant.
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]
pub enum LogKind {
    #[serde(rename = "INFO")]
    #[default]
    Info,
    #[serde(rename = "WARNING")]
    Warning,
    #[serde(rename = "ERROR")]
    Error,
    #[serde(rename = "FATAL")]
    Fatal,
}
|
||||
|
||||
/// Trait for types that can be serialized to JSON and have a timestamp
///
/// Implemented by the concrete log entry types in this crate (server, audit,
/// console and the unified wrapper).
///
/// # Example
/// ```
/// use rustfs_audit_logger::LogRecord;
/// use chrono::{DateTime, Utc};
/// use rustfs_audit_logger::ServerLogEntry;
/// use tracing_core::Level;
///
/// let log_entry = ServerLogEntry::new(Level::INFO, "api_handler".to_string());
/// let json = log_entry.to_json();
/// let timestamp = log_entry.get_timestamp();
/// ```
pub trait LogRecord {
    /// Serialize the record to a JSON string
    fn to_json(&self) -> String;
    /// Timestamp associated with the record
    fn get_timestamp(&self) -> chrono::DateTime<chrono::Utc>;
}
|
||||
|
||||
/// Wrapper for `tracing_core::Level` to implement `Serialize` and `Deserialize`
/// for `ServerLogEntry`
///
/// `tracing_core::Level` does not implement the serde traits itself, so this
/// newtype provides them, representing the level as its uppercase name
/// (e.g. "INFO") on the wire.
///
/// # Example
/// ```
/// use rustfs_audit_logger::SerializableLevel;
/// use tracing_core::Level;
///
/// let level = Level::INFO;
/// let serializable_level = SerializableLevel::from(level);
/// ```
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SerializableLevel(pub Level);
|
||||
|
||||
impl From<Level> for SerializableLevel {
|
||||
fn from(level: Level) -> Self {
|
||||
SerializableLevel(level)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<SerializableLevel> for Level {
    /// Unwrap back to the raw tracing level
    fn from(wrapper: SerializableLevel) -> Self {
        wrapper.0
    }
}
|
||||
|
||||
impl Serialize for SerializableLevel {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        // `Level::as_str` yields the uppercase level name ("TRACE".."ERROR"),
        // which is exactly the set accepted by the Deserialize impl below.
        serializer.serialize_str(self.0.as_str())
    }
}
|
||||
|
||||
impl<'de> Deserialize<'de> for SerializableLevel {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
let s = String::deserialize(deserializer)?;
|
||||
match s.as_str() {
|
||||
"TRACE" => Ok(SerializableLevel(Level::TRACE)),
|
||||
"DEBUG" => Ok(SerializableLevel(Level::DEBUG)),
|
||||
"INFO" => Ok(SerializableLevel(Level::INFO)),
|
||||
"WARN" => Ok(SerializableLevel(Level::WARN)),
|
||||
"ERROR" => Ok(SerializableLevel(Level::ERROR)),
|
||||
_ => Err(D::Error::custom("unknown log level")),
|
||||
}
|
||||
}
|
||||
}
|
||||
266
crates/audit-logger/src/entry/unified.rs
Normal file
@@ -0,0 +1,266 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
use crate::{AuditLogEntry, BaseLogEntry, LogKind, LogRecord, SerializableLevel};
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tracing_core::Level;
|
||||
|
||||
/// Server log entry with structured fields
/// ServerLogEntry is used to log structured log entries from the server
///
/// `base` is flattened into the serialized object; `user_id` is serialized as
/// "userId" and omitted when `None`; `fields` is omitted when empty and
/// defaults to empty on deserialization.
///
/// # Example
/// ```
/// use rustfs_audit_logger::ServerLogEntry;
/// use tracing_core::Level;
///
/// let entry = ServerLogEntry::new(Level::INFO, "test_module".to_string())
///     .user_id(Some("user-456".to_string()))
///     .add_field("operation".to_string(), "login".to_string());
/// ```
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub struct ServerLogEntry {
    /// Common log fields, flattened into this entry's JSON object
    #[serde(flatten)]
    pub base: BaseLogEntry,

    /// Severity of the entry (serde-capable wrapper around `tracing_core::Level`)
    pub level: SerializableLevel,
    /// Module or component that produced the entry
    pub source: String,

    #[serde(rename = "userId", skip_serializing_if = "Option::is_none")]
    pub user_id: Option<String>,

    /// Structured key/value pairs attached to the entry
    #[serde(skip_serializing_if = "Vec::is_empty", default)]
    pub fields: Vec<(String, String)>,
}
|
||||
|
||||
impl ServerLogEntry {
|
||||
/// Create a new ServerLogEntry with specified level and source
|
||||
pub fn new(level: Level, source: String) -> Self {
|
||||
ServerLogEntry {
|
||||
base: BaseLogEntry::new(),
|
||||
level: SerializableLevel(level),
|
||||
source,
|
||||
user_id: None,
|
||||
fields: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the base log entry
|
||||
pub fn with_base(mut self, base: BaseLogEntry) -> Self {
|
||||
self.base = base;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the user ID
|
||||
pub fn user_id(mut self, user_id: Option<String>) -> Self {
|
||||
self.user_id = user_id;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set fields
|
||||
pub fn fields(mut self, fields: Vec<(String, String)>) -> Self {
|
||||
self.fields = fields;
|
||||
self
|
||||
}
|
||||
|
||||
/// Add a field
|
||||
pub fn add_field(mut self, key: String, value: String) -> Self {
|
||||
self.fields.push((key, value));
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl LogRecord for ServerLogEntry {
|
||||
fn to_json(&self) -> String {
|
||||
serde_json::to_string(self).unwrap_or_else(|_| String::from("{}"))
|
||||
}
|
||||
|
||||
fn get_timestamp(&self) -> DateTime<Utc> {
|
||||
self.base.timestamp
|
||||
}
|
||||
}
|
||||
|
||||
/// Console log entry structure
/// ConsoleLogEntry is used to log console log entries
///
/// `base` is flattened into the serialized object; `err` is never serialized
/// (`#[serde(skip)]`) and exists only for in-process use.
///
/// # Example
/// ```
/// use rustfs_audit_logger::ConsoleLogEntry;
///
/// let entry = ConsoleLogEntry::new_with_console_msg("Test message".to_string(), "node-123".to_string());
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ConsoleLogEntry {
    /// Common log fields, flattened into this entry's JSON object
    #[serde(flatten)]
    pub base: BaseLogEntry,

    /// Severity of the entry
    pub level: LogKind,
    /// Text printed to the console
    pub console_msg: String,
    /// Name of the node the message originated from
    pub node_name: String,

    /// In-process error detail; excluded from serialization
    #[serde(skip)]
    pub err: Option<String>,
}
|
||||
|
||||
impl ConsoleLogEntry {
|
||||
/// Create a new ConsoleLogEntry
|
||||
pub fn new() -> Self {
|
||||
ConsoleLogEntry {
|
||||
base: BaseLogEntry::new(),
|
||||
level: LogKind::Info,
|
||||
console_msg: String::new(),
|
||||
node_name: String::new(),
|
||||
err: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new ConsoleLogEntry with console message and node name
|
||||
pub fn new_with_console_msg(console_msg: String, node_name: String) -> Self {
|
||||
ConsoleLogEntry {
|
||||
base: BaseLogEntry::new(),
|
||||
level: LogKind::Info,
|
||||
console_msg,
|
||||
node_name,
|
||||
err: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the base log entry
|
||||
pub fn with_base(mut self, base: BaseLogEntry) -> Self {
|
||||
self.base = base;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the log level
|
||||
pub fn set_level(mut self, level: LogKind) -> Self {
|
||||
self.level = level;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the node name
|
||||
pub fn set_node_name(mut self, node_name: String) -> Self {
|
||||
self.node_name = node_name;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the console message
|
||||
pub fn set_console_msg(mut self, console_msg: String) -> Self {
|
||||
self.console_msg = console_msg;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the error message
|
||||
pub fn set_err(mut self, err: Option<String>) -> Self {
|
||||
self.err = err;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// `Default` matches `new()`: INFO level, empty message and node name
impl Default for ConsoleLogEntry {
    fn default() -> Self {
        Self::new()
    }
}
|
||||
|
||||
impl LogRecord for ConsoleLogEntry {
|
||||
fn to_json(&self) -> String {
|
||||
serde_json::to_string(self).unwrap_or_else(|_| String::from("{}"))
|
||||
}
|
||||
|
||||
fn get_timestamp(&self) -> DateTime<Utc> {
|
||||
self.base.timestamp
|
||||
}
|
||||
}
|
||||
|
||||
/// Unified log entry type
|
||||
/// UnifiedLogEntry is used to log different types of log entries
|
||||
///
|
||||
/// The `UnifiedLogEntry` enum contains the following variants:
|
||||
/// - `Server` - a server log entry
|
||||
/// - `Audit` - an audit log entry
|
||||
/// - `Console` - a console log entry
|
||||
///
|
||||
/// The `UnifiedLogEntry` enum contains the following methods:
|
||||
/// - `to_json` - convert the log entry to JSON
|
||||
/// - `get_timestamp` - get the timestamp of the log entry
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use rustfs_audit_logger::{UnifiedLogEntry, ServerLogEntry};
|
||||
/// use tracing_core::Level;
|
||||
///
|
||||
/// let server_entry = ServerLogEntry::new(Level::INFO, "test_module".to_string());
|
||||
/// let unified = UnifiedLogEntry::Server(server_entry);
|
||||
/// ```
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(tag = "type")]
|
||||
pub enum UnifiedLogEntry {
|
||||
#[serde(rename = "server")]
|
||||
Server(ServerLogEntry),
|
||||
|
||||
#[serde(rename = "audit")]
|
||||
Audit(Box<AuditLogEntry>),
|
||||
|
||||
#[serde(rename = "console")]
|
||||
Console(ConsoleLogEntry),
|
||||
}
|
||||
|
||||
impl LogRecord for UnifiedLogEntry {
|
||||
fn to_json(&self) -> String {
|
||||
match self {
|
||||
UnifiedLogEntry::Server(entry) => entry.to_json(),
|
||||
UnifiedLogEntry::Audit(entry) => entry.to_json(),
|
||||
UnifiedLogEntry::Console(entry) => entry.to_json(),
|
||||
}
|
||||
}
|
||||
|
||||
fn get_timestamp(&self) -> DateTime<Utc> {
|
||||
match self {
|
||||
UnifiedLogEntry::Server(entry) => entry.get_timestamp(),
|
||||
UnifiedLogEntry::Audit(entry) => entry.get_timestamp(),
|
||||
UnifiedLogEntry::Console(entry) => entry.get_timestamp(),
|
||||
}
|
||||
}
|
||||
}
|
||||
8
crates/audit-logger/src/lib.rs
Normal file
@@ -0,0 +1,8 @@
|
||||
mod entry;
|
||||
mod logger;
|
||||
|
||||
pub use entry::args::Args;
|
||||
pub use entry::audit::{ApiDetails, AuditLogEntry};
|
||||
pub use entry::base::BaseLogEntry;
|
||||
pub use entry::unified::{ConsoleLogEntry, ServerLogEntry, UnifiedLogEntry};
|
||||
pub use entry::{LogKind, LogRecord, ObjectVersion, SerializableLevel};
|
||||
@@ -12,12 +12,18 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::components::Setting;
|
||||
use dioxus::prelude::*;
|
||||
#![allow(dead_code)]
|
||||
|
||||
#[component]
|
||||
pub fn SettingViews() -> Element {
|
||||
rsx! {
|
||||
Setting {}
|
||||
}
|
||||
/// Default number of entries per batch when the configuration omits one.
fn default_batch_size() -> usize {
    10
}
|
||||
/// Default in-memory queue capacity when the configuration omits one.
fn default_queue_size() -> usize {
    10_000
}
|
||||
/// Default maximum number of retry attempts when the configuration omits one.
fn default_max_retry() -> u32 {
    5
}
|
||||
/// Default delay between retry attempts (3 seconds).
fn default_retry_interval() -> std::time::Duration {
    std::time::Duration::from_millis(3_000)
}
|
||||
13
crates/audit-logger/src/logger/dispatch.rs
Normal file
@@ -0,0 +1,13 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
108
crates/audit-logger/src/logger/entry.rs
Normal file
@@ -0,0 +1,108 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::Serialize;
|
||||
use std::collections::HashMap;
|
||||
use uuid::Uuid;
|
||||
|
||||
/// A trait for a log entry that can be serialized and sent to a log sink.
///
/// The `Send + Sync + 'static` bounds allow implementors to cross thread
/// boundaries; `to_json` has a default implementation via `serde_json`.
pub trait Loggable: Serialize + Send + Sync + 'static {
    /// Serialize the entry to a JSON string.
    fn to_json(&self) -> Result<String, serde_json::Error> {
        serde_json::to_string(self)
    }
}
|
||||
|
||||
/// Standard log entries
///
/// Serialized with camelCase keys; `trace` is omitted when `None`.
#[derive(Serialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct LogEntry {
    pub deployment_id: String,
    /// Severity as free text (e.g. "INFO") — not the `LogKind` enum
    pub level: String,
    pub message: String,
    /// Optional error trace detail; omitted from JSON when `None`
    #[serde(skip_serializing_if = "Option::is_none")]
    pub trace: Option<Trace>,
    pub time: DateTime<Utc>,
    pub request_id: String,
}

impl Loggable for LogEntry {}
|
||||
|
||||
/// Audit log entry
///
/// Serialized with camelCase keys; `tags` is omitted when empty.
/// NOTE(review): this overlaps with `entry::audit::AuditLogEntry` —
/// presumably one of the two representations is meant to survive; confirm.
#[derive(Serialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct AuditEntry {
    pub version: String,
    pub deployment_id: String,
    pub time: DateTime<Utc>,
    /// What caused the entry (e.g. "incoming")
    pub trigger: String,
    /// Details of the audited API call
    pub api: ApiDetails,
    pub remote_host: String,
    pub request_id: String,
    pub user_agent: String,
    pub access_key: String,
    /// Free-form tags; omitted from JSON when empty
    #[serde(skip_serializing_if = "HashMap::is_empty")]
    pub tags: HashMap<String, String>,
}

impl Loggable for AuditEntry {}
|
||||
|
||||
/// Error trace attached to a `LogEntry`
#[derive(Serialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct Trace {
    pub message: String,
    /// Source locations, one string per frame
    pub source: Vec<String>,
    /// Captured variables; omitted from JSON when empty
    #[serde(skip_serializing_if = "HashMap::is_empty")]
    pub variables: HashMap<String, String>,
}
|
||||
|
||||
/// Details of an audited API call
///
/// NOTE(review): this shadows the builder-style `ApiDetails` re-exported from
/// `entry::audit` — the two are distinct types; confirm which one callers
/// should use.
#[derive(Serialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct ApiDetails {
    pub name: String,
    pub bucket: String,
    pub object: String,
    /// Status text (e.g. "OK")
    pub status: String,
    pub status_code: u16,
    /// Human-readable duration string (e.g. "10ms")
    pub time_to_first_byte: String,
    /// Human-readable duration string (e.g. "50ms")
    pub time_to_response: String,
}
|
||||
|
||||
// Helper functions to create entries
|
||||
impl AuditEntry {
|
||||
pub fn new(api_name: &str, bucket: &str, object: &str) -> Self {
|
||||
AuditEntry {
|
||||
version: "1".to_string(),
|
||||
deployment_id: "global-deployment-id".to_string(),
|
||||
time: Utc::now(),
|
||||
trigger: "incoming".to_string(),
|
||||
api: ApiDetails {
|
||||
name: api_name.to_string(),
|
||||
bucket: bucket.to_string(),
|
||||
object: object.to_string(),
|
||||
status: "OK".to_string(),
|
||||
status_code: 200,
|
||||
time_to_first_byte: "10ms".to_string(),
|
||||
time_to_response: "50ms".to_string(),
|
||||
},
|
||||
remote_host: "127.0.0.1".to_string(),
|
||||
request_id: Uuid::new_v4().to_string(),
|
||||
user_agent: "Rust-Client/1.0".to_string(),
|
||||
access_key: "minioadmin".to_string(),
|
||||
tags: HashMap::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
13
crates/audit-logger/src/logger/factory.rs
Normal file
@@ -0,0 +1,13 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
36
crates/audit-logger/src/logger/mod.rs
Normal file
@@ -0,0 +1,36 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
pub mod config;
|
||||
pub mod dispatch;
|
||||
pub mod entry;
|
||||
pub mod factory;
|
||||
|
||||
use async_trait::async_trait;
|
||||
use std::error::Error;
|
||||
|
||||
/// General Log Target Trait
|
||||
#[async_trait]
|
||||
pub trait Target: Send + Sync {
|
||||
/// Send a single logizable entry
|
||||
async fn send(&self, entry: Box<Self>) -> Result<(), Box<dyn Error + Send>>;
|
||||
|
||||
/// Returns the unique name of the target
|
||||
fn name(&self) -> &str;
|
||||
|
||||
/// Close target gracefully, ensuring all buffered logs are processed
|
||||
async fn shutdown(&self);
|
||||
}
|
||||
@@ -31,8 +31,9 @@ const-str = { workspace = true, optional = true }
|
||||
workspace = true
|
||||
|
||||
[features]
|
||||
default = []
|
||||
default = ["constants"]
|
||||
audit = ["dep:const-str", "constants"]
|
||||
constants = ["dep:const-str"]
|
||||
notify = ["dep:const-str"]
|
||||
observability = []
|
||||
notify = ["dep:const-str", "constants"]
|
||||
observability = ["constants"]
|
||||
|
||||
|
||||
31
crates/config/src/audit/mod.rs
Normal file
@@ -0,0 +1,31 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Audit configuration module
|
||||
//! //! This module defines the configuration for audit systems, including
|
||||
//! webhook and other audit-related settings.
|
||||
pub const AUDIT_WEBHOOK_SUB_SYS: &str = "audit_webhook";
|
||||
|
||||
pub const AUDIT_STORE_EXTENSION: &str = ".audit";
|
||||
|
||||
pub const WEBHOOK_ENDPOINT: &str = "endpoint";
|
||||
pub const WEBHOOK_AUTH_TOKEN: &str = "auth_token";
|
||||
pub const WEBHOOK_CLIENT_CERT: &str = "client_cert";
|
||||
pub const WEBHOOK_CLIENT_KEY: &str = "client_key";
|
||||
pub const WEBHOOK_BATCH_SIZE: &str = "batch_size";
|
||||
pub const WEBHOOK_QUEUE_SIZE: &str = "queue_size";
|
||||
pub const WEBHOOK_QUEUE_DIR: &str = "queue_dir";
|
||||
pub const WEBHOOK_MAX_RETRY: &str = "max_retry";
|
||||
pub const WEBHOOK_RETRY_INTERVAL: &str = "retry_interval";
|
||||
pub const WEBHOOK_HTTP_TIMEOUT: &str = "http_timeout";
|
||||
@@ -16,6 +16,13 @@ pub const DEFAULT_DELIMITER: &str = "_";
|
||||
pub const ENV_PREFIX: &str = "RUSTFS_";
|
||||
pub const ENV_WORD_DELIMITER: &str = "_";
|
||||
|
||||
pub const DEFAULT_DIR: &str = "/opt/rustfs/events"; // Default directory for event store
|
||||
pub const DEFAULT_LIMIT: u64 = 100000; // Default store limit
|
||||
|
||||
/// Standard config keys and values.
|
||||
pub const ENABLE_KEY: &str = "enable";
|
||||
pub const COMMENT_KEY: &str = "comment";
|
||||
|
||||
/// Medium-drawn lines separator
|
||||
/// This is used to separate words in environment variable names.
|
||||
pub const ENV_WORD_DELIMITER_DASH: &str = "-";
|
||||
|
||||
@@ -20,6 +20,8 @@ pub use constants::app::*;
|
||||
pub use constants::env::*;
|
||||
#[cfg(feature = "constants")]
|
||||
pub use constants::tls::*;
|
||||
#[cfg(feature = "audit")]
|
||||
pub mod audit;
|
||||
#[cfg(feature = "notify")]
|
||||
pub mod notify;
|
||||
#[cfg(feature = "observability")]
|
||||
|
||||
@@ -29,14 +29,6 @@ pub const NOTIFY_PREFIX: &str = "notify";
|
||||
|
||||
pub const NOTIFY_ROUTE_PREFIX: &str = const_str::concat!(NOTIFY_PREFIX, "_");
|
||||
|
||||
/// Standard config keys and values.
|
||||
pub const ENABLE_KEY: &str = "enable";
|
||||
pub const COMMENT_KEY: &str = "comment";
|
||||
|
||||
/// Enable values
|
||||
pub const ENABLE_ON: &str = "on";
|
||||
pub const ENABLE_OFF: &str = "off";
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub const NOTIFY_SUB_SYSTEMS: &[&str] = &[NOTIFY_MQTT_SUB_SYS, NOTIFY_WEBHOOK_SUB_SYS];
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::notify::{COMMENT_KEY, ENABLE_KEY};
|
||||
use crate::{COMMENT_KEY, ENABLE_KEY};
|
||||
|
||||
// MQTT Keys
|
||||
pub const MQTT_BROKER: &str = "broker";
|
||||
|
||||
@@ -12,8 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub const DEFAULT_DIR: &str = "/opt/rustfs/events"; // Default directory for event store
|
||||
pub const DEFAULT_LIMIT: u64 = 100000; // Default store limit
|
||||
pub const DEFAULT_EXT: &str = ".unknown"; // Default file extension
|
||||
pub const COMPRESS_EXT: &str = ".snappy"; // Extension for compressed files
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::notify::{COMMENT_KEY, ENABLE_KEY};
|
||||
use crate::{COMMENT_KEY, ENABLE_KEY};
|
||||
|
||||
// Webhook Keys
|
||||
pub const WEBHOOK_ENDPOINT: &str = "endpoint";
|
||||
|
||||
347
crates/e2e_test/src/reliant/conditional_writes.rs
Normal file
@@ -0,0 +1,347 @@
|
||||
#![cfg(test)]
|
||||
|
||||
use aws_config::meta::region::RegionProviderChain;
|
||||
use aws_sdk_s3::Client;
|
||||
use aws_sdk_s3::config::{Credentials, Region};
|
||||
use aws_sdk_s3::error::SdkError;
|
||||
use aws_sdk_s3::types::{CompletedMultipartUpload, CompletedPart};
|
||||
use bytes::Bytes;
|
||||
use serial_test::serial;
|
||||
use std::error::Error;
|
||||
|
||||
const ENDPOINT: &str = "http://localhost:9000";
|
||||
const ACCESS_KEY: &str = "rustfsadmin";
|
||||
const SECRET_KEY: &str = "rustfsadmin";
|
||||
const BUCKET: &str = "api-test";
|
||||
|
||||
async fn create_aws_s3_client() -> Result<Client, Box<dyn Error>> {
|
||||
let region_provider = RegionProviderChain::default_provider().or_else(Region::new("us-east-1"));
|
||||
let shared_config = aws_config::defaults(aws_config::BehaviorVersion::latest())
|
||||
.region(region_provider)
|
||||
.credentials_provider(Credentials::new(ACCESS_KEY, SECRET_KEY, None, None, "static"))
|
||||
.endpoint_url(ENDPOINT)
|
||||
.load()
|
||||
.await;
|
||||
|
||||
let client = Client::from_conf(
|
||||
aws_sdk_s3::Config::from(&shared_config)
|
||||
.to_builder()
|
||||
.force_path_style(true)
|
||||
.build(),
|
||||
);
|
||||
Ok(client)
|
||||
}
|
||||
|
||||
/// Setup test bucket, creating it if it doesn't exist
|
||||
async fn setup_test_bucket(client: &Client) -> Result<(), Box<dyn Error>> {
|
||||
match client.create_bucket().bucket(BUCKET).send().await {
|
||||
Ok(_) => {}
|
||||
Err(SdkError::ServiceError(e)) => {
|
||||
let e = e.into_err();
|
||||
let error_code = e.meta().code().unwrap_or("");
|
||||
if !error_code.eq("BucketAlreadyExists") {
|
||||
return Err(e.into());
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
return Err(e.into());
|
||||
}
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Generate test data of specified size
|
||||
fn generate_test_data(size: usize) -> Vec<u8> {
|
||||
let pattern = b"0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz";
|
||||
let mut data = Vec::with_capacity(size);
|
||||
for i in 0..size {
|
||||
data.push(pattern[i % pattern.len()]);
|
||||
}
|
||||
data
|
||||
}
|
||||
|
||||
/// Upload an object and return its ETag
|
||||
async fn upload_object_with_metadata(client: &Client, bucket: &str, key: &str, data: &[u8]) -> Result<String, Box<dyn Error>> {
|
||||
let response = client
|
||||
.put_object()
|
||||
.bucket(bucket)
|
||||
.key(key)
|
||||
.body(Bytes::from(data.to_vec()).into())
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
let etag = response.e_tag().unwrap_or("").to_string();
|
||||
Ok(etag)
|
||||
}
|
||||
|
||||
/// Cleanup test objects from bucket
|
||||
async fn cleanup_objects(client: &Client, bucket: &str, keys: &[&str]) {
|
||||
for key in keys {
|
||||
let _ = client.delete_object().bucket(bucket).key(*key).send().await;
|
||||
}
|
||||
}
|
||||
|
||||
/// Generate unique test object key
|
||||
fn generate_test_key(prefix: &str) -> String {
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
let timestamp = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_nanos();
|
||||
format!("{prefix}-{timestamp}")
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
#[ignore = "requires running RustFS server at localhost:9000"]
|
||||
async fn test_conditional_put_okay() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let client = create_aws_s3_client().await?;
|
||||
setup_test_bucket(&client).await?;
|
||||
|
||||
let test_key = generate_test_key("conditional-put-ok");
|
||||
let initial_data = generate_test_data(1024); // 1KB test data
|
||||
let updated_data = generate_test_data(2048); // 2KB updated data
|
||||
|
||||
// Upload initial object and get its ETag
|
||||
let initial_etag = upload_object_with_metadata(&client, BUCKET, &test_key, &initial_data).await?;
|
||||
|
||||
// Test 1: PUT with matching If-Match condition (should succeed)
|
||||
let response1 = client
|
||||
.put_object()
|
||||
.bucket(BUCKET)
|
||||
.key(&test_key)
|
||||
.body(Bytes::from(updated_data.clone()).into())
|
||||
.if_match(&initial_etag)
|
||||
.send()
|
||||
.await;
|
||||
assert!(response1.is_ok(), "PUT with matching If-Match should succeed");
|
||||
|
||||
// Test 2: PUT with non-matching If-None-Match condition (should succeed)
|
||||
let fake_etag = "\"fake-etag-12345\"";
|
||||
let response2 = client
|
||||
.put_object()
|
||||
.bucket(BUCKET)
|
||||
.key(&test_key)
|
||||
.body(Bytes::from(updated_data.clone()).into())
|
||||
.if_none_match(fake_etag)
|
||||
.send()
|
||||
.await;
|
||||
assert!(response2.is_ok(), "PUT with non-matching If-None-Match should succeed");
|
||||
|
||||
// Cleanup
|
||||
cleanup_objects(&client, BUCKET, &[&test_key]).await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
#[ignore = "requires running RustFS server at localhost:9000"]
|
||||
async fn test_conditional_put_failed() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let client = create_aws_s3_client().await?;
|
||||
setup_test_bucket(&client).await?;
|
||||
|
||||
let test_key = generate_test_key("conditional-put-failed");
|
||||
let initial_data = generate_test_data(1024);
|
||||
let updated_data = generate_test_data(2048);
|
||||
|
||||
// Upload initial object and get its ETag
|
||||
let initial_etag = upload_object_with_metadata(&client, BUCKET, &test_key, &initial_data).await?;
|
||||
|
||||
// Test 1: PUT with non-matching If-Match condition (should fail with 412)
|
||||
let fake_etag = "\"fake-etag-should-not-match\"";
|
||||
let response1 = client
|
||||
.put_object()
|
||||
.bucket(BUCKET)
|
||||
.key(&test_key)
|
||||
.body(Bytes::from(updated_data.clone()).into())
|
||||
.if_match(fake_etag)
|
||||
.send()
|
||||
.await;
|
||||
|
||||
assert!(response1.is_err(), "PUT with non-matching If-Match should fail");
|
||||
if let Err(e) = response1 {
|
||||
if let SdkError::ServiceError(e) = e {
|
||||
let e = e.into_err();
|
||||
let error_code = e.meta().code().unwrap_or("");
|
||||
assert_eq!("PreconditionFailed", error_code);
|
||||
} else {
|
||||
panic!("Unexpected error: {e:?}");
|
||||
}
|
||||
}
|
||||
|
||||
// Test 2: PUT with matching If-None-Match condition (should fail with 412)
|
||||
let response2 = client
|
||||
.put_object()
|
||||
.bucket(BUCKET)
|
||||
.key(&test_key)
|
||||
.body(Bytes::from(updated_data.clone()).into())
|
||||
.if_none_match(&initial_etag)
|
||||
.send()
|
||||
.await;
|
||||
|
||||
assert!(response2.is_err(), "PUT with matching If-None-Match should fail");
|
||||
if let Err(e) = response2 {
|
||||
if let SdkError::ServiceError(e) = e {
|
||||
let e = e.into_err();
|
||||
let error_code = e.meta().code().unwrap_or("");
|
||||
assert_eq!("PreconditionFailed", error_code);
|
||||
} else {
|
||||
panic!("Unexpected error: {e:?}");
|
||||
}
|
||||
}
|
||||
|
||||
// Cleanup - only need to clean up the initial object since failed PUTs shouldn't create objects
|
||||
cleanup_objects(&client, BUCKET, &[&test_key]).await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
#[ignore = "requires running RustFS server at localhost:9000"]
|
||||
async fn test_conditional_put_when_object_does_not_exist() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let client = create_aws_s3_client().await?;
|
||||
setup_test_bucket(&client).await?;
|
||||
|
||||
let key = "some_key";
|
||||
cleanup_objects(&client, BUCKET, &[key]).await;
|
||||
|
||||
// When the object does not exist, the If-Match condition should always fail
|
||||
let response1 = client
|
||||
.put_object()
|
||||
.bucket(BUCKET)
|
||||
.key(key)
|
||||
.body(Bytes::from(generate_test_data(1024)).into())
|
||||
.if_match("*")
|
||||
.send()
|
||||
.await;
|
||||
assert!(response1.is_err());
|
||||
if let Err(e) = response1 {
|
||||
if let SdkError::ServiceError(e) = e {
|
||||
let e = e.into_err();
|
||||
let error_code = e.meta().code().unwrap_or("");
|
||||
assert_eq!("NoSuchKey", error_code);
|
||||
} else {
|
||||
panic!("Unexpected error: {e:?}");
|
||||
}
|
||||
}
|
||||
|
||||
// When the object does not exist, the If-None-Match condition should be able to succeed
|
||||
let response2 = client
|
||||
.put_object()
|
||||
.bucket(BUCKET)
|
||||
.key(key)
|
||||
.body(Bytes::from(generate_test_data(1024)).into())
|
||||
.if_none_match("*")
|
||||
.send()
|
||||
.await;
|
||||
assert!(response2.is_ok());
|
||||
|
||||
cleanup_objects(&client, BUCKET, &[key]).await;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
#[ignore = "requires running RustFS server at localhost:9000"]
|
||||
async fn test_conditional_multi_part_upload() -> Result<(), Box<dyn std::error::Error>> {
|
||||
let client = create_aws_s3_client().await?;
|
||||
setup_test_bucket(&client).await?;
|
||||
|
||||
let test_key = generate_test_key("multipart-upload-ok");
|
||||
let test_data = generate_test_data(1024);
|
||||
let initial_etag = upload_object_with_metadata(&client, BUCKET, &test_key, &test_data).await?;
|
||||
|
||||
let part_size = 5 * 1024 * 1024; // 5MB per part (minimum for multipart)
|
||||
let num_parts = 3;
|
||||
let mut parts = Vec::new();
|
||||
|
||||
// Initiate multipart upload
|
||||
let initiate_response = client.create_multipart_upload().bucket(BUCKET).key(&test_key).send().await?;
|
||||
|
||||
let upload_id = initiate_response
|
||||
.upload_id()
|
||||
.ok_or(std::io::Error::other("No upload ID returned"))?;
|
||||
|
||||
// Upload parts
|
||||
for part_number in 1..=num_parts {
|
||||
let part_data = generate_test_data(part_size);
|
||||
|
||||
let upload_part_response = client
|
||||
.upload_part()
|
||||
.bucket(BUCKET)
|
||||
.key(&test_key)
|
||||
.upload_id(upload_id)
|
||||
.part_number(part_number)
|
||||
.body(Bytes::from(part_data).into())
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
let part_etag = upload_part_response
|
||||
.e_tag()
|
||||
.ok_or(std::io::Error::other("Do not have etag"))?
|
||||
.to_string();
|
||||
|
||||
let completed_part = CompletedPart::builder().part_number(part_number).e_tag(part_etag).build();
|
||||
|
||||
parts.push(completed_part);
|
||||
}
|
||||
|
||||
// Complete multipart upload
|
||||
let completed_upload = CompletedMultipartUpload::builder().set_parts(Some(parts)).build();
|
||||
|
||||
// Test 1: Multipart upload with wildcard If-None-Match, should fail
|
||||
let complete_response = client
|
||||
.complete_multipart_upload()
|
||||
.bucket(BUCKET)
|
||||
.key(&test_key)
|
||||
.upload_id(upload_id)
|
||||
.multipart_upload(completed_upload.clone())
|
||||
.if_none_match("*")
|
||||
.send()
|
||||
.await;
|
||||
|
||||
assert!(complete_response.is_err());
|
||||
|
||||
// Test 2: Multipart upload with matching If-None-Match, should fail
|
||||
let complete_response = client
|
||||
.complete_multipart_upload()
|
||||
.bucket(BUCKET)
|
||||
.key(&test_key)
|
||||
.upload_id(upload_id)
|
||||
.multipart_upload(completed_upload.clone())
|
||||
.if_none_match(initial_etag.clone())
|
||||
.send()
|
||||
.await;
|
||||
|
||||
assert!(complete_response.is_err());
|
||||
|
||||
// Test 3: Multipart upload with unmatching If-Match, should fail
|
||||
let complete_response = client
|
||||
.complete_multipart_upload()
|
||||
.bucket(BUCKET)
|
||||
.key(&test_key)
|
||||
.upload_id(upload_id)
|
||||
.multipart_upload(completed_upload.clone())
|
||||
.if_match("\"abcdef\"")
|
||||
.send()
|
||||
.await;
|
||||
|
||||
assert!(complete_response.is_err());
|
||||
|
||||
// Test 4: Multipart upload with matching If-Match, should succeed
|
||||
let complete_response = client
|
||||
.complete_multipart_upload()
|
||||
.bucket(BUCKET)
|
||||
.key(&test_key)
|
||||
.upload_id(upload_id)
|
||||
.multipart_upload(completed_upload.clone())
|
||||
.if_match(initial_etag)
|
||||
.send()
|
||||
.await;
|
||||
|
||||
assert!(complete_response.is_ok());
|
||||
|
||||
// Cleanup
|
||||
cleanup_objects(&client, BUCKET, &[&test_key]).await;
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod conditional_writes;
|
||||
mod lifecycle;
|
||||
mod lock;
|
||||
mod node_interact_test;
|
||||
|
||||
@@ -34,7 +34,7 @@ workspace = true
|
||||
default = []
|
||||
|
||||
[dependencies]
|
||||
rustfs-config = { workspace = true, features = ["constants", "notify"] }
|
||||
rustfs-config = { workspace = true, features = ["constants", "notify", "audit"] }
|
||||
async-trait.workspace = true
|
||||
bytes.workspace = true
|
||||
byteorder = { workspace = true }
|
||||
@@ -100,6 +100,7 @@ rustfs-rio.workspace = true
|
||||
rustfs-signer.workspace = true
|
||||
rustfs-checksums.workspace = true
|
||||
futures-util.workspace = true
|
||||
async-recursion.workspace = true
|
||||
|
||||
[target.'cfg(not(windows))'.dependencies]
|
||||
nix = { workspace = true }
|
||||
|
||||
@@ -317,7 +317,7 @@ impl TransitionClient {
|
||||
//}
|
||||
|
||||
let mut retry_timer = RetryTimer::new(req_retry, DEFAULT_RETRY_UNIT, DEFAULT_RETRY_CAP, MAX_JITTER, self.random);
|
||||
while let Some(v) = retry_timer.next().await {
|
||||
while retry_timer.next().await.is_some() {
|
||||
let req = self.new_request(&method, metadata).await?;
|
||||
|
||||
resp = self.doit(req).await?;
|
||||
@@ -569,7 +569,16 @@ impl TransitionClient {
|
||||
}
|
||||
|
||||
pub fn is_virtual_host_style_request(&self, url: &Url, bucket_name: &str) -> bool {
|
||||
if bucket_name == "" {
|
||||
// Contract:
|
||||
// - return true if we should use virtual-hosted-style addressing (bucket as subdomain)
|
||||
// Heuristics (aligned with AWS S3/MinIO clients):
|
||||
// - explicit DNS mode => true
|
||||
// - explicit PATH mode => false
|
||||
// - AUTO:
|
||||
// - bucket must be non-empty and DNS compatible
|
||||
// - endpoint host must be a DNS name (not an IPv4/IPv6 literal)
|
||||
// - when using TLS (https), buckets with dots are avoided due to wildcard/cert issues
|
||||
if bucket_name.is_empty() {
|
||||
return false;
|
||||
}
|
||||
|
||||
|
||||
@@ -728,7 +728,7 @@ impl ReplicationPool {
|
||||
// Either already satisfied or worker count changed while waiting for the lock.
|
||||
return;
|
||||
}
|
||||
println!("2 resize_lrg_workers");
|
||||
debug!("Resizing large workers pool");
|
||||
|
||||
let active_workers = Arc::clone(&self.active_lrg_workers);
|
||||
let obj_layer = Arc::clone(&self.obj_layer);
|
||||
@@ -743,7 +743,7 @@ impl ReplicationPool {
|
||||
|
||||
tokio::spawn(async move {
|
||||
while let Some(operation) = receiver.recv().await {
|
||||
println!("resize workers 1");
|
||||
debug!("Processing replication operation in worker");
|
||||
active_workers_clone.fetch_add(1, Ordering::SeqCst);
|
||||
|
||||
if let Some(info) = operation.as_any().downcast_ref::<ReplicateObjectInfo>() {
|
||||
|
||||
84
crates/ecstore/src/config/audit.rs
Normal file
@@ -0,0 +1,84 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::config::{KV, KVS};
|
||||
use rustfs_config::audit::{
|
||||
WEBHOOK_AUTH_TOKEN, WEBHOOK_BATCH_SIZE, WEBHOOK_CLIENT_CERT, WEBHOOK_CLIENT_KEY, WEBHOOK_ENDPOINT, WEBHOOK_HTTP_TIMEOUT,
|
||||
WEBHOOK_MAX_RETRY, WEBHOOK_QUEUE_DIR, WEBHOOK_QUEUE_SIZE, WEBHOOK_RETRY_INTERVAL,
|
||||
};
|
||||
use rustfs_config::{DEFAULT_DIR, DEFAULT_LIMIT, ENABLE_KEY, EnableState};
|
||||
use std::sync::LazyLock;
|
||||
|
||||
#[allow(dead_code)]
|
||||
#[allow(clippy::declare_interior_mutable_const)]
|
||||
/// Default KVS for audit webhook settings.
|
||||
pub const DEFAULT_AUDIT_WEBHOOK_KVS: LazyLock<KVS> = LazyLock::new(|| {
|
||||
KVS(vec![
|
||||
KV {
|
||||
key: ENABLE_KEY.to_owned(),
|
||||
value: EnableState::Off.to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: WEBHOOK_ENDPOINT.to_owned(),
|
||||
value: "".to_owned(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: WEBHOOK_AUTH_TOKEN.to_owned(),
|
||||
value: "".to_owned(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: WEBHOOK_CLIENT_CERT.to_owned(),
|
||||
value: "".to_owned(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: WEBHOOK_CLIENT_KEY.to_owned(),
|
||||
value: "".to_owned(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: WEBHOOK_BATCH_SIZE.to_owned(),
|
||||
value: "1".to_owned(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: WEBHOOK_QUEUE_SIZE.to_owned(),
|
||||
value: DEFAULT_LIMIT.to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: WEBHOOK_QUEUE_DIR.to_owned(),
|
||||
value: DEFAULT_DIR.to_owned(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: WEBHOOK_MAX_RETRY.to_owned(),
|
||||
value: "0".to_owned(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: WEBHOOK_RETRY_INTERVAL.to_owned(),
|
||||
value: "3s".to_owned(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
key: WEBHOOK_HTTP_TIMEOUT.to_owned(),
|
||||
value: "5s".to_owned(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
])
|
||||
});
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod audit;
|
||||
pub mod com;
|
||||
#[allow(dead_code)]
|
||||
pub mod heal;
|
||||
@@ -21,8 +22,9 @@ pub mod storageclass;
|
||||
use crate::error::Result;
|
||||
use crate::store::ECStore;
|
||||
use com::{STORAGE_CLASS_SUB_SYS, lookup_configs, read_config_without_migrate};
|
||||
use rustfs_config::COMMENT_KEY;
|
||||
use rustfs_config::DEFAULT_DELIMITER;
|
||||
use rustfs_config::notify::{COMMENT_KEY, NOTIFY_MQTT_SUB_SYS, NOTIFY_WEBHOOK_SUB_SYS};
|
||||
use rustfs_config::notify::{NOTIFY_MQTT_SUB_SYS, NOTIFY_WEBHOOK_SUB_SYS};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::LazyLock;
|
||||
|
||||
@@ -14,10 +14,11 @@
|
||||
|
||||
use crate::config::{KV, KVS};
|
||||
use rustfs_config::notify::{
|
||||
COMMENT_KEY, DEFAULT_DIR, DEFAULT_LIMIT, ENABLE_KEY, ENABLE_OFF, MQTT_BROKER, MQTT_KEEP_ALIVE_INTERVAL, MQTT_PASSWORD,
|
||||
MQTT_QOS, MQTT_QUEUE_DIR, MQTT_QUEUE_LIMIT, MQTT_RECONNECT_INTERVAL, MQTT_TOPIC, MQTT_USERNAME, WEBHOOK_AUTH_TOKEN,
|
||||
WEBHOOK_CLIENT_CERT, WEBHOOK_CLIENT_KEY, WEBHOOK_ENDPOINT, WEBHOOK_QUEUE_DIR, WEBHOOK_QUEUE_LIMIT,
|
||||
MQTT_BROKER, MQTT_KEEP_ALIVE_INTERVAL, MQTT_PASSWORD, MQTT_QOS, MQTT_QUEUE_DIR, MQTT_QUEUE_LIMIT, MQTT_RECONNECT_INTERVAL,
|
||||
MQTT_TOPIC, MQTT_USERNAME, WEBHOOK_AUTH_TOKEN, WEBHOOK_CLIENT_CERT, WEBHOOK_CLIENT_KEY, WEBHOOK_ENDPOINT, WEBHOOK_QUEUE_DIR,
|
||||
WEBHOOK_QUEUE_LIMIT,
|
||||
};
|
||||
use rustfs_config::{COMMENT_KEY, DEFAULT_DIR, DEFAULT_LIMIT, ENABLE_KEY, EnableState};
|
||||
use std::sync::LazyLock;
|
||||
|
||||
/// The default configuration collection of webhooks,
|
||||
@@ -26,7 +27,7 @@ pub static DEFAULT_WEBHOOK_KVS: LazyLock<KVS> = LazyLock::new(|| {
|
||||
KVS(vec![
|
||||
KV {
|
||||
key: ENABLE_KEY.to_owned(),
|
||||
value: ENABLE_OFF.to_owned(),
|
||||
value: EnableState::Off.to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
@@ -73,7 +74,7 @@ pub static DEFAULT_MQTT_KVS: LazyLock<KVS> = LazyLock::new(|| {
|
||||
KVS(vec![
|
||||
KV {
|
||||
key: ENABLE_KEY.to_owned(),
|
||||
value: ENABLE_OFF.to_owned(),
|
||||
value: EnableState::Off.to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
|
||||
@@ -440,6 +440,7 @@ impl LocalDisk {
|
||||
}
|
||||
|
||||
#[tracing::instrument(level = "debug", skip(self))]
|
||||
#[async_recursion::async_recursion]
|
||||
pub async fn delete_file(
|
||||
&self,
|
||||
base_path: &PathBuf,
|
||||
@@ -803,13 +804,17 @@ impl LocalDisk {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn scan_dir<W: AsyncWrite + Unpin>(
|
||||
#[async_recursion::async_recursion]
|
||||
async fn scan_dir<W>(
|
||||
&self,
|
||||
current: &mut String,
|
||||
opts: &WalkDirOptions,
|
||||
out: &mut MetacacheWriter<W>,
|
||||
objs_returned: &mut i32,
|
||||
) -> Result<()> {
|
||||
) -> Result<()>
|
||||
where
|
||||
W: AsyncWrite + Unpin + Send,
|
||||
{
|
||||
let forward = {
|
||||
opts.forward_to.as_ref().filter(|v| v.starts_with(&*current)).map(|v| {
|
||||
let forward = v.trim_start_matches(&*current);
|
||||
|
||||
@@ -187,6 +187,9 @@ pub enum StorageError {
|
||||
|
||||
#[error("Lock error: {0}")]
|
||||
Lock(#[from] rustfs_lock::LockError),
|
||||
|
||||
#[error("Precondition failed")]
|
||||
PreconditionFailed,
|
||||
}
|
||||
|
||||
impl StorageError {
|
||||
@@ -416,6 +419,7 @@ impl Clone for StorageError {
|
||||
StorageError::Lock(e) => StorageError::Lock(e.clone()),
|
||||
StorageError::InsufficientReadQuorum(a, b) => StorageError::InsufficientReadQuorum(a.clone(), b.clone()),
|
||||
StorageError::InsufficientWriteQuorum(a, b) => StorageError::InsufficientWriteQuorum(a.clone(), b.clone()),
|
||||
StorageError::PreconditionFailed => StorageError::PreconditionFailed,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -481,6 +485,7 @@ impl StorageError {
|
||||
StorageError::Lock(_) => 0x38,
|
||||
StorageError::InsufficientReadQuorum(_, _) => 0x39,
|
||||
StorageError::InsufficientWriteQuorum(_, _) => 0x3A,
|
||||
StorageError::PreconditionFailed => 0x3B,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -548,6 +553,7 @@ impl StorageError {
|
||||
0x38 => Some(StorageError::Lock(rustfs_lock::LockError::internal("Generic lock error".to_string()))),
|
||||
0x39 => Some(StorageError::InsufficientReadQuorum(Default::default(), Default::default())),
|
||||
0x3A => Some(StorageError::InsufficientWriteQuorum(Default::default(), Default::default())),
|
||||
0x3B => Some(StorageError::PreconditionFailed),
|
||||
_ => None,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -177,15 +177,17 @@ impl S3PeerSys {
|
||||
let pools = cli.get_pools();
|
||||
let idx = i;
|
||||
if pools.unwrap_or_default().contains(&idx) {
|
||||
per_pool_errs.push(errors[j].as_ref());
|
||||
per_pool_errs.push(errors[j].clone());
|
||||
}
|
||||
|
||||
// TODO: reduceWriteQuorumErrs
|
||||
if let Some(pool_err) =
|
||||
reduce_write_quorum_errs(&per_pool_errs, BUCKET_OP_IGNORED_ERRS, (per_pool_errs.len() / 2) + 1)
|
||||
{
|
||||
return Err(pool_err);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// TODO:
|
||||
|
||||
Ok(())
|
||||
}
|
||||
pub async fn list_bucket(&self, opts: &BucketOptions) -> Result<Vec<BucketInfo>> {
|
||||
@@ -387,7 +389,6 @@ impl PeerS3Client for LocalPeerS3Client {
|
||||
if opts.force_create && matches!(e, Error::VolumeExists) {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
Err(e)
|
||||
}
|
||||
}
|
||||
@@ -405,7 +406,9 @@ impl PeerS3Client for LocalPeerS3Client {
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: reduceWriteQuorumErrs
|
||||
if let Some(err) = reduce_write_quorum_errs(&errs, BUCKET_OP_IGNORED_ERRS, (local_disks.len() / 2) + 1) {
|
||||
return Err(err);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -61,6 +61,7 @@ use glob::Pattern;
|
||||
use http::HeaderMap;
|
||||
use md5::{Digest as Md5Digest, Md5};
|
||||
use rand::{Rng, seq::SliceRandom};
|
||||
use regex::Regex;
|
||||
use rustfs_common::heal_channel::{DriveState, HealChannelPriority, HealItemType, HealOpts, HealScanMode, send_heal_disk};
|
||||
use rustfs_filemeta::headers::RESERVED_METADATA_PREFIX_LOWER;
|
||||
use rustfs_filemeta::{
|
||||
@@ -3218,6 +3219,44 @@ impl SetDisks {
|
||||
obj?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn check_write_precondition(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Option<StorageError> {
|
||||
let mut opts = opts.clone();
|
||||
|
||||
let http_preconditions = opts.http_preconditions?;
|
||||
opts.http_preconditions = None;
|
||||
|
||||
// Never claim a lock here, to avoid deadlock
|
||||
// - If no_lock is false, we must have obtained the lock out side of this function
|
||||
// - If no_lock is true, we should not obtain locks
|
||||
opts.no_lock = true;
|
||||
let oi = self.get_object_info(bucket, object, &opts).await;
|
||||
|
||||
match oi {
|
||||
Ok(oi) => {
|
||||
if should_prevent_write(&oi, http_preconditions.if_none_match, http_preconditions.if_match) {
|
||||
return Some(StorageError::PreconditionFailed);
|
||||
}
|
||||
}
|
||||
|
||||
Err(StorageError::VersionNotFound(_, _, _))
|
||||
| Err(StorageError::ObjectNotFound(_, _))
|
||||
| Err(StorageError::ErasureReadQuorum) => {
|
||||
// When the object is not found,
|
||||
// - if If-Match is set, we should return 404 NotFound
|
||||
// - if If-None-Match is set, we should be able to proceed with the request
|
||||
if http_preconditions.if_match.is_some() {
|
||||
return Some(StorageError::ObjectNotFound(bucket.to_string(), object.to_string()));
|
||||
}
|
||||
}
|
||||
|
||||
Err(e) => {
|
||||
return Some(e);
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait::async_trait]
|
||||
@@ -3335,6 +3374,12 @@ impl ObjectIO for SetDisks {
|
||||
_object_lock_guard = guard_opt;
|
||||
}
|
||||
|
||||
if let Some(http_preconditions) = opts.http_preconditions.clone() {
|
||||
if let Some(err) = self.check_write_precondition(bucket, object, opts).await {
|
||||
return Err(err);
|
||||
}
|
||||
}
|
||||
|
||||
let mut user_defined = opts.user_defined.clone();
|
||||
|
||||
let sc_parity_drives = {
|
||||
@@ -5123,6 +5168,26 @@ impl StorageAPI for SetDisks {
|
||||
let disks = disks.clone();
|
||||
// let disks = Self::shuffle_disks(&disks, &fi.erasure.distribution);
|
||||
|
||||
// Acquire per-object exclusive lock via RAII guard. It auto-releases asynchronously on drop.
|
||||
let mut _object_lock_guard: Option<rustfs_lock::LockGuard> = None;
|
||||
if let Some(http_preconditions) = opts.http_preconditions.clone() {
|
||||
if !opts.no_lock {
|
||||
let guard_opt = self
|
||||
.namespace_lock
|
||||
.lock_guard(object, &self.locker_owner, Duration::from_secs(5), Duration::from_secs(10))
|
||||
.await?;
|
||||
|
||||
if guard_opt.is_none() {
|
||||
return Err(Error::other("can not get lock. please retry".to_string()));
|
||||
}
|
||||
_object_lock_guard = guard_opt;
|
||||
}
|
||||
|
||||
if let Some(err) = self.check_write_precondition(bucket, object, opts).await {
|
||||
return Err(err);
|
||||
}
|
||||
}
|
||||
|
||||
let part_path = format!("{}/{}/", upload_id_path, fi.data_dir.unwrap_or(Uuid::nil()));
|
||||
|
||||
let part_meta_paths = uploaded_parts
|
||||
@@ -5942,13 +6007,45 @@ fn get_complete_multipart_md5(parts: &[CompletePart]) -> String {
|
||||
format!("{:x}-{}", hasher.finalize(), parts.len())
|
||||
}
|
||||
|
||||
pub fn canonicalize_etag(etag: &str) -> String {
|
||||
let re = Regex::new("\"*?([^\"]*?)\"*?$").unwrap();
|
||||
re.replace_all(etag, "$1").to_string()
|
||||
}
|
||||
|
||||
pub fn e_tag_matches(etag: &str, condition: &str) -> bool {
|
||||
if condition.trim() == "*" {
|
||||
return true;
|
||||
}
|
||||
canonicalize_etag(etag) == canonicalize_etag(condition)
|
||||
}
|
||||
|
||||
pub fn should_prevent_write(oi: &ObjectInfo, if_none_match: Option<String>, if_match: Option<String>) -> bool {
|
||||
match &oi.etag {
|
||||
Some(etag) => {
|
||||
if let Some(if_none_match) = if_none_match {
|
||||
if e_tag_matches(etag, &if_none_match) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
if let Some(if_match) = if_match {
|
||||
if !e_tag_matches(etag, &if_match) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
false
|
||||
}
|
||||
// If we can't obtain the etag of the object, perevent the write only when we have at least one condition
|
||||
None => if_none_match.is_some() || if_match.is_some(),
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::disk::CHECK_PART_UNKNOWN;
|
||||
use crate::disk::CHECK_PART_VOLUME_NOT_FOUND;
|
||||
use crate::disk::error::DiskError;
|
||||
use crate::store_api::CompletePart;
|
||||
use crate::store_api::{CompletePart, ObjectInfo};
|
||||
use rustfs_filemeta::ErasureInfo;
|
||||
use std::collections::HashMap;
|
||||
use time::OffsetDateTime;
|
||||
@@ -6373,4 +6470,62 @@ mod tests {
|
||||
assert_eq!(result2.len(), 3);
|
||||
assert!(result2.iter().all(|d| d.is_none()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_etag_matches() {
|
||||
assert!(e_tag_matches("abc", "abc"));
|
||||
assert!(e_tag_matches("\"abc\"", "abc"));
|
||||
assert!(e_tag_matches("\"abc\"", "*"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_should_prevent_write() {
|
||||
let oi = ObjectInfo {
|
||||
etag: Some("abc".to_string()),
|
||||
..Default::default()
|
||||
};
|
||||
let if_none_match = Some("abc".to_string());
|
||||
let if_match = None;
|
||||
assert!(should_prevent_write(&oi, if_none_match, if_match));
|
||||
|
||||
let if_none_match = Some("*".to_string());
|
||||
let if_match = None;
|
||||
assert!(should_prevent_write(&oi, if_none_match, if_match));
|
||||
|
||||
let if_none_match = None;
|
||||
let if_match = Some("def".to_string());
|
||||
assert!(should_prevent_write(&oi, if_none_match, if_match));
|
||||
|
||||
let if_none_match = None;
|
||||
let if_match = Some("*".to_string());
|
||||
assert!(!should_prevent_write(&oi, if_none_match, if_match));
|
||||
|
||||
let if_none_match = Some("def".to_string());
|
||||
let if_match = None;
|
||||
assert!(!should_prevent_write(&oi, if_none_match, if_match));
|
||||
|
||||
let if_none_match = Some("def".to_string());
|
||||
let if_match = Some("*".to_string());
|
||||
assert!(!should_prevent_write(&oi, if_none_match, if_match));
|
||||
|
||||
let if_none_match = Some("def".to_string());
|
||||
let if_match = Some("\"abc\"".to_string());
|
||||
assert!(!should_prevent_write(&oi, if_none_match, if_match));
|
||||
|
||||
let if_none_match = Some("*".to_string());
|
||||
let if_match = Some("\"abc\"".to_string());
|
||||
assert!(should_prevent_write(&oi, if_none_match, if_match));
|
||||
|
||||
let oi = ObjectInfo {
|
||||
etag: None,
|
||||
..Default::default()
|
||||
};
|
||||
let if_none_match = Some("*".to_string());
|
||||
let if_match = Some("\"abc\"".to_string());
|
||||
assert!(should_prevent_write(&oi, if_none_match, if_match));
|
||||
|
||||
let if_none_match = None;
|
||||
let if_match = None;
|
||||
assert!(!should_prevent_write(&oi, if_none_match, if_match));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1221,7 +1221,7 @@ impl StorageAPI for ECStore {
|
||||
}
|
||||
|
||||
if let Err(err) = self.peer_sys.make_bucket(bucket, opts).await {
|
||||
let err = err.into();
|
||||
let err = to_object_err(err.into(), vec![bucket]);
|
||||
if !is_err_bucket_exists(&err) {
|
||||
let _ = self
|
||||
.delete_bucket(
|
||||
@@ -1234,7 +1234,6 @@ impl StorageAPI for ECStore {
|
||||
)
|
||||
.await;
|
||||
}
|
||||
|
||||
return Err(err);
|
||||
};
|
||||
|
||||
|
||||
@@ -132,30 +132,50 @@ impl GetObjectReader {
|
||||
|
||||
if is_compressed {
|
||||
let actual_size = oi.get_actual_size()?;
|
||||
let (off, length) = (0, oi.size);
|
||||
let (_dec_off, dec_length) = (0, actual_size);
|
||||
if let Some(_rs) = rs {
|
||||
// TODO: range spec is not supported for compressed object
|
||||
return Err(Error::other("The requested range is not satisfiable"));
|
||||
// let (off, length) = rs.get_offset_length(actual_size)?;
|
||||
}
|
||||
let (off, length, dec_off, dec_length) = if let Some(rs) = rs {
|
||||
// Support range requests for compressed objects
|
||||
let (dec_off, dec_length) = rs.get_offset_length(actual_size)?;
|
||||
(0, oi.size, dec_off, dec_length)
|
||||
} else {
|
||||
(0, oi.size, 0, actual_size)
|
||||
};
|
||||
|
||||
let dec_reader = DecompressReader::new(reader, algo);
|
||||
|
||||
let actual_size = if actual_size > 0 {
|
||||
let actual_size_usize = if actual_size > 0 {
|
||||
actual_size as usize
|
||||
} else {
|
||||
return Err(Error::other(format!("invalid decompressed size {actual_size}")));
|
||||
};
|
||||
|
||||
let dec_reader = LimitReader::new(dec_reader, actual_size);
|
||||
let final_reader: Box<dyn AsyncRead + Unpin + Send + Sync> = if dec_off > 0 || dec_length != actual_size {
|
||||
// Use RangedDecompressReader for streaming range processing
|
||||
// The new implementation supports any offset size by streaming and skipping data
|
||||
match RangedDecompressReader::new(dec_reader, dec_off, dec_length, actual_size_usize) {
|
||||
Ok(ranged_reader) => {
|
||||
tracing::debug!(
|
||||
"Successfully created RangedDecompressReader for offset={}, length={}",
|
||||
dec_off,
|
||||
dec_length
|
||||
);
|
||||
Box::new(ranged_reader)
|
||||
}
|
||||
Err(e) => {
|
||||
// Only fail if the range parameters are fundamentally invalid (e.g., offset >= file size)
|
||||
tracing::error!("RangedDecompressReader failed with invalid range parameters: {}", e);
|
||||
return Err(e);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
Box::new(LimitReader::new(dec_reader, actual_size_usize))
|
||||
};
|
||||
|
||||
let mut oi = oi.clone();
|
||||
oi.size = dec_length;
|
||||
|
||||
return Ok((
|
||||
GetObjectReader {
|
||||
stream: Box::new(dec_reader),
|
||||
stream: final_reader,
|
||||
object_info: oi,
|
||||
},
|
||||
off,
|
||||
@@ -283,6 +303,12 @@ impl HTTPRangeSpec {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Clone)]
|
||||
pub struct HTTPPreconditions {
|
||||
pub if_match: Option<String>,
|
||||
pub if_none_match: Option<String>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Clone)]
|
||||
pub struct ObjectOptions {
|
||||
// Use the maximum parity (N/2), used when saving server configuration files
|
||||
@@ -306,6 +332,7 @@ pub struct ObjectOptions {
|
||||
pub user_defined: HashMap<String, String>,
|
||||
pub preserve_etag: Option<String>,
|
||||
pub metadata_chg: bool,
|
||||
pub http_preconditions: Option<HTTPPreconditions>,
|
||||
|
||||
pub replication_request: bool,
|
||||
pub delete_marker: bool,
|
||||
@@ -1084,3 +1111,338 @@ pub trait StorageAPI: ObjectIO {
|
||||
async fn get_pool_and_set(&self, id: &str) -> Result<(Option<usize>, Option<usize>, Option<usize>)>;
|
||||
async fn check_abandoned_parts(&self, bucket: &str, object: &str, opts: &HealOpts) -> Result<()>;
|
||||
}
|
||||
|
||||
/// A streaming decompression reader that supports range requests by skipping data in the decompressed stream.
|
||||
/// This implementation acknowledges that compressed streams (like LZ4) must be decompressed sequentially
|
||||
/// from the beginning, so it streams and discards data until reaching the target offset.
|
||||
#[derive(Debug)]
|
||||
pub struct RangedDecompressReader<R> {
|
||||
inner: R,
|
||||
target_offset: usize,
|
||||
target_length: usize,
|
||||
current_offset: usize,
|
||||
bytes_returned: usize,
|
||||
}
|
||||
|
||||
impl<R: AsyncRead + Unpin + Send + Sync> RangedDecompressReader<R> {
|
||||
pub fn new(inner: R, offset: usize, length: i64, total_size: usize) -> Result<Self> {
|
||||
// Validate the range request
|
||||
if offset >= total_size {
|
||||
tracing::debug!("Range offset {} exceeds total size {}", offset, total_size);
|
||||
return Err(Error::other("Range offset exceeds file size"));
|
||||
}
|
||||
|
||||
// Adjust length if it extends beyond file end
|
||||
let actual_length = std::cmp::min(length as usize, total_size - offset);
|
||||
|
||||
tracing::debug!(
|
||||
"Creating RangedDecompressReader: offset={}, length={}, total_size={}, actual_length={}",
|
||||
offset,
|
||||
length,
|
||||
total_size,
|
||||
actual_length
|
||||
);
|
||||
|
||||
Ok(Self {
|
||||
inner,
|
||||
target_offset: offset,
|
||||
target_length: actual_length,
|
||||
current_offset: 0,
|
||||
bytes_returned: 0,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
impl<R: AsyncRead + Unpin + Send + Sync> AsyncRead for RangedDecompressReader<R> {
|
||||
fn poll_read(
|
||||
mut self: std::pin::Pin<&mut Self>,
|
||||
cx: &mut std::task::Context<'_>,
|
||||
buf: &mut tokio::io::ReadBuf<'_>,
|
||||
) -> std::task::Poll<std::io::Result<()>> {
|
||||
use std::pin::Pin;
|
||||
use std::task::Poll;
|
||||
use tokio::io::ReadBuf;
|
||||
|
||||
loop {
|
||||
// If we've returned all the bytes we need, return EOF
|
||||
if self.bytes_returned >= self.target_length {
|
||||
return Poll::Ready(Ok(()));
|
||||
}
|
||||
|
||||
// Read from the inner stream
|
||||
let buf_capacity = buf.remaining();
|
||||
if buf_capacity == 0 {
|
||||
return Poll::Ready(Ok(()));
|
||||
}
|
||||
|
||||
// Prepare a temporary buffer for reading
|
||||
let mut temp_buf = vec![0u8; std::cmp::min(buf_capacity, 8192)];
|
||||
let mut temp_read_buf = ReadBuf::new(&mut temp_buf);
|
||||
|
||||
match Pin::new(&mut self.inner).poll_read(cx, &mut temp_read_buf) {
|
||||
Poll::Pending => return Poll::Pending,
|
||||
Poll::Ready(Err(e)) => return Poll::Ready(Err(e)),
|
||||
Poll::Ready(Ok(())) => {
|
||||
let n = temp_read_buf.filled().len();
|
||||
if n == 0 {
|
||||
// EOF from inner stream
|
||||
if self.current_offset < self.target_offset {
|
||||
// We haven't reached the target offset yet - this is an error
|
||||
return Poll::Ready(Err(std::io::Error::new(
|
||||
std::io::ErrorKind::UnexpectedEof,
|
||||
format!(
|
||||
"Unexpected EOF: only read {} bytes, target offset is {}",
|
||||
self.current_offset, self.target_offset
|
||||
),
|
||||
)));
|
||||
}
|
||||
// Normal EOF after reaching target
|
||||
return Poll::Ready(Ok(()));
|
||||
}
|
||||
|
||||
// Update current position
|
||||
let old_offset = self.current_offset;
|
||||
self.current_offset += n;
|
||||
|
||||
// Check if we're still in the skip phase
|
||||
if old_offset < self.target_offset {
|
||||
// We're still skipping data
|
||||
let skip_end = std::cmp::min(self.current_offset, self.target_offset);
|
||||
let bytes_to_skip_in_this_read = skip_end - old_offset;
|
||||
|
||||
if self.current_offset <= self.target_offset {
|
||||
// All data in this read should be skipped
|
||||
tracing::trace!("Skipping {} bytes at offset {}", n, old_offset);
|
||||
// Continue reading in the loop instead of recursive call
|
||||
continue;
|
||||
} else {
|
||||
// Partial skip: some data should be returned
|
||||
let data_start_in_buffer = bytes_to_skip_in_this_read;
|
||||
let available_data = n - data_start_in_buffer;
|
||||
let bytes_to_return = std::cmp::min(
|
||||
available_data,
|
||||
std::cmp::min(buf.remaining(), self.target_length - self.bytes_returned),
|
||||
);
|
||||
|
||||
if bytes_to_return > 0 {
|
||||
let data_slice =
|
||||
&temp_read_buf.filled()[data_start_in_buffer..data_start_in_buffer + bytes_to_return];
|
||||
buf.put_slice(data_slice);
|
||||
self.bytes_returned += bytes_to_return;
|
||||
|
||||
tracing::trace!(
|
||||
"Skipped {} bytes, returned {} bytes at offset {}",
|
||||
bytes_to_skip_in_this_read,
|
||||
bytes_to_return,
|
||||
old_offset
|
||||
);
|
||||
}
|
||||
return Poll::Ready(Ok(()));
|
||||
}
|
||||
} else {
|
||||
// We're in the data return phase
|
||||
let bytes_to_return =
|
||||
std::cmp::min(n, std::cmp::min(buf.remaining(), self.target_length - self.bytes_returned));
|
||||
|
||||
if bytes_to_return > 0 {
|
||||
buf.put_slice(&temp_read_buf.filled()[..bytes_to_return]);
|
||||
self.bytes_returned += bytes_to_return;
|
||||
|
||||
tracing::trace!("Returned {} bytes at offset {}", bytes_to_return, old_offset);
|
||||
}
|
||||
return Poll::Ready(Ok(()));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// A wrapper that ensures the inner stream is fully consumed even if the outer reader stops early.
|
||||
/// This prevents broken pipe errors in erasure coding scenarios where the writer expects
|
||||
/// the full stream to be consumed.
|
||||
pub struct StreamConsumer<R: AsyncRead + Unpin + Send + 'static> {
|
||||
inner: Option<R>,
|
||||
consumer_task: Option<tokio::task::JoinHandle<()>>,
|
||||
}
|
||||
|
||||
impl<R: AsyncRead + Unpin + Send + 'static> StreamConsumer<R> {
|
||||
pub fn new(inner: R) -> Self {
|
||||
Self {
|
||||
inner: Some(inner),
|
||||
consumer_task: None,
|
||||
}
|
||||
}
|
||||
|
||||
fn ensure_consumer_started(&mut self) {
|
||||
if self.consumer_task.is_none() && self.inner.is_some() {
|
||||
let mut inner = self.inner.take().unwrap();
|
||||
let task = tokio::spawn(async move {
|
||||
let mut buf = [0u8; 8192];
|
||||
loop {
|
||||
match inner.read(&mut buf).await {
|
||||
Ok(0) => break, // EOF
|
||||
Ok(_) => continue, // Keep consuming
|
||||
Err(_) => break, // Error, stop consuming
|
||||
}
|
||||
}
|
||||
});
|
||||
self.consumer_task = Some(task);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<R: AsyncRead + Unpin + Send + 'static> AsyncRead for StreamConsumer<R> {
|
||||
fn poll_read(
|
||||
mut self: std::pin::Pin<&mut Self>,
|
||||
cx: &mut std::task::Context<'_>,
|
||||
buf: &mut tokio::io::ReadBuf<'_>,
|
||||
) -> std::task::Poll<std::io::Result<()>> {
|
||||
use std::pin::Pin;
|
||||
use std::task::Poll;
|
||||
|
||||
if let Some(ref mut inner) = self.inner {
|
||||
Pin::new(inner).poll_read(cx, buf)
|
||||
} else {
|
||||
Poll::Ready(Ok(())) // EOF
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl<R: AsyncRead + Unpin + Send + 'static> Drop for StreamConsumer<R> {
|
||||
fn drop(&mut self) {
|
||||
if self.consumer_task.is_none() && self.inner.is_some() {
|
||||
let mut inner = self.inner.take().unwrap();
|
||||
let task = tokio::spawn(async move {
|
||||
let mut buf = [0u8; 8192];
|
||||
loop {
|
||||
match inner.read(&mut buf).await {
|
||||
Ok(0) => break, // EOF
|
||||
Ok(_) => continue, // Keep consuming
|
||||
Err(_) => break, // Error, stop consuming
|
||||
}
|
||||
}
|
||||
});
|
||||
self.consumer_task = Some(task);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::io::Cursor;
|
||||
use tokio::io::AsyncReadExt;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_ranged_decompress_reader() {
|
||||
// Create test data
|
||||
let original_data = b"Hello, World! This is a test for range requests on compressed data.";
|
||||
|
||||
// For this test, we'll simulate using the original data directly as "decompressed"
|
||||
let cursor = Cursor::new(original_data.to_vec());
|
||||
|
||||
// Test reading a range from the middle
|
||||
let mut ranged_reader = RangedDecompressReader::new(cursor, 7, 5, original_data.len()).unwrap();
|
||||
|
||||
let mut result = Vec::new();
|
||||
ranged_reader.read_to_end(&mut result).await.unwrap();
|
||||
|
||||
// Should read "World" (5 bytes starting from position 7)
|
||||
assert_eq!(result, b"World");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_ranged_decompress_reader_from_start() {
|
||||
let original_data = b"Hello, World! This is a test.";
|
||||
let cursor = Cursor::new(original_data.to_vec());
|
||||
|
||||
let mut ranged_reader = RangedDecompressReader::new(cursor, 0, 5, original_data.len()).unwrap();
|
||||
|
||||
let mut result = Vec::new();
|
||||
ranged_reader.read_to_end(&mut result).await.unwrap();
|
||||
|
||||
// Should read "Hello" (5 bytes from the start)
|
||||
assert_eq!(result, b"Hello");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_ranged_decompress_reader_to_end() {
|
||||
let original_data = b"Hello, World!";
|
||||
let cursor = Cursor::new(original_data.to_vec());
|
||||
|
||||
let mut ranged_reader = RangedDecompressReader::new(cursor, 7, 6, original_data.len()).unwrap();
|
||||
|
||||
let mut result = Vec::new();
|
||||
ranged_reader.read_to_end(&mut result).await.unwrap();
|
||||
|
||||
// Should read "World!" (6 bytes starting from position 7)
|
||||
assert_eq!(result, b"World!");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_http_range_spec_with_compressed_data() {
|
||||
// Test that HTTPRangeSpec::get_offset_length works correctly
|
||||
let range_spec = HTTPRangeSpec {
|
||||
is_suffix_length: false,
|
||||
start: 5,
|
||||
end: 14, // inclusive
|
||||
};
|
||||
|
||||
let total_size = 100i64;
|
||||
let (offset, length) = range_spec.get_offset_length(total_size).unwrap();
|
||||
|
||||
assert_eq!(offset, 5);
|
||||
assert_eq!(length, 10); // end - start + 1 = 14 - 5 + 1 = 10
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_ranged_decompress_reader_zero_length() {
|
||||
let original_data = b"Hello, World!";
|
||||
let cursor = Cursor::new(original_data.to_vec());
|
||||
let mut ranged_reader = RangedDecompressReader::new(cursor, 5, 0, original_data.len()).unwrap();
|
||||
let mut result = Vec::new();
|
||||
ranged_reader.read_to_end(&mut result).await.unwrap();
|
||||
// Should read nothing
|
||||
assert_eq!(result, b"");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_ranged_decompress_reader_skip_entire_data() {
|
||||
let original_data = b"Hello, World!";
|
||||
let cursor = Cursor::new(original_data.to_vec());
|
||||
// Skip to end of data with length 0 - this should read nothing
|
||||
let mut ranged_reader = RangedDecompressReader::new(cursor, original_data.len() - 1, 0, original_data.len()).unwrap();
|
||||
let mut result = Vec::new();
|
||||
ranged_reader.read_to_end(&mut result).await.unwrap();
|
||||
assert_eq!(result, b"");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_ranged_decompress_reader_out_of_bounds_offset() {
|
||||
let original_data = b"Hello, World!";
|
||||
let cursor = Cursor::new(original_data.to_vec());
|
||||
// Offset beyond EOF should return error in constructor
|
||||
let result = RangedDecompressReader::new(cursor, original_data.len() + 10, 5, original_data.len());
|
||||
assert!(result.is_err());
|
||||
// Use pattern matching to avoid requiring Debug on the error type
|
||||
if let Err(e) = result {
|
||||
assert!(e.to_string().contains("Range offset exceeds file size"));
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_ranged_decompress_reader_partial_read() {
|
||||
let original_data = b"abcdef";
|
||||
let cursor = Cursor::new(original_data.to_vec());
|
||||
let mut ranged_reader = RangedDecompressReader::new(cursor, 2, 3, original_data.len()).unwrap();
|
||||
let mut buf = [0u8; 2];
|
||||
let n = ranged_reader.read(&mut buf).await.unwrap();
|
||||
assert_eq!(n, 2);
|
||||
assert_eq!(&buf, b"cd");
|
||||
let mut buf2 = [0u8; 2];
|
||||
let n2 = ranged_reader.read(&mut buf2).await.unwrap();
|
||||
assert_eq!(n2, 1);
|
||||
assert_eq!(&buf2[..1], b"e");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -172,7 +172,12 @@ impl ObjectStore {
|
||||
}
|
||||
|
||||
if let Some(info) = v.item {
|
||||
let name = info.name.trim_start_matches(&prefix).trim_end_matches(SLASH_SEPARATOR);
|
||||
let object_name = if cfg!(target_os = "windows") {
|
||||
info.name.replace('\\', "/")
|
||||
} else {
|
||||
info.name
|
||||
};
|
||||
let name = object_name.trim_start_matches(&prefix).trim_end_matches(SLASH_SEPARATOR);
|
||||
let _ = sender
|
||||
.send(StringOrErr {
|
||||
item: Some(name.to_owned()),
|
||||
|
||||
@@ -114,7 +114,7 @@ impl Drop for LockGuard {
|
||||
let _ = futures::future::join_all(futures_iter).await;
|
||||
});
|
||||
// Explicitly drop the JoinHandle to acknowledge detaching the task.
|
||||
std::mem::drop(handle);
|
||||
drop(handle);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -231,6 +231,20 @@ Retrieve an object from S3 with two operation modes: read content directly or do
|
||||
- `local_path` (string, optional): Local file path (required when mode is "download")
|
||||
- `max_content_size` (number, optional): Maximum content size in bytes for read mode (default: 1MB)
|
||||
|
||||
### `create_bucket`
|
||||
|
||||
Create a new S3 bucket with the specified name.
|
||||
|
||||
**Parameters:**
|
||||
|
||||
- `bucket_name` (string): Source S3 bucket.
|
||||
|
||||
### `delete_bucket`
|
||||
|
||||
Delete the specified S3 bucket. If the bucket is not empty, the deletion will fail. You should delete all objects and objects inside them before calling this method.**WARNING: This operation will permanently delete the bucket and all objects within it!**
|
||||
|
||||
- `bucket_name` (string): Source S3 bucket.
|
||||
|
||||
## Architecture
|
||||
|
||||
The MCP server is built with a modular architecture:
|
||||
|
||||