Compare commits


362 Commits

Author SHA1 Message Date
weisd
00787cbce4 todo 2025-12-10 15:36:52 +08:00
weisd
3ac004510a fix clippy 2025-12-09 17:33:08 +08:00
weisd
d8f8bfa5b7 fix heal replication 2025-12-09 17:07:39 +08:00
weisd
1768e7bbdb Merge branch 'main' into feat/scan 2025-12-09 14:44:08 +08:00
Copilot
20961d7c91 Add comprehensive special character handling with validation refactoring and extensive test coverage (#1078)
Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-09 13:40:29 +08:00
shiro.lee
8de8172833 fix: the If-None-Match error handling in the complete_multipart_uploa… (#1065)
Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-08 23:10:20 +08:00
houseme
3326737c01 Merge branch 'main' into feat/scan 2025-12-08 22:46:28 +08:00
orbisai0security
7c98c62d60 [Security] Fix HIGH vulnerability: yaml.docker-compose.security.writable-filesystem-service.writable-filesystem-service (#1005)
Co-authored-by: orbisai0security <orbisai0security@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-08 22:05:10 +08:00
weisd
91770ffd1b Merge branch 'main' into feat/scan 2025-12-08 21:25:51 +08:00
Ali Mehraji
15c75b9d36 simple deployment via docker-compose (#1043)
Signed-off-by: Ali Mehraji <a.mehraji75@gmail.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-08 21:25:11 +08:00
weisd
7940b69bf8 Merge branch 'main' into feat/scan 2025-12-08 21:24:36 +08:00
yxrxy
af650716da feat: add is-admin user api (#1063) 2025-12-08 21:15:04 +08:00
weisd
427d31d09c Merge branch 'main' into feat/scan 2025-12-08 17:31:56 +08:00
weisd
dbdcecb9c5 optimize 2025-12-08 17:26:07 +08:00
shiro.lee
552e95e368 fix: the If-None-Match error handling in the put_object method when t… (#1034)
Co-authored-by: 0xdx2 <xuedamon2@gmail.com>
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-08 15:36:31 +08:00
dependabot[bot]
619cc69512 build(deps): bump the dependencies group with 3 updates (#1052)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-08 14:31:53 +08:00
Jitter
76d25d9a20 Fix/issue #1001 dead node detection (#1054)
Co-authored-by: weisd <im@weisd.in>
Co-authored-by: Jitterx69 <mohit@example.com>
2025-12-08 12:29:46 +08:00
yihong
834025d9e3 docs: fix some dead link (#1053)
Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-12-08 11:23:24 +08:00
houseme
e2d8e9e3d3 Feature/improve profiling (#1038)
Co-authored-by: Jitter <jitterx69@gmail.com>
Co-authored-by: weisd <im@weisd.in>
2025-12-07 22:39:47 +08:00
houseme
ad34f1b031 Merge branch 'main' into feat/scan 2025-12-07 21:45:36 +08:00
Jitter
cd6a26bc3a fix(net): resolve 1GB upload hang and macos build (Issue #1001 regression) (#1035) 2025-12-07 18:05:51 +08:00
tennisleng
5f256249f4 fix: correct ARN parsing for notification targets (#1010)
Co-authored-by: Andrew Leng <work@Andrews-MacBook-Air.local>
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-06 23:12:58 +08:00
Jitter
b10d80cbb6 fix: detect dead nodes via HTTP/2 keepalives (Issue #1001) (#1025)
Co-authored-by: weisd <im@weisd.in>
2025-12-06 21:45:42 +08:00
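
The dead-node fix above relies on HTTP/2 keepalive pings to notice an unresponsive peer instead of letting requests hang. A minimal sketch of that idea, assuming a tonic-based RPC channel; the endpoint URL and the intervals are illustrative, not RustFS's actual settings:

```rust
use std::time::Duration;
use tonic::transport::Endpoint;

// Sketch: enable HTTP/2 keepalive pings on an RPC channel so a peer that stops
// responding is detected quickly. URL and durations are illustrative assumptions.
fn peer_endpoint() -> Endpoint {
    Endpoint::from_static("http://node1.example:9000")
        .http2_keep_alive_interval(Duration::from_secs(10)) // send a PING every 10s
        .keep_alive_timeout(Duration::from_secs(5))          // treat the peer as dead if no ACK in 5s
        .keep_alive_while_idle(true)                         // probe even without in-flight requests
        .connect_timeout(Duration::from_secs(3))
}

fn main() {
    let _endpoint = peer_endpoint();
}
```
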
0xdx2
7c6cbaf837 feat: enhance error handling and add precondition checks for object o… (#1008) 2025-12-06 20:39:03 +08:00
Hunter Wu
72930b1e30 security: Fix timing attack vulnerability in credential comparison (#1014)
Co-authored-by: Copilot AI <copilot@github.com>
2025-12-06 15:13:27 +08:00
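
The timing-attack fix above concerns comparing credentials in constant time. A minimal, dependency-free sketch of the general technique (not necessarily the exact code in #1014):

```rust
/// Constant-time comparison sketch: the run time does not depend on where the
/// first mismatching byte is, only on the input length, so an attacker cannot
/// probe a secret byte by byte via response timing.
fn constant_time_eq(a: &[u8], b: &[u8]) -> bool {
    if a.len() != b.len() {
        return false; // the length itself is not treated as secret here
    }
    let mut diff: u8 = 0;
    for (x, y) in a.iter().zip(b.iter()) {
        diff |= x ^ y; // accumulate differences instead of returning early
    }
    diff == 0
}

fn main() {
    assert!(constant_time_eq(b"secret-key", b"secret-key"));
    assert!(!constant_time_eq(b"secret-key", b"secret-kez"));
}
```
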
LemonDouble
6ca8945ca7 feat(helm): split storageSize into data and log storage parameters (#1018) 2025-12-06 14:01:49 +08:00
majinghe
0d0edc22be update helm package ci file and helm values file (#1004) 2025-12-05 22:13:00 +08:00
weisd
030d3c9426 fix filemeta nil versionid (#1002) 2025-12-05 20:30:08 +08:00
majinghe
b8b905be86 add helm package ci file (#994) 2025-12-05 15:09:53 +08:00
Damien Degois
ace58fea0d feat(helm): add existingSecret handling and support for extra manifests (#992) 2025-12-05 14:14:59 +08:00
唐小鸭
3a79242133 feat: The observability module can be set separately. (#993) 2025-12-05 13:46:06 +08:00
Andrew Steurer
63d846ed14 Fix link to CONTRIBUTING.md in README (#991) 2025-12-05 09:23:26 +08:00
shiro.lee
3a79fcfe73 fix: add the is_truncated field to the return of the list_object_vers… (#985) 2025-12-04 22:26:31 +08:00
weisd
2a5ccd2211 fix meta bucket check 2025-12-04 17:12:01 +08:00
guojidan
c43166c4c6 Enhance heal and lifecycle integration tests, improve replication han… (#944)
Signed-off-by: junxiang Mu <1948535941@qq.com>
Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: weisd <im@weisd.in>
2025-12-04 16:14:47 +08:00
weisd
b3c80ae362 fix: listdir rpc (#979)
Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-04 16:12:10 +08:00
Hey Martin
3fd003b21d Delete duplicate titles in the README (#977) 2025-12-04 15:55:26 +08:00
houseme
1d3f622922 console: add version_handler and improve comments (#975) 2025-12-04 13:41:06 +08:00
loverustfs
e31b4303ed fix link error 2025-12-04 08:26:41 +08:00
houseme
5b0a3a0764 upgrade crate version and improve heal config (#963) 2025-12-03 18:49:11 +08:00
weisd
a8b7b28fd0 Fix Admin Heal API and Add Pagination Support for Large Buckets (#933)
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: loverustfs <hello@rustfs.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-03 18:10:46 +08:00
loverustfs
e355d3db80 Modify readme 2025-12-03 17:18:53 +08:00
weisd
4d7bf98c82 add logs (#962) 2025-12-03 13:17:47 +08:00
shiro.lee
699164e05e fix: add the is_truncated field to the return of the list_objects int… (#958) 2025-12-03 03:14:17 +08:00
dependabot[bot]
d35ceac441 build(deps): bump criterion in the dependencies group (#947)
Bumps the dependencies group with 1 update: [criterion](https://github.com/criterion-rs/criterion.rs).


Updates `criterion` from 0.7.0 to 0.8.0
- [Release notes](https://github.com/criterion-rs/criterion.rs/releases)
- [Changelog](https://github.com/criterion-rs/criterion.rs/blob/master/CHANGELOG.md)
- [Commits](https://github.com/criterion-rs/criterion.rs/compare/criterion-plot-v0.7.0...criterion-v0.8.0)

---
updated-dependencies:
- dependency-name: criterion
  dependency-version: 0.8.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-12-02 00:16:28 +08:00
houseme
93982227ac Improve health check handlers for endpoint and console (GET/HEAD, safer error handling) (#942)
* Improve health check handlers for endpoint and console

- Add unified GET/HEAD handling for `/health` and `/rustfs/console/health`
- Implement proper method filtering and 405 with `Allow: GET, HEAD`
- Avoid panics by removing `unwrap()` in health check logic
- Add safe fallbacks for JSON serialization and uptime calculation
- Ensure HEAD requests return only status and headers (empty body)
- Keep response format backward compatible for monitoring systems

* fix
2025-11-30 02:43:59 +08:00
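
A minimal sketch of the handler pattern described in the entry above: method filtering, 405 with an `Allow: GET, HEAD` header, HEAD returning only status and headers, and no `unwrap()` on the response path. The handler name and JSON body are illustrative, not the actual RustFS code:

```rust
use http::{header, Method, Response, StatusCode};

// Sketch of a /health handler: GET returns a JSON body, HEAD returns headers
// only, and any other method gets 405 with `Allow: GET, HEAD`.
fn health(method: &Method) -> Response<String> {
    if *method == Method::GET {
        Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/json")
            // Fall back to a plain response instead of panicking in the health path.
            .body(r#"{"status":"ok"}"#.to_string())
            .unwrap_or_else(|_| Response::new(String::new()))
    } else if *method == Method::HEAD {
        Response::builder()
            .status(StatusCode::OK)
            .header(header::CONTENT_TYPE, "application/json")
            .body(String::new()) // empty body for HEAD
            .unwrap_or_else(|_| Response::new(String::new()))
    } else {
        Response::builder()
            .status(StatusCode::METHOD_NOT_ALLOWED)
            .header(header::ALLOW, "GET, HEAD")
            .body(String::new())
            .unwrap_or_else(|_| Response::new(String::new()))
    }
}

fn main() {
    let resp = health(&Method::POST);
    assert_eq!(resp.status(), StatusCode::METHOD_NOT_ALLOWED);
    assert_eq!(resp.headers()[header::ALLOW].to_str().unwrap(), "GET, HEAD");
}
```
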
Copilot
fdcdb30d28 Optimize concurrent GetObject performance with Moka cache, comprehensive metrics, complete test suite, cache writeback, and comprehensive documentation (#916)
* Initial plan

* feat: add concurrency-aware buffer sizing and hot object caching for GetObject

- Implement adaptive buffer sizing based on concurrent request load
- Add per-request tracking with automatic cleanup using RAII guards
- Implement hot object cache (LRU) for frequently accessed small files (<= 10MB)
- Add disk I/O semaphore to prevent saturation under extreme load
- Integrate concurrency module into GetObject implementation
- Buffer sizes now adapt: low concurrency uses large buffers for throughput,
  high concurrency uses smaller buffers for fairness and memory efficiency
- Add comprehensive metrics collection for monitoring performance

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* docs: add comprehensive documentation and tests for concurrent GetObject optimization

- Add detailed technical documentation explaining the solution
- Document root cause analysis and solution architecture
- Include performance expectations and testing recommendations
- Add integration tests for concurrency tracking and buffer sizing
- Add cache behavior tests
- Include benchmark tests for concurrent request handling

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix: address code review issues in concurrency module

- Fix race condition in cache size tracking by using consistent atomic operations within lock
- Correct buffer sizing logic: 1-2 requests use 100%, 3-4 use 75%, 5-8 use 50%, >8 use 40%
- Improve error message for semaphore acquire failure
- Document limitation of streaming cache implementation (not yet implemented)
- Add TODO for proper streaming cache with suggested approaches
- Update tests to match corrected buffer sizing thresholds

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* docs: add comprehensive implementation summary for concurrent GetObject optimization

- Executive summary of problem and solution
- Detailed architecture documentation for each optimization
- Integration points and code examples
- Complete testing strategy and performance validation steps
- Monitoring and observability guidelines with Prometheus queries
- Deployment considerations and rollout strategy
- Future enhancement roadmap
- Success criteria and quantitative metrics

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix

* fix

* refactor: optimize cache with lru 0.16.2 read-first pattern and add advanced features

- Implement optimized read-first cache access using peek() to reduce write lock contention
- Add batch cache operations: get_cached_batch() for efficient multi-object retrieval
- Add cache utility methods: is_cached(), remove_cached(), get_hot_keys()
- Implement warm_cache() for pre-populating cache on startup
- Add get_advanced_buffer_size() with file size and sequential read optimization
- Enhance test suite with 8 new comprehensive tests covering:
  - Batch operations and cache warming
  - Hot keys tracking and analysis
  - Cache removal and LRU behavior verification
  - Concurrent cache access performance
  - Advanced buffer sizing strategies
- Improve documentation and code comments in English throughout

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* docs: add final comprehensive optimization summary

- Complete technical documentation of all optimizations
- Detailed performance analysis and metrics
- Production deployment guide with examples
- Comprehensive API reference and usage patterns
- Migration guide and future enhancement roadmap
- All documentation in professional English

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix

* fix

* add moka crate for cache

* feat: replace LRU with Moka cache and add comprehensive metrics

- Replace lru crate with moka 0.12.11 for superior concurrent performance
- Implement lock-free cache with automatic TTL/TTI expiration
- Add size-based eviction using Moka's weigher function
- Integrate comprehensive metrics collection throughout GetObject flow:
  * Cache hit/miss tracking with per-key access counts
  * Request concurrency gauges
  * Disk permit wait time histograms
  * Total request duration tracking
  * Response size and buffer size histograms
- Deep integration with ecfs.rs GetObject operation
- Add hit rate calculation method
- Enhanced CacheStats with hit/miss counters
- Lock-free concurrent reads for better scalability

Moka advantages over LRU:
- True lock-free concurrent access
- Built-in TTL and TTI support
- Automatic size-based eviction
- Better performance under high concurrency
- Native async support

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* docs: add comprehensive Moka cache migration and metrics documentation

- Complete technical documentation of LRU to Moka migration
- Detailed performance comparison and benchmarks
- Comprehensive metrics catalog with 15+ Prometheus metrics
- Prometheus query examples for monitoring
- Dashboard and alerting recommendations
- Migration guide with code examples
- Troubleshooting guide for common issues
- Future enhancement roadmap

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix

* fix

* refactor: update tests for Moka cache implementation

- Completely refactor test suite to align with Moka-based concurrency.rs
- Add Clone derive to ConcurrencyManager for test convenience
- Update all tests to handle Moka's async behavior with proper delays
- Add new tests:
  * test_cache_hit_rate - validate hit rate calculation
  * test_ttl_expiration - verify TTL configuration
  * test_is_cached_no_side_effects - ensure contains doesn't affect LRU
  * bench_concurrent_cache_performance - benchmark concurrent access
- Updated existing tests:
  * test_moka_cache_operations - renamed and updated for Moka API
  * test_moka_cache_eviction - validate automatic eviction
  * test_hot_keys_tracking - improved assertions for sorted results
  * test_concurrent_cache_access - validate lock-free performance
- All tests now include appropriate sleep delays for Moka's async processing
- Enhanced documentation and assertions for better test clarity
- Total: 18 comprehensive integration tests

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* docs: add comprehensive Moka test suite documentation

- Complete test suite documentation for all 18 tests
- Detailed test patterns and best practices for Moka
- Debugging guide for common test failures
- Performance expectations and CI integration
- 100% API coverage validation
- Future enhancement roadmap

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix

* fmt

* docs: enhance code comments and test documentation throughout

- Significantly improve module-level documentation in concurrency.rs
- Add detailed explanations of performance characteristics and expected improvements
- Document the solution to issue #911 with before/after metrics
- Enhance constant documentation with purpose and impact explanations
- Add comprehensive RAII guard documentation with thread safety notes
- Improve test documentation with detailed test strategy and rationale
- Add Moka-specific test patterns and timing guidance
- Include performance expectations for each test category
- Document why each test matters for solving the original issue
- All documentation written in professional English
- Follow Rust documentation best practices with examples

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* remove lru crate

* upgrade version

* fix: resolve test errors by correcting module structure and test assertions

- Fix test import paths to use crate:: instead of rustfs:: (binary-only crate)
- Keep test file in src/storage/ instead of tests/ (no lib.rs exists)
- Add #[cfg(test)] guard to mod declaration in storage/mod.rs
- Fix Arc type annotations for Moka's ConcurrencyManager in concurrent tests
- Correct test_buffer_size_bounds assertions to match actual implementation:
  * Minimum buffer is 32KB for files <100KB, 64KB otherwise
  * Maximum buffer respects base_buffer_size when concurrency is low
  * Buffer sizing doesn't cap at file size, only at min/max constraints
- All 17 integration tests now pass successfully

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix: modify `TimeoutLayer::new` to `TimeoutLayer::with_status_code` and improve docker health check

* fix

* feat: implement cache writeback for small objects in GetObject

- Add cache writeback logic for objects meeting caching criteria:
  * No range/part request (full object retrieval)
  * Object size known and <= 10MB (max_object_size threshold)
  * Not encrypted (SSE-C or managed encryption)
- Read eligible objects into memory and cache via background task
- Serve response from in-memory data for immediate client response
- Add metrics counter for cache writeback operations
- Add 3 new tests for cache writeback functionality:
  * test_cache_writeback_flow - validates round-trip caching
  * test_cache_writeback_size_limit - ensures large objects aren't cached
  * test_cache_writeback_concurrent - validates thread-safe concurrent writes
- Update test suite documentation (now 20 comprehensive tests)

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* improve code for const

* cargo clippy

* feat: add cache enable/disable configuration via environment variable

- Add is_cache_enabled() method to ConcurrencyManager
- Read RUSTFS_OBJECT_CACHE_ENABLE env var (default: false) at startup
- Update ecfs.rs to check is_cache_enabled() before cache lookup and writeback
- Cache lookup and writeback now respect the enable flag
- Add test_cache_enable_configuration test
- Constants already exist in rustfs_config:
  * ENV_OBJECT_CACHE_ENABLE = "RUSTFS_OBJECT_CACHE_ENABLE"
  * DEFAULT_OBJECT_CACHE_ENABLE = false
- Total: 21 comprehensive tests passing

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix

* fmt

* fix

* fix

* feat: implement comprehensive CachedGetObject response cache with metadata

- Add CachedGetObject struct with full response metadata fields:
  * body, content_length, content_type, e_tag, last_modified
  * expires, cache_control, content_disposition, content_encoding
  * storage_class, version_id, delete_marker, tag_count, etc.
- Add dual cache architecture in HotObjectCache:
  * Legacy simple byte cache for backward compatibility
  * New response cache for complete GetObject responses
- Add ConcurrencyManager methods for response caching:
  * get_cached_object() - retrieve cached response with metadata
  * put_cached_object() - store complete response
  * invalidate_cache() - invalidate on write operations
  * invalidate_cache_versioned() - invalidate both version and latest
  * make_cache_key() - generate cache keys with version support
  * max_object_size() - get cache threshold
- Add builder pattern for CachedGetObject construction
- Add 6 new tests for response cache functionality (27 total):
  * test_cached_get_object_basic - basic operations
  * test_cached_get_object_versioned - version key handling
  * test_cache_invalidation - write operation invalidation
  * test_cache_invalidation_versioned - versioned invalidation
  * test_cached_get_object_size_limit - size enforcement
  * test_max_object_size - threshold accessor

All 27 tests pass successfully.

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* feat: integrate CachedGetObject cache in ecfs.rs with full metadata and cache invalidation

Integration of CachedGetObject response cache in ecfs.rs:
1. get_object: Cache lookup uses get_cached_object() with full metadata
   - Returns complete response with e_tag, last_modified, content_type, etc.
   - Parses last_modified from RFC3339 string
   - Supports versioned cache keys via make_cache_key()

2. get_object: Cache writeback uses put_cached_object() with metadata
   - Stores content_type, e_tag, last_modified in CachedGetObject
   - Background writeback via tokio::spawn()

3. Cache invalidation added to write operations:
   - put_object: invalidate_cache_versioned() after store.put_object()
   - put_object_extract: invalidate_cache_versioned() after each file extraction
   - copy_object: invalidate_cache_versioned() after store.copy_object()
   - delete_object: invalidate_cache_versioned() after store.delete_object()
   - delete_objects: invalidate_cache_versioned() for each deleted object
   - complete_multipart_upload: invalidate_cache_versioned() after completion

4. Fixed test_adaptive_buffer_sizing to be more robust with parallel tests

All 27 tests pass.

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix: add error logging for time parsing in cache lookup and writeback

- Add warning log when RFC3339 parsing fails in cache lookup
- Add warning log when time formatting fails in cache writeback
- Improves debugging for cache-related issues

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix

* fix

* upgrade version

* fmt

* add http: 4138 and add max_object_size key

* fix

* fix

* fix

* modify metrics key

* add

* upgrade crates version and improve docker observability

* feat: implement adaptive I/O strategy based on disk permit wait time

Implements adaptive I/O strategy for GetObject operations based on disk permit
wait times, as requested in issue #911.

Key changes:

1. New types in concurrency.rs:
   - IoLoadLevel enum: Low (<10ms), Medium (10-50ms), High (50-200ms), Critical (>200ms)
   - IoStrategy struct: contains optimized I/O parameters based on load level
   - IoLoadMetrics: rolling window metrics for load tracking

2. New ConcurrencyManager methods:
   - calculate_io_strategy(): calculates adaptive strategy from permit wait duration
   - record_permit_wait(): records observation for load tracking
   - smoothed_load_level(): returns averaged load level for stability
   - io_load_stats(): returns (avg_wait, p95_wait, max_wait, count) for monitoring
   - adaptive_buffer_size(): convenience method for buffer sizing

3. Integration in ecfs.rs get_object:
   - Calculate IoStrategy after acquiring disk permit
   - Use strategy buffer_size instead of static sizing
   - Consider strategy.cache_writeback_enabled in cache decision
   - Record new metrics: io.load.level gauge, io.buffer.multiplier gauge,
     io.strategy.selected counter by load level

4. New tests (4 added, 31 total):
   - test_io_load_level_classification: validates load level thresholds
   - test_io_strategy_buffer_sizing: validates buffer multipliers
   - test_calculate_io_strategy: validates manager strategy calculation
   - test_io_load_stats: validates stats tracking

All 31 tests pass. Clippy clean. Formatted.

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix

* fix

* docs: add comprehensive architecture design and fix cache hit event notification

1. Added CONCURRENCY_ARCHITECTURE.md with complete design documentation:
   - Executive summary and problem statement
   - Architecture overview with request flow diagram
   - Detailed module analysis for concurrency.rs and ecfs.rs
   - Critical analysis of helper.complete() for cache hits
   - Adaptive I/O strategy design with algorithm
   - Cache architecture with CachedGetObject structure
   - Metrics and monitoring with Prometheus queries
   - Performance characteristics and future enhancements

2. Fixed critical issue: Cache hit path now calls helper.complete()
   - S3 bucket notifications (s3:GetObject events) now trigger for cache hits
   - Event-driven workflows (Lambda, SNS) work correctly for all object access
   - Maintains audit trail for both cache hits and misses

All 31 tests pass.

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix: set object info and version_id on helper before complete() for cache hits

When serving from cache, properly configure the OperationHelper before
calling complete() to ensure S3 bucket notifications include complete
object metadata:

1. Build ObjectInfo from cached metadata:
   - bucket, name, size, actual_size
   - etag, mod_time, version_id, delete_marker
   - storage_class, content_type, content_encoding
   - user_metadata (user_defined)

2. Set helper.object(event_info).version_id(version_id_str) before complete()

3. Updated CONCURRENCY_ARCHITECTURE.md with:
   - Complete code example for cache hit event notification
   - Explanation of why ObjectInfo is required
   - Documentation of version_id handling

This ensures:
- Lambda triggers receive proper object metadata for cache hits
- SNS/SQS notifications include complete information
- Audit logs contain accurate object details
- Version-specific event routing works correctly

All 31 tests pass.

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix

* improve code

* fmt

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-11-30 01:16:55 +08:00
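
A condensed sketch of two mechanisms from the entry above: a Moka hot-object cache with TTL/TTI and a size-based weigher, and concurrency-aware buffer sizing using the tiers quoted in the commit (1-2 requests 100%, 3-4 75%, 5-8 50%, >8 40%). Apart from the 10MB cache threshold and those tiers, the constants and names here are assumptions, not RustFS's actual values:

```rust
use std::sync::Arc;
use std::time::Duration;

use bytes::Bytes;
use moka::future::Cache;

// Only the 10 MB cacheable-object threshold and the percentage tiers below are
// taken from the commit message; every other constant is an assumption.
const MAX_CACHED_OBJECT_SIZE: u64 = 10 * 1024 * 1024;
const BASE_BUFFER_SIZE: usize = 1024 * 1024; // assumed 1 MiB base buffer

/// Hot-object cache: lock-free concurrent access, TTL/TTI expiration and
/// size-based eviction via a weigher, which Moka provides out of the box.
fn build_hot_object_cache() -> Cache<String, Arc<Bytes>> {
    Cache::builder()
        .max_capacity(256 * 1024 * 1024) // total cached bytes (assumed limit)
        .weigher(|_key: &String, value: &Arc<Bytes>| value.len().try_into().unwrap_or(u32::MAX))
        .time_to_live(Duration::from_secs(300)) // assumed TTL
        .time_to_idle(Duration::from_secs(60))  // assumed TTI
        .build()
}

/// Whether a full-object GET response is small enough to cache (<= 10 MB).
fn is_cacheable(object_size: u64) -> bool {
    object_size <= MAX_CACHED_OBJECT_SIZE
}

/// Concurrency-aware buffer sizing with the tiers quoted in the commit:
/// 1-2 concurrent requests use 100% of the base buffer, 3-4 use 75%,
/// 5-8 use 50%, and more than 8 use 40%.
fn adaptive_buffer_size(concurrent_requests: usize) -> usize {
    let percent = match concurrent_requests {
        0..=2 => 100,
        3..=4 => 75,
        5..=8 => 50,
        _ => 40,
    };
    BASE_BUFFER_SIZE * percent / 100
}

#[tokio::main]
async fn main() {
    let cache = build_hot_object_cache();
    cache
        .insert("bucket/key".to_string(), Arc::new(Bytes::from_static(b"hello")))
        .await;
    assert!(cache.get(&"bucket/key".to_string()).await.is_some());
    assert!(is_cacheable(5 * 1024 * 1024));
    assert_eq!(adaptive_buffer_size(6), BASE_BUFFER_SIZE / 2);
}
```
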
Serhiy Novoseletskiy
a6cf0740cb Updated RUSTFS_VOLUMES (#922)
1. Removed .rustfs.svc.cluster.local as all pods for statefulset are running in the same namespace
2. used "rustfs.fullname" as it's used in statefulset services and statefull set names

Co-authored-by: houseme <housemecn@gmail.com>
2025-11-29 23:50:18 +08:00
loverustfs
a2e3a719d3 Improve reading experience 2025-11-28 16:03:41 +08:00
loverustfs
76efee37fa fix error 2025-11-28 15:23:26 +08:00
loverustfs
fd7c0964a0 Modify Readme 2025-11-28 15:16:59 +08:00
唐小鸭
701960dd81 fix out of range for slice (#931) 2025-11-27 15:57:38 +08:00
Shyim
ee04cc77a0 remove debug (#912)
* remove debug

* Refactor get_global_encryption_service function

* Refactor get_global_encryption_service function

---------

Co-authored-by: loverustfs <hello@rustfs.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-11-26 11:56:01 +08:00
houseme
069194f553 Fix/getobjectlength (#920)
* fix getobject content length resp

* Fix regression in exception handling for non-existent key with enhanced compression predicate and metadata improvements (#915)

* Initial plan

* Fix GetObject regression by excluding error responses from compression

The issue was that CompressionLayer was attempting to compress error responses,
which could cause Content-Length header mismatches. By excluding 4xx and 5xx
responses from compression, we ensure error responses (like NoSuchKey) are sent
correctly without body truncation.

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Enhance NoSuchKey fix with improved compression predicate and comprehensive tests

- Enhanced ShouldCompress predicate with size-based exclusion (< 256 bytes)
- Added detailed documentation explaining the compression logic
- Added debug logging for better observability
- Created comprehensive test suite with 4 test cases:
  - test_get_deleted_object_returns_nosuchkey
  - test_head_deleted_object_returns_nosuchkey
  - test_get_nonexistent_object_returns_nosuchkey
  - test_multiple_gets_deleted_object
- Added extensive inline documentation and comments
- Created docs/fix-nosuchkey-regression.md with full analysis

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Add compression best practices documentation

Added comprehensive guide covering:
- Best practices for HTTP response compression
- Common pitfalls and solutions
- Performance considerations and trade-offs
- Testing guidelines and examples
- Monitoring and alerting recommendations
- Migration guide for existing services

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix

* fmt

* fmt

* Fix/objectdelete (#917)

* fix getobject content length resp

* fix delete object

---------

Co-authored-by: houseme <housemecn@gmail.com>

* Add comprehensive analysis of NoSuchKey fix and related improvements

Created detailed documentation analyzing:
- HTTP compression layer fix (primary issue)
- Content-length calculation fix from PR #917
- Delete object metadata fixes from PR #917
- How all components work together
- Complete scenario walkthrough
- Performance impact analysis
- Testing strategy and deployment checklist

This ties together all the changes in the PR branch including the merged
improvements from PR #917.

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* replace `once_cell` to `std`

* fmt

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: weisd <im@weisd.in>

* fmt

---------

Co-authored-by: weisd <weishidavip@163.com>
Co-authored-by: Copilot <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
Co-authored-by: weisd <im@weisd.in>
2025-11-24 18:56:34 +08:00
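
The compression fix above hinges on a predicate that excludes 4xx/5xx responses (e.g. NoSuchKey) and bodies under 256 bytes from compression. A framework-agnostic sketch of that decision rule; the real change plugs a predicate into the HTTP compression layer, this only captures the logic:

```rust
use http::StatusCode;

/// Decide whether a response body should be compressed, per the rule sketched
/// in the commit above: never compress error responses, and skip bodies
/// smaller than 256 bytes where compression overhead outweighs the benefit.
fn should_compress(status: StatusCode, content_length: Option<u64>) -> bool {
    const MIN_COMPRESSIBLE_SIZE: u64 = 256;
    if status.is_client_error() || status.is_server_error() {
        return false;
    }
    match content_length {
        Some(len) => len >= MIN_COMPRESSIBLE_SIZE,
        // Unknown length (streaming body): leave the decision to other checks.
        None => true,
    }
}

fn main() {
    assert!(!should_compress(StatusCode::NOT_FOUND, Some(180))); // NoSuchKey error body
    assert!(!should_compress(StatusCode::OK, Some(64)));         // tiny body
    assert!(should_compress(StatusCode::OK, Some(5 * 1024 * 1024)));
}
```
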
weisd
fce4e64da4 Fix/objectdelete (#917)
* fix getobject content length resp

* fix delete object

---------

Co-authored-by: houseme <housemecn@gmail.com>
2025-11-24 16:35:51 +08:00
houseme
44bdebe6e9 build(deps): bump the dependencies group with 10 updates (#914)
* build(deps): bump the dependencies group with 10 updates

* build(deps): bump the dependencies group with 8 updates (#913)

Bumps the dependencies group with 8 updates:

| Package | From | To |
| --- | --- | --- |
| [bytesize](https://github.com/bytesize-rs/bytesize) | `2.2.0` | `2.3.0` |
| [aws-config](https://github.com/smithy-lang/smithy-rs) | `1.8.10` | `1.8.11` |
| [aws-credential-types](https://github.com/smithy-lang/smithy-rs) | `1.2.9` | `1.2.10` |
| [aws-sdk-s3](https://github.com/awslabs/aws-sdk-rust) | `1.113.0` | `1.115.0` |
| [convert_case](https://github.com/rutrum/convert-case) | `0.9.0` | `0.10.0` |
| [hashbrown](https://github.com/rust-lang/hashbrown) | `0.16.0` | `0.16.1` |
| [rumqttc](https://github.com/bytebeamio/rumqtt) | `0.25.0` | `0.25.1` |
| [starshard](https://github.com/houseme/starshard) | `0.5.0` | `0.6.0` |


Updates `bytesize` from 2.2.0 to 2.3.0
- [Release notes](https://github.com/bytesize-rs/bytesize/releases)
- [Changelog](https://github.com/bytesize-rs/bytesize/blob/master/CHANGELOG.md)
- [Commits](https://github.com/bytesize-rs/bytesize/compare/bytesize-v2.2.0...bytesize-v2.3.0)

Updates `aws-config` from 1.8.10 to 1.8.11
- [Release notes](https://github.com/smithy-lang/smithy-rs/releases)
- [Changelog](https://github.com/smithy-lang/smithy-rs/blob/main/CHANGELOG.md)
- [Commits](https://github.com/smithy-lang/smithy-rs/commits)

Updates `aws-credential-types` from 1.2.9 to 1.2.10
- [Release notes](https://github.com/smithy-lang/smithy-rs/releases)
- [Changelog](https://github.com/smithy-lang/smithy-rs/blob/main/CHANGELOG.md)
- [Commits](https://github.com/smithy-lang/smithy-rs/commits)

Updates `aws-sdk-s3` from 1.113.0 to 1.115.0
- [Release notes](https://github.com/awslabs/aws-sdk-rust/releases)
- [Commits](https://github.com/awslabs/aws-sdk-rust/commits)

Updates `convert_case` from 0.9.0 to 0.10.0
- [Commits](https://github.com/rutrum/convert-case/commits)

Updates `hashbrown` from 0.16.0 to 0.16.1
- [Release notes](https://github.com/rust-lang/hashbrown/releases)
- [Changelog](https://github.com/rust-lang/hashbrown/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/hashbrown/compare/v0.16.0...v0.16.1)

Updates `rumqttc` from 0.25.0 to 0.25.1
- [Release notes](https://github.com/bytebeamio/rumqtt/releases)
- [Changelog](https://github.com/bytebeamio/rumqtt/blob/main/CHANGELOG.md)
- [Commits](https://github.com/bytebeamio/rumqtt/compare/rumqttc-0.25.0...rumqttc-0.25.1)

Updates `starshard` from 0.5.0 to 0.6.0
- [Commits](https://github.com/houseme/starshard/compare/0.5.0...0.6.0)

---
updated-dependencies:
- dependency-name: bytesize
  dependency-version: 2.3.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: dependencies
- dependency-name: aws-config
  dependency-version: 1.8.11
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
- dependency-name: aws-credential-types
  dependency-version: 1.2.10
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
- dependency-name: aws-sdk-s3
  dependency-version: 1.115.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: dependencies
- dependency-name: convert_case
  dependency-version: 0.10.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: dependencies
- dependency-name: hashbrown
  dependency-version: 0.16.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
- dependency-name: rumqttc
  dependency-version: 0.25.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
- dependency-name: starshard
  dependency-version: 0.6.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-11-24 11:03:35 +08:00
majinghe
2b268fdd7f update tls configuration in helm chart (#900)
* update tls configuration in helm chart

* typo fix
2025-11-20 22:20:11 +08:00
houseme
18cd9a8b46 build(deps): bump the dependencies group with 5 updates (#896) 2025-11-20 13:04:24 +08:00
loverustfs
e14809ee04 Revise data sovereignty and compliance details in README
Updated the comparison between RustFS and other object storage solutions to clarify data sovereignty and compliance aspects.
2025-11-20 09:11:15 +08:00
loverustfs
390d051ddd Update README.md
Correcting inaccurate expressions
2025-11-20 08:55:14 +08:00
weisd
b73ca0220e add ifmatch support (#885)
Co-authored-by: loverustfs <hello@rustfs.com>
2025-11-20 08:40:47 +08:00
loverustfs
b4071d493c Modify README 2025-11-20 08:24:07 +08:00
Copilot
277d80de13 Fix: Implement priority-based heal queue with comprehensive diagnostic logging (#884)
* Initial plan

* Implement priority-based heal queue with deduplication

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Apply cargo fmt formatting fixes

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Add queue monitoring, better error handling, and adaptive processing

- Add priority-based queue statistics tracking
- Implement queue capacity warnings (>80% full)
- Process multiple tasks per cycle when capacity allows
- Add proper error logging for failed heal request submissions
- Add Hash trait to HealPriority for HashMap support
- Improve observability with detailed queue status logs

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Add comprehensive diagnostic logging to heal flow

- Add detailed logging for shard availability checks during reads
- Log successful heal request submissions
- Add detailed logging in heal_object for disk checking
- Log each disk's heal decision with reason and endpoint
- Add logging for heal writing and renaming phases
- Improve variable naming (nil_count -> available_shards)
- Add success confirmation messages for renamed healed data

These logs will help diagnose where the heal flow is failing.

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix

* feat(heal): enhance robustness, safety, and observability of heal process

- **Logging & Observability**:
  - Upgrade critical heal logs from `info` (/crates/e2e_test/src/reliant/node_interact_test.rs:196:0-213:1) to `warn` for better visibility.
  - Implement structured logging with `tracing` fields for machine readability.
  - Add `#[tracing::instrument]` to [HealTask](c/crates/ahm/src/heal/task.rs:182:0-205:1) and [SetDisks](/crates/ecstore/src/set_disk.rs:120:0-131:1) methods for automatic context propagation.

- **Robustness**:
  - Add exponential backoff retry (3 attempts) for acquiring write locks in [heal_object](/crates/ahm/src/heal/storage.rs:438:4-460:5) to handle contention.
  - Handle [rename_data](/crates/ecstore/src/set_disk.rs:392:4-516:5) failures gracefully by preserving temporary files instead of forcing deletion, preventing potential data loss.

- **Data Safety**:
  - Fix [object_exists](/crates/ahm/src/heal/storage.rs:395:4-412:5) to propagate IO errors instead of treating them as "object not found".
  - Update [ErasureSetHealer](/crates/ahm/src/heal/erasure_healer.rs:28:0-33:1) to mark objects as failed rather than skipped when existence checks error, ensuring they are tracked for retry.

* fix

* fmt

* improve code for heal_object

* fix

* fix

* fix

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-11-20 00:36:25 +08:00
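
Two small sketches of mechanisms named in the entry above: exponential backoff (3 attempts) when acquiring the write lock in heal_object, and the >80% queue-capacity warning. The delays, the error type, and the `try_acquire` stand-in are assumptions, not the actual heal-path code:

```rust
use std::time::Duration;

/// Try to acquire a contended write lock up to 3 times with exponential
/// backoff before giving up. `try_acquire` stands in for whatever lock or
/// permit call the heal path actually uses.
async fn acquire_with_backoff<F>(mut try_acquire: F) -> Result<(), String>
where
    F: FnMut() -> Result<(), String>,
{
    const MAX_ATTEMPTS: u32 = 3;
    let mut delay = Duration::from_millis(50); // assumed initial backoff
    for attempt in 1..=MAX_ATTEMPTS {
        match try_acquire() {
            Ok(()) => return Ok(()),
            Err(e) if attempt == MAX_ATTEMPTS => return Err(e),
            Err(_) => {
                tokio::time::sleep(delay).await;
                delay *= 2; // exponential backoff between attempts
            }
        }
    }
    unreachable!("loop either returns Ok or the final Err")
}

/// Queue-capacity warning sketch: warn when the heal queue is more than 80% full.
fn maybe_warn_capacity(len: usize, capacity: usize) {
    if capacity > 0 && len * 100 / capacity > 80 {
        eprintln!("heal queue at {}% capacity ({}/{})", len * 100 / capacity, len, capacity);
    }
}

#[tokio::main]
async fn main() {
    let mut failures_left = 2;
    let result = acquire_with_backoff(|| {
        if failures_left > 0 {
            failures_left -= 1;
            Err("lock contended".to_string())
        } else {
            Ok(())
        }
    })
    .await;
    assert!(result.is_ok());
    maybe_warn_capacity(900, 1000);
}
```
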
shiro.lee
9b9bbb662b fix: removing the Limit on the Number of Object Versions (#819) (#892)
removing the Limit on the Number of Object Versions (#819)
2025-11-19 22:34:26 +08:00
majinghe
44f3f3d070 add standalone mode support (#881)
* add standalone mode support

* update readme file

* change non-root from 1000 to 10001

* delete self sign crt content

* modify security content

* fix syntax error for readme file.

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* update image repository and tag info.

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* fix helm chart syntax issue.

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* fix helm chart syntax issue.

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

---------

Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: loverustfs <hello@rustfs.com>
2025-11-19 13:56:21 +08:00
weisd
a13ce08590 fix listbucket sign (#890) 2025-11-19 11:08:16 +08:00
weisd
55d44622ed list object include deleted support (#882)
Co-authored-by: houseme <housemecn@gmail.com>
2025-11-18 21:51:10 +08:00
Copilot
6da5766ea2 Implement adaptive buffer sizing with workload profiles - Production-Ready Implementation (#878)
* Initial plan

* Add workload profiles module with adaptive buffer sizing

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Add comprehensive adaptive buffer sizing documentation

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Add implementation summary documentation

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Implement Phase 2: Opt-In Usage for buffer profiles

- Add RUSTFS_BUFFER_PROFILE_ENABLE and RUSTFS_BUFFER_PROFILE config options
- Implement global buffer configuration with enable/disable flag
- Add WorkloadProfile::from_name() for parsing profile names
- Update put_object, put_object_extract, and upload_part to use opt-in function
- Add init_buffer_profile_system() in main.rs for initialization
- Add comprehensive tests for opt-in functionality
- Update documentation with Phase 2 usage examples

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Implement Phase 3: Default Enablement for buffer profiles

- Change default to enabled with GeneralPurpose profile
- Replace --buffer-profile-enable with --buffer-profile-disable for opt-out
- Update init_buffer_profile_system() to reflect Phase 3 logic
- Add comprehensive Phase 3 documentation
- Create MIGRATION_PHASE3.md guide for smooth transition
- Add test for Phase 3 default behavior
- Update IMPLEMENTATION_SUMMARY.md to mark Phase 3 as complete

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Implement Phase 4: Full Integration with deprecated legacy and metrics

- Deprecate get_adaptive_buffer_size() function (backward compatible)
- Implement profile-only buffer sizing in get_buffer_size_opt_in()
- Add performance metrics collection (optional, via metrics feature)
- Even disabled mode now uses GeneralPurpose profile (no hardcoded values)
- Add comprehensive Phase 4 documentation and guide
- Add test_phase4_full_integration() test
- Update IMPLEMENTATION_SUMMARY.md to mark Phase 4 as complete

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Finalize adaptive buffer sizing: remove deprecated code and improve documentation

English:
- Remove deprecated get_adaptive_buffer_size() function
- Remove deprecated test cases (test_adaptive_buffer_size, Phase 3 legacy comparisons)
- Improve all code comments to be clear and professional English
- Add comprehensive bilingual implementation summary (COMPLETE_SUMMARY.md)
- Update buffer sizing comments in put_object, put_object_extract, upload_part
- Enhance init_buffer_profile_system documentation
- Clean up phase-specific references in comments

Chinese (中文):
- Remove the deprecated get_adaptive_buffer_size() function
- Remove deprecated test cases (test_adaptive_buffer_size, Phase 3 legacy comparisons)
- Improve all code comments to use clear, professional English
- Add a comprehensive bilingual implementation summary (COMPLETE_SUMMARY.md)
- Update the buffer sizing comments in put_object, put_object_extract, and upload_part
- Enhance the init_buffer_profile_system documentation
- Clean up phase-specific references in comments

This commit completes the adaptive buffer sizing implementation by:
1. Removing all deprecated legacy code and tests
2. Improving code documentation quality
3. Providing comprehensive bilingual summary

This commit completes the adaptive buffer sizing implementation by:
1. Removing all deprecated legacy code and tests
2. Improving code documentation quality
3. Providing a comprehensive bilingual summary

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fmt

* fix

* fix

* fix

* fix

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-11-18 13:32:02 +08:00
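
A sketch of the configuration described above. The RUSTFS_BUFFER_PROFILE env var name, WorkloadProfile::from_name(), and the GeneralPurpose default come from the commit message; the other profile variants, the accepted name strings, and the buffer sizes are illustrative assumptions:

```rust
use std::env;

/// Workload profiles governing adaptive buffer sizing. Only GeneralPurpose is
/// named in the commit above; the other variants are illustrative.
#[derive(Clone, Copy, Debug, PartialEq, Eq)]
enum WorkloadProfile {
    GeneralPurpose,
    Throughput, // assumed variant
    LowLatency, // assumed variant
}

impl WorkloadProfile {
    /// Parse a profile name from configuration; accepted strings are assumptions.
    fn from_name(name: &str) -> Option<Self> {
        match name.to_ascii_lowercase().as_str() {
            "generalpurpose" | "general-purpose" => Some(Self::GeneralPurpose),
            "throughput" => Some(Self::Throughput),
            "lowlatency" | "low-latency" => Some(Self::LowLatency),
            _ => None,
        }
    }

    fn buffer_size(self) -> usize {
        match self {
            Self::GeneralPurpose => 1024 * 1024, // assumed 1 MiB
            Self::Throughput => 4 * 1024 * 1024, // assumed 4 MiB
            Self::LowLatency => 256 * 1024,      // assumed 256 KiB
        }
    }
}

/// Read the profile from the environment, defaulting to GeneralPurpose.
fn profile_from_env() -> WorkloadProfile {
    env::var("RUSTFS_BUFFER_PROFILE")
        .ok()
        .and_then(|name| WorkloadProfile::from_name(&name))
        .unwrap_or(WorkloadProfile::GeneralPurpose)
}

fn main() {
    let profile = profile_from_env();
    println!("active profile: {:?}, buffer = {} bytes", profile, profile.buffer_size());
}
```
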
weisd
85bc0ce2d5 fix: filemeta version handling and delete operations (#879)
* fix filemeta version

* fix clippy

* fix delete version

* fix clippy/test
2025-11-18 09:24:22 +08:00
Copilot
601f3456bc Fix large file upload freeze with adaptive buffer sizing (#869)
* Initial plan

* Fix large file upload freeze by increasing StreamReader buffer size

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Add comprehensive documentation for large file upload freeze fix

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* upgrade s3s version

* Fix compilation error: use BufReader instead of non-existent StreamReader::with_capacity

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Update documentation with correct BufReader implementation

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* add tokio feature `io-util`

* Implement adaptive buffer sizing based on file size

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Constants are managed uniformly and fmt code

* fix

* Fix: Trigger self-heal on read when shards missing from rejoined nodes (#871)

* Initial plan

* Fix: Trigger self-heal when missing shards detected during read

- Added proactive heal detection in get_object_with_fileinfo
- When reading an object, now checks if any shards are missing even if read succeeds
- Sends low-priority heal request to reconstruct missing shards on rejoined nodes
- This fixes the issue where data written during node outage is not healed when node rejoins

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix

* Unify CRC implementations to crc-fast (#873)

* Initial plan

* Replace CRC libraries with unified crc-fast implementation

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix

* fix: replace low to Normal

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-11-17 23:15:20 +08:00
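
A sketch of the buffered-stream fix noted above: tokio_util's StreamReader has no with_capacity constructor, so the incoming stream is wrapped in tokio::io::BufReader with a buffer chosen from the payload size. The size tiers here are assumptions, not the values used in #869:

```rust
use bytes::Bytes;
use futures::stream;
use tokio::io::{AsyncReadExt, BufReader};
use tokio_util::io::StreamReader;

/// Pick a read-buffer size from the (known) payload size: a sketch of
/// "adaptive buffer sizing based on file size"; the tiers are assumptions.
fn buffer_size_for(len: Option<u64>) -> usize {
    match len {
        Some(n) if n < 1024 * 1024 => 64 * 1024,       // small payloads: 64 KiB
        Some(n) if n < 64 * 1024 * 1024 => 512 * 1024, // medium payloads: 512 KiB
        _ => 2 * 1024 * 1024,                          // large or unknown: 2 MiB
    }
}

#[tokio::main]
async fn main() -> std::io::Result<()> {
    // Stand-in for the incoming S3 body stream.
    let body = stream::iter(vec![Ok::<Bytes, std::io::Error>(Bytes::from_static(b"hello world"))]);

    // StreamReader has no `with_capacity`; wrap it in tokio's BufReader to get
    // a configurable read buffer, as described in the entry above.
    let mut reader = BufReader::with_capacity(buffer_size_for(Some(11)), StreamReader::new(body));

    let mut out = Vec::new();
    reader.read_to_end(&mut out).await?;
    assert_eq!(out, b"hello world");
    Ok(())
}
```
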
weisd
1279baa72b fix replication (#875) 2025-11-17 17:37:41 +08:00
weisd
acdefb6703 fix read lock (#866) 2025-11-16 11:44:13 +08:00
Copilot
b7964081ce Fix KMS configuration synchronization across cluster nodes (#855)
* Initial plan

* Add KMS configuration persistence to cluster storage

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Apply code formatting to KMS configuration changes

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* add comment

* fix fmt

* fix

* Fix overlapping dependabot cargo configurations

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* improve code for comment and replace  `Once_Cell` to `std::sync::OnceLock`

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: loverustfs <155562731+loverustfs@users.noreply.github.com>
2025-11-16 00:05:03 +08:00
Nugine
f73fa59bf6 ci: fix dependabot (#860) 2025-11-15 22:35:59 +08:00
Nugine
0b1b7832fe ci: update s3s weekly (#858) 2025-11-15 22:05:03 +08:00
houseme
c242957c6f build(deps): bump the dependencies group with 8 updates (#857) 2025-11-15 19:51:07 +08:00
houseme
55e3a1f7e0 fix(audit): prevent state transition when no targets exist (#854)
Avoid setting AuditSystemState::Starting when target list is empty.
Now checks target availability before state transition, keeping the
system in Stopped state if no enabled targets are found.

- Check targets.is_empty() before setting Starting state
- Return early with Ok(()) when no targets exist
- Maintain consistent state machine behavior
- Prevent transient "Starting" state with no actual targets

Resolves issue where audit system would incorrectly enter Starting
state even when configuration contained no enabled targets.
2025-11-14 20:23:21 +08:00
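
A tiny sketch of the guard described above: an empty target list returns early with Ok(()) and never transitions to Starting. The types and function signature are illustrative, not the actual audit module:

```rust
enum AuditSystemState {
    Stopped,
    Starting,
}

fn start_audit(enabled_targets: &[String], state: &mut AuditSystemState) -> Result<(), String> {
    // Check target availability before any state transition: with no enabled
    // targets the system stays in Stopped instead of flickering to Starting.
    if enabled_targets.is_empty() {
        return Ok(());
    }
    *state = AuditSystemState::Starting;
    // ... start the targets ...
    Ok(())
}

fn main() {
    let mut state = AuditSystemState::Stopped;
    assert!(start_audit(&[], &mut state).is_ok());
    assert!(matches!(state, AuditSystemState::Stopped));
}
```
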
majinghe
3cf565e847 delete sink file path env and update readme file with container user change (#852)
* update container user change in readme file

* delete sink file path env vars

---------

Co-authored-by: houseme <housemecn@gmail.com>
2025-11-14 13:00:15 +08:00
houseme
9d553620cf remove linux dep and upgrade Protocol Buffers and FlatBuffers (#853) 2025-11-14 12:50:55 +08:00
houseme
51584986e1 feat(obs): unify metrics initialization and fix exporter move error (#851)
* feat(obs): unify metrics initialization and fix exporter move error

- Fix Rust E0382 (use after move) by removing duplicate MetricExporter consumption.
- Consolidate MeterProvider construction into single Recorder builder path.
- Remove redundant Recorder::builder(...).install_global() call.
- Ensure PeriodicReader setup is performed only once (HTTP + optional stdout).
- Set global meter provider and metrics recorder exactly once.
- Preserve existing behavior for stdout/file vs HTTP modes.
- Minor cleanup: consistent resource reuse and interval handling.

* update telemetry.rs

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* fix

* fix

* fix

* fix: modify logger level from error to event

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-11-14 00:50:07 +08:00
majinghe
93090adf7c enhance security context part for k8s deployment (#850) 2025-11-13 18:18:19 +08:00
houseme
d4817a4bea fix: modify logger from warn to info (#842)
* fix: modify logger from warn to info

* upgrade version
2025-11-12 11:13:38 +08:00
houseme
7e1a9e2ede 🔒 Upgrade Cryptography Libraries to Latest RC Versions (#837)
* fix

* chore: upgrade cryptography libraries to RC versions

- Upgrade aes-gcm to 0.11.0-rc.2 with rand_core support
- Upgrade chacha20poly1305 to 0.11.0-rc.2
- Upgrade argon2 to 0.6.0-rc.2 with std features
- Upgrade hmac to 0.13.0-rc.3
- Upgrade pbkdf2 to 0.13.0-rc.2
- Upgrade rsa to 0.10.0-rc.10
- Upgrade sha1 and sha2 to 0.11.0-rc.3
- Upgrade md-5 to 0.11.0-rc.3

These upgrades provide enhanced security features and performance
improvements while maintaining backward compatibility with existing
encryption workflows.

* add

* improve code

* fix
2025-11-11 21:10:03 +08:00
安正超
8a020ec4d9 wip (#830) 2025-11-11 09:34:58 +08:00
weisd
77a3489ed2 fix list object err (#831)
fix list object err (#831)

#827
#815
#635
#752
2025-11-10 23:42:15 +08:00
weisd
5941062909 fix (#828) 2025-11-10 19:22:58 +08:00
houseme
98be7df0f5 feat(storage): refactor audit and notification with OperationHelper (#825)
* improve code for audit

* improve code ecfs.rs

* improve code

* improve code for ecfs.rs

* feat(storage): refactor audit and notification with OperationHelper

This commit introduces a significant refactoring of the audit logging and event notification mechanisms within `ecfs.rs`.

The core of this change is the new `OperationHelper` struct, which encapsulates and simplifies the logic for both concerns. It replaces the previous `AuditHelper` and manual event dispatching.

Key improvements include:

- **Unified Handling**: `OperationHelper` manages both audit and notification builders, providing a single, consistent entry point for S3 operations.
- **RAII for Automation**: By leveraging the `Drop` trait, the helper automatically dispatches logs and notifications when it goes out of scope. This simplifies S3 method implementations and ensures cleanup even on early returns.
- **Fluent API**: A builder-like pattern with methods such as `.object()`, `.version_id()`, and `.suppress_event()` makes the code more readable and expressive.
- **Context-Aware Logic**: The helper's `.complete()` method intelligently populates log details based on the operation's `S3Result` and only triggers notifications on success.
- **Modular Design**: All helper logic is now isolated in `rustfs/src/storage/helper.rs`, improving separation of concerns and making `ecfs.rs` cleaner.

This refactoring significantly enhances code clarity, reduces boilerplate, and improves the robustness of logging and notification handling across the storage layer.

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* improve code for audit and notify

* fix

* fix

* fix
2025-11-10 17:30:50 +08:00
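
A minimal sketch of the OperationHelper pattern described above: a fluent builder carrying audit/notification context that dispatches via Drop, so early returns still emit the audit record. Only object(), version_id(), suppress_event(), and complete() are named in the commit; the fields, the bool argument to complete() (the real helper inspects the operation's S3Result), and the println! placeholders are illustrative:

```rust
/// Sketch of an RAII audit/notification helper for S3 operations.
struct OperationHelper {
    operation: &'static str,
    object: Option<String>,
    version_id: Option<String>,
    suppress_event: bool,
    completed: bool,
}

impl OperationHelper {
    fn new(operation: &'static str) -> Self {
        Self { operation, object: None, version_id: None, suppress_event: false, completed: false }
    }
    fn object(mut self, key: impl Into<String>) -> Self {
        self.object = Some(key.into());
        self
    }
    fn version_id(mut self, v: impl Into<String>) -> Self {
        self.version_id = Some(v.into());
        self
    }
    fn suppress_event(mut self) -> Self {
        self.suppress_event = true;
        self
    }
    /// Mark the operation finished; notifications only fire on success.
    fn complete(&mut self, success: bool) {
        self.completed = true;
        if success && !self.suppress_event {
            println!("notify: {} {:?} {:?}", self.operation, self.object, self.version_id);
        }
    }
}

impl Drop for OperationHelper {
    fn drop(&mut self) {
        // RAII: even if the handler returned early, the audit record still goes out.
        if !self.completed {
            println!("audit (incomplete): {} {:?}", self.operation, self.object);
        }
    }
}

fn main() {
    let mut helper = OperationHelper::new("s3:GetObject").object("bucket/key").version_id("v1");
    helper.complete(true);
}
```
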
houseme
b26aad4129 improve code for logger (#822)
* improve code for logger

* fix
2025-11-08 22:36:24 +08:00
Alex Bykov
5989589c3e Update configuration.md (#812)
Escaping Pipe Character in the table "CLI Flags..."

Co-authored-by: loverustfs <155562731+loverustfs@users.noreply.github.com>
2025-11-08 10:56:14 +08:00
majinghe
4716454faa add non root user support for container deployment (#817) 2025-11-08 10:00:14 +08:00
houseme
29056a767a Refactor Telemetry Initialization and Environment Utilities (#811)
* improve code for metrics

* improve code for metrics

* fix

* fix

* Refactor telemetry initialization and environment functions ordering

- Reorder functions in envs.rs by type size (8-bit to 64-bit, signed before unsigned) and add missing variants like get_env_opt_u16.
- Optimize init_telemetry to support three modes: stdout logging (default error level with span tracing), file rolling logs (size-based with retention), and HTTP-based observability with sub-endpoints (trace, metric, log) falling back to unified endpoint.
- Fix stdout logging issue by retaining WorkerGuard in OtelGuard to prevent premature release of async writer threads.
- Enhance observability mode with HTTP protocol, compression, and proper resource management.
- Update OtelGuard to include tracing_guard for stdout and flexi_logger_handles for file logging.
- Improve error handling and configuration extraction in OtelConfig.

* fix

* up

* fix

* fix

* improve code for obs

* fix

* fix
2025-11-07 20:01:54 +08:00
weisd
e823922654 feat:add api error message (#801)
* feat:add api error message
* fix: check input
* fix: test
2025-11-07 09:53:49 +08:00
shiro.lee
8203f9ff6f fix: when the Object Lock configuration does not exist, an error message should be returned (#771) (#798)
fix: when the Object Lock configuration does not exist, an error message should be returned (#771) (#798)
2025-11-05 23:48:54 +08:00
houseme
1b22a1e078 Refactor modify stdout (#797)
* fix

* fix
2025-11-05 20:04:28 +08:00
weisd
461d5dff86 fix list max keys (#795) 2025-11-05 15:30:32 +08:00
houseme
38f26b7c94 improve import,crate version,and copyright (#790) 2025-11-05 09:10:06 +08:00
安正超
eb7eb9c5a1 fix: resolve logic errors in ahm heal module (#788)
* fix: resolve logic errors in ahm heal module

- Fix response publishing logic in HealChannelProcessor to properly handle errors
- Fix negative index handling in DiskStatusChange event to fail fast instead of silently converting to 0
- Enhance timeout control in heal_erasure_set Step 3 loop to immediately respond to cancellation/timeout
- Add proper error propagation for task cancellation and timeout in bucket healing loop

* fix: stabilize performance impact measurement test

- Increase measurement count from 3 to 5 runs for better stability
- Increase workload from 5000 to 10000 operations for more accurate timing
- Use median of 5 measurements instead of single measurement
- Ensure with_scanner duration is at least baseline to avoid negative overhead
- Increase wait time for scanner state stabilization

* wip

* Update crates/ahm/src/heal/channel.rs

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* refactor: remove redundant ok_or_else + expect in event.rs

Replace redundant ok_or_else() + expect() pattern with
unwrap_or_else() + panic!() to avoid creating unnecessary Error
type when the value will panic anyway. This also defers error
message formatting until the error actually occurs.

* Update crates/ahm/src/heal/task.rs

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* fix(ahm): fix logic errors and add unit tests

- Fix panic in HealEvent::to_heal_request for invalid indices
- Replace unwrap() calls with proper error handling in resume.rs
- Fix race conditions and timeout calculation in task.rs
- Fix semaphore acquisition error handling in erasure_healer.rs
- Improve error message for large objects in storage.rs
- Add comprehensive unit tests for progress, event, and channel modules
- Fix clippy warning: move test module to end of file in heal_channel.rs

* style: apply cargo fmt formatting

* refactor(ahm): address copilot review suggestions

- Add comment to check_control_flags explaining why return value is discarded
- Fix hardcoded median index in performance test using constant and dynamic calculation

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-11-05 08:15:23 +08:00
houseme
d934e3905b Refactor telemetry initialization for non-production environments (#789)
* add dep `scopeguard`

* improve for tracing

* fix

* fix

* improve code for import

* add logger trace id

* fix

* fix

* fix

* fix

* fix
2025-11-05 00:55:08 +08:00
weisd
6617372b33 fix rmdir versionid (#784) 2025-11-03 18:23:16 +08:00
weisd
769778e565 fix iam (#783) 2025-11-03 17:39:51 +08:00
houseme
a7f5c4af46 fix windows response (#781) 2025-11-03 12:49:39 +08:00
dependabot[bot]
a9d5fbac54 build(deps): bump the dependencies group with 6 updates (#777)
* build(deps): bump the dependencies group with 6 updates

Bumps the dependencies group with 6 updates:

| Package | From | To |
| --- | --- | --- |
| [axum-extra](https://github.com/tokio-rs/axum) | `0.10.3` | `0.12.0` |
| [aws-config](https://github.com/smithy-lang/smithy-rs) | `1.8.8` | `1.8.10` |
| [aws-sdk-s3](https://github.com/awslabs/aws-sdk-rust) | `1.109.0` | `1.110.0` |
| [aws-smithy-types](https://github.com/smithy-lang/smithy-rs) | `1.3.3` | `1.3.4` |
| [clap](https://github.com/clap-rs/clap) | `4.5.50` | `4.5.51` |
| [matchit](https://github.com/ibraheemdev/matchit) | `0.8.4` | `0.9.0` |


Updates `axum-extra` from 0.10.3 to 0.12.0
- [Release notes](https://github.com/tokio-rs/axum/releases)
- [Changelog](https://github.com/tokio-rs/axum/blob/main/CHANGELOG.md)
- [Commits](https://github.com/tokio-rs/axum/compare/axum-extra-v0.10.3...axum-extra-v0.12.0)

Updates `aws-config` from 1.8.8 to 1.8.10
- [Release notes](https://github.com/smithy-lang/smithy-rs/releases)
- [Changelog](https://github.com/smithy-lang/smithy-rs/blob/main/CHANGELOG.md)
- [Commits](https://github.com/smithy-lang/smithy-rs/commits)

Updates `aws-sdk-s3` from 1.109.0 to 1.110.0
- [Release notes](https://github.com/awslabs/aws-sdk-rust/releases)
- [Commits](https://github.com/awslabs/aws-sdk-rust/commits)

Updates `aws-smithy-types` from 1.3.3 to 1.3.4
- [Release notes](https://github.com/smithy-lang/smithy-rs/releases)
- [Changelog](https://github.com/smithy-lang/smithy-rs/blob/main/CHANGELOG.md)
- [Commits](https://github.com/smithy-lang/smithy-rs/commits)

Updates `clap` from 4.5.50 to 4.5.51
- [Release notes](https://github.com/clap-rs/clap/releases)
- [Changelog](https://github.com/clap-rs/clap/blob/master/CHANGELOG.md)
- [Commits](https://github.com/clap-rs/clap/compare/clap_complete-v4.5.50...clap_complete-v4.5.51)

Updates `matchit` from 0.8.4 to 0.9.0
- [Release notes](https://github.com/ibraheemdev/matchit/releases)
- [Commits](https://github.com/ibraheemdev/matchit/compare/v0.8.4...v0.9.0)

---
updated-dependencies:
- dependency-name: axum-extra
  dependency-version: 0.12.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: dependencies
- dependency-name: aws-config
  dependency-version: 1.8.10
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
- dependency-name: aws-sdk-s3
  dependency-version: 1.110.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: dependencies
- dependency-name: aws-smithy-types
  dependency-version: 1.3.4
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
- dependency-name: clap
  dependency-version: 4.5.51
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
- dependency-name: matchit
  dependency-version: 0.9.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>

* upgrade crates version

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-11-03 00:34:54 +08:00
houseme
281e68c9bf fix (#776) 2025-11-01 09:28:46 +08:00
houseme
d30c42f85a feat(admin): Add admin v3 API routes and profiling endpoints for RustFS (#774)
* add Jemalloc

* feat: optimize AI rules with unified .rules.md (#401)

* feat: optimize AI rules with unified .rules.md and entry points

- Create .rules.md as the central AI coding rules file
- Add .copilot-rules.md as GitHub Copilot entry point
- Add CLAUDE.md as Claude AI entry point
- Incorporate principles from rustfs.com project
- Add three critical rules:
  1. Use English for all code comments and documentation
  2. Clean up temporary scripts after use
  3. Only make confident modifications

* Update CLAUDE.md

---------

Co-authored-by: Cursor Agent <cursoragent@cursor.com>

* feat: translate chinese to english (#402)

* Checkpoint before follow-up message

Co-authored-by: anzhengchao <anzhengchao@gmail.com>

* Translate project documentation and comments from Chinese to English

Co-authored-by: anzhengchao <anzhengchao@gmail.com>

* Fix typo: "unparseable" to "unparsable" in version test comment

Co-authored-by: anzhengchao <anzhengchao@gmail.com>

* Refactor compression test code with minor syntax improvements

Co-authored-by: anzhengchao <anzhengchao@gmail.com>

---------

Co-authored-by: Cursor Agent <cursoragent@cursor.com>

* fix: the automatic logout issue and user list display failure on Windows systems (#353) (#343) (#403)

Co-authored-by: 安正超 <anzhengchao@gmail.com>

* upgrade version

* improve code for profiling

* fix

* Initial plan

* feat: Implement layered DNS resolver with caching and validation

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* feat: Integrate DNS resolver into main application and fix formatting

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* feat: Implement enhanced DNS resolver with Moka cache and layered fallback

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* feat: Implement hickory-resolver with TLS support for enhanced DNS resolution

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* upgrade

* add .gitignore config

* fix

* add

* add

* up

* improve linux profiling

* fix

* fix

* fix

* feat(admin): Refactor profiling endpoints

Replaces the existing pprof profiling endpoints with new trigger-based APIs for CPU and memory profiling. This change simplifies the handler logic by moving the profiling implementation to a dedicated module.

A new handler file `admin/handlers/profile.rs` is created to contain the logic for these new endpoints. The core profiling functions are now expected to be in the `profiling` module, which the new handlers call to generate and save profile data.

* cargo shear --fix

* fix

* fix

* fix

---------

Co-authored-by: 安正超 <anzhengchao@gmail.com>
Co-authored-by: Cursor Agent <cursoragent@cursor.com>
Co-authored-by: shiro.lee <69624924+shiroleeee@users.noreply.github.com>
Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
2025-11-01 03:16:37 +08:00
Niklas Mollenhauer
79012be2c8 Add default storage class to ListObjectsV2 (#765)
* Add InvalidRangeSpec error

* Add EntityTooSmall to from_u32

* Add InvalidRangeSpec to from_u32

* Map InvalidRangeSpec to correct S3ErrorCode

* Return Error::InvalidRangeSpec

* Use auto implementation

* Add default storage class to ListObjectsV2

Resolves #764

* Add storage_class to response

* Make storage class optional so default won't be an empty string
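
A minimal sketch of the optional-with-default behaviour described above (illustrative helper; the actual response types come from s3s):

fn effective_storage_class(stored: Option<&str>) -> String {
    // None or an empty value falls back to the S3 default instead of an empty string.
    match stored {
        Some(s) if !s.is_empty() => s.to_string(),
        _ => "STANDARD".to_string(),
    }
}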

---------

Co-authored-by: houseme <housemecn@gmail.com>
2025-10-31 19:32:25 +08:00
loverustfs
325ff62684 Issue 762 (#763)
* Add InvalidRangeSpec error

* Add EntityTooSmall to from_u32

* Add InvalidRangeSpec to from_u32

* Map InvalidRangeSpec to correct S3ErrorCode

* Return Error::InvalidRangeSpec

* Use auto implementation

---------

Co-authored-by: Niklas Mollenhauer <nikeee@outlook.com>
2025-10-31 17:20:18 +08:00
安正超
f0c2ede7a7 Remove unnecessary tools folder in CI workflow (#770) 2025-10-31 16:44:08 +08:00
安正超
b9fd66c1cd Delete deploy/build/rustfs.run-zh.md (#757) 2025-10-30 13:56:26 +08:00
安正超
c43b11fb92 Delete deploy/build/rustfs-zh.service (#756) 2025-10-30 13:55:51 +08:00
安正超
d737a439d5 Delete deploy/config/rustfs-zh.env (#755) 2025-10-30 13:54:53 +08:00
houseme
0714c7a9ca modify logger level from info to error (#744)
* modify logger level from `info` to `error`

* fix test

* improve tokio runtime config

* add rustfs helm chart files (#747)

* add rustfs helm chart files

* update readme file with helm chart

* delete helm chart license file

* fix typo in readme file

* fix: restore localized samples in tests (#749)

* fix: restore required localized examples

* style: fix formatting issues

* improve code for Observability

* upgrade crates version

* fix

* up

* fix

---------

Co-authored-by: majinghe <42570491+majinghe@users.noreply.github.com>
Co-authored-by: 安正超 <anzhengchao@gmail.com>
2025-10-29 19:20:53 +08:00
loverustfs
2ceb65adb4 replace rustfs pic 2025-10-29 15:50:18 +08:00
安正超
dd47fcf2a8 fix: restore localized samples in tests (#749)
* fix: restore required localized examples

* style: fix formatting issues
2025-10-29 13:16:31 +08:00
majinghe
64ba52bc1e add rustfs helm chart files (#747)
* add rustfs helm chart files

* update readme file with helm chart

* delete helm chart license file

* fix typo in readme file
2025-10-29 12:23:21 +08:00
shiro.lee
d2ced233e5 fix: when the error returned by make_bucket is BucketExists, replace … (#735)
* fix: when the error returned by make_bucket is BucketExists, replace BucketAlreadyExists with BucketAlreadyOwnedByYou (#719)

* test: In the test_api_error_from_storage_error_mappings test method, modify the corresponding mapping relationships

---------

Co-authored-by: weisd <im@weisd.in>
2025-10-28 15:26:34 +08:00
weisd
40660e7b80 fix: scandir object (#733)
* fix: scandir object count

* fix: base64 list continuation_token
2025-10-28 15:02:43 +08:00
likewu
2aca1f77af Fix/ilm (#721)
* fix tip remote tier error
* fix transitioned_object
* fix filemeta
* add GCS R2
* add aliyun tencent huaweicloud azure gcs r2 backend tier
* fix signer
* change azure to s3
Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: loverustfs <155562731+loverustfs@users.noreply.github.com>
2025-10-27 20:23:50 +08:00
Ben Scholzen
6f3d2885cd fix: take content type from PutObjectInput instead of headers (#718)
fixes #716

Co-authored-by: loverustfs <155562731+loverustfs@users.noreply.github.com>
2025-10-26 21:44:54 +08:00
shiro.lee
6ab7619023 fix: multi-level objects created on Windows were not displayed (#661) (#723) 2025-10-26 12:00:13 +08:00
weisd
ed73e2b782 fix:add favicon.ico route (#713) 2025-10-25 16:11:18 +08:00
weisd
6a59c0a474 fix: multipart upload checksum validation (#712)
* fix multipart upload checksum
2025-10-24 18:23:32 +08:00
houseme
c5264f9703 improve code for metrics and switch tokio-tar to astral-tokio-tar (#705)
* improve code for metrics and switch tokio-tar to astral-tokio-tar

* remove log

* fix
2025-10-24 13:07:56 +08:00
DamonXue
b47765b4c0 docs: add Star History section to README files (#696)
Co-authored-by: 0xdx2 <xuedamon2@gmail.com>
Co-authored-by: loverustfs <155562731+loverustfs@users.noreply.github.com>
2025-10-24 08:58:58 +08:00
houseme
e22b24684f chore: bump dependencies, add metrics support, remove DNS resolver (#699)
* upgrade version

* add metrics

* remove dns resolver

* add metrics counter for create bucket

* fix

* fix

* fix
2025-10-24 00:16:17 +08:00
weisd
1d069fd351 Improve the peer client (#693) 2025-10-23 17:21:55 +08:00
houseme
416d3ad5b7 Refactor: Add observability enable flag, improve comments, remove unused config params, and enhance run function error logging. (#689)
* improve code for dns log

* fix

* Improve comments, remove unused parameters in config.rs (opt), add observability enable flag, and enhance error logging in run function execution.
2025-10-23 13:59:57 +08:00
weisd
f30698ec7f Refactor Console Server Architecture (#685)
* todo

* fix console server

* fix console server

* fix console server

* fix console server

* fix console server
2025-10-23 00:06:09 +08:00
houseme
7dcf01f127 feat: adjust metrics push interval to 3 seconds (#686)
- Reduce metrics push frequency from default to 3s for better performance
- Optimize resource utilization during metrics collection
- Improve real-time monitoring responsiveness

Related to admin metrics optimization on fix/admin-metrics branch
2025-10-22 23:47:11 +08:00
weisd
e524a106c5 add make bucket error logs (#683)
* add make bucket error logs
2025-10-22 16:23:08 +08:00
weisd
d9e5f5d2e3 fix (#682) 2025-10-22 10:35:40 +08:00
livelycode36
684e832530 fix: prevent duplicate data volumes in entrypoint.sh (#681) 2025-10-22 09:04:04 +08:00
weisd
a65856bdf4 Fix CRC32C Checksum Implementation and Enhance Authentication System (#678)
* fix: get_condition_values

* fix checksum crc32c

* fix clippy
2025-10-21 21:28:00 +08:00
weisd
2edb2929b2 fix: DataUsageInfo add list bucket permission (#674) 2025-10-21 10:05:54 +08:00
majinghe
14bc55479b fix docker healthcheck unhealthy issue (#672) 2025-10-21 09:39:15 +08:00
weisd
cd1e244c68 Refactor: Introduce content checksums and improve multipart/object metadata handling (#671)
* feat:  adapt to s3s typed etag support

* refactor: move replication struct to rustfs_filemeta, fix filemeta transition bug

* add head_object checksum, filter object metadata output

* fix multipart checksum

* fix multipart checksum

* add content md5,sha256 check

* fix test

* fix cargo

---------

Co-authored-by: overtrue <anzhengchao@gmail.com>
2025-10-20 23:46:13 +08:00
songhahaha66
46797dc815 fix(export): fix the policy and service account export (#665)
* fix(export): fix the policy export mechanism

* fix: correct service account check logic in IamSys
2025-10-20 19:40:54 +08:00
Nugine
7f24dbda19 build(deps): upgrade s3s (#667)
Co-authored-by: loverustfs <155562731+loverustfs@users.noreply.github.com>
2025-10-19 18:32:01 +08:00
loverustfs
ef11d3a2eb fix word errors 2025-10-19 18:13:58 +08:00
loverustfs
d1398cb3ab fix error 2025-10-19 18:10:45 +08:00
majinghe
95019c4cb5 add ansible installation with mnmd (#664)
* add ansible installation with mnmd

* change script install dir name
2025-10-18 22:20:17 +08:00
houseme
4168e6c180 chore(docs): move root examples to docs/examples/docker and update README (#663)
* chore(docs): move root `examples` to `docs/examples/docker` and update README

- Move root `examples/` contents into `docs/examples/docker/`.
- Update `docs/examples/README.md` to add migration note, new `docker/` entry and usage examples.
- Replace references from `examples/` to `docs/examples/docker/` where applicable.
- Reminder: verify CI and external links still point to the correct paths.

* fix
2025-10-17 17:17:36 +08:00
houseme
42d3645d6f fix(targets): make target removal and reload transactional; prevent reappearing entries (#662)
* feat: improve code for notify

* upgrade starshard version

* upgrade version

* Fix ETag format to comply with HTTP standards by wrapping with quotes (#592)

* Initial plan

* Fix ETag format to comply with HTTP standards by wrapping with quotes

Co-authored-by: overtrue <1472352+overtrue@users.noreply.github.com>

* bugfix

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: overtrue <1472352+overtrue@users.noreply.github.com>
Co-authored-by: overtrue <anzhengchao@gmail.com>

* Improve lock (#596)

* improve lock

Signed-off-by: Mu junxiang <1948535941@qq.com>

* feat(tests): add wait_for_object_absence helper and improve lifecycle test reliability

Signed-off-by: Mu junxiang <1948535941@qq.com>

* chore: remove dirty docs

Signed-off-by: Mu junxiang <1948535941@qq.com>

---------

Signed-off-by: Mu junxiang <1948535941@qq.com>

* feat(append): implement object append operations with state tracking (#599)

* feat(append): implement object append operations with state tracking

Signed-off-by: junxiang Mu <1948535941@qq.com>

* chore: rebase

Signed-off-by: junxiang Mu <1948535941@qq.com>

---------

Signed-off-by: junxiang Mu <1948535941@qq.com>

* build(deps): upgrade s3s (#595)

Co-authored-by: loverustfs <155562731+loverustfs@users.noreply.github.com>

* fix: validate mqtt broker

* improve code for `import`

* fix

* improve

* remove logger from `rustfs-obs` crate

* remove code for config Observability

* fix

* improve code

* fix comment

* up

* up

* upgrade version

* fix

* fmt

* upgrade tokio version to 1.48.0

* upgrade `datafusion` and `reed-solomon-simd` version

* fix

* fmt

* improve code for notify webhook example

* improve code

* fix

* fix

* fmt

---------

Signed-off-by: Mu junxiang <1948535941@qq.com>
Signed-off-by: junxiang Mu <1948535941@qq.com>
Co-authored-by: Copilot <198982749+Copilot@users.noreply.github.com>
Co-authored-by: overtrue <1472352+overtrue@users.noreply.github.com>
Co-authored-by: overtrue <anzhengchao@gmail.com>
Co-authored-by: guojidan <63799833+guojidan@users.noreply.github.com>
Co-authored-by: Nugine <nugine@foxmail.com>
Co-authored-by: loverustfs <155562731+loverustfs@users.noreply.github.com>
2025-10-17 15:34:53 +08:00
安正超
30e7f00b02 fix: update ahm integration test fixture (#659) 2025-10-17 09:13:56 +08:00
overtrue
58f8a8f46b fix: correct HTTP range suffix handling 2025-10-16 21:39:21 +08:00
gatewayJ
aae768f446 feat: Simple OPA support (#644)
* opa-feature

* Update crates/policy/src/policy/opa.rs

* add the content related to 'Copyright'

---------

Co-authored-by: root <root@debian.localdomain>
Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-10-16 10:35:26 +08:00
安正超
d447b3e426 feat: adapt to s3s typed etag support (#653)
* feat:  adapt to s3s typed etag support

* refactor: streamline etag handling
2025-10-15 21:27:20 +08:00
安正超
8f310cd4a8 test: allow mocking dns resolver (#656) 2025-10-15 21:24:03 +08:00
majinghe
8ed01a3e06 Refactor mnmd docker compose for extensibility (#652) 2025-10-15 03:48:05 +08:00
loverustfs
9e1739ed8d chore(docs): update README and README_ZH (#649) 2025-10-13 18:49:34 +08:00
loverustfs
7abbfc9c2c RustFS trending images
RustFS trending
2025-10-13 17:45:54 +08:00
安正超
639bf0c233 Revert "feat(append): implement object append operations with state tracking (#599)" (#646)
This reverts commit 4f73760a45.
2025-10-12 23:47:51 +08:00
Copilot
ad99019749 Add complete MNMD Docker deployment example with startup coordination and VolumeNotFound fix (#642)
* Initial plan

* Add MNMD Docker deployment example with 4 nodes x 4 drives

- Create docs/examples/mnmd/ directory structure
- Add docker-compose.yml with proper disk indexing (1..4)
- Add wait-and-start.sh for startup coordination
- Add README.md with usage instructions and alternatives
- Add CHECKLIST.md with step-by-step verification
- Fixes VolumeNotFound issue by using correct volume paths
- Implements health checks and startup ordering
- Uses service names for stable inter-node addressing

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Add docs/examples README as index for deployment examples

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Add automated test script for MNMD deployment

- Add test-deployment.sh with comprehensive validation
- Test container status, health, endpoints, connectivity
- Update README to reference test script
- Make script executable

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* improve code

* improve code

* improve dep crates `cargo shear --fix`

* upgrade aws-sdk-s3

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-10-12 13:15:14 +08:00
houseme
aac9b1edb7 chore: improve event and docker-compose, improve the permissions of the endpoint health interface, and upgrade otel from 0.30.0 to 0.31.0 (#620)
* feat: improve code for notify

* upgrade starshard version

* upgrade version

* Fix ETag format to comply with HTTP standards by wrapping with quotes (#592)

* Initial plan

* Fix ETag format to comply with HTTP standards by wrapping with quotes

Co-authored-by: overtrue <1472352+overtrue@users.noreply.github.com>

* bugfix

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: overtrue <1472352+overtrue@users.noreply.github.com>
Co-authored-by: overtrue <anzhengchao@gmail.com>

* Improve lock (#596)

* improve lock

Signed-off-by: Mu junxiang <1948535941@qq.com>

* feat(tests): add wait_for_object_absence helper and improve lifecycle test reliability

Signed-off-by: Mu junxiang <1948535941@qq.com>

* chore: remove dirty docs

Signed-off-by: Mu junxiang <1948535941@qq.com>

---------

Signed-off-by: Mu junxiang <1948535941@qq.com>

* feat(append): implement object append operations with state tracking (#599)

* feat(append): implement object append operations with state tracking

Signed-off-by: junxiang Mu <1948535941@qq.com>

* chore: rebase

Signed-off-by: junxiang Mu <1948535941@qq.com>

---------

Signed-off-by: junxiang Mu <1948535941@qq.com>

* build(deps): upgrade s3s (#595)

Co-authored-by: loverustfs <155562731+loverustfs@users.noreply.github.com>

* fix: validate mqtt broker

* improve code for `import`

* upgrade otel relation crates version

* fix:dep("jsonwebtoken") feature = 'rust_crypto'

* fix

* fix

* fix

* upgrade version

* improve code for ecfs

* chore: improve event and docker-compose, improve the permissions of the `endpoint` health interface

* fix

* fix

* fix

* fix

* improve code

* fix

---------

Signed-off-by: Mu junxiang <1948535941@qq.com>
Signed-off-by: junxiang Mu <1948535941@qq.com>
Co-authored-by: Copilot <198982749+Copilot@users.noreply.github.com>
Co-authored-by: overtrue <1472352+overtrue@users.noreply.github.com>
Co-authored-by: overtrue <anzhengchao@gmail.com>
Co-authored-by: guojidan <63799833+guojidan@users.noreply.github.com>
Co-authored-by: Nugine <nugine@foxmail.com>
Co-authored-by: loverustfs <155562731+loverustfs@users.noreply.github.com>
2025-10-11 09:08:25 +08:00
weisd
5689311cff fix:#630 (#633) 2025-10-10 15:16:28 +08:00
安正超
007d9c0b21 fix: normalize ETag comparison in multipart upload and replication (#627)
- Normalize ETags by removing quotes before comparison in complete_multipart_upload
- Fix ETag comparison in replication logic to handle quoted ETags from API responses
- Fix ETag comparison in transition object logic
- Add unit tests for trim_etag function

This fixes the ETag mismatch error when uploading large files (5GB+) via multipart upload,
which was caused by PR #592 adding quotes to ETag responses while internal storage remains unquoted.

Fixes #625
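
A minimal sketch of the normalization described above (the actual trim_etag implementation may differ):

/// Strip surrounding quotes so `"abc123"` and `abc123` compare as equal.
fn trim_etag(etag: &str) -> &str {
    etag.trim_matches('"')
}

fn etags_match(a: &str, b: &str) -> bool {
    trim_etag(a) == trim_etag(b)
}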
2025-10-08 21:19:57 +08:00
Nugine
626c7ed34a fix: CompleteMultipartUpload encryption (#626) 2025-10-08 20:27:40 +08:00
houseme
0e680eae31 fix typos and bump the dependencies group with 9 updates (#614)
* fix typos

* build(deps): bump the dependencies group with 9 updates (#613)

Bumps the dependencies group with 9 updates:

| Package | From | To |
| --- | --- | --- |
| [axum](https://github.com/tokio-rs/axum) | `0.8.4` | `0.8.6` |
| [axum-extra](https://github.com/tokio-rs/axum) | `0.10.1` | `0.10.3` |
| [regex](https://github.com/rust-lang/regex) | `1.11.2` | `1.11.3` |
| [serde](https://github.com/serde-rs/serde) | `1.0.226` | `1.0.228` |
| [shadow-rs](https://github.com/baoyachi/shadow-rs) | `1.3.0` | `1.4.0` |
| [sysinfo](https://github.com/GuillaumeGomez/sysinfo) | `0.37.0` | `0.37.1` |
| [thiserror](https://github.com/dtolnay/thiserror) | `2.0.16` | `2.0.17` |
| [tokio-rustls](https://github.com/rustls/tokio-rustls) | `0.26.3` | `0.26.4` |
| [zeroize](https://github.com/RustCrypto/utils) | `1.8.1` | `1.8.2` |


Updates `axum` from 0.8.4 to 0.8.6
- [Release notes](https://github.com/tokio-rs/axum/releases)
- [Changelog](https://github.com/tokio-rs/axum/blob/main/CHANGELOG.md)
- [Commits](https://github.com/tokio-rs/axum/compare/axum-v0.8.4...axum-v0.8.6)

Updates `axum-extra` from 0.10.1 to 0.10.3
- [Release notes](https://github.com/tokio-rs/axum/releases)
- [Changelog](https://github.com/tokio-rs/axum/blob/main/CHANGELOG.md)
- [Commits](https://github.com/tokio-rs/axum/compare/axum-extra-v0.10.1...axum-extra-v0.10.3)

Updates `regex` from 1.11.2 to 1.11.3
- [Release notes](https://github.com/rust-lang/regex/releases)
- [Changelog](https://github.com/rust-lang/regex/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/regex/compare/1.11.2...1.11.3)

Updates `serde` from 1.0.226 to 1.0.228
- [Release notes](https://github.com/serde-rs/serde/releases)
- [Commits](https://github.com/serde-rs/serde/compare/v1.0.226...v1.0.228)

Updates `shadow-rs` from 1.3.0 to 1.4.0
- [Release notes](https://github.com/baoyachi/shadow-rs/releases)
- [Commits](https://github.com/baoyachi/shadow-rs/compare/1.3.0...v1.4.0)

Updates `sysinfo` from 0.37.0 to 0.37.1
- [Changelog](https://github.com/GuillaumeGomez/sysinfo/blob/master/CHANGELOG.md)
- [Commits](https://github.com/GuillaumeGomez/sysinfo/compare/v0.37.0...v0.37.1)

Updates `thiserror` from 2.0.16 to 2.0.17
- [Release notes](https://github.com/dtolnay/thiserror/releases)
- [Commits](https://github.com/dtolnay/thiserror/compare/2.0.16...2.0.17)

Updates `tokio-rustls` from 0.26.3 to 0.26.4
- [Release notes](https://github.com/rustls/tokio-rustls/releases)
- [Commits](https://github.com/rustls/tokio-rustls/compare/v/0.26.3...v/0.26.4)

Updates `zeroize` from 1.8.1 to 1.8.2
- [Commits](https://github.com/RustCrypto/utils/compare/zeroize-v1.8.1...zeroize-v1.8.2)

---
updated-dependencies:
- dependency-name: axum
  dependency-version: 0.8.6
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
- dependency-name: axum-extra
  dependency-version: 0.10.3
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
- dependency-name: regex
  dependency-version: 1.11.3
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
- dependency-name: serde
  dependency-version: 1.0.228
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
- dependency-name: shadow-rs
  dependency-version: 1.4.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: dependencies
- dependency-name: sysinfo
  dependency-version: 0.37.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
- dependency-name: thiserror
  dependency-version: 2.0.17
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
- dependency-name: tokio-rustls
  dependency-version: 0.26.4
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
- dependency-name: zeroize
  dependency-version: 1.8.2
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-10-02 23:29:18 +08:00
weisd
7622b37f7b add iam notification (#604)
move tonic service to rustfs
2025-09-30 17:32:23 +08:00
Nugine
f1dd3a982e build(deps): upgrade s3s (#595)
Co-authored-by: loverustfs <155562731+loverustfs@users.noreply.github.com>
2025-09-28 21:10:42 +08:00
guojidan
4f73760a45 feat(append): implement object append operations with state tracking (#599)
* feat(append): implement object append operations with state tracking

Signed-off-by: junxiang Mu <1948535941@qq.com>

* chore: rebase

Signed-off-by: junxiang Mu <1948535941@qq.com>

---------

Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-09-27 20:06:26 -07:00
guojidan
be66cf8bd3 Improve lock (#596)
* improve lock

Signed-off-by: Mu junxiang <1948535941@qq.com>

* feat(tests): add wait_for_object_absence helper and improve lifecycle test reliability

Signed-off-by: Mu junxiang <1948535941@qq.com>

* chore: remove dirty docs

Signed-off-by: Mu junxiang <1948535941@qq.com>

---------

Signed-off-by: Mu junxiang <1948535941@qq.com>
2025-09-27 17:57:56 -07:00
Copilot
23b40d398f Fix ETag format to comply with HTTP standards by wrapping with quotes (#592)
* Initial plan

* Fix ETag format to comply with HTTP standards by wrapping with quotes

Co-authored-by: overtrue <1472352+overtrue@users.noreply.github.com>

* bugfix

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: overtrue <1472352+overtrue@users.noreply.github.com>
Co-authored-by: overtrue <anzhengchao@gmail.com>
2025-09-27 10:03:05 +08:00
weisd
90f21a9102 refactor: Reimplement bucket replication system with enhanced architecture (#590)
* feat:refactor replication

* use aws sdk for replication client

* refactor/replication

* merge main

* fix lifecycle test
2025-09-26 14:27:53 +08:00
guojidan
9b029d18b2 feat(lock): enhance lock management with timeout and ownership tracking (#589)
- Add lock timeout support and track acquisition time in lock state
- Improve lock conflict handling with detailed error messages
- Optimize lock reuse when already held by same owner
- Refactor lock state to store owner info and timeout duration
- Update all lock operations to handle new state structure
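
A minimal sketch of lock state that carries owner and timeout information, as described above (field names are illustrative):

use std::time::{Duration, Instant};

struct LockState {
    owner: String,
    acquired_at: Instant,
    timeout: Duration,
}

impl LockState {
    fn is_expired(&self) -> bool {
        self.acquired_at.elapsed() >= self.timeout
    }

    /// Re-entrant reuse: the same owner may keep a lock that has not yet expired.
    fn can_reuse(&self, requester: &str) -> bool {
        self.owner == requester && !self.is_expired()
    }
}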

Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-09-25 20:21:53 -07:00
houseme
9b7f4d477a Fix Tokio Runtime Initialization: Remove Private API Usage and Ensure IO Enabled (#587)
* fix: remove code

* improve code for tokio runtime config

* improve code for main

* fix: add tokio enable_all

* upgrade version

* improve for Cargo.toml
2025-09-24 22:23:31 +08:00
guojidan
12ecb36c6d Fix collect (#586)
* fix: fix datausageinfo

Signed-off-by: junxiang Mu <1948535941@qq.com>

* feat(data-usage): implement local disk snapshot aggregation for data usage statistics

Signed-off-by: junxiang Mu <1948535941@qq.com>

* feat(scanner): improve data usage collection with local scan aggregation

Signed-off-by: junxiang Mu <1948535941@qq.com>

* refactor: improve object existence check and code style

Signed-off-by: junxiang Mu <1948535941@qq.com>

---------

Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-09-24 02:48:23 -07:00
guojidan
ef0dbaaeb5 feat(encryption): add managed encryption support for SSE-S3 and SSE-KMS (#583)
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-09-24 02:09:04 -07:00
Copilot
29b0935be7 RustFS rustfs-audit Complete Implementation with Enterprise Observability (#557)
* Initial plan

* Implement core audit system with multi-target fan-out and configuration management

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Changes before error encountered

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Complete audit system with comprehensive observability and test coverage

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* improve code

* fix

* improve code

* fix test

* fix test

* fix

* add `rustfs-audit` to `rustfs`

* upgrade crate version

* fmt

* fmt

* fix

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-09-24 08:23:46 +08:00
安正超
08aeca89ef feat: Allow alpha versions to create latest Docker tag (#577)
Co-authored-by: Cursor Agent <cursoragent@cursor.com>
2025-09-23 19:39:00 +08:00
gatewayJ
d39ce6d8e9 fix: correct DeleteObjectVersionAction (#574)
Co-authored-by: loverustfs <155562731+loverustfs@users.noreply.github.com>
2025-09-23 09:49:41 +08:00
guojidan
9ddf6a011d feature: support kms && encrypt (#573)
* feat(kms): implement key management service with local and vault backends

Signed-off-by: junxiang Mu <1948535941@qq.com>

* feat(kms): enhance security with zeroize for sensitive data and improve key management

Signed-off-by: junxiang Mu <1948535941@qq.com>

* remove Hashi word

Signed-off-by: junxiang Mu <1948535941@qq.com>

* refactor: remove unused request structs from kms handlers

Signed-off-by: junxiang Mu <1948535941@qq.com>

---------

Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-09-22 17:53:05 +08:00
houseme
f7e188eee7 feat: upgrade datafusion to v50.0.0 and update related dependencies f… (#563)
* feat: upgrade datafusion to v50.0.0 and update related dependencies for compatibility

* fix

* fmt
2025-09-18 23:30:25 +08:00
houseme
4b9cb512f2 remove crate rustfs-audit-logger (#562) 2025-09-18 17:46:46 +08:00
Copilot
e5f0760009 Fix entrypoint.sh incorrectly passing logs directory as data volume with improved separation (#561)
* Initial plan

* Fix entrypoint.sh: separate log directory from data volumes

Co-authored-by: overtrue <1472352+overtrue@users.noreply.github.com>

* Improve separation: use functions and RUSTFS_OBS_LOG_DIRECTORY env var

Co-authored-by: overtrue <1472352+overtrue@users.noreply.github.com>

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: overtrue <1472352+overtrue@users.noreply.github.com>
2025-09-18 17:05:14 +08:00
houseme
a6c211f4ea Feature/add dns logs (#558)
* add logs

* improve code for dns and logger
2025-09-18 12:00:43 +08:00
shiro.lee
f049c656d9 fix: list_objects does not return common_prefixes field. (#543) (#554)
Co-authored-by: loverustfs <155562731+loverustfs@users.noreply.github.com>
2025-09-18 07:27:37 +08:00
majinghe
65dd947350 add tls support for docker compose (#553)
* add tls support for docker compose

* update docker compose file with comment
2025-09-17 22:45:23 +08:00
0xdx2
57f082ee2b fix: enforce max-keys limit to 1000 in S3 implementation (#549)
Co-authored-by: damon <damonxue2@gmail.com>
2025-09-16 18:02:24 +08:00
weisd
ae7e86d7ef refactor: simplify initialization flow and modernize string formatting (#548) 2025-09-16 15:44:50 +08:00
houseme
a12a3bedc3 feat(obs): optimize WriteMode selection logic in init_telemetry (#546)
- Refactor WriteMode selection to ensure all variables moved into thread closures are owned types, preventing lifetime issues.
- Simplify and clarify WriteMode assignment for production and non-production environments.
- Improve code readability and maintainability for logger initialization.
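
A minimal sketch of the "owned types moved into thread closures" point (illustrative names, not the actual telemetry code):

use std::thread;

fn spawn_writer(log_dir: &str) -> thread::JoinHandle<()> {
    // Clone into an owned String first; the `move` closure then owns its data
    // and is not tied to the lifetime of the borrowed `log_dir` argument.
    let dir: String = log_dir.to_owned();
    thread::spawn(move || {
        println!("writing logs under {dir}");
    })
}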
2025-09-16 08:25:37 +08:00
Copilot
cafec06b7e [Optimization] Enhance obs module telemetry.rs with environment-aware logging and production security (#539)
* Initial plan

* Implement environment-aware logging with production stdout auto-disable

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* add mimalloc crate

* fix

* improve code

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: loverustfs <155562731+loverustfs@users.noreply.github.com>
2025-09-15 14:52:20 +08:00
Parm Gill
1770679e66 Adding a toggle for update check (#532) 2025-09-14 22:26:48 +08:00
jon
a4fbf596e6 add startup logo (#528)
* add startup logo

* Replace logo ASCII art in main.rs

---------

Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: 安正超 <anzhengchao@gmail.com>
2025-09-14 12:04:00 +08:00
houseme
3f717292bf feat(console): support accessing console endpoint via port 9000 (#533)
* fix(main): update dns_init startup logic and remove unused code in http.rs

- Refactored the DNS resolver initialization logic in `main.rs` for improved startup reliability and error handling.
- Removed unused code from `http.rs` to keep the codebase clean and maintainable.

* feat(console): support accessing console endpoint via port 9000

- Added compatibility to allow console access through port 9000.
- Improved endpoint detection and routing for console service on standard and custom ports.
- Enhanced user experience for environments using port 9000 as the default access point.
2025-09-14 01:14:14 +08:00
houseme
73f0ecbf8f fix(main): update dns_init startup logic and remove unused code in http.rs (#531)
- Refactored the DNS resolver initialization logic in `main.rs` for improved startup reliability and error handling.
- Removed unused code from `http.rs` to keep the codebase clean and maintainable.
2025-09-13 23:43:25 +08:00
houseme
0c3079ae5e remove deps (#529) 2025-09-13 21:06:40 +08:00
majinghe
ebf30b0db5 update docker compose usage guidance in README file (#526) 2025-09-13 16:24:36 +08:00
Copilot
29c004d935 feat: enhance console separation with enterprise-grade security, monitoring, and advanced tower-http integration (#513)
* Initial plan

* feat: implement console service separation from endpoint

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* feat: add console separation documentation and tests

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* feat: enhance console separation with configurable CORS and improved Docker support

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* feat: implement enhanced console separation with security hardening and monitoring

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* refactor: implement console TLS following endpoint logic and improve configuration

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* add tower-http feature "timeout|limit"

* add dependencies crates `axum-server`

* refactor: reconstruct console server with enhanced tower-http features and environment variables

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* upgrade dep

* improve code for dns and console port `:9001`

* improve code

* fix

* docs: comprehensive improvement of console separation documentation and Docker deployment standards

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fmt

* add logs

* improve code for Config handler

* remove logs

* fix

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-09-13 14:48:14 +08:00
majinghe
4595bf7db6 fix docker compose running with no such file error (#519)
* fix docker compose running with no such file error

* fix observability docker compose
2025-09-13 13:04:06 +08:00
guojidan
f372ccf4a8 disable pprof on win (#524)
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-09-12 18:43:45 +08:00
guojidan
9ce867f585 feat(lock): Optimize lock management performance in high-concurrency scenarios (#523)
Increase the size of the notification pool to reduce the thundering herd effect under high concurrency
Implement an adaptive timeout mechanism that dynamically adjusts based on system load and priority
Add a lock protection mechanism to prevent premature cleanup of active locks
Add lock acquisition methods for high-priority and critical-priority locks
Improve the cleanup strategy to be more conservative under high load
Add detailed debug logs to assist in diagnosing lock issues

Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-09-12 18:17:07 +08:00
guojidan
124c31a68b refactor(profiling): Remove performance profiling support for Windows and optimize dependency management (#518)
Remove the pprof performance profiling functionality on the Windows platform, as this platform does not support the relevant features
Move the pprof dependency to the platform-specific configuration for non-Windows systems
Update the performance profiling endpoint handling logic to distinguish between platform support statuses
Add the CLAUDE.md document to explain project build and architecture information

Signed-off-by: RustFS Developer <dandan@rustfs.com>
Co-authored-by: RustFS Developer <dandan@rustfs.com>
2025-09-12 09:11:44 +08:00
guojidan
62a01f3801 Performance: improve (#514)
* Performance: improve

Signed-off-by: junxiang Mu <1948535941@qq.com>

* remove dirty

Signed-off-by: junxiang Mu <1948535941@qq.com>

* fix some err

Signed-off-by: junxiang Mu <1948535941@qq.com>

---------

Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-09-11 19:48:28 +08:00
weisd
70e6bec2a4 feat:admin auth (#512)
* feat:admin auth

* fix:#509
2025-09-11 16:49:07 +08:00
guojidan
cf863ba059 feat(lock): Add support for disabling lock manager (#511)
* feat(lock): Add support for disabling lock manager
Implement control of lock system activation and deactivation via environment variables
Add DisabledLockManager for lock-free operation scenarios
Introduce LockManager trait to uniformly manage different lock managers

Signed-off-by: junxiang Mu <1948535941@qq.com>

* refactor(lock): Optimize implementation of global lock manager and parsing of boolean environment variables
Refactor the implementation of the global lock manager: wrap FastObjectLockManager with Arc and add the as_fast_lock_manager method
Extract the boolean environment variable parsing logic into an independent function parse_bool_env_var

Signed-off-by: junxiang Mu <1948535941@qq.com>
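
A minimal sketch of a boolean environment-variable parser of the kind described above (the real parse_bool_env_var may accept different spellings):

fn parse_bool_env_var(name: &str, default: bool) -> bool {
    match std::env::var(name) {
        Ok(v) => matches!(v.trim().to_ascii_lowercase().as_str(), "1" | "true" | "yes" | "on"),
        Err(_) => default,
    }
}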

---------

Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-09-11 13:46:06 +08:00
guojidan
d4beb1cc0b Fix lock (#510)
* Refactor: reimplement lock

Signed-off-by: junxiang Mu <1948535941@qq.com>

* Fix: fix test case failed

Signed-off-by: junxiang Mu <1948535941@qq.com>

* Improve: lock perf

Signed-off-by: junxiang Mu <1948535941@qq.com>

* fix(lock): Fix resource cleanup issue when batch lock acquisition fails
Ensure that the locks already acquired are properly released when batch lock acquisition fails to avoid memory leaks
Improve the lock protection mechanism to prevent double release issues
Add complete Apache license declarations to all files

Signed-off-by: junxiang Mu <1948535941@qq.com>
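
A minimal sketch of the "release what was already acquired" rollback described above (illustrative types, not the actual lock crate API):

use std::collections::HashSet;
use std::sync::Mutex;

struct LockTable {
    held: Mutex<HashSet<String>>,
}

impl LockTable {
    fn try_lock(&self, key: &str) -> bool {
        self.held.lock().unwrap().insert(key.to_string())
    }

    fn unlock(&self, key: &str) {
        self.held.lock().unwrap().remove(key);
    }

    /// Acquire all keys or none: on the first failure, release the locks taken so far.
    fn try_lock_batch(&self, keys: &[&str]) -> bool {
        let mut acquired = Vec::new();
        for key in keys {
            if self.try_lock(key) {
                acquired.push(*key);
            } else {
                for k in &acquired {
                    self.unlock(k);
                }
                return false;
            }
        }
        true
    }
}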

---------

Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-09-11 12:10:35 +08:00
0xdx2
971e74281c fix:Fix some errors tested in mint (#507)
* refactor: replace new_object_layer_fn with get_validated_store for bucket validation

* feat: add validation for object tagging limits and uniqueness

* Apply suggestion from @Copilot

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Apply suggestion from @Copilot

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* feat: add EntityTooSmall error for multipart uploads and update error handling

* feat: validate max_parts input range for S3 multipart uploads

* Update rustfs/src/storage/ecfs.rs

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* fix: optimize tag key and value length validation checks

---------

Co-authored-by: damon <damonxue2@gmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-09-10 22:22:29 +08:00
Copilot
ca9a2b6ab9 feat: Implement enhanced DNS resolver with hickory-resolver, TLS support, and layered fallback for Kubernetes environments (#505)
* Initial plan

* feat: Implement layered DNS resolver with caching and validation

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* feat: Integrate DNS resolver into main application and fix formatting

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* feat: Implement enhanced DNS resolver with Moka cache and layered fallback

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* feat: Implement hickory-resolver with TLS support for enhanced DNS resolution

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* upgrade

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-09-10 21:16:33 +08:00
houseme
4e00110bfe add bucket notification configuration (#502) 2025-09-10 00:56:27 +08:00
安正超
9c97524c3b feat: consolidate AI rules into unified AGENTS.md (#501)
- Merge all AI rules from .rules.md, .cursorrules, and CLAUDE.md into AGENTS.md
- Add competitor keyword prohibition rules (minio, ceph, swift, etc.)
- Simplify rules by removing overly detailed code examples
- Integrate new development principles as highest priority
- Remove old tool-specific rule files
- Fix clippy warnings for format string improvements
2025-09-09 21:36:34 +08:00
guojidan
14a8802ce7 Fix: fix collect usage data (#500)
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-09-09 18:39:51 +08:00
guojidan
9d5ed1acac Feature/scanner performance optimization (#498)
* Refactor: reimplement scanner

Signed-off-by: RustFS Developer <dandan@rustfs.com>

* comment lock

Signed-off-by: junxiang Mu <1948535941@qq.com>

* remove dirty file

Signed-off-by: junxiang Mu <1948535941@qq.com>

* Fix: fix rebase

* fix(scanner): Improve error handling and logging

Signed-off-by: junxiang Mu <1948535941@qq.com>

---------

Signed-off-by: RustFS Developer <dandan@rustfs.com>
Signed-off-by: junxiang Mu <1948535941@qq.com>
Co-authored-by: RustFS Developer <dandan@rustfs.com>
2025-09-08 18:35:45 +08:00
0xdx2
44f3eb7244 Fix: add support for additional AWS S3 storage classes and validation logic (#487)
* Fix: add pagination fields to S3 response

* Fix: add support for additional AWS S3 storage classes and validation logic

* Fix: improve handling of optional fields in S3 response

---------

Co-authored-by: DamonXue <damonxue2@gmail.com>
2025-09-05 09:50:41 +08:00
weisd
01b2623f66 Fix/response (#485)
* fix:list_parts response

* fix:list_objects skip delete_marker
2025-09-03 17:52:31 +08:00
dependabot[bot]
cf4d63795f build(deps): bump crc-fast from 1.4.0 to 1.5.0 in the dependencies group (#481)
Bumps the dependencies group with 1 update: [crc-fast](https://github.com/awesomized/crc-fast-rust).


Updates `crc-fast` from 1.4.0 to 1.5.0
- [Release notes](https://github.com/awesomized/crc-fast-rust/releases)
- [Changelog](https://github.com/awesomized/crc-fast-rust/blob/main/CHANGELOG.md)
- [Commits](https://github.com/awesomized/crc-fast-rust/compare/1.4.0...1.5.0)

---
updated-dependencies:
- dependency-name: crc-fast
  dependency-version: 1.5.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: weisd <im@weisd.in>
2025-09-03 17:30:08 +08:00
WenTao
0efc818635 Fix Windows path separator issue using PathBuf (#482)
* Update mod.rs

The following code uses a separator that is not compatible with Windows:

format!("{}/{}", file_config.path.clone(), rustfs_config::DEFAULT_SINK_FILE_LOG_FILE)


Change it to the following code:


std::path::Path::new(&file_config.path)
    .join(rustfs_config::DEFAULT_SINK_FILE_LOG_FILE)
    .to_string_lossy()
    .to_string()

* Replaced the format! macro with PathBuf::join to fix the path separator issue on Windows. Tested on Windows 10 with Rust 1.85.0; paths now correctly use the \ separator.
2025-09-03 15:25:08 +08:00
weisd
c9d26c6e88 Fix/delete version (#484)
* fix:delete_version

* fix:test_lifecycle_expiry_basic

---------

Co-authored-by: likewu <likewu@126.com>
2025-09-03 15:12:58 +08:00
likewu
087df484a3 Fix/ilm (#478) 2025-09-02 18:18:26 +08:00
houseme
04bf4b0f98 feat: add S3 object legal hold and retention management APIs (#476)
* add bucket rule

* translation

* improve code for event notice add rule
2025-09-02 00:14:10 +08:00
likewu
7462be983a Feature up/ilm (#470)
* fix delete-marker expiration. add api_restore.

* time retry object upload

* lock file

* make fmt

* restore object

* serde-rs-xml -> quick-xml

* scanner_item prefix object_name

* object_path

* object_name

* fix version_purge_status

* old_dir None

Co-authored-by: houseme <housemecn@gmail.com>
2025-09-01 16:11:28 +08:00
houseme
5264503e47 build(deps): bump aws-config and clap upgrade version (#472) 2025-08-30 20:30:46 +08:00
dependabot[bot]
3b8cb0df41 build(deps): bump tracing-subscriber in the cargo group (#471)
Bumps the cargo group with 1 update: [tracing-subscriber](https://github.com/tokio-rs/tracing).


Updates `tracing-subscriber` from 0.3.19 to 0.3.20
- [Release notes](https://github.com/tokio-rs/tracing/releases)
- [Commits](https://github.com/tokio-rs/tracing/compare/tracing-subscriber-0.3.19...tracing-subscriber-0.3.20)

---
updated-dependencies:
- dependency-name: tracing-subscriber
  dependency-version: 0.3.20
  dependency-type: direct:production
  dependency-group: cargo
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-08-30 19:02:26 +08:00
houseme
9aebef31ff refactor(admin/event): optimize notification target routing and logic handling (#463)
* add

* fix

* add target arns list

* improve code for arns

* upgrade crates version

* fix

* improve import code mod.rs

* fix

* improve

* improve code

* improve code

* fix

* fmt
2025-08-27 09:39:25 +08:00
zzhpro
c2d782bed1 feat: support conditional writes (#409)
* feat: support conditional writes

* refactor: avoid using unwrap

* fix: obtain lock before check in CompleteMultiPartUpload

* refactor: do not obtain a lock when getting object meta

* fix: avoid using unwrap and modifying incoming arguments

* test: add e2e tests for conditional writes
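
A minimal sketch of an If-None-Match precondition check of the kind these commits add (illustrative types; not the actual handler code):

enum Precondition {
    Ok,
    Failed, // maps to HTTP 412 Precondition Failed
}

/// `If-None-Match: *` means "write only if the object does not exist";
/// a specific ETag means "write only if the current ETag differs".
fn check_if_none_match(header: Option<&str>, existing_etag: Option<&str>) -> Precondition {
    match (header, existing_etag) {
        (Some("*"), Some(_)) => Precondition::Failed,
        (Some(expected), Some(current))
            if expected.trim_matches('"') == current.trim_matches('"') =>
        {
            Precondition::Failed
        }
        _ => Precondition::Ok,
    }
}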

---------

Co-authored-by: guojidan <63799833+guojidan@users.noreply.github.com>
Co-authored-by: 安正超 <anzhengchao@gmail.com>
Co-authored-by: loverustfs <155562731+loverustfs@users.noreply.github.com>
2025-08-25 18:35:24 -07:00
likewu
e00f5be746 Fix/addtier (#454)
* fix retry

* fmt

* fix

* fix

* fix

---------

Co-authored-by: loverustfs <155562731+loverustfs@users.noreply.github.com>
2025-08-25 10:24:48 +08:00
shiro.lee
e23297f695 fix: add the default port number to the given server domains (#373) (#458) 2025-08-25 07:49:36 +08:00
0xdx2
d6840a6e04 feat: add support for range requests in upload_part_copy and implement parse_copy_source_range function (#453)
* feat: add support for range requests in upload_part_copy and implement parse_copy_source_range function

* style: format debug and error logging for improved readability

* feat: implement parse_copy_source_range function and improve error handling in range requests

* Update rustfs/src/storage/ecfs.rs

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* fix: correct return type in parse_copy_source_range function

* fix: remove unnecessary unwrap in parse_copy_source_range tests

* fix: simplify etag comparison in copy condition validation

---------

Co-authored-by: DamonXue <damonxue2@gmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: loverustfs <155562731+loverustfs@users.noreply.github.com>
2025-08-24 10:54:48 +08:00
houseme
3557a52dc4 Potential fix for code scanning alert no. 7: Workflow does not contain permissions (#457)
Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
2025-08-24 10:10:04 +08:00
houseme
fd2aab2bd9 fix: revert #443 #446 (#452)
* fix: revert #443 #446

* fix
2025-08-23 17:30:06 +08:00
houseme
f1c50fcb74 fix:Workflow does not contain permissions (#451) 2025-08-23 12:35:23 +08:00
houseme
bdcba3460e Potential fix for code scanning alert no. 13: Code injection (#447)
Co-authored-by: Copilot Autofix powered by AI <62310815+github-advanced-security[bot]@users.noreply.github.com>
Co-authored-by: 安正超 <anzhengchao@gmail.com>
2025-08-23 10:05:00 +08:00
houseme
8857f31b07 Comment out error log for missing subscribers (#448) 2025-08-22 21:15:46 +08:00
loverustfs
5b85bf7a00 lock: dedicate unlock worker to thread runtime; robust fallback in Drop (#446)
* lock: dedicate unlock worker to thread runtime; robust fallback in Drop

* Update crates/lock/src/guard.rs

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update crates/lock/src/guard.rs

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update crates/lock/src/guard.rs

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Refactor logging in UNLOCK_TX error handling

Removed redundant logging of lock_id in warning message.

---------

Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-22 16:51:56 +08:00
loverustfs
46bd75c0f8 ahm(scanner): throttle scanning, skip recently-modified objects, and … (#443)
* ahm(scanner): throttle scanning, skip recently-modified objects, and gate missing-object heals to deep mode; adjust conservative defaults

Signed-off-by: loverustfs <hello@rustfs.com>
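
A minimal sketch of the "skip recently-modified objects" rule mentioned above (threshold value illustrative):

use std::time::{Duration, SystemTime};

/// Skip objects modified within the threshold; they are likely still being
/// written or replicated, so scanning or healing them now would be wasted work.
fn should_skip(modified: SystemTime, threshold: Duration) -> bool {
    match SystemTime::now().duration_since(modified) {
        Ok(age) => age < threshold,
        Err(_) => true, // modification time is in the future; treat as too recent
    }
}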

* ecstore: enable virtual-host AUTO heuristics and URL building; signer: fix SigV2 canonical resource for vhost; add unit tests

* ecstore: AUTO virtual-host style URL selection; signer: SigV2 canonical resource fixes for vhost; tests added.
ahm: fix clippy drop_non_drop; integration tests robust to existing bucket; ignore flaky lifecycle test.
Makefile: test target falls back to cargo test when nextest missing.
pre-commit: all checks green.

---------

Signed-off-by: loverustfs <hello@rustfs.com>
2025-08-22 16:03:29 +08:00
houseme
5fc5dd0fd9 Remove rustfs-gui module (#445)
This commit completely removes the rustfs-gui module from the project. The deletion includes:

- All source code files (*.rs) and associated resources
- GUI-specific dependencies from Cargo.toml
- Build scripts and configuration files specific to the GUI module
- Documentation and assets related to the graphical interface

The removal is performed because:
- The GUI component is no longer maintained
- Focus is shifting to core functionality and CLI interface
- Limited resources available for GUI development and maintenance

The core filesystem functionality remains available through the rustfs library and CLI interface.
2025-08-22 09:15:22 +08:00
houseme
adc07e5209 feat(targets): extract targets module into a standalone crate (#441)
* init audit logger module

* add audit webhook default config kvs

* feat: Add comprehensive tests for authentication module (#309)

* feat: add comprehensive tests for authentication module

- Add 33 unit tests covering all public functions in auth.rs
- Test IAMAuth struct creation and secret key validation
- Test check_claims_from_token with various credential types and scenarios
- Test session token extraction from headers and query parameters
- Test condition values generation for different user types
- Test query parameter parsing with edge cases
- Test Credentials helper methods (is_expired, is_temp, is_service_account)
- Ensure tests handle global state dependencies gracefully
- All tests pass successfully with 100% coverage of testable functions

* style: fix code formatting issues

* Add verification script for checking PR branch statuses and tests

Co-authored-by: anzhengchao <anzhengchao@gmail.com>

* fix: resolve clippy uninlined format args warning

---------

Co-authored-by: Cursor Agent <cursoragent@cursor.com>

* feat: add basic tests for core storage module (#313)

* feat: add basic tests for core storage module

- Add 6 unit tests for FS struct and basic functionality
- Test FS creation, Debug and Clone trait implementations
- Test RUSTFS_OWNER constant definition and values
- Test S3 error code creation and handling
- Test compression format detection for common file types
- Include comprehensive documentation about integration test needs

Note: Full S3 API testing requires complex setup with storage backend,
global configuration, and network infrastructure - better suited for
integration tests rather than unit tests.

* style: fix code formatting issues

* fix: resolve clippy warnings in storage tests

---------

Co-authored-by: Cursor Agent <cursoragent@cursor.com>

* feat: add tests for admin handlers module (#314)

* feat: add tests for admin handlers module

- Add 5 new unit tests for admin handler functionality
- Test AccountInfo struct creation, serialization and default values
- Test creation of all admin handler structs (13 handlers)
- Test HealOpts JSON serialization and deserialization
- Test HealOpts URL encoding/decoding with proper field types
- Maintain existing test while adding comprehensive coverage
- Include documentation about integration test requirements

All tests pass successfully with proper error handling for complex dependencies.

* style: fix code formatting issues

* fix: resolve clippy warnings in admin handlers tests

---------

Co-authored-by: Cursor Agent <cursoragent@cursor.com>

* build(deps): bump the dependencies group with 3 updates (#326)

* perf: avoid transmitting parity shards when the object is good (#322)

* upgrade version

* Fix: fix data integrity check

Signed-off-by: junxiang Mu <1948535941@qq.com>

* Fix: Separate Clippy's fix and check commands into two commands.

Signed-off-by: junxiang Mu <1948535941@qq.com>

* fix: miss inline metadata (#345)

* Update dependabot.yml

* fix: Fixed an issue where the list_objects_v2 API did not return dire… (#352)

* fix: Fixed an issue where the list_objects_v2 API did not return directory names when they conflicted with file names in the same bucket (e.g., test/ vs. test.txt, aaa/ vs. aaa.csv) (#335)

* fix: adjusted the order of directory listings

* init

* fix

* fix

* feat: add docker usage for rustfs mcp (#365)

* feat: enhance metadata extraction with object name for MIME type detection

Signed-off-by: junxiang Mu <1948535941@qq.com>

* Feature: lock support auto release

Signed-off-by: junxiang Mu <1948535941@qq.com>

* improve lock

Signed-off-by: junxiang Mu <1948535941@qq.com>

* Fix: fix scanner detect

Signed-off-by: junxiang Mu <1948535941@qq.com>

* Fix: clippy && fmt

Signed-off-by: junxiang Mu <1948535941@qq.com>

* refactor(ecstore): Optimize memory usage for object integrity verification

Change the object integrity verification from reading all data to streaming processing to avoid memory overflow caused by large objects.

Modify the TLS key log check to use environment variables directly instead of configuration constants.

Add memory limits for object data reading in the AHM module.

Signed-off-by: junxiang Mu <1948535941@qq.com>
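
A minimal sketch of the streaming verification described above, assuming the sha2 crate is available (the actual AHM/ecstore code differs):

use sha2::{Digest, Sha256};
use std::io::{self, Read};

/// Hash the object in fixed-size chunks so memory use stays bounded
/// regardless of object size.
fn streaming_sha256<R: Read>(mut reader: R) -> io::Result<Vec<u8>> {
    let mut hasher = Sha256::new();
    let mut buf = [0u8; 64 * 1024];
    loop {
        let n = reader.read(&mut buf)?;
        if n == 0 {
            break;
        }
        hasher.update(&buf[..n]);
    }
    Ok(hasher.finalize().to_vec())
}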

* Chore: reduce PR template checklist

Signed-off-by: junxiang Mu <1948535941@qq.com>

* Chore: remove comment code (#376)

Signed-off-by: junxiang Mu <1948535941@qq.com>

* chore: upgrade actions/checkout from v4 to v5 (#381)

* chore: upgrade actions/checkout from v4 to v5

- Update GitHub Actions checkout action version
- Ensure compatibility with latest workflow features
- Maintain existing checkout behavior and configuration

* upgrade version

* fix

* add and improve code for notify

* feat: extend rustfs mcp with bucket creation and deletion (#416)

* feat: extend rustfs mcp with bucket creation and deletion

* update file to fix pipeline error

* change variable name to fix pipeline error

* fix(ecstore): add async-recursion to resolve nightly trait solver reg… (#415)

* fix(ecstore): add async-recursion to resolve nightly trait solver regression

The newest nightly compiler switched to the new trait solver, which
currently rejects async recursive functions that were previously accepted.
This causes the following compilation failures:

- `LocalDisk::delete_file()`
- `LocalDisk::scan_dir()`

Add `async-recursion` as a workspace dependency and annotate both functions with `#[async_recursion]` so that the crate compiles cleanly with the latest nightly and will continue to build once the new solver lands in stable.

Signed-off-by: reigadegr <2722688642@qq.com>

* fix: resolve duplicate bound error in scan_dir function

Replaced inline trait bounds with where clause to avoid duplication caused by macro expansion.
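A minimal sketch of the pattern the two messages above describe, assuming the `async-recursion` crate and tokio; the function below is illustrative and does not reproduce `LocalDisk::scan_dir()`. The generic bound sits in a `where` clause rather than inline, which is the shape the follow-up fix settled on after the macro expansion duplicated inline bounds:

```rust
use async_recursion::async_recursion;
use std::path::{Path, PathBuf};

/// Illustrative recursive directory scan; not the real LocalDisk method.
#[async_recursion]
async fn scan_dir<F>(dir: PathBuf, on_file: F) -> std::io::Result<()>
where
    // Bounds live in a `where` clause so the macro expansion does not
    // end up duplicating inline bounds on the desugared function.
    F: Fn(&Path) + Send + Sync + Clone + 'static,
{
    let mut entries = tokio::fs::read_dir(&dir).await?;
    while let Some(entry) = entries.next_entry().await? {
        let path = entry.path();
        if entry.file_type().await?.is_dir() {
            scan_dir(path, on_file.clone()).await?;
        } else {
            on_file(&path);
        }
    }
    Ok(())
}
```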

Signed-off-by: reigadegr <2722688642@qq.com>

---------

Signed-off-by: reigadegr <2722688642@qq.com>
Co-authored-by: 安正超 <anzhengchao@gmail.com>

* fix:make bucket exists (#428)

* feat: include user-defined metadata in S3 response (#431)

* fix: simplify Docker entrypoint following efficient user switching pattern (#421)

* fix: simplify Docker entrypoint following efficient user switching pattern

- Remove ALL file permission modifications (no chown at all)
- Use chroot --userspec or gosu to switch user context
- Extremely simple and fast implementation
- Zero filesystem modifications for permissions

Fixes #388

* Update entrypoint.sh

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update entrypoint.sh

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update entrypoint.sh

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* wip

* wip

* wip

---------

Co-authored-by: Cursor Agent <cursoragent@cursor.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* docs: update doc/docker-data-dir README.md (#432)

* add targets crates

* feat(targets): extract targets module into a standalone crate

- Move all target-related code (MQTT, Webhook, etc.) into a new `targets` crate
- Update imports and dependencies to reference the new crate
- Refactor interfaces to ensure compatibility with the new crate structure
- Adjust Cargo.toml and workspace configuration accordingly

* fix

* fix

---------

Signed-off-by: junxiang Mu <1948535941@qq.com>
Signed-off-by: reigadegr <2722688642@qq.com>
Co-authored-by: 安正超 <anzhengchao@gmail.com>
Co-authored-by: Cursor Agent <cursoragent@cursor.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: zzhpro <56196563+zzhpro@users.noreply.github.com>
Co-authored-by: junxiang Mu <1948535941@qq.com>
Co-authored-by: weisd <im@weisd.in>
Co-authored-by: shiro.lee <69624924+shiroleeee@users.noreply.github.com>
Co-authored-by: majinghe <42570491+majinghe@users.noreply.github.com>
Co-authored-by: guojidan <63799833+guojidan@users.noreply.github.com>
Co-authored-by: reigadegr <103645642+reigadegr@users.noreply.github.com>
Co-authored-by: 0xdx2 <xuedamon2@gmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-21 22:33:07 +08:00
安正超
357cced49c Replace prints with logs and fix grammar (#437)
* refactor: replace print statements with proper logging and fix grammar

- Fix English grammar errors in existing log messages
- Add tracing imports where needed
- Improve log message clarity and consistency
- Follow project logging best practices using tracing crate
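To make the change concrete, here is a minimal sketch of the replacement pattern using the tracing crate; the function and field names are invented for the example and are not taken from the codebase:

```rust
use tracing::{error, info};

/// Hypothetical helper showing the print-to-log replacement pattern.
fn report_upload(bucket: &str, object: &str, result: &Result<(), std::io::Error>) {
    // Before: println!("uploaded {} to {}", object, bucket);
    // After: structured, leveled logging via tracing.
    match result {
        Ok(()) => info!(bucket, object, "upload completed"),
        Err(err) => error!(bucket, object, error = %err, "upload failed"),
    }
}
```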

* fix: resolve clippy warnings and format code

- Fix unused import warnings by making test imports conditional with #[cfg(test)]
- Fix unused variable warning by prefixing with underscore
- Run cargo fmt to fix formatting issues
- Ensure all code passes clippy checks with -D warnings flag

* refactor: move tracing::debug import into test module

Move the tracing::debug import from file-level #[cfg(test)] into the test module itself for better code organization and consistency with other test modules
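A small sketch of the refactoring, assuming nothing beyond the tracing crate; the test itself is a placeholder, not the actual user_agent.rs test:

```rust
// Before: a file-level, test-only import that needed its own cfg gate.
// #[cfg(test)]
// use tracing::debug;

#[cfg(test)]
mod tests {
    // After: the import lives inside the test module, so release builds
    // never see it and no extra attribute is needed.
    use tracing::debug;

    #[test]
    fn logs_on_test_path() {
        debug!("running a test-only code path");
    }
}
```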

* Checkpoint before follow-up message

Co-authored-by: anzhengchao <anzhengchao@gmail.com>

* refactor: move tracing::debug import into test module in user_agent.rs

Complete the refactoring by moving the tracing::debug import from file-level #[cfg(test)] into the test module for consistency across all test files

---------

Co-authored-by: Cursor Agent <cursoragent@cursor.com>
2025-08-21 05:49:40 +08:00
Csrayz
a104c33974 [FEAT] add error message (#435)
When the bucket is not found, return a helpful message
2025-08-20 23:49:59 +08:00
安正超
516e00f15f fix: Dockerfile with error permission change. (#436)
* fix: dockerfile and permission error.

* fix: dockerfile and permission error.
2025-08-20 23:32:03 +08:00
安正超
a64c3c28b8 docs: update doc/docker-data-dir README.md (#432) 2025-08-20 00:00:20 +08:00
安正超
e9c9a2d1f2 fix: simplify Docker entrypoint following efficient user switching pattern (#421)
* fix: simplify Docker entrypoint following efficient user switching pattern

- Remove ALL file permission modifications (no chown at all)
- Use chroot --userspec or gosu to switch user context
- Extremely simple and fast implementation
- Zero filesystem modifications for permissions

Fixes #388

* Update entrypoint.sh

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update entrypoint.sh

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update entrypoint.sh

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* wip

* wip

* wip

---------

Co-authored-by: Cursor Agent <cursoragent@cursor.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-19 22:58:54 +08:00
0xdx2
3ebab98d2d feat: include user-defined metadata in S3 response (#431) 2025-08-19 22:09:50 +08:00
weisd
10c949af62 fix:make bucket exists (#428) 2025-08-19 16:14:59 +08:00
reigadegr
4a3325276d fix(ecstore): add async-recursion to resolve nightly trait solver reg… (#415)
* fix(ecstore): add async-recursion to resolve nightly trait solver regression

The newest nightly compiler switched to the new trait solver, which
currently rejects async recursive functions that were previously accepted.
This causes the following compilation failures:

- `LocalDisk::delete_file()`
- `LocalDisk::scan_dir()`

Add `async-recursion` as a workspace dependency and annotate both functions with `#[async_recursion]` so that the crate compiles cleanly with the latest nightly and will continue to build once the new solver lands in stable.

Signed-off-by: reigadegr <2722688642@qq.com>

* fix: resolve duplicate bound error in scan_dir function

Replaced inline trait bounds with where clause to avoid duplication caused by macro expansion.

Signed-off-by: reigadegr <2722688642@qq.com>

---------

Signed-off-by: reigadegr <2722688642@qq.com>
Co-authored-by: 安正超 <anzhengchao@gmail.com>
2025-08-18 20:58:05 +08:00
majinghe
c5f6c66f72 feat: extend rustfs mcp with bucket creation and deletion (#416)
* feat: extend rustfs mcp with bucket creation and deletion

* update file to fix pipeline error

* change variable name to fix pipeline error
2025-08-18 09:06:55 +08:00
shiro.lee
c7c149975b fix: the automatic logout issue and user list display failure on Windows systems (#353) (#343) (#403)
Co-authored-by: 安正超 <anzhengchao@gmail.com>
2025-08-14 00:20:27 +08:00
安正超
d552210b59 feat: translate chinese to english (#402)
* Checkpoint before follow-up message

Co-authored-by: anzhengchao <anzhengchao@gmail.com>

* Translate project documentation and comments from Chinese to English

Co-authored-by: anzhengchao <anzhengchao@gmail.com>

* Fix typo: "unparseable" to "unparsable" in version test comment

Co-authored-by: anzhengchao <anzhengchao@gmail.com>

* Refactor compression test code with minor syntax improvements

Co-authored-by: anzhengchao <anzhengchao@gmail.com>

---------

Co-authored-by: Cursor Agent <cursoragent@cursor.com>
2025-08-14 00:19:01 +08:00
安正超
581607da6a feat: optimize AI rules with unified .rules.md (#401)
* feat: optimize AI rules with unified .rules.md and entry points

- Create .rules.md as the central AI coding rules file
- Add .copilot-rules.md as GitHub Copilot entry point
- Add CLAUDE.md as Claude AI entry point
- Incorporate principles from rustfs.com project
- Add three critical rules:
  1. Use English for all code comments and documentation
  2. Clean up temporary scripts after use
  3. Only make confident modifications

* Update CLAUDE.md

---------

Co-authored-by: Cursor Agent <cursoragent@cursor.com>
2025-08-14 00:18:09 +08:00
安正超
e95107f7d6 fix: separate RELEASE tag and VERSION in Docker build (#399)
- RELEASE: GitHub release tag without 'v' prefix (e.g., 1.0.0-alpha.42)
- VERSION: filename version with 'v' prefix (e.g., v1.0.0-alpha.42)
- Download URL uses RELEASE for path, VERSION for filename
- Fixes incorrect URL generation that was adding extra 'v' prefix
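The Dockerfile logic itself is shell, but the mapping can be sketched as a tiny Rust helper; the artifact filename pattern and target triple below are assumptions, and only the RELEASE-in-path / VERSION-in-filename split comes from the commit message:

```rust
/// Illustrative only: RELEASE forms the tag path segment (no 'v' prefix),
/// VERSION forms the filename segment (with 'v' prefix). The artifact
/// naming scheme here is assumed, not taken from the real Dockerfile.
fn release_download_url(release: &str, version: &str, target: &str) -> String {
    format!("https://github.com/rustfs/rustfs/releases/download/{release}/rustfs-{target}-{version}.zip")
}

fn main() {
    let url = release_download_url("1.0.0-alpha.42", "v1.0.0-alpha.42", "x86_64-unknown-linux-musl");
    println!("{url}");
}
```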

Co-authored-by: Cursor Agent <cursoragent@cursor.com>
2025-08-13 22:49:48 +08:00
安正超
a693cb52f3 feat: change Docker build to download from GitHub releases instead of dl.rustfs.com (#398)
- Modified Dockerfile to download pre-built binaries from GitHub releases
- For latest releases, use GitHub API to find the correct download URL
- For specific versions, construct the GitHub release URL directly
- Updated docker-buildx.sh script messages to reflect new download source
- This change addresses security concerns about potential tampering with binaries from dl.rustfs.com

Co-authored-by: Cursor Agent <cursoragent@cursor.com>
2025-08-13 22:00:41 +08:00
houseme
2c7366038e modify protobuf version from to 2025-08-13 01:01:50 +08:00
houseme
1cc6dfde87 modify protobuf version from 31.1 to 31.0 2025-08-13 00:58:22 +08:00
weisd
387f4faf78 fix:rm object versions (#385) 2025-08-12 15:33:47 +08:00
houseme
0f7093c5f9 chore: upgrade actions/checkout from v4 to v5 (#381)
* chore: upgrade actions/checkout from v4 to v5

- Update GitHub Actions checkout action version
- Ensure compatibility with latest workflow features
- Maintain existing checkout behavior and configuration

* upgrade version
2025-08-12 11:17:58 +08:00
guojidan
6a5c0055e7 Chore: remove comment code (#376)
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-08-11 08:57:33 +08:00
guojidan
76288f2501 Merge pull request #372 from guojidan/fix-scanner
refactor(ecstore): Optimize memory usage for object integrity verification
2025-08-10 06:44:05 -07:00
junxiang Mu
3497ccfada Chore: reduce PR template checklist
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-08-10 21:29:30 +08:00
junxiang Mu
24e3d3a2ce refactor(ecstore): Optimize memory usage for object integrity verification
Change the object integrity verification from reading all data to streaming processing to avoid memory overflow caused by large objects.

Modify the TLS key log check to use environment variables directly instead of configuration constants.

Add memory limits for object data reading in the AHM module.

Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-08-10 21:24:15 +08:00
guojidan
ebad748cdc Merge pull request #368 from guojidan/fix-sql
Fix scanner && lock
2025-08-09 06:37:36 -07:00
junxiang Mu
b7e56ed92c Fix: clippy && fmt
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-08-09 21:16:56 +08:00
junxiang Mu
4811632751 Fix: fix scanner detect
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-08-09 21:06:17 +08:00
junxiang Mu
374a702f04 improve lock
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-08-09 21:05:46 +08:00
junxiang Mu
e369e9f481 Feature: lock support auto release
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-08-09 17:52:08 +08:00
guojidan
fe2e4a2274 Merge pull request #367 from guojidan/fix-sql
feat: enhance metadata extraction with object name for MIME type dete…
2025-08-08 21:53:12 -07:00
junxiang Mu
b391272e94 feat: enhance metadata extraction with object name for MIME type detection
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-08-09 12:29:04 +08:00
majinghe
c55c7a6373 feat: add docker usage for rustfs mcp (#365) 2025-08-08 17:18:20 +08:00
houseme
67f1c371a9 upgrade version 2025-08-08 11:33:32 +08:00
guojidan
d987686c14 feat(lifecycle): Implement object lifecycle management functionality (#358)
* feat(lifecycle): Implement object lifecycle management functionality

Add a lifecycle module to automatically handle object expiration and transition during scanning
Modify the file metadata cache module to be publicly visible to support lifecycle operations
Adjust the scanning interval to a shorter time for testing lifecycle rules
Implement the parsing and execution logic for S3 lifecycle configurations
Add integration tests to verify the lifecycle expiration functionality
Update dependencies to support the new lifecycle features
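A cut-down sketch of what an expiration decision during a scan can look like; the rule struct and field names are simplified stand-ins (the real lifecycle module models full S3 lifecycle configurations), and chrono is assumed here only because nearby commits mention it:

```rust
use chrono::{DateTime, Duration, Utc};

/// Simplified stand-in for an S3 lifecycle expiration rule.
struct ExpirationRule {
    prefix: String,
    days: i64,
}

/// Decide during a scan whether an object matched by the rule should expire.
fn is_expired(rule: &ExpirationRule, key: &str, last_modified: DateTime<Utc>) -> bool {
    key.starts_with(&rule.prefix) && Utc::now() - last_modified >= Duration::days(rule.days)
}
```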

Signed-off-by: junxiang Mu <1948535941@qq.com>

* fix cargo dependencies

Signed-off-by: junxiang Mu <1948535941@qq.com>

* fix fmt

Signed-off-by: junxiang Mu <1948535941@qq.com>

---------

Signed-off-by: junxiang Mu <1948535941@qq.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-08-08 10:51:02 +08:00
houseme
48a9707110 fix: add tokio-test (#363)
* fix: add tokio-test

* fix: "called `unwrap` on `v` after checking its variant with `is_some`"

    = help: try using `if let` or `match`
    = help: for further information visit https://rust-lang.github.io/rust-clippy/master/index.html#unnecessary_unwrap
    = note: `-D clippy::unnecessary-unwrap` implied by `-D warnings`
    = help: to override `-D warnings` add `#[allow(clippy::unnecessary_unwrap)]`
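For readers unfamiliar with this lint, the suggested rewrite looks roughly like the following (names are illustrative):

```rust
fn length_of(v: Option<&str>) -> usize {
    // Before (triggers clippy::unnecessary_unwrap):
    // if v.is_some() { v.unwrap().len() } else { 0 }

    // After: `if let` checks the variant and binds the value in one step.
    if let Some(s) = v { s.len() } else { 0 }
}
```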

* fmt

* set toolchain 1.88.0

* fmt

* fix: clippy
2025-08-08 10:23:22 +08:00
bestgopher
b89450f54d replace make with just (#349) 2025-08-07 22:37:05 +08:00
houseme
e0c99bced4 chore: add tls log and removing unused crates (#359)
* chore: add tls log

* improve code for http

* improve code dependencies for `cargo.toml` and removing unused crates

* modify name

* improve code

* fix

* Update crates/config/src/constants/env.rs

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* improve code

* fix

* add `is_enabled` and `is_disabled`

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-08-07 19:02:09 +08:00
houseme
130f85a575 chore: add tls log (#357) 2025-08-07 17:33:57 +08:00
shiro.lee
c42fbed3d2 fix: Fixed an issue where the list_objects_v2 API did not return dire… (#352)
* fix: Fixed an issue where the list_objects_v2 API did not return directory names when they conflicted with file names in the same bucket (e.g., test/ vs. test.txt, aaa/ vs. aaa.csv) (#335)

* fix: adjusted the order of directory listings
2025-08-07 11:05:05 +08:00
安正超
fd539f0f0a Update dependabot.yml 2025-08-06 22:55:52 +08:00
weisd
9aba89a12c fix: miss inline metadata (#345) 2025-08-06 11:45:23 +08:00
guojidan
7b27b29e3a Merge pull request #344 from guojidan/bug-fix
Fix: fix data integrity check
2025-08-05 20:31:10 -07:00
junxiang Mu
7ef014a433 Fix: Separate Clippy's fix and check commands into two commands.
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-08-06 11:22:08 +08:00
junxiang Mu
1b88714d27 Fix: fix data integrity check
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-08-06 11:03:29 +08:00
zzhpro
b119894425 perf: avoid transmitting parity shards when the object is good (#322) 2025-08-02 14:37:43 +08:00
dependabot[bot]
a37aa664f5 build(deps): bump the dependencies group with 3 updates (#326) 2025-08-02 06:44:16 +08:00
安正超
9b8abbb009 feat: add tests for admin handlers module (#314)
* feat: add tests for admin handlers module

- Add 5 new unit tests for admin handler functionality
- Test AccountInfo struct creation, serialization and default values
- Test creation of all admin handler structs (13 handlers)
- Test HealOpts JSON serialization and deserialization
- Test HealOpts URL encoding/decoding with proper field types
- Maintain existing test while adding comprehensive coverage
- Include documentation about integration test requirements

All tests pass successfully with proper error handling for complex dependencies.

* style: fix code formatting issues

* fix: resolve clippy warnings in admin handlers tests

---------

Co-authored-by: Cursor Agent <cursoragent@cursor.com>
2025-08-02 06:38:35 +08:00
安正超
3e5a48af65 feat: add basic tests for core storage module (#313)
* feat: add basic tests for core storage module

- Add 6 unit tests for FS struct and basic functionality
- Test FS creation, Debug and Clone trait implementations
- Test RUSTFS_OWNER constant definition and values
- Test S3 error code creation and handling
- Test compression format detection for common file types
- Include comprehensive documentation about integration test needs

Note: Full S3 API testing requires complex setup with storage backend,
global configuration, and network infrastructure - better suited for
integration tests rather than unit tests.

* style: fix code formatting issues

* fix: resolve clippy warnings in storage tests

---------

Co-authored-by: Cursor Agent <cursoragent@cursor.com>
2025-08-02 06:37:31 +08:00
安正超
d5aef963f9 feat: Add comprehensive tests for authentication module (#309)
* feat: add comprehensive tests for authentication module

- Add 33 unit tests covering all public functions in auth.rs
- Test IAMAuth struct creation and secret key validation
- Test check_claims_from_token with various credential types and scenarios
- Test session token extraction from headers and query parameters
- Test condition values generation for different user types
- Test query parameter parsing with edge cases
- Test Credentials helper methods (is_expired, is_temp, is_service_account)
- Ensure tests handle global state dependencies gracefully
- All tests pass successfully with 100% coverage of testable functions

* style: fix code formatting issues

* Add verification script for checking PR branch statuses and tests

Co-authored-by: anzhengchao <anzhengchao@gmail.com>

* fix: resolve clippy uninlined format args warning

---------

Co-authored-by: Cursor Agent <cursoragent@cursor.com>
2025-08-02 06:36:45 +08:00
houseme
6c37e1cb2a refactor: replace lazy_static with LazyLock (#318)
* refactor: replace `lazy_static` with `LazyLock`

Replace `lazy_static` with `LazyLock`.

Compile time may reduce a little.

See https://github.com/rust-lang-nursery/lazy-static.rs/issues/214
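A minimal before/after sketch of the migration; the map contents are invented for the example:

```rust
use std::collections::HashMap;
use std::sync::LazyLock;

// Before (with the lazy_static crate):
// lazy_static! {
//     static ref DEFAULTS: HashMap<&'static str, &'static str> = {
//         let mut m = HashMap::new();
//         m.insert("region", "us-east-1");
//         m
//     };
// }

// After: std-only, no macro, still initialized lazily on first access.
static DEFAULTS: LazyLock<HashMap<&'static str, &'static str>> = LazyLock::new(|| {
    let mut m = HashMap::new();
    m.insert("region", "us-east-1");
    m
});

fn default_region() -> &'static str {
    DEFAULTS.get("region").copied().unwrap_or("us-east-1")
}
```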

* fmt

* fix
2025-07-31 14:25:39 +08:00
0xdx2
e9d7e211b9 fix:Add etag to get object response
fix: Add etag to get object response
2025-07-31 11:31:15 +08:00
0xdx2
45bbd1e5c4 Add etag to get object response
Add etag to get object response
2025-07-31 11:20:10 +08:00
0xdx2
57d196771a Merge pull request #312 from rustfs/0xdx2-s3s_xmlns
fix: update s3s version to solve xml namespace type attribute bug.
2025-07-30 23:53:56 +08:00
0xdx2
6202f50e15 fix: update s3s version to solve xml namespace type attribute bug.
update s3s version to solve xml namespace type attribute bug.
2025-07-30 23:40:43 +08:00
houseme
c5df1f92c2 refactor: replace lazy_static with LazyLock and notify crate registry create_targets_from_config (#311)
* improve code for notify

* improve code for logger and fix typo (#272)

* Add GNU to  build.yml (#275)

* fix unzip error

* fix url change error

fix url change error

* Simplify user experience and integrate console and endpoint

Simplify user experience and integrate console and endpoint

* Add gnu to  build.yml

* upgrade version

* feat: add `cargo clippy --fix --allow-dirty` to pre-commit command (#282)

Resolves #277

- Add --fix flag to automatically fix clippy warnings
- Add --allow-dirty flag to run on dirty Git trees
- Improves code quality in pre-commit workflow

* fix: the issue where preview fails when the path length exceeds 255 characters (#280)

* fix

* fix: improve Windows build support and CI/CD workflow (#283)

- Fix Windows zip command issue by using PowerShell Compress-Archive
- Add Windows support for OSS upload with ossutil
- Replace Chinese comments with English in build.yml
- Fix bash syntax error in package_zip function
- Improve code formatting and consistency
- Update various configuration files for better cross-platform support

Resolves Windows build failures in GitHub Actions.

* fix: update link in README.md leading to a 404 error (#285)

* add rustfs.spec for rustfs (#103)

add support on loongarch64

* improve cargo.lock

* build(deps): bump the dependencies group with 5 updates (#289)

Bumps the dependencies group with 5 updates:

| Package | From | To |
| --- | --- | --- |
| [hyper-util](https://github.com/hyperium/hyper-util) | `0.1.15` | `0.1.16` |
| [rand](https://github.com/rust-random/rand) | `0.9.1` | `0.9.2` |
| [serde_json](https://github.com/serde-rs/json) | `1.0.140` | `1.0.141` |
| [strum](https://github.com/Peternator7/strum) | `0.27.1` | `0.27.2` |
| [sysinfo](https://github.com/GuillaumeGomez/sysinfo) | `0.36.0` | `0.36.1` |


Updates `hyper-util` from 0.1.15 to 0.1.16
- [Release notes](https://github.com/hyperium/hyper-util/releases)
- [Changelog](https://github.com/hyperium/hyper-util/blob/master/CHANGELOG.md)
- [Commits](https://github.com/hyperium/hyper-util/compare/v0.1.15...v0.1.16)

Updates `rand` from 0.9.1 to 0.9.2
- [Release notes](https://github.com/rust-random/rand/releases)
- [Changelog](https://github.com/rust-random/rand/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-random/rand/compare/rand_core-0.9.1...rand_core-0.9.2)

Updates `serde_json` from 1.0.140 to 1.0.141
- [Release notes](https://github.com/serde-rs/json/releases)
- [Commits](https://github.com/serde-rs/json/compare/v1.0.140...v1.0.141)

Updates `strum` from 0.27.1 to 0.27.2
- [Release notes](https://github.com/Peternator7/strum/releases)
- [Changelog](https://github.com/Peternator7/strum/blob/master/CHANGELOG.md)
- [Commits](https://github.com/Peternator7/strum/compare/v0.27.1...v0.27.2)

Updates `sysinfo` from 0.36.0 to 0.36.1
- [Changelog](https://github.com/GuillaumeGomez/sysinfo/blob/master/CHANGELOG.md)
- [Commits](https://github.com/GuillaumeGomez/sysinfo/compare/v0.36.0...v0.36.1)

---
updated-dependencies:
- dependency-name: hyper-util
  dependency-version: 0.1.16
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
- dependency-name: rand
  dependency-version: 0.9.2
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
- dependency-name: serde_json
  dependency-version: 1.0.141
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
- dependency-name: strum
  dependency-version: 0.27.2
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
- dependency-name: sysinfo
  dependency-version: 0.36.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>

* improve code for logger

* improve

* upgrade

* refactor: optimize the build workflow, unify latest file handling and simplify artifact upload (#293)

* Refactor: DatabaseManagerSystem as global

Signed-off-by: junxiang Mu <1948535941@qq.com>

* fix: fmt

Signed-off-by: junxiang Mu <1948535941@qq.com>

* Test: add e2e_test for s3select

Signed-off-by: junxiang Mu <1948535941@qq.com>

* Test: add test script for e2e

Signed-off-by: junxiang Mu <1948535941@qq.com>

* improve code for registry and integration

* improve code for registry `create_targets_from_config`

* fix

* Feature up/ilm (#305)

* fix

* fix

* fix

* fix delete-marker expiration. add api_restore.

* fix

* time retry object upload

* lock file

* make fmt

* fix

* restore object

* fix

* fix

* serde-rs-xml -> quick-xml

* fix

* checksum

* fix

* fix

* fix

* fix

* fix

* fix

* fix

* transfer lang to english

* upgrade clap version from 4.5.41 to 4.5.42

* refactor: replace `lazy_static` with `LazyLock`

* add router

* fix: modify comment

* improve code

* fix typos

* fix

* fix: modify name and fmt

* improve code for registry

* fix test

---------

Signed-off-by: dependabot[bot] <support@github.com>
Signed-off-by: junxiang Mu <1948535941@qq.com>
Co-authored-by: loverustfs <155562731+loverustfs@users.noreply.github.com>
Co-authored-by: 安正超 <anzhengchao@gmail.com>
Co-authored-by: shiro.lee <69624924+shiroleeee@users.noreply.github.com>
Co-authored-by: Marco Orlandin <mipnamic@mipnamic.net>
Co-authored-by: zhangwenlong <zhangwenlong@loongson.cn>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: junxiang Mu <1948535941@qq.com>
Co-authored-by: likewu <likewu@126.com>
2025-07-30 19:02:10 +08:00
wangsl
4f1770d3fe feat:add mcp integration (#300)
* add list_buckets mcp server

* add list_objects mcp

* add upload object mcp

* add get object mcp

* add list_buckets mcp server

* fix: resolve clippy warnings in rustfs-mcp-server

* fix: rename mcp package

* fix

* fix:remove useless comment

* feat:add mcp doc
2025-07-30 14:25:01 +08:00
likewu
d56cee26db Feature up/ilm (#305)
* fix

* fix

* fix

* fix delete-marker expiration. add api_restore.

* fix

* time retry object upload

* lock file

* make fmt

* fix

* restore object

* fix

* fix

* serde-rs-xml -> quick-xml

* fix

* checksum

* fix

* fix

* fix

* fix

* fix

* fix

* fix
2025-07-29 14:21:19 +08:00
weisd
56fd8132e9 fix:#303 returns empty when querying an empty or not dir (#304) 2025-07-28 16:17:40 +08:00
guojidan
35daa74430 Merge pull request #302 from guojidan/lock
Lock: add transactional
2025-07-28 12:00:44 +08:00
junxiang Mu
dc156fb4cd Fix: clippy
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-28 11:38:42 +08:00
junxiang Mu
de905a878c Cargo: use workspace dependence
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-28 11:02:40 +08:00
junxiang Mu
f3252f989b Test: Add e2e test case for lock transactional
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-28 11:00:10 +08:00
junxiang Mu
01a2afca9a lock: Add transactional
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-28 10:59:43 +08:00
guojidan
a4fe68ad21 Merge pull request #301 from guojidan/improve-sql
s3Select: add unit test case
2025-07-28 09:56:10 +08:00
junxiang Mu
c03f86b23c s3Select: add unit test case
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-28 09:19:47 +08:00
guojidan
5667f324ae Merge pull request #297 from guojidan/improve-sql
Test: Add e2e_test case for sql && add script for e2e_test
2025-07-25 17:16:41 +08:00
junxiang Mu
bcd806796f Test: add test script for e2e
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-25 16:52:06 +08:00
junxiang Mu
612404c47f Test: add e2e_test for s3select
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-25 15:07:44 +08:00
guojidan
85388262b3 Merge pull request #294 from guojidan/improve-sql
Refactor: DatabaseManagerSystem as global
2025-07-25 08:33:54 +08:00
junxiang Mu
25a4503285 fix: fmt
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-25 08:18:14 +08:00
安正超
526c4d5a61 refactor: optimize the build workflow, unify latest file handling and simplify artifact upload (#293) 2025-07-25 01:10:04 +08:00
junxiang Mu
addc964d56 Refactor: DatabaseManagerSystem as global
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 17:12:51 +08:00
loverustfs
371119f733 GNU to MUSL modify Dockerfile 2025-07-24 16:36:15 +08:00
guojidan
021abc0398 Merge pull request #292 from guojidan/Arc
Chore: remove dirty file(cache.rs)
2025-07-24 16:32:20 +08:00
junxiang Mu
0672b6dd3e Chore: remove dirty file(cache.rs)
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 14:57:48 +08:00
guojidan
1372dc2857 Merge pull request #288 from guojidan/scanner
Refactor: Scanner
2025-07-24 14:42:54 +08:00
houseme
77bc9af109 Update Cargo.toml 2025-07-24 14:14:12 +08:00
junxiang Mu
91b1c84430 rebase
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 12:18:05 +08:00
junxiang Mu
b667927216 fix fmt
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 12:14:28 +08:00
junxiang Mu
29795fac51 fix Cargo.toml
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 12:14:28 +08:00
junxiang Mu
2ce7e01f55 Chore: remove dirty file(heal)
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 12:14:27 +08:00
junxiang Mu
4fefd63a5b rebase
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 12:14:05 +08:00
junxiang Mu
2a8c46874d fix: auto heal when xl.meta lose
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 12:14:05 +08:00
junxiang Mu
b8b5511b68 fix: heal data part lose
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 12:14:05 +08:00
junxiang Mu
bdaee228db fix(ahm): adjust test expectations for missing xl.meta recovery scenario
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 12:14:05 +08:00
junxiang Mu
d562620e99 fix: implement uses_data_dir method
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 12:14:05 +08:00
junxiang Mu
69b0c828c9 fix: scanner add heal bucket
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 12:14:05 +08:00
junxiang Mu
2bfd1efb9b Fix: fix add heal_manager into scanner when scanner start
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 12:14:05 +08:00
junxiang Mu
0854e6b921 Chore: rename init_heal_manager_with_channel
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 12:14:05 +08:00
junxiang Mu
b907f4e61b refactor(ahm): remove obsolete scanner/data_usage.rs after data usage refactor
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 12:14:05 +08:00
junxiang Mu
6ec568459c chore: update admin handlers, lockfile, and minor fixes
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 12:14:05 +08:00
junxiang Mu
ea210d52dc refactor(heal): unify heal request interface, add disk field, update ahm/ecstore/common for erasure set healing
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 12:14:03 +08:00
junxiang Mu
3d3c6e4e06 chore(protos): update proto definitions, remove ns_scanner, fix codegen and formatting
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 12:12:49 +08:00
junxiang Mu
e7d0a8d4b9 feat: integrate global metrics system into AHM scanner
- Add global metrics system to common crate for cross-module usage
- Integrate global metrics collection into AHM scanner operations
- Update ECStore to use common metrics system instead of local implementation
- Add chrono dependency to AHM crate for timestamp handling
- Re-export IlmAction from common metrics in ECStore lifecycle module
- Update scanner methods to use global metrics for cycle, disk, and volume scans
- Maintain backward compatibility with local metrics collector
- Fix clippy warnings and ensure proper code formatting

This change enables unified metrics collection across the entire RustFS system,
allowing better monitoring and observability of scanner operations.

Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 12:12:49 +08:00
junxiang Mu
7d3b2b774c fix heal disk
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 12:12:49 +08:00
junxiang Mu
aed8f52423 refactor: integrate disk healing into erasure set healing
- Remove HealType::Disk and related disk-specific healing methods
- Integrate disk format healing into heal_erasure_set with include_format_heal option
- Update auto disk scanner to use ErasureSet heal type instead of Disk heal
- Fix disk status change event handling to use ErasureSet heal requests
- Add proper bucket list retrieval for auto healing scenarios
- Update data scanner to submit ErasureSet heal tasks for offline disks
- Remove duplicate healing logic between Disk and ErasureSet types
- Ensure all healing operations go through unified ErasureSet healing path
2025-07-24 12:12:49 +08:00
junxiang Mu
c49414f6ac fix: resolve test conflicts and improve data scanner functionality
- Fix multi-threaded test conflicts in AHM heal integration tests
- Remove global environment sharing to prevent test state pollution
- Fix test_all_disk_method by clearing global disk map before test
- Improve data scanner and cache value implementations
- Update dependencies and resolve clippy warnings

Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 12:12:49 +08:00
junxiang Mu
8e766b90cd feat: implement heal channel mechanism for admin-ahm communication
- Add global unbounded channel in common crate for heal requests
- Implement channel processor in ahm to handle heal commands
- Add Start/Query/Cancel commands support via channel
- Integrate heal manager initialization in main.rs
- Replace direct MRF calls with channel-based heal requests in ecstore
- Support advanced heal options including pool_index and set_index
- Enable admin handlers to send heal requests via channel
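A self-contained sketch of the channel shape described above, assuming tokio; `HealCommand` and its variants are hypothetical names standing in for the real request types in the common crate:

```rust
use tokio::sync::mpsc;

/// Hypothetical command shape; the real heal channel defines its own types.
#[derive(Debug)]
enum HealCommand {
    Start { bucket: String, object: Option<String> },
    Query { task_id: u64 },
    Cancel { task_id: u64 },
}

#[tokio::main]
async fn main() {
    // Global in the real code; local here to keep the sketch self-contained.
    let (tx, mut rx) = mpsc::unbounded_channel::<HealCommand>();

    // Channel processor (in the real layout this lives in the ahm crate).
    let processor = tokio::spawn(async move {
        while let Some(cmd) = rx.recv().await {
            match cmd {
                HealCommand::Start { bucket, object } => {
                    println!("start heal: {bucket}/{object:?}");
                }
                HealCommand::Query { task_id } => println!("query task {task_id}"),
                HealCommand::Cancel { task_id } => println!("cancel task {task_id}"),
            }
        }
    });

    // Producers (admin handlers, MRF paths) just send requests over the channel.
    tx.send(HealCommand::Start { bucket: "demo".into(), object: None }).unwrap();
    drop(tx); // close the channel so the processor exits
    processor.await.unwrap();
}
```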
2025-07-24 12:12:49 +08:00
junxiang Mu
3409cd8dff feat(ahm): add HealingTracker support & complete fresh-disk healing
• Introduce ecstore HealingTracker into ahm crate; load/init/save tracker
• Re-implement heal_fresh_disk to use heal_erasure_set with tracker
• Enhance auto-disk scanner: detect unformatted disks via get_disk_id()
• Remove DataUsageCache handling for now
• Refactor imports & types, clean up duplicate constants
2025-07-24 12:12:49 +08:00
junxiang Mu
f4973a681c feat: implement complete ahm heal system with ecstore integration
- Add comprehensive heal storage API with ECStore integration
- Implement heal object, bucket, disk, metadata, and EC decode operations
- Add heal task management with progress tracking and statistics
- Optimize heal manager by removing unnecessary workers
- Add integration tests for core heal functionality (heal_object, heal_bucket, heal_format)
- Integrate with ecstore's native heal commands for actual repair operations

Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 12:12:49 +08:00
junxiang Mu
4fb3d187d0 feat: implement heal subsystem for automatic data repair
- Add heal module with core types (HealType, HealRequest, HealTask)
- Implement HealManager for task scheduling and execution
- Add HealStorageAPI trait and ECStoreHealStorage implementation
- Integrate heal capabilities into scanner for automatic repair
- Support multiple heal types: object, bucket, disk, metadata, MRF, EC decode
- Add progress tracking and event system for heal operations
- Merge heal and scanner error types for unified error handling
- Include comprehensive logging and metrics for heal operations

Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 12:12:49 +08:00
dandan
0aff736efd Chore: fix ref and fix comment
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 12:12:49 +08:00
dandan
2aa7a631ef feat: refactor scanner module and add data usage statistics
- Move scanner code to scanner/ subdirectory for better organization
- Add data usage statistics collection and persistence
- Implement histogram support for size and version distribution
- Add global cancel token management for scanner operations
- Integrate scanner with ECStore for comprehensive data analysis
- Update error handling and improve test isolation
- Add data usage API endpoints and backend integration

Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 12:12:49 +08:00
dandan
b40ef147a9 refact: step 2
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 12:12:49 +08:00
junxiang Mu
1f11a3167b fix: Refact heal and scanner design
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 12:12:49 +08:00
guojidan
18b0134ddf Merge pull request #290 from guojidan/feat/complete-lock-implementation
refactor: reimplement lock
2025-07-24 12:11:19 +08:00
junxiang Mu
b48a5fdc94 fix fmt
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 11:52:57 +08:00
junxiang Mu
168a07a670 add api into ecstore
Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 11:52:57 +08:00
junxiang Mu
cad005bc21 refactor(lock): unify NamespaceLock client model and LockRequest API
- Refactor NamespaceLock to use a unified client vector and quorum mechanism,
  removing legacy local/distributed lock split and related code.
- Update LockRequest to split timeout into acquire_timeout and ttl, and add
  builder methods for both.
- Adjust all batch lock APIs to accept ttl and use new LockRequest fields.
- Update all affected tests and documentation for the new API.
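A hypothetical sketch of the split-timeout request shape; the field and builder names follow the commit message wording rather than the actual crates/lock source, and the defaults are invented:

```rust
use std::time::Duration;

/// Hypothetical shape of the request type; not the real crates/lock code.
#[derive(Debug, Clone)]
struct LockRequest {
    resource: String,
    acquire_timeout: Duration, // how long to wait to obtain the lock
    ttl: Duration,             // how long the lock is held before auto-release
}

impl LockRequest {
    fn new(resource: impl Into<String>) -> Self {
        Self {
            resource: resource.into(),
            acquire_timeout: Duration::from_secs(5),
            ttl: Duration::from_secs(30),
        }
    }

    fn with_acquire_timeout(mut self, d: Duration) -> Self {
        self.acquire_timeout = d;
        self
    }

    fn with_ttl(mut self, d: Duration) -> Self {
        self.ttl = d;
        self
    }
}

fn main() {
    let req = LockRequest::new("bucket/object")
        .with_acquire_timeout(Duration::from_secs(2))
        .with_ttl(Duration::from_secs(60));
    println!("{req:?}");
}
```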

Signed-off-by: junxiang Mu <1948535941@qq.com>
2025-07-24 11:52:57 +08:00
root
dc44cde081 tmp
Signed-off-by: root <root@PC.localdomain>
2025-07-24 11:52:57 +08:00
dandan
4ccdeb9d2a refactor(lock): restructure lock crate, remove unused modules and clarify directory layout
- Remove unused core/rwlock.rs and manager/ modules (ManagerFactory, LifecycleManager, NamespaceManager)
- Move all lock-related code into crates/lock/src with clear submodules: client, core, utils, etc.
- Ensure only necessary files and APIs are exposed, improve maintainability
- No functional logic change, pure structure and cleanup refactor

Signed-off-by: dandan <dandan@dandandeMac-Studio.local>
2025-07-24 11:52:55 +08:00
dependabot[bot]
1b48934f47 build(deps): bump the dependencies group with 5 updates (#289)
Bumps the dependencies group with 5 updates:

| Package | From | To |
| --- | --- | --- |
| [hyper-util](https://github.com/hyperium/hyper-util) | `0.1.15` | `0.1.16` |
| [rand](https://github.com/rust-random/rand) | `0.9.1` | `0.9.2` |
| [serde_json](https://github.com/serde-rs/json) | `1.0.140` | `1.0.141` |
| [strum](https://github.com/Peternator7/strum) | `0.27.1` | `0.27.2` |
| [sysinfo](https://github.com/GuillaumeGomez/sysinfo) | `0.36.0` | `0.36.1` |


Updates `hyper-util` from 0.1.15 to 0.1.16
- [Release notes](https://github.com/hyperium/hyper-util/releases)
- [Changelog](https://github.com/hyperium/hyper-util/blob/master/CHANGELOG.md)
- [Commits](https://github.com/hyperium/hyper-util/compare/v0.1.15...v0.1.16)

Updates `rand` from 0.9.1 to 0.9.2
- [Release notes](https://github.com/rust-random/rand/releases)
- [Changelog](https://github.com/rust-random/rand/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-random/rand/compare/rand_core-0.9.1...rand_core-0.9.2)

Updates `serde_json` from 1.0.140 to 1.0.141
- [Release notes](https://github.com/serde-rs/json/releases)
- [Commits](https://github.com/serde-rs/json/compare/v1.0.140...v1.0.141)

Updates `strum` from 0.27.1 to 0.27.2
- [Release notes](https://github.com/Peternator7/strum/releases)
- [Changelog](https://github.com/Peternator7/strum/blob/master/CHANGELOG.md)
- [Commits](https://github.com/Peternator7/strum/compare/v0.27.1...v0.27.2)

Updates `sysinfo` from 0.36.0 to 0.36.1
- [Changelog](https://github.com/GuillaumeGomez/sysinfo/blob/master/CHANGELOG.md)
- [Commits](https://github.com/GuillaumeGomez/sysinfo/compare/v0.36.0...v0.36.1)

---
updated-dependencies:
- dependency-name: hyper-util
  dependency-version: 0.1.16
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
- dependency-name: rand
  dependency-version: 0.9.2
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
- dependency-name: serde_json
  dependency-version: 1.0.141
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
- dependency-name: strum
  dependency-version: 0.27.2
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
- dependency-name: sysinfo
  dependency-version: 0.36.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-07-24 11:50:52 +08:00
zhangwenlong
25fa645184 add rustfs.spec for rustfs (#103)
add support on loongarch64
2025-07-24 11:39:09 +08:00
Marco Orlandin
3a3bb880f2 fix: update link in README.md leading to a 404 error (#285) 2025-07-24 09:15:04 +08:00
安正超
affe27298c fix: improve Windows build support and CI/CD workflow (#283)
- Fix Windows zip command issue by using PowerShell Compress-Archive
- Add Windows support for OSS upload with ossutil
- Replace Chinese comments with English in build.yml
- Fix bash syntax error in package_zip function
- Improve code formatting and consistency
- Update various configuration files for better cross-platform support

Resolves Windows build failures in GitHub Actions.
2025-07-22 23:55:57 +08:00
shiro.lee
629db6218e fix: the issue where preview fails when the path length exceeds 255 characters (#280) 2025-07-22 22:10:57 +08:00
安正超
aa1a3ce4e8 feat: add cargo clippy --fix --allow-dirty to pre-commit command (#282)
Resolves #277

- Add --fix flag to automatically fix clippy warnings
- Add --allow-dirty flag to run on dirty Git trees
- Improves code quality in pre-commit workflow
2025-07-22 22:10:53 +08:00
houseme
693db59fcc fix 2025-07-21 20:45:59 +08:00
houseme
0a7df4ef26 fix 2025-07-21 19:03:15 +08:00
houseme
9dcdc44718 fix 2025-07-21 18:03:01 +08:00
houseme
2a0c618f8b fix: windows build 2025-07-21 17:45:56 +08:00
loverustfs
bebd78fbbb Add GNU to build.yml (#275)
* fix unzip error

* fix url change error

fix url change error

* Simplify user experience and integrate console and endpoint

Simplify user experience and integrate console and endpoint

* Add gnu to  build.yml
2025-07-21 16:58:29 +08:00
houseme
3f095e75cb improve code for logger and fix typo (#272) 2025-07-21 15:20:36 +08:00
houseme
f7d30da9e0 fix typo (#267)
* fix typo

* cargo fmt
2025-07-20 00:11:15 +08:00
Chrislearn Young
823d4b6f79 Add typos github actions and fix typos (#265)
* Add typo github actions and fix typos

* cargo fmt
2025-07-19 22:08:50 +08:00
安正超
051ea7786f fix: ossutil install command. (#263) 2025-07-19 18:21:31 +08:00
安正超
42b645e355 fix: robust Dockerfile version logic for v prefix handling (#262)
* fix: robust Dockerfile version logic for v prefix handling

* wip
2025-07-19 15:50:15 +08:00
安正超
f27ee96014 feat: enhance entrypoint and Dockerfiles for flexible volume and permission management (#260)
* feat: enhance entrypoint and Dockerfiles for flexible volume and permission management

- Support batch mount and permission fix in entrypoint.sh
- Add coreutils/shadow (alpine) and coreutils/passwd (ubuntu) for UID/GID/ownership
- Use ENTRYPOINT for unified startup
- Make local dev and prod Dockerfile behavior consistent
- Improve security and user experience

BREAKING CHANGE: entrypoint.sh and Dockerfile now require additional packages for permission management, and support batch volume mount via RUSTFS_VOLUMES.

* chore: update Dockerfile comments to English only

* fix(entrypoint): improve local/remote volume detection and permission logic in entrypoint.sh

* Update entrypoint.sh

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update entrypoint.sh

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update Dockerfile

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-07-19 11:48:46 +08:00
houseme
20cd117aa6 improve code for dockerfile (#256)
* improve code for dockerfile

* Update Dockerfile

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* improve code for file name

* improve code for dockerfile

* fix

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-07-18 15:53:00 +08:00
houseme
fc8931d69f improve code for dockerfile (#253)
* improve code for dockerfile

* Update Dockerfile

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-07-18 11:05:00 +08:00
weisd
0167b2decd fix: optimize RPC connection management and prevent race conditions (#252) 2025-07-18 10:41:00 +08:00
weisd
e67980ff3c Fix/range content length (#251)
* fix:getobject range length
2025-07-17 23:25:21 +08:00
weisd
96760bba5a fix:getobject range length (#250) 2025-07-17 23:14:19 +08:00
overtrue
2501d7d241 fix: remove branch restriction from Docker workflow_run trigger
The Docker workflow was not triggering for tag-based releases because it had
'branches: [main]' restriction in the workflow_run configuration. When pushing
tags, the triggering workflow runs on the tag, not on main branch.

Changes:
- Remove 'branches: [main]' from workflow_run trigger
- Simplify tag detection using github.event.workflow_run context instead of API calls
- Use official workflow_run event properties (head_branch, event) for reliable detection
- Support both 'refs/tags/VERSION' and direct 'VERSION' formats
- Add better logging for debugging workflow trigger issues

This fixes the issue where Docker images were not built for tagged releases.
2025-07-17 08:13:34 +08:00
overtrue
55b84262b5 fix: use GitHub API for reliable tag detection in Docker workflow
- Replace git commands with GitHub API calls for tag detection
- Add proper commit checkout for workflow_run events
- Use gh CLI and curl fallback for better reliability
- Add debug output to help troubleshoot tag detection issues

This should fix the issue where Docker builds were not triggered for tagged releases
due to missing tag information in the workflow_run environment.
2025-07-17 08:01:33 +08:00
overtrue
ce4252eb1a fix: correct Docker workflow trigger logic for tag-based releases
BREAKING CHANGE: Fixed Docker workflow that was incorrectly skipping builds for tagged releases
- Fix logic to detect tag pushes using git refs instead of branch names
- Properly identify tag pushes vs branch pushes using git show-ref
- Support both v-prefixed and bare version formats
- Ensure Docker images are built for all tagged releases including prereleases
2025-07-17 07:46:54 +08:00
overtrue
db708917b4 docs: update .docker/README.md to reflect simplified Makefile commands
- Add new Makefile Commands section with simplified docker-dev* commands
- Update Development Workflow to use new dev-env-* commands
- Update directory structure (remove deleted alpine/ directory)
- Reorganize build instructions to prioritize Makefile over direct scripts
- Add Common Development Tasks section with make help commands
2025-07-17 07:30:13 +08:00
overtrue
8ddb45627d refactor: simplify Docker build commands and fix version matching
- Remove obsolete .docker/alpine/Dockerfile.protoc (superseded by Dockerfile.source)
- Simplify Makefile commands by removing backward compatibility aliases
  * Replace docker-buildx-source* with shorter docker-dev* commands
  * Replace start/stop with explicit dev-env-start/dev-env-stop commands
- Fix Docker workflow version matching logic to correctly distinguish:
  * 1.0.0 vs 1.0.0-alpha.11 (prerelease detection)
  * Support both v1.0.0 and 1.0.0 formats (with/without v prefix)
  * Reorder case patterns to match prereleases before releases

BREAKING CHANGE: Removed legacy command aliases
- Use 'make docker-dev-local' instead of 'make docker-buildx-source-local'
- Use 'make dev-env-start' instead of 'make start'
2025-07-17 07:29:00 +08:00
704 changed files with 121435 additions and 36920 deletions


@@ -6,10 +6,10 @@ This directory contains Docker configuration files and supporting infrastructure
```
rustfs/
├── Dockerfile # Production image (Alpine + GitHub Releases)
├── Dockerfile.source # Source build (Debian + cross-compilation)
├── cargo.config.toml # Rust cargo configuration
├── Dockerfile # Production image (Alpine + pre-built binaries)
├── Dockerfile.source # Development image (Debian + source build)
├── docker-buildx.sh # Multi-architecture build script
├── Makefile # Build automation with simplified commands
└── .docker/ # Supporting infrastructure
├── observability/ # Monitoring and observability configs
├── compose/ # Docker Compose configurations
@@ -64,7 +64,11 @@ docker run rustfs/rustfs:main-latest # Main branch latest
### Development Environment
```bash
# Start development container
# Quick setup using Makefile (recommended)
make docker-dev-local # Build development image locally
make dev-env-start # Start development container
# Manual Docker commands
docker run -it -v $(pwd):/workspace -p 9000:9000 rustfs/rustfs:latest-dev
# Build from source locally
@@ -76,9 +80,33 @@ docker-compose up rustfs-dev
## 🏗️ Build Arguments and Scripts
### Using docker-buildx.sh (Recommended)
### Using Makefile Commands (Recommended)
For multi-architecture builds, use the provided script:
The easiest way to build images using simplified commands:
```bash
# Development images (build from source)
make docker-dev-local # Build for local use (single arch)
make docker-dev # Build multi-arch (for CI/CD)
make docker-dev-push REGISTRY=xxx # Build and push to registry
# Production images (using pre-built binaries)
make docker-buildx # Build multi-arch production images
make docker-buildx-push # Build and push production images
make docker-buildx-version VERSION=v1.0.0 # Build specific version
# Development environment
make dev-env-start # Start development container
make dev-env-stop # Stop development container
make dev-env-restart # Restart development container
# Help
make help-docker # Show all Docker-related commands
```
### Using docker-buildx.sh (Advanced)
For direct script usage and advanced scenarios:
```bash
# Build latest version for all architectures
@@ -147,17 +175,51 @@ Architecture is automatically detected during build using Docker's `TARGETARCH`
## 🛠️ Development Workflow
For local development and testing:
### Quick Start with Makefile (Recommended)
```bash
# Quick development setup
docker-compose up rustfs-dev
# 1. Start development environment
make dev-env-start
# Custom source build
docker build -f Dockerfile.source -t rustfs:custom .
# 2. Your development container is now running with:
# - Port 9000 exposed for RustFS
# - Port 9010 exposed for admin console
# - Current directory mounted as /workspace
# 3. Stop when done
make dev-env-stop
```
### Manual Development Setup
```bash
# Build development image from source
make docker-dev-local
# Or use traditional Docker commands
docker build -f Dockerfile.source -t rustfs:dev .
# Run with development tools
docker run -it -v $(pwd):/workspace rustfs:custom bash
docker run -it -v $(pwd):/workspace -p 9000:9000 rustfs:dev bash
# Or use docker-compose for complex setups
docker-compose up rustfs-dev
```
### Common Development Tasks
```bash
# Build and test locally
make build # Build binary natively
make docker-dev-local # Build development Docker image
make test # Run tests
make fmt # Format code
make clippy # Run linter
# Get help
make help # General help
make help-docker # Docker-specific help
make help-build # Build-specific help
```
## 🚀 CI/CD Integration


@@ -1,40 +0,0 @@
FROM alpine:3.18
ENV LANG C.UTF-8
# Install base dependencies
RUN apk add --no-cache \
wget \
git \
curl \
unzip \
gcc \
musl-dev \
pkgconfig \
openssl-dev \
dbus-dev \
wayland-dev \
webkit2gtk-4.1-dev \
build-base \
linux-headers
# install protoc
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v30.2/protoc-30.2-linux-x86_64.zip \
&& unzip protoc-30.2-linux-x86_64.zip -d protoc3 \
&& mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
&& mv protoc3/include/* /usr/local/include/ && rm -rf protoc-30.2-linux-x86_64.zip protoc3
# install flatc
RUN wget https://github.com/google/flatbuffers/releases/download/v24.3.25/Linux.flatc.binary.g++-13.zip \
&& unzip Linux.flatc.binary.g++-13.zip \
&& mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip
# install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
# Set PATH for rust
ENV PATH="/root/.cargo/bin:${PATH}"
COPY .docker/cargo.config.toml /root/.cargo/config.toml
WORKDIR /root/rustfs


@@ -27,7 +27,7 @@ services:
ports:
- "9000:9000" # Map port 9001 of the host to port 9000 of the container
volumes:
- ../../target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
- ../../target/x86_64-unknown-linux-gnu/release/rustfs:/app/rustfs
command: "/app/rustfs"
node1:
@@ -44,7 +44,7 @@ services:
ports:
- "9001:9000" # Map port 9002 of the host to port 9000 of the container
volumes:
- ../../target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
- ../../target/x86_64-unknown-linux-gnu/release/rustfs:/app/rustfs
command: "/app/rustfs"
node2:
@@ -61,7 +61,7 @@ services:
ports:
- "9002:9000" # Map port 9003 of the host to port 9000 of the container
volumes:
- ../../target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
- ../../target/x86_64-unknown-linux-gnu/release/rustfs:/app/rustfs
command: "/app/rustfs"
node3:
@@ -78,5 +78,5 @@ services:
ports:
- "9003:9000" # Map port 9004 of the host to port 9000 of the container
volumes:
- ../../target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
- ../../target/x86_64-unknown-linux-gnu/release/rustfs:/app/rustfs
command: "/app/rustfs"


@@ -14,84 +14,165 @@
services:
tempo-init:
image: busybox:latest
command: [ "sh", "-c", "chown -R 10001:10001 /var/tempo" ]
volumes:
- ./tempo-data:/var/tempo
user: root
networks:
- otel-network
restart: "no"
tempo:
image: grafana/tempo:latest
#user: root # The container must be started with root to execute chown in the script
#entrypoint: [ "/etc/tempo/entrypoint.sh" ] # Specify a custom entry point
user: "10001" # The container must be started with root to execute chown in the script
command: [ "-config.file=/etc/tempo.yaml" ] # This is passed as a parameter to the entry point script
volumes:
- ./tempo-entrypoint.sh:/etc/tempo/entrypoint.sh # Mount entry point script
- ./tempo.yaml:/etc/tempo.yaml
- ./tempo.yaml:/etc/tempo.yaml:ro
- ./tempo-data:/var/tempo
ports:
- "3200:3200" # tempo
- "24317:4317" # otlp grpc
- "24318:4318" # otlp http
restart: unless-stopped
networks:
- otel-network
healthcheck:
test: [ "CMD", "wget", "--spider", "-q", "http://localhost:3200/metrics" ]
interval: 10s
timeout: 5s
retries: 3
start_period: 15s
otel-collector:
image: otel/opentelemetry-collector-contrib:0.129.1
image: otel/opentelemetry-collector-contrib:latest
environment:
- TZ=Asia/Shanghai
volumes:
- ./otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml
- ./otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml:ro
ports:
- "1888:1888"
- "8888:8888"
- "8889:8889"
- "13133:13133"
- "4317:4317"
- "4318:4318"
- "55679:55679"
- "1888:1888" # pprof
- "8888:8888" # Prometheus metrics for Collector
- "8889:8889" # Prometheus metrics for application indicators
- "13133:13133" # health check
- "4317:4317" # OTLP gRPC
- "4318:4318" # OTLP HTTP
- "55679:55679" # zpages
networks:
- otel-network
depends_on:
jaeger:
condition: service_started
tempo:
condition: service_started
prometheus:
condition: service_started
loki:
condition: service_started
healthcheck:
test: [ "CMD", "wget", "--spider", "-q", "http://localhost:13133" ]
interval: 10s
timeout: 5s
retries: 3
jaeger:
image: jaegertracing/jaeger:2.8.0
image: jaegertracing/jaeger:latest
environment:
- TZ=Asia/Shanghai
- SPAN_STORAGE_TYPE=memory
- COLLECTOR_OTLP_ENABLED=true
ports:
- "16686:16686"
- "14317:4317"
- "14318:4318"
- "16686:16686" # Web UI
- "14317:4317" # OTLP gRPC
- "14318:4318" # OTLP HTTP
- "18888:8888" # collector
networks:
- otel-network
healthcheck:
test: [ "CMD", "wget", "--spider", "-q", "http://localhost:16686" ]
interval: 10s
timeout: 5s
retries: 3
prometheus:
image: prom/prometheus:v3.4.2
image: prom/prometheus:latest
environment:
- TZ=Asia/Shanghai
volumes:
- ./prometheus.yml:/etc/prometheus/prometheus.yml
- ./prometheus.yml:/etc/prometheus/prometheus.yml:ro
- ./prometheus-data:/prometheus
ports:
- "9090:9090"
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--web.enable-otlp-receiver' # Enable OTLP
- '--web.enable-remote-write-receiver' # Enable remote write
- '--enable-feature=promql-experimental-functions' # Enable info()
- '--storage.tsdb.min-block-duration=15m' # Minimum block duration
- '--storage.tsdb.max-block-duration=1h' # Maximum block duration
- '--log.level=info'
- '--storage.tsdb.retention.time=30d'
- '--storage.tsdb.path=/prometheus'
- '--web.console.libraries=/usr/share/prometheus/console_libraries'
- '--web.console.templates=/usr/share/prometheus/consoles'
restart: unless-stopped
networks:
- otel-network
healthcheck:
test: [ "CMD", "wget", "--spider", "-q", "http://localhost:9090/-/healthy" ]
interval: 10s
timeout: 5s
retries: 3
loki:
image: grafana/loki:3.5.1
image: grafana/loki:latest
environment:
- TZ=Asia/Shanghai
volumes:
- ./loki-config.yaml:/etc/loki/local-config.yaml
- ./loki-config.yaml:/etc/loki/local-config.yaml:ro
ports:
- "3100:3100"
command: -config.file=/etc/loki/local-config.yaml
networks:
- otel-network
healthcheck:
test: [ "CMD", "wget", "--spider", "-q", "http://localhost:3100/ready" ]
interval: 10s
timeout: 5s
retries: 3
grafana:
image: grafana/grafana:12.0.2
image: grafana/grafana:latest
ports:
- "3000:3000" # Web UI
volumes:
- ./grafana-datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml
environment:
- GF_SECURITY_ADMIN_PASSWORD=admin
- GF_SECURITY_ADMIN_USER=admin
- TZ=Asia/Shanghai
- GF_INSTALL_PLUGINS=grafana-pyroscope-datasource
restart: unless-stopped
networks:
- otel-network
depends_on:
- prometheus
- tempo
- loki
healthcheck:
test: [ "CMD", "wget", "--spider", "-q", "http://localhost:3000/api/health" ]
interval: 10s
timeout: 5s
retries: 3
volumes:
prometheus-data:
tempo-data:
networks:
otel-network:
driver: bridge
name: "network_otel_config"
ipam:
config:
- subnet: 172.28.0.0/16
driver_opts:
com.docker.network.enable_ipv6: "true"
com.docker.network.enable_ipv6: "true"
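For a service that should ship telemetry into this stack, the usual pattern is to attach it to `otel-network` and point its OTLP exporters at the collector's HTTP port (4318). A minimal sketch, assuming the `rustfs/rustfs` image and the `RUSTFS_OBS_*` endpoint variables that appear elsewhere in this changeset (e.g. `.vscode/launch.json`); adjust image, variable names, and ports to your deployment:

```yaml
# Illustrative only — not part of the compose file above.
services:
  rustfs:
    image: rustfs/rustfs:latest   # assumed image name (see the Docker workflow in this changeset)
    environment:
      - RUSTFS_OBS_TRACE_ENDPOINT=http://otel-collector:4318/v1/traces   # OTLP HTTP traces
      - RUSTFS_OBS_METRIC_ENDPOINT=http://otel-collector:4318/v1/metrics # OTLP HTTP metrics
      - RUSTFS_OBS_LOG_ENDPOINT=http://otel-collector:4318/v1/logs       # OTLP HTTP logs
    networks:
      - otel-network
```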

View File

@@ -29,4 +29,80 @@ datasources:
serviceMap:
datasourceUid: prometheus
streamingEnabled:
search: true
search: true
tracesToLogsV2:
# Field with an internal link pointing to a logs data source in Grafana.
# datasourceUid value must match the uid value of the logs data source.
datasourceUid: 'loki'
spanStartTimeShift: '-1h'
spanEndTimeShift: '1h'
tags: [ 'job', 'instance', 'pod', 'namespace' ]
filterByTraceID: false
filterBySpanID: false
customQuery: true
query: 'method="$${__span.tags.method}"'
tracesToMetrics:
datasourceUid: 'prometheus'
spanStartTimeShift: '-1h'
spanEndTimeShift: '1h'
tags: [ { key: 'service.name', value: 'service' }, { key: 'job' } ]
queries:
- name: 'Sample query'
query: 'sum(rate(traces_spanmetrics_latency_bucket{$$__tags}[5m]))'
tracesToProfiles:
datasourceUid: 'grafana-pyroscope-datasource'
tags: [ 'job', 'instance', 'pod', 'namespace' ]
profileTypeId: 'process_cpu:cpu:nanoseconds:cpu:nanoseconds'
customQuery: true
query: 'method="$${__span.tags.method}"'
serviceMap:
datasourceUid: 'prometheus'
nodeGraph:
enabled: true
search:
hide: false
traceQuery:
timeShiftEnabled: true
spanStartTimeShift: '-1h'
spanEndTimeShift: '1h'
spanBar:
type: 'Tag'
tag: 'http.path'
streamingEnabled:
search: true
- name: Jaeger
type: jaeger
uid: Jaeger
url: http://jaeger:16686
basicAuth: false
access: proxy
readOnly: false
isDefault: false
jsonData:
tracesToLogsV2:
# Field with an internal link pointing to a logs data source in Grafana.
# datasourceUid value must match the uid value of the logs data source.
datasourceUid: 'loki'
spanStartTimeShift: '1h'
spanEndTimeShift: '-1h'
tags: [ 'job', 'instance', 'pod', 'namespace' ]
filterByTraceID: false
filterBySpanID: false
customQuery: true
query: 'method="$${__span.tags.method}"'
tracesToMetrics:
datasourceUid: 'Prometheus'
spanStartTimeShift: '1h'
spanEndTimeShift: '-1h'
tags: [ { key: 'service.name', value: 'service' }, { key: 'job' } ]
queries:
- name: 'Sample query'
query: 'sum(rate(traces_spanmetrics_latency_bucket{$$__tags}[5m]))'
nodeGraph:
enabled: true
traceQuery:
timeShiftEnabled: true
spanStartTimeShift: '1h'
spanEndTimeShift: '-1h'
spanBar:
type: 'None'

View File

@@ -65,6 +65,7 @@ extensions:
some_store:
memory:
max_traces: 1000000
max_events: 100000
another_store:
memory:
max_traces: 1000000
@@ -102,6 +103,7 @@ receivers:
processors:
batch:
metadata_keys: [ "span.kind", "http.method", "http.status_code", "db.system", "db.statement", "messaging.system", "messaging.destination", "messaging.operation","span.events","span.links" ]
# Adaptive Sampling Processor is required to support adaptive sampling.
# It expects remote_sampling extension with `adaptive:` config to be enabled.
adaptive_sampling:
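For reference, a hedged sketch of the extension entry that the comment above refers to; the `remote_sampling`/`adaptive` keys come from that comment, while the nested field names and values are assumptions to be checked against the Jaeger v2 documentation:

```yaml
extensions:
  remote_sampling:
    adaptive:
      sampling_store: some_store            # assumed: reuses the memory-backed store defined above
      initial_sampling_probability: 0.001   # assumed field name and starting probability
```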

View File

@@ -41,6 +41,9 @@ query_range:
limits_config:
metric_aggregation_enabled: true
max_line_size: 256KB
max_line_size_truncate: false
allow_structured_metadata: true
schema_config:
configs:
@@ -51,6 +54,7 @@ schema_config:
index:
prefix: index_
period: 24h
row_shards: 16
pattern_ingester:
enabled: true
@@ -63,6 +67,7 @@ ruler:
frontend:
encoding: protobuf
# By default, Loki will send anonymous, but uniquely-identifiable usage and configuration
# analytics to Grafana Labs. These statistics are sent to https://stats.grafana.org/
#
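If that anonymous reporting is not wanted, it can be disabled in the same file. A small sketch, assuming the standard `analytics` block from the Loki configuration reference:

```yaml
analytics:
  reporting_enabled: false   # opt out of anonymous usage reporting to Grafana Labs
```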

View File

@@ -15,67 +15,108 @@
receivers:
otlp:
protocols:
grpc: # OTLP gRPC 接收器
grpc: # OTLP gRPC receiver
endpoint: 0.0.0.0:4317
http: # OTLP HTTP 接收器
http: # OTLP HTTP receiver
endpoint: 0.0.0.0:4318
processors:
batch: # 批处理处理器,提升吞吐量
batch: # Batch processor to improve throughput
timeout: 5s
send_batch_size: 1000
metadata_keys: [ ]
metadata_cardinality_limit: 1000
memory_limiter:
check_interval: 1s
limit_mib: 512
transform/logs:
log_statements:
- context: log
statements:
# Extract Body as attribute "message"
- set(attributes["message"], body.string)
# Retain the original Body
- set(attributes["log.body"], body.string)
exporters:
otlp/traces: # OTLP 导出器,用于跟踪数据
endpoint: "jaeger:4317" # Jaeger 的 OTLP gRPC 端点
otlp/traces: # OTLP exporter for trace data
endpoint: "http://jaeger:4317" # OTLP gRPC endpoint for Jaeger
tls:
insecure: true # 开发环境禁用 TLS生产环境需配置证书
otlp/tempo: # OTLP 导出器,用于跟踪数据
endpoint: "tempo:4317" # tempo 的 OTLP gRPC 端点
insecure: true # TLS disabled for development; configure certificates in production
compression: gzip # Enable compression to reduce network bandwidth
retry_on_failure:
enabled: true # Enable retry on failure
initial_interval: 1s # Initial interval for retry
max_interval: 30s # Maximum interval for retry
max_elapsed_time: 300s # Maximum elapsed time for retry
sending_queue:
enabled: true # Enable sending queue
num_consumers: 10 # Number of consumers
queue_size: 5000 # Queue size
otlp/tempo: # OTLP exporter for trace data
endpoint: "http://tempo:4317" # OTLP gRPC endpoint for tempo
tls:
insecure: true # 开发环境禁用 TLS生产环境需配置证书
prometheus: # Prometheus 导出器,用于指标数据
endpoint: "0.0.0.0:8889" # Prometheus 刮取端点
namespace: "rustfs" # 指标前缀
send_timestamps: true # 发送时间戳
# enable_open_metrics: true
loki: # Loki 导出器,用于日志数据
# endpoint: "http://loki:3100/otlp/v1/logs"
endpoint: "http://loki:3100/loki/api/v1/push"
insecure: true # TLS disabled for development; configure certificates in production
compression: gzip # Enable compression to reduce network bandwidth
retry_on_failure:
enabled: true # Enable retry on failure
initial_interval: 1s # Initial interval for retry
max_interval: 30s # Maximum interval for retry
max_elapsed_time: 300s # Maximum elapsed time for retry
sending_queue:
enabled: true # Enable sending queue
num_consumers: 10 # Number of consumers
queue_size: 5000 # Queue size
prometheus: # Prometheus exporter for metrics data
endpoint: "0.0.0.0:8889" # Prometheus scraping endpoint
namespace: "metrics" # indicator prefix
send_timestamps: true # Send timestamp
metric_expiration: 5m # Metric expiration time
resource_to_telemetry_conversion:
enabled: true # Enable resource to telemetry conversion
otlphttp/loki: # Loki exporter for log data
endpoint: "http://loki:3100/otlp"
tls:
insecure: true
compression: gzip # Enable compression to reduce network bandwidth
extensions:
health_check:
endpoint: 0.0.0.0:13133
pprof:
endpoint: 0.0.0.0:1888
zpages:
endpoint: 0.0.0.0:55679
service:
extensions: [ health_check, pprof, zpages ] # 启用扩展
extensions: [ health_check, pprof, zpages ] # Enable extension
pipelines:
traces:
receivers: [ otlp ]
processors: [ memory_limiter,batch ]
exporters: [ otlp/traces,otlp/tempo ]
processors: [ memory_limiter, batch ]
exporters: [ otlp/traces, otlp/tempo ]
metrics:
receivers: [ otlp ]
processors: [ batch ]
exporters: [ prometheus ]
logs:
receivers: [ otlp ]
processors: [ batch ]
exporters: [ loki ]
processors: [ batch, transform/logs ]
exporters: [ otlphttp/loki ]
telemetry:
logs:
level: "info" # Collector 日志级别
level: "debug" # Collector log level
encoding: "json" # Log encoding: console or json
metrics:
level: "detailed" # 可以是 basic, normal, detailed
level: "detailed" # Can be basic, normal, detailed
readers:
- periodic:
exporter:
otlp:
protocol: http/protobuf
endpoint: http://otel-collector:4318
- pull:
exporter:
prometheus:
host: '0.0.0.0'
port: 8888

View File

@@ -0,0 +1 @@
*

View File

@@ -13,16 +13,53 @@
# limitations under the License.
global:
scrape_interval: 5s # 刮取间隔
scrape_interval: 15s # Scrape targets every 15 seconds. The default is every 1 minute.
evaluation_interval: 15s
external_labels:
cluster: 'rustfs-dev' # Label to identify the cluster
replica: '1' # Replica identifier
scrape_configs:
- job_name: 'otel-collector'
- job_name: 'otel-collector-internal'
static_configs:
- targets: [ 'otel-collector:8888' ] # Collector 刮取指标
- job_name: 'otel-metrics'
- targets: [ 'otel-collector:8888' ] # Scrape metrics from Collector
scrape_interval: 10s
- job_name: 'rustfs-app-metrics'
static_configs:
- targets: [ 'otel-collector:8889' ] # 应用指标
- targets: [ 'otel-collector:8889' ] # Application metrics
scrape_interval: 15s
metric_relabel_configs:
- job_name: 'tempo'
static_configs:
- targets: [ 'tempo:3200' ]
- targets: [ 'tempo:3200' ] # Scrape metrics from Tempo
- job_name: 'jaeger'
static_configs:
- targets: [ 'jaeger:8888' ] # Jaeger admin port
otlp:
# Recommended attributes to be promoted to labels.
promote_resource_attributes:
- service.instance.id
- service.name
- service.namespace
- cloud.availability_zone
- cloud.region
- container.name
- deployment.environment.name
- k8s.cluster.name
- k8s.container.name
- k8s.cronjob.name
- k8s.daemonset.name
- k8s.deployment.name
- k8s.job.name
- k8s.namespace.name
- k8s.pod.name
- k8s.replicaset.name
- k8s.statefulset.name
# Ingest OTLP data keeping all characters in metric/label names.
translation_strategy: NoUTF8EscapingWithSuffixes
storage:
# OTLP is a push-based protocol, Out of order samples is a common scenario.
tsdb:
out_of_order_time_window: 30m
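This pairs with the `--web.enable-otlp-receiver` flag set in the compose file above: besides being scraped, Prometheus can ingest metrics pushed over OTLP. A hedged sketch of a collector exporter that would use that path (the exporter name and endpoint are assumptions based on Prometheus' documented OTLP ingestion route):

```yaml
exporters:
  otlphttp/prometheus:
    endpoint: http://prometheus:9090/api/v1/otlp   # assumed base path; /v1/metrics is appended by the exporter
    tls:
      insecure: true
```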

View File

@@ -1,8 +0,0 @@
#!/bin/sh
# Run as root to fix directory permissions
chown -R 10001:10001 /var/tempo
# Use su-exec (a lightweight sudo/gosu alternative, commonly used in Alpine images)
# Switch to user 10001 and execute the original command (CMD) passed to the script
# "$@" represents all parameters passed to this script, i.e. command in docker-compose
exec su-exec 10001:10001 /tempo "$@"

View File

@@ -18,7 +18,9 @@ distributor:
otlp:
protocols:
grpc:
endpoint: "tempo:4317"
endpoint: "0.0.0.0:4317"
http:
endpoint: "0.0.0.0:4318"
ingester:
max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally

1
.dockerignore Normal file
View File

@@ -0,0 +1 @@
target

View File

@@ -52,24 +52,19 @@ runs:
sudo apt-get install -y \
musl-tools \
build-essential \
lld \
libdbus-1-dev \
libwayland-dev \
libwebkit2gtk-4.1-dev \
libxdo-dev \
pkg-config \
libssl-dev
- name: Install protoc
uses: arduino/setup-protoc@v3
with:
version: "31.1"
version: "33.1"
repo-token: ${{ inputs.github-token }}
- name: Install flatc
uses: Nugine/setup-flatc@v1
with:
version: "25.2.10"
version: "25.9.23"
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@stable

View File

@@ -22,8 +22,18 @@ updates:
- package-ecosystem: "cargo" # See documentation for possible values
directory: "/" # Location of package manifests
schedule:
interval: "monthly"
interval: "weekly"
day: "monday"
timezone: "Asia/Shanghai"
time: "08:00"
groups:
s3s:
update-types:
- "minor"
- "patch"
patterns:
- "s3s"
- "s3s-*"
dependencies:
patterns:
- "*"

View File

@@ -19,9 +19,7 @@ Pull Request Template for RustFS
## Checklist
- [ ] I have read and followed the [CONTRIBUTING.md](CONTRIBUTING.md) guidelines
- [ ] Code is formatted with `cargo fmt --all`
- [ ] Passed `cargo clippy --all-targets --all-features -- -D warnings`
- [ ] Passed `cargo check --all-targets`
- [ ] Passed `make pre-commit`
- [ ] Added/updated necessary tests
- [ ] Documentation updated (if needed)
- [ ] CI/CD passed (if applicable)

View File

@@ -16,13 +16,13 @@ name: Security Audit
on:
push:
branches: [main]
branches: [ main ]
paths:
- '**/Cargo.toml'
- '**/Cargo.lock'
- '.github/workflows/audit.yml'
pull_request:
branches: [main]
branches: [ main ]
paths:
- '**/Cargo.toml'
- '**/Cargo.lock'
@@ -31,6 +31,9 @@ on:
- cron: '0 0 * * 0' # Weekly on Sunday at midnight UTC
workflow_dispatch:
permissions:
contents: read
env:
CARGO_TERM_COLOR: always
@@ -41,7 +44,7 @@ jobs:
timeout-minutes: 15
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Install cargo-audit
uses: taiki-e/install-action@v2
@@ -69,7 +72,7 @@ jobs:
pull-requests: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Dependency Review
uses: actions/dependency-review-action@v4

View File

@@ -28,8 +28,8 @@ name: Build and Release
on:
push:
tags: ["*.*.*"]
branches: [main]
tags: [ "*.*.*" ]
branches: [ main ]
paths-ignore:
- "**.md"
- "**.txt"
@@ -45,7 +45,7 @@ on:
- ".gitignore"
- ".dockerignore"
pull_request:
branches: [main]
branches: [ main ]
paths-ignore:
- "**.md"
- "**.txt"
@@ -70,6 +70,9 @@ on:
default: true
type: boolean
permissions:
contents: read
env:
CARGO_TERM_COLOR: always
RUST_BACKTRACE: 1
@@ -89,7 +92,7 @@ jobs:
is_prerelease: ${{ steps.check.outputs.is_prerelease }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
fetch-depth: 0
@@ -153,7 +156,7 @@ jobs:
# Build RustFS binaries
build-rustfs:
name: Build RustFS
needs: [build-check]
needs: [ build-check ]
if: needs.build-check.outputs.should_build == 'true'
runs-on: ${{ matrix.os }}
timeout-minutes: 60
@@ -172,6 +175,14 @@ jobs:
target: aarch64-unknown-linux-musl
cross: true
platform: linux
- os: ubuntu-latest
target: x86_64-unknown-linux-gnu
cross: false
platform: linux
- os: ubuntu-latest
target: aarch64-unknown-linux-gnu
cross: true
platform: linux
# macOS builds
- os: macos-latest
target: aarch64-apple-darwin
@@ -181,18 +192,18 @@ jobs:
target: x86_64-apple-darwin
cross: false
platform: macos
# # Windows builds (temporarily disabled)
# - os: windows-latest
# target: x86_64-pc-windows-msvc
# cross: false
# platform: windows
# - os: windows-latest
# target: aarch64-pc-windows-msvc
# cross: true
# platform: windows
# Windows builds (temporarily disabled)
- os: windows-latest
target: x86_64-pc-windows-msvc
cross: false
platform: windows
#- os: windows-latest
# target: aarch64-pc-windows-msvc
# cross: true
# platform: windows
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
fetch-depth: 0
@@ -207,6 +218,7 @@ jobs:
install-cross-tools: ${{ matrix.cross }}
- name: Download static console assets
shell: bash
run: |
mkdir -p ./rustfs/static
if [[ "${{ matrix.platform }}" == "windows" ]]; then
@@ -232,6 +244,7 @@ jobs:
fi
- name: Build RustFS
shell: bash
run: |
# Force rebuild by touching build.rs
touch rustfs/build.rs
@@ -260,30 +273,55 @@ jobs:
# Extract platform and arch from target
TARGET="${{ matrix.target }}"
PLATFORM="${{ matrix.platform }}"
# Map target to architecture
# Map target to architecture and variant
case "$TARGET" in
*x86_64*musl*)
ARCH="x86_64"
VARIANT="musl"
;;
*x86_64*gnu*)
ARCH="x86_64"
VARIANT="gnu"
;;
*x86_64*)
ARCH="x86_64"
VARIANT=""
;;
*aarch64*musl*|*arm64*musl*)
ARCH="aarch64"
VARIANT="musl"
;;
*aarch64*gnu*|*arm64*gnu*)
ARCH="aarch64"
VARIANT="gnu"
;;
*aarch64*|*arm64*)
ARCH="aarch64"
VARIANT=""
;;
*armv7*)
ARCH="armv7"
VARIANT=""
;;
*)
ARCH="unknown"
VARIANT=""
;;
esac
# Generate package name based on build type
if [[ "$BUILD_TYPE" == "development" ]]; then
# Development build: rustfs-${platform}-${arch}-dev-${short_sha}.zip
PACKAGE_NAME="rustfs-${PLATFORM}-${ARCH}-dev-${SHORT_SHA}"
if [[ -n "$VARIANT" ]]; then
ARCH_WITH_VARIANT="${ARCH}-${VARIANT}"
else
# Release/Prerelease build: rustfs-${platform}-${arch}-v${version}.zip
PACKAGE_NAME="rustfs-${PLATFORM}-${ARCH}-v${VERSION}"
ARCH_WITH_VARIANT="${ARCH}"
fi
if [[ "$BUILD_TYPE" == "development" ]]; then
# Development build: rustfs-${platform}-${arch}-${variant}-dev-${short_sha}.zip
PACKAGE_NAME="rustfs-${PLATFORM}-${ARCH_WITH_VARIANT}-dev-${SHORT_SHA}"
else
# Release/Prerelease build: rustfs-${platform}-${arch}-${variant}-v${version}.zip
PACKAGE_NAME="rustfs-${PLATFORM}-${ARCH_WITH_VARIANT}-v${VERSION}"
fi
# Create zip packages for all platforms
@@ -295,23 +333,119 @@ jobs:
fi
cd target/${{ matrix.target }}/release
zip "../../../${PACKAGE_NAME}.zip" rustfs
# Determine the binary name based on platform
if [[ "${{ matrix.platform }}" == "windows" ]]; then
BINARY_NAME="rustfs.exe"
else
BINARY_NAME="rustfs"
fi
# Verify the binary exists before packaging
if [[ ! -f "$BINARY_NAME" ]]; then
echo "❌ Binary $BINARY_NAME not found in $(pwd)"
if [[ "${{ matrix.platform }}" == "windows" ]]; then
dir
else
ls -la
fi
exit 1
fi
# Universal packaging function
package_zip() {
local src=$1
local dst=$2
if [[ "${{ matrix.platform }}" == "windows" ]]; then
# Windows uses PowerShell Compress-Archive
powershell -Command "Compress-Archive -Path '$src' -DestinationPath '$dst' -Force"
elif command -v zip &> /dev/null; then
# Unix systems use zip command
zip "$dst" "$src"
else
echo "❌ No zip utility available"
exit 1
fi
}
# Create the zip package
echo "Start packaging: $BINARY_NAME -> ../../../${PACKAGE_NAME}.zip"
package_zip "$BINARY_NAME" "../../../${PACKAGE_NAME}.zip"
cd ../../..
# Verify the package was created
if [[ -f "${PACKAGE_NAME}.zip" ]]; then
echo "✅ Package created successfully: ${PACKAGE_NAME}.zip"
if [[ "${{ matrix.platform }}" == "windows" ]]; then
dir
else
ls -lh ${PACKAGE_NAME}.zip
fi
else
echo "❌ Failed to create package: ${PACKAGE_NAME}.zip"
exit 1
fi
# Create latest version files right after the main package
LATEST_FILES=""
if [[ "$BUILD_TYPE" == "release" ]] || [[ "$BUILD_TYPE" == "prerelease" ]]; then
# Create latest version filename
# Convert from rustfs-linux-x86_64-musl-v1.0.0 to rustfs-linux-x86_64-musl-latest
LATEST_FILE="${PACKAGE_NAME%-v*}-latest.zip"
echo "🔄 Creating latest version: ${PACKAGE_NAME}.zip -> $LATEST_FILE"
cp "${PACKAGE_NAME}.zip" "$LATEST_FILE"
if [[ -f "$LATEST_FILE" ]]; then
echo "✅ Latest version created: $LATEST_FILE"
LATEST_FILES="$LATEST_FILE"
fi
elif [[ "$BUILD_TYPE" == "development" ]]; then
# Development builds (only main branch triggers development builds)
# Create main-latest version filename
# Convert from rustfs-linux-x86_64-dev-abc123 to rustfs-linux-x86_64-main-latest
MAIN_LATEST_FILE="${PACKAGE_NAME%-dev-*}-main-latest.zip"
echo "🔄 Creating main-latest version: ${PACKAGE_NAME}.zip -> $MAIN_LATEST_FILE"
cp "${PACKAGE_NAME}.zip" "$MAIN_LATEST_FILE"
if [[ -f "$MAIN_LATEST_FILE" ]]; then
echo "✅ Main-latest version created: $MAIN_LATEST_FILE"
LATEST_FILES="$MAIN_LATEST_FILE"
# Also create a generic main-latest for Docker builds (Linux only)
if [[ "${{ matrix.platform }}" == "linux" ]]; then
DOCKER_MAIN_LATEST_FILE="rustfs-linux-${ARCH_WITH_VARIANT}-main-latest.zip"
echo "🔄 Creating Docker main-latest version: ${PACKAGE_NAME}.zip -> $DOCKER_MAIN_LATEST_FILE"
cp "${PACKAGE_NAME}.zip" "$DOCKER_MAIN_LATEST_FILE"
if [[ -f "$DOCKER_MAIN_LATEST_FILE" ]]; then
echo "✅ Docker main-latest version created: $DOCKER_MAIN_LATEST_FILE"
LATEST_FILES="$LATEST_FILES $DOCKER_MAIN_LATEST_FILE"
fi
fi
fi
fi
echo "package_name=${PACKAGE_NAME}" >> $GITHUB_OUTPUT
echo "package_file=${PACKAGE_NAME}.zip" >> $GITHUB_OUTPUT
echo "latest_files=${LATEST_FILES}" >> $GITHUB_OUTPUT
echo "build_type=${BUILD_TYPE}" >> $GITHUB_OUTPUT
echo "version=${VERSION}" >> $GITHUB_OUTPUT
echo "📦 Package created: ${PACKAGE_NAME}.zip"
if [[ -n "$LATEST_FILES" ]]; then
echo "📦 Latest files created: $LATEST_FILES"
fi
echo "🔧 Build type: ${BUILD_TYPE}"
echo "📊 Version: ${VERSION}"
- name: Upload artifacts
- name: Upload to GitHub artifacts
uses: actions/upload-artifact@v4
with:
name: ${{ steps.package.outputs.package_name }}
path: ${{ steps.package.outputs.package_file }}
path: "rustfs-*.zip"
retention-days: ${{ startsWith(github.ref, 'refs/tags/') && 30 || 7 }}
- name: Upload to Aliyun OSS
@@ -321,6 +455,7 @@ jobs:
OSS_ACCESS_KEY_SECRET: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
OSS_REGION: cn-beijing
OSS_ENDPOINT: https://oss-cn-beijing.aliyuncs.com
shell: bash
run: |
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
@@ -359,6 +494,16 @@ jobs:
chmod +x /usr/local/bin/ossutil
OSSUTIL_BIN=ossutil
;;
windows)
OSSUTIL_ZIP="ossutil-${OSSUTIL_VERSION}-windows-amd64.zip"
OSSUTIL_DIR="ossutil-${OSSUTIL_VERSION}-windows-amd64"
curl -o "$OSSUTIL_ZIP" "https://gosspublic.alicdn.com/ossutil/v2/${OSSUTIL_VERSION}/${OSSUTIL_ZIP}"
unzip "$OSSUTIL_ZIP"
mv "${OSSUTIL_DIR}/ossutil.exe" ./ossutil.exe
rm -rf "$OSSUTIL_DIR" "$OSSUTIL_ZIP"
OSSUTIL_BIN=./ossutil.exe
;;
esac
# Determine upload path based on build type
@@ -370,83 +515,27 @@ jobs:
echo "📤 Uploading release build to OSS release directory"
fi
# Upload the package file to OSS
echo "Uploading ${{ steps.package.outputs.package_file }} to $OSS_PATH..."
$OSSUTIL_BIN cp "${{ steps.package.outputs.package_file }}" "$OSS_PATH" --force
# For release and prerelease builds, also create a latest version
if [[ "$BUILD_TYPE" == "release" ]] || [[ "$BUILD_TYPE" == "prerelease" ]]; then
# Extract platform and arch from package name
PACKAGE_NAME="${{ steps.package.outputs.package_name }}"
# Create latest version filename
# Convert from rustfs-linux-x86_64-v1.0.0 to rustfs-linux-x86_64-latest
LATEST_FILE="${PACKAGE_NAME%-v*}-latest.zip"
# Copy the original file to latest version
cp "${{ steps.package.outputs.package_file }}" "$LATEST_FILE"
# Upload the latest version
echo "Uploading latest version: $LATEST_FILE to $OSS_PATH..."
$OSSUTIL_BIN cp "$LATEST_FILE" "$OSS_PATH" --force
echo "✅ Latest version uploaded: $LATEST_FILE"
fi
# For development builds, create dev-latest version
if [[ "$BUILD_TYPE" == "development" ]]; then
# Extract platform and arch from package name
PACKAGE_NAME="${{ steps.package.outputs.package_name }}"
# Create dev-latest version filename
# Convert from rustfs-linux-x86_64-dev-abc123 to rustfs-linux-x86_64-dev-latest
DEV_LATEST_FILE="${PACKAGE_NAME%-*}-latest.zip"
# Copy the original file to dev-latest version
cp "${{ steps.package.outputs.package_file }}" "$DEV_LATEST_FILE"
# Upload the dev-latest version
echo "Uploading dev-latest version: $DEV_LATEST_FILE to $OSS_PATH..."
$OSSUTIL_BIN cp "$DEV_LATEST_FILE" "$OSS_PATH" --force
echo "✅ Dev-latest version uploaded: $DEV_LATEST_FILE"
# For main branch builds, also create a main-latest version
if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
# Create main-latest version filename
# Convert from rustfs-linux-x86_64-dev-abc123 to rustfs-linux-x86_64-main-latest
MAIN_LATEST_FILE="${PACKAGE_NAME%-dev-*}-main-latest.zip"
# Copy the original file to main-latest version
cp "${{ steps.package.outputs.package_file }}" "$MAIN_LATEST_FILE"
# Upload the main-latest version
echo "Uploading main-latest version: $MAIN_LATEST_FILE to $OSS_PATH..."
$OSSUTIL_BIN cp "$MAIN_LATEST_FILE" "$OSS_PATH" --force
echo "✅ Main-latest version uploaded: $MAIN_LATEST_FILE"
# Also create a generic main-latest for Docker builds
if [[ "${{ matrix.platform }}" == "linux" ]]; then
DOCKER_MAIN_LATEST_FILE="rustfs-linux-${{ matrix.target == 'x86_64-unknown-linux-musl' && 'x86_64' || 'aarch64' }}-main-latest.zip"
cp "${{ steps.package.outputs.package_file }}" "$DOCKER_MAIN_LATEST_FILE"
$OSSUTIL_BIN cp "$DOCKER_MAIN_LATEST_FILE" "$OSS_PATH" --force
echo "✅ Docker main-latest version uploaded: $DOCKER_MAIN_LATEST_FILE"
fi
# Upload all rustfs zip files to OSS using glob pattern
echo "📤 Uploading all rustfs-*.zip files to $OSS_PATH..."
for zip_file in rustfs-*.zip; do
if [[ -f "$zip_file" ]]; then
echo "Uploading: $zip_file to $OSS_PATH..."
$OSSUTIL_BIN cp "$zip_file" "$OSS_PATH" --force
echo "✅ Uploaded: $zip_file"
fi
fi
done
echo "✅ Upload completed successfully"
# Build summary
build-summary:
name: Build Summary
needs: [build-check, build-rustfs]
needs: [ build-check, build-rustfs ]
if: always() && needs.build-check.outputs.should_build == 'true'
runs-on: ubuntu-latest
steps:
- name: Build completion summary
shell: bash
run: |
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
VERSION="${{ needs.build-check.outputs.version }}"
@@ -493,7 +582,7 @@ jobs:
# Create GitHub Release (only for tag pushes)
create-release:
name: Create GitHub Release
needs: [build-check, build-rustfs]
needs: [ build-check, build-rustfs ]
if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
runs-on: ubuntu-latest
permissions:
@@ -503,7 +592,7 @@ jobs:
release_url: ${{ steps.create.outputs.release_url }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
fetch-depth: 0
@@ -511,6 +600,7 @@ jobs:
id: create
env:
GH_TOKEN: ${{ github.token }}
shell: bash
run: |
TAG="${{ needs.build-check.outputs.version }}"
VERSION="${{ needs.build-check.outputs.version }}"
@@ -578,7 +668,7 @@ jobs:
# Prepare and upload release assets
upload-release-assets:
name: Upload Release Assets
needs: [build-check, build-rustfs, create-release]
needs: [ build-check, build-rustfs, create-release ]
if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
runs-on: ubuntu-latest
permissions:
@@ -586,10 +676,10 @@ jobs:
actions: read
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Download all build artifacts
uses: actions/download-artifact@v4
uses: actions/download-artifact@v5
with:
path: ./artifacts
pattern: rustfs-*
@@ -597,13 +687,14 @@ jobs:
- name: Prepare release assets
id: prepare
shell: bash
run: |
VERSION="${{ needs.build-check.outputs.version }}"
TAG="${{ needs.build-check.outputs.version }}"
mkdir -p ./release-assets
# Copy and verify artifacts
# Copy and verify artifacts (including latest files created during build)
ASSETS_COUNT=0
for file in ./artifacts/*.zip; do
if [[ -f "$file" ]]; then
@@ -619,7 +710,7 @@ jobs:
cd ./release-assets
# Generate checksums
# Generate checksums for all files (including latest versions)
if ls *.zip >/dev/null 2>&1; then
sha256sum *.zip > SHA256SUMS
sha512sum *.zip > SHA512SUMS
@@ -634,11 +725,12 @@ jobs:
echo "📦 Prepared assets:"
ls -la
echo "🔢 Asset count: $ASSETS_COUNT"
echo "🔢 Total asset count: $ASSETS_COUNT"
- name: Upload to GitHub Release
env:
GH_TOKEN: ${{ github.token }}
shell: bash
run: |
TAG="${{ needs.build-check.outputs.version }}"
@@ -657,14 +749,17 @@ jobs:
# Update latest.json for stable releases only
update-latest-version:
name: Update Latest Version
needs: [build-check, upload-release-assets]
if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.is_prerelease == 'false'
needs: [ build-check, upload-release-assets ]
if: startsWith(github.ref, 'refs/tags/')
runs-on: ubuntu-latest
steps:
- name: Update latest.json
env:
OSS_ACCESS_KEY_ID: ${{ secrets.ALICLOUDOSS_KEY_ID }}
OSS_ACCESS_KEY_SECRET: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
OSS_REGION: cn-beijing
OSS_ENDPOINT: https://oss-cn-beijing.aliyuncs.com
shell: bash
run: |
if [[ -z "$OSS_ACCESS_KEY_ID" ]]; then
echo "⚠️ OSS credentials not available, skipping latest.json update"
@@ -681,7 +776,9 @@ jobs:
curl -o "$OSSUTIL_ZIP" "https://gosspublic.alicdn.com/ossutil/v2/${OSSUTIL_VERSION}/${OSSUTIL_ZIP}"
unzip "$OSSUTIL_ZIP"
chmod +x "${OSSUTIL_DIR}/ossutil"
mv "${OSSUTIL_DIR}/ossutil" /usr/local/bin/
rm -rf "$OSSUTIL_DIR" "$OSSUTIL_ZIP"
chmod +x /usr/local/bin/ossutil
# Create latest.json
cat > latest.json << EOF
@@ -695,25 +792,26 @@ jobs:
EOF
# Upload to OSS
./${OSSUTIL_DIR}/ossutil cp latest.json oss://rustfs-version/latest.json --force
ossutil cp latest.json oss://rustfs-version/latest.json --force
echo "✅ Updated latest.json for stable release $VERSION"
# Publish release (remove draft status)
publish-release:
name: Publish Release
needs: [build-check, create-release, upload-release-assets]
needs: [ build-check, create-release, upload-release-assets ]
if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Update release notes and publish
env:
GH_TOKEN: ${{ github.token }}
shell: bash
run: |
TAG="${{ needs.build-check.outputs.version }}"
VERSION="${{ needs.build-check.outputs.version }}"

View File

@@ -16,7 +16,7 @@ name: Continuous Integration
on:
push:
branches: [main]
branches: [ main ]
paths-ignore:
- "**.md"
- "**.txt"
@@ -36,7 +36,7 @@ on:
- ".github/workflows/audit.yml"
- ".github/workflows/performance.yml"
pull_request:
branches: [main]
branches: [ main ]
paths-ignore:
- "**.md"
- "**.txt"
@@ -59,6 +59,9 @@ on:
- cron: "0 0 * * 0" # Weekly on Sunday at midnight UTC
workflow_dispatch:
permissions:
contents: read
env:
CARGO_TERM_COLOR: always
RUST_BACKTRACE: 1
@@ -83,6 +86,16 @@ jobs:
# Never skip release events and tag pushes
do_not_skip: '["workflow_dispatch", "schedule", "merge_group", "release", "push"]'
typos:
name: Typos
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v5
- uses: dtolnay/rust-toolchain@stable
- name: Typos check with custom config file
uses: crate-ci/typos@master
test-and-lint:
name: Test and Lint
needs: skip-check
@@ -90,8 +103,10 @@ jobs:
runs-on: ubuntu-latest
timeout-minutes: 60
steps:
- name: Delete huge unnecessary tools folder
run: rm -rf /opt/hostedtoolcache
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Setup Rust environment
uses: ./.github/actions/setup
@@ -120,7 +135,7 @@ jobs:
timeout-minutes: 30
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Setup Rust environment
uses: ./.github/actions/setup

View File

@@ -36,9 +36,8 @@ permissions:
on:
# Automatically triggered when build workflow completes
workflow_run:
workflows: ["Build and Release"]
types: [completed]
branches: [main]
workflows: [ "Build and Release" ]
types: [ completed ]
# Manual trigger with same parameters for consistency
workflow_dispatch:
inputs:
@@ -59,6 +58,10 @@ on:
type: boolean
env:
CONCLUSION: ${{ github.event.workflow_run.conclusion }}
HEAD_BRANCH: ${{ github.event.workflow_run.head_branch }}
HEAD_SHA: ${{ github.event.workflow_run.head_sha }}
TRIGGERING_EVENT: ${{ github.event.workflow_run.event }}
DOCKERHUB_USERNAME: rustfs
CARGO_TERM_COLOR: always
REGISTRY_DOCKERHUB: rustfs/rustfs
@@ -80,9 +83,11 @@ jobs:
create_latest: ${{ steps.check.outputs.create_latest }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
fetch-depth: 0
# For workflow_run events, checkout the specific commit that triggered the workflow
ref: ${{ github.event.workflow_run.head_sha || github.sha }}
- name: Check build conditions
id: check
@@ -101,52 +106,89 @@ jobs:
# Check if the triggering workflow was successful
# If the workflow succeeded, it means ALL builds (including Linux x86_64 and aarch64) succeeded
if [[ "${{ github.event.workflow_run.conclusion }}" == "success" ]]; then
if [[ "$CONCLUSION" == "success" ]]; then
echo "✅ Build workflow succeeded, all builds including Linux are successful"
should_build=true
should_push=true
else
echo "❌ Build workflow failed (conclusion: ${{ github.event.workflow_run.conclusion }}), skipping Docker build"
echo "❌ Build workflow failed (conclusion: $CONCLUSION), skipping Docker build"
should_build=false
fi
# Extract version info from commit message or use commit SHA
# Use Git to generate consistent short SHA (ensures uniqueness like build.yml)
short_sha=$(git rev-parse --short "${{ github.event.workflow_run.head_sha }}")
short_sha=$(git rev-parse --short "$HEAD_SHA")
# Determine build type based on branch and commit
if [[ "${{ github.event.workflow_run.head_branch }}" == "main" ]]; then
build_type="development"
version="dev-${short_sha}"
# Skip Docker build for development builds
should_build=false
echo "⏭️ Skipping Docker build for development version (main branch)"
elif [[ "${{ github.event.workflow_run.event }}" == "push" ]] && [[ "${{ github.event.workflow_run.head_branch }}" =~ ^refs/tags/ ]]; then
# Tag push - only build for releases and prereleases
tag_name="${{ github.event.workflow_run.head_branch }}"
version="${tag_name#refs/tags/}"
if [[ "$version" == *"alpha"* ]] || [[ "$version" == *"beta"* ]] || [[ "$version" == *"rc"* ]]; then
build_type="prerelease"
is_prerelease=true
echo "🧪 Building Docker image for prerelease: $version"
# Determine build type based on triggering workflow event and ref
triggering_event="$TRIGGERING_EVENT"
head_branch="$HEAD_BRANCH"
echo "🔍 Analyzing triggering workflow:"
echo " 📋 Event: $triggering_event"
echo " 🌿 Head branch: $head_branch"
echo " 📎 Head SHA: $HEAD_SHA"
# Check if this was triggered by a tag push
if [[ "$triggering_event" == "push" ]]; then
# For tag pushes, head_branch will be like "refs/tags/v1.0.0" or just "v1.0.0"
if [[ "$head_branch" == refs/tags/* ]]; then
# Extract tag name from refs/tags/TAG_NAME
tag_name="${head_branch#refs/tags/}"
version="$tag_name"
elif [[ "$head_branch" =~ ^v?[0-9]+\.[0-9]+\.[0-9]+ ]]; then
# Direct tag name like "v1.0.0" or "1.0.0-alpha.1"
version="$head_branch"
elif [[ "$head_branch" == "main" ]]; then
# Regular branch push to main
build_type="development"
version="dev-${short_sha}"
should_build=false
echo "⏭️ Skipping Docker build for development version (main branch push)"
else
build_type="release"
create_latest=true
echo "🚀 Building Docker image for release: $version"
# Other branch push
build_type="development"
version="dev-${short_sha}"
should_build=false
echo "⏭️ Skipping Docker build for development version (branch: $head_branch)"
fi
# If we extracted a version (tag), determine release type
if [[ -n "$version" ]] && [[ "$version" != "dev-${short_sha}" ]]; then
# Remove 'v' prefix if present for consistent version format
if [[ "$version" == v* ]]; then
version="${version#v}"
fi
if [[ "$version" == *"alpha"* ]] || [[ "$version" == *"beta"* ]] || [[ "$version" == *"rc"* ]]; then
build_type="prerelease"
is_prerelease=true
# TODO: Temporary change - alpha versions are currently allowed to also create the latest tag
# Once releases stabilize, remove the line below and restore the original logic (only stable releases create latest)
if [[ "$version" == *"alpha"* ]]; then
create_latest=true
echo "🧪 Building Docker image for prerelease: $version (临时允许创建 latest 标签)"
else
echo "🧪 Building Docker image for prerelease: $version"
fi
else
build_type="release"
create_latest=true
echo "🚀 Building Docker image for release: $version"
fi
fi
else
# Non-push events
build_type="development"
version="dev-${short_sha}"
# Skip Docker build for development builds
should_build=false
echo "⏭️ Skipping Docker build for development version"
echo "⏭️ Skipping Docker build for development version (event: $triggering_event)"
fi
echo "🔄 Build triggered by workflow_run:"
echo " 📋 Conclusion: ${{ github.event.workflow_run.conclusion }}"
echo " 🌿 Branch: ${{ github.event.workflow_run.head_branch }}"
echo " 📎 SHA: ${{ github.event.workflow_run.head_sha }}"
echo " 🎯 Event: ${{ github.event.workflow_run.event }}"
echo " 📋 Conclusion: $CONCLUSION"
echo " 🌿 Branch: $HEAD_BRANCH"
echo " 📎 SHA: $HEAD_SHA"
echo " 🎯 Event: $TRIGGERING_EVENT"
elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
# Manual trigger
@@ -169,21 +211,30 @@ jobs:
create_latest=true
echo "🚀 Building with latest stable release version"
;;
v[0-9]*)
# Prerelease versions (must match first, more specific)
v*alpha*|v*beta*|v*rc*|*alpha*|*beta*|*rc*)
build_type="prerelease"
is_prerelease=true
# TODO: Temporary change - alpha versions are currently allowed to also create the latest tag
# Once releases stabilize, remove the if block below and restore the original logic
if [[ "$input_version" == *"alpha"* ]]; then
create_latest=true
echo "🧪 Building with prerelease version: $input_version (临时允许创建 latest 标签)"
else
echo "🧪 Building with prerelease version: $input_version"
fi
;;
# Release versions (match after prereleases, more general)
v[0-9]*|[0-9]*.*.*)
build_type="release"
create_latest=true
echo "📦 Building with specific release version: $input_version"
;;
v*alpha*|v*beta*|v*rc*)
build_type="prerelease"
is_prerelease=true
echo "🧪 Building with prerelease version: $input_version"
;;
*)
# Invalid version for Docker build
should_build=false
echo "❌ Invalid version for Docker build: $input_version"
echo "⚠️ Only release versions (latest, v1.0.0) and prereleases (v1.0.0-alpha1) are supported"
echo "⚠️ Only release versions (latest, v1.0.0, 1.0.0) and prereleases (v1.0.0-alpha1, 1.0.0-beta2) are supported"
;;
esac
fi
@@ -217,7 +268,7 @@ jobs:
timeout-minutes: 60
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Login to Docker Hub
uses: docker/login-action@v3
@@ -279,7 +330,9 @@ jobs:
# Add channel tags for prereleases and latest for stable
if [[ "$CREATE_LATEST" == "true" ]]; then
# Stable release
# TODO: Temporary change - alpha versions currently also create the latest tag
# Once releases stabilize, this logic stays the same, but the upstream CREATE_LATEST setting must be restored
# Stable release (and, temporarily, alpha versions)
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:latest"
elif [[ "$BUILD_TYPE" == "prerelease" ]]; then
# Prerelease channel tags (alpha, beta, rc)
@@ -349,7 +402,7 @@ jobs:
# Docker build summary
docker-summary:
name: Docker Build Summary
needs: [build-check, build-docker]
needs: [ build-check, build-docker ]
if: always() && needs.build-check.outputs.should_build == 'true'
runs-on: ubuntu-latest
steps:
@@ -376,7 +429,13 @@ jobs:
"prerelease")
echo "🧪 Prerelease Docker image has been built with ${VERSION} tags"
echo "⚠️ This is a prerelease image - use with caution"
echo "🚫 Latest tag NOT created for prerelease"
# TODO: Temporary change - alpha versions currently create the latest tag
# Once releases stabilize, restore the message below
if [[ "$VERSION" == *"alpha"* ]] && [[ "$CREATE_LATEST" == "true" ]]; then
echo "🏷️ Latest tag has been created for alpha version (临时措施)"
else
echo "🚫 Latest tag NOT created for prerelease"
fi
;;
*)
echo "❌ Unexpected build type: $BUILD_TYPE"

78
.github/workflows/helm-package.yml vendored Normal file
View File

@@ -0,0 +1,78 @@
name: Publish helm chart to artifacthub
on:
workflow_run:
workflows: ["Build and Release"]
types: [completed]
env:
new_version: ${{ github.event.workflow_run.head_branch }}
jobs:
build-helm-package:
runs-on: ubuntu-latest
# Only run on successful builds triggered by tag pushes (version format: x.y.z or x.y.z-suffix)
if: |
github.event.workflow_run.conclusion == 'success' &&
github.event.workflow_run.event == 'push' &&
contains(github.event.workflow_run.head_branch, '.')
steps:
- name: Checkout helm chart repo
uses: actions/checkout@v2
- name: Replace chart appversion
run: |
set -e
set -x
old_version=$(grep "^appVersion:" helm/rustfs/Chart.yaml | awk '{print $2}')
sed -i "s/$old_version/$new_version/g" helm/rustfs/Chart.yaml
sed -i "/^image:/,/^[^ ]/ s/tag:.*/tag: "$new_version"/" helm/rustfs/values.yaml
- name: Set up Helm
uses: azure/setup-helm@v4.3.0
- name: Package Helm Chart
run: |
cp helm/README.md helm/rustfs/
package_version=$(echo $new_version | awk -F '-' '{print $2}' | awk -F '.' '{print $NF}')
helm package ./helm/rustfs --destination helm/rustfs/ --version "0.0.$package_version"
- name: Upload helm package as artifact
uses: actions/upload-artifact@v4
with:
name: helm-package
path: helm/rustfs/*.tgz
retention-days: 1
publish-helm-package:
runs-on: ubuntu-latest
needs: [build-helm-package]
steps:
- name: Checkout helm package repo
uses: actions/checkout@v2
with:
repository: rustfs/helm
token: ${{ secrets.RUSTFS_HELM_PACKAGE }}
- name: Download helm package
uses: actions/download-artifact@v4
with:
name: helm-package
path: ./
- name: Set up helm
uses: azure/setup-helm@v4.3.0
- name: Generate index
run: helm repo index . --url https://charts.rustfs.com
- name: Push helm package and index file
run: |
git config --global user.name "${{ secrets.USERNAME }}"
git config --global user.email "${{ secrets.EMAIL_ADDRESS }}"
git status .
git add .
git commit -m "Update rustfs helm package with $new_version."
git push origin main

View File

@@ -15,9 +15,13 @@
name: "issue-translator"
on:
issue_comment:
types: [created]
types: [ created ]
issues:
types: [opened]
types: [ opened ]
permissions:
contents: read
issues: write
jobs:
build:

View File

@@ -16,7 +16,7 @@ name: Performance Testing
on:
push:
branches: [main]
branches: [ main ]
paths:
- "**/*.rs"
- "**/Cargo.toml"
@@ -30,6 +30,9 @@ on:
default: "120"
type: string
permissions:
contents: read
env:
CARGO_TERM_COLOR: always
RUST_BACKTRACE: 1
@@ -41,7 +44,7 @@ jobs:
timeout-minutes: 30
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Setup Rust environment
uses: ./.github/actions/setup
@@ -116,7 +119,7 @@ jobs:
timeout-minutes: 45
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v5
- name: Setup Rust environment
uses: ./.github/actions/setup

4
.gitignore vendored
View File

@@ -20,3 +20,7 @@ profile.json
.docker/openobserve-otel/data
*.zst
.secrets
*.go
*.pb
*.svg
deploy/logs/*.log.*

View File

@@ -1,4 +1,4 @@
# RustFS Project Cursor Rules
# RustFS Project AI Coding Rules
## 🚨🚨🚨 CRITICAL DEVELOPMENT RULES - ZERO TOLERANCE 🚨🚨🚨
@@ -35,6 +35,47 @@
- **Code review requirement**: At least one approval needed
- **Automated reversal**: Direct commits to main will be automatically reverted
## 🎯 Core AI Development Principles
### Five Execution Steps
#### 1. Task Analysis and Planning
- **Clear Objectives**: Deeply understand task requirements and expected results before starting coding
- **Plan Development**: List specific files, components, and functions that need modification, explaining the reasons for changes
- **Risk Assessment**: Evaluate the impact of changes on existing functionality, develop rollback plans
#### 2. Precise Code Location
- **File Identification**: Determine specific files and line numbers that need modification
- **Impact Analysis**: Avoid modifying irrelevant files, clearly state the reason for each file modification
- **Minimization Principle**: Unless explicitly required by the task, do not create new abstraction layers or refactor existing code
#### 3. Minimal Code Changes
- **Focus on Core**: Only write code directly required by the task
- **Avoid Redundancy**: Do not add unnecessary logs, comments, tests, or error handling
- **Isolation**: Ensure new code does not interfere with existing functionality, maintain code independence
#### 4. Strict Code Review
- **Correctness Check**: Verify the correctness and completeness of code logic
- **Style Consistency**: Ensure code conforms to established project coding style
- **Side Effect Assessment**: Evaluate the impact of changes on downstream systems
#### 5. Clear Delivery Documentation
- **Change Summary**: Detailed explanation of all modifications and reasons
- **File List**: List all modified files and their specific changes
- **Risk Statement**: Mark any assumptions or potential risk points
### Core Principles
- **🎯 Precise Execution**: Strictly follow task requirements, no arbitrary innovation
- **⚡ Efficient Development**: Avoid over-design, only do necessary work
- **🛡️ Safe and Reliable**: Always follow development processes, ensure code quality and system stability
- **🔒 Cautious Modification**: Only modify when clearly knowing what needs to be changed and having confidence
### Additional AI Behavior Rules
1. **Use English for all code comments and documentation** - All comments, variable names, function names, documentation, and user-facing text in code should be in English
2. **Clean up temporary scripts after use** - Any temporary scripts, test files, or helper files created during AI work should be removed after task completion
3. **Only make confident modifications** - Do not make speculative changes or "convenient" modifications outside the task scope. If uncertain about a change, ask for clarification rather than guessing
## Project Overview
RustFS is a high-performance distributed object storage system written in Rust, compatible with S3 API. The project adopts a modular architecture, supporting erasure coding storage, multi-tenant management, observability, and other enterprise-level features.
@@ -66,15 +107,6 @@ RustFS is a high-performance distributed object storage system written in Rust,
- Use `#[error("description")]` attributes for clear error messages
- Support error downcasting when needed through `other()` helper methods
- Implement `Clone` for errors when required by the domain logic
- **Current module error types:**
- `ecstore::error::StorageError` - Storage layer errors
- `ecstore::disk::error::DiskError` - Disk operation errors
- `iam::error::Error` - Identity and access management errors
- `policy::error::Error` - Policy-related errors
- `crypto::error::Error` - Cryptographic operation errors
- `filemeta::error::Error` - File metadata errors
- `rustfs::error::ApiError` - API layer errors
- Module-specific error types for specialized functionality
## Code Style Guidelines
@@ -95,25 +127,21 @@ single_line_let_else_max_width = 100
Before every commit, you **MUST**:
1. **Format your code**:
```bash
cargo fmt --all
```
2. **Verify formatting**:
```bash
cargo fmt --all --check
```
3. **Pass clippy checks**:
```bash
cargo clippy --all-targets --all-features -- -D warnings
```
4. **Ensure compilation**:
```bash
cargo check --all-targets
```
@@ -145,48 +173,6 @@ make pre-commit
make setup-hooks
```
#### 🔒 Automated Pre-commit Hooks
This project includes a pre-commit hook that automatically runs before each commit to ensure:
- ✅ Code is properly formatted (`cargo fmt --all --check`)
- ✅ No clippy warnings (`cargo clippy --all-targets --all-features -- -D warnings`)
- ✅ Code compiles successfully (`cargo check --all-targets`)
**Setting Up Pre-commit Hooks** (MANDATORY for all developers):
Run this command once after cloning the repository:
```bash
make setup-hooks
```
Or manually:
```bash
chmod +x .git/hooks/pre-commit
```
#### 🚫 Commit Prevention
If your code doesn't meet the formatting requirements, the pre-commit hook will:
1. **Block the commit** and show clear error messages
2. **Provide exact commands** to fix the issues
3. **Guide you through** the resolution process
Example output when formatting fails:
```
❌ Code formatting check failed!
💡 Please run 'cargo fmt --all' to format your code before committing.
🔧 Quick fix:
cargo fmt --all
git add .
git commit
```
### 3. Naming Conventions
- Use `snake_case` for functions, variables, modules
@@ -207,40 +193,6 @@ Example output when formatting fails:
- Required for API boundaries (function signatures, public struct fields)
- Needed to resolve ambiguity between multiple possible types
**Good examples (prefer these):**
```rust
// Compiler can infer the type
let items = vec![1, 2, 3, 4];
let config = Config::default();
let result = process_data(&input);
// Iterator chains with clear context
let filtered: Vec<_> = items.iter().filter(|&&x| x > 2).collect();
```
**Avoid unnecessary explicit types:**
```rust
// Unnecessary - type is obvious
let items: Vec<i32> = vec![1, 2, 3, 4];
let config: Config = Config::default();
let result: ProcessResult = process_data(&input);
```
**When explicit types are beneficial:**
```rust
// API boundaries - always specify types
pub fn process_data(input: &[u8]) -> Result<ProcessResult, Error> { ... }
// Ambiguous cases - explicit type needed
let value: f64 = "3.14".parse().unwrap();
// Complex generic types - explicit for clarity
let cache: HashMap<String, Arc<Mutex<CacheEntry>>> = HashMap::new();
```
### 5. Documentation Comments
- Public APIs must have documentation comments
@@ -358,34 +310,7 @@ impl MyError {
}
```
### 3. Error Conversion Between Modules
```rust
// Convert between different module error types
impl From<ecstore::error::StorageError> for MyError {
fn from(e: ecstore::error::StorageError) -> Self {
match e {
ecstore::error::StorageError::FileNotFound => {
MyError::FileNotFound { path: "unknown".to_string() }
}
_ => MyError::Storage(e),
}
}
}
// Provide reverse conversion when needed
impl From<MyError> for ecstore::error::StorageError {
fn from(e: MyError) -> Self {
match e {
MyError::FileNotFound { .. } => ecstore::error::StorageError::FileNotFound,
MyError::Storage(e) => e,
_ => ecstore::error::StorageError::other(e),
}
}
}
```
### 4. Error Context and Propagation
### 3. Error Context and Propagation
```rust
// Use ? operator for clean error propagation
@@ -405,117 +330,6 @@ fn process_with_context(path: &str) -> Result<()> {
}
```
### 5. API Error Conversion (S3 Example)
```rust
// Convert storage errors to API-specific errors
use s3s::{S3Error, S3ErrorCode};
#[derive(Debug)]
pub struct ApiError {
pub code: S3ErrorCode,
pub message: String,
pub source: Option<Box<dyn std::error::Error + Send + Sync>>,
}
impl From<ecstore::error::StorageError> for ApiError {
fn from(err: ecstore::error::StorageError) -> Self {
let code = match &err {
ecstore::error::StorageError::BucketNotFound(_) => S3ErrorCode::NoSuchBucket,
ecstore::error::StorageError::ObjectNotFound(_, _) => S3ErrorCode::NoSuchKey,
ecstore::error::StorageError::BucketExists(_) => S3ErrorCode::BucketAlreadyExists,
ecstore::error::StorageError::InvalidArgument(_, _, _) => S3ErrorCode::InvalidArgument,
ecstore::error::StorageError::MethodNotAllowed => S3ErrorCode::MethodNotAllowed,
ecstore::error::StorageError::StorageFull => S3ErrorCode::ServiceUnavailable,
_ => S3ErrorCode::InternalError,
};
ApiError {
code,
message: err.to_string(),
source: Some(Box::new(err)),
}
}
}
impl From<ApiError> for S3Error {
fn from(err: ApiError) -> Self {
let mut s3e = S3Error::with_message(err.code, err.message);
if let Some(source) = err.source {
s3e.set_source(source);
}
s3e
}
}
```
### 6. Error Handling Best Practices
#### Pattern Matching and Error Classification
```rust
// Use pattern matching for specific error handling
async fn handle_storage_operation() -> Result<()> {
match storage.get_object("bucket", "key").await {
Ok(object) => process_object(object),
Err(ecstore::error::StorageError::ObjectNotFound(bucket, key)) => {
warn!("Object not found: {}/{}", bucket, key);
create_default_object(bucket, key).await
}
Err(ecstore::error::StorageError::BucketNotFound(bucket)) => {
error!("Bucket not found: {}", bucket);
Err(MyError::Custom {
message: format!("Bucket {} does not exist", bucket)
})
}
Err(e) => {
error!("Storage operation failed: {}", e);
Err(MyError::Storage(e))
}
}
}
```
#### Error Aggregation and Reporting
```rust
// Collect and report multiple errors
pub fn validate_configuration(config: &Config) -> Result<()> {
let mut errors = Vec::new();
if config.bucket_name.is_empty() {
errors.push("Bucket name cannot be empty");
}
if config.region.is_empty() {
errors.push("Region must be specified");
}
if !errors.is_empty() {
return Err(MyError::Custom {
message: format!("Configuration validation failed: {}", errors.join(", "))
});
}
Ok(())
}
```
#### Contextual Error Information
```rust
// Add operation context to errors
#[tracing::instrument(skip(self))]
async fn upload_file(&self, bucket: &str, key: &str, data: Vec<u8>) -> Result<()> {
self.storage
.put_object(bucket, key, data)
.await
.map_err(|e| MyError::Custom {
message: format!("Failed to upload {}/{}: {}", bucket, key, e)
})
}
```
## Performance Optimization Guidelines
### 1. Memory Management
@@ -558,45 +372,6 @@ mod tests {
fn test_with_cases(input: &str, expected: &str) {
assert_eq!(function(input), expected);
}
#[test]
fn test_error_conversion() {
use ecstore::error::StorageError;
let storage_err = StorageError::BucketNotFound("test-bucket".to_string());
let api_err: ApiError = storage_err.into();
assert_eq!(api_err.code, S3ErrorCode::NoSuchBucket);
assert!(api_err.message.contains("test-bucket"));
assert!(api_err.source.is_some());
}
#[test]
fn test_error_types() {
let io_err = std::io::Error::new(std::io::ErrorKind::NotFound, "file not found");
let my_err = MyError::Io(io_err);
// Test error matching
match my_err {
MyError::Io(_) => {}, // Expected
_ => panic!("Unexpected error type"),
}
}
#[test]
fn test_error_context() {
let result = process_with_context("nonexistent_file.txt");
assert!(result.is_err());
let err = result.unwrap_err();
match err {
MyError::Custom { message } => {
assert!(message.contains("Failed to read"));
assert!(message.contains("nonexistent_file.txt"));
}
_ => panic!("Expected Custom error"),
}
}
}
```
@@ -829,17 +604,15 @@ async fn shutdown_gracefully(shutdown_rx: &mut Receiver<()>) {
- Support version control and migration
- Implement metadata caching
These rules should serve as guiding principles when developing the RustFS project, ensuring code quality, performance, and maintainability.
## Branch Management and Development Workflow
### 4. Code Operations
#### Branch Management
### Branch Management
- **🚨 CRITICAL: NEVER modify code directly on main or master branch - THIS IS ABSOLUTELY FORBIDDEN 🚨**
- **⚠️ ANY DIRECT COMMITS TO MASTER/MAIN WILL BE REJECTED AND MUST BE REVERTED IMMEDIATELY ⚠️**
- **🔒 ALL CHANGES MUST GO THROUGH PULL REQUESTS - NO DIRECT COMMITS TO MAIN UNDER ANY CIRCUMSTANCES 🔒**
- **Always work on feature branches - NO EXCEPTIONS**
- Always check the .cursorrules file before starting to ensure you understand the project guidelines
- Always check the .rules.md file before starting to ensure you understand the project guidelines
- **MANDATORY workflow for ALL changes:**
1. `git checkout main` (switch to main branch)
2. `git pull` (get latest changes)
@@ -862,7 +635,7 @@ These rules should serve as guiding principles when developing the RustFS projec
- Direct pushes to main should be blocked by repository settings
- Any accidental direct commits to main must be immediately reverted via PR
#### Development Workflow
### Development Workflow
## 🎯 **Core Development Principles**
@@ -901,27 +674,29 @@ These rules should serve as guiding principles when developing the RustFS projec
- Testing information and verification steps
- **Provide PR descriptions in copyable markdown format** enclosed in code blocks for easy one-click copying
## 🚫 AI 文档生成限制
## 🚫 AI Documentation Generation Restrictions
### 禁止生成总结文档
### Forbidden Summary Documents
- **严格禁止创建任何形式的AI生成总结文档**
- **不得创建包含大量表情符号、详细格式化表格和典型AI风格的文档**
- **不得在项目中生成以下类型的文档:**
- 基准测试总结文档(BENCHMARK*.md
- 实现对比分析文档(IMPLEMENTATION_COMPARISON*.md
- 性能分析报告文档
- 架构总结文档
- 功能对比文档
- 任何带有大量表情符号和格式化内容的文档
- **如果需要文档,请只在用户明确要求时创建,并保持简洁实用的风格**
- **文档应当专注于实际需要的信息,避免过度格式化和装饰性内容**
- **任何发现的AI生成总结文档都应该立即删除**
- **Strictly forbidden to create any form of AI-generated summary documents**
- **Do not create documents containing large amounts of emoji, detailed formatting tables and typical AI style**
- **Do not generate the following types of documents in the project:**
- Benchmark summary documents (BENCHMARK*.md)
- Implementation comparison analysis documents (IMPLEMENTATION_COMPARISON*.md)
- Performance analysis report documents
- Architecture summary documents
- Feature comparison documents
- Any documents with large amounts of emoji and formatted content
- **If documentation is needed, only create when explicitly requested by the user, and maintain a concise and practical style**
- **Documentation should focus on actually needed information, avoiding excessive formatting and decorative content**
- **Any discovered AI-generated summary documents should be immediately deleted**
### 允许的文档类型
### Allowed Documentation Types
- README.md(项目介绍,保持简洁)
- 技术文档(仅在明确需要时创建)
- 用户手册(仅在明确需要时创建)
- API文档(从代码生成)
- 变更日志(CHANGELOG.md
- README.md (project introduction, keep concise)
- Technical documentation (only create when explicitly needed)
- User manual (only create when explicitly needed)
- API documentation (generated from code)
- Changelog (CHANGELOG.md)
These rules should serve as guiding principles when developing the RustFS project, ensuring code quality, performance, and maintainability.

46
.vscode/launch.json vendored
View File

@@ -20,18 +20,22 @@
}
},
"env": {
"RUST_LOG": "rustfs=debug,ecstore=info,s3s=debug"
"RUST_LOG": "rustfs=debug,ecstore=info,s3s=debug,iam=debug",
"RUSTFS_SKIP_BACKGROUND_TASK": "on",
//"RUSTFS_OBS_LOG_DIRECTORY": "./deploy/logs",
// "RUSTFS_POLICY_PLUGIN_URL":"http://localhost:8181/v1/data/rustfs/authz/allow",
// "RUSTFS_POLICY_PLUGIN_AUTH_TOKEN":"your-opa-token"
},
"args": [
"--access-key",
"AKEXAMPLERUSTFS",
"rustfsadmin",
"--secret-key",
"SKEXAMPLERUSTFS",
"rustfsadmin",
"--address",
"0.0.0.0:9010",
"--domain-name",
"--server-domains",
"127.0.0.1:9010",
"./target/volume/test{0...4}"
"./target/volume/test{1...4}"
],
"cwd": "${workspaceFolder}"
},
@@ -82,6 +86,38 @@
"cwd": "${workspaceFolder}",
//"stopAtEntry": false,
//"preLaunchTask": "cargo build",
"env": {
"RUSTFS_ACCESS_KEY": "rustfsadmin",
"RUSTFS_SECRET_KEY": "rustfsadmin",
"RUSTFS_VOLUMES": "./target/volume/test{1...4}",
"RUSTFS_ADDRESS": ":9000",
"RUSTFS_CONSOLE_ENABLE": "true",
// "RUSTFS_OBS_TRACE_ENDPOINT": "http://127.0.0.1:4318/v1/traces", // jeager otlp http endpoint
// "RUSTFS_OBS_METRIC_ENDPOINT": "http://127.0.0.1:4318/v1/metrics", // default otlp http endpoint
// "RUSTFS_OBS_LOG_ENDPOINT": "http://127.0.0.1:4318/v1/logs", // default otlp http endpoint
"RUSTFS_CONSOLE_ADDRESS": "127.0.0.1:9001",
"RUSTFS_OBS_LOG_DIRECTORY": "./target/logs",
},
"sourceLanguages": [
"rust"
],
},
{
"name": "Debug executable target/debug/test",
"type": "lldb",
"request": "launch",
"program": "${workspaceFolder}/target/debug/deps/lifecycle_integration_test-5915cbfcab491b3b",
"args": [
"--skip",
"test_lifecycle_expiry_basic",
"--skip",
"test_lifecycle_expiry_deletemarker",
//"--skip",
//"test_lifecycle_transition_basic",
],
"cwd": "${workspaceFolder}",
//"stopAtEntry": false,
//"preLaunchTask": "cargo build",
"sourceLanguages": [
"rust"
],

AGENTS.md

@@ -0,0 +1,24 @@
# Repository Guidelines
## Communication Rules
- Respond to the user in Chinese; use English in all other contexts.
## Project Structure & Module Organization
The workspace root hosts shared dependencies in `Cargo.toml`. The service binary lives under `rustfs/src/main.rs`, while reusable crates sit in `crates/` (`crypto`, `iam`, `kms`, and `e2e_test`). Local fixtures for standalone flows reside in `test_standalone/`, deployment manifests are under `deploy/`, Docker assets sit at the root, and automation lives in `scripts/`. Skim each crate's README or module docs before contributing changes.
## Build, Test, and Development Commands
Run `cargo check --all-targets` for fast validation. Build release binaries via `cargo build --release` or the pipeline-aligned `make build`. Use `./build-rustfs.sh --dev` for iterative development and `./build-rustfs.sh --platform <target>` for cross-compiles. Prefer `make pre-commit` before pushing to cover formatting, clippy, checks, and tests.
Always ensure `cargo fmt --all --check`, `cargo test --workspace --exclude e2e_test`, and `cargo clippy --all-targets --all-features -- -D warnings` complete successfully after each code change to keep the tree healthy and warning-free.
## Coding Style & Naming Conventions
Formatting follows the repo `rustfmt.toml` (130-column width). Use `snake_case` for items, `PascalCase` for types, and `SCREAMING_SNAKE_CASE` for constants. Avoid `unwrap()` or `expect()` outside tests; bubble errors with `Result` and crate-specific `thiserror` types. Keep async code non-blocking and offload CPU-heavy work with `tokio::task::spawn_blocking` when necessary.
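A minimal sketch of these conventions, assuming a hypothetical `compute_checksum` helper and not copied from the RustFS sources: errors bubble through a crate-specific `thiserror` type, and CPU-heavy work is moved off the async runtime with `spawn_blocking`.

```rust
use thiserror::Error;

#[derive(Debug, Error)]
pub enum ChecksumError {
    #[error("io failure: {0}")]
    Io(#[from] std::io::Error),
    #[error("background task failed: {0}")]
    Join(#[from] tokio::task::JoinError),
}

/// Hashes a buffer without blocking the async executor.
pub async fn hash_buffer(data: Vec<u8>) -> Result<u64, ChecksumError> {
    // Offload the CPU-bound step to the blocking thread pool.
    let checksum = tokio::task::spawn_blocking(move || compute_checksum(&data)).await?;
    Ok(checksum)
}

// Hypothetical CPU-bound helper; sums bytes so the sketch stays self-contained.
fn compute_checksum(data: &[u8]) -> u64 {
    data.iter().map(|b| *b as u64).sum()
}
```

The same pattern applies to any hashing, compression, or erasure-coding step that would otherwise stall the executor.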
## Testing Guidelines
Co-locate unit tests with their modules and give behavior-led names such as `handles_expired_token`. Integration suites belong in each crate's `tests/` directory, while exhaustive end-to-end scenarios live in `crates/e2e_test/`. Run `cargo test --workspace --exclude e2e_test` during iteration, `cargo nextest run --all --exclude e2e_test` when available, and finish with `cargo test --all` before requesting review. Use `NO_PROXY=127.0.0.1,localhost HTTP_PROXY= HTTPS_PROXY=` for KMS e2e tests.
When fixing bugs or adding features, include regression tests that capture the new behavior so future changes cannot silently break it.
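As an illustration of a behavior-led, co-located test, here is a minimal sketch; `Token` and `is_valid_at` are hypothetical names used only for this example, not types from the workspace.

```rust
pub struct Token {
    expires_at_unix: u64,
}

impl Token {
    pub fn is_valid_at(&self, now_unix: u64) -> bool {
        now_unix < self.expires_at_unix
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    // The test name states the behavior being protected, per the guideline above.
    #[test]
    fn handles_expired_token() {
        let token = Token { expires_at_unix: 1_000 };
        assert!(!token.is_valid_at(2_000), "expired tokens must be rejected");
    }
}
```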
## Commit & Pull Request Guidelines
Work on feature branches (e.g., `feat/...`) after syncing `main`. Follow Conventional Commits under 72 characters (e.g., `feat: add kms key rotation`). Each commit must compile, format cleanly, and pass `make pre-commit`. Open PRs with a concise summary, note verification commands, link relevant issues, and wait for reviewer approval.
## Security & Configuration Tips
Do not commit secrets or cloud credentials; prefer environment variables or vault tooling. Review IAM- and KMS-related changes with a second maintainer. Confirm proxy settings before running sensitive tests to avoid leaking traffic outside localhost.

CLA.md

@@ -1,39 +1,88 @@
RustFS Individual Contributor License Agreement
Thank you for your interest in contributing documentation and related software code to a project hosted or managed by RustFS. In order to clarify the intellectual property license granted with Contributions from any person or entity, RustFS must have a Contributor License Agreement (“CLA”) on file that has been signed by each Contributor, indicating agreement to the license terms below. This version of the Contributor License Agreement allows an individual to submit Contributions to the applicable project. If you are making a submission on behalf of a legal entity, then you should sign the separate Corporate Contributor License Agreement.
Thank you for your interest in contributing documentation and related software code to a project hosted or managed by
RustFS. In order to clarify the intellectual property license granted with Contributions from any person or entity,
RustFS must have a Contributor License Agreement ("CLA") on file that has been signed by each Contributor, indicating
agreement to the license terms below. This version of the Contributor License Agreement allows an individual to submit
Contributions to the applicable project. If you are making a submission on behalf of a legal entity, then you should
sign the separate Corporate Contributor License Agreement.
You accept and agree to the following terms and conditions for Your present and future Contributions submitted to RustFS. You hereby irrevocably assign and transfer to RustFS all right, title, and interest in and to Your Contributions, including all copyrights and other intellectual property rights therein.
You accept and agree to the following terms and conditions for Your present and future Contributions submitted to
RustFS. You hereby irrevocably assign and transfer to RustFS all right, title, and interest in and to Your
Contributions, including all copyrights and other intellectual property rights therein.
Definitions
“You” (or “Your”) shall mean the copyright owner or legal entity authorized by the copyright owner that is making this Agreement with RustFS. For legal entities, the entity making a Contribution and all other entities that control, are controlled by, or are under common control with that entity are considered to be a single Contributor. For the purposes of this definition, “control” means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
“You” (or “Your”) shall mean the copyright owner or legal entity authorized by the copyright owner that is making this
Agreement with RustFS. For legal entities, the entity making a Contribution and all other entities that control, are
controlled by, or are under common control with that entity are considered to be a single Contributor. For the purposes
of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such
entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares,
or (iii) beneficial ownership of such entity.
“Contribution” shall mean any original work of authorship, including any modifications or additions to an existing work, that is intentionally submitted by You to RustFS for inclusion in, or documentation of, any of the products or projects owned or managed by RustFS (the “Work”), including without limitation any Work described in Schedule A. For the purposes of this definition, “submitted” means any form of electronic or written communication sent to RustFS or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, RustFS for the purpose of discussing and improving the Work.
“Contribution” shall mean any original work of authorship, including any modifications or additions to an existing work,
that is intentionally submitted by You to RustFS for inclusion in, or documentation of, any of the products or projects
owned or managed by RustFS (the "Work"), including without limitation any Work described in Schedule A. For the purposes
of this definition, "submitted" means any form of electronic or written communication sent to RustFS or its
representatives, including but not limited to communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, RustFS for the purpose of discussing and improving the
Work.
Assignment of Copyright
Subject to the terms and conditions of this Agreement, You hereby irrevocably assign and transfer to RustFS all right, title, and interest in and to Your Contributions, including all copyrights and other intellectual property rights therein, for the entire term of such rights, including all renewals and extensions. You agree to execute all documents and take all actions as may be reasonably necessary to vest in RustFS the ownership of Your Contributions and to assist RustFS in perfecting, maintaining, and enforcing its rights in Your Contributions.
Subject to the terms and conditions of this Agreement, You hereby irrevocably assign and transfer to RustFS all right,
title, and interest in and to Your Contributions, including all copyrights and other intellectual property rights
therein, for the entire term of such rights, including all renewals and extensions. You agree to execute all documents
and take all actions as may be reasonably necessary to vest in RustFS the ownership of Your Contributions and to assist
RustFS in perfecting, maintaining, and enforcing its rights in Your Contributions.
Grant of Patent License
Subject to the terms and conditions of this Agreement, You hereby grant to RustFS and to recipients of documentation and software distributed by RustFS a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by You that are necessarily infringed by Your Contribution(s) alone or by combination of Your Contribution(s) with the Work to which such Contribution(s) was submitted. If any entity institutes patent litigation against You or any other entity (including a cross-claim or counterclaim in a lawsuit) alleging that your Contribution, or the Work to which you have contributed, constitutes direct or contributory patent infringement, then any patent licenses granted to that entity under this Agreement for that Contribution or Work shall terminate as of the date such litigation is filed.
Subject to the terms and conditions of this Agreement, You hereby grant to RustFS and to recipients of documentation and
software distributed by RustFS a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as
stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the
Work, where such license applies only to those patent claims licensable by You that are necessarily infringed by Your
Contribution(s) alone or by combination of Your Contribution(s) with the Work to which such Contribution(s) was
submitted. If any entity institutes patent litigation against You or any other entity (including a cross-claim or
counterclaim in a lawsuit) alleging that your Contribution, or the Work to which you have contributed, constitutes
direct or contributory patent infringement, then any patent licenses granted to that entity under this Agreement for
that Contribution or Work shall terminate as of the date such litigation is filed.
You represent that you are legally entitled to grant the above assignment and license.
You represent that each of Your Contributions is Your original creation (see section 7 for submissions on behalf of others). You represent that Your Contribution submissions include complete details of any third-party license or other restriction (including, but not limited to, related patents and trademarks) of which you are personally aware and which are associated with any part of Your Contributions.
You represent that each of Your Contributions is Your original creation (see section 7 for submissions on behalf of
others). You represent that Your Contribution submissions include complete details of any third-party license or other
restriction (including, but not limited to, related patents and trademarks) of which you are personally aware and which
are associated with any part of Your Contributions.
You are not expected to provide support for Your Contributions, except to the extent You desire to provide support. You may provide support for free, for a fee, or not at all. Unless required by applicable law or agreed to in writing, You provide Your Contributions on an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON- INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE.
You are not expected to provide support for Your Contributions, except to the extent You desire to provide support. You
may provide support for free, for a fee, or not at all. Unless required by applicable law or agreed to in writing, You
provide Your Contributions on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
including, without limitation, any warranties or conditions of TITLE, NON- INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR
A PARTICULAR PURPOSE.
Should You wish to submit work that is not Your original creation, You may submit it to RustFS separately from any Contribution, identifying the complete details of its source and of any license or other restriction (including, but not limited to, related patents, trademarks, and license agreements) of which you are personally aware, and conspicuously marking the work as “Submitted on behalf of a third-party: [named here]”.
Should You wish to submit work that is not Your original creation, You may submit it to RustFS separately from any
Contribution, identifying the complete details of its source and of any license or other restriction (including, but not
limited to, related patents, trademarks, and license agreements) of which you are personally aware, and conspicuously
marking the work as "Submitted on behalf of a third-party: [named here]".
You agree to notify RustFS of any facts or circumstances of which you become aware that would make these representations inaccurate in any respect.
You agree to notify RustFS of any facts or circumstances of which you become aware that would make these representations
inaccurate in any respect.
Modification of CLA
RustFS reserves the right to update or modify this CLA in the future. Any updates or modifications to this CLA shall apply only to Contributions made after the effective date of the revised CLA. Contributions made prior to the update shall remain governed by the version of the CLA that was in effect at the time of submission. It is not necessary for all Contributors to re-sign the CLA when the CLA is updated or modified.
RustFS reserves the right to update or modify this CLA in the future. Any updates or modifications to this CLA shall
apply only to Contributions made after the effective date of the revised CLA. Contributions made prior to the update
shall remain governed by the version of the CLA that was in effect at the time of submission. It is not necessary for
all Contributors to re-sign the CLA when the CLA is updated or modified.
Governing Law and Dispute Resolution
This Agreement will be governed by and construed in accordance with the laws of the Peoples Republic of China excluding that body of laws known as conflict of laws. The parties expressly agree that the United Nations Convention on Contracts for the International Sale of Goods will not apply. Any legal action or proceeding arising under this Agreement will be brought exclusively in the courts located in Beijing, China, and the parties hereby irrevocably consent to the personal jurisdiction and venue therein.
This Agreement will be governed by and construed in accordance with the laws of the People's Republic of China excluding
that body of laws known as conflict of laws. The parties expressly agree that the United Nations Convention on Contracts
for the International Sale of Goods will not apply. Any legal action or proceeding arising under this Agreement will be
brought exclusively in the courts located in Beijing, China, and the parties hereby irrevocably consent to the personal
jurisdiction and venue therein.
For your reading convenience, this Agreement is written in parallel English and Chinese sections. To the extent there is a conflict between the English and Chinese sections, the English sections shall govern.
For your reading convenience, this Agreement is written in parallel English and Chinese sections. To the extent there is
a conflict between the English and Chinese sections, the English sections shall govern.

CLAUDE.md

@@ -0,0 +1,275 @@
# CLAUDE.md
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
## Project Overview
RustFS is a high-performance distributed object storage software built with Rust, providing S3-compatible APIs and
advanced features like data lakes, AI, and big data support. It's designed as an alternative to MinIO with better
performance and a more business-friendly Apache 2.0 license.
## Build Commands
### Primary Build Commands
- `cargo build --release` - Build the main RustFS binary
- `./build-rustfs.sh` - Recommended build script that handles console resources and cross-platform compilation
- `./build-rustfs.sh --dev` - Development build with debug symbols
- `make build` or `just build` - Use Make/Just for standardized builds
### Platform-Specific Builds
- `./build-rustfs.sh --platform x86_64-unknown-linux-musl` - Build for musl target
- `./build-rustfs.sh --platform aarch64-unknown-linux-gnu` - Build for ARM64
- `make build-musl` or `just build-musl` - Build musl variant
- `make build-cross-all` - Build all supported architectures
### Testing Commands
- `cargo test --workspace --exclude e2e_test` - Run unit tests (excluding e2e tests)
- `cargo nextest run --all --exclude e2e_test` - Use nextest if available (faster)
- `cargo test --all --doc` - Run documentation tests
- `make test` or `just test` - Run full test suite
- `make pre-commit` - Run all quality checks (fmt, clippy, check, test)
### End-to-End Testing
- `cargo test --package e2e_test` - Run all e2e tests
- `./scripts/run_e2e_tests.sh` - Run e2e tests via script
- `./scripts/run_scanner_benchmarks.sh` - Run scanner performance benchmarks
### KMS-Specific Testing (with proxy bypass)
- `NO_PROXY=127.0.0.1,localhost HTTP_PROXY= HTTPS_PROXY= http_proxy= https_proxy= cargo test --package e2e_test test_local_kms_end_to_end -- --nocapture --test-threads=1` - Run complete KMS end-to-end test
- `NO_PROXY=127.0.0.1,localhost HTTP_PROXY= HTTPS_PROXY= http_proxy= https_proxy= cargo test --package e2e_test kms:: -- --nocapture --test-threads=1` - Run all KMS tests
- `cargo test --package e2e_test test_local_kms_key_isolation -- --nocapture --test-threads=1` - Test KMS key isolation
- `cargo test --package e2e_test test_local_kms_large_file -- --nocapture --test-threads=1` - Test KMS with large files
### Code Quality
- `cargo fmt --all` - Format code
- `cargo clippy --all-targets --all-features -- -D warnings` - Lint code
- `make pre-commit` or `just pre-commit` - Run all quality checks (fmt, clippy, check, test)
### Quick Development Commands
- `make help` or `just help` - Show all available commands with descriptions
- `make help-build` - Show detailed build options and cross-compilation help
- `make help-docker` - Show comprehensive Docker build and deployment options
- `./scripts/dev_deploy.sh <IP>` - Deploy development build to remote server
- `./scripts/run.sh` - Start local development server
- `./scripts/probe.sh` - Health check and connectivity testing
### Docker Build Commands
- `make docker-buildx` - Build multi-architecture production images
- `make docker-dev-local` - Build development image for local use
- `./docker-buildx.sh --push` - Build and push production images
## Architecture Overview
### Core Components
**Main Binary (`rustfs/`):**
- Entry point at `rustfs/src/main.rs`
- Core modules: admin, auth, config, server, storage, license management, profiling
- HTTP server with S3-compatible APIs
- Service state management and graceful shutdown
- Parallel service initialization with DNS resolver, bucket metadata, and IAM
**Key Crates (`crates/`):**
- `ecstore` - Erasure coding storage implementation (core storage layer)
- `iam` - Identity and Access Management
- `kms` - Key Management Service for encryption and key handling
- `madmin` - Management dashboard and admin API interface
- `s3select-api` & `s3select-query` - S3 Select API and query engine
- `config` - Configuration management with notify features
- `crypto` - Cryptography and security features
- `lock` - Distributed locking implementation
- `filemeta` - File metadata management
- `rio` - Rust I/O utilities and abstractions
- `common` - Shared utilities and data structures
- `protos` - Protocol buffer definitions
- `audit-logger` - Audit logging for file operations
- `notify` - Event notification system
- `obs` - Observability utilities
- `workers` - Worker thread pools and task scheduling
- `appauth` - Application authentication and authorization
- `ahm` - Asynchronous Hash Map for concurrent data structures
- `mcp` - MCP server for S3 operations
- `signer` - Client request signing utilities
- `checksums` - Client checksum calculation utilities
- `utils` - General utility functions and helpers
- `zip` - ZIP file handling and compression
- `targets` - Target-specific configurations and utilities
### Build System
- Cargo workspace with 25+ crates (including new KMS functionality)
- Custom `build-rustfs.sh` script for advanced build options
- Multi-architecture Docker builds via `docker-buildx.sh`
- Both Make and Just task runners supported with comprehensive help
- Cross-compilation support for multiple Linux targets
- Automated CI/CD with GitHub Actions for testing, building, and Docker publishing
- Performance benchmarking and audit workflows
### Key Dependencies
- `axum` - HTTP framework for S3 API server
- `tokio` - Async runtime
- `s3s` - S3 protocol implementation library
- `datafusion` - For S3 Select query processing
- `hyper`/`hyper-util` - HTTP client/server utilities
- `rustls` - TLS implementation
- `serde`/`serde_json` - Serialization
- `tracing` - Structured logging and observability
- `pprof` - Performance profiling with flamegraph support
- `tikv-jemallocator` - Memory allocator for Linux GNU builds
### Development Workflow
- Console resources are embedded during build via `rust-embed`
- Protocol buffers generated via custom `gproto` binary
- E2E tests in separate crate (`e2e_test`) with comprehensive KMS testing
- Shadow build for version/metadata embedding
- Support for both GNU and musl libc targets
- Development scripts in `scripts/` directory for common tasks
- Git hooks setup available via `make setup-hooks` or `just setup-hooks`
### Performance & Observability
- Performance profiling available with `pprof` integration (disabled on Windows)
- Profiling enabled via environment variables in production
- Built-in observability with OpenTelemetry integration
- Background services (scanner, heal) can be controlled via environment variables:
- `RUSTFS_ENABLE_SCANNER` (default: true)
- `RUSTFS_ENABLE_HEAL` (default: true)
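Illustrative only: this sketch shows one way such boolean toggles can be read with a default of `true`; the actual RustFS configuration layer may parse them differently.

```rust
// Returns `default` when the variable is unset, otherwise accepts common truthy spellings.
fn env_flag(name: &str, default: bool) -> bool {
    match std::env::var(name) {
        Ok(v) => matches!(v.trim().to_ascii_lowercase().as_str(), "1" | "true" | "on" | "yes"),
        Err(_) => default,
    }
}

fn main() {
    let scanner_enabled = env_flag("RUSTFS_ENABLE_SCANNER", true);
    let heal_enabled = env_flag("RUSTFS_ENABLE_HEAL", true);
    println!("scanner={scanner_enabled} heal={heal_enabled}");
}
```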
### Service Architecture
- Service state management with graceful shutdown handling
- Parallel initialization of core systems (DNS, bucket metadata, IAM)
- Event notification system with MQTT and webhook support
- Auto-heal and data scanner for storage integrity
- Jemalloc allocator for Linux GNU targets for better performance
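A hypothetical sketch of the parallel-initialization idea using `tokio::try_join!`; the real startup functions and error types in `rustfs/src/main.rs` are not reproduced here.

```rust
use anyhow::Result;

// Stand-ins for the DNS resolver, bucket metadata, and IAM bootstrap steps.
async fn init_dns() -> Result<()> { Ok(()) }
async fn init_bucket_metadata() -> Result<()> { Ok(()) }
async fn init_iam() -> Result<()> { Ok(()) }

async fn init_core_systems() -> Result<()> {
    // Run the subsystems concurrently and fail fast on the first error.
    tokio::try_join!(init_dns(), init_bucket_metadata(), init_iam())?;
    Ok(())
}
```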
## Environment Variables
- `RUSTFS_ENABLE_SCANNER` - Enable/disable background data scanner (default: true)
- `RUSTFS_ENABLE_HEAL` - Enable/disable auto-heal functionality (default: true)
- Various profiling and observability controls
- Build-time variables for Docker builds (RELEASE, REGISTRY, etc.)
- Test environment configurations in `scripts/dev_rustfs.env`
### KMS Environment Variables
- `NO_PROXY=127.0.0.1,localhost` - Required for KMS E2E tests to bypass proxy
- `HTTP_PROXY=` `HTTPS_PROXY=` `http_proxy=` `https_proxy=` - Clear proxy settings for local KMS testing
## KMS (Key Management Service) Architecture
### KMS Implementation Status
- **Full KMS Integration:** Complete implementation with Local and Vault backends
- **Automatic Configuration:** KMS auto-configures on startup with `--kms-enable` flag
- **Encryption Support:** Full S3-compatible server-side encryption (SSE-S3, SSE-KMS, SSE-C)
- **Admin API:** Complete KMS management via HTTP admin endpoints
- **Production Ready:** Comprehensive testing including large files and key isolation
### KMS Configuration
- **Local Backend:** `--kms-backend local --kms-key-dir <path> --kms-default-key-id <id>`
- **Vault Backend:** `--kms-backend vault --kms-vault-endpoint <url> --kms-vault-key-name <name>`
- **Auto-startup:** KMS automatically initializes when `--kms-enable` is provided
- **Manual Configuration:** Also supports dynamic configuration via admin API
### S3 Encryption Support
- **SSE-S3:** Server-side encryption with S3-managed keys (`ServerSideEncryption: AES256`)
- **SSE-KMS:** Server-side encryption with KMS-managed keys (`ServerSideEncryption: aws:kms`)
- **SSE-C:** Server-side encryption with customer-provided keys
- **Response Headers:** All encryption types return correct `server_side_encryption` headers in PUT/GET responses
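A client-side sketch of exercising SSE-S3 against a running server with the `aws-sdk-s3` crate (already a workspace dependency); the bucket and key names are placeholders, and this is not code taken from the e2e suite.

```rust
use aws_sdk_s3::primitives::ByteStream;
use aws_sdk_s3::types::ServerSideEncryption;

async fn put_encrypted(client: &aws_sdk_s3::Client) -> Result<(), aws_sdk_s3::Error> {
    let resp = client
        .put_object()
        .bucket("demo-bucket")
        .key("hello.txt")
        .body(ByteStream::from_static(b"hello"))
        // SSE-S3: ask the server to encrypt with its managed keys.
        .server_side_encryption(ServerSideEncryption::Aes256)
        .send()
        .await?;
    // The PUT response should echo the encryption algorithm back to the caller.
    if resp.server_side_encryption() != Some(&ServerSideEncryption::Aes256) {
        eprintln!("server did not echo the expected SSE algorithm");
    }
    Ok(())
}
```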
### KMS Testing Architecture
- **Comprehensive E2E Tests:** Located in `crates/e2e_test/src/kms/`
- **Test Environments:** Automated test environment setup with temporary directories
- **Encryption Coverage:** Tests all three encryption types (SSE-S3, SSE-KMS, SSE-C)
- **API Coverage:** Tests all KMS admin APIs (CreateKey, DescribeKey, ListKeys, etc.)
- **Edge Cases:** Key isolation, large file handling, error scenarios
### Key Files for KMS
- `crates/kms/` - Core KMS implementation with Local/Vault backends
- `rustfs/src/main.rs` - KMS auto-initialization in `init_kms_system()`
- `rustfs/src/storage/ecfs.rs` - SSE encryption/decryption in PUT/GET operations
- `rustfs/src/admin/handlers/kms*.rs` - KMS admin endpoints
- `crates/e2e_test/src/kms/` - Comprehensive KMS test suite
- `crates/rio/src/encrypt_reader.rs` - Streaming encryption for large files
## Code Style and Safety Requirements
- **Language Requirements:**
- Communicate with me in Chinese, but **only English can be used in code files**
- Code comments, function names, variable names, and all text in source files must be in English only
- No Chinese characters, emojis, or non-ASCII characters are allowed in any source code files
- This includes comments, strings, documentation, and any other text within code files
- **Safety-Critical Rules:**
- `unsafe_code = "deny"` enforced at workspace level
- Never use `unwrap()`, `expect()`, or panic-inducing code except in tests
- Avoid blocking I/O operations in async contexts
- Use proper error handling with `Result<T, E>` and `Option<T>`
- Follow Rust's ownership and borrowing rules strictly
- **Performance Guidelines:**
- Use `cargo clippy --all-targets --all-features -- -D warnings` to catch issues
- Prefer `anyhow` for error handling in applications, `thiserror` for libraries
- Use appropriate async runtimes and avoid blocking calls
- **Testing Standards:**
- All new features must include comprehensive tests
- Use `#[cfg(test)]` for test-only code that may use panic macros
- E2E tests should cover KMS integration scenarios
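A small sketch of the error-handling rules above, assuming a hypothetical `load_config` helper: no `unwrap()`/`expect()`, non-blocking file I/O via `tokio::fs`, and a library-style `thiserror` error propagated with `?`.

```rust
use thiserror::Error;

#[derive(Debug, Error)]
pub enum ConfigError {
    #[error("failed to read config file: {0}")]
    Read(#[from] std::io::Error),
    #[error("config file is empty")]
    Empty,
}

pub async fn load_config(path: &str) -> Result<String, ConfigError> {
    // `tokio::fs` keeps the executor free instead of blocking on std::fs.
    let contents = tokio::fs::read_to_string(path).await?;
    if contents.trim().is_empty() {
        return Err(ConfigError::Empty);
    }
    Ok(contents)
}
```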
## Common Development Tasks
### Running KMS Tests Locally
1. **Clear proxy settings:** KMS tests require direct localhost connections
2. **Use serial execution:** `--test-threads=1` prevents port conflicts
3. **Enable output:** `--nocapture` shows detailed test logs
4. **Full command:**
`NO_PROXY=127.0.0.1,localhost HTTP_PROXY= HTTPS_PROXY= http_proxy= https_proxy= cargo test --package e2e_test test_local_kms_end_to_end -- --nocapture --test-threads=1`
### KMS Development Workflow
1. **Code changes:** Modify KMS-related code in `crates/kms/` or `rustfs/src/`
2. **Compile:** Always run `cargo build` after changes
3. **Test specific functionality:** Use targeted test commands for faster iteration
4. **Full validation:** Run complete end-to-end tests before commits
### Debugging KMS Issues
- **Server startup:** Check that KMS auto-initializes with debug logs
- **Encryption failures:** Verify SSE headers are correctly set in both PUT and GET responses
- **Test failures:** Use `--nocapture` to see detailed error messages
- **Key management:** Test admin API endpoints with proper authentication
## Important Reminders
- **Always compile after code changes:** Use `cargo build` to catch errors early
- **Don't bypass tests:** All functionality must be properly tested, not worked around
- **Use proper error handling:** Never use `unwrap()` or `expect()` in production code (except tests)
- **Follow S3 compatibility:** Ensure all encryption types return correct HTTP response headers
# important-instruction-reminders
Do what has been asked; nothing more, nothing less.
NEVER create files unless they're absolutely necessary for achieving your goal.
ALWAYS prefer editing an existing file to creating a new one.
NEVER proactively create documentation files (*.md) or README files. Only create documentation files if explicitly
requested by the User.

Cargo.lock (generated; diff suppressed because it is too large)
Cargo.toml

@@ -15,8 +15,8 @@
[workspace]
members = [
"rustfs", # Core file system implementation
"cli/rustfs-gui", # Graphical user interface client
"crates/appauth", # Application authentication and authorization
"crates/audit", # Audit target management system with multi-target fan-out
"crates/common", # Shared utilities and data structures
"crates/config", # Configuration management
"crates/crypto", # Cryptography and security features
@@ -28,15 +28,20 @@ members = [
"crates/madmin", # Management dashboard and admin API interface
"crates/notify", # Notification system for events
"crates/obs", # Observability utilities
"crates/policy", # Policy management
"crates/protos", # Protocol buffer definitions
"crates/rio", # Rust I/O utilities and abstractions
"crates/targets", # Target-specific configurations and utilities
"crates/s3select-api", # S3 Select API interface
"crates/s3select-query", # S3 Select query engine
"crates/signer", # client signer
"crates/checksums", # client checksums
"crates/utils", # Utility functions and helpers
"crates/workers", # Worker thread pools and task scheduling
"crates/zip", # ZIP file handling and compression
"crates/ahm",
"crates/ahm", # Asynchronous Hash Map for concurrent data structures
"crates/mcp", # MCP server for S3 operations
"crates/kms", # Key Management Service
]
resolver = "2"
@@ -57,226 +62,222 @@ unsafe_code = "deny"
[workspace.lints.clippy]
all = "warn"
[patch.crates-io]
rustfs-utils = { path = "crates/utils" }
rustfs-filemeta = { path = "crates/filemeta" }
rustfs-rio = { path = "crates/rio" }
[workspace.dependencies]
# RustFS Internal Crates
rustfs = { path = "./rustfs", version = "0.0.5" }
rustfs-ahm = { path = "crates/ahm", version = "0.0.5" }
rustfs-s3select-api = { path = "crates/s3select-api", version = "0.0.5" }
rustfs-appauth = { path = "crates/appauth", version = "0.0.5" }
rustfs-audit = { path = "crates/audit", version = "0.0.5" }
rustfs-checksums = { path = "crates/checksums", version = "0.0.5" }
rustfs-common = { path = "crates/common", version = "0.0.5" }
rustfs-config = { path = "./crates/config", version = "0.0.5" }
rustfs-crypto = { path = "crates/crypto", version = "0.0.5" }
rustfs-ecstore = { path = "crates/ecstore", version = "0.0.5" }
rustfs-filemeta = { path = "crates/filemeta", version = "0.0.5" }
rustfs-iam = { path = "crates/iam", version = "0.0.5" }
rustfs-kms = { path = "crates/kms", version = "0.0.5" }
rustfs-lock = { path = "crates/lock", version = "0.0.5" }
rustfs-madmin = { path = "crates/madmin", version = "0.0.5" }
rustfs-mcp = { path = "crates/mcp", version = "0.0.5" }
rustfs-notify = { path = "crates/notify", version = "0.0.5" }
rustfs-obs = { path = "crates/obs", version = "0.0.5" }
rustfs-policy = { path = "crates/policy", version = "0.0.5" }
rustfs-protos = { path = "crates/protos", version = "0.0.5" }
rustfs-s3select-query = { path = "crates/s3select-query", version = "0.0.5" }
rustfs = { path = "./rustfs", version = "0.0.5" }
rustfs-zip = { path = "./crates/zip", version = "0.0.5" }
rustfs-config = { path = "./crates/config", version = "0.0.5" }
rustfs-obs = { path = "crates/obs", version = "0.0.5" }
rustfs-notify = { path = "crates/notify", version = "0.0.5" }
rustfs-utils = { path = "crates/utils", version = "0.0.5" }
rustfs-rio = { path = "crates/rio", version = "0.0.5" }
rustfs-filemeta = { path = "crates/filemeta", version = "0.0.5" }
rustfs-s3select-api = { path = "crates/s3select-api", version = "0.0.5" }
rustfs-s3select-query = { path = "crates/s3select-query", version = "0.0.5" }
rustfs-signer = { path = "crates/signer", version = "0.0.5" }
rustfs-targets = { path = "crates/targets", version = "0.0.5" }
rustfs-utils = { path = "crates/utils", version = "0.0.5" }
rustfs-workers = { path = "crates/workers", version = "0.0.5" }
aes-gcm = { version = "0.10.3", features = ["std"] }
arc-swap = "1.7.1"
argon2 = { version = "0.5.3", features = ["std"] }
atoi = "2.0.0"
rustfs-zip = { path = "./crates/zip", version = "0.0.5" }
# Async Runtime and Networking
async-channel = "2.5.0"
async-compression = { version = "0.4.19" }
async-recursion = "1.1.1"
async-trait = "0.1.88"
async-compression = { version = "0.4.0" }
atomic_enum = "0.3.0"
aws-sdk-s3 = "1.96.0"
axum = "0.8.4"
axum-extra = "0.10.1"
axum-server = { version = "0.7.2", features = ["tls-rustls"] }
base64-simd = "0.8.0"
base64 = "0.22.1"
brotli = "8.0.1"
bytes = { version = "1.10.1", features = ["serde"] }
bytesize = "2.0.1"
byteorder = "1.5.0"
cfg-if = "1.0.1"
chacha20poly1305 = { version = "0.10.1" }
chrono = { version = "0.4.41", features = ["serde"] }
clap = { version = "4.5.41", features = ["derive", "env"] }
const-str = { version = "0.6.2", features = ["std", "proc"] }
crc32fast = "1.4.2"
criterion = { version = "0.5", features = ["html_reports"] }
dashmap = "6.1.0"
datafusion = "46.0.1"
derive_builder = "0.20.2"
dioxus = { version = "0.6.3", features = ["router"] }
dirs = "6.0.0"
enumset = "1.1.7"
flatbuffers = "25.2.10"
flate2 = "1.1.2"
flexi_logger = { version = "0.31.2", features = ["trc", "dont_minimize_extra_stacks"] }
form_urlencoded = "1.2.1"
async-trait = "0.1.89"
axum = "0.8.7"
axum-extra = "0.12.2"
axum-server = { version = "0.8.0", features = ["tls-rustls-no-provider"], default-features = false }
futures = "0.3.31"
futures-core = "0.3.31"
futures-util = "0.3.31"
glob = "0.3.2"
hex = "0.4.3"
hyper = { version = "1.8.1", features = ["http2", "http1", "server"] }
hyper-rustls = { version = "0.27.7", default-features = false, features = ["native-tokio", "http1", "tls12", "logging", "http2", "ring", "webpki-roots"] }
hyper-util = { version = "0.1.19", features = ["tokio", "server-auto", "server-graceful"] }
http = "1.4.0"
http-body = "1.0.1"
reqwest = { version = "0.12.25", default-features = false, features = ["rustls-tls-webpki-roots", "charset", "http2", "system-proxy", "stream", "json", "blocking"] }
socket2 = "0.6.1"
tokio = { version = "1.48.0", features = ["fs", "rt-multi-thread"] }
tokio-rustls = { version = "0.26.4", default-features = false, features = ["logging", "tls12", "ring"] }
tokio-stream = { version = "0.1.17" }
tokio-test = "0.4.4"
tokio-util = { version = "0.7.17", features = ["io", "compat"] }
tonic = { version = "0.14.2", features = ["gzip"] }
tonic-prost = { version = "0.14.2" }
tonic-prost-build = { version = "0.14.2" }
tower = { version = "0.5.2", features = ["timeout"] }
tower-http = { version = "0.6.8", features = ["cors"] }
# Serialization and Data Formats
bytes = { version = "1.11.0", features = ["serde"] }
bytesize = "2.3.1"
byteorder = "1.5.0"
flatbuffers = "25.9.23"
form_urlencoded = "1.2.2"
prost = "0.14.1"
quick-xml = "0.38.4"
rmcp = { version = "0.10.0" }
rmp = { version = "0.8.14" }
rmp-serde = { version = "1.3.0" }
serde = { version = "1.0.228", features = ["derive"] }
serde_json = { version = "1.0.145", features = ["raw_value"] }
serde_urlencoded = "0.7.1"
schemars = "1.1.0"
# Cryptography and Security
aes-gcm = { version = "0.11.0-rc.2", features = ["rand_core"] }
argon2 = { version = "0.6.0-rc.3", features = ["std"] }
blake3 = { version = "1.8.2", features = ["rayon", "mmap"] }
chacha20poly1305 = { version = "0.11.0-rc.2" }
crc-fast = "1.6.0"
hmac = { version = "0.13.0-rc.3" }
jsonwebtoken = { version = "10.2.0", features = ["rust_crypto"] }
pbkdf2 = "0.13.0-rc.3"
rsa = { version = "0.10.0-rc.10" }
rustls = { version = "0.23.35", features = ["ring", "logging", "std", "tls12"], default-features = false }
rustls-pemfile = "2.2.0"
rustls-pki-types = "1.13.1"
sha1 = "0.11.0-rc.3"
sha2 = "0.11.0-rc.3"
subtle = "2.6"
zeroize = { version = "1.8.2", features = ["derive"] }
# Time and Date
chrono = { version = "0.4.42", features = ["serde"] }
humantime = "2.3.0"
time = { version = "0.3.44", features = ["std", "parsing", "formatting", "macros", "serde"] }
# Utilities and Tools
anyhow = "1.0.100"
arc-swap = "1.7.1"
astral-tokio-tar = "0.5.6"
atoi = "2.0.0"
atomic_enum = "0.3.0"
aws-config = { version = "1.8.11" }
aws-credential-types = { version = "1.2.10" }
aws-sdk-s3 = { version = "1.116.0", default-features = false, features = ["sigv4a", "rustls", "rt-tokio"] }
aws-smithy-types = { version = "1.3.4" }
base64 = "0.22.1"
base64-simd = "0.8.0"
brotli = "8.0.2"
cfg-if = "1.0.4"
clap = { version = "4.5.53", features = ["derive", "env"] }
const-str = { version = "0.7.0", features = ["std", "proc"] }
convert_case = "0.10.0"
criterion = { version = "0.8", features = ["html_reports"] }
crossbeam-queue = "0.3.12"
datafusion = "51.0.0"
derive_builder = "0.20.2"
enumset = "1.1.10"
faster-hex = "0.10.0"
flate2 = "1.1.5"
flexi_logger = { version = "0.31.7", features = ["trc", "dont_minimize_extra_stacks", "compress", "kv", "json"] }
glob = "0.3.3"
google-cloud-storage = "1.4.0"
google-cloud-auth = "1.2.0"
hashbrown = { version = "0.16.1", features = ["serde", "rayon"] }
heed = { version = "0.22.0" }
hex-simd = "0.8.0"
highway = { version = "1.3.0" }
hmac = "0.12.1"
hyper = "1.6.0"
hyper-util = { version = "0.1.15", features = [
"tokio",
"server-auto",
"server-graceful",
] }
hyper-rustls = "0.27.7"
http = "1.3.1"
http-body = "1.0.1"
humantime = "2.2.0"
ipnetwork = { version = "0.21.1", features = ["serde"] }
jsonwebtoken = "9.3.1"
keyring = { version = "3.6.2", features = [
"apple-native",
"windows-native",
"sync-secret-service",
] }
lazy_static = "1.5.0"
libsystemd = { version = "0.7.2" }
local-ip-address = "0.6.5"
libc = "0.2.178"
libsystemd = "0.7.2"
local-ip-address = "0.6.6"
lz4 = "1.28.1"
matchit = "0.8.4"
md-5 = "0.10.6"
matchit = "0.9.0"
md-5 = "0.11.0-rc.3"
md5 = "0.8.0"
mime_guess = "2.0.5"
moka = { version = "0.12.11", features = ["future"] }
netif = "0.1.6"
nix = { version = "0.30.1", features = ["fs"] }
nu-ansi-term = "0.50.1"
nu-ansi-term = "0.50.3"
num_cpus = { version = "1.17.0" }
nvml-wrapper = "0.11.0"
object_store = "0.11.2"
once_cell = "1.21.3"
opentelemetry = { version = "0.30.0" }
opentelemetry-appender-tracing = { version = "0.30.1", features = [
"experimental_use_tracing_span_context",
"experimental_metadata_attributes",
"spec_unstable_logs_enabled"
] }
opentelemetry_sdk = { version = "0.30.0" }
opentelemetry-stdout = { version = "0.30.0" }
opentelemetry-otlp = { version = "0.30.0", default-features = false, features = [
"grpc-tonic", "gzip-tonic", "trace", "metrics", "logs", "internal-logs"
] }
opentelemetry-semantic-conventions = { version = "0.30.0", features = [
"semconv_experimental",
] }
parking_lot = "0.12.4"
object_store = "0.12.4"
parking_lot = "0.12.5"
path-absolutize = "3.1.1"
path-clean = "1.0.1"
blake3 = { version = "1.8.2" }
pbkdf2 = "0.12.2"
percent-encoding = "2.3.1"
pin-project-lite = "0.2.16"
prost = "0.13.5"
quick-xml = "0.38.0"
rand = "0.9.1"
rdkafka = { version = "0.38.0", features = ["tokio"] }
reed-solomon-simd = { version = "3.0.1" }
regex = { version = "1.11.1" }
reqwest = { version = "0.12.22", default-features = false, features = [
"rustls-tls",
"charset",
"http2",
"system-proxy",
"stream",
"json",
"blocking",
] }
rfd = { version = "0.15.3", default-features = false, features = [
"xdg-portal",
"tokio",
] }
rmp = "0.8.14"
rmp-serde = "1.3.0"
rsa = "0.9.8"
rumqttc = { version = "0.24" }
rust-embed = { version = "8.7.2" }
rust-i18n = { version = "3.1.5" }
rustfs-rsc = "2025.506.1"
rustls = { version = "0.23.29" }
rustls-pki-types = "1.12.0"
rustls-pemfile = "2.2.0"
s3s = { version = "0.12.0-minio-preview.2" }
shadow-rs = { version = "1.2.0", default-features = false }
serde = { version = "1.0.219", features = ["derive"] }
serde_json = { version = "1.0.140", features = ["raw_value"] }
serde-xml-rs = "0.8.1"
serde_urlencoded = "0.7.1"
sha1 = "0.10.6"
sha2 = "0.10.9"
pretty_assertions = "1.4.1"
rand = { version = "0.10.0-rc.5", features = ["serde"] }
rayon = "1.11.0"
reed-solomon-simd = { version = "3.1.0" }
regex = { version = "1.12.2" }
rumqttc = { version = "0.25.1" }
rust-embed = { version = "8.9.0" }
rustc-hash = { version = "2.1.1" }
s3s = { version = "0.12.0-rc.4", features = ["minio"] }
serial_test = "3.2.0"
shadow-rs = { version = "1.4.0", default-features = false }
siphasher = "1.0.1"
smallvec = { version = "1.15.1", features = ["serde"] }
snafu = "0.8.6"
smartstring = "1.0.1"
snafu = "0.8.9"
snap = "1.1.1"
socket2 = "0.6.0"
strum = { version = "0.27.1", features = ["derive"] }
sysinfo = "0.36.0"
sysctl = "0.6.0"
tempfile = "3.20.0"
starshard = { version = "0.6.0", features = ["rayon", "async", "serde"] }
strum = { version = "0.27.2", features = ["derive"] }
sysctl = "0.7.1"
sysinfo = "0.37.2"
temp-env = "0.3.6"
tempfile = "3.23.0"
test-case = "3.3.1"
thiserror = "2.0.12"
time = { version = "0.3.41", features = [
"std",
"parsing",
"formatting",
"macros",
"serde",
] }
tokio = { version = "1.46.1", features = ["fs", "rt-multi-thread"] }
tokio-rustls = { version = "0.26.2", default-features = false }
tokio-stream = { version = "0.1.17" }
tokio-tar = "0.3.1"
tokio-test = "0.4.4"
tokio-util = { version = "0.7.15", features = ["io", "compat"] }
tonic = { version = "0.13.1", features = ["gzip"] }
tonic-build = { version = "0.13.1" }
tower = { version = "0.5.2", features = ["timeout"] }
tower-http = { version = "0.6.6", features = ["cors"] }
tracing = "0.1.41"
tracing-core = "0.1.34"
thiserror = "2.0.17"
tracing = { version = "0.1.43" }
tracing-appender = "0.2.4"
tracing-error = "0.2.1"
tracing-subscriber = { version = "0.3.19", features = ["env-filter", "time"] }
tracing-appender = "0.2.3"
tracing-opentelemetry = "0.31.0"
tracing-opentelemetry = "0.32.0"
tracing-subscriber = { version = "0.3.22", features = ["env-filter", "time"] }
transform-stream = "0.3.1"
url = "2.5.4"
url = "2.5.7"
urlencoding = "2.1.3"
uuid = { version = "1.17.0", features = [
"v4",
"fast-rng",
"macro-diagnostics",
] }
wildmatch = { version = "2.4.0", features = ["serde"] }
uuid = { version = "1.19.0", features = ["v4", "fast-rng", "macro-diagnostics"] }
vaultrs = { version = "0.7.4" }
walkdir = "2.5.0"
wildmatch = { version = "2.6.1", features = ["serde"] }
winapi = { version = "0.3.9" }
xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }
zip = "2.4.2"
zip = "6.0.0"
zstd = "0.13.3"
anyhow = "1.0.98"
[profile.wasm-dev]
inherits = "dev"
opt-level = 1
# Observability and Metrics
metrics = "0.24.3"
opentelemetry = { version = "0.31.0" }
opentelemetry-appender-tracing = { version = "0.31.1", features = ["experimental_use_tracing_span_context", "experimental_metadata_attributes", "spec_unstable_logs_enabled"] }
opentelemetry-otlp = { version = "0.31.0", features = ["gzip-http", "reqwest-rustls"] }
opentelemetry_sdk = { version = "0.31.0" }
opentelemetry-semantic-conventions = { version = "0.31.0", features = ["semconv_experimental"] }
opentelemetry-stdout = { version = "0.31.0" }
[profile.server-dev]
inherits = "dev"
# Performance Analysis and Memory Profiling
mimalloc = "0.1"
# Use tikv-jemallocator as memory allocator and enable performance analysis
tikv-jemallocator = { version = "0.6", features = ["profiling", "stats", "unprefixed_malloc_on_supported_platforms", "background_threads"] }
# Used to control and obtain statistics for jemalloc at runtime
tikv-jemalloc-ctl = { version = "0.6", features = ["use_std", "stats", "profiling"] }
# Used to generate pprof-compatible memory profiling data and support symbolization and flame graphs
jemalloc_pprof = { version = "0.8.1", features = ["symbolize", "flamegraph"] }
# Used to generate CPU performance analysis data and flame diagrams
pprof = { version = "0.15.0", features = ["flamegraph", "protobuf-codec"] }
[profile.android-dev]
inherits = "dev"
[workspace.metadata.cargo-shear]
ignored = ["rustfs", "rustfs-mcp", "tokio-test"]
[profile.release]
opt-level = 3

Dockerfile

@@ -1,121 +1,95 @@
# Multi-stage build for RustFS production image
FROM alpine:latest AS build
FROM alpine:3.22 AS build
# Build arguments - use TARGETPLATFORM for consistency with Dockerfile.source
ARG TARGETPLATFORM
ARG BUILDPLATFORM
ARG TARGETARCH
ARG RELEASE=latest
# Install dependencies for downloading and verifying binaries
RUN apk add --no-cache \
ca-certificates \
curl \
bash \
wget \
unzip \
jq
# Create build directory
RUN apk add --no-cache ca-certificates curl unzip
WORKDIR /build
# Map TARGETPLATFORM to architecture format used in builds
RUN case "${TARGETPLATFORM}" in \
"linux/amd64") ARCH="x86_64" ;; \
"linux/arm64") ARCH="aarch64" ;; \
*) echo "Unsupported platform: ${TARGETPLATFORM}" && exit 1 ;; \
esac && \
echo "ARCH=${ARCH}" > /build/arch.env
# Download rustfs binary from dl.rustfs.com (release channel only)
RUN . /build/arch.env && \
BASE_URL="https://dl.rustfs.com/artifacts/rustfs/release" && \
PLATFORM="linux" && \
if [ "${RELEASE}" = "latest" ]; then \
# Download latest release version \
PACKAGE_NAME="rustfs-${PLATFORM}-${ARCH}-latest.zip"; \
DOWNLOAD_URL="${BASE_URL}/${PACKAGE_NAME}"; \
echo "📥 Downloading latest release build: ${PACKAGE_NAME}"; \
RUN set -eux; \
case "$TARGETARCH" in \
amd64) ARCH_SUBSTR="x86_64-musl" ;; \
arm64) ARCH_SUBSTR="aarch64-musl" ;; \
*) echo "Unsupported TARGETARCH=$TARGETARCH" >&2; exit 1 ;; \
esac; \
if [ "$RELEASE" = "latest" ]; then \
TAG="$(curl -fsSL https://api.github.com/repos/rustfs/rustfs/releases \
| grep -o '"tag_name": "[^"]*"' | cut -d'"' -f4 | head -n 1)"; \
else \
# Download specific release version \
PACKAGE_NAME="rustfs-${PLATFORM}-${ARCH}-v${RELEASE}.zip"; \
DOWNLOAD_URL="${BASE_URL}/${PACKAGE_NAME}"; \
echo "📥 Downloading specific release version: ${PACKAGE_NAME}"; \
fi && \
echo "🔗 Download URL: ${DOWNLOAD_URL}" && \
curl -f -L "${DOWNLOAD_URL}" -o /build/rustfs.zip && \
if [ ! -f /build/rustfs.zip ] || [ ! -s /build/rustfs.zip ]; then \
echo "❌ Failed to download binary package"; \
echo "💡 Make sure the package ${PACKAGE_NAME} exists"; \
echo "🔗 Check: ${DOWNLOAD_URL}"; \
exit 1; \
fi && \
unzip /build/rustfs.zip -d /build && \
chmod +x /build/rustfs && \
rm /build/rustfs.zip && \
echo "✅ Successfully downloaded and extracted rustfs binary"
TAG="$RELEASE"; \
fi; \
echo "Using tag: $TAG (arch pattern: $ARCH_SUBSTR)"; \
# Find download URL in assets list for this tag that contains arch substring and ends with .zip
URL="$(curl -fsSL "https://api.github.com/repos/rustfs/rustfs/releases/tags/$TAG" \
| grep -o "\"browser_download_url\": \"[^\"]*${ARCH_SUBSTR}[^\"]*\\.zip\"" \
| cut -d'"' -f4 | head -n 1)"; \
if [ -z "$URL" ]; then echo "Failed to locate release asset for $ARCH_SUBSTR at tag $TAG" >&2; exit 1; fi; \
echo "Downloading: $URL"; \
curl -fL "$URL" -o rustfs.zip; \
unzip -q rustfs.zip -d /build; \
# If binary is not in root directory, try to locate and move from zip to /build/rustfs
if [ ! -x /build/rustfs ]; then \
BIN_PATH="$(unzip -Z -1 rustfs.zip | grep -E '(^|/)rustfs$' | head -n 1 || true)"; \
if [ -n "$BIN_PATH" ]; then \
mkdir -p /build/.tmp && unzip -q rustfs.zip "$BIN_PATH" -d /build/.tmp && \
mv "/build/.tmp/$BIN_PATH" /build/rustfs; \
fi; \
fi; \
[ -x /build/rustfs ] || { echo "rustfs binary not found in asset" >&2; exit 1; }; \
chmod +x /build/rustfs; \
rm -rf rustfs.zip /build/.tmp || true
# Runtime stage
FROM alpine:latest
# Set build arguments and labels
FROM alpine:3.22
ARG RELEASE=latest
ARG BUILD_DATE
ARG VCS_REF
LABEL name="RustFS" \
vendor="RustFS Team" \
maintainer="RustFS Team <dev@rustfs.com>" \
version="${RELEASE}" \
release="${RELEASE}" \
build-date="${BUILD_DATE}" \
vcs-ref="${VCS_REF}" \
summary="RustFS is a high-performance distributed object storage system written in Rust, compatible with S3 API." \
description="RustFS is a high-performance distributed object storage software built using Rust. It supports erasure coding storage, multi-tenant management, observability, and other enterprise-level features." \
url="https://rustfs.com" \
license="Apache-2.0"
vendor="RustFS Team" \
maintainer="RustFS Team <dev@rustfs.com>" \
version="v${RELEASE#v}" \
release="${RELEASE}" \
build-date="${BUILD_DATE}" \
vcs-ref="${VCS_REF}" \
summary="High-performance distributed object storage system compatible with S3 API" \
description="RustFS is a distributed object storage system written in Rust, supporting erasure coding, multi-tenant management, and observability." \
url="https://rustfs.com" \
license="Apache-2.0"
# Install runtime dependencies
RUN apk add --no-cache \
ca-certificates \
curl \
tzdata \
bash \
&& addgroup -g 1000 rustfs \
&& adduser -u 1000 -G rustfs -s /bin/sh -D rustfs
RUN apk add --no-cache ca-certificates coreutils curl
# Environment variables
ENV RUSTFS_ACCESS_KEY=rustfsadmin \
RUSTFS_SECRET_KEY=rustfsadmin \
RUSTFS_ADDRESS=":9000" \
RUSTFS_CONSOLE_ENABLE=true \
RUSTFS_VOLUMES=/data \
RUST_LOG=warn
# Set permissions for /usr/bin (similar to MinIO's approach)
RUN chmod -R 755 /usr/bin
# Copy CA certificates and binaries from build stage
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=build /build/rustfs /usr/bin/
COPY --from=build /build/rustfs /usr/bin/rustfs
COPY entrypoint.sh /entrypoint.sh
# Set executable permissions
RUN chmod +x /usr/bin/rustfs
RUN chmod +x /usr/bin/rustfs /entrypoint.sh
# Create data directory
RUN mkdir -p /data /config && chown -R rustfs:rustfs /data /config
RUN addgroup -g 10001 -S rustfs && \
adduser -u 10001 -G rustfs -S rustfs -D && \
mkdir -p /data /logs && \
chown -R rustfs:rustfs /data /logs && \
chmod 0750 /data /logs
ENV RUSTFS_ADDRESS=":9000" \
RUSTFS_CONSOLE_ADDRESS=":9001" \
RUSTFS_ACCESS_KEY="rustfsadmin" \
RUSTFS_SECRET_KEY="rustfsadmin" \
RUSTFS_CONSOLE_ENABLE="true" \
RUSTFS_EXTERNAL_ADDRESS="" \
RUSTFS_CORS_ALLOWED_ORIGINS="*" \
RUSTFS_CONSOLE_CORS_ALLOWED_ORIGINS="*" \
RUSTFS_VOLUMES="/data" \
RUST_LOG="warn" \
RUSTFS_OBS_LOG_DIRECTORY="/logs"
EXPOSE 9000 9001
VOLUME ["/data", "/logs"]
# Switch to non-root user
USER rustfs
# Set working directory
WORKDIR /data
ENTRYPOINT ["/entrypoint.sh"]
# Expose port
EXPOSE 9000
# Volume for data
VOLUME ["/data"]
# Set entrypoint
ENTRYPOINT ["/usr/bin/rustfs"]
CMD ["rustfs"]

Dockerfile.source

@@ -1,80 +1,88 @@
# syntax=docker/dockerfile:1.6
# Multi-stage Dockerfile for RustFS - LOCAL DEVELOPMENT ONLY
#
# ⚠️ IMPORTANT: This Dockerfile is for local development and testing only.
# ⚠️ It builds RustFS from source code and is NOT used in CI/CD pipelines.
# ⚠️ CI/CD pipeline uses pre-built binaries from Dockerfile instead.
# IMPORTANT: This Dockerfile builds RustFS from source for local development and testing.
# CI/CD uses the production Dockerfile with prebuilt binaries instead.
#
# Usage for local development:
# Example:
# docker build -f Dockerfile.source -t rustfs:dev-local .
# docker run --rm -p 9000:9000 rustfs:dev-local
#
# Supports cross-compilation for amd64 and arm64 architectures
# Supports cross-compilation for amd64 and arm64 via TARGETPLATFORM.
ARG TARGETPLATFORM
ARG BUILDPLATFORM
# -----------------------------
# Build stage
FROM --platform=$BUILDPLATFORM rust:1.88-bookworm AS builder
# -----------------------------
FROM rust:1.88-bookworm AS builder
# Re-declare build arguments after FROM (required for multi-stage builds)
# Re-declare args after FROM
ARG TARGETPLATFORM
ARG BUILDPLATFORM
# Debug: Print platform information
RUN echo "🐳 Build Info: BUILDPLATFORM=$BUILDPLATFORM, TARGETPLATFORM=$TARGETPLATFORM"
# Debug: print platforms
RUN echo "Build info -> BUILDPLATFORM=${BUILDPLATFORM}, TARGETPLATFORM=${TARGETPLATFORM}"
# Install required build dependencies
RUN apt-get update && apt-get install -y \
wget \
git \
# Install build toolchain and headers
# Use distro packages for protoc/flatc to avoid host-arch mismatch
RUN set -eux; \
export DEBIAN_FRONTEND=noninteractive; \
apt-get update; \
apt-get install -y --no-install-recommends \
build-essential \
ca-certificates \
curl \
unzip \
gcc \
git \
pkg-config \
libssl-dev \
lld \
&& rm -rf /var/lib/apt/lists/*
protobuf-compiler \
flatbuffers-compiler; \
rm -rf /var/lib/apt/lists/*
# Note: sccache removed for simpler builds
# Install cross-compilation tools for ARM64
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
apt-get update && \
apt-get install -y gcc-aarch64-linux-gnu && \
rm -rf /var/lib/apt/lists/*; \
# Optional: cross toolchain for aarch64 (only when targeting linux/arm64)
RUN set -eux; \
if [ "${TARGETPLATFORM:-linux/amd64}" = "linux/arm64" ]; then \
export DEBIAN_FRONTEND=noninteractive; \
apt-get update; \
apt-get install -y --no-install-recommends gcc-aarch64-linux-gnu; \
rm -rf /var/lib/apt/lists/*; \
fi
# Install protoc
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
&& unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
&& mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
&& mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3
# Install flatc
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
&& unzip Linux.flatc.binary.g++-13.zip \
&& mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip
# Set up Rust targets based on platform
RUN set -e && \
PLATFORM="${TARGETPLATFORM:-linux/amd64}" && \
echo "🎯 Setting up Rust target for platform: $PLATFORM" && \
case "$PLATFORM" in \
"linux/amd64") rustup target add x86_64-unknown-linux-gnu ;; \
"linux/arm64") rustup target add aarch64-unknown-linux-gnu ;; \
*) echo "❌ Unsupported platform: $PLATFORM" && exit 1 ;; \
# Add Rust targets based on TARGETPLATFORM
RUN set -eux; \
case "${TARGETPLATFORM:-linux/amd64}" in \
linux/amd64) rustup target add x86_64-unknown-linux-gnu ;; \
linux/arm64) rustup target add aarch64-unknown-linux-gnu ;; \
*) echo "Unsupported TARGETPLATFORM=${TARGETPLATFORM}" >&2; exit 1 ;; \
esac
# Set up environment for cross-compilation
# Cross-compilation environment (used only when targeting aarch64)
ENV CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc
ENV CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc
ENV CXX_aarch64_unknown_linux_gnu=aarch64-linux-gnu-g++
WORKDIR /usr/src/rustfs
# Copy all source code
# Layered copy to maximize caching:
# 1) top-level manifests
COPY Cargo.toml Cargo.lock ./
# 2) workspace member manifests (adjust if workspace layout changes)
COPY rustfs/Cargo.toml rustfs/Cargo.toml
COPY crates/*/Cargo.toml crates/
COPY cli/rustfs-gui/Cargo.toml cli/rustfs-gui/Cargo.toml
# Pre-fetch dependencies for better caching
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/local/cargo/git \
cargo fetch --locked || true
# 3) copy full sources (this is the main cache invalidation point)
COPY . .
# Configure cargo for optimized builds
# Cargo build configuration for lean release artifacts
ENV CARGO_NET_GIT_FETCH_WITH_CLI=true \
CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse \
CARGO_INCREMENTAL=0 \
@@ -82,69 +90,91 @@ ENV CARGO_NET_GIT_FETCH_WITH_CLI=true \
CARGO_PROFILE_RELEASE_SPLIT_DEBUGINFO=off \
CARGO_PROFILE_RELEASE_STRIP=symbols
# Generate protobuf code
RUN cargo run --bin gproto
# Generate protobuf/flatbuffers code (uses protoc/flatc from distro)
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/local/cargo/git \
--mount=type=cache,target=/usr/src/rustfs/target \
cargo run --bin gproto
# Build the actual application with optimizations
RUN case "$TARGETPLATFORM" in \
"linux/amd64") \
echo "🔨 Building for amd64..." && \
rustup target add x86_64-unknown-linux-gnu && \
cargo build --release --target x86_64-unknown-linux-gnu --bin rustfs -j $(nproc) && \
cp target/x86_64-unknown-linux-gnu/release/rustfs /usr/local/bin/rustfs \
;; \
"linux/arm64") \
echo "🔨 Building for arm64..." && \
rustup target add aarch64-unknown-linux-gnu && \
cargo build --release --target aarch64-unknown-linux-gnu --bin rustfs -j $(nproc) && \
cp target/aarch64-unknown-linux-gnu/release/rustfs /usr/local/bin/rustfs \
;; \
*) \
echo "❌ Unsupported platform: $TARGETPLATFORM" && exit 1 \
;; \
# Build RustFS (target depends on TARGETPLATFORM)
RUN --mount=type=cache,target=/usr/local/cargo/registry \
--mount=type=cache,target=/usr/local/cargo/git \
--mount=type=cache,target=/usr/src/rustfs/target \
set -eux; \
case "${TARGETPLATFORM:-linux/amd64}" in \
linux/amd64) \
echo "Building for x86_64-unknown-linux-gnu"; \
cargo build --release --locked --target x86_64-unknown-linux-gnu --bin rustfs -j "$(nproc)"; \
install -m 0755 target/x86_64-unknown-linux-gnu/release/rustfs /usr/local/bin/rustfs \
;; \
linux/arm64) \
echo "Building for aarch64-unknown-linux-gnu"; \
cargo build --release --locked --target aarch64-unknown-linux-gnu --bin rustfs -j "$(nproc)"; \
install -m 0755 target/aarch64-unknown-linux-gnu/release/rustfs /usr/local/bin/rustfs \
;; \
*) \
echo "Unsupported TARGETPLATFORM=${TARGETPLATFORM}" >&2; exit 1 \
;; \
esac
# Runtime stage - Ubuntu minimal for better compatibility
# -----------------------------
# Runtime stage (Ubuntu minimal)
# -----------------------------
FROM ubuntu:22.04
# Install runtime dependencies
RUN apt-get update && apt-get install -y \
ARG BUILD_DATE
ARG VCS_REF
LABEL name="RustFS (dev-local)" \
maintainer="RustFS Team" \
build-date="${BUILD_DATE}" \
vcs-ref="${VCS_REF}" \
description="RustFS - local development image built from source (NOT for production)."
# Minimal runtime deps: certificates + tzdata + coreutils (for chroot --userspec)
RUN set -eux; \
export DEBIAN_FRONTEND=noninteractive; \
apt-get update; \
apt-get install -y --no-install-recommends \
ca-certificates \
tzdata \
wget \
&& rm -rf /var/lib/apt/lists/*
coreutils; \
rm -rf /var/lib/apt/lists/*
# Create rustfs user and group
RUN groupadd -g 1000 rustfs && \
useradd -d /app -g rustfs -u 1000 -s /bin/bash rustfs
# Create a conventional runtime user/group (final switch happens in entrypoint via chroot --userspec)
RUN set -eux; \
groupadd -g 1000 rustfs; \
useradd -u 1000 -g rustfs -M -s /usr/sbin/nologin rustfs
WORKDIR /app
# Create data directories
RUN mkdir -p /data/rustfs{0,1,2,3} && \
chown -R rustfs:rustfs /data /app
# Prepare data/log directories with sane defaults
RUN set -eux; \
mkdir -p /data /logs; \
chown -R rustfs:rustfs /data /logs /app; \
chmod 0750 /data /logs
# Copy binary from builder stage
COPY --from=builder /usr/local/bin/rustfs /app/rustfs
RUN chmod +x /app/rustfs && chown rustfs:rustfs /app/rustfs
# Copy the freshly built binary and the entrypoint
COPY --from=builder /usr/local/bin/rustfs /usr/bin/rustfs
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /usr/bin/rustfs /entrypoint.sh
# Switch to non-root user
USER rustfs
# Default environment (override in docker run/compose as needed)
ENV RUSTFS_ADDRESS=":9000" \
RUSTFS_ACCESS_KEY="rustfsadmin" \
RUSTFS_SECRET_KEY="rustfsadmin" \
RUSTFS_CONSOLE_ENABLE="true" \
RUSTFS_VOLUMES="/data" \
RUST_LOG="warn" \
RUSTFS_OBS_LOG_DIRECTORY="/logs" \
RUSTFS_USERNAME="rustfs" \
RUSTFS_GROUPNAME="rustfs" \
RUSTFS_UID="1000" \
RUSTFS_GID="1000"
# Expose ports
EXPOSE 9000
VOLUME ["/data", "/logs"]
# Environment variables
ENV RUSTFS_ACCESS_KEY=rustfsadmin \
RUSTFS_SECRET_KEY=rustfsadmin \
RUSTFS_ADDRESS=":9000" \
RUSTFS_CONSOLE_ENABLE=true \
RUSTFS_VOLUMES=/data \
RUST_LOG=warn
# Volume for data
VOLUME ["/data"]
# Set default command
CMD ["/app/rustfs"]
# Keep root here; entrypoint will drop privileges using chroot --userspec
ENTRYPOINT ["/entrypoint.sh"]
CMD ["/usr/bin/rustfs"]
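# Example (illustrative, mirroring the docker-dev-local targets below): build this file
# for the current platform only, load it into the local engine, and run it:
#   docker buildx build -f Dockerfile.source -t rustfs:dev-latest --load .
#   docker run -d --name rustfs-dev -p 9000:9000 -v "$(pwd)/data:/data" rustfs:dev-latest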

Justfile (new file, 258 lines)

@@ -0,0 +1,258 @@
DOCKER_CLI := env("DOCKER_CLI", "docker")
IMAGE_NAME := env("IMAGE_NAME", "rustfs:v1.0.0")
DOCKERFILE_SOURCE := env("DOCKERFILE_SOURCE", "Dockerfile.source")
DOCKERFILE_PRODUCTION := env("DOCKERFILE_PRODUCTION", "Dockerfile")
CONTAINER_NAME := env("CONTAINER_NAME", "rustfs-dev")
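# These defaults are read with env(), so they can be overridden per invocation,
# e.g. (illustrative): DOCKER_CLI=podman IMAGE_NAME=rustfs:dev just build-docker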
[group("📒 Help")]
[private]
default:
@just --list --list-heading $'🦀 RustFS justfile manual page:\n'
[doc("show help")]
[group("📒 Help")]
help: default
[doc("run `cargo fmt` to format code")]
[group("👆 Code Quality")]
fmt:
@echo "🔧 Formatting code..."
cargo fmt --all
[doc("run `cargo fmt` in check mode")]
[group("👆 Code Quality")]
fmt-check:
@echo "📝 Checking code formatting..."
cargo fmt --all --check
[doc("run `cargo clippy`")]
[group("👆 Code Quality")]
clippy:
@echo "🔍 Running clippy checks..."
cargo clippy --all-targets --all-features --fix --allow-dirty -- -D warnings
[doc("run `cargo check`")]
[group("👆 Code Quality")]
check:
@echo "🔨 Running compilation check..."
cargo check --all-targets
[doc("run `cargo test`")]
[group("👆 Code Quality")]
test:
@echo "🧪 Running tests..."
cargo nextest run --all --exclude e2e_test
cargo test --all --doc
[doc("run `fmt` `clippy` `check` `test` at once")]
[group("👆 Code Quality")]
pre-commit: fmt clippy check test
@echo "✅ All pre-commit checks passed!"
[group("🤔 Git")]
setup-hooks:
@echo "🔧 Setting up git hooks..."
chmod +x .git/hooks/pre-commit
@echo "✅ Git hooks setup complete!"
[doc("use `release` mode for building")]
[group("🔨 Build")]
build:
@echo "🔨 Building RustFS using build-rustfs.sh script..."
./build-rustfs.sh
[doc("use `debug` mode for building")]
[group("🔨 Build")]
build-dev:
@echo "🔨 Building RustFS in development mode..."
./build-rustfs.sh --dev
[group("🔨 Build")]
[private]
build-target target:
@echo "🔨 Building rustfs for {{ target }}..."
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
./build-rustfs.sh --platform {{ target }}
[doc("use `x86_64-unknown-linux-musl` target for building")]
[group("🔨 Build")]
build-musl: (build-target "x86_64-unknown-linux-musl")
[doc("use `x86_64-unknown-linux-gnu` target for building")]
[group("🔨 Build")]
build-gnu: (build-target "x86_64-unknown-linux-gnu")
[doc("use `aarch64-unknown-linux-musl` target for building")]
[group("🔨 Build")]
build-musl-arm64: (build-target "aarch64-unknown-linux-musl")
[doc("use `aarch64-unknown-linux-gnu` target for building")]
[group("🔨 Build")]
build-gnu-arm64: (build-target "aarch64-unknown-linux-gnu")
[doc("build and deploy to server")]
[group("🔨 Build")]
deploy-dev ip: build-musl
@echo "🚀 Deploying to dev server: {{ ip }}"
./scripts/dev_deploy.sh {{ ip }}
[group("🔨 Build")]
[private]
build-cross-all-pre:
@echo "🔧 Building all target architectures..."
@echo "💡 On macOS/Windows, use 'make docker-dev' for reliable multi-arch builds"
@echo "🔨 Generating protobuf code..."
-cargo run --bin gproto
[doc("build all targets at once")]
[group("🔨 Build")]
build-cross-all: build-cross-all-pre && build-gnu build-gnu-arm64 build-musl build-musl-arm64
# ========================================================================================
# Docker Multi-Architecture Builds (Primary Methods)
# ========================================================================================
[doc("build an image and run it")]
[group("🐳 Build Image")]
build-docker os="rockylinux9.3" cli=(DOCKER_CLI) dockerfile=(DOCKERFILE_SOURCE):
#!/usr/bin/env bash
SOURCE_BUILD_IMAGE_NAME="rustfs/rustfs-{{ os }}:v1"
SOURCE_BUILD_CONTAINER_NAME="rustfs-{{ os }}-build"
BUILD_CMD="/root/.cargo/bin/cargo build --release --bin rustfs --target-dir /root/s3-rustfs/target/{{ os }}"
echo "🐳 Building RustFS using Docker ({{ os }})..."
{{ cli }} buildx build -t $SOURCE_BUILD_IMAGE_NAME -f {{ dockerfile }} .
{{ cli }} run --rm --name $SOURCE_BUILD_CONTAINER_NAME -v $(pwd):/root/s3-rustfs -it $SOURCE_BUILD_IMAGE_NAME $BUILD_CMD
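# Example: just build-docker ubuntu22.04   (overrides the default rockylinux9.3 build OS)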
[doc("build an image")]
[group("🐳 Build Image")]
docker-buildx:
@echo "🏗️ Building multi-architecture production Docker images with buildx..."
./docker-buildx.sh
[doc("build an image and push it")]
[group("🐳 Build Image")]
docker-buildx-push:
@echo "🚀 Building and pushing multi-architecture production Docker images with buildx..."
./docker-buildx.sh --push
[doc("build an image with a version")]
[group("🐳 Build Image")]
docker-buildx-version version:
@echo "🏗️ Building multi-architecture production Docker images (version: {{ version }})..."
./docker-buildx.sh --release {{ version }}
[doc("build an image with a version and push it")]
[group("🐳 Build Image")]
docker-buildx-push-version version:
@echo "🚀 Building and pushing multi-architecture production Docker images (version: {{ version }})..."
./docker-buildx.sh --release {{ version }} --push
[doc("build an image with a version and push it to registry")]
[group("🐳 Build Image")]
docker-dev-push registry cli=(DOCKER_CLI) source=(DOCKERFILE_SOURCE):
@echo "🚀 Building and pushing multi-architecture development Docker images..."
@echo "💡 push to registry: {{ registry }}"
{{ cli }} buildx build \
--platform linux/amd64,linux/arm64 \
--file {{ source }} \
--tag {{ registry }}/rustfs:source-latest \
--tag {{ registry }}/rustfs:dev-latest \
--push \
.
# Local production builds using direct buildx (alternative to docker-buildx.sh)
[group("🐳 Build Image")]
docker-buildx-production-local cli=(DOCKER_CLI) source=(DOCKERFILE_PRODUCTION):
@echo "🏗️ Building single-architecture production Docker image locally..."
@echo "💡 Alternative to docker-buildx.sh for local testing"
{{ cli }} buildx build \
--file {{ source }} \
--tag rustfs:production-latest \
--tag rustfs:latest \
--load \
--build-arg RELEASE=latest \
.
# Development/Source builds using direct buildx commands
[group("🐳 Build Image")]
docker-dev cli=(DOCKER_CLI) source=(DOCKERFILE_SOURCE):
@echo "🏗️ Building multi-architecture development Docker images with buildx..."
@echo "💡 This builds from source code and is intended for local development and testing"
@echo "⚠️ Multi-arch images cannot be loaded locally, use docker-dev-push to push to registry"
{{ cli }} buildx build \
--platform linux/amd64,linux/arm64 \
--file {{ source }} \
--tag rustfs:source-latest \
--tag rustfs:dev-latest \
.
[group("🐳 Build Image")]
docker-dev-local cli=(DOCKER_CLI) source=(DOCKERFILE_SOURCE):
@echo "🏗️ Building single-architecture development Docker image for local use..."
@echo "💡 This builds from source code for the current platform and loads locally"
{{ cli }} buildx build \
--file {{ source }} \
--tag rustfs:source-latest \
--tag rustfs:dev-latest \
--load \
.
# ========================================================================================
# Single Architecture Docker Builds (Traditional)
# ========================================================================================
[group("🐳 Build Image")]
docker-build-production cli=(DOCKER_CLI) source=(DOCKERFILE_PRODUCTION):
@echo "🏗️ Building single-architecture production Docker image..."
@echo "💡 Consider using 'make docker-buildx-production-local' for multi-arch support"
{{ cli }} build -f {{ source }} -t rustfs:latest .
[group("🐳 Build Image")]
docker-build-source cli=(DOCKER_CLI) source=(DOCKERFILE_SOURCE):
@echo "🏗️ Building single-architecture source Docker image..."
@echo "💡 Consider using 'make docker-dev-local' for multi-arch support"
{{ cli }} build -f {{ source }} -t rustfs:source .
# ========================================================================================
# Development Environment
# ========================================================================================
[group("🏃 Running")]
dev-env-start cli=(DOCKER_CLI) source=(DOCKERFILE_SOURCE) container=(CONTAINER_NAME):
@echo "🚀 Starting development environment..."
{{ cli }} buildx build \
--file {{ source }} \
--tag rustfs:dev \
--load \
.
-{{ cli }} stop {{ container }} 2>/dev/null
-{{ cli }} rm {{ container }} 2>/dev/null
{{ cli }} run -d --name {{ container }} \
-p 9010:9010 -p 9000:9000 \
-v {{ invocation_directory() }}:/workspace \
-it rustfs:dev
[group("🏃 Running")]
dev-env-stop cli=(DOCKER_CLI) container=(CONTAINER_NAME):
@echo "🛑 Stopping development environment..."
-{{ cli }} stop {{ container }} 2>/dev/null
-{{ cli }} rm {{ container }} 2>/dev/null
[group("🏃 Running")]
dev-env-restart: dev-env-stop dev-env-start
[group("👍 E2E")]
e2e-server:
sh scripts/run.sh
[group("👍 E2E")]
probe-e2e:
sh scripts/probe.sh
[doc("inspect one image")]
[group("🚚 Other")]
docker-inspect-multiarch image cli=(DOCKER_CLI):
@echo "🔍 Inspecting multi-architecture image: {{ image }}"
{{ cli }} buildx imagetools inspect {{ image }}
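# Example: just docker-inspect-multiarch rustfs/rustfs:latest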

Makefile (294 lines)

@@ -1,5 +1,5 @@
###########
# 远程开发,需要 VSCode 安装 Dev Containers, Remote SSH, Remote Explorer
# Remote development requires VSCode with Dev Containers, Remote SSH, Remote Explorer
# https://code.visualstudio.com/docs/remote/containers
###########
DOCKER_CLI ?= docker
@@ -23,6 +23,7 @@ fmt-check:
.PHONY: clippy
clippy:
@echo "🔍 Running clippy checks..."
cargo clippy --fix --allow-dirty
cargo clippy --all-targets --all-features -- -D warnings
.PHONY: check
@@ -33,7 +34,12 @@ check:
.PHONY: test
test:
@echo "🧪 Running tests..."
cargo nextest run --all --exclude e2e_test
@if command -v cargo-nextest >/dev/null 2>&1; then \
cargo nextest run --all --exclude e2e_test; \
else \
echo " cargo-nextest not found; falling back to 'cargo test'"; \
cargo test --workspace --exclude e2e_test -- --nocapture; \
fi
cargo test --all --doc
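# cargo-nextest is optional; install it with `cargo install cargo-nextest --locked`
# to use the faster runner instead of the plain `cargo test` fallback above.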
.PHONY: pre-commit
@@ -46,21 +52,6 @@ setup-hooks:
chmod +x .git/hooks/pre-commit
@echo "✅ Git hooks setup complete!"
.PHONY: init-devenv
init-devenv:
$(DOCKER_CLI) build -t $(IMAGE_NAME) -f Dockerfile.source .
$(DOCKER_CLI) stop $(CONTAINER_NAME)
$(DOCKER_CLI) rm $(CONTAINER_NAME)
$(DOCKER_CLI) run -d --name $(CONTAINER_NAME) -p 9010:9010 -p 9000:9000 -v $(shell pwd):/root/s3-rustfs -it $(IMAGE_NAME)
.PHONY: start
start:
$(DOCKER_CLI) start $(CONTAINER_NAME)
.PHONY: stop
stop:
$(DOCKER_CLI) stop $(CONTAINER_NAME)
.PHONY: e2e-server
e2e-server:
sh $(shell pwd)/scripts/run.sh
@@ -80,8 +71,6 @@ build-dev:
@echo "🔨 Building RustFS in development mode..."
./build-rustfs.sh --dev
# Docker-based build (alternative approach)
# Usage: make BUILD_OS=ubuntu22.04 build-docker
# Output: target/ubuntu22.04/release/rustfs
@@ -92,71 +81,182 @@ build-docker: SOURCE_BUILD_CONTAINER_NAME = rustfs-$(BUILD_OS)-build
build-docker: BUILD_CMD = /root/.cargo/bin/cargo build --release --bin rustfs --target-dir /root/s3-rustfs/target/$(BUILD_OS)
build-docker:
@echo "🐳 Building RustFS using Docker ($(BUILD_OS))..."
$(DOCKER_CLI) build -t $(SOURCE_BUILD_IMAGE_NAME) -f $(DOCKERFILE_SOURCE) .
$(DOCKER_CLI) buildx build -t $(SOURCE_BUILD_IMAGE_NAME) -f $(DOCKERFILE_SOURCE) .
$(DOCKER_CLI) run --rm --name $(SOURCE_BUILD_CONTAINER_NAME) -v $(shell pwd):/root/s3-rustfs -it $(SOURCE_BUILD_IMAGE_NAME) $(BUILD_CMD)
.PHONY: build-musl
build-musl:
@echo "🔨 Building rustfs for x86_64-unknown-linux-musl..."
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-buildx' instead"
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
./build-rustfs.sh --platform x86_64-unknown-linux-musl
.PHONY: build-gnu
build-gnu:
@echo "🔨 Building rustfs for x86_64-unknown-linux-gnu..."
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-buildx' instead"
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
./build-rustfs.sh --platform x86_64-unknown-linux-gnu
.PHONY: build-musl-arm64
build-musl-arm64:
@echo "🔨 Building rustfs for aarch64-unknown-linux-musl..."
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
./build-rustfs.sh --platform aarch64-unknown-linux-musl
.PHONY: build-gnu-arm64
build-gnu-arm64:
@echo "🔨 Building rustfs for aarch64-unknown-linux-gnu..."
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
./build-rustfs.sh --platform aarch64-unknown-linux-gnu
.PHONY: deploy-dev
deploy-dev: build-musl
@echo "🚀 Deploying to dev server: $${IP}"
./scripts/dev_deploy.sh $${IP}
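# Example (IP is a placeholder for your dev host): make deploy-dev IP=192.168.1.50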
# Multi-architecture Docker build targets (NEW: using docker-buildx.sh)
# ========================================================================================
# Docker Multi-Architecture Builds (Primary Methods)
# ========================================================================================
# Production builds using docker-buildx.sh (for CI/CD and production)
.PHONY: docker-buildx
docker-buildx:
@echo "🏗️ Building multi-architecture Docker images with buildx..."
@echo "🏗️ Building multi-architecture production Docker images with buildx..."
./docker-buildx.sh
.PHONY: docker-buildx-push
docker-buildx-push:
@echo "🚀 Building and pushing multi-architecture Docker images with buildx..."
@echo "🚀 Building and pushing multi-architecture production Docker images with buildx..."
./docker-buildx.sh --push
.PHONY: docker-buildx-version
docker-buildx-version:
@if [ -z "$(VERSION)" ]; then \
echo "❌ 错误: 请指定版本, 例如: make docker-buildx-version VERSION=v1.0.0"; \
echo "❌ Error: Please specify version, example: make docker-buildx-version VERSION=v1.0.0"; \
exit 1; \
fi
@echo "🏗️ Building multi-architecture Docker images (version: $(VERSION))..."
@echo "🏗️ Building multi-architecture production Docker images (version: $(VERSION))..."
./docker-buildx.sh --release $(VERSION)
.PHONY: docker-buildx-push-version
docker-buildx-push-version:
@if [ -z "$(VERSION)" ]; then \
echo "❌ 错误: 请指定版本, 例如: make docker-buildx-push-version VERSION=v1.0.0"; \
echo "❌ Error: Please specify version, example: make docker-buildx-push-version VERSION=v1.0.0"; \
exit 1; \
fi
@echo "🚀 Building and pushing multi-architecture Docker images (version: $(VERSION))..."
@echo "🚀 Building and pushing multi-architecture production Docker images (version: $(VERSION))..."
./docker-buildx.sh --release $(VERSION) --push
# Development/Source builds using direct buildx commands
.PHONY: docker-dev
docker-dev:
@echo "🏗️ Building multi-architecture development Docker images with buildx..."
@echo "💡 This builds from source code and is intended for local development and testing"
@echo "⚠️ Multi-arch images cannot be loaded locally, use docker-dev-push to push to registry"
$(DOCKER_CLI) buildx build \
--platform linux/amd64,linux/arm64 \
--file $(DOCKERFILE_SOURCE) \
--tag rustfs:source-latest \
--tag rustfs:dev-latest \
.
.PHONY: docker-dev-local
docker-dev-local:
@echo "🏗️ Building single-architecture development Docker image for local use..."
@echo "💡 This builds from source code for the current platform and loads locally"
$(DOCKER_CLI) buildx build \
--file $(DOCKERFILE_SOURCE) \
--tag rustfs:source-latest \
--tag rustfs:dev-latest \
--load \
.
.PHONY: docker-dev-push
docker-dev-push:
@if [ -z "$(REGISTRY)" ]; then \
echo "❌ Error: Please specify registry, example: make docker-dev-push REGISTRY=ghcr.io/username"; \
exit 1; \
fi
@echo "🚀 Building and pushing multi-architecture development Docker images..."
@echo "💡 Pushing to registry: $(REGISTRY)"
$(DOCKER_CLI) buildx build \
--platform linux/amd64,linux/arm64 \
--file $(DOCKERFILE_SOURCE) \
--tag $(REGISTRY)/rustfs:source-latest \
--tag $(REGISTRY)/rustfs:dev-latest \
--push \
.
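# Example: make docker-dev-push REGISTRY=ghcr.io/your-username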
# Local production builds using direct buildx (alternative to docker-buildx.sh)
.PHONY: docker-buildx-production-local
docker-buildx-production-local:
@echo "🏗️ Building single-architecture production Docker image locally..."
@echo "💡 Alternative to docker-buildx.sh for local testing"
$(DOCKER_CLI) buildx build \
--file $(DOCKERFILE_PRODUCTION) \
--tag rustfs:production-latest \
--tag rustfs:latest \
--load \
--build-arg RELEASE=latest \
.
# ========================================================================================
# Single Architecture Docker Builds (Traditional)
# ========================================================================================
.PHONY: docker-build-production
docker-build-production:
@echo "🏗️ Building production Docker image..."
@echo "🏗️ Building single-architecture production Docker image..."
@echo "💡 Consider using 'make docker-buildx-production-local' for multi-arch support"
$(DOCKER_CLI) build -f $(DOCKERFILE_PRODUCTION) -t rustfs:latest .
.PHONY: docker-build-source
docker-build-source:
@echo "🏗️ Building source Docker image..."
$(DOCKER_CLI) build -f $(DOCKERFILE_SOURCE) -t rustfs:source .
@echo "🏗️ Building single-architecture source Docker image..."
@echo "💡 Consider using 'make docker-dev-local' for multi-arch support"
DOCKER_BUILDKIT=1 $(DOCKER_CLI) build \
--build-arg BUILDKIT_INLINE_CACHE=1 \
-f $(DOCKERFILE_SOURCE) -t rustfs:source .
# ========================================================================================
# Development Environment
# ========================================================================================
.PHONY: dev-env-start
dev-env-start:
@echo "🚀 Starting development environment..."
$(DOCKER_CLI) buildx build \
--file $(DOCKERFILE_SOURCE) \
--tag rustfs:dev \
--load \
.
$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true
$(DOCKER_CLI) run -d --name $(CONTAINER_NAME) \
-p 9010:9010 -p 9000:9000 \
-v $(shell pwd):/workspace \
-it rustfs:dev
.PHONY: dev-env-stop
dev-env-stop:
@echo "🛑 Stopping development environment..."
$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true
.PHONY: dev-env-restart
dev-env-restart: dev-env-stop dev-env-start
# ========================================================================================
# Build Utilities
# ========================================================================================
.PHONY: docker-inspect-multiarch
docker-inspect-multiarch:
@if [ -z "$(IMAGE)" ]; then \
echo "❌ 错误: 请指定镜像, 例如: make docker-inspect-multiarch IMAGE=rustfs/rustfs:latest"; \
echo "❌ Error: Please specify image, example: make docker-inspect-multiarch IMAGE=rustfs/rustfs:latest"; \
exit 1; \
fi
@echo "🔍 Inspecting multi-architecture image: $(IMAGE)"
@@ -165,64 +265,112 @@ docker-inspect-multiarch:
.PHONY: build-cross-all
build-cross-all:
@echo "🔧 Building all target architectures..."
@echo "💡 On macOS/Windows, use 'make docker-buildx' for reliable multi-arch builds"
@echo "💡 On macOS/Windows, use 'make docker-dev' for reliable multi-arch builds"
@echo "🔨 Generating protobuf code..."
cargo run --bin gproto || true
@echo "🔨 Building x86_64-unknown-linux-musl..."
./build-rustfs.sh --platform x86_64-unknown-linux-musl
@echo "🔨 Building x86_64-unknown-linux-gnu..."
./build-rustfs.sh --platform x86_64-unknown-linux-gnu
@echo "🔨 Building aarch64-unknown-linux-gnu..."
./build-rustfs.sh --platform aarch64-unknown-linux-gnu
@echo "🔨 Building x86_64-unknown-linux-musl..."
./build-rustfs.sh --platform x86_64-unknown-linux-musl
@echo "🔨 Building aarch64-unknown-linux-musl..."
./build-rustfs.sh --platform aarch64-unknown-linux-musl
@echo "✅ All architectures built successfully!"
# ========================================================================================
# Help and Documentation
# ========================================================================================
.PHONY: help-build
help-build:
@echo "🔨 RustFS 构建帮助:"
@echo "🔨 RustFS Build Help:"
@echo ""
@echo "🚀 本地构建 (推荐使用):"
@echo " make build # 构建 RustFS 二进制文件 (默认包含 console)"
@echo " make build-dev # 开发模式构建"
@echo " make build-musl # 构建 musl 版本"
@echo " make build-gnu # 构建 GNU 版本"
@echo "🚀 Local Build (Recommended):"
@echo " make build # Build RustFS binary (includes console by default)"
@echo " make build-dev # Development mode build"
@echo " make build-musl # Build x86_64 musl version"
@echo " make build-gnu # Build x86_64 GNU version"
@echo " make build-musl-arm64 # Build aarch64 musl version"
@echo " make build-gnu-arm64 # Build aarch64 GNU version"
@echo ""
@echo "🐳 Docker 构建:"
@echo " make build-docker # 使用 Docker 容器构建"
@echo " make build-docker BUILD_OS=ubuntu22.04 # 指定构建系统"
@echo "🐳 Docker Build:"
@echo " make build-docker # Build using Docker container"
@echo " make build-docker BUILD_OS=ubuntu22.04 # Specify build system"
@echo ""
@echo "🏗️ 跨架构构建:"
@echo " make build-cross-all # 构建所有架构的二进制文件"
@echo "🏗️ Cross-architecture Build:"
@echo " make build-cross-all # Build binaries for all architectures"
@echo ""
@echo "🔧 直接使用 build-rustfs.sh 脚本:"
@echo " ./build-rustfs.sh --help # 查看脚本帮助"
@echo " ./build-rustfs.sh --no-console # 构建时跳过 console 资源"
@echo " ./build-rustfs.sh --force-console-update # 强制更新 console 资源"
@echo " ./build-rustfs.sh --dev # 开发模式构建"
@echo " ./build-rustfs.sh --sign # 签名二进制文件"
@echo " ./build-rustfs.sh --platform x86_64-unknown-linux-musl # 指定目标平台"
@echo " ./build-rustfs.sh --skip-verification # 跳过二进制验证"
@echo "🔧 Direct usage of build-rustfs.sh script:"
@echo " ./build-rustfs.sh --help # View script help"
@echo " ./build-rustfs.sh --no-console # Build without console resources"
@echo " ./build-rustfs.sh --force-console-update # Force update console resources"
@echo " ./build-rustfs.sh --dev # Development mode build"
@echo " ./build-rustfs.sh --sign # Sign binary files"
@echo " ./build-rustfs.sh --platform x86_64-unknown-linux-gnu # Specify target platform"
@echo " ./build-rustfs.sh --skip-verification # Skip binary verification"
@echo ""
@echo "💡 build-rustfs.sh 脚本提供了更多选项、智能检测和二进制验证功能"
@echo "💡 build-rustfs.sh script provides more options, smart detection and binary verification"
.PHONY: help-docker
help-docker:
@echo "🐳 Docker 多架构构建帮助:"
@echo "🐳 Docker Multi-architecture Build Help:"
@echo ""
@echo "🚀 推荐使用 (新的 docker-buildx 方式):"
@echo " make docker-buildx # 构建多架构镜像(不推送)"
@echo " make docker-buildx-push # 构建并推送多架构镜像"
@echo " make docker-buildx-version VERSION=v1.0.0 # 构建指定版本"
@echo " make docker-buildx-push-version VERSION=v1.0.0 # 构建并推送指定版本"
@echo "🚀 Production Image Build (Recommended to use docker-buildx.sh):"
@echo " make docker-buildx # Build production multi-arch image (no push)"
@echo " make docker-buildx-push # Build and push production multi-arch image"
@echo " make docker-buildx-version VERSION=v1.0.0 # Build specific version"
@echo " make docker-buildx-push-version VERSION=v1.0.0 # Build and push specific version"
@echo ""
@echo "🏗️ 单架构构建:"
@echo " make docker-build-production # 构建生产环境镜像"
@echo " make docker-build-source # 构建源码构建镜像"
@echo "🔧 Development/Source Image Build (Local development testing):"
@echo " make docker-dev # Build dev multi-arch image (cannot load locally)"
@echo " make docker-dev-local # Build dev single-arch image (local load)"
@echo " make docker-dev-push REGISTRY=xxx # Build and push dev image"
@echo ""
@echo "🔧 辅助工具:"
@echo " make build-cross-all # 构建所有架构的二进制文件"
@echo " make docker-inspect-multiarch IMAGE=xxx # 检查镜像的架构支持"
@echo "🏗️ Local Production Image Build (Alternative):"
@echo " make docker-buildx-production-local # Build production single-arch image locally"
@echo ""
@echo "📋 环境变量 (在推送时需要设置):"
@echo " DOCKERHUB_USERNAME Docker Hub 用户名"
@echo " DOCKERHUB_TOKEN Docker Hub 访问令牌"
@echo " GITHUB_TOKEN GitHub 访问令牌"
@echo "📦 Single-architecture Build (Traditional way):"
@echo " make docker-build-production # Build single-arch production image"
@echo " make docker-build-source # Build single-arch source image"
@echo ""
@echo "💡 更多详情请参考项目根目录的 docker-buildx.sh 脚本"
@echo "🚀 Development Environment Management:"
@echo " make dev-env-start # Start development container environment"
@echo " make dev-env-stop # Stop development container environment"
@echo " make dev-env-restart # Restart development container environment"
@echo ""
@echo "🔧 Auxiliary Tools:"
@echo " make build-cross-all # Build binaries for all architectures"
@echo " make docker-inspect-multiarch IMAGE=xxx # Check image architecture support"
@echo ""
@echo "📋 Environment Variables:"
@echo " REGISTRY Image registry address (required for push)"
@echo " DOCKERHUB_USERNAME Docker Hub username"
@echo " DOCKERHUB_TOKEN Docker Hub access token"
@echo " GITHUB_TOKEN GitHub access token"
@echo ""
@echo "💡 Suggestions:"
@echo " - Production use: Use docker-buildx* commands (based on precompiled binaries)"
@echo " - Local development: Use docker-dev* commands (build from source)"
@echo " - Development environment: Use dev-env-* commands to manage dev containers"
.PHONY: help
help:
@echo "🦀 RustFS Makefile Help:"
@echo ""
@echo "📋 Main Command Categories:"
@echo " make help-build # Show build-related help"
@echo " make help-docker # Show Docker-related help"
@echo ""
@echo "🔧 Code Quality:"
@echo " make fmt # Format code"
@echo " make clippy # Run clippy checks"
@echo " make test # Run tests"
@echo " make pre-commit # Run all pre-commit checks"
@echo ""
@echo "🚀 Quick Start:"
@echo " make build # Build RustFS binary"
@echo " make docker-dev-local # Build development Docker image (local)"
@echo " make dev-env-start # Start development environment"
@echo ""
@echo "💡 For more help use 'make help-build' or 'make help-docker'"

README.md (221 lines)

@@ -1,6 +1,6 @@
[![RustFS](https://rustfs.com/images/rustfs-github.png)](https://rustfs.com)
<p align="center">RustFS is a high-performance distributed object storage software built using Rust</p>
<p align="center">RustFS is a high-performance, distributed object storage system built in Rust.</p>
<p align="center">
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
@@ -11,122 +11,158 @@
</p>
<p align="center">
<a href="https://docs.rustfs.com/en/introduction.html">Getting Started</a>
· <a href="https://docs.rustfs.com/en/">Docs</a>
<a href="https://docs.rustfs.com/installation/">Getting Started</a>
· <a href="https://docs.rustfs.com/">Docs</a>
· <a href="https://github.com/rustfs/rustfs/issues">Bug reports</a>
· <a href="https://github.com/rustfs/rustfs/discussions">Discussions</a>
</p>
<p align="center">
English | <a href="https://github.com/rustfs/rustfs/blob/main/README_ZH.md">简体中文</a> |
<!-- Keep these links. Translations will automatically update with the README. -->
<a href="https://readme-i18n.com/rustfs/rustfs?lang=de">Deutsch</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=es">Español</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=fr">français</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ja">日本語</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ko">한국어</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=pt">Português</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=pt">Portuguese</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ru">Русский</a>
</p>
RustFS is a high-performance distributed object storage software built using Rust, one of the most popular languages worldwide. Along with MinIO, it shares a range of advantages such as simplicity, S3 compatibility, open-source nature, support for data lakes, AI, and big data. Furthermore, it has a better and more user-friendly open-source license in comparison to other storage systems, being constructed under the Apache license. As Rust serves as its foundation, RustFS provides faster speed and safer distributed features for high-performance object storage.
RustFS is a high-performance, distributed object storage system built in Rust, one of the most loved programming languages worldwide. RustFS combines the simplicity of MinIO with the memory safety and raw performance of Rust. It offers full S3 compatibility, is completely open-source, and is optimized for data lakes, AI, and big data workloads.
> ⚠️ **RustFS is under rapid development. Do NOT use in production environments!**
Unlike other storage systems, RustFS is released under the permissive Apache 2.0 license, avoiding the restrictions of the AGPL. With Rust as its foundation, RustFS delivers superior speed and secure distributed features for next-generation object storage.
## Features
## Features & Status
- **High Performance**: Built with Rust, ensuring speed and efficiency.
- **Distributed Architecture**: Scalable and fault-tolerant design for large-scale deployments.
- **S3 Compatibility**: Seamless integration with existing S3-compatible applications.
- **Data Lake Support**: Optimized for big data and AI workloads.
- **Open Source**: Licensed under Apache 2.0, encouraging community contributions and transparency.
- **User-Friendly**: Designed with simplicity in mind, making it easy to deploy and manage.
- **High Performance**: Built with Rust to ensure maximum speed and resource efficiency.
- **Distributed Architecture**: Scalable and fault-tolerant design suitable for large-scale deployments.
- **S3 Compatibility**: Seamless integration with existing S3-compatible applications and tools.
- **Data Lake Support**: Optimized for high-throughput big data and AI workloads.
- **Open Source**: Licensed under Apache 2.0, encouraging unrestricted community contributions and commercial usage.
- **User-Friendly**: Designed with simplicity in mind for easy deployment and management.
## RustFS vs MinIO
| Feature | Status | Feature | Status |
| :--- | :--- | :--- | :--- |
| **S3 Core Features** | ✅ Available | **Bitrot Protection** | ✅ Available |
| **Upload / Download** | ✅ Available | **Single Node Mode** | ✅ Available |
| **Versioning** | ✅ Available | **Bucket Replication** | ⚠️ Partial Support |
| **Logging** | ✅ Available | **Lifecycle Management** | 🚧 Under Testing |
| **Event Notifications** | ✅ Available | **Distributed Mode** | 🚧 Under Testing |
| **K8s Helm Charts** | ✅ Available | **OPA (Open Policy Agent)** | 🚧 Under Testing |
Stress test server parameters
| Type | parameter | Remark |
| - | - | - |
|CPU | 2 Core | Intel Xeon(Sapphire Rapids) Platinum 8475B , 2.7/3.2 GHz| |
|Memory| 4GB |   |
|Network | 15Gbp |   |
|Driver | 40GB x 4 | IOPS 3800 / Driver |
## RustFS vs MinIO Performance
**Stress Test Environment:**
| Type | Parameter | Remark |
|---------|-----------|----------------------------------------------------------|
| CPU | 2 Core | Intel Xeon (Sapphire Rapids) Platinum 8475B, 2.7/3.2 GHz |
| Memory | 4GB | |
| Network | 15Gbps | |
| Drive | 40GB x 4 | IOPS 3800 / Drive |
<https://github.com/user-attachments/assets/2e4979b5-260c-4f2c-ac12-c87fd558072a>
### RustFS vs Other object storage
### RustFS vs Other Object Storage
| RustFS | Other object storage|
| - | - |
| Powerful Console | Simple and useless Console |
| Developed based on Rust language, memory is safer | Developed in Go or C, with potential issues like memory GC/leaks |
| Does not report logs to third-party countries | Reporting logs to other third countries may violate national security laws |
| Licensed under Apache, more business-friendly | AGPL V3 License and other License, polluted open source and License traps, infringement of intellectual property rights |
| Comprehensive S3 support, works with domestic and international cloud providers | Full support for S3, but no local cloud vendor support |
| Rust-based development, strong support for secure and innovative devices | Poor support for edge gateways and secure innovative devices|
| Stable commercial prices, free community support | High pricing, with costs up to $250,000 for 1PiB |
| No risk | Intellectual property risks and risks of prohibited uses |
| Feature | RustFS | Other Object Storage |
| :--- | :--- | :--- |
| **Console Experience** | **Powerful Console**<br>Comprehensive management interface. | **Basic / Limited Console**<br>Often overly simple or lacking critical features. |
| **Language & Safety** | **Rust-based**<br>Memory safety by design. | **Go or C-based**<br>Potential for memory GC pauses or leaks. |
| **Data Sovereignty** | **No Telemetry / Full Compliance**<br>Guards against unauthorized cross-border data egress. Compliant with GDPR (EU/UK), CCPA (US), and APPI (Japan). | **Potential Risk**<br>Possible legal exposure and unwanted data telemetry. |
| **Licensing** | **Permissive Apache 2.0**<br>Business-friendly, no "poison pill" clauses. | **Restrictive AGPL v3**<br>Risk of license traps and intellectual property pollution. |
| **Compatibility** | **100% S3 Compatible**<br>Works with any cloud provider or client, anywhere. | **Variable Compatibility**<br>May lack support for local cloud vendors or specific APIs. |
| **Edge & IoT** | **Strong Edge Support**<br>Ideal for secure, innovative edge devices. | **Weak Edge Support**<br>Often too heavy for edge gateways. |
| **Risk Profile** | **Enterprise Risk Mitigation**<br>Clear IP rights and safe for commercial use. | **Legal Risks**<br>Intellectual property ambiguity and usage restrictions. |
## Quickstart
To get started with RustFS, follow these steps:
1. **One-click installation script (Option 1)**
```bash
curl -O https://rustfs.com/install_rustfs.sh && bash install_rustfs.sh
```
2. **Docker Quick Start (Option 2)**
### 1. One-click Installation (Option 1)
```bash
# Latest stable release
docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs:latest
curl -O https://rustfs.com/install_rustfs.sh && bash install_rustfs.sh
```
# Development version (main branch)
docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs:main-latest
### 2. Docker Quick Start (Option 2)
# Specific version
docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs:v1.0.0
```
The RustFS container runs as a non-root user `rustfs` (UID `10001`). If you run Docker with `-v` to mount a host directory, please ensure the host directory owner is set to `10001`, otherwise you will encounter permission denied errors.
3. **Build from Source (Option 3) - Advanced Users**
```bash
# Create data and logs directories
mkdir -p data logs
For developers who want to build RustFS Docker images from source with multi-architecture support:
# Change the owner of these directories
chown -R 10001:10001 data logs
```bash
# Build multi-architecture images locally
./docker-buildx.sh --build-arg RELEASE=latest
# Using latest version
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:latest
# Build and push to registry
./docker-buildx.sh --push
# Using specific version
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:1.0.0.alpha.68
```
# Build specific version
./docker-buildx.sh --release v1.0.0 --push
You can also use Docker Compose with the `docker-compose.yml` file in the root directory:
# Build for custom registry
./docker-buildx.sh --registry your-registry.com --namespace yourname --push
```
```bash
docker compose --profile observability up -d
```
The `docker-buildx.sh` script supports:
- **Multi-architecture builds**: `linux/amd64`, `linux/arm64`
- **Automatic version detection**: Uses git tags or commit hashes
- **Registry flexibility**: Supports Docker Hub, GitHub Container Registry, etc.
- **Build optimization**: Includes caching and parallel builds
**NOTE**: We recommend reviewing the `docker-compose.yaml` file before running. It defines several services including Grafana, Prometheus, and Jaeger, which are helpful for RustFS observability. If you wish to start Redis or Nginx containers, you can specify the corresponding profiles.
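For example, a minimal sketch assuming the compose file defines `redis` as the profile name for the optional Redis service:

```bash
# Start the observability stack plus the optional Redis container
docker compose --profile observability --profile redis up -d
```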
You can also use Make targets for convenience:
### 3. Build from Source (Option 3) - Advanced Users
```bash
make docker-buildx # Build locally
make docker-buildx-push # Build and push
make docker-buildx-version VERSION=v1.0.0 # Build specific version
make help-docker # Show all Docker-related commands
```
For developers who want to build RustFS Docker images from source with multi-architecture support:
4. **Access the Console**: Open your web browser and navigate to `http://localhost:9000` to access the RustFS console, default username and password is `rustfsadmin` .
5. **Create a Bucket**: Use the console to create a new bucket for your objects.
6. **Upload Objects**: You can upload files directly through the console or use S3-compatible APIs to interact with your RustFS instance.
```bash
# Build multi-architecture images locally
./docker-buildx.sh --build-arg RELEASE=latest
# Build and push to registry
./docker-buildx.sh --push
# Build specific version
./docker-buildx.sh --release v1.0.0 --push
# Build for custom registry
./docker-buildx.sh --registry your-registry.com --namespace yourname --push
```
The `docker-buildx.sh` script supports:
- **Multi-architecture builds**: `linux/amd64`, `linux/arm64`
- **Automatic version detection**: Uses git tags or commit hashes
- **Registry flexibility**: Supports Docker Hub, GitHub Container Registry, etc.
- **Build optimization**: Includes caching and parallel builds
You can also use Make targets for convenience:
```bash
make docker-buildx # Build locally
make docker-buildx-push # Build and push
make docker-buildx-version VERSION=v1.0.0 # Build specific version
make help-docker # Show all Docker-related commands
```
> **Heads-up (macOS cross-compilation)**: macOS keeps the default `ulimit -n` at 256, so `cargo zigbuild` or `./build-rustfs.sh --platform ...` may fail with `ProcessFdQuotaExceeded` when targeting Linux. The build script attempts to raise the limit automatically, but if you still see the warning, run `ulimit -n 4096` (or higher) in your shell before building.
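A quick way to check and raise the limit for the current shell session before cross-compiling (a minimal sketch):

```bash
ulimit -n          # show the current soft limit (often 256 on macOS)
ulimit -n 4096     # raise it for this shell session only
./build-rustfs.sh --platform x86_64-unknown-linux-musl
```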
### 4. Build with Helm Chart (Option 4) - Cloud Native
Follow the instructions in the [Helm Chart README](https://charts.rustfs.com/) to install RustFS on a Kubernetes cluster.
-----
### Accessing RustFS
5. **Access the Console**: Open your web browser and navigate to `http://localhost:9000` to access the RustFS console.
* Default credentials: `rustfsadmin` / `rustfsadmin`
6. **Create a Bucket**: Use the console to create a new bucket for your objects.
7. **Upload Objects**: You can upload files directly through the console or use S3-compatible APIs/clients to interact with your RustFS instance.
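For example, a minimal sketch using the AWS CLI as a generic S3-compatible client (assuming the AWS CLI is installed and RustFS is running locally with the default credentials):

```bash
export AWS_ACCESS_KEY_ID=rustfsadmin
export AWS_SECRET_ACCESS_KEY=rustfsadmin

# Create a bucket and upload a file against the local RustFS endpoint
aws --endpoint-url http://localhost:9000 s3 mb s3://my-bucket
aws --endpoint-url http://localhost:9000 s3 cp ./hello.txt s3://my-bucket/hello.txt
```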
**NOTE**: To access the RustFS instance via `https`, please refer to the [TLS Configuration Docs](https://docs.rustfs.com/integration/tls-configured.html).
## Documentation
@@ -134,36 +170,47 @@ For detailed documentation, including configuration options, API references, and
## Getting Help
If you have any questions or need assistance, you can:
If you have any questions or need assistance:
- Check the [FAQ](https://github.com/rustfs/rustfs/discussions/categories/q-a) for common issues and solutions.
- Join our [GitHub Discussions](https://github.com/rustfs/rustfs/discussions) to ask questions and share your experiences.
- Open an issue on our [GitHub Issues](https://github.com/rustfs/rustfs/issues) page for bug reports or feature requests.
- Check the [FAQ](https://github.com/rustfs/rustfs/discussions/categories/q-a) for common issues and solutions.
- Join our [GitHub Discussions](https://github.com/rustfs/rustfs/discussions) to ask questions and share your experiences.
- Open an issue on our [GitHub Issues](https://github.com/rustfs/rustfs/issues) page for bug reports or feature requests.
## Links
- [Documentation](https://docs.rustfs.com) - The manual you should read
- [Changelog](https://github.com/rustfs/rustfs/releases) - What we broke and fixed
- [GitHub Discussions](https://github.com/rustfs/rustfs/discussions) - Where the community lives
- [Documentation](https://docs.rustfs.com) - The manual you should read
- [Changelog](https://github.com/rustfs/rustfs/releases) - What we broke and fixed
- [GitHub Discussions](https://github.com/rustfs/rustfs/discussions) - Where the community lives
## Contact
- **Bugs**: [GitHub Issues](https://github.com/rustfs/rustfs/issues)
- **Business**: <hello@rustfs.com>
- **Jobs**: <jobs@rustfs.com>
- **General Discussion**: [GitHub Discussions](https://github.com/rustfs/rustfs/discussions)
- **Contributing**: [CONTRIBUTING.md](CONTRIBUTING.md)
- **Bugs**: [GitHub Issues](https://github.com/rustfs/rustfs/issues)
- **Business**: [hello@rustfs.com](mailto:hello@rustfs.com)
- **Jobs**: [jobs@rustfs.com](mailto:jobs@rustfs.com)
- **General Discussion**: [GitHub Discussions](https://github.com/rustfs/rustfs/discussions)
- **Contributing**: [CONTRIBUTING.md](CONTRIBUTING.md)
## Contributors
RustFS is a community-driven project, and we appreciate all contributions. Check out the [Contributors](https://github.com/rustfs/rustfs/graphs/contributors) page to see the amazing people who have helped make RustFS better.
<a href="https://github.com/rustfs/rustfs/graphs/contributors">
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" />
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="Contributors" />
</a>
## Github Trending Top
🚀 RustFS is beloved by open-source enthusiasts and enterprise users worldwide, often appearing on the GitHub Trending top charts.
<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>
## Star History
[![Star History Chart](https://api.star-history.com/svg?repos=rustfs/rustfs&type=date&legend=top-left)](https://www.star-history.com/#rustfs/rustfs&type=date&legend=top-left)
## License
[Apache 2.0](https://opensource.org/licenses/Apache-2.0)
**RustFS** is a trademark of RustFS, Inc. All other trademarks are the property of their respective owners.

README_ZH.md

@@ -1,119 +1,219 @@
[![RustFS](https://rustfs.com/images/rustfs-github.png)](https://rustfs.com)
<p align="center">RustFS 是一个使用 Rust 构建的高性能分布式对象存储软件</p >
<p align="center">RustFS 是一个基于 Rust 构建的高性能分布式对象存储系统。</p>
<p align="center">
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
<a href="https://github.com/rustfs/rustfs/actions/workflows/docker.yml"><img alt="Build and Push Docker Images" src="https://github.com/rustfs/rustfs/actions/workflows/docker.yml/badge.svg" /></a>
<img alt="GitHub commit activity" src="https://img.shields.io/github/commit-activity/m/rustfs/rustfs"/>
<img alt="Github Last Commit" src="https://img.shields.io/github/last-commit/rustfs/rustfs"/>
<a href="https://github.com/rustfs/rustfs/actions/workflows/docker.yml"><img alt="构建并推送 Docker 镜像" src="https://github.com/rustfs/rustfs/actions/workflows/docker.yml/badge.svg" /></a>
<img alt="GitHub 提交活跃度" src="https://img.shields.io/github/commit-activity/m/rustfs/rustfs"/>
<img alt="Github 最新提交" src="https://img.shields.io/github/last-commit/rustfs/rustfs"/>
<a href="https://hellogithub.com/repository/rustfs/rustfs" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=b95bcb72bdc340b68f16fdf6790b7d5b&claim_uid=MsbvjYeLDKAH457&theme=small" alt="FeaturedHelloGitHub" /></a>
</p >
</p>
<p align="center">
<a href="https://docs.rustfs.com/zh/introduction.html">快速开始</a >
· <a href="https://docs.rustfs.com/zh/">文档</a >
· <a href="https://github.com/rustfs/rustfs/issues">问题报告</a >
· <a href="https://github.com/rustfs/rustfs/discussions">讨论</a >
</p >
<a href="https://docs.rustfs.com/installation/">快速开始</a>
· <a href="https://docs.rustfs.com/">文档</a>
· <a href="https://github.com/rustfs/rustfs/issues">报告 Bug</a>
· <a href="https://github.com/rustfs/rustfs/discussions">社区讨论</a>
</p>
<p align="center">
<a href="https://github.com/rustfs/rustfs/blob/main/README.md">English</a > | 简体中文
</p >
<a href="https://github.com/rustfs/rustfs/blob/main/README.md">English</a> | 简体中文 |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=de">Deutsch</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=es">Español</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=fr">français</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ja">日本語</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ko">한국어</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=pt">Portuguese</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ru">Русский</a>
</p>
RustFS 是一个使用 Rust(全球最受欢迎的编程语言之一)构建的高性能分布式对象存储软件。与 MinIO 一样它具有简单性、S3 兼容性、开源特性以及对数据湖、AI 和大数据的支持等一系列优势。此外,与其他存储系统相比,它采用 Apache 许可证构建,拥有更好、更用户友好的开源许可证。由于以 Rust 为基础RustFS 为高性能对象存储提供了更快的速度和更安全的分布式功能
RustFS 是一个基于 Rust 构建的高性能分布式对象存储系统。Rust 是全球最受开发者喜爱的编程语言之一,RustFS 完美结合了 MinIO 的简洁性与 Rust 的内存安全及高性能优势。它提供完整的 S3 兼容性,完全开源,并专为数据湖、人工智能(AI)和大数据负载进行了优化。
## 特性
与其他存储系统不同,RustFS 采用更宽松、商业友好的 Apache 2.0 许可证,避免了 AGPL 协议的限制。以 Rust 为基石,RustFS 为下一代对象存储提供了更快的速度和更安全的分布式特性。
- **高性能**:使用 Rust 构建,确保速度和效率。
## 特征和功能状态
- **高性能**:基于 Rust 构建,确保极致的速度和资源效率。
- **分布式架构**:可扩展且容错的设计,适用于大规模部署。
- **S3 兼容性**:与现有 S3 兼容应用程序无缝集成。
- **数据湖支持**针对大数据和 AI 工作负载进行了优化。
- **开源**:采用 Apache 2.0 许可证,鼓励社区贡献和透明度
- **用户友好**:设计简单,易于部署和管理。
- **S3 兼容性**:与现有 S3 兼容应用和工具无缝集成。
- **数据湖支持**专为高吞吐量的大数据和 AI 工作负载优化。
- **完全开源**:采用 Apache 2.0 许可证,鼓励社区贡献和商业使用
- **简单易用**:设计简洁,易于部署和管理。
## RustFS vs MinIO
压力测试服务器参数
| 功能 | 状态 | 功能 | 状态 |
| :--- | :--- | :--- | :--- |
| **S3 核心功能** | ✅ 可用 | **Bitrot (防数据腐烂)** | ✅ 可用 |
| **上传 / 下载** | ✅ 可用 | **单机模式** | ✅ 可用 |
| **版本控制** | ✅ 可用 | **存储桶复制** | ⚠️ 部分可用 |
| **日志功能** | ✅ 可用 | **生命周期管理** | 🚧 测试中 |
| **事件通知** | ✅ 可用 | **分布式模式** | 🚧 测试中 |
| **K8s Helm Chart** | ✅ 可用 | **OPA (策略引擎)** | 🚧 测试中 |
| 类型 | 参数 | 备注 |
| - | - | - |
|CPU | 2 核心 | Intel Xeon(Sapphire Rapids) Platinum 8475B , 2.7/3.2 GHz| |
|内存| 4GB | |
|网络 | 15Gbp | |
|驱动器 | 40GB x 4 | IOPS 3800 / 驱动器 |
## RustFS vs MinIO 性能对比
**压力测试环境参数:**
| 类型 | 参数 | 备注 |
|---------|-----------|----------------------------------------------------------|
| CPU | 2 核 | Intel Xeon (Sapphire Rapids) Platinum 8475B , 2.7/3.2 GHz |
| 内存 | 4GB |   |
| 网络 | 15Gbps |   |
| 硬盘 | 40GB x 4 | IOPS 3800 / Drive |
<https://github.com/user-attachments/assets/2e4979b5-260c-4f2c-ac12-c87fd558072a>
### RustFS vs 其他对象存储
| RustFS | 其他对象存储|
| - | - |
| 强大的控制台 | 简单且无用的控制台 |
| 基于 Rust 语言开发,内存安全 | 使用 Go 或 C 开发存在内存 GC/泄漏潜在问题 |
| 不向第三方国家报告日志 | 向其他第三方国家报告日志可能违反国家安全法律 |
| 采用 Apache 许可证,对商业友好 | AGPL V3 许可证等其他许可证,污染开源和许可证陷阱,侵犯知识产权 |
| 全面的 S3 支持,适用于国内外云提供商 | 完全支持 S3但不支持本地云厂商 |
| 基于 Rust 开发,对安全创新设备有强大支持 | 对边缘网关和安全创新设备支持较差|
| 稳定的商业价格,免费社区支持 | 高昂的定价,1PiB 成本高达 $250,000 |
| 无风险 | 知识产权风险和禁止使用的风险 |
| 特性 | RustFS | 其他对象存储 |
| :--- | :--- | :--- |
| **控制台体验** | **功能强大的控制台**<br>提供全面的管理界面。 | **基础/简陋的控制台**<br>通常功能过于简单或缺失关键特性。 |
| **语言与安全** | **基于 Rust 开发**<br>天生的内存安全 | **基于 Go 或 C 开发**<br>存在内存 GC 停顿或内存泄漏潜在风险。 |
| **数据主权** | **无遥测 / 完全合规**<br>防止未经授权的数据跨境传输。完全符合 GDPR (欧盟/英国)、CCPA (美国) 和 APPI (日本) 等法规。 | **潜在风险**<br>可能存在法律风险和隐蔽的数据遥测(Telemetry)。 |
| **开源协议** | **宽松的 Apache 2.0**<br>商业友好,无“毒丸”条款。 | **受限的 AGPL v3**<br>存在许可证陷阱和知识产权污染的风险。 |
| **兼容性** | **100% S3 兼容**<br>适用于任何云提供商和客户端,随处运行。 | **兼容性不一**<br>虽然支持 S3但可能缺乏对本地云厂商或特定 API 的支持。 |
| **边缘与 IoT** | **强大的边缘支持**<br>非常适合安全创新的边缘设备。 | **边缘支持较弱**<br>对于边缘网关来说通常过于沉重。 |
| **成本** | **稳定且免费**<br>免费社区支持,稳定的商业定价。 | **高昂成本**<br>1PiB 成本可能高达 250,000 美元。 |
| **风险控制** | **企业级风险规避**<br>清晰的知识产权,商业使用安全无忧。 | **法律风险**<br>知识产权归属模糊及使用限制风险 |
## 快速开始
要开始使用 RustFS请按照以下步骤操作
请按照以下步骤快速上手 RustFS
1. **一键脚本快速启动 (方案一)**
```bash
curl -O https://rustfs.com/install_rustfs.sh && bash install_rustfs.sh
```
2. **Docker快速启动方案二**
### 1. 一键安装脚本 (选项 1)
```bash
docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs
```
curl -O https://rustfs.com/install_rustfs.sh && bash install_rustfs.sh
```
3. **访问控制台**:打开 Web 浏览器并导航到 `http://localhost:9000` 以访问 RustFS 控制台,默认的用户名和密码是 `rustfsadmin` 。
4. **创建存储桶**:使用控制台为您的对象创建新的存储桶。
5. **上传对象**:您可以直接通过控制台上传文件,或使用 S3 兼容的 API 与您的 RustFS 实例交互
### 2. Docker 快速启动 (选项 2)
RustFS 容器以非 root 用户 `rustfs` (UID `10001`) 运行。如果您使用 Docker 的 `-v` 参数挂载宿主机目录,请务必确保宿主机目录的所有者已更改为 `10001`,否则会遇到权限拒绝错误。
```bash
# 创建数据和日志目录
mkdir -p data logs
# 更改这两个目录的所有者
chown -R 10001:10001 data logs
# 使用最新版本运行
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:latest
# 使用指定版本运行
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:1.0.0.alpha.68
```
您也可以使用 Docker Compose。使用根目录下的 `docker-compose.yml` 文件:
```bash
docker compose --profile observability up -d
```
**注意**: 我们建议您在运行前查看 `docker-compose.yaml` 文件。该文件定义了包括 Grafana、Prometheus 和 Jaeger 在内的多个服务,有助于 RustFS 的可观测性监控。如果您还想启动 Redis 或 Nginx 容器,可以指定相应的 profile。
### 3. 源码编译 (选项 3) - 进阶用户
适用于希望从源码构建支持多架构 RustFS Docker 镜像的开发者:
```bash
# 在本地构建多架构镜像
./docker-buildx.sh --build-arg RELEASE=latest
# 构建并推送到仓库
./docker-buildx.sh --push
# 构建指定版本
./docker-buildx.sh --release v1.0.0 --push
# 构建并推送到自定义仓库
./docker-buildx.sh --registry your-registry.com --namespace yourname --push
```
`docker-buildx.sh` 脚本支持:
- **多架构构建**: `linux/amd64`, `linux/arm64`
- **自动版本检测**: 使用 git tags 或 commit hash
- **灵活的仓库支持**: 支持 Docker Hub, GitHub Container Registry 等
- **构建优化**: 包含缓存和并行构建
为了方便起见,您也可以使用 Make 命令:
```bash
make docker-buildx # 本地构建
make docker-buildx-push # 构建并推送
make docker-buildx-version VERSION=v1.0.0 # 构建指定版本
make help-docker # 显示所有 Docker 相关命令
```
> **注意 (macOS 交叉编译)**: macOS 默认的 `ulimit -n` 限制为 256因此在使用 `cargo zigbuild` 或 `./build-rustfs.sh --platform ...` 交叉编译 Linux 版本时,可能会因 `ProcessFdQuotaExceeded` 失败。构建脚本会尝试自动提高限制,但如果您仍然看到警告,请在构建前在终端运行 `ulimit -n 4096` (或更高)。
### 4. 使用 Helm Chart 安装 (选项 4) - 云原生环境
请按照 [Helm Chart README](https://charts.rustfs.com) 上的说明在 Kubernetes 集群上安装 RustFS。
-----
### 访问 RustFS
5. **访问控制台**: 打开浏览器并访问 `http://localhost:9000` 进入 RustFS 控制台。
* 默认账号/密码: `rustfsadmin` / `rustfsadmin`
6. **创建存储桶**: 使用控制台为您的对象创建一个新的存储桶 (Bucket)。
7. **上传对象**: 您可以直接通过控制台上传文件,或使用 S3 兼容的 API/客户端与您的 RustFS 实例进行交互。
**注意**: 如果您希望通过 `https` 访问 RustFS 实例,请参考 [TLS 配置文档](https://docs.rustfs.com/integration/tls-configured.html)。
## 文档
有关详细文档包括配置选项、API 参考和高级用法,请访问我们的[文档](https://docs.rustfs.com)。
有关详细文档包括配置选项、API 参考和高级用法,请访问我们的 [官方文档](https://docs.rustfs.com)。
## 获取帮助
如果您有任何问题或需要帮助,您可以
如果您有任何问题或需要帮助:
- 查看[常见问题解答](https://github.com/rustfs/rustfs/discussions/categories/q-a)以获取常见问题和解决方案。
- 加入我们的 [GitHub 讨论](https://github.com/rustfs/rustfs/discussions)提问分享您的经验。
- 在我们的 [GitHub Issues](https://github.com/rustfs/rustfs/issues) 页面上开启问题,报告错误或功能请求。
- 查看 [FAQ](https://github.com/rustfs/rustfs/discussions/categories/q-a) 寻找常见问题和解决方案。
- 加入我们的 [GitHub Discussions](https://github.com/rustfs/rustfs/discussions) 提问分享您的经验。
- 在我们的 [GitHub Issues](https://github.com/rustfs/rustfs/issues) 页面提交 Bug 报告或功能请求。
## 链接
- [文档](https://docs.rustfs.com) - 您应该阅读的手册
- [更新日志](https://docs.rustfs.com/changelog) - 我们破坏和修复的内容
- [GitHub 讨论](https://github.com/rustfs/rustfs/discussions) - 社区所在
- [官方文档](https://docs.rustfs.com) - 必读手册
- [更新日志](https://github.com/rustfs/rustfs/releases) - 版本变更记录
- [社区讨论](https://github.com/rustfs/rustfs/discussions) - 社区交流
## 联系
## 联系方式
- **错误报告**[GitHub Issues](https://github.com/rustfs/rustfs/issues)
- **商务合作**<hello@rustfs.com>
- **招聘**<jobs@rustfs.com>
- **一般讨论**[GitHub 讨论](https://github.com/rustfs/rustfs/discussions)
- **贡献**[CONTRIBUTING.md](CONTRIBUTING.md)
- **Bug 反馈**: [GitHub Issues](https://github.com/rustfs/rustfs/issues)
- **商务合作**: [hello@rustfs.com](mailto:hello@rustfs.com)
- **工作机会**: [jobs@rustfs.com](mailto:jobs@rustfs.com)
- **一般讨论**: [GitHub Discussions](https://github.com/rustfs/rustfs/discussions)
- **贡献指南**: [CONTRIBUTING.md](CONTRIBUTING.md)
## 贡献者
RustFS 是一个社区驱动的项目,我们感谢所有的贡献。查看[贡献者](https://github.com/rustfs/rustfs/graphs/contributors)页面,了解帮助 RustFS 变得更好的杰出人员
RustFS 是一个社区驱动的项目,我们感谢所有的贡献。查看 [贡献者](https://github.com/rustfs/rustfs/graphs/contributors) 页面,看看那些让 RustFS 变得更好的了不起的人们
<a href="https://github.com/rustfs/rustfs/graphs/contributors">
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" />
</a >
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="Contributors" />
</a>
## Github Trending Top
🚀 RustFS 深受全球开源爱好者和企业用户的喜爱,经常荣登 GitHub Trending 榜单。
<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>
## Star 历史
[![Star History Chart](https://api.star-history.com/svg?repos=rustfs/rustfs&type=date&legend=top-left)](https://www.star-history.com/#rustfs/rustfs&type=date&legend=top-left)
## 许可证
[Apache 2.0](https://opensource.org/licenses/Apache-2.0)
**RustFS** 是 RustFS, Inc. 的商标。所有其他商标均为其各自所有者的财产。

_typos.toml (new file, 41 lines)

@@ -0,0 +1,41 @@
[default]
# # Ignore specific spell checking patterns
# extend-ignore-identifiers-re = [
# # Ignore common patterns in base64 encoding and hash values
# "[A-Za-z0-9+/]{8,}={0,2}", # base64 encoding
# "[A-Fa-f0-9]{8,}", # hexadecimal hash
# "[A-Za-z0-9_-]{20,}", # long random strings
# ]
# # Ignore specific regex patterns in content
# extend-ignore-re = [
# # Ignore hash values and encoded strings (base64 patterns)
# "(?i)[A-Za-z0-9+/]{8,}={0,2}",
# # Ignore long strings in quotes (usually hash or base64)
# '"[A-Za-z0-9+/=_-]{8,}"',
# # Ignore IV values and similar cryptographic strings
# '"[A-Za-z0-9+/=]{12,}"',
# # Ignore cryptographic signatures and keys (including partial strings)
# "[A-Za-z0-9+/]{6,}[A-Za-z0-9+/=]*",
# # Ignore base64-like strings in comments (common in examples)
# "//.*[A-Za-z0-9+/]{8,}[A-Za-z0-9+/=]*",
# ]
extend-ignore-re = [
# Ignore long strings in quotes (usually hash or base64)
'"[A-Za-z0-9+/=_-]{32,}"',
# Ignore IV values and similar cryptographic strings
'"[A-Za-z0-9+/=]{12,}"',
# Ignore cryptographic signatures and keys (including partial strings)
"[A-Za-z0-9+/]{16,}[A-Za-z0-9+/=]*",
]
[default.extend-words]
bui = "bui"
typ = "typ"
clen = "clen"
datas = "datas"
bre = "bre"
abd = "abd"
[files]
extend-exclude = []
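As a quick illustration of what the `extend-ignore-re` patterns above match, the following standalone Rust sketch (using the `regex` crate; it is not part of the repository) shows the quoted-string rule covering a base64-like literal while leaving ordinary prose to be spell-checked:
```rust
use regex::Regex; // assumes the `regex` crate is available

fn main() {
    // Same pattern as the first entry in extend-ignore-re:
    // a double-quoted run of 32 or more base64-like characters.
    let ignore = Regex::new(r#""[A-Za-z0-9+/=_-]{32,}""#).unwrap();

    // A random-looking token like this would be skipped by typos,
    // so it is never flagged as a misspelling.
    let source_line = r#"let sig = "dGhpcyBpcyBub3QgYSByZWFsIHNpZ25hdHVyZQ==";"#;
    assert!(ignore.is_match(source_line));

    // Short, ordinary strings are not matched and are still checked.
    assert!(!ignore.is_match(r#"let msg = "hello world";"#));
}
```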


@@ -21,13 +21,17 @@ detect_platform() {
"linux")
case "$arch" in
"x86_64")
echo "x86_64-unknown-linux-musl"
# Default to GNU for better compatibility
echo "x86_64-unknown-linux-gnu"
;;
"aarch64"|"arm64")
echo "aarch64-unknown-linux-musl"
echo "aarch64-unknown-linux-gnu"
;;
"armv7l")
echo "armv7-unknown-linux-musleabihf"
echo "armv7-unknown-linux-gnueabihf"
;;
"loongarch64")
echo "loongarch64-unknown-linux-musl"
;;
*)
echo "unknown-platform"
@@ -119,6 +123,17 @@ usage() {
echo " -o, --output-dir DIR Output directory (default: target/release)"
echo " -b, --binary-name NAME Binary name (default: rustfs)"
echo " -p, --platform TARGET Target platform (default: auto-detect)"
echo " Supported platforms:"
echo " x86_64-unknown-linux-gnu"
echo " aarch64-unknown-linux-gnu"
echo " armv7-unknown-linux-gnueabihf"
echo " x86_64-unknown-linux-musl"
echo " aarch64-unknown-linux-musl"
echo " armv7-unknown-linux-musleabihf"
echo " x86_64-apple-darwin"
echo " aarch64-apple-darwin"
echo " x86_64-pc-windows-msvc"
echo " aarch64-pc-windows-msvc"
echo " --dev Build in dev mode"
echo " --sign Sign binaries after build"
echo " --with-console Download console static assets (default)"
@@ -148,6 +163,35 @@ print_message() {
echo -e "${color}${message}${NC}"
}
# Prevent zig/ld from hitting macOS file descriptor defaults during linking
ensure_file_descriptor_limit() {
local required_limit=4096
local current_limit
current_limit=$(ulimit -Sn 2>/dev/null || echo "")
if [ -z "$current_limit" ] || [ "$current_limit" = "unlimited" ]; then
return
fi
if (( current_limit >= required_limit )); then
return
fi
local hard_limit target_limit
hard_limit=$(ulimit -Hn 2>/dev/null || echo "")
target_limit=$required_limit
if [ -n "$hard_limit" ] && [ "$hard_limit" != "unlimited" ] && (( hard_limit < required_limit )); then
target_limit=$hard_limit
fi
if ulimit -Sn "$target_limit" 2>/dev/null; then
print_message $YELLOW "🔧 Increased open file limit from $current_limit to $target_limit to avoid ProcessFdQuotaExceeded"
else
print_message $YELLOW "⚠️ Unable to raise ulimit -n automatically (current: $current_limit, needed: $required_limit). Please run 'ulimit -n $required_limit' manually before building."
fi
}
# Get version from git
get_version() {
if git describe --abbrev=0 --tags >/dev/null 2>&1; then
@@ -385,7 +429,7 @@ build_binary() {
fi
else
# Native compilation
build_cmd="cargo build"
build_cmd="RUSTFLAGS=-Clink-arg=-lm cargo build"
fi
if [ "$BUILD_TYPE" = "release" ]; then
@@ -555,10 +599,11 @@ main() {
fi
fi
ensure_file_descriptor_limit
# Start build process
build_rustfs
}
# Run main function
main
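For readers more comfortable with Rust than shell, here is a rough sketch of what `ensure_file_descriptor_limit` above does, written against the `libc` crate purely as an illustration. It is not part of the build script, and the crate usage is an assumption:
```rust
// Illustrative Rust equivalent of the shell ensure_file_descriptor_limit:
// raise the soft RLIMIT_NOFILE toward 4096, capped by the hard limit.
fn ensure_file_descriptor_limit(required: u64) {
    unsafe {
        let mut lim = libc::rlimit { rlim_cur: 0, rlim_max: 0 };
        if libc::getrlimit(libc::RLIMIT_NOFILE, &mut lim) != 0 {
            return; // could not read the current limits; leave them alone
        }
        if (lim.rlim_cur as u64) >= required {
            return; // soft limit is already high enough
        }
        // Never request more than the hard limit allows.
        lim.rlim_cur = required.min(lim.rlim_max as u64) as libc::rlim_t;
        if libc::setrlimit(libc::RLIMIT_NOFILE, &lim) != 0 {
            eprintln!("unable to raise open-file limit; run `ulimit -n {required}` before building");
        }
    }
}

fn main() {
    ensure_file_descriptor_limit(4096);
}
```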


@@ -1,52 +0,0 @@
[application]
# App (Project) Name
name = "rustfs-gui"
# The static resource path
asset_dir = "public"
[web.app]
# HTML title tag content
title = "rustfs-gui"
# include `assets` in web platform
[web.resource]
# Additional CSS style files
style = []
# Additional JavaScript files
script = []
[web.resource.dev]
# Javascript code file
# serve: [dev-server] only
script = []
[bundle]
identifier = "com.rustfs.cli.gui"
publisher = "RustFsGUI"
category = "Utility"
copyright = "Copyright 2025 rustfs.com"
icon = [
"assets/icons/icon.icns",
"assets/icons/icon.ico",
"assets/icons/icon.png",
"assets/icons/rustfs-icon.png",
]
#[bundle.macos]
#provider_short_name = "RustFs"
[bundle.windows]
tsp = true
icon_path = "assets/icons/icon.ico"
allow_downgrades = true
[bundle.windows.webview_install_mode]
[bundle.windows.webview_install_mode.EmbedBootstrapper]
silent = true


@@ -1,34 +0,0 @@
## Rustfs GUI
### Tailwind
1. Install npm: https://docs.npmjs.com/downloading-and-installing-node-js-and-npm
2. Install the Tailwind CSS CLI: https://tailwindcss.com/docs/installation
3. Run the following command in the root of the project to start the Tailwind CSS compiler:
```bash
npx tailwindcss -i ./input.css -o ./assets/tailwind.css --watch
```
### Dioxus CLI
#### Install the stable version (recommended)
```shell
cargo install dioxus-cli
```
### Serving Your App
Run the following command in the root of your project to start developing with the default platform:
```bash
dx serve
```
To run for a different platform, use the `--platform platform` flag. E.g.
```bash
dx serve --platform desktop
```

Binary files not shown (16 removed image assets, 498 B to 80 KiB).


@@ -1,48 +0,0 @@
/**
* Copyright 2024 RustFS Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
window.switchTab = function (tabId) {
// Hide everything
document.querySelectorAll('.tab-content').forEach(content => {
content.classList.add('hidden');
});
// Reset all label styles
document.querySelectorAll('.tab-btn').forEach(btn => {
btn.classList.remove('border-b-2', 'border-black');
btn.classList.add('text-gray-500');
});
// Displays the selected content
const activeContent = document.getElementById(tabId);
if (activeContent) {
activeContent.classList.remove('hidden');
}
// Updates the selected label style
const activeBtn = document.querySelector(`[data-tab="${tabId}"]`);
if (activeBtn) {
activeBtn.classList.add('border-b-2', 'border-black');
activeBtn.classList.remove('text-gray-500');
}
};
window.togglePassword = function (button) {
const input = button.parentElement.querySelector('input[type="password"], input[type="text"]');
if (input) {
input.type = input.type === 'password' ? 'text' : 'password';
}
};

Binary files not shown (2 removed image assets, 23 KiB and 34 KiB).


@@ -1,15 +0,0 @@
<svg width="1558" height="260" viewBox="0 0 1558 260" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_0_3)">
<path d="M1288.5 112.905H1159.75V58.4404H1262L1270 0L1074 0V260H1159.75V162.997H1296.95L1288.5 112.905Z" fill="#0196D0"/>
<path d="M1058.62 58.4404V0H789V58.4404H881.133V260H966.885V58.4404H1058.62Z" fill="#0196D0"/>
<path d="M521 179.102V0L454.973 15V161C454.973 181.124 452.084 193.146 443.5 202C434.916 211.257 419.318 214.5 400.5 214.5C381.022 214.5 366.744 210.854 357.5 202C348.916 193.548 346.357 175.721 346.357 156V0L280 15V175.48C280 208.08 290.234 229.412 309.712 241.486C329.19 253.56 358.903 260 400.5 260C440.447 260 470.159 253.56 490.297 241.486C510.766 229.412 521 208.483 521 179.102Z" fill="#0196D0"/>
<path d="M172.84 84.2813C172.84 97.7982 168.249 107.737 158.41 113.303C149.883 118.471 137.092 121.254 120.693 122.049V162.997C129.876 163.792 138.076 166.177 144.307 176.514L184.647 260H265L225.316 180.489C213.181 155.046 201.374 149.48 178.744 143.517C212.197 138.349 241.386 118.471 241.386 73.1499C241.386 53.2722 233.843 30.2141 218.756 17.8899C203.998 5.56575 183.991 0 159.394 0H120.693V48.5015H127.58C142.23 48.5015 153.6 51.4169 161.689 57.2477C169.233 62.8135 172.84 71.5596 172.84 84.2813ZM120.693 122.049C119.163 122.049 117.741 122.049 116.43 122.049H68.5457V48.5015H120.693V0H0V260H70.5137V162.997H110.526C113.806 162.997 117.741 162.997 120.693 162.997V122.049Z" fill="#0196D0"/>
<path d="M774 179.297C774 160.829 766.671 144.669 752.013 131.972C738.127 119.66 712.025 110.169 673.708 103.5C662.136 101.191 651.722 99.6523 643.235 97.3437C586.532 84.6467 594.632 52.7118 650.564 52.7118C680.651 52.7118 709.582 61.946 738.127 66.9478C742.37 67.7174 743.913 68.1021 744.298 68.1021L750.47 12.697C720.383 3.46282 684.895 0 654.036 0C616.619 0 587.689 6.54088 567.245 19.2379C546.801 31.9349 536 57.7137 536 82.3382C536 103.5 543.715 119.66 559.916 131.972C575.731 143.515 604.276 152.749 645.55 160.059C658.279 162.368 668.694 163.907 676.794 166.215C685.023 168.524 691.066 170.704 694.924 172.756C702.253 176.604 706.11 182.375 706.11 188.531C706.11 196.611 701.481 202.767 692.224 207C664.836 220.081 587.689 212.001 556.83 198.15L543.715 247.784C547.186 248.169 552.972 249.323 559.916 250.477C616.619 259.327 690.681 270.869 741.212 238.935C762.814 225.468 774 206.23 774 179.297Z" fill="#0196D0"/>
<path d="M1558 179.568C1558 160.383 1550.42 144.268 1535.67 131.99C1521.32 119.968 1494.34 110.631 1454.74 103.981C1442.38 101.679 1432.01 99.3764 1422.84 97.8416C1422.44 97.8416 1422.04 97.8416 1422.04 97.4579V112.422L1361.04 75.2038L1422.04 38.3692V52.9496C1424.7 52.9496 1427.49 52.9496 1430.41 52.9496C1461.51 52.9496 1491.42 62.5419 1521.32 67.5299C1525.31 67.9136 1526.9 67.9136 1527.3 67.9136L1533.68 12.6619C1502.98 3.83692 1465.9 0 1434 0C1395.33 0 1365.43 6.52277 1345.09 19.5683C1323.16 32.6139 1312 57.9376 1312 82.8776C1312 103.981 1320.37 120.096 1336.72 131.607C1353.46 143.885 1382.97 153.093 1425.23 160.383C1434 161.535 1441.18 162.686 1447.56 164.22L1448.36 150.791L1507.36 190.312L1445.57 224.844L1445.96 212.949C1409.68 215.635 1357.45 209.112 1333.53 197.985L1320.37 247.482C1323.56 248.249 1329.54 248.633 1336.72 250.551C1395.33 259.376 1471.88 270.887 1524.11 238.657C1546.84 225.611 1558 205.659 1558 179.568Z" fill="#0196D0"/>
</g>
<defs>
<clipPath id="clip0_0_3">
<rect width="1558" height="260" fill="white"/>
</clipPath>
</defs>
</svg>



@@ -1,33 +0,0 @@
/**
* Copyright 2024 RustFS Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#navbar {
display: flex;
flex-direction: row;
}
#navbar a {
color: #ffffff;
margin-right: 20px;
text-decoration: none;
transition: color 0.2s ease;
}
#navbar a:hover {
cursor: pointer;
color: #ffffff;
/ / #91a4d2;
}


@@ -1,972 +0,0 @@
/**
* Copyright 2024 RustFS Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
*, ::before, ::after {
--tw-border-spacing-x: 0;
--tw-border-spacing-y: 0;
--tw-translate-x: 0;
--tw-translate-y: 0;
--tw-rotate: 0;
--tw-skew-x: 0;
--tw-skew-y: 0;
--tw-scale-x: 1;
--tw-scale-y: 1;
--tw-pan-x: ;
--tw-pan-y: ;
--tw-pinch-zoom: ;
--tw-scroll-snap-strictness: proximity;
--tw-gradient-from-position: ;
--tw-gradient-via-position: ;
--tw-gradient-to-position: ;
--tw-ordinal: ;
--tw-slashed-zero: ;
--tw-numeric-figure: ;
--tw-numeric-spacing: ;
--tw-numeric-fraction: ;
--tw-ring-inset: ;
--tw-ring-offset-width: 0px;
--tw-ring-offset-color: #fff;
--tw-ring-color: rgb(59 130 246 / 0.5);
--tw-ring-offset-shadow: 0 0 #0000;
--tw-ring-shadow: 0 0 #0000;
--tw-shadow: 0 0 #0000;
--tw-shadow-colored: 0 0 #0000;
--tw-blur: ;
--tw-brightness: ;
--tw-contrast: ;
--tw-grayscale: ;
--tw-hue-rotate: ;
--tw-invert: ;
--tw-saturate: ;
--tw-sepia: ;
--tw-drop-shadow: ;
--tw-backdrop-blur: ;
--tw-backdrop-brightness: ;
--tw-backdrop-contrast: ;
--tw-backdrop-grayscale: ;
--tw-backdrop-hue-rotate: ;
--tw-backdrop-invert: ;
--tw-backdrop-opacity: ;
--tw-backdrop-saturate: ;
--tw-backdrop-sepia: ;
--tw-contain-size: ;
--tw-contain-layout: ;
--tw-contain-paint: ;
--tw-contain-style: ;
}
::backdrop {
--tw-border-spacing-x: 0;
--tw-border-spacing-y: 0;
--tw-translate-x: 0;
--tw-translate-y: 0;
--tw-rotate: 0;
--tw-skew-x: 0;
--tw-skew-y: 0;
--tw-scale-x: 1;
--tw-scale-y: 1;
--tw-pan-x: ;
--tw-pan-y: ;
--tw-pinch-zoom: ;
--tw-scroll-snap-strictness: proximity;
--tw-gradient-from-position: ;
--tw-gradient-via-position: ;
--tw-gradient-to-position: ;
--tw-ordinal: ;
--tw-slashed-zero: ;
--tw-numeric-figure: ;
--tw-numeric-spacing: ;
--tw-numeric-fraction: ;
--tw-ring-inset: ;
--tw-ring-offset-width: 0px;
--tw-ring-offset-color: #fff;
--tw-ring-color: rgb(59 130 246 / 0.5);
--tw-ring-offset-shadow: 0 0 #0000;
--tw-ring-shadow: 0 0 #0000;
--tw-shadow: 0 0 #0000;
--tw-shadow-colored: 0 0 #0000;
--tw-blur: ;
--tw-brightness: ;
--tw-contrast: ;
--tw-grayscale: ;
--tw-hue-rotate: ;
--tw-invert: ;
--tw-saturate: ;
--tw-sepia: ;
--tw-drop-shadow: ;
--tw-backdrop-blur: ;
--tw-backdrop-brightness: ;
--tw-backdrop-contrast: ;
--tw-backdrop-grayscale: ;
--tw-backdrop-hue-rotate: ;
--tw-backdrop-invert: ;
--tw-backdrop-opacity: ;
--tw-backdrop-saturate: ;
--tw-backdrop-sepia: ;
--tw-contain-size: ;
--tw-contain-layout: ;
--tw-contain-paint: ;
--tw-contain-style: ;
}
/*
! tailwindcss v3.4.17 | MIT License | https://tailwindcss.com
*/
/*
1. Prevent padding and border from affecting element width. (https://github.com/mozdevs/cssremedy/issues/4)
2. Allow adding a border to an element by just adding a border-width. (https://github.com/tailwindcss/tailwindcss/pull/116)
*/
*,
::before,
::after {
box-sizing: border-box;
/* 1 */
border-width: 0;
/* 2 */
border-style: solid;
/* 2 */
border-color: #e5e7eb;
/* 2 */
}
::before,
::after {
--tw-content: '';
}
/*
1. Use a consistent sensible line-height in all browsers.
2. Prevent adjustments of font size after orientation changes in iOS.
3. Use a more readable tab size.
4. Use the user's configured `sans` font-family by default.
5. Use the user's configured `sans` font-feature-settings by default.
6. Use the user's configured `sans` font-variation-settings by default.
7. Disable tap highlights on iOS
*/
html,
:host {
line-height: 1.5;
/* 1 */
-webkit-text-size-adjust: 100%;
/* 2 */
-moz-tab-size: 4;
/* 3 */
-o-tab-size: 4;
tab-size: 4;
/* 3 */
font-family: ui-sans-serif, system-ui, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";
/* 4 */
font-feature-settings: normal;
/* 5 */
font-variation-settings: normal;
/* 6 */
-webkit-tap-highlight-color: transparent;
/* 7 */
}
/*
1. Remove the margin in all browsers.
2. Inherit line-height from `html` so users can set them as a class directly on the `html` element.
*/
body {
margin: 0;
/* 1 */
line-height: inherit;
/* 2 */
}
/*
1. Add the correct height in Firefox.
2. Correct the inheritance of border color in Firefox. (https://bugzilla.mozilla.org/show_bug.cgi?id=190655)
3. Ensure horizontal rules are visible by default.
*/
hr {
height: 0;
/* 1 */
color: inherit;
/* 2 */
border-top-width: 1px;
/* 3 */
}
/*
Add the correct text decoration in Chrome, Edge, and Safari.
*/
abbr:where([title]) {
-webkit-text-decoration: underline dotted;
text-decoration: underline dotted;
}
/*
Remove the default font size and weight for headings.
*/
h1,
h2,
h3,
h4,
h5,
h6 {
font-size: inherit;
font-weight: inherit;
}
/*
Reset links to optimize for opt-in styling instead of opt-out.
*/
a {
color: inherit;
text-decoration: inherit;
}
/*
Add the correct font weight in Edge and Safari.
*/
b,
strong {
font-weight: bolder;
}
/*
1. Use the user's configured `mono` font-family by default.
2. Use the user's configured `mono` font-feature-settings by default.
3. Use the user's configured `mono` font-variation-settings by default.
4. Correct the odd `em` font sizing in all browsers.
*/
code,
kbd,
samp,
pre {
font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
/* 1 */
font-feature-settings: normal;
/* 2 */
font-variation-settings: normal;
/* 3 */
font-size: 1em;
/* 4 */
}
/*
Add the correct font size in all browsers.
*/
small {
font-size: 80%;
}
/*
Prevent `sub` and `sup` elements from affecting the line height in all browsers.
*/
sub,
sup {
font-size: 75%;
line-height: 0;
position: relative;
vertical-align: baseline;
}
sub {
bottom: -0.25em;
}
sup {
top: -0.5em;
}
/*
1. Remove text indentation from table contents in Chrome and Safari. (https://bugs.chromium.org/p/chromium/issues/detail?id=999088, https://bugs.webkit.org/show_bug.cgi?id=201297)
2. Correct table border color inheritance in all Chrome and Safari. (https://bugs.chromium.org/p/chromium/issues/detail?id=935729, https://bugs.webkit.org/show_bug.cgi?id=195016)
3. Remove gaps between table borders by default.
*/
table {
text-indent: 0;
/* 1 */
border-color: inherit;
/* 2 */
border-collapse: collapse;
/* 3 */
}
/*
1. Change the font styles in all browsers.
2. Remove the margin in Firefox and Safari.
3. Remove default padding in all browsers.
*/
button,
input,
optgroup,
select,
textarea {
font-family: inherit;
/* 1 */
font-feature-settings: inherit;
/* 1 */
font-variation-settings: inherit;
/* 1 */
font-size: 100%;
/* 1 */
font-weight: inherit;
/* 1 */
line-height: inherit;
/* 1 */
letter-spacing: inherit;
/* 1 */
color: inherit;
/* 1 */
margin: 0;
/* 2 */
padding: 0;
/* 3 */
}
/*
Remove the inheritance of text transform in Edge and Firefox.
*/
button,
select {
text-transform: none;
}
/*
1. Correct the inability to style clickable types in iOS and Safari.
2. Remove default button styles.
*/
button,
input:where([type='button']),
input:where([type='reset']),
input:where([type='submit']) {
-webkit-appearance: button;
/* 1 */
background-color: transparent;
/* 2 */
background-image: none;
/* 2 */
}
/*
Use the modern Firefox focus style for all focusable elements.
*/
:-moz-focusring {
outline: auto;
}
/*
Remove the additional `:invalid` styles in Firefox. (https://github.com/mozilla/gecko-dev/blob/2f9eacd9d3d995c937b4251a5557d95d494c9be1/layout/style/res/forms.css#L728-L737)
*/
:-moz-ui-invalid {
box-shadow: none;
}
/*
Add the correct vertical alignment in Chrome and Firefox.
*/
progress {
vertical-align: baseline;
}
/*
Correct the cursor style of increment and decrement buttons in Safari.
*/
::-webkit-inner-spin-button,
::-webkit-outer-spin-button {
height: auto;
}
/*
1. Correct the odd appearance in Chrome and Safari.
2. Correct the outline style in Safari.
*/
[type='search'] {
-webkit-appearance: textfield;
/* 1 */
outline-offset: -2px;
/* 2 */
}
/*
Remove the inner padding in Chrome and Safari on macOS.
*/
::-webkit-search-decoration {
-webkit-appearance: none;
}
/*
1. Correct the inability to style clickable types in iOS and Safari.
2. Change font properties to `inherit` in Safari.
*/
::-webkit-file-upload-button {
-webkit-appearance: button;
/* 1 */
font: inherit;
/* 2 */
}
/*
Add the correct display in Chrome and Safari.
*/
summary {
display: list-item;
}
/*
Removes the default spacing and border for appropriate elements.
*/
blockquote,
dl,
dd,
h1,
h2,
h3,
h4,
h5,
h6,
hr,
figure,
p,
pre {
margin: 0;
}
fieldset {
margin: 0;
padding: 0;
}
legend {
padding: 0;
}
ol,
ul,
menu {
list-style: none;
margin: 0;
padding: 0;
}
/*
Reset default styling for dialogs.
*/
dialog {
padding: 0;
}
/*
Prevent resizing textareas horizontally by default.
*/
textarea {
resize: vertical;
}
/*
1. Reset the default placeholder opacity in Firefox. (https://github.com/tailwindlabs/tailwindcss/issues/3300)
2. Set the default placeholder color to the user's configured gray 400 color.
*/
input::-moz-placeholder, textarea::-moz-placeholder {
opacity: 1;
/* 1 */
color: #9ca3af;
/* 2 */
}
input::placeholder,
textarea::placeholder {
opacity: 1;
/* 1 */
color: #9ca3af;
/* 2 */
}
/*
Set the default cursor for buttons.
*/
button,
[role="button"] {
cursor: pointer;
}
/*
Make sure disabled buttons don't get the pointer cursor.
*/
:disabled {
cursor: default;
}
/*
1. Make replaced elements `display: block` by default. (https://github.com/mozdevs/cssremedy/issues/14)
2. Add `vertical-align: middle` to align replaced elements more sensibly by default. (https://github.com/jensimmons/cssremedy/issues/14#issuecomment-634934210)
This can trigger a poorly considered lint error in some tools but is included by design.
*/
img,
svg,
video,
canvas,
audio,
iframe,
embed,
object {
display: block;
/* 1 */
vertical-align: middle;
/* 2 */
}
/*
Constrain images and videos to the parent width and preserve their intrinsic aspect ratio. (https://github.com/mozdevs/cssremedy/issues/14)
*/
img,
video {
max-width: 100%;
height: auto;
}
/* Make elements with the HTML hidden attribute stay hidden by default */
[hidden]:where(:not([hidden="until-found"])) {
display: none;
}
.static {
position: static;
}
.absolute {
position: absolute;
}
.relative {
position: relative;
}
.right-2 {
right: 0.5rem;
}
.right-6 {
right: 1.5rem;
}
.top-1\/2 {
top: 50%;
}
.top-4 {
top: 1rem;
}
.z-10 {
z-index: 10;
}
.mb-2 {
margin-bottom: 0.5rem;
}
.mb-4 {
margin-bottom: 1rem;
}
.mb-6 {
margin-bottom: 1.5rem;
}
.mb-8 {
margin-bottom: 2rem;
}
.ml-2 {
margin-left: 0.5rem;
}
.flex {
display: flex;
}
.hidden {
display: none;
}
.h-16 {
height: 4rem;
}
.h-24 {
height: 6rem;
}
.h-4 {
height: 1rem;
}
.h-5 {
height: 1.25rem;
}
.h-6 {
height: 1.5rem;
}
.min-h-screen {
min-height: 100vh;
}
.w-16 {
width: 4rem;
}
.w-20 {
width: 5rem;
}
.w-24 {
width: 6rem;
}
.w-4 {
width: 1rem;
}
.w-48 {
width: 12rem;
}
.w-5 {
width: 1.25rem;
}
.w-6 {
width: 1.5rem;
}
.w-full {
width: 100%;
}
.flex-1 {
flex: 1 1 0%;
}
.-translate-y-1\/2 {
--tw-translate-y: -50%;
transform: translate(var(--tw-translate-x), var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y));
}
.transform {
transform: translate(var(--tw-translate-x), var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y));
}
@keyframes spin {
to {
transform: rotate(360deg);
}
}
.animate-spin {
animation: spin 1s linear infinite;
}
.flex-col {
flex-direction: column;
}
.items-center {
align-items: center;
}
.justify-center {
justify-content: center;
}
.space-x-2 > :not([hidden]) ~ :not([hidden]) {
--tw-space-x-reverse: 0;
margin-right: calc(0.5rem * var(--tw-space-x-reverse));
margin-left: calc(0.5rem * calc(1 - var(--tw-space-x-reverse)));
}
.space-x-4 > :not([hidden]) ~ :not([hidden]) {
--tw-space-x-reverse: 0;
margin-right: calc(1rem * var(--tw-space-x-reverse));
margin-left: calc(1rem * calc(1 - var(--tw-space-x-reverse)));
}
.space-x-8 > :not([hidden]) ~ :not([hidden]) {
--tw-space-x-reverse: 0;
margin-right: calc(2rem * var(--tw-space-x-reverse));
margin-left: calc(2rem * calc(1 - var(--tw-space-x-reverse)));
}
.space-y-4 > :not([hidden]) ~ :not([hidden]) {
--tw-space-y-reverse: 0;
margin-top: calc(1rem * calc(1 - var(--tw-space-y-reverse)));
margin-bottom: calc(1rem * var(--tw-space-y-reverse));
}
.space-y-6 > :not([hidden]) ~ :not([hidden]) {
--tw-space-y-reverse: 0;
margin-top: calc(1.5rem * calc(1 - var(--tw-space-y-reverse)));
margin-bottom: calc(1.5rem * var(--tw-space-y-reverse));
}
.rounded {
border-radius: 0.25rem;
}
.rounded-full {
border-radius: 9999px;
}
.rounded-lg {
border-radius: 0.5rem;
}
.rounded-md {
border-radius: 0.375rem;
}
.border {
border-width: 1px;
}
.border-b {
border-bottom-width: 1px;
}
.border-b-2 {
border-bottom-width: 2px;
}
.border-black {
--tw-border-opacity: 1;
border-color: rgb(0 0 0 / var(--tw-border-opacity, 1));
}
.border-gray-200 {
--tw-border-opacity: 1;
border-color: rgb(229 231 235 / var(--tw-border-opacity, 1));
}
.bg-\[\#111827\] {
--tw-bg-opacity: 1;
background-color: rgb(17 24 39 / var(--tw-bg-opacity, 1));
}
.bg-gray-100 {
--tw-bg-opacity: 1;
background-color: rgb(243 244 246 / var(--tw-bg-opacity, 1));
}
.bg-gray-900 {
--tw-bg-opacity: 1;
background-color: rgb(17 24 39 / var(--tw-bg-opacity, 1));
}
.bg-red-500 {
--tw-bg-opacity: 1;
background-color: rgb(239 68 68 / var(--tw-bg-opacity, 1));
}
.bg-white {
--tw-bg-opacity: 1;
background-color: rgb(255 255 255 / var(--tw-bg-opacity, 1));
}
.p-2 {
padding: 0.5rem;
}
.p-4 {
padding: 1rem;
}
.p-8 {
padding: 2rem;
}
.px-1 {
padding-left: 0.25rem;
padding-right: 0.25rem;
}
.px-3 {
padding-left: 0.75rem;
padding-right: 0.75rem;
}
.px-4 {
padding-left: 1rem;
padding-right: 1rem;
}
.py-0\.5 {
padding-top: 0.125rem;
padding-bottom: 0.125rem;
}
.py-2 {
padding-top: 0.5rem;
padding-bottom: 0.5rem;
}
.py-4 {
padding-top: 1rem;
padding-bottom: 1rem;
}
.py-6 {
padding-top: 1.5rem;
padding-bottom: 1.5rem;
}
.pr-10 {
padding-right: 2.5rem;
}
.text-2xl {
font-size: 1.5rem;
line-height: 2rem;
}
.text-base {
font-size: 1rem;
line-height: 1.5rem;
}
.text-sm {
font-size: 0.875rem;
line-height: 1.25rem;
}
.font-medium {
font-weight: 500;
}
.font-semibold {
font-weight: 600;
}
.text-blue-500 {
--tw-text-opacity: 1;
color: rgb(59 130 246 / var(--tw-text-opacity, 1));
}
.text-blue-600 {
--tw-text-opacity: 1;
color: rgb(37 99 235 / var(--tw-text-opacity, 1));
}
.text-gray-400 {
--tw-text-opacity: 1;
color: rgb(156 163 175 / var(--tw-text-opacity, 1));
}
.text-gray-500 {
--tw-text-opacity: 1;
color: rgb(107 114 128 / var(--tw-text-opacity, 1));
}
.text-gray-600 {
--tw-text-opacity: 1;
color: rgb(75 85 99 / var(--tw-text-opacity, 1));
}
.text-white {
--tw-text-opacity: 1;
color: rgb(255 255 255 / var(--tw-text-opacity, 1));
}
.opacity-25 {
opacity: 0.25;
}
.opacity-75 {
opacity: 0.75;
}
.filter {
filter: var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow);
}
.hover\:bg-\[\#1f2937\]:hover {
--tw-bg-opacity: 1;
background-color: rgb(31 41 55 / var(--tw-bg-opacity, 1));
}
.hover\:bg-gray-100:hover {
--tw-bg-opacity: 1;
background-color: rgb(243 244 246 / var(--tw-bg-opacity, 1));
}
.hover\:bg-red-600:hover {
--tw-bg-opacity: 1;
background-color: rgb(220 38 38 / var(--tw-bg-opacity, 1));
}
.hover\:text-gray-700:hover {
--tw-text-opacity: 1;
color: rgb(55 65 81 / var(--tw-text-opacity, 1));
}
.hover\:text-gray-900:hover {
--tw-text-opacity: 1;
color: rgb(17 24 39 / var(--tw-text-opacity, 1));
}
.focus\:outline-none:focus {
outline: 2px solid transparent;
outline-offset: 2px;
}
.focus\:ring-2:focus {
--tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
--tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color);
box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
}
.focus\:ring-blue-500:focus {
--tw-ring-opacity: 1;
--tw-ring-color: rgb(59 130 246 / var(--tw-ring-opacity, 1));
}


@@ -1 +0,0 @@
rustfs bin path, do not delete


@@ -1,19 +0,0 @@
/**
* Copyright 2024 RustFS Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
@tailwind base;
@tailwind components;
@tailwind utilities;


@@ -1,330 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::components::navbar::LoadingSpinner;
use crate::route::Route;
use crate::utils::{RustFSConfig, ServiceManager};
use chrono::Datelike;
use dioxus::logger::tracing::debug;
use dioxus::prelude::*;
use std::time::Duration;
const HEADER_LOGO: Asset = asset!("/assets/rustfs-logo.svg");
const TAILWIND_CSS: Asset = asset!("/assets/tailwind.css");
/// Define the state of the service
#[derive(PartialEq, Debug, Clone)]
enum ServiceState {
Start,
Stop,
}
/// Define the Home component
/// The Home component is the main component of the application
/// It is responsible for starting and stopping the service
/// It also displays the service status and provides a button to toggle the service
/// The Home component also displays the footer of the application
/// The footer contains links to the official site, documentation, GitHub, and license
/// The footer also displays the version of the application
/// The Home component also contains a button to change the theme of the application
/// The Home component also contains a button to go to the settings page
#[component]
pub fn Home() -> Element {
#[allow(clippy::redundant_closure)]
let service = use_signal(|| ServiceManager::new());
let conf = RustFSConfig::load().unwrap_or_else(|e| {
ServiceManager::show_error(&format!("load config failed: {e}"));
RustFSConfig::default()
});
debug!("loaded configurations: {:?}", conf);
let config = use_signal(|| conf.clone());
use dioxus_router::prelude::Link;
use document::{Meta, Stylesheet, Title};
let mut service_state = use_signal(|| ServiceState::Start);
// Create a periodic check on the effect of the service status
use_effect(move || {
spawn(async move {
loop {
if let Some(pid) = ServiceManager::check_service_status().await {
debug!("service_running true pid: {:?}", pid);
service_state.set(ServiceState::Stop);
} else {
debug!("service_running true pid: 0");
service_state.set(ServiceState::Start);
}
tokio::time::sleep(Duration::from_secs(2)).await;
}
});
});
debug!("project start service_state: {:?}", service_state.read());
// Use 'use_signal' to manage service status
let mut loading = use_signal(|| false);
let mut start_service = move |_| {
let service = service;
let config = config.read().clone();
let mut service_state = service_state;
// set the loading status
loading.set(true);
debug!("stop loading_state: {:?}", loading.read());
spawn(async move {
match service.read().start(config).await {
Ok(result) => {
if result.success {
let duration = result.end_time - result.start_time;
debug!("The service starts successfully and takes a long time:{}ms", duration.num_milliseconds());
service_state.set(ServiceState::Stop);
} else {
ServiceManager::show_error(&result.message);
service_state.set(ServiceState::Start);
}
}
Err(e) => {
ServiceManager::show_error(&format!("start service failed: {e}"));
}
}
// Only set loading to false when it's actually done
loading.set(false);
debug!("start loading_state: {:?}", loading.read());
});
};
let mut stop_service = move |_| {
let service = service;
let mut service_state = service_state;
// set the loading status
loading.set(true);
spawn(async move {
match service.read().stop().await {
Ok(result) => {
if result.success {
let duration = result.end_time - result.start_time;
debug!("The service stops successfully and takes a long time:{}ms", duration.num_milliseconds());
service_state.set(ServiceState::Start);
} else {
ServiceManager::show_error(&result.message);
}
}
Err(e) => {
ServiceManager::show_error(&format!("stop service failed: {e}"));
}
}
debug!("service_state: {:?}", service_state.read());
// Only set loading to false when it's actually done
loading.set(false);
debug!("stop loading_state: {:?}", loading.read());
});
};
// Toggle the state when the button is clicked
let toggle_service = {
let mut service_state = service_state;
debug!("toggle_service service_state: {:?}", service_state.read());
move |_| {
if service_state.read().eq(&ServiceState::Stop) {
// If the service status is started, you need to run a command to stop the service
stop_service(());
service_state.set(ServiceState::Start);
} else {
start_service(());
service_state.set(ServiceState::Stop);
}
}
};
// Define dynamic styles based on state
let button_class = if service_state.read().eq(&ServiceState::Start) {
"bg-[#111827] hover:bg-[#1f2937] text-white px-4 py-2 rounded-md flex items-center space-x-2"
} else {
"bg-red-500 hover:bg-red-600 text-white px-4 py-2 rounded-md flex items-center space-x-2"
};
rsx! {
// The Stylesheet component inserts a style link into the head of the document
Stylesheet {href: TAILWIND_CSS,}
Title { "RustFS APP" }
Meta {
name: "description",
// TODO: translate to english
content: "RustFS RustFS 用热门安全的 Rust 语言开发,兼容 S3 协议。适用于 AI/ML 及海量数据存储、大数据、互联网、工业和保密存储等全部场景。近乎免费使用。遵循 Apache 2 协议,支持国产保密设备和系统。",
}
div { class: "min-h-screen flex flex-col items-center bg-white",
div { class: "absolute top-4 right-6 flex space-x-2",
// change theme
button { class: "p-2 hover:bg-gray-100 rounded-lg", ChangeThemeButton {} }
// setting button
Link {
class: "p-2 hover:bg-gray-100 rounded-lg",
to: Route::SettingViews {},
SettingButton {}
}
}
main { class: "flex-1 flex flex-col items-center justify-center space-y-6 p-4",
div { class: "w-24 h-24 bg-gray-900 rounded-full flex items-center justify-center",
img { alt: "Logo", class: "w-16 h-16", src: HEADER_LOGO }
}
div { class: "text-gray-600",
"Service is running on "
span { class: "text-blue-600", " 127.0.0.1:9000 " }
}
LoadingSpinner {
loading: loading.read().to_owned(),
text: "processing...",
}
button { class: button_class, onclick: toggle_service,
svg {
class: "h-4 w-4",
fill: "none",
stroke: "currentColor",
view_box: "0 0 24 24",
xmlns: "http://www.w3.org/2000/svg",
if service_state.read().eq(&ServiceState::Start) {
path {
d: "M14.752 11.168l-3.197-2.132A1 1 0 0010 9.87v4.263a1 1 0 001.555.832l3.197-2.132a1 1 0 000-1.664z",
stroke_linecap: "round",
stroke_linejoin: "round",
stroke_width: "2",
}
path {
d: "M21 12a9 9 0 11-18 0 9 9 0 0118 0z",
stroke_linecap: "round",
stroke_linejoin: "round",
stroke_width: "2",
}
} else {
path {
stroke_linecap: "round",
stroke_linejoin: "round",
stroke_width: "2",
d: "M21 12a9 9 0 11-18 0 9 9 0 0118 0z",
}
path {
stroke_linecap: "round",
stroke_linejoin: "round",
stroke_width: "2",
d: "M9 10h6v4H9z",
}
}
}
span { id: "serviceStatus",
if service_state.read().eq(&ServiceState::Start) {
"Start service"
} else {
"Stop service"
}
}
}
}
Footer { version: "v1.0.0".to_string() }
}
}
}
#[component]
pub fn Footer(version: String) -> Element {
let now = chrono::Local::now();
let year = now.naive_local().year();
rsx! {
footer { class: "w-full py-6 flex flex-col items-center space-y-4 mb-6",
nav { class: "flex space-x-4 text-gray-600",
a { class: "hover:text-gray-900", href: "https://rustfs.com", "Official Site" }
a {
class: "hover:text-gray-900",
href: "https://rustfs.com/docs",
"Documentation"
}
a {
class: "hover:text-gray-900",
href: "https://github.com/rustfs/rustfs",
"GitHub"
}
a {
class: "hover:text-gray-900",
href: "https://rustfs.com/docs/license/",
"License"
}
a { class: "hover:text-gray-900", href: "#", "Sponsors" }
}
div { class: "text-gray-500 text-sm", " © rustfs.com {year}, All rights reserved." }
div { class: "text-gray-400 text-sm mb-8", " version {version} " }
}
}
}
#[component]
pub fn GoBackButtons() -> Element {
rsx! {
button {
class: "p-2 hover:bg-gray-100 rounded-lg",
"onclick": "window.history.back()",
"Back to the Past"
}
}
}
#[component]
pub fn GoForwardButtons() -> Element {
rsx! {
button {
class: "p-2 hover:bg-gray-100 rounded-lg",
"onclick": "window.history.forward()",
"Back to the Future"
}
}
}
#[component]
pub fn ChangeThemeButton() -> Element {
rsx! {
svg {
class: "h-6 w-6 text-gray-600",
fill: "none",
stroke: "currentColor",
view_box: "0 0 24 24",
xmlns: "http://www.w3.org/2000/svg",
path {
d: "M9 3v2m6-2v2M9 19v2m6-2v2M5 9H3m2 6H3m18-6h-2m2 6h-2M7 19h10a2 2 0 002-2V7a2 2 0 00-2-2H7a2 2 0 00-2 2v10a2 2 0 002 2zM9 9h6v6H9V9z",
stroke_linecap: "round",
stroke_linejoin: "round",
stroke_width: "2",
}
}
}
}
#[component]
pub fn SettingButton() -> Element {
rsx! {
svg {
class: "h-6 w-6 text-gray-600",
fill: "none",
stroke: "currentColor",
view_box: "0 0 24 24",
xmlns: "http://www.w3.org/2000/svg",
path {
d: "M10.325 4.317c.426-1.756 2.924-1.756 3.35 0a1.724 1.724 0 002.573 1.066c1.543-.94 3.31.826 2.37 2.37a1.724 1.724 0 001.065 2.572c1.756.426 1.756 2.924 0 3.35a1.724 1.724 0 00-1.066 2.573c.94 1.543-.826 3.31-2.37 2.37a1.724 1.724 0 00-2.572 1.065c-.426 1.756-2.924 1.756-3.35 0a1.724 1.724 0 00-2.573-1.066c-1.543.94-3.31-.826-2.37-2.37a1.724 1.724 0 00-1.065-2.572c-1.756-.426-1.756-2.924 0-3.35a1.724 1.724 0 001.066-2.573c-.94-1.543.826-3.31 2.37-2.37.996.608 2.296.07 2.572-1.065z",
stroke_linecap: "round",
stroke_linejoin: "round",
stroke_width: "2",
}
path {
d: "M15 12a3 3 0 11-6 0 3 3 0 016 0z",
stroke_linecap: "round",
stroke_linejoin: "round",
stroke_width: "2",
}
}
}
}


@@ -1,20 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
mod home;
pub use home::Home;
mod navbar;
pub use navbar::Navbar;
mod setting;
pub use setting::Setting;


@@ -1,74 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::route::Route;
use dioxus::logger::tracing::debug;
use dioxus::prelude::*;
const NAVBAR_CSS: Asset = asset!("/assets/styling/navbar.css");
#[component]
pub fn Navbar() -> Element {
rsx! {
document::Link { rel: "stylesheet", href: NAVBAR_CSS }
div { id: "navbar", class: "hidden", style: "display: none;",
Link { to: Route::HomeViews {}, "Home" }
Link { to: Route::SettingViews {}, "Setting" }
}
Outlet::<Route> {}
}
}
#[derive(Props, PartialEq, Debug, Clone)]
pub struct LoadingSpinnerProps {
#[props(default = true)]
loading: bool,
#[props(default = "正在处理中...")]
text: &'static str,
}
#[component]
pub fn LoadingSpinner(props: LoadingSpinnerProps) -> Element {
debug!("loading: {}", props.loading);
if !props.loading {
debug!("LoadingSpinner false loading: {}", props.loading);
return rsx! {};
}
rsx! {
div { class: "flex items-center justify-center z-10",
svg {
class: "animate-spin h-5 w-5 text-blue-500",
xmlns: "http://www.w3.org/2000/svg",
fill: "none",
view_box: "0 0 24 24",
circle {
class: "opacity-25",
cx: "12",
cy: "12",
r: "10",
stroke: "currentColor",
stroke_width: "4",
}
path {
class: "opacity-75",
fill: "currentColor",
d: "M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z",
}
}
span { class: "ml-2 text-gray-600", "{props.text}" }
}
}
}


@@ -1,216 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::components::navbar::LoadingSpinner;
use dioxus::logger::tracing::{debug, error};
use dioxus::prelude::*;
const SETTINGS_JS: Asset = asset!("/assets/js/sts.js");
const TAILWIND_CSS: Asset = asset!("/assets/tailwind.css");
#[component]
pub fn Setting() -> Element {
use crate::utils::{RustFSConfig, ServiceManager};
use document::{Meta, Script, Stylesheet, Title};
#[allow(clippy::redundant_closure)]
let service = use_signal(|| ServiceManager::new());
let conf = RustFSConfig::load().unwrap_or_else(|e| {
error!("load config error: {}", e);
RustFSConfig::default_config()
});
debug!("conf address: {:?}", conf.clone().address);
let config = use_signal(|| conf.clone());
let address_state = use_signal(|| conf.address.to_string());
let mut host_state = use_signal(|| conf.host.to_string());
let mut port_state = use_signal(|| conf.port.to_string());
let mut access_key_state = use_signal(|| conf.access_key.to_string());
let mut secret_key_state = use_signal(|| conf.secret_key.to_string());
let mut volume_name_state = use_signal(|| conf.volume_name.to_string());
let loading = use_signal(|| false);
let save_and_restart = {
let host_state = host_state;
let port_state = port_state;
let access_key_state = access_key_state;
let secret_key_state = secret_key_state;
let volume_name_state = volume_name_state;
let mut loading = loading;
debug!("save_and_restart access_key:{}", access_key_state.read());
move |_| {
// set the loading status
loading.set(true);
let mut config = config;
config.write().address = format!("{}:{}", host_state.read(), port_state.read());
config.write().host = host_state.read().to_string();
config.write().port = port_state.read().to_string();
config.write().access_key = access_key_state.read().to_string();
config.write().secret_key = secret_key_state.read().to_string();
config.write().volume_name = volume_name_state.read().to_string();
// restart service
let service = service;
let config = config.read().clone();
spawn(async move {
if let Err(e) = service.read().restart(config).await {
ServiceManager::show_error(&format!("发送重启命令失败:{e}"));
}
// reset the status when you're done
loading.set(false);
});
}
};
rsx! {
Title { "Settings - RustFS App" }
Meta { name: "description", content: "Settings - RustFS App." }
// The Stylesheet component inserts a style link into the head of the document
Stylesheet { href: TAILWIND_CSS }
Script { src: SETTINGS_JS }
div { class: "bg-white p-8",
h1 { class: "text-2xl font-semibold mb-6", "Settings" }
div { class: "border-b border-gray-200 mb-6",
nav { class: "flex space-x-8",
button {
class: "tab-btn px-1 py-4 text-sm font-medium border-b-2 border-black",
"data-tab": "service",
"onclick": "switchTab('service')",
"Service "
}
button {
class: "tab-btn px-1 py-4 text-sm font-medium text-gray-500 hover:text-gray-700",
"data-tab": "user",
"onclick": "switchTab('user')",
"User "
}
button {
class: "tab-btn px-1 py-4 text-sm font-medium text-gray-500 hover:text-gray-700 hidden",
"data-tab": "logs",
"onclick": "switchTab('logs')",
"Logs "
}
}
}
div { id: "tabContent",
div { class: "tab-content", id: "service",
div { class: "mb-8",
h2 { class: "text-base font-medium mb-2", "Service address" }
p { class: "text-gray-600 mb-4",
" The service address is the IP address and port number of the service. the default address is "
code { class: "bg-gray-100 px-1 py-0.5 rounded", {address_state} }
". "
}
div { class: "flex space-x-2",
input {
class: "border rounded px-3 py-2 w-48 focus:outline-none focus:ring-2 focus:ring-blue-500",
r#type: "text",
value: host_state,
oninput: move |evt| host_state.set(evt.value().clone()),
}
span { class: "flex items-center", ":" }
input {
class: "border rounded px-3 py-2 w-20 focus:outline-none focus:ring-2 focus:ring-blue-500",
r#type: "text",
value: port_state,
oninput: move |evt| port_state.set(evt.value().clone()),
}
}
}
div { class: "mb-8",
h2 { class: "text-base font-medium mb-2", "Storage path" }
p { class: "text-gray-600 mb-4",
"Update the storage path of the service. the default path is {volume_name_state}."
}
input {
class: "border rounded px-3 py-2 w-full focus:outline-none focus:ring-2 focus:ring-blue-500",
r#type: "text",
value: volume_name_state,
oninput: move |evt| volume_name_state.set(evt.value().clone()),
}
}
}
div { class: "tab-content hidden", id: "user",
div { class: "mb-8",
h2 { class: "text-base font-medium mb-2", "User" }
p { class: "text-gray-600 mb-4",
"The user is the owner of the service. the default user is "
code { class: "bg-gray-100 px-1 py-0.5 rounded", {access_key_state} }
}
input {
class: "border rounded px-3 py-2 w-full focus:outline-none focus:ring-2 focus:ring-blue-500",
r#type: "text",
value: access_key_state,
oninput: move |evt| access_key_state.set(evt.value().clone()),
}
}
div { class: "mb-8",
h2 { class: "text-base font-medium mb-2", "Password" }
p { class: "text-gray-600 mb-4",
"The password is the password of the user. the default password is "
code { class: "bg-gray-100 px-1 py-0.5 rounded", {secret_key_state} }
}
div { class: "relative",
input {
class: "border rounded px-3 py-2 w-full pr-10 focus:outline-none focus:ring-2 focus:ring-blue-500",
r#type: "password",
value: secret_key_state,
oninput: move |evt| secret_key_state.set(evt.value().clone()),
}
button {
class: "absolute right-2 top-1/2 transform -translate-y-1/2 text-gray-500 hover:text-gray-700",
"onclick": "togglePassword(this)",
svg {
class: "h-5 w-5",
fill: "currentColor",
view_box: "0 0 20 20",
xmlns: "http://www.w3.org/2000/svg",
path { d: "M10 12a2 2 0 100-4 2 2 0 000 4z" }
path {
clip_rule: "evenodd",
d: "M.458 10C1.732 5.943 5.522 3 10 3s8.268 2.943 9.542 7c-1.274 4.057-5.064 7-9.542 7S1.732 14.057.458 10zM14 10a4 4 0 11-8 0 4 4 0 018 0z",
fill_rule: "evenodd",
}
}
}
}
}
}
div { class: "tab-content hidden", id: "logs",
div { class: "mb-8",
h2 { class: "text-base font-medium mb-2", "Logs storage path" }
p { class: "text-gray-600 mb-4",
"The logs storage path is the path where the logs are stored. the default path is /var/log/rustfs. "
}
input {
class: "border rounded px-3 py-2 w-full focus:outline-none focus:ring-2 focus:ring-blue-500",
r#type: "text",
value: "/var/logs/rustfs",
}
}
}
}
div { class: "flex space-x-4",
button {
class: "bg-[#111827] text-white px-4 py-2 rounded hover:bg-[#1f2937]",
onclick: save_and_restart,
" Save and restart "
}
GoBackButton { "Back" }
}
LoadingSpinner {
loading: loading.read().to_owned(),
text: "服务处理中...",
}
}
}
}


@@ -1,564 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use keyring::Entry;
use serde::{Deserialize, Serialize};
use std::error::Error;
/// Configuration for the RustFS service
///
/// # Fields
/// * `address` - The address of the RustFS service
/// * `host` - The host of the RustFS service
/// * `port` - The port of the RustFS service
/// * `access_key` - The access key of the RustFS service
/// * `secret_key` - The secret key of the RustFS service
/// * `domain_name` - The domain name of the RustFS service
/// * `volume_name` - The volume name of the RustFS service
/// * `console_address` - The console address of the RustFS service
///
/// # Example
/// ```
/// let config = RustFSConfig {
/// address: "127.0.0.1:9000".to_string(),
/// host: "127.0.0.1".to_string(),
/// port: "9000".to_string(),
/// access_key: "rustfsadmin".to_string(),
/// secret_key: "rustfsadmin".to_string(),
/// domain_name: "demo.rustfs.com".to_string(),
/// volume_name: "data".to_string(),
/// console_address: "127.0.0.1:9001".to_string(),
/// };
/// println!("{:?}", config);
/// assert_eq!(config.address, "127.0.0.1:9000");
/// ```
#[derive(Debug, Clone, Default, Deserialize, Serialize, Ord, PartialOrd, Eq, PartialEq)]
pub struct RustFSConfig {
pub address: String,
pub host: String,
pub port: String,
pub access_key: String,
pub secret_key: String,
pub domain_name: String,
pub volume_name: String,
pub console_address: String,
}
impl RustFSConfig {
/// keyring the name of the service
const SERVICE_NAME: &'static str = "rustfs-service";
/// keyring the key of the service
const SERVICE_KEY: &'static str = "rustfs_key";
/// default domain name
const DEFAULT_DOMAIN_NAME_VALUE: &'static str = "demo.rustfs.com";
/// default address value
const DEFAULT_ADDRESS_VALUE: &'static str = "127.0.0.1:9000";
/// default port value
const DEFAULT_PORT_VALUE: &'static str = "9000";
/// default host value
const DEFAULT_HOST_VALUE: &'static str = "127.0.0.1";
/// default access key value
const DEFAULT_ACCESS_KEY_VALUE: &'static str = "rustfsadmin";
/// default secret key value
const DEFAULT_SECRET_KEY_VALUE: &'static str = "rustfsadmin";
/// default console address value
const DEFAULT_CONSOLE_ADDRESS_VALUE: &'static str = "127.0.0.1:9001";
/// get the default volume_name
///
/// # Returns
/// * The default volume name
///
/// # Example
/// ```
/// let volume_name = RustFSConfig::default_volume_name();
/// ```
pub fn default_volume_name() -> String {
dirs::home_dir()
.map(|home| home.join("rustfs").join("data"))
.and_then(|path| path.to_str().map(String::from))
.unwrap_or_else(|| "data".to_string())
}
/// create a default configuration
///
/// # Returns
/// * The default configuration
///
/// # Example
/// ```
/// let config = RustFSConfig::default_config();
/// println!("{:?}", config);
/// assert_eq!(config.address, "127.0.0.1:9000");
/// ```
pub fn default_config() -> Self {
Self {
address: Self::DEFAULT_ADDRESS_VALUE.to_string(),
host: Self::DEFAULT_HOST_VALUE.to_string(),
port: Self::DEFAULT_PORT_VALUE.to_string(),
access_key: Self::DEFAULT_ACCESS_KEY_VALUE.to_string(),
secret_key: Self::DEFAULT_SECRET_KEY_VALUE.to_string(),
domain_name: Self::DEFAULT_DOMAIN_NAME_VALUE.to_string(),
volume_name: Self::default_volume_name(),
console_address: Self::DEFAULT_CONSOLE_ADDRESS_VALUE.to_string(),
}
}
/// Load the configuration from the keyring
///
/// # Errors
/// * If the configuration cannot be loaded from the keyring
/// * If the configuration cannot be deserialized
/// * If the address cannot be extracted from the configuration
///
/// # Example
/// ```
/// let config = RustFSConfig::load().unwrap();
/// println!("{:?}", config);
/// assert_eq!(config.address, "127.0.0.1:9000");
/// ```
pub fn load() -> Result<Self, Box<dyn Error>> {
let mut config = Self::default_config();
// Try to get the configuration of the storage from the keyring
let entry = Entry::new(Self::SERVICE_NAME, Self::SERVICE_KEY)?;
if let Ok(stored_json) = entry.get_password() {
if let Ok(stored_config) = serde_json::from_str::<RustFSConfig>(&stored_json) {
// update fields that are not empty and non default
if !stored_config.address.is_empty() && stored_config.address != Self::DEFAULT_ADDRESS_VALUE {
config.address = stored_config.address;
let (host, port) = Self::extract_host_port(config.address.as_str())
.ok_or_else(|| format!("无法从地址 '{}' 中提取主机和端口", config.address))?;
config.host = host.to_string();
config.port = port.to_string();
}
if !stored_config.access_key.is_empty() && stored_config.access_key != Self::DEFAULT_ACCESS_KEY_VALUE {
config.access_key = stored_config.access_key;
}
if !stored_config.secret_key.is_empty() && stored_config.secret_key != Self::DEFAULT_SECRET_KEY_VALUE {
config.secret_key = stored_config.secret_key;
}
if !stored_config.domain_name.is_empty() && stored_config.domain_name != Self::DEFAULT_DOMAIN_NAME_VALUE {
config.domain_name = stored_config.domain_name;
}
// The stored volume_name is updated only if it is not empty and different from the default
if !stored_config.volume_name.is_empty() && stored_config.volume_name != Self::default_volume_name() {
config.volume_name = stored_config.volume_name;
}
if !stored_config.console_address.is_empty()
&& stored_config.console_address != Self::DEFAULT_CONSOLE_ADDRESS_VALUE
{
config.console_address = stored_config.console_address;
}
}
}
Ok(config)
}
/// Auxiliary method: Extract the host and port from the address string
/// # Arguments
/// * `address` - The address string
///
/// # Returns
/// * `Some((host, port))` - The host and port
///
/// # Errors
/// * If the address is not in the form 'host:port'
/// * If the port is not a valid u16
///
/// # Example
/// ```
/// let (host, port) = RustFSConfig::extract_host_port("127.0.0.1:9000").unwrap();
/// assert_eq!(host, "127.0.0.1");
/// assert_eq!(port, 9000);
/// ```
pub fn extract_host_port(address: &str) -> Option<(&str, u16)> {
let parts: Vec<&str> = address.split(':').collect();
if parts.len() == 2 {
if let Ok(port) = parts[1].parse::<u16>() {
return Some((parts[0], port));
}
}
None
}
/// save the configuration to keyring
///
/// # Errors
/// * If the configuration cannot be serialized
/// * If the configuration cannot be saved to the keyring
///
/// # Example
/// ```
/// let config = RustFSConfig::default_config();
/// config.save().unwrap();
/// ```
pub fn save(&self) -> Result<(), Box<dyn Error>> {
let entry = Entry::new(Self::SERVICE_NAME, Self::SERVICE_KEY)?;
let json = serde_json::to_string(self)?;
entry.set_password(&json)?;
Ok(())
}
/// Clear the stored configuration from the system keyring
///
/// # Returns
/// `Ok(())` if the configuration was successfully cleared, or an error if the operation failed.
///
/// # Example
/// ```
/// RustFSConfig::clear().unwrap();
/// ```
#[allow(dead_code)]
pub fn clear() -> Result<(), Box<dyn Error>> {
let entry = Entry::new(Self::SERVICE_NAME, Self::SERVICE_KEY)?;
entry.delete_credential()?;
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_rustfs_config_default() {
let config = RustFSConfig::default();
assert!(config.address.is_empty());
assert!(config.host.is_empty());
assert!(config.port.is_empty());
assert!(config.access_key.is_empty());
assert!(config.secret_key.is_empty());
assert!(config.domain_name.is_empty());
assert!(config.volume_name.is_empty());
assert!(config.console_address.is_empty());
}
#[test]
fn test_rustfs_config_creation() {
let config = RustFSConfig {
address: "192.168.1.100:9000".to_string(),
host: "192.168.1.100".to_string(),
port: "9000".to_string(),
access_key: "testuser".to_string(),
secret_key: "testpass".to_string(),
domain_name: "test.rustfs.com".to_string(),
volume_name: "/data/rustfs".to_string(),
console_address: "192.168.1.100:9001".to_string(),
};
assert_eq!(config.address, "192.168.1.100:9000");
assert_eq!(config.host, "192.168.1.100");
assert_eq!(config.port, "9000");
assert_eq!(config.access_key, "testuser");
assert_eq!(config.secret_key, "testpass");
assert_eq!(config.domain_name, "test.rustfs.com");
assert_eq!(config.volume_name, "/data/rustfs");
assert_eq!(config.console_address, "192.168.1.100:9001");
}
#[test]
fn test_default_volume_name() {
let volume_name = RustFSConfig::default_volume_name();
assert!(!volume_name.is_empty());
// Should either be the home directory path or fallback to "data"
assert!(volume_name.contains("rustfs") || volume_name == "data");
}
#[test]
fn test_default_config() {
let config = RustFSConfig::default_config();
assert_eq!(config.address, RustFSConfig::DEFAULT_ADDRESS_VALUE);
assert_eq!(config.host, RustFSConfig::DEFAULT_HOST_VALUE);
assert_eq!(config.port, RustFSConfig::DEFAULT_PORT_VALUE);
assert_eq!(config.access_key, RustFSConfig::DEFAULT_ACCESS_KEY_VALUE);
assert_eq!(config.secret_key, RustFSConfig::DEFAULT_SECRET_KEY_VALUE);
assert_eq!(config.domain_name, RustFSConfig::DEFAULT_DOMAIN_NAME_VALUE);
assert_eq!(config.console_address, RustFSConfig::DEFAULT_CONSOLE_ADDRESS_VALUE);
assert!(!config.volume_name.is_empty());
}
#[test]
fn test_extract_host_port_valid() {
let test_cases = vec![
("127.0.0.1:9000", Some(("127.0.0.1", 9000))),
("localhost:8080", Some(("localhost", 8080))),
("192.168.1.100:3000", Some(("192.168.1.100", 3000))),
("0.0.0.0:80", Some(("0.0.0.0", 80))),
("example.com:443", Some(("example.com", 443))),
];
for (input, expected) in test_cases {
let result = RustFSConfig::extract_host_port(input);
assert_eq!(result, expected, "Failed for input: {input}");
}
}
#[test]
fn test_extract_host_port_invalid() {
let invalid_cases = vec![
"127.0.0.1", // Missing port
"127.0.0.1:", // Empty port
"127.0.0.1:abc", // Invalid port
"127.0.0.1:99999", // Port out of range
"", // Empty string
"127.0.0.1:9000:extra", // Too many parts
"invalid", // No colon
];
for input in invalid_cases {
let result = RustFSConfig::extract_host_port(input);
assert_eq!(result, None, "Should be None for input: {input}");
}
// Special case: empty host but valid port should still work
let result = RustFSConfig::extract_host_port(":9000");
assert_eq!(result, Some(("", 9000)));
}
#[test]
fn test_extract_host_port_edge_cases() {
// Test edge cases for port numbers
assert_eq!(RustFSConfig::extract_host_port("host:0"), Some(("host", 0)));
assert_eq!(RustFSConfig::extract_host_port("host:65535"), Some(("host", 65535)));
assert_eq!(RustFSConfig::extract_host_port("host:65536"), None); // Out of range
}
#[test]
fn test_serialization() {
let config = RustFSConfig {
address: "127.0.0.1:9000".to_string(),
host: "127.0.0.1".to_string(),
port: "9000".to_string(),
access_key: "admin".to_string(),
secret_key: "password".to_string(),
domain_name: "test.com".to_string(),
volume_name: "/data".to_string(),
console_address: "127.0.0.1:9001".to_string(),
};
let json = serde_json::to_string(&config).unwrap();
assert!(json.contains("127.0.0.1:9000"));
assert!(json.contains("admin"));
assert!(json.contains("test.com"));
}
#[test]
fn test_deserialization() {
let json = r#"{
"address": "192.168.1.100:9000",
"host": "192.168.1.100",
"port": "9000",
"access_key": "testuser",
"secret_key": "testpass",
"domain_name": "example.com",
"volume_name": "/opt/data",
"console_address": "192.168.1.100:9001"
}"#;
let config: RustFSConfig = serde_json::from_str(json).unwrap();
assert_eq!(config.address, "192.168.1.100:9000");
assert_eq!(config.host, "192.168.1.100");
assert_eq!(config.port, "9000");
assert_eq!(config.access_key, "testuser");
assert_eq!(config.secret_key, "testpass");
assert_eq!(config.domain_name, "example.com");
assert_eq!(config.volume_name, "/opt/data");
assert_eq!(config.console_address, "192.168.1.100:9001");
}
#[test]
fn test_serialization_deserialization_roundtrip() {
let original_config = RustFSConfig {
address: "10.0.0.1:8080".to_string(),
host: "10.0.0.1".to_string(),
port: "8080".to_string(),
access_key: "roundtrip_user".to_string(),
secret_key: "roundtrip_pass".to_string(),
domain_name: "roundtrip.test".to_string(),
volume_name: "/tmp/roundtrip".to_string(),
console_address: "10.0.0.1:8081".to_string(),
};
let json = serde_json::to_string(&original_config).unwrap();
let deserialized_config: RustFSConfig = serde_json::from_str(&json).unwrap();
assert_eq!(original_config, deserialized_config);
}
#[test]
fn test_config_ordering() {
let config1 = RustFSConfig {
address: "127.0.0.1:9000".to_string(),
host: "127.0.0.1".to_string(),
port: "9000".to_string(),
access_key: "admin".to_string(),
secret_key: "password".to_string(),
domain_name: "test.com".to_string(),
volume_name: "/data".to_string(),
console_address: "127.0.0.1:9001".to_string(),
};
let config2 = RustFSConfig {
address: "127.0.0.1:9000".to_string(),
host: "127.0.0.1".to_string(),
port: "9000".to_string(),
access_key: "admin".to_string(),
secret_key: "password".to_string(),
domain_name: "test.com".to_string(),
volume_name: "/data".to_string(),
console_address: "127.0.0.1:9001".to_string(),
};
let config3 = RustFSConfig {
address: "127.0.0.1:9001".to_string(), // Different port
host: "127.0.0.1".to_string(),
port: "9001".to_string(),
access_key: "admin".to_string(),
secret_key: "password".to_string(),
domain_name: "test.com".to_string(),
volume_name: "/data".to_string(),
console_address: "127.0.0.1:9002".to_string(),
};
assert_eq!(config1, config2);
assert_ne!(config1, config3);
assert!(config1 < config3); // Lexicographic ordering
}
#[test]
fn test_clone() {
let original = RustFSConfig::default_config();
let cloned = original.clone();
assert_eq!(original, cloned);
assert_eq!(original.address, cloned.address);
assert_eq!(original.access_key, cloned.access_key);
}
#[test]
fn test_debug_format() {
let config = RustFSConfig::default_config();
let debug_str = format!("{config:?}");
assert!(debug_str.contains("RustFSConfig"));
assert!(debug_str.contains("address"));
assert!(debug_str.contains("127.0.0.1:9000"));
}
#[test]
fn test_constants() {
assert_eq!(RustFSConfig::SERVICE_NAME, "rustfs-service");
assert_eq!(RustFSConfig::SERVICE_KEY, "rustfs_key");
assert_eq!(RustFSConfig::DEFAULT_DOMAIN_NAME_VALUE, "demo.rustfs.com");
assert_eq!(RustFSConfig::DEFAULT_ADDRESS_VALUE, "127.0.0.1:9000");
assert_eq!(RustFSConfig::DEFAULT_PORT_VALUE, "9000");
assert_eq!(RustFSConfig::DEFAULT_HOST_VALUE, "127.0.0.1");
assert_eq!(RustFSConfig::DEFAULT_ACCESS_KEY_VALUE, "rustfsadmin");
assert_eq!(RustFSConfig::DEFAULT_SECRET_KEY_VALUE, "rustfsadmin");
assert_eq!(RustFSConfig::DEFAULT_CONSOLE_ADDRESS_VALUE, "127.0.0.1:9001");
}
#[test]
fn test_empty_strings() {
let config = RustFSConfig {
address: "".to_string(),
host: "".to_string(),
port: "".to_string(),
access_key: "".to_string(),
secret_key: "".to_string(),
domain_name: "".to_string(),
volume_name: "".to_string(),
console_address: "".to_string(),
};
assert!(config.address.is_empty());
assert!(config.host.is_empty());
assert!(config.port.is_empty());
assert!(config.access_key.is_empty());
assert!(config.secret_key.is_empty());
assert!(config.domain_name.is_empty());
assert!(config.volume_name.is_empty());
assert!(config.console_address.is_empty());
}
#[test]
fn test_very_long_strings() {
let long_string = "a".repeat(1000);
let config = RustFSConfig {
address: format!("{long_string}:9000"),
host: long_string.clone(),
port: "9000".to_string(),
access_key: long_string.clone(),
secret_key: long_string.clone(),
domain_name: format!("{long_string}.com"),
volume_name: format!("/data/{long_string}"),
console_address: format!("{long_string}:9001"),
};
assert_eq!(config.host.len(), 1000);
assert_eq!(config.access_key.len(), 1000);
assert_eq!(config.secret_key.len(), 1000);
}
#[test]
fn test_special_characters() {
let config = RustFSConfig {
address: "127.0.0.1:9000".to_string(),
host: "127.0.0.1".to_string(),
port: "9000".to_string(),
access_key: "user@domain.com".to_string(),
secret_key: "p@ssw0rd!#$%".to_string(),
domain_name: "test-domain.example.com".to_string(),
volume_name: "/data/rust-fs/storage".to_string(),
console_address: "127.0.0.1:9001".to_string(),
};
assert!(config.access_key.contains("@"));
assert!(config.secret_key.contains("!#$%"));
assert!(config.domain_name.contains("-"));
assert!(config.volume_name.contains("/"));
}
#[test]
fn test_unicode_strings() {
let config = RustFSConfig {
address: "127.0.0.1:9000".to_string(),
host: "127.0.0.1".to_string(),
port: "9000".to_string(),
access_key: "用户名".to_string(),
secret_key: "密码 123".to_string(),
domain_name: "测试.com".to_string(),
volume_name: "/数据/存储".to_string(),
console_address: "127.0.0.1:9001".to_string(),
};
assert_eq!(config.access_key, "用户名");
assert_eq!(config.secret_key, "密码 123");
assert_eq!(config.domain_name, "测试.com");
assert_eq!(config.volume_name, "/数据/存储");
}
#[test]
fn test_memory_efficiency() {
// Test that the structure doesn't use excessive memory
assert!(std::mem::size_of::<RustFSConfig>() < 1000);
}
// Note: Keyring-related tests (load, save, clear) are not included here
// because they require actual keyring access and would be integration tests
// rather than unit tests. They should be tested separately in an integration
// test environment where keyring access can be properly mocked or controlled.
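// The following is an illustrative, hedged sketch only (not part of the original
// file): an ignored integration-style test exercising the keyring roundtrip.
// It assumes the host has a working keyring backend and is run explicitly
// with `cargo test -- --ignored`.
#[test]
#[ignore]
fn test_keyring_save_and_clear_roundtrip_sketch() {
// Persist the default configuration to the system keyring ...
let config = RustFSConfig::default_config();
config.save().expect("save config to keyring");
// ... then remove the stored entry again so the test leaves no state behind.
RustFSConfig::clear().expect("clear config from keyring");
}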
}

View File

@@ -1,899 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::utils::RustFSConfig;
use dioxus::logger::tracing::{debug, error, info};
use rust_embed::RustEmbed;
use sha2::{Digest, Sha256};
use std::error::Error;
use std::path::{Path, PathBuf};
use std::process::Command as StdCommand;
use std::sync::LazyLock;
use std::time::Duration;
use tokio::fs;
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
use tokio::net::TcpStream;
use tokio::sync::{Mutex, mpsc};
#[derive(RustEmbed)]
#[folder = "$CARGO_MANIFEST_DIR/embedded-rustfs/"]
struct Asset;
// Use `LazyLock` to cache the checksum of embedded resources
static RUSTFS_HASH: LazyLock<Mutex<String>> = LazyLock::new(|| {
let rustfs_file = if cfg!(windows) { "rustfs.exe" } else { "rustfs" };
let rustfs_data = Asset::get(rustfs_file).expect("RustFs binary not embedded");
let hash = hex::encode(Sha256::digest(&rustfs_data.data));
Mutex::new(hash)
});
/// Service command
/// This enum represents the commands that can be sent to the service manager
/// to start, stop, or restart the service
/// The `Start` variant contains the configuration for the service
/// The `Restart` variant contains the configuration for the service
///
/// # Example
/// ```
/// let config = RustFSConfig {
/// address: "127.0.0.1:9000".to_string(),
/// host: "127.0.0.1".to_string(),
/// port: "9000".to_string(),
/// access_key: "rustfsadmin".to_string(),
/// secret_key: "rustfsadmin".to_string(),
/// domain_name: "demo.rustfs.com".to_string(),
/// volume_name: "data".to_string(),
/// console_address: "127.0.0.1:9001".to_string(),
/// };
///
/// let command = ServiceCommand::Start(config);
/// assert!(matches!(command, ServiceCommand::Start(_)));
/// ```
pub enum ServiceCommand {
Start(RustFSConfig),
Stop,
Restart(RustFSConfig),
}
/// Service operation result
/// This struct represents the result of a service operation
/// It contains whether the operation succeeded, its start and end times, and a descriptive message
///
/// # Example
/// ```
/// use chrono::Local;
///
/// let result = ServiceOperationResult {
/// success: true,
/// start_time: chrono::Local::now(),
/// end_time: chrono::Local::now(),
/// message: "服务启动成功".to_string(),
/// };
///
/// println!("{:?}", result);
/// assert_eq!(result.success, true);
/// ```
#[derive(Debug)]
pub struct ServiceOperationResult {
pub success: bool,
pub start_time: chrono::DateTime<chrono::Local>,
pub end_time: chrono::DateTime<chrono::Local>,
pub message: String,
}
/// Service manager
/// This struct represents a service manager that can be used to start, stop, or restart a service
/// It contains a command sender that can be used to send commands to the service manager
///
/// # Example
/// ```
/// let service_manager = ServiceManager::new();
/// println!("{:?}", service_manager);
/// ```
#[derive(Debug, Clone)]
pub struct ServiceManager {
command_tx: mpsc::Sender<ServiceCommand>,
// process: Arc<Mutex<Option<Child>>>,
// pid: Arc<Mutex<Option<u32>>>, // Add PID storage
// current_config: Arc<Mutex<Option<RustFSConfig>>>, // Add configuration storage
}
impl ServiceManager {
/// Check if the service is running and return its PID
/// This function is platform dependent
/// On Unix systems, it uses the `ps` command to check for the service
/// On Windows systems, it uses the `wmic` command to check for the service
///
/// # Example
/// ```
/// let pid = check_service_status().await;
/// println!("{:?}", pid);
/// ```
pub async fn check_service_status() -> Option<u32> {
#[cfg(unix)]
{
// use the ps command on a unix system
if let Ok(output) = StdCommand::new("ps").arg("-ef").output() {
let output_str = String::from_utf8_lossy(&output.stdout);
for line in output_str.lines() {
// match lines that contain `rustfs/bin/rustfs` (excluding the grep process itself)
if line.contains("rustfs/bin/rustfs") && !line.contains("grep") {
if let Some(pid_str) = line.split_whitespace().nth(1) {
if let Ok(pid) = pid_str.parse::<u32>() {
return Some(pid);
}
}
}
}
}
}
#[cfg(windows)]
{
if let Ok(output) = StdCommand::new("wmic")
.arg("process")
.arg("where")
.arg("caption='rustfs.exe'")
.arg("get")
.arg("processid")
.output()
{
let output_str = String::from_utf8_lossy(&output.stdout);
for line in output_str.lines() {
if let Ok(pid) = line.trim().parse::<u32>() {
return Some(pid);
}
}
}
}
None
}
/// Prepare the service
/// This function extracts the embedded service executable if it is missing or its checksum has changed
/// It also creates the necessary directories for the service
///
/// # Example
/// ```
/// let executable_path = prepare_service().await;
/// println!("{:?}", executable_path);
/// ```
async fn prepare_service() -> Result<PathBuf, Box<dyn Error>> {
// get the user directory
let home_dir = dirs::home_dir().ok_or("无法获取用户目录")?;
let rustfs_dir = home_dir.join("rustfs");
let bin_dir = rustfs_dir.join("bin");
let data_dir = rustfs_dir.join("data");
let logs_dir = rustfs_dir.join("logs");
// create the necessary directories
for dir in [&bin_dir, &data_dir, &logs_dir] {
if !dir.exists() {
tokio::fs::create_dir_all(dir).await?;
}
}
let rustfs_file = if cfg!(windows) { "rustfs.exe" } else { "rustfs" };
let executable_path = bin_dir.join(rustfs_file);
let hash_path = bin_dir.join("embedded_rustfs.sha256");
if executable_path.exists() && hash_path.exists() {
let cached_hash = fs::read_to_string(&hash_path).await?;
let expected_hash = RUSTFS_HASH.lock().await;
if cached_hash == *expected_hash {
println!("Use cached rustfs: {executable_path:?}");
return Ok(executable_path);
}
}
// Extract and write files
let rustfs_data = Asset::get(rustfs_file).expect("RustFS binary not embedded");
let mut file = File::create(&executable_path).await?;
file.write_all(&rustfs_data.data).await?;
let expected_hash = hex::encode(Sha256::digest(&rustfs_data.data));
fs::write(&hash_path, expected_hash).await?;
// set execution permissions on unix systems
#[cfg(unix)]
{
use std::os::unix::fs::PermissionsExt;
let mut perms = std::fs::metadata(&executable_path)?.permissions();
perms.set_mode(0o755);
std::fs::set_permissions(&executable_path, perms)?;
}
Ok(executable_path)
}
/// Helper function: Extracts the port from the address string
///
/// # Example
/// ```
/// let address = "127.0.0.1:9000";
/// let port = extract_port(address);
/// println!("{:?}", port);
/// ```
fn extract_port(address: &str) -> Option<u16> {
address.split(':').nth(1)?.parse().ok()
}
/// Create a new instance of the service manager
///
/// # Example
/// ```
/// let service_manager = ServiceManager::new();
/// println!("{:?}", service_manager);
/// ```
pub(crate) fn new() -> Self {
let (command_tx, mut command_rx) = mpsc::channel(10);
// Start the control loop
tokio::spawn(async move {
while let Some(cmd) = command_rx.recv().await {
match cmd {
ServiceCommand::Start(config) => {
if let Err(e) = Self::start_service(&config).await {
Self::show_error(&format!("启动服务失败:{e}"));
}
}
ServiceCommand::Stop => {
if let Err(e) = Self::stop_service().await {
Self::show_error(&format!("停止服务失败:{e}"));
}
}
ServiceCommand::Restart(config) => {
if Self::check_service_status().await.is_some() {
if let Err(e) = Self::stop_service().await {
Self::show_error(&format!("重启服务失败:{e}"));
continue;
}
}
if let Err(e) = Self::start_service(&config).await {
Self::show_error(&format!("重启服务失败:{e}"));
}
}
}
}
});
ServiceManager { command_tx }
}
/// Start the service
/// This function starts the service with the given configuration
///
/// # Example
/// ```
/// let config = RustFSConfig {
/// address: "127.0.0.1:9000".to_string(),
/// host: "127.0.0.1".to_string(),
/// port: "9000".to_string(),
/// access_key: "rustfsadmin".to_string(),
/// secret_key: "rustfsadmin".to_string(),
/// domain_name: "demo.rustfs.com".to_string(),
/// volume_name: "data".to_string(),
/// console_address: "127.0.0.1:9001".to_string(),
/// };
///
/// let result = start_service(&config).await;
/// println!("{:?}", result);
/// ```
async fn start_service(config: &RustFSConfig) -> Result<(), Box<dyn Error>> {
// Check if the service is already running
if let Some(existing_pid) = Self::check_service_status().await {
return Err(format!("服务已经在运行PID: {existing_pid}").into());
}
// Prepare the service program
let executable_path = Self::prepare_service().await?;
// Ensure the data directory exists
let volume_name_path = Path::new(&config.volume_name);
if !volume_name_path.exists() {
tokio::fs::create_dir_all(&config.volume_name).await?;
}
// Extract the port from the configuration
let main_port = Self::extract_port(&config.address).ok_or("无法解析主服务端口")?;
let console_port = Self::extract_port(&config.console_address).ok_or("无法解析控制台端口")?;
let host = config.address.split(':').next().ok_or("无法解析主机地址")?;
// Check that the required ports are not already in use
let ports = vec![main_port, console_port];
for port in ports {
if Self::is_port_in_use(host, port).await {
return Err(format!("端口 {port} 已被占用").into());
}
}
// Start the service
let mut child = tokio::process::Command::new(executable_path)
.arg("--address")
.arg(&config.address)
.arg("--access-key")
.arg(&config.access_key)
.arg("--secret-key")
.arg(&config.secret_key)
.arg("--console-address")
.arg(&config.console_address)
.arg(config.volume_name.clone())
.spawn()?;
let process_pid = child.id().unwrap();
// Wait for the service to start
tokio::time::sleep(Duration::from_secs(2)).await;
// Check if the service started successfully
if Self::is_port_in_use(host, main_port).await {
Self::show_info(&format!("服务启动成功!进程 ID: {process_pid}"));
Ok(())
} else {
child.kill().await?;
Err("服务启动失败".into())
}
}
/// Stop the service
/// This function stops the service
///
/// # Example
/// ```
/// let result = stop_service().await;
/// println!("{:?}", result);
/// ```
async fn stop_service() -> Result<(), Box<dyn Error>> {
let existing_pid = Self::check_service_status().await;
debug!("existing_pid: {:?}", existing_pid);
if let Some(service_pid) = existing_pid {
// Attempt to terminate the process
#[cfg(unix)]
{
StdCommand::new("kill").arg("-9").arg(service_pid.to_string()).output()?;
}
#[cfg(windows)]
{
StdCommand::new("taskkill")
.arg("/F")
.arg("/PID")
.arg(&service_pid.to_string())
.output()?;
}
// Verify that the service is indeed stopped
tokio::time::sleep(Duration::from_secs(1)).await;
if Self::check_service_status().await.is_some() {
return Err("服务停止失败".into());
}
Self::show_info("服务已成功停止");
Ok(())
} else {
Err("服务未运行".into())
}
}
/// Check if the port is in use
/// This function checks if the given port is in use on the given host
///
/// # Example
/// ```
/// let host = "127.0.0.1";
/// let port = 9000;
/// let result = is_port_in_use(host, port).await;
/// println!("{:?}", result);
/// ```
async fn is_port_in_use(host: &str, port: u16) -> bool {
TcpStream::connect(format!("{host}:{port}")).await.is_ok()
}
/// Show an error message
/// This function shows an error message dialog
///
/// # Example
/// ```
/// show_error("This is an error message");
/// ```
pub(crate) fn show_error(message: &str) {
rfd::MessageDialog::new()
.set_title("错误")
.set_description(message)
.set_level(rfd::MessageLevel::Error)
.show();
}
/// Show an information message
/// This function shows an information message dialog
///
/// # Example
/// ```
/// show_info("This is an information message");
/// ```
pub(crate) fn show_info(message: &str) {
rfd::MessageDialog::new()
.set_title("成功")
.set_description(message)
.set_level(rfd::MessageLevel::Info)
.show();
}
/// Start the service
/// This function sends a `Start` command to the service manager
///
/// # Example
/// ```
/// let config = RustFSConfig {
/// address: "127.0.0.1:9000".to_string(),
/// host: "127.0.0.1".to_string(),
/// port: "9000".to_string(),
/// access_key: "rustfsadmin".to_string(),
/// secret_key: "rustfsadmin".to_string(),
/// domain_name: "demo.rustfs.com".to_string(),
/// volume_name: "data".to_string(),
/// console_address: "127.0.0.1:9001".to_string(),
/// };
///
/// let service_manager = ServiceManager::new();
/// let result = service_manager.start(config).await;
/// println!("{:?}", result);
/// ```
///
/// # Errors
/// This function returns an error if the service fails to start
///
/// # Panics
/// This function panics if the port number is invalid
///
/// # Safety
/// This function is not marked as unsafe
///
/// # Performance
/// This function is not optimized for performance
///
/// # Design
/// This function is designed to be simple and easy to use
///
/// # Security
/// This function does not have any security implications
pub async fn start(&self, config: RustFSConfig) -> Result<ServiceOperationResult, Box<dyn Error>> {
let start_time = chrono::Local::now();
self.command_tx.send(ServiceCommand::Start(config.clone())).await?;
let host = &config.host;
let port = config.port.parse::<u16>().expect("无效的端口号");
// wait for the service to actually start
let mut retries = 0;
while retries < 30 {
// wait up to 30 seconds
if Self::check_service_status().await.is_some() && Self::is_port_in_use(host, port).await {
let end_time = chrono::Local::now();
return Ok(ServiceOperationResult {
success: true,
start_time,
end_time,
message: "服务启动成功".to_string(),
});
}
tokio::time::sleep(Duration::from_secs(1)).await;
retries += 1;
}
Err("服务启动超时".into())
}
/// Stop the service
/// This function sends a `Stop` command to the service manager
///
/// # Example
/// ```
/// let service_manager = ServiceManager::new();
/// let result = service_manager.stop().await;
/// println!("{:?}", result);
/// ```
///
/// # Errors
/// This function returns an error if the service fails to stop
///
/// # Panics
/// This function is not expected to panic
///
/// # Safety
/// This function is not marked as unsafe
///
/// # Performance
/// This function is not optimized for performance
///
/// # Design
/// This function is designed to be simple and easy to use
///
/// # Security
/// This function does not have any security implications
pub async fn stop(&self) -> Result<ServiceOperationResult, Box<dyn Error>> {
let start_time = chrono::Local::now();
self.command_tx.send(ServiceCommand::Stop).await?;
// Wait for the service to actually stop
let mut retries = 0;
while retries < 15 {
// Wait up to 15 seconds
if Self::check_service_status().await.is_none() {
let end_time = chrono::Local::now();
return Ok(ServiceOperationResult {
success: true,
start_time,
end_time,
message: "服务停止成功".to_string(),
});
}
tokio::time::sleep(Duration::from_secs(1)).await;
retries += 1;
}
Err("服务停止超时".into())
}
/// Restart the service
/// This function sends a `Restart` command to the service manager
///
/// # Example
/// ```
/// let config = RustFSConfig {
/// address: "127.0.0.1:9000".to_string(),
/// host: "127.0.0.1".to_string(),
/// port: "9000".to_string(),
/// access_key: "rustfsadmin".to_string(),
/// secret_key: "rustfsadmin".to_string(),
/// domain_name: "demo.rustfs.com".to_string(),
/// volume_name: "data".to_string(),
/// console_address: "127.0.0.1:9001".to_string(),
/// };
///
/// let service_manager = ServiceManager::new();
/// let result = service_manager.restart(config).await;
/// println!("{:?}", result);
/// ```
///
/// # Errors
/// This function returns an error if the service fails to restart
///
/// # Panics
/// This function panics if the port number is invalid
///
/// # Safety
/// This function is not marked as unsafe
///
/// # Performance
/// This function is not optimized for performance
///
/// # Design
/// This function is designed to be simple and easy to use
///
/// # Security
/// This function does not have any security implications
pub async fn restart(&self, config: RustFSConfig) -> Result<ServiceOperationResult, Box<dyn Error>> {
let start_time = chrono::Local::now();
self.command_tx.send(ServiceCommand::Restart(config.clone())).await?;
let host = &config.host;
let port = config.port.parse::<u16>().expect("无效的端口号");
// wait for the service to restart
let mut retries = 0;
while retries < 45 {
// Allow a longer wait since both stopping and starting are involved
if Self::check_service_status().await.is_some() && Self::is_port_in_use(host, port).await {
match config.save() {
Ok(_) => info!("save config success"),
Err(e) => {
error!("save config error: {}", e);
self.command_tx.send(ServiceCommand::Stop).await?;
Self::show_error("保存配置失败");
return Err("保存配置失败".into());
}
}
let end_time = chrono::Local::now();
return Ok(ServiceOperationResult {
success: true,
start_time,
end_time,
message: "服务重启成功".to_string(),
});
}
tokio::time::sleep(Duration::from_secs(1)).await;
retries += 1;
}
Err("服务重启超时".into())
}
}
#[cfg(test)]
mod tests {
use super::*;
use std::time::Duration;
#[test]
fn test_service_command_creation() {
let config = RustFSConfig::default_config();
let start_cmd = ServiceCommand::Start(config.clone());
let stop_cmd = ServiceCommand::Stop;
let restart_cmd = ServiceCommand::Restart(config);
// Test that commands can be created
match start_cmd {
ServiceCommand::Start(_) => {}
_ => panic!("Expected Start command"),
}
match stop_cmd {
ServiceCommand::Stop => {}
_ => panic!("Expected Stop command"),
}
match restart_cmd {
ServiceCommand::Restart(_) => {}
_ => panic!("Expected Restart command"),
}
}
#[test]
fn test_service_operation_result_creation() {
let start_time = chrono::Local::now();
let end_time = chrono::Local::now();
let success_result = ServiceOperationResult {
success: true,
start_time,
end_time,
message: "Operation successful".to_string(),
};
let failure_result = ServiceOperationResult {
success: false,
start_time,
end_time,
message: "Operation failed".to_string(),
};
assert!(success_result.success);
assert_eq!(success_result.message, "Operation successful");
assert!(!failure_result.success);
assert_eq!(failure_result.message, "Operation failed");
}
#[test]
fn test_service_operation_result_debug() {
let result = ServiceOperationResult {
success: true,
start_time: chrono::Local::now(),
end_time: chrono::Local::now(),
message: "Test message".to_string(),
};
let debug_str = format!("{result:?}");
assert!(debug_str.contains("ServiceOperationResult"));
assert!(debug_str.contains("success: true"));
assert!(debug_str.contains("Test message"));
}
#[test]
fn test_service_manager_creation() {
// Test ServiceManager creation in a tokio runtime
let rt = tokio::runtime::Runtime::new().unwrap();
rt.block_on(async {
let service_manager = ServiceManager::new();
// Test that ServiceManager can be created and cloned
let cloned_manager = service_manager.clone();
// Both should be valid (we can't test much more without async runtime)
assert!(format!("{service_manager:?}").contains("ServiceManager"));
assert!(format!("{cloned_manager:?}").contains("ServiceManager"));
});
}
#[test]
fn test_extract_port_valid() {
let test_cases = vec![
("127.0.0.1:9000", Some(9000)),
("localhost:8080", Some(8080)),
("192.168.1.100:3000", Some(3000)),
("0.0.0.0:80", Some(80)),
("example.com:443", Some(443)),
("host:65535", Some(65535)),
("host:1", Some(1)),
];
for (input, expected) in test_cases {
let result = ServiceManager::extract_port(input);
assert_eq!(result, expected, "Failed for input: {input}");
}
}
#[test]
fn test_extract_port_invalid() {
let invalid_cases = vec![
"127.0.0.1", // Missing port
"127.0.0.1:", // Empty port
"127.0.0.1:abc", // Invalid port
"127.0.0.1:99999", // Port out of range
"", // Empty string
"invalid", // No colon
"host:-1", // Negative port
"host:0.5", // Decimal port
];
for input in invalid_cases {
let result = ServiceManager::extract_port(input);
assert_eq!(result, None, "Should be None for input: {input}");
}
// Special case: empty host but valid port should still work
assert_eq!(ServiceManager::extract_port(":9000"), Some(9000));
// Special case: multiple colons - extract_port takes the second part
// For "127.0.0.1:9000:extra", it takes "9000" which is valid
assert_eq!(ServiceManager::extract_port("127.0.0.1:9000:extra"), Some(9000));
}
#[test]
fn test_extract_port_edge_cases() {
// Test edge cases for port numbers
assert_eq!(ServiceManager::extract_port("host:0"), Some(0));
assert_eq!(ServiceManager::extract_port("host:65535"), Some(65535));
assert_eq!(ServiceManager::extract_port("host:65536"), None); // Out of range
// IPv6-like address - extract_port takes the second part after split(':')
// For "::1:8080", split(':') gives ["", "", "1", "8080"], nth(1) gives ""
assert_eq!(ServiceManager::extract_port("::1:8080"), None); // Second part is empty
// For "[::1]:8080", split(':') gives ["[", "", "1]", "8080"], nth(1) gives ""
assert_eq!(ServiceManager::extract_port("[::1]:8080"), None); // Second part is empty
}
#[test]
fn test_show_error() {
// Test that show_error function exists and can be called
// We can't actually test the dialog in a test environment
// so we just verify the function signature
}
#[test]
fn test_show_info() {
// Test that show_info function exists and can be called
// We can't actually test the dialog in a test environment
// so we just verify the function signature
}
#[test]
fn test_service_operation_result_timing() {
let start_time = chrono::Local::now();
std::thread::sleep(Duration::from_millis(10)); // Small delay
let end_time = chrono::Local::now();
let result = ServiceOperationResult {
success: true,
start_time,
end_time,
message: "Timing test".to_string(),
};
// End time should be after start time
assert!(result.end_time >= result.start_time);
}
#[test]
fn test_service_operation_result_with_unicode() {
let result = ServiceOperationResult {
success: true,
start_time: chrono::Local::now(),
end_time: chrono::Local::now(),
message: "操作成功 🎉".to_string(),
};
assert_eq!(result.message, "操作成功 🎉");
assert!(result.success);
}
#[test]
fn test_service_operation_result_with_long_message() {
let long_message = "A".repeat(10000);
let result = ServiceOperationResult {
success: false,
start_time: chrono::Local::now(),
end_time: chrono::Local::now(),
message: long_message.clone(),
};
assert_eq!(result.message.len(), 10000);
assert_eq!(result.message, long_message);
assert!(!result.success);
}
#[test]
fn test_service_command_with_different_configs() {
let config1 = RustFSConfig {
address: "127.0.0.1:9000".to_string(),
host: "127.0.0.1".to_string(),
port: "9000".to_string(),
access_key: "admin1".to_string(),
secret_key: "pass1".to_string(),
domain_name: "test1.com".to_string(),
volume_name: "/data1".to_string(),
console_address: "127.0.0.1:9001".to_string(),
};
let config2 = RustFSConfig {
address: "192.168.1.100:8080".to_string(),
host: "192.168.1.100".to_string(),
port: "8080".to_string(),
access_key: "admin2".to_string(),
secret_key: "pass2".to_string(),
domain_name: "test2.com".to_string(),
volume_name: "/data2".to_string(),
console_address: "192.168.1.100:8081".to_string(),
};
let start_cmd1 = ServiceCommand::Start(config1);
let restart_cmd2 = ServiceCommand::Restart(config2);
// Test that different configs can be used
match start_cmd1 {
ServiceCommand::Start(config) => {
assert_eq!(config.address, "127.0.0.1:9000");
assert_eq!(config.access_key, "admin1");
}
_ => panic!("Expected Start command"),
}
match restart_cmd2 {
ServiceCommand::Restart(config) => {
assert_eq!(config.address, "192.168.1.100:8080");
assert_eq!(config.access_key, "admin2");
}
_ => panic!("Expected Restart command"),
}
}
#[test]
fn test_memory_efficiency() {
// Test that structures don't use excessive memory
assert!(std::mem::size_of::<ServiceCommand>() < 2000);
assert!(std::mem::size_of::<ServiceOperationResult>() < 1000);
assert!(std::mem::size_of::<ServiceManager>() < 1000);
}
// Note: The following methods are not tested here because they require:
// - Async runtime (tokio)
// - File system access
// - Network access
// - Process management
// - External dependencies (embedded assets)
//
// These should be tested in integration tests:
// - check_service_status()
// - prepare_service()
// - start_service()
// - stop_service()
// - is_port_in_use()
// - ServiceManager::start()
// - ServiceManager::stop()
// - ServiceManager::restart()
//
// The RUSTFS_HASH LazyLock is also not tested here as it depends
// on embedded assets that may not be available in the unit test environment.
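// Illustrative, hedged sketch (not part of the original file): an ignored test
// that probes the two cheapest runtime checks. It assumes local network access
// and mirrors the block_on pattern used in test_service_manager_creation.
#[test]
#[ignore]
fn test_service_status_probe_sketch() {
let rt = tokio::runtime::Runtime::new().unwrap();
rt.block_on(async {
// On a machine without a running rustfs binary this is expected to be None.
let pid = ServiceManager::check_service_status().await;
println!("detected rustfs pid: {pid:?}");
// TCP port 1 is effectively never listening locally, so this should be false.
assert!(!ServiceManager::is_port_in_use("127.0.0.1", 1).await);
});
}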
}

View File

@@ -1,300 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use dioxus::logger::tracing::debug;
use tracing_appender::non_blocking::WorkerGuard;
use tracing_appender::rolling::{RollingFileAppender, Rotation};
use tracing_subscriber::fmt;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;
/// Initialize the logger with a rolling file appender
/// that rotates log files daily
pub fn init_logger() -> WorkerGuard {
// configure rolling logs, rotated daily
let home_dir = dirs::home_dir().expect("无法获取用户目录");
let rustfs_dir = home_dir.join("rustfs");
let logs_dir = rustfs_dir.join("logs");
let file_appender = RollingFileAppender::builder()
.rotation(Rotation::DAILY) // rotate log files once per day
.filename_prefix("rustfs-cli") // log file names will be prefixed with `rustfs-cli.`
.filename_suffix("log") // log file names will be suffixed with `.log`
.build(logs_dir) // build an appender that stores log files under ~/rustfs/logs
.expect("initializing rolling file appender failed");
// non-blocking writer for improved performance
let (non_blocking_file, worker_guard) = tracing_appender::non_blocking(file_appender);
// console output layer
let console_layer = fmt::layer()
.with_writer(std::io::stdout)
.with_ansi(true) // enable colors in the console
.with_line_number(true);
// file output layer
let file_layer = fmt::layer()
.with_writer(non_blocking_file)
.with_ansi(false) // disable colors in the file
.with_thread_names(true)
.with_target(true)
.with_thread_ids(true)
.with_level(true)
.with_line_number(true);
// Combine all layers and initialize the global subscriber
tracing_subscriber::registry()
.with(console_layer)
.with(file_layer)
.with(tracing_subscriber::EnvFilter::new("info")) // default log level filter ("info")
.init();
debug!("Logger initialized");
worker_guard
}
#[cfg(test)]
mod tests {
use super::*;
use std::sync::Once;
static INIT: Once = Once::new();
// Helper function to ensure logger is only initialized once in tests
fn ensure_logger_init() {
INIT.call_once(|| {
// Initialize a simple test logger to avoid conflicts
let _ = tracing_subscriber::fmt().with_test_writer().try_init();
});
}
#[test]
fn test_logger_initialization_components() {
ensure_logger_init();
// Test that we can create the components used in init_logger
// without actually initializing the global logger again
// Test home directory access
let home_dir_result = dirs::home_dir();
assert!(home_dir_result.is_some(), "Should be able to get home directory");
let home_dir = home_dir_result.unwrap();
let rustfs_dir = home_dir.join("rustfs");
let logs_dir = rustfs_dir.join("logs");
// Test path construction
assert!(rustfs_dir.to_string_lossy().contains("rustfs"));
assert!(logs_dir.to_string_lossy().contains("logs"));
}
#[test]
fn test_rolling_file_appender_builder() {
ensure_logger_init();
// Test that we can create a RollingFileAppender builder
let builder = RollingFileAppender::builder()
.rotation(Rotation::DAILY)
.filename_prefix("test-rustfs-cli")
.filename_suffix("log");
// We can't actually build it without creating directories,
// but we can verify the builder pattern works
let debug_str = format!("{builder:?}");
// The actual debug format might be different, so just check it's not empty
assert!(!debug_str.is_empty());
// Check that it contains some expected parts
assert!(debug_str.contains("Builder") || debug_str.contains("builder") || debug_str.contains("RollingFileAppender"));
}
#[test]
fn test_rotation_types() {
ensure_logger_init();
// Test different rotation types
let daily = Rotation::DAILY;
let hourly = Rotation::HOURLY;
let minutely = Rotation::MINUTELY;
let never = Rotation::NEVER;
// Test that rotation types can be created and formatted
assert!(!format!("{daily:?}").is_empty());
assert!(!format!("{hourly:?}").is_empty());
assert!(!format!("{minutely:?}").is_empty());
assert!(!format!("{never:?}").is_empty());
}
#[test]
fn test_fmt_layer_configuration() {
ensure_logger_init();
// Test that we can create fmt layers with different configurations
// We can't actually test the layers directly due to type complexity,
// but we can test that the configuration values are correct
// Test console layer settings
let console_ansi = true;
let console_line_number = true;
assert!(console_ansi);
assert!(console_line_number);
// Test file layer settings
let file_ansi = false;
let file_thread_names = true;
let file_target = true;
let file_thread_ids = true;
let file_level = true;
let file_line_number = true;
assert!(!file_ansi);
assert!(file_thread_names);
assert!(file_target);
assert!(file_thread_ids);
assert!(file_level);
assert!(file_line_number);
}
#[test]
fn test_env_filter_creation() {
ensure_logger_init();
// Test that EnvFilter can be created with different levels
let info_filter = tracing_subscriber::EnvFilter::new("info");
let debug_filter = tracing_subscriber::EnvFilter::new("debug");
let warn_filter = tracing_subscriber::EnvFilter::new("warn");
let error_filter = tracing_subscriber::EnvFilter::new("error");
// Test that filters can be created
assert!(!format!("{info_filter:?}").is_empty());
assert!(!format!("{debug_filter:?}").is_empty());
assert!(!format!("{warn_filter:?}").is_empty());
assert!(!format!("{error_filter:?}").is_empty());
}
#[test]
fn test_path_construction() {
ensure_logger_init();
// Test path construction logic used in init_logger
if let Some(home_dir) = dirs::home_dir() {
let rustfs_dir = home_dir.join("rustfs");
let logs_dir = rustfs_dir.join("logs");
// Test that paths are constructed correctly
assert!(rustfs_dir.ends_with("rustfs"));
assert!(logs_dir.ends_with("logs"));
assert!(logs_dir.parent().unwrap().ends_with("rustfs"));
// Test path string representation
let rustfs_str = rustfs_dir.to_string_lossy();
let logs_str = logs_dir.to_string_lossy();
assert!(rustfs_str.contains("rustfs"));
assert!(logs_str.contains("rustfs"));
assert!(logs_str.contains("logs"));
}
}
#[test]
fn test_filename_patterns() {
ensure_logger_init();
// Test the filename patterns used in the logger
let prefix = "rustfs-cli";
let suffix = "log";
assert_eq!(prefix, "rustfs-cli");
assert_eq!(suffix, "log");
// Test that these would create valid filenames
let sample_filename = format!("{prefix}.2024-01-01.{suffix}");
assert_eq!(sample_filename, "rustfs-cli.2024-01-01.log");
}
#[test]
fn test_worker_guard_type() {
ensure_logger_init();
// Test that WorkerGuard type exists and can be referenced
// We can't actually create one without the full setup, but we can test the type
let guard_size = std::mem::size_of::<WorkerGuard>();
assert!(guard_size > 0, "WorkerGuard should have non-zero size");
}
#[test]
fn test_logger_configuration_constants() {
ensure_logger_init();
// Test the configuration values used in the logger
let default_log_level = "info";
let filename_prefix = "rustfs-cli";
let filename_suffix = "log";
let rotation = Rotation::DAILY;
assert_eq!(default_log_level, "info");
assert_eq!(filename_prefix, "rustfs-cli");
assert_eq!(filename_suffix, "log");
assert!(matches!(rotation, Rotation::DAILY));
}
#[test]
fn test_directory_names() {
ensure_logger_init();
// Test the directory names used in the logger setup
let rustfs_dir_name = "rustfs";
let logs_dir_name = "logs";
assert_eq!(rustfs_dir_name, "rustfs");
assert_eq!(logs_dir_name, "logs");
// Test path joining
let combined = format!("{rustfs_dir_name}/{logs_dir_name}");
assert_eq!(combined, "rustfs/logs");
}
#[test]
fn test_layer_settings() {
ensure_logger_init();
// Test the boolean settings used in layer configuration
let console_ansi = true;
let console_line_number = true;
let file_ansi = false;
let file_thread_names = true;
let file_target = true;
let file_thread_ids = true;
let file_level = true;
let file_line_number = true;
// Verify the settings
assert!(console_ansi);
assert!(console_line_number);
assert!(!file_ansi);
assert!(file_thread_names);
assert!(file_target);
assert!(file_thread_ids);
assert!(file_level);
assert!(file_line_number);
}
// Note: The actual init_logger() function is not tested here because:
// 1. It initializes a global tracing subscriber which can only be done once
// 2. It requires file system access to create directories
// 3. It has side effects that would interfere with other tests
// 4. It returns a WorkerGuard that needs to be kept alive
//
// This function should be tested in integration tests where:
// - File system access can be properly controlled
// - The global state can be managed
// - The actual logging behavior can be verified
// - The WorkerGuard lifecycle can be properly managed
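// Illustrative, hedged sketch (not part of the original file): an ignored test
// that builds the same RollingFileAppender against a throwaway directory,
// using only std for the temporary path.
#[test]
#[ignore]
fn test_build_rolling_appender_sketch() {
let dir = std::env::temp_dir().join("rustfs-cli-logger-test");
std::fs::create_dir_all(&dir).expect("create temp log dir");
// Same rotation, prefix and suffix as init_logger(), but pointed at the temp dir.
let appender = RollingFileAppender::builder()
.rotation(Rotation::DAILY)
.filename_prefix("rustfs-cli")
.filename_suffix("log")
.build(&dir)
.expect("build rolling file appender");
drop(appender);
}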
}

View File

@@ -1,38 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::route::Route;
use dioxus::logger::tracing::info;
use dioxus::prelude::*;
const FAVICON: Asset = asset!("/assets/favicon.ico");
const TAILWIND_CSS: Asset = asset!("/assets/tailwind.css");
/// The main application component
/// This is the root component of the application
/// It contains the global resources and the router
/// for the application
#[component]
pub fn App() -> Element {
// Build cool things ✌️
use document::{Link, Title};
info!("App rendered");
rsx! {
// Global app resources
Link { rel: "icon", href: FAVICON }
Link { rel: "stylesheet", href: TAILWIND_CSS }
Title { "RustFS" }
Router::<Route> {}
}
}

View File

@@ -1,23 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::components::Home;
use dioxus::prelude::*;
#[component]
pub fn HomeViews() -> Element {
rsx! {
Home {}
}
}

View File

@@ -1,23 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::components::Setting;
use dioxus::prelude::*;
#[component]
pub fn SettingViews() -> Element {
rsx! {
Setting {}
}
}

View File

@@ -1,24 +0,0 @@
/**
* Copyright 2024 RustFS Team
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
module.exports = {
mode: "all",
content: ["./src/**/*.{rs,html,css}", "./dist/**/*.html"],
theme: {
extend: {},
},
plugins: [],
};

View File

@@ -13,6 +13,7 @@ keywords = ["RustFS", "AHM", "health-management", "scanner", "Minio"]
categories = ["web-programming", "development-tools", "filesystem"]
[dependencies]
rustfs-config = { workspace = true }
rustfs-ecstore = { workspace = true }
rustfs-common = { workspace = true }
rustfs-filemeta = { workspace = true }
@@ -22,20 +23,23 @@ tokio = { workspace = true, features = ["full"] }
tokio-util = { workspace = true }
tracing = { workspace = true }
serde = { workspace = true, features = ["derive"] }
time = { workspace = true }
serde_json = { workspace = true }
thiserror = { workspace = true }
bytes = { workspace = true }
time = { workspace = true, features = ["serde"] }
uuid = { workspace = true, features = ["v4", "serde"] }
anyhow = { workspace = true }
async-trait = { workspace = true }
futures = { workspace = true }
url = { workspace = true }
rustfs-lock = { workspace = true }
lazy_static = { workspace = true }
s3s = { workspace = true }
chrono = { workspace = true }
rand = { workspace = true }
reqwest = { workspace = true }
tempfile = { workspace = true }
walkdir = { workspace = true }
[dev-dependencies]
rmp-serde = { workspace = true }
tokio-test = { workspace = true }
serde_json = { workspace = true }
serial_test = { workspace = true }
tracing-subscriber = { workspace = true }
tempfile = { workspace = true }
heed = { workspace = true }

View File

@@ -14,6 +14,10 @@
use thiserror::Error;
/// Custom error type for AHM operations
/// This enum defines various error variants that can occur during
/// the execution of AHM-related tasks, such as I/O errors, storage errors,
/// configuration errors, and specific errors related to healing operations.
#[derive(Debug, Error)]
pub enum Error {
#[error("I/O error: {0}")]
@@ -22,22 +26,84 @@ pub enum Error {
#[error("Storage error: {0}")]
Storage(#[from] rustfs_ecstore::error::Error),
#[error("Disk error: {0}")]
Disk(#[from] rustfs_ecstore::disk::error::DiskError),
#[error("Configuration error: {0}")]
Config(String),
#[error("Heal configuration error: {message}")]
ConfigurationError { message: String },
#[error("Other error: {0}")]
Other(String),
#[error(transparent)]
Anyhow(#[from] anyhow::Error),
// Scanner
#[error("Scanner error: {0}")]
Scanner(String),
#[error("Metrics error: {0}")]
Metrics(String),
#[error(transparent)]
Other(#[from] anyhow::Error),
#[error("Serialization error: {0}")]
Serialization(String),
#[error("IO error: {0}")]
IO(String),
#[error("Not found: {0}")]
NotFound(String),
#[error("Invalid checkpoint: {0}")]
InvalidCheckpoint(String),
// Heal
#[error("Heal task not found: {task_id}")]
TaskNotFound { task_id: String },
#[error("Heal task already exists: {task_id}")]
TaskAlreadyExists { task_id: String },
#[error("Heal manager is not running")]
ManagerNotRunning,
#[error("Heal task execution failed: {message}")]
TaskExecutionFailed { message: String },
#[error("Invalid heal type: {heal_type}")]
InvalidHealType { heal_type: String },
#[error("Heal task cancelled")]
TaskCancelled,
#[error("Heal task timeout")]
TaskTimeout,
#[error("Heal event processing failed: {message}")]
EventProcessingFailed { message: String },
#[error("Heal progress tracking failed: {message}")]
ProgressTrackingFailed { message: String },
}
/// A specialized Result type for AHM operations
/// This type is a convenient alias for results returned by functions in the AHM crate,
/// using the custom Error type defined above.
pub type Result<T, E = Error> = std::result::Result<T, E>;
// Implement conversion from ahm::Error to std::io::Error for use in main.rs
impl Error {
/// Create an Other error from any error type
pub fn other<E>(error: E) -> Self
where
E: Into<Box<dyn std::error::Error + Send + Sync>>,
{
Error::Other(error.into().to_string())
}
}
impl From<Error> for std::io::Error {
fn from(err: Error) -> Self {
std::io::Error::other(err)

View File

@@ -0,0 +1,565 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::heal::{
manager::HealManager,
task::{HealOptions, HealPriority, HealRequest, HealType},
utils,
};
use crate::{Error, Result};
use rustfs_common::heal_channel::{
HealChannelCommand, HealChannelPriority, HealChannelReceiver, HealChannelRequest, HealChannelResponse, HealScanMode,
publish_heal_response,
};
use std::sync::Arc;
use tokio::sync::mpsc;
use tracing::{debug, error, info};
/// Heal channel processor
pub struct HealChannelProcessor {
/// Heal manager
heal_manager: Arc<HealManager>,
/// Response sender
response_sender: mpsc::UnboundedSender<HealChannelResponse>,
/// Response receiver
response_receiver: mpsc::UnboundedReceiver<HealChannelResponse>,
}
impl HealChannelProcessor {
/// Create new HealChannelProcessor
pub fn new(heal_manager: Arc<HealManager>) -> Self {
let (response_tx, response_rx) = mpsc::unbounded_channel();
Self {
heal_manager,
response_sender: response_tx,
response_receiver: response_rx,
}
}
/// Start processing heal channel requests
pub async fn start(&mut self, mut receiver: HealChannelReceiver) -> Result<()> {
info!("Starting heal channel processor");
loop {
tokio::select! {
command = receiver.recv() => {
match command {
Some(command) => {
if let Err(e) = self.process_command(command).await {
error!("Failed to process heal command: {}", e);
}
}
None => {
debug!("Heal channel receiver closed, stopping processor");
break;
}
}
}
response = self.response_receiver.recv() => {
if let Some(response) = response {
// Handle response if needed
info!("Received heal response for request: {}", response.request_id);
}
}
}
}
info!("Heal channel processor stopped");
Ok(())
}
/// Process heal command
async fn process_command(&self, command: HealChannelCommand) -> Result<()> {
match command {
HealChannelCommand::Start(request) => self.process_start_request(request).await,
HealChannelCommand::Query { heal_path, client_token } => self.process_query_request(heal_path, client_token).await,
HealChannelCommand::Cancel { heal_path } => self.process_cancel_request(heal_path).await,
}
}
/// Process start request
async fn process_start_request(&self, request: HealChannelRequest) -> Result<()> {
info!(
"Processing heal start request: {} for bucket: {}/{}",
request.id,
request.bucket,
request.object_prefix.as_deref().unwrap_or("")
);
// Convert channel request to heal request
let heal_request = self.convert_to_heal_request(request.clone())?;
// Submit to heal manager
match self.heal_manager.submit_heal_request(heal_request).await {
Ok(task_id) => {
info!("Successfully submitted heal request: {} as task: {}", request.id, task_id);
let response = HealChannelResponse {
request_id: request.id,
success: true,
data: Some(format!("Task ID: {task_id}").into_bytes()),
error: None,
};
self.publish_response(response);
}
Err(e) => {
error!("Failed to submit heal request: {} - {}", request.id, e);
// Send error response
let response = HealChannelResponse {
request_id: request.id,
success: false,
data: None,
error: Some(e.to_string()),
};
self.publish_response(response);
}
}
Ok(())
}
/// Process query request
async fn process_query_request(&self, heal_path: String, client_token: String) -> Result<()> {
info!("Processing heal query request for path: {}", heal_path);
// TODO: Implement query logic based on heal_path and client_token
// For now, return a placeholder response
let response = HealChannelResponse {
request_id: client_token,
success: true,
data: Some(format!("Query result for path: {heal_path}").into_bytes()),
error: None,
};
self.publish_response(response);
Ok(())
}
/// Process cancel request
async fn process_cancel_request(&self, heal_path: String) -> Result<()> {
info!("Processing heal cancel request for path: {}", heal_path);
// TODO: Implement cancel logic based on heal_path
// For now, return a placeholder response
let response = HealChannelResponse {
request_id: heal_path.clone(),
success: true,
data: Some(format!("Cancel request for path: {heal_path}").into_bytes()),
error: None,
};
self.publish_response(response);
Ok(())
}
/// Convert channel request to heal request
fn convert_to_heal_request(&self, request: HealChannelRequest) -> Result<HealRequest> {
let heal_type = if let Some(disk_id) = &request.disk {
let set_disk_id = utils::normalize_set_disk_id(disk_id).ok_or_else(|| Error::InvalidHealType {
heal_type: format!("erasure-set({disk_id})"),
})?;
HealType::ErasureSet {
buckets: vec![],
set_disk_id,
}
} else if let Some(prefix) = &request.object_prefix {
if !prefix.is_empty() {
HealType::Object {
bucket: request.bucket.clone(),
object: prefix.clone(),
version_id: None,
}
} else {
HealType::Bucket {
bucket: request.bucket.clone(),
}
}
} else {
HealType::Bucket {
bucket: request.bucket.clone(),
}
};
let priority = match request.priority {
HealChannelPriority::Low => HealPriority::Low,
HealChannelPriority::Normal => HealPriority::Normal,
HealChannelPriority::High => HealPriority::High,
HealChannelPriority::Critical => HealPriority::Urgent,
};
// Build HealOptions with all available fields
let mut options = HealOptions {
scan_mode: request.scan_mode.unwrap_or(HealScanMode::Normal),
remove_corrupted: request.remove_corrupted.unwrap_or(false),
recreate_missing: request.recreate_missing.unwrap_or(true),
update_parity: request.update_parity.unwrap_or(true),
recursive: request.recursive.unwrap_or(false),
dry_run: request.dry_run.unwrap_or(false),
timeout: request.timeout_seconds.map(std::time::Duration::from_secs),
pool_index: request.pool_index,
set_index: request.set_index,
};
// Apply force_start overrides
if request.force_start {
options.remove_corrupted = true;
options.recreate_missing = true;
options.update_parity = true;
}
Ok(HealRequest::new(heal_type, options, priority))
}
fn publish_response(&self, response: HealChannelResponse) {
// Try to send to local channel first, but don't block broadcast on failure
if let Err(e) = self.response_sender.send(response.clone()) {
error!("Failed to enqueue heal response locally: {}", e);
}
// Always attempt to broadcast, even if local send failed
// Use the original response for broadcast; local send uses a clone
if let Err(e) = publish_heal_response(response) {
error!("Failed to broadcast heal response: {}", e);
}
}
/// Get response sender for external use
pub fn get_response_sender(&self) -> mpsc::UnboundedSender<HealChannelResponse> {
self.response_sender.clone()
}
}
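/// Illustrative, hedged sketch (not part of the original file): one way a caller
/// could wire the processor, given a HealManager and a HealChannelReceiver
/// obtained elsewhere from rustfs_common::heal_channel.
#[allow(dead_code)]
async fn run_heal_channel_sketch(heal_manager: Arc<HealManager>, receiver: HealChannelReceiver) -> Result<()> {
// Create the processor around the shared heal manager and drive it until the channel closes.
let mut processor = HealChannelProcessor::new(heal_manager);
processor.start(receiver).await
}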
#[cfg(test)]
mod tests {
use super::*;
use crate::heal::storage::HealStorageAPI;
use rustfs_common::heal_channel::{HealChannelPriority, HealChannelRequest, HealScanMode};
use std::sync::Arc;
// Mock storage for testing
struct MockStorage;
#[async_trait::async_trait]
impl HealStorageAPI for MockStorage {
async fn get_object_meta(
&self,
_bucket: &str,
_object: &str,
) -> crate::Result<Option<rustfs_ecstore::store_api::ObjectInfo>> {
Ok(None)
}
async fn get_object_data(&self, _bucket: &str, _object: &str) -> crate::Result<Option<Vec<u8>>> {
Ok(None)
}
async fn put_object_data(&self, _bucket: &str, _object: &str, _data: &[u8]) -> crate::Result<()> {
Ok(())
}
async fn delete_object(&self, _bucket: &str, _object: &str) -> crate::Result<()> {
Ok(())
}
async fn verify_object_integrity(&self, _bucket: &str, _object: &str) -> crate::Result<bool> {
Ok(true)
}
async fn ec_decode_rebuild(&self, _bucket: &str, _object: &str) -> crate::Result<Vec<u8>> {
Ok(vec![])
}
async fn get_disk_status(
&self,
_endpoint: &rustfs_ecstore::disk::endpoint::Endpoint,
) -> crate::Result<crate::heal::storage::DiskStatus> {
Ok(crate::heal::storage::DiskStatus::Ok)
}
async fn format_disk(&self, _endpoint: &rustfs_ecstore::disk::endpoint::Endpoint) -> crate::Result<()> {
Ok(())
}
async fn get_bucket_info(&self, _bucket: &str) -> crate::Result<Option<rustfs_ecstore::store_api::BucketInfo>> {
Ok(None)
}
async fn heal_bucket_metadata(&self, _bucket: &str) -> crate::Result<()> {
Ok(())
}
async fn list_buckets(&self) -> crate::Result<Vec<rustfs_ecstore::store_api::BucketInfo>> {
Ok(vec![])
}
async fn object_exists(&self, _bucket: &str, _object: &str) -> crate::Result<bool> {
Ok(false)
}
async fn get_object_size(&self, _bucket: &str, _object: &str) -> crate::Result<Option<u64>> {
Ok(None)
}
async fn get_object_checksum(&self, _bucket: &str, _object: &str) -> crate::Result<Option<String>> {
Ok(None)
}
async fn heal_object(
&self,
_bucket: &str,
_object: &str,
_version_id: Option<&str>,
_opts: &rustfs_common::heal_channel::HealOpts,
) -> crate::Result<(rustfs_madmin::heal_commands::HealResultItem, Option<crate::Error>)> {
Ok((rustfs_madmin::heal_commands::HealResultItem::default(), None))
}
async fn heal_bucket(
&self,
_bucket: &str,
_opts: &rustfs_common::heal_channel::HealOpts,
) -> crate::Result<rustfs_madmin::heal_commands::HealResultItem> {
Ok(rustfs_madmin::heal_commands::HealResultItem::default())
}
async fn heal_format(
&self,
_dry_run: bool,
) -> crate::Result<(rustfs_madmin::heal_commands::HealResultItem, Option<crate::Error>)> {
Ok((rustfs_madmin::heal_commands::HealResultItem::default(), None))
}
async fn list_objects_for_heal(&self, _bucket: &str, _prefix: &str) -> crate::Result<Vec<String>> {
Ok(vec![])
}
async fn list_objects_for_heal_page(
&self,
_bucket: &str,
_prefix: &str,
_continuation_token: Option<&str>,
) -> crate::Result<(Vec<String>, Option<String>, bool)> {
Ok((vec![], None, false))
}
async fn get_disk_for_resume(&self, _set_disk_id: &str) -> crate::Result<rustfs_ecstore::disk::DiskStore> {
Err(crate::Error::other("Not implemented in mock"))
}
}
fn create_test_heal_manager() -> Arc<HealManager> {
let storage: Arc<dyn HealStorageAPI> = Arc::new(MockStorage);
Arc::new(HealManager::new(storage, None))
}
#[test]
fn test_heal_channel_processor_new() {
let heal_manager = create_test_heal_manager();
let processor = HealChannelProcessor::new(heal_manager);
// Verify processor is created successfully
let _sender = processor.get_response_sender();
// If we can get the sender, processor was created correctly
}
#[tokio::test]
async fn test_convert_to_heal_request_bucket() {
let heal_manager = create_test_heal_manager();
let processor = HealChannelProcessor::new(heal_manager);
let channel_request = HealChannelRequest {
id: "test-id".to_string(),
bucket: "test-bucket".to_string(),
object_prefix: None,
disk: None,
priority: HealChannelPriority::Normal,
scan_mode: None,
remove_corrupted: None,
recreate_missing: None,
update_parity: None,
recursive: None,
dry_run: None,
timeout_seconds: None,
pool_index: None,
set_index: None,
force_start: false,
};
let heal_request = processor.convert_to_heal_request(channel_request).unwrap();
assert!(matches!(heal_request.heal_type, HealType::Bucket { .. }));
assert_eq!(heal_request.priority, HealPriority::Normal);
}
#[tokio::test]
async fn test_convert_to_heal_request_object() {
let heal_manager = create_test_heal_manager();
let processor = HealChannelProcessor::new(heal_manager);
let channel_request = HealChannelRequest {
id: "test-id".to_string(),
bucket: "test-bucket".to_string(),
object_prefix: Some("test-object".to_string()),
disk: None,
priority: HealChannelPriority::High,
scan_mode: Some(HealScanMode::Deep),
remove_corrupted: Some(true),
recreate_missing: Some(true),
update_parity: Some(true),
recursive: Some(false),
dry_run: Some(false),
timeout_seconds: Some(300),
pool_index: Some(0),
set_index: Some(1),
force_start: false,
};
let heal_request = processor.convert_to_heal_request(channel_request).unwrap();
assert!(matches!(heal_request.heal_type, HealType::Object { .. }));
assert_eq!(heal_request.priority, HealPriority::High);
assert_eq!(heal_request.options.scan_mode, HealScanMode::Deep);
assert!(heal_request.options.remove_corrupted);
assert!(heal_request.options.recreate_missing);
}
#[tokio::test]
async fn test_convert_to_heal_request_erasure_set() {
let heal_manager = create_test_heal_manager();
let processor = HealChannelProcessor::new(heal_manager);
let channel_request = HealChannelRequest {
id: "test-id".to_string(),
bucket: "test-bucket".to_string(),
object_prefix: None,
disk: Some("pool_0_set_1".to_string()),
priority: HealChannelPriority::Critical,
scan_mode: None,
remove_corrupted: None,
recreate_missing: None,
update_parity: None,
recursive: None,
dry_run: None,
timeout_seconds: None,
pool_index: None,
set_index: None,
force_start: false,
};
let heal_request = processor.convert_to_heal_request(channel_request).unwrap();
assert!(matches!(heal_request.heal_type, HealType::ErasureSet { .. }));
assert_eq!(heal_request.priority, HealPriority::Urgent);
}
#[tokio::test]
async fn test_convert_to_heal_request_invalid_disk_id() {
let heal_manager = create_test_heal_manager();
let processor = HealChannelProcessor::new(heal_manager);
let channel_request = HealChannelRequest {
id: "test-id".to_string(),
bucket: "test-bucket".to_string(),
object_prefix: None,
disk: Some("invalid-disk-id".to_string()),
priority: HealChannelPriority::Normal,
scan_mode: None,
remove_corrupted: None,
recreate_missing: None,
update_parity: None,
recursive: None,
dry_run: None,
timeout_seconds: None,
pool_index: None,
set_index: None,
force_start: false,
};
let result = processor.convert_to_heal_request(channel_request);
assert!(result.is_err());
}
#[tokio::test]
async fn test_convert_to_heal_request_priority_mapping() {
let heal_manager = create_test_heal_manager();
let processor = HealChannelProcessor::new(heal_manager);
let priorities = vec![
(HealChannelPriority::Low, HealPriority::Low),
(HealChannelPriority::Normal, HealPriority::Normal),
(HealChannelPriority::High, HealPriority::High),
(HealChannelPriority::Critical, HealPriority::Urgent),
];
for (channel_priority, expected_heal_priority) in priorities {
let channel_request = HealChannelRequest {
id: "test-id".to_string(),
bucket: "test-bucket".to_string(),
object_prefix: None,
disk: None,
priority: channel_priority,
scan_mode: None,
remove_corrupted: None,
recreate_missing: None,
update_parity: None,
recursive: None,
dry_run: None,
timeout_seconds: None,
pool_index: None,
set_index: None,
force_start: false,
};
let heal_request = processor.convert_to_heal_request(channel_request).unwrap();
assert_eq!(heal_request.priority, expected_heal_priority);
}
}
#[tokio::test]
async fn test_convert_to_heal_request_force_start() {
let heal_manager = create_test_heal_manager();
let processor = HealChannelProcessor::new(heal_manager);
let channel_request = HealChannelRequest {
id: "test-id".to_string(),
bucket: "test-bucket".to_string(),
object_prefix: None,
disk: None,
priority: HealChannelPriority::Normal,
scan_mode: None,
remove_corrupted: Some(false),
recreate_missing: Some(false),
update_parity: Some(false),
recursive: None,
dry_run: None,
timeout_seconds: None,
pool_index: None,
set_index: None,
force_start: true, // Should override the above false values
};
let heal_request = processor.convert_to_heal_request(channel_request).unwrap();
assert!(heal_request.options.remove_corrupted);
assert!(heal_request.options.recreate_missing);
assert!(heal_request.options.update_parity);
}
#[tokio::test]
async fn test_convert_to_heal_request_empty_object_prefix() {
let heal_manager = create_test_heal_manager();
let processor = HealChannelProcessor::new(heal_manager);
let channel_request = HealChannelRequest {
id: "test-id".to_string(),
bucket: "test-bucket".to_string(),
object_prefix: Some("".to_string()), // Empty prefix should be treated as bucket heal
disk: None,
priority: HealChannelPriority::Normal,
scan_mode: None,
remove_corrupted: None,
recreate_missing: None,
update_parity: None,
recursive: None,
dry_run: None,
timeout_seconds: None,
pool_index: None,
set_index: None,
force_start: false,
};
let heal_request = processor.convert_to_heal_request(channel_request).unwrap();
assert!(matches!(heal_request.heal_type, HealType::Bucket { .. }));
}
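// Editor-added sketch (not part of the original file): convert_to_heal_request
// checks `disk` before `object_prefix`, so a request carrying both should be
// treated as an erasure-set heal. This mirrors the scaffolding of the cases
// above and is illustrative only.
#[tokio::test]
async fn test_convert_to_heal_request_disk_takes_precedence() {
let heal_manager = create_test_heal_manager();
let processor = HealChannelProcessor::new(heal_manager);
let channel_request = HealChannelRequest {
id: "test-id".to_string(),
bucket: "test-bucket".to_string(),
object_prefix: Some("test-object".to_string()),
disk: Some("pool_0_set_1".to_string()),
priority: HealChannelPriority::Normal,
scan_mode: None,
remove_corrupted: None,
recreate_missing: None,
update_parity: None,
recursive: None,
dry_run: None,
timeout_seconds: None,
pool_index: None,
set_index: None,
force_start: false,
};
let heal_request = processor.convert_to_heal_request(channel_request).unwrap();
assert!(matches!(heal_request.heal_type, HealType::ErasureSet { .. }));
}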
}


@@ -0,0 +1,574 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::heal::{
progress::HealProgress,
resume::{CheckpointManager, ResumeManager, ResumeUtils},
storage::HealStorageAPI,
};
use crate::{Error, Result};
use futures::future::join_all;
use rustfs_common::heal_channel::{HealOpts, HealScanMode};
use rustfs_ecstore::disk::DiskStore;
use std::sync::Arc;
use tokio::sync::RwLock;
use tracing::{error, info, warn};
/// Erasure Set Healer
pub struct ErasureSetHealer {
storage: Arc<dyn HealStorageAPI>,
progress: Arc<RwLock<HealProgress>>,
cancel_token: tokio_util::sync::CancellationToken,
disk: DiskStore,
}
impl ErasureSetHealer {
pub fn new(
storage: Arc<dyn HealStorageAPI>,
progress: Arc<RwLock<HealProgress>>,
cancel_token: tokio_util::sync::CancellationToken,
disk: DiskStore,
) -> Self {
Self {
storage,
progress,
cancel_token,
disk,
}
}
/// execute erasure set heal with resume
#[tracing::instrument(skip(self, buckets), fields(set_disk_id = %set_disk_id, bucket_count = buckets.len()))]
pub async fn heal_erasure_set(&self, buckets: &[String], set_disk_id: &str) -> Result<()> {
info!("Starting erasure set heal");
// 1. generate or get task id
let task_id = self.get_or_create_task_id(set_disk_id).await?;
// 2. initialize or restore the resume state
let (resume_manager, checkpoint_manager) = self.initialize_resume_state(&task_id, set_disk_id, buckets).await?;
// 3. execute heal with resume
let result = self
.execute_heal_with_resume(buckets, &resume_manager, &checkpoint_manager)
.await;
// 4. cleanup resume state
if result.is_ok() {
if let Err(e) = resume_manager.cleanup().await {
warn!("Failed to cleanup resume state: {}", e);
}
if let Err(e) = checkpoint_manager.cleanup().await {
warn!("Failed to cleanup checkpoint: {}", e);
}
}
result
}
/// get or create task id
async fn get_or_create_task_id(&self, set_disk_id: &str) -> Result<String> {
// check if there are resumable tasks
let resumable_tasks = ResumeUtils::get_resumable_tasks(&self.disk).await?;
for task_id in resumable_tasks {
match ResumeManager::load_from_disk(self.disk.clone(), &task_id).await {
Ok(manager) => {
let state = manager.get_state().await;
if state.set_disk_id == set_disk_id && ResumeUtils::can_resume_task(&self.disk, &task_id).await {
info!("Found resumable task: {} for set {}", task_id, set_disk_id);
return Ok(task_id);
}
}
Err(e) => {
warn!("Failed to load resume state for task {}: {}", task_id, e);
}
}
}
// create new task id
let task_id = format!("{}_{}", set_disk_id, ResumeUtils::generate_task_id());
info!("Created new heal task: {}", task_id);
Ok(task_id)
}
/// initialize or restore the resume state
async fn initialize_resume_state(
&self,
task_id: &str,
set_disk_id: &str,
buckets: &[String],
) -> Result<(ResumeManager, CheckpointManager)> {
// check if resume state exists
if ResumeManager::has_resume_state(&self.disk, task_id).await {
info!("Loading existing resume state for task: {}", task_id);
let resume_manager = ResumeManager::load_from_disk(self.disk.clone(), task_id).await?;
let checkpoint_manager = if CheckpointManager::has_checkpoint(&self.disk, task_id).await {
CheckpointManager::load_from_disk(self.disk.clone(), task_id).await?
} else {
CheckpointManager::new(self.disk.clone(), task_id.to_string()).await?
};
Ok((resume_manager, checkpoint_manager))
} else {
info!("Creating new resume state for task: {}", task_id);
let resume_manager = ResumeManager::new(
self.disk.clone(),
task_id.to_string(),
"erasure_set".to_string(),
set_disk_id.to_string(),
buckets.to_vec(),
)
.await?;
let checkpoint_manager = CheckpointManager::new(self.disk.clone(), task_id.to_string()).await?;
Ok((resume_manager, checkpoint_manager))
}
}
/// execute heal with resume
async fn execute_heal_with_resume(
&self,
buckets: &[String],
resume_manager: &ResumeManager,
checkpoint_manager: &CheckpointManager,
) -> Result<()> {
// 1. get current state
let state = resume_manager.get_state().await;
let checkpoint = checkpoint_manager.get_checkpoint().await;
info!(
"Resuming from bucket {} object {}",
checkpoint.current_bucket_index, checkpoint.current_object_index
);
// 2. initialize progress
self.initialize_progress(buckets, &state).await;
// 3. continue from checkpoint
let current_bucket_index = checkpoint.current_bucket_index;
let mut current_object_index = checkpoint.current_object_index;
let mut processed_objects = state.processed_objects;
let mut successful_objects = state.successful_objects;
let mut failed_objects = state.failed_objects;
let mut skipped_objects = state.skipped_objects;
// 4. process remaining buckets
for (bucket_idx, bucket) in buckets.iter().enumerate().skip(current_bucket_index) {
// check if completed
if state.completed_buckets.contains(bucket) {
continue;
}
// update current bucket
resume_manager.set_current_item(Some(bucket.clone()), None).await?;
// process objects in bucket
let bucket_result = self
.heal_bucket_with_resume(
bucket,
bucket_idx,
&mut current_object_index,
&mut processed_objects,
&mut successful_objects,
&mut failed_objects,
&mut skipped_objects,
resume_manager,
checkpoint_manager,
)
.await;
// update checkpoint position
checkpoint_manager.update_position(bucket_idx, current_object_index).await?;
// update progress
resume_manager
.update_progress(processed_objects, successful_objects, failed_objects, skipped_objects)
.await?;
// check cancel status
if self.cancel_token.is_cancelled() {
warn!("Heal task cancelled");
return Err(Error::TaskCancelled);
}
// process bucket result
match bucket_result {
Ok(_) => {
resume_manager.complete_bucket(bucket).await?;
info!("Completed heal for bucket: {}", bucket);
}
Err(e) => {
error!("Failed to heal bucket {}: {}", bucket, e);
// continue to next bucket, do not interrupt the whole process
}
}
// reset object index
current_object_index = 0;
}
// 5. mark task completed
resume_manager.mark_completed().await?;
info!("Erasure set heal completed successfully");
Ok(())
}
/// heal single bucket with resume
#[allow(clippy::too_many_arguments)]
#[tracing::instrument(skip(self, current_object_index, processed_objects, successful_objects, failed_objects, _skipped_objects, resume_manager, checkpoint_manager), fields(bucket = %bucket, bucket_index = bucket_index))]
async fn heal_bucket_with_resume(
&self,
bucket: &str,
bucket_index: usize,
current_object_index: &mut usize,
processed_objects: &mut u64,
successful_objects: &mut u64,
failed_objects: &mut u64,
_skipped_objects: &mut u64,
resume_manager: &ResumeManager,
checkpoint_manager: &CheckpointManager,
) -> Result<()> {
info!(target: "rustfs:ahm:heal_bucket_with_resume", "Starting heal for bucket from object index {}", current_object_index);
// 1. get bucket info
let _bucket_info = match self.storage.get_bucket_info(bucket).await? {
Some(info) => info,
None => {
warn!("Bucket {} not found, skipping", bucket);
return Ok(());
}
};
// 2. process objects with pagination to avoid loading all objects into memory
let mut continuation_token: Option<String> = None;
let mut global_obj_idx = 0usize;
loop {
// Get one page of objects
let (objects, next_token, is_truncated) = self
.storage
.list_objects_for_heal_page(bucket, "", continuation_token.as_deref())
.await?;
// Process objects in this page
for object in objects {
// Skip objects before the checkpoint
if global_obj_idx < *current_object_index {
global_obj_idx += 1;
continue;
}
// check if already processed
if checkpoint_manager.get_checkpoint().await.processed_objects.contains(&object) {
global_obj_idx += 1;
continue;
}
// update current object
resume_manager
.set_current_item(Some(bucket.to_string()), Some(object.clone()))
.await?;
// Check if object still exists before attempting heal
let object_exists = match self.storage.object_exists(bucket, &object).await {
Ok(exists) => exists,
Err(e) => {
warn!("Failed to check existence of {}/{}: {}, marking as failed", bucket, object, e);
*failed_objects += 1;
checkpoint_manager.add_failed_object(object.clone()).await?;
global_obj_idx += 1;
*current_object_index = global_obj_idx;
continue;
}
};
if !object_exists {
info!(
target: "rustfs:ahm:heal_bucket_with_resume" ,"Object {}/{} no longer exists, skipping heal (likely deleted intentionally)",
bucket, object
);
checkpoint_manager.add_processed_object(object.clone()).await?;
*successful_objects += 1; // Treat as successful - object is gone as intended
global_obj_idx += 1;
*current_object_index = global_obj_idx;
continue;
}
// heal object
let heal_opts = HealOpts {
scan_mode: HealScanMode::Normal,
remove: true,
recreate: true, // Keep recreate enabled for legitimate heal scenarios
..Default::default()
};
match self.storage.heal_object(bucket, &object, None, &heal_opts).await {
Ok((_result, None)) => {
*successful_objects += 1;
checkpoint_manager.add_processed_object(object.clone()).await?;
info!("Successfully healed object {}/{}", bucket, object);
}
Ok((_, Some(err))) => {
*failed_objects += 1;
checkpoint_manager.add_failed_object(object.clone()).await?;
warn!("Failed to heal object {}/{}: {}", bucket, object, err);
}
Err(err) => {
*failed_objects += 1;
checkpoint_manager.add_failed_object(object.clone()).await?;
warn!("Error healing object {}/{}: {}", bucket, object, err);
}
}
*processed_objects += 1;
global_obj_idx += 1;
*current_object_index = global_obj_idx;
// check cancel status
if self.cancel_token.is_cancelled() {
info!("Heal task cancelled during object processing");
return Err(Error::TaskCancelled);
}
// save checkpoint periodically
if global_obj_idx % 100 == 0 {
checkpoint_manager
.update_position(bucket_index, *current_object_index)
.await?;
}
}
// Check if there are more pages
if !is_truncated {
break;
}
continuation_token = next_token;
if continuation_token.is_none() {
warn!("List is truncated but no continuation token provided for {}", bucket);
break;
}
}
Ok(())
}
/// initialize progress tracking
async fn initialize_progress(&self, _buckets: &[String], state: &crate::heal::resume::ResumeState) {
let mut progress = self.progress.write().await;
progress.objects_scanned = state.total_objects;
progress.objects_healed = state.successful_objects;
progress.objects_failed = state.failed_objects;
progress.bytes_processed = 0; // set to 0 for now, can be extended later
progress.set_current_object(state.current_object.clone());
}
/// heal all buckets concurrently
#[allow(dead_code)]
async fn heal_buckets_concurrently(&self, buckets: &[String]) -> Vec<Result<()>> {
// use semaphore to control concurrency, avoid too many concurrent healings
let semaphore = Arc::new(tokio::sync::Semaphore::new(4)); // max 4 concurrent healings
let heal_futures = buckets.iter().map(|bucket| {
let bucket = bucket.clone();
let storage = self.storage.clone();
let progress = self.progress.clone();
let semaphore = semaphore.clone();
let cancel_token = self.cancel_token.clone();
async move {
let _permit = semaphore
.acquire()
.await
.map_err(|e| Error::other(format!("Failed to acquire semaphore for bucket heal: {e}")))?;
if cancel_token.is_cancelled() {
return Err(Error::TaskCancelled);
}
Self::heal_single_bucket(&storage, &bucket, &progress).await
}
});
// use join_all to process concurrently
join_all(heal_futures).await
}
/// heal single bucket
#[allow(dead_code)]
async fn heal_single_bucket(
storage: &Arc<dyn HealStorageAPI>,
bucket: &str,
progress: &Arc<RwLock<HealProgress>>,
) -> Result<()> {
info!("Starting heal for bucket: {}", bucket);
// 1. get bucket info
let _bucket_info = match storage.get_bucket_info(bucket).await? {
Some(info) => info,
None => {
warn!("Bucket {} not found, skipping", bucket);
return Ok(());
}
};
// 2. process objects with pagination to avoid loading all objects into memory
let mut continuation_token: Option<String> = None;
let mut total_scanned = 0u64;
let mut total_success = 0u64;
let mut total_failed = 0u64;
let heal_opts = HealOpts {
scan_mode: HealScanMode::Normal,
remove: true, // remove corrupted data
recreate: true, // recreate missing data
..Default::default()
};
loop {
// Get one page of objects
let (objects, next_token, is_truncated) = storage
.list_objects_for_heal_page(bucket, "", continuation_token.as_deref())
.await?;
let page_count = objects.len() as u64;
total_scanned += page_count;
// 3. update progress
{
let mut p = progress.write().await;
p.objects_scanned = total_scanned;
}
// 4. heal objects concurrently for this page
let object_results = Self::heal_objects_concurrently(storage, bucket, &objects, &heal_opts, progress).await;
// 5. count results for this page
let (success_count, failure_count) =
object_results
.into_iter()
.fold((0, 0), |(success, failure), result| match result {
Ok(_) => (success + 1, failure),
Err(_) => (success, failure + 1),
});
total_success += success_count;
total_failed += failure_count;
// 6. update progress
{
let mut p = progress.write().await;
p.objects_healed = total_success;
p.objects_failed = total_failed;
p.set_current_object(Some(format!("processing bucket: {bucket} (page)")));
}
// Check if there are more pages
if !is_truncated {
break;
}
continuation_token = next_token;
if continuation_token.is_none() {
warn!("List is truncated but no continuation token provided for {}", bucket);
break;
}
}
// 7. final progress update
{
let mut p = progress.write().await;
p.set_current_object(Some(format!("completed bucket: {bucket}")));
}
info!(
"Completed heal for bucket {}: {} success, {} failures (total scanned: {})",
bucket, total_success, total_failed, total_scanned
);
Ok(())
}
/// heal objects concurrently
#[allow(dead_code)]
async fn heal_objects_concurrently(
storage: &Arc<dyn HealStorageAPI>,
bucket: &str,
objects: &[String],
heal_opts: &HealOpts,
_progress: &Arc<RwLock<HealProgress>>,
) -> Vec<Result<()>> {
// use semaphore to control object healing concurrency
let semaphore = Arc::new(tokio::sync::Semaphore::new(8)); // max 8 concurrent object healings
let heal_futures = objects.iter().map(|object| {
let object = object.clone();
let bucket = bucket.to_string();
let storage = storage.clone();
let heal_opts = *heal_opts;
let semaphore = semaphore.clone();
async move {
let _permit = semaphore
.acquire()
.await
.map_err(|e| Error::other(format!("Failed to acquire semaphore for object heal: {e}")))?;
match storage.heal_object(&bucket, &object, None, &heal_opts).await {
Ok((_result, None)) => {
info!("Successfully healed object {}/{}", bucket, object);
Ok(())
}
Ok((_, Some(err))) => {
warn!("Failed to heal object {}/{}: {}", bucket, object, err);
Err(Error::other(err))
}
Err(err) => {
warn!("Error healing object {}/{}: {}", bucket, object, err);
Err(err)
}
}
}
});
join_all(heal_futures).await
}
/// process results
#[allow(dead_code)]
async fn process_results(&self, results: Vec<Result<()>>) -> Result<()> {
let (success_count, failure_count): (usize, usize) =
results.into_iter().fold((0, 0), |(success, failure), result| match result {
Ok(_) => (success + 1, failure),
Err(_) => (success, failure + 1),
});
let total = success_count + failure_count;
info!("Erasure set heal completed: {}/{} buckets successful", success_count, total);
if failure_count > 0 {
warn!("{} buckets failed to heal", failure_count);
return Err(Error::other(format!("{failure_count} buckets failed to heal")));
}
Ok(())
}
}
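// Editor-added sketch (not part of the original file): one plausible way a
// caller could drive ErasureSetHealer, assuming it already holds the storage
// handle and the DiskStore for the set being healed. Illustrative only.
#[allow(dead_code)]
async fn run_erasure_set_heal_example(
storage: Arc<dyn HealStorageAPI>,
disk: DiskStore,
set_disk_id: &str,
buckets: Vec<String>,
) -> Result<()> {
let progress = Arc::new(RwLock::new(HealProgress::new()));
let cancel_token = tokio_util::sync::CancellationToken::new();
let healer = ErasureSetHealer::new(storage, progress, cancel_token, disk);
healer.heal_erasure_set(&buckets, set_disk_id).await
}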


@@ -0,0 +1,682 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::heal::{HealOptions, HealPriority, HealRequest, HealType};
use crate::{Error, Result};
use rustfs_ecstore::disk::endpoint::Endpoint;
use serde::{Deserialize, Serialize};
use std::time::SystemTime;
/// Corruption type
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum CorruptionType {
/// Data corruption
DataCorruption,
/// Metadata corruption
MetadataCorruption,
/// Partial corruption
PartialCorruption,
/// Complete corruption
CompleteCorruption,
}
/// Severity level
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub enum Severity {
/// Low severity
Low = 0,
/// Medium severity
Medium = 1,
/// High severity
High = 2,
/// Critical severity
Critical = 3,
}
/// Heal event
#[derive(Debug, Clone)]
pub enum HealEvent {
/// Object corruption event
ObjectCorruption {
bucket: String,
object: String,
version_id: Option<String>,
corruption_type: CorruptionType,
severity: Severity,
},
/// Object missing event
ObjectMissing {
bucket: String,
object: String,
version_id: Option<String>,
expected_locations: Vec<usize>,
available_locations: Vec<usize>,
},
/// Metadata corruption event
MetadataCorruption {
bucket: String,
object: String,
corruption_type: CorruptionType,
},
/// Disk status change event
DiskStatusChange {
endpoint: Endpoint,
old_status: String,
new_status: String,
},
/// EC decode failure event
ECDecodeFailure {
bucket: String,
object: String,
version_id: Option<String>,
missing_shards: Vec<usize>,
available_shards: Vec<usize>,
},
/// Checksum mismatch event
ChecksumMismatch {
bucket: String,
object: String,
version_id: Option<String>,
expected_checksum: String,
actual_checksum: String,
},
/// Bucket metadata corruption event
BucketMetadataCorruption {
bucket: String,
corruption_type: CorruptionType,
},
/// MRF metadata corruption event
MRFMetadataCorruption {
meta_path: String,
corruption_type: CorruptionType,
},
}
impl HealEvent {
/// Convert HealEvent to HealRequest
pub fn to_heal_request(&self) -> Result<HealRequest> {
match self {
HealEvent::ObjectCorruption {
bucket,
object,
version_id,
severity,
..
} => Ok(HealRequest::new(
HealType::Object {
bucket: bucket.clone(),
object: object.clone(),
version_id: version_id.clone(),
},
HealOptions::default(),
Self::severity_to_priority(severity),
)),
HealEvent::ObjectMissing {
bucket,
object,
version_id,
..
} => Ok(HealRequest::new(
HealType::Object {
bucket: bucket.clone(),
object: object.clone(),
version_id: version_id.clone(),
},
HealOptions::default(),
HealPriority::High,
)),
HealEvent::MetadataCorruption { bucket, object, .. } => Ok(HealRequest::new(
HealType::Metadata {
bucket: bucket.clone(),
object: object.clone(),
},
HealOptions::default(),
HealPriority::High,
)),
HealEvent::DiskStatusChange { endpoint, .. } => {
// Convert disk status change to erasure set heal
// Note: building the bucket list requires storage access, which is not available here;
// the caller must populate the bucket list or retrieve it through another path
let set_disk_id = crate::heal::utils::format_set_disk_id_from_i32(endpoint.pool_idx, endpoint.set_idx)
.ok_or_else(|| Error::InvalidHealType {
heal_type: format!("erasure-set(pool={}, set={})", endpoint.pool_idx, endpoint.set_idx),
})?;
Ok(HealRequest::new(
HealType::ErasureSet {
buckets: vec![], // Empty bucket list - caller should populate this
set_disk_id,
},
HealOptions::default(),
HealPriority::High,
))
}
HealEvent::ECDecodeFailure {
bucket,
object,
version_id,
..
} => Ok(HealRequest::new(
HealType::ECDecode {
bucket: bucket.clone(),
object: object.clone(),
version_id: version_id.clone(),
},
HealOptions::default(),
HealPriority::Urgent,
)),
HealEvent::ChecksumMismatch {
bucket,
object,
version_id,
..
} => Ok(HealRequest::new(
HealType::Object {
bucket: bucket.clone(),
object: object.clone(),
version_id: version_id.clone(),
},
HealOptions::default(),
HealPriority::High,
)),
HealEvent::BucketMetadataCorruption { bucket, .. } => Ok(HealRequest::new(
HealType::Bucket { bucket: bucket.clone() },
HealOptions::default(),
HealPriority::High,
)),
HealEvent::MRFMetadataCorruption { meta_path, .. } => Ok(HealRequest::new(
HealType::MRF {
meta_path: meta_path.clone(),
},
HealOptions::default(),
HealPriority::High,
)),
}
}
/// Convert severity to priority
fn severity_to_priority(severity: &Severity) -> HealPriority {
match severity {
Severity::Low => HealPriority::Low,
Severity::Medium => HealPriority::Normal,
Severity::High => HealPriority::High,
Severity::Critical => HealPriority::Urgent,
}
}
/// Get event description
pub fn description(&self) -> String {
match self {
HealEvent::ObjectCorruption {
bucket,
object,
corruption_type,
..
} => {
format!("Object corruption detected: {bucket}/{object} - {corruption_type:?}")
}
HealEvent::ObjectMissing { bucket, object, .. } => {
format!("Object missing: {bucket}/{object}")
}
HealEvent::MetadataCorruption {
bucket,
object,
corruption_type,
..
} => {
format!("Metadata corruption: {bucket}/{object} - {corruption_type:?}")
}
HealEvent::DiskStatusChange {
endpoint,
old_status,
new_status,
..
} => {
format!("Disk status changed: {endpoint:?} {old_status} -> {new_status}")
}
HealEvent::ECDecodeFailure {
bucket,
object,
missing_shards,
..
} => {
format!("EC decode failure: {bucket}/{object} - missing shards: {missing_shards:?}")
}
HealEvent::ChecksumMismatch {
bucket,
object,
expected_checksum,
actual_checksum,
..
} => {
format!("Checksum mismatch: {bucket}/{object} - expected: {expected_checksum}, actual: {actual_checksum}")
}
HealEvent::BucketMetadataCorruption {
bucket, corruption_type, ..
} => {
format!("Bucket metadata corruption: {bucket} - {corruption_type:?}")
}
HealEvent::MRFMetadataCorruption {
meta_path,
corruption_type,
..
} => {
format!("MRF metadata corruption: {meta_path} - {corruption_type:?}")
}
}
}
/// Get event severity
pub fn severity(&self) -> Severity {
match self {
HealEvent::ObjectCorruption { severity, .. } => severity.clone(),
HealEvent::ObjectMissing { .. } => Severity::High,
HealEvent::MetadataCorruption { .. } => Severity::High,
HealEvent::DiskStatusChange { .. } => Severity::High,
HealEvent::ECDecodeFailure { .. } => Severity::Critical,
HealEvent::ChecksumMismatch { .. } => Severity::High,
HealEvent::BucketMetadataCorruption { .. } => Severity::High,
HealEvent::MRFMetadataCorruption { .. } => Severity::High,
}
}
/// Get event timestamp (no timestamp is stored on the event; this returns the current time)
pub fn timestamp(&self) -> SystemTime {
SystemTime::now()
}
}
/// Heal event handler
pub struct HealEventHandler {
/// Event queue
events: Vec<HealEvent>,
/// Maximum number of events
max_events: usize,
}
impl HealEventHandler {
pub fn new(max_events: usize) -> Self {
Self {
events: Vec::new(),
max_events,
}
}
/// Add event
pub fn add_event(&mut self, event: HealEvent) {
if self.events.len() >= self.max_events {
// Remove oldest event
self.events.remove(0);
}
self.events.push(event);
}
/// Get all events
pub fn get_events(&self) -> &[HealEvent] {
&self.events
}
/// Clear events
pub fn clear_events(&mut self) {
self.events.clear();
}
/// Get event count
pub fn event_count(&self) -> usize {
self.events.len()
}
/// Filter events by severity
pub fn filter_by_severity(&self, min_severity: Severity) -> Vec<&HealEvent> {
self.events.iter().filter(|event| event.severity() >= min_severity).collect()
}
/// Filter events by type
pub fn filter_by_type(&self, event_type: &str) -> Vec<&HealEvent> {
self.events
.iter()
.filter(|event| match event {
HealEvent::ObjectCorruption { .. } => event_type == "ObjectCorruption",
HealEvent::ObjectMissing { .. } => event_type == "ObjectMissing",
HealEvent::MetadataCorruption { .. } => event_type == "MetadataCorruption",
HealEvent::DiskStatusChange { .. } => event_type == "DiskStatusChange",
HealEvent::ECDecodeFailure { .. } => event_type == "ECDecodeFailure",
HealEvent::ChecksumMismatch { .. } => event_type == "ChecksumMismatch",
HealEvent::BucketMetadataCorruption { .. } => event_type == "BucketMetadataCorruption",
HealEvent::MRFMetadataCorruption { .. } => event_type == "MRFMetadataCorruption",
})
.collect()
}
}
impl Default for HealEventHandler {
fn default() -> Self {
Self::new(1000)
}
}
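// Editor-added sketch (not part of the original file): feeding an observed
// event into the handler and deriving a heal request from it. `submit` is a
// hypothetical callback standing in for whatever scheduler consumes requests.
#[allow(dead_code)]
fn record_event_and_build_request(
handler: &mut HealEventHandler,
event: HealEvent,
submit: impl FnOnce(HealRequest),
) -> Result<()> {
let request = event.to_heal_request()?;
handler.add_event(event);
submit(request);
Ok(())
}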
#[cfg(test)]
mod tests {
use super::*;
use crate::heal::task::{HealPriority, HealType};
#[test]
fn test_heal_event_object_corruption_to_request() {
let event = HealEvent::ObjectCorruption {
bucket: "test-bucket".to_string(),
object: "test-object".to_string(),
version_id: None,
corruption_type: CorruptionType::DataCorruption,
severity: Severity::High,
};
let request = event.to_heal_request().unwrap();
assert!(matches!(request.heal_type, HealType::Object { .. }));
assert_eq!(request.priority, HealPriority::High);
}
#[test]
fn test_heal_event_object_missing_to_request() {
let event = HealEvent::ObjectMissing {
bucket: "test-bucket".to_string(),
object: "test-object".to_string(),
version_id: Some("v1".to_string()),
expected_locations: vec![0, 1],
available_locations: vec![2, 3],
};
let request = event.to_heal_request().unwrap();
assert!(matches!(request.heal_type, HealType::Object { .. }));
assert_eq!(request.priority, HealPriority::High);
}
#[test]
fn test_heal_event_metadata_corruption_to_request() {
let event = HealEvent::MetadataCorruption {
bucket: "test-bucket".to_string(),
object: "test-object".to_string(),
corruption_type: CorruptionType::MetadataCorruption,
};
let request = event.to_heal_request().unwrap();
assert!(matches!(request.heal_type, HealType::Metadata { .. }));
assert_eq!(request.priority, HealPriority::High);
}
#[test]
fn test_heal_event_ec_decode_failure_to_request() {
let event = HealEvent::ECDecodeFailure {
bucket: "test-bucket".to_string(),
object: "test-object".to_string(),
version_id: None,
missing_shards: vec![0, 1],
available_shards: vec![2, 3, 4],
};
let request = event.to_heal_request().unwrap();
assert!(matches!(request.heal_type, HealType::ECDecode { .. }));
assert_eq!(request.priority, HealPriority::Urgent);
}
#[test]
fn test_heal_event_checksum_mismatch_to_request() {
let event = HealEvent::ChecksumMismatch {
bucket: "test-bucket".to_string(),
object: "test-object".to_string(),
version_id: None,
expected_checksum: "abc123".to_string(),
actual_checksum: "def456".to_string(),
};
let request = event.to_heal_request().unwrap();
assert!(matches!(request.heal_type, HealType::Object { .. }));
assert_eq!(request.priority, HealPriority::High);
}
#[test]
fn test_heal_event_bucket_metadata_corruption_to_request() {
let event = HealEvent::BucketMetadataCorruption {
bucket: "test-bucket".to_string(),
corruption_type: CorruptionType::MetadataCorruption,
};
let request = event.to_heal_request().unwrap();
assert!(matches!(request.heal_type, HealType::Bucket { .. }));
assert_eq!(request.priority, HealPriority::High);
}
#[test]
fn test_heal_event_mrf_metadata_corruption_to_request() {
let event = HealEvent::MRFMetadataCorruption {
meta_path: "test-bucket/test-object".to_string(),
corruption_type: CorruptionType::MetadataCorruption,
};
let request = event.to_heal_request().unwrap();
assert!(matches!(request.heal_type, HealType::MRF { .. }));
assert_eq!(request.priority, HealPriority::High);
}
#[test]
fn test_heal_event_severity_to_priority() {
let event_low = HealEvent::ObjectCorruption {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
corruption_type: CorruptionType::DataCorruption,
severity: Severity::Low,
};
let request = event_low.to_heal_request().unwrap();
assert_eq!(request.priority, HealPriority::Low);
let event_medium = HealEvent::ObjectCorruption {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
corruption_type: CorruptionType::DataCorruption,
severity: Severity::Medium,
};
let request = event_medium.to_heal_request().unwrap();
assert_eq!(request.priority, HealPriority::Normal);
let event_high = HealEvent::ObjectCorruption {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
corruption_type: CorruptionType::DataCorruption,
severity: Severity::High,
};
let request = event_high.to_heal_request().unwrap();
assert_eq!(request.priority, HealPriority::High);
let event_critical = HealEvent::ObjectCorruption {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
corruption_type: CorruptionType::DataCorruption,
severity: Severity::Critical,
};
let request = event_critical.to_heal_request().unwrap();
assert_eq!(request.priority, HealPriority::Urgent);
}
#[test]
fn test_heal_event_description() {
let event = HealEvent::ObjectCorruption {
bucket: "test-bucket".to_string(),
object: "test-object".to_string(),
version_id: None,
corruption_type: CorruptionType::DataCorruption,
severity: Severity::High,
};
let desc = event.description();
assert!(desc.contains("Object corruption detected"));
assert!(desc.contains("test-bucket/test-object"));
assert!(desc.contains("DataCorruption"));
}
#[test]
fn test_heal_event_severity() {
let event = HealEvent::ECDecodeFailure {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
missing_shards: vec![],
available_shards: vec![],
};
assert_eq!(event.severity(), Severity::Critical);
let event = HealEvent::ObjectMissing {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
expected_locations: vec![],
available_locations: vec![],
};
assert_eq!(event.severity(), Severity::High);
}
#[test]
fn test_heal_event_handler_new() {
let handler = HealEventHandler::new(10);
assert_eq!(handler.event_count(), 0);
assert_eq!(handler.max_events, 10);
}
#[test]
fn test_heal_event_handler_default() {
let handler = HealEventHandler::default();
assert_eq!(handler.max_events, 1000);
}
#[test]
fn test_heal_event_handler_add_event() {
let mut handler = HealEventHandler::new(3);
let event = HealEvent::ObjectCorruption {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
corruption_type: CorruptionType::DataCorruption,
severity: Severity::High,
};
handler.add_event(event.clone());
assert_eq!(handler.event_count(), 1);
handler.add_event(event.clone());
handler.add_event(event.clone());
assert_eq!(handler.event_count(), 3);
}
#[test]
fn test_heal_event_handler_max_events() {
let mut handler = HealEventHandler::new(2);
let event = HealEvent::ObjectCorruption {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
corruption_type: CorruptionType::DataCorruption,
severity: Severity::High,
};
handler.add_event(event.clone());
handler.add_event(event.clone());
handler.add_event(event.clone()); // Should remove oldest
assert_eq!(handler.event_count(), 2);
}
#[test]
fn test_heal_event_handler_get_events() {
let mut handler = HealEventHandler::new(10);
let event = HealEvent::ObjectCorruption {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
corruption_type: CorruptionType::DataCorruption,
severity: Severity::High,
};
handler.add_event(event.clone());
handler.add_event(event.clone());
let events = handler.get_events();
assert_eq!(events.len(), 2);
}
#[test]
fn test_heal_event_handler_clear_events() {
let mut handler = HealEventHandler::new(10);
let event = HealEvent::ObjectCorruption {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
corruption_type: CorruptionType::DataCorruption,
severity: Severity::High,
};
handler.add_event(event);
assert_eq!(handler.event_count(), 1);
handler.clear_events();
assert_eq!(handler.event_count(), 0);
}
#[test]
fn test_heal_event_handler_filter_by_severity() {
let mut handler = HealEventHandler::new(10);
handler.add_event(HealEvent::ObjectCorruption {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
corruption_type: CorruptionType::DataCorruption,
severity: Severity::Low,
});
handler.add_event(HealEvent::ECDecodeFailure {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
missing_shards: vec![],
available_shards: vec![],
});
let high_severity = handler.filter_by_severity(Severity::High);
assert_eq!(high_severity.len(), 1); // Only ECDecodeFailure (Critical) meets the High threshold
}
#[test]
fn test_heal_event_handler_filter_by_type() {
let mut handler = HealEventHandler::new(10);
handler.add_event(HealEvent::ObjectCorruption {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
corruption_type: CorruptionType::DataCorruption,
severity: Severity::High,
});
handler.add_event(HealEvent::ObjectMissing {
bucket: "test".to_string(),
object: "test".to_string(),
version_id: None,
expected_locations: vec![],
available_locations: vec![],
});
let corruption_events = handler.filter_by_type("ObjectCorruption");
assert_eq!(corruption_events.len(), 1);
let missing_events = handler.filter_by_type("ObjectMissing");
assert_eq!(missing_events.len(), 1);
}
}

File diff suppressed because it is too large.


@@ -12,12 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.
mod components;
mod route;
mod utils;
mod views;
pub mod channel;
pub mod erasure_healer;
pub mod event;
pub mod manager;
pub mod progress;
pub mod resume;
pub mod storage;
pub mod task;
pub mod utils;
fn main() {
let _worker_guard = utils::init_logger();
dioxus::launch(views::App);
}
pub use erasure_healer::ErasureSetHealer;
pub use manager::HealManager;
pub use resume::{CheckpointManager, ResumeCheckpoint, ResumeManager, ResumeState, ResumeUtils};
pub use task::{HealOptions, HealPriority, HealRequest, HealTask, HealType};


@@ -0,0 +1,389 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use serde::{Deserialize, Serialize};
use std::time::SystemTime;
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct HealProgress {
/// Objects scanned
pub objects_scanned: u64,
/// Objects healed
pub objects_healed: u64,
/// Objects failed
pub objects_failed: u64,
/// Bytes processed
pub bytes_processed: u64,
/// Current object
pub current_object: Option<String>,
/// Progress percentage
pub progress_percentage: f64,
/// Start time
pub start_time: Option<SystemTime>,
/// Last update time
pub last_update_time: Option<SystemTime>,
/// Estimated completion time
pub estimated_completion_time: Option<SystemTime>,
}
impl HealProgress {
pub fn new() -> Self {
Self {
start_time: Some(SystemTime::now()),
last_update_time: Some(SystemTime::now()),
..Default::default()
}
}
pub fn update_progress(&mut self, scanned: u64, healed: u64, failed: u64, bytes: u64) {
self.objects_scanned = scanned;
self.objects_healed = healed;
self.objects_failed = failed;
self.bytes_processed = bytes;
self.last_update_time = Some(SystemTime::now());
// calculate progress percentage
let total = scanned + healed + failed;
if total > 0 {
self.progress_percentage = (healed as f64 / total as f64) * 100.0;
}
}
pub fn set_current_object(&mut self, object: Option<String>) {
self.current_object = object;
self.last_update_time = Some(SystemTime::now());
}
pub fn is_completed(&self) -> bool {
self.progress_percentage >= 100.0
|| self.objects_scanned > 0 && self.objects_healed + self.objects_failed >= self.objects_scanned
}
pub fn get_success_rate(&self) -> f64 {
let total = self.objects_healed + self.objects_failed;
if total > 0 {
(self.objects_healed as f64 / total as f64) * 100.0
} else {
0.0
}
}
}
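// Editor-added sketch (not part of the original file): deriving a rough ETA
// from the counters above. HealProgress does not populate
// estimated_completion_time itself; this shows one way a caller might.
#[allow(dead_code)]
fn estimate_completion(progress: &HealProgress, total_objects: u64) -> Option<SystemTime> {
let start = progress.start_time?;
let now = progress.last_update_time?;
let elapsed = now.duration_since(start).ok()?;
let done = progress.objects_healed + progress.objects_failed;
if done == 0 || total_objects <= done {
return None;
}
let per_object = elapsed.div_f64(done as f64);
Some(now + per_object.mul_f64((total_objects - done) as f64))
}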
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HealStatistics {
/// Total heal tasks
pub total_tasks: u64,
/// Successful tasks
pub successful_tasks: u64,
/// Failed tasks
pub failed_tasks: u64,
/// Running tasks
pub running_tasks: u64,
/// Total healed objects
pub total_objects_healed: u64,
/// Total healed bytes
pub total_bytes_healed: u64,
/// Last update time
pub last_update_time: SystemTime,
}
impl Default for HealStatistics {
fn default() -> Self {
Self::new()
}
}
impl HealStatistics {
pub fn new() -> Self {
Self {
total_tasks: 0,
successful_tasks: 0,
failed_tasks: 0,
running_tasks: 0,
total_objects_healed: 0,
total_bytes_healed: 0,
last_update_time: SystemTime::now(),
}
}
pub fn update_task_completion(&mut self, success: bool) {
if success {
self.successful_tasks += 1;
} else {
self.failed_tasks += 1;
}
self.last_update_time = SystemTime::now();
}
pub fn update_running_tasks(&mut self, count: u64) {
self.running_tasks = count;
self.last_update_time = SystemTime::now();
}
pub fn add_healed_objects(&mut self, count: u64, bytes: u64) {
self.total_objects_healed += count;
self.total_bytes_healed += bytes;
self.last_update_time = SystemTime::now();
}
pub fn get_success_rate(&self) -> f64 {
let total = self.successful_tasks + self.failed_tasks;
if total > 0 {
(self.successful_tasks as f64 / total as f64) * 100.0
} else {
0.0
}
}
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_heal_progress_new() {
let progress = HealProgress::new();
assert_eq!(progress.objects_scanned, 0);
assert_eq!(progress.objects_healed, 0);
assert_eq!(progress.objects_failed, 0);
assert_eq!(progress.bytes_processed, 0);
assert_eq!(progress.progress_percentage, 0.0);
assert!(progress.start_time.is_some());
assert!(progress.last_update_time.is_some());
assert!(progress.current_object.is_none());
}
#[test]
fn test_heal_progress_update_progress() {
let mut progress = HealProgress::new();
progress.update_progress(10, 8, 2, 1024);
assert_eq!(progress.objects_scanned, 10);
assert_eq!(progress.objects_healed, 8);
assert_eq!(progress.objects_failed, 2);
assert_eq!(progress.bytes_processed, 1024);
// Progress percentage should be calculated based on healed/total
// total = scanned + healed + failed = 10 + 8 + 2 = 20
// healed/total = 8/20 = 0.4 = 40%
assert!((progress.progress_percentage - 40.0).abs() < 0.001);
assert!(progress.last_update_time.is_some());
}
#[test]
fn test_heal_progress_update_progress_zero_total() {
let mut progress = HealProgress::new();
progress.update_progress(0, 0, 0, 0);
assert_eq!(progress.progress_percentage, 0.0);
}
#[test]
fn test_heal_progress_update_progress_all_healed() {
let mut progress = HealProgress::new();
// When scanned=0, healed=10, failed=0: total=10, progress = 10/10 = 100%
progress.update_progress(0, 10, 0, 2048);
// All healed, should be 100%
assert!((progress.progress_percentage - 100.0).abs() < 0.001);
}
#[test]
fn test_heal_progress_set_current_object() {
let mut progress = HealProgress::new();
let initial_time = progress.last_update_time;
// Small delay to ensure time difference
std::thread::sleep(std::time::Duration::from_millis(10));
progress.set_current_object(Some("test-bucket/test-object".to_string()));
assert_eq!(progress.current_object, Some("test-bucket/test-object".to_string()));
assert!(progress.last_update_time.is_some());
// last_update_time should be updated
assert_ne!(progress.last_update_time, initial_time);
}
#[test]
fn test_heal_progress_set_current_object_none() {
let mut progress = HealProgress::new();
progress.set_current_object(Some("test".to_string()));
progress.set_current_object(None);
assert!(progress.current_object.is_none());
}
#[test]
fn test_heal_progress_is_completed_by_percentage() {
let mut progress = HealProgress::new();
progress.update_progress(10, 10, 0, 1024);
assert!(progress.is_completed());
}
#[test]
fn test_heal_progress_is_completed_by_processed() {
let mut progress = HealProgress::new();
progress.objects_scanned = 10;
progress.objects_healed = 8;
progress.objects_failed = 2;
// healed + failed = 8 + 2 = 10 >= scanned = 10
assert!(progress.is_completed());
}
#[test]
fn test_heal_progress_is_not_completed() {
let mut progress = HealProgress::new();
progress.objects_scanned = 10;
progress.objects_healed = 5;
progress.objects_failed = 2;
// healed + failed = 5 + 2 = 7 < scanned = 10
assert!(!progress.is_completed());
}
#[test]
fn test_heal_progress_get_success_rate() {
let mut progress = HealProgress::new();
progress.objects_healed = 8;
progress.objects_failed = 2;
// success_rate = 8 / (8 + 2) * 100 = 80%
assert!((progress.get_success_rate() - 80.0).abs() < 0.001);
}
#[test]
fn test_heal_progress_get_success_rate_zero_total() {
let progress = HealProgress::new();
// No healed or failed objects
assert_eq!(progress.get_success_rate(), 0.0);
}
#[test]
fn test_heal_progress_get_success_rate_all_success() {
let mut progress = HealProgress::new();
progress.objects_healed = 10;
progress.objects_failed = 0;
assert!((progress.get_success_rate() - 100.0).abs() < 0.001);
}
#[test]
fn test_heal_statistics_new() {
let stats = HealStatistics::new();
assert_eq!(stats.total_tasks, 0);
assert_eq!(stats.successful_tasks, 0);
assert_eq!(stats.failed_tasks, 0);
assert_eq!(stats.running_tasks, 0);
assert_eq!(stats.total_objects_healed, 0);
assert_eq!(stats.total_bytes_healed, 0);
}
#[test]
fn test_heal_statistics_default() {
let stats = HealStatistics::default();
assert_eq!(stats.total_tasks, 0);
assert_eq!(stats.successful_tasks, 0);
assert_eq!(stats.failed_tasks, 0);
}
#[test]
fn test_heal_statistics_update_task_completion_success() {
let mut stats = HealStatistics::new();
let initial_time = stats.last_update_time;
std::thread::sleep(std::time::Duration::from_millis(10));
stats.update_task_completion(true);
assert_eq!(stats.successful_tasks, 1);
assert_eq!(stats.failed_tasks, 0);
assert!(stats.last_update_time > initial_time);
}
#[test]
fn test_heal_statistics_update_task_completion_failure() {
let mut stats = HealStatistics::new();
stats.update_task_completion(false);
assert_eq!(stats.successful_tasks, 0);
assert_eq!(stats.failed_tasks, 1);
}
#[test]
fn test_heal_statistics_update_running_tasks() {
let mut stats = HealStatistics::new();
let initial_time = stats.last_update_time;
std::thread::sleep(std::time::Duration::from_millis(10));
stats.update_running_tasks(5);
assert_eq!(stats.running_tasks, 5);
assert!(stats.last_update_time > initial_time);
}
#[test]
fn test_heal_statistics_add_healed_objects() {
let mut stats = HealStatistics::new();
let initial_time = stats.last_update_time;
std::thread::sleep(std::time::Duration::from_millis(10));
stats.add_healed_objects(10, 10240);
assert_eq!(stats.total_objects_healed, 10);
assert_eq!(stats.total_bytes_healed, 10240);
assert!(stats.last_update_time > initial_time);
}
#[test]
fn test_heal_statistics_add_healed_objects_accumulative() {
let mut stats = HealStatistics::new();
stats.add_healed_objects(5, 5120);
stats.add_healed_objects(3, 3072);
assert_eq!(stats.total_objects_healed, 8);
assert_eq!(stats.total_bytes_healed, 8192);
}
#[test]
fn test_heal_statistics_get_success_rate() {
let mut stats = HealStatistics::new();
stats.successful_tasks = 8;
stats.failed_tasks = 2;
// success_rate = 8 / (8 + 2) * 100 = 80%
assert!((stats.get_success_rate() - 80.0).abs() < 0.001);
}
#[test]
fn test_heal_statistics_get_success_rate_zero_total() {
let stats = HealStatistics::new();
assert_eq!(stats.get_success_rate(), 0.0);
}
#[test]
fn test_heal_statistics_get_success_rate_all_success() {
let mut stats = HealStatistics::new();
stats.successful_tasks = 10;
stats.failed_tasks = 0;
assert!((stats.get_success_rate() - 100.0).abs() < 0.001);
}
#[test]
fn test_heal_statistics_get_success_rate_all_failure() {
let mut stats = HealStatistics::new();
stats.successful_tasks = 0;
stats.failed_tasks = 5;
assert_eq!(stats.get_success_rate(), 0.0);
}
}


@@ -0,0 +1,719 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{Error, Result};
use rustfs_ecstore::disk::{BUCKET_META_PREFIX, DiskAPI, DiskStore, RUSTFS_META_BUCKET};
use serde::{Deserialize, Serialize};
use std::path::Path;
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
use tokio::sync::RwLock;
use tracing::{debug, info, warn};
use uuid::Uuid;
/// resume state file constants
const RESUME_STATE_FILE: &str = "ahm_resume_state.json";
const RESUME_PROGRESS_FILE: &str = "ahm_progress.json";
const RESUME_CHECKPOINT_FILE: &str = "ahm_checkpoint.json";
/// Helper function to convert Path to &str, returning an error if conversion fails
fn path_to_str(path: &Path) -> Result<&str> {
path.to_str()
.ok_or_else(|| Error::other(format!("Invalid UTF-8 path: {path:?}")))
}
/// resume state
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResumeState {
/// task id
pub task_id: String,
/// task type
pub task_type: String,
/// set disk identifier (for erasure set tasks)
#[serde(default)]
pub set_disk_id: String,
/// start time
pub start_time: u64,
/// last update time
pub last_update: u64,
/// completed
pub completed: bool,
/// total objects
pub total_objects: u64,
/// processed objects
pub processed_objects: u64,
/// successful objects
pub successful_objects: u64,
/// failed objects
pub failed_objects: u64,
/// skipped objects
pub skipped_objects: u64,
/// current bucket
pub current_bucket: Option<String>,
/// current object
pub current_object: Option<String>,
/// completed buckets
pub completed_buckets: Vec<String>,
/// pending buckets
pub pending_buckets: Vec<String>,
/// error message
pub error_message: Option<String>,
/// retry count
pub retry_count: u32,
/// max retries
pub max_retries: u32,
}
impl ResumeState {
pub fn new(task_id: String, task_type: String, set_disk_id: String, buckets: Vec<String>) -> Self {
Self {
task_id,
task_type,
set_disk_id,
start_time: SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(),
last_update: SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(),
completed: false,
total_objects: 0,
processed_objects: 0,
successful_objects: 0,
failed_objects: 0,
skipped_objects: 0,
current_bucket: None,
current_object: None,
completed_buckets: Vec::new(),
pending_buckets: buckets,
error_message: None,
retry_count: 0,
max_retries: 3,
}
}
pub fn update_progress(&mut self, processed: u64, successful: u64, failed: u64, skipped: u64) {
self.processed_objects = processed;
self.successful_objects = successful;
self.failed_objects = failed;
self.skipped_objects = skipped;
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
}
pub fn set_current_item(&mut self, bucket: Option<String>, object: Option<String>) {
self.current_bucket = bucket;
self.current_object = object;
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
}
pub fn complete_bucket(&mut self, bucket: &str) {
if !self.completed_buckets.contains(&bucket.to_string()) {
self.completed_buckets.push(bucket.to_string());
}
if let Some(pos) = self.pending_buckets.iter().position(|b| b == bucket) {
self.pending_buckets.remove(pos);
}
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
}
pub fn mark_completed(&mut self) {
self.completed = true;
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
}
pub fn set_error(&mut self, error: String) {
self.error_message = Some(error);
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
}
pub fn increment_retry(&mut self) {
self.retry_count += 1;
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
}
pub fn can_retry(&self) -> bool {
self.retry_count < self.max_retries
}
pub fn get_progress_percentage(&self) -> f64 {
if self.total_objects == 0 {
return 0.0;
}
(self.processed_objects as f64 / self.total_objects as f64) * 100.0
}
pub fn get_success_rate(&self) -> f64 {
let total = self.successful_objects + self.failed_objects;
if total == 0 {
return 0.0;
}
(self.successful_objects as f64 / total as f64) * 100.0
}
}
/// resume manager
pub struct ResumeManager {
disk: DiskStore,
state: Arc<RwLock<ResumeState>>,
}
impl ResumeManager {
/// create new resume manager
pub async fn new(
disk: DiskStore,
task_id: String,
task_type: String,
set_disk_id: String,
buckets: Vec<String>,
) -> Result<Self> {
let state = ResumeState::new(task_id, task_type, set_disk_id, buckets);
let manager = Self {
disk,
state: Arc::new(RwLock::new(state)),
};
// save initial state
manager.save_state().await?;
Ok(manager)
}
/// load resume state from disk
pub async fn load_from_disk(disk: DiskStore, task_id: &str) -> Result<Self> {
let state_data = Self::read_state_file(&disk, task_id).await?;
let state: ResumeState = serde_json::from_slice(&state_data).map_err(|e| Error::TaskExecutionFailed {
message: format!("Failed to deserialize resume state: {e}"),
})?;
Ok(Self {
disk,
state: Arc::new(RwLock::new(state)),
})
}
/// check if resume state exists
pub async fn has_resume_state(disk: &DiskStore, task_id: &str) -> bool {
let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_STATE_FILE}"));
match path_to_str(&file_path) {
Ok(path_str) => match disk.read_all(RUSTFS_META_BUCKET, path_str).await {
Ok(data) => !data.is_empty(),
Err(_) => false,
},
Err(_) => false,
}
}
/// get current state
pub async fn get_state(&self) -> ResumeState {
self.state.read().await.clone()
}
/// update progress
pub async fn update_progress(&self, processed: u64, successful: u64, failed: u64, skipped: u64) -> Result<()> {
let mut state = self.state.write().await;
state.update_progress(processed, successful, failed, skipped);
drop(state);
self.save_state().await
}
/// set current item
pub async fn set_current_item(&self, bucket: Option<String>, object: Option<String>) -> Result<()> {
let mut state = self.state.write().await;
state.set_current_item(bucket, object);
drop(state);
self.save_state().await
}
/// complete bucket
pub async fn complete_bucket(&self, bucket: &str) -> Result<()> {
let mut state = self.state.write().await;
state.complete_bucket(bucket);
drop(state);
self.save_state().await
}
/// mark task completed
pub async fn mark_completed(&self) -> Result<()> {
let mut state = self.state.write().await;
state.mark_completed();
drop(state);
self.save_state().await
}
/// set error message
pub async fn set_error(&self, error: String) -> Result<()> {
let mut state = self.state.write().await;
state.set_error(error);
drop(state);
self.save_state().await
}
/// increment retry count
pub async fn increment_retry(&self) -> Result<()> {
let mut state = self.state.write().await;
state.increment_retry();
drop(state);
self.save_state().await
}
/// cleanup resume state
pub async fn cleanup(&self) -> Result<()> {
let state = self.state.read().await;
let task_id = &state.task_id;
// delete state files
let state_file = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_STATE_FILE}"));
let progress_file = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_PROGRESS_FILE}"));
let checkpoint_file = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_CHECKPOINT_FILE}"));
// ignore delete errors, files may not exist
if let Ok(path_str) = path_to_str(&state_file) {
let _ = self.disk.delete(RUSTFS_META_BUCKET, path_str, Default::default()).await;
}
if let Ok(path_str) = path_to_str(&progress_file) {
let _ = self.disk.delete(RUSTFS_META_BUCKET, path_str, Default::default()).await;
}
if let Ok(path_str) = path_to_str(&checkpoint_file) {
let _ = self.disk.delete(RUSTFS_META_BUCKET, path_str, Default::default()).await;
}
info!("Cleaned up resume state for task: {}", task_id);
Ok(())
}
/// save state to disk
async fn save_state(&self) -> Result<()> {
let state = self.state.read().await;
let state_data = serde_json::to_vec(&*state).map_err(|e| Error::TaskExecutionFailed {
message: format!("Failed to serialize resume state: {e}"),
})?;
let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{}_{}", state.task_id, RESUME_STATE_FILE));
let path_str = path_to_str(&file_path)?;
self.disk
.write_all(RUSTFS_META_BUCKET, path_str, state_data.into())
.await
.map_err(|e| Error::TaskExecutionFailed {
message: format!("Failed to save resume state: {e}"),
})?;
debug!("Saved resume state for task: {}", state.task_id);
Ok(())
}
/// read state file from disk
async fn read_state_file(disk: &DiskStore, task_id: &str) -> Result<Vec<u8>> {
let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_STATE_FILE}"));
let path_str = path_to_str(&file_path)?;
disk.read_all(RUSTFS_META_BUCKET, path_str)
.await
.map(|bytes| bytes.to_vec())
.map_err(|e| Error::TaskExecutionFailed {
message: format!("Failed to read resume state file: {e}"),
})
}
}
/// resume checkpoint
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResumeCheckpoint {
/// task id
pub task_id: String,
/// checkpoint time
pub checkpoint_time: u64,
/// current bucket index
pub current_bucket_index: usize,
/// current object index
pub current_object_index: usize,
/// processed objects
pub processed_objects: Vec<String>,
/// failed objects
pub failed_objects: Vec<String>,
/// skipped objects
pub skipped_objects: Vec<String>,
}
impl ResumeCheckpoint {
pub fn new(task_id: String) -> Self {
Self {
task_id,
checkpoint_time: SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(),
current_bucket_index: 0,
current_object_index: 0,
processed_objects: Vec::new(),
failed_objects: Vec::new(),
skipped_objects: Vec::new(),
}
}
pub fn update_position(&mut self, bucket_index: usize, object_index: usize) {
self.current_bucket_index = bucket_index;
self.current_object_index = object_index;
self.checkpoint_time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
}
pub fn add_processed_object(&mut self, object: String) {
if !self.processed_objects.contains(&object) {
self.processed_objects.push(object);
}
}
pub fn add_failed_object(&mut self, object: String) {
if !self.failed_objects.contains(&object) {
self.failed_objects.push(object);
}
}
pub fn add_skipped_object(&mut self, object: String) {
if !self.skipped_objects.contains(&object) {
self.skipped_objects.push(object);
}
}
}
/// resume checkpoint manager
pub struct CheckpointManager {
disk: DiskStore,
checkpoint: Arc<RwLock<ResumeCheckpoint>>,
}
impl CheckpointManager {
/// create new checkpoint manager
pub async fn new(disk: DiskStore, task_id: String) -> Result<Self> {
let checkpoint = ResumeCheckpoint::new(task_id);
let manager = Self {
disk,
checkpoint: Arc::new(RwLock::new(checkpoint)),
};
// save initial checkpoint
manager.save_checkpoint().await?;
Ok(manager)
}
/// load checkpoint from disk
pub async fn load_from_disk(disk: DiskStore, task_id: &str) -> Result<Self> {
let checkpoint_data = Self::read_checkpoint_file(&disk, task_id).await?;
let checkpoint: ResumeCheckpoint = serde_json::from_slice(&checkpoint_data).map_err(|e| Error::TaskExecutionFailed {
message: format!("Failed to deserialize checkpoint: {e}"),
})?;
Ok(Self {
disk,
checkpoint: Arc::new(RwLock::new(checkpoint)),
})
}
/// check if checkpoint exists
pub async fn has_checkpoint(disk: &DiskStore, task_id: &str) -> bool {
let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_CHECKPOINT_FILE}"));
match path_to_str(&file_path) {
Ok(path_str) => match disk.read_all(RUSTFS_META_BUCKET, path_str).await {
Ok(data) => !data.is_empty(),
Err(_) => false,
},
Err(_) => false,
}
}
/// get current checkpoint
pub async fn get_checkpoint(&self) -> ResumeCheckpoint {
self.checkpoint.read().await.clone()
}
/// update position
pub async fn update_position(&self, bucket_index: usize, object_index: usize) -> Result<()> {
let mut checkpoint = self.checkpoint.write().await;
checkpoint.update_position(bucket_index, object_index);
drop(checkpoint);
self.save_checkpoint().await
}
/// add processed object
pub async fn add_processed_object(&self, object: String) -> Result<()> {
let mut checkpoint = self.checkpoint.write().await;
checkpoint.add_processed_object(object);
drop(checkpoint);
self.save_checkpoint().await
}
/// add failed object
pub async fn add_failed_object(&self, object: String) -> Result<()> {
let mut checkpoint = self.checkpoint.write().await;
checkpoint.add_failed_object(object);
drop(checkpoint);
self.save_checkpoint().await
}
/// add skipped object
pub async fn add_skipped_object(&self, object: String) -> Result<()> {
let mut checkpoint = self.checkpoint.write().await;
checkpoint.add_skipped_object(object);
drop(checkpoint);
self.save_checkpoint().await
}
/// cleanup checkpoint
pub async fn cleanup(&self) -> Result<()> {
let checkpoint = self.checkpoint.read().await;
let task_id = &checkpoint.task_id;
let checkpoint_file = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_CHECKPOINT_FILE}"));
if let Ok(path_str) = path_to_str(&checkpoint_file) {
let _ = self.disk.delete(RUSTFS_META_BUCKET, path_str, Default::default()).await;
}
info!("Cleaned up checkpoint for task: {}", task_id);
Ok(())
}
/// save checkpoint to disk
async fn save_checkpoint(&self) -> Result<()> {
let checkpoint = self.checkpoint.read().await;
let checkpoint_data = serde_json::to_vec(&*checkpoint).map_err(|e| Error::TaskExecutionFailed {
message: format!("Failed to serialize checkpoint: {e}"),
})?;
let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{}_{}", checkpoint.task_id, RESUME_CHECKPOINT_FILE));
let path_str = path_to_str(&file_path)?;
self.disk
.write_all(RUSTFS_META_BUCKET, path_str, checkpoint_data.into())
.await
.map_err(|e| Error::TaskExecutionFailed {
message: format!("Failed to save checkpoint: {e}"),
})?;
debug!("Saved checkpoint for task: {}", checkpoint.task_id);
Ok(())
}
/// read checkpoint file from disk
async fn read_checkpoint_file(disk: &DiskStore, task_id: &str) -> Result<Vec<u8>> {
let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_CHECKPOINT_FILE}"));
let path_str = path_to_str(&file_path)?;
disk.read_all(RUSTFS_META_BUCKET, path_str)
.await
.map(|bytes| bytes.to_vec())
.map_err(|e| Error::TaskExecutionFailed {
message: format!("Failed to read checkpoint file: {e}"),
})
}
}
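// Illustrative sketch (not part of the original API surface): a typical load-or-create
// flow for CheckpointManager, resuming from a persisted checkpoint when one exists.
// The helper name `load_or_create_checkpoint` is an assumption for exposition only.
#[allow(dead_code)]
async fn load_or_create_checkpoint(disk: DiskStore, task_id: &str) -> Result<CheckpointManager> {
    if CheckpointManager::has_checkpoint(&disk, task_id).await {
        // Resume from the checkpoint file already on disk
        CheckpointManager::load_from_disk(disk, task_id).await
    } else {
        // Start a fresh checkpoint for this task
        CheckpointManager::new(disk, task_id.to_string()).await
    }
}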
/// resume utils
pub struct ResumeUtils;
impl ResumeUtils {
/// generate unique task id
pub fn generate_task_id() -> String {
Uuid::new_v4().to_string()
}
/// check if task can be resumed
pub async fn can_resume_task(disk: &DiskStore, task_id: &str) -> bool {
ResumeManager::has_resume_state(disk, task_id).await
}
/// get all resumable task ids
pub async fn get_resumable_tasks(disk: &DiskStore) -> Result<Vec<String>> {
// List all files in the buckets metadata directory
let entries = match disk.list_dir("", RUSTFS_META_BUCKET, BUCKET_META_PREFIX, -1).await {
Ok(entries) => entries,
Err(e) => {
debug!("Failed to list resume state files: {}", e);
return Ok(Vec::new());
}
};
let mut task_ids = Vec::new();
// Filter files that end with ahm_resume_state.json and extract task IDs
for entry in entries {
if entry.ends_with(&format!("_{RESUME_STATE_FILE}")) {
// Extract task ID from filename: {task_id}_ahm_resume_state.json
if let Some(task_id) = entry.strip_suffix(&format!("_{RESUME_STATE_FILE}")) {
if !task_id.is_empty() {
task_ids.push(task_id.to_string());
}
}
}
}
debug!("Found {} resumable tasks: {:?}", task_ids.len(), task_ids);
Ok(task_ids)
}
/// cleanup expired resume states
pub async fn cleanup_expired_states(disk: &DiskStore, max_age_hours: u64) -> Result<()> {
let task_ids = Self::get_resumable_tasks(disk).await?;
let current_time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
for task_id in task_ids {
if let Ok(resume_manager) = ResumeManager::load_from_disk(disk.clone(), &task_id).await {
let state = resume_manager.get_state().await;
let age_hours = current_time.saturating_sub(state.last_update) / 3600;
if age_hours > max_age_hours {
info!("Cleaning up expired resume state for task: {} (age: {} hours)", task_id, age_hours);
if let Err(e) = resume_manager.cleanup().await {
warn!("Failed to cleanup expired resume state for task {}: {}", task_id, e);
}
}
}
}
Ok(())
}
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn test_resume_state_creation() {
let task_id = ResumeUtils::generate_task_id();
let buckets = vec!["bucket1".to_string(), "bucket2".to_string()];
let state = ResumeState::new(task_id.clone(), "erasure_set".to_string(), "pool_0_set_0".to_string(), buckets);
assert_eq!(state.task_id, task_id);
assert_eq!(state.task_type, "erasure_set");
assert!(!state.completed);
assert_eq!(state.processed_objects, 0);
assert_eq!(state.pending_buckets.len(), 2);
}
#[tokio::test]
async fn test_resume_state_progress() {
let task_id = ResumeUtils::generate_task_id();
let buckets = vec!["bucket1".to_string()];
let mut state = ResumeState::new(task_id, "erasure_set".to_string(), "pool_0_set_0".to_string(), buckets);
state.update_progress(10, 8, 1, 1);
assert_eq!(state.processed_objects, 10);
assert_eq!(state.successful_objects, 8);
assert_eq!(state.failed_objects, 1);
assert_eq!(state.skipped_objects, 1);
let progress = state.get_progress_percentage();
assert_eq!(progress, 0.0); // total_objects is 0
state.total_objects = 100;
let progress = state.get_progress_percentage();
assert_eq!(progress, 10.0);
}
#[tokio::test]
async fn test_resume_state_bucket_completion() {
let task_id = ResumeUtils::generate_task_id();
let buckets = vec!["bucket1".to_string(), "bucket2".to_string()];
let mut state = ResumeState::new(task_id, "erasure_set".to_string(), "pool_0_set_0".to_string(), buckets);
assert_eq!(state.pending_buckets.len(), 2);
assert_eq!(state.completed_buckets.len(), 0);
state.complete_bucket("bucket1");
assert_eq!(state.pending_buckets.len(), 1);
assert_eq!(state.completed_buckets.len(), 1);
assert!(state.completed_buckets.contains(&"bucket1".to_string()));
}
#[tokio::test]
async fn test_resume_utils() {
let task_id1 = ResumeUtils::generate_task_id();
let task_id2 = ResumeUtils::generate_task_id();
assert_ne!(task_id1, task_id2);
assert_eq!(task_id1.len(), 36); // UUID length
assert_eq!(task_id2.len(), 36);
}
#[tokio::test]
async fn test_get_resumable_tasks_integration() {
use rustfs_ecstore::disk::{DiskOption, endpoint::Endpoint, new_disk};
use tempfile::TempDir;
// Create a temporary directory for testing
let temp_dir = TempDir::new().unwrap();
let disk_path = temp_dir.path().join("test_disk");
std::fs::create_dir_all(&disk_path).unwrap();
// Create a local disk for testing
let endpoint = Endpoint::try_from(disk_path.to_string_lossy().as_ref()).unwrap();
let disk_option = DiskOption {
cleanup: false,
health_check: false,
};
let disk = new_disk(&endpoint, &disk_option).await.unwrap();
// Create necessary directories first (ignore if already exist)
let _ = disk.make_volume(RUSTFS_META_BUCKET).await;
let _ = disk.make_volume(&format!("{RUSTFS_META_BUCKET}/{BUCKET_META_PREFIX}")).await;
// Create some test resume state files
let task_ids = vec![
"test-task-1".to_string(),
"test-task-2".to_string(),
"test-task-3".to_string(),
];
// Save resume state files for each task
for task_id in &task_ids {
let state = ResumeState::new(
task_id.clone(),
"erasure_set".to_string(),
"pool_0_set_0".to_string(),
vec!["bucket1".to_string(), "bucket2".to_string()],
);
let state_data = serde_json::to_vec(&state).unwrap();
let file_path = format!("{BUCKET_META_PREFIX}/{task_id}_{RESUME_STATE_FILE}");
disk.write_all(RUSTFS_META_BUCKET, &file_path, state_data.into())
.await
.unwrap();
}
// Also create some non-resume state files to test filtering
let non_resume_files = vec![
"other_file.txt",
"task4_ahm_checkpoint.json",
"task5_ahm_progress.json",
"_ahm_resume_state.json", // Invalid: empty task ID
];
for file_name in non_resume_files {
let file_path = format!("{BUCKET_META_PREFIX}/{file_name}");
disk.write_all(RUSTFS_META_BUCKET, &file_path, b"test data".to_vec().into())
.await
.unwrap();
}
// Now call get_resumable_tasks to see if it finds the correct files
let found_task_ids = ResumeUtils::get_resumable_tasks(&disk).await.unwrap();
// Verify that only the valid resume state files are found
assert_eq!(found_task_ids.len(), 3);
for task_id in &task_ids {
assert!(found_task_ids.contains(task_id), "Task ID {task_id} not found");
}
// Verify that invalid files are not included
assert!(!found_task_ids.contains(&"".to_string()));
assert!(!found_task_ids.contains(&"task4".to_string()));
assert!(!found_task_ids.contains(&"task5".to_string()));
// Clean up
temp_dir.close().unwrap();
}
}
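// Illustrative sketch (added for exposition, not part of the original change): how a heal
// task might drive ResumeManager end to end. The helper name, the "erasure_set" task type,
// the example counts, and the "bucket1" name are placeholders for the example only.
#[allow(dead_code)]
async fn resume_manager_lifecycle_sketch(
    disk: DiskStore,
    task_id: String,
    set_disk_id: String,
    buckets: Vec<String>,
) -> Result<()> {
    // Reuse persisted state if this task was interrupted before, otherwise create fresh state.
    let manager = if ResumeUtils::can_resume_task(&disk, &task_id).await {
        ResumeManager::load_from_disk(disk, &task_id).await?
    } else {
        ResumeManager::new(disk, task_id, "erasure_set".to_string(), set_disk_id, buckets).await?
    };
    // Report progress as objects are processed; each call persists the state file to disk.
    manager.update_progress(10, 9, 1, 0).await?;
    manager.complete_bucket("bucket1").await?;
    manager.mark_completed().await?;
    // Remove the state files once the task has fully finished.
    manager.cleanup().await?;
    Ok(())
}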


@@ -0,0 +1,596 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{Error, Result};
use async_trait::async_trait;
use rustfs_common::heal_channel::{HealOpts, HealScanMode};
use rustfs_ecstore::{
disk::{DiskStore, endpoint::Endpoint},
store::ECStore,
store_api::{BucketInfo, ObjectIO, StorageAPI},
};
use rustfs_madmin::heal_commands::HealResultItem;
use std::sync::Arc;
use tracing::{debug, error, info, warn};
/// Disk status for heal operations
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum DiskStatus {
/// Ok
Ok,
/// Offline
Offline,
/// Corrupt
Corrupt,
/// Missing
Missing,
/// Permission denied
PermissionDenied,
/// Faulty
Faulty,
/// Root mount
RootMount,
/// Unknown
Unknown,
/// Unformatted
Unformatted,
}
/// Heal storage layer interface
#[async_trait]
pub trait HealStorageAPI: Send + Sync {
/// Get object meta
async fn get_object_meta(&self, bucket: &str, object: &str) -> Result<Option<rustfs_ecstore::store_api::ObjectInfo>>;
/// Get object data
async fn get_object_data(&self, bucket: &str, object: &str) -> Result<Option<Vec<u8>>>;
/// Put object data
async fn put_object_data(&self, bucket: &str, object: &str, data: &[u8]) -> Result<()>;
/// Delete object
async fn delete_object(&self, bucket: &str, object: &str) -> Result<()>;
/// Check object integrity
async fn verify_object_integrity(&self, bucket: &str, object: &str) -> Result<bool>;
/// EC decode rebuild
async fn ec_decode_rebuild(&self, bucket: &str, object: &str) -> Result<Vec<u8>>;
/// Get disk status
async fn get_disk_status(&self, endpoint: &Endpoint) -> Result<DiskStatus>;
/// Format disk
async fn format_disk(&self, endpoint: &Endpoint) -> Result<()>;
/// Get bucket info
async fn get_bucket_info(&self, bucket: &str) -> Result<Option<BucketInfo>>;
/// Fix bucket metadata
async fn heal_bucket_metadata(&self, bucket: &str) -> Result<()>;
/// Get all buckets
async fn list_buckets(&self) -> Result<Vec<BucketInfo>>;
/// Check object exists
async fn object_exists(&self, bucket: &str, object: &str) -> Result<bool>;
/// Get object size
async fn get_object_size(&self, bucket: &str, object: &str) -> Result<Option<u64>>;
/// Get object checksum
async fn get_object_checksum(&self, bucket: &str, object: &str) -> Result<Option<String>>;
/// Heal object using ecstore
async fn heal_object(
&self,
bucket: &str,
object: &str,
version_id: Option<&str>,
opts: &HealOpts,
) -> Result<(HealResultItem, Option<Error>)>;
/// Heal bucket using ecstore
async fn heal_bucket(&self, bucket: &str, opts: &HealOpts) -> Result<HealResultItem>;
/// Heal format using ecstore
async fn heal_format(&self, dry_run: bool) -> Result<(HealResultItem, Option<Error>)>;
/// List objects for healing (returns all objects, may use significant memory for large buckets)
///
/// WARNING: This method loads all objects into memory at once. For buckets with many objects,
/// consider using `list_objects_for_heal_page` instead to process objects in pages.
async fn list_objects_for_heal(&self, bucket: &str, prefix: &str) -> Result<Vec<String>>;
/// List objects for healing with pagination (returns one page and continuation token)
/// Returns (objects, next_continuation_token, is_truncated)
async fn list_objects_for_heal_page(
&self,
bucket: &str,
prefix: &str,
continuation_token: Option<&str>,
) -> Result<(Vec<String>, Option<String>, bool)>;
/// Get disk for resume functionality
async fn get_disk_for_resume(&self, set_disk_id: &str) -> Result<DiskStore>;
}
/// ECStore Heal storage layer implementation
pub struct ECStoreHealStorage {
ecstore: Arc<ECStore>,
}
impl ECStoreHealStorage {
pub fn new(ecstore: Arc<ECStore>) -> Self {
Self { ecstore }
}
}
#[async_trait]
impl HealStorageAPI for ECStoreHealStorage {
async fn get_object_meta(&self, bucket: &str, object: &str) -> Result<Option<rustfs_ecstore::store_api::ObjectInfo>> {
debug!("Getting object meta: {}/{}", bucket, object);
match self.ecstore.get_object_info(bucket, object, &Default::default()).await {
Ok(info) => Ok(Some(info)),
Err(e) => {
// Map ObjectNotFound to None to align with Option return type
if matches!(e, rustfs_ecstore::error::StorageError::ObjectNotFound(_, _)) {
debug!("Object meta not found: {}/{}", bucket, object);
Ok(None)
} else {
error!("Failed to get object meta: {}/{} - {}", bucket, object, e);
Err(Error::other(e))
}
}
}
}
async fn get_object_data(&self, bucket: &str, object: &str) -> Result<Option<Vec<u8>>> {
debug!("Getting object data: {}/{}", bucket, object);
let reader = match (*self.ecstore)
.get_object_reader(bucket, object, None, Default::default(), &Default::default())
.await
{
Ok(reader) => reader,
Err(e) => {
error!("Failed to get object: {}/{} - {}", bucket, object, e);
return Err(Error::other(e));
}
};
// WARNING: Returning Vec<u8> for large objects is dangerous. To avoid OOM, cap the read size.
// If needed, refactor callers to stream instead of buffering entire object.
const MAX_READ_BYTES: usize = 16 * 1024 * 1024; // 16 MiB cap
let mut buf = Vec::with_capacity(1024 * 1024);
use tokio::io::AsyncReadExt as _;
let mut n_read: usize = 0;
let mut stream = reader.stream;
loop {
// Read in chunks
let mut chunk = vec![0u8; 1024 * 1024];
match stream.read(&mut chunk).await {
Ok(0) => break,
Ok(n) => {
buf.extend_from_slice(&chunk[..n]);
n_read += n;
if n_read > MAX_READ_BYTES {
warn!(
"Object data exceeds cap ({} bytes), aborting full read to prevent OOM: {}/{}",
MAX_READ_BYTES, bucket, object
);
return Err(Error::other(format!(
"Object too large: {n_read} bytes (max: {MAX_READ_BYTES} bytes) for {bucket}/{object}"
)));
}
}
Err(e) => {
error!("Failed to read object data: {}/{} - {}", bucket, object, e);
return Err(Error::other(e));
}
}
}
Ok(Some(buf))
}
async fn put_object_data(&self, bucket: &str, object: &str, data: &[u8]) -> Result<()> {
debug!("Putting object data: {}/{} ({} bytes)", bucket, object, data.len());
let mut reader = rustfs_ecstore::store_api::PutObjReader::from_vec(data.to_vec());
match (*self.ecstore)
.put_object(bucket, object, &mut reader, &Default::default())
.await
{
Ok(_) => {
info!("Successfully put object: {}/{}", bucket, object);
Ok(())
}
Err(e) => {
error!("Failed to put object: {}/{} - {}", bucket, object, e);
Err(Error::other(e))
}
}
}
async fn delete_object(&self, bucket: &str, object: &str) -> Result<()> {
debug!("Deleting object: {}/{}", bucket, object);
match self.ecstore.delete_object(bucket, object, Default::default()).await {
Ok(_) => {
info!("Successfully deleted object: {}/{}", bucket, object);
Ok(())
}
Err(e) => {
error!("Failed to delete object: {}/{} - {}", bucket, object, e);
Err(Error::other(e))
}
}
}
async fn verify_object_integrity(&self, bucket: &str, object: &str) -> Result<bool> {
debug!("Verifying object integrity: {}/{}", bucket, object);
// Check object metadata first
match self.get_object_meta(bucket, object).await? {
Some(obj_info) => {
if obj_info.size < 0 {
warn!("Object has invalid size: {}/{}", bucket, object);
return Ok(false);
}
// Stream-read the object to a sink to avoid loading into memory
match (*self.ecstore)
.get_object_reader(bucket, object, None, Default::default(), &Default::default())
.await
{
Ok(reader) => {
let mut stream = reader.stream;
match tokio::io::copy(&mut stream, &mut tokio::io::sink()).await {
Ok(_) => {
info!("Object integrity check passed: {}/{}", bucket, object);
Ok(true)
}
Err(e) => {
warn!("Object stream read failed: {}/{} - {}", bucket, object, e);
Ok(false)
}
}
}
Err(e) => {
warn!("Failed to get object reader: {}/{} - {}", bucket, object, e);
Ok(false)
}
}
}
None => {
warn!("Object metadata not found: {}/{}", bucket, object);
Ok(false)
}
}
}
async fn ec_decode_rebuild(&self, bucket: &str, object: &str) -> Result<Vec<u8>> {
debug!("EC decode rebuild: {}/{}", bucket, object);
// Use ecstore's heal_object to rebuild the object
let heal_opts = HealOpts {
recursive: false,
dry_run: false,
remove: false,
recreate: true,
scan_mode: HealScanMode::Deep,
update_parity: true,
no_lock: false,
pool: None,
set: None,
};
match self.heal_object(bucket, object, None, &heal_opts).await {
Ok((_result, error)) => {
if error.is_some() {
return Err(Error::TaskExecutionFailed {
message: format!("Heal failed: {error:?}"),
});
}
// After healing, try to read the object data
match self.get_object_data(bucket, object).await? {
Some(data) => {
info!("EC decode rebuild successful: {}/{} ({} bytes)", bucket, object, data.len());
Ok(data)
}
None => {
error!("Object not found after heal: {}/{}", bucket, object);
Err(Error::TaskExecutionFailed {
message: format!("Object not found after heal: {bucket}/{object}"),
})
}
}
}
Err(e) => {
error!("Heal operation failed: {}/{} - {}", bucket, object, e);
Err(e)
}
}
}
async fn get_disk_status(&self, endpoint: &Endpoint) -> Result<DiskStatus> {
debug!("Getting disk status: {:?}", endpoint);
// TODO: implement disk status check using ecstore
// For now, return Ok status
info!("Disk status check: {:?} - OK", endpoint);
Ok(DiskStatus::Ok)
}
async fn format_disk(&self, endpoint: &Endpoint) -> Result<()> {
debug!("Formatting disk: {:?}", endpoint);
// Use ecstore's heal_format
match self.heal_format(false).await {
Ok((_, error)) => {
if error.is_some() {
return Err(Error::other(format!("Format failed: {error:?}")));
}
info!("Successfully formatted disk: {:?}", endpoint);
Ok(())
}
Err(e) => {
error!("Failed to format disk: {:?} - {}", endpoint, e);
Err(e)
}
}
}
async fn get_bucket_info(&self, bucket: &str) -> Result<Option<BucketInfo>> {
debug!("Getting bucket info: {}", bucket);
match self.ecstore.get_bucket_info(bucket, &Default::default()).await {
Ok(info) => Ok(Some(info)),
Err(e) => {
error!("Failed to get bucket info: {} - {}", bucket, e);
Err(Error::other(e))
}
}
}
async fn heal_bucket_metadata(&self, bucket: &str) -> Result<()> {
debug!("Healing bucket metadata: {}", bucket);
let heal_opts = HealOpts {
recursive: true,
dry_run: false,
remove: false,
recreate: false,
scan_mode: HealScanMode::Normal,
update_parity: false,
no_lock: false,
pool: None,
set: None,
};
match self.heal_bucket(bucket, &heal_opts).await {
Ok(_) => {
info!("Successfully healed bucket metadata: {}", bucket);
Ok(())
}
Err(e) => {
error!("Failed to heal bucket metadata: {} - {}", bucket, e);
Err(e)
}
}
}
async fn list_buckets(&self) -> Result<Vec<BucketInfo>> {
debug!("Listing buckets");
match self.ecstore.list_bucket(&Default::default()).await {
Ok(buckets) => Ok(buckets),
Err(e) => {
error!("Failed to list buckets: {}", e);
Err(Error::other(e))
}
}
}
async fn object_exists(&self, bucket: &str, object: &str) -> Result<bool> {
debug!("Checking object exists: {}/{}", bucket, object);
// Use get_object_info for efficient existence check without heavy heal operations
match self.ecstore.get_object_info(bucket, object, &Default::default()).await {
Ok(_) => Ok(true), // Object exists
Err(e) => {
// Map ObjectNotFound to false, other errors must be propagated!
if matches!(e, rustfs_ecstore::error::StorageError::ObjectNotFound(_, _)) {
debug!("Object not found: {}/{}", bucket, object);
Ok(false)
} else {
error!("Error checking object existence {}/{}: {}", bucket, object, e);
Err(Error::other(e))
}
}
}
}
async fn get_object_size(&self, bucket: &str, object: &str) -> Result<Option<u64>> {
debug!("Getting object size: {}/{}", bucket, object);
match self.get_object_meta(bucket, object).await {
Ok(Some(obj_info)) => Ok(Some(obj_info.size as u64)),
Ok(None) => Ok(None),
Err(e) => Err(e),
}
}
async fn get_object_checksum(&self, bucket: &str, object: &str) -> Result<Option<String>> {
debug!("Getting object checksum: {}/{}", bucket, object);
match self.get_object_meta(bucket, object).await {
Ok(Some(obj_info)) => {
// Convert checksum bytes to hex string
let checksum = obj_info.checksum.iter().map(|b| format!("{b:02x}")).collect::<String>();
Ok(Some(checksum))
}
Ok(None) => Ok(None),
Err(e) => Err(e),
}
}
async fn heal_object(
&self,
bucket: &str,
object: &str,
version_id: Option<&str>,
opts: &HealOpts,
) -> Result<(HealResultItem, Option<Error>)> {
debug!("Healing object: {}/{}", bucket, object);
let version_id_str = version_id.unwrap_or("");
match self.ecstore.heal_object(bucket, object, version_id_str, opts).await {
Ok((result, ecstore_error)) => {
let error = ecstore_error.map(Error::other);
info!("Heal object completed: {}/{} - result: {:?}, error: {:?}", bucket, object, result, error);
Ok((result, error))
}
Err(e) => {
error!("Heal object failed: {}/{} - {}", bucket, object, e);
Err(Error::other(e))
}
}
}
async fn heal_bucket(&self, bucket: &str, opts: &HealOpts) -> Result<HealResultItem> {
debug!("Healing bucket: {}", bucket);
match self.ecstore.heal_bucket(bucket, opts).await {
Ok(result) => {
info!("Heal bucket completed: {} - result: {:?}", bucket, result);
Ok(result)
}
Err(e) => {
error!("Heal bucket failed: {} - {}", bucket, e);
Err(Error::other(e))
}
}
}
async fn heal_format(&self, dry_run: bool) -> Result<(HealResultItem, Option<Error>)> {
debug!("Healing format (dry_run: {})", dry_run);
match self.ecstore.heal_format(dry_run).await {
Ok((result, ecstore_error)) => {
let error = ecstore_error.map(Error::other);
info!("Heal format completed - result: {:?}, error: {:?}", result, error);
Ok((result, error))
}
Err(e) => {
error!("Heal format failed: {}", e);
Err(Error::other(e))
}
}
}
async fn list_objects_for_heal(&self, bucket: &str, prefix: &str) -> Result<Vec<String>> {
debug!("Listing objects for heal: {}/{}", bucket, prefix);
warn!(
"list_objects_for_heal loads all objects into memory. For large buckets, consider using list_objects_for_heal_page instead."
);
let mut all_objects = Vec::new();
let mut continuation_token: Option<String> = None;
loop {
let (page_objects, next_token, is_truncated) = self
.list_objects_for_heal_page(bucket, prefix, continuation_token.as_deref())
.await?;
all_objects.extend(page_objects);
if !is_truncated {
break;
}
continuation_token = next_token;
if continuation_token.is_none() {
warn!("List is truncated but no continuation token provided for {}/{}", bucket, prefix);
break;
}
}
info!("Found {} objects for heal in {}/{}", all_objects.len(), bucket, prefix);
Ok(all_objects)
}
async fn list_objects_for_heal_page(
&self,
bucket: &str,
prefix: &str,
continuation_token: Option<&str>,
) -> Result<(Vec<String>, Option<String>, bool)> {
debug!("Listing objects for heal (page): {}/{}", bucket, prefix);
const MAX_KEYS: i32 = 1000;
let continuation_token_opt = continuation_token.map(|s| s.to_string());
// Use list_objects_v2 to get objects with pagination
let list_info = match self
.ecstore
.clone()
.list_objects_v2(bucket, prefix, continuation_token_opt, None, MAX_KEYS, false, None, false)
.await
{
Ok(info) => info,
Err(e) => {
error!("Failed to list objects for heal: {}/{} - {}", bucket, prefix, e);
return Err(Error::other(e));
}
};
// Collect objects from this page
let page_objects: Vec<String> = list_info.objects.into_iter().map(|obj| obj.name).collect();
let page_count = page_objects.len();
debug!("Listed {} objects (page) for heal in {}/{}", page_count, bucket, prefix);
Ok((page_objects, list_info.next_continuation_token, list_info.is_truncated))
}
async fn get_disk_for_resume(&self, set_disk_id: &str) -> Result<DiskStore> {
debug!("Getting disk for resume: {}", set_disk_id);
// Parse set_disk_id to extract pool and set indices
let (pool_idx, set_idx) = crate::heal::utils::parse_set_disk_id(set_disk_id)?;
// Get the first available disk from the set
let disks = self
.ecstore
.get_disks(pool_idx, set_idx)
.await
.map_err(|e| Error::TaskExecutionFailed {
message: format!("Failed to get disks for pool {pool_idx} set {set_idx}: {e}"),
})?;
// Find the first available disk
if let Some(disk_store) = disks.into_iter().flatten().next() {
info!("Found disk for resume: {:?}", disk_store);
return Ok(disk_store);
}
Err(Error::TaskExecutionFailed {
message: format!("No available disk found for set_disk_id: {set_disk_id}"),
})
}
}
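// Illustrative sketch (not part of the original change): driving the paged listing API from
// a caller. The function name and the per-object closure are assumptions; the point is the
// continuation-token loop, mirroring what list_objects_for_heal does internally without
// accumulating every object name in memory.
#[allow(dead_code)]
async fn for_each_heal_object<F>(storage: &dyn HealStorageAPI, bucket: &str, prefix: &str, mut handle: F) -> Result<()>
where
    F: FnMut(String),
{
    let mut token: Option<String> = None;
    loop {
        let (objects, next_token, is_truncated) =
            storage.list_objects_for_heal_page(bucket, prefix, token.as_deref()).await?;
        for object in objects {
            handle(object);
        }
        if !is_truncated {
            break;
        }
        // A truncated page without a token would otherwise loop forever; stop defensively.
        match next_token {
            Some(t) => token = Some(t),
            None => break,
        }
    }
    Ok(())
}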

crates/ahm/src/heal/task.rs Normal file

@@ -0,0 +1,999 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::heal::{ErasureSetHealer, progress::HealProgress, storage::HealStorageAPI};
use crate::{Error, Result};
use rustfs_common::heal_channel::{HealOpts, HealScanMode};
use serde::{Deserialize, Serialize};
use std::{
future::Future,
sync::Arc,
time::{Duration, Instant, SystemTime},
};
use tokio::sync::RwLock;
use tracing::{error, info, warn};
use uuid::Uuid;
/// Heal type
#[derive(Debug, Clone)]
pub enum HealType {
/// Object heal
Object {
bucket: String,
object: String,
version_id: Option<String>,
},
/// Bucket heal
Bucket { bucket: String },
/// Erasure Set heal (includes disk format repair)
ErasureSet { buckets: Vec<String>, set_disk_id: String },
/// Metadata heal
Metadata { bucket: String, object: String },
/// MRF heal
MRF { meta_path: String },
/// EC decode heal
ECDecode {
bucket: String,
object: String,
version_id: Option<String>,
},
}
/// Heal priority
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub enum HealPriority {
/// Low priority
Low = 0,
/// Normal priority
#[default]
Normal = 1,
/// High priority
High = 2,
/// Urgent priority
Urgent = 3,
}
/// Heal options
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HealOptions {
/// Scan mode
pub scan_mode: HealScanMode,
/// Whether to remove corrupted data
pub remove_corrupted: bool,
/// Whether to recreate
pub recreate_missing: bool,
/// Whether to update parity
pub update_parity: bool,
/// Whether to recursively process
pub recursive: bool,
/// Whether to dry run
pub dry_run: bool,
/// Timeout
pub timeout: Option<Duration>,
/// pool index
pub pool_index: Option<usize>,
/// set index
pub set_index: Option<usize>,
}
impl Default for HealOptions {
fn default() -> Self {
Self {
scan_mode: HealScanMode::Normal,
remove_corrupted: false,
recreate_missing: true,
update_parity: true,
recursive: false,
dry_run: false,
timeout: Some(Duration::from_secs(300)), // 5 minutes default timeout
pool_index: None,
set_index: None,
}
}
}
/// Heal task status
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum HealTaskStatus {
/// Pending
Pending,
/// Running
Running,
/// Completed
Completed,
/// Failed
Failed { error: String },
/// Cancelled
Cancelled,
/// Timeout
Timeout,
}
/// Heal request
#[derive(Debug, Clone)]
pub struct HealRequest {
/// Request ID
pub id: String,
/// Heal type
pub heal_type: HealType,
/// Heal options
pub options: HealOptions,
/// Priority
pub priority: HealPriority,
/// Created time
pub created_at: SystemTime,
}
impl HealRequest {
pub fn new(heal_type: HealType, options: HealOptions, priority: HealPriority) -> Self {
Self {
id: Uuid::new_v4().to_string(),
heal_type,
options,
priority,
created_at: SystemTime::now(),
}
}
pub fn object(bucket: String, object: String, version_id: Option<String>) -> Self {
Self::new(
HealType::Object {
bucket,
object,
version_id,
},
HealOptions::default(),
HealPriority::Normal,
)
}
pub fn bucket(bucket: String) -> Self {
Self::new(HealType::Bucket { bucket }, HealOptions::default(), HealPriority::Normal)
}
pub fn metadata(bucket: String, object: String) -> Self {
Self::new(HealType::Metadata { bucket, object }, HealOptions::default(), HealPriority::High)
}
pub fn ec_decode(bucket: String, object: String, version_id: Option<String>) -> Self {
Self::new(
HealType::ECDecode {
bucket,
object,
version_id,
},
HealOptions::default(),
HealPriority::Urgent,
)
}
}
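// Illustrative sketch (example only): building a HealRequest with non-default options,
// here a recursive dry-run deep scan of an erasure set. The bucket name and set_disk_id
// values are placeholders.
#[allow(dead_code)]
fn example_erasure_set_request() -> HealRequest {
    let options = HealOptions {
        scan_mode: HealScanMode::Deep,
        dry_run: true,
        recursive: true,
        ..HealOptions::default()
    };
    HealRequest::new(
        HealType::ErasureSet {
            buckets: vec!["bucket1".to_string()],
            set_disk_id: "pool_0_set_0".to_string(),
        },
        options,
        HealPriority::High,
    )
}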
/// Heal task
pub struct HealTask {
/// Task ID
pub id: String,
/// Heal type
pub heal_type: HealType,
/// Heal options
pub options: HealOptions,
/// Task status
pub status: Arc<RwLock<HealTaskStatus>>,
/// Progress tracking
pub progress: Arc<RwLock<HealProgress>>,
/// Created time
pub created_at: SystemTime,
/// Started time
pub started_at: Arc<RwLock<Option<SystemTime>>>,
/// Completed time
pub completed_at: Arc<RwLock<Option<SystemTime>>>,
/// Task start instant for timeout calculation (monotonic)
task_start_instant: Arc<RwLock<Option<Instant>>>,
/// Cancel token
pub cancel_token: tokio_util::sync::CancellationToken,
/// Storage layer interface
pub storage: Arc<dyn HealStorageAPI>,
}
impl HealTask {
pub fn from_request(request: HealRequest, storage: Arc<dyn HealStorageAPI>) -> Self {
Self {
id: request.id,
heal_type: request.heal_type,
options: request.options,
status: Arc::new(RwLock::new(HealTaskStatus::Pending)),
progress: Arc::new(RwLock::new(HealProgress::new())),
created_at: request.created_at,
started_at: Arc::new(RwLock::new(None)),
completed_at: Arc::new(RwLock::new(None)),
task_start_instant: Arc::new(RwLock::new(None)),
cancel_token: tokio_util::sync::CancellationToken::new(),
storage,
}
}
async fn remaining_timeout(&self) -> Result<Option<Duration>> {
if let Some(total) = self.options.timeout {
let start_instant = { *self.task_start_instant.read().await };
if let Some(started_at) = start_instant {
let elapsed = started_at.elapsed();
if elapsed >= total {
return Err(Error::TaskTimeout);
}
return Ok(Some(total - elapsed));
}
Ok(Some(total))
} else {
Ok(None)
}
}
async fn check_control_flags(&self) -> Result<()> {
if self.cancel_token.is_cancelled() {
return Err(Error::TaskCancelled);
}
// Only interested in propagating an error if the timeout has expired;
// the actual Duration value is not needed here
let _ = self.remaining_timeout().await?;
Ok(())
}
async fn await_with_control<F, T>(&self, fut: F) -> Result<T>
where
F: Future<Output = Result<T>> + Send,
T: Send,
{
let cancel_token = self.cancel_token.clone();
if let Some(remaining) = self.remaining_timeout().await? {
if remaining.is_zero() {
return Err(Error::TaskTimeout);
}
let mut fut = Box::pin(fut);
tokio::select! {
_ = cancel_token.cancelled() => Err(Error::TaskCancelled),
_ = tokio::time::sleep(remaining) => Err(Error::TaskTimeout),
result = &mut fut => result,
}
} else {
tokio::select! {
_ = cancel_token.cancelled() => Err(Error::TaskCancelled),
result = fut => result,
}
}
}
#[tracing::instrument(skip(self), fields(task_id = %self.id, heal_type = ?self.heal_type))]
pub async fn execute(&self) -> Result<()> {
// update status and timestamps atomically to avoid race conditions
let now = SystemTime::now();
let start_instant = Instant::now();
{
let mut status = self.status.write().await;
let mut started_at = self.started_at.write().await;
let mut task_start_instant = self.task_start_instant.write().await;
*status = HealTaskStatus::Running;
*started_at = Some(now);
*task_start_instant = Some(start_instant);
}
info!("Task started");
let result = match &self.heal_type {
HealType::Object {
bucket,
object,
version_id,
} => self.heal_object(bucket, object, version_id.as_deref()).await,
HealType::Bucket { bucket } => self.heal_bucket(bucket).await,
HealType::Metadata { bucket, object } => self.heal_metadata(bucket, object).await,
HealType::MRF { meta_path } => self.heal_mrf(meta_path).await,
HealType::ECDecode {
bucket,
object,
version_id,
} => self.heal_ec_decode(bucket, object, version_id.as_deref()).await,
HealType::ErasureSet { buckets, set_disk_id } => self.heal_erasure_set(buckets.clone(), set_disk_id.clone()).await,
};
// update completed time and status
{
let mut completed_at = self.completed_at.write().await;
*completed_at = Some(SystemTime::now());
}
match &result {
Ok(_) => {
let mut status = self.status.write().await;
*status = HealTaskStatus::Completed;
info!("Task completed successfully");
}
Err(Error::TaskCancelled) => {
let mut status = self.status.write().await;
*status = HealTaskStatus::Cancelled;
info!("Heal task was cancelled: {}", self.id);
}
Err(Error::TaskTimeout) => {
let mut status = self.status.write().await;
*status = HealTaskStatus::Timeout;
warn!("Heal task timed out: {}", self.id);
}
Err(e) => {
let mut status = self.status.write().await;
*status = HealTaskStatus::Failed { error: e.to_string() };
error!("Heal task failed: {} with error: {}", self.id, e);
}
}
result
}
pub async fn cancel(&self) -> Result<()> {
self.cancel_token.cancel();
let mut status = self.status.write().await;
*status = HealTaskStatus::Cancelled;
info!("Heal task cancelled: {}", self.id);
Ok(())
}
pub async fn get_status(&self) -> HealTaskStatus {
self.status.read().await.clone()
}
pub async fn get_progress(&self) -> HealProgress {
self.progress.read().await.clone()
}
// specific heal implementation methods
#[tracing::instrument(skip(self), fields(bucket = %bucket, object = %object, version_id = ?version_id))]
async fn heal_object(&self, bucket: &str, object: &str, version_id: Option<&str>) -> Result<()> {
info!("Starting object heal workflow");
// update progress
{
let mut progress = self.progress.write().await;
progress.set_current_object(Some(format!("{bucket}/{object}")));
progress.update_progress(0, 4, 0, 0);
}
// Step 1: Check if object exists and get metadata
warn!("Step 1: Checking object existence and metadata");
self.check_control_flags().await?;
let object_exists = self.await_with_control(self.storage.object_exists(bucket, object)).await?;
if !object_exists {
warn!("Object does not exist: {}/{}", bucket, object);
if self.options.recreate_missing {
info!("Attempting to recreate missing object: {}/{}", bucket, object);
return self.recreate_missing_object(bucket, object, version_id).await;
} else {
return Err(Error::TaskExecutionFailed {
message: format!("Object not found: {bucket}/{object}"),
});
}
}
{
let mut progress = self.progress.write().await;
progress.update_progress(1, 3, 0, 0);
}
// Step 2: directly call ecstore to perform heal
info!("Step 2: Performing heal using ecstore");
let heal_opts = HealOpts {
recursive: self.options.recursive,
dry_run: self.options.dry_run,
remove: self.options.remove_corrupted,
recreate: self.options.recreate_missing,
scan_mode: self.options.scan_mode,
update_parity: self.options.update_parity,
no_lock: false,
pool: self.options.pool_index,
set: self.options.set_index,
};
let heal_result = self
.await_with_control(self.storage.heal_object(bucket, object, version_id, &heal_opts))
.await;
match heal_result {
Ok((result, error)) => {
if let Some(e) = error {
// Check if this is a "File not found" error during delete operations
let error_msg = format!("{e}");
if error_msg.contains("File not found") || error_msg.contains("not found") {
info!(
"Object {}/{} not found during heal - likely deleted intentionally, treating as successful",
bucket, object
);
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 3, 0, 0);
}
return Ok(());
}
error!("Heal operation failed: {}/{} - {}", bucket, object, e);
// If heal failed and remove_corrupted is enabled, delete the corrupted object
if self.options.remove_corrupted {
info!("Removing corrupted object: {}/{}", bucket, object);
if !self.options.dry_run {
self.await_with_control(self.storage.delete_object(bucket, object)).await?;
info!("Successfully deleted corrupted object: {}/{}", bucket, object);
} else {
info!("Dry run mode - would delete corrupted object: {}/{}", bucket, object);
}
}
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 3, 0, 0);
}
return Err(Error::TaskExecutionFailed {
message: format!("Failed to heal object {bucket}/{object}: {e}"),
});
}
// Step 3: Verify heal result
info!("Step 3: Verifying heal result");
let object_size = result.object_size as u64;
info!(
object_size = object_size,
drives_healed = result.after.drives.len(),
"Heal completed successfully"
);
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 3, object_size, object_size);
}
Ok(())
}
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
Err(e) => {
// Check if this is a "File not found" error during delete operations
let error_msg = format!("{e}");
if error_msg.contains("File not found") || error_msg.contains("not found") {
info!(
"Object {}/{} not found during heal - likely deleted intentionally, treating as successful",
bucket, object
);
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 3, 0, 0);
}
return Ok(());
}
error!("Heal operation failed: {}/{} - {}", bucket, object, e);
// If heal failed and remove_corrupted is enabled, delete the corrupted object
if self.options.remove_corrupted {
info!("Removing corrupted object: {}/{}", bucket, object);
if !self.options.dry_run {
self.await_with_control(self.storage.delete_object(bucket, object)).await?;
info!("Successfully deleted corrupted object: {}/{}", bucket, object);
} else {
info!("Dry run mode - would delete corrupted object: {}/{}", bucket, object);
}
}
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 3, 0, 0);
}
Err(Error::TaskExecutionFailed {
message: format!("Failed to heal object {bucket}/{object}: {e}"),
})
}
}
}
/// Recreate missing object (for EC decode scenarios)
async fn recreate_missing_object(&self, bucket: &str, object: &str, version_id: Option<&str>) -> Result<()> {
info!("Attempting to recreate missing object: {}/{}", bucket, object);
// Use ecstore's heal_object with recreate option
let heal_opts = HealOpts {
recursive: false,
dry_run: self.options.dry_run,
remove: false,
recreate: true,
scan_mode: HealScanMode::Deep,
update_parity: true,
no_lock: false,
pool: None,
set: None,
};
match self
.await_with_control(self.storage.heal_object(bucket, object, version_id, &heal_opts))
.await
{
Ok((result, error)) => {
if let Some(e) = error {
error!("Failed to recreate missing object: {}/{} - {}", bucket, object, e);
return Err(Error::TaskExecutionFailed {
message: format!("Failed to recreate missing object {bucket}/{object}: {e}"),
});
}
let object_size = result.object_size as u64;
info!("Successfully recreated missing object: {}/{} ({} bytes)", bucket, object, object_size);
{
let mut progress = self.progress.write().await;
progress.update_progress(4, 4, object_size, object_size);
}
Ok(())
}
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
Err(e) => {
error!("Failed to recreate missing object: {}/{} - {}", bucket, object, e);
Err(Error::TaskExecutionFailed {
message: format!("Failed to recreate missing object {bucket}/{object}: {e}"),
})
}
}
}
async fn heal_bucket(&self, bucket: &str) -> Result<()> {
info!("Healing bucket: {}", bucket);
// update progress
{
let mut progress = self.progress.write().await;
progress.set_current_object(Some(format!("bucket: {bucket}")));
progress.update_progress(0, 3, 0, 0);
}
// Step 1: Check if bucket exists
info!("Step 1: Checking bucket existence");
self.check_control_flags().await?;
let bucket_exists = self.await_with_control(self.storage.get_bucket_info(bucket)).await?.is_some();
if !bucket_exists {
warn!("Bucket does not exist: {}", bucket);
return Err(Error::TaskExecutionFailed {
message: format!("Bucket not found: {bucket}"),
});
}
{
let mut progress = self.progress.write().await;
progress.update_progress(1, 3, 0, 0);
}
// Step 2: Perform bucket heal using ecstore
info!("Step 2: Performing bucket heal using ecstore");
let heal_opts = HealOpts {
recursive: self.options.recursive,
dry_run: self.options.dry_run,
remove: self.options.remove_corrupted,
recreate: self.options.recreate_missing,
scan_mode: self.options.scan_mode,
update_parity: self.options.update_parity,
no_lock: false,
pool: self.options.pool_index,
set: self.options.set_index,
};
let heal_result = self.await_with_control(self.storage.heal_bucket(bucket, &heal_opts)).await;
match heal_result {
Ok(result) => {
info!("Bucket heal completed successfully: {} ({} drives)", bucket, result.after.drives.len());
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 3, 0, 0);
}
Ok(())
}
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
Err(e) => {
error!("Bucket heal failed: {} - {}", bucket, e);
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 3, 0, 0);
}
Err(Error::TaskExecutionFailed {
message: format!("Failed to heal bucket {bucket}: {e}"),
})
}
}
}
async fn heal_metadata(&self, bucket: &str, object: &str) -> Result<()> {
info!("Healing metadata: {}/{}", bucket, object);
// update progress
{
let mut progress = self.progress.write().await;
progress.set_current_object(Some(format!("metadata: {bucket}/{object}")));
progress.update_progress(0, 3, 0, 0);
}
// Step 1: Check if object exists
info!("Step 1: Checking object existence");
self.check_control_flags().await?;
let object_exists = self.await_with_control(self.storage.object_exists(bucket, object)).await?;
if !object_exists {
warn!("Object does not exist: {}/{}", bucket, object);
return Err(Error::TaskExecutionFailed {
message: format!("Object not found: {bucket}/{object}"),
});
}
{
let mut progress = self.progress.write().await;
progress.update_progress(1, 3, 0, 0);
}
// Step 2: Perform metadata heal using ecstore
info!("Step 2: Performing metadata heal using ecstore");
let heal_opts = HealOpts {
recursive: false,
dry_run: self.options.dry_run,
remove: false,
recreate: false,
scan_mode: HealScanMode::Deep,
update_parity: false,
no_lock: false,
pool: self.options.pool_index,
set: self.options.set_index,
};
let heal_result = self
.await_with_control(self.storage.heal_object(bucket, object, None, &heal_opts))
.await;
match heal_result {
Ok((result, error)) => {
if let Some(e) = error {
error!("Metadata heal failed: {}/{} - {}", bucket, object, e);
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 3, 0, 0);
}
return Err(Error::TaskExecutionFailed {
message: format!("Failed to heal metadata {bucket}/{object}: {e}"),
});
}
info!(
"Metadata heal completed successfully: {}/{} ({} drives)",
bucket,
object,
result.after.drives.len()
);
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 3, 0, 0);
}
Ok(())
}
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
Err(e) => {
error!("Metadata heal failed: {}/{} - {}", bucket, object, e);
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 3, 0, 0);
}
Err(Error::TaskExecutionFailed {
message: format!("Failed to heal metadata {bucket}/{object}: {e}"),
})
}
}
}
async fn heal_mrf(&self, meta_path: &str) -> Result<()> {
info!("Healing MRF: {}", meta_path);
// update progress
{
let mut progress = self.progress.write().await;
progress.set_current_object(Some(format!("mrf: {meta_path}")));
progress.update_progress(0, 2, 0, 0);
}
// Parse meta_path to extract bucket and object
let parts: Vec<&str> = meta_path.split('/').collect();
if parts.len() < 2 {
return Err(Error::TaskExecutionFailed {
message: format!("Invalid meta path format: {meta_path}"),
});
}
let bucket = parts[0];
let object = parts[1..].join("/");
// Step 1: Perform MRF heal using ecstore
info!("Step 1: Performing MRF heal using ecstore");
let heal_opts = HealOpts {
recursive: true,
dry_run: self.options.dry_run,
remove: self.options.remove_corrupted,
recreate: self.options.recreate_missing,
scan_mode: HealScanMode::Deep,
update_parity: true,
no_lock: false,
pool: None,
set: None,
};
let heal_result = self
.await_with_control(self.storage.heal_object(bucket, &object, None, &heal_opts))
.await;
match heal_result {
Ok((result, error)) => {
if let Some(e) = error {
error!("MRF heal failed: {} - {}", meta_path, e);
{
let mut progress = self.progress.write().await;
progress.update_progress(2, 2, 0, 0);
}
return Err(Error::TaskExecutionFailed {
message: format!("Failed to heal MRF {meta_path}: {e}"),
});
}
info!("MRF heal completed successfully: {} ({} drives)", meta_path, result.after.drives.len());
{
let mut progress = self.progress.write().await;
progress.update_progress(2, 2, 0, 0);
}
Ok(())
}
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
Err(e) => {
error!("MRF heal failed: {} - {}", meta_path, e);
{
let mut progress = self.progress.write().await;
progress.update_progress(2, 2, 0, 0);
}
Err(Error::TaskExecutionFailed {
message: format!("Failed to heal MRF {meta_path}: {e}"),
})
}
}
}
async fn heal_ec_decode(&self, bucket: &str, object: &str, version_id: Option<&str>) -> Result<()> {
info!("Healing EC decode: {}/{}", bucket, object);
// update progress
{
let mut progress = self.progress.write().await;
progress.set_current_object(Some(format!("ec_decode: {bucket}/{object}")));
progress.update_progress(0, 3, 0, 0);
}
// Step 1: Check if object exists
info!("Step 1: Checking object existence");
self.check_control_flags().await?;
let object_exists = self.await_with_control(self.storage.object_exists(bucket, object)).await?;
if !object_exists {
warn!("Object does not exist: {}/{}", bucket, object);
return Err(Error::TaskExecutionFailed {
message: format!("Object not found: {bucket}/{object}"),
});
}
{
let mut progress = self.progress.write().await;
progress.update_progress(1, 3, 0, 0);
}
// Step 2: Perform EC decode heal using ecstore
info!("Step 2: Performing EC decode heal using ecstore");
let heal_opts = HealOpts {
recursive: false,
dry_run: self.options.dry_run,
remove: false,
recreate: true,
scan_mode: HealScanMode::Deep,
update_parity: true,
no_lock: false,
pool: None,
set: None,
};
let heal_result = self
.await_with_control(self.storage.heal_object(bucket, object, version_id, &heal_opts))
.await;
match heal_result {
Ok((result, error)) => {
if let Some(e) = error {
error!("EC decode heal failed: {}/{} - {}", bucket, object, e);
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 3, 0, 0);
}
return Err(Error::TaskExecutionFailed {
message: format!("Failed to heal EC decode {bucket}/{object}: {e}"),
});
}
let object_size = result.object_size as u64;
info!(
"EC decode heal completed successfully: {}/{} ({} bytes, {} drives)",
bucket,
object,
object_size,
result.after.drives.len()
);
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 3, object_size, object_size);
}
Ok(())
}
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
Err(e) => {
error!("EC decode heal failed: {}/{} - {}", bucket, object, e);
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 3, 0, 0);
}
Err(Error::TaskExecutionFailed {
message: format!("Failed to heal EC decode {bucket}/{object}: {e}"),
})
}
}
}
async fn heal_erasure_set(&self, buckets: Vec<String>, set_disk_id: String) -> Result<()> {
info!("Healing Erasure Set: {} ({} buckets)", set_disk_id, buckets.len());
// update progress
{
let mut progress = self.progress.write().await;
progress.set_current_object(Some(format!("erasure_set: {} ({} buckets)", set_disk_id, buckets.len())));
progress.update_progress(0, 4, 0, 0);
}
let buckets = if buckets.is_empty() {
info!("No buckets specified, listing all buckets");
let bucket_infos = self.await_with_control(self.storage.list_buckets()).await?;
bucket_infos.into_iter().map(|info| info.name).collect()
} else {
buckets
};
// Step 1: Perform disk format heal using ecstore
info!("Step 1: Performing disk format heal using ecstore");
let format_result = self.await_with_control(self.storage.heal_format(self.options.dry_run)).await;
match format_result {
Ok((result, error)) => {
if let Some(e) = error {
error!("Disk format heal failed: {} - {}", set_disk_id, e);
{
let mut progress = self.progress.write().await;
progress.update_progress(4, 4, 0, 0);
}
return Err(Error::TaskExecutionFailed {
message: format!("Failed to heal disk format for {set_disk_id}: {e}"),
});
}
info!(
"Disk format heal completed successfully: {} ({} drives)",
set_disk_id,
result.after.drives.len()
);
}
Err(Error::TaskCancelled) => return Err(Error::TaskCancelled),
Err(Error::TaskTimeout) => return Err(Error::TaskTimeout),
Err(e) => {
error!("Disk format heal failed: {} - {}", set_disk_id, e);
{
let mut progress = self.progress.write().await;
progress.update_progress(4, 4, 0, 0);
}
return Err(Error::TaskExecutionFailed {
message: format!("Failed to heal disk format for {set_disk_id}: {e}"),
});
}
}
{
let mut progress = self.progress.write().await;
progress.update_progress(1, 4, 0, 0);
}
// Step 2: Get disk for resume functionality
info!("Step 2: Getting disk for resume functionality");
let disk = self
.await_with_control(self.storage.get_disk_for_resume(&set_disk_id))
.await?;
{
let mut progress = self.progress.write().await;
progress.update_progress(2, 4, 0, 0);
}
// Heal bucket structure for each bucket before creating the erasure set healer.
// Check control flags before each iteration to ensure timely cancellation.
// Each heal_bucket call may handle timeout/cancellation internally, see its implementation for details.
for bucket in buckets.iter() {
// Check control flags before starting each bucket heal
self.check_control_flags().await?;
// heal_bucket internally uses await_with_control for timeout/cancellation handling
if let Err(err) = self.heal_bucket(bucket).await {
// Check if error is due to cancellation or timeout
if matches!(err, Error::TaskCancelled | Error::TaskTimeout) {
return Err(err);
}
info!("Bucket heal failed: {}", err.to_string());
}
}
// Step 3: Create erasure set healer with resume support
info!("Step 3: Creating erasure set healer with resume support");
let erasure_healer = ErasureSetHealer::new(self.storage.clone(), self.progress.clone(), self.cancel_token.clone(), disk);
{
let mut progress = self.progress.write().await;
progress.update_progress(3, 4, 0, 0);
}
// Step 4: Execute erasure set heal with resume
info!("Step 4: Executing erasure set heal with resume");
let result = erasure_healer.heal_erasure_set(&buckets, &set_disk_id).await;
{
let mut progress = self.progress.write().await;
progress.update_progress(4, 4, 0, 0);
}
match result {
Ok(_) => {
info!("Erasure set heal completed successfully: {} ({} buckets)", set_disk_id, buckets.len());
Ok(())
}
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
Err(e) => {
error!("Erasure set heal failed: {} - {}", set_disk_id, e);
Err(Error::TaskExecutionFailed {
message: format!("Failed to heal erasure set {set_disk_id}: {e}"),
})
}
}
}
}
impl std::fmt::Debug for HealTask {
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
f.debug_struct("HealTask")
.field("id", &self.id)
.field("heal_type", &self.heal_type)
.field("options", &self.options)
.field("created_at", &self.created_at)
.finish()
}
}
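All three heal paths above funnel the ecstore call through await_with_control and map the returned (result, error) pair into task errors while bumping the progress counter. A minimal, self-contained sketch of that control pattern follows; the TaskError variants and the function signature here are illustrative stand-ins, not the crate's actual await_with_control:
use std::time::Duration;
use tokio::time::timeout;
use tokio_util::sync::CancellationToken;
#[derive(Debug)]
enum TaskError {
    Cancelled,
    Timeout,
    Failed(String),
}
// Race the wrapped future against cancellation and a deadline,
// mapping each outcome to a distinct task error.
async fn run_with_control<T>(
    cancel: &CancellationToken,
    deadline: Duration,
    fut: impl std::future::Future<Output = Result<T, String>>,
) -> Result<T, TaskError> {
    tokio::select! {
        _ = cancel.cancelled() => Err(TaskError::Cancelled),
        res = timeout(deadline, fut) => match res {
            Err(_elapsed) => Err(TaskError::Timeout),
            Ok(Ok(value)) => Ok(value),
            Ok(Err(e)) => Err(TaskError::Failed(e)),
        },
    }
}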

View File

@@ -0,0 +1,110 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{Error, Result};
/// Prefix for pool index in set disk identifiers.
const POOL_PREFIX: &str = "pool";
/// Prefix for set index in set disk identifiers.
const SET_PREFIX: &str = "set";
/// Format a set disk identifier using unsigned indices.
pub fn format_set_disk_id(pool_idx: usize, set_idx: usize) -> String {
format!("{POOL_PREFIX}_{pool_idx}_{SET_PREFIX}_{set_idx}")
}
/// Format a set disk identifier from signed indices.
pub fn format_set_disk_id_from_i32(pool_idx: i32, set_idx: i32) -> Option<String> {
if pool_idx < 0 || set_idx < 0 {
None
} else {
Some(format_set_disk_id(pool_idx as usize, set_idx as usize))
}
}
/// Normalize external set disk identifiers into the canonical format.
pub fn normalize_set_disk_id(raw: &str) -> Option<String> {
if raw.starts_with(&format!("{POOL_PREFIX}_")) {
Some(raw.to_string())
} else {
parse_compact_set_disk_id(raw).map(|(pool, set)| format_set_disk_id(pool, set))
}
}
/// Parse a canonical set disk identifier into pool/set indices.
pub fn parse_set_disk_id(raw: &str) -> Result<(usize, usize)> {
let parts: Vec<&str> = raw.split('_').collect();
if parts.len() != 4 || parts[0] != POOL_PREFIX || parts[2] != SET_PREFIX {
return Err(Error::TaskExecutionFailed {
message: format!("Invalid set_disk_id format: {raw}"),
});
}
let pool_idx = parts[1].parse::<usize>().map_err(|_| Error::TaskExecutionFailed {
message: format!("Invalid pool index in set_disk_id: {raw}"),
})?;
let set_idx = parts[3].parse::<usize>().map_err(|_| Error::TaskExecutionFailed {
message: format!("Invalid set index in set_disk_id: {raw}"),
})?;
Ok((pool_idx, set_idx))
}
fn parse_compact_set_disk_id(raw: &str) -> Option<(usize, usize)> {
let (pool, set) = raw.split_once('_')?;
let pool_idx = pool.parse::<usize>().ok()?;
let set_idx = set.parse::<usize>().ok()?;
Some((pool_idx, set_idx))
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn format_from_unsigned_indices() {
assert_eq!(format_set_disk_id(1, 2), "pool_1_set_2");
}
#[test]
fn format_from_signed_indices() {
assert_eq!(format_set_disk_id_from_i32(3, 4), Some("pool_3_set_4".into()));
assert_eq!(format_set_disk_id_from_i32(-1, 4), None);
}
#[test]
fn normalize_compact_identifier() {
assert_eq!(normalize_set_disk_id("3_5"), Some("pool_3_set_5".to_string()));
}
#[test]
fn normalize_prefixed_identifier() {
assert_eq!(normalize_set_disk_id("pool_7_set_1"), Some("pool_7_set_1".to_string()));
}
#[test]
fn normalize_invalid_identifier() {
assert_eq!(normalize_set_disk_id("invalid"), None);
}
#[test]
fn parse_prefixed_identifier() {
assert_eq!(parse_set_disk_id("pool_9_set_3").unwrap(), (9, 3));
}
#[test]
fn parse_invalid_identifier() {
assert!(parse_set_disk_id("bad").is_err());
assert!(parse_set_disk_id("pool_X_set_1").is_err());
}
}
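A small in-crate usage sketch of the helpers above (normalize first, then parse), assuming it lives inside this module:
// Accepts both the compact "pool_set" form and the canonical prefixed form.
fn describe(raw: &str) {
    match normalize_set_disk_id(raw).as_deref().map(parse_set_disk_id) {
        Some(Ok((pool, set))) => println!("{raw} -> pool {pool}, set {set}"),
        _ => println!("{raw} is not a valid set disk id"),
    }
}
fn demo() {
    describe("3_5");          // compact form -> pool 3, set 5
    describe("pool_7_set_1"); // canonical form -> pool 7, set 1
    describe("invalid");      // rejected
}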

View File

@@ -12,17 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::OnceLock;
use tokio_util::sync::CancellationToken;
pub mod error;
mod error;
pub mod heal;
pub mod scanner;
pub use error::{Error, Result};
pub use scanner::{
BucketTargetUsageInfo, BucketUsageInfo, DataUsageInfo, Scanner, ScannerMetrics, load_data_usage_from_backend,
store_data_usage_in_backend,
};
pub use heal::{HealManager, HealOptions, HealPriority, HealRequest, HealType, channel::HealChannelProcessor};
pub use scanner::Scanner;
use std::sync::{Arc, OnceLock};
use tokio_util::sync::CancellationToken;
use tracing::{error, info};
// Global cancellation token for AHM services (scanner and other background tasks)
static GLOBAL_AHM_SERVICES_CANCEL_TOKEN: OnceLock<CancellationToken> = OnceLock::new();
@@ -52,3 +51,61 @@ pub fn shutdown_ahm_services() {
cancel_token.cancel();
}
}
/// Global heal manager instance
static GLOBAL_HEAL_MANAGER: OnceLock<Arc<HealManager>> = OnceLock::new();
/// Global heal channel processor instance
static GLOBAL_HEAL_CHANNEL_PROCESSOR: OnceLock<Arc<tokio::sync::Mutex<HealChannelProcessor>>> = OnceLock::new();
/// Initialize and start heal manager with channel processor
pub async fn init_heal_manager(
storage: Arc<dyn heal::storage::HealStorageAPI>,
config: Option<heal::manager::HealConfig>,
) -> Result<Arc<HealManager>> {
// Create heal manager
let heal_manager = Arc::new(HealManager::new(storage, config));
// Start heal manager
heal_manager.start().await?;
// Store global instance
GLOBAL_HEAL_MANAGER
.set(heal_manager.clone())
.map_err(|_| Error::Config("Heal manager already initialized".to_string()))?;
// Initialize heal channel
let channel_receiver = rustfs_common::heal_channel::init_heal_channel();
// Create channel processor
let channel_processor = HealChannelProcessor::new(heal_manager.clone());
// Store channel processor instance first
GLOBAL_HEAL_CHANNEL_PROCESSOR
.set(Arc::new(tokio::sync::Mutex::new(channel_processor)))
.map_err(|_| Error::Config("Heal channel processor already initialized".to_string()))?;
// Start channel processor in background
let receiver = channel_receiver;
tokio::spawn(async move {
if let Some(processor_guard) = GLOBAL_HEAL_CHANNEL_PROCESSOR.get() {
let mut processor = processor_guard.lock().await;
if let Err(e) = processor.start(receiver).await {
error!("Heal channel processor failed: {}", e);
}
}
});
info!("Heal manager with channel processor initialized successfully");
Ok(heal_manager)
}
/// Get global heal manager instance
pub fn get_heal_manager() -> Option<&'static Arc<HealManager>> {
GLOBAL_HEAL_MANAGER.get()
}
/// Get global heal channel processor instance
pub fn get_heal_channel_processor() -> Option<&'static Arc<tokio::sync::Mutex<HealChannelProcessor>>> {
GLOBAL_HEAL_CHANNEL_PROCESSOR.get()
}
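A hedged wiring sketch for the new entry points above; the crate path rustfs_ahm and an ECStore-backed HealStorageAPI implementation passed in by the caller are assumptions for illustration only:
use std::sync::Arc;
async fn start_heal_subsystem(
    storage: Arc<dyn rustfs_ahm::heal::storage::HealStorageAPI>,
) -> rustfs_ahm::Result<()> {
    // None => default HealConfig; pass Some(config) to tune intervals/queues.
    let manager = rustfs_ahm::init_heal_manager(storage, None).await?;
    // The same instance is reachable globally afterwards.
    assert!(rustfs_ahm::get_heal_manager().is_some());
    drop(manager);
    Ok(())
}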

View File

@@ -0,0 +1,326 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::scanner::node_scanner::ScanProgress;
use crate::{Error, Result};
use serde::{Deserialize, Serialize};
use std::{
path::{Path, PathBuf},
time::{Duration, SystemTime},
};
use tokio::sync::RwLock;
use tracing::{debug, error, info, warn};
#[derive(Debug, Serialize, Deserialize, Clone)]
pub struct CheckpointData {
pub version: u32,
pub timestamp: SystemTime,
pub progress: ScanProgress,
pub node_id: String,
pub checksum: u64,
}
impl CheckpointData {
pub fn new(progress: ScanProgress, node_id: String) -> Self {
let mut checkpoint = Self {
version: 1,
timestamp: SystemTime::now(),
progress,
node_id,
checksum: 0,
};
checkpoint.checksum = checkpoint.calculate_checksum();
checkpoint
}
fn calculate_checksum(&self) -> u64 {
use std::collections::hash_map::DefaultHasher;
use std::hash::{Hash, Hasher};
let mut hasher = DefaultHasher::new();
self.version.hash(&mut hasher);
self.node_id.hash(&mut hasher);
self.progress.current_cycle.hash(&mut hasher);
self.progress.current_disk_index.hash(&mut hasher);
if let Some(ref bucket) = self.progress.current_bucket {
bucket.hash(&mut hasher);
}
if let Some(ref key) = self.progress.last_scan_key {
key.hash(&mut hasher);
}
hasher.finish()
}
pub fn verify_integrity(&self) -> bool {
let calculated_checksum = self.calculate_checksum();
self.checksum == calculated_checksum
}
}
pub struct CheckpointManager {
checkpoint_file: PathBuf,
backup_file: PathBuf,
temp_file: PathBuf,
save_interval: Duration,
last_save: RwLock<SystemTime>,
node_id: String,
}
impl CheckpointManager {
pub fn new(node_id: &str, data_dir: &Path) -> Self {
if !data_dir.exists() {
if let Err(e) = std::fs::create_dir_all(data_dir) {
error!("create data dir failed {:?}: {}", data_dir, e);
}
}
let checkpoint_file = data_dir.join(format!("scanner_checkpoint_{node_id}.json"));
let backup_file = data_dir.join(format!("scanner_checkpoint_{node_id}.backup"));
let temp_file = data_dir.join(format!("scanner_checkpoint_{node_id}.tmp"));
Self {
checkpoint_file,
backup_file,
temp_file,
save_interval: Duration::from_secs(30), // 30s
last_save: RwLock::new(SystemTime::UNIX_EPOCH),
node_id: node_id.to_string(),
}
}
pub async fn save_checkpoint(&self, progress: &ScanProgress) -> Result<()> {
let now = SystemTime::now();
let last_save = *self.last_save.read().await;
if now.duration_since(last_save).unwrap_or(Duration::ZERO) < self.save_interval {
return Ok(());
}
let checkpoint_data = CheckpointData::new(progress.clone(), self.node_id.clone());
let json_data = serde_json::to_string_pretty(&checkpoint_data)
.map_err(|e| Error::Serialization(format!("serialize checkpoint failed: {e}")))?;
tokio::fs::write(&self.temp_file, json_data)
.await
.map_err(|e| Error::IO(format!("write temp checkpoint file failed: {e}")))?;
if self.checkpoint_file.exists() {
tokio::fs::copy(&self.checkpoint_file, &self.backup_file)
.await
.map_err(|e| Error::IO(format!("backup checkpoint file failed: {e}")))?;
}
tokio::fs::rename(&self.temp_file, &self.checkpoint_file)
.await
.map_err(|e| Error::IO(format!("replace checkpoint file failed: {e}")))?;
*self.last_save.write().await = now;
debug!(
"save checkpoint to {:?}, cycle: {}, disk index: {}",
self.checkpoint_file, checkpoint_data.progress.current_cycle, checkpoint_data.progress.current_disk_index
);
Ok(())
}
pub async fn load_checkpoint(&self) -> Result<Option<ScanProgress>> {
// first try main checkpoint file
match self.load_checkpoint_from_file(&self.checkpoint_file).await {
Ok(checkpoint) => {
info!(
"restore scan progress from main checkpoint file: cycle={}, disk index={}, last scan key={:?}",
checkpoint.current_cycle, checkpoint.current_disk_index, checkpoint.last_scan_key
);
Ok(Some(checkpoint))
}
Err(e) => {
warn!("main checkpoint file is corrupted or not exists: {}", e);
// try backup file
match self.load_checkpoint_from_file(&self.backup_file).await {
Ok(checkpoint) => {
warn!(
"restore scan progress from backup file: cycle={}, disk index={}",
checkpoint.current_cycle, checkpoint.current_disk_index
);
// copy backup file to main checkpoint file
if let Err(copy_err) = tokio::fs::copy(&self.backup_file, &self.checkpoint_file).await {
warn!("restore main checkpoint file failed: {}", copy_err);
}
Ok(Some(checkpoint))
}
Err(backup_e) => {
warn!("backup file is corrupted or not exists: {}", backup_e);
info!("cannot restore scan progress, will start fresh scan");
Ok(None)
}
}
}
}
}
/// load checkpoint from file
async fn load_checkpoint_from_file(&self, file_path: &Path) -> Result<ScanProgress> {
if !file_path.exists() {
return Err(Error::NotFound(format!("checkpoint file does not exist: {file_path:?}")));
}
// read file content
let content = tokio::fs::read_to_string(file_path)
.await
.map_err(|e| Error::IO(format!("read checkpoint file failed: {e}")))?;
// deserialize
let checkpoint_data: CheckpointData =
serde_json::from_str(&content).map_err(|e| Error::Serialization(format!("deserialize checkpoint failed: {e}")))?;
// validate checkpoint data
self.validate_checkpoint(&checkpoint_data)?;
Ok(checkpoint_data.progress)
}
/// validate checkpoint data
fn validate_checkpoint(&self, checkpoint: &CheckpointData) -> Result<()> {
// validate data integrity
if !checkpoint.verify_integrity() {
return Err(Error::InvalidCheckpoint(
"checkpoint data verification failed, may be corrupted".to_string(),
));
}
// validate node id match
if checkpoint.node_id != self.node_id {
return Err(Error::InvalidCheckpoint(format!(
"checkpoint node id not match: expected {}, actual {}",
self.node_id, checkpoint.node_id
)));
}
let now = SystemTime::now();
let checkpoint_age = now.duration_since(checkpoint.timestamp).unwrap_or(Duration::MAX);
// a checkpoint older than 24 hours is treated as expired
if checkpoint_age > Duration::from_secs(24 * 3600) {
return Err(Error::InvalidCheckpoint(format!("checkpoint data is too old: {checkpoint_age:?}")));
}
// validate version compatibility
if checkpoint.version > 1 {
return Err(Error::InvalidCheckpoint(format!(
"unsupported checkpoint version: {}",
checkpoint.version
)));
}
Ok(())
}
/// clean checkpoint file
///
/// called when scanner stops or resets
pub async fn cleanup_checkpoint(&self) -> Result<()> {
// delete main file
if self.checkpoint_file.exists() {
tokio::fs::remove_file(&self.checkpoint_file)
.await
.map_err(|e| Error::IO(format!("delete main checkpoint file failed: {e}")))?;
}
// delete backup file
if self.backup_file.exists() {
tokio::fs::remove_file(&self.backup_file)
.await
.map_err(|e| Error::IO(format!("delete backup checkpoint file failed: {e}")))?;
}
// delete temp file
if self.temp_file.exists() {
tokio::fs::remove_file(&self.temp_file)
.await
.map_err(|e| Error::IO(format!("delete temp checkpoint file failed: {e}")))?;
}
info!("cleaned up all checkpoint files");
Ok(())
}
/// get checkpoint file info
pub async fn get_checkpoint_info(&self) -> Result<Option<CheckpointInfo>> {
if !self.checkpoint_file.exists() {
return Ok(None);
}
let metadata = tokio::fs::metadata(&self.checkpoint_file)
.await
.map_err(|e| Error::IO(format!("get checkpoint file metadata failed: {e}")))?;
let content = tokio::fs::read_to_string(&self.checkpoint_file)
.await
.map_err(|e| Error::IO(format!("read checkpoint file failed: {e}")))?;
let checkpoint_data: CheckpointData =
serde_json::from_str(&content).map_err(|e| Error::Serialization(format!("deserialize checkpoint failed: {e}")))?;
Ok(Some(CheckpointInfo {
file_size: metadata.len(),
last_modified: metadata.modified().unwrap_or(SystemTime::UNIX_EPOCH),
checkpoint_timestamp: checkpoint_data.timestamp,
current_cycle: checkpoint_data.progress.current_cycle,
current_disk_index: checkpoint_data.progress.current_disk_index,
completed_disks_count: checkpoint_data.progress.completed_disks.len(),
is_valid: checkpoint_data.verify_integrity(),
}))
}
/// force save checkpoint (ignore time interval limit)
pub async fn force_save_checkpoint(&self, progress: &ScanProgress) -> Result<()> {
// reset last save time so the next save_checkpoint call runs immediately
*self.last_save.write().await = SystemTime::UNIX_EPOCH;
self.save_checkpoint(progress).await
}
/// set save interval
pub async fn set_save_interval(&mut self, interval: Duration) {
self.save_interval = interval;
info!("checkpoint save interval set to: {:?}", interval);
}
}
/// checkpoint info
#[derive(Debug, Clone)]
pub struct CheckpointInfo {
/// file size
pub file_size: u64,
/// file last modified time
pub last_modified: SystemTime,
/// checkpoint creation time
pub checkpoint_timestamp: SystemTime,
/// current scan cycle
pub current_cycle: u64,
/// current disk index
pub current_disk_index: usize,
/// completed disks count
pub completed_disks_count: usize,
/// checkpoint is valid
pub is_valid: bool,
}
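A usage sketch for CheckpointManager (in-crate); ScanProgress::default() is assumed to exist here for brevity:
use std::path::Path;
async fn checkpoint_round_trip(data_dir: &Path) -> crate::Result<()> {
    let manager = CheckpointManager::new("node-1", data_dir);
    let progress = ScanProgress::default(); // assumed Default impl
    // force_save_checkpoint bypasses the 30s save-interval throttle.
    manager.force_save_checkpoint(&progress).await?;
    if let Some(restored) = manager.load_checkpoint().await? {
        println!("resuming at cycle {}", restored.current_cycle);
    }
    manager.cleanup_checkpoint().await?;
    Ok(())
}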

File diff suppressed because it is too large Load Diff

View File

@@ -1,671 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use std::{collections::HashMap, sync::Arc, time::SystemTime};
use rustfs_ecstore::{bucket::metadata_sys::get_replication_config, config::com::read_config, store::ECStore};
use rustfs_utils::path::SLASH_SEPARATOR;
use serde::{Deserialize, Serialize};
use tracing::{error, info, warn};
use crate::error::{Error, Result};
// Data usage storage constants
pub const DATA_USAGE_ROOT: &str = SLASH_SEPARATOR;
const DATA_USAGE_OBJ_NAME: &str = ".usage.json";
const DATA_USAGE_BLOOM_NAME: &str = ".bloomcycle.bin";
pub const DATA_USAGE_CACHE_NAME: &str = ".usage-cache.bin";
// Data usage storage paths
lazy_static::lazy_static! {
pub static ref DATA_USAGE_BUCKET: String = format!("{}{}{}",
rustfs_ecstore::disk::RUSTFS_META_BUCKET,
SLASH_SEPARATOR,
rustfs_ecstore::disk::BUCKET_META_PREFIX
);
pub static ref DATA_USAGE_OBJ_NAME_PATH: String = format!("{}{}{}",
rustfs_ecstore::disk::BUCKET_META_PREFIX,
SLASH_SEPARATOR,
DATA_USAGE_OBJ_NAME
);
pub static ref DATA_USAGE_BLOOM_NAME_PATH: String = format!("{}{}{}",
rustfs_ecstore::disk::BUCKET_META_PREFIX,
SLASH_SEPARATOR,
DATA_USAGE_BLOOM_NAME
);
}
/// Bucket target usage info provides replication statistics
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct BucketTargetUsageInfo {
pub replication_pending_size: u64,
pub replication_failed_size: u64,
pub replicated_size: u64,
pub replica_size: u64,
pub replication_pending_count: u64,
pub replication_failed_count: u64,
pub replicated_count: u64,
}
/// Bucket usage info provides bucket-level statistics
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct BucketUsageInfo {
pub size: u64,
// Following five fields suffixed with V1 are here for backward compatibility
// Total Size for objects that have not yet been replicated
pub replication_pending_size_v1: u64,
// Total size for objects that have witnessed one or more failures and will be retried
pub replication_failed_size_v1: u64,
// Total size for objects that have been replicated to destination
pub replicated_size_v1: u64,
// Total number of objects pending replication
pub replication_pending_count_v1: u64,
// Total number of objects that failed replication
pub replication_failed_count_v1: u64,
pub objects_count: u64,
pub object_size_histogram: HashMap<String, u64>,
pub object_versions_histogram: HashMap<String, u64>,
pub versions_count: u64,
pub delete_markers_count: u64,
pub replica_size: u64,
pub replica_count: u64,
pub replication_info: HashMap<String, BucketTargetUsageInfo>,
}
/// DataUsageInfo represents data usage stats of the underlying storage
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct DataUsageInfo {
/// Total capacity
pub total_capacity: u64,
/// Total used capacity
pub total_used_capacity: u64,
/// Total free capacity
pub total_free_capacity: u64,
/// LastUpdate is the timestamp of when the data usage info was last updated
pub last_update: Option<SystemTime>,
/// Objects total count across all buckets
pub objects_total_count: u64,
/// Versions total count across all buckets
pub versions_total_count: u64,
/// Delete markers total count across all buckets
pub delete_markers_total_count: u64,
/// Objects total size across all buckets
pub objects_total_size: u64,
/// Replication info across all buckets
pub replication_info: HashMap<String, BucketTargetUsageInfo>,
/// Total number of buckets in this cluster
pub buckets_count: u64,
/// Buckets usage info provides following information across all buckets
pub buckets_usage: HashMap<String, BucketUsageInfo>,
/// Deprecated kept here for backward compatibility reasons
pub bucket_sizes: HashMap<String, u64>,
}
/// Size summary for a single object or group of objects
#[derive(Debug, Default, Clone)]
pub struct SizeSummary {
/// Total size
pub total_size: usize,
/// Number of versions
pub versions: usize,
/// Number of delete markers
pub delete_markers: usize,
/// Replicated size
pub replicated_size: usize,
/// Replicated count
pub replicated_count: usize,
/// Pending size
pub pending_size: usize,
/// Failed size
pub failed_size: usize,
/// Replica size
pub replica_size: usize,
/// Replica count
pub replica_count: usize,
/// Pending count
pub pending_count: usize,
/// Failed count
pub failed_count: usize,
/// Replication target stats
pub repl_target_stats: HashMap<String, ReplTargetSizeSummary>,
}
/// Replication target size summary
#[derive(Debug, Default, Clone)]
pub struct ReplTargetSizeSummary {
/// Replicated size
pub replicated_size: usize,
/// Replicated count
pub replicated_count: usize,
/// Pending size
pub pending_size: usize,
/// Failed size
pub failed_size: usize,
/// Pending count
pub pending_count: usize,
/// Failed count
pub failed_count: usize,
}
impl DataUsageInfo {
/// Create a new DataUsageInfo
pub fn new() -> Self {
Self::default()
}
/// Add object metadata to data usage statistics
pub fn add_object(&mut self, object_path: &str, meta_object: &rustfs_filemeta::MetaObject) {
// This method is kept for backward compatibility
// For accurate version counting, use add_object_from_file_meta instead
let bucket_name = match self.extract_bucket_from_path(object_path) {
Ok(name) => name,
Err(_) => return,
};
// Update bucket statistics
if let Some(bucket_usage) = self.buckets_usage.get_mut(&bucket_name) {
bucket_usage.size += meta_object.size as u64;
bucket_usage.objects_count += 1;
bucket_usage.versions_count += 1; // Simplified: assume 1 version per object
// Update size histogram
let total_size = meta_object.size as u64;
let size_ranges = [
("0-1KB", 0, 1024),
("1KB-1MB", 1024, 1024 * 1024),
("1MB-10MB", 1024 * 1024, 10 * 1024 * 1024),
("10MB-100MB", 10 * 1024 * 1024, 100 * 1024 * 1024),
("100MB-1GB", 100 * 1024 * 1024, 1024 * 1024 * 1024),
("1GB+", 1024 * 1024 * 1024, u64::MAX),
];
for (range_name, min_size, max_size) in size_ranges {
if total_size >= min_size && total_size < max_size {
*bucket_usage.object_size_histogram.entry(range_name.to_string()).or_insert(0) += 1;
break;
}
}
// Update version histogram (simplified - count as single version)
*bucket_usage
.object_versions_histogram
.entry("SINGLE_VERSION".to_string())
.or_insert(0) += 1;
} else {
// Create new bucket usage
let mut bucket_usage = BucketUsageInfo {
size: meta_object.size as u64,
objects_count: 1,
versions_count: 1,
..Default::default()
};
bucket_usage.object_size_histogram.insert("0-1KB".to_string(), 1);
bucket_usage.object_versions_histogram.insert("SINGLE_VERSION".to_string(), 1);
self.buckets_usage.insert(bucket_name, bucket_usage);
}
// Update global statistics
self.objects_total_size += meta_object.size as u64;
self.objects_total_count += 1;
self.versions_total_count += 1;
}
/// Add object from FileMeta for accurate version counting
pub fn add_object_from_file_meta(&mut self, object_path: &str, file_meta: &rustfs_filemeta::FileMeta) {
let bucket_name = match self.extract_bucket_from_path(object_path) {
Ok(name) => name,
Err(_) => return,
};
// Calculate accurate statistics from all versions
let mut total_size = 0u64;
let mut versions_count = 0u64;
let mut delete_markers_count = 0u64;
let mut latest_object_size = 0u64;
// Process all versions to get accurate counts
for version in &file_meta.versions {
match rustfs_filemeta::FileMetaVersion::try_from(version.clone()) {
Ok(ver) => {
if let Some(obj) = ver.object {
total_size += obj.size as u64;
versions_count += 1;
latest_object_size = obj.size as u64; // Keep track of latest object size
} else if ver.delete_marker.is_some() {
delete_markers_count += 1;
}
}
Err(_) => {
// Skip invalid versions
continue;
}
}
}
// Update bucket statistics
if let Some(bucket_usage) = self.buckets_usage.get_mut(&bucket_name) {
bucket_usage.size += total_size;
bucket_usage.objects_count += 1;
bucket_usage.versions_count += versions_count;
bucket_usage.delete_markers_count += delete_markers_count;
// Update size histogram based on latest object size
let size_ranges = [
("0-1KB", 0, 1024),
("1KB-1MB", 1024, 1024 * 1024),
("1MB-10MB", 1024 * 1024, 10 * 1024 * 1024),
("10MB-100MB", 10 * 1024 * 1024, 100 * 1024 * 1024),
("100MB-1GB", 100 * 1024 * 1024, 1024 * 1024 * 1024),
("1GB+", 1024 * 1024 * 1024, u64::MAX),
];
for (range_name, min_size, max_size) in size_ranges {
if latest_object_size >= min_size && latest_object_size < max_size {
*bucket_usage.object_size_histogram.entry(range_name.to_string()).or_insert(0) += 1;
break;
}
}
// Update version histogram based on actual version count
let version_ranges = [
("1", 1, 1),
("2-5", 2, 5),
("6-10", 6, 10),
("11-50", 11, 50),
("51-100", 51, 100),
("100+", 101, usize::MAX),
];
for (range_name, min_versions, max_versions) in version_ranges {
if versions_count as usize >= min_versions && versions_count as usize <= max_versions {
*bucket_usage
.object_versions_histogram
.entry(range_name.to_string())
.or_insert(0) += 1;
break;
}
}
} else {
// Create new bucket usage
let mut bucket_usage = BucketUsageInfo {
size: total_size,
objects_count: 1,
versions_count,
delete_markers_count,
..Default::default()
};
// Set size histogram
let size_ranges = [
("0-1KB", 0, 1024),
("1KB-1MB", 1024, 1024 * 1024),
("1MB-10MB", 1024 * 1024, 10 * 1024 * 1024),
("10MB-100MB", 10 * 1024 * 1024, 100 * 1024 * 1024),
("100MB-1GB", 100 * 1024 * 1024, 1024 * 1024 * 1024),
("1GB+", 1024 * 1024 * 1024, u64::MAX),
];
for (range_name, min_size, max_size) in size_ranges {
if latest_object_size >= min_size && latest_object_size < max_size {
bucket_usage.object_size_histogram.insert(range_name.to_string(), 1);
break;
}
}
// Set version histogram
let version_ranges = [
("1", 1, 1),
("2-5", 2, 5),
("6-10", 6, 10),
("11-50", 11, 50),
("51-100", 51, 100),
("100+", 101, usize::MAX),
];
for (range_name, min_versions, max_versions) in version_ranges {
if versions_count as usize >= min_versions && versions_count as usize <= max_versions {
bucket_usage.object_versions_histogram.insert(range_name.to_string(), 1);
break;
}
}
self.buckets_usage.insert(bucket_name, bucket_usage);
// Update buckets count when adding new bucket
self.buckets_count = self.buckets_usage.len() as u64;
}
// Update global statistics
self.objects_total_size += total_size;
self.objects_total_count += 1;
self.versions_total_count += versions_count;
self.delete_markers_total_count += delete_markers_count;
}
/// Extract bucket name from object path
fn extract_bucket_from_path(&self, object_path: &str) -> Result<String> {
let parts: Vec<&str> = object_path.split('/').collect();
if parts.is_empty() {
return Err(Error::Scanner("Invalid object path: empty".to_string()));
}
Ok(parts[0].to_string())
}
/// Update capacity information
pub fn update_capacity(&mut self, total: u64, used: u64, free: u64) {
self.total_capacity = total;
self.total_used_capacity = used;
self.total_free_capacity = free;
self.last_update = Some(SystemTime::now());
}
/// Add bucket usage info
pub fn add_bucket_usage(&mut self, bucket: String, usage: BucketUsageInfo) {
self.buckets_usage.insert(bucket.clone(), usage);
self.buckets_count = self.buckets_usage.len() as u64;
self.last_update = Some(SystemTime::now());
}
/// Get bucket usage info
pub fn get_bucket_usage(&self, bucket: &str) -> Option<&BucketUsageInfo> {
self.buckets_usage.get(bucket)
}
/// Calculate total statistics from all buckets
pub fn calculate_totals(&mut self) {
self.objects_total_count = 0;
self.versions_total_count = 0;
self.delete_markers_total_count = 0;
self.objects_total_size = 0;
for usage in self.buckets_usage.values() {
self.objects_total_count += usage.objects_count;
self.versions_total_count += usage.versions_count;
self.delete_markers_total_count += usage.delete_markers_count;
self.objects_total_size += usage.size;
}
}
/// Merge another DataUsageInfo into this one
pub fn merge(&mut self, other: &DataUsageInfo) {
// Merge bucket usage
for (bucket, usage) in &other.buckets_usage {
if let Some(existing) = self.buckets_usage.get_mut(bucket) {
existing.merge(usage);
} else {
self.buckets_usage.insert(bucket.clone(), usage.clone());
}
}
// Recalculate totals
self.calculate_totals();
// Ensure buckets_count stays consistent with buckets_usage
self.buckets_count = self.buckets_usage.len() as u64;
// Update last update time
if let Some(other_update) = other.last_update {
if self.last_update.is_none() || other_update > self.last_update.unwrap() {
self.last_update = Some(other_update);
}
}
}
}
impl BucketUsageInfo {
/// Create a new BucketUsageInfo
pub fn new() -> Self {
Self::default()
}
/// Add size summary to this bucket usage
pub fn add_size_summary(&mut self, summary: &SizeSummary) {
self.size += summary.total_size as u64;
self.versions_count += summary.versions as u64;
self.delete_markers_count += summary.delete_markers as u64;
self.replica_size += summary.replica_size as u64;
self.replica_count += summary.replica_count as u64;
}
/// Merge another BucketUsageInfo into this one
pub fn merge(&mut self, other: &BucketUsageInfo) {
self.size += other.size;
self.objects_count += other.objects_count;
self.versions_count += other.versions_count;
self.delete_markers_count += other.delete_markers_count;
self.replica_size += other.replica_size;
self.replica_count += other.replica_count;
// Merge histograms
for (key, value) in &other.object_size_histogram {
*self.object_size_histogram.entry(key.clone()).or_insert(0) += value;
}
for (key, value) in &other.object_versions_histogram {
*self.object_versions_histogram.entry(key.clone()).or_insert(0) += value;
}
// Merge replication info
for (target, info) in &other.replication_info {
let entry = self.replication_info.entry(target.clone()).or_default();
entry.replicated_size += info.replicated_size;
entry.replica_size += info.replica_size;
entry.replication_pending_size += info.replication_pending_size;
entry.replication_failed_size += info.replication_failed_size;
entry.replication_pending_count += info.replication_pending_count;
entry.replication_failed_count += info.replication_failed_count;
entry.replicated_count += info.replicated_count;
}
// Merge backward compatibility fields
self.replication_pending_size_v1 += other.replication_pending_size_v1;
self.replication_failed_size_v1 += other.replication_failed_size_v1;
self.replicated_size_v1 += other.replicated_size_v1;
self.replication_pending_count_v1 += other.replication_pending_count_v1;
self.replication_failed_count_v1 += other.replication_failed_count_v1;
}
}
impl SizeSummary {
/// Create a new SizeSummary
pub fn new() -> Self {
Self::default()
}
/// Add another SizeSummary to this one
pub fn add(&mut self, other: &SizeSummary) {
self.total_size += other.total_size;
self.versions += other.versions;
self.delete_markers += other.delete_markers;
self.replicated_size += other.replicated_size;
self.replicated_count += other.replicated_count;
self.pending_size += other.pending_size;
self.failed_size += other.failed_size;
self.replica_size += other.replica_size;
self.replica_count += other.replica_count;
self.pending_count += other.pending_count;
self.failed_count += other.failed_count;
// Merge replication target stats
for (target, stats) in &other.repl_target_stats {
let entry = self.repl_target_stats.entry(target.clone()).or_default();
entry.replicated_size += stats.replicated_size;
entry.replicated_count += stats.replicated_count;
entry.pending_size += stats.pending_size;
entry.failed_size += stats.failed_size;
entry.pending_count += stats.pending_count;
entry.failed_count += stats.failed_count;
}
}
}
/// Store data usage info to backend storage
pub async fn store_data_usage_in_backend(data_usage_info: DataUsageInfo, store: Arc<ECStore>) -> Result<()> {
let data =
serde_json::to_vec(&data_usage_info).map_err(|e| Error::Config(format!("Failed to serialize data usage info: {e}")))?;
// Save to backend using the same mechanism as original code
rustfs_ecstore::config::com::save_config(store, &DATA_USAGE_OBJ_NAME_PATH, data)
.await
.map_err(Error::Storage)?;
Ok(())
}
/// Load data usage info from backend storage
pub async fn load_data_usage_from_backend(store: Arc<ECStore>) -> Result<DataUsageInfo> {
let buf = match read_config(store, &DATA_USAGE_OBJ_NAME_PATH).await {
Ok(data) => data,
Err(e) => {
error!("Failed to read data usage info from backend: {}", e);
if e == rustfs_ecstore::error::Error::ConfigNotFound {
return Ok(DataUsageInfo::default());
}
return Err(Error::Storage(e));
}
};
let mut data_usage_info: DataUsageInfo =
serde_json::from_slice(&buf).map_err(|e| Error::Config(format!("Failed to deserialize data usage info: {e}")))?;
warn!("Loaded data usage info from backend {:?}", &data_usage_info);
// Handle backward compatibility like original code
if data_usage_info.buckets_usage.is_empty() {
data_usage_info.buckets_usage = data_usage_info
.bucket_sizes
.iter()
.map(|(bucket, &size)| {
(
bucket.clone(),
BucketUsageInfo {
size,
..Default::default()
},
)
})
.collect();
}
if data_usage_info.bucket_sizes.is_empty() {
data_usage_info.bucket_sizes = data_usage_info
.buckets_usage
.iter()
.map(|(bucket, bui)| (bucket.clone(), bui.size))
.collect();
}
for (bucket, bui) in &data_usage_info.buckets_usage {
if bui.replicated_size_v1 > 0
|| bui.replication_failed_count_v1 > 0
|| bui.replication_failed_size_v1 > 0
|| bui.replication_pending_count_v1 > 0
{
if let Ok((cfg, _)) = get_replication_config(bucket).await {
if !cfg.role.is_empty() {
data_usage_info.replication_info.insert(
cfg.role.clone(),
BucketTargetUsageInfo {
replication_failed_size: bui.replication_failed_size_v1,
replication_failed_count: bui.replication_failed_count_v1,
replicated_size: bui.replicated_size_v1,
replication_pending_count: bui.replication_pending_count_v1,
replication_pending_size: bui.replication_pending_size_v1,
..Default::default()
},
);
}
}
}
}
Ok(data_usage_info)
}
/// Example function showing how to use AHM data usage functionality
/// This demonstrates the integration pattern for DataUsageInfoHandler
pub async fn example_data_usage_integration() -> Result<()> {
// Get the global storage instance
let Some(store) = rustfs_ecstore::new_object_layer_fn() else {
return Err(Error::Config("Storage not initialized".to_string()));
};
// Load data usage from backend (this replaces the original load_data_usage_from_backend)
let data_usage = load_data_usage_from_backend(store).await?;
info!(
"Loaded data usage info: {} buckets, {} total objects",
data_usage.buckets_count, data_usage.objects_total_count
);
// Example: Store updated data usage back to backend
// This would typically be called by the scanner after collecting new statistics
// store_data_usage_in_backend(data_usage, store).await?;
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
#[test]
fn test_data_usage_info_creation() {
let mut info = DataUsageInfo::new();
info.update_capacity(1000, 500, 500);
assert_eq!(info.total_capacity, 1000);
assert_eq!(info.total_used_capacity, 500);
assert_eq!(info.total_free_capacity, 500);
assert!(info.last_update.is_some());
}
#[test]
fn test_bucket_usage_info_merge() {
let mut usage1 = BucketUsageInfo::new();
usage1.size = 100;
usage1.objects_count = 10;
usage1.versions_count = 5;
let mut usage2 = BucketUsageInfo::new();
usage2.size = 200;
usage2.objects_count = 20;
usage2.versions_count = 10;
usage1.merge(&usage2);
assert_eq!(usage1.size, 300);
assert_eq!(usage1.objects_count, 30);
assert_eq!(usage1.versions_count, 15);
}
#[test]
fn test_size_summary_add() {
let mut summary1 = SizeSummary::new();
summary1.total_size = 100;
summary1.versions = 5;
let mut summary2 = SizeSummary::new();
summary2.total_size = 200;
summary2.versions = 10;
summary1.add(&summary2);
assert_eq!(summary1.total_size, 300);
assert_eq!(summary1.versions, 15);
}
}

View File

@@ -12,197 +12,257 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::collections::HashMap;
use serde::{Deserialize, Serialize};
use std::{
collections::HashMap,
sync::atomic::{AtomicU64, Ordering},
time::{Duration, SystemTime},
};
use tracing::info;
/// Size interval for object size histogram
#[derive(Debug, Clone)]
pub struct SizeInterval {
pub start: u64,
pub end: u64,
pub name: &'static str,
/// Scanner metrics
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ScannerMetrics {
/// Total objects scanned since server start
pub objects_scanned: u64,
/// Total object versions scanned since server start
pub versions_scanned: u64,
/// Total directories scanned since server start
pub directories_scanned: u64,
/// Total bucket scans started since server start
pub bucket_scans_started: u64,
/// Total bucket scans finished since server start
pub bucket_scans_finished: u64,
/// Total objects with health issues found
pub objects_with_issues: u64,
/// Total heal tasks queued
pub heal_tasks_queued: u64,
/// Total heal tasks completed
pub heal_tasks_completed: u64,
/// Total heal tasks failed
pub heal_tasks_failed: u64,
/// Total healthy objects found
pub healthy_objects: u64,
/// Total corrupted objects found
pub corrupted_objects: u64,
/// Last scan activity time
pub last_activity: Option<SystemTime>,
/// Current scan cycle
pub current_cycle: u64,
/// Total scan cycles completed
pub total_cycles: u64,
/// Current scan duration
pub current_scan_duration: Option<Duration>,
/// Average scan duration
pub avg_scan_duration: Duration,
/// Objects scanned per second
pub objects_per_second: f64,
/// Buckets scanned per second
pub buckets_per_second: f64,
/// Storage metrics by bucket
pub bucket_metrics: HashMap<String, BucketMetrics>,
/// Disk metrics
pub disk_metrics: HashMap<String, DiskMetrics>,
}
/// Version interval for object versions histogram
#[derive(Debug, Clone)]
pub struct VersionInterval {
pub start: u64,
pub end: u64,
pub name: &'static str,
/// Bucket-specific metrics
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct BucketMetrics {
/// Bucket name
pub bucket: String,
/// Total objects in bucket
pub total_objects: u64,
/// Total size of objects in bucket (bytes)
pub total_size: u64,
/// Objects with health issues
pub objects_with_issues: u64,
/// Last scan time
pub last_scan_time: Option<SystemTime>,
/// Scan duration
pub scan_duration: Option<Duration>,
/// Heal tasks queued for this bucket
pub heal_tasks_queued: u64,
/// Heal tasks completed for this bucket
pub heal_tasks_completed: u64,
/// Heal tasks failed for this bucket
pub heal_tasks_failed: u64,
}
/// Object size histogram intervals
pub const OBJECTS_HISTOGRAM_INTERVALS: &[SizeInterval] = &[
SizeInterval {
start: 0,
end: 1024 - 1,
name: "LESS_THAN_1_KiB",
},
SizeInterval {
start: 1024,
end: 1024 * 1024 - 1,
name: "1_KiB_TO_1_MiB",
},
SizeInterval {
start: 1024 * 1024,
end: 10 * 1024 * 1024 - 1,
name: "1_MiB_TO_10_MiB",
},
SizeInterval {
start: 10 * 1024 * 1024,
end: 64 * 1024 * 1024 - 1,
name: "10_MiB_TO_64_MiB",
},
SizeInterval {
start: 64 * 1024 * 1024,
end: 128 * 1024 * 1024 - 1,
name: "64_MiB_TO_128_MiB",
},
SizeInterval {
start: 128 * 1024 * 1024,
end: 512 * 1024 * 1024 - 1,
name: "128_MiB_TO_512_MiB",
},
SizeInterval {
start: 512 * 1024 * 1024,
end: u64::MAX,
name: "MORE_THAN_512_MiB",
},
];
/// Object version count histogram intervals
pub const OBJECTS_VERSION_COUNT_INTERVALS: &[VersionInterval] = &[
VersionInterval {
start: 1,
end: 1,
name: "1_VERSION",
},
VersionInterval {
start: 2,
end: 10,
name: "2_TO_10_VERSIONS",
},
VersionInterval {
start: 11,
end: 100,
name: "11_TO_100_VERSIONS",
},
VersionInterval {
start: 101,
end: 1000,
name: "101_TO_1000_VERSIONS",
},
VersionInterval {
start: 1001,
end: u64::MAX,
name: "MORE_THAN_1000_VERSIONS",
},
];
/// Size histogram for object size distribution
#[derive(Debug, Clone, Default)]
pub struct SizeHistogram {
counts: Vec<u64>,
/// Disk-specific metrics
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct DiskMetrics {
/// Disk path
pub disk_path: String,
/// Total disk space (bytes)
pub total_space: u64,
/// Used disk space (bytes)
pub used_space: u64,
/// Free disk space (bytes)
pub free_space: u64,
/// Objects scanned on this disk
pub objects_scanned: u64,
/// Objects with issues on this disk
pub objects_with_issues: u64,
/// Last scan time
pub last_scan_time: Option<SystemTime>,
/// Whether disk is online
pub is_online: bool,
/// Whether disk is being scanned
pub is_scanning: bool,
}
/// Versions histogram for object version count distribution
#[derive(Debug, Clone, Default)]
pub struct VersionsHistogram {
counts: Vec<u64>,
/// Thread-safe metrics collector
pub struct MetricsCollector {
/// Atomic counters for real-time metrics
objects_scanned: AtomicU64,
versions_scanned: AtomicU64,
directories_scanned: AtomicU64,
bucket_scans_started: AtomicU64,
bucket_scans_finished: AtomicU64,
objects_with_issues: AtomicU64,
heal_tasks_queued: AtomicU64,
heal_tasks_completed: AtomicU64,
heal_tasks_failed: AtomicU64,
current_cycle: AtomicU64,
total_cycles: AtomicU64,
healthy_objects: AtomicU64,
corrupted_objects: AtomicU64,
}
impl SizeHistogram {
/// Create a new size histogram
impl MetricsCollector {
/// Create a new metrics collector
pub fn new() -> Self {
Self {
counts: vec![0; OBJECTS_HISTOGRAM_INTERVALS.len()],
objects_scanned: AtomicU64::new(0),
versions_scanned: AtomicU64::new(0),
directories_scanned: AtomicU64::new(0),
bucket_scans_started: AtomicU64::new(0),
bucket_scans_finished: AtomicU64::new(0),
objects_with_issues: AtomicU64::new(0),
heal_tasks_queued: AtomicU64::new(0),
heal_tasks_completed: AtomicU64::new(0),
heal_tasks_failed: AtomicU64::new(0),
current_cycle: AtomicU64::new(0),
total_cycles: AtomicU64::new(0),
healthy_objects: AtomicU64::new(0),
corrupted_objects: AtomicU64::new(0),
}
}
/// Add a size to the histogram
pub fn add(&mut self, size: u64) {
for (idx, interval) in OBJECTS_HISTOGRAM_INTERVALS.iter().enumerate() {
if size >= interval.start && size <= interval.end {
self.counts[idx] += 1;
break;
}
/// Increment objects scanned count
pub fn increment_objects_scanned(&self, count: u64) {
self.objects_scanned.fetch_add(count, Ordering::Relaxed);
}
/// Increment versions scanned count
pub fn increment_versions_scanned(&self, count: u64) {
self.versions_scanned.fetch_add(count, Ordering::Relaxed);
}
/// Increment directories scanned count
pub fn increment_directories_scanned(&self, count: u64) {
self.directories_scanned.fetch_add(count, Ordering::Relaxed);
}
/// Increment bucket scans started count
pub fn increment_bucket_scans_started(&self, count: u64) {
self.bucket_scans_started.fetch_add(count, Ordering::Relaxed);
}
/// Increment bucket scans finished count
pub fn increment_bucket_scans_finished(&self, count: u64) {
self.bucket_scans_finished.fetch_add(count, Ordering::Relaxed);
}
/// Increment objects with issues count
pub fn increment_objects_with_issues(&self, count: u64) {
self.objects_with_issues.fetch_add(count, Ordering::Relaxed);
}
/// Increment heal tasks queued count
pub fn increment_heal_tasks_queued(&self, count: u64) {
self.heal_tasks_queued.fetch_add(count, Ordering::Relaxed);
}
/// Increment heal tasks completed count
pub fn increment_heal_tasks_completed(&self, count: u64) {
self.heal_tasks_completed.fetch_add(count, Ordering::Relaxed);
}
/// Increment heal tasks failed count
pub fn increment_heal_tasks_failed(&self, count: u64) {
self.heal_tasks_failed.fetch_add(count, Ordering::Relaxed);
}
/// Set current cycle
pub fn set_current_cycle(&self, cycle: u64) {
self.current_cycle.store(cycle, Ordering::Relaxed);
}
/// Increment total cycles
pub fn increment_total_cycles(&self) {
self.total_cycles.fetch_add(1, Ordering::Relaxed);
}
/// Increment healthy objects count
pub fn increment_healthy_objects(&self) {
self.healthy_objects.fetch_add(1, Ordering::Relaxed);
}
/// Increment corrupted objects count
pub fn increment_corrupted_objects(&self) {
self.corrupted_objects.fetch_add(1, Ordering::Relaxed);
}
/// Get current metrics snapshot
pub fn get_metrics(&self) -> ScannerMetrics {
ScannerMetrics {
objects_scanned: self.objects_scanned.load(Ordering::Relaxed),
versions_scanned: self.versions_scanned.load(Ordering::Relaxed),
directories_scanned: self.directories_scanned.load(Ordering::Relaxed),
bucket_scans_started: self.bucket_scans_started.load(Ordering::Relaxed),
bucket_scans_finished: self.bucket_scans_finished.load(Ordering::Relaxed),
objects_with_issues: self.objects_with_issues.load(Ordering::Relaxed),
heal_tasks_queued: self.heal_tasks_queued.load(Ordering::Relaxed),
heal_tasks_completed: self.heal_tasks_completed.load(Ordering::Relaxed),
heal_tasks_failed: self.heal_tasks_failed.load(Ordering::Relaxed),
healthy_objects: self.healthy_objects.load(Ordering::Relaxed),
corrupted_objects: self.corrupted_objects.load(Ordering::Relaxed),
last_activity: Some(SystemTime::now()),
current_cycle: self.current_cycle.load(Ordering::Relaxed),
total_cycles: self.total_cycles.load(Ordering::Relaxed),
current_scan_duration: None, // Will be set by scanner
avg_scan_duration: Duration::ZERO, // Will be calculated
objects_per_second: 0.0, // Will be calculated
buckets_per_second: 0.0, // Will be calculated
bucket_metrics: HashMap::new(), // Will be populated by scanner
disk_metrics: HashMap::new(), // Will be populated by scanner
}
}
/// Get the histogram as a map
pub fn to_map(&self) -> HashMap<String, u64> {
let mut result = HashMap::new();
for (idx, count) in self.counts.iter().enumerate() {
let interval = &OBJECTS_HISTOGRAM_INTERVALS[idx];
result.insert(interval.name.to_string(), *count);
}
result
}
/// Reset all metrics
pub fn reset(&self) {
self.objects_scanned.store(0, Ordering::Relaxed);
self.versions_scanned.store(0, Ordering::Relaxed);
self.directories_scanned.store(0, Ordering::Relaxed);
self.bucket_scans_started.store(0, Ordering::Relaxed);
self.bucket_scans_finished.store(0, Ordering::Relaxed);
self.objects_with_issues.store(0, Ordering::Relaxed);
self.heal_tasks_queued.store(0, Ordering::Relaxed);
self.heal_tasks_completed.store(0, Ordering::Relaxed);
self.heal_tasks_failed.store(0, Ordering::Relaxed);
self.current_cycle.store(0, Ordering::Relaxed);
self.total_cycles.store(0, Ordering::Relaxed);
self.healthy_objects.store(0, Ordering::Relaxed);
self.corrupted_objects.store(0, Ordering::Relaxed);
/// Merge another histogram into this one
pub fn merge(&mut self, other: &SizeHistogram) {
for (idx, count) in other.counts.iter().enumerate() {
self.counts[idx] += count;
}
}
/// Get total count
pub fn total_count(&self) -> u64 {
self.counts.iter().sum()
}
/// Reset the histogram
pub fn reset(&mut self) {
for count in &mut self.counts {
*count = 0;
}
info!("Scanner metrics reset");
}
}
impl VersionsHistogram {
/// Create a new versions histogram
pub fn new() -> Self {
Self {
counts: vec![0; OBJECTS_VERSION_COUNT_INTERVALS.len()],
}
}
/// Add a version count to the histogram
pub fn add(&mut self, versions: u64) {
for (idx, interval) in OBJECTS_VERSION_COUNT_INTERVALS.iter().enumerate() {
if versions >= interval.start && versions <= interval.end {
self.counts[idx] += 1;
break;
}
}
}
/// Get the histogram as a map
pub fn to_map(&self) -> HashMap<String, u64> {
let mut result = HashMap::new();
for (idx, count) in self.counts.iter().enumerate() {
let interval = &OBJECTS_VERSION_COUNT_INTERVALS[idx];
result.insert(interval.name.to_string(), *count);
}
result
}
/// Merge another histogram into this one
pub fn merge(&mut self, other: &VersionsHistogram) {
for (idx, count) in other.counts.iter().enumerate() {
self.counts[idx] += count;
}
}
/// Get total count
pub fn total_count(&self) -> u64 {
self.counts.iter().sum()
}
/// Reset the histogram
pub fn reset(&mut self) {
for count in &mut self.counts {
*count = 0;
}
impl Default for MetricsCollector {
fn default() -> Self {
Self::new()
}
}
@@ -211,67 +271,35 @@ mod tests {
use super::*;
#[test]
fn test_size_histogram() {
let mut histogram = SizeHistogram::new();
// Add some sizes
histogram.add(512); // LESS_THAN_1_KiB
histogram.add(1024); // 1_KiB_TO_1_MiB
histogram.add(1024 * 1024); // 1_MiB_TO_10_MiB
histogram.add(5 * 1024 * 1024); // 1_MiB_TO_10_MiB
let map = histogram.to_map();
assert_eq!(map.get("LESS_THAN_1_KiB"), Some(&1));
assert_eq!(map.get("1_KiB_TO_1_MiB"), Some(&1));
assert_eq!(map.get("1_MiB_TO_10_MiB"), Some(&2));
assert_eq!(map.get("10_MiB_TO_64_MiB"), Some(&0));
fn test_metrics_collector_creation() {
let collector = MetricsCollector::new();
let metrics = collector.get_metrics();
assert_eq!(metrics.objects_scanned, 0);
assert_eq!(metrics.versions_scanned, 0);
}
#[test]
fn test_versions_histogram() {
let mut histogram = VersionsHistogram::new();
fn test_metrics_increment() {
let collector = MetricsCollector::new();
// Add some version counts
histogram.add(1); // 1_VERSION
histogram.add(5); // 2_TO_10_VERSIONS
histogram.add(50); // 11_TO_100_VERSIONS
histogram.add(500); // 101_TO_1000_VERSIONS
collector.increment_objects_scanned(10);
collector.increment_versions_scanned(5);
collector.increment_objects_with_issues(2);
let map = histogram.to_map();
assert_eq!(map.get("1_VERSION"), Some(&1));
assert_eq!(map.get("2_TO_10_VERSIONS"), Some(&1));
assert_eq!(map.get("11_TO_100_VERSIONS"), Some(&1));
assert_eq!(map.get("101_TO_1000_VERSIONS"), Some(&1));
let metrics = collector.get_metrics();
assert_eq!(metrics.objects_scanned, 10);
assert_eq!(metrics.versions_scanned, 5);
assert_eq!(metrics.objects_with_issues, 2);
}
#[test]
fn test_histogram_merge() {
let mut histogram1 = SizeHistogram::new();
histogram1.add(1024);
histogram1.add(1024 * 1024);
fn test_metrics_reset() {
let collector = MetricsCollector::new();
let mut histogram2 = SizeHistogram::new();
histogram2.add(1024);
histogram2.add(5 * 1024 * 1024);
collector.increment_objects_scanned(10);
collector.reset();
histogram1.merge(&histogram2);
let map = histogram1.to_map();
assert_eq!(map.get("1_KiB_TO_1_MiB"), Some(&2)); // 1 from histogram1 + 1 from histogram2
assert_eq!(map.get("1_MiB_TO_10_MiB"), Some(&2)); // 1 from histogram1 + 1 from histogram2
}
#[test]
fn test_histogram_reset() {
let mut histogram = SizeHistogram::new();
histogram.add(1024);
histogram.add(1024 * 1024);
assert_eq!(histogram.total_count(), 2);
histogram.reset();
assert_eq!(histogram.total_count(), 0);
let metrics = collector.get_metrics();
assert_eq!(metrics.objects_scanned, 0);
}
}
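A short in-crate sketch of feeding the new MetricsCollector from a scan loop and taking a snapshot:
fn record_scan_batch(collector: &MetricsCollector) -> ScannerMetrics {
    // Counters are atomic, so &self is enough and calls are cheap.
    collector.increment_objects_scanned(128);
    collector.increment_versions_scanned(140);
    collector.increment_heal_tasks_queued(3);
    collector.set_current_cycle(7);
    collector.get_metrics() // snapshot; durations, rates and per-bucket maps are filled in by the scanner
}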

View File

@@ -0,0 +1,555 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::Result;
use crate::scanner::LoadLevel;
use serde::{Deserialize, Serialize};
use std::{
collections::VecDeque,
sync::{
Arc,
atomic::{AtomicU64, Ordering},
},
time::{Duration, SystemTime},
};
use tokio::sync::RwLock;
use tokio_util::sync::CancellationToken;
use tracing::{debug, error, info, warn};
/// IO monitor config
#[derive(Debug, Clone)]
pub struct IOMonitorConfig {
/// monitor interval
pub monitor_interval: Duration,
/// history data retention time
pub history_retention: Duration,
/// load evaluation window size
pub load_window_size: usize,
/// whether to enable actual system monitoring
pub enable_system_monitoring: bool,
/// disk path list (for monitoring specific disks)
pub disk_paths: Vec<String>,
}
impl Default for IOMonitorConfig {
fn default() -> Self {
Self {
monitor_interval: Duration::from_secs(1), // 1 second monitor interval
history_retention: Duration::from_secs(300), // keep 5 minutes history
load_window_size: 30, // 30 sample points sliding window
enable_system_monitoring: false, // default use simulated data
disk_paths: Vec::new(),
}
}
}
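// Editor's sketch: switch the monitor from simulated data to real system
// sampling; the disk paths below are placeholders, not real mount points.
use std::time::Duration;
fn build_monitor() -> AdvancedIOMonitor {
    let config = IOMonitorConfig {
        monitor_interval: Duration::from_secs(2),
        history_retention: Duration::from_secs(600), // keep 10 minutes of samples
        load_window_size: 60,
        enable_system_monitoring: true, // use real system metrics, not simulated
        disk_paths: vec!["/data/disk1".into(), "/data/disk2".into()],
    };
    AdvancedIOMonitor::new(config)
}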
/// IO monitor metrics
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct IOMetrics {
/// timestamp
pub timestamp: SystemTime,
/// disk IOPS (read + write)
pub iops: u64,
/// read IOPS
pub read_iops: u64,
/// write IOPS
pub write_iops: u64,
/// disk queue depth
pub queue_depth: u64,
/// average latency (milliseconds)
pub avg_latency: u64,
/// read latency (milliseconds)
pub read_latency: u64,
/// write latency (milliseconds)
pub write_latency: u64,
/// CPU usage (0-100)
pub cpu_usage: u8,
/// memory usage (0-100)
pub memory_usage: u8,
/// disk usage (0-100)
pub disk_utilization: u8,
/// network IO (Mbps)
pub network_io: u64,
}
impl Default for IOMetrics {
fn default() -> Self {
Self {
timestamp: SystemTime::now(),
iops: 0,
read_iops: 0,
write_iops: 0,
queue_depth: 0,
avg_latency: 0,
read_latency: 0,
write_latency: 0,
cpu_usage: 0,
memory_usage: 0,
disk_utilization: 0,
network_io: 0,
}
}
}
/// load level stats
#[derive(Debug, Clone, Default)]
pub struct LoadLevelStats {
/// low load duration (seconds)
pub low_load_duration: u64,
/// medium load duration (seconds)
pub medium_load_duration: u64,
/// high load duration (seconds)
pub high_load_duration: u64,
/// critical load duration (seconds)
pub critical_load_duration: u64,
/// load transitions
pub load_transitions: u64,
}
/// advanced IO monitor
pub struct AdvancedIOMonitor {
/// config
config: Arc<RwLock<IOMonitorConfig>>,
/// current metrics
current_metrics: Arc<RwLock<IOMetrics>>,
/// history metrics (sliding window)
history_metrics: Arc<RwLock<VecDeque<IOMetrics>>>,
/// current load level
current_load_level: Arc<RwLock<LoadLevel>>,
/// load level history
load_level_history: Arc<RwLock<VecDeque<(SystemTime, LoadLevel)>>>,
/// load level stats
load_stats: Arc<RwLock<LoadLevelStats>>,
/// business IO metrics (updated externally)
business_metrics: Arc<BusinessIOMetrics>,
/// cancel token
cancel_token: CancellationToken,
}
/// business IO metrics
pub struct BusinessIOMetrics {
/// business request latency (milliseconds)
pub request_latency: AtomicU64,
/// business request QPS
pub request_qps: AtomicU64,
/// business error rate (0-10000, 0.00%-100.00%)
pub error_rate: AtomicU64,
/// active connections
pub active_connections: AtomicU64,
/// last update time
pub last_update: Arc<RwLock<SystemTime>>,
}
impl Default for BusinessIOMetrics {
fn default() -> Self {
Self {
request_latency: AtomicU64::new(0),
request_qps: AtomicU64::new(0),
error_rate: AtomicU64::new(0),
active_connections: AtomicU64::new(0),
last_update: Arc::new(RwLock::new(SystemTime::UNIX_EPOCH)),
}
}
}
impl AdvancedIOMonitor {
/// create new advanced IO monitor
pub fn new(config: IOMonitorConfig) -> Self {
Self {
config: Arc::new(RwLock::new(config)),
current_metrics: Arc::new(RwLock::new(IOMetrics::default())),
history_metrics: Arc::new(RwLock::new(VecDeque::new())),
current_load_level: Arc::new(RwLock::new(LoadLevel::Low)),
load_level_history: Arc::new(RwLock::new(VecDeque::new())),
load_stats: Arc::new(RwLock::new(LoadLevelStats::default())),
business_metrics: Arc::new(BusinessIOMetrics::default()),
cancel_token: CancellationToken::new(),
}
}
/// start monitoring
pub async fn start(&self) -> Result<()> {
info!("start advanced IO monitor");
let monitor = self.clone_for_background();
tokio::spawn(async move {
if let Err(e) = monitor.monitoring_loop().await {
error!("IO monitoring loop failed: {}", e);
}
});
Ok(())
}
/// stop monitoring
pub async fn stop(&self) {
info!("stop IO monitor");
self.cancel_token.cancel();
}
/// monitoring loop
async fn monitoring_loop(&self) -> Result<()> {
let mut interval = {
let config = self.config.read().await;
tokio::time::interval(config.monitor_interval)
};
let mut last_load_level = LoadLevel::Low;
let mut load_level_start_time = SystemTime::now();
loop {
tokio::select! {
_ = self.cancel_token.cancelled() => {
info!("IO monitoring loop cancelled");
break;
}
_ = interval.tick() => {
// collect system metrics
let metrics = self.collect_system_metrics().await;
// update current metrics
*self.current_metrics.write().await = metrics.clone();
// update history metrics
self.update_metrics_history(metrics.clone()).await;
// calculate load level
let new_load_level = self.calculate_load_level(&metrics).await;
// check if load level changed
if new_load_level != last_load_level {
self.handle_load_level_change(last_load_level, new_load_level, load_level_start_time).await;
last_load_level = new_load_level;
load_level_start_time = SystemTime::now();
}
// update current load level
*self.current_load_level.write().await = new_load_level;
debug!("IO monitor updated: IOPS={}, queue depth={}, latency={}ms, load level={:?}",
metrics.iops, metrics.queue_depth, metrics.avg_latency, new_load_level);
}
}
}
Ok(())
}
/// collect system metrics
async fn collect_system_metrics(&self) -> IOMetrics {
let config = self.config.read().await;
if config.enable_system_monitoring {
// actual system monitoring implementation
self.collect_real_system_metrics().await
} else {
// simulated data
self.generate_simulated_metrics().await
}
}
/// collect real system metrics (platform-specific implementation still required)
async fn collect_real_system_metrics(&self) -> IOMetrics {
// TODO: implement actual system metrics collection
// e.g. via procfs, sysfs, or other system APIs
let metrics = IOMetrics {
timestamp: SystemTime::now(),
..Default::default()
};
// example: read /proc/diskstats
if let Ok(diskstats) = tokio::fs::read_to_string("/proc/diskstats").await {
// parse disk stats; the concrete parsing logic still needs to be implemented
debug!("read disk stats info: {} bytes", diskstats.len());
}
// example: read /proc/stat to get CPU info
if let Ok(stat) = tokio::fs::read_to_string("/proc/stat").await {
// parse CPU stats info
debug!("read CPU stats info: {} bytes", stat.len());
}
// example: read /proc/meminfo to get memory info
if let Ok(meminfo) = tokio::fs::read_to_string("/proc/meminfo").await {
// parse memory stats info
debug!("read memory stats info: {} bytes", meminfo.len());
}
metrics
}
/// generate simulated metrics (for testing and development)
async fn generate_simulated_metrics(&self) -> IOMetrics {
use rand::Rng;
let mut rng = rand::rng();
// get business metrics impact
let business_latency = self.business_metrics.request_latency.load(Ordering::Relaxed);
let business_qps = self.business_metrics.request_qps.load(Ordering::Relaxed);
// generate simulated system metrics based on business load
let base_iops = 100 + (business_qps / 10);
let base_latency = 5 + (business_latency / 10);
IOMetrics {
timestamp: SystemTime::now(),
iops: base_iops + rng.random_range(0..50),
read_iops: (base_iops * 6 / 10) + rng.random_range(0..20),
write_iops: (base_iops * 4 / 10) + rng.random_range(0..20),
queue_depth: rng.random_range(1..20),
avg_latency: base_latency + rng.random_range(0..10),
read_latency: base_latency + rng.random_range(0..5),
write_latency: base_latency + rng.random_range(0..15),
cpu_usage: rng.random_range(10..70),
memory_usage: rng.random_range(30..80),
disk_utilization: rng.random_range(20..90),
network_io: rng.random_range(10..1000),
}
}
/// update metrics history
async fn update_metrics_history(&self, metrics: IOMetrics) {
let mut history = self.history_metrics.write().await;
let config = self.config.read().await;
// add new metrics
history.push_back(metrics);
// clean expired data
let retention_cutoff = SystemTime::now() - config.history_retention;
while let Some(front) = history.front() {
if front.timestamp < retention_cutoff {
history.pop_front();
} else {
break;
}
}
// limit window size
while history.len() > config.load_window_size {
history.pop_front();
}
}
/// calculate load level
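///
/// Illustrative walk-through derived from the match arms below: iops=600 → 25,
/// avg_latency=60ms → 30, queue_depth=10 → 10, cpu=50% → 8, disk=60% → 5,
/// giving a base score of 78, which (without business or trend bonuses) maps to
/// `LoadLevel::High`.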
async fn calculate_load_level(&self, metrics: &IOMetrics) -> LoadLevel {
// multi-dimensional load evaluation algorithm
let mut load_score = 0u32;
// IOPS load evaluation (weight: 25%)
let iops_score = match metrics.iops {
0..=200 => 0,
201..=500 => 15,
501..=1000 => 25,
_ => 35,
};
load_score += iops_score;
// latency load evaluation (weight: 30%)
let latency_score = match metrics.avg_latency {
0..=10 => 0,
11..=50 => 20,
51..=100 => 30,
_ => 40,
};
load_score += latency_score;
// queue depth evaluation (weight: 20%)
let queue_score = match metrics.queue_depth {
0..=5 => 0,
6..=15 => 10,
16..=30 => 20,
_ => 25,
};
load_score += queue_score;
// CPU usage evaluation (weight: 15%)
let cpu_score = match metrics.cpu_usage {
0..=30 => 0,
31..=60 => 8,
61..=80 => 12,
_ => 15,
};
load_score += cpu_score;
// disk usage evaluation (weight: 10%)
let disk_score = match metrics.disk_utilization {
0..=50 => 0,
51..=75 => 5,
76..=90 => 8,
_ => 10,
};
load_score += disk_score;
// business metrics impact
let business_latency = self.business_metrics.request_latency.load(Ordering::Relaxed);
let business_error_rate = self.business_metrics.error_rate.load(Ordering::Relaxed);
if business_latency > 100 {
load_score += 20; // business latency too high
}
if business_error_rate > 100 {
// > 1%
load_score += 15; // business error rate too high
}
// history trend analysis
let trend_score = self.calculate_trend_score().await;
load_score += trend_score;
// determine load level based on total score
match load_score {
0..=30 => LoadLevel::Low,
31..=60 => LoadLevel::Medium,
61..=90 => LoadLevel::High,
_ => LoadLevel::Critical,
}
}
/// calculate trend score
async fn calculate_trend_score(&self) -> u32 {
let history = self.history_metrics.read().await;
if history.len() < 5 {
return 0; // not enough samples to analyze a trend
}
// analyze trend of last 5 samples
let recent: Vec<_> = history.iter().rev().take(5).collect();
// check IOPS rising trend
let mut iops_trend = 0;
for i in 1..recent.len() {
if recent[i - 1].iops > recent[i].iops {
iops_trend += 1;
}
}
// check latency rising trend
let mut latency_trend = 0;
for i in 1..recent.len() {
if recent[i - 1].avg_latency > recent[i].avg_latency {
latency_trend += 1;
}
}
// if IOPS and latency are both rising, increase load score
if iops_trend >= 3 && latency_trend >= 3 {
15 // obvious rising trend
} else if iops_trend >= 2 || latency_trend >= 2 {
5 // slight rising trend
} else {
0 // no obvious trend
}
}
/// handle load level change
async fn handle_load_level_change(&self, old_level: LoadLevel, new_level: LoadLevel, start_time: SystemTime) {
let duration = SystemTime::now().duration_since(start_time).unwrap_or(Duration::ZERO);
// update stats
{
let mut stats = self.load_stats.write().await;
match old_level {
LoadLevel::Low => stats.low_load_duration += duration.as_secs(),
LoadLevel::Medium => stats.medium_load_duration += duration.as_secs(),
LoadLevel::High => stats.high_load_duration += duration.as_secs(),
LoadLevel::Critical => stats.critical_load_duration += duration.as_secs(),
}
stats.load_transitions += 1;
}
// update history
{
let mut history = self.load_level_history.write().await;
history.push_back((SystemTime::now(), new_level));
// keep history record in reasonable range
while history.len() > 100 {
history.pop_front();
}
}
info!("load level changed: {:?} -> {:?}, duration: {:?}", old_level, new_level, duration);
// if enter critical load state, record warning
if new_level == LoadLevel::Critical {
warn!("system entered critical load state, Scanner will pause running");
}
}
/// get current load level
pub async fn get_business_load_level(&self) -> LoadLevel {
*self.current_load_level.read().await
}
/// get current metrics
pub async fn get_current_metrics(&self) -> IOMetrics {
self.current_metrics.read().await.clone()
}
/// get history metrics
pub async fn get_history_metrics(&self) -> Vec<IOMetrics> {
self.history_metrics.read().await.iter().cloned().collect()
}
/// get load stats
pub async fn get_load_stats(&self) -> LoadLevelStats {
self.load_stats.read().await.clone()
}
/// update business IO metrics
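///
/// `error_rate` uses the 0-10000 scale defined on `BusinessIOMetrics` (1 unit = 0.01%),
/// so e.g. `update_business_metrics(25, 1200, 50, 300)` records 25ms latency,
/// 1200 QPS, a 0.5% error rate and 300 active connections.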
pub async fn update_business_metrics(&self, latency: u64, qps: u64, error_rate: u64, connections: u64) {
self.business_metrics.request_latency.store(latency, Ordering::Relaxed);
self.business_metrics.request_qps.store(qps, Ordering::Relaxed);
self.business_metrics.error_rate.store(error_rate, Ordering::Relaxed);
self.business_metrics.active_connections.store(connections, Ordering::Relaxed);
*self.business_metrics.last_update.write().await = SystemTime::now();
debug!(
"update business metrics: latency={}ms, QPS={}, error rate={}‰, connections={}",
latency, qps, error_rate, connections
);
}
/// clone for background task
fn clone_for_background(&self) -> Self {
Self {
config: self.config.clone(),
current_metrics: self.current_metrics.clone(),
history_metrics: self.history_metrics.clone(),
current_load_level: self.current_load_level.clone(),
load_level_history: self.load_level_history.clone(),
load_stats: self.load_stats.clone(),
business_metrics: self.business_metrics.clone(),
cancel_token: self.cancel_token.clone(),
}
}
/// reset stats
pub async fn reset_stats(&self) {
*self.load_stats.write().await = LoadLevelStats::default();
self.load_level_history.write().await.clear();
self.history_metrics.write().await.clear();
info!("IO monitor stats reset");
}
/// get load level history
pub async fn get_load_level_history(&self) -> Vec<(SystemTime, LoadLevel)> {
self.load_level_history.read().await.iter().cloned().collect()
}
}

View File

@@ -0,0 +1,499 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::scanner::LoadLevel;
use std::{
sync::{
Arc,
atomic::{AtomicU8, AtomicU64, Ordering},
},
time::{Duration, SystemTime},
};
use tokio::sync::RwLock;
use tracing::{debug, info, warn};
/// IO throttler config
#[derive(Debug, Clone)]
pub struct IOThrottlerConfig {
/// max IOPS limit
pub max_iops: u64,
/// business priority baseline (percentage)
pub base_business_priority: u8,
/// scanner minimum delay (milliseconds)
pub min_scan_delay: u64,
/// scanner maximum delay (milliseconds)
pub max_scan_delay: u64,
/// whether enable dynamic adjustment
pub enable_dynamic_adjustment: bool,
/// adjustment response time (seconds)
pub adjustment_response_time: u64,
}
impl Default for IOThrottlerConfig {
fn default() -> Self {
Self {
max_iops: 1000, // default max 1000 IOPS
base_business_priority: 95, // business priority 95%
min_scan_delay: 5000, // minimum 5s delay
max_scan_delay: 60000, // maximum 60s delay
enable_dynamic_adjustment: true,
adjustment_response_time: 5, // 5 seconds response time
}
}
}
/// resource allocation strategy
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum ResourceAllocationStrategy {
/// business priority strategy
BusinessFirst,
/// balanced strategy
Balanced,
/// maintenance priority strategy (only used in special cases)
MaintenanceFirst,
}
/// throttle decision
#[derive(Debug, Clone)]
pub struct ThrottleDecision {
/// whether should pause scanning
pub should_pause: bool,
/// suggested scanning delay
pub suggested_delay: Duration,
/// resource allocation suggestion
pub resource_allocation: ResourceAllocation,
/// decision reason
pub reason: String,
}
/// resource allocation
#[derive(Debug, Clone)]
pub struct ResourceAllocation {
/// business IO allocation percentage (0-100)
pub business_percentage: u8,
/// scanner IO allocation percentage (0-100)
pub scanner_percentage: u8,
/// allocation strategy
pub strategy: ResourceAllocationStrategy,
}
/// enhanced IO throttler
///
/// Dynamically adjusts the scanner's resource usage based on real-time system
/// load and business demand, ensuring business IO is protected first.
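///
/// A minimal usage sketch (illustrative only, assuming an async context):
///
/// ```ignore
/// let throttler = AdvancedIOThrottler::new(IOThrottlerConfig::default());
/// let delay = throttler.adjust_for_load_level(LoadLevel::High).await; // 50s with defaults
/// let decision = throttler.make_throttle_decision(LoadLevel::Critical, None).await;
/// assert!(decision.should_pause);
/// ```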
pub struct AdvancedIOThrottler {
/// config
config: Arc<RwLock<IOThrottlerConfig>>,
/// current IOPS usage (reserved field)
#[allow(dead_code)]
current_iops: Arc<AtomicU64>,
/// business priority weight (0-100)
business_priority: Arc<AtomicU8>,
/// scanning operation delay (milliseconds)
scan_delay: Arc<AtomicU64>,
/// resource allocation strategy
allocation_strategy: Arc<RwLock<ResourceAllocationStrategy>>,
/// throttle history record
throttle_history: Arc<RwLock<Vec<ThrottleRecord>>>,
/// last adjustment time (reserved field)
#[allow(dead_code)]
last_adjustment: Arc<RwLock<SystemTime>>,
}
/// throttle record
#[derive(Debug, Clone)]
pub struct ThrottleRecord {
/// timestamp
pub timestamp: SystemTime,
/// load level
pub load_level: LoadLevel,
/// decision
pub decision: ThrottleDecision,
/// system metrics snapshot
pub metrics_snapshot: MetricsSnapshot,
}
/// metrics snapshot
#[derive(Debug, Clone)]
pub struct MetricsSnapshot {
/// IOPS
pub iops: u64,
/// latency
pub latency: u64,
/// CPU usage
pub cpu_usage: u8,
/// memory usage
pub memory_usage: u8,
}
impl AdvancedIOThrottler {
/// create new advanced IO throttler
pub fn new(config: IOThrottlerConfig) -> Self {
Self {
config: Arc::new(RwLock::new(config)),
current_iops: Arc::new(AtomicU64::new(0)),
business_priority: Arc::new(AtomicU8::new(95)),
scan_delay: Arc::new(AtomicU64::new(5000)),
allocation_strategy: Arc::new(RwLock::new(ResourceAllocationStrategy::BusinessFirst)),
throttle_history: Arc::new(RwLock::new(Vec::new())),
last_adjustment: Arc::new(RwLock::new(SystemTime::UNIX_EPOCH)),
}
}
/// adjust scanning delay based on load level
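///
/// With the default `IOThrottlerConfig` this yields roughly: Low → 5s,
/// Medium → 25s, High → 50s, Critical → 60s.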
pub async fn adjust_for_load_level(&self, load_level: LoadLevel) -> Duration {
let config = self.config.read().await;
let delay_ms = match load_level {
LoadLevel::Low => {
// low load: use minimum delay
self.scan_delay.store(config.min_scan_delay, Ordering::Relaxed);
self.business_priority
.store(config.base_business_priority.saturating_sub(5), Ordering::Relaxed);
config.min_scan_delay
}
LoadLevel::Medium => {
// medium load: increase delay moderately
let delay = config.min_scan_delay * 5; // 25s with the default config
self.scan_delay.store(delay, Ordering::Relaxed);
self.business_priority.store(config.base_business_priority, Ordering::Relaxed);
delay
}
LoadLevel::High => {
// high load: increase delay significantly
let delay = config.min_scan_delay * 10; // 50s
self.scan_delay.store(delay, Ordering::Relaxed);
self.business_priority
.store(config.base_business_priority.saturating_add(3), Ordering::Relaxed);
delay
}
LoadLevel::Critical => {
// critical load: maximum delay or pause
let delay = config.max_scan_delay; // 60s
self.scan_delay.store(delay, Ordering::Relaxed);
self.business_priority.store(99, Ordering::Relaxed);
delay
}
};
let duration = Duration::from_millis(delay_ms);
debug!("Adjust scanning delay based on load level {:?}: {:?}", load_level, duration);
duration
}
/// create throttle decision
pub async fn make_throttle_decision(&self, load_level: LoadLevel, metrics: Option<MetricsSnapshot>) -> ThrottleDecision {
let _config = self.config.read().await;
let should_pause = matches!(load_level, LoadLevel::Critical);
let suggested_delay = self.adjust_for_load_level(load_level).await;
let resource_allocation = self.calculate_resource_allocation(load_level).await;
let reason = match load_level {
LoadLevel::Low => "system load is low, scanner can run normally".to_string(),
LoadLevel::Medium => "system load is moderate, scanner is running at reduced speed".to_string(),
LoadLevel::High => "system load is high, scanner is running at significantly reduced speed".to_string(),
LoadLevel::Critical => "system load is too high, scanner is paused".to_string(),
};
let decision = ThrottleDecision {
should_pause,
suggested_delay,
resource_allocation,
reason,
};
// record decision history
if let Some(snapshot) = metrics {
self.record_throttle_decision(load_level, decision.clone(), snapshot).await;
}
decision
}
/// calculate resource allocation
async fn calculate_resource_allocation(&self, load_level: LoadLevel) -> ResourceAllocation {
let strategy = *self.allocation_strategy.read().await;
let (business_pct, scanner_pct) = match (strategy, load_level) {
(ResourceAllocationStrategy::BusinessFirst, LoadLevel::Low) => (90, 10),
(ResourceAllocationStrategy::BusinessFirst, LoadLevel::Medium) => (95, 5),
(ResourceAllocationStrategy::BusinessFirst, LoadLevel::High) => (98, 2),
(ResourceAllocationStrategy::BusinessFirst, LoadLevel::Critical) => (99, 1),
(ResourceAllocationStrategy::Balanced, LoadLevel::Low) => (80, 20),
(ResourceAllocationStrategy::Balanced, LoadLevel::Medium) => (85, 15),
(ResourceAllocationStrategy::Balanced, LoadLevel::High) => (90, 10),
(ResourceAllocationStrategy::Balanced, LoadLevel::Critical) => (95, 5),
(ResourceAllocationStrategy::MaintenanceFirst, _) => (70, 30), // special maintenance mode
};
ResourceAllocation {
business_percentage: business_pct,
scanner_percentage: scanner_pct,
strategy,
}
}
/// check whether should pause scanning
pub async fn should_pause_scanning(&self, load_level: LoadLevel) -> bool {
match load_level {
LoadLevel::Critical => {
warn!("System load reached critical level, pausing scanner");
true
}
_ => false,
}
}
/// record throttle decision
async fn record_throttle_decision(&self, load_level: LoadLevel, decision: ThrottleDecision, metrics: MetricsSnapshot) {
let record = ThrottleRecord {
timestamp: SystemTime::now(),
load_level,
decision,
metrics_snapshot: metrics,
};
let mut history = self.throttle_history.write().await;
history.push(record);
// keep history record in reasonable range (last 1000 records)
while history.len() > 1000 {
history.remove(0);
}
}
/// set resource allocation strategy
pub async fn set_allocation_strategy(&self, strategy: ResourceAllocationStrategy) {
*self.allocation_strategy.write().await = strategy;
info!("Set resource allocation strategy: {:?}", strategy);
}
/// get current resource allocation
pub async fn get_current_allocation(&self) -> ResourceAllocation {
let current_load = LoadLevel::Low; // TODO: obtain the actual load level from the IO monitor
self.calculate_resource_allocation(current_load).await
}
/// get throttle history
pub async fn get_throttle_history(&self) -> Vec<ThrottleRecord> {
self.throttle_history.read().await.clone()
}
/// get throttle stats
pub async fn get_throttle_stats(&self) -> ThrottleStats {
let history = self.throttle_history.read().await;
let total_decisions = history.len();
let pause_decisions = history.iter().filter(|r| r.decision.should_pause).count();
let mut delay_sum = Duration::ZERO;
for record in history.iter() {
delay_sum += record.decision.suggested_delay;
}
let avg_delay = if total_decisions > 0 {
delay_sum / total_decisions as u32
} else {
Duration::ZERO
};
// count by load level
let low_count = history.iter().filter(|r| r.load_level == LoadLevel::Low).count();
let medium_count = history.iter().filter(|r| r.load_level == LoadLevel::Medium).count();
let high_count = history.iter().filter(|r| r.load_level == LoadLevel::High).count();
let critical_count = history.iter().filter(|r| r.load_level == LoadLevel::Critical).count();
ThrottleStats {
total_decisions,
pause_decisions,
average_delay: avg_delay,
load_level_distribution: LoadLevelDistribution {
low_count,
medium_count,
high_count,
critical_count,
},
}
}
/// reset throttle history
pub async fn reset_history(&self) {
self.throttle_history.write().await.clear();
info!("Reset throttle history");
}
/// update config
pub async fn update_config(&self, new_config: IOThrottlerConfig) {
*self.config.write().await = new_config;
info!("Updated IO throttler configuration");
}
/// get current scanning delay
pub fn get_current_scan_delay(&self) -> Duration {
let delay_ms = self.scan_delay.load(Ordering::Relaxed);
Duration::from_millis(delay_ms)
}
/// get current business priority
pub fn get_current_business_priority(&self) -> u8 {
self.business_priority.load(Ordering::Relaxed)
}
/// simulate business load pressure test
pub async fn simulate_business_pressure(&self, duration: Duration) -> SimulationResult {
info!("Start simulating business load pressure test, duration: {:?}", duration);
let start_time = SystemTime::now();
let mut simulation_records = Vec::new();
// simulate different load level changes
let load_levels = [
LoadLevel::Low,
LoadLevel::Medium,
LoadLevel::High,
LoadLevel::Critical,
LoadLevel::High,
LoadLevel::Medium,
LoadLevel::Low,
];
let step_duration = duration / load_levels.len() as u32;
for (i, &load_level) in load_levels.iter().enumerate() {
let _step_start = SystemTime::now();
// simulate metrics for this load level
let metrics = MetricsSnapshot {
iops: match load_level {
LoadLevel::Low => 200,
LoadLevel::Medium => 500,
LoadLevel::High => 800,
LoadLevel::Critical => 1200,
},
latency: match load_level {
LoadLevel::Low => 10,
LoadLevel::Medium => 25,
LoadLevel::High => 60,
LoadLevel::Critical => 150,
},
cpu_usage: match load_level {
LoadLevel::Low => 30,
LoadLevel::Medium => 50,
LoadLevel::High => 75,
LoadLevel::Critical => 95,
},
memory_usage: match load_level {
LoadLevel::Low => 40,
LoadLevel::Medium => 60,
LoadLevel::High => 80,
LoadLevel::Critical => 90,
},
};
let decision = self.make_throttle_decision(load_level, Some(metrics.clone())).await;
simulation_records.push(SimulationRecord {
step: i + 1,
load_level,
metrics,
decision: decision.clone(),
step_duration,
});
info!(
"simulate step {}: load={:?}, delay={:?}, pause={}",
i + 1,
load_level,
decision.suggested_delay,
decision.should_pause
);
// wait for step duration
tokio::time::sleep(step_duration).await;
}
let total_duration = SystemTime::now().duration_since(start_time).unwrap_or(Duration::ZERO);
SimulationResult {
total_duration,
simulation_records,
final_stats: self.get_throttle_stats().await,
}
}
}
/// throttle stats
#[derive(Debug, Clone)]
pub struct ThrottleStats {
/// total decisions
pub total_decisions: usize,
/// pause decisions
pub pause_decisions: usize,
/// average delay
pub average_delay: Duration,
/// load level distribution
pub load_level_distribution: LoadLevelDistribution,
}
/// load level distribution
#[derive(Debug, Clone)]
pub struct LoadLevelDistribution {
/// low load count
pub low_count: usize,
/// medium load count
pub medium_count: usize,
/// high load count
pub high_count: usize,
/// critical load count
pub critical_count: usize,
}
/// simulation result
#[derive(Debug, Clone)]
pub struct SimulationResult {
/// total duration
pub total_duration: Duration,
/// simulation records
pub simulation_records: Vec<SimulationRecord>,
/// final stats
pub final_stats: ThrottleStats,
}
/// simulation record
#[derive(Debug, Clone)]
pub struct SimulationRecord {
/// step number
pub step: usize,
/// load level
pub load_level: LoadLevel,
/// metrics snapshot
pub metrics: MetricsSnapshot,
/// throttle decision
pub decision: ThrottleDecision,
/// step duration
pub step_duration: Duration,
}
impl Default for AdvancedIOThrottler {
fn default() -> Self {
Self::new(IOThrottlerConfig::default())
}
}

View File

@@ -0,0 +1,561 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{
Result,
scanner::metrics::{BucketMetrics, MetricsCollector},
};
use rustfs_common::data_usage::SizeSummary;
use rustfs_common::metrics::IlmAction;
use rustfs_ecstore::bucket::{
lifecycle::{
bucket_lifecycle_audit::LcEventSrc,
bucket_lifecycle_ops::{GLOBAL_ExpiryState, apply_lifecycle_action, eval_action_from_lifecycle},
lifecycle,
lifecycle::Lifecycle,
},
metadata_sys::get_object_lock_config,
object_lock::objectlock_sys::{BucketObjectLockSys, enforce_retention_for_deletion},
versioning::VersioningApi,
versioning_sys::BucketVersioningSys,
};
use rustfs_ecstore::bucket::{
replication::{GLOBAL_REPLICATION_POOL, ReplicationConfig, get_heal_replicate_object_info},
utils::is_meta_bucketname,
};
use rustfs_ecstore::store_api::{ObjectInfo, ObjectToDelete};
use rustfs_filemeta::{FileInfo, ReplicationStatusType, replication_statuses_map};
use rustfs_utils::http::headers::{AMZ_BUCKET_REPLICATION_STATUS, HeaderExt, VERSION_PURGE_STATUS_KEY};
use s3s::dto::DefaultRetention;
use s3s::dto::{BucketLifecycleConfiguration as LifecycleConfig, VersioningConfiguration};
use std::{
collections::HashMap,
sync::{
Arc,
atomic::{AtomicU64, Ordering},
},
time::Duration as StdDuration,
};
use time::{Duration as TimeDuration, OffsetDateTime};
use tokio::sync::Mutex;
use tracing::{debug, info, warn};
static SCANNER_EXCESS_OBJECT_VERSIONS: AtomicU64 = AtomicU64::new(100);
static SCANNER_EXCESS_OBJECT_VERSIONS_TOTAL_SIZE: AtomicU64 = AtomicU64::new(1024 * 1024 * 1024 * 1024); // 1 TB
#[derive(Clone)]
pub struct ScannerItem {
pub bucket: String,
pub object_name: String,
pub replication: Option<ReplicationConfig>,
pub lifecycle: Option<Arc<LifecycleConfig>>,
pub versioning: Option<Arc<VersioningConfiguration>>,
pub object_lock_config: Option<DefaultRetention>,
pub replication_pending_grace: StdDuration,
pub replication_metrics: Option<ReplicationMetricsHandle>,
}
#[derive(Clone)]
pub struct ReplicationMetricsHandle {
inner: Arc<ReplicationMetricsInner>,
}
struct ReplicationMetricsInner {
metrics: Arc<MetricsCollector>,
bucket_metrics: Arc<Mutex<HashMap<String, BucketMetrics>>>,
}
impl ReplicationMetricsHandle {
pub fn new(metrics: Arc<MetricsCollector>, bucket_metrics: Arc<Mutex<HashMap<String, BucketMetrics>>>) -> Self {
Self {
inner: Arc::new(ReplicationMetricsInner { metrics, bucket_metrics }),
}
}
pub async fn record_status(&self, bucket: &str, status: ReplicationStatusType, lagging: bool) {
match status {
ReplicationStatusType::Pending => self.inner.metrics.increment_replication_pending_objects(1),
ReplicationStatusType::Failed => self.inner.metrics.increment_replication_failed_objects(1),
_ => {}
}
if lagging {
self.inner.metrics.increment_replication_lagging_objects(1);
}
let mut guard = self.inner.bucket_metrics.lock().await;
let entry = guard.entry(bucket.to_string()).or_insert_with(|| BucketMetrics {
bucket: bucket.to_string(),
..Default::default()
});
match status {
ReplicationStatusType::Pending => {
entry.replication_pending = entry.replication_pending.saturating_add(1);
}
ReplicationStatusType::Failed => {
entry.replication_failed = entry.replication_failed.saturating_add(1);
}
_ => {}
}
if lagging {
entry.replication_lagging = entry.replication_lagging.saturating_add(1);
}
}
pub async fn record_task_submission(&self, bucket: &str) {
self.inner.metrics.increment_replication_tasks_queued(1);
let mut guard = self.inner.bucket_metrics.lock().await;
let entry = guard.entry(bucket.to_string()).or_insert_with(|| BucketMetrics {
bucket: bucket.to_string(),
..Default::default()
});
entry.replication_tasks_queued = entry.replication_tasks_queued.saturating_add(1);
}
}
impl ScannerItem {
const INTERNAL_REPLICATION_STATUS_KEY: &'static str = "x-rustfs-internal-replication-status";
pub fn new(
bucket: String,
replication: Option<ReplicationConfig>,
lifecycle: Option<Arc<LifecycleConfig>>,
versioning: Option<Arc<VersioningConfiguration>>,
object_lock_config: Option<DefaultRetention>,
replication_pending_grace: StdDuration,
replication_metrics: Option<ReplicationMetricsHandle>,
) -> Self {
Self {
bucket,
object_name: "".to_string(),
replication,
lifecycle,
versioning,
object_lock_config,
replication_pending_grace,
replication_metrics,
}
}
pub async fn apply_versions_actions(&self, fivs: &[FileInfo]) -> Result<Vec<ObjectInfo>> {
let obj_infos = self.apply_newer_noncurrent_version_limit(fivs).await?;
if obj_infos.len() >= SCANNER_EXCESS_OBJECT_VERSIONS.load(Ordering::SeqCst) as usize {
// todo
}
let mut cumulative_size = 0;
for obj_info in obj_infos.iter() {
cumulative_size += obj_info.size;
}
if cumulative_size >= SCANNER_EXCESS_OBJECT_VERSIONS_TOTAL_SIZE.load(Ordering::SeqCst) as i64 {
//todo
}
Ok(obj_infos)
}
pub async fn apply_newer_noncurrent_version_limit(&self, fivs: &[FileInfo]) -> Result<Vec<ObjectInfo>> {
let lock_enabled = if let Some(rcfg) = BucketObjectLockSys::get(&self.bucket).await {
rcfg.mode.is_some()
} else {
false
};
let _vcfg = BucketVersioningSys::get(&self.bucket).await?;
let versioned = match BucketVersioningSys::get(&self.bucket).await {
Ok(vcfg) => vcfg.versioned(&self.object_name),
Err(_) => false,
};
let mut object_infos = Vec::with_capacity(fivs.len());
if self.lifecycle.is_none() {
for info in fivs.iter() {
object_infos.push(ObjectInfo::from_file_info(info, &self.bucket, &self.object_name, versioned));
}
return Ok(object_infos);
}
let event = self
.lifecycle
.as_ref()
.expect("lifecycle err.")
.clone()
.noncurrent_versions_expiration_limit(&lifecycle::ObjectOpts {
name: self.object_name.clone(),
..Default::default()
})
.await;
let lim = event.newer_noncurrent_versions;
if lim == 0 || fivs.len() <= lim + 1 {
for fi in fivs.iter() {
object_infos.push(ObjectInfo::from_file_info(fi, &self.bucket, &self.object_name, versioned));
}
return Ok(object_infos);
}
let overflow_versions = &fivs[lim + 1..];
for fi in fivs[..lim + 1].iter() {
object_infos.push(ObjectInfo::from_file_info(fi, &self.bucket, &self.object_name, versioned));
}
let mut to_del = Vec::<ObjectToDelete>::with_capacity(overflow_versions.len());
for fi in overflow_versions.iter() {
let obj = ObjectInfo::from_file_info(fi, &self.bucket, &self.object_name, versioned);
if lock_enabled && enforce_retention_for_deletion(&obj) {
//if enforce_retention_for_deletion(&obj) {
/*if self.debug {
if obj.version_id.is_some() {
info!("lifecycle: {} v({}) is locked, not deleting\n", obj.name, obj.version_id.expect("err"));
} else {
info!("lifecycle: {} is locked, not deleting\n", obj.name);
}
}*/
object_infos.push(obj);
continue;
}
if OffsetDateTime::now_utc().unix_timestamp()
< lifecycle::expected_expiry_time(obj.successor_mod_time.expect("err"), event.noncurrent_days as i32)
.unix_timestamp()
{
object_infos.push(obj);
continue;
}
to_del.push(ObjectToDelete {
object_name: obj.name,
version_id: obj.version_id,
..Default::default()
});
}
if !to_del.is_empty() {
let mut expiry_state = GLOBAL_ExpiryState.write().await;
expiry_state.enqueue_by_newer_noncurrent(&self.bucket, to_del, event).await;
}
Ok(object_infos)
}
pub async fn apply_actions(&mut self, oi: &ObjectInfo, _size_s: &mut SizeSummary) -> (bool, i64) {
let object_locked = self.is_object_lock_protected(oi);
if let Err(err) = self.heal_replication(oi).await {
warn!(
"heal_replication failed for {}/{} (version {:?}): {}",
oi.bucket, oi.name, oi.version_id, err
);
}
if object_locked {
info!(
"apply_actions: Skipping lifecycle for {}/{} because object lock retention or legal hold is active",
oi.bucket, oi.name
);
return (false, oi.size);
}
let (action, _size) = self.apply_lifecycle(oi).await;
info!(
"apply_actions {} {} {:?} {:?}",
oi.bucket.clone(),
oi.name.clone(),
oi.version_id.clone(),
oi.user_defined.clone()
);
if action.delete_all() {
return (true, 0);
}
(false, oi.size)
}
async fn apply_lifecycle(&mut self, oi: &ObjectInfo) -> (IlmAction, i64) {
let size = oi.size;
if self.lifecycle.is_none() {
info!("apply_lifecycle: No lifecycle config for object: {}", oi.name);
return (IlmAction::NoneAction, size);
}
info!("apply_lifecycle: Lifecycle config exists for object: {}", oi.name);
let (olcfg, rcfg) = if !is_meta_bucketname(&self.bucket) {
(
get_object_lock_config(&self.bucket).await.ok(),
None, // FIXME: replication config
)
} else {
(None, None)
};
info!("apply_lifecycle: Evaluating lifecycle for object: {}", oi.name);
let lifecycle = match self.lifecycle.as_ref() {
Some(lc) => lc,
None => {
info!("No lifecycle configuration found for object: {}", oi.name);
return (IlmAction::NoneAction, 0);
}
};
let lc_evt = eval_action_from_lifecycle(
lifecycle,
olcfg
.as_ref()
.and_then(|(c, _)| c.rule.as_ref().and_then(|r| r.default_retention.clone())),
rcfg.clone(),
oi, // Pass oi directly
)
.await;
info!("lifecycle: {} Initial scan: {} (action: {:?})", oi.name, lc_evt.action, lc_evt.action);
let mut new_size = size;
match lc_evt.action {
IlmAction::DeleteVersionAction | IlmAction::DeleteAllVersionsAction | IlmAction::DelMarkerDeleteAllVersionsAction => {
info!("apply_lifecycle: Object {} marked for version deletion, new_size=0", oi.name);
new_size = 0;
}
IlmAction::DeleteAction => {
info!("apply_lifecycle: Object {} marked for deletion", oi.name);
if let Some(vcfg) = &self.versioning {
if !vcfg.enabled() {
info!("apply_lifecycle: Versioning disabled, setting new_size=0");
new_size = 0;
}
} else {
info!("apply_lifecycle: No versioning config, setting new_size=0");
new_size = 0;
}
}
IlmAction::NoneAction => {
info!("apply_lifecycle: No action for object {}", oi.name);
}
_ => {
info!("apply_lifecycle: Other action {:?} for object {}", lc_evt.action, oi.name);
}
}
if lc_evt.action != IlmAction::NoneAction {
info!("apply_lifecycle: Applying lifecycle action {:?} for object {}", lc_evt.action, oi.name);
apply_lifecycle_action(&lc_evt, &LcEventSrc::Scanner, oi).await;
} else {
info!("apply_lifecycle: Skipping lifecycle action for object {} as no action is needed", oi.name);
}
(lc_evt.action, new_size)
}
fn is_object_lock_protected(&self, oi: &ObjectInfo) -> bool {
enforce_retention_for_deletion(oi)
}
async fn heal_replication(&self, oi: &ObjectInfo) -> Result<()> {
warn!("heal_replication: healing replication for {}/{}", oi.bucket, oi.name);
warn!("heal_replication: ObjectInfo oi: {:?}", oi);
let enriched = Self::hydrate_replication_metadata(oi);
let pending_lagging = self.is_pending_lagging(&enriched);
if let Some(handle) = &self.replication_metrics {
handle
.record_status(&self.bucket, enriched.replication_status.clone(), pending_lagging)
.await;
}
debug!(
"heal_replication: evaluating {}/{} with status {:?} and internal {:?}",
enriched.bucket, enriched.name, enriched.replication_status, enriched.replication_status_internal
);
// if !self.needs_replication_heal(&enriched, pending_lagging) {
// return Ok(());
// }
// let replication_cfg = match get_replication_config(&self.bucket).await {
// Ok((cfg, _)) => Some(cfg),
// Err(err) => {
// debug!("heal_replication: failed to fetch replication config for bucket {}: {}", self.bucket, err);
// None
// }
// };
// if replication_cfg.is_none() {
// return Ok(());
// }
// let bucket_targets = match get_bucket_targets_config(&self.bucket).await {
// Ok(targets) => Some(targets),
// Err(err) => {
// debug!("heal_replication: no bucket targets for bucket {}: {}", self.bucket, err);
// None
// }
// };
// let replication_cfg = ReplicationConfig::new(replication_cfg, bucket_targets);
let replication_cfg = self.replication.clone().unwrap_or_default();
if replication_cfg.config.is_none() && replication_cfg.remotes.is_none() {
debug!("heal_replication: no replication config for {}/{}", enriched.bucket, enriched.name);
return Ok(());
}
let replicate_info = get_heal_replicate_object_info(&enriched, &replication_cfg).await;
let should_replicate = replicate_info.dsc.replicate_any()
|| matches!(
enriched.replication_status,
ReplicationStatusType::Failed | ReplicationStatusType::Pending
);
if !should_replicate {
debug!("heal_replication: no actionable targets for {}/{}", enriched.bucket, enriched.name);
return Ok(());
}
if let Some(pool) = GLOBAL_REPLICATION_POOL.get() {
pool.queue_replica_task(replicate_info).await;
if let Some(handle) = &self.replication_metrics {
handle.record_task_submission(&self.bucket).await;
}
warn!("heal_replication: queued replication heal task for {}/{}", enriched.bucket, enriched.name);
} else {
warn!(
"heal_replication: GLOBAL_REPLICATION_POOL not initialized, skipping heal for {}/{}",
enriched.bucket, enriched.name
);
}
Ok(())
}
#[allow(dead_code)]
fn needs_replication_heal(&self, oi: &ObjectInfo, pending_lagging: bool) -> bool {
if matches!(oi.replication_status, ReplicationStatusType::Failed) {
return true;
}
if pending_lagging && matches!(oi.replication_status, ReplicationStatusType::Pending) {
return true;
}
if let Some(raw) = oi.replication_status_internal.as_ref() {
let statuses = replication_statuses_map(raw);
if statuses
.values()
.any(|status| matches!(status, ReplicationStatusType::Failed))
{
return true;
}
if pending_lagging
&& statuses
.values()
.any(|status| matches!(status, ReplicationStatusType::Pending))
{
return true;
}
}
false
}
fn hydrate_replication_metadata(oi: &ObjectInfo) -> ObjectInfo {
let mut enriched = oi.clone();
if enriched.replication_status.is_empty() {
if let Some(status) = enriched.user_defined.lookup(AMZ_BUCKET_REPLICATION_STATUS) {
enriched.replication_status = ReplicationStatusType::from(status);
}
}
if enriched.replication_status_internal.is_none() {
if let Some(raw) = enriched.user_defined.lookup(Self::INTERNAL_REPLICATION_STATUS_KEY) {
if !raw.is_empty() {
enriched.replication_status_internal = Some(raw.to_string());
}
}
}
if enriched.version_purge_status_internal.is_none() {
if let Some(raw) = enriched.user_defined.lookup(VERSION_PURGE_STATUS_KEY) {
if !raw.is_empty() {
enriched.version_purge_status_internal = Some(raw.to_string());
}
}
}
enriched
}
fn is_pending_lagging(&self, oi: &ObjectInfo) -> bool {
if !matches!(oi.replication_status, ReplicationStatusType::Pending) {
return false;
}
let Some(mod_time) = oi.mod_time else {
return false;
};
let grace = TimeDuration::try_from(self.replication_pending_grace).unwrap_or_else(|_| {
warn!(
"replication_pending_grace is invalid, using default value: 0 seconds, grace: {:?}",
self.replication_pending_grace
);
TimeDuration::seconds(0)
});
if grace.is_zero() {
return true;
}
let elapsed = OffsetDateTime::now_utc() - mod_time;
elapsed >= grace
}
}
#[cfg(test)]
mod tests {
use super::*;
#[tokio::test]
async fn replication_metrics_handle_tracks_counts() {
let metrics = Arc::new(MetricsCollector::new());
let bucket_metrics = Arc::new(Mutex::new(HashMap::new()));
let handle = ReplicationMetricsHandle::new(metrics.clone(), bucket_metrics.clone());
handle
.record_status("test-bucket", ReplicationStatusType::Pending, true)
.await;
handle
.record_status("test-bucket", ReplicationStatusType::Failed, false)
.await;
handle.record_task_submission("test-bucket").await;
let snapshot = metrics.get_metrics();
assert_eq!(snapshot.replication_pending_objects, 1);
assert_eq!(snapshot.replication_failed_objects, 1);
assert_eq!(snapshot.replication_lagging_objects, 1);
assert_eq!(snapshot.replication_tasks_queued, 1);
let guard = bucket_metrics.lock().await;
let bucket_entry = guard.get("test-bucket").expect("bucket metrics exists");
assert_eq!(bucket_entry.replication_pending, 1);
assert_eq!(bucket_entry.replication_failed, 1);
assert_eq!(bucket_entry.replication_lagging, 1);
assert_eq!(bucket_entry.replication_tasks_queued, 1);
}
}

View File

@@ -0,0 +1,683 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::{Error, Result};
use rustfs_common::data_usage::DiskUsageStatus;
use rustfs_ecstore::data_usage::{
LocalUsageSnapshot, LocalUsageSnapshotMeta, data_usage_state_dir, ensure_data_usage_layout, snapshot_file_name,
write_local_snapshot,
};
use rustfs_ecstore::disk::DiskAPI;
use rustfs_ecstore::store::ECStore;
use rustfs_ecstore::store_api::ObjectInfo;
use rustfs_filemeta::{FileInfo, FileMeta, FileMetaVersion, VersionType};
use serde::{Deserialize, Serialize};
use serde_json::{from_slice, to_vec};
use std::collections::{HashMap, HashSet};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
use tokio::{fs, task};
use tracing::warn;
use walkdir::WalkDir;
const STATE_FILE_EXTENSION: &str = "";
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
pub struct LocalObjectUsage {
pub bucket: String,
pub object: String,
pub last_modified_ns: Option<i128>,
pub versions_count: u64,
pub delete_markers_count: u64,
pub total_size: u64,
pub has_live_object: bool,
}
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
struct IncrementalScanState {
last_scan_ns: Option<i128>,
objects: HashMap<String, LocalObjectUsage>,
}
struct DiskScanResult {
snapshot: LocalUsageSnapshot,
state: IncrementalScanState,
objects_by_bucket: HashMap<String, Vec<LocalObjectRecord>>,
status: DiskUsageStatus,
}
#[derive(Debug, Clone)]
pub struct LocalObjectRecord {
pub usage: LocalObjectUsage,
pub object_info: Option<rustfs_ecstore::store_api::ObjectInfo>,
pub file_info: Option<FileInfo>,
}
#[derive(Debug, Default)]
pub struct LocalScanOutcome {
pub snapshots: Vec<LocalUsageSnapshot>,
pub bucket_objects: HashMap<String, Vec<LocalObjectRecord>>,
pub disk_status: Vec<DiskUsageStatus>,
}
/// Scan all local primary disks and persist refreshed usage snapshots.
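///
/// A minimal call-site sketch (illustrative only, assuming an already initialized
/// `Arc<ECStore>` named `store`):
///
/// ```ignore
/// let outcome = scan_and_persist_local_usage(store.clone()).await?;
/// println!("refreshed {} disk snapshot(s)", outcome.snapshots.len());
/// for (bucket, records) in &outcome.bucket_objects {
///     println!("{bucket}: {} object record(s)", records.len());
/// }
/// ```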
pub async fn scan_and_persist_local_usage(store: Arc<ECStore>) -> Result<LocalScanOutcome> {
let mut snapshots = Vec::new();
let mut bucket_objects: HashMap<String, Vec<LocalObjectRecord>> = HashMap::new();
let mut disk_status = Vec::new();
for (pool_idx, pool) in store.pools.iter().enumerate() {
for set_disks in pool.disk_set.iter() {
let disks = {
let guard = set_disks.disks.read().await;
guard.clone()
};
for (disk_index, disk_opt) in disks.into_iter().enumerate() {
let Some(disk) = disk_opt else {
continue;
};
if !disk.is_local() {
continue;
}
// Count objects once by scanning only disk index zero from each set.
if disk_index != 0 {
continue;
}
let disk_id = match disk.get_disk_id().await.map_err(Error::from)? {
Some(id) => id.to_string(),
None => {
warn!("Skipping disk without ID: {}", disk.to_string());
continue;
}
};
let root = disk.path();
ensure_data_usage_layout(root.as_path()).await.map_err(Error::from)?;
let meta = LocalUsageSnapshotMeta {
disk_id: disk_id.clone(),
pool_index: Some(pool_idx),
set_index: Some(set_disks.set_index),
disk_index: Some(disk_index),
};
let state_path = state_file_path(root.as_path(), &disk_id);
let state = read_scan_state(&state_path).await?;
let root_clone = root.clone();
let meta_clone = meta.clone();
let handle = task::spawn_blocking(move || scan_disk_blocking(root_clone, meta_clone, state));
match handle.await {
Ok(Ok(result)) => {
write_local_snapshot(root.as_path(), &disk_id, &result.snapshot)
.await
.map_err(Error::from)?;
write_scan_state(&state_path, &result.state).await?;
snapshots.push(result.snapshot);
for (bucket, records) in result.objects_by_bucket {
bucket_objects.entry(bucket).or_default().extend(records.into_iter());
}
disk_status.push(result.status);
}
Ok(Err(err)) => {
warn!("Failed to scan disk {}: {}", disk.to_string(), err);
}
Err(join_err) => {
warn!("Disk scan task panicked for disk {}: {}", disk.to_string(), join_err);
}
}
}
}
}
Ok(LocalScanOutcome {
snapshots,
bucket_objects,
disk_status,
})
}
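/// Walk `root` looking for `xl.meta` files, reparse only entries whose mtime changed
/// since the previous scan, drop state for objects that disappeared, and rebuild the
/// per-disk usage snapshot from the refreshed incremental state.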
fn scan_disk_blocking(root: PathBuf, meta: LocalUsageSnapshotMeta, mut state: IncrementalScanState) -> Result<DiskScanResult> {
let now = SystemTime::now();
let now_ns = system_time_to_ns(now);
let mut visited: HashSet<String> = HashSet::new();
let mut emitted: HashSet<String> = HashSet::new();
let mut objects_by_bucket: HashMap<String, Vec<LocalObjectRecord>> = HashMap::new();
let mut status = DiskUsageStatus {
disk_id: meta.disk_id.clone(),
pool_index: meta.pool_index,
set_index: meta.set_index,
disk_index: meta.disk_index,
last_update: None,
snapshot_exists: false,
};
for entry in WalkDir::new(&root).follow_links(false).into_iter().filter_map(|res| res.ok()) {
if !entry.file_type().is_file() {
continue;
}
if entry.file_name() != "xl.meta" {
continue;
}
let xl_path = entry.path().to_path_buf();
let Some(object_dir) = xl_path.parent() else {
continue;
};
let Some(rel_path) = object_dir.strip_prefix(&root).ok().map(normalize_path) else {
continue;
};
let mut components = rel_path.split('/');
let Some(bucket_name) = components.next() else {
continue;
};
if bucket_name.starts_with('.') {
continue;
}
let object_key = components.collect::<Vec<_>>().join("/");
visited.insert(rel_path.clone());
let metadata = match std::fs::metadata(&xl_path) {
Ok(meta) => meta,
Err(err) => {
warn!("Failed to read metadata for {xl_path:?}: {err}");
continue;
}
};
let mtime_ns = metadata.modified().ok().map(system_time_to_ns);
let should_parse = match state.objects.get(&rel_path) {
Some(existing) => existing.last_modified_ns != mtime_ns,
None => true,
};
if should_parse {
match std::fs::read(&xl_path) {
Ok(buf) => match FileMeta::load(&buf) {
Ok(file_meta) => match compute_object_usage(bucket_name, object_key.as_str(), &file_meta) {
Ok(Some(mut record)) => {
record.usage.last_modified_ns = mtime_ns;
state.objects.insert(rel_path.clone(), record.usage.clone());
emitted.insert(rel_path.clone());
warn!("compute_object_usage: record: {:?}", record.clone());
objects_by_bucket.entry(record.usage.bucket.clone()).or_default().push(record);
}
Ok(None) => {
warn!("compute_object_usage: None, rel_path: {:?}", rel_path);
state.objects.remove(&rel_path);
}
Err(err) => {
warn!("Failed to parse usage from {:?}: {}", xl_path, err);
}
},
Err(err) => {
warn!("Failed to decode xl.meta {:?}: {}", xl_path, err);
}
},
Err(err) => {
warn!("Failed to read xl.meta {:?}: {}", xl_path, err);
}
}
} else {
warn!("should_parse: false, rel_path: {:?}", rel_path);
}
}
state.objects.retain(|key, _| visited.contains(key));
state.last_scan_ns = Some(now_ns);
// for (key, usage) in &state.objects {
// if emitted.contains(key) {
// continue;
// }
// objects_by_bucket
// .entry(usage.bucket.clone())
// .or_default()
// .push(LocalObjectRecord {
// usage: usage.clone(),
// object_info: None,
// file_info: None,
// });
// }
let snapshot = build_snapshot(meta, &state.objects, now);
status.snapshot_exists = true;
status.last_update = Some(now);
Ok(DiskScanResult {
snapshot,
state,
objects_by_bucket,
status,
})
}
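/// Derive per-object usage (version and delete-marker counts plus the live object's size)
/// from a decoded `xl.meta`; returns `Ok(None)` when the entry holds neither a live
/// version nor a delete marker.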
fn compute_object_usage(bucket: &str, object: &str, file_meta: &FileMeta) -> Result<Option<LocalObjectRecord>> {
let mut versions_count = 0u64;
let mut delete_markers_count = 0u64;
let mut total_size = 0u64;
let mut has_live_object = false;
let mut latest_file_info: Option<FileInfo> = None;
for shallow in &file_meta.versions {
match shallow.header.version_type {
VersionType::Object => {
let version = match FileMetaVersion::try_from(shallow.meta.as_slice()) {
Ok(version) => version,
Err(err) => {
warn!("Failed to parse file meta version: {}", err);
continue;
}
};
if let Some(obj) = version.object {
if !has_live_object {
total_size = obj.size.max(0) as u64;
}
has_live_object = true;
versions_count = versions_count.saturating_add(1);
if latest_file_info.is_none() {
if let Ok(info) = file_meta.into_fileinfo(bucket, object, "", false, false) {
latest_file_info = Some(info);
}
}
}
}
VersionType::Delete => {
delete_markers_count = delete_markers_count.saturating_add(1);
versions_count = versions_count.saturating_add(1);
}
_ => {}
}
}
if !has_live_object && delete_markers_count == 0 {
return Ok(None);
}
let object_info = latest_file_info.as_ref().map(|fi| {
let versioned = fi.version_id.is_some();
ObjectInfo::from_file_info(fi, bucket, object, versioned)
});
let file_info = latest_file_info.clone();
Ok(Some(LocalObjectRecord {
usage: LocalObjectUsage {
bucket: bucket.to_string(),
object: object.to_string(),
last_modified_ns: None,
versions_count,
delete_markers_count,
total_size,
has_live_object,
},
object_info,
file_info,
}))
}
fn build_snapshot(
meta: LocalUsageSnapshotMeta,
objects: &HashMap<String, LocalObjectUsage>,
now: SystemTime,
) -> LocalUsageSnapshot {
let mut snapshot = LocalUsageSnapshot::new(meta);
for usage in objects.values() {
let bucket_entry = snapshot.buckets_usage.entry(usage.bucket.clone()).or_default();
if usage.has_live_object {
bucket_entry.objects_count = bucket_entry.objects_count.saturating_add(1);
}
bucket_entry.versions_count = bucket_entry.versions_count.saturating_add(usage.versions_count);
bucket_entry.delete_markers_count = bucket_entry.delete_markers_count.saturating_add(usage.delete_markers_count);
bucket_entry.size = bucket_entry.size.saturating_add(usage.total_size);
}
snapshot.last_update = Some(now);
snapshot.recompute_totals();
snapshot
}
fn normalize_path(path: &Path) -> String {
path.iter()
.map(|component| component.to_string_lossy())
.collect::<Vec<_>>()
.join("/")
}
fn system_time_to_ns(time: SystemTime) -> i128 {
match time.duration_since(UNIX_EPOCH) {
Ok(duration) => {
let secs = duration.as_secs() as i128;
let nanos = duration.subsec_nanos() as i128;
secs * 1_000_000_000 + nanos
}
Err(err) => {
let duration = err.duration();
let secs = duration.as_secs() as i128;
let nanos = duration.subsec_nanos() as i128;
-(secs * 1_000_000_000 + nanos)
}
}
}
fn state_file_path(root: &Path, disk_id: &str) -> PathBuf {
let mut path = data_usage_state_dir(root);
path.push(format!("{}{}", snapshot_file_name(disk_id), STATE_FILE_EXTENSION));
path
}
async fn read_scan_state(path: &Path) -> Result<IncrementalScanState> {
match fs::read(path).await {
Ok(bytes) => from_slice(&bytes).map_err(|err| Error::Serialization(err.to_string())),
Err(err) if err.kind() == std::io::ErrorKind::NotFound => Ok(IncrementalScanState::default()),
Err(err) => Err(err.into()),
}
}
async fn write_scan_state(path: &Path, state: &IncrementalScanState) -> Result<()> {
if let Some(parent) = path.parent() {
fs::create_dir_all(parent).await?;
}
let data = to_vec(state).map_err(|err| Error::Serialization(err.to_string()))?;
fs::write(path, data).await?;
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use rustfs_filemeta::{ChecksumAlgo, ErasureAlgo, FileMetaShallowVersion, MetaDeleteMarker, MetaObject};
use std::collections::HashMap;
use std::fs;
use tempfile::TempDir;
use time::OffsetDateTime;
use uuid::Uuid;
fn build_file_meta_with_object(erasure_index: usize, size: i64) -> FileMeta {
let mut file_meta = FileMeta::default();
let meta_object = MetaObject {
version_id: Some(Uuid::new_v4()),
data_dir: Some(Uuid::new_v4()),
erasure_algorithm: ErasureAlgo::ReedSolomon,
erasure_m: 2,
erasure_n: 2,
erasure_block_size: 4096,
erasure_index,
erasure_dist: vec![0_u8, 1, 2, 3],
bitrot_checksum_algo: ChecksumAlgo::HighwayHash,
part_numbers: vec![1],
part_etags: vec!["etag".to_string()],
part_sizes: vec![size as usize],
part_actual_sizes: vec![size],
part_indices: Vec::new(),
size,
mod_time: Some(OffsetDateTime::now_utc()),
meta_sys: HashMap::new(),
meta_user: HashMap::new(),
};
let version = FileMetaVersion {
version_type: VersionType::Object,
object: Some(meta_object),
delete_marker: None,
write_version: 1,
};
let shallow = FileMetaShallowVersion::try_from(version).expect("convert version");
file_meta.versions.push(shallow);
file_meta
}
fn build_file_meta_with_delete_marker() -> FileMeta {
let mut file_meta = FileMeta::default();
let delete_marker = MetaDeleteMarker {
version_id: Some(Uuid::new_v4()),
mod_time: Some(OffsetDateTime::now_utc()),
meta_sys: HashMap::new(),
};
let version = FileMetaVersion {
version_type: VersionType::Delete,
object: None,
delete_marker: Some(delete_marker),
write_version: 2,
};
let shallow = FileMetaShallowVersion::try_from(version).expect("convert delete marker");
file_meta.versions.push(shallow);
file_meta
}
#[test]
fn compute_object_usage_primary_disk() {
let file_meta = build_file_meta_with_object(0, 1024);
let record = compute_object_usage("bucket", "foo/bar", &file_meta)
.expect("compute usage")
.expect("record should exist");
assert!(record.usage.has_live_object);
assert_eq!(record.usage.bucket, "bucket");
assert_eq!(record.usage.object, "foo/bar");
assert_eq!(record.usage.total_size, 1024);
assert!(record.object_info.is_some(), "object info should be synthesized");
}
#[test]
fn compute_object_usage_handles_non_primary_disk() {
let file_meta = build_file_meta_with_object(1, 2048);
let record = compute_object_usage("bucket", "obj", &file_meta)
.expect("compute usage")
.expect("record should exist for non-primary shard");
assert!(record.usage.has_live_object);
}
#[test]
fn compute_object_usage_reports_delete_marker() {
let file_meta = build_file_meta_with_delete_marker();
let record = compute_object_usage("bucket", "obj", &file_meta)
.expect("compute usage")
.expect("delete marker record");
assert!(!record.usage.has_live_object);
assert_eq!(record.usage.delete_markers_count, 1);
assert_eq!(record.usage.versions_count, 1);
}
#[test]
fn build_snapshot_accumulates_usage() {
let mut objects = HashMap::new();
objects.insert(
"bucket/a".to_string(),
LocalObjectUsage {
bucket: "bucket".to_string(),
object: "a".to_string(),
last_modified_ns: None,
versions_count: 2,
delete_markers_count: 1,
total_size: 512,
has_live_object: true,
},
);
let snapshot = build_snapshot(LocalUsageSnapshotMeta::default(), &objects, SystemTime::now());
let usage = snapshot.buckets_usage.get("bucket").expect("bucket entry should exist");
assert_eq!(usage.objects_count, 1);
assert_eq!(usage.versions_count, 2);
assert_eq!(usage.delete_markers_count, 1);
assert_eq!(usage.size, 512);
}
#[test]
fn scan_disk_blocking_handles_incremental_updates() {
let temp_dir = TempDir::new().expect("create temp dir");
let root = temp_dir.path();
let bucket_dir = root.join("bench");
let object1_dir = bucket_dir.join("obj1");
fs::create_dir_all(&object1_dir).expect("create first object directory");
let file_meta = build_file_meta_with_object(0, 1024);
let bytes = file_meta.marshal_msg().expect("serialize first object");
fs::write(object1_dir.join("xl.meta"), bytes).expect("write first xl.meta");
let meta = LocalUsageSnapshotMeta {
disk_id: "disk-test".to_string(),
..Default::default()
};
let DiskScanResult {
snapshot: snapshot1,
state,
..
} = scan_disk_blocking(root.to_path_buf(), meta.clone(), IncrementalScanState::default()).expect("initial scan succeeds");
let usage1 = snapshot1.buckets_usage.get("bench").expect("bucket stats recorded");
assert_eq!(usage1.objects_count, 1);
assert_eq!(usage1.size, 1024);
assert_eq!(state.objects.len(), 1);
let object2_dir = bucket_dir.join("nested").join("obj2");
fs::create_dir_all(&object2_dir).expect("create second object directory");
let second_meta = build_file_meta_with_object(0, 2048);
let bytes = second_meta.marshal_msg().expect("serialize second object");
fs::write(object2_dir.join("xl.meta"), bytes).expect("write second xl.meta");
let DiskScanResult {
snapshot: snapshot2,
state: state_next,
..
} = scan_disk_blocking(root.to_path_buf(), meta.clone(), state).expect("incremental scan succeeds");
let usage2 = snapshot2
.buckets_usage
.get("bench")
.expect("bucket stats recorded after addition");
assert_eq!(usage2.objects_count, 2);
assert_eq!(usage2.size, 1024 + 2048);
assert_eq!(state_next.objects.len(), 2);
fs::remove_dir_all(&object1_dir).expect("remove first object");
let DiskScanResult {
snapshot: snapshot3,
state: state_final,
..
} = scan_disk_blocking(root.to_path_buf(), meta, state_next).expect("scan after deletion succeeds");
let usage3 = snapshot3
.buckets_usage
.get("bench")
.expect("bucket stats recorded after deletion");
assert_eq!(usage3.objects_count, 1);
assert_eq!(usage3.size, 2048);
assert_eq!(state_final.objects.len(), 1);
assert!(
state_final.objects.keys().all(|path| path.contains("nested")),
"state should only keep surviving object"
);
}
#[test]
fn scan_disk_blocking_recovers_from_stale_state_entries() {
let temp_dir = TempDir::new().expect("create temp dir");
let root = temp_dir.path();
let mut stale_state = IncrementalScanState::default();
stale_state.objects.insert(
"bench/stale".to_string(),
LocalObjectUsage {
bucket: "bench".to_string(),
object: "stale".to_string(),
last_modified_ns: Some(42),
versions_count: 1,
delete_markers_count: 0,
total_size: 512,
has_live_object: true,
},
);
stale_state.last_scan_ns = Some(99);
let meta = LocalUsageSnapshotMeta {
disk_id: "disk-test".to_string(),
..Default::default()
};
let DiskScanResult {
snapshot, state, status, ..
} = scan_disk_blocking(root.to_path_buf(), meta, stale_state).expect("scan succeeds");
assert!(state.objects.is_empty(), "stale entries should be cleared when files disappear");
assert!(
snapshot.buckets_usage.is_empty(),
"no real xl.meta files means bucket usage should stay empty"
);
assert!(status.snapshot_exists, "snapshot status should indicate a refresh");
}
#[test]
fn scan_disk_blocking_handles_large_volume() {
const OBJECTS: usize = 256;
let temp_dir = TempDir::new().expect("create temp dir");
let root = temp_dir.path();
let bucket_dir = root.join("bulk");
for idx in 0..OBJECTS {
let object_dir = bucket_dir.join(format!("obj-{idx:03}"));
fs::create_dir_all(&object_dir).expect("create object directory");
let size = 1024 + idx as i64;
let file_meta = build_file_meta_with_object(0, size);
let bytes = file_meta.marshal_msg().expect("serialize file meta");
fs::write(object_dir.join("xl.meta"), bytes).expect("write xl.meta");
}
let meta = LocalUsageSnapshotMeta {
disk_id: "disk-test".to_string(),
..Default::default()
};
let DiskScanResult { snapshot, state, .. } =
scan_disk_blocking(root.to_path_buf(), meta, IncrementalScanState::default()).expect("bulk scan succeeds");
let bucket_usage = snapshot
.buckets_usage
.get("bulk")
.expect("bucket usage present for bulk scan");
assert_eq!(bucket_usage.objects_count as usize, OBJECTS, "should count all objects once");
assert!(
bucket_usage.size >= (1024 * OBJECTS) as u64,
"aggregated size should grow with object count"
);
assert_eq!(state.objects.len(), OBJECTS, "incremental state tracks every object");
}
}


@@ -0,0 +1,430 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use crate::scanner::node_scanner::{BucketStats, DiskStats, LocalScanStats};
use crate::{Error, Result};
use rustfs_common::data_usage::DataUsageInfo;
use serde::{Deserialize, Serialize};
use std::{
path::{Path, PathBuf},
sync::Arc,
sync::atomic::{AtomicU64, Ordering},
time::{Duration, SystemTime},
};
use tokio::sync::RwLock;
use tracing::{debug, error, info, warn};
/// local stats manager
pub struct LocalStatsManager {
/// node id
node_id: String,
/// stats file path
stats_file: PathBuf,
/// backup file path
backup_file: PathBuf,
/// temp file path
temp_file: PathBuf,
/// local stats data
stats: Arc<RwLock<LocalScanStats>>,
/// save interval
save_interval: Duration,
/// last save time
last_save: Arc<RwLock<SystemTime>>,
/// stats counters
counters: Arc<StatsCounters>,
}
/// stats counters
pub struct StatsCounters {
/// total scanned objects
pub total_objects_scanned: AtomicU64,
/// total healthy objects
pub total_healthy_objects: AtomicU64,
/// total corrupted objects
pub total_corrupted_objects: AtomicU64,
/// total scanned bytes
pub total_bytes_scanned: AtomicU64,
/// total scan errors
pub total_scan_errors: AtomicU64,
/// total heal triggered
pub total_heal_triggered: AtomicU64,
}
impl Default for StatsCounters {
fn default() -> Self {
Self {
total_objects_scanned: AtomicU64::new(0),
total_healthy_objects: AtomicU64::new(0),
total_corrupted_objects: AtomicU64::new(0),
total_bytes_scanned: AtomicU64::new(0),
total_scan_errors: AtomicU64::new(0),
total_heal_triggered: AtomicU64::new(0),
}
}
}
/// scan result entry
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ScanResultEntry {
/// object path
pub object_path: String,
/// bucket name
pub bucket_name: String,
/// object size
pub object_size: u64,
/// is healthy
pub is_healthy: bool,
/// error message (if any)
pub error_message: Option<String>,
/// scan time
pub scan_time: SystemTime,
/// disk id
pub disk_id: String,
}
/// batch scan result
#[derive(Debug, Clone)]
pub struct BatchScanResult {
/// disk id
pub disk_id: String,
/// scan result entries
pub entries: Vec<ScanResultEntry>,
/// scan start time
pub scan_start: SystemTime,
/// scan end time
pub scan_end: SystemTime,
/// scan duration
pub scan_duration: Duration,
}
impl LocalStatsManager {
/// create new local stats manager
pub fn new(node_id: &str, data_dir: &Path) -> Self {
// ensure data directory exists
if !data_dir.exists() {
if let Err(e) = std::fs::create_dir_all(data_dir) {
error!("create stats data directory failed {:?}: {}", data_dir, e);
}
}
let stats_file = data_dir.join(format!("scanner_stats_{node_id}.json"));
let backup_file = data_dir.join(format!("scanner_stats_{node_id}.backup"));
let temp_file = data_dir.join(format!("scanner_stats_{node_id}.tmp"));
Self {
node_id: node_id.to_string(),
stats_file,
backup_file,
temp_file,
stats: Arc::new(RwLock::new(LocalScanStats::default())),
save_interval: Duration::from_secs(60), // save at most once every 60 seconds
last_save: Arc::new(RwLock::new(SystemTime::UNIX_EPOCH)),
counters: Arc::new(StatsCounters::default()),
}
}
/// load local stats data
pub async fn load_stats(&self) -> Result<()> {
if !self.stats_file.exists() {
info!("stats data file not exists, will create new stats data");
return Ok(());
}
match self.load_stats_from_file(&self.stats_file).await {
Ok(stats) => {
*self.stats.write().await = stats;
info!("success load local stats data");
Ok(())
}
Err(e) => {
warn!("load main stats file failed: {}, try backup file", e);
match self.load_stats_from_file(&self.backup_file).await {
Ok(stats) => {
*self.stats.write().await = stats;
warn!("restore stats data from backup file");
Ok(())
}
Err(backup_e) => {
warn!("backup file also cannot load: {}, will use default stats data", backup_e);
Ok(())
}
}
}
}
}
/// load stats data from file
async fn load_stats_from_file(&self, file_path: &Path) -> Result<LocalScanStats> {
let content = tokio::fs::read_to_string(file_path)
.await
.map_err(|e| Error::IO(format!("read stats file failed: {e}")))?;
let stats: LocalScanStats =
serde_json::from_str(&content).map_err(|e| Error::Serialization(format!("deserialize stats data failed: {e}")))?;
Ok(stats)
}
/// save stats data to disk
pub async fn save_stats(&self) -> Result<()> {
let now = SystemTime::now();
let last_save = *self.last_save.read().await;
// frequency control
if now.duration_since(last_save).unwrap_or(Duration::ZERO) < self.save_interval {
return Ok(());
}
let stats = self.stats.read().await.clone();
// serialize
let json_data = serde_json::to_string_pretty(&stats)
.map_err(|e| Error::Serialization(format!("serialize stats data failed: {e}")))?;
// atomic write
tokio::fs::write(&self.temp_file, json_data)
.await
.map_err(|e| Error::IO(format!("write temp stats file failed: {e}")))?;
// backup existing file
if self.stats_file.exists() {
tokio::fs::copy(&self.stats_file, &self.backup_file)
.await
.map_err(|e| Error::IO(format!("backup stats file failed: {e}")))?;
}
// atomic replace
tokio::fs::rename(&self.temp_file, &self.stats_file)
.await
.map_err(|e| Error::IO(format!("replace stats file failed: {e}")))?;
*self.last_save.write().await = now;
debug!("save local stats data to {:?}", self.stats_file);
Ok(())
}
/// force save stats data
pub async fn force_save_stats(&self) -> Result<()> {
*self.last_save.write().await = SystemTime::UNIX_EPOCH;
self.save_stats().await
}
/// update disk scan result
pub async fn update_disk_scan_result(&self, result: &BatchScanResult) -> Result<()> {
let mut stats = self.stats.write().await;
// update disk stats
let disk_stat = stats.disks_stats.entry(result.disk_id.clone()).or_insert_with(|| DiskStats {
disk_id: result.disk_id.clone(),
..Default::default()
});
let healthy_count = result.entries.iter().filter(|e| e.is_healthy).count() as u64;
let error_count = result.entries.iter().filter(|e| !e.is_healthy).count() as u64;
disk_stat.objects_scanned += result.entries.len() as u64;
disk_stat.errors_count += error_count;
disk_stat.last_scan_time = result.scan_end;
disk_stat.scan_duration = result.scan_duration;
disk_stat.scan_completed = true;
// update overall stats
stats.objects_scanned += result.entries.len() as u64;
stats.healthy_objects += healthy_count;
stats.corrupted_objects += error_count;
stats.last_update = SystemTime::now();
// update bucket stats
for entry in &result.entries {
let _bucket_stat = stats
.buckets_stats
.entry(entry.bucket_name.clone())
.or_insert_with(BucketStats::default);
// TODO: update BucketStats
}
// update atomic counters
self.counters
.total_objects_scanned
.fetch_add(result.entries.len() as u64, Ordering::Relaxed);
self.counters
.total_healthy_objects
.fetch_add(healthy_count, Ordering::Relaxed);
self.counters
.total_corrupted_objects
.fetch_add(error_count, Ordering::Relaxed);
let total_bytes: u64 = result.entries.iter().map(|e| e.object_size).sum();
self.counters.total_bytes_scanned.fetch_add(total_bytes, Ordering::Relaxed);
if error_count > 0 {
self.counters.total_scan_errors.fetch_add(error_count, Ordering::Relaxed);
}
drop(stats);
debug!(
"update disk {} scan result: objects {}, healthy {}, error {}",
result.disk_id,
result.entries.len(),
healthy_count,
error_count
);
Ok(())
}
/// record single object scan result
pub async fn record_object_scan(&self, entry: ScanResultEntry) -> Result<()> {
let result = BatchScanResult {
disk_id: entry.disk_id.clone(),
entries: vec![entry],
scan_start: SystemTime::now(),
scan_end: SystemTime::now(),
scan_duration: Duration::from_millis(0),
};
self.update_disk_scan_result(&result).await
}
/// get local stats data copy
pub async fn get_stats(&self) -> LocalScanStats {
self.stats.read().await.clone()
}
/// get real-time counters
pub fn get_counters(&self) -> Arc<StatsCounters> {
self.counters.clone()
}
/// reset stats data
pub async fn reset_stats(&self) -> Result<()> {
{
let mut stats = self.stats.write().await;
*stats = LocalScanStats::default();
}
// reset counters
self.counters.total_objects_scanned.store(0, Ordering::Relaxed);
self.counters.total_healthy_objects.store(0, Ordering::Relaxed);
self.counters.total_corrupted_objects.store(0, Ordering::Relaxed);
self.counters.total_bytes_scanned.store(0, Ordering::Relaxed);
self.counters.total_scan_errors.store(0, Ordering::Relaxed);
self.counters.total_heal_triggered.store(0, Ordering::Relaxed);
info!("reset local stats data");
Ok(())
}
/// get stats summary
pub async fn get_stats_summary(&self) -> StatsSummary {
let stats = self.stats.read().await;
StatsSummary {
node_id: self.node_id.clone(),
total_objects_scanned: self.counters.total_objects_scanned.load(Ordering::Relaxed),
total_healthy_objects: self.counters.total_healthy_objects.load(Ordering::Relaxed),
total_corrupted_objects: self.counters.total_corrupted_objects.load(Ordering::Relaxed),
total_bytes_scanned: self.counters.total_bytes_scanned.load(Ordering::Relaxed),
total_scan_errors: self.counters.total_scan_errors.load(Ordering::Relaxed),
total_heal_triggered: self.counters.total_heal_triggered.load(Ordering::Relaxed),
total_disks: stats.disks_stats.len(),
total_buckets: stats.buckets_stats.len(),
last_update: stats.last_update,
scan_progress: stats.scan_progress.clone(),
data_usage: stats.data_usage.clone(),
}
}
/// record heal triggered
pub async fn record_heal_triggered(&self, object_path: &str, error_message: &str) {
self.counters.total_heal_triggered.fetch_add(1, Ordering::Relaxed);
info!("record heal triggered: object={}, error={}", object_path, error_message);
}
/// update data usage stats
pub async fn update_data_usage(&self, data_usage: DataUsageInfo) {
let mut stats = self.stats.write().await;
stats.data_usage = data_usage;
stats.last_update = SystemTime::now();
debug!("update data usage stats");
}
/// cleanup stats files
pub async fn cleanup_stats_files(&self) -> Result<()> {
// delete main file
if self.stats_file.exists() {
tokio::fs::remove_file(&self.stats_file)
.await
.map_err(|e| Error::IO(format!("delete stats file failed: {e}")))?;
}
// delete backup file
if self.backup_file.exists() {
tokio::fs::remove_file(&self.backup_file)
.await
.map_err(|e| Error::IO(format!("delete backup stats file failed: {e}")))?;
}
// delete temp file
if self.temp_file.exists() {
tokio::fs::remove_file(&self.temp_file)
.await
.map_err(|e| Error::IO(format!("delete temp stats file failed: {e}")))?;
}
info!("cleanup all stats files");
Ok(())
}
/// set save interval
pub fn set_save_interval(&mut self, interval: Duration) {
self.save_interval = interval;
info!("set stats data save interval to {:?}", interval);
}
}
/// stats summary
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct StatsSummary {
/// node id
pub node_id: String,
/// total scanned objects
pub total_objects_scanned: u64,
/// total healthy objects
pub total_healthy_objects: u64,
/// total corrupted objects
pub total_corrupted_objects: u64,
/// total scanned bytes
pub total_bytes_scanned: u64,
/// total scan errors
pub total_scan_errors: u64,
/// total heal triggered
pub total_heal_triggered: u64,
/// total disks
pub total_disks: usize,
/// total buckets
pub total_buckets: usize,
/// last update time
pub last_update: SystemTime,
/// scan progress
pub scan_progress: super::node_scanner::ScanProgress,
/// data usage snapshot for the node
pub data_usage: DataUsageInfo,
}
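For orientation, a minimal usage sketch of the manager introduced in this file (not part of the diff). The module path in the use line, the node id, and the data directory are placeholder assumptions; the constructor and method calls mirror the API defined above.

// Sketch only: the import path, node id, and data directory below are assumptions;
// the calls follow the LocalStatsManager API shown in this file.
use std::path::Path;
use std::time::SystemTime;
use crate::scanner::local_stats_manager::{LocalStatsManager, ScanResultEntry};

pub async fn demo_stats_roundtrip() {
    // Stats are persisted as scanner_stats_<node_id>.json under the data directory.
    let manager = LocalStatsManager::new("node-1", Path::new("/var/lib/rustfs/scanner"));
    manager.load_stats().await.expect("load persisted stats or start fresh");

    // Record one object scan; internally this is wrapped into a single-entry BatchScanResult.
    let entry = ScanResultEntry {
        object_path: "bench/obj1".to_string(),
        bucket_name: "bench".to_string(),
        object_size: 1024,
        is_healthy: true,
        error_message: None,
        scan_time: SystemTime::now(),
        disk_id: "disk-test".to_string(),
    };
    manager.record_object_scan(entry).await.expect("record scan result");

    // force_save_stats bypasses the 60-second frequency control in save_stats.
    manager.force_save_stats().await.expect("persist stats to disk");
}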

Some files were not shown because too many files have changed in this diff.