Compare commits

82 Commits

Author SHA1 Message Date
guojidan
fba201df3d fix: harden data usage aggregation and cache handling (#1102)
Signed-off-by: junxiang Mu <1948535941@qq.com>
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-11 09:55:25 +08:00
yxrxy
ccbab3232b fix: ListObjectsV2 correctly handles repeated folder names in prefixes (#1104)
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-11 09:38:52 +08:00
loverustfs
421f66ea18 Disable codeql 2025-12-11 09:29:46 +08:00
yxrxy
ede2fa9d0b fix: is-admin api (For STS/temporary credentials, we need to check the… (#1101)
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-11 08:55:41 +08:00
tennisleng
978845b555 fix(lifecycle): Fix ObjectInfo fields and mod_time error handling (#1088)
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-11 07:17:35 +08:00
Jacob
53c126d678 fix: decode percent-encoded paths in get_file_path() (#1072)
Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-10 22:30:02 +08:00
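
A minimal sketch of the decoding step described in the commit above, using the `percent-encoding` crate; whether `get_file_path()` takes this exact approach is not shown here, and `decode_object_path` is a hypothetical helper name.

    use percent_encoding::percent_decode_str;

    /// Decode a percent-encoded object path before mapping it onto the
    /// filesystem, falling back to the raw input if the bytes are not UTF-8.
    fn decode_object_path(raw: &str) -> String {
        percent_decode_str(raw)
            .decode_utf8()
            .map(|cow| cow.into_owned())
            .unwrap_or_else(|_| raw.to_string())
    }

    // "my%20folder/file%2Bname.txt" decodes to "my folder/file+name.txt"
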
0xdx2
9f12a7678c feat(ci): add codeql to scanner code (#1076) 2025-12-10 21:48:18 +08:00
Jörg Thalheim
2c86fe30ec Content encoding (#1089)
Signed-off-by: Jörg Thalheim <joerg@thalheim.io>
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-10 15:21:51 +08:00
tennisleng
ac0c34e734 fix(lifecycle): Return NoSuchLifecycleConfiguration error for missing lifecycle config (#1087)
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-10 12:35:22 +08:00
majinghe
ae46ea4bd3 fix github action security found by github CodeQL (#1091) 2025-12-10 12:07:28 +08:00
majinghe
8b3d4ea59b enhancement logs output for container deployment (#1090) 2025-12-10 11:14:05 +08:00
houseme
ef261deef6 improve code for is admin (#1082) 2025-12-09 17:34:47 +08:00
Copilot
20961d7c91 Add comprehensive special character handling with validation refactoring and extensive test coverage (#1078)
Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-09 13:40:29 +08:00
shiro.lee
8de8172833 fix: the If-None-Match error handling in the complete_multipart_uploa… (#1065)
Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-08 23:10:20 +08:00
orbisai0security
7c98c62d60 [Security] Fix HIGH vulnerability: yaml.docker-compose.security.writable-filesystem-service.writable-filesystem-service (#1005)
Co-authored-by: orbisai0security <orbisai0security@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-08 22:05:10 +08:00
Ali Mehraji
15c75b9d36 simple deployment via docker-compose (#1043)
Signed-off-by: Ali Mehraji <a.mehraji75@gmail.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-08 21:25:11 +08:00
yxrxy
af650716da feat: add is-admin user api (#1063) 2025-12-08 21:15:04 +08:00
shiro.lee
552e95e368 fix: the If-None-Match error handling in the put_object method when t… (#1034)
Co-authored-by: 0xdx2 <xuedamon2@gmail.com>
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-08 15:36:31 +08:00
dependabot[bot]
619cc69512 build(deps): bump the dependencies group with 3 updates (#1052)
Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-08 14:31:53 +08:00
Jitter
76d25d9a20 Fix/issue #1001 dead node detection (#1054)
Co-authored-by: weisd <im@weisd.in>
Co-authored-by: Jitterx69 <mohit@example.com>
2025-12-08 12:29:46 +08:00
yihong
834025d9e3 docs: fix some dead link (#1053)
Signed-off-by: yihong0618 <zouzou0208@gmail.com>
2025-12-08 11:23:24 +08:00
houseme
e2d8e9e3d3 Feature/improve profiling (#1038)
Co-authored-by: Jitter <jitterx69@gmail.com>
Co-authored-by: weisd <im@weisd.in>
2025-12-07 22:39:47 +08:00
Jitter
cd6a26bc3a fix(net): resolve 1GB upload hang and macos build (Issue #1001 regression) (#1035) 2025-12-07 18:05:51 +08:00
tennisleng
5f256249f4 fix: correct ARN parsing for notification targets (#1010)
Co-authored-by: Andrew Leng <work@Andrews-MacBook-Air.local>
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-06 23:12:58 +08:00
Jitter
b10d80cbb6 fix: detect dead nodes via HTTP/2 keepalives (Issue #1001) (#1025)
Co-authored-by: weisd <im@weisd.in>
2025-12-06 21:45:42 +08:00
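
The detection mechanism named in the commit above can be sketched with a hyper 0.14-style client builder; the interval and timeout values here are illustrative, not the settings RustFS actually ships, and the project may configure this through a different HTTP stack.

    use std::time::Duration;
    use hyper::{client::HttpConnector, Body, Client};

    fn build_cluster_client() -> Client<HttpConnector, Body> {
        Client::builder()
            .http2_only(true)
            // Send HTTP/2 PING frames even on idle connections, so a peer
            // that died without closing its TCP socket is still noticed.
            .http2_keep_alive_while_idle(true)
            .http2_keep_alive_interval(Duration::from_secs(10))
            // Drop the connection if no PING acknowledgement arrives in time,
            // which surfaces a dead node instead of hanging forever.
            .http2_keep_alive_timeout(Duration::from_secs(5))
            .build_http()
    }
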
0xdx2
7c6cbaf837 feat: enhance error handling and add precondition checks for object o… (#1008) 2025-12-06 20:39:03 +08:00
Hunter Wu
72930b1e30 security: Fix timing attack vulnerability in credential comparison (#1014)
Co-authored-by: Copilot AI <copilot@github.com>
2025-12-06 15:13:27 +08:00
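
The usual shape of such a fix is to compare secrets without an early exit on the first differing byte; a minimal sketch (the actual change may instead rely on a crate such as `subtle`):

    /// Constant-time byte comparison: every byte is examined regardless of
    /// where the first mismatch occurs, so timing does not leak a prefix.
    fn constant_time_eq(a: &[u8], b: &[u8]) -> bool {
        if a.len() != b.len() {
            // Length is not treated as secret here; only the contents are.
            return false;
        }
        a.iter().zip(b).fold(0u8, |acc, (x, y)| acc | (x ^ y)) == 0
    }

    // Usage: constant_time_eq(presented_secret.as_bytes(), stored_secret.as_bytes())
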
LemonDouble
6ca8945ca7 feat(helm): split storageSize into data and log storage parameters (#1018) 2025-12-06 14:01:49 +08:00
majinghe
0d0edc22be update helm package ci file and helm values file (#1004) 2025-12-05 22:13:00 +08:00
weisd
030d3c9426 fix filemeta nil versionid (#1002) 2025-12-05 20:30:08 +08:00
majinghe
b8b905be86 add helm package ci file (#994) 2025-12-05 15:09:53 +08:00
Damien Degois
ace58fea0d feat(helm): add existingSecret handling and support for extra manifests (#992) 2025-12-05 14:14:59 +08:00
唐小鸭
3a79242133 feat: The observability module can be set separately. (#993) 2025-12-05 13:46:06 +08:00
Andrew Steurer
63d846ed14 Fix link to CONTRIBUTING.md in README (#991) 2025-12-05 09:23:26 +08:00
shiro.lee
3a79fcfe73 fix: add the is_truncated field to the return of the list_object_vers… (#985) 2025-12-04 22:26:31 +08:00
weisd
b3c80ae362 fix: listdir rpc (#979)
Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: loverustfs <hello@rustfs.com>
2025-12-04 16:12:10 +08:00
Hey Martin
3fd003b21d Delete duplicate titles in the README (#977) 2025-12-04 15:55:26 +08:00
houseme
1d3f622922 console: add version_handler and improve comments (#975) 2025-12-04 13:41:06 +08:00
loverustfs
e31b4303ed fix link error 2025-12-04 08:26:41 +08:00
houseme
5b0a3a0764 upgrade crate version and improve heal config (#963) 2025-12-03 18:49:11 +08:00
weisd
a8b7b28fd0 Fix Admin Heal API and Add Pagination Support for Large Buckets (#933)
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: loverustfs <hello@rustfs.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-03 18:10:46 +08:00
loverustfs
e355d3db80 Modify readme 2025-12-03 17:18:53 +08:00
weisd
4d7bf98c82 add logs (#962) 2025-12-03 13:17:47 +08:00
shiro.lee
699164e05e fix: add the is_truncated field to the return of the list_objects int… (#958) 2025-12-03 03:14:17 +08:00
dependabot[bot]
d35ceac441 build(deps): bump criterion in the dependencies group (#947)
Bumps the dependencies group with 1 update: [criterion](https://github.com/criterion-rs/criterion.rs).


Updates `criterion` from 0.7.0 to 0.8.0
- [Release notes](https://github.com/criterion-rs/criterion.rs/releases)
- [Changelog](https://github.com/criterion-rs/criterion.rs/blob/master/CHANGELOG.md)
- [Commits](https://github.com/criterion-rs/criterion.rs/compare/criterion-plot-v0.7.0...criterion-v0.8.0)

---
updated-dependencies:
- dependency-name: criterion
  dependency-version: 0.8.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-12-02 00:16:28 +08:00
houseme
93982227ac Improve health check handlers for endpoint and console (GET/HEAD, safer error handling) (#942)
* Improve health check handlers for endpoint and console

- Add unified GET/HEAD handling for `/health` and `/rustfs/console/health`
- Implement proper method filtering and 405 with `Allow: GET, HEAD`
- Avoid panics by removing `unwrap()` in health check logic
- Add safe fallbacks for JSON serialization and uptime calculation
- Ensure HEAD requests return only status and headers (empty body)
- Keep response format backward compatible for monitoring systems

* fix
2025-11-30 02:43:59 +08:00
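
The handler pattern described above, sketched with the `http` crate's types; the real handler lives in the RustFS router and its exact shape may differ.

    use http::{header, Method, Response, StatusCode};

    fn health_response(method: &Method) -> Response<String> {
        let resp = if *method == Method::GET {
            Response::builder()
                .status(StatusCode::OK)
                .header(header::CONTENT_TYPE, "application/json")
                .body(r#"{"status":"ok"}"#.to_string())
        } else if *method == Method::HEAD {
            // HEAD: same status and headers as GET, but an empty body.
            Response::builder()
                .status(StatusCode::OK)
                .header(header::CONTENT_TYPE, "application/json")
                .body(String::new())
        } else {
            Response::builder()
                .status(StatusCode::METHOD_NOT_ALLOWED)
                .header(header::ALLOW, "GET, HEAD")
                .body(String::new())
        };
        // Safe fallback instead of unwrap(): the health path must never panic.
        resp.unwrap_or_else(|_| Response::new(String::new()))
    }
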
Copilot
fdcdb30d28 Optimize concurrent GetObject performance with Moka cache, comprehensive metrics, complete test suite, cache writeback, and comprehensive documentation (#916)
* Initial plan

* feat: add concurrency-aware buffer sizing and hot object caching for GetObject

- Implement adaptive buffer sizing based on concurrent request load
- Add per-request tracking with automatic cleanup using RAII guards
- Implement hot object cache (LRU) for frequently accessed small files (<= 10MB)
- Add disk I/O semaphore to prevent saturation under extreme load
- Integrate concurrency module into GetObject implementation
- Buffer sizes now adapt: low concurrency uses large buffers for throughput,
  high concurrency uses smaller buffers for fairness and memory efficiency
- Add comprehensive metrics collection for monitoring performance

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* docs: add comprehensive documentation and tests for concurrent GetObject optimization

- Add detailed technical documentation explaining the solution
- Document root cause analysis and solution architecture
- Include performance expectations and testing recommendations
- Add integration tests for concurrency tracking and buffer sizing
- Add cache behavior tests
- Include benchmark tests for concurrent request handling

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix: address code review issues in concurrency module

- Fix race condition in cache size tracking by using consistent atomic operations within lock
- Correct buffer sizing logic: 1-2 requests use 100%, 3-4 use 75%, 5-8 use 50%, >8 use 40%
- Improve error message for semaphore acquire failure
- Document limitation of streaming cache implementation (not yet implemented)
- Add TODO for proper streaming cache with suggested approaches
- Update tests to match corrected buffer sizing thresholds

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
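
The corrected thresholds read directly as a scaling rule; a minimal sketch (the clamping to minimum/maximum buffer sizes used by the real implementation is omitted):

    /// Fraction of the base buffer size granted per request as concurrency grows.
    fn buffer_fraction(concurrent_requests: usize) -> f64 {
        match concurrent_requests {
            0..=2 => 1.00, // 1-2 requests: full-size buffers for throughput
            3..=4 => 0.75,
            5..=8 => 0.50,
            _ => 0.40, // >8 requests: favor fairness and memory efficiency
        }
    }

    fn buffer_size(base: usize, concurrent_requests: usize) -> usize {
        (base as f64 * buffer_fraction(concurrent_requests)) as usize
    }

    // Example: with a 1 MiB base buffer and 6 in-flight requests, each gets 512 KiB.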

* docs: add comprehensive implementation summary for concurrent GetObject optimization

- Executive summary of problem and solution
- Detailed architecture documentation for each optimization
- Integration points and code examples
- Complete testing strategy and performance validation steps
- Monitoring and observability guidelines with Prometheus queries
- Deployment considerations and rollout strategy
- Future enhancement roadmap
- Success criteria and quantitative metrics

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix

* fix

* refactor: optimize cache with lru 0.16.2 read-first pattern and add advanced features

- Implement optimized read-first cache access using peek() to reduce write lock contention
- Add batch cache operations: get_cached_batch() for efficient multi-object retrieval
- Add cache utility methods: is_cached(), remove_cached(), get_hot_keys()
- Implement warm_cache() for pre-populating cache on startup
- Add get_advanced_buffer_size() with file size and sequential read optimization
- Enhance test suite with 8 new comprehensive tests covering:
  - Batch operations and cache warming
  - Hot keys tracking and analysis
  - Cache removal and LRU behavior verification
  - Concurrent cache access performance
  - Advanced buffer sizing strategies
- Improve documentation and code comments in English throughout

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* docs: add final comprehensive optimization summary

- Complete technical documentation of all optimizations
- Detailed performance analysis and metrics
- Production deployment guide with examples
- Comprehensive API reference and usage patterns
- Migration guide and future enhancement roadmap
- All documentation in professional English

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix

* fix

* add moka crate for cache

* feat: replace LRU with Moka cache and add comprehensive metrics

- Replace lru crate with moka 0.12.11 for superior concurrent performance
- Implement lock-free cache with automatic TTL/TTI expiration
- Add size-based eviction using Moka's weigher function
- Integrate comprehensive metrics collection throughout GetObject flow:
  * Cache hit/miss tracking with per-key access counts
  * Request concurrency gauges
  * Disk permit wait time histograms
  * Total request duration tracking
  * Response size and buffer size histograms
- Deep integration with ecfs.rs GetObject operation
- Add hit rate calculation method
- Enhanced CacheStats with hit/miss counters
- Lock-free concurrent reads for better scalability

Moka advantages over LRU:
- True lock-free concurrent access
- Built-in TTL and TTI support
- Automatic size-based eviction
- Better performance under high concurrency
- Native async support

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
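
A minimal sketch of a Moka-backed hot-object cache with the features listed above (weigher-based size eviction, TTL and TTI); the capacity and expiry values are illustrative, not the project's defaults.

    use std::{sync::Arc, time::Duration};

    use bytes::Bytes;
    use moka::future::Cache;

    fn build_hot_object_cache() -> Cache<String, Arc<Bytes>> {
        Cache::builder()
            // Evict by total cached bytes rather than by entry count.
            .weigher(|_key: &String, value: &Arc<Bytes>| value.len() as u32)
            .max_capacity(512 * 1024 * 1024) // ~512 MiB of cached object data
            .time_to_live(Duration::from_secs(300)) // TTL
            .time_to_idle(Duration::from_secs(60)) // TTI
            .build()
    }

    // Reads and writes are async and lock-free for readers:
    //     cache.insert(key, Arc::new(bytes)).await;
    //     let hit = cache.get(&key).await;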

* docs: add comprehensive Moka cache migration and metrics documentation

- Complete technical documentation of LRU to Moka migration
- Detailed performance comparison and benchmarks
- Comprehensive metrics catalog with 15+ Prometheus metrics
- Prometheus query examples for monitoring
- Dashboard and alerting recommendations
- Migration guide with code examples
- Troubleshooting guide for common issues
- Future enhancement roadmap

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix

* fix

* refactor: update tests for Moka cache implementation

- Completely refactor test suite to align with Moka-based concurrency.rs
- Add Clone derive to ConcurrencyManager for test convenience
- Update all tests to handle Moka's async behavior with proper delays
- Add new tests:
  * test_cache_hit_rate - validate hit rate calculation
  * test_ttl_expiration - verify TTL configuration
  * test_is_cached_no_side_effects - ensure contains doesn't affect LRU
  * bench_concurrent_cache_performance - benchmark concurrent access
- Updated existing tests:
  * test_moka_cache_operations - renamed and updated for Moka API
  * test_moka_cache_eviction - validate automatic eviction
  * test_hot_keys_tracking - improved assertions for sorted results
  * test_concurrent_cache_access - validate lock-free performance
- All tests now include appropriate sleep delays for Moka's async processing
- Enhanced documentation and assertions for better test clarity
- Total: 18 comprehensive integration tests

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* docs: add comprehensive Moka test suite documentation

- Complete test suite documentation for all 18 tests
- Detailed test patterns and best practices for Moka
- Debugging guide for common test failures
- Performance expectations and CI integration
- 100% API coverage validation
- Future enhancement roadmap

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix

* fmt

* docs: enhance code comments and test documentation throughout

- Significantly improve module-level documentation in concurrency.rs
- Add detailed explanations of performance characteristics and expected improvements
- Document the solution to issue #911 with before/after metrics
- Enhance constant documentation with purpose and impact explanations
- Add comprehensive RAII guard documentation with thread safety notes
- Improve test documentation with detailed test strategy and rationale
- Add Moka-specific test patterns and timing guidance
- Include performance expectations for each test category
- Document why each test matters for solving the original issue
- All documentation written in professional English
- Follow Rust documentation best practices with examples

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* remove lru crate

* upgrade version

* fix: resolve test errors by correcting module structure and test assertions

- Fix test import paths to use crate:: instead of rustfs:: (binary-only crate)
- Keep test file in src/storage/ instead of tests/ (no lib.rs exists)
- Add #[cfg(test)] guard to mod declaration in storage/mod.rs
- Fix Arc type annotations for Moka's ConcurrencyManager in concurrent tests
- Correct test_buffer_size_bounds assertions to match actual implementation:
  * Minimum buffer is 32KB for files <100KB, 64KB otherwise
  * Maximum buffer respects base_buffer_size when concurrency is low
  * Buffer sizing doesn't cap at file size, only at min/max constraints
- All 17 integration tests now pass successfully

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix: modify `TimeoutLayer::new` to `TimeoutLayer::with_status_code` and improve docker health check

* fix

* feat: implement cache writeback for small objects in GetObject

- Add cache writeback logic for objects meeting caching criteria:
  * No range/part request (full object retrieval)
  * Object size known and <= 10MB (max_object_size threshold)
  * Not encrypted (SSE-C or managed encryption)
- Read eligible objects into memory and cache via background task
- Serve response from in-memory data for immediate client response
- Add metrics counter for cache writeback operations
- Add 3 new tests for cache writeback functionality:
  * test_cache_writeback_flow - validates round-trip caching
  * test_cache_writeback_size_limit - ensures large objects aren't cached
  * test_cache_writeback_concurrent - validates thread-safe concurrent writes
- Update test suite documentation (now 20 comprehensive tests)

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* improve code for const

* cargo clippy

* feat: add cache enable/disable configuration via environment variable

- Add is_cache_enabled() method to ConcurrencyManager
- Read RUSTFS_OBJECT_CACHE_ENABLE env var (default: false) at startup
- Update ecfs.rs to check is_cache_enabled() before cache lookup and writeback
- Cache lookup and writeback now respect the enable flag
- Add test_cache_enable_configuration test
- Constants already exist in rustfs_config:
  * ENV_OBJECT_CACHE_ENABLE = "RUSTFS_OBJECT_CACHE_ENABLE"
  * DEFAULT_OBJECT_CACHE_ENABLE = false
- Total: 21 comprehensive tests passing

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
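
A sketch of the startup check using the constants listed above; the exact parsing rules are an assumption.

    const ENV_OBJECT_CACHE_ENABLE: &str = "RUSTFS_OBJECT_CACHE_ENABLE";
    const DEFAULT_OBJECT_CACHE_ENABLE: bool = false;

    /// Read the cache switch once at startup; anything that is not a valid
    /// boolean falls back to the (disabled) default.
    fn object_cache_enabled() -> bool {
        std::env::var(ENV_OBJECT_CACHE_ENABLE)
            .ok()
            .and_then(|v| v.trim().parse::<bool>().ok())
            .unwrap_or(DEFAULT_OBJECT_CACHE_ENABLE)
    }

    // get_object then gates both directions on the flag:
    //     if manager.is_cache_enabled() { /* cache lookup and writeback */ }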

* fix

* fmt

* fix

* fix

* feat: implement comprehensive CachedGetObject response cache with metadata

- Add CachedGetObject struct with full response metadata fields:
  * body, content_length, content_type, e_tag, last_modified
  * expires, cache_control, content_disposition, content_encoding
  * storage_class, version_id, delete_marker, tag_count, etc.
- Add dual cache architecture in HotObjectCache:
  * Legacy simple byte cache for backward compatibility
  * New response cache for complete GetObject responses
- Add ConcurrencyManager methods for response caching:
  * get_cached_object() - retrieve cached response with metadata
  * put_cached_object() - store complete response
  * invalidate_cache() - invalidate on write operations
  * invalidate_cache_versioned() - invalidate both version and latest
  * make_cache_key() - generate cache keys with version support
  * max_object_size() - get cache threshold
- Add builder pattern for CachedGetObject construction
- Add 6 new tests for response cache functionality (27 total):
  * test_cached_get_object_basic - basic operations
  * test_cached_get_object_versioned - version key handling
  * test_cache_invalidation - write operation invalidation
  * test_cache_invalidation_versioned - versioned invalidation
  * test_cached_get_object_size_limit - size enforcement
  * test_max_object_size - threshold accessor

All 27 tests pass successfully.

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* feat: integrate CachedGetObject cache in ecfs.rs with full metadata and cache invalidation

Integration of CachedGetObject response cache in ecfs.rs:
1. get_object: Cache lookup uses get_cached_object() with full metadata
   - Returns complete response with e_tag, last_modified, content_type, etc.
   - Parses last_modified from RFC3339 string
   - Supports versioned cache keys via make_cache_key()

2. get_object: Cache writeback uses put_cached_object() with metadata
   - Stores content_type, e_tag, last_modified in CachedGetObject
   - Background writeback via tokio::spawn()

3. Cache invalidation added to write operations:
   - put_object: invalidate_cache_versioned() after store.put_object()
   - put_object_extract: invalidate_cache_versioned() after each file extraction
   - copy_object: invalidate_cache_versioned() after store.copy_object()
   - delete_object: invalidate_cache_versioned() after store.delete_object()
   - delete_objects: invalidate_cache_versioned() for each deleted object
   - complete_multipart_upload: invalidate_cache_versioned() after completion

4. Fixed test_adaptive_buffer_sizing to be more robust with parallel tests

All 27 tests pass.

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix: add error logging for time parsing in cache lookup and writeback

- Add warning log when RFC3339 parsing fails in cache lookup
- Add warning log when time formatting fails in cache writeback
- Improves debugging for cache-related issues

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix

* fix

* upgrade version

* fmt

* add http: 4138 and add max_object_size key

* fix

* fix

* fix

* modify metrics key

* add

* upgrade crates version and improve docker observability

* feat: implement adaptive I/O strategy based on disk permit wait time

Implements adaptive I/O strategy for GetObject operations based on disk permit
wait times, as requested in issue #911.

Key changes:

1. New types in concurrency.rs:
   - IoLoadLevel enum: Low (<10ms), Medium (10-50ms), High (50-200ms), Critical (>200ms)
   - IoStrategy struct: contains optimized I/O parameters based on load level
   - IoLoadMetrics: rolling window metrics for load tracking

2. New ConcurrencyManager methods:
   - calculate_io_strategy(): calculates adaptive strategy from permit wait duration
   - record_permit_wait(): records observation for load tracking
   - smoothed_load_level(): returns averaged load level for stability
   - io_load_stats(): returns (avg_wait, p95_wait, max_wait, count) for monitoring
   - adaptive_buffer_size(): convenience method for buffer sizing

3. Integration in ecfs.rs get_object:
   - Calculate IoStrategy after acquiring disk permit
   - Use strategy buffer_size instead of static sizing
   - Consider strategy.cache_writeback_enabled in cache decision
   - Record new metrics: io.load.level gauge, io.buffer.multiplier gauge,
     io.strategy.selected counter by load level

4. New tests (4 added, 31 total):
   - test_io_load_level_classification: validates load level thresholds
   - test_io_strategy_buffer_sizing: validates buffer multipliers
   - test_calculate_io_strategy: validates manager strategy calculation
   - test_io_load_stats: validates stats tracking

All 31 tests pass. Clippy clean. Formatted.

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
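
The wait-time thresholds above map directly onto a small classifier; a sketch (the buffer multipliers and other IoStrategy fields attached to each level are omitted):

    use std::time::Duration;

    #[derive(Clone, Copy, Debug, PartialEq, Eq)]
    enum IoLoadLevel {
        Low,      // permit wait < 10 ms
        Medium,   // 10-50 ms
        High,     // 50-200 ms
        Critical, // > 200 ms
    }

    fn classify_permit_wait(wait: Duration) -> IoLoadLevel {
        let ms = wait.as_millis();
        if ms < 10 {
            IoLoadLevel::Low
        } else if ms < 50 {
            IoLoadLevel::Medium
        } else if ms < 200 {
            IoLoadLevel::High
        } else {
            IoLoadLevel::Critical
        }
    }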

* fix

* fix

* docs: add comprehensive architecture design and fix cache hit event notification

1. Added CONCURRENCY_ARCHITECTURE.md with complete design documentation:
   - Executive summary and problem statement
   - Architecture overview with request flow diagram
   - Detailed module analysis for concurrency.rs and ecfs.rs
   - Critical analysis of helper.complete() for cache hits
   - Adaptive I/O strategy design with algorithm
   - Cache architecture with CachedGetObject structure
   - Metrics and monitoring with Prometheus queries
   - Performance characteristics and future enhancements

2. Fixed critical issue: Cache hit path now calls helper.complete()
   - S3 bucket notifications (s3:GetObject events) now trigger for cache hits
   - Event-driven workflows (Lambda, SNS) work correctly for all object access
   - Maintains audit trail for both cache hits and misses

All 31 tests pass.

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix: set object info and version_id on helper before complete() for cache hits

When serving from cache, properly configure the OperationHelper before
calling complete() to ensure S3 bucket notifications include complete
object metadata:

1. Build ObjectInfo from cached metadata:
   - bucket, name, size, actual_size
   - etag, mod_time, version_id, delete_marker
   - storage_class, content_type, content_encoding
   - user_metadata (user_defined)

2. Set helper.object(event_info).version_id(version_id_str) before complete()

3. Updated CONCURRENCY_ARCHITECTURE.md with:
   - Complete code example for cache hit event notification
   - Explanation of why ObjectInfo is required
   - Documentation of version_id handling

This ensures:
- Lambda triggers receive proper object metadata for cache hits
- SNS/SQS notifications include complete information
- Audit logs contain accurate object details
- Version-specific event routing works correctly

All 31 tests pass.

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix

* improve code

* fmt

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-11-30 01:16:55 +08:00
Serhiy Novoseletskiy
a6cf0740cb Updated RUSTFS_VOLUMES (#922)
1. Removed .rustfs.svc.cluster.local, as all pods of the StatefulSet run in the same namespace
2. Used "rustfs.fullname", as it is already used in the StatefulSet services and StatefulSet names

Co-authored-by: houseme <housemecn@gmail.com>
2025-11-29 23:50:18 +08:00
loverustfs
a2e3a719d3 Improve reading experience 2025-11-28 16:03:41 +08:00
loverustfs
76efee37fa fix error 2025-11-28 15:23:26 +08:00
loverustfs
fd7c0964a0 Modify Readme 2025-11-28 15:16:59 +08:00
唐小鸭
701960dd81 fix out of range for slice (#931) 2025-11-27 15:57:38 +08:00
Shyim
ee04cc77a0 remove debug (#912)
* remove debug

* Refactor get_global_encryption_service function

* Refactor get_global_encryption_service function

---------

Co-authored-by: loverustfs <hello@rustfs.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-11-26 11:56:01 +08:00
houseme
069194f553 Fix/getobjectlength (#920)
* fix getobject content length resp

* Fix regression in exception handling for non-existent key with enhanced compression predicate and metadata improvements (#915)

* Initial plan

* Fix GetObject regression by excluding error responses from compression

The issue was that CompressionLayer was attempting to compress error responses,
which could cause Content-Length header mismatches. By excluding 4xx and 5xx
responses from compression, we ensure error responses (like NoSuchKey) are sent
correctly without body truncation.

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Enhance NoSuchKey fix with improved compression predicate and comprehensive tests

- Enhanced ShouldCompress predicate with size-based exclusion (< 256 bytes)
- Added detailed documentation explaining the compression logic
- Added debug logging for better observability
- Created comprehensive test suite with 4 test cases:
  - test_get_deleted_object_returns_nosuchkey
  - test_head_deleted_object_returns_nosuchkey
  - test_get_nonexistent_object_returns_nosuchkey
  - test_multiple_gets_deleted_object
- Added extensive inline documentation and comments
- Created docs/fix-nosuchkey-regression.md with full analysis

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
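
The decision logic described above, reduced to a plain function; the real change wires an equivalent predicate into the HTTP compression layer rather than calling a free function.

    use http::StatusCode;

    fn should_compress(status: StatusCode, content_length: Option<u64>) -> bool {
        // Never compress error responses: rewriting the body of a NoSuchKey
        // (404) reply is what broke Content-Length in the regression.
        if status.is_client_error() || status.is_server_error() {
            return false;
        }
        // Tiny bodies (< 256 bytes) gain nothing from compression.
        !matches!(content_length, Some(len) if len < 256)
    }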

* Add compression best practices documentation

Added comprehensive guide covering:
- Best practices for HTTP response compression
- Common pitfalls and solutions
- Performance considerations and trade-offs
- Testing guidelines and examples
- Monitoring and alerting recommendations
- Migration guide for existing services

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix

* fmt

* fmt

* Fix/objectdelete (#917)

* fix getobject content length resp

* fix delete object

---------

Co-authored-by: houseme <housemecn@gmail.com>

* Add comprehensive analysis of NoSuchKey fix and related improvements

Created detailed documentation analyzing:
- HTTP compression layer fix (primary issue)
- Content-length calculation fix from PR #917
- Delete object metadata fixes from PR #917
- How all components work together
- Complete scenario walkthrough
- Performance impact analysis
- Testing strategy and deployment checklist

This ties together all the changes in the PR branch including the merged
improvements from PR #917.

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* replace `once_cell` to `std`

* fmt

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: weisd <im@weisd.in>

* fmt

---------

Co-authored-by: weisd <weishidavip@163.com>
Co-authored-by: Copilot <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
Co-authored-by: weisd <im@weisd.in>
2025-11-24 18:56:34 +08:00
weisd
fce4e64da4 Fix/objectdelete (#917)
* fix getobject content length resp

* fix delete object

---------

Co-authored-by: houseme <housemecn@gmail.com>
2025-11-24 16:35:51 +08:00
houseme
44bdebe6e9 build(deps): bump the dependencies group with 10 updates (#914)
* build(deps): bump the dependencies group with 10 updates

* build(deps): bump the dependencies group with 8 updates (#913)

Bumps the dependencies group with 8 updates:

| Package | From | To |
| --- | --- | --- |
| [bytesize](https://github.com/bytesize-rs/bytesize) | `2.2.0` | `2.3.0` |
| [aws-config](https://github.com/smithy-lang/smithy-rs) | `1.8.10` | `1.8.11` |
| [aws-credential-types](https://github.com/smithy-lang/smithy-rs) | `1.2.9` | `1.2.10` |
| [aws-sdk-s3](https://github.com/awslabs/aws-sdk-rust) | `1.113.0` | `1.115.0` |
| [convert_case](https://github.com/rutrum/convert-case) | `0.9.0` | `0.10.0` |
| [hashbrown](https://github.com/rust-lang/hashbrown) | `0.16.0` | `0.16.1` |
| [rumqttc](https://github.com/bytebeamio/rumqtt) | `0.25.0` | `0.25.1` |
| [starshard](https://github.com/houseme/starshard) | `0.5.0` | `0.6.0` |


Updates `bytesize` from 2.2.0 to 2.3.0
- [Release notes](https://github.com/bytesize-rs/bytesize/releases)
- [Changelog](https://github.com/bytesize-rs/bytesize/blob/master/CHANGELOG.md)
- [Commits](https://github.com/bytesize-rs/bytesize/compare/bytesize-v2.2.0...bytesize-v2.3.0)

Updates `aws-config` from 1.8.10 to 1.8.11
- [Release notes](https://github.com/smithy-lang/smithy-rs/releases)
- [Changelog](https://github.com/smithy-lang/smithy-rs/blob/main/CHANGELOG.md)
- [Commits](https://github.com/smithy-lang/smithy-rs/commits)

Updates `aws-credential-types` from 1.2.9 to 1.2.10
- [Release notes](https://github.com/smithy-lang/smithy-rs/releases)
- [Changelog](https://github.com/smithy-lang/smithy-rs/blob/main/CHANGELOG.md)
- [Commits](https://github.com/smithy-lang/smithy-rs/commits)

Updates `aws-sdk-s3` from 1.113.0 to 1.115.0
- [Release notes](https://github.com/awslabs/aws-sdk-rust/releases)
- [Commits](https://github.com/awslabs/aws-sdk-rust/commits)

Updates `convert_case` from 0.9.0 to 0.10.0
- [Commits](https://github.com/rutrum/convert-case/commits)

Updates `hashbrown` from 0.16.0 to 0.16.1
- [Release notes](https://github.com/rust-lang/hashbrown/releases)
- [Changelog](https://github.com/rust-lang/hashbrown/blob/master/CHANGELOG.md)
- [Commits](https://github.com/rust-lang/hashbrown/compare/v0.16.0...v0.16.1)

Updates `rumqttc` from 0.25.0 to 0.25.1
- [Release notes](https://github.com/bytebeamio/rumqtt/releases)
- [Changelog](https://github.com/bytebeamio/rumqtt/blob/main/CHANGELOG.md)
- [Commits](https://github.com/bytebeamio/rumqtt/compare/rumqttc-0.25.0...rumqttc-0.25.1)

Updates `starshard` from 0.5.0 to 0.6.0
- [Commits](https://github.com/houseme/starshard/compare/0.5.0...0.6.0)

---
updated-dependencies:
- dependency-name: bytesize
  dependency-version: 2.3.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: dependencies
- dependency-name: aws-config
  dependency-version: 1.8.11
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
- dependency-name: aws-credential-types
  dependency-version: 1.2.10
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
- dependency-name: aws-sdk-s3
  dependency-version: 1.115.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: dependencies
- dependency-name: convert_case
  dependency-version: 0.10.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: dependencies
- dependency-name: hashbrown
  dependency-version: 0.16.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
- dependency-name: rumqttc
  dependency-version: 0.25.1
  dependency-type: direct:production
  update-type: version-update:semver-patch
  dependency-group: dependencies
- dependency-name: starshard
  dependency-version: 0.6.0
  dependency-type: direct:production
  update-type: version-update:semver-minor
  dependency-group: dependencies
...

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>

---------

Signed-off-by: dependabot[bot] <support@github.com>
Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com>
2025-11-24 11:03:35 +08:00
majinghe
2b268fdd7f update tls configuration in helm chart (#900)
* update tls configuration in helm chart

* typo fix
2025-11-20 22:20:11 +08:00
houseme
18cd9a8b46 build(deps): bump the dependencies group with 5 updates (#896) 2025-11-20 13:04:24 +08:00
loverustfs
e14809ee04 Revise data sovereignty and compliance details in README
Updated the comparison between RustFS and other object storage solutions to clarify data sovereignty and compliance aspects.
2025-11-20 09:11:15 +08:00
loverustfs
390d051ddd Update README.md
Correcting inaccurate expressions
2025-11-20 08:55:14 +08:00
weisd
b73ca0220e add ifmatch support (#885)
Co-authored-by: loverustfs <hello@rustfs.com>
2025-11-20 08:40:47 +08:00
loverustfs
b4071d493c Modify README 2025-11-20 08:24:07 +08:00
Copilot
277d80de13 Fix: Implement priority-based heal queue with comprehensive diagnostic logging (#884)
* Initial plan

* Implement priority-based heal queue with deduplication

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Apply cargo fmt formatting fixes

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Add queue monitoring, better error handling, and adaptive processing

- Add priority-based queue statistics tracking
- Implement queue capacity warnings (>80% full)
- Process multiple tasks per cycle when capacity allows
- Add proper error logging for failed heal request submissions
- Add Hash trait to HealPriority for HashMap support
- Improve observability with detailed queue status logs

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Add comprehensive diagnostic logging to heal flow

- Add detailed logging for shard availability checks during reads
- Log successful heal request submissions
- Add detailed logging in heal_object for disk checking
- Log each disk's heal decision with reason and endpoint
- Add logging for heal writing and renaming phases
- Improve variable naming (nil_count -> available_shards)
- Add success confirmation messages for renamed healed data

These logs will help diagnose where the heal flow is failing.

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix

* feat(heal): enhance robustness, safety, and observability of heal process

- **Logging & Observability**:
  - Upgrade critical heal logs from `info` to `warn` for better visibility.
  - Implement structured logging with `tracing` fields for machine readability.
  - Add `#[tracing::instrument]` to `HealTask` and `SetDisks` methods for automatic context propagation.

- **Robustness**:
  - Add exponential backoff retry (3 attempts) for acquiring write locks in `heal_object` to handle contention.
  - Handle `rename_data` failures gracefully by preserving temporary files instead of forcing deletion, preventing potential data loss.

- **Data Safety**:
  - Fix `object_exists` to propagate IO errors instead of treating them as "object not found".
  - Update `ErasureSetHealer` to mark objects as failed rather than skipped when existence checks error, ensuring they are tracked for retry.
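
A generic sketch of the backoff loop described under Robustness; `try_acquire_write_lock` and the delay values stand in for whatever lock API and tuning heal_object actually uses.

    use std::time::Duration;

    async fn acquire_with_backoff<F, Fut, T, E>(mut try_acquire_write_lock: F) -> Result<T, E>
    where
        F: FnMut() -> Fut,
        Fut: std::future::Future<Output = Result<T, E>>,
    {
        let mut delay = Duration::from_millis(50); // illustrative base delay
        let mut last_err = None;
        for attempt in 0..3 {
            match try_acquire_write_lock().await {
                Ok(guard) => return Ok(guard),
                Err(e) => {
                    last_err = Some(e);
                    if attempt < 2 {
                        tokio::time::sleep(delay).await;
                        delay *= 2; // 50 ms, then 100 ms, then give up
                    }
                }
            }
        }
        Err(last_err.expect("at least one attempt was made"))
    }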

* fix

* fmt

* improve code for heal_object

* fix

* fix

* fix

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-11-20 00:36:25 +08:00
shiro.lee
9b9bbb662b fix: removing the Limit on the Number of Object Versions (#819) (#892)
removing the Limit on the Number of Object Versions (#819)
2025-11-19 22:34:26 +08:00
majinghe
44f3f3d070 add standalone mode support (#881)
* add standalone mode support

* update readme file

* change non-root from 1000 to 10001

* delete self sign crt content

* modify security content

* fix syntax error for readme file.

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* update image repository and tag info.

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* fix helm chart syntax issue.

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* fix helm chart syntax issue.

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

---------

Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
Co-authored-by: loverustfs <hello@rustfs.com>
2025-11-19 13:56:21 +08:00
weisd
a13ce08590 fix listbucket sign (#890) 2025-11-19 11:08:16 +08:00
weisd
55d44622ed list object include deleted support (#882)
Co-authored-by: houseme <housemecn@gmail.com>
2025-11-18 21:51:10 +08:00
Copilot
6da5766ea2 Implement adaptive buffer sizing with workload profiles - Production-Ready Implementation (#878)
* Initial plan

* Add workload profiles module with adaptive buffer sizing

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Add comprehensive adaptive buffer sizing documentation

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Add implementation summary documentation

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Implement Phase 2: Opt-In Usage for buffer profiles

- Add RUSTFS_BUFFER_PROFILE_ENABLE and RUSTFS_BUFFER_PROFILE config options
- Implement global buffer configuration with enable/disable flag
- Add WorkloadProfile::from_name() for parsing profile names
- Update put_object, put_object_extract, and upload_part to use opt-in function
- Add init_buffer_profile_system() in main.rs for initialization
- Add comprehensive tests for opt-in functionality
- Update documentation with Phase 2 usage examples

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
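
A sketch of the wiring described above; the two environment variable names, from_name(), and the GeneralPurpose profile come from the commit, while the other variant names are placeholders and the RUSTFS_BUFFER_PROFILE_ENABLE gate and per-profile buffer sizes are left out.

    #[derive(Clone, Copy, Debug)]
    enum WorkloadProfile {
        GeneralPurpose,
        SmallObjects, // placeholder variant
        LargeObjects, // placeholder variant
    }

    impl WorkloadProfile {
        /// Parse a profile name coming from RUSTFS_BUFFER_PROFILE.
        fn from_name(name: &str) -> Option<Self> {
            match name.to_ascii_lowercase().as_str() {
                "generalpurpose" | "general" => Some(Self::GeneralPurpose),
                "smallobjects" | "small" => Some(Self::SmallObjects),
                "largeobjects" | "large" => Some(Self::LargeObjects),
                _ => None,
            }
        }
    }

    /// Resolve the active profile from the environment, defaulting to
    /// GeneralPurpose when the variable is unset or unrecognized.
    fn active_profile() -> WorkloadProfile {
        std::env::var("RUSTFS_BUFFER_PROFILE")
            .ok()
            .and_then(|name| WorkloadProfile::from_name(&name))
            .unwrap_or(WorkloadProfile::GeneralPurpose)
    }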

* Implement Phase 3: Default Enablement for buffer profiles

- Change default to enabled with GeneralPurpose profile
- Replace --buffer-profile-enable with --buffer-profile-disable for opt-out
- Update init_buffer_profile_system() to reflect Phase 3 logic
- Add comprehensive Phase 3 documentation
- Create MIGRATION_PHASE3.md guide for smooth transition
- Add test for Phase 3 default behavior
- Update IMPLEMENTATION_SUMMARY.md to mark Phase 3 as complete

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Implement Phase 4: Full Integration with deprecated legacy and metrics

- Deprecate get_adaptive_buffer_size() function (backward compatible)
- Implement profile-only buffer sizing in get_buffer_size_opt_in()
- Add performance metrics collection (optional, via metrics feature)
- Even disabled mode now uses GeneralPurpose profile (no hardcoded values)
- Add comprehensive Phase 4 documentation and guide
- Add test_phase4_full_integration() test
- Update IMPLEMENTATION_SUMMARY.md to mark Phase 4 as complete

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Finalize adaptive buffer sizing: remove deprecated code and improve documentation

- Remove deprecated get_adaptive_buffer_size() function
- Remove deprecated test cases (test_adaptive_buffer_size, Phase 3 legacy comparisons)
- Improve all code comments to be clear and professional English
- Add comprehensive bilingual implementation summary (COMPLETE_SUMMARY.md)
- Update buffer sizing comments in put_object, put_object_extract, upload_part
- Enhance init_buffer_profile_system documentation
- Clean up phase-specific references in comments

This commit completes the adaptive buffer sizing implementation by:
1. Removing all deprecated legacy code and tests
2. Improving code documentation quality
3. Providing comprehensive bilingual summary

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fmt

* fix

* fix

* fix

* fix

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-11-18 13:32:02 +08:00
weisd
85bc0ce2d5 fix: filemeta version handling and delete operations (#879)
* fix filemeta version

* fix clippy

* fix delete version

* fix clippy/test
2025-11-18 09:24:22 +08:00
Copilot
601f3456bc Fix large file upload freeze with adaptive buffer sizing (#869)
* Initial plan

* Fix large file upload freeze by increasing StreamReader buffer size

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Add comprehensive documentation for large file upload freeze fix

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* upgrade s3s version

* Fix compilation error: use BufReader instead of non-existent StreamReader::with_capacity

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Update documentation with correct BufReader implementation

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* add tokio feature `io-util`

* Implement adaptive buffer sizing based on file size

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
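
A sketch of the buffering approach described above: tokio-util's StreamReader has no with_capacity constructor, so the sized buffer is layered on with tokio::io::BufReader instead; the size tiers here are illustrative, not the repository's constants.

    use tokio::io::{AsyncRead, BufReader};

    /// Pick a read-buffer size from the (known) upload size.
    fn buffer_size_for(file_size: u64) -> usize {
        match file_size {
            0..=1_048_576 => 64 * 1024,            // <= 1 MiB
            1_048_577..=104_857_600 => 512 * 1024, // <= 100 MiB
            _ => 2 * 1024 * 1024,                  // larger uploads
        }
    }

    fn buffered<R: AsyncRead>(inner: R, file_size: u64) -> BufReader<R> {
        BufReader::with_capacity(buffer_size_for(file_size), inner)
    }

    // Typical use: wrap the body stream first, then buffer it.
    //     let reader = tokio_util::io::StreamReader::new(byte_stream);
    //     let reader = buffered(reader, declared_content_length);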

* Constants are managed uniformly and fmt code

* fix

* Fix: Trigger self-heal on read when shards missing from rejoined nodes (#871)

* Initial plan

* Fix: Trigger self-heal when missing shards detected during read

- Added proactive heal detection in get_object_with_fileinfo
- When reading an object, now checks if any shards are missing even if read succeeds
- Sends low-priority heal request to reconstruct missing shards on rejoined nodes
- This fixes the issue where data written during node outage is not healed when node rejoins

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix

* Unify CRC implementations to crc-fast (#873)

* Initial plan

* Replace CRC libraries with unified crc-fast implementation

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* fix

* fix: replace low to Normal

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-11-17 23:15:20 +08:00
weisd
1279baa72b fix replication (#875) 2025-11-17 17:37:41 +08:00
weisd
acdefb6703 fix read lock (#866) 2025-11-16 11:44:13 +08:00
Copilot
b7964081ce Fix KMS configuration synchronization across cluster nodes (#855)
* Initial plan

* Add KMS configuration persistence to cluster storage

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* Apply code formatting to KMS configuration changes

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* add comment

* fix fmt

* fix

* Fix overlapping dependabot cargo configurations

Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>

* improve code for comment and replace  `Once_Cell` to `std::sync::OnceLock`

---------

Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: loverustfs <155562731+loverustfs@users.noreply.github.com>
2025-11-16 00:05:03 +08:00
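
The once_cell-to-std swap mentioned above follows the standard OnceLock pattern; the KmsConfig fields here are placeholders for the real type.

    use std::sync::OnceLock;

    #[derive(Debug, Clone)]
    struct KmsConfig {
        endpoint: String,       // placeholder field
        default_key_id: String, // placeholder field
    }

    // Set once after the configuration is loaded from cluster storage,
    // then read lock-free from any thread.
    static KMS_CONFIG: OnceLock<KmsConfig> = OnceLock::new();

    fn set_kms_config(cfg: KmsConfig) -> Result<(), KmsConfig> {
        // Err(cfg) means another caller initialized it first.
        KMS_CONFIG.set(cfg)
    }

    fn kms_config() -> Option<&'static KmsConfig> {
        KMS_CONFIG.get()
    }
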
Nugine
f73fa59bf6 ci: fix dependabot (#860) 2025-11-15 22:35:59 +08:00
Nugine
0b1b7832fe ci: update s3s weekly (#858) 2025-11-15 22:05:03 +08:00
houseme
c242957c6f build(deps): bump the dependencies group with 8 updates (#857) 2025-11-15 19:51:07 +08:00
houseme
55e3a1f7e0 fix(audit): prevent state transition when no targets exist (#854)
Avoid setting AuditSystemState::Starting when target list is empty.
Now checks target availability before state transition, keeping the
system in Stopped state if no enabled targets are found.

- Check targets.is_empty() before setting Starting state
- Return early with Ok(()) when no targets exist
- Maintain consistent state machine behavior
- Prevent transient "Starting" state with no actual targets

Resolves issue where audit system would incorrectly enter Starting
state even when configuration contained no enabled targets.
2025-11-14 20:23:21 +08:00
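
A sketch of the guard described above, with placeholder types standing in for the actual audit system API.

    #[derive(Debug, PartialEq)]
    enum AuditSystemState {
        Stopped,
        Starting,
    }

    struct AuditSystem {
        state: AuditSystemState,
        targets: Vec<String>, // enabled targets; placeholder element type
    }

    impl AuditSystem {
        fn start(&mut self) -> Result<(), String> {
            // Check availability before any state transition: with nothing to
            // start, return early and stay Stopped instead of flashing a
            // transient Starting state.
            if self.targets.is_empty() {
                return Ok(());
            }
            self.state = AuditSystemState::Starting;
            // ... dispatch to the enabled targets here ...
            Ok(())
        }
    }
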
majinghe
3cf565e847 delete sink file path env and update readme file with container user change (#852)
* update container user change in readme file

* delete sink file path env vars

---------

Co-authored-by: houseme <housemecn@gmail.com>
2025-11-14 13:00:15 +08:00
houseme
9d553620cf remove linux dep and upgrade Protocol Buffers and FlatBuffers (#853) 2025-11-14 12:50:55 +08:00
houseme
51584986e1 feat(obs): unify metrics initialization and fix exporter move error (#851)
* feat(obs): unify metrics initialization and fix exporter move error

- Fix Rust E0382 (use after move) by removing duplicate MetricExporter consumption.
- Consolidate MeterProvider construction into single Recorder builder path.
- Remove redundant Recorder::builder(...).install_global() call.
- Ensure PeriodicReader setup is performed only once (HTTP + optional stdout).
- Set global meter provider and metrics recorder exactly once.
- Preserve existing behavior for stdout/file vs HTTP modes.
- Minor cleanup: consistent resource reuse and interval handling.

* update telemetry.rs

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* fix

* fix

* fix

* fix: modify logger level from error to event

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-11-14 00:50:07 +08:00
majinghe
93090adf7c enhance security context part for k8s deployment (#850) 2025-11-13 18:18:19 +08:00
houseme
d4817a4bea fix: modify logger from warn to info (#842)
* fix: modify logger from warn to info

* upgrade version
2025-11-12 11:13:38 +08:00
230 changed files with 21579 additions and 3241 deletions

View File

@@ -34,61 +34,111 @@ services:
ports:
- "3200:3200" # tempo
- "24317:4317" # otlp grpc
- "24318:4318" # otlp http
restart: unless-stopped
networks:
- otel-network
healthcheck:
test: [ "CMD", "wget", "--spider", "-q", "http://localhost:3200/metrics" ]
interval: 10s
timeout: 5s
retries: 3
start_period: 15s
otel-collector:
image: otel/opentelemetry-collector-contrib:latest
environment:
- TZ=Asia/Shanghai
volumes:
- ./otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml
- ./otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml:ro
ports:
- "1888:1888"
- "8888:8888"
- "8889:8889"
- "13133:13133"
- "4317:4317"
- "4318:4318"
- "55679:55679"
- "1888:1888" # pprof
- "8888:8888" # Prometheus metrics for Collector
- "8889:8889" # Prometheus metrics for application indicators
- "13133:13133" # health check
- "4317:4317" # OTLP gRPC
- "4318:4318" # OTLP HTTP
- "55679:55679" # zpages
networks:
- otel-network
depends_on:
jaeger:
condition: service_started
tempo:
condition: service_started
prometheus:
condition: service_started
loki:
condition: service_started
healthcheck:
test: [ "CMD", "wget", "--spider", "-q", "http://localhost:13133" ]
interval: 10s
timeout: 5s
retries: 3
jaeger:
image: jaegertracing/jaeger:latest
environment:
- TZ=Asia/Shanghai
- SPAN_STORAGE_TYPE=memory
- COLLECTOR_OTLP_ENABLED=true
ports:
- "16686:16686"
- "14317:4317"
- "14318:4318"
- "16686:16686" # Web UI
- "14317:4317" # OTLP gRPC
- "14318:4318" # OTLP HTTP
- "18888:8888" # collector
networks:
- otel-network
healthcheck:
test: [ "CMD", "wget", "--spider", "-q", "http://localhost:16686" ]
interval: 10s
timeout: 5s
retries: 3
prometheus:
image: prom/prometheus:latest
environment:
- TZ=Asia/Shanghai
volumes:
- ./prometheus.yml:/etc/prometheus/prometheus.yml
- ./prometheus.yml:/etc/prometheus/prometheus.yml:ro
- ./prometheus-data:/prometheus
ports:
- "9090:9090"
command:
- '--config.file=/etc/prometheus/prometheus.yml'
- '--web.enable-otlp-receiver' # Enable OTLP
- '--web.enable-remote-write-receiver' # Enable remote write
- '--enable-feature=promql-experimental-functions' # Enable info()
- '--storage.tsdb.min-block-duration=15m' # Minimum block duration
- '--storage.tsdb.max-block-duration=1h' # Maximum block duration
- '--log.level=info'
- '--storage.tsdb.retention.time=30d'
- '--storage.tsdb.path=/prometheus'
- '--web.console.libraries=/usr/share/prometheus/console_libraries'
- '--web.console.templates=/usr/share/prometheus/consoles'
restart: unless-stopped
networks:
- otel-network
healthcheck:
test: [ "CMD", "wget", "--spider", "-q", "http://localhost:9090/-/healthy" ]
interval: 10s
timeout: 5s
retries: 3
loki:
image: grafana/loki:latest
environment:
- TZ=Asia/Shanghai
volumes:
- ./loki-config.yaml:/etc/loki/local-config.yaml
- ./loki-config.yaml:/etc/loki/local-config.yaml:ro
ports:
- "3100:3100"
command: -config.file=/etc/loki/local-config.yaml
networks:
- otel-network
healthcheck:
test: [ "CMD", "wget", "--spider", "-q", "http://localhost:3100/ready" ]
interval: 10s
timeout: 5s
retries: 3
grafana:
image: grafana/grafana:latest
ports:
@@ -97,14 +147,32 @@ services:
- ./grafana-datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml
environment:
- GF_SECURITY_ADMIN_PASSWORD=admin
- GF_SECURITY_ADMIN_USER=admin
- TZ=Asia/Shanghai
- GF_INSTALL_PLUGINS=grafana-pyroscope-datasource
restart: unless-stopped
networks:
- otel-network
depends_on:
- prometheus
- tempo
- loki
healthcheck:
test: [ "CMD", "wget", "--spider", "-q", "http://localhost:3000/api/health" ]
interval: 10s
timeout: 5s
retries: 3
volumes:
prometheus-data:
tempo-data:
networks:
otel-network:
driver: bridge
name: "network_otel_config"
ipam:
config:
- subnet: 172.28.0.0/16
driver_opts:
com.docker.network.enable_ipv6: "true"

View File

@@ -42,7 +42,7 @@ datasources:
customQuery: true
query: 'method="$${__span.tags.method}"'
tracesToMetrics:
datasourceUid: 'prom'
datasourceUid: 'prometheus'
spanStartTimeShift: '-1h'
spanEndTimeShift: '1h'
tags: [ { key: 'service.name', value: 'service' }, { key: 'job' } ]
@@ -91,7 +91,7 @@ datasources:
customQuery: true
query: 'method="$${__span.tags.method}"'
tracesToMetrics:
datasourceUid: 'prom'
datasourceUid: 'Prometheus'
spanStartTimeShift: '1h'
spanEndTimeShift: '-1h'
tags: [ { key: 'service.name', value: 'service' }, { key: 'job' } ]

View File

@@ -65,6 +65,7 @@ extensions:
some_store:
memory:
max_traces: 1000000
max_events: 100000
another_store:
memory:
max_traces: 1000000
@@ -102,6 +103,7 @@ receivers:
processors:
batch:
metadata_keys: [ "span.kind", "http.method", "http.status_code", "db.system", "db.statement", "messaging.system", "messaging.destination", "messaging.operation","span.events","span.links" ]
# Adaptive Sampling Processor is required to support adaptive sampling.
# It expects remote_sampling extension with `adaptive:` config to be enabled.
adaptive_sampling:

View File

@@ -41,6 +41,9 @@ query_range:
limits_config:
metric_aggregation_enabled: true
max_line_size: 256KB
max_line_size_truncate: false
allow_structured_metadata: true
schema_config:
configs:
@@ -51,6 +54,7 @@ schema_config:
index:
prefix: index_
period: 24h
row_shards: 16
pattern_ingester:
enabled: true

View File

@@ -15,66 +15,108 @@
receivers:
otlp:
protocols:
grpc: # OTLP gRPC receiver
grpc: # OTLP gRPC receiver
endpoint: 0.0.0.0:4317
http: # OTLP HTTP receiver
http: # OTLP HTTP receiver
endpoint: 0.0.0.0:4318
processors:
batch: # Batch processor to improve throughput
batch: # Batch processor to improve throughput
timeout: 5s
send_batch_size: 1000
metadata_keys: [ ]
metadata_cardinality_limit: 1000
memory_limiter:
check_interval: 1s
limit_mib: 512
transform/logs:
log_statements:
- context: log
statements:
# Extract Body as attribute "message"
- set(attributes["message"], body.string)
# Retain the original Body
- set(attributes["log.body"], body.string)
exporters:
otlp/traces: # OTLP exporter for trace data
endpoint: "jaeger:4317" # OTLP gRPC endpoint for Jaeger
otlp/traces: # OTLP exporter for trace data
endpoint: "http://jaeger:4317" # OTLP gRPC endpoint for Jaeger
tls:
insecure: true # TLS disabled in development; configure certificates in production
otlp/tempo: # OTLP exporter for trace data
endpoint: "tempo:4317" # OTLP gRPC endpoint for tempo
insecure: true # TLS is disabled in the development environment and a certificate needs to be configured in the production environment.
compression: gzip # Enable compression to reduce network bandwidth
retry_on_failure:
enabled: true # Enable retry on failure
initial_interval: 1s # Initial interval for retry
max_interval: 30s # Maximum interval for retry
max_elapsed_time: 300s # Maximum elapsed time for retry
sending_queue:
enabled: true # Enable sending queue
num_consumers: 10 # Number of consumers
queue_size: 5000 # Queue size
otlp/tempo: # OTLP exporter for trace data
endpoint: "http://tempo:4317" # OTLP gRPC endpoint for tempo
tls:
insecure: true # TLS disabled in development; configure certificates in production
prometheus: # Prometheus exporter for metrics data
endpoint: "0.0.0.0:8889" # Prometheus scrape endpoint
namespace: "rustfs" # Metric prefix
send_timestamps: true # Send timestamps
# enable_open_metrics: true
otlphttp/loki: # Loki exporter for log data
endpoint: "http://loki:3100/otlp/v1/logs"
insecure: true # TLS is disabled in the development environment and a certificate needs to be configured in the production environment.
compression: gzip # Enable compression to reduce network bandwidth
retry_on_failure:
enabled: true # Enable retry on failure
initial_interval: 1s # Initial interval for retry
max_interval: 30s # Maximum interval for retry
max_elapsed_time: 300s # Maximum elapsed time for retry
sending_queue:
enabled: true # Enable sending queue
num_consumers: 10 # Number of consumers
queue_size: 5000 # Queue size
prometheus: # Prometheus exporter for metrics data
endpoint: "0.0.0.0:8889" # Prometheus scraping endpoint
namespace: "metrics" # indicator prefix
send_timestamps: true # Send timestamp
metric_expiration: 5m # Metric expiration time
resource_to_telemetry_conversion:
enabled: true # Enable resource to telemetry conversion
otlphttp/loki: # Loki exporter for log data
endpoint: "http://loki:3100/otlp"
tls:
insecure: true
compression: gzip # Enable compression to reduce network bandwidth
extensions:
health_check:
endpoint: 0.0.0.0:13133
pprof:
endpoint: 0.0.0.0:1888
zpages:
endpoint: 0.0.0.0:55679
service:
extensions: [ health_check, pprof, zpages ] # Enable extensions
extensions: [ health_check, pprof, zpages ] # Enable extension
pipelines:
traces:
receivers: [ otlp ]
processors: [ memory_limiter,batch ]
exporters: [ otlp/traces,otlp/tempo ]
processors: [ memory_limiter, batch ]
exporters: [ otlp/traces, otlp/tempo ]
metrics:
receivers: [ otlp ]
processors: [ batch ]
exporters: [ prometheus ]
logs:
receivers: [ otlp ]
processors: [ batch ]
processors: [ batch, transform/logs ]
exporters: [ otlphttp/loki ]
telemetry:
logs:
level: "info" # Collector 日志级别
level: "debug" # Collector log level
encoding: "json" # Log encoding: console or json
metrics:
level: "detailed" # 可以是 basic, normal, detailed
level: "detailed" # Can be basic, normal, detailed
readers:
- periodic:
exporter:
otlp:
protocol: http/protobuf
endpoint: http://otel-collector:4318
- pull:
exporter:
prometheus:
host: '0.0.0.0'
port: 8888
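
For reference, an application exporting telemetry to this collector would target the OTLP HTTP receiver defined above (port 4318). Below is a minimal sketch, assuming the opentelemetry-otlp 0.31-series builder API pinned in the workspace Cargo.toml; the endpoint value and function name are illustrative, and feature flags and builder names should be verified against the crate docs. The returned exporter would then be installed into an SDK tracer provider (RustFS itself points at these endpoints through the RUSTFS_OBS_* environment variables shown in launch.json further down).

```rust
// Hypothetical sketch: build an OTLP/HTTP span exporter aimed at the
// collector's `http` receiver (0.0.0.0:4318). Builder names assume the
// opentelemetry-otlp 0.31 API; check the crate docs before relying on this.
use opentelemetry_otlp::{SpanExporter, WithExportConfig};

fn build_span_exporter() -> Result<SpanExporter, Box<dyn std::error::Error>> {
    let exporter = SpanExporter::builder()
        .with_http()                                           // OTLP over HTTP/protobuf
        .with_endpoint("http://otel-collector:4318/v1/traces") // matches the receiver above
        .build()?;
    Ok(exporter)
}
```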

View File

@@ -0,0 +1 @@
*

View File

@@ -14,17 +14,27 @@
global:
scrape_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
evaluation_interval: 15s
external_labels:
cluster: 'rustfs-dev' # Label to identify the cluster
replica: '1' # Replica identifier
scrape_configs:
- job_name: 'otel-collector'
- job_name: 'otel-collector-internal'
static_configs:
- targets: [ 'otel-collector:8888' ] # Scrape metrics from Collector
- job_name: 'otel-metrics'
scrape_interval: 10s
- job_name: 'rustfs-app-metrics'
static_configs:
- targets: [ 'otel-collector:8889' ] # Application metrics exposed by the Collector
scrape_interval: 15s
metric_relabel_configs:
- job_name: 'tempo'
static_configs:
- targets: [ 'tempo:3200' ] # Scrape metrics from Tempo
- job_name: 'jaeger'
static_configs:
- targets: [ 'jaeger:8888' ] # Jaeger admin port
otlp:
# Recommended attributes to be promoted to labels.

View File

@@ -18,7 +18,9 @@ distributor:
otlp:
protocols:
grpc:
endpoint: "tempo:4317"
endpoint: "0.0.0.0:4317"
http:
endpoint: "0.0.0.0:4318"
ingester:
max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally

View File

@@ -52,24 +52,19 @@ runs:
sudo apt-get install -y \
musl-tools \
build-essential \
lld \
libdbus-1-dev \
libwayland-dev \
libwebkit2gtk-4.1-dev \
libxdo-dev \
pkg-config \
libssl-dev
- name: Install protoc
uses: arduino/setup-protoc@v3
with:
version: "31.1"
version: "33.1"
repo-token: ${{ inputs.github-token }}
- name: Install flatc
uses: Nugine/setup-flatc@v1
with:
version: "25.2.10"
version: "25.9.23"
- name: Install Rust toolchain
uses: dtolnay/rust-toolchain@stable

View File

@@ -22,8 +22,18 @@ updates:
- package-ecosystem: "cargo" # See documentation for possible values
directory: "/" # Location of package manifests
schedule:
interval: "monthly"
interval: "weekly"
day: "monday"
timezone: "Asia/Shanghai"
time: "08:00"
groups:
s3s:
update-types:
- "minor"
- "patch"
patterns:
- "s3s"
- "s3s-*"
dependencies:
patterns:
- "*"

81
.github/workflows/helm-package.yml vendored Normal file
View File

@@ -0,0 +1,81 @@
name: Publish helm chart to artifacthub
on:
workflow_run:
workflows: ["Build and Release"]
types: [completed]
permissions:
contents: read
env:
new_version: ${{ github.event.workflow_run.head_branch }}
jobs:
build-helm-package:
runs-on: ubuntu-latest
# Only run on successful builds triggered by tag pushes (version format: x.y.z or x.y.z-suffix)
if: |
github.event.workflow_run.conclusion == 'success' &&
github.event.workflow_run.event == 'push' &&
contains(github.event.workflow_run.head_branch, '.')
steps:
- name: Checkout helm chart repo
uses: actions/checkout@v2
- name: Replace chart appversion
run: |
set -e
set -x
old_version=$(grep "^appVersion:" helm/rustfs/Chart.yaml | awk '{print $2}')
sed -i "s/$old_version/$new_version/g" helm/rustfs/Chart.yaml
sed -i "/^image:/,/^[^ ]/ s/tag:.*/tag: "$new_version"/" helm/rustfs/values.yaml
- name: Set up Helm
uses: azure/setup-helm@v4.3.0
- name: Package Helm Chart
run: |
cp helm/README.md helm/rustfs/
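# e.g. for a tag such as "1.0.0-alpha.68" (illustrative), the awk below yields package_version "68", so the chart is packaged as version 0.0.68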
package_version=$(echo $new_version | awk -F '-' '{print $2}' | awk -F '.' '{print $NF}')
helm package ./helm/rustfs --destination helm/rustfs/ --version "0.0.$package_version"
- name: Upload helm package as artifact
uses: actions/upload-artifact@v4
with:
name: helm-package
path: helm/rustfs/*.tgz
retention-days: 1
publish-helm-package:
runs-on: ubuntu-latest
needs: [build-helm-package]
steps:
- name: Checkout helm package repo
uses: actions/checkout@v2
with:
repository: rustfs/helm
token: ${{ secrets.RUSTFS_HELM_PACKAGE }}
- name: Download helm package
uses: actions/download-artifact@v4
with:
name: helm-package
path: ./
- name: Set up helm
uses: azure/setup-helm@v4.3.0
- name: Generate index
run: helm repo index . --url https://charts.rustfs.com
- name: Push helm package and index file
run: |
git config --global user.name "${{ secrets.USERNAME }}"
git config --global user.email "${{ secrets.EMAIL_ADDRESS }}"
git status .
git add .
git commit -m "Update rustfs helm package with $new_version."
git push origin main

13
.vscode/launch.json vendored
View File

@@ -22,6 +22,7 @@
"env": {
"RUST_LOG": "rustfs=debug,ecstore=info,s3s=debug,iam=debug",
"RUSTFS_SKIP_BACKGROUND_TASK": "on",
//"RUSTFS_OBS_LOG_DIRECTORY": "./deploy/logs",
// "RUSTFS_POLICY_PLUGIN_URL":"http://localhost:8181/v1/data/rustfs/authz/allow",
// "RUSTFS_POLICY_PLUGIN_AUTH_TOKEN":"your-opa-token"
},
@@ -85,6 +86,18 @@
"cwd": "${workspaceFolder}",
//"stopAtEntry": false,
//"preLaunchTask": "cargo build",
"env": {
"RUSTFS_ACCESS_KEY": "rustfsadmin",
"RUSTFS_SECRET_KEY": "rustfsadmin",
"RUSTFS_VOLUMES": "./target/volume/test{1...4}",
"RUSTFS_ADDRESS": ":9000",
"RUSTFS_CONSOLE_ENABLE": "true",
// "RUSTFS_OBS_TRACE_ENDPOINT": "http://127.0.0.1:4318/v1/traces", // jeager otlp http endpoint
// "RUSTFS_OBS_METRIC_ENDPOINT": "http://127.0.0.1:4318/v1/metrics", // default otlp http endpoint
// "RUSTFS_OBS_LOG_ENDPOINT": "http://127.0.0.1:4318/v1/logs", // default otlp http endpoint
"RUSTFS_CONSOLE_ADDRESS": "127.0.0.1:9001",
"RUSTFS_OBS_LOG_DIRECTORY": "./target/logs",
},
"sourceLanguages": [
"rust"
],

1321
Cargo.lock generated

File diff suppressed because it is too large

View File

@@ -97,18 +97,18 @@ async-channel = "2.5.0"
async-compression = { version = "0.4.19" }
async-recursion = "1.1.1"
async-trait = "0.1.89"
axum = "0.8.6"
axum-extra = "0.12.1"
axum-server = { version = "0.7.2", features = ["tls-rustls-no-provider"], default-features = false }
axum = "0.8.7"
axum-extra = "0.12.2"
axum-server = { version = "0.8.0", features = ["tls-rustls-no-provider"], default-features = false }
futures = "0.3.31"
futures-core = "0.3.31"
futures-util = "0.3.31"
hyper = { version = "1.7.0", features = ["http2", "http1", "server"] }
hyper = { version = "1.8.1", features = ["http2", "http1", "server"] }
hyper-rustls = { version = "0.27.7", default-features = false, features = ["native-tokio", "http1", "tls12", "logging", "http2", "ring", "webpki-roots"] }
hyper-util = { version = "0.1.17", features = ["tokio", "server-auto", "server-graceful"] }
http = "1.3.1"
hyper-util = { version = "0.1.19", features = ["tokio", "server-auto", "server-graceful"] }
http = "1.4.0"
http-body = "1.0.1"
reqwest = { version = "0.12.24", default-features = false, features = ["rustls-tls-webpki-roots", "charset", "http2", "system-proxy", "stream", "json", "blocking"] }
reqwest = { version = "0.12.25", default-features = false, features = ["rustls-tls-webpki-roots", "charset", "http2", "system-proxy", "stream", "json", "blocking"] }
socket2 = "0.6.1"
tokio = { version = "1.48.0", features = ["fs", "rt-multi-thread"] }
tokio-rustls = { version = "0.26.4", default-features = false, features = ["logging", "tls12", "ring"] }
@@ -119,17 +119,17 @@ tonic = { version = "0.14.2", features = ["gzip"] }
tonic-prost = { version = "0.14.2" }
tonic-prost-build = { version = "0.14.2" }
tower = { version = "0.5.2", features = ["timeout"] }
tower-http = { version = "0.6.6", features = ["cors"] }
tower-http = { version = "0.6.8", features = ["cors"] }
# Serialization and Data Formats
bytes = { version = "1.10.1", features = ["serde"] }
bytesize = "2.2.0"
bytes = { version = "1.11.0", features = ["serde"] }
bytesize = "2.3.1"
byteorder = "1.5.0"
flatbuffers = "25.9.23"
form_urlencoded = "1.2.2"
prost = "0.14.1"
quick-xml = "0.38.3"
rmcp = { version = "0.8.5" }
quick-xml = "0.38.4"
rmcp = { version = "0.10.0" }
rmp = { version = "0.8.14" }
rmp-serde = { version = "1.3.0" }
serde = { version = "1.0.228", features = ["derive"] }
@@ -139,22 +139,20 @@ schemars = "1.1.0"
# Cryptography and Security
aes-gcm = { version = "0.11.0-rc.2", features = ["rand_core"] }
argon2 = { version = "0.6.0-rc.2", features = ["std"] }
blake3 = { version = "1.8.2" }
argon2 = { version = "0.6.0-rc.3", features = ["std"] }
blake3 = { version = "1.8.2", features = ["rayon", "mmap"] }
chacha20poly1305 = { version = "0.11.0-rc.2" }
crc-fast = "1.3.0"
crc32c = "0.6.8"
crc32fast = "1.5.0"
crc64fast-nvme = "1.2.0"
crc-fast = "1.6.0"
hmac = { version = "0.13.0-rc.3" }
jsonwebtoken = { version = "10.2.0", features = ["rust_crypto"] }
pbkdf2 = "0.13.0-rc.2"
pbkdf2 = "0.13.0-rc.3"
rsa = { version = "0.10.0-rc.10" }
rustls = { version = "0.23.35", features = ["ring", "logging", "std", "tls12"], default-features = false }
rustls-pemfile = "2.2.0"
rustls-pki-types = "1.13.0"
rustls-pki-types = "1.13.1"
sha1 = "0.11.0-rc.3"
sha2 = "0.11.0-rc.3"
subtle = "2.6"
zeroize = { version = "1.8.2", features = ["derive"] }
# Time and Date
@@ -168,43 +166,41 @@ arc-swap = "1.7.1"
astral-tokio-tar = "0.5.6"
atoi = "2.0.0"
atomic_enum = "0.3.0"
aws-config = { version = "1.8.10" }
aws-credential-types = { version = "1.2.9" }
aws-sdk-s3 = { version = "1.112.0", default-features = false, features = ["sigv4a", "rustls", "rt-tokio"] }
aws-config = { version = "1.8.11" }
aws-credential-types = { version = "1.2.10" }
aws-sdk-s3 = { version = "1.116.0", default-features = false, features = ["sigv4a", "rustls", "rt-tokio"] }
aws-smithy-types = { version = "1.3.4" }
base64 = "0.22.1"
base64-simd = "0.8.0"
brotli = "8.0.2"
cfg-if = "1.0.4"
clap = { version = "4.5.51", features = ["derive", "env"] }
clap = { version = "4.5.53", features = ["derive", "env"] }
const-str = { version = "0.7.0", features = ["std", "proc"] }
convert_case = "0.9.0"
criterion = { version = "0.7", features = ["html_reports"] }
convert_case = "0.10.0"
criterion = { version = "0.8", features = ["html_reports"] }
crossbeam-queue = "0.3.12"
datafusion = "50.3.0"
datafusion = "51.0.0"
derive_builder = "0.20.2"
enumset = "1.1.10"
faster-hex = "0.10.0"
flate2 = "1.1.5"
flexi_logger = { version = "0.31.7", features = ["trc", "dont_minimize_extra_stacks", "compress", "kv", "json"] }
glob = "0.3.3"
google-cloud-storage = "1.2.0"
google-cloud-auth = "1.1.0"
hashbrown = { version = "0.16.0", features = ["serde", "rayon"] }
google-cloud-storage = "1.4.0"
google-cloud-auth = "1.2.0"
hashbrown = { version = "0.16.1", features = ["serde", "rayon"] }
heed = { version = "0.22.0" }
hex-simd = "0.8.0"
highway = { version = "1.3.0" }
ipnetwork = { version = "0.21.1", features = ["serde"] }
lazy_static = "1.5.0"
libc = "0.2.177"
libc = "0.2.178"
libsystemd = "0.7.2"
local-ip-address = "0.6.5"
local-ip-address = "0.6.6"
lz4 = "1.28.1"
matchit = "0.9.0"
md-5 = "0.11.0-rc.3"
md5 = "0.8.0"
metrics = "0.24.2"
metrics-exporter-opentelemetry = "0.1.2"
mime_guess = "2.0.5"
moka = { version = "0.12.11", features = ["future"] }
netif = "0.1.6"
@@ -213,7 +209,6 @@ nu-ansi-term = "0.50.3"
num_cpus = { version = "1.17.0" }
nvml-wrapper = "0.11.0"
object_store = "0.12.4"
once_cell = "1.21.3"
parking_lot = "0.12.5"
path-absolutize = "3.1.1"
path-clean = "1.0.1"
@@ -223,10 +218,10 @@ rand = { version = "0.10.0-rc.5", features = ["serde"] }
rayon = "1.11.0"
reed-solomon-simd = { version = "3.1.0" }
regex = { version = "1.12.2" }
rumqttc = { version = "0.25.0" }
rumqttc = { version = "0.25.1" }
rust-embed = { version = "8.9.0" }
rustc-hash = { version = "2.1.1" }
s3s = { git = "https://github.com/s3s-project/s3s.git", rev = "1ab064b", version = "0.12.0-rc.3", features = ["minio"] }
s3s = { version = "0.12.0-rc.4", features = ["minio"] }
serial_test = "3.2.0"
shadow-rs = { version = "1.4.0", default-features = false }
siphasher = "1.0.1"
@@ -234,7 +229,7 @@ smallvec = { version = "1.15.1", features = ["serde"] }
smartstring = "1.0.1"
snafu = "0.8.9"
snap = "1.1.1"
starshard = { version = "0.5.0", features = ["rayon", "async", "serde"] }
starshard = { version = "0.6.0", features = ["rayon", "async", "serde"] }
strum = { version = "0.27.2", features = ["derive"] }
sysctl = "0.7.1"
sysinfo = "0.37.2"
@@ -242,32 +237,34 @@ temp-env = "0.3.6"
tempfile = "3.23.0"
test-case = "3.3.1"
thiserror = "2.0.17"
tracing = { version = "0.1.41" }
tracing-appender = "0.2.3"
tracing = { version = "0.1.43" }
tracing-appender = "0.2.4"
tracing-error = "0.2.1"
tracing-opentelemetry = "0.32.0"
tracing-subscriber = { version = "0.3.20", features = ["env-filter", "time"] }
tracing-subscriber = { version = "0.3.22", features = ["env-filter", "time"] }
transform-stream = "0.3.1"
url = "2.5.7"
urlencoding = "2.1.3"
uuid = { version = "1.18.1", features = ["v4", "fast-rng", "macro-diagnostics"] }
uuid = { version = "1.19.0", features = ["v4", "fast-rng", "macro-diagnostics"] }
vaultrs = { version = "0.7.4" }
walkdir = "2.5.0"
wildmatch = { version = "2.6.0", features = ["serde"] }
wildmatch = { version = "2.6.1", features = ["serde"] }
winapi = { version = "0.3.9" }
xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }
zip = "6.0.0"
zstd = "0.13.3"
# Observability and Metrics
metrics = "0.24.3"
opentelemetry = { version = "0.31.0" }
opentelemetry-appender-tracing = { version = "0.31.1", features = ["experimental_use_tracing_span_context", "experimental_metadata_attributes", "spec_unstable_logs_enabled"] }
opentelemetry-otlp = { version = "0.31.0", features = ["http-proto", "zstd-http"] }
opentelemetry-otlp = { version = "0.31.0", features = ["gzip-http", "reqwest-rustls"] }
opentelemetry_sdk = { version = "0.31.0" }
opentelemetry-semantic-conventions = { version = "0.31.0", features = ["semconv_experimental"] }
opentelemetry-stdout = { version = "0.31.0" }
# Performance Analysis and Memory Profiling
mimalloc = "0.1"
# Use tikv-jemallocator as memory allocator and enable performance analysis
tikv-jemallocator = { version = "0.6", features = ["profiling", "stats", "unprefixed_malloc_on_supported_platforms", "background_threads"] }
# Used to control and obtain statistics for jemalloc at runtime
@@ -276,7 +273,7 @@ tikv-jemalloc-ctl = { version = "0.6", features = ["use_std", "stats", "profilin
jemalloc_pprof = { version = "0.8.1", features = ["symbolize", "flamegraph"] }
# Used to generate CPU performance analysis data and flame diagrams
pprof = { version = "0.15.0", features = ["flamegraph", "protobuf-codec"] }
mimalloc = "0.1"
[workspace.metadata.cargo-shear]

View File

@@ -66,8 +66,8 @@ COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /usr/bin/rustfs /entrypoint.sh
RUN addgroup -g 1000 -S rustfs && \
adduser -u 1000 -G rustfs -S rustfs -D && \
RUN addgroup -g 10001 -S rustfs && \
adduser -u 10001 -G rustfs -S rustfs -D && \
mkdir -p /data /logs && \
chown -R rustfs:rustfs /data /logs && \
chmod 0750 /data /logs
@@ -81,13 +81,11 @@ ENV RUSTFS_ADDRESS=":9000" \
RUSTFS_CORS_ALLOWED_ORIGINS="*" \
RUSTFS_CONSOLE_CORS_ALLOWED_ORIGINS="*" \
RUSTFS_VOLUMES="/data" \
RUST_LOG="warn" \
RUSTFS_OBS_LOG_DIRECTORY="/logs" \
RUSTFS_SINKS_FILE_PATH="/logs"
RUST_LOG="warn"
EXPOSE 9000 9001
VOLUME ["/data", "/logs"]
VOLUME ["/data"]
USER rustfs

View File

@@ -166,15 +166,13 @@ ENV RUSTFS_ADDRESS=":9000" \
RUSTFS_CONSOLE_ENABLE="true" \
RUSTFS_VOLUMES="/data" \
RUST_LOG="warn" \
RUSTFS_OBS_LOG_DIRECTORY="/logs" \
RUSTFS_SINKS_FILE_PATH="/logs" \
RUSTFS_USERNAME="rustfs" \
RUSTFS_GROUPNAME="rustfs" \
RUSTFS_UID="1000" \
RUSTFS_GID="1000"
EXPOSE 9000
VOLUME ["/data", "/logs"]
VOLUME ["/data"]
# Keep root here; entrypoint will drop privileges using chroot --userspec
ENTRYPOINT ["/entrypoint.sh"]

225
README.md
View File

@@ -1,6 +1,6 @@
[![RustFS](https://rustfs.com/images/rustfs-github.png)](https://rustfs.com)
<p align="center">RustFS is a high-performance distributed object storage software built using Rust</p>
<p align="center">RustFS is a high-performance, distributed object storage system built in Rust.</p>
<p align="center">
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
@@ -11,7 +11,7 @@
</p>
<p align="center">
<a href="https://docs.rustfs.com/introduction.html">Getting Started</a>
<a href="https://docs.rustfs.com/installation/">Getting Started</a>
· <a href="https://docs.rustfs.com/">Docs</a>
· <a href="https://github.com/rustfs/rustfs/issues">Bug reports</a>
· <a href="https://github.com/rustfs/rustfs/discussions">Discussions</a>
@@ -19,7 +19,6 @@
<p align="center">
English | <a href="https://github.com/rustfs/rustfs/blob/main/README_ZH.md">简体中文</a> |
<!-- Keep these links. Translations will automatically update with the README. -->
<a href="https://readme-i18n.com/rustfs/rustfs?lang=de">Deutsch</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=es">Español</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=fr">français</a> |
@@ -29,174 +28,179 @@ English | <a href="https://github.com/rustfs/rustfs/blob/main/README_ZH.md">简
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ru">Русский</a>
</p>
RustFS is a high-performance distributed object storage software built using Rust, one of the most popular languages
worldwide. Along with MinIO, it shares a range of advantages such as simplicity, S3 compatibility, open-source nature,
support for data lakes, AI, and big data. Furthermore, it has a better and more user-friendly open-source license in
comparison to other storage systems, being constructed under the Apache license. As Rust serves as its foundation,
RustFS provides faster speed and safer distributed features for high-performance object storage.
RustFS is a high-performance, distributed object storage system built in Rust—one of the most loved programming languages worldwide. RustFS combines the simplicity of MinIO with the memory safety and raw performance of Rust. It offers full S3 compatibility, is completely open-source, and is optimized for data lakes, AI, and big data workloads.
> ⚠️ **RustFS is under rapid development. Do NOT use in production environments!**
Unlike other storage systems, RustFS is released under the permissible Apache 2.0 license, avoiding the restrictions of AGPL. With Rust as its foundation, RustFS delivers superior speed and secure distributed features for next-generation object storage.
## Features
## Feature & Status
- **High Performance**: Built with Rust, ensuring speed and efficiency.
- **Distributed Architecture**: Scalable and fault-tolerant design for large-scale deployments.
- **S3 Compatibility**: Seamless integration with existing S3-compatible applications.
- **Data Lake Support**: Optimized for big data and AI workloads.
- **Open Source**: Licensed under Apache 2.0, encouraging community contributions and transparency.
- **User-Friendly**: Designed with simplicity in mind, making it easy to deploy and manage.
- **High Performance**: Built with Rust to ensure maximum speed and resource efficiency.
- **Distributed Architecture**: Scalable and fault-tolerant design suitable for large-scale deployments.
- **S3 Compatibility**: Seamless integration with existing S3-compatible applications and tools.
- **Data Lake Support**: Optimized for high-throughput big data and AI workloads.
- **Open Source**: Licensed under Apache 2.0, encouraging unrestricted community contributions and commercial usage.
- **User-Friendly**: Designed with simplicity in mind for easy deployment and management.
## RustFS vs MinIO
| Feature | Status | Feature | Status |
| :--- | :--- | :--- | :--- |
| **S3 Core Features** | ✅ Available | **Bitrot Protection** | ✅ Available |
| **Upload / Download** | ✅ Available | **Single Node Mode** | ✅ Available |
| **Versioning** | ✅ Available | **Bucket Replication** | ⚠️ Partial Support |
| **Logging** | ✅ Available | **Lifecycle Management** | 🚧 Under Testing |
| **Event Notifications** | ✅ Available | **Distributed Mode** | 🚧 Under Testing |
| **K8s Helm Charts** | ✅ Available | **OPA (Open Policy Agent)** | 🚧 Under Testing |
Stress test server parameters
| Type | parameter | Remark |
## RustFS vs MinIO Performance
**Stress Test Environment:**
| Type | Parameter | Remark |
|---------|-----------|----------------------------------------------------------|
| CPU | 2 Core | Intel Xeon(Sapphire Rapids) Platinum 8475B , 2.7/3.2 GHz | |
| Memory | 4GB |   |
| Network | 15Gbp |   |
| Driver | 40GB x 4 | IOPS 3800 / Driver |
| CPU | 2 Core | Intel Xeon (Sapphire Rapids) Platinum 8475B, 2.7/3.2 GHz |
| Memory | 4GB | |
| Network | 15Gbps | |
| Drive | 40GB x 4 | IOPS 3800 / Drive |
<https://github.com/user-attachments/assets/2e4979b5-260c-4f2c-ac12-c87fd558072a>
### RustFS vs Other object storage
### RustFS vs Other Object Storage
| RustFS | Other object storage |
|---------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------|
| Powerful Console | Simple and useless Console |
| Developed based on Rust language, memory is safer | Developed in Go or C, with potential issues like memory GC/leaks |
| Does not report logs to third-party countries | Reporting logs to other third countries may violate national security laws |
| Licensed under Apache, more business-friendly | AGPL V3 License and other License, polluted open source and License traps, infringement of intellectual property rights |
| Comprehensive S3 support, works with domestic and international cloud providers | Full support for S3, but no local cloud vendor support |
| Rust-based development, strong support for secure and innovative devices | Poor support for edge gateways and secure innovative devices |
| Stable commercial prices, free community support | High pricing, with costs up to $250,000 for 1PiB |
| No risk | Intellectual property risks and risks of prohibited uses |
| Feature | RustFS | Other Object Storage |
| :--- | :--- | :--- |
| **Console Experience** | **Powerful Console**<br>Comprehensive management interface. | **Basic / Limited Console**<br>Often overly simple or lacking critical features. |
| **Language & Safety** | **Rust-based**<br>Memory safety by design. | **Go or C-based**<br>Potential for memory GC pauses or leaks. |
| **Data Sovereignty** | **No Telemetry / Full Compliance**<br>Guards against unauthorized cross-border data egress. Compliant with GDPR (EU/UK), CCPA (US), and APPI (Japan). | **Potential Risk**<br>Possible legal exposure and unwanted data telemetry. |
| **Licensing** | **Permissive Apache 2.0**<br>Business-friendly, no "poison pill" clauses. | **Restrictive AGPL v3**<br>Risk of license traps and intellectual property pollution. |
| **Compatibility** | **100% S3 Compatible**<br>Works with any cloud provider or client, anywhere. | **Variable Compatibility**<br>May lack support for local cloud vendors or specific APIs. |
| **Edge & IoT** | **Strong Edge Support**<br>Ideal for secure, innovative edge devices. | **Weak Edge Support**<br>Often too heavy for edge gateways. |
| **Risk Profile** | **Enterprise Risk Mitigation**<br>Clear IP rights and safe for commercial use. | **Legal Risks**<br>Intellectual property ambiguity and usage restrictions. |
## Quickstart
To get started with RustFS, follow these steps:
1. **One-click installation script (Option 1)**
### 1. One-click Installation (Option 1)
```bash
curl -O https://rustfs.com/install_rustfs.sh && bash install_rustfs.sh
```
curl -O https://rustfs.com/install_rustfs.sh && bash install_rustfs.sh
````
2. **Docker Quick Start (Option 2)**
### 2\. Docker Quick Start (Option 2)
```bash
# create data and logs directories
mkdir -p data logs
The RustFS container runs as a non-root user `rustfs` (UID `10001`). If you run Docker with `-v` to mount a host directory, please ensure the host directory owner is set to `10001`, otherwise you will encounter permission denied errors.
# using latest alpha version
docker run -d -p 9000:9000 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:alpha
```bash
# Create data and logs directories
mkdir -p data logs
# Specific version
docker run -d -p 9000:9000 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:1.0.0.alpha.45
```
# Change the owner of these directories
chown -R 10001:10001 data logs
For docker installation, you can also run the container with docker compose. With the `docker-compose.yml` file under
root directory, running the command:
# Using latest version
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:latest
```
docker compose --profile observability up -d
```
# Using specific version
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:1.0.0.alpha.68
```
**NOTE**: You should be better to have a look for `docker-compose.yaml` file. Because, several services contains in the
file. Grafan,prometheus,jaeger containers will be launched using docker compose file, which is helpful for rustfs
observability. If you want to start redis as well as nginx container, you can specify the corresponding profiles.
You can also use Docker Compose. Using the `docker-compose.yml` file in the root directory:
3. **Build from Source (Option 3) - Advanced Users**
```bash
docker compose --profile observability up -d
```
For developers who want to build RustFS Docker images from source with multi-architecture support:
**NOTE**: We recommend reviewing the `docker-compose.yaml` file before running. It defines several services including Grafana, Prometheus, and Jaeger, which are helpful for RustFS observability. If you wish to start Redis or Nginx containers, you can specify the corresponding profiles.
```bash
# Build multi-architecture images locally
./docker-buildx.sh --build-arg RELEASE=latest
### 3\. Build from Source (Option 3) - Advanced Users
# Build and push to registry
./docker-buildx.sh --push
For developers who want to build RustFS Docker images from source with multi-architecture support:
# Build specific version
./docker-buildx.sh --release v1.0.0 --push
```bash
# Build multi-architecture images locally
./docker-buildx.sh --build-arg RELEASE=latest
# Build for custom registry
./docker-buildx.sh --registry your-registry.com --namespace yourname --push
```
# Build and push to registry
./docker-buildx.sh --push
The `docker-buildx.sh` script supports:
- **Multi-architecture builds**: `linux/amd64`, `linux/arm64`
- **Automatic version detection**: Uses git tags or commit hashes
- **Registry flexibility**: Supports Docker Hub, GitHub Container Registry, etc.
- **Build optimization**: Includes caching and parallel builds
# Build specific version
./docker-buildx.sh --release v1.0.0 --push
You can also use Make targets for convenience:
# Build for custom registry
./docker-buildx.sh --registry your-registry.com --namespace yourname --push
```
```bash
make docker-buildx # Build locally
make docker-buildx-push # Build and push
make docker-buildx-version VERSION=v1.0.0 # Build specific version
make help-docker # Show all Docker-related commands
```
The `docker-buildx.sh` script supports:
\- **Multi-architecture builds**: `linux/amd64`, `linux/arm64`
\- **Automatic version detection**: Uses git tags or commit hashes
\- **Registry flexibility**: Supports Docker Hub, GitHub Container Registry, etc.
\- **Build optimization**: Includes caching and parallel builds
> **Heads-up (macOS cross-compilation)**: macOS keeps the default `ulimit -n` at 256, so `cargo zigbuild` or `./build-rustfs.sh --platform ...` may fail with `ProcessFdQuotaExceeded` when targeting Linux. The build script now tries to raise the limit automatically, but if you still see the warning, run `ulimit -n 4096` (or higher) in your shell before building.
You can also use Make targets for convenience:
4. **Build with helm chart(Option 4) - Cloud Native environment**
```bash
make docker-buildx # Build locally
make docker-buildx-push # Build and push
make docker-buildx-version VERSION=v1.0.0 # Build specific version
make help-docker # Show all Docker-related commands
```
Following the instructions on [helm chart README](./helm/README.md) to install RustFS on kubernetes cluster.
> **Heads-up (macOS cross-compilation)**: macOS keeps the default `ulimit -n` at 256, so `cargo zigbuild` or `./build-rustfs.sh --platform ...` may fail with `ProcessFdQuotaExceeded` when targeting Linux. The build script attempts to raise the limit automatically, but if you still see the warning, run `ulimit -n 4096` (or higher) in your shell before building.
5. **Access the Console**: Open your web browser and navigate to `http://localhost:9000` to access the RustFS console,
default username and password is `rustfsadmin` .
6. **Create a Bucket**: Use the console to create a new bucket for your objects.
7. **Upload Objects**: You can upload files directly through the console or use S3-compatible APIs to interact with your
RustFS instance.
### 4\. Build with Helm Chart (Option 4) - Cloud Native
**NOTE**: If you want to access RustFS instance with `https`, you can refer
to [TLS configuration docs](https://docs.rustfs.com/integration/tls-configured.html).
Follow the instructions in the [Helm Chart README](https://charts.rustfs.com/) to install RustFS on a Kubernetes cluster.
-----
### Accessing RustFS
5. **Access the Console**: Open your web browser and navigate to `http://localhost:9000` to access the RustFS console.
* Default credentials: `rustfsadmin` / `rustfsadmin`
6. **Create a Bucket**: Use the console to create a new bucket for your objects.
7. **Upload Objects**: You can upload files directly through the console or use S3-compatible APIs/clients to interact with your RustFS instance.
**NOTE**: To access the RustFS instance via `https`, please refer to the [TLS Configuration Docs](https://docs.rustfs.com/integration/tls-configured.html).
## Documentation
For detailed documentation, including configuration options, API references, and advanced usage, please visit
our [Documentation](https://docs.rustfs.com).
For detailed documentation, including configuration options, API references, and advanced usage, please visit our [Documentation](https://docs.rustfs.com).
## Getting Help
If you have any questions or need assistance, you can:
If you have any questions or need assistance:
- Check the [FAQ](https://github.com/rustfs/rustfs/discussions/categories/q-a) for common issues and solutions.
- Join our [GitHub Discussions](https://github.com/rustfs/rustfs/discussions) to ask questions and share your
experiences.
- Open an issue on our [GitHub Issues](https://github.com/rustfs/rustfs/issues) page for bug reports or feature
requests.
- Check the [FAQ](https://github.com/rustfs/rustfs/discussions/categories/q-a) for common issues and solutions.
- Join our [GitHub Discussions](https://github.com/rustfs/rustfs/discussions) to ask questions and share your experiences.
- Open an issue on our [GitHub Issues](https://github.com/rustfs/rustfs/issues) page for bug reports or feature requests.
## Links
- [Documentation](https://docs.rustfs.com) - The manual you should read
- [Changelog](https://github.com/rustfs/rustfs/releases) - What we broke and fixed
- [GitHub Discussions](https://github.com/rustfs/rustfs/discussions) - Where the community lives
- [Documentation](https://docs.rustfs.com) - The manual you should read
- [Changelog](https://github.com/rustfs/rustfs/releases) - What we broke and fixed
- [GitHub Discussions](https://github.com/rustfs/rustfs/discussions) - Where the community lives
## Contact
- **Bugs**: [GitHub Issues](https://github.com/rustfs/rustfs/issues)
- **Business**: <hello@rustfs.com>
- **Jobs**: <jobs@rustfs.com>
- **General Discussion**: [GitHub Discussions](https://github.com/rustfs/rustfs/discussions)
- **Contributing**: [CONTRIBUTING.md](CONTRIBUTING.md)
- **Bugs**: [GitHub Issues](https://github.com/rustfs/rustfs/issues)
- **Business**: [hello@rustfs.com](mailto:hello@rustfs.com)
- **Jobs**: [jobs@rustfs.com](mailto:jobs@rustfs.com)
- **General Discussion**: [GitHub Discussions](https://github.com/rustfs/rustfs/discussions)
- **Contributing**: [CONTRIBUTING.md](CONTRIBUTING.md)
## Contributors
RustFS is a community-driven project, and we appreciate all contributions. Check out
the [Contributors](https://github.com/rustfs/rustfs/graphs/contributors) page to see the amazing people who have helped
make RustFS better.
RustFS is a community-driven project, and we appreciate all contributions. Check out the [Contributors](https://github.com/rustfs/rustfs/graphs/contributors) page to see the amazing people who have helped make RustFS better.
<a href="https://github.com/rustfs/rustfs/graphs/contributors">
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="Contributors"/>
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="Contributors" />
</a>
## Github Trending Top
🚀 RustFS is beloved by open-source enthusiasts and enterprise users worldwide, often appearing on the GitHub Trending
top charts.
🚀 RustFS is beloved by open-source enthusiasts and enterprise users worldwide, often appearing on the GitHub Trending top charts.
<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>
@@ -209,3 +213,4 @@ top charts.
[Apache 2.0](https://opensource.org/licenses/Apache-2.0)
**RustFS** is a trademark of RustFS, Inc. All other trademarks are the property of their respective owners.

View File

@@ -1,185 +1,219 @@
[![RustFS](https://rustfs.com/images/rustfs-github.png)](https://rustfs.com)
<p align="center">RustFS 是一个使用 Rust 构建的高性能分布式对象存储软件</p >
<p align="center">RustFS 是一个基于 Rust 构建的高性能分布式对象存储系统。</p>
<p align="center">
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
<a href="https://github.com/rustfs/rustfs/actions/workflows/docker.yml"><img alt="Build and Push Docker Images" src="https://github.com/rustfs/rustfs/actions/workflows/docker.yml/badge.svg" /></a>
<img alt="GitHub commit activity" src="https://img.shields.io/github/commit-activity/m/rustfs/rustfs"/>
<img alt="Github Last Commit" src="https://img.shields.io/github/last-commit/rustfs/rustfs"/>
<a href="https://github.com/rustfs/rustfs/actions/workflows/docker.yml"><img alt="构建并推送 Docker 镜像" src="https://github.com/rustfs/rustfs/actions/workflows/docker.yml/badge.svg" /></a>
<img alt="GitHub 提交活跃度" src="https://img.shields.io/github/commit-activity/m/rustfs/rustfs"/>
<img alt="Github 最新提交" src="https://img.shields.io/github/last-commit/rustfs/rustfs"/>
<a href="https://hellogithub.com/repository/rustfs/rustfs" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=b95bcb72bdc340b68f16fdf6790b7d5b&claim_uid=MsbvjYeLDKAH457&theme=small" alt="FeaturedHelloGitHub" /></a>
</p >
</p>
<p align="center">
<a href="https://docs.rustfs.com/zh/introduction.html">快速开始</a >
· <a href="https://docs.rustfs.com/zh/">文档</a >
· <a href="https://github.com/rustfs/rustfs/issues">问题报告</a >
· <a href="https://github.com/rustfs/rustfs/discussions">讨论</a >
</p >
<a href="https://docs.rustfs.com/installation/">快速开始</a>
· <a href="https://docs.rustfs.com/">文档</a>
· <a href="https://github.com/rustfs/rustfs/issues">报告 Bug</a>
· <a href="https://github.com/rustfs/rustfs/discussions">社区讨论</a>
</p>
<p align="center">
<a href="https://github.com/rustfs/rustfs/blob/main/README.md">English</a > | 简体中文
</p >
<a href="https://github.com/rustfs/rustfs/blob/main/README.md">English</a> | 简体中文 |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=de">Deutsch</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=es">Español</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=fr">français</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ja">日本語</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ko">한국어</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=pt">Portuguese</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ru">Русский</a>
</p>
RustFS 是一个使用 Rust(全球最受欢迎的编程语言之一)构建的高性能分布式对象存储软件。与 MinIO 一样它具有简单性、S3
兼容性、开源特性以及对数据湖、AI 和大数据的支持等一系列优势。此外,与其他存储系统相比,它采用 Apache
许可证构建,拥有更好、更用户友好的开源许可证。由于以 Rust 为基础RustFS 为高性能对象存储提供了更快的速度和更安全的分布式功能。
RustFS 是一个基于 Rust 构建的高性能分布式对象存储系统。Rust 是全球最受开发者喜爱的编程语言之一RustFS 完美结合了 MinIO 的简洁性与 Rust 的内存安全及高性能优势。它提供完整的 S3 兼容性完全开源并专为数据湖、人工智能AI和大数据负载进行了优化。
## 特性
与其他存储系统不同RustFS 采用更宽松、商业友好的 Apache 2.0 许可证,避免了 AGPL 协议的限制。以 Rust 为基石RustFS 为下一代对象存储提供了更快的速度和更安全的分布式特性。
- **高性能**:使用 Rust 构建,确保速度和效率。
## 特征和功能状态
- **高性能**:基于 Rust 构建,确保极致的速度和资源效率。
- **分布式架构**:可扩展且容错的设计,适用于大规模部署。
- **S3 兼容性**:与现有 S3 兼容应用程序无缝集成。
- **数据湖支持**针对大数据和 AI 工作负载进行了优化。
- **开源**:采用 Apache 2.0 许可证,鼓励社区贡献和透明度
- **用户友好**:设计简,易于部署和管理。
- **S3 兼容性**:与现有 S3 兼容应用和工具无缝集成。
- **数据湖支持**专为高吞吐量的大数据和 AI 工作负载优化。
- **完全开源**:采用 Apache 2.0 许可证,鼓励社区贡献和商业使用
- **简单易用**:设计简,易于部署和管理。
## RustFS vs MinIO
压力测试服务器参数
| 功能 | 状态 | 功能 | 状态 |
| :--- | :--- | :--- | :--- |
| **S3 核心功能** | ✅ 可用 | **Bitrot (防数据腐烂)** | ✅ 可用 |
| **上传 / 下载** | ✅ 可用 | **单机模式** | ✅ 可用 |
| **版本控制** | ✅ 可用 | **存储桶复制** | ⚠️ 部分可用 |
| **日志功能** | ✅ 可用 | **生命周期管理** | 🚧 测试中 |
| **事件通知** | ✅ 可用 | **分布式模式** | 🚧 测试中 |
| **K8s Helm Chart** | ✅ 可用 | **OPA (策略引擎)** | 🚧 测试中 |
| 类型 | 参数 | 备注 |
|-----|----------|----------------------------------------------------------|
| CPU | 2 核心 | Intel Xeon(Sapphire Rapids) Platinum 8475B , 2.7/3.2 GHz | |
| 内存 | 4GB | |
| 网络 | 15Gbp | |
| 驱动器 | 40GB x 4 | IOPS 3800 / 驱动器 |
## RustFS vs MinIO 性能对比
**压力测试环境参数:**
| 类型 | 参数 | 备注 |
|---------|-----------|----------------------------------------------------------|
| CPU | 2 核 | Intel Xeon (Sapphire Rapids) Platinum 8475B , 2.7/3.2 GHz |
| 内存 | 4GB |   |
| 网络 | 15Gbps |   |
| 硬盘 | 40GB x 4 | IOPS 3800 / Drive |
<https://github.com/user-attachments/assets/2e4979b5-260c-4f2c-ac12-c87fd558072a>
### RustFS vs 其他对象存储
| RustFS | 其他对象存储 |
|--------------------------|-------------------------------------|
| 强大的控制台 | 简单且无用的控制台 |
| 基于 Rust 语言开发,内存安全 | 使用 Go 或 C 开发存在内存 GC/泄漏潜在问题 |
| 不向第三方国家报告日志 | 向其他第三方国家报告日志可能违反国家安全法律 |
| 采用 Apache 许可证,对商业友好 | AGPL V3 许可证等其他许可证,污染开源和许可证陷阱,侵犯知识产权 |
| 全面的 S3 支持,适用于国内外云提供商 | 完全支持 S3但不支持本地云厂商 |
| 基于 Rust 开发,对安全创新设备有强大支持 | 对边缘网关和安全创新设备支持较差 |
| 稳定的商业价格,免费社区支持 | 高昂的定价,1PiB 成本高达 $250,000 |
| 无风险 | 知识产权风险和禁止使用的风险 |
| 特性 | RustFS | 其他对象存储 |
| :--- | :--- | :--- |
| **控制台体验** | **功能强大的控制台**<br>提供全面的管理界面。 | **基础/简陋的控制台**<br>通常功能过于简单或缺失关键特性。 |
| **语言与安全** | **基于 Rust 开发**<br>天生的内存安全。 | **基于 Go 或 C 开发**<br>存在内存 GC 停顿或内存泄漏潜在风险。 |
| **数据主权** | **无遥测 / 完全合规**<br>防止未经授权的数据跨境传输。完全符合 GDPR (欧盟/英国)、CCPA (美国) 和 APPI (日本) 等法规。 | **潜在风险**<br>可能存在法律风险和隐蔽的数据遥测Telemetry。 |
| **开源协议** | **宽松的 Apache 2.0**<br>商业友好,无“毒丸”条款。 | **受限的 AGPL v3**<br>存在许可证陷阱知识产权污染的风险。 |
| **兼容性** | **100% S3 兼容**<br>适用于任何云提供商和客户端,随处运行。 | **兼容性不一**<br>虽然支持 S3但可能缺乏对本地云厂商或特定 API 的支持。 |
| **边缘与 IoT** | **强大的边缘支持**<br>非常适合安全创新的边缘设备。 | **边缘支持较弱**<br>对于边缘网关来说通常过于沉重。 |
| **成本** | **稳定且免费**<br>免费社区支持,稳定的商业定价。 | **高昂成本**<br>1PiB 成本可能高达 250,000 美元。 |
| **风险控制** | **企业级风险规避**<br>清晰的知识产权,商业使用安全无忧。 | **法律风险**<br>知识产权归属模糊及使用限制风险。 |
## 快速开始
要开始使用 RustFS请按照以下步骤操作
请按照以下步骤快速上手 RustFS
1. **一键脚本快速启动 (方案一)**
```bash
curl -O https://rustfs.com/install_rustfs.sh && bash install_rustfs.sh
```
2. **Docker 快速启动(方案二)**
### 1. 一键安装脚本 (选项 1)
```bash
docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs
```
curl -O https://rustfs.com/install_rustfs.sh && bash install_rustfs.sh
````
对于使用 Docker 安装来讲,你还可以使用 `docker compose` 来启动 rustfs 实例。在仓库的根目录下面有一个 `docker-compose.yml`
文件。运行如下命令即可:
### 2\. Docker 快速启动 (选项 2)
```
docker compose --profile observability up -d
```
RustFS 容器以非 root 用户 `rustfs` (UID `10001`) 运行。如果您使用 Docker 的 `-v` 参数挂载宿主机目录,请务必确保宿主机目录的所有者已更改为 `10001`,否则会遇到权限拒绝错误。
**注意**:在使用 `docker compose` 之前,你应该仔细阅读一下 `docker-compose.yaml`,因为该文件中包含多个服务,除了 rustfs
以外,还有 grafana、prometheus、jaeger 等,这些是为 rustfs 可观测性服务的,还有 redis 和 nginx。你想启动哪些容器就需要用
`--profile` 参数指定相应的 profile。
```bash
# 创建数据和日志目录
mkdir -p data logs
3. **从源码构建(方案三)- 高级用户**
# 更改这两个目录的所有者
chown -R 10001:10001 data logs
面向希望从源码构建支持多架构 Docker 镜像的开发者:
# 使用最新版本运行
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:latest
```bash
# 本地构建多架构镜像
./docker-buildx.sh --build-arg RELEASE=latest
# 使用指定版本运行
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:1.0.0.alpha.68
```
# 构建并推送至镜像仓库
./docker-buildx.sh --push
您也可以使用 Docker Compose。使用根目录下的 `docker-compose.yml` 文件:
# 构建指定版本
./docker-buildx.sh --release v1.0.0 --push
```bash
docker compose --profile observability up -d
```
# 构建并推送到自定义镜像仓库
./docker-buildx.sh --registry your-registry.com --namespace yourname --push
```
**注意**: 我们建议您在运行前查看 `docker-compose.yaml` 文件。该文件定义了包括 Grafana、Prometheus 和 Jaeger 在内的多个服务,有助于 RustFS 的可观测性监控。如果您还想启动 Redis 或 Nginx 容器,可以指定相应的 profile。
`docker-buildx.sh` 脚本支持:
- **多架构构建**`linux/amd64`、`linux/arm64`
- **自动版本检测**:可使用 git 标签或提交哈希
- **仓库灵活性**:支持 Docker Hub、GitHub Container Registry 等
- **构建优化**:包含缓存和并行构建
### 3\. 源码编译 (选项 3) - 进阶用户
你也可以使用 Makefile 提供的目标命令以提升便捷性
适用于希望从源码构建支持多架构 RustFS Docker 镜像的开发者
```bash
make docker-buildx # 本地构建
make docker-buildx-push # 构建并推送
make docker-buildx-version VERSION=v1.0.0 # 构建指定版本
make help-docker # 显示全部 Docker 相关命令
```
```bash
# 本地构建多架构镜像
./docker-buildx.sh --build-arg RELEASE=latest
> **提示macOS 交叉编译)**macOS 默认的 `ulimit -n` 只有 256使用 `cargo zigbuild` 或 `./build-rustfs.sh --platform ...` 编译 Linux 目标时容易触发 `ProcessFdQuotaExceeded` 链接错误。脚本会尝试自动提升该限制,如仍提示失败,请在构建前手动执行 `ulimit -n 4096`(或更大的值)。
# 构建并推送到仓库
./docker-buildx.sh --push
4. **使用 Helm Chart 部署(方案四)- 云原生环境**
# 构建指定版本
./docker-buildx.sh --release v1.0.0 --push
按照 [helm chart 说明文档](./helm/README.md) 的指引,在 Kubernetes 集群中安装 RustFS。
# 构建并推送到自定义仓库
./docker-buildx.sh --registry your-registry.com --namespace yourname --push
```
5. **访问控制台**:打开 Web 浏览器并导航到 `http://localhost:9000` 以访问 RustFS 控制台,默认的用户名和密码是
`rustfsadmin` 。
6. **创建存储桶**:使用控制台为您的对象创建新的存储桶。
7. **上传对象**:您可以直接通过控制台上传文件,或使用 S3 兼容的 API 与您的 RustFS 实例交互。
`docker-buildx.sh` 脚本支持:
\- **多架构构建**: `linux/amd64`, `linux/arm64`
\- **自动版本检测**: 使用 git tags 或 commit hash
\- **灵活的仓库支持**: 支持 Docker Hub, GitHub Container Registry 等
\- **构建优化**: 包含缓存和并行构建
**注意**:如果你想通过 `https` 来访问 RustFS 实例,请参考 [TLS 配置文档](https://docs.rustfs.com/zh/integration/tls-configured.html)
为了方便起见,您也可以使用 Make 命令:
```bash
make docker-buildx # 本地构建
make docker-buildx-push # 构建并推送
make docker-buildx-version VERSION=v1.0.0 # 构建指定版本
make help-docker # 显示所有 Docker 相关命令
```
> **注意 (macOS 交叉编译)**: macOS 默认的 `ulimit -n` 限制为 256因此在使用 `cargo zigbuild` 或 `./build-rustfs.sh --platform ...` 交叉编译 Linux 版本时,可能会因 `ProcessFdQuotaExceeded` 失败。构建脚本会尝试自动提高限制,但如果您仍然看到警告,请在构建前在终端运行 `ulimit -n 4096` (或更高)。
### 4\. 使用 Helm Chart 安装 (选项 4) - 云原生环境
请按照 [Helm Chart README](https://charts.rustfs.com) 上的说明在 Kubernetes 集群上安装 RustFS。
-----
### 访问 RustFS
5. **访问控制台**: 打开浏览器并访问 `http://localhost:9000` 进入 RustFS 控制台。
* 默认账号/密码: `rustfsadmin` / `rustfsadmin`
6. **创建存储桶**: 使用控制台为您的对象创建一个新的存储桶 (Bucket)。
7. **上传对象**: 您可以直接通过控制台上传文件,或使用 S3 兼容的 API/客户端与您的 RustFS 实例进行交互。
**注意**: 如果您希望通过 `https` 访问 RustFS 实例,请参考 [TLS 配置文档](https://docs.rustfs.com/integration/tls-configured.html)。
## 文档
有关详细文档包括配置选项、API 参考和高级用法,请访问我们的[文档](https://docs.rustfs.com)。
有关详细文档包括配置选项、API 参考和高级用法,请访问我们的 [官方文档](https://docs.rustfs.com)。
## 获取帮助
如果您有任何问题或需要帮助,您可以
如果您有任何问题或需要帮助:
- 查看[常见问题解答](https://github.com/rustfs/rustfs/discussions/categories/q-a)以获取常见问题和解决方案。
- 加入我们的 [GitHub 讨论](https://github.com/rustfs/rustfs/discussions)提问分享您的经验。
- 在我们的 [GitHub Issues](https://github.com/rustfs/rustfs/issues) 页面上开启问题,报告错误或功能请求。
- 查看 [FAQ](https://github.com/rustfs/rustfs/discussions/categories/q-a) 寻找常见问题和解决方案。
- 加入我们的 [GitHub Discussions](https://github.com/rustfs/rustfs/discussions) 提问分享您的经验。
- 在我们的 [GitHub Issues](https://github.com/rustfs/rustfs/issues) 页面提交 Bug 报告或功能请求。
## 链接
- [文档](https://docs.rustfs.com) - 您应该阅读的手册
- [更新日志](https://docs.rustfs.com/changelog) - 我们破坏和修复的内容
- [GitHub 讨论](https://github.com/rustfs/rustfs/discussions) - 社区所在
- [官方文档](https://docs.rustfs.com) - 必读手册
- [更新日志](https://github.com/rustfs/rustfs/releases) - 版本变更记录
- [社区讨论](https://github.com/rustfs/rustfs/discussions) - 社区交流
## 联系
## 联系方式
- **错误报告**[GitHub Issues](https://github.com/rustfs/rustfs/issues)
- **商务合作**<hello@rustfs.com>
- **招聘**<jobs@rustfs.com>
- **一般讨论**[GitHub 讨论](https://github.com/rustfs/rustfs/discussions)
- **贡献**[CONTRIBUTING.md](CONTRIBUTING.md)
- **Bug 反馈**: [GitHub Issues](https://github.com/rustfs/rustfs/issues)
- **商务合作**: [hello@rustfs.com](mailto:hello@rustfs.com)
- **工作机会**: [jobs@rustfs.com](mailto:jobs@rustfs.com)
- **一般讨论**: [GitHub Discussions](https://github.com/rustfs/rustfs/discussions)
- **贡献指南**: [CONTRIBUTING.md](CONTRIBUTING.md)
## 贡献者
RustFS 是一个社区驱动的项目,我们感谢所有的贡献。查看[贡献者](https://github.com/rustfs/rustfs/graphs/contributors)页面,了解帮助
RustFS 变得更好的杰出人员。
RustFS 是一个社区驱动的项目,我们感谢所有的贡献。查看 [贡献者](https://github.com/rustfs/rustfs/graphs/contributors) 页面,看看那些让 RustFS 变得更好的了不起的人们。
<a href="https://github.com/rustfs/rustfs/graphs/contributors">
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="贡献者"/>
</a >
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="Contributors" />
</a>
## Github 全球推荐榜
## Github Trending Top
🚀 RustFS 受到了全世界开源爱好者和企业用户的喜欢,多次登顶 Github Trending 全球榜。
🚀 RustFS 深受全球开源爱好者和企业用户的喜爱,经常荣登 GitHub Trending 榜
<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>
## Star 历史
## Star 历史
[![Star History Chart](https://api.star-history.com/svg?repos=rustfs/rustfs&type=date&legend=top-left)](https://www.star-history.com/#rustfs/rustfs&type=date&legend=top-left)
[![Star 历史图](https://api.star-history.com/svg?repos=rustfs/rustfs&type=date&legend=top-left)](https://www.star-history.com/#rustfs/rustfs&type=date&legend=top-left)
## 许可证
[Apache 2.0](https://opensource.org/licenses/Apache-2.0)
**RustFS** 是 RustFS, Inc. 的商标。所有其他商标均为其各自所有者的财产。

View File

@@ -13,10 +13,12 @@ keywords = ["RustFS", "AHM", "health-management", "scanner", "Minio"]
categories = ["web-programming", "development-tools", "filesystem"]
[dependencies]
rustfs-config = { workspace = true }
rustfs-ecstore = { workspace = true }
rustfs-common = { workspace = true }
rustfs-filemeta = { workspace = true }
rustfs-madmin = { workspace = true }
rustfs-utils = { workspace = true }
tokio = { workspace = true, features = ["full"] }
tokio-util = { workspace = true }
tracing = { workspace = true }

View File

@@ -90,7 +90,12 @@ impl HealChannelProcessor {
/// Process start request
async fn process_start_request(&self, request: HealChannelRequest) -> Result<()> {
info!("Processing heal start request: {} for bucket: {}", request.id, request.bucket);
info!(
"Processing heal start request: {} for bucket: {}/{}",
request.id,
request.bucket,
request.object_prefix.as_deref().unwrap_or("")
);
// Convert channel request to heal request
let heal_request = self.convert_to_heal_request(request.clone())?;
@@ -324,6 +329,14 @@ mod tests {
async fn list_objects_for_heal(&self, _bucket: &str, _prefix: &str) -> crate::Result<Vec<String>> {
Ok(vec![])
}
async fn list_objects_for_heal_page(
&self,
_bucket: &str,
_prefix: &str,
_continuation_token: Option<&str>,
) -> crate::Result<(Vec<String>, Option<String>, bool)> {
Ok((vec![], None, false))
}
async fn get_disk_for_resume(&self, _set_disk_id: &str) -> crate::Result<rustfs_ecstore::disk::DiskStore> {
Err(crate::Error::other("Not implemented in mock"))
}
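
The paged listing contract added here returns a page of object keys, an optional continuation token, and an `is_truncated` flag. A minimal, self-contained sketch of how a caller drives that contract, written synchronously with a hypothetical `fetch_page` stand-in for the storage call:

```rust
// Hypothetical stand-in for `list_objects_for_heal_page`: returns
// (objects, next_continuation_token, is_truncated) for one page.
fn fetch_page(token: Option<&str>) -> (Vec<String>, Option<String>, bool) {
    match token {
        None => (vec!["a".into(), "b".into()], Some("page-2".into()), true),
        Some(_) => (vec!["c".into()], None, false),
    }
}

fn main() {
    let mut token: Option<String> = None;
    loop {
        let (objects, next_token, is_truncated) = fetch_page(token.as_deref());
        for object in objects {
            // heal / checkpoint the object here
            println!("processing {object}");
        }
        if !is_truncated {
            break; // final page reached
        }
        match next_token {
            Some(t) => token = Some(t),
            None => break, // defensive: truncated but no token was returned
        }
    }
}
```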

View File

@@ -49,8 +49,9 @@ impl ErasureSetHealer {
}
/// execute erasure set heal with resume
#[tracing::instrument(skip(self, buckets), fields(set_disk_id = %set_disk_id, bucket_count = buckets.len()))]
pub async fn heal_erasure_set(&self, buckets: &[String], set_disk_id: &str) -> Result<()> {
info!("Starting erasure set heal for {} buckets on set disk {}", buckets.len(), set_disk_id);
info!("Starting erasure set heal");
// 1. generate or get task id
let task_id = self.get_or_create_task_id(set_disk_id).await?;
@@ -231,6 +232,7 @@ impl ErasureSetHealer {
/// heal single bucket with resume
#[allow(clippy::too_many_arguments)]
#[tracing::instrument(skip(self, current_object_index, processed_objects, successful_objects, failed_objects, _skipped_objects, resume_manager, checkpoint_manager), fields(bucket = %bucket, bucket_index = bucket_index))]
async fn heal_bucket_with_resume(
&self,
bucket: &str,
@@ -243,7 +245,7 @@ impl ErasureSetHealer {
resume_manager: &ResumeManager,
checkpoint_manager: &CheckpointManager,
) -> Result<()> {
info!(target: "rustfs:ahm:heal_bucket_with_resume" ,"Starting heal for bucket: {} from object index {}", bucket, current_object_index);
info!(target: "rustfs:ahm:heal_bucket_with_resume" ,"Starting heal for bucket from object index {}", current_object_index);
// 1. get bucket info
let _bucket_info = match self.storage.get_bucket_info(bucket).await? {
@@ -254,82 +256,114 @@ impl ErasureSetHealer {
}
};
// 2. get objects to heal
let objects = self.storage.list_objects_for_heal(bucket, "").await?;
// 2. process objects with pagination to avoid loading all objects into memory
let mut continuation_token: Option<String> = None;
let mut global_obj_idx = 0usize;
// 3. continue from checkpoint
for (obj_idx, object) in objects.iter().enumerate().skip(*current_object_index) {
// check if already processed
if checkpoint_manager.get_checkpoint().await.processed_objects.contains(object) {
continue;
}
// update current object
resume_manager
.set_current_item(Some(bucket.to_string()), Some(object.clone()))
loop {
// Get one page of objects
let (objects, next_token, is_truncated) = self
.storage
.list_objects_for_heal_page(bucket, "", continuation_token.as_deref())
.await?;
// Check if object still exists before attempting heal
let object_exists = match self.storage.object_exists(bucket, object).await {
Ok(exists) => exists,
Err(e) => {
warn!("Failed to check existence of {}/{}: {}, skipping", bucket, object, e);
*current_object_index = obj_idx + 1;
// Process objects in this page
for object in objects {
// Skip objects before the checkpoint
if global_obj_idx < *current_object_index {
global_obj_idx += 1;
continue;
}
};
if !object_exists {
info!(
target: "rustfs:ahm:heal_bucket_with_resume" ,"Object {}/{} no longer exists, skipping heal (likely deleted intentionally)",
bucket, object
);
checkpoint_manager.add_processed_object(object.clone()).await?;
*successful_objects += 1; // Treat as successful - object is gone as intended
*current_object_index = obj_idx + 1;
continue;
}
// heal object
let heal_opts = HealOpts {
scan_mode: HealScanMode::Normal,
remove: true,
recreate: true, // Keep recreate enabled for legitimate heal scenarios
..Default::default()
};
match self.storage.heal_object(bucket, object, None, &heal_opts).await {
Ok((_result, None)) => {
*successful_objects += 1;
checkpoint_manager.add_processed_object(object.clone()).await?;
info!("Successfully healed object {}/{}", bucket, object);
// check if already processed
if checkpoint_manager.get_checkpoint().await.processed_objects.contains(&object) {
global_obj_idx += 1;
continue;
}
Ok((_, Some(err))) => {
*failed_objects += 1;
checkpoint_manager.add_failed_object(object.clone()).await?;
warn!("Failed to heal object {}/{}: {}", bucket, object, err);
}
Err(err) => {
*failed_objects += 1;
checkpoint_manager.add_failed_object(object.clone()).await?;
warn!("Error healing object {}/{}: {}", bucket, object, err);
}
}
*processed_objects += 1;
*current_object_index = obj_idx + 1;
// check cancel status
if self.cancel_token.is_cancelled() {
info!("Heal task cancelled during object processing");
return Err(Error::TaskCancelled);
}
// save checkpoint periodically
if obj_idx % 100 == 0 {
checkpoint_manager
.update_position(bucket_index, *current_object_index)
// update current object
resume_manager
.set_current_item(Some(bucket.to_string()), Some(object.clone()))
.await?;
// Check if object still exists before attempting heal
let object_exists = match self.storage.object_exists(bucket, &object).await {
Ok(exists) => exists,
Err(e) => {
warn!("Failed to check existence of {}/{}: {}, marking as failed", bucket, object, e);
*failed_objects += 1;
checkpoint_manager.add_failed_object(object.clone()).await?;
global_obj_idx += 1;
*current_object_index = global_obj_idx;
continue;
}
};
if !object_exists {
info!(
target: "rustfs:ahm:heal_bucket_with_resume" ,"Object {}/{} no longer exists, skipping heal (likely deleted intentionally)",
bucket, object
);
checkpoint_manager.add_processed_object(object.clone()).await?;
*successful_objects += 1; // Treat as successful - object is gone as intended
global_obj_idx += 1;
*current_object_index = global_obj_idx;
continue;
}
// heal object
let heal_opts = HealOpts {
scan_mode: HealScanMode::Normal,
remove: true,
recreate: true, // Keep recreate enabled for legitimate heal scenarios
..Default::default()
};
match self.storage.heal_object(bucket, &object, None, &heal_opts).await {
Ok((_result, None)) => {
*successful_objects += 1;
checkpoint_manager.add_processed_object(object.clone()).await?;
info!("Successfully healed object {}/{}", bucket, object);
}
Ok((_, Some(err))) => {
*failed_objects += 1;
checkpoint_manager.add_failed_object(object.clone()).await?;
warn!("Failed to heal object {}/{}: {}", bucket, object, err);
}
Err(err) => {
*failed_objects += 1;
checkpoint_manager.add_failed_object(object.clone()).await?;
warn!("Error healing object {}/{}: {}", bucket, object, err);
}
}
*processed_objects += 1;
global_obj_idx += 1;
*current_object_index = global_obj_idx;
// check cancel status
if self.cancel_token.is_cancelled() {
info!("Heal task cancelled during object processing");
return Err(Error::TaskCancelled);
}
// save checkpoint periodically
if global_obj_idx % 100 == 0 {
checkpoint_manager
.update_position(bucket_index, *current_object_index)
.await?;
}
}
// Check if there are more pages
if !is_truncated {
break;
}
continuation_token = next_token;
if continuation_token.is_none() {
warn!("List is truncated but no continuation token provided for {}", bucket);
break;
}
}
@@ -363,7 +397,7 @@ impl ErasureSetHealer {
let _permit = semaphore
.acquire()
.await
.map_err(|e| Error::other(format!("Failed to acquire semaphore for bucket heal: {}", e)))?;
.map_err(|e| Error::other(format!("Failed to acquire semaphore for bucket heal: {e}")))?;
if cancel_token.is_cancelled() {
return Err(Error::TaskCancelled);
@@ -395,16 +429,12 @@ impl ErasureSetHealer {
}
};
// 2. get objects to heal
let objects = storage.list_objects_for_heal(bucket, "").await?;
// 2. process objects with pagination to avoid loading all objects into memory
let mut continuation_token: Option<String> = None;
let mut total_scanned = 0u64;
let mut total_success = 0u64;
let mut total_failed = 0u64;
// 3. update progress
{
let mut p = progress.write().await;
p.objects_scanned += objects.len() as u64;
}
// 4. heal objects concurrently
let heal_opts = HealOpts {
scan_mode: HealScanMode::Normal,
remove: true, // remove corrupted data
@@ -412,27 +442,65 @@ impl ErasureSetHealer {
..Default::default()
};
let object_results = Self::heal_objects_concurrently(storage, bucket, &objects, &heal_opts, progress).await;
loop {
// Get one page of objects
let (objects, next_token, is_truncated) = storage
.list_objects_for_heal_page(bucket, "", continuation_token.as_deref())
.await?;
// 5. count results
let (success_count, failure_count) = object_results
.into_iter()
.fold((0, 0), |(success, failure), result| match result {
Ok(_) => (success + 1, failure),
Err(_) => (success, failure + 1),
});
let page_count = objects.len() as u64;
total_scanned += page_count;
// 6. update progress
// 3. update progress
{
let mut p = progress.write().await;
p.objects_scanned = total_scanned;
}
// 4. heal objects concurrently for this page
let object_results = Self::heal_objects_concurrently(storage, bucket, &objects, &heal_opts, progress).await;
// 5. count results for this page
let (success_count, failure_count) =
object_results
.into_iter()
.fold((0, 0), |(success, failure), result| match result {
Ok(_) => (success + 1, failure),
Err(_) => (success, failure + 1),
});
total_success += success_count;
total_failed += failure_count;
// 6. update progress
{
let mut p = progress.write().await;
p.objects_healed = total_success;
p.objects_failed = total_failed;
p.set_current_object(Some(format!("processing bucket: {bucket} (page)")));
}
// Check if there are more pages
if !is_truncated {
break;
}
continuation_token = next_token;
if continuation_token.is_none() {
warn!("List is truncated but no continuation token provided for {}", bucket);
break;
}
}
// 7. final progress update
{
let mut p = progress.write().await;
p.objects_healed += success_count;
p.objects_failed += failure_count;
p.set_current_object(Some(format!("completed bucket: {bucket}")));
}
info!(
"Completed heal for bucket {}: {} success, {} failures",
bucket, success_count, failure_count
"Completed heal for bucket {}: {} success, {} failures (total scanned: {})",
bucket, total_success, total_failed, total_scanned
);
Ok(())
@@ -461,7 +529,7 @@ impl ErasureSetHealer {
let _permit = semaphore
.acquire()
.await
.map_err(|e| Error::other(format!("Failed to acquire semaphore for object heal: {}", e)))?;
.map_err(|e| Error::other(format!("Failed to acquire semaphore for object heal: {e}")))?;
match storage.heal_object(&bucket, &object, None, &heal_opts).await {
Ok((_result, None)) => {

View File

@@ -22,7 +22,7 @@ use rustfs_ecstore::disk::DiskAPI;
use rustfs_ecstore::disk::error::DiskError;
use rustfs_ecstore::global::GLOBAL_LOCAL_DISK_MAP;
use std::{
collections::{HashMap, VecDeque},
collections::{BinaryHeap, HashMap, HashSet},
sync::Arc,
time::{Duration, SystemTime},
};
@@ -33,6 +33,151 @@ use tokio::{
use tokio_util::sync::CancellationToken;
use tracing::{error, info, warn};
/// Priority queue wrapper for heal requests
/// Uses BinaryHeap for priority-based ordering while maintaining FIFO for same-priority items
#[derive(Debug)]
struct PriorityHealQueue {
/// Heap of (priority, sequence, request) tuples
heap: BinaryHeap<PriorityQueueItem>,
/// Sequence counter for FIFO ordering within same priority
sequence: u64,
/// Set of request keys to prevent duplicates
dedup_keys: HashSet<String>,
}
/// Wrapper for heap items to implement proper ordering
#[derive(Debug)]
struct PriorityQueueItem {
priority: HealPriority,
sequence: u64,
request: HealRequest,
}
impl Eq for PriorityQueueItem {}
impl PartialEq for PriorityQueueItem {
fn eq(&self, other: &Self) -> bool {
self.priority == other.priority && self.sequence == other.sequence
}
}
impl Ord for PriorityQueueItem {
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
// First compare by priority (higher priority first)
match self.priority.cmp(&other.priority) {
std::cmp::Ordering::Equal => {
// If priorities are equal, use sequence for FIFO (lower sequence first)
other.sequence.cmp(&self.sequence)
}
ordering => ordering,
}
}
}
impl PartialOrd for PriorityQueueItem {
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
Some(self.cmp(other))
}
}
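The reversed sequence comparison is what turns Rust's max-heap `BinaryHeap` into "highest priority first, FIFO within a priority". A self-contained toy (with `u8` standing in for `HealPriority`; not part of the change) shows the effect:

use std::cmp::Ordering;
use std::collections::BinaryHeap;

// BinaryHeap pops the "greatest" item first. Comparing `other.sequence` against
// `self.sequence` makes the *lower* sequence number the greater item at equal
// priority, which yields FIFO ordering within a priority level.
#[derive(Eq, PartialEq)]
struct Item {
    priority: u8,
    sequence: u64,
}

impl Ord for Item {
    fn cmp(&self, other: &Self) -> Ordering {
        self.priority
            .cmp(&other.priority)
            .then_with(|| other.sequence.cmp(&self.sequence))
    }
}

impl PartialOrd for Item {
    fn partial_cmp(&self, other: &Self) -> Option<Ordering> {
        Some(self.cmp(other))
    }
}

fn main() {
    let mut heap = BinaryHeap::new();
    heap.push(Item { priority: 1, sequence: 1 });
    heap.push(Item { priority: 1, sequence: 2 });
    heap.push(Item { priority: 2, sequence: 3 });
    // Pops: (2,3) first (higher priority), then (1,1) before (1,2) (FIFO).
    assert_eq!(heap.pop().map(|i| i.sequence), Some(3));
    assert_eq!(heap.pop().map(|i| i.sequence), Some(1));
    assert_eq!(heap.pop().map(|i| i.sequence), Some(2));
}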
impl PriorityHealQueue {
fn new() -> Self {
Self {
heap: BinaryHeap::new(),
sequence: 0,
dedup_keys: HashSet::new(),
}
}
fn len(&self) -> usize {
self.heap.len()
}
fn is_empty(&self) -> bool {
self.heap.is_empty()
}
fn push(&mut self, request: HealRequest) -> bool {
let key = Self::make_dedup_key(&request);
// Check for duplicates
if self.dedup_keys.contains(&key) {
return false; // Duplicate request, don't add
}
self.dedup_keys.insert(key);
self.sequence += 1;
self.heap.push(PriorityQueueItem {
priority: request.priority,
sequence: self.sequence,
request,
});
true
}
/// Get statistics about queue contents by priority
fn get_priority_stats(&self) -> HashMap<HealPriority, usize> {
let mut stats = HashMap::new();
for item in &self.heap {
*stats.entry(item.priority).or_insert(0) += 1;
}
stats
}
fn pop(&mut self) -> Option<HealRequest> {
self.heap.pop().map(|item| {
let key = Self::make_dedup_key(&item.request);
self.dedup_keys.remove(&key);
item.request
})
}
/// Create a deduplication key from a heal request
fn make_dedup_key(request: &HealRequest) -> String {
match &request.heal_type {
HealType::Object {
bucket,
object,
version_id,
} => {
format!("object:{}:{}:{}", bucket, object, version_id.as_deref().unwrap_or(""))
}
HealType::Bucket { bucket } => {
format!("bucket:{bucket}")
}
HealType::ErasureSet { set_disk_id, .. } => {
format!("erasure_set:{set_disk_id}")
}
HealType::Metadata { bucket, object } => {
format!("metadata:{bucket}:{object}")
}
HealType::MRF { meta_path } => {
format!("mrf:{meta_path}")
}
HealType::ECDecode {
bucket,
object,
version_id,
} => {
format!("ecdecode:{}:{}:{}", bucket, object, version_id.as_deref().unwrap_or(""))
}
}
}
/// Check if a request with the same key already exists in the queue
#[allow(dead_code)]
fn contains_key(&self, request: &HealRequest) -> bool {
let key = Self::make_dedup_key(request);
self.dedup_keys.contains(&key)
}
/// Check if an erasure set heal request for a specific set_disk_id exists
fn contains_erasure_set(&self, set_disk_id: &str) -> bool {
let key = format!("erasure_set:{set_disk_id}");
self.dedup_keys.contains(&key)
}
}
/// Heal config
#[derive(Debug, Clone)]
pub struct HealConfig {
@@ -50,12 +195,28 @@ pub struct HealConfig {
impl Default for HealConfig {
fn default() -> Self {
let queue_size: usize =
rustfs_utils::get_env_usize(rustfs_config::ENV_HEAL_QUEUE_SIZE, rustfs_config::DEFAULT_HEAL_QUEUE_SIZE);
let heal_interval = Duration::from_secs(rustfs_utils::get_env_u64(
rustfs_config::ENV_HEAL_INTERVAL_SECS,
rustfs_config::DEFAULT_HEAL_INTERVAL_SECS,
));
let enable_auto_heal =
rustfs_utils::get_env_bool(rustfs_config::ENV_HEAL_AUTO_HEAL_ENABLE, rustfs_config::DEFAULT_HEAL_AUTO_HEAL_ENABLE);
let task_timeout = Duration::from_secs(rustfs_utils::get_env_u64(
rustfs_config::ENV_HEAL_TASK_TIMEOUT_SECS,
rustfs_config::DEFAULT_HEAL_TASK_TIMEOUT_SECS,
));
let max_concurrent_heals = rustfs_utils::get_env_usize(
rustfs_config::ENV_HEAL_MAX_CONCURRENT_HEALS,
rustfs_config::DEFAULT_HEAL_MAX_CONCURRENT_HEALS,
);
Self {
enable_auto_heal: true,
heal_interval: Duration::from_secs(10), // 10 seconds
max_concurrent_heals: 4,
task_timeout: Duration::from_secs(300), // 5 minutes
queue_size: 1000,
enable_auto_heal,
heal_interval,        // default: 10 seconds
max_concurrent_heals, // default: 4
task_timeout,         // default: 300 seconds (5 minutes)
queue_size,
}
}
}
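The defaults above are now resolved from environment variables. As a hedged sketch of the fallback semantics assumed for the `rustfs_utils::get_env_*` helpers (read the variable, parse it, fall back to the supplied default when unset or unparsable; `env_or` is a hypothetical re-statement, not the real helper):

use std::env;
use std::str::FromStr;

// Assumed behavior of the get_env_* helpers: value from the environment when
// present and parsable, otherwise the compiled-in default.
fn env_or<T: FromStr + Copy>(key: &str, default: T) -> T {
    env::var(key)
        .ok()
        .and_then(|v| v.parse::<T>().ok())
        .unwrap_or(default)
}

fn main() {
    let queue_size = env_or("RUSTFS_HEAL_QUEUE_SIZE", 10_000usize);
    let max_heals = env_or("RUSTFS_HEAL_MAX_CONCURRENT_HEALS", 4usize);
    // With the variables unset this prints the defaults; running the process
    // with e.g. RUSTFS_HEAL_MAX_CONCURRENT_HEALS=8 overrides them.
    println!("queue_size={queue_size} max_concurrent_heals={max_heals}");
}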
@@ -85,8 +246,8 @@ pub struct HealManager {
state: Arc<RwLock<HealState>>,
/// Active heal tasks
active_heals: Arc<Mutex<HashMap<String, Arc<HealTask>>>>,
/// Heal queue
heal_queue: Arc<Mutex<VecDeque<HealRequest>>>,
/// Heal queue (priority-based)
heal_queue: Arc<Mutex<PriorityHealQueue>>,
/// Storage layer interface
storage: Arc<dyn HealStorageAPI>,
/// Cancel token
@@ -103,7 +264,7 @@ impl HealManager {
config: Arc::new(RwLock::new(config)),
state: Arc::new(RwLock::new(HealState::default())),
active_heals: Arc::new(Mutex::new(HashMap::new())),
heal_queue: Arc::new(Mutex::new(VecDeque::new())),
heal_queue: Arc::new(Mutex::new(PriorityHealQueue::new())),
storage,
cancel_token: CancellationToken::new(),
statistics: Arc::new(RwLock::new(HealStatistics::new())),
@@ -125,7 +286,7 @@ impl HealManager {
// start scheduler
self.start_scheduler().await?;
// start auto disk scanner
// start auto disk scanner to heal unformatted disks
self.start_auto_disk_scanner().await?;
info!("HealManager started successfully");
@@ -161,17 +322,54 @@ impl HealManager {
let config = self.config.read().await;
let mut queue = self.heal_queue.lock().await;
if queue.len() >= config.queue_size {
let queue_len = queue.len();
let queue_capacity = config.queue_size;
if queue_len >= queue_capacity {
return Err(Error::ConfigurationError {
message: "Heal queue is full".to_string(),
message: format!("Heal queue is full ({queue_len}/{queue_capacity})"),
});
}
// Warn when queue is getting full (>80% capacity)
let capacity_threshold = (queue_capacity as f64 * 0.8) as usize;
if queue_len >= capacity_threshold {
warn!(
"Heal queue is {}% full ({}/{}). Consider increasing queue size or processing capacity.",
(queue_len * 100) / queue_capacity,
queue_len,
queue_capacity
);
}
let request_id = request.id.clone();
queue.push_back(request);
let priority = request.priority;
// Try to push the request; if it's a duplicate, still return the request_id
let is_new = queue.push(request);
// Log queue statistics when adding high/urgent priority items
if matches!(priority, HealPriority::High | HealPriority::Urgent) {
let stats = queue.get_priority_stats();
info!(
"Heal queue stats after adding {:?} priority request: total={}, urgent={}, high={}, normal={}, low={}",
priority,
queue_len + 1,
stats.get(&HealPriority::Urgent).unwrap_or(&0),
stats.get(&HealPriority::High).unwrap_or(&0),
stats.get(&HealPriority::Normal).unwrap_or(&0),
stats.get(&HealPriority::Low).unwrap_or(&0)
);
}
drop(queue);
info!("Submitted heal request: {}", request_id);
if is_new {
info!("Submitted heal request: {} with priority: {:?}", request_id, priority);
} else {
info!("Heal request already queued (duplicate): {}", request_id);
}
Ok(request_id)
}
@@ -271,13 +469,18 @@ impl HealManager {
let cancel_token = self.cancel_token.clone();
let storage = self.storage.clone();
info!(
"start_auto_disk_scanner: Starting auto disk scanner with interval: {:?}",
config.read().await.heal_interval
);
tokio::spawn(async move {
let mut interval = interval(config.read().await.heal_interval);
loop {
tokio::select! {
_ = cancel_token.cancelled() => {
info!("Auto disk scanner received shutdown signal");
info!("start_auto_disk_scanner: Auto disk scanner received shutdown signal");
break;
}
_ = interval.tick() => {
@@ -296,6 +499,7 @@ impl HealManager {
}
if endpoints.is_empty() {
info!("start_auto_disk_scanner: No endpoints need healing");
continue;
}
@@ -303,7 +507,7 @@ impl HealManager {
let buckets = match storage.list_buckets().await {
Ok(buckets) => buckets.iter().map(|b| b.name.clone()).collect::<Vec<String>>(),
Err(e) => {
error!("Failed to get bucket list for auto healing: {}", e);
error!("start_auto_disk_scanner: Failed to get bucket list for auto healing: {}", e);
continue;
}
};
@@ -313,7 +517,7 @@ impl HealManager {
let Some(set_disk_id) =
crate::heal::utils::format_set_disk_id_from_i32(ep.pool_idx, ep.set_idx)
else {
warn!("Skipping endpoint {} without valid pool/set index", ep);
warn!("start_auto_disk_scanner: Skipping endpoint {} without valid pool/set index", ep);
continue;
};
// skip if already queued or healing
@@ -321,13 +525,7 @@ impl HealManager {
let mut skip = false;
{
let queue = heal_queue.lock().await;
if queue.iter().any(|req| {
matches!(
&req.heal_type,
crate::heal::task::HealType::ErasureSet { set_disk_id: queued_id, .. }
if queued_id == &set_disk_id
)
}) {
if queue.contains_erasure_set(&set_disk_id) {
skip = true;
}
}
@@ -345,6 +543,7 @@ impl HealManager {
}
if skip {
info!("start_auto_disk_scanner: Skipping auto erasure set heal for endpoint: {} (set_disk_id: {}) because it is already queued or healing", ep, set_disk_id);
continue;
}
@@ -358,8 +557,8 @@ impl HealManager {
HealPriority::Normal,
);
let mut queue = heal_queue.lock().await;
queue.push_back(req);
info!("Enqueued auto erasure set heal for endpoint: {} (set_disk_id: {})", ep, set_disk_id);
queue.push(req);
info!("start_auto_disk_scanner: Enqueued auto erasure set heal for endpoint: {} (set_disk_id: {})", ep, set_disk_id);
}
}
}
@@ -369,8 +568,9 @@ impl HealManager {
}
/// Process heal queue
/// Processes up to two tasks per cycle when concurrency slots are available
async fn process_heal_queue(
heal_queue: &Arc<Mutex<VecDeque<HealRequest>>>,
heal_queue: &Arc<Mutex<PriorityHealQueue>>,
active_heals: &Arc<Mutex<HashMap<String, Arc<HealTask>>>>,
config: &Arc<RwLock<HealConfig>>,
statistics: &Arc<RwLock<HealStatistics>>,
@@ -379,51 +579,83 @@ impl HealManager {
let config = config.read().await;
let mut active_heals_guard = active_heals.lock().await;
// check if new heal tasks can be started
if active_heals_guard.len() >= config.max_concurrent_heals {
// Check if new heal tasks can be started
let active_count = active_heals_guard.len();
if active_count >= config.max_concurrent_heals {
return;
}
// Calculate how many tasks we can start this cycle
let available_slots = config.max_concurrent_heals - active_count;
let mut queue = heal_queue.lock().await;
if let Some(request) = queue.pop_front() {
let task = Arc::new(HealTask::from_request(request, storage.clone()));
let task_id = task.id.clone();
active_heals_guard.insert(task_id.clone(), task.clone());
drop(active_heals_guard);
let active_heals_clone = active_heals.clone();
let statistics_clone = statistics.clone();
let queue_len = queue.len();
// start heal task
tokio::spawn(async move {
info!("Starting heal task: {}", task_id);
let result = task.execute().await;
match result {
Ok(_) => {
info!("Heal task completed successfully: {}", task_id);
}
Err(e) => {
error!("Heal task failed: {} - {}", task_id, e);
}
}
let mut active_heals_guard = active_heals_clone.lock().await;
if let Some(completed_task) = active_heals_guard.remove(&task_id) {
// update statistics
let mut stats = statistics_clone.write().await;
match completed_task.get_status().await {
HealTaskStatus::Completed => {
stats.update_task_completion(true);
if queue_len == 0 {
return;
}
// Process multiple tasks if:
// 1. We have available slots
// 2. Queue is not empty
// Prioritize urgent/high priority tasks by processing up to 2 tasks per cycle if available
let tasks_to_process = if queue_len > 0 {
std::cmp::min(available_slots, std::cmp::min(2, queue_len))
} else {
0
};
for _ in 0..tasks_to_process {
if let Some(request) = queue.pop() {
let task_priority = request.priority;
let task = Arc::new(HealTask::from_request(request, storage.clone()));
let task_id = task.id.clone();
active_heals_guard.insert(task_id.clone(), task.clone());
let active_heals_clone = active_heals.clone();
let statistics_clone = statistics.clone();
// start heal task
tokio::spawn(async move {
info!("Starting heal task: {} with priority: {:?}", task_id, task_priority);
let result = task.execute().await;
match result {
Ok(_) => {
info!("Heal task completed successfully: {}", task_id);
}
_ => {
stats.update_task_completion(false);
Err(e) => {
error!("Heal task failed: {} - {}", task_id, e);
}
}
stats.update_running_tasks(active_heals_guard.len() as u64);
}
});
let mut active_heals_guard = active_heals_clone.lock().await;
if let Some(completed_task) = active_heals_guard.remove(&task_id) {
// update statistics
let mut stats = statistics_clone.write().await;
match completed_task.get_status().await {
HealTaskStatus::Completed => {
stats.update_task_completion(true);
}
_ => {
stats.update_task_completion(false);
}
}
stats.update_running_tasks(active_heals_guard.len() as u64);
}
});
} else {
break;
}
}
// update statistics
let mut stats = statistics.write().await;
stats.total_tasks += 1;
// Update statistics for all started tasks
let mut stats = statistics.write().await;
stats.total_tasks += tasks_to_process as u64;
// Log queue status if items remain
if !queue.is_empty() {
let remaining = queue.len();
if remaining > 10 {
info!("Heal queue has {} pending requests, {} tasks active", remaining, active_heals_guard.len());
}
}
}
}
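The batching rule above can be restated as a small pure function (a sketch only; `tasks_to_process` here is an illustration, not code from the change):

// Restatement of: min(available_slots, min(2, queue_len)).
fn tasks_to_process(max_concurrent: usize, active: usize, queued: usize) -> usize {
    // The early return in process_heal_queue guarantees active < max_concurrent;
    // saturating_sub just keeps this standalone sketch total.
    let available_slots = max_concurrent.saturating_sub(active);
    available_slots.min(2).min(queued)
}

fn main() {
    assert_eq!(tasks_to_process(4, 3, 5), 1); // one free slot caps the batch
    assert_eq!(tasks_to_process(4, 0, 5), 2); // per-cycle cap of two applies
    assert_eq!(tasks_to_process(4, 0, 1), 1); // queue shorter than the cap
}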
@@ -438,3 +670,333 @@ impl std::fmt::Debug for HealManager {
.finish()
}
}
#[cfg(test)]
mod tests {
use super::*;
use crate::heal::task::{HealOptions, HealPriority, HealRequest, HealType};
#[test]
fn test_priority_queue_ordering() {
let mut queue = PriorityHealQueue::new();
// Add requests with different priorities
let low_req = HealRequest::new(
HealType::Bucket {
bucket: "bucket1".to_string(),
},
HealOptions::default(),
HealPriority::Low,
);
let normal_req = HealRequest::new(
HealType::Bucket {
bucket: "bucket2".to_string(),
},
HealOptions::default(),
HealPriority::Normal,
);
let high_req = HealRequest::new(
HealType::Bucket {
bucket: "bucket3".to_string(),
},
HealOptions::default(),
HealPriority::High,
);
let urgent_req = HealRequest::new(
HealType::Bucket {
bucket: "bucket4".to_string(),
},
HealOptions::default(),
HealPriority::Urgent,
);
// Add in random order: low, high, normal, urgent
assert!(queue.push(low_req));
assert!(queue.push(high_req));
assert!(queue.push(normal_req));
assert!(queue.push(urgent_req));
assert_eq!(queue.len(), 4);
// Should pop in priority order: urgent, high, normal, low
let popped1 = queue.pop().unwrap();
assert_eq!(popped1.priority, HealPriority::Urgent);
let popped2 = queue.pop().unwrap();
assert_eq!(popped2.priority, HealPriority::High);
let popped3 = queue.pop().unwrap();
assert_eq!(popped3.priority, HealPriority::Normal);
let popped4 = queue.pop().unwrap();
assert_eq!(popped4.priority, HealPriority::Low);
assert_eq!(queue.len(), 0);
}
#[test]
fn test_priority_queue_fifo_same_priority() {
let mut queue = PriorityHealQueue::new();
// Add multiple requests with same priority
let req1 = HealRequest::new(
HealType::Bucket {
bucket: "bucket1".to_string(),
},
HealOptions::default(),
HealPriority::Normal,
);
let req2 = HealRequest::new(
HealType::Bucket {
bucket: "bucket2".to_string(),
},
HealOptions::default(),
HealPriority::Normal,
);
let req3 = HealRequest::new(
HealType::Bucket {
bucket: "bucket3".to_string(),
},
HealOptions::default(),
HealPriority::Normal,
);
let id1 = req1.id.clone();
let id2 = req2.id.clone();
let id3 = req3.id.clone();
assert!(queue.push(req1));
assert!(queue.push(req2));
assert!(queue.push(req3));
// Should maintain FIFO order for same priority
let popped1 = queue.pop().unwrap();
assert_eq!(popped1.id, id1);
let popped2 = queue.pop().unwrap();
assert_eq!(popped2.id, id2);
let popped3 = queue.pop().unwrap();
assert_eq!(popped3.id, id3);
}
#[test]
fn test_priority_queue_deduplication() {
let mut queue = PriorityHealQueue::new();
let req1 = HealRequest::new(
HealType::Object {
bucket: "bucket1".to_string(),
object: "object1".to_string(),
version_id: None,
},
HealOptions::default(),
HealPriority::Normal,
);
let req2 = HealRequest::new(
HealType::Object {
bucket: "bucket1".to_string(),
object: "object1".to_string(),
version_id: None,
},
HealOptions::default(),
HealPriority::High,
);
// First request should be added
assert!(queue.push(req1));
assert_eq!(queue.len(), 1);
// Second request with same object should be rejected (duplicate)
assert!(!queue.push(req2));
assert_eq!(queue.len(), 1);
}
#[test]
fn test_priority_queue_contains_erasure_set() {
let mut queue = PriorityHealQueue::new();
let req = HealRequest::new(
HealType::ErasureSet {
buckets: vec!["bucket1".to_string()],
set_disk_id: "pool_0_set_1".to_string(),
},
HealOptions::default(),
HealPriority::Normal,
);
assert!(queue.push(req));
assert!(queue.contains_erasure_set("pool_0_set_1"));
assert!(!queue.contains_erasure_set("pool_0_set_2"));
}
#[test]
fn test_priority_queue_dedup_key_generation() {
// Test different heal types generate different keys
let obj_req = HealRequest::new(
HealType::Object {
bucket: "bucket1".to_string(),
object: "object1".to_string(),
version_id: None,
},
HealOptions::default(),
HealPriority::Normal,
);
let bucket_req = HealRequest::new(
HealType::Bucket {
bucket: "bucket1".to_string(),
},
HealOptions::default(),
HealPriority::Normal,
);
let erasure_req = HealRequest::new(
HealType::ErasureSet {
buckets: vec!["bucket1".to_string()],
set_disk_id: "pool_0_set_1".to_string(),
},
HealOptions::default(),
HealPriority::Normal,
);
let obj_key = PriorityHealQueue::make_dedup_key(&obj_req);
let bucket_key = PriorityHealQueue::make_dedup_key(&bucket_req);
let erasure_key = PriorityHealQueue::make_dedup_key(&erasure_req);
// All keys should be different
assert_ne!(obj_key, bucket_key);
assert_ne!(obj_key, erasure_key);
assert_ne!(bucket_key, erasure_key);
assert!(obj_key.starts_with("object:"));
assert!(bucket_key.starts_with("bucket:"));
assert!(erasure_key.starts_with("erasure_set:"));
}
#[test]
fn test_priority_queue_mixed_priorities_and_types() {
let mut queue = PriorityHealQueue::new();
// Add various requests
let requests = vec![
(
HealType::Object {
bucket: "b1".to_string(),
object: "o1".to_string(),
version_id: None,
},
HealPriority::Low,
),
(
HealType::Bucket {
bucket: "b2".to_string(),
},
HealPriority::Urgent,
),
(
HealType::ErasureSet {
buckets: vec!["b3".to_string()],
set_disk_id: "pool_0_set_1".to_string(),
},
HealPriority::Normal,
),
(
HealType::Object {
bucket: "b4".to_string(),
object: "o4".to_string(),
version_id: None,
},
HealPriority::High,
),
];
for (heal_type, priority) in requests {
let req = HealRequest::new(heal_type, HealOptions::default(), priority);
queue.push(req);
}
assert_eq!(queue.len(), 4);
// Check they come out in priority order
let priorities: Vec<HealPriority> = (0..4).filter_map(|_| queue.pop().map(|r| r.priority)).collect();
assert_eq!(
priorities,
vec![
HealPriority::Urgent,
HealPriority::High,
HealPriority::Normal,
HealPriority::Low,
]
);
}
#[test]
fn test_priority_queue_stats() {
let mut queue = PriorityHealQueue::new();
// Add requests with different priorities
for _ in 0..3 {
queue.push(HealRequest::new(
HealType::Bucket {
bucket: format!("bucket-low-{}", queue.len()),
},
HealOptions::default(),
HealPriority::Low,
));
}
for _ in 0..2 {
queue.push(HealRequest::new(
HealType::Bucket {
bucket: format!("bucket-normal-{}", queue.len()),
},
HealOptions::default(),
HealPriority::Normal,
));
}
queue.push(HealRequest::new(
HealType::Bucket {
bucket: "bucket-high".to_string(),
},
HealOptions::default(),
HealPriority::High,
));
let stats = queue.get_priority_stats();
assert_eq!(*stats.get(&HealPriority::Low).unwrap_or(&0), 3);
assert_eq!(*stats.get(&HealPriority::Normal).unwrap_or(&0), 2);
assert_eq!(*stats.get(&HealPriority::High).unwrap_or(&0), 1);
assert_eq!(*stats.get(&HealPriority::Urgent).unwrap_or(&0), 0);
}
#[test]
fn test_priority_queue_is_empty() {
let mut queue = PriorityHealQueue::new();
assert!(queue.is_empty());
queue.push(HealRequest::new(
HealType::Bucket {
bucket: "test".to_string(),
},
HealOptions::default(),
HealPriority::Normal,
));
assert!(!queue.is_empty());
queue.pop();
assert!(queue.is_empty());
}
}

View File

@@ -30,7 +30,7 @@ const RESUME_CHECKPOINT_FILE: &str = "ahm_checkpoint.json";
/// Helper function to convert Path to &str, returning an error if conversion fails
fn path_to_str(path: &Path) -> Result<&str> {
path.to_str()
.ok_or_else(|| Error::other(format!("Invalid UTF-8 path: {:?}", path)))
.ok_or_else(|| Error::other(format!("Invalid UTF-8 path: {path:?}")))
}
/// resume state

View File

@@ -107,9 +107,21 @@ pub trait HealStorageAPI: Send + Sync {
/// Heal format using ecstore
async fn heal_format(&self, dry_run: bool) -> Result<(HealResultItem, Option<Error>)>;
/// List objects for healing
/// List objects for healing (returns all objects, may use significant memory for large buckets)
///
/// WARNING: This method loads all objects into memory at once. For buckets with many objects,
/// consider using `list_objects_for_heal_page` instead to process objects in pages.
async fn list_objects_for_heal(&self, bucket: &str, prefix: &str) -> Result<Vec<String>>;
/// List objects for healing with pagination (returns one page and continuation token)
/// Returns (objects, next_continuation_token, is_truncated)
async fn list_objects_for_heal_page(
&self,
bucket: &str,
prefix: &str,
continuation_token: Option<&str>,
) -> Result<(Vec<String>, Option<String>, bool)>;
/// Get disk for resume functionality
async fn get_disk_for_resume(&self, set_disk_id: &str) -> Result<DiskStore>;
}
@@ -180,8 +192,7 @@ impl HealStorageAPI for ECStoreHealStorage {
MAX_READ_BYTES, bucket, object
);
return Err(Error::other(format!(
"Object too large: {} bytes (max: {} bytes) for {}/{}",
n_read, MAX_READ_BYTES, bucket, object
"Object too large: {n_read} bytes (max: {MAX_READ_BYTES} bytes) for {bucket}/{object}"
)));
}
}
@@ -401,13 +412,13 @@ impl HealStorageAPI for ECStoreHealStorage {
match self.ecstore.get_object_info(bucket, object, &Default::default()).await {
Ok(_) => Ok(true), // Object exists
Err(e) => {
// Map ObjectNotFound to false, other errors to false as well for safety
// Map ObjectNotFound to false, other errors must be propagated!
if matches!(e, rustfs_ecstore::error::StorageError::ObjectNotFound(_, _)) {
debug!("Object not found: {}/{}", bucket, object);
Ok(false)
} else {
debug!("Error checking object existence {}/{}: {}", bucket, object, e);
Ok(false) // Treat errors as non-existence to be safe
error!("Error checking object existence {}/{}: {}", bucket, object, e);
Err(Error::other(e))
}
}
}
@@ -494,24 +505,67 @@ impl HealStorageAPI for ECStoreHealStorage {
async fn list_objects_for_heal(&self, bucket: &str, prefix: &str) -> Result<Vec<String>> {
debug!("Listing objects for heal: {}/{}", bucket, prefix);
warn!(
"list_objects_for_heal loads all objects into memory. For large buckets, consider using list_objects_for_heal_page instead."
);
// Use list_objects_v2 to get objects
match self
.ecstore
.clone()
.list_objects_v2(bucket, prefix, None, None, 1000, false, None)
.await
{
Ok(list_info) => {
let objects: Vec<String> = list_info.objects.into_iter().map(|obj| obj.name).collect();
info!("Found {} objects for heal in {}/{}", objects.len(), bucket, prefix);
Ok(objects)
let mut all_objects = Vec::new();
let mut continuation_token: Option<String> = None;
loop {
let (page_objects, next_token, is_truncated) = self
.list_objects_for_heal_page(bucket, prefix, continuation_token.as_deref())
.await?;
all_objects.extend(page_objects);
if !is_truncated {
break;
}
Err(e) => {
error!("Failed to list objects for heal: {}/{} - {}", bucket, prefix, e);
Err(Error::other(e))
continuation_token = next_token;
if continuation_token.is_none() {
warn!("List is truncated but no continuation token provided for {}/{}", bucket, prefix);
break;
}
}
info!("Found {} objects for heal in {}/{}", all_objects.len(), bucket, prefix);
Ok(all_objects)
}
async fn list_objects_for_heal_page(
&self,
bucket: &str,
prefix: &str,
continuation_token: Option<&str>,
) -> Result<(Vec<String>, Option<String>, bool)> {
debug!("Listing objects for heal (page): {}/{}", bucket, prefix);
const MAX_KEYS: i32 = 1000;
let continuation_token_opt = continuation_token.map(|s| s.to_string());
// Use list_objects_v2 to get objects with pagination
let list_info = match self
.ecstore
.clone()
.list_objects_v2(bucket, prefix, continuation_token_opt, None, MAX_KEYS, false, None, false)
.await
{
Ok(info) => info,
Err(e) => {
error!("Failed to list objects for heal: {}/{} - {}", bucket, prefix, e);
return Err(Error::other(e));
}
};
// Collect objects from this page
let page_objects: Vec<String> = list_info.objects.into_iter().map(|obj| obj.name).collect();
let page_count = page_objects.len();
debug!("Listed {} objects (page) for heal in {}/{}", page_count, bucket, prefix);
Ok((page_objects, list_info.next_continuation_token, list_info.is_truncated))
}
async fn get_disk_for_resume(&self, set_disk_id: &str) -> Result<DiskStore> {

View File

@@ -51,7 +51,7 @@ pub enum HealType {
}
/// Heal priority
#[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
pub enum HealPriority {
/// Low priority
Low = 0,
@@ -272,6 +272,7 @@ impl HealTask {
}
}
#[tracing::instrument(skip(self), fields(task_id = %self.id, heal_type = ?self.heal_type))]
pub async fn execute(&self) -> Result<()> {
// update status and timestamps atomically to avoid race conditions
let now = SystemTime::now();
@@ -285,7 +286,7 @@ impl HealTask {
*task_start_instant = Some(start_instant);
}
info!("Starting heal task: {} with type: {:?}", self.id, self.heal_type);
info!("Task started");
let result = match &self.heal_type {
HealType::Object {
@@ -315,7 +316,7 @@ impl HealTask {
Ok(_) => {
let mut status = self.status.write().await;
*status = HealTaskStatus::Completed;
info!("Heal task completed successfully: {}", self.id);
info!("Task completed successfully");
}
Err(Error::TaskCancelled) => {
let mut status = self.status.write().await;
@@ -354,8 +355,9 @@ impl HealTask {
}
// specific heal implementation method
#[tracing::instrument(skip(self), fields(bucket = %bucket, object = %object, version_id = ?version_id))]
async fn heal_object(&self, bucket: &str, object: &str, version_id: Option<&str>) -> Result<()> {
info!("Healing object: {}/{}", bucket, object);
info!("Starting object heal workflow");
// update progress
{
@@ -365,7 +367,7 @@ impl HealTask {
}
// Step 1: Check if object exists and get metadata
info!("Step 1: Checking object existence and metadata");
warn!("Step 1: Checking object existence and metadata");
self.check_control_flags().await?;
let object_exists = self.await_with_control(self.storage.object_exists(bucket, object)).await?;
if !object_exists {
@@ -424,7 +426,7 @@ impl HealTask {
// If heal failed and remove_corrupted is enabled, delete the corrupted object
if self.options.remove_corrupted {
warn!("Removing corrupted object: {}/{}", bucket, object);
info!("Removing corrupted object: {}/{}", bucket, object);
if !self.options.dry_run {
self.await_with_control(self.storage.delete_object(bucket, object)).await?;
info!("Successfully deleted corrupted object: {}/{}", bucket, object);
@@ -447,11 +449,9 @@ impl HealTask {
info!("Step 3: Verifying heal result");
let object_size = result.object_size as u64;
info!(
"Heal completed successfully: {}/{} ({} bytes, {} drives healed)",
bucket,
object,
object_size,
result.after.drives.len()
object_size = object_size,
drives_healed = result.after.drives.len(),
"Heal completed successfully"
);
{
@@ -481,7 +481,7 @@ impl HealTask {
// If heal failed and remove_corrupted is enabled, delete the corrupted object
if self.options.remove_corrupted {
warn!("Removing corrupted object: {}/{}", bucket, object);
info!("Removing corrupted object: {}/{}", bucket, object);
if !self.options.dry_run {
self.await_with_control(self.storage.delete_object(bucket, object)).await?;
info!("Successfully deleted corrupted object: {}/{}", bucket, object);

View File

@@ -29,7 +29,7 @@ use rustfs_ecstore::{
self as ecstore, StorageAPI,
bucket::versioning::VersioningApi,
bucket::versioning_sys::BucketVersioningSys,
data_usage::{aggregate_local_snapshots, store_data_usage_in_backend},
data_usage::{aggregate_local_snapshots, compute_bucket_usage, store_data_usage_in_backend},
disk::{Disk, DiskAPI, DiskStore, RUSTFS_META_BUCKET, WalkDirOptions},
set_disk::SetDisks,
store_api::ObjectInfo,
@@ -137,6 +137,8 @@ pub struct Scanner {
data_usage_stats: Arc<Mutex<HashMap<String, DataUsageInfo>>>,
/// Last data usage statistics collection time
last_data_usage_collection: Arc<RwLock<Option<SystemTime>>>,
/// Backoff timestamp for heavy fallback collection
fallback_backoff_until: Arc<RwLock<Option<SystemTime>>>,
/// Heal manager for auto-heal integration
heal_manager: Option<Arc<HealManager>>,
@@ -192,6 +194,7 @@ impl Scanner {
disk_metrics: Arc::new(Mutex::new(HashMap::new())),
data_usage_stats: Arc::new(Mutex::new(HashMap::new())),
last_data_usage_collection: Arc::new(RwLock::new(None)),
fallback_backoff_until: Arc::new(RwLock::new(None)),
heal_manager,
node_scanner,
stats_aggregator,
@@ -473,6 +476,8 @@ impl Scanner {
size: usage.total_size as i64,
delete_marker: !usage.has_live_object && usage.delete_markers_count > 0,
mod_time: usage.last_modified_ns.and_then(Self::ns_to_offset_datetime),
// Set is_latest to true for live objects - required for lifecycle expiration evaluation
is_latest: usage.has_live_object,
..Default::default()
}
}
@@ -600,6 +605,7 @@ impl Scanner {
// Initialize and start the node scanner
self.node_scanner.initialize_stats().await?;
// update object count and size for each bucket
self.node_scanner.start().await?;
// Set local stats in aggregator
@@ -614,21 +620,6 @@ impl Scanner {
}
});
// Trigger an immediate data usage collection so that admin APIs have fresh data after startup.
let scanner = self.clone_for_background();
tokio::spawn(async move {
let enable_stats = {
let cfg = scanner.config.read().await;
cfg.enable_data_usage_stats
};
if enable_stats {
if let Err(e) = scanner.collect_and_persist_data_usage().await {
warn!("Initial data usage collection failed: {}", e);
}
}
});
Ok(())
}
@@ -893,6 +884,7 @@ impl Scanner {
/// Collect and persist data usage statistics
async fn collect_and_persist_data_usage(&self) -> Result<()> {
info!("Starting data usage collection and persistence");
let now = SystemTime::now();
// Get ECStore instance
let Some(ecstore) = rustfs_ecstore::new_object_layer_fn() else {
@@ -900,6 +892,10 @@ impl Scanner {
return Ok(());
};
// Helper to avoid hammering the storage layer with repeated realtime scans.
let mut use_cached_on_backoff = false;
let fallback_backoff_secs = Duration::from_secs(300);
// Run local usage scan and aggregate snapshots; fall back to on-demand build when necessary.
let mut data_usage = match local_scan::scan_and_persist_local_usage(ecstore.clone()).await {
Ok(outcome) => {
@@ -921,16 +917,55 @@ impl Scanner {
"Failed to aggregate local data usage snapshots, falling back to realtime collection: {}",
e
);
self.build_data_usage_from_ecstore(&ecstore).await?
match self.maybe_fallback_collection(now, fallback_backoff_secs, &ecstore).await? {
Some(usage) => usage,
None => {
use_cached_on_backoff = true;
DataUsageInfo::default()
}
}
}
}
}
Err(e) => {
warn!("Local usage scan failed (using realtime collection instead): {}", e);
self.build_data_usage_from_ecstore(&ecstore).await?
match self.maybe_fallback_collection(now, fallback_backoff_secs, &ecstore).await? {
Some(usage) => usage,
None => {
use_cached_on_backoff = true;
DataUsageInfo::default()
}
}
}
};
// If heavy fallback was skipped due to backoff, try to reuse cached stats to avoid empty responses.
if use_cached_on_backoff && data_usage.buckets_usage.is_empty() {
let cached = {
let guard = self.data_usage_stats.lock().await;
guard.values().next().cloned()
};
if let Some(cached_usage) = cached {
data_usage = cached_usage;
}
// If there is still no data, try backend before persisting zeros
if data_usage.buckets_usage.is_empty() {
if let Ok(existing) = rustfs_ecstore::data_usage::load_data_usage_from_backend(ecstore.clone()).await {
if !existing.buckets_usage.is_empty() {
info!("Using existing backend data usage during fallback backoff");
data_usage = existing;
}
}
}
// Avoid overwriting valid backend stats with zeros when fallback is throttled
if data_usage.buckets_usage.is_empty() {
warn!("Skipping data usage persistence: fallback throttled and no cached/backend data available");
return Ok(());
}
}
// Make sure bucket counters reflect aggregated content
data_usage.buckets_count = data_usage.buckets_usage.len() as u64;
if data_usage.last_update.is_none() {
@@ -973,8 +1008,31 @@ impl Scanner {
Ok(())
}
async fn maybe_fallback_collection(
&self,
now: SystemTime,
backoff: Duration,
ecstore: &Arc<rustfs_ecstore::store::ECStore>,
) -> Result<Option<DataUsageInfo>> {
let backoff_until = *self.fallback_backoff_until.read().await;
let within_backoff = backoff_until.map(|ts| now < ts).unwrap_or(false);
if within_backoff {
warn!(
"Skipping heavy data usage fallback within backoff window (until {:?}); using cached stats if available",
backoff_until
);
return Ok(None);
}
let usage = self.build_data_usage_from_ecstore(ecstore).await?;
let mut backoff_guard = self.fallback_backoff_until.write().await;
*backoff_guard = Some(now + backoff);
Ok(Some(usage))
}
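The backoff window test used above is a one-liner worth spelling out; a minimal standalone restatement (illustrative only):

use std::time::{Duration, SystemTime};

// within_backoff is true only when a deadline exists and `now` is before it.
fn within_backoff(now: SystemTime, backoff_until: Option<SystemTime>) -> bool {
    backoff_until.map(|ts| now < ts).unwrap_or(false)
}

fn main() {
    let now = SystemTime::now();
    assert!(!within_backoff(now, None));
    assert!(within_backoff(now, Some(now + Duration::from_secs(300))));
    assert!(!within_backoff(now + Duration::from_secs(600), Some(now + Duration::from_secs(300))));
}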
/// Build data usage statistics directly from ECStore
async fn build_data_usage_from_ecstore(&self, ecstore: &Arc<rustfs_ecstore::store::ECStore>) -> Result<DataUsageInfo> {
pub async fn build_data_usage_from_ecstore(&self, ecstore: &Arc<rustfs_ecstore::store::ECStore>) -> Result<DataUsageInfo> {
let mut data_usage = DataUsageInfo::default();
// Get bucket list
@@ -987,6 +1045,8 @@ impl Scanner {
data_usage.last_update = Some(SystemTime::now());
let mut total_objects = 0u64;
let mut total_versions = 0u64;
let mut total_delete_markers = 0u64;
let mut total_size = 0u64;
for bucket_info in buckets {
@@ -994,36 +1054,26 @@ impl Scanner {
continue; // Skip system buckets
}
// Try to get actual object count for this bucket
let (object_count, bucket_size) = match ecstore
.clone()
.list_objects_v2(
&bucket_info.name,
"", // prefix
None, // continuation_token
None, // delimiter
100, // max_keys - small limit for performance
false, // fetch_owner
None, // start_after
)
.await
{
Ok(result) => {
let count = result.objects.len() as u64;
let size = result.objects.iter().map(|obj| obj.size as u64).sum();
(count, size)
}
Err(_) => (0, 0),
};
// Use ecstore pagination helper to avoid truncating at 100 objects
let (object_count, bucket_size, versions_count, delete_markers) =
match compute_bucket_usage(ecstore.clone(), &bucket_info.name).await {
Ok(usage) => (usage.objects_count, usage.size, usage.versions_count, usage.delete_markers_count),
Err(e) => {
warn!("Failed to compute bucket usage for {}: {}", bucket_info.name, e);
(0, 0, 0, 0)
}
};
total_objects += object_count;
total_versions += versions_count;
total_delete_markers += delete_markers;
total_size += bucket_size;
let bucket_usage = rustfs_common::data_usage::BucketUsageInfo {
size: bucket_size,
objects_count: object_count,
versions_count: object_count, // Simplified
delete_markers_count: 0,
versions_count,
delete_markers_count: delete_markers,
..Default::default()
};
@@ -1033,7 +1083,8 @@ impl Scanner {
data_usage.objects_total_count = total_objects;
data_usage.objects_total_size = total_size;
data_usage.versions_total_count = total_objects;
data_usage.versions_total_count = total_versions;
data_usage.delete_markers_total_count = total_delete_markers;
}
Err(e) => {
warn!("Failed to list buckets for data usage collection: {}", e);
@@ -2567,6 +2618,7 @@ impl Scanner {
disk_metrics: Arc::clone(&self.disk_metrics),
data_usage_stats: Arc::clone(&self.data_usage_stats),
last_data_usage_collection: Arc::clone(&self.last_data_usage_collection),
fallback_backoff_until: Arc::clone(&self.fallback_backoff_until),
heal_manager: self.heal_manager.clone(),
node_scanner: Arc::clone(&self.node_scanner),
stats_aggregator: Arc::clone(&self.stats_aggregator),

View File

@@ -84,6 +84,9 @@ pub async fn scan_and_persist_local_usage(store: Arc<ECStore>) -> Result<LocalSc
guard.clone()
};
// Use the first local online disk in the set to avoid missing stats when disk 0 is down
let mut picked = false;
for (disk_index, disk_opt) in disks.into_iter().enumerate() {
let Some(disk) = disk_opt else {
continue;
@@ -93,11 +96,17 @@ pub async fn scan_and_persist_local_usage(store: Arc<ECStore>) -> Result<LocalSc
continue;
}
// Count objects once per set by scanning a single online disk.
if disk_index != 0 {
if picked {
continue;
}
// Skip offline disks; keep looking for an online candidate
if !disk.is_online().await {
continue;
}
picked = true;
let disk_id = match disk.get_disk_id().await.map_err(Error::from)? {
Some(id) => id.to_string(),
None => {

View File

@@ -711,6 +711,7 @@ impl NodeScanner {
// start scanning loop
let scanner_clone = self.clone_for_background();
tokio::spawn(async move {
// update object count and size for each bucket
if let Err(e) = scanner_clone.scan_loop_with_resume(None).await {
error!("scanning loop failed: {}", e);
}

View File

@@ -347,7 +347,8 @@ impl DecentralizedStatsAggregator {
// update cache
*self.cached_stats.write().await = Some(aggregated.clone());
*self.cache_timestamp.write().await = aggregation_timestamp;
// Use the time when aggregation completes as cache timestamp to avoid premature expiry during long runs
*self.cache_timestamp.write().await = SystemTime::now();
Ok(aggregated)
}
@@ -359,7 +360,8 @@ impl DecentralizedStatsAggregator {
// update cache
*self.cached_stats.write().await = Some(aggregated.clone());
*self.cache_timestamp.write().await = now;
// Cache timestamp should reflect completion time rather than aggregation start
*self.cache_timestamp.write().await = SystemTime::now();
Ok(aggregated)
}

View File

@@ -0,0 +1,97 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![cfg(test)]
use rustfs_ahm::scanner::data_scanner::Scanner;
use rustfs_common::data_usage::DataUsageInfo;
use rustfs_ecstore::GLOBAL_Endpoints;
use rustfs_ecstore::bucket::metadata_sys::{BucketMetadataSys, GLOBAL_BucketMetadataSys};
use rustfs_ecstore::endpoints::EndpointServerPools;
use rustfs_ecstore::store::ECStore;
use rustfs_ecstore::store_api::{ObjectIO, PutObjReader, StorageAPI};
use std::sync::Arc;
use tempfile::TempDir;
use tokio::sync::RwLock;
use tokio_util::sync::CancellationToken;
/// Build a minimal single-node ECStore over a temp directory and populate objects.
async fn create_store_with_objects(count: usize) -> (TempDir, std::sync::Arc<ECStore>) {
let temp_dir = TempDir::new().expect("temp dir");
let root = temp_dir.path().to_string_lossy().to_string();
// Create endpoints from the temp dir
let (endpoint_pools, _setup) = EndpointServerPools::from_volumes("127.0.0.1:0", vec![root])
.await
.expect("endpoint pools");
// Seed globals required by metadata sys if not already set
if GLOBAL_Endpoints.get().is_none() {
let _ = GLOBAL_Endpoints.set(endpoint_pools.clone());
}
let store = ECStore::new("127.0.0.1:0".parse().unwrap(), endpoint_pools, CancellationToken::new())
.await
.expect("create store");
if rustfs_ecstore::global::new_object_layer_fn().is_none() {
rustfs_ecstore::global::set_object_layer(store.clone()).await;
}
// Initialize metadata system before bucket operations
if GLOBAL_BucketMetadataSys.get().is_none() {
let mut sys = BucketMetadataSys::new(store.clone());
sys.init(Vec::new()).await;
let _ = GLOBAL_BucketMetadataSys.set(Arc::new(RwLock::new(sys)));
}
store
.make_bucket("fallback-bucket", &rustfs_ecstore::store_api::MakeBucketOptions::default())
.await
.expect("make bucket");
for i in 0..count {
let key = format!("obj-{i:04}");
let data = format!("payload-{i}");
let mut reader = PutObjReader::from_vec(data.into_bytes());
store
.put_object("fallback-bucket", &key, &mut reader, &rustfs_ecstore::store_api::ObjectOptions::default())
.await
.expect("put object");
}
(temp_dir, store)
}
#[tokio::test]
async fn fallback_builds_full_counts_over_100_objects() {
let (_tmp, store) = create_store_with_objects(1000).await;
let scanner = Scanner::new(None, None);
// Directly call the fallback builder to ensure pagination works.
let usage: DataUsageInfo = scanner.build_data_usage_from_ecstore(&store).await.expect("fallback usage");
let bucket = usage.buckets_usage.get("fallback-bucket").expect("bucket usage present");
assert!(
usage.objects_total_count >= 1000,
"total objects should be >=1000, got {}",
usage.objects_total_count
);
assert!(
bucket.objects_count >= 1000,
"bucket objects should be >=1000, got {}",
bucket.objects_count
);
}

View File

@@ -244,6 +244,14 @@ fn test_heal_task_status_atomic_update() {
async fn list_objects_for_heal(&self, _bucket: &str, _prefix: &str) -> rustfs_ahm::Result<Vec<String>> {
Ok(vec![])
}
async fn list_objects_for_heal_page(
&self,
_bucket: &str,
_prefix: &str,
_continuation_token: Option<&str>,
) -> rustfs_ahm::Result<(Vec<String>, Option<String>, bool)> {
Ok((vec![], None, false))
}
async fn get_disk_for_resume(&self, _set_disk_id: &str) -> rustfs_ahm::Result<rustfs_ecstore::disk::DiskStore> {
Err(rustfs_ahm::Error::other("Not implemented in mock"))
}

View File

@@ -144,7 +144,7 @@ async fn setup_test_env() -> (Vec<PathBuf>, Arc<ECStore>) {
let mut wtxn = lmdb_env.write_txn().unwrap();
let db = match lmdb_env
.database_options()
.name(&format!("bucket_{}", bucket_name))
.name(&format!("bucket_{bucket_name}"))
.types::<I64<BigEndian>, LifecycleContentCodec>()
.flags(DatabaseFlags::DUP_SORT)
//.dup_sort_comparator::<>()
@@ -152,7 +152,7 @@ async fn setup_test_env() -> (Vec<PathBuf>, Arc<ECStore>) {
{
Ok(db) => db,
Err(err) => {
panic!("lmdb error: {}", err);
panic!("lmdb error: {err}");
}
};
let _ = wtxn.commit();
@@ -199,7 +199,7 @@ async fn upload_test_object(ecstore: &Arc<ECStore>, bucket: &str, object: &str,
.await
.expect("Failed to upload test object");
println!("object_info1: {:?}", object_info);
println!("object_info1: {object_info:?}");
info!("Uploaded test object: {}/{} ({} bytes)", bucket, object, object_info.size);
}
@@ -456,7 +456,7 @@ mod serial_tests {
}
let object_info = convert_record_to_object_info(record);
println!("object_info2: {:?}", object_info);
println!("object_info2: {object_info:?}");
let mod_time = object_info.mod_time.unwrap_or(OffsetDateTime::now_utc());
let expiry_time = rustfs_ecstore::bucket::lifecycle::lifecycle::expected_expiry_time(mod_time, 1);
@@ -494,9 +494,9 @@ mod serial_tests {
type_,
object_name,
} = &elm.1;
println!("cache row:{} {} {} {:?} {}", ver_no, ver_id, mod_time, type_, object_name);
println!("cache row:{ver_no} {ver_id} {mod_time} {type_:?} {object_name}");
}
println!("row:{:?}", row);
println!("row:{row:?}");
}
//drop(iter);
wtxn.commit().unwrap();

View File

@@ -277,11 +277,11 @@ async fn create_test_tier(server: u32) {
};
let mut tier_config_mgr = GLOBAL_TierConfigMgr.write().await;
if let Err(err) = tier_config_mgr.add(args, false).await {
println!("tier_config_mgr add failed, e: {:?}", err);
println!("tier_config_mgr add failed, e: {err:?}");
panic!("tier add failed. {err}");
}
if let Err(e) = tier_config_mgr.save().await {
println!("tier_config_mgr save failed, e: {:?}", e);
println!("tier_config_mgr save failed, e: {e:?}");
panic!("tier save failed");
}
println!("Created test tier: COLDTIER44");
@@ -299,7 +299,7 @@ async fn object_exists(ecstore: &Arc<ECStore>, bucket: &str, object: &str) -> bo
#[allow(dead_code)]
async fn object_is_delete_marker(ecstore: &Arc<ECStore>, bucket: &str, object: &str) -> bool {
if let Ok(oi) = (**ecstore).get_object_info(bucket, object, &ObjectOptions::default()).await {
println!("oi: {:?}", oi);
println!("oi: {oi:?}");
oi.delete_marker
} else {
println!("object_is_delete_marker is error");
@@ -311,7 +311,7 @@ async fn object_is_delete_marker(ecstore: &Arc<ECStore>, bucket: &str, object: &
#[allow(dead_code)]
async fn object_is_transitioned(ecstore: &Arc<ECStore>, bucket: &str, object: &str) -> bool {
if let Ok(oi) = (**ecstore).get_object_info(bucket, object, &ObjectOptions::default()).await {
println!("oi: {:?}", oi);
println!("oi: {oi:?}");
!oi.transitioned_object.status.is_empty()
} else {
println!("object_is_transitioned is error");

View File

@@ -8,7 +8,7 @@
<p align="center">
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
<a href="https://docs.rustfs.com/en/">📖 Documentation</a>
<a href="https://docs.rustfs.com/">📖 Documentation</a>
· <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
· <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
</p>

View File

@@ -15,7 +15,7 @@
use crate::{AuditEntry, AuditResult, AuditSystem};
use rustfs_ecstore::config::Config;
use std::sync::{Arc, OnceLock};
use tracing::{error, trace, warn};
use tracing::{debug, error, trace, warn};
/// Global audit system instance
static AUDIT_SYSTEM: OnceLock<Arc<AuditSystem>> = OnceLock::new();
@@ -78,7 +78,7 @@ pub async fn dispatch_audit_log(entry: Arc<AuditEntry>) -> AuditResult<()> {
} else {
// The system is not initialized at all. This is a more important state.
// It might be better to return an error or log a warning.
warn!("Audit system not initialized, dropping audit entry.");
debug!("Audit system not initialized, dropping audit entry.");
// If this should be a hard failure, you can return Err(AuditError::NotInitialized("..."))
Ok(())
}

View File

@@ -59,7 +59,7 @@ impl AuditSystem {
/// Starts the audit system with the given configuration
pub async fn start(&self, config: Config) -> AuditResult<()> {
let mut state = self.state.write().await;
let state = self.state.write().await;
match *state {
AuditSystemState::Running => {
@@ -72,7 +72,6 @@ impl AuditSystem {
_ => {}
}
*state = AuditSystemState::Starting;
drop(state);
info!("Starting audit system");
@@ -90,6 +89,17 @@ impl AuditSystem {
let mut registry = self.registry.lock().await;
match registry.create_targets_from_config(&config).await {
Ok(targets) => {
if targets.is_empty() {
info!("No enabled audit targets found, keeping audit system stopped");
drop(registry);
return Ok(());
}
{
let mut state = self.state.write().await;
*state = AuditSystemState::Starting;
}
info!(target_count = targets.len(), "Created audit targets successfully");
// Initialize all targets

View File

@@ -39,3 +39,4 @@ path-clean = { workspace = true }
rmp-serde = { workspace = true }
async-trait = { workspace = true }
s3s = { workspace = true }
tracing = { workspace = true }

View File

@@ -8,7 +8,7 @@
<p align="center">
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
<a href="https://docs.rustfs.com/en/">📖 Documentation</a>
<a href="https://docs.rustfs.com/">📖 Documentation</a>
· <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
· <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
</p>

View File

@@ -28,3 +28,28 @@ pub static GLOBAL_Conn_Map: LazyLock<RwLock<HashMap<String, Channel>>> = LazyLoc
pub async fn set_global_addr(addr: &str) {
*GLOBAL_Rustfs_Addr.write().await = addr.to_string();
}
/// Evict a stale/dead connection from the global connection cache.
/// This is critical for cluster recovery when a node dies unexpectedly (e.g., power-off).
/// By removing the cached connection, subsequent requests will establish a fresh connection.
pub async fn evict_connection(addr: &str) {
let removed = GLOBAL_Conn_Map.write().await.remove(addr);
if removed.is_some() {
tracing::warn!("Evicted stale connection from cache: {}", addr);
}
}
/// Check if a connection exists in the cache for the given address.
pub async fn has_cached_connection(addr: &str) -> bool {
GLOBAL_Conn_Map.read().await.contains_key(addr)
}
/// Clear all cached connections. Useful for full cluster reset/recovery.
pub async fn clear_all_connections() {
let mut map = GLOBAL_Conn_Map.write().await;
let count = map.len();
map.clear();
if count > 0 {
tracing::warn!("Cleared {} cached connections from global map", count);
}
}
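A hypothetical call-site sketch for these helpers, placed in the same module (the recovery policy shown is an assumption, not part of the diff):

// On a suspected-dead peer, drop the cached channel so the next request re-dials.
async fn recover_node(addr: &str) {
    if has_cached_connection(addr).await {
        evict_connection(addr).await;
    }
}

// Full reset, e.g. after a network partition heals.
async fn recover_cluster() {
    clear_all_connections().await;
}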

View File

@@ -85,12 +85,90 @@ impl Display for DriveState {
}
}
#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
#[repr(u8)]
pub enum HealScanMode {
Unknown,
Unknown = 0,
#[default]
Normal,
Deep,
Normal = 1,
Deep = 2,
}
impl Serialize for HealScanMode {
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
where
S: serde::Serializer,
{
serializer.serialize_u8(*self as u8)
}
}
impl<'de> Deserialize<'de> for HealScanMode {
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
where
D: serde::Deserializer<'de>,
{
struct HealScanModeVisitor;
impl<'de> serde::de::Visitor<'de> for HealScanModeVisitor {
type Value = HealScanMode;
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
formatter.write_str("an integer between 0 and 2")
}
fn visit_u8<E>(self, value: u8) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
match value {
0 => Ok(HealScanMode::Unknown),
1 => Ok(HealScanMode::Normal),
2 => Ok(HealScanMode::Deep),
_ => Err(E::custom(format!("invalid HealScanMode value: {value}"))),
}
}
fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
if value > u8::MAX as u64 {
return Err(E::custom(format!("HealScanMode value too large: {value}")));
}
self.visit_u8(value as u8)
}
fn visit_i64<E>(self, value: i64) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
if value < 0 || value > u8::MAX as i64 {
return Err(E::custom(format!("invalid HealScanMode value: {value}")));
}
self.visit_u8(value as u8)
}
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
where
E: serde::de::Error,
{
// Try parsing as number string first (for URL-encoded values)
if let Ok(num) = value.parse::<u8>() {
return self.visit_u8(num);
}
// Try parsing as named string
match value {
"Unknown" | "unknown" => Ok(HealScanMode::Unknown),
"Normal" | "normal" => Ok(HealScanMode::Normal),
"Deep" | "deep" => Ok(HealScanMode::Deep),
_ => Err(E::custom(format!("invalid HealScanMode string: {value}"))),
}
}
}
deserializer.deserialize_any(HealScanModeVisitor)
}
}
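A small round-trip check of the custom visitor, assuming `serde_json` is available as a dev-dependency (the test module below is illustrative, not part of the change):

#[cfg(test)]
mod heal_scan_mode_roundtrip {
    use super::*;

    // The visitor accepts numeric, numeric-string, and named-string inputs,
    // and serialization emits the u8 discriminant.
    #[test]
    fn accepts_numbers_and_names() {
        assert_eq!(serde_json::to_string(&HealScanMode::Deep).unwrap(), "2");
        assert_eq!(serde_json::from_str::<HealScanMode>("2").unwrap(), HealScanMode::Deep);
        assert_eq!(serde_json::from_str::<HealScanMode>("\"deep\"").unwrap(), HealScanMode::Deep);
        assert_eq!(serde_json::from_str::<HealScanMode>("\"1\"").unwrap(), HealScanMode::Normal);
        assert!(serde_json::from_str::<HealScanMode>("9").is_err());
    }
}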
#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize)]
@@ -106,7 +184,9 @@ pub struct HealOpts {
pub update_parity: bool,
#[serde(rename = "nolock")]
pub no_lock: bool,
#[serde(rename = "pool", default)]
pub pool: Option<usize>,
#[serde(rename = "set", default)]
pub set: Option<usize>,
}

View File

@@ -8,7 +8,7 @@
<p align="center">
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
<a href="https://docs.rustfs.com/en/">📖 Documentation</a>
<a href="https://docs.rustfs.com/">📖 Documentation</a>
· <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
· <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
</p>

View File

@@ -25,7 +25,7 @@ pub const VERSION: &str = "1.0.0";
/// Default configuration logger level
/// Default value: error
/// Environment variable: RUSTFS_LOG_LEVEL
/// Environment variable: RUSTFS_OBS_LOGGER_LEVEL
pub const DEFAULT_LOG_LEVEL: &str = "error";
/// Default configuration use stdout

View File

@@ -0,0 +1,88 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// Environment variable name that enables or disables auto-heal functionality.
/// - Purpose: Control whether the system automatically performs heal operations.
/// - Valid values: "true" or "false" (case insensitive).
/// - Semantics: When set to "true", auto-heal is enabled and the system will automatically attempt to heal detected issues; when set to "false", auto-heal is disabled and healing must be triggered manually.
/// - Example: `export RUSTFS_HEAL_AUTO_HEAL_ENABLE=true`
/// - Note: Enabling auto-heal can improve system resilience by automatically addressing issues, but may increase resource usage; evaluate based on your operational requirements.
pub const ENV_HEAL_AUTO_HEAL_ENABLE: &str = "RUSTFS_HEAL_AUTO_HEAL_ENABLE";
/// Environment variable name that specifies the heal queue size.
///
/// - Purpose: Set the maximum number of heal requests that can be queued.
/// - Unit: number of requests (usize).
/// - Valid values: any positive integer.
/// - Semantics: When the heal queue reaches this size, new heal requests may be rejected or blocked until space is available; tune according to expected heal workload and system capacity.
/// - Example: `export RUSTFS_HEAL_QUEUE_SIZE=10000`
/// - Note: A larger queue size can accommodate bursts of heal requests but may increase memory usage.
pub const ENV_HEAL_QUEUE_SIZE: &str = "RUSTFS_HEAL_QUEUE_SIZE";
/// Environment variable name that specifies the heal interval in seconds.
/// - Purpose: Define the time interval between successive heal operations.
/// - Unit: seconds (u64).
/// - Valid values: any positive integer.
/// - Semantics: This interval controls how frequently the heal manager checks for and processes heal requests; shorter intervals lead to more responsive healing but may increase system load.
/// - Example: `export RUSTFS_HEAL_INTERVAL_SECS=10`
/// - Note: Choose an interval that balances healing responsiveness with overall system performance.
pub const ENV_HEAL_INTERVAL_SECS: &str = "RUSTFS_HEAL_INTERVAL_SECS";
/// Environment variable name that specifies the heal task timeout in seconds.
/// - Purpose: Set the maximum duration allowed for a heal task to complete.
/// - Unit: seconds (u64).
/// - Valid values: any positive integer.
/// - Semantics: If a heal task exceeds this timeout, it may be aborted or retried; tune according to the expected duration of heal operations and system performance characteristics.
/// - Example: `export RUSTFS_HEAL_TASK_TIMEOUT_SECS=300`
/// - Note: Setting an appropriate timeout helps prevent long-running heal tasks from impacting system stability.
pub const ENV_HEAL_TASK_TIMEOUT_SECS: &str = "RUSTFS_HEAL_TASK_TIMEOUT_SECS";
/// Environment variable name that specifies the maximum number of concurrent heal operations.
/// - Purpose: Limit the number of heal operations that can run simultaneously.
/// - Unit: number of operations (usize).
/// - Valid values: any positive integer.
/// - Semantics: This limit helps control resource usage during healing; tune according to system capacity and expected heal workload.
/// - Example: `export RUSTFS_HEAL_MAX_CONCURRENT_HEALS=4`
/// - Note: A higher concurrency limit can speed up healing but may lead to resource contention.
pub const ENV_HEAL_MAX_CONCURRENT_HEALS: &str = "RUSTFS_HEAL_MAX_CONCURRENT_HEALS";
/// Default value for enabling auto-heal if not specified in the environment variable.
/// - Value: true (auto-heal enabled).
/// - Rationale: Enabling auto-heal by default improves system resilience by automatically addressing detected issues.
/// - Adjustments: Users may disable this feature via the `RUSTFS_HEAL_AUTO_HEAL_ENABLE` environment variable based on their operational and resource requirements.
pub const DEFAULT_HEAL_AUTO_HEAL_ENABLE: bool = true;
/// Default heal queue size if not specified in the environment variable.
///
/// - Value: 10,000 requests.
/// - Rationale: This default size balances the need to handle typical heal workloads without excessive memory consumption.
/// - Adjustments: Users may modify this value via the `RUSTFS_HEAL_QUEUE_SIZE` environment variable based on their specific use cases and system capabilities.
pub const DEFAULT_HEAL_QUEUE_SIZE: usize = 10_000;
/// Default heal interval in seconds if not specified in the environment variable.
/// - Value: 10 seconds.
/// - Rationale: This default interval provides a reasonable balance between healing responsiveness and system load for most deployments.
/// - Adjustments: Users may modify this value via the `RUSTFS_HEAL_INTERVAL_SECS` environment variable based on their specific healing requirements and system performance.
pub const DEFAULT_HEAL_INTERVAL_SECS: u64 = 10;
/// Default heal task timeout in seconds if not specified in the environment variable.
/// - Value: 300 seconds (5 minutes).
/// - Rationale: This default timeout allows sufficient time for most heal operations to complete while preventing excessively long-running tasks.
/// - Adjustments: Users may modify this value via the `RUSTFS_HEAL_TASK_TIMEOUT_SECS` environment variable based on their specific heal operation characteristics and system performance.
pub const DEFAULT_HEAL_TASK_TIMEOUT_SECS: u64 = 300; // 5 minutes
/// Default maximum number of concurrent heal operations if not specified in the environment variable.
/// - Value: 4 concurrent heal operations.
/// - Rationale: This default concurrency limit helps balance healing speed with resource usage, preventing system overload.
/// - Adjustments: Users may modify this value via the `RUSTFS_HEAL_MAX_CONCURRENT_HEALS` environment variable based on their system capacity and expected heal workload.
pub const DEFAULT_HEAL_MAX_CONCURRENT_HEALS: usize = 4;
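As a hedged sketch only (not part of this diff), the heal constants above could be wired into a small config struct that falls back to the defaults when the environment variables are unset or unparsable. The struct and helper names are hypothetical.

use std::env;
use std::time::Duration;

// Hypothetical helper: parse an env var, or fall back to the given default.
fn env_or<T: std::str::FromStr>(key: &str, default: T) -> T {
    env::var(key).ok().and_then(|v| v.parse().ok()).unwrap_or(default)
}

// Hypothetical config struct built from the constants defined above.
struct HealConfig {
    enabled: bool,
    queue_size: usize,
    interval: Duration,
    task_timeout: Duration,
    max_concurrent: usize,
}

impl HealConfig {
    fn from_env() -> Self {
        Self {
            enabled: env::var(ENV_HEAL_AUTO_HEAL_ENABLE)
                .map(|v| v.eq_ignore_ascii_case("true"))
                .unwrap_or(DEFAULT_HEAL_AUTO_HEAL_ENABLE),
            queue_size: env_or(ENV_HEAL_QUEUE_SIZE, DEFAULT_HEAL_QUEUE_SIZE),
            interval: Duration::from_secs(env_or(ENV_HEAL_INTERVAL_SECS, DEFAULT_HEAL_INTERVAL_SECS)),
            task_timeout: Duration::from_secs(env_or(ENV_HEAL_TASK_TIMEOUT_SECS, DEFAULT_HEAL_TASK_TIMEOUT_SECS)),
            max_concurrent: env_or(ENV_HEAL_MAX_CONCURRENT_HEALS, DEFAULT_HEAL_MAX_CONCURRENT_HEALS),
        }
    }
}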

View File

@@ -15,6 +15,8 @@
pub(crate) mod app;
pub(crate) mod console;
pub(crate) mod env;
pub(crate) mod heal;
pub(crate) mod object;
pub(crate) mod profiler;
pub(crate) mod runtime;
pub(crate) mod targets;

View File

@@ -0,0 +1,169 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/// Environment variable name to toggle object-level in-memory caching.
///
/// - Purpose: Enable or disable the object-level in-memory cache (moka).
/// - Acceptable values: `"true"` / `"false"` (case-insensitive) or a boolean typed config.
/// - Semantics: When enabled, the system keeps fully-read objects in memory to reduce backend requests; when disabled, reads bypass the object cache.
/// - Example: `export RUSTFS_OBJECT_CACHE_ENABLE=true`
/// - Note: Evaluate together with `RUSTFS_OBJECT_CACHE_CAPACITY_MB`, TTL/TTI and concurrency thresholds to balance memory usage and throughput.
pub const ENV_OBJECT_CACHE_ENABLE: &str = "RUSTFS_OBJECT_CACHE_ENABLE";
/// Environment variable name that specifies the object cache capacity in megabytes.
///
/// - Purpose: Set the maximum total capacity of the object cache (in MB).
/// - Unit: MB (1 MB = 1_048_576 bytes).
/// - Valid values: any positive integer (0 may indicate disabled or alternative handling).
/// - Semantics: When the moka cache reaches this capacity, eviction policies will remove entries; tune according to available memory and object size distribution.
/// - Example: `export RUSTFS_OBJECT_CACHE_CAPACITY_MB=512`
/// - Note: Actual memory usage will be slightly higher due to object headers and indexing overhead.
pub const ENV_OBJECT_CACHE_CAPACITY_MB: &str = "RUSTFS_OBJECT_CACHE_CAPACITY_MB";
/// Environment variable name for maximum object size eligible for caching in megabytes.
///
/// - Purpose: Define the upper size limit for individual objects to be considered for caching.
/// - Unit: MB (1 MB = 1_048_576 bytes).
/// - Valid values: any positive integer; objects larger than this size will not be cached.
/// - Semantics: Prevents caching of excessively large objects that could monopolize cache capacity; tune based on typical object size distribution.
/// - Example: `export RUSTFS_OBJECT_CACHE_MAX_OBJECT_SIZE_MB=50`
/// - Note: Setting this too low may reduce cache effectiveness; setting it too high may lead to inefficient memory usage.
pub const ENV_OBJECT_CACHE_MAX_OBJECT_SIZE_MB: &str = "RUSTFS_OBJECT_CACHE_MAX_OBJECT_SIZE_MB";
/// Environment variable name for object cache TTL (time-to-live) in seconds.
///
/// - Purpose: Specify the maximum lifetime of a cached entry from the moment it is written.
/// - Unit: seconds (u64).
/// - Semantics: TTL acts as a hard upper bound; entries older than TTL are considered expired and removed by periodic cleanup.
/// - Example: `export RUSTFS_OBJECT_CACHE_TTL_SECS=300`
/// - Note: TTL and TTI both apply; either policy can cause eviction.
pub const ENV_OBJECT_CACHE_TTL_SECS: &str = "RUSTFS_OBJECT_CACHE_TTL_SECS";
/// Environment variable name for object cache TTI (time-to-idle) in seconds.
///
/// - Purpose: Specify how long an entry may remain in cache without being accessed before it is evicted.
/// - Unit: seconds (u64).
/// - Semantics: TTI helps remove one-time or infrequently used entries; frequent accesses reset idle timers but do not extend beyond TTL unless additional logic exists.
/// - Example: `export RUSTFS_OBJECT_CACHE_TTI_SECS=120`
/// - Note: Works together with TTL to keep the cache populated with actively used objects.
pub const ENV_OBJECT_CACHE_TTI_SECS: &str = "RUSTFS_OBJECT_CACHE_TTI_SECS";
/// Environment variable name for threshold of "hot" object hit count used to extend life.
///
/// - Purpose: Define a hit-count threshold to mark objects as "hot" so they may be treated preferentially near expiration.
/// - Valid values: positive integer (usize).
/// - Semantics: Objects reaching this hit count can be considered for relaxed eviction to avoid thrashing hot items.
/// - Example: `export RUSTFS_OBJECT_HOT_MIN_HITS_TO_EXTEND=5`
/// - Note: This is an optional enhancement and requires cache-layer statistics and extension logic to take effect.
pub const ENV_OBJECT_HOT_MIN_HITS_TO_EXTEND: &str = "RUSTFS_OBJECT_HOT_MIN_HITS_TO_EXTEND";
/// Environment variable name for high concurrency threshold used in adaptive buffering.
///
/// - Purpose: When concurrent request count exceeds this threshold, the system enters a "high concurrency" optimization mode to reduce per-request buffer sizes.
/// - Unit: request count (usize).
/// - Semantics: High concurrency mode reduces per-request buffers (e.g., to a fraction of base size) to protect overall memory and fairness.
/// - Example: `export RUSTFS_OBJECT_HIGH_CONCURRENCY_THRESHOLD=8`
/// - Note: This affects buffering and I/O behavior, not cache capacity directly.
pub const ENV_OBJECT_HIGH_CONCURRENCY_THRESHOLD: &str = "RUSTFS_OBJECT_HIGH_CONCURRENCY_THRESHOLD";
/// Environment variable name for medium concurrency threshold used in adaptive buffering.
///
/// - Purpose: Define the boundary for "medium concurrency" where more moderate buffer adjustments apply.
/// - Unit: request count (usize).
/// - Semantics: In the medium range, buffers are reduced moderately to balance throughput and memory efficiency.
/// - Example: `export RUSTFS_OBJECT_MEDIUM_CONCURRENCY_THRESHOLD=4`
/// - Note: Tune this value based on target workload and hardware.
pub const ENV_OBJECT_MEDIUM_CONCURRENCY_THRESHOLD: &str = "RUSTFS_OBJECT_MEDIUM_CONCURRENCY_THRESHOLD";
/// Environment variable name for maximum concurrent disk reads for object operations.
/// - Purpose: Limit the number of concurrent disk read operations for object reads to prevent I/O saturation.
/// - Unit: request count (usize).
/// - Semantics: Throttling disk reads helps maintain overall system responsiveness under load.
/// - Example: `export RUSTFS_OBJECT_MAX_CONCURRENT_DISK_READS=16`
/// - Note: This setting may interact with OS-level I/O scheduling and should be tuned based on hardware capabilities.
pub const ENV_OBJECT_MAX_CONCURRENT_DISK_READS: &str = "RUSTFS_OBJECT_MAX_CONCURRENT_DISK_READS";
/// Default: object caching is disabled.
///
/// - Semantics: Safe default to avoid unexpected memory usage or cache consistency concerns when not explicitly enabled.
/// - Default is set to false (disabled).
pub const DEFAULT_OBJECT_CACHE_ENABLE: bool = false;
/// Default object cache capacity in MB.
///
/// - Default: 100 MB (can be overridden by `RUSTFS_OBJECT_CACHE_CAPACITY_MB`).
/// - Note: Choose a conservative default to reduce memory pressure in development/testing.
pub const DEFAULT_OBJECT_CACHE_CAPACITY_MB: u64 = 100;
/// Default maximum object size eligible for caching in MB.
///
/// - Default: 10 MB (can be overridden by `RUSTFS_OBJECT_CACHE_MAX_OBJECT_SIZE_MB`).
/// - Note: Balances caching effectiveness with memory usage.
pub const DEFAULT_OBJECT_CACHE_MAX_OBJECT_SIZE_MB: usize = 10;
/// Maximum concurrent requests before applying aggressive optimization.
///
/// When concurrent requests exceed this threshold (>8), the system switches to
/// aggressive memory optimization mode, reducing buffer sizes to 40% of base size
/// to prevent memory exhaustion and ensure fair resource allocation.
///
/// This helps maintain system stability under high load conditions.
/// Default is set to 8 concurrent requests.
pub const DEFAULT_OBJECT_HIGH_CONCURRENCY_THRESHOLD: usize = 8;
/// Medium concurrency threshold for buffer size adjustment.
///
/// At this level (3-4 requests), buffers are reduced to 75% of base size to
/// balance throughput and memory efficiency as load increases.
///
/// This helps maintain performance without overly aggressive memory reduction.
///
/// Default is set to 4 concurrent requests.
pub const DEFAULT_OBJECT_MEDIUM_CONCURRENCY_THRESHOLD: usize = 4;
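The two thresholds above describe an adaptive buffering policy (40% of the base buffer above the high threshold, 75% around the medium one). The sketch below is illustrative only: the function name, the base size, and the exact boundary handling between the medium and high thresholds are assumptions, not code from this diff.

// Hedged sketch of the adaptive buffering policy described in the doc comments.
fn adaptive_buffer_size(base: usize, concurrent_requests: usize) -> usize {
    if concurrent_requests > DEFAULT_OBJECT_HIGH_CONCURRENCY_THRESHOLD {
        base * 40 / 100 // aggressive mode: shrink buffers to protect memory under high load
    } else if concurrent_requests >= DEFAULT_OBJECT_MEDIUM_CONCURRENCY_THRESHOLD {
        base * 75 / 100 // moderate reduction to balance throughput and memory
    } else {
        base // low concurrency: keep the full base buffer
    }
}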
/// Maximum concurrent disk reads for object operations.
/// Limits the number of simultaneous disk read operations to prevent I/O saturation.
///
/// A higher value may improve throughput on high-performance storage,
/// but could also lead to increased latency if the disk becomes overloaded.
///
/// Default is set to 64 concurrent reads.
pub const DEFAULT_OBJECT_MAX_CONCURRENT_DISK_READS: usize = 64;
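One common way to enforce a read-concurrency cap like the constant above is a semaphore; the sketch below assumes tokio and hypothetical names, and is not taken from this diff.

use std::sync::Arc;
use tokio::sync::Semaphore;

// Hedged sketch: at most DEFAULT_OBJECT_MAX_CONCURRENT_DISK_READS reads run at once.
async fn read_with_limit(sem: Arc<Semaphore>, path: std::path::PathBuf) -> std::io::Result<Vec<u8>> {
    // Acquire a permit before touching the disk; it is released when dropped.
    let _permit = sem.acquire_owned().await.expect("semaphore closed");
    tokio::fs::read(path).await
}

// Usage (hypothetical): let sem = Arc::new(Semaphore::new(DEFAULT_OBJECT_MAX_CONCURRENT_DISK_READS));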
/// Time-to-live for cached objects (5 minutes = 300 seconds).
///
/// After this duration, cached objects are automatically expired by Moka's
/// background cleanup process, even if they haven't been accessed. This prevents
/// stale data from consuming cache capacity indefinitely.
///
/// Default is set to 300 seconds.
pub const DEFAULT_OBJECT_CACHE_TTL_SECS: u64 = 300;
/// Time-to-idle for cached objects (2 minutes = 120 seconds).
///
/// Objects that haven't been accessed for this duration are automatically evicted,
/// even if their TTL hasn't expired. This ensures cache is populated with actively
/// used objects and clears out one-time reads efficiently.
///
/// Default is set to 120 seconds.
pub const DEFAULT_OBJECT_CACHE_TTI_SECS: u64 = 120;
/// Minimum hit count to extend object lifetime beyond TTL.
///
/// "Hot" objects that have been accessed at least this many times are treated
/// specially - they can survive longer in cache even as they approach TTL expiration.
/// This prevents frequently accessed objects from being evicted prematurely.
///
/// Default is set to 5 hits.
pub const DEFAULT_OBJECT_HOT_MIN_HITS_TO_EXTEND: usize = 5;
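Since the doc comments above refer to a moka-backed object cache, a minimal sketch of how these capacity and TTL/TTI defaults could be applied is shown below. The key/value types, the byte-based weigher, and the builder wiring are assumptions; hot-hit extension would need separate statistics and is not shown.

use std::time::Duration;
use moka::future::Cache;

// Hedged sketch: build a cache sized and timed by the constants defined above.
fn build_object_cache() -> Cache<String, bytes::Bytes> {
    Cache::builder()
        // Capacity is expressed in bytes via the weigher, so convert MB -> bytes.
        .max_capacity(DEFAULT_OBJECT_CACHE_CAPACITY_MB * 1024 * 1024)
        .weigher(|_key: &String, value: &bytes::Bytes| value.len() as u32)
        .time_to_live(Duration::from_secs(DEFAULT_OBJECT_CACHE_TTL_SECS))
        .time_to_idle(Duration::from_secs(DEFAULT_OBJECT_CACHE_TTI_SECS))
        .build()
}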

View File

@@ -21,6 +21,10 @@ pub use constants::console::*;
#[cfg(feature = "constants")]
pub use constants::env::*;
#[cfg(feature = "constants")]
pub use constants::heal::*;
#[cfg(feature = "constants")]
pub use constants::object::*;
#[cfg(feature = "constants")]
pub use constants::profiler::*;
#[cfg(feature = "constants")]
pub use constants::runtime::*;

View File

@@ -8,7 +8,7 @@
<p align="center">
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
<a href="https://docs.rustfs.com/en/">📖 Documentation</a>
<a href="https://docs.rustfs.com/">📖 Documentation</a>
· <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
· <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
</p>

View File

@@ -25,6 +25,7 @@ workspace = true
[dependencies]
rustfs-ecstore.workspace = true
rustfs-common.workspace = true
flatbuffers.workspace = true
futures.workspace = true
rustfs-lock.workspace = true
@@ -49,4 +50,4 @@ uuid = { workspace = true }
base64 = { workspace = true }
rand = { workspace = true }
chrono = { workspace = true }
md5 = { workspace = true }
md5 = { workspace = true }

View File

@@ -0,0 +1,85 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! End-to-end test for Content-Encoding header handling
//!
//! Tests that the Content-Encoding header is correctly stored during PUT
//! and returned in GET/HEAD responses. This is important for clients that
//! upload pre-compressed content and rely on the header for decompression.
#[cfg(test)]
mod tests {
use crate::common::{RustFSTestEnvironment, init_logging};
use aws_sdk_s3::primitives::ByteStream;
use serial_test::serial;
use tracing::info;
/// Verify Content-Encoding header roundtrips through PUT, GET, and HEAD operations
#[tokio::test]
#[serial]
async fn test_content_encoding_roundtrip() {
init_logging();
info!("Starting Content-Encoding roundtrip test");
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
let client = env.create_s3_client();
let bucket = "content-encoding-test";
let key = "logs/app.log.zst";
let content = b"2024-01-15 10:23:45 INFO Application started\n2024-01-15 10:23:46 DEBUG Loading config\n";
client
.create_bucket()
.bucket(bucket)
.send()
.await
.expect("Failed to create bucket");
info!("Uploading object with Content-Encoding: zstd");
client
.put_object()
.bucket(bucket)
.key(key)
.content_type("text/plain")
.content_encoding("zstd")
.body(ByteStream::from_static(content))
.send()
.await
.expect("PUT failed");
info!("Verifying GET response includes Content-Encoding");
let get_resp = client.get_object().bucket(bucket).key(key).send().await.expect("GET failed");
assert_eq!(get_resp.content_encoding(), Some("zstd"), "GET should return Content-Encoding: zstd");
assert_eq!(get_resp.content_type(), Some("text/plain"), "GET should return correct Content-Type");
let body = get_resp.body.collect().await.unwrap().into_bytes();
assert_eq!(body.as_ref(), content, "Body content mismatch");
info!("Verifying HEAD response includes Content-Encoding");
let head_resp = client
.head_object()
.bucket(bucket)
.key(key)
.send()
.await
.expect("HEAD failed");
assert_eq!(head_resp.content_encoding(), Some("zstd"), "HEAD should return Content-Encoding: zstd");
assert_eq!(head_resp.content_type(), Some("text/plain"), "HEAD should return correct Content-Type");
env.stop_server();
}
}

View File

@@ -0,0 +1,73 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use aws_sdk_s3::primitives::ByteStream;
use rustfs_common::data_usage::DataUsageInfo;
use serial_test::serial;
use crate::common::{RustFSTestEnvironment, TEST_BUCKET, awscurl_get, init_logging};
/// Regression test for data usage accuracy (issue #1012).
/// Launches rustfs, writes 1000 objects, then asserts admin data usage reports the full count.
#[tokio::test(flavor = "multi_thread")]
#[serial]
#[ignore = "Starts a rustfs server and requires awscurl; enable when running full E2E"]
async fn data_usage_reports_all_objects() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
init_logging();
let mut env = RustFSTestEnvironment::new().await?;
env.start_rustfs_server(vec![]).await?;
let client = env.create_s3_client();
// Create bucket and upload objects
client.create_bucket().bucket(TEST_BUCKET).send().await?;
for i in 0..1000 {
let key = format!("obj-{i:04}");
client
.put_object()
.bucket(TEST_BUCKET)
.key(key)
.body(ByteStream::from_static(b"hello-world"))
.send()
.await?;
}
// Query admin data usage API
let url = format!("{}/rustfs/admin/v3/datausageinfo", env.url);
let resp = awscurl_get(&url, &env.access_key, &env.secret_key).await?;
let usage: DataUsageInfo = serde_json::from_str(&resp)?;
// Assert total object count and per-bucket count are not truncated
let bucket_usage = usage
.buckets_usage
.get(TEST_BUCKET)
.cloned()
.expect("bucket usage should exist");
assert!(
usage.objects_total_count >= 1000,
"total object count should be at least 1000, got {}",
usage.objects_total_count
);
assert!(
bucket_usage.objects_count >= 1000,
"bucket object count should be at least 1000, got {}",
bucket_usage.objects_count
);
env.stop_server();
Ok(())
}

View File

@@ -18,6 +18,18 @@ mod reliant;
#[cfg(test)]
pub mod common;
// Data usage regression tests
#[cfg(test)]
mod data_usage_test;
// KMS-specific test modules
#[cfg(test)]
mod kms;
// Special characters in path test modules
#[cfg(test)]
mod special_chars_test;
// Content-Encoding header preservation test
#[cfg(test)]
mod content_encoding_test;

View File

@@ -0,0 +1,284 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! Test for GetObject on deleted objects
//!
//! This test reproduces the issue where getting a deleted object returns
//! a networking error instead of NoSuchKey.
#![cfg(test)]
use aws_config::meta::region::RegionProviderChain;
use aws_sdk_s3::Client;
use aws_sdk_s3::config::{Credentials, Region};
use aws_sdk_s3::error::SdkError;
use bytes::Bytes;
use serial_test::serial;
use std::error::Error;
use tracing::info;
const ENDPOINT: &str = "http://localhost:9000";
const ACCESS_KEY: &str = "rustfsadmin";
const SECRET_KEY: &str = "rustfsadmin";
const BUCKET: &str = "test-get-deleted-bucket";
async fn create_aws_s3_client() -> Result<Client, Box<dyn Error>> {
let region_provider = RegionProviderChain::default_provider().or_else(Region::new("us-east-1"));
let shared_config = aws_config::defaults(aws_config::BehaviorVersion::latest())
.region(region_provider)
.credentials_provider(Credentials::new(ACCESS_KEY, SECRET_KEY, None, None, "static"))
.endpoint_url(ENDPOINT)
.load()
.await;
let client = Client::from_conf(
aws_sdk_s3::Config::from(&shared_config)
.to_builder()
.force_path_style(true)
.build(),
);
Ok(client)
}
/// Setup test bucket, creating it if it doesn't exist
async fn setup_test_bucket(client: &Client) -> Result<(), Box<dyn Error>> {
match client.create_bucket().bucket(BUCKET).send().await {
Ok(_) => {}
Err(SdkError::ServiceError(e)) => {
let e = e.into_err();
let error_code = e.meta().code().unwrap_or("");
if !error_code.eq("BucketAlreadyExists") && !error_code.eq("BucketAlreadyOwnedByYou") {
return Err(e.into());
}
}
Err(e) => {
return Err(e.into());
}
}
Ok(())
}
#[tokio::test]
#[serial]
#[ignore = "requires running RustFS server at localhost:9000"]
async fn test_get_deleted_object_returns_nosuchkey() -> Result<(), Box<dyn std::error::Error>> {
// Initialize logging
let _ = tracing_subscriber::fmt()
.with_max_level(tracing::Level::INFO)
.with_test_writer()
.try_init();
info!("🧪 Starting test_get_deleted_object_returns_nosuchkey");
let client = create_aws_s3_client().await?;
setup_test_bucket(&client).await?;
// Upload a test object
let key = "test-file-to-delete.txt";
let content = b"This will be deleted soon!";
info!("Uploading object: {}", key);
client
.put_object()
.bucket(BUCKET)
.key(key)
.body(Bytes::from_static(content).into())
.send()
.await?;
// Verify object exists
info!("Verifying object exists");
let get_result = client.get_object().bucket(BUCKET).key(key).send().await;
assert!(get_result.is_ok(), "Object should exist after upload");
// Delete the object
info!("Deleting object: {}", key);
client.delete_object().bucket(BUCKET).key(key).send().await?;
// Try to get the deleted object - should return NoSuchKey error
info!("Attempting to get deleted object - expecting NoSuchKey error");
let get_result = client.get_object().bucket(BUCKET).key(key).send().await;
// Check that we get an error
assert!(get_result.is_err(), "Getting deleted object should return an error");
// Check that the error is NoSuchKey, not a networking error
let err = get_result.unwrap_err();
// Print the error for debugging
info!("Error received: {:?}", err);
// Check if it's a service error
match err {
SdkError::ServiceError(service_err) => {
let s3_err = service_err.into_err();
info!("Service error code: {:?}", s3_err.meta().code());
// The error should be NoSuchKey
assert!(s3_err.is_no_such_key(), "Error should be NoSuchKey, got: {:?}", s3_err);
info!("✅ Test passed: GetObject on deleted object correctly returns NoSuchKey");
}
other_err => {
panic!("Expected ServiceError with NoSuchKey, but got: {:?}", other_err);
}
}
// Cleanup
let _ = client.delete_object().bucket(BUCKET).key(key).send().await;
Ok(())
}
/// Test that HeadObject on a deleted object also returns NoSuchKey
#[tokio::test]
#[serial]
#[ignore = "requires running RustFS server at localhost:9000"]
async fn test_head_deleted_object_returns_nosuchkey() -> Result<(), Box<dyn std::error::Error>> {
let _ = tracing_subscriber::fmt()
.with_max_level(tracing::Level::INFO)
.with_test_writer()
.try_init();
info!("🧪 Starting test_head_deleted_object_returns_nosuchkey");
let client = create_aws_s3_client().await?;
setup_test_bucket(&client).await?;
let key = "test-head-deleted.txt";
let content = b"Test content for HeadObject";
// Upload and verify
client
.put_object()
.bucket(BUCKET)
.key(key)
.body(Bytes::from_static(content).into())
.send()
.await?;
// Delete the object
client.delete_object().bucket(BUCKET).key(key).send().await?;
// Try to head the deleted object
let head_result = client.head_object().bucket(BUCKET).key(key).send().await;
assert!(head_result.is_err(), "HeadObject on deleted object should return an error");
match head_result.unwrap_err() {
SdkError::ServiceError(service_err) => {
let s3_err = service_err.into_err();
assert!(
s3_err.meta().code() == Some("NoSuchKey") || s3_err.meta().code() == Some("NotFound"),
"Error should be NoSuchKey or NotFound, got: {:?}",
s3_err
);
info!("✅ HeadObject correctly returns NoSuchKey/NotFound");
}
other_err => {
panic!("Expected ServiceError but got: {:?}", other_err);
}
}
Ok(())
}
/// Test GetObject with non-existent key (never existed)
#[tokio::test]
#[serial]
#[ignore = "requires running RustFS server at localhost:9000"]
async fn test_get_nonexistent_object_returns_nosuchkey() -> Result<(), Box<dyn std::error::Error>> {
let _ = tracing_subscriber::fmt()
.with_max_level(tracing::Level::INFO)
.with_test_writer()
.try_init();
info!("🧪 Starting test_get_nonexistent_object_returns_nosuchkey");
let client = create_aws_s3_client().await?;
setup_test_bucket(&client).await?;
// Try to get an object that never existed
let key = "this-key-never-existed.txt";
let get_result = client.get_object().bucket(BUCKET).key(key).send().await;
assert!(get_result.is_err(), "Getting non-existent object should return an error");
match get_result.unwrap_err() {
SdkError::ServiceError(service_err) => {
let s3_err = service_err.into_err();
assert!(s3_err.is_no_such_key(), "Error should be NoSuchKey, got: {:?}", s3_err);
info!("✅ GetObject correctly returns NoSuchKey for non-existent object");
}
other_err => {
panic!("Expected ServiceError with NoSuchKey, but got: {:?}", other_err);
}
}
Ok(())
}
/// Test multiple consecutive GetObject calls on deleted object
/// This ensures the fix is stable and doesn't have race conditions
#[tokio::test]
#[serial]
#[ignore = "requires running RustFS server at localhost:9000"]
async fn test_multiple_gets_deleted_object() -> Result<(), Box<dyn std::error::Error>> {
let _ = tracing_subscriber::fmt()
.with_max_level(tracing::Level::INFO)
.with_test_writer()
.try_init();
info!("🧪 Starting test_multiple_gets_deleted_object");
let client = create_aws_s3_client().await?;
setup_test_bucket(&client).await?;
let key = "test-multiple-gets.txt";
let content = b"Test content";
// Upload and delete
client
.put_object()
.bucket(BUCKET)
.key(key)
.body(Bytes::from_static(content).into())
.send()
.await?;
client.delete_object().bucket(BUCKET).key(key).send().await?;
// Try multiple consecutive GetObject calls
for i in 1..=5 {
info!("Attempt {} to get deleted object", i);
let get_result = client.get_object().bucket(BUCKET).key(key).send().await;
assert!(get_result.is_err(), "Attempt {}: should return error", i);
match get_result.unwrap_err() {
SdkError::ServiceError(service_err) => {
let s3_err = service_err.into_err();
assert!(s3_err.is_no_such_key(), "Attempt {}: Error should be NoSuchKey, got: {:?}", i, s3_err);
}
other_err => {
panic!("Attempt {}: Expected ServiceError but got: {:?}", i, other_err);
}
}
}
info!("✅ All 5 attempts correctly returned NoSuchKey");
Ok(())
}

View File

@@ -13,6 +13,7 @@
// limitations under the License.
mod conditional_writes;
mod get_deleted_object_test;
mod lifecycle;
mod lock;
mod node_interact_test;

View File

@@ -0,0 +1,799 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//! End-to-end tests for special characters in object paths
//!
//! This module tests the handling of various special characters in S3 object keys,
//! including spaces, plus signs, percent signs, and other URL-encoded characters.
//!
//! ## Test Scenarios
//!
//! 1. **Spaces in paths**: `a f+/b/c/README.md` (encoded as `a%20f+/b/c/README.md`)
//! 2. **Plus signs in paths**: `ES+net/file+name.txt`
//! 3. **Mixed special characters**: Combinations of spaces, plus, percent, etc.
//! 4. **Operations tested**: PUT, GET, LIST, DELETE
#[cfg(test)]
mod tests {
use crate::common::{RustFSTestEnvironment, init_logging};
use aws_sdk_s3::Client;
use aws_sdk_s3::primitives::ByteStream;
use serial_test::serial;
use tracing::{debug, info};
/// Helper function to create an S3 client for testing
fn create_s3_client(env: &RustFSTestEnvironment) -> Client {
env.create_s3_client()
}
/// Helper function to create a test bucket
async fn create_bucket(client: &Client, bucket: &str) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
match client.create_bucket().bucket(bucket).send().await {
Ok(_) => {
info!("Bucket {} created successfully", bucket);
Ok(())
}
Err(e) => {
// Ignore if bucket already exists
if e.to_string().contains("BucketAlreadyOwnedByYou") || e.to_string().contains("BucketAlreadyExists") {
info!("Bucket {} already exists", bucket);
Ok(())
} else {
Err(Box::new(e))
}
}
}
}
/// Test PUT and GET with space character in path
///
/// This reproduces Part A of the issue:
/// ```
/// mc cp README.md "local/dummy/a%20f+/b/c/3/README.md"
/// ```
#[tokio::test]
#[serial]
async fn test_object_with_space_in_path() {
init_logging();
info!("Starting test: object with space in path");
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
let client = create_s3_client(&env);
let bucket = "test-special-chars";
// Create bucket
create_bucket(&client, bucket).await.expect("Failed to create bucket");
// Test key with space: "a f+/b/c/3/README.md"
// When URL-encoded by client: "a%20f+/b/c/3/README.md"
let key = "a f+/b/c/3/README.md";
let content = b"Test content with space in path";
info!("Testing PUT object with key: {}", key);
// PUT object
let result = client
.put_object()
.bucket(bucket)
.key(key)
.body(ByteStream::from_static(content))
.send()
.await;
assert!(result.is_ok(), "Failed to PUT object with space in path: {:?}", result.err());
info!("✅ PUT object with space in path succeeded");
// GET object
info!("Testing GET object with key: {}", key);
let result = client.get_object().bucket(bucket).key(key).send().await;
assert!(result.is_ok(), "Failed to GET object with space in path: {:?}", result.err());
let output = result.unwrap();
let body_bytes = output.body.collect().await.unwrap().into_bytes();
assert_eq!(body_bytes.as_ref(), content, "Content mismatch");
info!("✅ GET object with space in path succeeded");
// LIST objects with prefix containing space
info!("Testing LIST objects with prefix: a f+/");
let result = client.list_objects_v2().bucket(bucket).prefix("a f+/").send().await;
assert!(result.is_ok(), "Failed to LIST objects with space in prefix: {:?}", result.err());
let output = result.unwrap();
let contents = output.contents();
assert!(!contents.is_empty(), "LIST returned no objects");
assert!(
contents.iter().any(|obj| obj.key().unwrap() == key),
"Object with space not found in LIST results"
);
info!("✅ LIST objects with space in prefix succeeded");
// LIST objects with deeper prefix
info!("Testing LIST objects with prefix: a f+/b/c/");
let result = client.list_objects_v2().bucket(bucket).prefix("a f+/b/c/").send().await;
assert!(result.is_ok(), "Failed to LIST objects with deeper prefix: {:?}", result.err());
let output = result.unwrap();
let contents = output.contents();
assert!(!contents.is_empty(), "LIST with deeper prefix returned no objects");
info!("✅ LIST objects with deeper prefix succeeded");
// Cleanup
env.stop_server();
info!("Test completed successfully");
}
/// Test PUT and GET with plus sign in path
///
/// This reproduces Part B of the issue:
/// ```
/// /test/data/org_main-org/dashboards/ES+net/LHC+Data+Challenge/firefly-details.json
/// ```
#[tokio::test]
#[serial]
async fn test_object_with_plus_in_path() {
init_logging();
info!("Starting test: object with plus sign in path");
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
let client = create_s3_client(&env);
let bucket = "test-plus-chars";
// Create bucket
create_bucket(&client, bucket).await.expect("Failed to create bucket");
// Test key with plus signs
let key = "dashboards/ES+net/LHC+Data+Challenge/firefly-details.json";
let content = b"Test content with plus signs in path";
info!("Testing PUT object with key: {}", key);
// PUT object
let result = client
.put_object()
.bucket(bucket)
.key(key)
.body(ByteStream::from_static(content))
.send()
.await;
assert!(result.is_ok(), "Failed to PUT object with plus in path: {:?}", result.err());
info!("✅ PUT object with plus in path succeeded");
// GET object
info!("Testing GET object with key: {}", key);
let result = client.get_object().bucket(bucket).key(key).send().await;
assert!(result.is_ok(), "Failed to GET object with plus in path: {:?}", result.err());
let output = result.unwrap();
let body_bytes = output.body.collect().await.unwrap().into_bytes();
assert_eq!(body_bytes.as_ref(), content, "Content mismatch");
info!("✅ GET object with plus in path succeeded");
// LIST objects with prefix containing plus
info!("Testing LIST objects with prefix: dashboards/ES+net/");
let result = client
.list_objects_v2()
.bucket(bucket)
.prefix("dashboards/ES+net/")
.send()
.await;
assert!(result.is_ok(), "Failed to LIST objects with plus in prefix: {:?}", result.err());
let output = result.unwrap();
let contents = output.contents();
assert!(!contents.is_empty(), "LIST returned no objects");
assert!(
contents.iter().any(|obj| obj.key().unwrap() == key),
"Object with plus not found in LIST results"
);
info!("✅ LIST objects with plus in prefix succeeded");
// Cleanup
env.stop_server();
info!("Test completed successfully");
}
/// Test with mixed special characters
#[tokio::test]
#[serial]
async fn test_object_with_mixed_special_chars() {
init_logging();
info!("Starting test: object with mixed special characters");
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
let client = create_s3_client(&env);
let bucket = "test-mixed-chars";
// Create bucket
create_bucket(&client, bucket).await.expect("Failed to create bucket");
// Test various special characters
let test_cases = vec![
("path/with spaces/file.txt", b"Content 1" as &[u8]),
("path/with+plus/file.txt", b"Content 2"),
("path/with spaces+and+plus/file.txt", b"Content 3"),
("ES+net/folder name/file.txt", b"Content 4"),
];
for (key, content) in &test_cases {
info!("Testing with key: {}", key);
// PUT
let result = client
.put_object()
.bucket(bucket)
.key(*key)
.body(ByteStream::from(content.to_vec()))
.send()
.await;
assert!(result.is_ok(), "Failed to PUT object with key '{}': {:?}", key, result.err());
// GET
let result = client.get_object().bucket(bucket).key(*key).send().await;
assert!(result.is_ok(), "Failed to GET object with key '{}': {:?}", key, result.err());
let output = result.unwrap();
let body_bytes = output.body.collect().await.unwrap().into_bytes();
assert_eq!(body_bytes.as_ref(), *content, "Content mismatch for key '{}'", key);
info!("✅ PUT/GET succeeded for key: {}", key);
}
// LIST all objects
let result = client.list_objects_v2().bucket(bucket).send().await;
assert!(result.is_ok(), "Failed to LIST all objects");
let output = result.unwrap();
let contents = output.contents();
assert_eq!(contents.len(), test_cases.len(), "Number of objects mismatch");
// Cleanup
env.stop_server();
info!("Test completed successfully");
}
/// Test DELETE operation with special characters
#[tokio::test]
#[serial]
async fn test_delete_object_with_special_chars() {
init_logging();
info!("Starting test: DELETE object with special characters");
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
let client = create_s3_client(&env);
let bucket = "test-delete-special";
// Create bucket
create_bucket(&client, bucket).await.expect("Failed to create bucket");
let key = "folder with spaces/ES+net/file.txt";
let content = b"Test content";
// PUT object
client
.put_object()
.bucket(bucket)
.key(key)
.body(ByteStream::from_static(content))
.send()
.await
.expect("Failed to PUT object");
// Verify it exists
let result = client.get_object().bucket(bucket).key(key).send().await;
assert!(result.is_ok(), "Object should exist before DELETE");
// DELETE object
info!("Testing DELETE object with key: {}", key);
let result = client.delete_object().bucket(bucket).key(key).send().await;
assert!(result.is_ok(), "Failed to DELETE object with special chars: {:?}", result.err());
info!("✅ DELETE object succeeded");
// Verify it's deleted
let result = client.get_object().bucket(bucket).key(key).send().await;
assert!(result.is_err(), "Object should not exist after DELETE");
// Cleanup
env.stop_server();
info!("Test completed successfully");
}
/// Test exact scenario from the issue
#[tokio::test]
#[serial]
async fn test_issue_scenario_exact() {
init_logging();
info!("Starting test: Exact scenario from GitHub issue");
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
let client = create_s3_client(&env);
let bucket = "dummy";
// Create bucket
create_bucket(&client, bucket).await.expect("Failed to create bucket");
// Exact key from issue: "a%20f+/b/c/3/README.md"
// The decoded form should be: "a f+/b/c/3/README.md"
let key = "a f+/b/c/3/README.md";
let content = b"README content";
info!("Reproducing exact issue scenario with key: {}", key);
// Step 1: Upload file (like `mc cp README.md "local/dummy/a%20f+/b/c/3/README.md"`)
let result = client
.put_object()
.bucket(bucket)
.key(key)
.body(ByteStream::from_static(content))
.send()
.await;
assert!(result.is_ok(), "Failed to upload file: {:?}", result.err());
info!("✅ File uploaded successfully");
// Step 2: Navigate to folder (like navigating to "%20f+/" in UI)
// This is equivalent to listing with prefix "a f+/"
info!("Listing folder 'a f+/' (this should show subdirectories)");
let result = client
.list_objects_v2()
.bucket(bucket)
.prefix("a f+/")
.delimiter("/")
.send()
.await;
assert!(result.is_ok(), "Failed to list folder: {:?}", result.err());
let output = result.unwrap();
debug!("List result: {:?}", output);
// Should show "b/" as a common prefix (subdirectory)
let common_prefixes = output.common_prefixes();
assert!(
!common_prefixes.is_empty() || !output.contents().is_empty(),
"Folder should show contents or subdirectories"
);
info!("✅ Folder listing succeeded");
// Step 3: List deeper (like `mc ls "local/dummy/a%20f+/b/c/3/"`)
info!("Listing deeper folder 'a f+/b/c/3/'");
let result = client.list_objects_v2().bucket(bucket).prefix("a f+/b/c/3/").send().await;
assert!(result.is_ok(), "Failed to list deep folder: {:?}", result.err());
let output = result.unwrap();
let contents = output.contents();
assert!(!contents.is_empty(), "Deep folder should show the file");
assert!(contents.iter().any(|obj| obj.key().unwrap() == key), "README.md should be in the list");
info!("✅ Deep folder listing succeeded - file found");
// Cleanup
env.stop_server();
info!("✅ Exact issue scenario test completed successfully");
}
/// Test HEAD object with special characters
#[tokio::test]
#[serial]
async fn test_head_object_with_special_chars() {
init_logging();
info!("Starting test: HEAD object with special characters");
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
let client = create_s3_client(&env);
let bucket = "test-head-special";
// Create bucket
create_bucket(&client, bucket).await.expect("Failed to create bucket");
let key = "folder with spaces/ES+net/file.txt";
let content = b"Test content for HEAD";
// PUT object
client
.put_object()
.bucket(bucket)
.key(key)
.body(ByteStream::from_static(content))
.send()
.await
.expect("Failed to PUT object");
info!("Testing HEAD object with key: {}", key);
// HEAD object
let result = client.head_object().bucket(bucket).key(key).send().await;
assert!(result.is_ok(), "Failed to HEAD object with special chars: {:?}", result.err());
let output = result.unwrap();
assert_eq!(output.content_length().unwrap_or(0), content.len() as i64, "Content length mismatch");
info!("✅ HEAD object with special characters succeeded");
// Cleanup
env.stop_server();
info!("Test completed successfully");
}
/// Test COPY object with special characters in both source and destination
#[tokio::test]
#[serial]
async fn test_copy_object_with_special_chars() {
init_logging();
info!("Starting test: COPY object with special characters");
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
let client = create_s3_client(&env);
let bucket = "test-copy-special";
// Create bucket
create_bucket(&client, bucket).await.expect("Failed to create bucket");
let src_key = "source/folder with spaces/file.txt";
let dest_key = "dest/ES+net/copied file.txt";
let content = b"Test content for COPY";
// PUT source object
client
.put_object()
.bucket(bucket)
.key(src_key)
.body(ByteStream::from_static(content))
.send()
.await
.expect("Failed to PUT source object");
info!("Testing COPY from '{}' to '{}'", src_key, dest_key);
// COPY object
let copy_source = format!("{}/{}", bucket, src_key);
let result = client
.copy_object()
.bucket(bucket)
.key(dest_key)
.copy_source(&copy_source)
.send()
.await;
assert!(result.is_ok(), "Failed to COPY object with special chars: {:?}", result.err());
info!("✅ COPY operation succeeded");
// Verify destination exists
let result = client.get_object().bucket(bucket).key(dest_key).send().await;
assert!(result.is_ok(), "Failed to GET copied object");
let output = result.unwrap();
let body_bytes = output.body.collect().await.unwrap().into_bytes();
assert_eq!(body_bytes.as_ref(), content, "Copied content mismatch");
info!("✅ Copied object verified successfully");
// Cleanup
env.stop_server();
info!("Test completed successfully");
}
/// Test Unicode characters in object keys
#[tokio::test]
#[serial]
async fn test_unicode_characters_in_path() {
init_logging();
info!("Starting test: Unicode characters in object paths");
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
let client = create_s3_client(&env);
let bucket = "test-unicode";
// Create bucket
create_bucket(&client, bucket).await.expect("Failed to create bucket");
// Test various Unicode characters
let test_cases = vec![
("测试/文件.txt", b"Chinese characters" as &[u8]),
("テスト/ファイル.txt", b"Japanese characters"),
("테스트/파일.txt", b"Korean characters"),
("тест/файл.txt", b"Cyrillic characters"),
("emoji/😀/file.txt", b"Emoji in path"),
("mixed/测试 test/file.txt", b"Mixed languages"),
];
for (key, content) in &test_cases {
info!("Testing Unicode key: {}", key);
// PUT
let result = client
.put_object()
.bucket(bucket)
.key(*key)
.body(ByteStream::from(content.to_vec()))
.send()
.await;
assert!(result.is_ok(), "Failed to PUT object with Unicode key '{}': {:?}", key, result.err());
// GET
let result = client.get_object().bucket(bucket).key(*key).send().await;
assert!(result.is_ok(), "Failed to GET object with Unicode key '{}': {:?}", key, result.err());
let output = result.unwrap();
let body_bytes = output.body.collect().await.unwrap().into_bytes();
assert_eq!(body_bytes.as_ref(), *content, "Content mismatch for Unicode key '{}'", key);
info!("✅ PUT/GET succeeded for Unicode key: {}", key);
}
// LIST to verify all objects
let result = client.list_objects_v2().bucket(bucket).send().await;
assert!(result.is_ok(), "Failed to LIST objects with Unicode keys");
let output = result.unwrap();
let contents = output.contents();
assert_eq!(contents.len(), test_cases.len(), "Number of Unicode objects mismatch");
info!("✅ All Unicode objects listed successfully");
// Cleanup
env.stop_server();
info!("Test completed successfully");
}
/// Test special characters in different parts of the path
#[tokio::test]
#[serial]
async fn test_special_chars_in_different_path_positions() {
init_logging();
info!("Starting test: Special characters in different path positions");
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
let client = create_s3_client(&env);
let bucket = "test-path-positions";
// Create bucket
create_bucket(&client, bucket).await.expect("Failed to create bucket");
// Test special characters in different positions
let test_cases = vec![
("start with space/file.txt", b"Space at start" as &[u8]),
("folder/end with space /file.txt", b"Space at end of folder"),
("multiple spaces/file.txt", b"Multiple consecutive spaces"),
("folder/file with space.txt", b"Space in filename"),
("a+b/c+d/e+f.txt", b"Plus signs throughout"),
("a%b/c%d/e%f.txt", b"Percent signs throughout"),
("folder/!@#$%^&*()/file.txt", b"Multiple special chars"),
("(parentheses)/[brackets]/file.txt", b"Parentheses and brackets"),
("'quotes'/\"double\"/file.txt", b"Quote characters"),
];
for (key, content) in &test_cases {
info!("Testing key: {}", key);
// PUT
let result = client
.put_object()
.bucket(bucket)
.key(*key)
.body(ByteStream::from(content.to_vec()))
.send()
.await;
assert!(result.is_ok(), "Failed to PUT object with key '{}': {:?}", key, result.err());
// GET
let result = client.get_object().bucket(bucket).key(*key).send().await;
assert!(result.is_ok(), "Failed to GET object with key '{}': {:?}", key, result.err());
let output = result.unwrap();
let body_bytes = output.body.collect().await.unwrap().into_bytes();
assert_eq!(body_bytes.as_ref(), *content, "Content mismatch for key '{}'", key);
info!("✅ PUT/GET succeeded for key: {}", key);
}
// Cleanup
env.stop_server();
info!("Test completed successfully");
}
/// Test that control characters are properly rejected
#[tokio::test]
#[serial]
async fn test_control_characters_rejected() {
init_logging();
info!("Starting test: Control characters should be rejected");
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
let client = create_s3_client(&env);
let bucket = "test-control-chars";
// Create bucket
create_bucket(&client, bucket).await.expect("Failed to create bucket");
// Test that control characters are rejected
let invalid_keys = vec![
"file\0with\0null.txt",
"file\nwith\nnewline.txt",
"file\rwith\rcarriage.txt",
"file\twith\ttab.txt", // Tab might be allowed, but let's test
];
for key in invalid_keys {
info!("Testing rejection of control character in key: {:?}", key);
let result = client
.put_object()
.bucket(bucket)
.key(key)
.body(ByteStream::from_static(b"test"))
.send()
.await;
// Note: The validation happens on the server side, so we expect an error
// For null byte, newline, and carriage return
if key.contains('\0') || key.contains('\n') || key.contains('\r') {
assert!(result.is_err(), "Control character should be rejected for key: {:?}", key);
if let Err(e) = result {
info!("✅ Control character correctly rejected: {:?}", e);
}
}
}
// Cleanup
env.stop_server();
info!("Test completed successfully");
}
/// Test LIST with various special character prefixes
#[tokio::test]
#[serial]
async fn test_list_with_special_char_prefixes() {
init_logging();
info!("Starting test: LIST with special character prefixes");
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
let client = create_s3_client(&env);
let bucket = "test-list-prefixes";
// Create bucket
create_bucket(&client, bucket).await.expect("Failed to create bucket");
// Create objects with various special characters
let test_objects = vec![
"prefix with spaces/file1.txt",
"prefix with spaces/file2.txt",
"prefix+plus/file1.txt",
"prefix+plus/file2.txt",
"prefix%percent/file1.txt",
"prefix%percent/file2.txt",
];
for key in &test_objects {
client
.put_object()
.bucket(bucket)
.key(*key)
.body(ByteStream::from_static(b"test"))
.send()
.await
.expect("Failed to PUT object");
}
// Test LIST with different prefixes
let prefix_tests = vec![
("prefix with spaces/", 2),
("prefix+plus/", 2),
("prefix%percent/", 2),
("prefix", 6), // Should match all
];
for (prefix, expected_count) in prefix_tests {
info!("Testing LIST with prefix: '{}'", prefix);
let result = client.list_objects_v2().bucket(bucket).prefix(prefix).send().await;
assert!(result.is_ok(), "Failed to LIST with prefix '{}': {:?}", prefix, result.err());
let output = result.unwrap();
let contents = output.contents();
assert_eq!(
contents.len(),
expected_count,
"Expected {} objects with prefix '{}', got {}",
expected_count,
prefix,
contents.len()
);
info!("✅ LIST with prefix '{}' returned {} objects", prefix, contents.len());
}
// Cleanup
env.stop_server();
info!("Test completed successfully");
}
/// Test delimiter-based listing with special characters
#[tokio::test]
#[serial]
async fn test_list_with_delimiter_and_special_chars() {
init_logging();
info!("Starting test: LIST with delimiter and special characters");
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
let client = create_s3_client(&env);
let bucket = "test-delimiter-special";
// Create bucket
create_bucket(&client, bucket).await.expect("Failed to create bucket");
// Create hierarchical structure with special characters
let test_objects = vec![
"folder with spaces/subfolder1/file.txt",
"folder with spaces/subfolder2/file.txt",
"folder with spaces/file.txt",
"folder+plus/subfolder1/file.txt",
"folder+plus/file.txt",
];
for key in &test_objects {
client
.put_object()
.bucket(bucket)
.key(*key)
.body(ByteStream::from_static(b"test"))
.send()
.await
.expect("Failed to PUT object");
}
// Test LIST with delimiter
info!("Testing LIST with delimiter for 'folder with spaces/'");
let result = client
.list_objects_v2()
.bucket(bucket)
.prefix("folder with spaces/")
.delimiter("/")
.send()
.await;
assert!(result.is_ok(), "Failed to LIST with delimiter");
let output = result.unwrap();
let common_prefixes = output.common_prefixes();
assert_eq!(common_prefixes.len(), 2, "Should have 2 common prefixes (subdirectories)");
info!("✅ LIST with delimiter returned {} common prefixes", common_prefixes.len());
// Cleanup
env.stop_server();
info!("Test completed successfully");
}
}

View File

@@ -8,7 +8,7 @@
<p align="center">
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
<a href="https://docs.rustfs.com/en/">📖 Documentation</a>
<a href="https://docs.rustfs.com/">📖 Documentation</a>
· <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
· <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
</p>

View File

@@ -1105,10 +1105,17 @@ impl TargetClient {
Err(e) => match e {
SdkError::ServiceError(oe) => match oe.into_err() {
HeadBucketError::NotFound(_) => Ok(false),
other => Err(other.into()),
other => Err(S3ClientError::new(format!(
"failed to check bucket exists for bucket:{bucket} please check the bucket name and credentials, error:{other:?}"
))),
},
SdkError::DispatchFailure(e) => Err(S3ClientError::new(format!(
"failed to dispatch bucket exists for bucket:{bucket} error:{e:?}"
))),
_ => Err(e.into()),
_ => Err(S3ClientError::new(format!(
"failed to check bucket exists for bucket:{bucket} error:{e:?}"
))),
},
}
}

View File

@@ -283,7 +283,17 @@ impl Lifecycle for BucketLifecycleConfiguration {
"eval_inner: object={}, mod_time={:?}, now={:?}, is_latest={}, delete_marker={}",
obj.name, obj.mod_time, now, obj.is_latest, obj.delete_marker
);
if obj.mod_time.expect("err").unix_timestamp() == 0 {
// Gracefully handle missing mod_time instead of panicking
let mod_time = match obj.mod_time {
Some(t) => t,
None => {
info!("eval_inner: mod_time is None for object={}, returning default event", obj.name);
return Event::default();
}
};
if mod_time.unix_timestamp() == 0 {
info!("eval_inner: mod_time is 0, returning default event");
return Event::default();
}
@@ -323,7 +333,7 @@ impl Lifecycle for BucketLifecycleConfiguration {
}
if let Some(days) = expiration.days {
let expected_expiry = expected_expiry_time(obj.mod_time.unwrap(), days /*, date*/);
let expected_expiry = expected_expiry_time(mod_time, days /*, date*/);
if now.unix_timestamp() >= expected_expiry.unix_timestamp() {
events.push(Event {
action: IlmAction::DeleteVersionAction,
@@ -446,11 +456,11 @@ impl Lifecycle for BucketLifecycleConfiguration {
});
}
} else if let Some(days) = expiration.days {
let expected_expiry: OffsetDateTime = expected_expiry_time(obj.mod_time.unwrap(), days);
let expected_expiry: OffsetDateTime = expected_expiry_time(mod_time, days);
info!(
"eval_inner: expiration check - days={}, obj_time={:?}, expiry_time={:?}, now={:?}, should_expire={}",
days,
obj.mod_time.expect("err!"),
mod_time,
expected_expiry,
now,
now.unix_timestamp() > expected_expiry.unix_timestamp()

View File

@@ -103,6 +103,8 @@ impl ReplicationConfigurationExt for ReplicationConfiguration {
if filter.test_tags(&object_tags) {
rules.push(rule.clone());
}
} else {
rules.push(rule.clone());
}
}

View File

@@ -34,7 +34,7 @@ use rustfs_filemeta::{
};
use rustfs_utils::http::{
AMZ_BUCKET_REPLICATION_STATUS, AMZ_OBJECT_TAGGING, AMZ_TAGGING_DIRECTIVE, CONTENT_ENCODING, HeaderExt as _,
RESERVED_METADATA_PREFIX, RESERVED_METADATA_PREFIX_LOWER, RUSTFS_REPLICATION_AUTUAL_OBJECT_SIZE,
RESERVED_METADATA_PREFIX, RESERVED_METADATA_PREFIX_LOWER, RUSTFS_REPLICATION_ACTUAL_OBJECT_SIZE,
RUSTFS_REPLICATION_RESET_STATUS, SSEC_ALGORITHM_HEADER, SSEC_KEY_HEADER, SSEC_KEY_MD5_HEADER, headers,
};
use rustfs_utils::path::path_join_buf;
@@ -2324,7 +2324,7 @@ async fn replicate_object_with_multipart(
let mut user_metadata = HashMap::new();
user_metadata.insert(
RUSTFS_REPLICATION_AUTUAL_OBJECT_SIZE.to_string(),
RUSTFS_REPLICATION_ACTUAL_OBJECT_SIZE.to_string(),
object_info
.user_defined
.get(&format!("{RESERVED_METADATA_PREFIX}actual-size"))

View File

@@ -19,7 +19,7 @@ use rustfs_filemeta::{MetaCacheEntries, MetaCacheEntry, MetacacheReader, is_io_e
use std::{future::Future, pin::Pin, sync::Arc};
use tokio::spawn;
use tokio_util::sync::CancellationToken;
use tracing::{error, warn};
use tracing::{error, info, warn};
pub type AgreedFn = Box<dyn Fn(MetaCacheEntry) -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + 'static>;
pub type PartialFn =
@@ -99,7 +99,7 @@ pub async fn list_path_raw(rx: CancellationToken, opts: ListPathRawOptions) -> d
match disk.walk_dir(wakl_opts, &mut wr).await {
Ok(_res) => {}
Err(err) => {
error!("walk dir err {:?}", &err);
info!("walk dir err {:?}", &err);
need_fallback = true;
}
}

View File

@@ -32,6 +32,7 @@ use rustfs_common::data_usage::{
BucketTargetUsageInfo, BucketUsageInfo, DataUsageCache, DataUsageEntry, DataUsageInfo, DiskUsageStatus, SizeSummary,
};
use rustfs_utils::path::SLASH_SEPARATOR;
use tokio::fs;
use tracing::{error, info, warn};
use crate::error::Error;
@@ -63,6 +64,21 @@ lazy_static::lazy_static! {
/// Store data usage info to backend storage
pub async fn store_data_usage_in_backend(data_usage_info: DataUsageInfo, store: Arc<ECStore>) -> Result<(), Error> {
// Prevent older data from overwriting newer persisted stats
if let Ok(buf) = read_config(store.clone(), &DATA_USAGE_OBJ_NAME_PATH).await {
if let Ok(existing) = serde_json::from_slice::<DataUsageInfo>(&buf) {
if let (Some(new_ts), Some(existing_ts)) = (data_usage_info.last_update, existing.last_update) {
if new_ts <= existing_ts {
info!(
"Skip persisting data usage: incoming last_update {:?} <= existing {:?}",
new_ts, existing_ts
);
return Ok(());
}
}
}
}
let data =
serde_json::to_vec(&data_usage_info).map_err(|e| Error::other(format!("Failed to serialize data usage info: {e}")))?;
@@ -160,6 +176,39 @@ pub async fn load_data_usage_from_backend(store: Arc<ECStore>) -> Result<DataUsa
}
/// Aggregate usage information from local disk snapshots.
fn merge_snapshot(aggregated: &mut DataUsageInfo, mut snapshot: LocalUsageSnapshot, latest_update: &mut Option<SystemTime>) {
if let Some(update) = snapshot.last_update {
if latest_update.is_none_or(|current| update > current) {
*latest_update = Some(update);
}
}
snapshot.recompute_totals();
aggregated.objects_total_count = aggregated.objects_total_count.saturating_add(snapshot.objects_total_count);
aggregated.versions_total_count = aggregated.versions_total_count.saturating_add(snapshot.versions_total_count);
aggregated.delete_markers_total_count = aggregated
.delete_markers_total_count
.saturating_add(snapshot.delete_markers_total_count);
aggregated.objects_total_size = aggregated.objects_total_size.saturating_add(snapshot.objects_total_size);
for (bucket, usage) in snapshot.buckets_usage.into_iter() {
let bucket_size = usage.size;
match aggregated.buckets_usage.entry(bucket.clone()) {
Entry::Occupied(mut entry) => entry.get_mut().merge(&usage),
Entry::Vacant(entry) => {
entry.insert(usage.clone());
}
}
aggregated
.bucket_sizes
.entry(bucket)
.and_modify(|size| *size = size.saturating_add(bucket_size))
.or_insert(bucket_size);
}
}
pub async fn aggregate_local_snapshots(store: Arc<ECStore>) -> Result<(Vec<DiskUsageStatus>, DataUsageInfo), Error> {
let mut aggregated = DataUsageInfo::default();
let mut latest_update: Option<SystemTime> = None;
@@ -196,7 +245,24 @@ pub async fn aggregate_local_snapshots(store: Arc<ECStore>) -> Result<(Vec<DiskU
snapshot_exists: false,
};
if let Some(mut snapshot) = read_local_snapshot(root.as_path(), &disk_id).await? {
let snapshot_result = read_local_snapshot(root.as_path(), &disk_id).await;
// If a snapshot is corrupted or unreadable, skip it but keep processing others
if let Err(err) = &snapshot_result {
warn!(
"Failed to read data usage snapshot for disk {} (pool {}, set {}, disk {}): {}",
disk_id, pool_idx, set_disks.set_index, disk_index, err
);
// Best-effort cleanup so next scan can rebuild a fresh snapshot instead of repeatedly failing
let snapshot_file = snapshot_path(root.as_path(), &disk_id);
if let Err(remove_err) = fs::remove_file(&snapshot_file).await {
if remove_err.kind() != std::io::ErrorKind::NotFound {
warn!("Failed to remove corrupted snapshot {:?}: {}", snapshot_file, remove_err);
}
}
}
if let Ok(Some(mut snapshot)) = snapshot_result {
status.last_update = snapshot.last_update;
status.snapshot_exists = true;
@@ -213,37 +279,7 @@ pub async fn aggregate_local_snapshots(store: Arc<ECStore>) -> Result<(Vec<DiskU
snapshot.meta.disk_index = Some(disk_index);
}
snapshot.recompute_totals();
if let Some(update) = snapshot.last_update {
if latest_update.is_none_or(|current| update > current) {
latest_update = Some(update);
}
}
aggregated.objects_total_count = aggregated.objects_total_count.saturating_add(snapshot.objects_total_count);
aggregated.versions_total_count =
aggregated.versions_total_count.saturating_add(snapshot.versions_total_count);
aggregated.delete_markers_total_count = aggregated
.delete_markers_total_count
.saturating_add(snapshot.delete_markers_total_count);
aggregated.objects_total_size = aggregated.objects_total_size.saturating_add(snapshot.objects_total_size);
for (bucket, usage) in snapshot.buckets_usage.into_iter() {
let bucket_size = usage.size;
match aggregated.buckets_usage.entry(bucket.clone()) {
Entry::Occupied(mut entry) => entry.get_mut().merge(&usage),
Entry::Vacant(entry) => {
entry.insert(usage.clone());
}
}
aggregated
.bucket_sizes
.entry(bucket)
.and_modify(|size| *size = size.saturating_add(bucket_size))
.or_insert(bucket_size);
}
merge_snapshot(&mut aggregated, snapshot, &mut latest_update);
}
statuses.push(status);
@@ -277,6 +313,7 @@ pub async fn compute_bucket_usage(store: Arc<ECStore>, bucket_name: &str) -> Res
1000, // max_keys
false, // fetch_owner
None, // start_after
false, // incl_deleted
)
.await?;
@@ -548,3 +585,94 @@ pub async fn save_data_usage_cache(cache: &DataUsageCache, name: &str) -> crate:
save_config(store, &name, buf).await?;
Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
use rustfs_common::data_usage::BucketUsageInfo;
fn aggregate_for_test(
inputs: Vec<(DiskUsageStatus, Result<Option<LocalUsageSnapshot>, Error>)>,
) -> (Vec<DiskUsageStatus>, DataUsageInfo) {
let mut aggregated = DataUsageInfo::default();
let mut latest_update: Option<SystemTime> = None;
let mut statuses = Vec::new();
for (mut status, snapshot_result) in inputs {
if let Ok(Some(snapshot)) = snapshot_result {
status.snapshot_exists = true;
status.last_update = snapshot.last_update;
merge_snapshot(&mut aggregated, snapshot, &mut latest_update);
}
statuses.push(status);
}
aggregated.buckets_count = aggregated.buckets_usage.len() as u64;
aggregated.last_update = latest_update;
aggregated.disk_usage_status = statuses.clone();
(statuses, aggregated)
}
#[test]
fn aggregate_skips_corrupted_snapshot_and_preserves_other_disks() {
let mut good_snapshot = LocalUsageSnapshot::new(LocalUsageSnapshotMeta {
disk_id: "good-disk".to_string(),
pool_index: Some(0),
set_index: Some(0),
disk_index: Some(0),
});
good_snapshot.last_update = Some(SystemTime::now());
good_snapshot.buckets_usage.insert(
"bucket-a".to_string(),
BucketUsageInfo {
objects_count: 3,
versions_count: 3,
size: 42,
..Default::default()
},
);
good_snapshot.recompute_totals();
let bad_snapshot_err: Result<Option<LocalUsageSnapshot>, Error> = Err(Error::other("corrupted snapshot payload"));
let inputs = vec![
(
DiskUsageStatus {
disk_id: "bad-disk".to_string(),
pool_index: Some(0),
set_index: Some(0),
disk_index: Some(1),
last_update: None,
snapshot_exists: false,
},
bad_snapshot_err,
),
(
DiskUsageStatus {
disk_id: "good-disk".to_string(),
pool_index: Some(0),
set_index: Some(0),
disk_index: Some(0),
last_update: None,
snapshot_exists: false,
},
Ok(Some(good_snapshot)),
),
];
let (statuses, aggregated) = aggregate_for_test(inputs);
// Bad disk stays non-existent, good disk is marked present
let bad_status = statuses.iter().find(|s| s.disk_id == "bad-disk").unwrap();
assert!(!bad_status.snapshot_exists);
let good_status = statuses.iter().find(|s| s.disk_id == "good-disk").unwrap();
assert!(good_status.snapshot_exists);
// Aggregated data is from good snapshot only
assert_eq!(aggregated.objects_total_count, 3);
assert_eq!(aggregated.objects_total_size, 42);
assert_eq!(aggregated.buckets_count, 1);
assert_eq!(aggregated.buckets_usage.get("bucket-a").map(|b| (b.objects_count, b.size)), Some((3, 42)));
}
}

View File
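The store_data_usage_in_backend guard above only persists stats whose last_update is strictly newer than what is already stored, and merge_snapshot folds per-disk snapshots with saturating_add so one bad snapshot cannot overflow the totals. A small sketch of the timestamp guard in isolation; should_persist is a hypothetical helper that mirrors the inline check, not an actual RustFS API:

use std::time::{Duration, SystemTime};

// Only overwrite stored stats when the incoming snapshot is strictly newer.
fn should_persist(incoming: Option<SystemTime>, existing: Option<SystemTime>) -> bool {
    match (incoming, existing) {
        (Some(new_ts), Some(existing_ts)) => new_ts > existing_ts,
        // Missing timestamps fall through to persisting, matching the code above.
        _ => true,
    }
}

fn main() {
    let older = SystemTime::now() - Duration::from_secs(60);
    let newer = SystemTime::now();
    assert!(should_persist(Some(newer), Some(older)));
    assert!(!should_persist(Some(older), Some(newer)));
    assert!(should_persist(None, Some(newer)));
}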

@@ -198,15 +198,22 @@ impl Endpoint {
}
}
pub fn get_file_path(&self) -> &str {
let path = self.url.path();
pub fn get_file_path(&self) -> String {
let path: &str = self.url.path();
let decoded: std::borrow::Cow<'_, str> = match urlencoding::decode(path) {
Ok(decoded) => decoded,
Err(e) => {
debug!("Failed to decode path '{}': {}, using original path", path, e);
std::borrow::Cow::Borrowed(path)
}
};
#[cfg(windows)]
if self.url.scheme() == "file" {
let stripped = path.strip_prefix('/').unwrap_or(path);
let stripped: &str = decoded.strip_prefix('/').unwrap_or(&decoded);
debug!("get_file_path windows: path={}", stripped);
return stripped;
return stripped.to_string();
}
path
decoded.into_owned()
}
}
@@ -501,6 +508,45 @@ mod test {
assert_eq!(endpoint.get_type(), EndpointType::Path);
}
#[test]
fn test_endpoint_with_spaces_in_path() {
let path_with_spaces = "/Users/test/Library/Application Support/rustfs/data";
let endpoint = Endpoint::try_from(path_with_spaces).unwrap();
assert_eq!(endpoint.get_file_path(), path_with_spaces);
assert!(endpoint.is_local);
assert_eq!(endpoint.get_type(), EndpointType::Path);
}
#[test]
fn test_endpoint_percent_encoding_roundtrip() {
let path_with_spaces = "/Users/test/Library/Application Support/rustfs/data";
let endpoint = Endpoint::try_from(path_with_spaces).unwrap();
// Verify that the URL internally stores percent-encoded path
assert!(
endpoint.url.path().contains("%20"),
"URL path should contain percent-encoded spaces: {}",
endpoint.url.path()
);
// Verify that get_file_path() decodes the percent-encoded path correctly
assert_eq!(
endpoint.get_file_path(),
"/Users/test/Library/Application Support/rustfs/data",
"get_file_path() should decode percent-encoded spaces"
);
}
#[test]
fn test_endpoint_with_various_special_characters() {
// Test path with multiple special characters that get percent-encoded
let path_with_special = "/tmp/test path/data[1]/file+name&more";
let endpoint = Endpoint::try_from(path_with_special).unwrap();
// get_file_path() should return the original path with decoded characters
assert_eq!(endpoint.get_file_path(), path_with_special);
}
#[test]
fn test_endpoint_update_is_local() {
let mut endpoint = Endpoint::try_from("http://localhost:9000/path").unwrap();

View File
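get_file_path() now returns an owned String because the percent-decoded path can differ from the borrowed URL path. A standalone sketch of the decoding step, assuming the urlencoding crate as used in the diff; decode_path is an illustrative helper, and falling back to the raw path on decode failure mirrors the debug branch above:

use std::borrow::Cow;

// Decode a percent-encoded URL path back into a filesystem path.
fn decode_path(path: &str) -> String {
    let decoded: Cow<'_, str> = urlencoding::decode(path).unwrap_or(Cow::Borrowed(path));
    decoded.into_owned()
}

fn main() {
    let encoded = "/Users/test/Library/Application%20Support/rustfs/data";
    assert_eq!(decode_path(encoded), "/Users/test/Library/Application Support/rustfs/data");
}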

@@ -140,6 +140,12 @@ pub enum DiskError {
#[error("io error {0}")]
Io(io::Error),
#[error("source stalled")]
SourceStalled,
#[error("timeout")]
Timeout,
}
impl DiskError {
@@ -366,6 +372,8 @@ impl Clone for DiskError {
DiskError::ErasureWriteQuorum => DiskError::ErasureWriteQuorum,
DiskError::ErasureReadQuorum => DiskError::ErasureReadQuorum,
DiskError::ShortWrite => DiskError::ShortWrite,
DiskError::SourceStalled => DiskError::SourceStalled,
DiskError::Timeout => DiskError::Timeout,
}
}
}
@@ -412,6 +420,8 @@ impl DiskError {
DiskError::ErasureWriteQuorum => 0x25,
DiskError::ErasureReadQuorum => 0x26,
DiskError::ShortWrite => 0x27,
DiskError::SourceStalled => 0x28,
DiskError::Timeout => 0x29,
}
}
@@ -456,6 +466,8 @@ impl DiskError {
0x25 => Some(DiskError::ErasureWriteQuorum),
0x26 => Some(DiskError::ErasureReadQuorum),
0x27 => Some(DiskError::ShortWrite),
0x28 => Some(DiskError::SourceStalled),
0x29 => Some(DiskError::Timeout),
_ => None,
}
}

View File
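The new SourceStalled and Timeout variants slot into the existing numeric round-trip (0x28 and 0x29) used to ship DiskError across the wire. A self-contained sketch of that pattern with a cut-down DemoError enum; only the 0x27–0x29 codes come from the diff, everything else is illustrative:

#[derive(Debug, Clone, PartialEq)]
enum DemoError {
    ShortWrite,
    SourceStalled,
    Timeout,
}

impl DemoError {
    // Stable numeric code for serialization.
    fn to_code(&self) -> u32 {
        match self {
            DemoError::ShortWrite => 0x27,
            DemoError::SourceStalled => 0x28,
            DemoError::Timeout => 0x29,
        }
    }

    // Inverse mapping; unknown codes yield None, as in DiskError.
    fn from_code(code: u32) -> Option<DemoError> {
        match code {
            0x27 => Some(DemoError::ShortWrite),
            0x28 => Some(DemoError::SourceStalled),
            0x29 => Some(DemoError::Timeout),
            _ => None,
        }
    }
}

fn main() {
    for err in [DemoError::ShortWrite, DemoError::SourceStalled, DemoError::Timeout] {
        assert_eq!(DemoError::from_code(err.to_code()), Some(err.clone()));
    }
    assert_eq!(DemoError::from_code(0xFF), None);
}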

@@ -806,7 +806,7 @@ impl LocalDisk {
Ok((bytes, modtime))
}
async fn delete_versions_internal(&self, volume: &str, path: &str, fis: &Vec<FileInfo>) -> Result<()> {
async fn delete_versions_internal(&self, volume: &str, path: &str, fis: &[FileInfo]) -> Result<()> {
let volume_dir = self.get_bucket_path(volume)?;
let xlpath = self.get_object_path(volume, format!("{path}/{STORAGE_FORMAT_FILE}").as_str())?;
@@ -820,7 +820,7 @@ impl LocalDisk {
fm.unmarshal_msg(&data)?;
for fi in fis {
for fi in fis.iter() {
let data_dir = match fm.delete_version(fi) {
Ok(res) => res,
Err(err) => {
@@ -967,9 +967,7 @@ impl LocalDisk {
sum: &[u8],
shard_size: usize,
) -> Result<()> {
let file = super::fs::open_file(part_path, O_CREATE | O_WRONLY)
.await
.map_err(to_file_error)?;
let file = super::fs::open_file(part_path, O_RDONLY).await.map_err(to_file_error)?;
let meta = file.metadata().await.map_err(to_file_error)?;
let file_size = meta.len() as usize;
@@ -1465,6 +1463,7 @@ impl DiskAPI for LocalDisk {
resp.results[i] = conv_part_err_to_int(&err);
if resp.results[i] == CHECK_PART_UNKNOWN {
if let Some(err) = err {
error!("verify_file: failed to bitrot verify file: {:?}, error: {:?}", &part_path, &err);
if err == DiskError::FileAccessDenied {
continue;
}
@@ -1551,7 +1550,7 @@ impl DiskAPI for LocalDisk {
.join(fi.data_dir.map_or("".to_string(), |dir| dir.to_string()))
.join(format!("part.{}", part.number));
match lstat(file_path).await {
match lstat(&file_path).await {
Ok(st) => {
if st.is_dir() {
resp.results[i] = CHECK_PART_FILE_NOT_FOUND;
@@ -1577,6 +1576,8 @@ impl DiskAPI for LocalDisk {
}
}
resp.results[i] = CHECK_PART_FILE_NOT_FOUND;
} else {
error!("check_parts: failed to stat file: {:?}, error: {:?}", &file_path, &e);
}
continue;
}
@@ -1984,34 +1985,19 @@ impl DiskAPI for LocalDisk {
// TODO: Healing
let has_old_data_dir = {
if let Ok((_, ver)) = xlmeta.find_version(fi.version_id) {
let has_data_dir = ver.get_data_dir();
if let Some(data_dir) = has_data_dir {
if xlmeta.shard_data_dir_count(&fi.version_id, &Some(data_dir)) == 0 {
// TODO: Healing
// remove inlinedata\
Some(data_dir)
} else {
None
}
} else {
None
}
} else {
None
}
};
let search_version_id = fi.version_id.or(Some(Uuid::nil()));
// CLAUDE DEBUG: Check if inline data is being preserved
tracing::info!(
"CLAUDE DEBUG: rename_data - Adding version to xlmeta. fi.data.is_some()={}, fi.inline_data()={}, fi.size={}",
fi.data.is_some(),
fi.inline_data(),
fi.size
);
if let Some(ref data) = fi.data {
tracing::info!("CLAUDE DEBUG: rename_data - FileInfo has inline data: {} bytes", data.len());
// Check if there's an existing version with the same version_id that has a data_dir to clean up
// Note: For non-versioned buckets, fi.version_id is None, but in xl.meta it's stored as Some(Uuid::nil())
let has_old_data_dir = {
xlmeta.find_version(search_version_id).ok().and_then(|(_, ver)| {
// shard_count == 0 means no other version shares this data_dir
ver.get_data_dir()
.filter(|&data_dir| xlmeta.shard_data_dir_count(&search_version_id, &Some(data_dir)) == 0)
})
};
if let Some(old_data_dir) = has_old_data_dir.as_ref() {
let _ = xlmeta.data.remove(vec![search_version_id.unwrap_or_default(), *old_data_dir]);
}
xlmeta.add_version(fi.clone())?;
@@ -2021,10 +2007,6 @@ impl DiskAPI for LocalDisk {
}
let new_dst_buf = xlmeta.marshal_msg()?;
tracing::info!(
"CLAUDE DEBUG: rename_data - Marshaled xlmeta, new_dst_buf size: {} bytes",
new_dst_buf.len()
);
self.write_all(src_volume, format!("{}/{}", &src_path, STORAGE_FORMAT_FILE).as_str(), new_dst_buf.into())
.await?;
@@ -2300,7 +2282,6 @@ impl DiskAPI for LocalDisk {
let buf = match self.read_all_data(volume, &volume_dir, &xl_path).await {
Ok(res) => res,
Err(err) => {
//
if err != DiskError::FileNotFound {
return Err(err);
}

View File
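Among the LocalDisk changes above, the bitrot verification path now opens the part file with O_RDONLY instead of O_CREATE | O_WRONLY, so a missing part surfaces as an error rather than being created empty, and only the metadata is needed to learn the file size. A rough sketch of a read-only open with tokio; part_size is a hypothetical helper and the path is a placeholder:

use tokio::fs::File;

// Read-only open: a missing file is reported, never created.
async fn part_size(path: &str) -> std::io::Result<u64> {
    let file = File::open(path).await?;
    let meta = file.metadata().await?;
    Ok(meta.len())
}

#[tokio::main]
async fn main() {
    match part_size("/tmp/does-not-exist.part").await {
        Ok(len) => println!("part is {len} bytes"),
        Err(err) => println!("verification cannot proceed: {err}"),
    }
}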

@@ -271,10 +271,10 @@ impl DiskAPI for Disk {
}
#[tracing::instrument(skip(self))]
async fn list_dir(&self, _origvolume: &str, volume: &str, _dir_path: &str, _count: i32) -> Result<Vec<String>> {
async fn list_dir(&self, _origvolume: &str, volume: &str, dir_path: &str, count: i32) -> Result<Vec<String>> {
match self {
Disk::Local(local_disk) => local_disk.list_dir(_origvolume, volume, _dir_path, _count).await,
Disk::Remote(remote_disk) => remote_disk.list_dir(_origvolume, volume, _dir_path, _count).await,
Disk::Local(local_disk) => local_disk.list_dir(_origvolume, volume, dir_path, count).await,
Disk::Remote(remote_disk) => remote_disk.list_dir(_origvolume, volume, dir_path, count).await,
}
}
@@ -681,7 +681,10 @@ pub fn conv_part_err_to_int(err: &Option<Error>) -> usize {
Some(DiskError::VolumeNotFound) => CHECK_PART_VOLUME_NOT_FOUND,
Some(DiskError::DiskNotFound) => CHECK_PART_DISK_NOT_FOUND,
None => CHECK_PART_SUCCESS,
_ => CHECK_PART_UNKNOWN,
_ => {
tracing::warn!("conv_part_err_to_int: unknown error: {err:?}");
CHECK_PART_UNKNOWN
}
}
}

View File

@@ -232,7 +232,7 @@ impl PoolEndpointList {
for endpoints in pool_endpoint_list.inner.iter_mut() {
// Check whether same path is not used in endpoints of a host on different port.
let mut path_ip_map: HashMap<&str, HashSet<IpAddr>> = HashMap::new();
let mut path_ip_map: HashMap<String, HashSet<IpAddr>> = HashMap::new();
let mut host_ip_cache = HashMap::new();
for ep in endpoints.as_ref() {
if !ep.url.has_host() {
@@ -275,8 +275,9 @@ impl PoolEndpointList {
match path_ip_map.entry(path) {
Entry::Occupied(mut e) => {
if e.get().intersection(host_ip_set).count() > 0 {
let path_key = e.key().clone();
return Err(Error::other(format!(
"same path '{path}' can not be served by different port on same address"
"same path '{path_key}' can not be served by different port on same address"
)));
}
e.get_mut().extend(host_ip_set.iter());
@@ -295,7 +296,7 @@ impl PoolEndpointList {
}
let path = ep.get_file_path();
if local_path_set.contains(path) {
if local_path_set.contains(&path) {
return Err(Error::other(format!(
"path '{path}' cannot be served by different address on same server"
)));

View File
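Because get_file_path() now returns String, the duplicate-path check keys its map by owned strings rather than borrowed &str. A minimal sketch of the intersection test that rejects the same path served from the same address on different ports; the map contents here are invented for illustration:

use std::collections::{HashMap, HashSet};
use std::net::IpAddr;

fn main() {
    // The map now owns its keys because Endpoint::get_file_path() returns String.
    let mut path_ip_map: HashMap<String, HashSet<IpAddr>> = HashMap::new();
    let ip: IpAddr = "127.0.0.1".parse().unwrap();

    let first: HashSet<IpAddr> = [ip].into_iter().collect();
    path_ip_map.insert("/data/rustfs0".to_string(), first);

    // A second endpoint with the same path resolving to the same address is a conflict.
    let incoming: HashSet<IpAddr> = [ip].into_iter().collect();
    let conflict = path_ip_map
        .get("/data/rustfs0")
        .map(|known| known.intersection(&incoming).count() > 0)
        .unwrap_or(false);
    assert!(conflict, "same path on the same address must be rejected");
}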

@@ -176,12 +176,10 @@ where
let mut write_left = length;
for block_op in &en_blocks[..data_blocks] {
if block_op.is_none() {
let Some(block) = block_op else {
error!("write_data_blocks block_op.is_none()");
return Err(io::Error::new(ErrorKind::UnexpectedEof, "Missing data block"));
}
let block = block_op.as_ref().unwrap();
};
if offset >= block.len() {
offset -= block.len();
@@ -191,7 +189,7 @@ where
let block_slice = &block[offset..];
offset = 0;
if write_left < block.len() {
if write_left < block_slice.len() {
writer.write_all(&block_slice[..write_left]).await.map_err(|e| {
error!("write_data_blocks write_all err: {}", e);
e

View File
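The erasure writer hunk above swaps an is_none() check plus unwrap() for a let ... else early return, and fixes the length comparison to use block_slice.len() after the offset has been applied. A small sketch of the let-else shape with a hypothetical first_block helper:

use std::io::{Error, ErrorKind};

// Bail out with an error when a data block is missing instead of unwrapping later.
fn first_block(en_blocks: &[Option<Vec<u8>>]) -> Result<&[u8], Error> {
    let Some(block) = en_blocks.first().and_then(|b| b.as_ref()) else {
        return Err(Error::new(ErrorKind::UnexpectedEof, "Missing data block"));
    };
    Ok(block)
}

fn main() {
    let blocks = vec![Some(vec![1u8, 2, 3]), None];
    assert_eq!(first_block(&blocks).unwrap(), &[1, 2, 3]);
    assert!(first_block(&[None]).is_err());
}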

@@ -149,6 +149,12 @@ impl Erasure {
break;
}
Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => {
// Check if the inner error is a checksum mismatch - if so, propagate it
if let Some(inner) = e.get_ref() {
if rustfs_rio::is_checksum_mismatch(inner) {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string()));
}
}
break;
}
Err(e) => {

View File

@@ -194,6 +194,12 @@ pub enum StorageError {
#[error("Precondition failed")]
PreconditionFailed,
#[error("Not modified")]
NotModified,
#[error("Invalid part number: {0}")]
InvalidPartNumber(usize),
#[error("Invalid range specified: {0}")]
InvalidRangeSpec(String),
}
@@ -427,6 +433,8 @@ impl Clone for StorageError {
StorageError::InsufficientReadQuorum(a, b) => StorageError::InsufficientReadQuorum(a.clone(), b.clone()),
StorageError::InsufficientWriteQuorum(a, b) => StorageError::InsufficientWriteQuorum(a.clone(), b.clone()),
StorageError::PreconditionFailed => StorageError::PreconditionFailed,
StorageError::NotModified => StorageError::NotModified,
StorageError::InvalidPartNumber(a) => StorageError::InvalidPartNumber(*a),
StorageError::InvalidRangeSpec(a) => StorageError::InvalidRangeSpec(a.clone()),
}
}
@@ -496,6 +504,8 @@ impl StorageError {
StorageError::PreconditionFailed => 0x3B,
StorageError::EntityTooSmall(_, _, _) => 0x3C,
StorageError::InvalidRangeSpec(_) => 0x3D,
StorageError::NotModified => 0x3E,
StorageError::InvalidPartNumber(_) => 0x3F,
}
}
@@ -566,6 +576,8 @@ impl StorageError {
0x3B => Some(StorageError::PreconditionFailed),
0x3C => Some(StorageError::EntityTooSmall(Default::default(), Default::default(), Default::default())),
0x3D => Some(StorageError::InvalidRangeSpec(Default::default())),
0x3E => Some(StorageError::NotModified),
0x3F => Some(StorageError::InvalidPartNumber(Default::default())),
_ => None,
}
}
@@ -679,6 +691,10 @@ pub fn is_err_data_movement_overwrite(err: &Error) -> bool {
matches!(err, &StorageError::DataMovementOverwriteErr(_, _, _))
}
pub fn is_err_io(err: &Error) -> bool {
matches!(err, &StorageError::Io(_))
}
pub fn is_all_not_found(errs: &[Option<Error>]) -> bool {
for err in errs.iter() {
if let Some(err) = err {

View File

@@ -190,16 +190,32 @@ impl NotificationSys {
pub async fn storage_info<S: StorageAPI>(&self, api: &S) -> rustfs_madmin::StorageInfo {
let mut futures = Vec::with_capacity(self.peer_clients.len());
let endpoints = get_global_endpoints();
let peer_timeout = Duration::from_secs(2); // Same timeout as server_info
for client in self.peer_clients.iter() {
let endpoints = endpoints.clone();
futures.push(async move {
if let Some(client) = client {
match client.local_storage_info().await {
Ok(info) => Some(info),
Err(_) => Some(rustfs_madmin::StorageInfo {
disks: get_offline_disks(&client.host.to_string(), &get_global_endpoints()),
..Default::default()
}),
let host = client.host.to_string();
// Wrap in timeout to ensure we don't hang on dead peers
match timeout(peer_timeout, client.local_storage_info()).await {
Ok(Ok(info)) => Some(info),
Ok(Err(err)) => {
warn!("peer {} storage_info failed: {}", host, err);
Some(rustfs_madmin::StorageInfo {
disks: get_offline_disks(&host, &endpoints),
..Default::default()
})
}
Err(_) => {
warn!("peer {} storage_info timed out after {:?}", host, peer_timeout);
client.evict_connection().await;
Some(rustfs_madmin::StorageInfo {
disks: get_offline_disks(&host, &endpoints),
..Default::default()
})
}
}
} else {
None
@@ -230,13 +246,19 @@ impl NotificationSys {
futures.push(async move {
if let Some(client) = client {
let host = client.host.to_string();
call_peer_with_timeout(
peer_timeout,
&host,
|| client.server_info(),
|| offline_server_properties(&host, &endpoints),
)
.await
match timeout(peer_timeout, client.server_info()).await {
Ok(Ok(info)) => info,
Ok(Err(err)) => {
warn!("peer {} server_info failed: {}", host, err);
// client.server_info handles eviction internally on error, but fallback needed
offline_server_properties(&host, &endpoints)
}
Err(_) => {
warn!("peer {} server_info timed out after {:?}", host, peer_timeout);
client.evict_connection().await;
offline_server_properties(&host, &endpoints)
}
}
} else {
ServerProperties::default()
}

View File
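Both peer queries above are now wrapped in a two-second tokio timeout so a dead node degrades to an offline placeholder instead of hanging storage_info/server_info, with the stale connection evicted on timeout. A stripped-down sketch of that pattern (the real call also handles the inner Result); fetch_with_fallback and its slow peer are invented for the demo, and tokio's rt, time, and macros features are assumed:

use std::time::Duration;
use tokio::time::timeout;

async fn fetch_with_fallback() -> String {
    let peer_timeout = Duration::from_secs(2);
    // Stand-in for an RPC to a dead peer that never answers in time.
    let slow_peer = async {
        tokio::time::sleep(Duration::from_secs(10)).await;
        "online".to_string()
    };
    match timeout(peer_timeout, slow_peer).await {
        Ok(info) => info,
        // Timed out: fall back to an offline placeholder, as the diff does.
        Err(_) => "offline placeholder".to_string(),
    }
}

#[tokio::main]
async fn main() {
    assert_eq!(fetch_with_fallback().await, "offline placeholder");
}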

@@ -26,7 +26,7 @@ use rustfs_madmin::{
net::NetInfo,
};
use rustfs_protos::{
node_service_time_out_client,
evict_failed_connection, node_service_time_out_client,
proto_gen::node_service::{
DeleteBucketMetadataRequest, DeletePolicyRequest, DeleteServiceAccountRequest, DeleteUserRequest, GetCpusRequest,
GetMemInfoRequest, GetMetricsRequest, GetNetInfoRequest, GetOsInfoRequest, GetPartitionsRequest, GetProcInfoRequest,
@@ -82,10 +82,25 @@ impl PeerRestClient {
(remote, all)
}
/// Evict the connection to this peer from the global cache.
/// This should be called when communication with this peer fails.
pub async fn evict_connection(&self) {
evict_failed_connection(&self.grid_host).await;
}
}
impl PeerRestClient {
pub async fn local_storage_info(&self) -> Result<rustfs_madmin::StorageInfo> {
let result = self.local_storage_info_inner().await;
if result.is_err() {
// Evict stale connection on any error for cluster recovery
self.evict_connection().await;
}
result
}
async fn local_storage_info_inner(&self) -> Result<rustfs_madmin::StorageInfo> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
@@ -107,6 +122,15 @@ impl PeerRestClient {
}
pub async fn server_info(&self) -> Result<ServerProperties> {
let result = self.server_info_inner().await;
if result.is_err() {
// Evict stale connection on any error for cluster recovery
self.evict_connection().await;
}
result
}
async fn server_info_inner(&self) -> Result<ServerProperties> {
let mut client = node_service_time_out_client(&self.grid_host)
.await
.map_err(|err| Error::other(err.to_string()))?;
@@ -478,7 +502,11 @@ impl PeerRestClient {
access_key: access_key.to_string(),
});
let response = client.delete_user(request).await?.into_inner();
let result = client.delete_user(request).await;
if result.is_err() {
self.evict_connection().await;
}
let response = result?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
@@ -496,7 +524,11 @@ impl PeerRestClient {
access_key: access_key.to_string(),
});
let response = client.delete_service_account(request).await?.into_inner();
let result = client.delete_service_account(request).await;
if result.is_err() {
self.evict_connection().await;
}
let response = result?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
@@ -515,7 +547,11 @@ impl PeerRestClient {
temp,
});
let response = client.load_user(request).await?.into_inner();
let result = client.load_user(request).await;
if result.is_err() {
self.evict_connection().await;
}
let response = result?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
@@ -533,7 +569,11 @@ impl PeerRestClient {
access_key: access_key.to_string(),
});
let response = client.load_service_account(request).await?.into_inner();
let result = client.load_service_account(request).await;
if result.is_err() {
self.evict_connection().await;
}
let response = result?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));
@@ -551,7 +591,11 @@ impl PeerRestClient {
group: group.to_string(),
});
let response = client.load_group(request).await?.into_inner();
let result = client.load_group(request).await;
if result.is_err() {
self.evict_connection().await;
}
let response = result?.into_inner();
if !response.success {
if let Some(msg) = response.error_info {
return Err(Error::other(msg));

View File

@@ -42,7 +42,7 @@ use rustfs_protos::proto_gen::node_service::RenamePartRequest;
use rustfs_rio::{HttpReader, HttpWriter};
use tokio::{io::AsyncWrite, net::TcpStream, time::timeout};
use tonic::Request;
use tracing::info;
use tracing::{debug, info};
use uuid::Uuid;
#[derive(Debug)]
@@ -596,14 +596,16 @@ impl DiskAPI for RemoteDisk {
}
#[tracing::instrument(skip(self))]
async fn list_dir(&self, _origvolume: &str, volume: &str, _dir_path: &str, _count: i32) -> Result<Vec<String>> {
info!("list_dir {}/{}", volume, _dir_path);
async fn list_dir(&self, _origvolume: &str, volume: &str, dir_path: &str, count: i32) -> Result<Vec<String>> {
debug!("list_dir {}/{}", volume, dir_path);
let mut client = node_service_time_out_client(&self.addr)
.await
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
let request = Request::new(ListDirRequest {
disk: self.endpoint.to_string(),
volume: volume.to_string(),
dir_path: dir_path.to_string(),
count,
});
let response = client.list_dir(request).await?.into_inner();
@@ -1085,7 +1087,7 @@ mod tests {
drop(listener);
let url = url::Url::parse(&format!("http://{}:{}/data/rustfs0", ip, port)).unwrap();
let url = url::Url::parse(&format!("http://{ip}:{port}/data/rustfs0")).unwrap();
let endpoint = Endpoint {
url,
is_local: false,

File diff suppressed because it is too large

View File

@@ -111,6 +111,9 @@ impl Sets {
let mut disk_set = Vec::with_capacity(set_count);
// Create fast lock manager for high performance
let fast_lock_manager = Arc::new(rustfs_lock::FastObjectLockManager::new());
for i in 0..set_count {
let mut set_drive = Vec::with_capacity(set_drive_count);
let mut set_endpoints = Vec::with_capacity(set_drive_count);
@@ -164,11 +167,9 @@ impl Sets {
// Note: write_quorum was used for the old lock system, no longer needed with FastLock
let _write_quorum = set_drive_count - parity_count;
// Create fast lock manager for high performance
let fast_lock_manager = Arc::new(rustfs_lock::FastObjectLockManager::new());
let set_disks = SetDisks::new(
fast_lock_manager,
fast_lock_manager.clone(),
GLOBAL_Local_Node_Name.read().await.to_string(),
Arc::new(RwLock::new(set_drive)),
set_drive_count,
@@ -439,6 +440,7 @@ impl StorageAPI for Sets {
_max_keys: i32,
_fetch_owner: bool,
_start_after: Option<String>,
_incl_deleted: bool,
) -> Result<ListObjectsV2Info> {
unimplemented!()
}

View File
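The Sets change above hoists the FastObjectLockManager out of the per-set loop so every SetDisks instance shares one manager via Arc::clone instead of each constructing its own. A tiny sketch of that sharing pattern with a placeholder DemoLockManager type:

use std::sync::Arc;

struct DemoLockManager;

fn main() {
    // Build the manager once, outside the loop.
    let manager = Arc::new(DemoLockManager);
    let set_count = 4;
    let mut per_set = Vec::with_capacity(set_count);
    for _ in 0..set_count {
        // Every set receives a clone of the same Arc, not a fresh manager.
        per_set.push(Arc::clone(&manager));
    }
    assert_eq!(Arc::strong_count(&manager), set_count + 1);
}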

@@ -767,6 +767,12 @@ impl ECStore {
def_pool = pinfo.clone();
has_def_pool = true;
// https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-deletes.html
if is_err_object_not_found(err) {
if let Err(err) = opts.precondition_check(&pinfo.object_info) {
return Err(err.clone());
}
}
if !is_err_object_not_found(err) && !is_err_version_not_found(err) {
return Err(err.clone());
@@ -1338,9 +1344,19 @@ impl StorageAPI for ECStore {
max_keys: i32,
fetch_owner: bool,
start_after: Option<String>,
incl_deleted: bool,
) -> Result<ListObjectsV2Info> {
self.inner_list_objects_v2(bucket, prefix, continuation_token, delimiter, max_keys, fetch_owner, start_after)
.await
self.inner_list_objects_v2(
bucket,
prefix,
continuation_token,
delimiter,
max_keys,
fetch_owner,
start_after,
incl_deleted,
)
.await
}
#[instrument(skip(self))]
@@ -1382,6 +1398,7 @@ impl StorageAPI for ECStore {
let (info, _) = self.get_latest_object_info_with_idx(bucket, object.as_str(), opts).await?;
opts.precondition_check(&info)?;
Ok(info)
}

View File

@@ -356,6 +356,8 @@ impl HTTPRangeSpec {
pub struct HTTPPreconditions {
pub if_match: Option<String>,
pub if_none_match: Option<String>,
pub if_modified_since: Option<OffsetDateTime>,
pub if_unmodified_since: Option<OffsetDateTime>,
}
#[derive(Debug, Default, Clone)]
@@ -456,6 +458,76 @@ impl ObjectOptions {
..Default::default()
}
}
pub fn precondition_check(&self, obj_info: &ObjectInfo) -> Result<()> {
let has_valid_mod_time = obj_info.mod_time.is_some_and(|t| t != OffsetDateTime::UNIX_EPOCH);
if let Some(part_number) = self.part_number {
if part_number > 1 && !obj_info.parts.is_empty() {
let part_found = obj_info.parts.iter().any(|pi| pi.number == part_number);
if !part_found {
return Err(Error::InvalidPartNumber(part_number));
}
}
}
if let Some(pre) = &self.http_preconditions {
if let Some(if_none_match) = &pre.if_none_match {
if let Some(etag) = &obj_info.etag {
if is_etag_equal(etag, if_none_match) {
return Err(Error::NotModified);
}
}
}
if has_valid_mod_time {
if let Some(if_modified_since) = &pre.if_modified_since {
if let Some(mod_time) = &obj_info.mod_time {
if !is_modified_since(mod_time, if_modified_since) {
return Err(Error::NotModified);
}
}
}
}
if let Some(if_match) = &pre.if_match {
if let Some(etag) = &obj_info.etag {
if !is_etag_equal(etag, if_match) {
return Err(Error::PreconditionFailed);
}
} else {
return Err(Error::PreconditionFailed);
}
}
if has_valid_mod_time && pre.if_match.is_none() {
if let Some(if_unmodified_since) = &pre.if_unmodified_since {
if let Some(mod_time) = &obj_info.mod_time {
if is_modified_since(mod_time, if_unmodified_since) {
return Err(Error::PreconditionFailed);
}
}
}
}
}
Ok(())
}
}
fn is_etag_equal(etag1: &str, etag2: &str) -> bool {
let e1 = etag1.trim_matches('"');
let e2 = etag2.trim_matches('"');
// Handle wildcard "*" - matches any ETag (per HTTP/1.1 RFC 7232)
if e2 == "*" {
return true;
}
e1 == e2
}
fn is_modified_since(mod_time: &OffsetDateTime, given_time: &OffsetDateTime) -> bool {
let mod_secs = mod_time.unix_timestamp();
let given_secs = given_time.unix_timestamp();
mod_secs > given_secs
}
#[derive(Debug, Default, Serialize, Deserialize)]
@@ -755,7 +827,12 @@ impl ObjectInfo {
for entry in entries.entries() {
if entry.is_object() {
if let Some(delimiter) = &delimiter {
if let Some(idx) = entry.name.trim_start_matches(prefix).find(delimiter) {
let remaining = if entry.name.starts_with(prefix) {
&entry.name[prefix.len()..]
} else {
entry.name.as_str()
};
if let Some(idx) = remaining.find(delimiter.as_str()) {
let idx = prefix.len() + idx + delimiter.len();
if let Some(curr_prefix) = entry.name.get(0..idx) {
if curr_prefix == prev_prefix {
@@ -806,7 +883,14 @@ impl ObjectInfo {
if entry.is_dir() {
if let Some(delimiter) = &delimiter {
if let Some(idx) = entry.name.trim_start_matches(prefix).find(delimiter) {
if let Some(idx) = {
let remaining = if entry.name.starts_with(prefix) {
&entry.name[prefix.len()..]
} else {
entry.name.as_str()
};
remaining.find(delimiter.as_str())
} {
let idx = prefix.len() + idx + delimiter.len();
if let Some(curr_prefix) = entry.name.get(0..idx) {
if curr_prefix == prev_prefix {
@@ -842,7 +926,12 @@ impl ObjectInfo {
for entry in entries.entries() {
if entry.is_object() {
if let Some(delimiter) = &delimiter {
if let Some(idx) = entry.name.trim_start_matches(prefix).find(delimiter) {
let remaining = if entry.name.starts_with(prefix) {
&entry.name[prefix.len()..]
} else {
entry.name.as_str()
};
if let Some(idx) = remaining.find(delimiter.as_str()) {
let idx = prefix.len() + idx + delimiter.len();
if let Some(curr_prefix) = entry.name.get(0..idx) {
if curr_prefix == prev_prefix {
@@ -879,7 +968,14 @@ impl ObjectInfo {
if entry.is_dir() {
if let Some(delimiter) = &delimiter {
if let Some(idx) = entry.name.trim_start_matches(prefix).find(delimiter) {
if let Some(idx) = {
let remaining = if entry.name.starts_with(prefix) {
&entry.name[prefix.len()..]
} else {
entry.name.as_str()
};
remaining.find(delimiter.as_str())
} {
let idx = prefix.len() + idx + delimiter.len();
if let Some(curr_prefix) = entry.name.get(0..idx) {
if curr_prefix == prev_prefix {
@@ -1224,6 +1320,7 @@ pub trait StorageAPI: ObjectIO + Debug {
max_keys: i32,
fetch_owner: bool,
start_after: Option<String>,
incl_deleted: bool,
) -> Result<ListObjectsV2Info>;
// ListObjectVersions TODO: FIXME:
async fn list_object_versions(

View File
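precondition_check above leans on two helpers: is_etag_equal strips surrounding quotes and treats "*" as a wildcard per RFC 7232, while is_modified_since compares at second granularity because HTTP dates carry no sub-second precision. A standalone copy of both helpers with a few assertions showing the intended semantics:

use time::{Duration, OffsetDateTime};

fn is_etag_equal(etag1: &str, etag2: &str) -> bool {
    let e1 = etag1.trim_matches('"');
    let e2 = etag2.trim_matches('"');
    if e2 == "*" {
        return true; // wildcard matches any ETag (RFC 7232)
    }
    e1 == e2
}

fn is_modified_since(mod_time: &OffsetDateTime, given_time: &OffsetDateTime) -> bool {
    // Second granularity: HTTP dates have no sub-second precision.
    mod_time.unix_timestamp() > given_time.unix_timestamp()
}

fn main() {
    assert!(is_etag_equal("\"abc123\"", "abc123"));
    assert!(is_etag_equal("\"abc123\"", "*"));
    assert!(!is_etag_equal("abc123", "def456"));

    let earlier = OffsetDateTime::now_utc() - Duration::hours(1);
    let now = OffsetDateTime::now_utc();
    assert!(is_modified_since(&now, &earlier));
    assert!(!is_modified_since(&earlier, &now));
}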

@@ -225,6 +225,7 @@ impl ECStore {
max_keys: i32,
_fetch_owner: bool,
start_after: Option<String>,
incl_deleted: bool,
) -> Result<ListObjectsV2Info> {
let marker = {
if continuation_token.is_none() {
@@ -234,7 +235,9 @@ impl ECStore {
}
};
let loi = self.list_objects_generic(bucket, prefix, marker, delimiter, max_keys).await?;
let loi = self
.list_objects_generic(bucket, prefix, marker, delimiter, max_keys, incl_deleted)
.await?;
Ok(ListObjectsV2Info {
is_truncated: loi.is_truncated,
continuation_token,
@@ -251,6 +254,7 @@ impl ECStore {
marker: Option<String>,
delimiter: Option<String>,
max_keys: i32,
incl_deleted: bool,
) -> Result<ListObjectsInfo> {
let opts = ListPathOptions {
bucket: bucket.to_owned(),
@@ -258,7 +262,7 @@ impl ECStore {
separator: delimiter.clone(),
limit: max_keys_plus_one(max_keys, marker.is_some()),
marker,
incl_deleted: false,
incl_deleted,
ask_disks: "strict".to_owned(), //TODO: from config
..Default::default()
};

View File

@@ -26,7 +26,7 @@ categories = ["web-programming", "development-tools", "filesystem"]
documentation = "https://docs.rs/rustfs-filemeta/latest/rustfs_filemeta/"
[dependencies]
crc32fast = { workspace = true }
crc-fast = { workspace = true }
rmp.workspace = true
rmp-serde.workspace = true
serde.workspace = true

View File

@@ -8,7 +8,7 @@
<p align="center">
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
<a href="https://docs.rustfs.com/en/">📖 Documentation</a>
<a href="https://docs.rustfs.com/">📖 Documentation</a>
· <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
· <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
</p>

View File

@@ -220,7 +220,11 @@ impl FileInfo {
let indices = {
let cardinality = data_blocks + parity_blocks;
let mut nums = vec![0; cardinality];
let key_crc = crc32fast::hash(object.as_bytes());
let key_crc = {
let mut hasher = crc_fast::Digest::new(crc_fast::CrcAlgorithm::Crc32IsoHdlc);
hasher.update(object.as_bytes());
hasher.finalize() as u32
};
let start = key_crc as usize % cardinality;
for i in 1..=cardinality {

View File
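The hashing hunk above moves from crc32fast::hash to crc-fast's Digest with the Crc32IsoHdlc algorithm, the same CRC-32 polynomial, so the computed start index for erasure distribution stays stable. A short sketch of deriving a shard start index the same way; the object key and cardinality are arbitrary examples:

// Mirrors the key_crc computation shown in the diff.
fn key_crc32(object: &str) -> u32 {
    let mut hasher = crc_fast::Digest::new(crc_fast::CrcAlgorithm::Crc32IsoHdlc);
    hasher.update(object.as_bytes());
    hasher.finalize() as u32
}

fn main() {
    let cardinality = 16; // data_blocks + parity_blocks, chosen for illustration
    let start = key_crc32("bucket/object.bin") as usize % cardinality;
    assert!(start < cardinality);
    println!("erasure distribution starts at index {start}");
}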

@@ -34,7 +34,7 @@ use std::{collections::HashMap, io::Cursor};
use time::OffsetDateTime;
use time::format_description::well_known::Rfc3339;
use tokio::io::AsyncRead;
use tracing::error;
use tracing::{error, warn};
use uuid::Uuid;
use xxhash_rust::xxh64;
@@ -427,17 +427,26 @@ impl FileMeta {
return;
}
self.versions.reverse();
// for _v in self.versions.iter() {
// // warn!("sort {} {:?}", i, v);
// }
self.versions.sort_by(|a, b| {
if a.header.mod_time != b.header.mod_time {
b.header.mod_time.cmp(&a.header.mod_time)
} else if a.header.version_type != b.header.version_type {
b.header.version_type.cmp(&a.header.version_type)
} else if a.header.version_id != b.header.version_id {
b.header.version_id.cmp(&a.header.version_id)
} else if a.header.flags != b.header.flags {
b.header.flags.cmp(&a.header.flags)
} else {
b.cmp(a)
}
});
}
// Find version
pub fn find_version(&self, vid: Option<Uuid>) -> Result<(usize, FileMetaVersion)> {
let vid = vid.unwrap_or_default();
for (i, fver) in self.versions.iter().enumerate() {
if fver.header.version_id == vid {
if fver.header.version_id == Some(vid) {
let version = self.get_idx(i)?;
return Ok((i, version));
}
@@ -448,9 +457,12 @@ impl FileMeta {
// shard_data_dir_count queries the count of data_dir under vid
pub fn shard_data_dir_count(&self, vid: &Option<Uuid>, data_dir: &Option<Uuid>) -> usize {
let vid = vid.unwrap_or_default();
self.versions
.iter()
.filter(|v| v.header.version_type == VersionType::Object && v.header.version_id != *vid && v.header.user_data_dir())
.filter(|v| {
v.header.version_type == VersionType::Object && v.header.version_id != Some(vid) && v.header.user_data_dir()
})
.map(|v| FileMetaVersion::decode_data_dir_from_meta(&v.meta).unwrap_or_default())
.filter(|v| v == data_dir)
.count()
@@ -489,25 +501,27 @@ impl FileMeta {
self.versions.sort_by(|a, b| {
if a.header.mod_time != b.header.mod_time {
a.header.mod_time.cmp(&b.header.mod_time)
b.header.mod_time.cmp(&a.header.mod_time)
} else if a.header.version_type != b.header.version_type {
a.header.version_type.cmp(&b.header.version_type)
b.header.version_type.cmp(&a.header.version_type)
} else if a.header.version_id != b.header.version_id {
a.header.version_id.cmp(&b.header.version_id)
b.header.version_id.cmp(&a.header.version_id)
} else if a.header.flags != b.header.flags {
a.header.flags.cmp(&b.header.flags)
b.header.flags.cmp(&a.header.flags)
} else {
a.cmp(b)
b.cmp(a)
}
});
Ok(())
}
pub fn add_version(&mut self, fi: FileInfo) -> Result<()> {
let vid = fi.version_id;
pub fn add_version(&mut self, mut fi: FileInfo) -> Result<()> {
if fi.version_id.is_none() {
fi.version_id = Some(Uuid::nil());
}
if let Some(ref data) = fi.data {
let key = vid.unwrap_or_default().to_string();
let key = fi.version_id.unwrap_or_default().to_string();
self.data.replace(&key, data.to_vec())?;
}
@@ -521,12 +535,13 @@ impl FileMeta {
return Err(Error::other("file meta version invalid"));
}
// 1000 is the limit of versions TODO: make it configurable
if self.versions.len() + 1 > 1000 {
return Err(Error::other(
"You've exceeded the limit on the number of versions you can create on this object",
));
}
// TODO: make it configurable
// 1000 is the limit of versions
// if self.versions.len() + 1 > 1000 {
// return Err(Error::other(
// "You've exceeded the limit on the number of versions you can create on this object",
// ));
// }
if self.versions.is_empty() {
self.versions.push(FileMetaShallowVersion::try_from(version)?);
@@ -551,7 +566,6 @@ impl FileMeta {
}
}
}
Err(Error::other("add_version failed"))
// if !ver.valid() {
@@ -583,12 +597,19 @@ impl FileMeta {
}
// delete_version deletes version, returns data_dir
#[tracing::instrument(skip(self))]
pub fn delete_version(&mut self, fi: &FileInfo) -> Result<Option<Uuid>> {
let vid = if fi.version_id.is_none() {
Some(Uuid::nil())
} else {
Some(fi.version_id.unwrap())
};
let mut ventry = FileMetaVersion::default();
if fi.deleted {
ventry.version_type = VersionType::Delete;
ventry.delete_marker = Some(MetaDeleteMarker {
version_id: fi.version_id,
version_id: vid,
mod_time: fi.mod_time,
..Default::default()
});
@@ -598,7 +619,7 @@ impl FileMeta {
}
}
let mut update_version = fi.mark_deleted;
let mut update_version = false;
if fi.version_purge_status().is_empty()
&& (fi.delete_marker_replication_status() == ReplicationStatusType::Replica
|| fi.delete_marker_replication_status() == ReplicationStatusType::Empty)
@@ -689,8 +710,10 @@ impl FileMeta {
}
}
let mut found_index = None;
for (i, ver) in self.versions.iter().enumerate() {
if ver.header.version_id != fi.version_id {
if ver.header.version_id != vid {
continue;
}
@@ -701,7 +724,7 @@ impl FileMeta {
let mut v = self.get_idx(i)?;
if v.delete_marker.is_none() {
v.delete_marker = Some(MetaDeleteMarker {
version_id: fi.version_id,
version_id: vid,
mod_time: fi.mod_time,
meta_sys: HashMap::new(),
});
@@ -767,7 +790,7 @@ impl FileMeta {
self.versions.remove(i);
if (fi.mark_deleted && fi.version_purge_status() != VersionPurgeStatusType::Complete)
|| (fi.deleted && fi.version_id.is_none())
|| (fi.deleted && vid == Some(Uuid::nil()))
{
self.add_version_filemata(ventry)?;
}
@@ -803,18 +826,11 @@ impl FileMeta {
return Ok(old_dir);
}
found_index = Some(i);
}
}
}
let mut found_index = None;
for (i, version) in self.versions.iter().enumerate() {
if version.header.version_type == VersionType::Object && version.header.version_id == fi.version_id {
found_index = Some(i);
break;
}
}
let Some(i) = found_index else {
if fi.deleted {
self.add_version_filemata(ventry)?;
@@ -878,12 +894,11 @@ impl FileMeta {
read_data: bool,
all_parts: bool,
) -> Result<FileInfo> {
let has_vid = {
let vid = {
if !version_id.is_empty() {
let id = Uuid::parse_str(version_id)?;
if !id.is_nil() { Some(id) } else { None }
Uuid::parse_str(version_id)?
} else {
None
Uuid::nil()
}
};
@@ -893,12 +908,12 @@ impl FileMeta {
for ver in self.versions.iter() {
let header = &ver.header;
if let Some(vid) = has_vid {
if header.version_id != Some(vid) {
is_latest = false;
succ_mod_time = header.mod_time;
continue;
}
// TODO: freeVersion
if !version_id.is_empty() && header.version_id != Some(vid) {
is_latest = false;
succ_mod_time = header.mod_time;
continue;
}
let mut fi = ver.into_fileinfo(volume, path, all_parts)?;
@@ -920,7 +935,7 @@ impl FileMeta {
return Ok(fi);
}
if has_vid.is_none() {
if version_id.is_empty() {
Err(Error::FileNotFound)
} else {
Err(Error::FileVersionNotFound)
@@ -1079,13 +1094,10 @@ impl FileMeta {
/// Count shared data directories
pub fn shared_data_dir_count(&self, version_id: Option<Uuid>, data_dir: Option<Uuid>) -> usize {
let version_id = version_id.unwrap_or_default();
if self.data.entries().unwrap_or_default() > 0
&& version_id.is_some()
&& self
.data
.find(version_id.unwrap().to_string().as_str())
.unwrap_or_default()
.is_some()
&& self.data.find(version_id.to_string().as_str()).unwrap_or_default().is_some()
{
return 0;
}
@@ -1093,7 +1105,9 @@ impl FileMeta {
self.versions
.iter()
.filter(|v| {
v.header.version_type == VersionType::Object && v.header.version_id != version_id && v.header.user_data_dir()
v.header.version_type == VersionType::Object
&& v.header.version_id != Some(version_id)
&& v.header.user_data_dir()
})
.filter_map(|v| FileMetaVersion::decode_data_dir_from_meta(&v.meta).ok())
.filter(|&dir| dir == data_dir)
@@ -1521,7 +1535,8 @@ impl FileMetaVersionHeader {
cur.read_exact(&mut buf)?;
self.version_id = {
let id = Uuid::from_bytes(buf);
if id.is_nil() { None } else { Some(id) }
// if id.is_nil() { None } else { Some(id) }
Some(id)
};
// mod_time

View File
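The FileMeta changes above normalize missing version IDs to Uuid::nil() and flip the version sort so the most recently modified version comes first (comparing b against a). A minimal sketch of that descending sort on modification times using the time crate:

use time::OffsetDateTime;

fn main() {
    let mut mod_times = vec![
        OffsetDateTime::from_unix_timestamp(100).unwrap(),
        OffsetDateTime::from_unix_timestamp(300).unwrap(),
        OffsetDateTime::from_unix_timestamp(200).unwrap(),
    ];
    // b.cmp(a) yields descending order: newest modification time first.
    mod_times.sort_by(|a, b| b.cmp(a));
    let secs: Vec<i64> = mod_times.iter().map(|t| t.unix_timestamp()).collect();
    assert_eq!(secs, vec![300, 200, 100]);
}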

@@ -8,7 +8,7 @@
<p align="center">
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
<a href="https://docs.rustfs.com/en/">📖 Documentation</a>
<a href="https://docs.rustfs.com/">📖 Documentation</a>
· <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
· <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
</p>

View File

@@ -38,7 +38,7 @@ use std::sync::LazyLock;
use std::{collections::HashMap, sync::Arc};
use tokio::sync::mpsc::{self, Sender};
use tokio_util::sync::CancellationToken;
use tracing::{debug, info, warn};
use tracing::{info, warn};
pub static IAM_CONFIG_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam"));
pub static IAM_CONFIG_USERS_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/users/"));
@@ -389,7 +389,7 @@ impl Store for ObjectStore {
data = match Self::decrypt_data(&data) {
Ok(v) => v,
Err(err) => {
debug!("decrypt_data failed: {}", err);
warn!("delete the config file when decrypt failed failed: {}, path: {}", err, path.as_ref());
// delete the config file when decrypt failed
let _ = self.delete_iam_config(path.as_ref()).await;
return Err(Error::ConfigNotFound);
@@ -439,8 +439,10 @@ impl Store for ObjectStore {
.await
.map_err(|err| {
if is_err_config_not_found(&err) {
warn!("load_user_identity failed: no such user, name: {name}, user_type: {user_type:?}");
Error::NoSuchUser(name.to_owned())
} else {
warn!("load_user_identity failed: {err:?}, name: {name}, user_type: {user_type:?}");
err
}
})?;
@@ -448,6 +450,9 @@ impl Store for ObjectStore {
if u.credentials.is_expired() {
let _ = self.delete_iam_config(get_user_identity_path(name, user_type)).await;
let _ = self.delete_iam_config(get_mapped_policy_path(name, user_type, false)).await;
warn!(
"load_user_identity failed: user is expired, delete the user and mapped policy, name: {name}, user_type: {user_type:?}"
);
return Err(Error::NoSuchUser(name.to_owned()));
}
@@ -465,7 +470,7 @@ impl Store for ObjectStore {
let _ = self.delete_iam_config(get_user_identity_path(name, user_type)).await;
let _ = self.delete_iam_config(get_mapped_policy_path(name, user_type, false)).await;
}
warn!("extract_jwt_claims failed: {}", err);
warn!("extract_jwt_claims failed: {err:?}, name: {name}, user_type: {user_type:?}");
return Err(Error::NoSuchUser(name.to_owned()));
}
}

View File

@@ -240,14 +240,19 @@ impl<T: Store> IamSys<T> {
return;
}
if let Some(notification_sys) = get_global_notification_sys() {
let resp = notification_sys.load_user(name, is_temp).await;
for r in resp {
if let Some(err) = r.err {
warn!("notify load_user failed: {}", err);
// Fire-and-forget notification to peers - don't block auth operations
// This is critical for cluster recovery: login should not wait for dead peers
let name = name.to_string();
tokio::spawn(async move {
if let Some(notification_sys) = get_global_notification_sys() {
let resp = notification_sys.load_user(&name, is_temp).await;
for r in resp {
if let Some(err) = r.err {
warn!("notify load_user failed (non-blocking): {}", err);
}
}
}
}
});
}
async fn notify_for_service_account(&self, name: &str) {
@@ -255,14 +260,18 @@ impl<T: Store> IamSys<T> {
return;
}
if let Some(notification_sys) = get_global_notification_sys() {
let resp = notification_sys.load_service_account(name).await;
for r in resp {
if let Some(err) = r.err {
warn!("notify load_service_account failed: {}", err);
// Fire-and-forget notification to peers - don't block service account operations
let name = name.to_string();
tokio::spawn(async move {
if let Some(notification_sys) = get_global_notification_sys() {
let resp = notification_sys.load_service_account(&name).await;
for r in resp {
if let Some(err) = r.err {
warn!("notify load_service_account failed (non-blocking): {}", err);
}
}
}
}
});
}
pub async fn current_policies(&self, name: &str) -> String {
@@ -571,14 +580,18 @@ impl<T: Store> IamSys<T> {
return;
}
if let Some(notification_sys) = get_global_notification_sys() {
let resp = notification_sys.load_group(group).await;
for r in resp {
if let Some(err) = r.err {
warn!("notify load_group failed: {}", err);
// Fire-and-forget notification to peers - don't block group operations
let group = group.to_string();
tokio::spawn(async move {
if let Some(notification_sys) = get_global_notification_sys() {
let resp = notification_sys.load_group(&group).await;
for r in resp {
if let Some(err) = r.err {
warn!("notify load_group failed (non-blocking): {}", err);
}
}
}
}
});
}
pub async fn create_user(&self, access_key: &str, args: &AddOrUpdateUserReq) -> Result<OffsetDateTime> {

View File
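The IAM hunks above turn peer notifications into fire-and-forget work: the borrowed name is cloned into an owned String and the fan-out runs on a detached tokio task, so logins no longer block on dead peers. A compact sketch of the pattern with a hypothetical notify_peers helper whose body just prints instead of calling notification_sys:

use tokio::task::JoinHandle;

// Clone the borrowed name so the detached task owns its data.
fn notify_peers(name: &str) -> JoinHandle<()> {
    let name = name.to_string();
    tokio::spawn(async move {
        // Stand-in for notification_sys.load_user(&name, ...).
        println!("notifying peers about user {name}");
    })
}

#[tokio::main]
async fn main() {
    let handle = notify_peers("alice");
    // The caller returns immediately; awaiting here is only so the demo prints.
    handle.await.unwrap();
}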

@@ -37,7 +37,6 @@ serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
tracing = { workspace = true }
thiserror = { workspace = true }
once_cell = { workspace = true }
# Cryptography
aes-gcm = { workspace = true }

View File

@@ -14,7 +14,7 @@
//! API types for KMS dynamic configuration
use crate::config::{KmsBackend, KmsConfig, VaultAuthMethod};
use crate::config::{BackendConfig, CacheConfig, KmsBackend, KmsConfig, LocalConfig, TlsConfig, VaultAuthMethod, VaultConfig};
use crate::service_manager::KmsServiceStatus;
use crate::types::{KeyMetadata, KeyUsage};
use serde::{Deserialize, Serialize};
@@ -212,12 +212,12 @@ impl From<&KmsConfig> for KmsConfigSummary {
};
let backend_summary = match &config.backend_config {
crate::config::BackendConfig::Local(local_config) => BackendSummary::Local {
BackendConfig::Local(local_config) => BackendSummary::Local {
key_dir: local_config.key_dir.clone(),
has_master_key: local_config.master_key.is_some(),
file_permissions: local_config.file_permissions,
},
crate::config::BackendConfig::Vault(vault_config) => BackendSummary::Vault {
BackendConfig::Vault(vault_config) => BackendSummary::Vault {
address: vault_config.address.clone(),
auth_method_type: match &vault_config.auth_method {
VaultAuthMethod::Token { .. } => "token".to_string(),
@@ -248,7 +248,7 @@ impl ConfigureLocalKmsRequest {
KmsConfig {
backend: KmsBackend::Local,
default_key_id: self.default_key_id.clone(),
backend_config: crate::config::BackendConfig::Local(crate::config::LocalConfig {
backend_config: BackendConfig::Local(LocalConfig {
key_dir: self.key_dir.clone(),
master_key: self.master_key.clone(),
file_permissions: self.file_permissions,
@@ -256,7 +256,7 @@ impl ConfigureLocalKmsRequest {
timeout: Duration::from_secs(self.timeout_seconds.unwrap_or(30)),
retry_attempts: self.retry_attempts.unwrap_or(3),
enable_cache: self.enable_cache.unwrap_or(true),
cache_config: crate::config::CacheConfig {
cache_config: CacheConfig {
max_keys: self.max_cached_keys.unwrap_or(1000),
ttl: Duration::from_secs(self.cache_ttl_seconds.unwrap_or(3600)),
enable_metrics: true,
@@ -271,7 +271,7 @@ impl ConfigureVaultKmsRequest {
KmsConfig {
backend: KmsBackend::Vault,
default_key_id: self.default_key_id.clone(),
backend_config: crate::config::BackendConfig::Vault(crate::config::VaultConfig {
backend_config: BackendConfig::Vault(VaultConfig {
address: self.address.clone(),
auth_method: self.auth_method.clone(),
namespace: self.namespace.clone(),
@@ -279,7 +279,7 @@ impl ConfigureVaultKmsRequest {
kv_mount: self.kv_mount.clone().unwrap_or_else(|| "secret".to_string()),
key_path_prefix: self.key_path_prefix.clone().unwrap_or_else(|| "rustfs/kms/keys".to_string()),
tls: if self.skip_tls_verify.unwrap_or(false) {
Some(crate::config::TlsConfig {
Some(TlsConfig {
ca_cert_path: None,
client_cert_path: None,
client_key_path: None,
@@ -292,7 +292,7 @@ impl ConfigureVaultKmsRequest {
timeout: Duration::from_secs(self.timeout_seconds.unwrap_or(30)),
retry_attempts: self.retry_attempts.unwrap_or(3),
enable_cache: self.enable_cache.unwrap_or(true),
cache_config: crate::config::CacheConfig {
cache_config: CacheConfig {
max_keys: self.max_cached_keys.unwrap_or(1000),
ttl: Duration::from_secs(self.cache_ttl_seconds.unwrap_or(3600)),
enable_metrics: true,

View File

@@ -200,6 +200,16 @@ pub struct BackendInfo {
impl BackendInfo {
/// Create a new backend info
///
/// # Arguments
/// * `backend_type` - The type of the backend
/// * `version` - The version of the backend
/// * `endpoint` - The endpoint or location of the backend
/// * `healthy` - Whether the backend is healthy
///
/// # Returns
/// A new BackendInfo instance
///
pub fn new(backend_type: String, version: String, endpoint: String, healthy: bool) -> Self {
Self {
backend_type,
@@ -211,6 +221,14 @@ impl BackendInfo {
}
/// Add metadata to the backend info
///
/// # Arguments
/// * `key` - Metadata key
/// * `value` - Metadata value
///
/// # Returns
/// Updated BackendInfo instance
///
pub fn with_metadata(mut self, key: String, value: String) -> Self {
self.metadata.insert(key, value);
self

View File

@@ -34,6 +34,13 @@ pub struct KmsCache {
impl KmsCache {
/// Create a new KMS cache with the specified capacity
///
/// # Arguments
/// * `capacity` - Maximum number of entries in the cache
///
/// # Returns
/// A new instance of `KmsCache`
///
pub fn new(capacity: u64) -> Self {
Self {
key_metadata_cache: Cache::builder()
@@ -48,22 +55,47 @@ impl KmsCache {
}
/// Get key metadata from cache
///
/// # Arguments
/// * `key_id` - The ID of the key to retrieve metadata for
///
/// # Returns
/// An `Option` containing the `KeyMetadata` if found, or `None` if not found
///
pub async fn get_key_metadata(&self, key_id: &str) -> Option<KeyMetadata> {
self.key_metadata_cache.get(key_id).await
}
/// Put key metadata into cache
///
/// # Arguments
/// * `key_id` - The ID of the key to store metadata for
/// * `metadata` - The `KeyMetadata` to store in the cache
///
pub async fn put_key_metadata(&mut self, key_id: &str, metadata: &KeyMetadata) {
self.key_metadata_cache.insert(key_id.to_string(), metadata.clone()).await;
self.key_metadata_cache.run_pending_tasks().await;
}
/// Get data key from cache
///
/// # Arguments
/// * `key_id` - The ID of the key to retrieve the data key for
///
/// # Returns
/// An `Option` containing the `CachedDataKey` if found, or `None` if not found
///
pub async fn get_data_key(&self, key_id: &str) -> Option<CachedDataKey> {
self.data_key_cache.get(key_id).await
}
/// Put data key into cache
///
/// # Arguments
/// * `key_id` - The ID of the key to store the data key for
/// * `plaintext` - The plaintext data key bytes
/// * `ciphertext` - The ciphertext data key bytes
///
pub async fn put_data_key(&mut self, key_id: &str, plaintext: &[u8], ciphertext: &[u8]) {
let cached_key = CachedDataKey {
plaintext: plaintext.to_vec(),
@@ -75,11 +107,19 @@ impl KmsCache {
}
/// Remove key metadata from cache
///
/// # Arguments
/// * `key_id` - The ID of the key to remove metadata for
///
pub async fn remove_key_metadata(&mut self, key_id: &str) {
self.key_metadata_cache.remove(key_id).await;
}
/// Remove data key from cache
///
/// # Arguments
/// * `key_id` - The ID of the key to remove the data key for
///
pub async fn remove_data_key(&mut self, key_id: &str) {
self.data_key_cache.remove(key_id).await;
}
@@ -95,6 +135,10 @@ impl KmsCache {
}
/// Get cache statistics (hit count, miss count)
///
/// # Returns
/// A tuple containing total entries and total misses
///
pub fn stats(&self) -> (u64, u64) {
let metadata_stats = (
self.key_metadata_cache.entry_count(),

View File

@@ -52,6 +52,16 @@ pub struct AesCipher {
impl AesCipher {
/// Create a new AES cipher with the given key
///
/// # Arguments
/// * `key` - A byte slice representing the AES-256 key (32 bytes)
///
/// # Errors
/// Returns `KmsError` if the key size is invalid
///
/// # Returns
/// A Result containing the AesCipher instance
///
pub fn new(key: &[u8]) -> Result<Self> {
if key.len() != 32 {
return Err(KmsError::invalid_key_size(32, key.len()));
@@ -142,6 +152,16 @@ pub struct ChaCha20Cipher {
impl ChaCha20Cipher {
/// Create a new ChaCha20 cipher with the given key
///
/// # Arguments
/// * `key` - A byte slice representing the ChaCha20-Poly1305 key (32 bytes)
///
/// # Errors
/// Returns `KmsError` if the key size is invalid
///
/// # Returns
/// A Result containing the ChaCha20Cipher instance
///
pub fn new(key: &[u8]) -> Result<Self> {
if key.len() != 32 {
return Err(KmsError::invalid_key_size(32, key.len()));
@@ -228,6 +248,14 @@ impl ObjectCipher for ChaCha20Cipher {
}
/// Create a cipher instance for the given algorithm and key
///
/// # Arguments
/// * `algorithm` - The encryption algorithm to use
/// * `key` - A byte slice representing the encryption key
///
/// # Returns
/// A Result containing a boxed ObjectCipher instance
///
pub fn create_cipher(algorithm: &EncryptionAlgorithm, key: &[u8]) -> Result<Box<dyn ObjectCipher>> {
match algorithm {
EncryptionAlgorithm::Aes256 | EncryptionAlgorithm::AwsKms => Ok(Box::new(AesCipher::new(key)?)),
@@ -236,6 +264,13 @@ pub fn create_cipher(algorithm: &EncryptionAlgorithm, key: &[u8]) -> Result<Box<
}
/// Generate a random IV for the given algorithm
///
/// # Arguments
/// * `algorithm` - The encryption algorithm for which to generate the IV
///
/// # Returns
/// A vector containing the generated IV bytes
///
pub fn generate_iv(algorithm: &EncryptionAlgorithm) -> Vec<u8> {
let iv_size = match algorithm {
EncryptionAlgorithm::Aes256 | EncryptionAlgorithm::AwsKms => 12,

View File

@@ -18,6 +18,12 @@ use crate::encryption::ciphers::{create_cipher, generate_iv};
use crate::error::{KmsError, Result};
use crate::manager::KmsManager;
use crate::types::*;
use base64::Engine;
use rand::random;
use std::collections::HashMap;
use std::io::Cursor;
use tokio::io::{AsyncRead, AsyncReadExt};
use tracing::{debug, info};
use zeroize::Zeroize;
/// Data key for object encryption
@@ -36,12 +42,6 @@ impl Drop for DataKey {
self.plaintext_key.zeroize();
}
}
use base64::Engine;
use rand::random;
use std::collections::HashMap;
use std::io::Cursor;
use tokio::io::{AsyncRead, AsyncReadExt};
use tracing::{debug, info};
/// Service for encrypting and decrypting S3 objects with KMS integration
pub struct ObjectEncryptionService {
@@ -59,51 +59,110 @@ pub struct EncryptionResult {
impl ObjectEncryptionService {
/// Create a new object encryption service
///
/// # Arguments
/// * `kms_manager` - KMS manager to use for key operations
///
/// # Returns
/// New ObjectEncryptionService instance
///
pub fn new(kms_manager: KmsManager) -> Self {
Self { kms_manager }
}
/// Create a new master key (delegates to KMS manager)
///
/// # Arguments
/// * `request` - CreateKeyRequest with key parameters
///
/// # Returns
/// CreateKeyResponse with created key details
///
pub async fn create_key(&self, request: CreateKeyRequest) -> Result<CreateKeyResponse> {
self.kms_manager.create_key(request).await
}
/// Describe a master key (delegates to KMS manager)
///
/// # Arguments
/// * `request` - DescribeKeyRequest with key ID
///
/// # Returns
/// DescribeKeyResponse with key metadata
///
pub async fn describe_key(&self, request: DescribeKeyRequest) -> Result<DescribeKeyResponse> {
self.kms_manager.describe_key(request).await
}
/// List master keys (delegates to KMS manager)
///
/// # Arguments
/// * `request` - ListKeysRequest with listing parameters
///
/// # Returns
/// ListKeysResponse with list of keys
///
pub async fn list_keys(&self, request: ListKeysRequest) -> Result<ListKeysResponse> {
self.kms_manager.list_keys(request).await
}
/// Generate a data encryption key (delegates to KMS manager)
///
/// # Arguments
/// * `request` - GenerateDataKeyRequest with key parameters
///
/// # Returns
/// GenerateDataKeyResponse with generated key details
///
pub async fn generate_data_key(&self, request: GenerateDataKeyRequest) -> Result<GenerateDataKeyResponse> {
self.kms_manager.generate_data_key(request).await
}
/// Get the default key ID
///
/// # Returns
/// Option with default key ID if configured
///
pub fn get_default_key_id(&self) -> Option<&String> {
self.kms_manager.get_default_key_id()
}
/// Get cache statistics
///
/// # Returns
/// Option with (hits, misses) if caching is enabled
///
pub async fn cache_stats(&self) -> Option<(u64, u64)> {
self.kms_manager.cache_stats().await
}
/// Clear the cache
///
/// # Returns
/// Result indicating success or failure
///
pub async fn clear_cache(&self) -> Result<()> {
self.kms_manager.clear_cache().await
}
/// Get backend health status
///
/// # Returns
/// Result indicating if backend is healthy
///
pub async fn health_check(&self) -> Result<bool> {
self.kms_manager.health_check().await
}
/// Create a data encryption key for object encryption
///
/// # Arguments
/// * `kms_key_id` - Optional KMS key ID to use (uses default if None)
/// * `context` - ObjectEncryptionContext with bucket and object key
///
/// # Returns
/// Tuple with DataKey and encrypted key blob
///
pub async fn create_data_key(
&self,
kms_key_id: &Option<String>,
@@ -146,6 +205,14 @@ impl ObjectEncryptionService {
}
/// Decrypt a data encryption key
///
/// # Arguments
/// * `encrypted_key` - Encrypted data key blob
/// * `context` - ObjectEncryptionContext with bucket and object key
///
/// # Returns
/// DataKey with decrypted key
///
pub async fn decrypt_data_key(&self, encrypted_key: &[u8], _context: &ObjectEncryptionContext) -> Result<DataKey> {
let decrypt_request = DecryptRequest {
ciphertext: encrypted_key.to_vec(),
@@ -429,6 +496,17 @@ impl ObjectEncryptionService {
}
/// Decrypt object with customer-provided key (SSE-C)
///
/// # Arguments
/// * `bucket` - S3 bucket name
/// * `object_key` - S3 object key
/// * `ciphertext` - Encrypted data
/// * `metadata` - Encryption metadata
/// * `customer_key` - Customer-provided 256-bit key
///
/// # Returns
/// Decrypted data as a reader
///
pub async fn decrypt_object_with_customer_key(
&self,
bucket: &str,
@@ -481,6 +559,14 @@ impl ObjectEncryptionService {
}
/// Validate encryption context
///
/// # Arguments
/// * `actual` - Actual encryption context from metadata
/// * `expected` - Expected encryption context to validate against
///
/// # Returns
/// Result indicating success or context mismatch
///
fn validate_encryption_context(&self, actual: &HashMap<String, String>, expected: &HashMap<String, String>) -> Result<()> {
for (key, expected_value) in expected {
match actual.get(key) {
@@ -499,6 +585,13 @@ impl ObjectEncryptionService {
}
/// Convert encryption metadata to HTTP headers for S3 compatibility
///
/// # Arguments
/// * `metadata` - EncryptionMetadata to convert
///
/// # Returns
/// HashMap of HTTP headers
///
pub fn metadata_to_headers(&self, metadata: &EncryptionMetadata) -> HashMap<String, String> {
let mut headers = HashMap::new();
@@ -542,6 +635,13 @@ impl ObjectEncryptionService {
}
/// Parse encryption metadata from HTTP headers
///
/// # Arguments
/// * `headers` - HashMap of HTTP headers
///
/// # Returns
/// EncryptionMetadata parsed from headers
///
pub fn headers_to_metadata(&self, headers: &HashMap<String, String>) -> Result<EncryptionMetadata> {
let algorithm = headers
.get("x-amz-server-side-encryption")

View File
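A small, hedged sketch of calling the delegating accessors documented above; it touches only methods whose signatures appear in this hunk, and the surrounding async helper is hypothetical.

// Sketch only; assumes an async caller inside the same crate.
use crate::encryption::service::ObjectEncryptionService;
use crate::error::Result;

async fn report_kms_status(service: &ObjectEncryptionService) -> Result<()> {
    // health_check delegates to the KMS manager and reports backend health.
    let healthy = service.health_check().await?;

    // cache_stats is None when caching is disabled, Some((hits, misses)) otherwise.
    let stats = service.cache_stats().await;

    // get_default_key_id is None when no default key is configured.
    let default_key = service.get_default_key_id();

    tracing::info!(healthy, ?stats, ?default_key, "KMS status");
    Ok(())
}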

@@ -116,7 +116,7 @@ impl KmsError {
Self::BackendError { message: message.into() }
}
/// Create an access denied error
/// Create access denied error
pub fn access_denied<S: Into<String>>(message: S) -> Self {
Self::AccessDenied { message: message.into() }
}
@@ -184,7 +184,7 @@ impl KmsError {
}
}
// Convert from standard library errors
/// Convert from standard library errors
impl From<std::io::Error> for KmsError {
fn from(error: std::io::Error) -> Self {
Self::IoError {
@@ -206,6 +206,13 @@ impl From<serde_json::Error> for KmsError {
impl KmsError {
/// Create a KMS error from AES-GCM error
///
/// # Arguments
/// * `error` - The AES-GCM error to convert
///
/// # Returns
/// * `KmsError` - The corresponding KMS error
///
pub fn from_aes_gcm_error(error: aes_gcm::Error) -> Self {
Self::CryptographicError {
operation: "AES-GCM".to_string(),
@@ -214,6 +221,13 @@ impl KmsError {
}
/// Create a KMS error from ChaCha20-Poly1305 error
///
/// # Arguments
/// * `error` - The ChaCha20-Poly1305 error to convert
///
/// # Returns
/// * `KmsError` - The corresponding KMS error
///
pub fn from_chacha20_error(error: chacha20poly1305::Error) -> Self {
Self::CryptographicError {
operation: "ChaCha20-Poly1305".to_string(),

View File
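A hedged sketch of how the constructors and conversions above compose at a call site; only KmsError::access_denied and the From<std::io::Error> impl shown in this hunk are relied on, and the helper itself is hypothetical.

use crate::error::{KmsError, Result};

// Hypothetical helper: `?` converts std::io::Error into KmsError::IoError via the From impl above.
fn read_key_material(path: &std::path::Path, allowed: bool) -> Result<Vec<u8>> {
    if !allowed {
        // access_denied accepts anything that converts Into<String>.
        return Err(KmsError::access_denied(format!("not permitted to read {}", path.display())));
    }
    Ok(std::fs::read(path)?)
}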

@@ -19,7 +19,7 @@ use crate::config::{BackendConfig, KmsConfig};
use crate::encryption::service::ObjectEncryptionService;
use crate::error::{KmsError, Result};
use crate::manager::KmsManager;
use std::sync::Arc;
use std::sync::{Arc, OnceLock};
use tokio::sync::RwLock;
use tracing::{error, info, warn};
@@ -71,9 +71,6 @@ impl KmsServiceManager {
/// Configure KMS with new configuration
pub async fn configure(&self, new_config: KmsConfig) -> Result<()> {
tracing::info!("CLAUDE DEBUG: configure() called with backend: {:?}", new_config.backend);
info!("Configuring KMS with backend: {:?}", new_config.backend);
// Update configuration
{
let mut config = self.config.write().await;
@@ -92,7 +89,6 @@ impl KmsServiceManager {
/// Start KMS service with current configuration
pub async fn start(&self) -> Result<()> {
tracing::info!("CLAUDE DEBUG: start() called");
let config = {
let config_guard = self.config.read().await;
match config_guard.as_ref() {
@@ -254,7 +250,7 @@ impl Default for KmsServiceManager {
}
/// Global KMS service manager instance
static GLOBAL_KMS_SERVICE_MANAGER: once_cell::sync::OnceCell<Arc<KmsServiceManager>> = once_cell::sync::OnceCell::new();
static GLOBAL_KMS_SERVICE_MANAGER: OnceLock<Arc<KmsServiceManager>> = OnceLock::new();
/// Initialize global KMS service manager
pub fn init_global_kms_service_manager() -> Arc<KmsServiceManager> {
@@ -270,12 +266,6 @@ pub fn get_global_kms_service_manager() -> Option<Arc<KmsServiceManager>> {
/// Get global encryption service (if KMS is running)
pub async fn get_global_encryption_service() -> Option<Arc<ObjectEncryptionService>> {
tracing::info!("CLAUDE DEBUG: get_global_encryption_service called");
let manager = get_global_kms_service_manager().unwrap_or_else(|| {
tracing::warn!("CLAUDE DEBUG: KMS service manager not initialized, initializing now as fallback");
init_global_kms_service_manager()
});
let service = manager.get_encryption_service().await;
tracing::info!("CLAUDE DEBUG: get_encryption_service returned: {}", service.is_some());
service
let manager = get_global_kms_service_manager().unwrap_or_else(init_global_kms_service_manager);
manager.get_encryption_service().await
}

View File
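A minimal sketch of the global accessor flow after the debug-logging cleanup above, written as if inside the same module so the free functions resolve without extra imports; the helper function is hypothetical.

// Hypothetical lookup helper; uses only the three free functions shown in the hunks above.
async fn encryption_service_is_running() -> bool {
    // Reuse the global manager if present, otherwise initialize it (same fallback as above).
    let _manager = get_global_kms_service_manager().unwrap_or_else(init_global_kms_service_manager);

    // Some(service) only when the KMS service has been started.
    get_global_encryption_service().await.is_some()
}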

@@ -42,6 +42,17 @@ pub struct DataKey {
impl DataKey {
/// Create a new data key
///
/// # Arguments
/// * `key_id` - Unique identifier for the key
/// * `version` - Key version number
/// * `plaintext` - Optional plaintext key material
/// * `ciphertext` - Encrypted key material
/// * `key_spec` - Key specification (e.g., "AES_256")
///
/// # Returns
/// A new `DataKey` instance
///
pub fn new(key_id: String, version: u32, plaintext: Option<Vec<u8>>, ciphertext: Vec<u8>, key_spec: String) -> Self {
Self {
key_id,
@@ -55,6 +66,11 @@ impl DataKey {
}
/// Clear the plaintext key material from memory for security
///
/// # Security
/// This method zeroes out the plaintext key material before dropping it
/// to prevent sensitive data from lingering in memory.
///
pub fn clear_plaintext(&mut self) {
if let Some(ref mut plaintext) = self.plaintext {
// Zero out the memory before dropping
@@ -64,6 +80,14 @@ impl DataKey {
}
/// Add metadata to the data key
///
/// # Arguments
/// * `key` - Metadata key
/// * `value` - Metadata value
///
/// # Returns
/// Updated `DataKey` instance with added metadata
///
pub fn with_metadata(mut self, key: String, value: String) -> Self {
self.metadata.insert(key, value);
self
@@ -97,6 +121,15 @@ pub struct MasterKey {
impl MasterKey {
/// Create a new master key
///
/// # Arguments
/// * `key_id` - Unique identifier for the key
/// * `algorithm` - Key algorithm (e.g., "AES-256")
/// * `created_by` - Optional creator/owner of the key
///
/// # Returns
/// A new `MasterKey` instance
///
pub fn new(key_id: String, algorithm: String, created_by: Option<String>) -> Self {
Self {
key_id,
@@ -113,6 +146,16 @@ impl MasterKey {
}
/// Create a new master key with description
///
/// # Arguments
/// * `key_id` - Unique identifier for the key
/// * `algorithm` - Key algorithm (e.g., "AES-256")
/// * `created_by` - Optional creator/owner of the key
/// * `description` - Optional key description
///
/// # Returns
/// A new `MasterKey` instance with description
///
pub fn new_with_description(
key_id: String,
algorithm: String,
@@ -218,6 +261,14 @@ pub struct GenerateKeyRequest {
impl GenerateKeyRequest {
/// Create a new generate key request
///
/// # Arguments
/// * `master_key_id` - Master key ID to use for encryption
/// * `key_spec` - Key specification (e.g., "AES_256")
///
/// # Returns
/// A new `GenerateKeyRequest` instance
///
pub fn new(master_key_id: String, key_spec: String) -> Self {
Self {
master_key_id,
@@ -229,12 +280,27 @@ impl GenerateKeyRequest {
}
/// Add encryption context
///
/// # Arguments
/// * `key` - Context key
/// * `value` - Context value
///
/// # Returns
/// Updated `GenerateKeyRequest` instance with added context
///
pub fn with_context(mut self, key: String, value: String) -> Self {
self.encryption_context.insert(key, value);
self
}
/// Set key length explicitly
///
/// # Arguments
/// * `length` - Key length in bytes
///
/// # Returns
/// Updated `GenerateKeyRequest` instance with specified key length
///
pub fn with_length(mut self, length: u32) -> Self {
self.key_length = Some(length);
self
@@ -256,6 +322,14 @@ pub struct EncryptRequest {
impl EncryptRequest {
/// Create a new encrypt request
///
/// # Arguments
/// * `key_id` - Key ID to use for encryption
/// * `plaintext` - Plaintext data to encrypt
///
/// # Returns
/// A new `EncryptRequest` instance
///
pub fn new(key_id: String, plaintext: Vec<u8>) -> Self {
Self {
key_id,
@@ -266,6 +340,14 @@ impl EncryptRequest {
}
/// Add encryption context
///
/// # Arguments
/// * `key` - Context key
/// * `value` - Context value
///
/// # Returns
/// Updated `EncryptRequest` instance with added context
///
pub fn with_context(mut self, key: String, value: String) -> Self {
self.encryption_context.insert(key, value);
self
@@ -298,6 +380,13 @@ pub struct DecryptRequest {
impl DecryptRequest {
/// Create a new decrypt request
///
/// # Arguments
/// * `ciphertext` - Ciphertext to decrypt
///
/// # Returns
/// A new `DecryptRequest` instance
///
pub fn new(ciphertext: Vec<u8>) -> Self {
Self {
ciphertext,
@@ -307,6 +396,14 @@ impl DecryptRequest {
}
/// Add encryption context
///
/// # Arguments
/// * `key` - Context key
/// * `value` - Context value
///
/// # Returns
/// Updated `DecryptRequest` instance with added context
///
pub fn with_context(mut self, key: String, value: String) -> Self {
self.encryption_context.insert(key, value);
self
@@ -365,6 +462,13 @@ pub struct OperationContext {
impl OperationContext {
/// Create a new operation context
///
/// # Arguments
/// * `principal` - User or service performing the operation
///
/// # Returns
/// A new `OperationContext` instance
///
pub fn new(principal: String) -> Self {
Self {
operation_id: Uuid::new_v4(),
@@ -376,18 +480,40 @@ impl OperationContext {
}
/// Add additional context
///
/// # Arguments
/// * `key` - Context key
/// * `value` - Context value
///
/// # Returns
/// Updated `OperationContext` instance with added context
///
pub fn with_context(mut self, key: String, value: String) -> Self {
self.additional_context.insert(key, value);
self
}
/// Set source IP
///
/// # Arguments
/// * `ip` - Source IP address
///
/// # Returns
/// Updated `OperationContext` instance with source IP
///
pub fn with_source_ip(mut self, ip: String) -> Self {
self.source_ip = Some(ip);
self
}
/// Set user agent
///
/// # Arguments
/// * `agent` - User agent string
///
/// # Returns
/// Updated `OperationContext` instance with user agent
///
pub fn with_user_agent(mut self, agent: String) -> Self {
self.user_agent = Some(agent);
self
@@ -411,6 +537,14 @@ pub struct ObjectEncryptionContext {
impl ObjectEncryptionContext {
/// Create a new object encryption context
///
/// # Arguments
/// * `bucket` - Bucket name
/// * `object_key` - Object key
///
/// # Returns
/// A new `ObjectEncryptionContext` instance
///
pub fn new(bucket: String, object_key: String) -> Self {
Self {
bucket,
@@ -422,18 +556,40 @@ impl ObjectEncryptionContext {
}
/// Set content type
///
/// # Arguments
/// * `content_type` - Content type string
///
/// # Returns
/// Updated `ObjectEncryptionContext` instance with content type
///
pub fn with_content_type(mut self, content_type: String) -> Self {
self.content_type = Some(content_type);
self
}
/// Set object size
///
/// # Arguments
/// * `size` - Object size in bytes
///
/// # Returns
/// Updated `ObjectEncryptionContext` instance with size
///
pub fn with_size(mut self, size: u64) -> Self {
self.size = Some(size);
self
}
/// Add encryption context
///
/// # Arguments
/// * `key` - Context key
/// * `value` - Context value
///
/// # Returns
/// Updated `ObjectEncryptionContext` instance with added context
///
pub fn with_encryption_context(mut self, key: String, value: String) -> Self {
self.encryption_context.insert(key, value);
self
@@ -503,6 +659,10 @@ pub enum KeySpec {
impl KeySpec {
/// Get the key size in bytes
///
/// # Returns
/// Key size in bytes
///
pub fn key_size(&self) -> usize {
match self {
Self::Aes256 => 32,
@@ -512,6 +672,10 @@ impl KeySpec {
}
/// Get the string representation for backends
///
/// # Returns
/// Key specification as a string
///
pub fn as_str(&self) -> &'static str {
match self {
Self::Aes256 => "AES_256",
@@ -636,6 +800,14 @@ pub struct GenerateDataKeyRequest {
impl GenerateDataKeyRequest {
/// Create a new generate data key request
///
/// # Arguments
/// * `key_id` - Key ID to use for encryption
/// * `key_spec` - Key specification
///
/// # Returns
/// A new `GenerateDataKeyRequest` instance
///
pub fn new(key_id: String, key_spec: KeySpec) -> Self {
Self {
key_id,
@@ -658,6 +830,10 @@ pub struct GenerateDataKeyResponse {
impl EncryptionAlgorithm {
/// Get the algorithm name as a string
///
/// # Returns
/// Algorithm name as a string
///
pub fn as_str(&self) -> &'static str {
match self {
Self::Aes256 => "AES256",

View File
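A hedged sketch of the builder-style types documented above; every constructor and with_* call used here appears in the hunks, while the demo function and its literal values are illustrative only.

use crate::types::{DecryptRequest, EncryptRequest, GenerateDataKeyRequest, KeySpec, ObjectEncryptionContext};

fn build_requests_sketch() {
    // KeySpec exposes its size in bytes and its backend string form.
    assert_eq!(KeySpec::Aes256.key_size(), 32);
    assert_eq!(KeySpec::Aes256.as_str(), "AES_256");

    // Requests attach encryption context through chained with_* builders.
    let encrypt = EncryptRequest::new("my-key-id".to_string(), b"secret".to_vec())
        .with_context("bucket".to_string(), "photos".to_string());
    let decrypt = DecryptRequest::new(vec![0u8; 16]).with_context("bucket".to_string(), "photos".to_string());

    // ObjectEncryptionContext carries bucket/object plus optional attributes.
    let ctx = ObjectEncryptionContext::new("photos".to_string(), "2025/cat.png".to_string())
        .with_content_type("image/png".to_string())
        .with_size(1024)
        .with_encryption_context("tenant".to_string(), "example".to_string());

    // Data-key generation pairs a master key id with a KeySpec.
    let data_key_req = GenerateDataKeyRequest::new("my-key-id".to_string(), KeySpec::Aes256);

    let _ = (encrypt, decrypt, ctx, data_key_req);
}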

@@ -41,7 +41,6 @@ tracing.workspace = true
url.workspace = true
uuid.workspace = true
thiserror.workspace = true
once_cell.workspace = true
parking_lot.workspace = true
smallvec.workspace = true
smartstring.workspace = true

View File

@@ -8,7 +8,7 @@
<p align="center">
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
<a href="https://docs.rustfs.com/en/">📖 Documentation</a>
<a href="https://docs.rustfs.com/">📖 Documentation</a>
· <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
· <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
</p>

View File

@@ -12,14 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use once_cell::sync::Lazy;
use std::sync::Arc;
use std::sync::LazyLock;
use std::sync::atomic::{AtomicU32, AtomicUsize, Ordering};
use tokio::sync::Notify;
/// Optimized notification pool to reduce memory overhead and thundering herd effects
/// Increased pool size for better performance under high concurrency
static NOTIFY_POOL: Lazy<Vec<Arc<Notify>>> = Lazy::new(|| (0..128).map(|_| Arc::new(Notify::new())).collect());
static NOTIFY_POOL: LazyLock<Vec<Arc<Notify>>> = LazyLock::new(|| (0..128).map(|_| Arc::new(Notify::new())).collect());
/// Optimized notification system for object locks
#[derive(Debug)]

View File
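The hunks above and below swap once_cell for the standard library equivalents; a brief standalone sketch of that migration pattern (independent of this repository) is:

// Before (once_cell):
//   use once_cell::sync::{Lazy, OnceCell};
//   static POOL: Lazy<Vec<u32>> = Lazy::new(|| vec![0; 128]);
//   static CONFIG: OnceCell<String> = OnceCell::new();
// After (std; LazyLock is stable since Rust 1.80, OnceLock since 1.70):
use std::sync::{LazyLock, OnceLock};

static POOL: LazyLock<Vec<u32>> = LazyLock::new(|| (0..128).collect());
static CONFIG: OnceLock<String> = OnceLock::new();

fn main() {
    // LazyLock initializes on first access; OnceLock on the first successful set.
    assert_eq!(POOL.len(), 128);
    CONFIG.set("ready".to_string()).ok();
    assert_eq!(CONFIG.get().map(String::as_str), Some("ready"));
}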

@@ -12,11 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use once_cell::unsync::OnceCell;
use serde::{Deserialize, Serialize};
use smartstring::SmartString;
use std::hash::{Hash, Hasher};
use std::sync::Arc;
use std::sync::OnceLock;
use std::time::{Duration, SystemTime};
use crate::fast_lock::guard::FastLockGuard;
@@ -72,10 +72,10 @@ pub struct OptimizedObjectKey {
/// Version - optional for latest version semantics
pub version: Option<SmartString<smartstring::LazyCompact>>,
/// Cached hash to avoid recomputation
hash_cache: OnceCell<u64>,
hash_cache: OnceLock<u64>,
}
// Manual implementations to handle OnceCell properly
// Manual implementations to handle OnceLock properly
impl PartialEq for OptimizedObjectKey {
fn eq(&self, other: &Self) -> bool {
self.bucket == other.bucket && self.object == other.object && self.version == other.version
@@ -116,7 +116,7 @@ impl OptimizedObjectKey {
bucket: bucket.into(),
object: object.into(),
version: None,
hash_cache: OnceCell::new(),
hash_cache: OnceLock::new(),
}
}
@@ -129,7 +129,7 @@ impl OptimizedObjectKey {
bucket: bucket.into(),
object: object.into(),
version: Some(version.into()),
hash_cache: OnceCell::new(),
hash_cache: OnceLock::new(),
}
}
@@ -145,7 +145,7 @@ impl OptimizedObjectKey {
/// Reset hash cache if key is modified
pub fn invalidate_cache(&mut self) {
self.hash_cache = OnceCell::new();
self.hash_cache = OnceLock::new();
}
/// Convert from regular ObjectKey
@@ -154,7 +154,7 @@ impl OptimizedObjectKey {
bucket: SmartString::from(key.bucket.as_ref()),
object: SmartString::from(key.object.as_ref()),
version: key.version.as_ref().map(|v| SmartString::from(v.as_ref())),
hash_cache: OnceCell::new(),
hash_cache: OnceLock::new(),
}
}

View File

@@ -12,12 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.
use std::sync::Arc;
use once_cell::sync::Lazy;
use tokio::sync::mpsc;
use crate::{client::LockClient, types::LockId};
use std::sync::{Arc, LazyLock};
use tokio::sync::mpsc;
#[derive(Debug, Clone)]
struct UnlockJob {
@@ -31,7 +28,7 @@ struct UnlockRuntime {
}
// Global unlock runtime with background worker
static UNLOCK_RUNTIME: Lazy<UnlockRuntime> = Lazy::new(|| {
static UNLOCK_RUNTIME: LazyLock<UnlockRuntime> = LazyLock::new(|| {
// Larger buffer to reduce contention during bursts
let (tx, mut rx) = mpsc::channel::<UnlockJob>(8192);

View File

@@ -73,13 +73,13 @@ pub const MAX_DELETE_LIST: usize = 1000;
// ============================================================================
// Global singleton FastLock manager shared across all lock implementations
use once_cell::sync::OnceCell;
use std::sync::Arc;
use std::sync::OnceLock;
/// Enum wrapper for different lock manager implementations
pub enum GlobalLockManager {
Enabled(Arc<fast_lock::FastObjectLockManager>),
Disabled(fast_lock::DisabledLockManager),
Enabled(Arc<FastObjectLockManager>),
Disabled(DisabledLockManager),
}
impl Default for GlobalLockManager {
@@ -99,11 +99,11 @@ impl GlobalLockManager {
match locks_enabled.as_str() {
"false" | "0" | "no" | "off" | "disabled" => {
tracing::info!("Lock system disabled via RUSTFS_ENABLE_LOCKS environment variable");
Self::Disabled(fast_lock::DisabledLockManager::new())
Self::Disabled(DisabledLockManager::new())
}
_ => {
tracing::info!("Lock system enabled");
Self::Enabled(Arc::new(fast_lock::FastObjectLockManager::new()))
Self::Enabled(Arc::new(FastObjectLockManager::new()))
}
}
}
@@ -114,7 +114,7 @@ impl GlobalLockManager {
}
/// Get the FastObjectLockManager if enabled, otherwise returns None
pub fn as_fast_lock_manager(&self) -> Option<Arc<fast_lock::FastObjectLockManager>> {
pub fn as_fast_lock_manager(&self) -> Option<Arc<FastObjectLockManager>> {
match self {
Self::Enabled(manager) => Some(manager.clone()),
Self::Disabled(_) => None,
@@ -123,11 +123,8 @@ impl GlobalLockManager {
}
#[async_trait::async_trait]
impl fast_lock::LockManager for GlobalLockManager {
async fn acquire_lock(
&self,
request: fast_lock::ObjectLockRequest,
) -> std::result::Result<fast_lock::FastLockGuard, fast_lock::LockResult> {
impl LockManager for GlobalLockManager {
async fn acquire_lock(&self, request: ObjectLockRequest) -> std::result::Result<FastLockGuard, LockResult> {
match self {
Self::Enabled(manager) => manager.acquire_lock(request).await,
Self::Disabled(manager) => manager.acquire_lock(request).await,
@@ -139,7 +136,7 @@ impl fast_lock::LockManager for GlobalLockManager {
bucket: impl Into<Arc<str>> + Send,
object: impl Into<Arc<str>> + Send,
owner: impl Into<Arc<str>> + Send,
) -> std::result::Result<fast_lock::FastLockGuard, fast_lock::LockResult> {
) -> std::result::Result<FastLockGuard, LockResult> {
match self {
Self::Enabled(manager) => manager.acquire_read_lock(bucket, object, owner).await,
Self::Disabled(manager) => manager.acquire_read_lock(bucket, object, owner).await,
@@ -152,7 +149,7 @@ impl fast_lock::LockManager for GlobalLockManager {
object: impl Into<Arc<str>> + Send,
version: impl Into<Arc<str>> + Send,
owner: impl Into<Arc<str>> + Send,
) -> std::result::Result<fast_lock::FastLockGuard, fast_lock::LockResult> {
) -> std::result::Result<FastLockGuard, LockResult> {
match self {
Self::Enabled(manager) => manager.acquire_read_lock_versioned(bucket, object, version, owner).await,
Self::Disabled(manager) => manager.acquire_read_lock_versioned(bucket, object, version, owner).await,
@@ -164,7 +161,7 @@ impl fast_lock::LockManager for GlobalLockManager {
bucket: impl Into<Arc<str>> + Send,
object: impl Into<Arc<str>> + Send,
owner: impl Into<Arc<str>> + Send,
) -> std::result::Result<fast_lock::FastLockGuard, fast_lock::LockResult> {
) -> std::result::Result<FastLockGuard, LockResult> {
match self {
Self::Enabled(manager) => manager.acquire_write_lock(bucket, object, owner).await,
Self::Disabled(manager) => manager.acquire_write_lock(bucket, object, owner).await,
@@ -177,21 +174,21 @@ impl fast_lock::LockManager for GlobalLockManager {
object: impl Into<Arc<str>> + Send,
version: impl Into<Arc<str>> + Send,
owner: impl Into<Arc<str>> + Send,
) -> std::result::Result<fast_lock::FastLockGuard, fast_lock::LockResult> {
) -> std::result::Result<FastLockGuard, LockResult> {
match self {
Self::Enabled(manager) => manager.acquire_write_lock_versioned(bucket, object, version, owner).await,
Self::Disabled(manager) => manager.acquire_write_lock_versioned(bucket, object, version, owner).await,
}
}
async fn acquire_locks_batch(&self, batch_request: fast_lock::BatchLockRequest) -> fast_lock::BatchLockResult {
async fn acquire_locks_batch(&self, batch_request: BatchLockRequest) -> BatchLockResult {
match self {
Self::Enabled(manager) => manager.acquire_locks_batch(batch_request).await,
Self::Disabled(manager) => manager.acquire_locks_batch(batch_request).await,
}
}
fn get_lock_info(&self, key: &fast_lock::ObjectKey) -> Option<fast_lock::ObjectLockInfo> {
fn get_lock_info(&self, key: &ObjectKey) -> Option<ObjectLockInfo> {
match self {
Self::Enabled(manager) => manager.get_lock_info(key),
Self::Disabled(manager) => manager.get_lock_info(key),
@@ -248,7 +245,7 @@ impl fast_lock::LockManager for GlobalLockManager {
}
}
static GLOBAL_LOCK_MANAGER: OnceCell<Arc<GlobalLockManager>> = OnceCell::new();
static GLOBAL_LOCK_MANAGER: OnceLock<Arc<GlobalLockManager>> = OnceLock::new();
/// Get the global shared lock manager instance
///
@@ -263,7 +260,7 @@ pub fn get_global_lock_manager() -> Arc<GlobalLockManager> {
/// This function is deprecated. Use get_global_lock_manager() instead.
/// Returns FastObjectLockManager when locks are enabled, or panics when disabled.
#[deprecated(note = "Use get_global_lock_manager() instead")]
pub fn get_global_fast_lock_manager() -> Arc<fast_lock::FastObjectLockManager> {
pub fn get_global_fast_lock_manager() -> Arc<FastObjectLockManager> {
let manager = get_global_lock_manager();
manager.as_fast_lock_manager().unwrap_or_else(|| {
panic!("Cannot get FastObjectLockManager when locks are disabled. Use get_global_lock_manager() instead.");
@@ -301,7 +298,7 @@ mod tests {
#[tokio::test]
async fn test_disabled_manager_direct() {
let manager = fast_lock::DisabledLockManager::new();
let manager = DisabledLockManager::new();
// All operations should succeed immediately
let guard = manager.acquire_read_lock("bucket", "object", "owner").await;
@@ -316,7 +313,7 @@ mod tests {
#[tokio::test]
async fn test_enabled_manager_direct() {
let manager = fast_lock::FastObjectLockManager::new();
let manager = FastObjectLockManager::new();
// Operations should work normally
let guard = manager.acquire_read_lock("bucket", "object", "owner").await;
@@ -331,8 +328,8 @@ mod tests {
#[tokio::test]
async fn test_global_manager_enum_wrapper() {
// Test the GlobalLockManager enum directly
let enabled_manager = GlobalLockManager::Enabled(Arc::new(fast_lock::FastObjectLockManager::new()));
let disabled_manager = GlobalLockManager::Disabled(fast_lock::DisabledLockManager::new());
let enabled_manager = GlobalLockManager::Enabled(Arc::new(FastObjectLockManager::new()));
let disabled_manager = GlobalLockManager::Disabled(DisabledLockManager::new());
assert!(!enabled_manager.is_disabled());
assert!(disabled_manager.is_disabled());
@@ -352,7 +349,7 @@ mod tests {
async fn test_batch_operations_work() {
let manager = get_global_lock_manager();
let batch = fast_lock::BatchLockRequest::new("owner")
let batch = BatchLockRequest::new("owner")
.add_read_lock("bucket", "obj1")
.add_write_lock("bucket", "obj2");

View File
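A minimal usage sketch for the simplified lock API above, written as if inside the crate's lib.rs (like the tests in the hunk) so the shortened names are already in scope; only functions and methods visible in this hunk are used, and the async wrapper is hypothetical.

async fn lock_usage_sketch() {
    // Global singleton; holds the Enabled or Disabled variant depending on RUSTFS_ENABLE_LOCKS.
    let manager = get_global_lock_manager();

    // Single read lock: Ok(guard) when granted (the disabled manager always grants immediately).
    if let Ok(_guard) = manager.acquire_read_lock("bucket", "object", "owner").await {
        // ... read the object while the guard is held ...
    }

    // Batch acquisition mixes read and write locks in one request.
    let batch = BatchLockRequest::new("owner")
        .add_read_lock("bucket", "obj1")
        .add_write_lock("bucket", "obj2");
    let _result = manager.acquire_locks_batch(batch).await;
}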

@@ -8,7 +8,7 @@
<p align="center">
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
<a href="https://docs.rustfs.com/en/">📖 Documentation</a>
<a href="https://docs.rustfs.com/">📖 Documentation</a>
· <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
· <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
</p>

Some files were not shown because too many files have changed in this diff.