Mirror of https://github.com/rustfs/rustfs.git (synced 2026-01-17 01:30:33 +00:00)

Compare commits: 1.0.0-alph ... 1.0.0-alph (127 commits)
Commits (SHA1): 89c3ae77a4, 82a6e78845, 7e75c9b1f5, 8bdff3fbcb, 65d32e693f, 1ff28b3157, 2186f46ea3, add6453aea, 4418c882ad, 00c607b5ce, 79585f98e0, 2a3517f1d5, 3942e07487, 04811c0006, 73c15d6be1, af5c0b13ef, f17990f746, 80cfb4feab, 08f1a31f3f, 1c51e204ab, 958f054123, 3e2252e4bb, f3a1431fa5, 3bd96bcf10, 20ea591049, cc31e88c91, b5535083de, 1e35edf079, 8dd3e8b534, 8e0aeb4fdc, abe8a50b5a, 61f4d307b5, 3eafeb0ff0, 4abfc9f554, 1057953052, 889c67f359, 1d111464f9, a0b2f5a232, 46557cddd1, 443947e1ac, 8821fcc1e7, 17828ec2a8, 94d5b1c1e4, 0bca1fbd56, 52c2d15a4b, 352035a06f, fe4fabb195, 07c5e7997a, 0007b541cd, 0f2e4d124c, 2e4ce6921b, 7178a94792, e8fe9731fd, 3ba415740e, aeccd14d99, 89a155a35d, 67095c05f9, 1229fddb5d, 08be8f5472, 0bf25fdefa, 9e2fa148ee, cb3e496b17, 997f54e700, 1a4e95e940, a3006ab407, e197486c8c, 0da943a6a4, fba201df3d, ccbab3232b, 421f66ea18, ede2fa9d0b, 978845b555, 53c126d678, 9f12a7678c, 2c86fe30ec, ac0c34e734, ae46ea4bd3, 8b3d4ea59b, ef261deef6, 20961d7c91, 8de8172833, 7c98c62d60, 15c75b9d36, af650716da, 552e95e368, 619cc69512, 76d25d9a20, 834025d9e3, e2d8e9e3d3, cd6a26bc3a, 5f256249f4, b10d80cbb6, 7c6cbaf837, 72930b1e30, 6ca8945ca7, 0d0edc22be, 030d3c9426, b8b905be86, ace58fea0d, 3a79242133, 63d846ed14, 3a79fcfe73, b3c80ae362, 3fd003b21d, 1d3f622922, e31b4303ed, 5b0a3a0764, a8b7b28fd0, e355d3db80, 4d7bf98c82, 699164e05e, d35ceac441, 93982227ac, fdcdb30d28, a6cf0740cb, a2e3a719d3, 76efee37fa, fd7c0964a0, 701960dd81, ee04cc77a0, 069194f553, fce4e64da4, 44bdebe6e9, 2b268fdd7f, 18cd9a8b46, e14809ee04, 390d051ddd
```diff
@@ -34,61 +34,111 @@ services:
     ports:
       - "3200:3200" # tempo
       - "24317:4317" # otlp grpc
       - "24318:4318" # otlp http
     restart: unless-stopped
     networks:
       - otel-network
     healthcheck:
       test: [ "CMD", "wget", "--spider", "-q", "http://localhost:3200/metrics" ]
       interval: 10s
       timeout: 5s
       retries: 3
       start_period: 15s

   otel-collector:
     image: otel/opentelemetry-collector-contrib:latest
     environment:
       - TZ=Asia/Shanghai
     volumes:
-      - ./otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml
+      - ./otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml:ro
     ports:
-      - "1888:1888"
-      - "8888:8888"
-      - "8889:8889"
-      - "13133:13133"
-      - "4317:4317"
-      - "4318:4318"
-      - "55679:55679"
+      - "1888:1888" # pprof
+      - "8888:8888" # Prometheus metrics for Collector
+      - "8889:8889" # Prometheus metrics for application indicators
+      - "13133:13133" # health check
+      - "4317:4317" # OTLP gRPC
+      - "4318:4318" # OTLP HTTP
+      - "55679:55679" # zpages
     networks:
       - otel-network
     depends_on:
       jaeger:
         condition: service_started
       tempo:
         condition: service_started
       prometheus:
         condition: service_started
       loki:
         condition: service_started
     healthcheck:
       test: [ "CMD", "wget", "--spider", "-q", "http://localhost:13133" ]
       interval: 10s
       timeout: 5s
       retries: 3

   jaeger:
     image: jaegertracing/jaeger:latest
     environment:
       - TZ=Asia/Shanghai
       - SPAN_STORAGE_TYPE=memory
       - COLLECTOR_OTLP_ENABLED=true
     ports:
-      - "16686:16686"
-      - "14317:4317"
-      - "14318:4318"
+      - "16686:16686" # Web UI
+      - "14317:4317" # OTLP gRPC
+      - "14318:4318" # OTLP HTTP
+      - "18888:8888" # collector
     networks:
       - otel-network
     healthcheck:
       test: [ "CMD", "wget", "--spider", "-q", "http://localhost:16686" ]
       interval: 10s
       timeout: 5s
       retries: 3
   prometheus:
     image: prom/prometheus:latest
     environment:
       - TZ=Asia/Shanghai
     volumes:
-      - ./prometheus.yml:/etc/prometheus/prometheus.yml
+      - ./prometheus.yml:/etc/prometheus/prometheus.yml:ro
+      - ./prometheus-data:/prometheus
     ports:
       - "9090:9090"
     command:
       - '--config.file=/etc/prometheus/prometheus.yml'
+      - '--web.enable-otlp-receiver' # Enable OTLP
+      - '--web.enable-remote-write-receiver' # Enable remote write
+      - '--enable-feature=promql-experimental-functions' # Enable info()
+      - '--storage.tsdb.min-block-duration=15m' # Minimum block duration
+      - '--storage.tsdb.max-block-duration=1h' # Maximum block duration
       - '--log.level=info'
       - '--storage.tsdb.retention.time=30d'
       - '--storage.tsdb.path=/prometheus'
       - '--web.console.libraries=/usr/share/prometheus/console_libraries'
       - '--web.console.templates=/usr/share/prometheus/consoles'
     restart: unless-stopped
     networks:
       - otel-network
     healthcheck:
       test: [ "CMD", "wget", "--spider", "-q", "http://localhost:9090/-/healthy" ]
       interval: 10s
       timeout: 5s
       retries: 3
   loki:
     image: grafana/loki:latest
     environment:
       - TZ=Asia/Shanghai
     volumes:
-      - ./loki-config.yaml:/etc/loki/local-config.yaml
+      - ./loki-config.yaml:/etc/loki/local-config.yaml:ro
     ports:
       - "3100:3100"
     command: -config.file=/etc/loki/local-config.yaml
     networks:
       - otel-network
     healthcheck:
       test: [ "CMD", "wget", "--spider", "-q", "http://localhost:3100/ready" ]
       interval: 10s
       timeout: 5s
       retries: 3
   grafana:
     image: grafana/grafana:latest
     ports:
@@ -97,14 +147,32 @@ services:
       - ./grafana-datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml
     environment:
       - GF_SECURITY_ADMIN_PASSWORD=admin
       - GF_SECURITY_ADMIN_USER=admin
       - TZ=Asia/Shanghai
       - GF_INSTALL_PLUGINS=grafana-pyroscope-datasource
     restart: unless-stopped
     networks:
       - otel-network
     depends_on:
       - prometheus
       - tempo
       - loki
     healthcheck:
       test: [ "CMD", "wget", "--spider", "-q", "http://localhost:3000/api/health" ]
       interval: 10s
       timeout: 5s
       retries: 3

 volumes:
   prometheus-data:
   tempo-data:

 networks:
   otel-network:
     driver: bridge
     name: "network_otel_config"
     ipam:
       config:
         - subnet: 172.28.0.0/16
     driver_opts:
       com.docker.network.enable_ipv6: "true"
```
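
Before bringing the stack up after an edit like this, `docker compose config` will parse and validate the merged file without starting anything (a minimal sketch; substitute whatever this compose file is actually named in the repo):

```bash
# Parse and validate the compose file; --quiet prints nothing on success
docker compose -f docker-compose.yml config --quiet && echo "compose file OK"
```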
```diff
@@ -42,7 +42,7 @@ datasources:
         customQuery: true
         query: 'method="$${__span.tags.method}"'
       tracesToMetrics:
-        datasourceUid: 'prom'
+        datasourceUid: 'prometheus'
         spanStartTimeShift: '-1h'
         spanEndTimeShift: '1h'
         tags: [ { key: 'service.name', value: 'service' }, { key: 'job' } ]
@@ -91,7 +91,7 @@ datasources:
         customQuery: true
         query: 'method="$${__span.tags.method}"'
       tracesToMetrics:
-        datasourceUid: 'prom'
+        datasourceUid: 'Prometheus'
         spanStartTimeShift: '1h'
         spanEndTimeShift: '-1h'
         tags: [ { key: 'service.name', value: 'service' }, { key: 'job' } ]
```
```diff
@@ -65,6 +65,7 @@ extensions:
       some_store:
         memory:
           max_traces: 1000000
+          max_events: 100000
       another_store:
         memory:
           max_traces: 1000000
@@ -102,6 +103,7 @@ receivers:

 processors:
   batch:
+    metadata_keys: [ "span.kind", "http.method", "http.status_code", "db.system", "db.statement", "messaging.system", "messaging.destination", "messaging.operation", "span.events", "span.links" ]
     # Adaptive Sampling Processor is required to support adaptive sampling.
     # It expects remote_sampling extension with `adaptive:` config to be enabled.
     adaptive_sampling:
```
```diff
@@ -41,6 +41,9 @@ query_range:

 limits_config:
+  metric_aggregation_enabled: true
+  max_line_size: 256KB
+  max_line_size_truncate: false
   allow_structured_metadata: true

 schema_config:
   configs:
@@ -51,6 +54,7 @@ schema_config:
       index:
         prefix: index_
         period: 24h
+      row_shards: 16

 pattern_ingester:
   enabled: true
```
```diff
@@ -15,66 +15,108 @@
 receivers:
   otlp:
     protocols:
-      grpc: # OTLP gRPC 接收器
+      grpc: # OTLP gRPC receiver
         endpoint: 0.0.0.0:4317
-      http: # OTLP HTTP 接收器
+      http: # OTLP HTTP receiver
         endpoint: 0.0.0.0:4318

 processors:
-  batch: # 批处理处理器,提升吞吐量
+  batch: # Batch processor to improve throughput
     timeout: 5s
     send_batch_size: 1000
+    metadata_keys: [ ]
+    metadata_cardinality_limit: 1000
+  memory_limiter:
+    check_interval: 1s
+    limit_mib: 512
+  transform/logs:
+    log_statements:
+      - context: log
+        statements:
+          # Extract Body as attribute "message"
+          - set(attributes["message"], body.string)
+          # Retain the original Body
+          - set(attributes["log.body"], body.string)

 exporters:
-  otlp/traces: # OTLP 导出器,用于跟踪数据
-    endpoint: "jaeger:4317" # Jaeger 的 OTLP gRPC 端点
+  otlp/traces: # OTLP exporter for trace data
+    endpoint: "http://jaeger:4317" # OTLP gRPC endpoint for Jaeger
     tls:
-      insecure: true # 开发环境禁用 TLS,生产环境需配置证书
+      insecure: true # TLS is disabled in the development environment and a certificate needs to be configured in the production environment.
+    compression: gzip # Enable compression to reduce network bandwidth
+    retry_on_failure:
+      enabled: true # Enable retry on failure
+      initial_interval: 1s # Initial interval for retry
+      max_interval: 30s # Maximum interval for retry
+      max_elapsed_time: 300s # Maximum elapsed time for retry
+    sending_queue:
+      enabled: true # Enable sending queue
+      num_consumers: 10 # Number of consumers
+      queue_size: 5000 # Queue size
-  otlp/tempo: # OTLP 导出器,用于跟踪数据
-    endpoint: "tempo:4317" # tempo 的 OTLP gRPC 端点
+  otlp/tempo: # OTLP exporter for trace data
+    endpoint: "http://tempo:4317" # OTLP gRPC endpoint for tempo
     tls:
-      insecure: true # 开发环境禁用 TLS,生产环境需配置证书
+      insecure: true # TLS is disabled in the development environment and a certificate needs to be configured in the production environment.
+    compression: gzip # Enable compression to reduce network bandwidth
+    retry_on_failure:
+      enabled: true # Enable retry on failure
+      initial_interval: 1s # Initial interval for retry
+      max_interval: 30s # Maximum interval for retry
+      max_elapsed_time: 300s # Maximum elapsed time for retry
+    sending_queue:
+      enabled: true # Enable sending queue
+      num_consumers: 10 # Number of consumers
+      queue_size: 5000 # Queue size
-  prometheus: # Prometheus 导出器,用于指标数据
-    endpoint: "0.0.0.0:8889" # Prometheus 刮取端点
-    namespace: "rustfs" # 指标前缀
-    send_timestamps: true # 发送时间戳
-    # enable_open_metrics: true
+  prometheus: # Prometheus exporter for metrics data
+    endpoint: "0.0.0.0:8889" # Prometheus scraping endpoint
+    namespace: "metrics" # indicator prefix
+    send_timestamps: true # Send timestamp
+    metric_expiration: 5m # Metric expiration time
+    resource_to_telemetry_conversion:
+      enabled: true # Enable resource to telemetry conversion
-  otlphttp/loki: # Loki 导出器,用于日志数据
-    endpoint: "http://loki:3100/otlp/v1/logs"
+  otlphttp/loki: # Loki exporter for log data
+    endpoint: "http://loki:3100/otlp"
+    tls:
+      insecure: true
+    compression: gzip # Enable compression to reduce network bandwidth

 extensions:
   health_check:
     endpoint: 0.0.0.0:13133
   pprof:
     endpoint: 0.0.0.0:1888
   zpages:
     endpoint: 0.0.0.0:55679
 service:
-  extensions: [ health_check, pprof, zpages ] # 启用扩展
+  extensions: [ health_check, pprof, zpages ] # Enable extension
   pipelines:
     traces:
       receivers: [ otlp ]
-      processors: [ memory_limiter,batch ]
-      exporters: [ otlp/traces,otlp/tempo ]
+      processors: [ memory_limiter, batch ]
+      exporters: [ otlp/traces, otlp/tempo ]
     metrics:
       receivers: [ otlp ]
       processors: [ batch ]
       exporters: [ prometheus ]
     logs:
       receivers: [ otlp ]
-      processors: [ batch ]
+      processors: [ batch, transform/logs ]
       exporters: [ otlphttp/loki ]
   telemetry:
     logs:
-      level: "info" # Collector 日志级别
+      level: "debug" # Collector log level
+      encoding: "json" # Log encoding: console or json
     metrics:
-      level: "detailed" # 可以是 basic, normal, detailed
+      level: "detailed" # Can be basic, normal, detailed
+      readers:
+        - periodic:
+            exporter:
+              otlp:
+                protocol: http/protobuf
+                endpoint: http://otel-collector:4318
+        - pull:
+            exporter:
+              prometheus:
+                host: '0.0.0.0'
+                port: 8888
```
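
A change set this large in the collector config is worth validating before a restart; recent otelcol builds ship a `validate` subcommand (a sketch, assuming the same contrib image and mount path as the compose file above):

```bash
# Dry-run validation of the collector config; exits non-zero on error
docker run --rm \
  -v "$PWD/otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml:ro" \
  otel/opentelemetry-collector-contrib:latest \
  validate --config /etc/otelcol-contrib/config.yaml
```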
.docker/observability/prometheus-data/.gitignore (new file, 1 line)

```
*
```
```diff
@@ -14,17 +14,27 @@

 global:
   scrape_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
   evaluation_interval: 15s
+  external_labels:
+    cluster: 'rustfs-dev' # Label to identify the cluster
+    relica: '1' # Replica identifier

 scrape_configs:
-  - job_name: 'otel-collector'
+  - job_name: 'otel-collector-internal'
     static_configs:
       - targets: [ 'otel-collector:8888' ] # Scrape metrics from Collector
-  - job_name: 'otel-metrics'
     scrape_interval: 10s
+  - job_name: 'rustfs-app-metrics'
     static_configs:
       - targets: [ 'otel-collector:8889' ] # Application indicators
     scrape_interval: 15s
+    metric_relabel_configs:
+  - job_name: 'tempo'
+    static_configs:
+      - targets: [ 'tempo:3200' ] # Scrape metrics from Tempo
+  - job_name: 'jaeger'
+    static_configs:
+      - targets: [ 'jaeger:8888' ] # Jaeger admin port

 otlp:
   # Recommended attributes to be promoted to labels.
```
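
The reworked scrape config can be linted before restarting the container; `promtool` ships inside the prom/prometheus image used above (a minimal sketch):

```bash
# Lint the Prometheus configuration without starting the server
docker run --rm \
  -v "$PWD/prometheus.yml:/etc/prometheus/prometheus.yml:ro" \
  --entrypoint promtool \
  prom/prometheus:latest \
  check config /etc/prometheus/prometheus.yml
```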
```diff
@@ -18,7 +18,9 @@ distributor:
     otlp:
       protocols:
         grpc:
-          endpoint: "tempo:4317"
+          endpoint: "0.0.0.0:4317"
+        http:
+          endpoint: "0.0.0.0:4318"

 ingester:
   max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally
```
.github/s3tests/README.md (new file, 103 lines)

# S3 Compatibility Tests Configuration

This directory contains the configuration for running [Ceph S3 compatibility tests](https://github.com/ceph/s3-tests) against RustFS.

## Configuration File

The `s3tests.conf` file is based on the official `s3tests.conf.SAMPLE` from the ceph/s3-tests repository. It uses environment variable substitution via `envsubst` to configure the endpoint and credentials, as sketched below.

### Key Configuration Points

- **Host**: Set via the `${S3_HOST}` environment variable (e.g., `rustfs-single` for single-node, `lb` for multi-node)
- **Port**: 9000 (standard RustFS port)
- **Credentials**: Uses `${S3_ACCESS_KEY}` and `${S3_SECRET_KEY}` from the workflow environment
- **TLS**: Disabled (`is_secure = False`)
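
A minimal substitution check, assuming the credential values used elsewhere in this change (the values are examples, not requirements):

```bash
# Render the template with the current environment and spot-check the result
export S3_HOST=rustfs-single
export S3_ACCESS_KEY=rustfsadmin
export S3_SECRET_KEY=rustfsadmin
envsubst < .github/s3tests/s3tests.conf > /tmp/s3tests.conf
grep -E '^(host|port|access_key)' /tmp/s3tests.conf
```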

## Test Execution Strategy

### Network Connectivity Fix

Tests run inside a Docker container on the `rustfs-net` network, which allows them to resolve and connect to the RustFS container hostnames. This fixes the "Temporary failure in name resolution" error that occurred when tests ran on the GitHub runner host. Resolution can be verified with the one-liner below.
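
A quick check (a sketch; `busybox` stands in for any image that provides `nslookup`):

```bash
# Name resolution only works for containers attached to rustfs-net
docker run --rm --network rustfs-net busybox nslookup rustfs-single
```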

### Performance Optimizations

1. **Parallel Execution**: Uses `pytest-xdist` with `-n 4` to run tests in parallel across 4 workers
2. **Load Distribution**: Uses `--dist=loadgroup` to distribute test groups across workers
3. **Fail-Fast**: Uses `--maxfail=50` to stop after 50 failures, saving time on catastrophic failures (the combined invocation is sketched below)
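
Put together, those options amount to an invocation along these lines (a sketch; the paths follow the local example later in this README):

```bash
# 4 parallel workers, group-aware distribution, stop after 50 failures
S3TEST_CONF=/etc/s3tests.conf pytest -n 4 --dist=loadgroup --maxfail=50 \
  s3tests/functional/test_s3.py
```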

### Feature Filtering

Tests are filtered using pytest markers (`-m`) to skip features not yet supported by RustFS:

- `lifecycle` - Bucket lifecycle policies
- `versioning` - Object versioning
- `s3website` - Static website hosting
- `bucket_logging` - Bucket logging
- `encryption` / `sse_s3` - Server-side encryption
- `cloud_transition` / `cloud_restore` - Cloud storage transitions
- `lifecycle_expiration` / `lifecycle_transition` - Lifecycle operations

This filtering:

1. Reduces test execution time significantly (from over an hour to roughly 10-15 minutes)
2. Focuses on features RustFS currently supports
3. Avoids hundreds of expected failures (see the marker-expression example below)
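
The markers combine into a single `-m` expression, and dropping a clause re-enables that feature's tests. For example, once versioning is supported the filter simply loses one clause (illustrative only):

```bash
# Same filter as the CI run, minus "not versioning"
pytest -m "not lifecycle and not s3website and not bucket_logging and not encryption and not sse_s3" \
  s3tests/functional/test_s3.py
```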

## Running Tests Locally

### Single-Node Test

```bash
# Set credentials
export S3_ACCESS_KEY=rustfsadmin
export S3_SECRET_KEY=rustfsadmin

# Create the shared network (required by --network below)
docker network inspect rustfs-net >/dev/null 2>&1 || docker network create rustfs-net

# Start RustFS container
docker run -d --name rustfs-single \
  --network rustfs-net \
  -e RUSTFS_ADDRESS=0.0.0.0:9000 \
  -e RUSTFS_ACCESS_KEY=$S3_ACCESS_KEY \
  -e RUSTFS_SECRET_KEY=$S3_SECRET_KEY \
  -e RUSTFS_VOLUMES="/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3" \
  rustfs-ci

# Generate config
export S3_HOST=rustfs-single
envsubst < .github/s3tests/s3tests.conf > /tmp/s3tests.conf

# Run tests
docker run --rm \
  --network rustfs-net \
  -v /tmp/s3tests.conf:/etc/s3tests.conf:ro \
  python:3.12-slim \
  bash -c '
    apt-get update -qq && apt-get install -y -qq git
    git clone --depth 1 https://github.com/ceph/s3-tests.git /s3-tests
    cd /s3-tests
    pip install -q -r requirements.txt pytest-xdist
    S3TEST_CONF=/etc/s3tests.conf pytest -v -n 4 \
      s3tests/functional/test_s3.py \
      -m "not lifecycle and not versioning and not s3website and not bucket_logging and not encryption and not sse_s3"
  '
```

## Test Results Interpretation

- **PASSED**: Test succeeded; the feature works correctly
- **FAILED**: Test failed; indicates a potential bug or incompatibility
- **ERROR**: Test setup failed (e.g., network issues, missing dependencies)
- **SKIPPED**: Test skipped due to marker filtering

## Adding New Feature Support

When adding support for a new S3 feature to RustFS:

1. Remove the corresponding marker from the filter in `.github/workflows/e2e-s3tests.yml`
2. Run the tests to verify compatibility
3. Fix any failing tests
4. Update this README to reflect the newly supported feature

## References

- [Ceph S3 Tests Repository](https://github.com/ceph/s3-tests)
- [S3 API Compatibility](https://docs.aws.amazon.com/AmazonS3/latest/API/)
- [pytest-xdist Documentation](https://pytest-xdist.readthedocs.io/)
.github/s3tests/s3tests.conf (new file, 185 lines)

```ini
# RustFS s3-tests configuration
# Based on: https://github.com/ceph/s3-tests/blob/master/s3tests.conf.SAMPLE
#
# Usage:
#   Single-node: S3_HOST=rustfs-single envsubst < s3tests.conf > /tmp/s3tests.conf
#   Multi-node:  S3_HOST=lb envsubst < s3tests.conf > /tmp/s3tests.conf

[DEFAULT]
## this section is just used for host, port and bucket_prefix

# host set for RustFS - will be substituted via envsubst
host = ${S3_HOST}

# port for RustFS
port = 9000

## say "False" to disable TLS
is_secure = False

## say "False" to disable SSL Verify
ssl_verify = False

[fixtures]
## all the buckets created will start with this prefix;
## {random} will be filled with random characters to pad
## the prefix to 30 characters long, and avoid collisions
bucket prefix = rustfs-{random}-

# all the iam account resources (users, roles, etc) created
# will start with this name prefix
iam name prefix = s3-tests-

# all the iam account resources (users, roles, etc) created
# will start with this path prefix
iam path prefix = /s3-tests/

[s3 main]
# main display_name
display_name = RustFS Tester

# main user_id
user_id = rustfsadmin

# main email
email = tester@rustfs.local

# zonegroup api_name for bucket location
api_name = default

## main AWS access key
access_key = ${S3_ACCESS_KEY}

## main AWS secret key
secret_key = ${S3_SECRET_KEY}

## replace with key id obtained when secret is created, or delete if KMS not tested
#kms_keyid = 01234567-89ab-cdef-0123-456789abcdef

## Storage classes
#storage_classes = "LUKEWARM, FROZEN"

## Lifecycle debug interval (default: 10)
#lc_debug_interval = 20

## Restore debug interval (default: 100)
#rgw_restore_debug_interval = 60
#rgw_restore_processor_period = 60

[s3 alt]
# alt display_name
display_name = RustFS Alt Tester

## alt email
email = alt@rustfs.local

# alt user_id
user_id = rustfsalt

# alt AWS access key (must be different from s3 main for many tests)
access_key = ${S3_ALT_ACCESS_KEY}

# alt AWS secret key
secret_key = ${S3_ALT_SECRET_KEY}

#[s3 cloud]
## to run the testcases with "cloud_transition" for transition
## and "cloud_restore" for restore attribute.
## Note: the waiting time may have to tweaked depending on
## the I/O latency to the cloud endpoint.

## host set for cloud endpoint
# host = localhost

## port set for cloud endpoint
# port = 8001

## say "False" to disable TLS
# is_secure = False

## cloud endpoint credentials
# access_key = 0555b35654ad1656d804
# secret_key = h7GhxuBLTrlhVUyxSPUKUV8r/2EI4ngqJxD7iBdBYLhwluN30JaT3Q==

## storage class configured as cloud tier on local rgw server
# cloud_storage_class = CLOUDTIER

## Below are optional -

## Above configured cloud storage class config options
# retain_head_object = false
# allow_read_through = false # change it to enable read_through
# read_through_restore_days = 2
# target_storage_class = Target_SC
# target_path = cloud-bucket

## another regular storage class to test multiple transition rules,
# storage_class = S1

[s3 tenant]
# tenant display_name
display_name = RustFS Tenant Tester

# tenant user_id
user_id = rustfstenant

# tenant AWS access key
access_key = ${S3_ACCESS_KEY}

# tenant AWS secret key
secret_key = ${S3_SECRET_KEY}

# tenant email
email = tenant@rustfs.local

# tenant name
tenant = testx

# following section needs to be added for all sts-tests
[iam]
# used for iam operations in sts-tests
# email
email = s3@rustfs.local

# user_id
user_id = rustfsiam

# access_key
access_key = ${S3_ACCESS_KEY}

# secret_key
secret_key = ${S3_SECRET_KEY}

# display_name
display_name = RustFS IAM User

# iam account root user for iam_account tests
[iam root]
access_key = ${S3_ACCESS_KEY}
secret_key = ${S3_SECRET_KEY}
user_id = RGW11111111111111111
email = account1@rustfs.local

# iam account root user in a different account than [iam root]
[iam alt root]
access_key = ${S3_ACCESS_KEY}
secret_key = ${S3_SECRET_KEY}
user_id = RGW22222222222222222
email = account2@rustfs.local

# following section needs to be added when you want to run Assume Role With Webidentity test
[webidentity]
# used for assume role with web identity test in sts-tests
# all parameters will be obtained from ceph/qa/tasks/keycloak.py
#token=<access_token>

#aud=<obtained after introspecting token>

#sub=<obtained after introspecting token>

#azp=<obtained after introspecting token>

#user_token=<access token for a user, with attribute Department=[Engineering, Marketing>]

#thumbprint=<obtained from x509 certificate>

#KC_REALM=<name of the realm>
```
.github/workflows/audit.yml (8 lines changed)

```diff
@@ -40,11 +40,11 @@ env:
 jobs:
   security-audit:
     name: Security Audit
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     timeout-minutes: 15
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6

       - name: Install cargo-audit
         uses: taiki-e/install-action@v2
@@ -65,14 +65,14 @@ jobs:

   dependency-review:
     name: Dependency Review
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     if: github.event_name == 'pull_request'
     permissions:
       contents: read
       pull-requests: write
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6

       - name: Dependency Review
         uses: actions/dependency-review-action@v4
```
.github/workflows/build.yml (32 lines changed)

```diff
@@ -83,7 +83,7 @@ jobs:
   # Build strategy check - determine build type based on trigger
   build-check:
     name: Build Strategy Check
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     outputs:
       should_build: ${{ steps.check.outputs.should_build }}
       build_type: ${{ steps.check.outputs.build_type }}
@@ -92,7 +92,7 @@ jobs:
       is_prerelease: ${{ steps.check.outputs.is_prerelease }}
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
         with:
           fetch-depth: 0

@@ -167,19 +167,19 @@ jobs:
       matrix:
         include:
           # Linux builds
-          - os: ubuntu-latest
+          - os: ubicloud-standard-2
             target: x86_64-unknown-linux-musl
             cross: false
             platform: linux
-          - os: ubuntu-latest
+          - os: ubicloud-standard-2
             target: aarch64-unknown-linux-musl
             cross: true
             platform: linux
-          - os: ubuntu-latest
+          - os: ubicloud-standard-2
             target: x86_64-unknown-linux-gnu
             cross: false
             platform: linux
-          - os: ubuntu-latest
+          - os: ubicloud-standard-2
             target: aarch64-unknown-linux-gnu
             cross: true
             platform: linux
@@ -203,7 +203,7 @@ jobs:
           # platform: windows
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
         with:
           fetch-depth: 0

@@ -454,7 +454,7 @@ jobs:
           OSS_ACCESS_KEY_ID: ${{ secrets.ALICLOUDOSS_KEY_ID }}
           OSS_ACCESS_KEY_SECRET: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
           OSS_REGION: cn-beijing
-          OSS_ENDPOINT: https://oss-cn-beijing.aliyuncs.com
+          OSS_ENDPOINT: https://oss-accelerate.aliyuncs.com
         shell: bash
         run: |
           BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
@@ -532,7 +532,7 @@ jobs:
     name: Build Summary
     needs: [ build-check, build-rustfs ]
     if: always() && needs.build-check.outputs.should_build == 'true'
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     steps:
       - name: Build completion summary
         shell: bash
@@ -584,7 +584,7 @@ jobs:
     name: Create GitHub Release
     needs: [ build-check, build-rustfs ]
     if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     permissions:
       contents: write
     outputs:
@@ -592,7 +592,7 @@ jobs:
       release_url: ${{ steps.create.outputs.release_url }}
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
         with:
           fetch-depth: 0

@@ -670,13 +670,13 @@ jobs:
     name: Upload Release Assets
     needs: [ build-check, build-rustfs, create-release ]
     if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     permissions:
       contents: write
       actions: read
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6

       - name: Download all build artifacts
         uses: actions/download-artifact@v5
@@ -751,7 +751,7 @@ jobs:
     name: Update Latest Version
     needs: [ build-check, upload-release-assets ]
     if: startsWith(github.ref, 'refs/tags/')
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     steps:
       - name: Update latest.json
         env:
@@ -801,12 +801,12 @@ jobs:
     name: Publish Release
     needs: [ build-check, create-release, upload-release-assets ]
     if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     permissions:
       contents: write
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6

       - name: Update release notes and publish
         env:
```
.github/workflows/ci.yml (37 lines changed)

```diff
@@ -4,7 +4,7 @@
 # you may not use this file except in compliance with the License.
 # You may obtain a copy of the License at
 #
-#     http://www.apache.org/licenses/LICENSE-2.0
+# http://www.apache.org/licenses/LICENSE-2.0
 #
 # Unless required by applicable law or agreed to in writing, software
 # distributed under the License is distributed on an "AS IS" BASIS,
@@ -62,17 +62,23 @@ on:
 permissions:
   contents: read

+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 env:
   CARGO_TERM_COLOR: always
   RUST_BACKTRACE: 1
+  CARGO_BUILD_JOBS: 2

 jobs:

   skip-check:
     name: Skip Duplicate Actions
     permissions:
       actions: write
       contents: read
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     outputs:
       should_skip: ${{ steps.skip_check.outputs.should_skip }}
     steps:
@@ -83,15 +89,13 @@ jobs:
           concurrent_skipping: "same_content_newer"
           cancel_others: true
           paths_ignore: '["*.md", "docs/**", "deploy/**"]'
           # Never skip release events and tag pushes
           do_not_skip: '["workflow_dispatch", "schedule", "merge_group", "release", "push"]'

   typos:
     name: Typos
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     steps:
-      - uses: actions/checkout@v5
+      - uses: actions/checkout@v6
       - uses: dtolnay/rust-toolchain@stable
       - name: Typos check with custom config file
         uses: crate-ci/typos@master
@@ -100,13 +104,11 @@ jobs:
     name: Test and Lint
     needs: skip-check
     if: needs.skip-check.outputs.should_skip != 'true'
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-4
     timeout-minutes: 60
     steps:
-      - name: Delete huge unnecessary tools folder
-        run: rm -rf /opt/hostedtoolcache
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6

       - name: Setup Rust environment
         uses: ./.github/actions/setup
@@ -116,6 +118,9 @@ jobs:
           github-token: ${{ secrets.GITHUB_TOKEN }}
           cache-save-if: ${{ github.ref == 'refs/heads/main' }}

+      - name: Install cargo-nextest
+        uses: taiki-e/install-action@nextest
+
       - name: Run tests
         run: |
           cargo nextest run --all --exclude e2e_test
@@ -131,11 +136,16 @@ jobs:
     name: End-to-End Tests
     needs: skip-check
     if: needs.skip-check.outputs.should_skip != 'true'
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     timeout-minutes: 30
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6

+      - name: Clean up previous test run
+        run: |
+          rm -rf /tmp/rustfs
+          rm -f /tmp/rustfs.log
+
       - name: Setup Rust environment
         uses: ./.github/actions/setup
@@ -155,7 +165,8 @@ jobs:
       - name: Build debug binary
         run: |
           touch rustfs/build.rs
-          cargo build -p rustfs --bins
+          # Limit concurrency to prevent OOM
+          cargo build -p rustfs --bins --jobs 2

       - name: Run end-to-end tests
         run: |
```
.github/workflows/docker.yml (34 lines changed)

```diff
@@ -72,7 +72,7 @@ jobs:
   # Check if we should build Docker images
   build-check:
     name: Docker Build Check
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     outputs:
       should_build: ${{ steps.check.outputs.should_build }}
       should_push: ${{ steps.check.outputs.should_push }}
@@ -83,7 +83,7 @@ jobs:
       create_latest: ${{ steps.check.outputs.create_latest }}
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
         with:
           fetch-depth: 0
           # For workflow_run events, checkout the specific commit that triggered the workflow
@@ -162,11 +162,11 @@ jobs:
           if [[ "$version" == *"alpha"* ]] || [[ "$version" == *"beta"* ]] || [[ "$version" == *"rc"* ]]; then
             build_type="prerelease"
             is_prerelease=true
-            # TODO: 临时修改 - 当前允许 alpha 版本也创建 latest 标签
-            # 等版本稳定后,需要移除下面这行,恢复原有逻辑(只有稳定版本才创建 latest)
+            # TODO: Temporary change - currently allows alpha versions to also create latest tags
+            # After the version is stable, you need to remove the following line and restore the original logic (latest is created only for stable versions)
             if [[ "$version" == *"alpha"* ]]; then
               create_latest=true
-              echo "🧪 Building Docker image for prerelease: $version (临时允许创建 latest 标签)"
+              echo "🧪 Building Docker image for prerelease: $version (temporarily allowing creation of latest tag)"
             else
               echo "🧪 Building Docker image for prerelease: $version"
             fi
@@ -215,11 +215,11 @@ jobs:
         v*alpha*|v*beta*|v*rc*|*alpha*|*beta*|*rc*)
           build_type="prerelease"
           is_prerelease=true
-          # TODO: 临时修改 - 当前允许 alpha 版本也创建 latest 标签
-          # 等版本稳定后,需要移除下面的 if 块,恢复原有逻辑
+          # TODO: Temporary change - currently allows alpha versions to also create latest tags
+          # After the version is stable, you need to remove the if block below and restore the original logic.
           if [[ "$input_version" == *"alpha"* ]]; then
             create_latest=true
-            echo "🧪 Building with prerelease version: $input_version (临时允许创建 latest 标签)"
+            echo "🧪 Building with prerelease version: $input_version (temporarily allowing creation of latest tag)"
           else
             echo "🧪 Building with prerelease version: $input_version"
           fi
@@ -264,11 +264,11 @@ jobs:
     name: Build Docker Images
     needs: build-check
     if: needs.build-check.outputs.should_build == 'true'
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     timeout-minutes: 60
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6

       - name: Login to Docker Hub
         uses: docker/login-action@v3
@@ -330,9 +330,9 @@ jobs:

           # Add channel tags for prereleases and latest for stable
           if [[ "$CREATE_LATEST" == "true" ]]; then
-            # TODO: 临时修改 - 当前 alpha 版本也会创建 latest 标签
-            # 等版本稳定后,这里的逻辑保持不变,但上游的 CREATE_LATEST 设置需要恢复
-            # Stable release (以及临时的 alpha 版本)
+            # TODO: Temporary change - the current alpha version will also create the latest tag
+            # After the version is stabilized, the logic here remains unchanged, but the upstream CREATE_LATEST setting needs to be restored.
+            # Stable release (and temporary alpha versions)
             TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:latest"
           elif [[ "$BUILD_TYPE" == "prerelease" ]]; then
             # Prerelease channel tags (alpha, beta, rc)
@@ -404,7 +404,7 @@ jobs:
     name: Docker Build Summary
     needs: [ build-check, build-docker ]
     if: always() && needs.build-check.outputs.should_build == 'true'
-    runs-on: ubuntu-latest
+    runs-on: ubicloud-standard-2
     steps:
       - name: Docker build completion summary
         run: |
@@ -429,10 +429,10 @@ jobs:
           "prerelease")
             echo "🧪 Prerelease Docker image has been built with ${VERSION} tags"
             echo "⚠️ This is a prerelease image - use with caution"
-            # TODO: 临时修改 - alpha 版本当前会创建 latest 标签
-            # 等版本稳定后,需要恢复下面的提示信息
+            # TODO: Temporary change - alpha versions currently create the latest tag
+            # After the version is stable, you need to restore the following prompt information
             if [[ "$VERSION" == *"alpha"* ]] && [[ "$CREATE_LATEST" == "true" ]]; then
-              echo "🏷️ Latest tag has been created for alpha version (临时措施)"
+              echo "🏷️ Latest tag has been created for alpha version (temporary measures)"
             else
               echo "🚫 Latest tag NOT created for prerelease"
             fi
```
.github/workflows/e2e-mint.yml (new file, 260 lines)

```yaml
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: e2e-mint

on:
  push:
    branches: [ main ]
    paths:
      - ".github/workflows/e2e-mint.yml"
      - "Dockerfile.source"
      - "rustfs/**"
      - "crates/**"
  workflow_dispatch:
    inputs:
      run-multi:
        description: "Run multi-node Mint as well"
        required: false
        default: "false"

env:
  ACCESS_KEY: rustfsadmin
  SECRET_KEY: rustfsadmin
  RUST_LOG: info
  PLATFORM: linux/amd64

jobs:
  mint-single:
    runs-on: ubicloud-standard-2
    timeout-minutes: 40
    steps:
      - name: Checkout
        uses: actions/checkout@v6

      - name: Enable buildx
        uses: docker/setup-buildx-action@v3

      - name: Build RustFS image (source)
        run: |
          DOCKER_BUILDKIT=1 docker buildx build --load \
            --platform ${PLATFORM} \
            -t rustfs-ci \
            -f Dockerfile.source .

      - name: Create network
        run: |
          docker network inspect rustfs-net >/dev/null 2>&1 || docker network create rustfs-net

      - name: Remove existing rustfs-single (if any)
        run: docker rm -f rustfs-single >/dev/null 2>&1 || true

      - name: Start single RustFS
        run: |
          docker run -d --name rustfs-single \
            --network rustfs-net \
            -e RUSTFS_ADDRESS=0.0.0.0:9000 \
            -e RUSTFS_ACCESS_KEY=$ACCESS_KEY \
            -e RUSTFS_SECRET_KEY=$SECRET_KEY \
            -e RUSTFS_VOLUMES="/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3" \
            -v /tmp/rustfs-single:/data \
            rustfs-ci

      - name: Wait for RustFS ready
        run: |
          for i in {1..30}; do
            if docker exec rustfs-single curl -sf http://localhost:9000/health >/dev/null; then
              exit 0
            fi
            sleep 2
          done
          echo "RustFS did not become ready" >&2
          docker logs rustfs-single || true
          exit 1

      - name: Run Mint (single, S3-only)
        run: |
          mkdir -p artifacts/mint-single
          docker run --rm --network rustfs-net \
            --platform ${PLATFORM} \
            -e SERVER_ENDPOINT=rustfs-single:9000 \
            -e ACCESS_KEY=$ACCESS_KEY \
            -e SECRET_KEY=$SECRET_KEY \
            -e ENABLE_HTTPS=0 \
            -e SERVER_REGION=us-east-1 \
            -e RUN_ON_FAIL=1 \
            -e MINT_MODE=core \
            -v ${GITHUB_WORKSPACE}/artifacts/mint-single:/mint/log \
            --entrypoint /mint/mint.sh \
            minio/mint:edge \
            awscli aws-sdk-go aws-sdk-java-v2 aws-sdk-php aws-sdk-ruby s3cmd s3select

      - name: Collect RustFS logs
        run: |
          mkdir -p artifacts/rustfs-single
          docker logs rustfs-single > artifacts/rustfs-single/rustfs.log || true

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          name: mint-single
          path: artifacts/**

  mint-multi:
    if: github.event_name == 'workflow_dispatch' && github.event.inputs.run-multi == 'true'
    needs: mint-single
    runs-on: ubicloud-standard-2
    timeout-minutes: 60
    steps:
      - name: Checkout
        uses: actions/checkout@v6

      - name: Enable buildx
        uses: docker/setup-buildx-action@v3

      - name: Build RustFS image (source)
        run: |
          DOCKER_BUILDKIT=1 docker buildx build --load \
            --platform ${PLATFORM} \
            -t rustfs-ci \
            -f Dockerfile.source .

      - name: Prepare cluster compose
        run: |
          cat > compose.yml <<'EOF'
          version: '3.8'
          services:
            rustfs1:
              image: rustfs-ci
              hostname: rustfs1
              networks: [rustfs-net]
              environment:
                - RUSTFS_ADDRESS=0.0.0.0:9000
                - RUSTFS_ACCESS_KEY=${ACCESS_KEY}
                - RUSTFS_SECRET_KEY=${SECRET_KEY}
                - RUSTFS_VOLUMES=/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3
              volumes:
                - rustfs1-data:/data
            rustfs2:
              image: rustfs-ci
              hostname: rustfs2
              networks: [rustfs-net]
              environment:
                - RUSTFS_ADDRESS=0.0.0.0:9000
                - RUSTFS_ACCESS_KEY=${ACCESS_KEY}
                - RUSTFS_SECRET_KEY=${SECRET_KEY}
                - RUSTFS_VOLUMES=/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3
              volumes:
                - rustfs2-data:/data
            rustfs3:
              image: rustfs-ci
              hostname: rustfs3
              networks: [rustfs-net]
              environment:
                - RUSTFS_ADDRESS=0.0.0.0:9000
                - RUSTFS_ACCESS_KEY=${ACCESS_KEY}
                - RUSTFS_SECRET_KEY=${SECRET_KEY}
                - RUSTFS_VOLUMES=/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3
              volumes:
                - rustfs3-data:/data
            rustfs4:
              image: rustfs-ci
              hostname: rustfs4
              networks: [rustfs-net]
              environment:
                - RUSTFS_ADDRESS=0.0.0.0:9000
                - RUSTFS_ACCESS_KEY=${ACCESS_KEY}
                - RUSTFS_SECRET_KEY=${SECRET_KEY}
                - RUSTFS_VOLUMES=/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3
              volumes:
                - rustfs4-data:/data
            lb:
              image: haproxy:2.9
              hostname: lb
              networks: [rustfs-net]
              ports:
                - "9000:9000"
              volumes:
                - ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
          networks:
            rustfs-net:
              name: rustfs-net
          volumes:
            rustfs1-data:
            rustfs2-data:
            rustfs3-data:
            rustfs4-data:
          EOF

          cat > haproxy.cfg <<'EOF'
          defaults
            mode http
            timeout connect 5s
            timeout client 30s
            timeout server 30s

          frontend fe_s3
            bind *:9000
            default_backend be_s3

          backend be_s3
            balance roundrobin
            server s1 rustfs1:9000 check
            server s2 rustfs2:9000 check
            server s3 rustfs3:9000 check
            server s4 rustfs4:9000 check
          EOF

      - name: Launch cluster
        run: docker compose -f compose.yml up -d

      - name: Wait for LB ready
        run: |
          for i in {1..60}; do
            if docker run --rm --network rustfs-net curlimages/curl -sf http://lb:9000/health >/dev/null; then
              exit 0
            fi
            sleep 2
          done
          echo "LB or backend not ready" >&2
          docker compose -f compose.yml logs --tail=200 || true
          exit 1

      - name: Run Mint (multi, S3-only)
        run: |
          mkdir -p artifacts/mint-multi
          docker run --rm --network rustfs-net \
            --platform ${PLATFORM} \
            -e SERVER_ENDPOINT=lb:9000 \
            -e ACCESS_KEY=$ACCESS_KEY \
            -e SECRET_KEY=$SECRET_KEY \
            -e ENABLE_HTTPS=0 \
            -e SERVER_REGION=us-east-1 \
            -e RUN_ON_FAIL=1 \
            -e MINT_MODE=core \
            -v ${GITHUB_WORKSPACE}/artifacts/mint-multi:/mint/log \
            --entrypoint /mint/mint.sh \
            minio/mint:edge \
            awscli aws-sdk-go aws-sdk-java-v2 aws-sdk-php aws-sdk-ruby s3cmd s3select

      - name: Collect logs
        run: |
          mkdir -p artifacts/cluster
          docker compose -f compose.yml logs --no-color > artifacts/cluster/cluster.log || true

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          name: mint-multi
          path: artifacts/**
```
422
.github/workflows/e2e-s3tests.yml
vendored
Normal file
422
.github/workflows/e2e-s3tests.yml
vendored
Normal file
@@ -0,0 +1,422 @@
|
||||
# Copyright 2024 RustFS Team
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
name: e2e-s3tests
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
test-mode:
|
||||
description: "Test mode to run"
|
||||
required: true
|
||||
type: choice
|
||||
default: "single"
|
||||
options:
|
||||
- single
|
||||
- multi
|
||||
xdist:
|
||||
description: "Enable pytest-xdist (parallel). '0' to disable."
|
||||
required: false
|
||||
default: "0"
|
||||
maxfail:
|
||||
description: "Stop after N failures (debug friendly)"
|
||||
required: false
|
||||
default: "1"
|
||||
markexpr:
|
||||
description: "pytest -m expression (feature filters)"
|
||||
required: false
|
||||
default: "not lifecycle and not versioning and not s3website and not bucket_logging and not encryption"
|
||||
|
||||
env:
|
||||
# main user
|
||||
S3_ACCESS_KEY: rustfsadmin
|
||||
S3_SECRET_KEY: rustfsadmin
|
||||
# alt user (must be different from main for many s3-tests)
|
||||
S3_ALT_ACCESS_KEY: rustfsalt
|
||||
S3_ALT_SECRET_KEY: rustfsalt
|
||||
|
||||
S3_REGION: us-east-1
|
||||
|
||||
RUST_LOG: info
|
||||
PLATFORM: linux/amd64
|
||||
|
||||
defaults:
|
||||
run:
|
||||
shell: bash
|
||||
|
||||
jobs:
|
||||
s3tests-single:
|
||||
if: github.event.inputs.test-mode == 'single'
|
||||
runs-on: ubicloud-standard-2
|
||||
timeout-minutes: 120
|
||||
steps:
|
||||
- uses: actions/checkout@v6
|
||||
|
||||
- name: Enable buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Build RustFS image (source, cached)
|
||||
run: |
|
||||
DOCKER_BUILDKIT=1 docker buildx build --load \
|
||||
--platform ${PLATFORM} \
|
||||
--cache-from type=gha \
|
||||
--cache-to type=gha,mode=max \
|
||||
-t rustfs-ci \
|
||||
-f Dockerfile.source .
|
||||
|
||||
- name: Create network
|
||||
run: docker network inspect rustfs-net >/dev/null 2>&1 || docker network create rustfs-net
|
||||
|
||||
- name: Remove existing rustfs-single (if any)
|
||||
run: docker rm -f rustfs-single >/dev/null 2>&1 || true
|
||||
|
||||
- name: Start single RustFS
|
||||
run: |
|
||||
docker run -d --name rustfs-single \
|
||||
--network rustfs-net \
|
||||
-p 9000:9000 \
|
||||
-e RUSTFS_ADDRESS=0.0.0.0:9000 \
|
||||
-e RUSTFS_ACCESS_KEY=$S3_ACCESS_KEY \
|
||||
-e RUSTFS_SECRET_KEY=$S3_SECRET_KEY \
|
||||
-e RUSTFS_VOLUMES="/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3" \
|
||||
-v /tmp/rustfs-single:/data \
|
||||
rustfs-ci
|
||||
|
||||
- name: Wait for RustFS ready
|
||||
run: |
|
||||
for i in {1..60}; do
|
||||
if curl -sf http://127.0.0.1:9000/health >/dev/null 2>&1; then
|
||||
echo "RustFS is ready"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
if [ "$(docker inspect -f '{{.State.Running}}' rustfs-single 2>/dev/null)" != "true" ]; then
|
||||
echo "RustFS container not running" >&2
|
||||
docker logs rustfs-single || true
|
||||
exit 1
|
||||
fi
|
||||
|
||||
sleep 2
|
||||
done
|
||||
|
||||
echo "Health check timed out" >&2
|
||||
docker logs rustfs-single || true
|
||||
exit 1
|
||||
|
||||
- name: Generate s3tests config
|
||||
run: |
|
||||
export S3_HOST=127.0.0.1
|
||||
envsubst < .github/s3tests/s3tests.conf > s3tests.conf
|
||||
|
||||
- name: Provision s3-tests alt user (required by suite)
|
||||
run: |
|
||||
python3 -m pip install --user --upgrade pip awscurl
|
||||
export PATH="$HOME/.local/bin:$PATH"
|
||||
|
||||
# Admin API requires AWS SigV4 signing. awscurl is used by RustFS codebase as well.
|
||||
awscurl \
|
||||
--service s3 \
|
||||
--region "${S3_REGION}" \
|
||||
--access_key "${S3_ACCESS_KEY}" \
|
||||
--secret_key "${S3_SECRET_KEY}" \
|
||||
-X PUT \
|
||||
-H 'Content-Type: application/json' \
|
||||
-d '{"secretKey":"'"${S3_ALT_SECRET_KEY}"'","status":"enabled","policy":"readwrite"}' \
|
||||
"http://127.0.0.1:9000/rustfs/admin/v3/add-user?accessKey=${S3_ALT_ACCESS_KEY}"
|
||||
|
||||
# Explicitly attach built-in policy via policy mapping.
|
||||
# s3-tests relies on alt client being able to ListBuckets during setup cleanup.
|
||||
awscurl \
|
||||
--service s3 \
|
||||
--region "${S3_REGION}" \
|
||||
--access_key "${S3_ACCESS_KEY}" \
|
||||
--secret_key "${S3_SECRET_KEY}" \
|
||||
-X PUT \
|
||||
"http://127.0.0.1:9000/rustfs/admin/v3/set-user-or-group-policy?policyName=readwrite&userOrGroup=${S3_ALT_ACCESS_KEY}&isGroup=false"
|
||||
|
||||
# Sanity check: alt user can list buckets (should not be AccessDenied).
|
||||
awscurl \
|
||||
--service s3 \
|
||||
--region "${S3_REGION}" \
|
||||
--access_key "${S3_ALT_ACCESS_KEY}" \
|
||||
--secret_key "${S3_ALT_SECRET_KEY}" \
|
||||
-X GET \
|
||||
"http://127.0.0.1:9000/" >/dev/null
|
||||
|
||||
- name: Prepare s3-tests
|
||||
run: |
|
||||
python3 -m pip install --user --upgrade pip tox
|
||||
export PATH="$HOME/.local/bin:$PATH"
|
||||
git clone --depth 1 https://github.com/ceph/s3-tests.git s3-tests
|
||||
|
||||
- name: Run ceph s3-tests (debug friendly)
|
||||
run: |
|
||||
export PATH="$HOME/.local/bin:$PATH"
|
||||
mkdir -p artifacts/s3tests-single
|
||||
|
||||
cd s3-tests
|
||||
|
||||
set -o pipefail
|
||||
|
||||
MAXFAIL="${{ github.event.inputs.maxfail }}"
|
||||
if [ -z "$MAXFAIL" ]; then MAXFAIL="1"; fi
|
||||
|
||||
MARKEXPR="${{ github.event.inputs.markexpr }}"
|
||||
if [ -z "$MARKEXPR" ]; then MARKEXPR="not lifecycle and not versioning and not s3website and not bucket_logging and not encryption"; fi
|
||||
|
||||
XDIST="${{ github.event.inputs.xdist }}"
|
||||
if [ -z "$XDIST" ]; then XDIST="0"; fi
|
||||
XDIST_ARGS=""
|
||||
if [ "$XDIST" != "0" ]; then
|
||||
# Add pytest-xdist to requirements.txt so tox installs it inside
|
||||
# its virtualenv. Installing outside tox does NOT work.
|
||||
echo "pytest-xdist" >> requirements.txt
|
||||
XDIST_ARGS="-n $XDIST --dist=loadgroup"
|
||||
fi
|
||||
|
||||
# Run tests from s3tests/functional (boto2+boto3 combined directory).
|
||||
S3TEST_CONF=${GITHUB_WORKSPACE}/s3tests.conf \
|
||||
tox -- \
|
||||
-vv -ra --showlocals --tb=long \
|
||||
--maxfail="$MAXFAIL" \
|
||||
--junitxml=${GITHUB_WORKSPACE}/artifacts/s3tests-single/junit.xml \
|
||||
$XDIST_ARGS \
|
||||
s3tests/functional/test_s3.py \
|
||||
-m "$MARKEXPR" \
|
||||
2>&1 | tee ${GITHUB_WORKSPACE}/artifacts/s3tests-single/pytest.log

      - name: Collect RustFS logs
        if: always()
        run: |
          mkdir -p artifacts/rustfs-single
          docker logs rustfs-single > artifacts/rustfs-single/rustfs.log 2>&1 || true
          docker inspect rustfs-single > artifacts/rustfs-single/inspect.json || true

      - name: Upload artifacts
        if: always() && env.ACT != 'true'
        uses: actions/upload-artifact@v4
        with:
          name: s3tests-single
          path: artifacts/**

  s3tests-multi:
    if: github.event_name == 'workflow_dispatch' && github.event.inputs.test-mode == 'multi'
    runs-on: ubicloud-standard-2
    timeout-minutes: 150
    steps:
      - uses: actions/checkout@v6

      - name: Enable buildx
        uses: docker/setup-buildx-action@v3

      - name: Build RustFS image (source, cached)
        run: |
          DOCKER_BUILDKIT=1 docker buildx build --load \
            --platform ${PLATFORM} \
            --cache-from type=gha \
            --cache-to type=gha,mode=max \
            -t rustfs-ci \
            -f Dockerfile.source .

      - name: Prepare cluster compose
        run: |
          cat > compose.yml <<'EOF'
          services:
            rustfs1:
              image: rustfs-ci
              hostname: rustfs1
              networks: [rustfs-net]
              environment:
                RUSTFS_ADDRESS: "0.0.0.0:9000"
                RUSTFS_ACCESS_KEY: ${S3_ACCESS_KEY}
                RUSTFS_SECRET_KEY: ${S3_SECRET_KEY}
                RUSTFS_VOLUMES: "/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3"
              volumes:
                - rustfs1-data:/data
            rustfs2:
              image: rustfs-ci
              hostname: rustfs2
              networks: [rustfs-net]
              environment:
                RUSTFS_ADDRESS: "0.0.0.0:9000"
                RUSTFS_ACCESS_KEY: ${S3_ACCESS_KEY}
                RUSTFS_SECRET_KEY: ${S3_SECRET_KEY}
                RUSTFS_VOLUMES: "/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3"
              volumes:
                - rustfs2-data:/data
            rustfs3:
              image: rustfs-ci
              hostname: rustfs3
              networks: [rustfs-net]
              environment:
                RUSTFS_ADDRESS: "0.0.0.0:9000"
                RUSTFS_ACCESS_KEY: ${S3_ACCESS_KEY}
                RUSTFS_SECRET_KEY: ${S3_SECRET_KEY}
                RUSTFS_VOLUMES: "/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3"
              volumes:
                - rustfs3-data:/data
            rustfs4:
              image: rustfs-ci
              hostname: rustfs4
              networks: [rustfs-net]
              environment:
                RUSTFS_ADDRESS: "0.0.0.0:9000"
                RUSTFS_ACCESS_KEY: ${S3_ACCESS_KEY}
                RUSTFS_SECRET_KEY: ${S3_SECRET_KEY}
                RUSTFS_VOLUMES: "/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3"
              volumes:
                - rustfs4-data:/data
            lb:
              image: haproxy:2.9
              hostname: lb
              networks: [rustfs-net]
              ports:
                - "9000:9000"
              volumes:
                - ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
          networks:
            rustfs-net:
              name: rustfs-net
          volumes:
            rustfs1-data:
            rustfs2-data:
            rustfs3-data:
            rustfs4-data:
          EOF

          cat > haproxy.cfg <<'EOF'
          defaults
            mode http
            timeout connect 5s
            timeout client 30s
            timeout server 30s

          frontend fe_s3
            bind *:9000
            default_backend be_s3

          backend be_s3
            balance roundrobin
            server s1 rustfs1:9000 check
            server s2 rustfs2:9000 check
            server s3 rustfs3:9000 check
            server s4 rustfs4:9000 check
          EOF
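
          # Optional sanity check (a sketch, not part of CI; assumes the stock
          # haproxy image, whose binary supports `-c` config-check mode):
          # docker run --rm -v "$PWD/haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro" \
          #   haproxy:2.9 haproxy -c -f /usr/local/etc/haproxy/haproxy.cfg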

      - name: Launch cluster
        run: docker compose -f compose.yml up -d

      - name: Wait for LB ready
        run: |
          for i in {1..90}; do
            if curl -sf http://127.0.0.1:9000/health >/dev/null 2>&1; then
              echo "Load balancer is ready"
              exit 0
            fi
            sleep 2
          done
          echo "LB or backend not ready" >&2
          docker compose -f compose.yml logs --tail=200 || true
          exit 1
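          # Budget note: 90 iterations x 2 s sleep gives roughly a three-minute
          # readiness window before the job dumps the cluster logs and fails.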

      - name: Generate s3tests config
        run: |
          export S3_HOST=127.0.0.1
          envsubst < .github/s3tests/s3tests.conf > s3tests.conf

      - name: Provision s3-tests alt user (required by suite)
        run: |
          python3 -m pip install --user --upgrade pip awscurl
          export PATH="$HOME/.local/bin:$PATH"

          awscurl \
            --service s3 \
            --region "${S3_REGION}" \
            --access_key "${S3_ACCESS_KEY}" \
            --secret_key "${S3_SECRET_KEY}" \
            -X PUT \
            -H 'Content-Type: application/json' \
            -d '{"secretKey":"'"${S3_ALT_SECRET_KEY}"'","status":"enabled","policy":"readwrite"}' \
            "http://127.0.0.1:9000/rustfs/admin/v3/add-user?accessKey=${S3_ALT_ACCESS_KEY}"

          awscurl \
            --service s3 \
            --region "${S3_REGION}" \
            --access_key "${S3_ACCESS_KEY}" \
            --secret_key "${S3_SECRET_KEY}" \
            -X PUT \
            "http://127.0.0.1:9000/rustfs/admin/v3/set-user-or-group-policy?policyName=readwrite&userOrGroup=${S3_ALT_ACCESS_KEY}&isGroup=false"

          awscurl \
            --service s3 \
            --region "${S3_REGION}" \
            --access_key "${S3_ALT_ACCESS_KEY}" \
            --secret_key "${S3_ALT_SECRET_KEY}" \
            -X GET \
            "http://127.0.0.1:9000/" >/dev/null

      - name: Prepare s3-tests
        run: |
          python3 -m pip install --user --upgrade pip tox
          export PATH="$HOME/.local/bin:$PATH"
          git clone --depth 1 https://github.com/ceph/s3-tests.git s3-tests

      - name: Run ceph s3-tests (multi, debug friendly)
        run: |
          export PATH="$HOME/.local/bin:$PATH"
          mkdir -p artifacts/s3tests-multi

          cd s3-tests

          set -o pipefail

          MAXFAIL="${{ github.event.inputs.maxfail }}"
          if [ -z "$MAXFAIL" ]; then MAXFAIL="1"; fi

          MARKEXPR="${{ github.event.inputs.markexpr }}"
          if [ -z "$MARKEXPR" ]; then MARKEXPR="not lifecycle and not versioning and not s3website and not bucket_logging and not encryption"; fi

          XDIST="${{ github.event.inputs.xdist }}"
          if [ -z "$XDIST" ]; then XDIST="0"; fi
          XDIST_ARGS=""
          if [ "$XDIST" != "0" ]; then
            # Add pytest-xdist to requirements.txt so tox installs it inside
            # its virtualenv. Installing outside tox does NOT work.
            echo "pytest-xdist" >> requirements.txt
            XDIST_ARGS="-n $XDIST --dist=loadgroup"
          fi

          # Run tests from s3tests/functional (boto2+boto3 combined directory).
          S3TEST_CONF=${GITHUB_WORKSPACE}/s3tests.conf \
            tox -- \
            -vv -ra --showlocals --tb=long \
            --maxfail="$MAXFAIL" \
            --junitxml=${GITHUB_WORKSPACE}/artifacts/s3tests-multi/junit.xml \
            $XDIST_ARGS \
            s3tests/functional/test_s3.py \
            -m "$MARKEXPR" \
            2>&1 | tee ${GITHUB_WORKSPACE}/artifacts/s3tests-multi/pytest.log

      - name: Collect logs
        if: always()
        run: |
          mkdir -p artifacts/cluster
          docker compose -f compose.yml logs --no-color > artifacts/cluster/cluster.log 2>&1 || true

      - name: Upload artifacts
        if: always() && env.ACT != 'true'
        uses: actions/upload-artifact@v4
        with:
          name: s3tests-multi
          path: artifacts/**

95  .github/workflows/helm-package.yml  vendored  Normal file
@@ -0,0 +1,95 @@
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: Publish helm chart to artifacthub

on:
  workflow_run:
    workflows: [ "Build and Release" ]
    types: [ completed ]

permissions:
  contents: read

env:
  new_version: ${{ github.event.workflow_run.head_branch }}

jobs:
  build-helm-package:
    runs-on: ubicloud-standard-2
    # Only run on successful builds triggered by tag pushes (version format: x.y.z or x.y.z-suffix)
    if: |
      github.event.workflow_run.conclusion == 'success' &&
      github.event.workflow_run.event == 'push' &&
      contains(github.event.workflow_run.head_branch, '.')

    steps:
      - name: Checkout helm chart repo
        uses: actions/checkout@v6

      - name: Replace chart app version
        run: |
          set -e
          set -x
          old_version=$(grep "^appVersion:" helm/rustfs/Chart.yaml | awk '{print $2}')
          sed -i "s/$old_version/$new_version/g" helm/rustfs/Chart.yaml
          sed -i "/^image:/,/^[^ ]/ s/tag:.*/tag: "$new_version"/" helm/rustfs/values.yaml

      - name: Set up Helm
        uses: azure/setup-helm@v4.3.0

      - name: Package Helm Chart
        run: |
          cp helm/README.md helm/rustfs/
          package_version=$(echo $new_version | awk -F '-' '{print $2}' | awk -F '.' '{print $NF}')
          helm package ./helm/rustfs --destination helm/rustfs/ --version "0.0.$package_version"
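          # Sketch of the arithmetic above: for new_version=1.0.0-alpha.76 the
          # first awk cut yields "alpha.76" and the second yields "76", so the
          # chart is packaged as version 0.0.76.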

      - name: Upload helm package as artifact
        uses: actions/upload-artifact@v4
        with:
          name: helm-package
          path: helm/rustfs/*.tgz
          retention-days: 1

  publish-helm-package:
    runs-on: ubicloud-standard-2
    needs: [ build-helm-package ]

    steps:
      - name: Checkout helm package repo
        uses: actions/checkout@v6
        with:
          repository: rustfs/helm
          token: ${{ secrets.RUSTFS_HELM_PACKAGE }}

      - name: Download helm package
        uses: actions/download-artifact@v4
        with:
          name: helm-package
          path: ./

      - name: Set up helm
        uses: azure/setup-helm@v4.3.0

      - name: Generate index
        run: helm repo index . --url https://charts.rustfs.com

      - name: Push helm package and index file
        run: |
          git config --global user.name "${{ secrets.USERNAME }}"
          git config --global user.email "${{ secrets.EMAIL_ADDRESS }}"
          git status .
          git add .
          git commit -m "Update rustfs helm package with $new_version."
          git push origin main

2  .github/workflows/issue-translator.yml  vendored
@@ -25,7 +25,7 @@ permissions:

jobs:
  build:
    runs-on: ubuntu-latest
    runs-on: ubicloud-standard-4
    steps:
      - uses: usthe/issues-translate-action@v2.7
        with:

8  .github/workflows/performance.yml  vendored
@@ -40,11 +40,11 @@ env:
jobs:
  performance-profile:
    name: Performance Profiling
    runs-on: ubuntu-latest
    runs-on: ubicloud-standard-2
    timeout-minutes: 30
    steps:
      - name: Checkout repository
        uses: actions/checkout@v5
        uses: actions/checkout@v6

      - name: Setup Rust environment
        uses: ./.github/actions/setup
@@ -115,11 +115,11 @@ jobs:

  benchmark:
    name: Benchmark Tests
    runs-on: ubuntu-latest
    runs-on: ubicloud-standard-2
    timeout-minutes: 45
    steps:
      - name: Checkout repository
        uses: actions/checkout@v5
        uses: actions/checkout@v6

      - name: Setup Rust environment
        uses: ./.github/actions/setup

12  .gitignore  vendored
@@ -2,6 +2,7 @@
.DS_Store
.idea
.vscode
.direnv/
/test
/logs
/data
@@ -23,4 +24,13 @@ profile.json
*.go
*.pb
*.svg
deploy/logs/*.log.*
deploy/logs/*.log.*

# s3-tests local artifacts (root directory only)
/s3-tests/
/s3-tests-local/
/s3tests.conf
/s3tests.conf.*
*.events
*.audit
*.snappy

32  .pre-commit-config.yaml  Normal file
@@ -0,0 +1,32 @@
# See https://pre-commit.com for more information
# See https://pre-commit.com/hooks.html for more hooks
repos:
  - repo: local
    hooks:
      - id: cargo-fmt
        name: cargo fmt
        entry: cargo fmt --all --check
        language: system
        types: [rust]
        pass_filenames: false

      - id: cargo-clippy
        name: cargo clippy
        entry: cargo clippy --all-targets --all-features -- -D warnings
        language: system
        types: [rust]
        pass_filenames: false

      - id: cargo-check
        name: cargo check
        entry: cargo check --all-targets
        language: system
        types: [rust]
        pass_filenames: false

      - id: cargo-test
        name: cargo test
        entry: bash -c 'cargo test --workspace --exclude e2e_test && cargo test --all --doc'
        language: system
        types: [rust]
        pass_filenames: false
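
A usage sketch for this hook configuration (assuming the `pre-commit` tool is installed, for example via pip):

    pre-commit install          # register the git hook once per clone
    pre-commit run --all-files  # run every hook against the whole tree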

50  .vscode/launch.json  vendored
@@ -1,9 +1,31 @@
{
    // Use IntelliSense to learn about possible attributes.
    // Hover to view descriptions of existing attributes.
    // For more information, visit: https://go.microsoft.com/fwlink/?linkid=830387
    "version": "0.2.0",
    "configurations": [
        {
            "type": "lldb",
            "request": "launch",
            "name": "Debug(only) executable 'rustfs'",
            "env": {
                "RUST_LOG": "rustfs=info,ecstore=info,s3s=info,iam=info",
                "RUSTFS_SKIP_BACKGROUND_TASK": "on"
                //"RUSTFS_OBS_LOG_DIRECTORY": "./deploy/logs",
                // "RUSTFS_POLICY_PLUGIN_URL":"http://localhost:8181/v1/data/rustfs/authz/allow",
                // "RUSTFS_POLICY_PLUGIN_AUTH_TOKEN":"your-opa-token"
            },
            "program": "${workspaceFolder}/target/debug/rustfs",
            "args": [
                "--access-key",
                "rustfsadmin",
                "--secret-key",
                "rustfsadmin",
                "--address",
                "0.0.0.0:9010",
                "--server-domains",
                "127.0.0.1:9010",
                "./target/volume/test{1...4}"
            ],
            "cwd": "${workspaceFolder}"
        },
        {
            "type": "lldb",
            "request": "launch",
@@ -22,6 +44,7 @@
            "env": {
                "RUST_LOG": "rustfs=debug,ecstore=info,s3s=debug,iam=debug",
                "RUSTFS_SKIP_BACKGROUND_TASK": "on",
                //"RUSTFS_OBS_LOG_DIRECTORY": "./deploy/logs",
                // "RUSTFS_POLICY_PLUGIN_URL":"http://localhost:8181/v1/data/rustfs/authz/allow",
                // "RUSTFS_POLICY_PLUGIN_AUTH_TOKEN":"your-opa-token"
            },
@@ -66,12 +89,8 @@
                "test",
                "--no-run",
                "--lib",
                "--package=ecstore"
            ],
            "filter": {
                "name": "ecstore",
                "kind": "lib"
            }
                "--package=rustfs-ecstore"
            ]
            },
            "args": [],
            "cwd": "${workspaceFolder}"
@@ -85,6 +104,19 @@
            "cwd": "${workspaceFolder}",
            //"stopAtEntry": false,
            //"preLaunchTask": "cargo build",
            "env": {
                "RUSTFS_ACCESS_KEY": "rustfsadmin",
                "RUSTFS_SECRET_KEY": "rustfsadmin",
                "RUSTFS_VOLUMES": "./target/volume/test{1...4}",
                "RUSTFS_ADDRESS": ":9000",
                "RUSTFS_CONSOLE_ENABLE": "true",
                // "RUSTFS_OBS_TRACE_ENDPOINT": "http://127.0.0.1:4318/v1/traces", // jaeger otlp http endpoint
                // "RUSTFS_OBS_METRIC_ENDPOINT": "http://127.0.0.1:4318/v1/metrics", // default otlp http endpoint
                // "RUSTFS_OBS_LOG_ENDPOINT": "http://127.0.0.1:4318/v1/logs", // default otlp http endpoint
                // "RUSTFS_COMPRESS_ENABLE": "true",
                "RUSTFS_CONSOLE_ADDRESS": "127.0.0.1:9001",
                "RUSTFS_OBS_LOG_DIRECTORY": "./target/logs",
            },
            "sourceLanguages": [
                "rust"
            ],

@@ -2,6 +2,7 @@

## Communication Rules
- Respond to the user in Chinese; use English in all other contexts.
- Code and documentation must be written in English only. Chinese text is allowed solely as test data/fixtures when a case explicitly requires Chinese-language content for validation.

## Project Structure & Module Organization
The workspace root hosts shared dependencies in `Cargo.toml`. The service binary lives under `rustfs/src/main.rs`, while reusable crates sit in `crates/` (`crypto`, `iam`, `kms`, and `e2e_test`). Local fixtures for standalone flows reside in `test_standalone/`, deployment manifests are under `deploy/`, Docker assets sit at the root, and automation lives in `scripts/`. Skim each crate's README or module docs before contributing changes.

@@ -2,6 +2,8 @@

## 📋 Code Quality Requirements

For instructions on setting up and running the local development environment, please see [Development Guide](docs/DEVELOPMENT.md).

### 🔧 Code Formatting Rules

**MANDATORY**: All code must be properly formatted before committing. This project enforces strict formatting standards to maintain code consistency and readability.

1557  Cargo.lock  generated  (file diff suppressed because it is too large)

99  Cargo.toml
@@ -97,18 +97,20 @@ async-channel = "2.5.0"
async-compression = { version = "0.4.19" }
async-recursion = "1.1.1"
async-trait = "0.1.89"
axum = "0.8.7"
axum-extra = "0.12.2"
axum-server = { version = "0.7.3", features = ["tls-rustls-no-provider"], default-features = false }
axum = "0.8.8"
axum-extra = "0.12.3"
axum-server = { version = "0.8.0", features = ["tls-rustls-no-provider"], default-features = false }
futures = "0.3.31"
futures-core = "0.3.31"
futures-util = "0.3.31"
pollster = "0.4.0"
hyper = { version = "1.8.1", features = ["http2", "http1", "server"] }
hyper-rustls = { version = "0.27.7", default-features = false, features = ["native-tokio", "http1", "tls12", "logging", "http2", "ring", "webpki-roots"] }
hyper-util = { version = "0.1.18", features = ["tokio", "server-auto", "server-graceful"] }
http = "1.3.1"
hyper-util = { version = "0.1.19", features = ["tokio", "server-auto", "server-graceful"] }
http = "1.4.0"
http-body = "1.0.1"
reqwest = { version = "0.12.24", default-features = false, features = ["rustls-tls-webpki-roots", "charset", "http2", "system-proxy", "stream", "json", "blocking"] }
http-body-util = "0.1.3"
reqwest = { version = "0.12.28", default-features = false, features = ["rustls-tls-webpki-roots", "charset", "http2", "system-proxy", "stream", "json", "blocking"] }
socket2 = "0.6.1"
tokio = { version = "1.48.0", features = ["fs", "rt-multi-thread"] }
tokio-rustls = { version = "0.26.4", default-features = false, features = ["logging", "tls12", "ring"] }
@@ -119,39 +121,40 @@ tonic = { version = "0.14.2", features = ["gzip"] }
tonic-prost = { version = "0.14.2" }
tonic-prost-build = { version = "0.14.2" }
tower = { version = "0.5.2", features = ["timeout"] }
tower-http = { version = "0.6.6", features = ["cors"] }
tower-http = { version = "0.6.8", features = ["cors"] }

# Serialization and Data Formats
bytes = { version = "1.11.0", features = ["serde"] }
bytesize = "2.2.0"
bytesize = "2.3.1"
byteorder = "1.5.0"
flatbuffers = "25.9.23"
flatbuffers = "25.12.19"
form_urlencoded = "1.2.2"
prost = "0.14.1"
quick-xml = "0.38.4"
rmcp = { version = "0.8.5" }
rmp = { version = "0.8.14" }
rmp-serde = { version = "1.3.0" }
rmcp = { version = "0.12.0" }
rmp = { version = "0.8.15" }
rmp-serde = { version = "1.3.1" }
serde = { version = "1.0.228", features = ["derive"] }
serde_json = { version = "1.0.145", features = ["raw_value"] }
serde_json = { version = "1.0.147", features = ["raw_value"] }
serde_urlencoded = "0.7.1"
schemars = "1.1.0"

# Cryptography and Security
aes-gcm = { version = "0.11.0-rc.2", features = ["rand_core"] }
argon2 = { version = "0.6.0-rc.2", features = ["std"] }
blake3 = { version = "1.8.2" }
argon2 = { version = "0.6.0-rc.5" }
blake3 = { version = "1.8.2", features = ["rayon", "mmap"] }
chacha20poly1305 = { version = "0.11.0-rc.2" }
crc-fast = "1.6.0"
hmac = { version = "0.13.0-rc.3" }
jsonwebtoken = { version = "10.2.0", features = ["rust_crypto"] }
pbkdf2 = "0.13.0-rc.2"
pbkdf2 = "0.13.0-rc.5"
rsa = { version = "0.10.0-rc.10" }
rustls = { version = "0.23.35", features = ["ring", "logging", "std", "tls12"], default-features = false }
rustls-pemfile = "2.2.0"
rustls-pki-types = "1.13.0"
rustls-pki-types = "1.13.2"
sha1 = "0.11.0-rc.3"
sha2 = "0.11.0-rc.3"
subtle = "2.6"
zeroize = { version = "1.8.2", features = ["derive"] }

# Time and Date
@@ -161,55 +164,53 @@ time = { version = "0.3.44", features = ["std", "parsing", "formatting", "macros

# Utilities and Tools
anyhow = "1.0.100"
arc-swap = "1.7.1"
arc-swap = "1.8.0"
astral-tokio-tar = "0.5.6"
atoi = "2.0.0"
atomic_enum = "0.3.0"
aws-config = { version = "1.8.10" }
aws-credential-types = { version = "1.2.9" }
aws-sdk-s3 = { version = "1.112.0", default-features = false, features = ["sigv4a", "rustls", "rt-tokio"] }
aws-smithy-types = { version = "1.3.4" }
aws-config = { version = "1.8.12" }
aws-credential-types = { version = "1.2.11" }
aws-sdk-s3 = { version = "1.119.0", default-features = false, features = ["sigv4a", "rustls", "rt-tokio"] }
aws-smithy-types = { version = "1.3.5" }
base64 = "0.22.1"
base64-simd = "0.8.0"
brotli = "8.0.2"
cfg-if = "1.0.4"
clap = { version = "4.5.51", features = ["derive", "env"] }
const-str = { version = "0.7.0", features = ["std", "proc"] }
convert_case = "0.9.0"
criterion = { version = "0.7", features = ["html_reports"] }
clap = { version = "4.5.53", features = ["derive", "env"] }
const-str = { version = "0.7.1", features = ["std", "proc"] }
convert_case = "0.10.0"
criterion = { version = "0.8", features = ["html_reports"] }
crossbeam-queue = "0.3.12"
datafusion = "50.3.0"
datafusion = "51.0.0"
derive_builder = "0.20.2"
enumset = "1.1.10"
faster-hex = "0.10.0"
flate2 = "1.1.5"
flexi_logger = { version = "0.31.7", features = ["trc", "dont_minimize_extra_stacks", "compress", "kv", "json"] }
glob = "0.3.3"
google-cloud-storage = "1.4.0"
google-cloud-auth = "1.2.0"
hashbrown = { version = "0.16.0", features = ["serde", "rayon"] }
google-cloud-storage = "1.5.0"
google-cloud-auth = "1.3.0"
hashbrown = { version = "0.16.1", features = ["serde", "rayon"] }
heed = { version = "0.22.0" }
hex-simd = "0.8.0"
highway = { version = "1.3.0" }
ipnetwork = { version = "0.21.1", features = ["serde"] }
lazy_static = "1.5.0"
libc = "0.2.177"
libc = "0.2.178"
libsystemd = "0.7.2"
local-ip-address = "0.6.5"
local-ip-address = "0.6.8"
lz4 = "1.28.1"
matchit = "0.9.0"
md-5 = "0.11.0-rc.3"
md5 = "0.8.0"
metrics = "0.24.2"
mime_guess = "2.0.5"
moka = { version = "0.12.11", features = ["future"] }
moka = { version = "0.12.12", features = ["future"] }
netif = "0.1.6"
nix = { version = "0.30.1", features = ["fs"] }
nu-ansi-term = "0.50.3"
num_cpus = { version = "1.17.0" }
nvml-wrapper = "0.11.0"
object_store = "0.12.4"
once_cell = "1.21.3"
parking_lot = "0.12.5"
path-absolutize = "3.1.1"
path-clean = "1.0.1"
@@ -219,51 +220,53 @@ rand = { version = "0.10.0-rc.5", features = ["serde"] }
rayon = "1.11.0"
reed-solomon-simd = { version = "3.1.0" }
regex = { version = "1.12.2" }
rumqttc = { version = "0.25.0" }
rumqttc = { version = "0.25.1" }
rust-embed = { version = "8.9.0" }
rustc-hash = { version = "2.1.1" }
s3s = { git = "https://github.com/s3s-project/s3s.git", rev = "ba9f902", version = "0.12.0-rc.3", features = ["minio"] }
s3s = { version = "0.13.0-alpha", features = ["minio"], git = "https://github.com/s3s-project/s3s.git", branch = "main" }
serial_test = "3.2.0"
shadow-rs = { version = "1.4.0", default-features = false }
shadow-rs = { version = "1.5.0", default-features = false }
siphasher = "1.0.1"
smallvec = { version = "1.15.1", features = ["serde"] }
smartstring = "1.0.1"
snafu = "0.8.9"
snap = "1.1.1"
starshard = { version = "0.5.0", features = ["rayon", "async", "serde"] }
starshard = { version = "0.6.0", features = ["rayon", "async", "serde"] }
strum = { version = "0.27.2", features = ["derive"] }
sysctl = "0.7.1"
sysinfo = "0.37.2"
temp-env = "0.3.6"
tempfile = "3.23.0"
tempfile = "3.24.0"
test-case = "3.3.1"
thiserror = "2.0.17"
tracing = { version = "0.1.41" }
tracing-appender = "0.2.3"
tracing = { version = "0.1.44" }
tracing-appender = "0.2.4"
tracing-error = "0.2.1"
tracing-opentelemetry = "0.32.0"
tracing-subscriber = { version = "0.3.20", features = ["env-filter", "time"] }
tracing-subscriber = { version = "0.3.22", features = ["env-filter", "time"] }
transform-stream = "0.3.1"
url = "2.5.7"
urlencoding = "2.1.3"
uuid = { version = "1.18.1", features = ["v4", "fast-rng", "macro-diagnostics"] }
uuid = { version = "1.19.0", features = ["v4", "fast-rng", "macro-diagnostics"] }
vaultrs = { version = "0.7.4" }
walkdir = "2.5.0"
wildmatch = { version = "2.6.1", features = ["serde"] }
winapi = { version = "0.3.9" }
xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }
zip = "6.0.0"
zip = "7.0.0"
zstd = "0.13.3"

# Observability and Metrics
metrics = "0.24.3"
opentelemetry = { version = "0.31.0" }
opentelemetry-appender-tracing = { version = "0.31.1", features = ["experimental_use_tracing_span_context", "experimental_metadata_attributes", "spec_unstable_logs_enabled"] }
opentelemetry-otlp = { version = "0.31.0", features = ["http-proto", "zstd-http"] }
opentelemetry-otlp = { version = "0.31.0", features = ["gzip-http", "reqwest-rustls"] }
opentelemetry_sdk = { version = "0.31.0" }
opentelemetry-semantic-conventions = { version = "0.31.0", features = ["semconv_experimental"] }
opentelemetry-stdout = { version = "0.31.0" }

# Performance Analysis and Memory Profiling
mimalloc = "0.1"
# Use tikv-jemallocator as memory allocator and enable performance analysis
tikv-jemallocator = { version = "0.6", features = ["profiling", "stats", "unprefixed_malloc_on_supported_platforms", "background_threads"] }
# Used to control and obtain statistics for jemalloc at runtime
@@ -272,11 +275,11 @@ tikv-jemalloc-ctl = { version = "0.6", features = ["use_std", "stats", "profilin
jemalloc_pprof = { version = "0.8.1", features = ["symbolize", "flamegraph"] }
# Used to generate CPU performance analysis data and flame diagrams
pprof = { version = "0.15.0", features = ["flamegraph", "protobuf-codec"] }
mimalloc = "0.1"

[workspace.metadata.cargo-shear]
ignored = ["rustfs", "rustfs-mcp", "tokio-test"]
ignored = ["rustfs", "rustfs-mcp"]

[profile.release]
opt-level = 3

@@ -81,12 +81,11 @@ ENV RUSTFS_ADDRESS=":9000" \
    RUSTFS_CORS_ALLOWED_ORIGINS="*" \
    RUSTFS_CONSOLE_CORS_ALLOWED_ORIGINS="*" \
    RUSTFS_VOLUMES="/data" \
    RUST_LOG="warn" \
    RUSTFS_OBS_LOG_DIRECTORY="/logs"
    RUST_LOG="warn"

EXPOSE 9000 9001

VOLUME ["/data", "/logs"]
VOLUME ["/data"]

USER rustfs

@@ -39,7 +39,9 @@ RUN set -eux; \
    libssl-dev \
    lld \
    protobuf-compiler \
    flatbuffers-compiler; \
    flatbuffers-compiler \
    gcc-aarch64-linux-gnu \
    gcc-x86-64-linux-gnu; \
    rm -rf /var/lib/apt/lists/*

# Optional: cross toolchain for aarch64 (only when targeting linux/arm64)
@@ -51,18 +53,18 @@ RUN set -eux; \
    rm -rf /var/lib/apt/lists/*; \
    fi

# Add Rust targets based on TARGETPLATFORM
# Add Rust targets for both arches (to support cross-builds on multi-arch runners)
RUN set -eux; \
    case "${TARGETPLATFORM:-linux/amd64}" in \
    linux/amd64) rustup target add x86_64-unknown-linux-gnu ;; \
    linux/arm64) rustup target add aarch64-unknown-linux-gnu ;; \
    *) echo "Unsupported TARGETPLATFORM=${TARGETPLATFORM}" >&2; exit 1 ;; \
    esac
    rustup target add x86_64-unknown-linux-gnu aarch64-unknown-linux-gnu; \
    rustup component add rust-std-x86_64-unknown-linux-gnu rust-std-aarch64-unknown-linux-gnu

# Cross-compilation environment (used only when targeting aarch64)
ENV CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc
ENV CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc
ENV CXX_aarch64_unknown_linux_gnu=aarch64-linux-gnu-g++
ENV CARGO_TARGET_X86_64_UNKNOWN_LINUX_GNU_LINKER=x86_64-linux-gnu-gcc
ENV CC_x86_64_unknown_linux_gnu=x86_64-linux-gnu-gcc
ENV CXX_x86_64_unknown_linux_gnu=x86_64-linux-gnu-g++

WORKDIR /usr/src/rustfs

@@ -72,7 +74,6 @@ COPY Cargo.toml Cargo.lock ./
# 2) workspace member manifests (adjust if workspace layout changes)
COPY rustfs/Cargo.toml rustfs/Cargo.toml
COPY crates/*/Cargo.toml crates/
COPY cli/rustfs-gui/Cargo.toml cli/rustfs-gui/Cargo.toml

# Pre-fetch dependencies for better caching
RUN --mount=type=cache,target=/usr/local/cargo/registry \
@@ -117,6 +118,49 @@ RUN --mount=type=cache,target=/usr/local/cargo/registry \
    ;; \
    esac

# -----------------------------
# Development stage (keeps toolchain)
# -----------------------------
FROM builder AS dev

ARG BUILD_DATE
ARG VCS_REF

LABEL name="RustFS (dev-source)" \
    maintainer="RustFS Team" \
    build-date="${BUILD_DATE}" \
    vcs-ref="${VCS_REF}" \
    description="RustFS - local development with Rust toolchain."

# Install runtime dependencies that might be missing in partial builder
# (builder already has build-essential, lld, etc.)
WORKDIR /app

ENV CARGO_INCREMENTAL=1

# Ensure we have the same default env vars available
ENV RUSTFS_ADDRESS=":9000" \
    RUSTFS_ACCESS_KEY="rustfsadmin" \
    RUSTFS_SECRET_KEY="rustfsadmin" \
    RUSTFS_CONSOLE_ENABLE="true" \
    RUSTFS_VOLUMES="/data" \
    RUST_LOG="warn" \
    RUSTFS_OBS_LOG_DIRECTORY="/logs" \
    RUSTFS_USERNAME="rustfs" \
    RUSTFS_GROUPNAME="rustfs" \
    RUSTFS_UID="1000" \
    RUSTFS_GID="1000"

# Note: We don't COPY source here because we expect it to be mounted at /app
# We rely on cargo run to build and run
EXPOSE 9000 9001

COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh

ENTRYPOINT ["/entrypoint.sh"]
CMD ["cargo", "run", "--bin", "rustfs", "--"]

# -----------------------------
# Runtime stage (Ubuntu minimal)
# -----------------------------
@@ -166,14 +210,13 @@ ENV RUSTFS_ADDRESS=":9000" \
    RUSTFS_CONSOLE_ENABLE="true" \
    RUSTFS_VOLUMES="/data" \
    RUST_LOG="warn" \
    RUSTFS_OBS_LOG_DIRECTORY="/logs" \
    RUSTFS_USERNAME="rustfs" \
    RUSTFS_GROUPNAME="rustfs" \
    RUSTFS_UID="1000" \
    RUSTFS_GID="1000"

EXPOSE 9000
VOLUME ["/data", "/logs"]
VOLUME ["/data"]

# Keep root here; entrypoint will drop privileges using chroot --userspec
ENTRYPOINT ["/entrypoint.sh"]

45  Makefile
@@ -9,30 +9,53 @@ CONTAINER_NAME ?= rustfs-dev
DOCKERFILE_PRODUCTION = Dockerfile
DOCKERFILE_SOURCE = Dockerfile.source

# Fatal check
# Checks all required dependencies and exits with error if not found
# (e.g., cargo, rustfmt)
check-%:
	@command -v $* >/dev/null 2>&1 || { \
		echo >&2 "❌ '$*' is not installed."; \
		exit 1; \
	}

# Warning-only check
# Checks for optional dependencies and issues a warning if not found
# (e.g., cargo-nextest for enhanced testing)
warn-%:
	@command -v $* >/dev/null 2>&1 || { \
		echo >&2 "⚠️ '$*' is not installed."; \
	}

# For checking dependencies use check-<dep-name> or warn-<dep-name>
.PHONY: core-deps fmt-deps test-deps
core-deps: check-cargo
fmt-deps: check-rustfmt
test-deps: warn-cargo-nextest
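
# Usage sketch (illustrative): `make check-cargo` aborts when cargo is missing,
# while `make warn-cargo-nextest` merely prints a warning and continues.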

# Code quality and formatting targets
.PHONY: fmt
fmt:
fmt: core-deps fmt-deps
	@echo "🔧 Formatting code..."
	cargo fmt --all

.PHONY: fmt-check
fmt-check:
fmt-check: core-deps fmt-deps
	@echo "📝 Checking code formatting..."
	cargo fmt --all --check

.PHONY: clippy
clippy:
clippy: core-deps
	@echo "🔍 Running clippy checks..."
	cargo clippy --fix --allow-dirty
	cargo clippy --all-targets --all-features -- -D warnings

.PHONY: check
check:
check: core-deps
	@echo "🔨 Running compilation check..."
	cargo check --all-targets

.PHONY: test
test:
test: core-deps test-deps
	@echo "🧪 Running tests..."
	@if command -v cargo-nextest >/dev/null 2>&1; then \
		cargo nextest run --all --exclude e2e_test; \
@@ -42,16 +65,16 @@ test:
	fi
	cargo test --all --doc

.PHONY: pre-commit
pre-commit: fmt clippy check test
	@echo "✅ All pre-commit checks passed!"

.PHONY: setup-hooks
setup-hooks:
	@echo "🔧 Setting up git hooks..."
	chmod +x .git/hooks/pre-commit
	@echo "✅ Git hooks setup complete!"

.PHONY: pre-commit
pre-commit: fmt clippy check test
	@echo "✅ All pre-commit checks passed!"

.PHONY: e2e-server
e2e-server:
	sh $(shell pwd)/scripts/run.sh
@@ -186,8 +209,6 @@ docker-dev-push:
		--push \
		.

# Local production builds using direct buildx (alternative to docker-buildx.sh)
.PHONY: docker-buildx-production-local
docker-buildx-production-local:
@@ -247,8 +268,6 @@ dev-env-stop:
.PHONY: dev-env-restart
dev-env-restart: dev-env-stop dev-env-start

# ========================================================================================
# Build Utilities
# ========================================================================================

241  README.md
@@ -11,7 +11,7 @@
</p>

<p align="center">
<a href="https://docs.rustfs.com/introduction.html">Getting Started</a>
<a href="https://docs.rustfs.com/installation/">Getting Started</a>
· <a href="https://docs.rustfs.com/">Docs</a>
· <a href="https://github.com/rustfs/rustfs/issues">Bug reports</a>
· <a href="https://github.com/rustfs/rustfs/discussions">Discussions</a>
@@ -19,7 +19,6 @@

<p align="center">
English | <a href="https://github.com/rustfs/rustfs/blob/main/README_ZH.md">简体中文</a> |
<!-- Keep these links. Translations will automatically update with the README. -->
<a href="https://readme-i18n.com/rustfs/rustfs?lang=de">Deutsch</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=es">Español</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=fr">français</a> |
@@ -29,179 +28,196 @@ English | <a href="https://github.com/rustfs/rustfs/blob/main/README_ZH.md">简
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ru">Русский</a>
</p>

RustFS is a high-performance, distributed object storage system built in Rust., one of the most popular languages
worldwide. RustFS combines the simplicity of MinIO with the memory safety and performance of Rust., S3 compatibility, open-source nature,
support for data lakes, AI, and big data. Furthermore, it has a better and more user-friendly open-source license in
comparison to other storage systems, being constructed under the Apache license. As Rust serves as its foundation,
RustFS provides faster speed and safer distributed features for high-performance object storage.
RustFS is a high-performance, distributed object storage system built in Rust—one of the most loved programming languages worldwide. RustFS combines the simplicity of MinIO with the memory safety and raw performance of Rust. It offers full S3 compatibility, is completely open-source, and is optimized for data lakes, AI, and big data workloads.

> ⚠️ **Current Status: Beta / Technical Preview. Not yet recommended for critical production workloads.**
Unlike other storage systems, RustFS is released under the permissive Apache 2.0 license, avoiding the restrictions of AGPL. With Rust as its foundation, RustFS delivers superior speed and secure distributed features for next-generation object storage.

## Features
## Feature & Status

- **High Performance**: Built with Rust, ensuring speed and efficiency.
- **Distributed Architecture**: Scalable and fault-tolerant design for large-scale deployments.
- **S3 Compatibility**: Seamless integration with existing S3-compatible applications.
- **Data Lake Support**: Optimized for big data and AI workloads.
- **Open Source**: Licensed under Apache 2.0, encouraging community contributions and transparency.
- **User-Friendly**: Designed with simplicity in mind, making it easy to deploy and manage.
- **High Performance**: Built with Rust to ensure maximum speed and resource efficiency.
- **Distributed Architecture**: Scalable and fault-tolerant design suitable for large-scale deployments.
- **S3 Compatibility**: Seamless integration with existing S3-compatible applications and tools.
- **Data Lake Support**: Optimized for high-throughput big data and AI workloads.
- **Open Source**: Licensed under Apache 2.0, encouraging unrestricted community contributions and commercial usage.
- **User-Friendly**: Designed with simplicity in mind for easy deployment and management.

## RustFS vs MinIO
| Feature | Status | Feature | Status |
| :--- | :--- | :--- | :--- |
| **S3 Core Features** | ✅ Available | **Bitrot Protection** | ✅ Available |
| **Upload / Download** | ✅ Available | **Single Node Mode** | ✅ Available |
| **Versioning** | ✅ Available | **Bucket Replication** | ⚠️ Partial Support |
| **Logging** | ✅ Available | **Lifecycle Management** | 🚧 Under Testing |
| **Event Notifications** | ✅ Available | **Distributed Mode** | 🚧 Under Testing |
| **K8s Helm Charts** | ✅ Available | **OPA (Open Policy Agent)** | 🚧 Under Testing |

Stress test server parameters

| Type | parameter | Remark |

## RustFS vs MinIO Performance

**Stress Test Environment:**

| Type | Parameter | Remark |
|---------|-----------|----------------------------------------------------------|
| CPU | 2 Core | Intel Xeon(Sapphire Rapids) Platinum 8475B , 2.7/3.2 GHz | |
| Memory | 4GB | |
| Network | 15Gbp | |
| Driver | 40GB x 4 | IOPS 3800 / Driver |
| CPU | 2 Core | Intel Xeon (Sapphire Rapids) Platinum 8475B, 2.7/3.2 GHz |
| Memory | 4GB | |
| Network | 15Gbps | |
| Drive | 40GB x 4 | IOPS 3800 / Drive |

<https://github.com/user-attachments/assets/2e4979b5-260c-4f2c-ac12-c87fd558072a>

### RustFS vs Other object storage
### RustFS vs Other Object Storage

| RustFS | Other object storage |
|---------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|
| Powerful Console | Simple and useless Console |
| Developed based on Rust language, memory is safer | Developed in Go or C, with potential issues like memory GC/leaks |
| Guaranteed Data Sovereignty: No telemetry or unauthorized data egress | Reporting logs to other third countries may violate national security laws |
| Permissive Apache 2.0 License | AGPL V3 License and other License, polluted open source and License traps, infringement of intellectual property rights |
| Comprehensive S3 support, works with domestic and international cloud providers | Full support for S3, but no local cloud vendor support |
| Rust-based development, strong support for secure and innovative devices | Poor support for edge gateways and secure innovative devices |
| Stable commercial prices, free community support | High pricing, with costs up to $250,000 for 1PiB |
| No risk | Intellectual property risks and risks of prohibited uses |
| Feature | RustFS | Other Object Storage |
| :--- | :--- | :--- |
| **Console Experience** | **Powerful Console**<br>Comprehensive management interface. | **Basic / Limited Console**<br>Often overly simple or lacking critical features. |
| **Language & Safety** | **Rust-based**<br>Memory safety by design. | **Go or C-based**<br>Potential for memory GC pauses or leaks. |
| **Data Sovereignty** | **No Telemetry / Full Compliance**<br>Guards against unauthorized cross-border data egress. Compliant with GDPR (EU/UK), CCPA (US), and APPI (Japan). | **Potential Risk**<br>Possible legal exposure and unwanted data telemetry. |
| **Licensing** | **Permissive Apache 2.0**<br>Business-friendly, no "poison pill" clauses. | **Restrictive AGPL v3**<br>Risk of license traps and intellectual property pollution. |
| **Compatibility** | **100% S3 Compatible**<br>Works with any cloud provider or client, anywhere. | **Variable Compatibility**<br>May lack support for local cloud vendors or specific APIs. |
| **Edge & IoT** | **Strong Edge Support**<br>Ideal for secure, innovative edge devices. | **Weak Edge Support**<br>Often too heavy for edge gateways. |
| **Risk Profile** | **Enterprise Risk Mitigation**<br>Clear IP rights and safe for commercial use. | **Legal Risks**<br>Intellectual property ambiguity and usage restrictions. |

## Quickstart

To get started with RustFS, follow these steps:

1. **One-click installation script (Option 1)**
### 1. One-click Installation (Option 1)

```bash
curl -O https://rustfs.com/install_rustfs.sh && bash install_rustfs.sh
```
curl -O https://rustfs.com/install_rustfs.sh && bash install_rustfs.sh
```

2. **Docker Quick Start (Option 2)**
### 2\. Docker Quick Start (Option 2)

RustFS container run as non-root user `rustfs` with id `1000`, if you run docker with `-v` to mount host directory into docker container, please make sure the owner of host directory has been changed to `1000`, otherwise you will encounter permission denied error.
The RustFS container runs as a non-root user `rustfs` (UID `10001`). If you run Docker with `-v` to mount a host directory, please ensure the host directory owner is set to `10001`, otherwise you will encounter permission denied errors.

```bash
# create data and logs directories
mkdir -p data logs
```bash
# Create data and logs directories
mkdir -p data logs

# change the owner of those two ditectories
chown -R 10001:10001 data logs
# Change the owner of these directories
chown -R 10001:10001 data logs

# using latest version
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:latest
# Using latest version
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:latest

# using specific version
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:1.0.0.alpha.68
```
# Using specific version
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:1.0.0-alpha.76
```

For docker installation, you can also run the container with docker compose. With the `docker-compose.yml` file under
root directory, running the command:
You can also use Docker Compose. Using the `docker-compose.yml` file in the root directory:

```
docker compose --profile observability up -d
```
```bash
docker compose --profile observability up -d
```

**NOTE**: You should be better to have a look for `docker-compose.yaml` file. Because, several services contains in the
file. Grafan,prometheus,jaeger containers will be launched using docker compose file, which is helpful for rustfs
observability. If you want to start redis as well as nginx container, you can specify the corresponding profiles.
**NOTE**: We recommend reviewing the `docker-compose.yaml` file before running. It defines several services including Grafana, Prometheus, and Jaeger, which are helpful for RustFS observability. If you wish to start Redis or Nginx containers, you can specify the corresponding profiles.

3. **Build from Source (Option 3) - Advanced Users**
### 3\. Build from Source (Option 3) - Advanced Users

For developers who want to build RustFS Docker images from source with multi-architecture support:
For developers who want to build RustFS Docker images from source with multi-architecture support:

```bash
# Build multi-architecture images locally
./docker-buildx.sh --build-arg RELEASE=latest
```bash
# Build multi-architecture images locally
./docker-buildx.sh --build-arg RELEASE=latest

# Build and push to registry
./docker-buildx.sh --push
# Build and push to registry
./docker-buildx.sh --push

# Build specific version
./docker-buildx.sh --release v1.0.0 --push
# Build specific version
./docker-buildx.sh --release v1.0.0 --push

# Build for custom registry
./docker-buildx.sh --registry your-registry.com --namespace yourname --push
```
# Build for custom registry
./docker-buildx.sh --registry your-registry.com --namespace yourname --push
```

The `docker-buildx.sh` script supports:
- **Multi-architecture builds**: `linux/amd64`, `linux/arm64`
- **Automatic version detection**: Uses git tags or commit hashes
- **Registry flexibility**: Supports Docker Hub, GitHub Container Registry, etc.
- **Build optimization**: Includes caching and parallel builds
The `docker-buildx.sh` script supports:
\- **Multi-architecture builds**: `linux/amd64`, `linux/arm64`
\- **Automatic version detection**: Uses git tags or commit hashes
\- **Registry flexibility**: Supports Docker Hub, GitHub Container Registry, etc.
\- **Build optimization**: Includes caching and parallel builds

You can also use Make targets for convenience:
You can also use Make targets for convenience:

```bash
make docker-buildx # Build locally
make docker-buildx-push # Build and push
make docker-buildx-version VERSION=v1.0.0 # Build specific version
make help-docker # Show all Docker-related commands
```
```bash
make docker-buildx # Build locally
make docker-buildx-push # Build and push
make docker-buildx-version VERSION=v1.0.0 # Build specific version
make help-docker # Show all Docker-related commands
```

> **Heads-up (macOS cross-compilation)**: macOS keeps the default `ulimit -n` at 256, so `cargo zigbuild` or `./build-rustfs.sh --platform ...` may fail with `ProcessFdQuotaExceeded` when targeting Linux. The build script now tries to raise the limit automatically, but if you still see the warning, run `ulimit -n 4096` (or higher) in your shell before building.
> **Heads-up (macOS cross-compilation)**: macOS keeps the default `ulimit -n` at 256, so `cargo zigbuild` or `./build-rustfs.sh --platform ...` may fail with `ProcessFdQuotaExceeded` when targeting Linux. The build script attempts to raise the limit automatically, but if you still see the warning, run `ulimit -n 4096` (or higher) in your shell before building.

4. **Build with helm chart(Option 4) - Cloud Native environment**
### 4\. Build with Helm Chart (Option 4) - Cloud Native

Following the instructions on [helm chart README](./helm/README.md) to install RustFS on kubernetes cluster.
Follow the instructions in the [Helm Chart README](https://charts.rustfs.com/) to install RustFS on a Kubernetes cluster.

5. **Access the Console**: Open your web browser and navigate to `http://localhost:9000` to access the RustFS console,
default username and password is `rustfsadmin` .
6. **Create a Bucket**: Use the console to create a new bucket for your objects.
7. **Upload Objects**: You can upload files directly through the console or use S3-compatible APIs to interact with your
RustFS instance.
### 5\. Nix Flake (Option 5)

**NOTE**: If you want to access RustFS instance with `https`, you can refer
to [TLS configuration docs](https://docs.rustfs.com/integration/tls-configured.html).
If you have [Nix with flakes enabled](https://nixos.wiki/wiki/Flakes#Enable_flakes):

```bash
# Run directly without installing
nix run github:rustfs/rustfs

# Build the binary
nix build github:rustfs/rustfs
./result/bin/rustfs --help

# Or from a local checkout
nix build
nix run
```

-----

### Accessing RustFS

5. **Access the Console**: Open your web browser and navigate to `http://localhost:9001` to access the RustFS console.
* Default credentials: `rustfsadmin` / `rustfsadmin`
6. **Create a Bucket**: Use the console to create a new bucket for your objects.
7. **Upload Objects**: You can upload files directly through the console or use S3-compatible APIs/clients to interact with your RustFS instance.

**NOTE**: To access the RustFS instance via `https`, please refer to the [TLS Configuration Docs](https://docs.rustfs.com/integration/tls-configured.html).

## Documentation

For detailed documentation, including configuration options, API references, and advanced usage, please visit
our [Documentation](https://docs.rustfs.com).
For detailed documentation, including configuration options, API references, and advanced usage, please visit our [Documentation](https://docs.rustfs.com).

## Getting Help

If you have any questions or need assistance, you can:
If you have any questions or need assistance:

- Check the [FAQ](https://github.com/rustfs/rustfs/discussions/categories/q-a) for common issues and solutions.
- Join our [GitHub Discussions](https://github.com/rustfs/rustfs/discussions) to ask questions and share your
experiences.
- Open an issue on our [GitHub Issues](https://github.com/rustfs/rustfs/issues) page for bug reports or feature
requests.
- Check the [FAQ](https://github.com/rustfs/rustfs/discussions/categories/q-a) for common issues and solutions.
- Join our [GitHub Discussions](https://github.com/rustfs/rustfs/discussions) to ask questions and share your experiences.
- Open an issue on our [GitHub Issues](https://github.com/rustfs/rustfs/issues) page for bug reports or feature requests.

## Links

- [Documentation](https://docs.rustfs.com) - The manual you should read
- [Changelog](https://github.com/rustfs/rustfs/releases) - What we broke and fixed
- [GitHub Discussions](https://github.com/rustfs/rustfs/discussions) - Where the community lives
- [Documentation](https://docs.rustfs.com) - The manual you should read
- [Changelog](https://github.com/rustfs/rustfs/releases) - What we broke and fixed
- [GitHub Discussions](https://github.com/rustfs/rustfs/discussions) - Where the community lives

## Contact

- **Bugs**: [GitHub Issues](https://github.com/rustfs/rustfs/issues)
- **Business**: <hello@rustfs.com>
- **Jobs**: <jobs@rustfs.com>
- **General Discussion**: [GitHub Discussions](https://github.com/rustfs/rustfs/discussions)
- **Contributing**: [CONTRIBUTING.md](CONTRIBUTING.md)
- **Bugs**: [GitHub Issues](https://github.com/rustfs/rustfs/issues)
- **Business**: [hello@rustfs.com](mailto:hello@rustfs.com)
- **Jobs**: [jobs@rustfs.com](mailto:jobs@rustfs.com)
- **General Discussion**: [GitHub Discussions](https://github.com/rustfs/rustfs/discussions)
- **Contributing**: [CONTRIBUTING.md](CONTRIBUTING.md)

## Contributors

RustFS is a community-driven project, and we appreciate all contributions. Check out
the [Contributors](https://github.com/rustfs/rustfs/graphs/contributors) page to see the amazing people who have helped
make RustFS better.
RustFS is a community-driven project, and we appreciate all contributions. Check out the [Contributors](https://github.com/rustfs/rustfs/graphs/contributors) page to see the amazing people who have helped make RustFS better.

<a href="https://github.com/rustfs/rustfs/graphs/contributors">
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="Contributors"/>
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="Contributors" />
</a>

## Github Trending Top

🚀 RustFS is beloved by open-source enthusiasts and enterprise users worldwide, often appearing on the GitHub Trending
top charts.
🚀 RustFS is beloved by open-source enthusiasts and enterprise users worldwide, often appearing on the GitHub Trending top charts.

<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>

@@ -214,3 +230,4 @@ top charts.
[Apache 2.0](https://opensource.org/licenses/Apache-2.0)

**RustFS** is a trademark of RustFS, Inc. All other trademarks are the property of their respective owners.
256
README_ZH.md
256
README_ZH.md
@@ -1,185 +1,219 @@
|
||||
[](https://rustfs.com)
|
||||
|
||||
<p align="center">RustFS 是一个使用 Rust 构建的高性能分布式对象存储软件</p >
|
||||
<p align="center">RustFS 是一个基于 Rust 构建的高性能分布式对象存储系统。</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
|
||||
<a href="https://github.com/rustfs/rustfs/actions/workflows/docker.yml"><img alt="Build and Push Docker Images" src="https://github.com/rustfs/rustfs/actions/workflows/docker.yml/badge.svg" /></a>
|
||||
<img alt="GitHub commit activity" src="https://img.shields.io/github/commit-activity/m/rustfs/rustfs"/>
|
||||
<img alt="Github Last Commit" src="https://img.shields.io/github/last-commit/rustfs/rustfs"/>
|
||||
<a href="https://github.com/rustfs/rustfs/actions/workflows/docker.yml"><img alt="构建并推送 Docker 镜像" src="https://github.com/rustfs/rustfs/actions/workflows/docker.yml/badge.svg" /></a>
|
||||
<img alt="GitHub 提交活跃度" src="https://img.shields.io/github/commit-activity/m/rustfs/rustfs"/>
|
||||
<img alt="Github 最新提交" src="https://img.shields.io/github/last-commit/rustfs/rustfs"/>
|
||||
<a href="https://hellogithub.com/repository/rustfs/rustfs" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=b95bcb72bdc340b68f16fdf6790b7d5b&claim_uid=MsbvjYeLDKAH457&theme=small" alt="Featured|HelloGitHub" /></a>
|
||||
</p >
|
||||
</p>

<p align="center">
<a href="https://docs.rustfs.com/installation/">Quick Start</a>
· <a href="https://docs.rustfs.com/">Documentation</a>
· <a href="https://github.com/rustfs/rustfs/issues">Report a Bug</a>
· <a href="https://github.com/rustfs/rustfs/discussions">Community Discussions</a>
</p>

<p align="center">
<a href="https://github.com/rustfs/rustfs/blob/main/README.md">English</a> | 简体中文 |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=de">Deutsch</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=es">Español</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=fr">français</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ja">日本語</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ko">한국어</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=pt">Portuguese</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ru">Русский</a>
</p>

RustFS is a high-performance distributed object storage system built in Rust, one of the most loved programming languages among developers worldwide. RustFS combines the simplicity of MinIO with Rust's memory safety and performance, offers full S3 compatibility, is fully open source, and is optimized for data lake, AI, and big data workloads.

Unlike other storage systems, RustFS ships under the permissive, business-friendly Apache 2.0 license, avoiding the restrictions of the AGPL. With Rust as its foundation, RustFS delivers faster speed and safer distributed capabilities for next-generation object storage.

## Features and Status

- **High performance**: Built in Rust for top speed and resource efficiency.
- **Distributed architecture**: Scalable, fault-tolerant design for large-scale deployments.
- **S3 compatibility**: Integrates seamlessly with existing S3-compatible applications and tools.
- **Data lake support**: Optimized for high-throughput big data and AI workloads.
- **Fully open source**: Apache 2.0 licensed, encouraging community contributions and commercial use.
- **Easy to use**: Simple by design, easy to deploy and operate.

| Feature | Status | Feature | Status |
| :--- | :--- | :--- | :--- |
| **S3 core features** | ✅ Available | **Bitrot protection** | ✅ Available |
| **Upload / download** | ✅ Available | **Single-node mode** | ✅ Available |
| **Versioning** | ✅ Available | **Bucket replication** | ⚠️ Partially available |
| **Logging** | ✅ Available | **Lifecycle management** | 🚧 In testing |
| **Event notifications** | ✅ Available | **Distributed mode** | 🚧 In testing |
| **K8s Helm Chart** | ✅ Available | **OPA (policy engine)** | 🚧 In testing |

## RustFS vs MinIO Performance

**Stress-test environment:**

| Type | Spec | Notes |
|---------|-----------|----------------------------------------------------------|
| CPU | 2 cores | Intel Xeon (Sapphire Rapids) Platinum 8475B, 2.7/3.2 GHz |
| Memory | 4GB | |
| Network | 15Gbps | |
| Drives | 40GB x 4 | IOPS 3800 per drive |

<https://github.com/user-attachments/assets/2e4979b5-260c-4f2c-ac12-c87fd558072a>

### RustFS vs Other Object Storage

| Aspect | RustFS | Other object storage |
| :--- | :--- | :--- |
| **Console experience** | **Full-featured console**<br>Comprehensive management UI. | **Basic console**<br>Often too simple or missing key features. |
| **Language & safety** | **Built in Rust**<br>Memory safety by construction. | **Built in Go or C**<br>Potential GC pauses or memory leaks. |
| **Data sovereignty** | **No telemetry / full compliance**<br>Prevents unauthorized cross-border data transfer; compliant with GDPR (EU/UK), CCPA (US), and APPI (Japan). | **Potential risk**<br>Possible legal exposure and hidden telemetry. |
| **License** | **Permissive Apache 2.0**<br>Business-friendly, with no "poison pill" clauses. | **Restrictive AGPL v3**<br>License traps and IP-contamination risk. |
| **Compatibility** | **100% S3 compatible**<br>Works with any cloud provider or client and runs anywhere. | **Inconsistent compatibility**<br>S3 support that may miss local cloud vendors or specific APIs. |
| **Edge & IoT** | **Strong edge support**<br>Well suited to secure, innovative edge devices. | **Weak edge support**<br>Usually too heavy for edge gateways. |
| **Cost** | **Stable and free**<br>Free community support and stable commercial pricing. | **High cost**<br>1PiB can cost up to $250,000. |
| **Risk control** | **Enterprise-grade risk avoidance**<br>Clear intellectual property; safe for commercial use. | **Legal risk**<br>Ambiguous IP ownership and usage-restriction risk. |
## Quick Start

Follow these steps to get up and running with RustFS:

### 1. One-Click Install Script (Option 1)

```bash
curl -O https://rustfs.com/install_rustfs.sh && bash install_rustfs.sh
```

### 2. Docker Quick Start (Option 2)

The RustFS container runs as the non-root user `rustfs` (UID `10001`). If you mount a host directory with Docker's `-v` flag, make sure the host directory is owned by UID `10001`, or you will hit permission-denied errors.

```bash
# Create the data and log directories
mkdir -p data logs

# Change ownership of both directories
chown -R 10001:10001 data logs

# Run the latest version
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:latest

# Run a specific version
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:1.0.0.alpha.68
```

You can also use Docker Compose with the `docker-compose.yml` file in the repository root:

```bash
docker compose --profile observability up -d
```

**Note**: We recommend reviewing `docker-compose.yaml` before running it. The file defines several services besides RustFS, including Grafana, Prometheus, and Jaeger, which support RustFS observability. If you also want to start the Redis or Nginx containers, specify the corresponding profile with `--profile`.

### 3. Build from Source (Option 3) - Advanced Users

For developers who want to build multi-architecture RustFS Docker images from source:

```bash
# Build multi-architecture images locally
./docker-buildx.sh --build-arg RELEASE=latest

# Build and push to a registry
./docker-buildx.sh --push

# Build a specific version
./docker-buildx.sh --release v1.0.0 --push

# Build and push to a custom registry
./docker-buildx.sh --registry your-registry.com --namespace yourname --push
```

The `docker-buildx.sh` script supports:

- **Multi-architecture builds**: `linux/amd64`, `linux/arm64`
- **Automatic version detection**: uses git tags or commit hashes
- **Registry flexibility**: works with Docker Hub, GitHub Container Registry, and more
- **Build optimizations**: includes caching and parallel builds

For convenience, you can also use the Make targets:

```bash
make docker-buildx                          # Build locally
make docker-buildx-push                     # Build and push
make docker-buildx-version VERSION=v1.0.0   # Build a specific version
make help-docker                            # Show all Docker-related commands
```

> **Note (macOS cross-compilation)**: macOS defaults `ulimit -n` to 256, so cross-compiling Linux targets with `cargo zigbuild` or `./build-rustfs.sh --platform ...` can fail with `ProcessFdQuotaExceeded`. The build script tries to raise the limit automatically; if you still see the warning, run `ulimit -n 4096` (or higher) in your terminal before building.

### 4. Install with the Helm Chart (Option 4) - Cloud-Native Environments

Follow the instructions in the [Helm Chart README](https://charts.rustfs.com) to install RustFS on a Kubernetes cluster.

-----

### Accessing RustFS

5. **Open the console**: Point your browser at `http://localhost:9000` to reach the RustFS console.
   * Default username/password: `rustfsadmin` / `rustfsadmin`
6. **Create a bucket**: Use the console to create a new bucket for your objects.
7. **Upload objects**: Upload files directly through the console, or interact with your RustFS instance using any S3-compatible API or client (see the sketch below).

**Note**: If you want to access your RustFS instance over `https`, see the [TLS configuration docs](https://docs.rustfs.com/integration/tls-configured.html).
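
To verify S3 compatibility end to end, you can point any S3 SDK at the local endpoint. Below is a minimal, hedged sketch using the `aws-sdk-s3` crate; the bucket name `demo`, the region, and the default `rustfsadmin` credentials are illustrative assumptions, not part of the official documentation.

```rust
// Sketch: drive a local RustFS instance through the aws-sdk-s3 crate.
// Endpoint, credentials, and bucket name are assumptions for illustration.
use aws_sdk_s3::config::{BehaviorVersion, Credentials, Region};
use aws_sdk_s3::{Client, Config};

#[tokio::main]
async fn main() -> Result<(), aws_sdk_s3::Error> {
    let config = Config::builder()
        .behavior_version(BehaviorVersion::latest())
        .region(Region::new("us-east-1"))
        .endpoint_url("http://localhost:9000")
        .credentials_provider(Credentials::new("rustfsadmin", "rustfsadmin", None, None, "static"))
        .force_path_style(true) // path-style addressing suits a local endpoint
        .build();
    let client = Client::from_conf(config);

    // Create a bucket, then upload a small object.
    client.create_bucket().bucket("demo").send().await?;
    client
        .put_object()
        .bucket("demo")
        .key("hello.txt")
        .body(b"hello rustfs".to_vec().into())
        .send()
        .await?;
    Ok(())
}
```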

## Documentation

For detailed documentation, including configuration options, API references, and advanced usage, visit our [official documentation](https://docs.rustfs.com).

## Getting Help

If you have questions or need assistance:

- Check the [FAQ](https://github.com/rustfs/rustfs/discussions/categories/q-a) for common problems and solutions.
- Join our [GitHub Discussions](https://github.com/rustfs/rustfs/discussions) to ask questions and share your experience.
- File bug reports or feature requests on our [GitHub Issues](https://github.com/rustfs/rustfs/issues) page.

## Links

- [Official documentation](https://docs.rustfs.com) - the manual you should read
- [Changelog](https://github.com/rustfs/rustfs/releases) - release-by-release changes
- [Community Discussions](https://github.com/rustfs/rustfs/discussions) - where the community lives

## Contact

- **Bug reports**: [GitHub Issues](https://github.com/rustfs/rustfs/issues)
- **Business inquiries**: [hello@rustfs.com](mailto:hello@rustfs.com)
- **Jobs**: [jobs@rustfs.com](mailto:jobs@rustfs.com)
- **General discussion**: [GitHub Discussions](https://github.com/rustfs/rustfs/discussions)
- **Contributing**: [CONTRIBUTING.md](CONTRIBUTING.md)

## Contributors

RustFS is a community-driven project, and we appreciate every contribution. See the [contributors](https://github.com/rustfs/rustfs/graphs/contributors) page for the wonderful people who make RustFS better.

<a href="https://github.com/rustfs/rustfs/graphs/contributors">
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="Contributors" />
</a>

## GitHub Trending Top

🚀 RustFS is beloved by open-source enthusiasts and enterprise users worldwide, frequently topping the GitHub Trending charts.

<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>

## Star History

[![Star History Chart](https://api.star-history.com/svg?repos=rustfs/rustfs&type=Date)](https://www.star-history.com/#rustfs/rustfs&type=date&legend=top-left)

## License

[Apache 2.0](https://opensource.org/licenses/Apache-2.0)

**RustFS** is a trademark of RustFS, Inc. All other trademarks are the property of their respective owners.

13 SECURITY.md
@@ -2,8 +2,7 @@

## Supported Versions

Security updates are provided for the latest released version of this project.

| Version | Supported |
| ------- | ------------------ |

@@ -11,8 +10,10 @@ currently being supported with security updates.

## Reporting a Vulnerability

Please report security vulnerabilities **privately** via GitHub Security Advisories:

https://github.com/rustfs/rustfs/security/advisories/new

Do **not** open a public issue for security-sensitive bugs.

You can expect an initial response within a reasonable timeframe. Further updates will be provided as the report is triaged.

@@ -1,4 +1,4 @@
#!/bin/bash
#!/usr/bin/env bash

# RustFS Binary Build Script
# This script compiles RustFS binaries for different platforms and architectures

@@ -13,10 +13,12 @@ keywords = ["RustFS", "AHM", "health-management", "scanner", "Minio"]
categories = ["web-programming", "development-tools", "filesystem"]

[dependencies]
rustfs-config = { workspace = true }
rustfs-ecstore = { workspace = true }
rustfs-common = { workspace = true }
rustfs-filemeta = { workspace = true }
rustfs-madmin = { workspace = true }
rustfs-utils = { workspace = true }
tokio = { workspace = true, features = ["full"] }
tokio-util = { workspace = true }
tracing = { workspace = true }

@@ -90,7 +90,12 @@ impl HealChannelProcessor {

/// Process start request
async fn process_start_request(&self, request: HealChannelRequest) -> Result<()> {
info!("Processing heal start request: {} for bucket: {}", request.id, request.bucket);
info!(
"Processing heal start request: {} for bucket: {}/{}",
request.id,
request.bucket,
request.object_prefix.as_deref().unwrap_or("")
);

// Convert channel request to heal request
let heal_request = self.convert_to_heal_request(request.clone())?;
@@ -324,6 +329,14 @@ mod tests {
async fn list_objects_for_heal(&self, _bucket: &str, _prefix: &str) -> crate::Result<Vec<String>> {
Ok(vec![])
}
async fn list_objects_for_heal_page(
&self,
_bucket: &str,
_prefix: &str,
_continuation_token: Option<&str>,
) -> crate::Result<(Vec<String>, Option<String>, bool)> {
Ok((vec![], None, false))
}
async fn get_disk_for_resume(&self, _set_disk_id: &str) -> crate::Result<rustfs_ecstore::disk::DiskStore> {
Err(crate::Error::other("Not implemented in mock"))
}

@@ -256,84 +256,114 @@ impl ErasureSetHealer {
}
};

// 2. get objects to heal
let objects = self.storage.list_objects_for_heal(bucket, "").await?;
// 2. process objects with pagination to avoid loading all objects into memory
let mut continuation_token: Option<String> = None;
let mut global_obj_idx = 0usize;

// 3. continue from checkpoint
for (obj_idx, object) in objects.iter().enumerate().skip(*current_object_index) {
// check if already processed
if checkpoint_manager.get_checkpoint().await.processed_objects.contains(object) {
continue;
}

// update current object
resume_manager
.set_current_item(Some(bucket.to_string()), Some(object.clone()))
loop {
// Get one page of objects
let (objects, next_token, is_truncated) = self
.storage
.list_objects_for_heal_page(bucket, "", continuation_token.as_deref())
.await?;

// Check if object still exists before attempting heal
let object_exists = match self.storage.object_exists(bucket, object).await {
Ok(exists) => exists,
Err(e) => {
warn!("Failed to check existence of {}/{}: {}, marking as failed", bucket, object, e);
*failed_objects += 1;
checkpoint_manager.add_failed_object(object.clone()).await?;
*current_object_index = obj_idx + 1;
// Process objects in this page
for object in objects {
// Skip objects before the checkpoint
if global_obj_idx < *current_object_index {
global_obj_idx += 1;
continue;
}
};

if !object_exists {
info!(
target: "rustfs:ahm:heal_bucket_with_resume" ,"Object {}/{} no longer exists, skipping heal (likely deleted intentionally)",
bucket, object
);
checkpoint_manager.add_processed_object(object.clone()).await?;
*successful_objects += 1; // Treat as successful - object is gone as intended
*current_object_index = obj_idx + 1;
continue;
}

// heal object
let heal_opts = HealOpts {
scan_mode: HealScanMode::Normal,
remove: true,
recreate: true, // Keep recreate enabled for legitimate heal scenarios
..Default::default()
};

match self.storage.heal_object(bucket, object, None, &heal_opts).await {
Ok((_result, None)) => {
*successful_objects += 1;
checkpoint_manager.add_processed_object(object.clone()).await?;
info!("Successfully healed object {}/{}", bucket, object);
// check if already processed
if checkpoint_manager.get_checkpoint().await.processed_objects.contains(&object) {
global_obj_idx += 1;
continue;
}
Ok((_, Some(err))) => {
*failed_objects += 1;
checkpoint_manager.add_failed_object(object.clone()).await?;
warn!("Failed to heal object {}/{}: {}", bucket, object, err);
}
Err(err) => {
*failed_objects += 1;
checkpoint_manager.add_failed_object(object.clone()).await?;
warn!("Error healing object {}/{}: {}", bucket, object, err);
}
}

*processed_objects += 1;
*current_object_index = obj_idx + 1;

// check cancel status
if self.cancel_token.is_cancelled() {
info!("Heal task cancelled during object processing");
return Err(Error::TaskCancelled);
}

// save checkpoint periodically
if obj_idx % 100 == 0 {
checkpoint_manager
.update_position(bucket_index, *current_object_index)
// update current object
resume_manager
.set_current_item(Some(bucket.to_string()), Some(object.clone()))
.await?;

// Check if object still exists before attempting heal
let object_exists = match self.storage.object_exists(bucket, &object).await {
Ok(exists) => exists,
Err(e) => {
warn!("Failed to check existence of {}/{}: {}, marking as failed", bucket, object, e);
*failed_objects += 1;
checkpoint_manager.add_failed_object(object.clone()).await?;
global_obj_idx += 1;
*current_object_index = global_obj_idx;
continue;
}
};

if !object_exists {
info!(
target: "rustfs:ahm:heal_bucket_with_resume" ,"Object {}/{} no longer exists, skipping heal (likely deleted intentionally)",
bucket, object
);
checkpoint_manager.add_processed_object(object.clone()).await?;
*successful_objects += 1; // Treat as successful - object is gone as intended
global_obj_idx += 1;
*current_object_index = global_obj_idx;
continue;
}

// heal object
let heal_opts = HealOpts {
scan_mode: HealScanMode::Normal,
remove: true,
recreate: true, // Keep recreate enabled for legitimate heal scenarios
..Default::default()
};

match self.storage.heal_object(bucket, &object, None, &heal_opts).await {
Ok((_result, None)) => {
*successful_objects += 1;
checkpoint_manager.add_processed_object(object.clone()).await?;
info!("Successfully healed object {}/{}", bucket, object);
}
Ok((_, Some(err))) => {
*failed_objects += 1;
checkpoint_manager.add_failed_object(object.clone()).await?;
warn!("Failed to heal object {}/{}: {}", bucket, object, err);
}
Err(err) => {
*failed_objects += 1;
checkpoint_manager.add_failed_object(object.clone()).await?;
warn!("Error healing object {}/{}: {}", bucket, object, err);
}
}

*processed_objects += 1;
global_obj_idx += 1;
*current_object_index = global_obj_idx;

// check cancel status
if self.cancel_token.is_cancelled() {
info!("Heal task cancelled during object processing");
return Err(Error::TaskCancelled);
}

// save checkpoint periodically
if global_obj_idx % 100 == 0 {
checkpoint_manager
.update_position(bucket_index, *current_object_index)
.await?;
}
}

// Check if there are more pages
if !is_truncated {
break;
}

continuation_token = next_token;
if continuation_token.is_none() {
warn!("List is truncated but no continuation token provided for {}", bucket);
break;
}
}

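Stripped of the surrounding bookkeeping, the new control flow above swaps one bulk listing for a page-draining loop plus a monotonically increasing global index, which is what makes mid-bucket resume possible. A self-contained sketch of the pattern, with `list_page`, `heal_one`, and `save_checkpoint` as illustrative stand-ins for the real storage and checkpoint calls:

```rust
// Sketch of the paginated, checkpointed heal loop shown in the diff above.
// `list_page`, `heal_one`, and `save_checkpoint` are illustrative stubs.
type Result<T> = std::result::Result<T, Box<dyn std::error::Error>>;

async fn list_page(_bucket: &str, _token: Option<&str>) -> Result<(Vec<String>, Option<String>, bool)> {
    Ok((vec![], None, false)) // stub: a single, final, empty page
}
async fn heal_one(_bucket: &str, _object: &str) -> Result<()> { Ok(()) }
async fn save_checkpoint(_bucket: &str, _idx: usize) -> Result<()> { Ok(()) }

async fn heal_bucket(bucket: &str, mut resume_at: usize) -> Result<()> {
    let mut token: Option<String> = None;
    let mut idx = 0usize;
    loop {
        // One page at a time keeps memory bounded for very large buckets.
        let (objects, next, truncated) = list_page(bucket, token.as_deref()).await?;
        for object in objects {
            if idx < resume_at {
                idx += 1; // skip work already covered by the checkpoint
                continue;
            }
            heal_one(bucket, &object).await?;
            idx += 1;
            resume_at = idx;
            if idx % 100 == 0 {
                save_checkpoint(bucket, idx).await?; // periodic durability
            }
        }
        if !truncated {
            break; // final page reached
        }
        token = next;
        if token.is_none() {
            break; // defensive: truncated listing without a continuation token
        }
    }
    Ok(())
}
```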
@@ -399,16 +429,12 @@ impl ErasureSetHealer {

}
};

// 2. get objects to heal
let objects = storage.list_objects_for_heal(bucket, "").await?;
// 2. process objects with pagination to avoid loading all objects into memory
let mut continuation_token: Option<String> = None;
let mut total_scanned = 0u64;
let mut total_success = 0u64;
let mut total_failed = 0u64;

// 3. update progress
{
let mut p = progress.write().await;
p.objects_scanned += objects.len() as u64;
}

// 4. heal objects concurrently
let heal_opts = HealOpts {
scan_mode: HealScanMode::Normal,
remove: true, // remove corrupted data
@@ -416,27 +442,65 @@
..Default::default()
};

let object_results = Self::heal_objects_concurrently(storage, bucket, &objects, &heal_opts, progress).await;
loop {
// Get one page of objects
let (objects, next_token, is_truncated) = storage
.list_objects_for_heal_page(bucket, "", continuation_token.as_deref())
.await?;

// 5. count results
let (success_count, failure_count) = object_results
.into_iter()
.fold((0, 0), |(success, failure), result| match result {
Ok(_) => (success + 1, failure),
Err(_) => (success, failure + 1),
});
let page_count = objects.len() as u64;
total_scanned += page_count;

// 6. update progress
// 3. update progress
{
let mut p = progress.write().await;
p.objects_scanned = total_scanned;
}

// 4. heal objects concurrently for this page
let object_results = Self::heal_objects_concurrently(storage, bucket, &objects, &heal_opts, progress).await;

// 5. count results for this page
let (success_count, failure_count) =
object_results
.into_iter()
.fold((0, 0), |(success, failure), result| match result {
Ok(_) => (success + 1, failure),
Err(_) => (success, failure + 1),
});

total_success += success_count;
total_failed += failure_count;

// 6. update progress
{
let mut p = progress.write().await;
p.objects_healed = total_success;
p.objects_failed = total_failed;
p.set_current_object(Some(format!("processing bucket: {bucket} (page)")));
}

// Check if there are more pages
if !is_truncated {
break;
}

continuation_token = next_token;
if continuation_token.is_none() {
warn!("List is truncated but no continuation token provided for {}", bucket);
break;
}
}

// 7. final progress update
{
let mut p = progress.write().await;
p.objects_healed += success_count;
p.objects_failed += failure_count;
p.set_current_object(Some(format!("completed bucket: {bucket}")));
}

info!(
"Completed heal for bucket {}: {} success, {} failures",
bucket, success_count, failure_count
"Completed heal for bucket {}: {} success, {} failures (total scanned: {})",
bucket, total_success, total_failed, total_scanned
);

Ok(())

@@ -143,16 +143,16 @@ impl PriorityHealQueue {
format!("object:{}:{}:{}", bucket, object, version_id.as_deref().unwrap_or(""))
}
HealType::Bucket { bucket } => {
format!("bucket:{}", bucket)
format!("bucket:{bucket}")
}
HealType::ErasureSet { set_disk_id, .. } => {
format!("erasure_set:{}", set_disk_id)
format!("erasure_set:{set_disk_id}")
}
HealType::Metadata { bucket, object } => {
format!("metadata:{}:{}", bucket, object)
format!("metadata:{bucket}:{object}")
}
HealType::MRF { meta_path } => {
format!("mrf:{}", meta_path)
format!("mrf:{meta_path}")
}
HealType::ECDecode {
bucket,
@@ -173,7 +173,7 @@

/// Check if an erasure set heal request for a specific set_disk_id exists
fn contains_erasure_set(&self, set_disk_id: &str) -> bool {
let key = format!("erasure_set:{}", set_disk_id);
let key = format!("erasure_set:{set_disk_id}");
self.dedup_keys.contains(&key)
}
}
@@ -195,12 +195,28 @@ pub struct HealConfig {

impl Default for HealConfig {
fn default() -> Self {
let queue_size: usize =
rustfs_utils::get_env_usize(rustfs_config::ENV_HEAL_QUEUE_SIZE, rustfs_config::DEFAULT_HEAL_QUEUE_SIZE);
let heal_interval = Duration::from_secs(rustfs_utils::get_env_u64(
rustfs_config::ENV_HEAL_INTERVAL_SECS,
rustfs_config::DEFAULT_HEAL_INTERVAL_SECS,
));
let enable_auto_heal =
rustfs_utils::get_env_bool(rustfs_config::ENV_HEAL_AUTO_HEAL_ENABLE, rustfs_config::DEFAULT_HEAL_AUTO_HEAL_ENABLE);
let task_timeout = Duration::from_secs(rustfs_utils::get_env_u64(
rustfs_config::ENV_HEAL_TASK_TIMEOUT_SECS,
rustfs_config::DEFAULT_HEAL_TASK_TIMEOUT_SECS,
));
let max_concurrent_heals = rustfs_utils::get_env_usize(
rustfs_config::ENV_HEAL_MAX_CONCURRENT_HEALS,
rustfs_config::DEFAULT_HEAL_MAX_CONCURRENT_HEALS,
);
Self {
enable_auto_heal: true,
heal_interval: Duration::from_secs(10), // 10 seconds
max_concurrent_heals: 4,
task_timeout: Duration::from_secs(300), // 5 minutes
queue_size: 1000,
enable_auto_heal,
heal_interval, // 10 seconds
max_concurrent_heals, // max 4,
task_timeout, // 5 minutes
queue_size,
}
}
}

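The rewritten `Default` impl above sources every knob from the environment with a compiled-in fallback instead of hard-coding values. A standalone sketch of that lookup pattern; the `RUSTFS_HEAL_INTERVAL_SECS` variable name here is a hypothetical stand-in for the key defined in `rustfs_config`:

```rust
use std::time::Duration;

// Illustrative helper mirroring the get_env_u64-style lookups used above.
fn env_u64(key: &str, default: u64) -> u64 {
    std::env::var(key).ok().and_then(|v| v.parse().ok()).unwrap_or(default)
}

fn main() {
    // Hypothetical variable name; the real key lives in rustfs_config.
    let heal_interval = Duration::from_secs(env_u64("RUSTFS_HEAL_INTERVAL_SECS", 10));
    println!("heal interval: {heal_interval:?}");
}
```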
@@ -270,7 +286,7 @@ impl HealManager {
// start scheduler
self.start_scheduler().await?;

// start auto disk scanner
// start auto disk scanner to heal unformatted disks
self.start_auto_disk_scanner().await?;

info!("HealManager started successfully");
@@ -311,7 +327,7 @@ impl HealManager {

if queue_len >= queue_capacity {
return Err(Error::ConfigurationError {
message: format!("Heal queue is full ({}/{})", queue_len, queue_capacity),
message: format!("Heal queue is full ({queue_len}/{queue_capacity})"),
});
}

@@ -452,14 +468,22 @@ impl HealManager {
let active_heals = self.active_heals.clone();
let cancel_token = self.cancel_token.clone();
let storage = self.storage.clone();
let mut duration = {
let config = config.read().await;
config.heal_interval
};
if duration < Duration::from_secs(1) {
duration = Duration::from_secs(1);
}
info!("start_auto_disk_scanner: Starting auto disk scanner with interval: {:?}", duration);

tokio::spawn(async move {
let mut interval = interval(config.read().await.heal_interval);
let mut interval = interval(duration);

loop {
tokio::select! {
_ = cancel_token.cancelled() => {
info!("Auto disk scanner received shutdown signal");
info!("start_auto_disk_scanner: Auto disk scanner received shutdown signal");
break;
}
_ = interval.tick() => {
@@ -478,6 +502,7 @@ impl HealManager {
}

if endpoints.is_empty() {
info!("start_auto_disk_scanner: No endpoints need healing");
continue;
}

@@ -485,7 +510,7 @@ impl HealManager {
let buckets = match storage.list_buckets().await {
Ok(buckets) => buckets.iter().map(|b| b.name.clone()).collect::<Vec<String>>(),
Err(e) => {
error!("Failed to get bucket list for auto healing: {}", e);
error!("start_auto_disk_scanner: Failed to get bucket list for auto healing: {}", e);
continue;
}
};
@@ -495,7 +520,7 @@ impl HealManager {
let Some(set_disk_id) =
crate::heal::utils::format_set_disk_id_from_i32(ep.pool_idx, ep.set_idx)
else {
warn!("Skipping endpoint {} without valid pool/set index", ep);
warn!("start_auto_disk_scanner: Skipping endpoint {} without valid pool/set index", ep);
continue;
};
// skip if already queued or healing
@@ -521,6 +546,7 @@ impl HealManager {
}

if skip {
info!("start_auto_disk_scanner: Skipping auto erasure set heal for endpoint: {} (set_disk_id: {}) because it is already queued or healing", ep, set_disk_id);
continue;
}

@@ -535,7 +561,7 @@ impl HealManager {
);
let mut queue = heal_queue.lock().await;
queue.push(req);
info!("Enqueued auto erasure set heal for endpoint: {} (set_disk_id: {})", ep, set_disk_id);
info!("start_auto_disk_scanner: Enqueued auto erasure set heal for endpoint: {} (set_disk_id: {})", ep, set_disk_id);
}
}
}

@@ -107,9 +107,21 @@ pub trait HealStorageAPI: Send + Sync {
/// Heal format using ecstore
async fn heal_format(&self, dry_run: bool) -> Result<(HealResultItem, Option<Error>)>;

/// List objects for healing
/// List objects for healing (returns all objects, may use significant memory for large buckets)
///
/// WARNING: This method loads all objects into memory at once. For buckets with many objects,
/// consider using `list_objects_for_heal_page` instead to process objects in pages.
async fn list_objects_for_heal(&self, bucket: &str, prefix: &str) -> Result<Vec<String>>;

/// List objects for healing with pagination (returns one page and continuation token)
/// Returns (objects, next_continuation_token, is_truncated)
async fn list_objects_for_heal_page(
&self,
bucket: &str,
prefix: &str,
continuation_token: Option<&str>,
) -> Result<(Vec<String>, Option<String>, bool)>;

/// Get disk for resume functionality
async fn get_disk_for_resume(&self, set_disk_id: &str) -> Result<DiskStore>;
}

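For callers, the new paginated method changes the calling convention from "fetch everything" to "drain pages until `is_truncated` is false". A hedged sketch of a generic consumer over the trait above; the helper name `for_each_object` is an illustration, not part of the API:

```rust
// Sketch: drain list_objects_for_heal_page without materializing the whole bucket.
// Assumes `storage` is any implementation of the HealStorageAPI trait above.
async fn for_each_object<F>(storage: &dyn HealStorageAPI, bucket: &str, mut f: F) -> Result<()>
where
    F: FnMut(&str),
{
    let mut token: Option<String> = None;
    loop {
        let (page, next, truncated) = storage
            .list_objects_for_heal_page(bucket, "", token.as_deref())
            .await?;
        page.iter().for_each(|o| f(o));
        if !truncated {
            return Ok(()); // final page reached
        }
        token = next;
        if token.is_none() {
            return Ok(()); // defensive: no continuation token despite truncation
        }
    }
}
```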
@@ -493,24 +505,67 @@ impl HealStorageAPI for ECStoreHealStorage {

async fn list_objects_for_heal(&self, bucket: &str, prefix: &str) -> Result<Vec<String>> {
debug!("Listing objects for heal: {}/{}", bucket, prefix);
warn!(
"list_objects_for_heal loads all objects into memory. For large buckets, consider using list_objects_for_heal_page instead."
);

// Use list_objects_v2 to get objects
match self
.ecstore
.clone()
.list_objects_v2(bucket, prefix, None, None, 1000, false, None, false)
.await
{
Ok(list_info) => {
let objects: Vec<String> = list_info.objects.into_iter().map(|obj| obj.name).collect();
info!("Found {} objects for heal in {}/{}", objects.len(), bucket, prefix);
Ok(objects)
let mut all_objects = Vec::new();
let mut continuation_token: Option<String> = None;

loop {
let (page_objects, next_token, is_truncated) = self
.list_objects_for_heal_page(bucket, prefix, continuation_token.as_deref())
.await?;

all_objects.extend(page_objects);

if !is_truncated {
break;
}
Err(e) => {
error!("Failed to list objects for heal: {}/{} - {}", bucket, prefix, e);
Err(Error::other(e))

continuation_token = next_token;
if continuation_token.is_none() {
warn!("List is truncated but no continuation token provided for {}/{}", bucket, prefix);
break;
}
}

info!("Found {} objects for heal in {}/{}", all_objects.len(), bucket, prefix);
Ok(all_objects)
}

async fn list_objects_for_heal_page(
&self,
bucket: &str,
prefix: &str,
continuation_token: Option<&str>,
) -> Result<(Vec<String>, Option<String>, bool)> {
debug!("Listing objects for heal (page): {}/{}", bucket, prefix);

const MAX_KEYS: i32 = 1000;
let continuation_token_opt = continuation_token.map(|s| s.to_string());

// Use list_objects_v2 to get objects with pagination
let list_info = match self
.ecstore
.clone()
.list_objects_v2(bucket, prefix, continuation_token_opt, None, MAX_KEYS, false, None, false)
.await
{
Ok(info) => info,
Err(e) => {
error!("Failed to list objects for heal: {}/{} - {}", bucket, prefix, e);
return Err(Error::other(e));
}
};

// Collect objects from this page
let page_objects: Vec<String> = list_info.objects.into_iter().map(|obj| obj.name).collect();
let page_count = page_objects.len();

debug!("Listed {} objects (page) for heal in {}/{}", page_count, bucket, prefix);

Ok((page_objects, list_info.next_continuation_token, list_info.is_truncated))
}

async fn get_disk_for_resume(&self, set_disk_id: &str) -> Result<DiskStore> {

@@ -29,8 +29,8 @@ use rustfs_ecstore::{
self as ecstore, StorageAPI,
bucket::versioning::VersioningApi,
bucket::versioning_sys::BucketVersioningSys,
data_usage::{aggregate_local_snapshots, store_data_usage_in_backend},
disk::{Disk, DiskAPI, DiskStore, RUSTFS_META_BUCKET, WalkDirOptions},
data_usage::{aggregate_local_snapshots, compute_bucket_usage, store_data_usage_in_backend},
disk::{DiskAPI, DiskStore, RUSTFS_META_BUCKET, WalkDirOptions},
set_disk::SetDisks,
store_api::ObjectInfo,
};
@@ -137,6 +137,8 @@ pub struct Scanner {
data_usage_stats: Arc<Mutex<HashMap<String, DataUsageInfo>>>,
/// Last data usage statistics collection time
last_data_usage_collection: Arc<RwLock<Option<SystemTime>>>,
/// Backoff timestamp for heavy fallback collection
fallback_backoff_until: Arc<RwLock<Option<SystemTime>>>,
/// Heal manager for auto-heal integration
heal_manager: Option<Arc<HealManager>>,

@@ -192,6 +194,7 @@ impl Scanner {
disk_metrics: Arc::new(Mutex::new(HashMap::new())),
data_usage_stats: Arc::new(Mutex::new(HashMap::new())),
last_data_usage_collection: Arc::new(RwLock::new(None)),
fallback_backoff_until: Arc::new(RwLock::new(None)),
heal_manager,
node_scanner,
stats_aggregator,
@@ -473,6 +476,8 @@ impl Scanner {
size: usage.total_size as i64,
delete_marker: !usage.has_live_object && usage.delete_markers_count > 0,
mod_time: usage.last_modified_ns.and_then(Self::ns_to_offset_datetime),
// Set is_latest to true for live objects - required for lifecycle expiration evaluation
is_latest: usage.has_live_object,
..Default::default()
}
}
@@ -600,6 +605,7 @@ impl Scanner {

// Initialize and start the node scanner
self.node_scanner.initialize_stats().await?;
// update object count and size for each bucket
self.node_scanner.start().await?;

// Set local stats in aggregator
@@ -614,21 +620,6 @@ impl Scanner {
}
});

// Trigger an immediate data usage collection so that admin APIs have fresh data after startup.
let scanner = self.clone_for_background();
tokio::spawn(async move {
let enable_stats = {
let cfg = scanner.config.read().await;
cfg.enable_data_usage_stats
};

if enable_stats {
if let Err(e) = scanner.collect_and_persist_data_usage().await {
warn!("Initial data usage collection failed: {}", e);
}
}
});

Ok(())
}

@@ -893,6 +884,7 @@ impl Scanner {
/// Collect and persist data usage statistics
async fn collect_and_persist_data_usage(&self) -> Result<()> {
info!("Starting data usage collection and persistence");
let now = SystemTime::now();

// Get ECStore instance
let Some(ecstore) = rustfs_ecstore::new_object_layer_fn() else {
@@ -900,6 +892,10 @@ impl Scanner {
return Ok(());
};

// Helper to avoid hammering the storage layer with repeated realtime scans.
let mut use_cached_on_backoff = false;
let fallback_backoff_secs = Duration::from_secs(300);

// Run local usage scan and aggregate snapshots; fall back to on-demand build when necessary.
let mut data_usage = match local_scan::scan_and_persist_local_usage(ecstore.clone()).await {
Ok(outcome) => {
@@ -921,16 +917,55 @@ impl Scanner {
"Failed to aggregate local data usage snapshots, falling back to realtime collection: {}",
e
);
self.build_data_usage_from_ecstore(&ecstore).await?
match self.maybe_fallback_collection(now, fallback_backoff_secs, &ecstore).await? {
Some(usage) => usage,
None => {
use_cached_on_backoff = true;
DataUsageInfo::default()
}
}
}
}
}
Err(e) => {
warn!("Local usage scan failed (using realtime collection instead): {}", e);
self.build_data_usage_from_ecstore(&ecstore).await?
match self.maybe_fallback_collection(now, fallback_backoff_secs, &ecstore).await? {
Some(usage) => usage,
None => {
use_cached_on_backoff = true;
DataUsageInfo::default()
}
}
}
};

// If heavy fallback was skipped due to backoff, try to reuse cached stats to avoid empty responses.
if use_cached_on_backoff && data_usage.buckets_usage.is_empty() {
let cached = {
let guard = self.data_usage_stats.lock().await;
guard.values().next().cloned()
};
if let Some(cached_usage) = cached {
data_usage = cached_usage;
}

// If there is still no data, try backend before persisting zeros
if data_usage.buckets_usage.is_empty() {
if let Ok(existing) = rustfs_ecstore::data_usage::load_data_usage_from_backend(ecstore.clone()).await {
if !existing.buckets_usage.is_empty() {
info!("Using existing backend data usage during fallback backoff");
data_usage = existing;
}
}
}

// Avoid overwriting valid backend stats with zeros when fallback is throttled
if data_usage.buckets_usage.is_empty() {
warn!("Skipping data usage persistence: fallback throttled and no cached/backend data available");
return Ok(());
}
}

// Make sure bucket counters reflect aggregated content
data_usage.buckets_count = data_usage.buckets_usage.len() as u64;
if data_usage.last_update.is_none() {
@@ -973,8 +1008,31 @@ impl Scanner {
Ok(())
}

async fn maybe_fallback_collection(
&self,
now: SystemTime,
backoff: Duration,
ecstore: &Arc<rustfs_ecstore::store::ECStore>,
) -> Result<Option<DataUsageInfo>> {
let backoff_until = *self.fallback_backoff_until.read().await;
let within_backoff = backoff_until.map(|ts| now < ts).unwrap_or(false);

if within_backoff {
warn!(
"Skipping heavy data usage fallback within backoff window (until {:?}); using cached stats if available",
backoff_until
);
return Ok(None);
}

let usage = self.build_data_usage_from_ecstore(ecstore).await?;
let mut backoff_guard = self.fallback_backoff_until.write().await;
*backoff_guard = Some(now + backoff);
Ok(Some(usage))
}

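`maybe_fallback_collection` is effectively a time-window gate: the heavy rebuild runs at most once per backoff window, and callers fall back to cached or backend data in between. The same idea in isolation, as a toy `BackoffGate`; like the code above, it tolerates a benign race between the read and the write:

```rust
use std::time::{Duration, Instant};
use tokio::sync::RwLock;

// Toy gate: `try_acquire` succeeds at most once per `window`.
struct BackoffGate {
    until: RwLock<Option<Instant>>,
}

impl BackoffGate {
    async fn try_acquire(&self, window: Duration) -> bool {
        let now = Instant::now();
        let until = *self.until.read().await; // copy the Option<Instant> out of the lock
        if until.map(|t| now < t).unwrap_or(false) {
            return false; // still inside the backoff window: reuse cached data
        }
        *self.until.write().await = Some(now + window);
        true // caller may run the expensive collection
    }
}
```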
/// Build data usage statistics directly from ECStore
async fn build_data_usage_from_ecstore(&self, ecstore: &Arc<rustfs_ecstore::store::ECStore>) -> Result<DataUsageInfo> {
pub async fn build_data_usage_from_ecstore(&self, ecstore: &Arc<rustfs_ecstore::store::ECStore>) -> Result<DataUsageInfo> {
let mut data_usage = DataUsageInfo::default();

// Get bucket list
@@ -987,6 +1045,8 @@ impl Scanner {
data_usage.last_update = Some(SystemTime::now());

let mut total_objects = 0u64;
let mut total_versions = 0u64;
let mut total_delete_markers = 0u64;
let mut total_size = 0u64;

for bucket_info in buckets {
@@ -994,37 +1054,26 @@ impl Scanner {
continue; // Skip system buckets
}

// Try to get actual object count for this bucket
let (object_count, bucket_size) = match ecstore
.clone()
.list_objects_v2(
&bucket_info.name,
"", // prefix
None, // continuation_token
None, // delimiter
100, // max_keys - small limit for performance
false, // fetch_owner
None, // start_after
false, // incl_deleted
)
.await
{
Ok(result) => {
let count = result.objects.len() as u64;
let size = result.objects.iter().map(|obj| obj.size as u64).sum();
(count, size)
}
Err(_) => (0, 0),
};
// Use ecstore pagination helper to avoid truncating at 100 objects
let (object_count, bucket_size, versions_count, delete_markers) =
match compute_bucket_usage(ecstore.clone(), &bucket_info.name).await {
Ok(usage) => (usage.objects_count, usage.size, usage.versions_count, usage.delete_markers_count),
Err(e) => {
warn!("Failed to compute bucket usage for {}: {}", bucket_info.name, e);
(0, 0, 0, 0)
}
};

total_objects += object_count;
total_versions += versions_count;
total_delete_markers += delete_markers;
total_size += bucket_size;

let bucket_usage = rustfs_common::data_usage::BucketUsageInfo {
size: bucket_size,
objects_count: object_count,
versions_count: object_count, // Simplified
delete_markers_count: 0,
versions_count,
delete_markers_count: delete_markers,
..Default::default()
};

@@ -1034,7 +1083,8 @@ impl Scanner {

data_usage.objects_total_count = total_objects;
data_usage.objects_total_size = total_size;
data_usage.versions_total_count = total_objects;
data_usage.versions_total_count = total_versions;
data_usage.delete_markers_total_count = total_delete_markers;
}
Err(e) => {
warn!("Failed to list buckets for data usage collection: {}", e);
@@ -1927,7 +1977,7 @@ impl Scanner {
} else {
// Apply lifecycle actions
if let Some(lifecycle_config) = &lifecycle_config {
if let Disk::Local(_local_disk) = &**disk {
if disk.is_local() {
let vcfg = BucketVersioningSys::get(bucket).await.ok();

let mut scanner_item = ScannerItem {
@@ -2568,6 +2618,7 @@ impl Scanner {
disk_metrics: Arc::clone(&self.disk_metrics),
data_usage_stats: Arc::clone(&self.data_usage_stats),
last_data_usage_collection: Arc::clone(&self.last_data_usage_collection),
fallback_backoff_until: Arc::clone(&self.fallback_backoff_until),
heal_manager: self.heal_manager.clone(),
node_scanner: Arc::clone(&self.node_scanner),
stats_aggregator: Arc::clone(&self.stats_aggregator),

@@ -84,6 +84,9 @@ pub async fn scan_and_persist_local_usage(store: Arc<ECStore>) -> Result<LocalSc
guard.clone()
};

// Use the first local online disk in the set to avoid missing stats when disk 0 is down
let mut picked = false;

for (disk_index, disk_opt) in disks.into_iter().enumerate() {
let Some(disk) = disk_opt else {
continue;
@@ -93,11 +96,17 @@ pub async fn scan_and_persist_local_usage(store: Arc<ECStore>) -> Result<LocalSc
continue;
}

// Count objects once by scanning only disk index zero from each set.
if disk_index != 0 {
if picked {
continue;
}

// Skip offline disks; keep looking for an online candidate
if !disk.is_online().await {
continue;
}

picked = true;

let disk_id = match disk.get_disk_id().await.map_err(Error::from)? {
Some(id) => id.to_string(),
None => {

@@ -711,6 +711,7 @@ impl NodeScanner {
// start scanning loop
let scanner_clone = self.clone_for_background();
tokio::spawn(async move {
// update object count and size for each bucket
if let Err(e) = scanner_clone.scan_loop_with_resume(None).await {
error!("scanning loop failed: {}", e);
}

@@ -347,7 +347,8 @@ impl DecentralizedStatsAggregator {

// update cache
*self.cached_stats.write().await = Some(aggregated.clone());
*self.cache_timestamp.write().await = aggregation_timestamp;
// Use the time when aggregation completes as cache timestamp to avoid premature expiry during long runs
*self.cache_timestamp.write().await = SystemTime::now();

Ok(aggregated)
}
@@ -359,7 +360,8 @@ impl DecentralizedStatsAggregator {

// update cache
*self.cached_stats.write().await = Some(aggregated.clone());
*self.cache_timestamp.write().await = now;
// Cache timestamp should reflect completion time rather than aggregation start
*self.cache_timestamp.write().await = SystemTime::now();

Ok(aggregated)
}

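The two aggregator fixes above are the same one-line idea: stamp the cache when aggregation completes, not when it starts, so a slow aggregation cannot produce an entry that is already expired on arrival. A toy illustration of the corrected pattern:

```rust
use std::time::{Duration, Instant};

// Toy cache illustrating the timestamp fix: stamp on completion, not on start.
struct Cached<T> {
    value: Option<(T, Instant)>,
    ttl: Duration,
}

impl<T: Clone> Cached<T> {
    fn get(&self) -> Option<T> {
        self.value
            .as_ref()
            .filter(|(_, stamped)| stamped.elapsed() < self.ttl)
            .map(|(v, _)| v.clone())
    }

    fn put(&mut self, value: T) {
        // Stamping with the time a refresh *started* would let a long-running
        // refresh insert an entry that expires immediately.
        self.value = Some((value, Instant::now()));
    }
}
```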
112 crates/ahm/tests/data_usage_fallback_test.rs (new file)
@@ -0,0 +1,112 @@

// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![cfg(test)]

use rustfs_ahm::scanner::data_scanner::Scanner;
use rustfs_common::data_usage::DataUsageInfo;
use rustfs_ecstore::GLOBAL_Endpoints;
use rustfs_ecstore::bucket::metadata_sys::{BucketMetadataSys, GLOBAL_BucketMetadataSys};
use rustfs_ecstore::endpoints::EndpointServerPools;
use rustfs_ecstore::store::ECStore;
use rustfs_ecstore::store_api::{ObjectIO, PutObjReader, StorageAPI};
use std::sync::{Arc, Once};
use tempfile::TempDir;
use tokio::sync::RwLock;
use tokio_util::sync::CancellationToken;
use tracing::Level;

/// Build a minimal single-node ECStore over a temp directory and populate objects.
async fn create_store_with_objects(count: usize) -> (TempDir, std::sync::Arc<ECStore>) {
let temp_dir = TempDir::new().expect("temp dir");
let root = temp_dir.path().to_string_lossy().to_string();

// Create endpoints from the temp dir
let (endpoint_pools, _setup) = EndpointServerPools::from_volumes("127.0.0.1:0", vec![root])
.await
.expect("endpoint pools");

// Seed globals required by metadata sys if not already set
if GLOBAL_Endpoints.get().is_none() {
let _ = GLOBAL_Endpoints.set(endpoint_pools.clone());
}

let store = ECStore::new("127.0.0.1:0".parse().unwrap(), endpoint_pools, CancellationToken::new())
.await
.expect("create store");

if rustfs_ecstore::global::new_object_layer_fn().is_none() {
rustfs_ecstore::global::set_object_layer(store.clone()).await;
}

// Initialize metadata system before bucket operations
if GLOBAL_BucketMetadataSys.get().is_none() {
let mut sys = BucketMetadataSys::new(store.clone());
sys.init(Vec::new()).await;
let _ = GLOBAL_BucketMetadataSys.set(Arc::new(RwLock::new(sys)));
}

store
.make_bucket("fallback-bucket", &rustfs_ecstore::store_api::MakeBucketOptions::default())
.await
.expect("make bucket");

for i in 0..count {
let key = format!("obj-{i:04}");
let data = format!("payload-{i}");
let mut reader = PutObjReader::from_vec(data.into_bytes());
store
.put_object("fallback-bucket", &key, &mut reader, &rustfs_ecstore::store_api::ObjectOptions::default())
.await
.expect("put object");
}

(temp_dir, store)
}

static INIT: Once = Once::new();

fn init_tracing(filter_level: Level) {
INIT.call_once(|| {
let _ = tracing_subscriber::fmt()
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
.with_max_level(filter_level)
.with_timer(tracing_subscriber::fmt::time::UtcTime::rfc_3339())
.with_thread_names(true)
.try_init();
});
}

#[tokio::test]
async fn fallback_builds_full_counts_over_100_objects() {
init_tracing(Level::ERROR);
let (_tmp, store) = create_store_with_objects(1000).await;
let scanner = Scanner::new(None, None);

// Directly call the fallback builder to ensure pagination works.
let usage: DataUsageInfo = scanner.build_data_usage_from_ecstore(&store).await.expect("fallback usage");

let bucket = usage.buckets_usage.get("fallback-bucket").expect("bucket usage present");

assert!(
usage.objects_total_count >= 1000,
"total objects should be >=1000, got {}",
usage.objects_total_count
);
assert!(
bucket.objects_count >= 1000,
"bucket objects should be >=1000, got {}",
bucket.objects_count
);
}

@@ -244,6 +244,14 @@ fn test_heal_task_status_atomic_update() {

async fn list_objects_for_heal(&self, _bucket: &str, _prefix: &str) -> rustfs_ahm::Result<Vec<String>> {
Ok(vec![])
}
async fn list_objects_for_heal_page(
&self,
_bucket: &str,
_prefix: &str,
_continuation_token: Option<&str>,
) -> rustfs_ahm::Result<(Vec<String>, Option<String>, bool)> {
Ok((vec![], None, false))
}
async fn get_disk_for_resume(&self, _set_disk_id: &str) -> rustfs_ahm::Result<rustfs_ecstore::disk::DiskStore> {
Err(rustfs_ahm::Error::other("Not implemented in mock"))
}

@@ -38,9 +38,13 @@ use walkdir::WalkDir;

static GLOBAL_ENV: OnceLock<(Vec<PathBuf>, Arc<ECStore>, Arc<ECStoreHealStorage>)> = OnceLock::new();
static INIT: Once = Once::new();

fn init_tracing() {
pub fn init_tracing() {
INIT.call_once(|| {
let _ = tracing_subscriber::fmt::try_init();
let _ = tracing_subscriber::fmt()
.with_env_filter(tracing_subscriber::EnvFilter::from_default_env())
.with_timer(tracing_subscriber::fmt::time::UtcTime::rfc_3339())
.with_thread_names(true)
.try_init();
});
}

@@ -356,7 +360,7 @@ mod serial_tests {

// Create heal manager with faster interval
let cfg = HealConfig {
heal_interval: Duration::from_secs(2),
heal_interval: Duration::from_secs(1),
..Default::default()
};
let heal_manager = HealManager::new(heal_storage.clone(), Some(cfg));

@@ -8,7 +8,7 @@

<p align="center">
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
<a href="https://docs.rustfs.com/en/">📖 Documentation</a>
<a href="https://docs.rustfs.com/">📖 Documentation</a>
· <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
· <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
</p>

@@ -29,6 +29,7 @@ categories = ["web-programming", "development-tools", "asynchronous", "api-bindi

rustfs-targets = { workspace = true }
rustfs-config = { workspace = true, features = ["audit", "constants"] }
rustfs-ecstore = { workspace = true }
async-trait = { workspace = true }
chrono = { workspace = true }
const-str = { workspace = true }
futures = { workspace = true }

224 crates/audit/src/factory.rs (new file)
@@ -0,0 +1,224 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::AuditEntry;
|
||||
use async_trait::async_trait;
|
||||
use hashbrown::HashSet;
|
||||
use rumqttc::QoS;
|
||||
use rustfs_config::audit::{AUDIT_MQTT_KEYS, AUDIT_WEBHOOK_KEYS, ENV_AUDIT_MQTT_KEYS, ENV_AUDIT_WEBHOOK_KEYS};
|
||||
use rustfs_config::{
|
||||
AUDIT_DEFAULT_DIR, DEFAULT_LIMIT, MQTT_BROKER, MQTT_KEEP_ALIVE_INTERVAL, MQTT_PASSWORD, MQTT_QOS, MQTT_QUEUE_DIR,
|
||||
MQTT_QUEUE_LIMIT, MQTT_RECONNECT_INTERVAL, MQTT_TOPIC, MQTT_USERNAME, WEBHOOK_AUTH_TOKEN, WEBHOOK_CLIENT_CERT,
|
||||
WEBHOOK_CLIENT_KEY, WEBHOOK_ENDPOINT, WEBHOOK_QUEUE_DIR, WEBHOOK_QUEUE_LIMIT,
|
||||
};
|
||||
use rustfs_ecstore::config::KVS;
|
||||
use rustfs_targets::{
|
||||
Target,
|
||||
error::TargetError,
|
||||
target::{mqtt::MQTTArgs, webhook::WebhookArgs},
|
||||
};
|
||||
use std::time::Duration;
|
||||
use tracing::{debug, warn};
|
||||
use url::Url;
|
||||
|
||||
/// Trait for creating targets from configuration
#[async_trait]
pub trait TargetFactory: Send + Sync {
    /// Creates a target from configuration
    async fn create_target(&self, id: String, config: &KVS) -> Result<Box<dyn Target<AuditEntry> + Send + Sync>, TargetError>;

    /// Validates target configuration
    fn validate_config(&self, id: &str, config: &KVS) -> Result<(), TargetError>;

    /// Returns a set of valid configuration field names for this target type.
    /// This is used to filter environment variables.
    fn get_valid_fields(&self) -> HashSet<String>;

    /// Returns a set of valid configuration env field names for this target type.
    /// This is used to filter environment variables.
    fn get_valid_env_fields(&self) -> HashSet<String>;
}

/// Factory for creating Webhook targets
pub struct WebhookTargetFactory;

#[async_trait]
impl TargetFactory for WebhookTargetFactory {
    async fn create_target(&self, id: String, config: &KVS) -> Result<Box<dyn Target<AuditEntry> + Send + Sync>, TargetError> {
        // All config values are now read directly from the merged `config` KVS.
        let endpoint = config
            .lookup(WEBHOOK_ENDPOINT)
            .ok_or_else(|| TargetError::Configuration("Missing webhook endpoint".to_string()))?;
        let parsed_endpoint = endpoint.trim();
        let endpoint_url = Url::parse(parsed_endpoint)
            .map_err(|e| TargetError::Configuration(format!("Invalid endpoint URL: {e} (value: '{parsed_endpoint}')")))?;

        let args = WebhookArgs {
            enable: true, // If we are here, it's already enabled.
            endpoint: endpoint_url,
            auth_token: config.lookup(WEBHOOK_AUTH_TOKEN).unwrap_or_default(),
            queue_dir: config.lookup(WEBHOOK_QUEUE_DIR).unwrap_or(AUDIT_DEFAULT_DIR.to_string()),
            queue_limit: config
                .lookup(WEBHOOK_QUEUE_LIMIT)
                .and_then(|v| v.parse::<u64>().ok())
                .unwrap_or(DEFAULT_LIMIT),
            client_cert: config.lookup(WEBHOOK_CLIENT_CERT).unwrap_or_default(),
            client_key: config.lookup(WEBHOOK_CLIENT_KEY).unwrap_or_default(),
            target_type: rustfs_targets::target::TargetType::AuditLog,
        };

        let target = rustfs_targets::target::webhook::WebhookTarget::new(id, args)?;
        Ok(Box::new(target))
    }

    fn validate_config(&self, _id: &str, config: &KVS) -> Result<(), TargetError> {
        // Validation also uses the merged `config` KVS directly.
        let endpoint = config
            .lookup(WEBHOOK_ENDPOINT)
            .ok_or_else(|| TargetError::Configuration("Missing webhook endpoint".to_string()))?;
        debug!("endpoint: {}", endpoint);
        let parsed_endpoint = endpoint.trim();
        Url::parse(parsed_endpoint)
            .map_err(|e| TargetError::Configuration(format!("Invalid endpoint URL: {e} (value: '{parsed_endpoint}')")))?;

        let client_cert = config.lookup(WEBHOOK_CLIENT_CERT).unwrap_or_default();
        let client_key = config.lookup(WEBHOOK_CLIENT_KEY).unwrap_or_default();

        if client_cert.is_empty() != client_key.is_empty() {
            return Err(TargetError::Configuration(
                "Both client_cert and client_key must be specified together".to_string(),
            ));
        }

        let queue_dir = config.lookup(WEBHOOK_QUEUE_DIR).unwrap_or(AUDIT_DEFAULT_DIR.to_string());
        if !queue_dir.is_empty() && !std::path::Path::new(&queue_dir).is_absolute() {
            return Err(TargetError::Configuration("Webhook queue directory must be an absolute path".to_string()));
        }

        Ok(())
    }

    fn get_valid_fields(&self) -> HashSet<String> {
        AUDIT_WEBHOOK_KEYS.iter().map(|s| s.to_string()).collect()
    }

    fn get_valid_env_fields(&self) -> HashSet<String> {
        ENV_AUDIT_WEBHOOK_KEYS.iter().map(|s| s.to_string()).collect()
    }
}

/// Factory for creating MQTT targets
pub struct MQTTTargetFactory;

#[async_trait]
impl TargetFactory for MQTTTargetFactory {
    async fn create_target(&self, id: String, config: &KVS) -> Result<Box<dyn Target<AuditEntry> + Send + Sync>, TargetError> {
        let broker = config
            .lookup(MQTT_BROKER)
            .ok_or_else(|| TargetError::Configuration("Missing MQTT broker".to_string()))?;
        let broker_url = Url::parse(&broker)
            .map_err(|e| TargetError::Configuration(format!("Invalid broker URL: {e} (value: '{broker}')")))?;

        let topic = config
            .lookup(MQTT_TOPIC)
            .ok_or_else(|| TargetError::Configuration("Missing MQTT topic".to_string()))?;

        let args = MQTTArgs {
            enable: true, // Assumed enabled.
            broker: broker_url,
            topic,
            qos: config
                .lookup(MQTT_QOS)
                .and_then(|v| v.parse::<u8>().ok())
                .map(|q| match q {
                    0 => QoS::AtMostOnce,
                    1 => QoS::AtLeastOnce,
                    2 => QoS::ExactlyOnce,
                    _ => QoS::AtLeastOnce,
                })
                .unwrap_or(QoS::AtLeastOnce),
            username: config.lookup(MQTT_USERNAME).unwrap_or_default(),
            password: config.lookup(MQTT_PASSWORD).unwrap_or_default(),
            max_reconnect_interval: config
                .lookup(MQTT_RECONNECT_INTERVAL)
                .and_then(|v| v.parse::<u64>().ok())
                .map(Duration::from_secs)
                .unwrap_or_else(|| Duration::from_secs(5)),
            keep_alive: config
                .lookup(MQTT_KEEP_ALIVE_INTERVAL)
                .and_then(|v| v.parse::<u64>().ok())
                .map(Duration::from_secs)
                .unwrap_or_else(|| Duration::from_secs(30)),
            queue_dir: config.lookup(MQTT_QUEUE_DIR).unwrap_or(AUDIT_DEFAULT_DIR.to_string()),
            queue_limit: config
                .lookup(MQTT_QUEUE_LIMIT)
                .and_then(|v| v.parse::<u64>().ok())
                .unwrap_or(DEFAULT_LIMIT),
            target_type: rustfs_targets::target::TargetType::AuditLog,
        };

        let target = rustfs_targets::target::mqtt::MQTTTarget::new(id, args)?;
        Ok(Box::new(target))
    }

    fn validate_config(&self, _id: &str, config: &KVS) -> Result<(), TargetError> {
        let broker = config
            .lookup(MQTT_BROKER)
            .ok_or_else(|| TargetError::Configuration("Missing MQTT broker".to_string()))?;
        let url = Url::parse(&broker)
            .map_err(|e| TargetError::Configuration(format!("Invalid broker URL: {e} (value: '{broker}')")))?;

        match url.scheme() {
            "tcp" | "ssl" | "ws" | "wss" | "mqtt" | "mqtts" => {}
            _ => {
                return Err(TargetError::Configuration("Unsupported broker URL scheme".to_string()));
            }
        }

        if config.lookup(MQTT_TOPIC).is_none() {
            return Err(TargetError::Configuration("Missing MQTT topic".to_string()));
        }

        if let Some(qos_str) = config.lookup(MQTT_QOS) {
            let qos = qos_str
                .parse::<u8>()
                .map_err(|_| TargetError::Configuration("Invalid QoS value".to_string()))?;
            if qos > 2 {
                return Err(TargetError::Configuration("QoS must be 0, 1, or 2".to_string()));
            }
        }

        let queue_dir = config.lookup(MQTT_QUEUE_DIR).unwrap_or_default();
        if !queue_dir.is_empty() {
            if !std::path::Path::new(&queue_dir).is_absolute() {
                return Err(TargetError::Configuration("MQTT queue directory must be an absolute path".to_string()));
            }
            if let Some(qos_str) = config.lookup(MQTT_QOS) {
                if qos_str == "0" {
                    warn!("Using queue_dir with QoS 0 may result in event loss");
                }
            }
        }

        Ok(())
    }

    fn get_valid_fields(&self) -> HashSet<String> {
        AUDIT_MQTT_KEYS.iter().map(|s| s.to_string()).collect()
    }

    fn get_valid_env_fields(&self) -> HashSet<String> {
        ENV_AUDIT_MQTT_KEYS.iter().map(|s| s.to_string()).collect()
    }
}
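The new module above applies a classic factory pattern: each target kind ships a factory that validates its configuration before constructing the target, and the registry only ever talks to the trait. A minimal synchronous sketch of the same validate-then-create flow, using only the standard library (the `Kvs` alias, `WebhookFactory`, and the "endpoint" key are illustrative stand-ins, not this crate's API):

use std::collections::HashMap;

type Kvs = HashMap<String, String>;

trait Target {
    fn id(&self) -> &str;
}

struct Webhook {
    id: String,
    endpoint: String, // kept only to show config flowing into the target
}

impl Target for Webhook {
    fn id(&self) -> &str {
        &self.id
    }
}

trait TargetFactory {
    // Mirrors validate_config + create_target: reject bad config before building.
    fn validate(&self, cfg: &Kvs) -> Result<(), String>;
    fn create(&self, id: String, cfg: &Kvs) -> Result<Box<dyn Target>, String>;
}

struct WebhookFactory;

impl TargetFactory for WebhookFactory {
    fn validate(&self, cfg: &Kvs) -> Result<(), String> {
        cfg.get("endpoint")
            .filter(|s| !s.is_empty())
            .map(|_| ())
            .ok_or_else(|| "missing webhook endpoint".to_string())
    }

    fn create(&self, id: String, cfg: &Kvs) -> Result<Box<dyn Target>, String> {
        self.validate(cfg)?;
        Ok(Box::new(Webhook {
            id,
            endpoint: cfg["endpoint"].clone(),
        }))
    }
}

fn main() {
    // The registry maps a type name to its factory, as AuditRegistry::new does.
    let mut factories: HashMap<&str, Box<dyn TargetFactory>> = HashMap::new();
    factories.insert("webhook", Box::new(WebhookFactory));

    let mut cfg = Kvs::new();
    cfg.insert("endpoint".to_string(), "https://example.com/audit".to_string());

    let target = factories["webhook"].create("primary".to_string(), &cfg).unwrap();
    assert_eq!(target.id(), "primary");
}

Keeping validation on the trait (rather than inside `create_target` alone) lets the registry fail fast on bad config without paying for target construction.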
@@ -20,6 +20,7 @@

pub mod entity;
pub mod error;
pub mod factory;
pub mod global;
pub mod observability;
pub mod registry;
@@ -12,29 +12,26 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::{AuditEntry, AuditError, AuditResult};
use futures::{StreamExt, stream::FuturesUnordered};
use crate::{
    AuditEntry, AuditError, AuditResult,
    factory::{MQTTTargetFactory, TargetFactory, WebhookTargetFactory},
};
use futures::StreamExt;
use futures::stream::FuturesUnordered;
use hashbrown::{HashMap, HashSet};
use rustfs_config::{
    DEFAULT_DELIMITER, ENABLE_KEY, ENV_PREFIX, MQTT_BROKER, MQTT_KEEP_ALIVE_INTERVAL, MQTT_PASSWORD, MQTT_QOS, MQTT_QUEUE_DIR,
    MQTT_QUEUE_LIMIT, MQTT_RECONNECT_INTERVAL, MQTT_TOPIC, MQTT_USERNAME, WEBHOOK_AUTH_TOKEN, WEBHOOK_BATCH_SIZE,
    WEBHOOK_CLIENT_CERT, WEBHOOK_CLIENT_KEY, WEBHOOK_ENDPOINT, WEBHOOK_HTTP_TIMEOUT, WEBHOOK_MAX_RETRY, WEBHOOK_QUEUE_DIR,
    WEBHOOK_QUEUE_LIMIT, WEBHOOK_RETRY_INTERVAL, audit::AUDIT_ROUTE_PREFIX,
};
use rustfs_config::{DEFAULT_DELIMITER, ENABLE_KEY, ENV_PREFIX, EnableState, audit::AUDIT_ROUTE_PREFIX};
use rustfs_ecstore::config::{Config, KVS};
use rustfs_targets::{
    Target, TargetError,
    target::{ChannelTargetType, TargetType, mqtt::MQTTArgs, webhook::WebhookArgs},
};
use rustfs_targets::{Target, TargetError, target::ChannelTargetType};
use std::str::FromStr;
use std::sync::Arc;
use std::time::Duration;
use tracing::{debug, error, info, warn};
use url::Url;
/// Registry for managing audit targets
pub struct AuditRegistry {
    /// Storage for created targets
    targets: HashMap<String, Box<dyn Target<AuditEntry> + Send + Sync>>,
    /// Factories for creating targets
    factories: HashMap<String, Box<dyn TargetFactory>>,
}

impl Default for AuditRegistry {
@@ -46,162 +43,207 @@ impl Default for AuditRegistry {
impl AuditRegistry {
    /// Creates a new AuditRegistry
    pub fn new() -> Self {
        Self { targets: HashMap::new() }
        let mut registry = AuditRegistry {
            factories: HashMap::new(),
            targets: HashMap::new(),
        };

        // Register built-in factories
        registry.register(ChannelTargetType::Webhook.as_str(), Box::new(WebhookTargetFactory));
        registry.register(ChannelTargetType::Mqtt.as_str(), Box::new(MQTTTargetFactory));

        registry
    }
    /// Creates all audit targets from system configuration and environment variables.
    /// Registers a new factory for a target type
    ///
    /// # Arguments
    /// * `target_type` - The type of the target (e.g., "webhook", "mqtt").
    /// * `factory` - The factory instance to create targets of this type.
    pub fn register(&mut self, target_type: &str, factory: Box<dyn TargetFactory>) {
        self.factories.insert(target_type.to_string(), factory);
    }

    /// Creates a target of the specified type with the given ID and configuration
    ///
    /// # Arguments
    /// * `target_type` - The type of the target (e.g., "webhook", "mqtt").
    /// * `id` - The identifier for the target instance.
    /// * `config` - The configuration key-value store for the target.
    ///
    /// # Returns
    /// * `Result<Box<dyn Target<AuditEntry> + Send + Sync>, TargetError>` - The created target or an error.
    pub async fn create_target(
        &self,
        target_type: &str,
        id: String,
        config: &KVS,
    ) -> Result<Box<dyn Target<AuditEntry> + Send + Sync>, TargetError> {
        let factory = self
            .factories
            .get(target_type)
            .ok_or_else(|| TargetError::Configuration(format!("Unknown target type: {target_type}")))?;

        // Validate configuration before creating target
        factory.validate_config(&id, config)?;

        // Create target
        factory.create_target(id, config).await
    }
    /// Creates all targets from a configuration
    /// Create all notification targets from system configuration and environment variables.
    /// This method processes the creation of each target concurrently as follows:
    /// 1. Iterate through supported target types (webhook, mqtt).
    /// 2. For each type, resolve its configuration from file and environment variables.
    /// 1. Iterate through all registered target types (e.g. webhook, mqtt).
    /// 2. For each type, resolve its configuration from the configuration file and environment variables.
    /// 3. Identify all target instance IDs that need to be created.
    /// 4. Merge configurations with precedence: ENV > file instance > file default.
    /// 5. Create async tasks for enabled instances.
    /// 6. Execute tasks concurrently and collect successful targets.
    /// 7. Persist successful configurations back to system storage.
    pub async fn create_targets_from_config(
        &mut self,
    /// 4. Combine the default configuration, file configuration, and environment variable configuration for each instance.
    /// 5. If the instance is enabled, create an asynchronous task to instantiate it.
    /// 6. Concurrently execute all creation tasks and collect the results.
    pub async fn create_audit_targets_from_config(
        &self,
        config: &Config,
    ) -> AuditResult<Vec<Box<dyn Target<AuditEntry> + Send + Sync>>> {
        // Collect only environment variables with the relevant prefix to reduce memory usage
        let all_env: Vec<(String, String)> = std::env::vars().filter(|(key, _)| key.starts_with(ENV_PREFIX)).collect();

        // A collection of asynchronous tasks for concurrently executing target creation
        let mut tasks = FuturesUnordered::new();
        // let final_config = config.clone();

        // let final_config = config.clone(); // Clone a configuration for aggregating the final result
        // Record the defaults for each section so that the section can eventually be rebuilt
        let mut section_defaults: HashMap<String, KVS> = HashMap::new();

        // Supported target types for audit
        let target_types = vec![ChannelTargetType::Webhook.as_str(), ChannelTargetType::Mqtt.as_str()];
        // 1. Traverse all target types and process them
        for target_type in target_types {
            let span = tracing::Span::current();
            span.record("target_type", target_type);
            info!(target_type = %target_type, "Starting audit target type processing");
        // 1. Traverse all registered factories and process them by target type
        for (target_type, factory) in &self.factories {
            tracing::Span::current().record("target_type", target_type.as_str());
            info!("Start working on target types...");

            // 2. Prepare the configuration source
            // 2.1. Get the configuration section in the file, e.g. 'audit_webhook'
            let section_name = format!("{AUDIT_ROUTE_PREFIX}{target_type}").to_lowercase();
            let file_configs = config.0.get(&section_name).cloned().unwrap_or_default();
            // 2.2. Get the default configuration for that type
            let default_cfg = file_configs.get(DEFAULT_DELIMITER).cloned().unwrap_or_default();
            debug!(?default_cfg, "Retrieved default configuration");
            debug!(?default_cfg, "Get the default configuration");

            // Save defaults for eventual write back
            section_defaults.insert(section_name.clone(), default_cfg.clone());

            // Get valid fields for the target type
            let valid_fields = match target_type {
                "webhook" => get_webhook_valid_fields(),
                "mqtt" => get_mqtt_valid_fields(),
                _ => {
                    warn!(target_type = %target_type, "Unknown target type, skipping");
                    continue;
                }
            };
            debug!(?valid_fields, "Retrieved valid configuration fields");
            // *** Optimization point 1: Get all valid fields of the current target type ***
            let valid_fields = factory.get_valid_fields();
            debug!(?valid_fields, "Get the valid configuration fields");
            // 3. Resolve instance IDs and configuration overrides from environment variables
            let mut instance_ids_from_env = HashSet::new();
            let mut env_overrides: HashMap<String, HashMap<String, String>> = HashMap::new();

            for (env_key, env_value) in &all_env {
                let audit_prefix = format!("{ENV_PREFIX}{AUDIT_ROUTE_PREFIX}{target_type}").to_uppercase();
                if !env_key.starts_with(&audit_prefix) {
                    continue;
                }

                let suffix = &env_key[audit_prefix.len()..];
                if suffix.is_empty() {
                    continue;
                }

                // Parse field and instance from suffix (FIELD_INSTANCE or FIELD)
                let (field_name, instance_id) = if let Some(last_underscore) = suffix.rfind('_') {
                    let potential_field = &suffix[1..last_underscore]; // Skip leading _
                    let potential_instance = &suffix[last_underscore + 1..];

                    // Check if the part before the last underscore is a valid field
                    if valid_fields.contains(&potential_field.to_lowercase()) {
                        (potential_field.to_lowercase(), potential_instance.to_lowercase())
                    } else {
                        // Treat the entire suffix as field name with default instance
                        (suffix[1..].to_lowercase(), DEFAULT_DELIMITER.to_string())
            // 3.1. Instance discovery: Based on the '..._ENABLE_INSTANCEID' format
            let enable_prefix =
                format!("{ENV_PREFIX}{AUDIT_ROUTE_PREFIX}{target_type}{DEFAULT_DELIMITER}{ENABLE_KEY}{DEFAULT_DELIMITER}")
                    .to_uppercase();
            for (key, value) in &all_env {
                if EnableState::from_str(value).ok().map(|s| s.is_enabled()).unwrap_or(false) {
                    if let Some(id) = key.strip_prefix(&enable_prefix) {
                        if !id.is_empty() {
                            instance_ids_from_env.insert(id.to_lowercase());
                        }
                    }
                } else {
                    // No underscore, treat as field with default instance
                    (suffix[1..].to_lowercase(), DEFAULT_DELIMITER.to_string())
                };

                if valid_fields.contains(&field_name) {
                    if instance_id != DEFAULT_DELIMITER {
                        instance_ids_from_env.insert(instance_id.clone());
                    }
                    env_overrides
                        .entry(instance_id)
                        .or_default()
                        .insert(field_name, env_value.clone());
                } else {
                    debug!(
                        env_key = %env_key,
                        field_name = %field_name,
                        "Ignoring environment variable field not found in valid fields for target type {}",
                        target_type
                    );
                }
            }
            debug!(?env_overrides, "Completed environment variable analysis");
            // 3.2. Parse all relevant environment variable configurations
            // 3.2.1. Build environment variable prefixes such as 'RUSTFS_AUDIT_WEBHOOK_'
            let env_prefix = format!("{ENV_PREFIX}{AUDIT_ROUTE_PREFIX}{target_type}{DEFAULT_DELIMITER}").to_uppercase();
            // 3.2.2. 'env_overrides' stores configurations parsed from environment variables in the format: {instance id -> {field -> value}}
            let mut env_overrides: HashMap<String, HashMap<String, String>> = HashMap::new();
            for (key, value) in &all_env {
                if let Some(rest) = key.strip_prefix(&env_prefix) {
                    // Use rsplitn to split from the right side to properly extract the INSTANCE_ID at the end
                    // Format: <FIELD_NAME>_<INSTANCE_ID> or <FIELD_NAME>
                    let mut parts = rest.rsplitn(2, DEFAULT_DELIMITER);

                    // The first part from the right is INSTANCE_ID
                    let instance_id_part = parts.next().unwrap_or(DEFAULT_DELIMITER);
                    // The remaining part is FIELD_NAME
                    let field_name_part = parts.next();

                    let (field_name, instance_id) = match field_name_part {
                        // Case 1: The format is <FIELD_NAME>_<INSTANCE_ID>
                        // e.g., rest = "ENDPOINT_PRIMARY" -> field_name="ENDPOINT", instance_id="PRIMARY"
                        Some(field) => (field.to_lowercase(), instance_id_part.to_lowercase()),
                        // Case 2: The format is <FIELD_NAME> (without INSTANCE_ID)
                        // e.g., rest = "ENABLE" -> field_name="ENABLE", instance_id="" (general configuration, keyed by DEFAULT_DELIMITER)
                        None => (instance_id_part.to_lowercase(), DEFAULT_DELIMITER.to_string()),
                    };

                    // *** Optimization point 2: Verify whether the parsed field_name is valid ***
                    if !field_name.is_empty() && valid_fields.contains(&field_name) {
                        debug!(
                            instance_id = %if instance_id.is_empty() { DEFAULT_DELIMITER } else { &instance_id },
                            %field_name,
                            %value,
                            "Parsing to environment variables"
                        );
                        env_overrides
                            .entry(instance_id)
                            .or_default()
                            .insert(field_name, value.clone());
                    } else {
                        // Ignore invalid field names
                        warn!(
                            field_name = %field_name,
                            "Ignore environment variable fields, not found in the list of valid fields for target type {}",
                            target_type
                        );
                    }
                }
            }
            debug!(?env_overrides, "Complete the environment variable analysis");
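The right-to-left split above is easy to get wrong at the edges, so a small self-contained check may help. This sketch mirrors the rsplitn(2, '_') logic under simplifying assumptions: the `_` delimiter and the `RUSTFS_AUDIT_WEBHOOK_` prefix convention come from the code above, while `parse_suffix` and the sample field set are illustrative only:

use std::collections::HashSet;

// Split "<FIELD>_<INSTANCE>" (or bare "<FIELD>") from the right, mirroring
// the rsplitn(2, '_') parsing applied to RUSTFS_AUDIT_WEBHOOK_* variables.
fn parse_suffix(rest: &str, valid: &HashSet<&str>) -> Option<(String, String)> {
    let mut parts = rest.rsplitn(2, '_');
    let last = parts.next().unwrap_or("_");
    let (field, instance) = match parts.next() {
        Some(field) => (field.to_lowercase(), last.to_lowercase()),
        None => (last.to_lowercase(), "_".to_string()),
    };
    // Unknown fields are filtered out, as in optimization point 2 above.
    valid.contains(field.as_str()).then_some((field, instance))
}

fn main() {
    let valid: HashSet<&str> = ["endpoint", "enable"].into();
    assert_eq!(
        parse_suffix("ENDPOINT_PRIMARY", &valid),
        Some(("endpoint".into(), "primary".into()))
    );
    // Bare field, no instance id: falls back to the default key "_".
    assert_eq!(parse_suffix("ENABLE", &valid), Some(("enable".into(), "_".into())));
    // Fields outside the factory's valid set are rejected.
    assert_eq!(parse_suffix("BOGUS_X", &valid), None);
}

Note the caveat this parsing implies: an instance id containing an underscore would be truncated at its last segment, which is presumably why instance ids are kept simple.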
            // 4. Determine all instance IDs that need to be processed
            let mut all_instance_ids: HashSet<String> =
                file_configs.keys().filter(|k| *k != DEFAULT_DELIMITER).cloned().collect();
            all_instance_ids.extend(instance_ids_from_env);
            debug!(?all_instance_ids, "Determined all instance IDs");
            debug!(?all_instance_ids, "Determine all instance IDs");

            // 5. Merge configurations and create tasks for each instance
            for id in all_instance_ids {
                // 5.1. Merge configuration, priority: Environment variables > File instance > File default
                // 5.1. Merge configuration, priority: Environment variables > File instance configuration > File default configuration
                let mut merged_config = default_cfg.clone();

                // Apply file instance configuration if available
                // Apply the instance-specific configuration from the file
                if let Some(file_instance_cfg) = file_configs.get(&id) {
                    merged_config.extend(file_instance_cfg.clone());
                }

                // Apply environment variable overrides
                // Apply the instance-specific environment variable configuration
                if let Some(env_instance_cfg) = env_overrides.get(&id) {
                    // Convert HashMap<String, String> to KVS
                    let mut kvs_from_env = KVS::new();
                    for (k, v) in env_instance_cfg {
                        kvs_from_env.insert(k.clone(), v.clone());
                    }
                    merged_config.extend(kvs_from_env);
                }
                debug!(instance_id = %id, ?merged_config, "Completed configuration merge");
                debug!(instance_id = %id, ?merged_config, "Complete configuration merge");
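The three-layer merge reduces to "later layers win", since each `extend` overwrites existing keys. A standard-library sketch of that precedence, with `Kvs` standing in for the crate's KVS type and the key names invented for the example:

use std::collections::HashMap;

type Kvs = HashMap<String, String>;

// Later layers win: file defaults < file instance < environment.
fn merge(layers: &[&Kvs]) -> Kvs {
    let mut merged = Kvs::new();
    for layer in layers {
        merged.extend(layer.iter().map(|(k, v)| (k.clone(), v.clone())));
    }
    merged
}

fn main() {
    let defaults = Kvs::from([
        ("queue_limit".into(), "100000".into()),
        ("enable".into(), "off".into()),
    ]);
    let file_instance = Kvs::from([("enable".into(), "on".into())]);
    let env = Kvs::from([("queue_limit".into(), "50000".into())]);

    let merged = merge(&[&defaults, &file_instance, &env]);
    assert_eq!(merged["enable"], "on");         // file instance overrides default
    assert_eq!(merged["queue_limit"], "50000"); // environment overrides default
}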
                // 5.2. Check if the instance is enabled
                let enabled = merged_config
                    .lookup(ENABLE_KEY)
                    .map(|v| parse_enable_value(&v))
                    .map(|v| {
                        EnableState::from_str(v.as_str())
                            .ok()
                            .map(|s| s.is_enabled())
                            .unwrap_or(false)
                    })
                    .unwrap_or(false);

                if enabled {
                    info!(instance_id = %id, "Creating audit target");

                    // Create task for concurrent execution
                    let target_type_clone = target_type.to_string();
                    let id_clone = id.clone();
                    let merged_config_arc = Arc::new(merged_config.clone());
                    let task = tokio::spawn(async move {
                        let result = create_audit_target(&target_type_clone, &id_clone, &merged_config_arc).await;
                        (target_type_clone, id_clone, result, merged_config_arc)
                    info!(instance_id = %id, "Target is enabled, ready to create a task");
                    // 5.3. Create asynchronous tasks for enabled instances
                    let target_type_clone = target_type.clone();
                    let tid = id.clone();
                    let merged_config_arc = Arc::new(merged_config);
                    tasks.push(async move {
                        let result = factory.create_target(tid.clone(), &merged_config_arc).await;
                        (target_type_clone, tid, result, Arc::clone(&merged_config_arc))
                    });

                    tasks.push(task);

                    // Update final config with successful instance
                    // final_config.0.entry(section_name.clone()).or_default().insert(id, merged_config);
                } else {
                    info!(instance_id = %id, "Skipping disabled audit target, will be removed from final configuration");
                    info!(instance_id = %id, "Skip the disabled target and will be removed from the final configuration");
                    // Remove disabled target from final configuration
                    // final_config.0.entry(section_name.clone()).or_default().remove(&id);
                }
@@ -211,30 +253,28 @@ impl AuditRegistry {
        // 6. Concurrently execute all creation tasks and collect results
        let mut successful_targets = Vec::new();
        let mut successful_configs = Vec::new();
        while let Some(task_result) = tasks.next().await {
            match task_result {
                Ok((target_type, id, result, kvs_arc)) => match result {
                    Ok(target) => {
                        info!(target_type = %target_type, instance_id = %id, "Created audit target successfully");
                        successful_targets.push(target);
                        successful_configs.push((target_type, id, kvs_arc));
                    }
                    Err(e) => {
                        error!(target_type = %target_type, instance_id = %id, error = %e, "Failed to create audit target");
                    }
                },
        while let Some((target_type, id, result, final_config)) = tasks.next().await {
            match result {
                Ok(target) => {
                    info!(target_type = %target_type, instance_id = %id, "Create a target successfully");
                    successful_targets.push(target);
                    successful_configs.push((target_type, id, final_config));
                }
                Err(e) => {
                    error!(error = %e, "Task execution failed");
                    error!(target_type = %target_type, instance_id = %id, error = %e, "Failed to create a target");
                }
            }
        }
        // Rebuild each section from "default entry + successful instances" and overwrite on write-back, so that deleted/disabled instances cannot be "resurrected"
        // 7. Aggregate new configuration and write back to system configuration
        if !successful_configs.is_empty() || !section_defaults.is_empty() {
            info!("Prepare to rebuild and save target configurations to the system configuration...");
            info!(
                "Prepare to update {} successfully created target configurations to the system configuration...",
                successful_configs.len()
            );

            // Aggregate successful instances into sections
            let mut successes_by_section: HashMap<String, HashMap<String, KVS>> = HashMap::new();

            for (target_type, id, kvs) in successful_configs {
                let section_name = format!("{AUDIT_ROUTE_PREFIX}{target_type}").to_lowercase();
                successes_by_section
@@ -244,76 +284,99 @@ impl AuditRegistry {
            }
            let mut new_config = config.clone();

            // Sections that need processing: all sections that have a default entry or at least one successful instance
            let mut sections: HashSet<String> = HashSet::new();
            sections.extend(section_defaults.keys().cloned());
            sections.extend(successes_by_section.keys().cloned());

            for section_name in sections {
            for section in sections {
                let mut section_map: std::collections::HashMap<String, KVS> = std::collections::HashMap::new();

                // The default entry (if present) is written back to `_`
                if let Some(default_cfg) = section_defaults.get(&section_name) {
                    if !default_cfg.is_empty() {
                        section_map.insert(DEFAULT_DELIMITER.to_string(), default_cfg.clone());
                // Add default item
                if let Some(default_kvs) = section_defaults.get(&section) {
                    if !default_kvs.is_empty() {
                        section_map.insert(DEFAULT_DELIMITER.to_string(), default_kvs.clone());
                    }
                }

                // Successful instance write back
                if let Some(instances) = successes_by_section.get(&section_name) {
                // Add successful instance item
                if let Some(instances) = successes_by_section.get(&section) {
                    for (id, kvs) in instances {
                        section_map.insert(id.clone(), kvs.clone());
                    }
                }

                // Empty sections are removed and non-empty sections are replaced as a whole.
                // Empty sections are removed and non-empty sections are replaced entirely.
                if section_map.is_empty() {
                    new_config.0.remove(&section_name);
                    new_config.0.remove(&section);
                } else {
                    new_config.0.insert(section_name, section_map);
                    new_config.0.insert(section, section_map);
                }
            }

            // 7. Save the new configuration to the system
            let Some(store) = rustfs_ecstore::new_object_layer_fn() else {
            let Some(store) = rustfs_ecstore::global::new_object_layer_fn() else {
                return Err(AuditError::StorageNotAvailable(
                    "Failed to save target configuration: server storage not initialized".to_string(),
                ));
            };

            match rustfs_ecstore::config::com::save_server_config(store, &new_config).await {
                Ok(_) => info!("New audit configuration saved to system successfully"),
                Ok(_) => {
                    info!("The new configuration was saved to the system successfully.")
                }
                Err(e) => {
                    error!(error = %e, "Failed to save new audit configuration");
                    error!("Failed to save the new configuration: {}", e);
                    return Err(AuditError::SaveConfig(Box::new(e)));
                }
            }
        }

        info!(count = successful_targets.len(), "All target processing completed");
        Ok(successful_targets)
    }
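The rebuild-then-overwrite step is the subtle part of this method: each section is reconstructed from scratch so that anything not explicitly re-added simply disappears. A standard-library sketch of that invariant, with `Kvs` and the key names as illustrative stand-ins:

use std::collections::HashMap;

type Kvs = HashMap<String, String>;

// Rebuild one config section from scratch: the default entry under "_" plus
// only the instances that were created successfully. Anything disabled or
// failed is never written back, so it cannot be "resurrected".
fn rebuild_section(default: &Kvs, successes: &HashMap<String, Kvs>) -> HashMap<String, Kvs> {
    let mut section = HashMap::new();
    if !default.is_empty() {
        section.insert("_".to_string(), default.clone());
    }
    for (id, kvs) in successes {
        section.insert(id.clone(), kvs.clone());
    }
    section
}

fn main() {
    let default = Kvs::from([("queue_limit".into(), "100000".into())]);
    let successes = HashMap::from([(
        "primary".to_string(),
        Kvs::from([("enable".into(), "on".into())]),
    )]);

    let section = rebuild_section(&default, &successes);
    assert!(section.contains_key("_"));
    assert!(section.contains_key("primary"));
    assert!(!section.contains_key("old_disabled")); // dropped by the rebuild
}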
    /// Adds a target to the registry
    ///
    /// # Arguments
    /// * `id` - The identifier for the target.
    /// * `target` - The target instance to be added.
    pub fn add_target(&mut self, id: String, target: Box<dyn Target<AuditEntry> + Send + Sync>) {
        self.targets.insert(id, target);
    }

    /// Removes a target from the registry
    ///
    /// # Arguments
    /// * `id` - The identifier for the target to be removed.
    ///
    /// # Returns
    /// * `Option<Box<dyn Target<AuditEntry> + Send + Sync>>` - The removed target if it existed.
    pub fn remove_target(&mut self, id: &str) -> Option<Box<dyn Target<AuditEntry> + Send + Sync>> {
        self.targets.remove(id)
    }

    /// Gets a target from the registry
    ///
    /// # Arguments
    /// * `id` - The identifier for the target to be retrieved.
    ///
    /// # Returns
    /// * `Option<&(dyn Target<AuditEntry> + Send + Sync)>` - The target if it exists.
    pub fn get_target(&self, id: &str) -> Option<&(dyn Target<AuditEntry> + Send + Sync)> {
        self.targets.get(id).map(|t| t.as_ref())
    }

    /// Lists all target IDs
    ///
    /// # Returns
    /// * `Vec<String>` - A vector of all target IDs in the registry.
    pub fn list_targets(&self) -> Vec<String> {
        self.targets.keys().cloned().collect()
    }

    /// Closes all targets and clears the registry
    ///
    /// # Returns
    /// * `AuditResult<()>` - Result indicating success or failure.
    pub async fn close_all(&mut self) -> AuditResult<()> {
        let mut errors = Vec::new();
@@ -331,152 +394,3 @@ impl AuditRegistry {
        Ok(())
    }
}

/// Creates an audit target based on type and configuration
async fn create_audit_target(
    target_type: &str,
    id: &str,
    config: &KVS,
) -> Result<Box<dyn Target<AuditEntry> + Send + Sync>, TargetError> {
    match target_type {
        val if val == ChannelTargetType::Webhook.as_str() => {
            let args = parse_webhook_args(id, config)?;
            let target = rustfs_targets::target::webhook::WebhookTarget::new(id.to_string(), args)?;
            Ok(Box::new(target))
        }
        val if val == ChannelTargetType::Mqtt.as_str() => {
            let args = parse_mqtt_args(id, config)?;
            let target = rustfs_targets::target::mqtt::MQTTTarget::new(id.to_string(), args)?;
            Ok(Box::new(target))
        }
        _ => Err(TargetError::Configuration(format!("Unknown target type: {target_type}"))),
    }
}

/// Gets valid field names for webhook configuration
fn get_webhook_valid_fields() -> HashSet<String> {
    vec![
        ENABLE_KEY.to_string(),
        WEBHOOK_ENDPOINT.to_string(),
        WEBHOOK_AUTH_TOKEN.to_string(),
        WEBHOOK_CLIENT_CERT.to_string(),
        WEBHOOK_CLIENT_KEY.to_string(),
        WEBHOOK_BATCH_SIZE.to_string(),
        WEBHOOK_QUEUE_LIMIT.to_string(),
        WEBHOOK_QUEUE_DIR.to_string(),
        WEBHOOK_MAX_RETRY.to_string(),
        WEBHOOK_RETRY_INTERVAL.to_string(),
        WEBHOOK_HTTP_TIMEOUT.to_string(),
    ]
    .into_iter()
    .collect()
}

/// Gets valid field names for MQTT configuration
fn get_mqtt_valid_fields() -> HashSet<String> {
    vec![
        ENABLE_KEY.to_string(),
        MQTT_BROKER.to_string(),
        MQTT_TOPIC.to_string(),
        MQTT_USERNAME.to_string(),
        MQTT_PASSWORD.to_string(),
        MQTT_QOS.to_string(),
        MQTT_KEEP_ALIVE_INTERVAL.to_string(),
        MQTT_RECONNECT_INTERVAL.to_string(),
        MQTT_QUEUE_DIR.to_string(),
        MQTT_QUEUE_LIMIT.to_string(),
    ]
    .into_iter()
    .collect()
}

/// Parses webhook arguments from KVS configuration
fn parse_webhook_args(_id: &str, config: &KVS) -> Result<WebhookArgs, TargetError> {
    let endpoint = config
        .lookup(WEBHOOK_ENDPOINT)
        .filter(|s| !s.is_empty())
        .ok_or_else(|| TargetError::Configuration("webhook endpoint is required".to_string()))?;

    let endpoint_url =
        Url::parse(&endpoint).map_err(|e| TargetError::Configuration(format!("invalid webhook endpoint URL: {e}")))?;

    let args = WebhookArgs {
        enable: true, // Already validated as enabled
        endpoint: endpoint_url,
        auth_token: config.lookup(WEBHOOK_AUTH_TOKEN).unwrap_or_default(),
        queue_dir: config.lookup(WEBHOOK_QUEUE_DIR).unwrap_or_default(),
        queue_limit: config
            .lookup(WEBHOOK_QUEUE_LIMIT)
            .and_then(|s| s.parse().ok())
            .unwrap_or(100000),
        client_cert: config.lookup(WEBHOOK_CLIENT_CERT).unwrap_or_default(),
        client_key: config.lookup(WEBHOOK_CLIENT_KEY).unwrap_or_default(),
        target_type: TargetType::AuditLog,
    };

    args.validate()?;
    Ok(args)
}

/// Parses MQTT arguments from KVS configuration
fn parse_mqtt_args(_id: &str, config: &KVS) -> Result<MQTTArgs, TargetError> {
    let broker = config
        .lookup(MQTT_BROKER)
        .filter(|s| !s.is_empty())
        .ok_or_else(|| TargetError::Configuration("MQTT broker is required".to_string()))?;

    let broker_url = Url::parse(&broker).map_err(|e| TargetError::Configuration(format!("invalid MQTT broker URL: {e}")))?;

    let topic = config
        .lookup(MQTT_TOPIC)
        .filter(|s| !s.is_empty())
        .ok_or_else(|| TargetError::Configuration("MQTT topic is required".to_string()))?;

    let qos = config
        .lookup(MQTT_QOS)
        .and_then(|s| s.parse::<u8>().ok())
        .and_then(|q| match q {
            0 => Some(rumqttc::QoS::AtMostOnce),
            1 => Some(rumqttc::QoS::AtLeastOnce),
            2 => Some(rumqttc::QoS::ExactlyOnce),
            _ => None,
        })
        .unwrap_or(rumqttc::QoS::AtLeastOnce);

    let args = MQTTArgs {
        enable: true, // Already validated as enabled
        broker: broker_url,
        topic,
        qos,
        username: config.lookup(MQTT_USERNAME).unwrap_or_default(),
        password: config.lookup(MQTT_PASSWORD).unwrap_or_default(),
        max_reconnect_interval: parse_duration(&config.lookup(MQTT_RECONNECT_INTERVAL).unwrap_or_else(|| "5s".to_string()))
            .unwrap_or(Duration::from_secs(5)),
        keep_alive: parse_duration(&config.lookup(MQTT_KEEP_ALIVE_INTERVAL).unwrap_or_else(|| "60s".to_string()))
            .unwrap_or(Duration::from_secs(60)),
        queue_dir: config.lookup(MQTT_QUEUE_DIR).unwrap_or_default(),
        queue_limit: config.lookup(MQTT_QUEUE_LIMIT).and_then(|s| s.parse().ok()).unwrap_or(100000),
        target_type: TargetType::AuditLog,
    };

    args.validate()?;
    Ok(args)
}

/// Parses enable value from string
fn parse_enable_value(value: &str) -> bool {
    matches!(value.to_lowercase().as_str(), "1" | "on" | "true" | "yes")
}

/// Parses duration from string (e.g., "3s", "5m")
fn parse_duration(s: &str) -> Option<Duration> {
    if let Some(stripped) = s.strip_suffix('s') {
        stripped.parse::<u64>().ok().map(Duration::from_secs)
    } else if let Some(stripped) = s.strip_suffix('m') {
        stripped.parse::<u64>().ok().map(|m| Duration::from_secs(m * 60))
    } else if let Some(stripped) = s.strip_suffix("ms") {
        stripped.parse::<u64>().ok().map(Duration::from_millis)
    } else {
        s.parse::<u64>().ok().map(Duration::from_secs)
    }
}
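One subtlety in the removed `parse_duration` helper above: because the `'s'` suffix is checked before `"ms"`, an input like "100ms" first strips the trailing 's', fails to parse "100m" as an integer, and returns None, so the millisecond branch is unreachable. A corrected ordering would test the longer suffix first; a standalone sketch of that fix:

use std::time::Duration;

// Check "ms" before 's' so millisecond values actually parse.
fn parse_duration(s: &str) -> Option<Duration> {
    if let Some(stripped) = s.strip_suffix("ms") {
        stripped.parse::<u64>().ok().map(Duration::from_millis)
    } else if let Some(stripped) = s.strip_suffix('s') {
        stripped.parse::<u64>().ok().map(Duration::from_secs)
    } else if let Some(stripped) = s.strip_suffix('m') {
        stripped.parse::<u64>().ok().map(|m| Duration::from_secs(m * 60))
    } else {
        s.parse::<u64>().ok().map(Duration::from_secs)
    }
}

fn main() {
    assert_eq!(parse_duration("3s"), Some(Duration::from_secs(3)));
    assert_eq!(parse_duration("5m"), Some(Duration::from_secs(300)));
    assert_eq!(parse_duration("100ms"), Some(Duration::from_millis(100)));
}

Since this commit deletes the helper in favor of plain integer-second parsing in the factories, the bug is moot going forward, but it is worth knowing for anyone backporting.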
@@ -58,6 +58,12 @@ impl AuditSystem {
    }

    /// Starts the audit system with the given configuration
    ///
    /// # Arguments
    /// * `config` - The configuration to use for starting the audit system
    ///
    /// # Returns
    /// * `AuditResult<()>` - Result indicating success or failure
    pub async fn start(&self, config: Config) -> AuditResult<()> {
        let state = self.state.write().await;

@@ -87,7 +93,7 @@ impl AuditSystem {

        // Create targets from configuration
        let mut registry = self.registry.lock().await;
        match registry.create_targets_from_config(&config).await {
        match registry.create_audit_targets_from_config(&config).await {
            Ok(targets) => {
                if targets.is_empty() {
                    info!("No enabled audit targets found, keeping audit system stopped");
@@ -143,6 +149,9 @@ impl AuditSystem {
    }

    /// Pauses the audit system
    ///
    /// # Returns
    /// * `AuditResult<()>` - Result indicating success or failure
    pub async fn pause(&self) -> AuditResult<()> {
        let mut state = self.state.write().await;

@@ -161,6 +170,9 @@ impl AuditSystem {
    }

    /// Resumes the audit system
    ///
    /// # Returns
    /// * `AuditResult<()>` - Result indicating success or failure
    pub async fn resume(&self) -> AuditResult<()> {
        let mut state = self.state.write().await;

@@ -179,6 +191,9 @@ impl AuditSystem {
    }

    /// Stops the audit system and closes all targets
    ///
    /// # Returns
    /// * `AuditResult<()>` - Result indicating success or failure
    pub async fn close(&self) -> AuditResult<()> {
        let mut state = self.state.write().await;

@@ -223,11 +238,20 @@ impl AuditSystem {
    }

    /// Checks if the audit system is running
    ///
    /// # Returns
    /// * `bool` - True if running, false otherwise
    pub async fn is_running(&self) -> bool {
        matches!(*self.state.read().await, AuditSystemState::Running)
    }

    /// Dispatches an audit log entry to all active targets
    ///
    /// # Arguments
    /// * `entry` - The audit log entry to dispatch
    ///
    /// # Returns
    /// * `AuditResult<()>` - Result indicating success or failure
    pub async fn dispatch(&self, entry: Arc<AuditEntry>) -> AuditResult<()> {
        let start_time = std::time::Instant::now();
@@ -319,6 +343,13 @@ impl AuditSystem {
        Ok(())
    }

    /// Dispatches a batch of audit log entries to all active targets
    ///
    /// # Arguments
    /// * `entries` - A vector of audit log entries to dispatch
    ///
    /// # Returns
    /// * `AuditResult<()>` - Result indicating success or failure
    pub async fn dispatch_batch(&self, entries: Vec<Arc<AuditEntry>>) -> AuditResult<()> {
        let start_time = std::time::Instant::now();

@@ -386,7 +417,13 @@ impl AuditSystem {
        Ok(())
    }

    // New: Audit flow background tasks, based on send_from_store, including retries and exponential backoffs
    /// Starts the audit stream processing for a target with batching and retry logic
    /// # Arguments
    /// * `store` - The store from which to read audit entries
    /// * `target` - The target to which audit entries will be sent
    ///
    /// This function spawns a background task that continuously reads audit entries from the provided store
    /// and attempts to send them to the specified target. It implements retry logic with exponential backoff
    fn start_audit_stream_with_batching(
        &self,
        store: Box<dyn Store<EntityTarget<AuditEntry>, Error = StoreError, Key = Key> + Send>,
@@ -462,6 +499,12 @@ impl AuditSystem {
    }

    /// Enables a specific target
    ///
    /// # Arguments
    /// * `target_id` - The ID of the target to enable
    ///
    /// # Returns
    /// * `AuditResult<()>` - Result indicating success or failure
    pub async fn enable_target(&self, target_id: &str) -> AuditResult<()> {
        // This would require storing enabled/disabled state per target
        // For now, just check if target exists
@@ -475,6 +518,12 @@ impl AuditSystem {
    }

    /// Disables a specific target
    ///
    /// # Arguments
    /// * `target_id` - The ID of the target to disable
    ///
    /// # Returns
    /// * `AuditResult<()>` - Result indicating success or failure
    pub async fn disable_target(&self, target_id: &str) -> AuditResult<()> {
        // This would require storing enabled/disabled state per target
        // For now, just check if target exists
@@ -488,6 +537,12 @@ impl AuditSystem {
    }

    /// Removes a target from the system
    ///
    /// # Arguments
    /// * `target_id` - The ID of the target to remove
    ///
    /// # Returns
    /// * `AuditResult<()>` - Result indicating success or failure
    pub async fn remove_target(&self, target_id: &str) -> AuditResult<()> {
        let mut registry = self.registry.lock().await;
        if let Some(target) = registry.remove_target(target_id) {
@@ -502,6 +557,13 @@ impl AuditSystem {
    }

    /// Updates or inserts a target
    ///
    /// # Arguments
    /// * `target_id` - The ID of the target to upsert
    /// * `target` - The target instance to insert or update
    ///
    /// # Returns
    /// * `AuditResult<()>` - Result indicating success or failure
    pub async fn upsert_target(&self, target_id: String, target: Box<dyn Target<AuditEntry> + Send + Sync>) -> AuditResult<()> {
        let mut registry = self.registry.lock().await;
@@ -523,18 +585,33 @@ impl AuditSystem {
    }

    /// Lists all targets
    ///
    /// # Returns
    /// * `Vec<String>` - List of target IDs
    pub async fn list_targets(&self) -> Vec<String> {
        let registry = self.registry.lock().await;
        registry.list_targets()
    }

    /// Gets information about a specific target
    ///
    /// # Arguments
    /// * `target_id` - The ID of the target to retrieve
    ///
    /// # Returns
    /// * `Option<String>` - Target ID if found
    pub async fn get_target(&self, target_id: &str) -> Option<String> {
        let registry = self.registry.lock().await;
        registry.get_target(target_id).map(|target| target.id().to_string())
    }

    /// Reloads configuration and updates targets
    ///
    /// # Arguments
    /// * `new_config` - The new configuration to load
    ///
    /// # Returns
    /// * `AuditResult<()>` - Result indicating success or failure
    pub async fn reload_config(&self, new_config: Config) -> AuditResult<()> {
        info!("Reloading audit system configuration");

@@ -554,7 +631,7 @@ impl AuditSystem {
        }

        // Create new targets from updated configuration
        match registry.create_targets_from_config(&new_config).await {
        match registry.create_audit_targets_from_config(&new_config).await {
            Ok(targets) => {
                info!(target_count = targets.len(), "Reloaded audit targets successfully");

@@ -594,16 +671,22 @@ impl AuditSystem {
    }

    /// Gets current audit system metrics
    ///
    /// # Returns
    /// * `AuditMetricsReport` - Current metrics report
    pub async fn get_metrics(&self) -> observability::AuditMetricsReport {
        observability::get_metrics_report().await
    }

    /// Validates system performance against requirements
    ///
    /// # Returns
    /// * `PerformanceValidation` - Performance validation results
    pub async fn validate_performance(&self) -> observability::PerformanceValidation {
        observability::validate_performance().await
    }

    /// Resets all metrics
    /// Resets all metrics to initial state
    pub async fn reset_metrics(&self) {
        observability::reset_metrics().await;
    }
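Taken together, the methods documented in this diff suggest a straightforward lifecycle. A hedged sketch, assuming `AuditSystem`, `AuditEntry`, `Config`, and `AuditResult` are imported from the audit crate and that entry construction happens elsewhere; only methods shown above are used:

use std::sync::Arc;

// Hypothetical driver around the documented AuditSystem surface.
async fn run_audit_cycle(audit: &AuditSystem, config: Config, entry: Arc<AuditEntry>) -> AuditResult<()> {
    audit.start(config).await?; // creates targets from configuration
    if audit.is_running().await {
        audit.dispatch(entry).await?; // fan one entry out to all active targets
    }
    audit.pause().await?;
    audit.resume().await?;
    audit.close().await // stop the system and close all targets
}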
@@ -43,11 +43,11 @@ async fn test_config_parsing_webhook() {
    audit_webhook_section.insert("_".to_string(), default_kvs);
    config.0.insert("audit_webhook".to_string(), audit_webhook_section);

    let mut registry = AuditRegistry::new();
    let registry = AuditRegistry::new();

    // This should not fail even if server storage is not initialized
    // as it's an integration test
    let result = registry.create_targets_from_config(&config).await;
    let result = registry.create_audit_targets_from_config(&config).await;

    // We expect this to fail due to server storage not being initialized
    // but the parsing should work correctly

@@ -44,7 +44,7 @@ async fn test_audit_system_startup_performance() {
#[tokio::test]
async fn test_concurrent_target_creation() {
    // Test that multiple targets can be created concurrently
    let mut registry = AuditRegistry::new();
    let registry = AuditRegistry::new();

    // Create config with multiple webhook instances
    let mut config = rustfs_ecstore::config::Config(std::collections::HashMap::new());
@@ -63,7 +63,7 @@ async fn test_concurrent_target_creation() {
    let start = Instant::now();

    // This will fail due to server storage not being initialized, but we can measure timing
    let result = registry.create_targets_from_config(&config).await;
    let result = registry.create_audit_targets_from_config(&config).await;
    let elapsed = start.elapsed();

    println!("Concurrent target creation took: {elapsed:?}");

@@ -135,7 +135,7 @@ async fn test_global_audit_functions() {

#[tokio::test]
async fn test_config_parsing_with_multiple_instances() {
    let mut registry = AuditRegistry::new();
    let registry = AuditRegistry::new();

    // Create config with multiple webhook instances
    let mut config = Config(HashMap::new());
@@ -164,7 +164,7 @@ async fn test_config_parsing_with_multiple_instances() {
    config.0.insert("audit_webhook".to_string(), webhook_section);

    // Try to create targets from config
    let result = registry.create_targets_from_config(&config).await;
    let result = registry.create_audit_targets_from_config(&config).await;

    // Should fail due to server storage not initialized, but parsing should work
    match result {
@@ -39,3 +39,4 @@ path-clean = { workspace = true }
rmp-serde = { workspace = true }
async-trait = { workspace = true }
s3s = { workspace = true }
tracing = { workspace = true }
@@ -8,7 +8,7 @@

<p align="center">
  <a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
  <a href="https://docs.rustfs.com/en/">📖 Documentation</a>
  <a href="https://docs.rustfs.com/">📖 Documentation</a>
  · <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
  · <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
</p>
@@ -19,12 +19,42 @@ use std::sync::LazyLock;
use tokio::sync::RwLock;
use tonic::transport::Channel;

pub static GLOBAL_Local_Node_Name: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
pub static GLOBAL_Rustfs_Host: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
pub static GLOBAL_Rustfs_Port: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("9000".to_string()));
pub static GLOBAL_Rustfs_Addr: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
pub static GLOBAL_Conn_Map: LazyLock<RwLock<HashMap<String, Channel>>> = LazyLock::new(|| RwLock::new(HashMap::new()));
pub static GLOBAL_LOCAL_NODE_NAME: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
pub static GLOBAL_RUSTFS_HOST: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
pub static GLOBAL_RUSTFS_PORT: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("9000".to_string()));
pub static GLOBAL_RUSTFS_ADDR: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
pub static GLOBAL_CONN_MAP: LazyLock<RwLock<HashMap<String, Channel>>> = LazyLock::new(|| RwLock::new(HashMap::new()));
pub static GLOBAL_ROOT_CERT: LazyLock<RwLock<Option<Vec<u8>>>> = LazyLock::new(|| RwLock::new(None));

pub async fn set_global_addr(addr: &str) {
    *GLOBAL_Rustfs_Addr.write().await = addr.to_string();
    *GLOBAL_RUSTFS_ADDR.write().await = addr.to_string();
}

pub async fn set_global_root_cert(cert: Vec<u8>) {
    *GLOBAL_ROOT_CERT.write().await = Some(cert);
}

/// Evict a stale/dead connection from the global connection cache.
/// This is critical for cluster recovery when a node dies unexpectedly (e.g., power-off).
/// By removing the cached connection, subsequent requests will establish a fresh connection.
pub async fn evict_connection(addr: &str) {
    let removed = GLOBAL_CONN_MAP.write().await.remove(addr);
    if removed.is_some() {
        tracing::warn!("Evicted stale connection from cache: {}", addr);
    }
}

/// Check if a connection exists in the cache for the given address.
pub async fn has_cached_connection(addr: &str) -> bool {
    GLOBAL_CONN_MAP.read().await.contains_key(addr)
}

/// Clear all cached connections. Useful for full cluster reset/recovery.
pub async fn clear_all_connections() {
    let mut map = GLOBAL_CONN_MAP.write().await;
    let count = map.len();
    map.clear();
    if count > 0 {
        tracing::warn!("Cleared {} cached connections from global map", count);
    }
}
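The eviction helpers matter most at call sites: when an RPC over a cached channel fails with a transport error, evicting before retrying forces a fresh dial on the next request. A minimal sketch of that get-or-dial pattern against a stand-in cache (tokio is assumed; the string `Conn` type and `dial` helper are placeholders for tonic's `Channel` and `Channel::connect`, not this crate's API):

use std::collections::HashMap;
use std::sync::LazyLock;
use tokio::sync::RwLock;

type Conn = String; // stand-in for tonic::transport::Channel

static CONN_MAP: LazyLock<RwLock<HashMap<String, Conn>>> =
    LazyLock::new(|| RwLock::new(HashMap::new()));

async fn dial(addr: &str) -> Conn {
    format!("connection-to-{addr}") // placeholder for an actual connect
}

async fn evict_connection(addr: &str) {
    CONN_MAP.write().await.remove(addr);
}

async fn get_or_dial(addr: &str) -> Conn {
    if let Some(conn) = CONN_MAP.read().await.get(addr) {
        return conn.clone();
    }
    let conn = dial(addr).await;
    CONN_MAP.write().await.insert(addr.to_string(), conn.clone());
    conn
}

#[tokio::main]
async fn main() {
    let first = get_or_dial("node-1:9000").await;
    // Simulate a dead peer: evict so the next call re-dials instead of
    // reusing a channel whose TCP connection is gone.
    evict_connection("node-1:9000").await;
    let second = get_or_dial("node-1:9000").await;
    assert_eq!(first, second); // same address; in reality a fresh channel
}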
@@ -85,12 +85,90 @@ impl Display for DriveState {
    }
}

#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize, PartialEq, Eq)]
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
#[repr(u8)]
pub enum HealScanMode {
    Unknown,
    Unknown = 0,
    #[default]
    Normal,
    Deep,
    Normal = 1,
    Deep = 2,
}

impl Serialize for HealScanMode {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: serde::Serializer,
    {
        serializer.serialize_u8(*self as u8)
    }
}

impl<'de> Deserialize<'de> for HealScanMode {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: serde::Deserializer<'de>,
    {
        struct HealScanModeVisitor;

        impl<'de> serde::de::Visitor<'de> for HealScanModeVisitor {
            type Value = HealScanMode;

            fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
                formatter.write_str("an integer between 0 and 2")
            }

            fn visit_u8<E>(self, value: u8) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                match value {
                    0 => Ok(HealScanMode::Unknown),
                    1 => Ok(HealScanMode::Normal),
                    2 => Ok(HealScanMode::Deep),
                    _ => Err(E::custom(format!("invalid HealScanMode value: {value}"))),
                }
            }

            fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                if value > u8::MAX as u64 {
                    return Err(E::custom(format!("HealScanMode value too large: {value}")));
                }
                self.visit_u8(value as u8)
            }

            fn visit_i64<E>(self, value: i64) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                if value < 0 || value > u8::MAX as i64 {
                    return Err(E::custom(format!("invalid HealScanMode value: {value}")));
                }
                self.visit_u8(value as u8)
            }

            fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
            where
                E: serde::de::Error,
            {
                // Try parsing as number string first (for URL-encoded values)
                if let Ok(num) = value.parse::<u8>() {
                    return self.visit_u8(num);
                }
                // Try parsing as named string
                match value {
                    "Unknown" | "unknown" => Ok(HealScanMode::Unknown),
                    "Normal" | "normal" => Ok(HealScanMode::Normal),
                    "Deep" | "deep" => Ok(HealScanMode::Deep),
                    _ => Err(E::custom(format!("invalid HealScanMode string: {value}"))),
                }
            }
        }

        deserializer.deserialize_any(HealScanModeVisitor)
    }
}
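These hand-written impls replace the derived ones so that `HealScanMode` serializes as a bare integer while still accepting numeric, numeric-string, and named-string inputs (the URL-encoded case called out in the comment). A hedged round-trip check, assuming `HealScanMode` is in scope and `serde_json` is available as a dev-dependency:

fn main() {
    // Serializes as a bare integer thanks to serialize_u8.
    assert_eq!(serde_json::to_string(&HealScanMode::Deep).unwrap(), "2");
    // deserialize_any routes JSON numbers to visit_u64 and strings to visit_str,
    // so all three spellings land on the same variant.
    assert_eq!(serde_json::from_str::<HealScanMode>("2").unwrap(), HealScanMode::Deep);
    assert_eq!(serde_json::from_str::<HealScanMode>("\"2\"").unwrap(), HealScanMode::Deep);
    assert_eq!(serde_json::from_str::<HealScanMode>("\"deep\"").unwrap(), HealScanMode::Deep);
}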
#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize)]
@@ -106,7 +184,9 @@ pub struct HealOpts {
    pub update_parity: bool,
    #[serde(rename = "nolock")]
    pub no_lock: bool,
    #[serde(rename = "pool", default)]
    pub pool: Option<usize>,
    #[serde(rename = "set", default)]
    pub set: Option<usize>,
}

@@ -19,6 +19,10 @@ pub mod globals;
pub mod heal_channel;
pub mod last_minute;
pub mod metrics;
mod readiness;

pub use globals::*;
pub use readiness::{GlobalReadiness, SystemStage};

// is ','
pub static DEFAULT_DELIMITER: u8 = 44;
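Since 44 is the ASCII code point for a comma, the constant can be sanity-checked in one line (std only; shown with the raw value so it runs standalone):

fn main() {
    // 44 is the ASCII code for ',' — the delimiter this constant encodes.
    assert_eq!(44u8 as char, ',');
}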
136 crates/common/src/readiness.rs Normal file
@@ -0,0 +1,136 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::atomic::{AtomicU8, Ordering};

/// Represents the various stages of system startup
#[repr(u8)]
pub enum SystemStage {
    Booting = 0,
    StorageReady = 1, // Disks online, Quorum met
    IamReady = 2,     // Users and Policies loaded into cache
    FullReady = 3,    // System ready to serve all traffic
}

/// Global readiness tracker for the service
/// This struct uses atomic operations to track the readiness status of various components
/// of the service in a thread-safe manner.
pub struct GlobalReadiness {
    status: AtomicU8,
}

impl Default for GlobalReadiness {
    fn default() -> Self {
        Self::new()
    }
}

impl GlobalReadiness {
    /// Create a new GlobalReadiness instance with initial status as Booting
    /// # Returns
    /// A new instance of GlobalReadiness
    pub fn new() -> Self {
        Self {
            status: AtomicU8::new(SystemStage::Booting as u8),
        }
    }

    /// Update the system to a new stage
    ///
    /// # Arguments
    /// * `step` - The SystemStage step to mark as ready
    pub fn mark_stage(&self, step: SystemStage) {
        self.status.fetch_max(step as u8, Ordering::SeqCst);
    }

    /// Check if the service is fully ready
    /// # Returns
    /// `true` if the service is fully ready, `false` otherwise
    pub fn is_ready(&self) -> bool {
        self.status.load(Ordering::SeqCst) == SystemStage::FullReady as u8
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::Arc;
    use std::thread;

    #[test]
    fn test_initial_state() {
        let readiness = GlobalReadiness::new();
        assert!(!readiness.is_ready());
        assert_eq!(readiness.status.load(Ordering::SeqCst), SystemStage::Booting as u8);
    }

    #[test]
    fn test_mark_stage_progression() {
        let readiness = GlobalReadiness::new();
        readiness.mark_stage(SystemStage::StorageReady);
        assert!(!readiness.is_ready());
        assert_eq!(readiness.status.load(Ordering::SeqCst), SystemStage::StorageReady as u8);

        readiness.mark_stage(SystemStage::IamReady);
        assert!(!readiness.is_ready());
        assert_eq!(readiness.status.load(Ordering::SeqCst), SystemStage::IamReady as u8);

        readiness.mark_stage(SystemStage::FullReady);
        assert!(readiness.is_ready());
    }

    #[test]
    fn test_no_regression() {
        let readiness = GlobalReadiness::new();
        readiness.mark_stage(SystemStage::FullReady);
        readiness.mark_stage(SystemStage::IamReady); // Should not regress
        assert!(readiness.is_ready());
    }

    #[test]
    fn test_concurrent_marking() {
        let readiness = Arc::new(GlobalReadiness::new());
        let mut handles = vec![];

        for _ in 0..10 {
            let r = Arc::clone(&readiness);
            handles.push(thread::spawn(move || {
                r.mark_stage(SystemStage::StorageReady);
                r.mark_stage(SystemStage::IamReady);
                r.mark_stage(SystemStage::FullReady);
            }));
        }

        for h in handles {
            h.join().unwrap();
        }

        assert!(readiness.is_ready());
    }

    #[test]
    fn test_is_ready_only_at_full_ready() {
        let readiness = GlobalReadiness::new();
        assert!(!readiness.is_ready());

        readiness.mark_stage(SystemStage::StorageReady);
        assert!(!readiness.is_ready());

        readiness.mark_stage(SystemStage::IamReady);
        assert!(!readiness.is_ready());

        readiness.mark_stage(SystemStage::FullReady);
        assert!(readiness.is_ready());
    }
}
|
||||
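A usage sketch (not from the diff) of how a startup sequence and a readiness probe might consume this type; the handler shape is illustrative only, not RustFS's actual wiring.

```rust
use std::sync::Arc;

fn readiness_usage_sketch() {
    let readiness = Arc::new(GlobalReadiness::new());

    // Each subsystem reports its stage as it comes online; fetch_max makes
    // repeated or out-of-order reports harmless.
    readiness.mark_stage(SystemStage::StorageReady); // disks online, quorum met
    readiness.mark_stage(SystemStage::IamReady);     // IAM cache warmed
    readiness.mark_stage(SystemStage::FullReady);    // all traffic allowed

    // A /ready endpoint can then gate load-balancer traffic on the tracker:
    let status = if readiness.is_ready() { 200 } else { 503 };
    assert_eq!(status, 200);
}
```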
@@ -8,7 +8,7 @@

 <p align="center">
 <a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
-<a href="https://docs.rustfs.com/en/">📖 Documentation</a>
+<a href="https://docs.rustfs.com/">📖 Documentation</a>
 · <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
 · <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
 </p>

@@ -29,7 +29,7 @@ pub const AUDIT_PREFIX: &str = "audit";
 pub const AUDIT_ROUTE_PREFIX: &str = const_str::concat!(AUDIT_PREFIX, DEFAULT_DELIMITER);

 pub const AUDIT_WEBHOOK_SUB_SYS: &str = "audit_webhook";
-pub const AUDIT_MQTT_SUB_SYS: &str = "mqtt_webhook";
+pub const AUDIT_MQTT_SUB_SYS: &str = "audit_mqtt";

 pub const AUDIT_STORE_EXTENSION: &str = ".audit";
 #[allow(dead_code)]

@@ -25,7 +25,7 @@ pub const VERSION: &str = "1.0.0";

 /// Default configuration logger level
 /// Default value: error
-/// Environment variable: RUSTFS_LOG_LEVEL
+/// Environment variable: RUSTFS_OBS_LOGGER_LEVEL
 pub const DEFAULT_LOG_LEVEL: &str = "error";

 /// Default configuration use stdout

@@ -89,6 +89,30 @@ pub const RUSTFS_TLS_KEY: &str = "rustfs_key.pem";
/// This is the default cert for TLS.
pub const RUSTFS_TLS_CERT: &str = "rustfs_cert.pem";

/// Default public certificate filename for rustfs
/// This is the default public certificate filename for rustfs.
/// It is used to store the public certificate of the application.
/// Default value: public.crt
pub const RUSTFS_PUBLIC_CERT: &str = "public.crt";

/// Default CA certificate filename for rustfs
/// This is the default CA certificate filename for rustfs.
/// It is used to store the CA certificate of the application.
/// Default value: ca.crt
pub const RUSTFS_CA_CERT: &str = "ca.crt";

/// Default HTTP prefix for rustfs
/// This is the default HTTP prefix for rustfs.
/// It is used to identify HTTP URLs.
/// Default value: http://
pub const RUSTFS_HTTP_PREFIX: &str = "http://";

/// Default HTTPS prefix for rustfs
/// This is the default HTTPS prefix for rustfs.
/// It is used to identify HTTPS URLs.
/// Default value: https://
pub const RUSTFS_HTTPS_PREFIX: &str = "https://";

/// Default port for rustfs
/// This is the default port for rustfs.
/// This is used to bind the server to a specific port.

56 crates/config/src/constants/body_limits.rs Normal file
@@ -0,0 +1,56 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Request body size limits for admin API endpoints
//!
//! These limits prevent DoS attacks through unbounded memory allocation
//! while allowing legitimate use cases.

/// Maximum size for standard admin API request bodies (1 MB)
/// Used for: user creation/update, policies, tier config, KMS config, events, groups, service accounts
/// Rationale: Admin API payloads are typically JSON/XML configs under 100KB.
/// AWS IAM policy limit is 6KB-10KB. 1MB provides generous headroom.
pub const MAX_ADMIN_REQUEST_BODY_SIZE: usize = 1024 * 1024; // 1 MB

/// Maximum size for IAM import/export operations (10 MB)
/// Used for: IAM entity imports/exports containing multiple users, policies, groups
/// Rationale: ZIP archives with hundreds of IAM entities. 10MB allows ~10,000 small configs.
pub const MAX_IAM_IMPORT_SIZE: usize = 10 * 1024 * 1024; // 10 MB

/// Maximum size for bucket metadata import operations (100 MB)
/// Used for: Bucket metadata import containing configurations for many buckets
/// Rationale: Large deployments may have thousands of buckets with various configs.
/// 100MB allows importing metadata for ~10,000 buckets with reasonable configs.
pub const MAX_BUCKET_METADATA_IMPORT_SIZE: usize = 100 * 1024 * 1024; // 100 MB

/// Maximum size for healing operation requests (1 MB)
/// Used for: Healing parameters and configuration
/// Rationale: Healing requests contain bucket/object paths and options. Should be small.
pub const MAX_HEAL_REQUEST_SIZE: usize = 1024 * 1024; // 1 MB

/// Maximum size for S3 client response bodies (10 MB)
/// Used for: Reading responses from remote S3-compatible services (ACL, attributes, lists)
///
/// Rationale: Responses from external S3-compatible services should be bounded.
/// - ACL XML responses: typically < 10KB
/// - Object attributes: typically < 100KB
/// - List responses: typically < 1MB (1000 objects with metadata)
/// - Location/error responses: typically < 10KB
///
/// 10MB provides generous headroom for legitimate responses while preventing
/// memory exhaustion from malicious or misconfigured remote services.
pub const MAX_S3_CLIENT_RESPONSE_SIZE: usize = 10 * 1024 * 1024; // 10 MB
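A sketch (not from the diff) of enforcing one of these limits while reading a request body, using only std; real handlers would stream, and the function name is illustrative.

```rust
use std::io::Read;

// Reads at most `limit` bytes; take() caps the reader at limit + 1 so the
// overflow case is detectable without buffering the whole body.
fn read_bounded(mut body: impl Read, limit: usize) -> std::io::Result<Vec<u8>> {
    let mut buf = Vec::new();
    body.by_ref().take(limit as u64 + 1).read_to_end(&mut buf)?;
    if buf.len() > limit {
        return Err(std::io::Error::new(
            std::io::ErrorKind::InvalidData,
            "request body exceeds configured limit",
        ));
    }
    Ok(buf)
}

// e.g. read_bounded(request_body, MAX_ADMIN_REQUEST_BODY_SIZE)
```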
61 crates/config/src/constants/compress.rs Normal file
@@ -0,0 +1,61 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! HTTP Response Compression Configuration
//!
//! This module provides configuration options for HTTP response compression.
//! By default, compression is disabled (aligned with MinIO behavior).
//! When enabled via `RUSTFS_COMPRESS_ENABLE=on`, compression can be configured
//! to apply only to specific file extensions, MIME types, and minimum file sizes.

/// Environment variable to enable/disable HTTP response compression
/// Default: off (disabled)
/// Values: on, off, true, false, yes, no, 1, 0
/// Example: RUSTFS_COMPRESS_ENABLE=on
pub const ENV_COMPRESS_ENABLE: &str = "RUSTFS_COMPRESS_ENABLE";

/// Default compression enable state
/// Aligned with MinIO behavior - compression is disabled by default
pub const DEFAULT_COMPRESS_ENABLE: bool = false;

/// Environment variable for file extensions that should be compressed
/// Comma-separated list of file extensions (with or without leading dot)
/// Default: "" (empty, meaning use MIME type matching only)
/// Example: RUSTFS_COMPRESS_EXTENSIONS=.txt,.log,.csv,.json,.xml,.html,.css,.js
pub const ENV_COMPRESS_EXTENSIONS: &str = "RUSTFS_COMPRESS_EXTENSIONS";

/// Default file extensions for compression
/// Empty by default - relies on MIME type matching
pub const DEFAULT_COMPRESS_EXTENSIONS: &str = "";

/// Environment variable for MIME types that should be compressed
/// Comma-separated list of MIME types, supports wildcard (*) for subtypes
/// Default: "text/*,application/json,application/xml,application/javascript"
/// Example: RUSTFS_COMPRESS_MIME_TYPES=text/*,application/json,application/xml
pub const ENV_COMPRESS_MIME_TYPES: &str = "RUSTFS_COMPRESS_MIME_TYPES";

/// Default MIME types for compression
/// Includes common text-based content types that benefit from compression
pub const DEFAULT_COMPRESS_MIME_TYPES: &str = "text/*,application/json,application/xml,application/javascript";

/// Environment variable for minimum file size to apply compression
/// Files smaller than this size will not be compressed
/// Default: 1000 (bytes)
/// Example: RUSTFS_COMPRESS_MIN_SIZE=1000
pub const ENV_COMPRESS_MIN_SIZE: &str = "RUSTFS_COMPRESS_MIN_SIZE";

/// Default minimum file size for compression (in bytes)
/// Files smaller than 1000 bytes typically don't benefit from compression
/// and the compression overhead may outweigh the benefits
pub const DEFAULT_COMPRESS_MIN_SIZE: u64 = 1000;
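A sketch (not from the diff) of how the documented values could be parsed and a wildcard MIME pattern matched; the helper names are hypothetical, not RustFS's actual functions.

```rust
// Accepts the documented enable values: on, true, yes, 1 (case-insensitive).
fn parse_enable(raw: &str) -> bool {
    matches!(raw.trim().to_ascii_lowercase().as_str(), "on" | "true" | "yes" | "1")
}

// "text/*" matches any subtype of "text"; otherwise compare exactly.
fn mime_matches(pattern: &str, mime: &str) -> bool {
    match pattern.strip_suffix("/*") {
        Some(main) => mime.split('/').next() == Some(main),
        None => pattern.eq_ignore_ascii_case(mime),
    }
}

// Combines the minimum-size and MIME-type rules using the defaults above.
fn should_compress(mime: &str, size: u64) -> bool {
    size >= DEFAULT_COMPRESS_MIN_SIZE
        && DEFAULT_COMPRESS_MIME_TYPES
            .split(',')
            .any(|p| mime_matches(p.trim(), mime))
}
```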
@@ -16,7 +16,8 @@ pub const DEFAULT_DELIMITER: &str = "_";
 pub const ENV_PREFIX: &str = "RUSTFS_";
 pub const ENV_WORD_DELIMITER: &str = "_";

-pub const DEFAULT_DIR: &str = "/opt/rustfs/events"; // Default directory for event store
+pub const EVENT_DEFAULT_DIR: &str = "/opt/rustfs/events"; // Default directory for event store
+pub const AUDIT_DEFAULT_DIR: &str = "/opt/rustfs/audit"; // Default directory for audit store
 pub const DEFAULT_LIMIT: u64 = 100000; // Default store limit

 /// Standard config keys and values.

88 crates/config/src/constants/heal.rs Normal file
@@ -0,0 +1,88 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/// Environment variable name that enables or disables auto-heal functionality.
/// - Purpose: Control whether the system automatically performs heal operations.
/// - Valid values: "true" or "false" (case insensitive).
/// - Semantics: When set to "true", auto-heal is enabled and the system will automatically attempt to heal detected issues; when set to "false", auto-heal is disabled and healing must be triggered manually.
/// - Example: `export RUSTFS_HEAL_AUTO_HEAL_ENABLE=true`
/// - Note: Enabling auto-heal can improve system resilience by automatically addressing issues, but may increase resource usage; evaluate based on your operational requirements.
pub const ENV_HEAL_AUTO_HEAL_ENABLE: &str = "RUSTFS_HEAL_AUTO_HEAL_ENABLE";

/// Environment variable name that specifies the heal queue size.
///
/// - Purpose: Set the maximum number of heal requests that can be queued.
/// - Unit: number of requests (usize).
/// - Valid values: any positive integer.
/// - Semantics: When the heal queue reaches this size, new heal requests may be rejected or blocked until space is available; tune according to expected heal workload and system capacity.
/// - Example: `export RUSTFS_HEAL_QUEUE_SIZE=10000`
/// - Note: A larger queue size can accommodate bursts of heal requests but may increase memory usage.
pub const ENV_HEAL_QUEUE_SIZE: &str = "RUSTFS_HEAL_QUEUE_SIZE";

/// Environment variable name that specifies the heal interval in seconds.
/// - Purpose: Define the time interval between successive heal operations.
/// - Unit: seconds (u64).
/// - Valid values: any positive integer.
/// - Semantics: This interval controls how frequently the heal manager checks for and processes heal requests; shorter intervals lead to more responsive healing but may increase system load.
/// - Example: `export RUSTFS_HEAL_INTERVAL_SECS=10`
/// - Note: Choose an interval that balances healing responsiveness with overall system performance.
pub const ENV_HEAL_INTERVAL_SECS: &str = "RUSTFS_HEAL_INTERVAL_SECS";

/// Environment variable name that specifies the heal task timeout in seconds.
/// - Purpose: Set the maximum duration allowed for a heal task to complete.
/// - Unit: seconds (u64).
/// - Valid values: any positive integer.
/// - Semantics: If a heal task exceeds this timeout, it may be aborted or retried; tune according to the expected duration of heal operations and system performance characteristics.
/// - Example: `export RUSTFS_HEAL_TASK_TIMEOUT_SECS=300`
/// - Note: Setting an appropriate timeout helps prevent long-running heal tasks from impacting system stability.
pub const ENV_HEAL_TASK_TIMEOUT_SECS: &str = "RUSTFS_HEAL_TASK_TIMEOUT_SECS";

/// Environment variable name that specifies the maximum number of concurrent heal operations.
/// - Purpose: Limit the number of heal operations that can run simultaneously.
/// - Unit: number of operations (usize).
/// - Valid values: any positive integer.
/// - Semantics: This limit helps control resource usage during healing; tune according to system capacity and expected heal workload.
/// - Example: `export RUSTFS_HEAL_MAX_CONCURRENT_HEALS=4`
/// - Note: A higher concurrency limit can speed up healing but may lead to resource contention.
pub const ENV_HEAL_MAX_CONCURRENT_HEALS: &str = "RUSTFS_HEAL_MAX_CONCURRENT_HEALS";

/// Default value for auto-heal if not specified in the environment variable.
/// - Value: true (auto-heal enabled).
/// - Rationale: Enabling auto-heal by default improves system resilience by automatically addressing detected issues.
/// - Adjustments: Users may disable this feature via the `RUSTFS_HEAL_AUTO_HEAL_ENABLE` environment variable based on their operational requirements.
pub const DEFAULT_HEAL_AUTO_HEAL_ENABLE: bool = true;

/// Default heal queue size if not specified in the environment variable.
///
/// - Value: 10,000 requests.
/// - Rationale: This default size balances the need to handle typical heal workloads without excessive memory consumption.
/// - Adjustments: Users may modify this value via the `RUSTFS_HEAL_QUEUE_SIZE` environment variable based on their specific use cases and system capabilities.
pub const DEFAULT_HEAL_QUEUE_SIZE: usize = 10_000;

/// Default heal interval in seconds if not specified in the environment variable.
/// - Value: 10 seconds.
/// - Rationale: This default interval provides a reasonable balance between healing responsiveness and system load for most deployments.
/// - Adjustments: Users may modify this value via the `RUSTFS_HEAL_INTERVAL_SECS` environment variable based on their specific healing requirements and system performance.
pub const DEFAULT_HEAL_INTERVAL_SECS: u64 = 10;

/// Default heal task timeout in seconds if not specified in the environment variable.
/// - Value: 300 seconds (5 minutes).
/// - Rationale: This default timeout allows sufficient time for most heal operations to complete while preventing excessively long-running tasks.
/// - Adjustments: Users may modify this value via the `RUSTFS_HEAL_TASK_TIMEOUT_SECS` environment variable based on their specific heal operation characteristics and system performance.
pub const DEFAULT_HEAL_TASK_TIMEOUT_SECS: u64 = 300; // 5 minutes

/// Default maximum number of concurrent heal operations if not specified in the environment variable.
/// - Value: 4 concurrent heal operations.
/// - Rationale: This default concurrency limit helps balance healing speed with resource usage, preventing system overload.
/// - Adjustments: Users may modify this value via the `RUSTFS_HEAL_MAX_CONCURRENT_HEALS` environment variable based on their system capacity and expected heal workload.
pub const DEFAULT_HEAL_MAX_CONCURRENT_HEALS: usize = 4;
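A sketch (not from the diff) of resolving one of these knobs against its default; the generic helper is illustrative, not RustFS's actual config loader.

```rust
use std::str::FromStr;

// Returns the parsed environment value, or the default if unset/unparseable.
fn env_or<T: FromStr>(key: &str, default: T) -> T {
    std::env::var(key)
        .ok()
        .and_then(|v| v.parse::<T>().ok())
        .unwrap_or(default)
}

// e.g.:
// let queue_size = env_or(ENV_HEAL_QUEUE_SIZE, DEFAULT_HEAL_QUEUE_SIZE);
// let interval = std::time::Duration::from_secs(
//     env_or(ENV_HEAL_INTERVAL_SECS, DEFAULT_HEAL_INTERVAL_SECS));
```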
@@ -13,8 +13,12 @@
// limitations under the License.

pub(crate) mod app;
pub(crate) mod body_limits;
pub(crate) mod compress;
pub(crate) mod console;
pub(crate) mod env;
pub(crate) mod heal;
pub(crate) mod object;
pub(crate) mod profiler;
pub(crate) mod runtime;
pub(crate) mod targets;

169 crates/config/src/constants/object.rs Normal file
@@ -0,0 +1,169 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/// Environment variable name to toggle object-level in-memory caching.
///
/// - Purpose: Enable or disable the object-level in-memory cache (moka).
/// - Acceptable values: `"true"` / `"false"` (case-insensitive) or a boolean typed config.
/// - Semantics: When enabled, the system keeps fully-read objects in memory to reduce backend requests; when disabled, reads bypass the object cache.
/// - Example: `export RUSTFS_OBJECT_CACHE_ENABLE=true`
/// - Note: Evaluate together with `RUSTFS_OBJECT_CACHE_CAPACITY_MB`, TTL/TTI and concurrency thresholds to balance memory usage and throughput.
pub const ENV_OBJECT_CACHE_ENABLE: &str = "RUSTFS_OBJECT_CACHE_ENABLE";

/// Environment variable name that specifies the object cache capacity in megabytes.
///
/// - Purpose: Set the maximum total capacity of the object cache (in MB).
/// - Unit: MB (1 MB = 1_048_576 bytes).
/// - Valid values: any positive integer (0 may indicate disabled or alternative handling).
/// - Semantics: When the moka cache reaches this capacity, eviction policies will remove entries; tune according to available memory and object size distribution.
/// - Example: `export RUSTFS_OBJECT_CACHE_CAPACITY_MB=512`
/// - Note: Actual memory usage will be slightly higher due to object headers and indexing overhead.
pub const ENV_OBJECT_CACHE_CAPACITY_MB: &str = "RUSTFS_OBJECT_CACHE_CAPACITY_MB";

/// Environment variable name for the maximum object size eligible for caching, in megabytes.
///
/// - Purpose: Define the upper size limit for individual objects to be considered for caching.
/// - Unit: MB (1 MB = 1_048_576 bytes).
/// - Valid values: any positive integer; objects larger than this size will not be cached.
/// - Semantics: Prevents caching of excessively large objects that could monopolize cache capacity; tune based on typical object size distribution.
/// - Example: `export RUSTFS_OBJECT_CACHE_MAX_OBJECT_SIZE_MB=50`
/// - Note: Setting this too low may reduce cache effectiveness; setting it too high may lead to inefficient memory usage.
pub const ENV_OBJECT_CACHE_MAX_OBJECT_SIZE_MB: &str = "RUSTFS_OBJECT_CACHE_MAX_OBJECT_SIZE_MB";

/// Environment variable name for object cache TTL (time-to-live) in seconds.
///
/// - Purpose: Specify the maximum lifetime of a cached entry from the moment it is written.
/// - Unit: seconds (u64).
/// - Semantics: TTL acts as a hard upper bound; entries older than TTL are considered expired and removed by periodic cleanup.
/// - Example: `export RUSTFS_OBJECT_CACHE_TTL_SECS=300`
/// - Note: TTL and TTI both apply; either policy can cause eviction.
pub const ENV_OBJECT_CACHE_TTL_SECS: &str = "RUSTFS_OBJECT_CACHE_TTL_SECS";

/// Environment variable name for object cache TTI (time-to-idle) in seconds.
///
/// - Purpose: Specify how long an entry may remain in cache without being accessed before it is evicted.
/// - Unit: seconds (u64).
/// - Semantics: TTI helps remove one-time or infrequently used entries; frequent accesses reset idle timers but do not extend beyond TTL unless additional logic exists.
/// - Example: `export RUSTFS_OBJECT_CACHE_TTI_SECS=120`
/// - Note: Works together with TTL to keep the cache populated with actively used objects.
pub const ENV_OBJECT_CACHE_TTI_SECS: &str = "RUSTFS_OBJECT_CACHE_TTI_SECS";

/// Environment variable name for the "hot" object hit-count threshold used to extend lifetime.
///
/// - Purpose: Define a hit-count threshold to mark objects as "hot" so they may be treated preferentially near expiration.
/// - Valid values: positive integer (usize).
/// - Semantics: Objects reaching this hit count can be considered for relaxed eviction to avoid thrashing hot items.
/// - Example: `export RUSTFS_OBJECT_HOT_MIN_HITS_TO_EXTEND=5`
/// - Note: This is an optional enhancement and requires cache-layer statistics and extension logic to take effect.
pub const ENV_OBJECT_HOT_MIN_HITS_TO_EXTEND: &str = "RUSTFS_OBJECT_HOT_MIN_HITS_TO_EXTEND";

/// Environment variable name for the high concurrency threshold used in adaptive buffering.
///
/// - Purpose: When the concurrent request count exceeds this threshold, the system enters a "high concurrency" optimization mode to reduce per-request buffer sizes.
/// - Unit: request count (usize).
/// - Semantics: High concurrency mode reduces per-request buffers (e.g., to a fraction of base size) to protect overall memory and fairness.
/// - Example: `export RUSTFS_OBJECT_HIGH_CONCURRENCY_THRESHOLD=8`
/// - Note: This affects buffering and I/O behavior, not cache capacity directly.
pub const ENV_OBJECT_HIGH_CONCURRENCY_THRESHOLD: &str = "RUSTFS_OBJECT_HIGH_CONCURRENCY_THRESHOLD";

/// Environment variable name for the medium concurrency threshold used in adaptive buffering.
///
/// - Purpose: Define the boundary for "medium concurrency" where more moderate buffer adjustments apply.
/// - Unit: request count (usize).
/// - Semantics: In the medium range, buffers are reduced moderately to balance throughput and memory efficiency.
/// - Example: `export RUSTFS_OBJECT_MEDIUM_CONCURRENCY_THRESHOLD=4`
/// - Note: Tune this value based on target workload and hardware.
pub const ENV_OBJECT_MEDIUM_CONCURRENCY_THRESHOLD: &str = "RUSTFS_OBJECT_MEDIUM_CONCURRENCY_THRESHOLD";

/// Environment variable name for maximum concurrent disk reads for object operations.
/// - Purpose: Limit the number of concurrent disk read operations for object reads to prevent I/O saturation.
/// - Unit: request count (usize).
/// - Semantics: Throttling disk reads helps maintain overall system responsiveness under load.
/// - Example: `export RUSTFS_OBJECT_MAX_CONCURRENT_DISK_READS=16`
/// - Note: This setting may interact with OS-level I/O scheduling and should be tuned based on hardware capabilities.
pub const ENV_OBJECT_MAX_CONCURRENT_DISK_READS: &str = "RUSTFS_OBJECT_MAX_CONCURRENT_DISK_READS";

/// Default: object caching is disabled.
///
/// - Semantics: Safe default to avoid unexpected memory usage or cache consistency concerns when not explicitly enabled.
/// - Default is set to false (disabled).
pub const DEFAULT_OBJECT_CACHE_ENABLE: bool = false;

/// Default object cache capacity in MB.
///
/// - Default: 100 MB (can be overridden by `RUSTFS_OBJECT_CACHE_CAPACITY_MB`).
/// - Note: Choose a conservative default to reduce memory pressure in development/testing.
pub const DEFAULT_OBJECT_CACHE_CAPACITY_MB: u64 = 100;

/// Default maximum object size eligible for caching in MB.
///
/// - Default: 10 MB (can be overridden by `RUSTFS_OBJECT_CACHE_MAX_OBJECT_SIZE_MB`).
/// - Note: Balances caching effectiveness with memory usage.
pub const DEFAULT_OBJECT_CACHE_MAX_OBJECT_SIZE_MB: usize = 10;

/// Maximum concurrent requests before applying aggressive optimization.
///
/// When concurrent requests exceed this threshold (>8), the system switches to
/// aggressive memory optimization mode, reducing buffer sizes to 40% of base size
/// to prevent memory exhaustion and ensure fair resource allocation.
///
/// This helps maintain system stability under high load conditions.
/// Default is set to 8 concurrent requests.
pub const DEFAULT_OBJECT_HIGH_CONCURRENCY_THRESHOLD: usize = 8;

/// Medium concurrency threshold for buffer size adjustment.
///
/// At this level (3-4 requests), buffers are reduced to 75% of base size to
/// balance throughput and memory efficiency as load increases.
///
/// This helps maintain performance without overly aggressive memory reduction.
///
/// Default is set to 4 concurrent requests.
pub const DEFAULT_OBJECT_MEDIUM_CONCURRENCY_THRESHOLD: usize = 4;

/// Maximum concurrent disk reads for object operations.
/// Limits the number of simultaneous disk read operations to prevent I/O saturation.
///
/// A higher value may improve throughput on high-performance storage,
/// but could also lead to increased latency if the disk becomes overloaded.
///
/// Default is set to 64 concurrent reads.
pub const DEFAULT_OBJECT_MAX_CONCURRENT_DISK_READS: usize = 64;

/// Time-to-live for cached objects (5 minutes = 300 seconds).
///
/// After this duration, cached objects are automatically expired by Moka's
/// background cleanup process, even if they haven't been accessed. This prevents
/// stale data from consuming cache capacity indefinitely.
///
/// Default is set to 300 seconds.
pub const DEFAULT_OBJECT_CACHE_TTL_SECS: u64 = 300;

/// Time-to-idle for cached objects (2 minutes = 120 seconds).
///
/// Objects that haven't been accessed for this duration are automatically evicted,
/// even if their TTL hasn't expired. This ensures the cache is populated with actively
/// used objects and clears out one-time reads efficiently.
///
/// Default is set to 120 seconds.
pub const DEFAULT_OBJECT_CACHE_TTI_SECS: u64 = 120;

/// Minimum hit count to extend object lifetime beyond TTL.
///
/// "Hot" objects that have been accessed at least this many times are treated
/// specially - they can survive longer in cache even as they approach TTL expiration.
/// This prevents frequently accessed objects from being evicted prematurely.
///
/// Default is set to 5 hits.
pub const DEFAULT_OBJECT_HOT_MIN_HITS_TO_EXTEND: usize = 5;
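A sketch (not from the diff) of how these knobs could combine: building the moka cache with the TTL/TTI defaults above, plus the 40%/75% adaptive buffer rule from the doc comments. The exact banding between the medium and high thresholds is assumed, as is the size-weighted capacity.

```rust
use std::time::Duration;

// Assumes the moka crate; weigher/max_capacity/time_to_live/time_to_idle are
// standard CacheBuilder methods. Weighing by value length makes max_capacity
// count bytes rather than entries.
fn build_object_cache() -> moka::sync::Cache<String, Vec<u8>> {
    moka::sync::Cache::builder()
        .weigher(|_key: &String, value: &Vec<u8>| value.len().min(u32::MAX as usize) as u32)
        .max_capacity(DEFAULT_OBJECT_CACHE_CAPACITY_MB * 1024 * 1024)
        .time_to_live(Duration::from_secs(DEFAULT_OBJECT_CACHE_TTL_SECS))
        .time_to_idle(Duration::from_secs(DEFAULT_OBJECT_CACHE_TTI_SECS))
        .build()
}

// The adaptive buffer rule the doc comments describe, as a pure function.
fn adaptive_buffer_size(base: usize, concurrent_requests: usize) -> usize {
    if concurrent_requests > DEFAULT_OBJECT_HIGH_CONCURRENCY_THRESHOLD {
        base * 40 / 100 // aggressive mode under high load
    } else if concurrent_requests >= DEFAULT_OBJECT_MEDIUM_CONCURRENCY_THRESHOLD {
        base * 75 / 100 // moderate reduction at medium load
    } else {
        base // light load: full-size buffers
    }
}
```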
@@ -39,3 +39,10 @@ pub const DEFAULT_MAX_IO_EVENTS_PER_TICK: usize = 1024;
/// Event polling default (Tokio default 61)
pub const DEFAULT_EVENT_INTERVAL: u32 = 61;
pub const DEFAULT_RNG_SEED: Option<u64> = None; // None means random

/// Threshold for small-object seek support, in bytes.
///
/// When an object is smaller than this size, rustfs will provide seek support.
///
/// Default is set to 10 MB.
pub const DEFAULT_OBJECT_SEEK_SUPPORT_THRESHOLD: usize = 10 * 1024 * 1024;

@@ -12,4 +12,26 @@
// See the License for the specific language governing permissions and
// limitations under the License.

/// TLS related environment variable names and default values
/// Environment variable to enable TLS key logging
/// When set to "1", RustFS will log TLS keys to the specified file for debugging purposes.
/// By default, this is disabled.
/// To enable, set the environment variable RUSTFS_TLS_KEYLOG=1
pub const ENV_TLS_KEYLOG: &str = "RUSTFS_TLS_KEYLOG";

/// Default value for TLS key logging
/// By default, RustFS does not log TLS keys.
/// To change this behavior, set the environment variable RUSTFS_TLS_KEYLOG=1
pub const DEFAULT_TLS_KEYLOG: bool = false;

/// Environment variable to trust system CA certificates
/// When set to "1", RustFS will trust system CA certificates in addition to any
/// custom CA certificates provided in the configuration.
/// By default, this is disabled.
/// To enable, set the environment variable RUSTFS_TRUST_SYSTEM_CA=1
pub const ENV_TRUST_SYSTEM_CA: &str = "RUSTFS_TRUST_SYSTEM_CA";

/// Default value for trusting system CA certificates
/// By default, RustFS does not trust system CA certificates.
/// To change this behavior, set the environment variable RUSTFS_TRUST_SYSTEM_CA=1
pub const DEFAULT_TRUST_SYSTEM_CA: bool = false;

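A sketch (not from the diff) of honoring the keylog flag with rustls: `rustls::KeyLogFile` writes session secrets to the path in the `SSLKEYLOGFILE` environment variable. The wiring function itself is an assumption about how the flag could be applied, not RustFS's actual code.

```rust
// Hypothetical integration point; rustls::ServerConfig exposes a public
// `key_log` field and KeyLogFile::new() is the stock file-based logger.
fn apply_keylog(config: &mut rustls::ServerConfig) {
    let enabled = std::env::var(ENV_TLS_KEYLOG)
        .map(|v| v == "1")
        .unwrap_or(DEFAULT_TLS_KEYLOG);
    if enabled {
        config.key_log = std::sync::Arc::new(rustls::KeyLogFile::new());
    }
}
```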
@@ -17,10 +17,18 @@ pub mod constants;
#[cfg(feature = "constants")]
pub use constants::app::*;
#[cfg(feature = "constants")]
pub use constants::body_limits::*;
#[cfg(feature = "constants")]
pub use constants::compress::*;
#[cfg(feature = "constants")]
pub use constants::console::*;
#[cfg(feature = "constants")]
pub use constants::env::*;
#[cfg(feature = "constants")]
pub use constants::heal::*;
#[cfg(feature = "constants")]
pub use constants::object::*;
#[cfg(feature = "constants")]
pub use constants::profiler::*;
#[cfg(feature = "constants")]
pub use constants::runtime::*;

@@ -24,13 +24,45 @@ pub use webhook::*;

use crate::DEFAULT_DELIMITER;

// --- Configuration Constants ---
/// Default target identifier for notifications.
/// Used when no specific target is provided; represents the default target
/// stream or endpoint for notifications.
pub const DEFAULT_TARGET: &str = "1";

/// Notification prefix for routing and identification.
/// This prefix is used when constructing routes and identifiers related to
/// notifications within the system.
pub const NOTIFY_PREFIX: &str = "notify";

/// Notification route prefix combining the notification prefix and the default delimiter.
/// Used by the notification system for defining notification-related routes.
pub const NOTIFY_ROUTE_PREFIX: &str = const_str::concat!(NOTIFY_PREFIX, DEFAULT_DELIMITER);

/// Name of the environment variable that configures target stream concurrency.
/// Controls how many target streams are processed in parallel by the notification system.
/// Defaults to [`DEFAULT_NOTIFY_TARGET_STREAM_CONCURRENCY`] if not set.
/// Example: `RUSTFS_NOTIFY_TARGET_STREAM_CONCURRENCY=20`.
pub const ENV_NOTIFY_TARGET_STREAM_CONCURRENCY: &str = "RUSTFS_NOTIFY_TARGET_STREAM_CONCURRENCY";

/// Default concurrency for target stream processing in the notification system.
/// This value is used if the environment variable `RUSTFS_NOTIFY_TARGET_STREAM_CONCURRENCY` is not set.
/// It defines how many target streams can be processed in parallel by the notification system at any given time.
/// Adjust this value based on your system's capabilities and expected load.
pub const DEFAULT_NOTIFY_TARGET_STREAM_CONCURRENCY: usize = 20;

/// Name of the environment variable that configures send concurrency.
/// Controls how many send operations are processed in parallel by the notification system.
/// Defaults to [`DEFAULT_NOTIFY_SEND_CONCURRENCY`] if not set.
/// Example: `RUSTFS_NOTIFY_SEND_CONCURRENCY=64`.
pub const ENV_NOTIFY_SEND_CONCURRENCY: &str = "RUSTFS_NOTIFY_SEND_CONCURRENCY";

/// Default concurrency for send operations in the notification system.
/// This value is used if the environment variable `RUSTFS_NOTIFY_SEND_CONCURRENCY` is not set.
/// It defines how many send operations can be processed in parallel by the notification system at any given time.
/// Adjust this value based on your system's capabilities and expected load.
pub const DEFAULT_NOTIFY_SEND_CONCURRENCY: usize = 64;

#[allow(dead_code)]
pub const NOTIFY_SUB_SYSTEMS: &[&str] = &[NOTIFY_MQTT_SUB_SYS, NOTIFY_WEBHOOK_SUB_SYS];

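A sketch (not from the diff) of bounding parallel sends with the configured concurrency using `futures`' `for_each_concurrent`; the item type and delivery step are placeholders.

```rust
use futures::stream::{self, StreamExt};

// Processes at most `limit` items at a time; items beyond the limit wait for
// a slot rather than spawning unbounded work.
async fn send_all<T: Send>(items: Vec<T>, limit: usize) {
    stream::iter(items)
        .for_each_concurrent(limit, |item| async move {
            let _ = item; // deliver(item).await — hypothetical per-target send
        })
        .await;
}

// e.g. send_all(batch, DEFAULT_NOTIFY_SEND_CONCURRENCY).await;
```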
@@ -15,5 +15,5 @@
 pub const DEFAULT_EXT: &str = ".unknown"; // Default file extension
 pub const COMPRESS_EXT: &str = ".snappy"; // Extension for compressed files

-/// STORE_EXTENSION - file extension of an event file in store
-pub const STORE_EXTENSION: &str = ".event";
+/// NOTIFY_STORE_EXTENSION - file extension of an event file in store
+pub const NOTIFY_STORE_EXTENSION: &str = ".event";

@@ -30,7 +30,7 @@ workspace = true

 [dependencies]
 aes-gcm = { workspace = true, optional = true }
-argon2 = { workspace = true, features = ["std"], optional = true }
+argon2 = { workspace = true, optional = true }
 cfg-if = { workspace = true }
 chacha20poly1305 = { workspace = true, optional = true }
 jsonwebtoken = { workspace = true }

@@ -8,7 +8,7 @@

 <p align="center">
 <a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
-<a href="https://docs.rustfs.com/en/">📖 Documentation</a>
+<a href="https://docs.rustfs.com/">📖 Documentation</a>
 · <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
 · <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
 </p>

@@ -25,6 +25,7 @@ workspace = true

 [dependencies]
 rustfs-ecstore.workspace = true
+rustfs-common.workspace = true
 flatbuffers.workspace = true
 futures.workspace = true
 rustfs-lock.workspace = true
@@ -49,4 +50,4 @@ uuid = { workspace = true }
 base64 = { workspace = true }
 rand = { workspace = true }
 chrono = { workspace = true }
-md5 = { workspace = true }
+md5 = { workspace = true }

@@ -327,7 +327,8 @@ pub async fn execute_awscurl(

     if !output.status.success() {
         let stderr = String::from_utf8_lossy(&output.stderr);
-        return Err(format!("awscurl failed: {stderr}").into());
+        let stdout = String::from_utf8_lossy(&output.stdout);
+        return Err(format!("awscurl failed: stderr='{stderr}', stdout='{stdout}'").into());
     }

     let response = String::from_utf8_lossy(&output.stdout).to_string();
@@ -352,3 +353,13 @@ pub async fn awscurl_get(
) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
    execute_awscurl(url, "GET", None, access_key, secret_key).await
}

/// Helper function for PUT requests
pub async fn awscurl_put(
    url: &str,
    body: &str,
    access_key: &str,
    secret_key: &str,
) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
    execute_awscurl(url, "PUT", Some(body), access_key, secret_key).await
}

85 crates/e2e_test/src/content_encoding_test.rs Normal file
@@ -0,0 +1,85 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! End-to-end test for Content-Encoding header handling
//!
//! Tests that the Content-Encoding header is correctly stored during PUT
//! and returned in GET/HEAD responses. This is important for clients that
//! upload pre-compressed content and rely on the header for decompression.

#[cfg(test)]
mod tests {
    use crate::common::{RustFSTestEnvironment, init_logging};
    use aws_sdk_s3::primitives::ByteStream;
    use serial_test::serial;
    use tracing::info;

    /// Verify Content-Encoding header roundtrips through PUT, GET, and HEAD operations
    #[tokio::test]
    #[serial]
    async fn test_content_encoding_roundtrip() {
        init_logging();
        info!("Starting Content-Encoding roundtrip test");

        let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
        env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");

        let client = env.create_s3_client();
        let bucket = "content-encoding-test";
        let key = "logs/app.log.zst";
        let content = b"2024-01-15 10:23:45 INFO Application started\n2024-01-15 10:23:46 DEBUG Loading config\n";

        client
            .create_bucket()
            .bucket(bucket)
            .send()
            .await
            .expect("Failed to create bucket");

        info!("Uploading object with Content-Encoding: zstd");
        client
            .put_object()
            .bucket(bucket)
            .key(key)
            .content_type("text/plain")
            .content_encoding("zstd")
            .body(ByteStream::from_static(content))
            .send()
            .await
            .expect("PUT failed");

        info!("Verifying GET response includes Content-Encoding");
        let get_resp = client.get_object().bucket(bucket).key(key).send().await.expect("GET failed");

        assert_eq!(get_resp.content_encoding(), Some("zstd"), "GET should return Content-Encoding: zstd");
        assert_eq!(get_resp.content_type(), Some("text/plain"), "GET should return correct Content-Type");

        let body = get_resp.body.collect().await.unwrap().into_bytes();
        assert_eq!(body.as_ref(), content, "Body content mismatch");

        info!("Verifying HEAD response includes Content-Encoding");
        let head_resp = client
            .head_object()
            .bucket(bucket)
            .key(key)
            .send()
            .await
            .expect("HEAD failed");

        assert_eq!(head_resp.content_encoding(), Some("zstd"), "HEAD should return Content-Encoding: zstd");
        assert_eq!(head_resp.content_type(), Some("text/plain"), "HEAD should return correct Content-Type");

        env.stop_server();
    }
}
73 crates/e2e_test/src/data_usage_test.rs Normal file
@@ -0,0 +1,73 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use aws_sdk_s3::primitives::ByteStream;
use rustfs_common::data_usage::DataUsageInfo;
use serial_test::serial;

use crate::common::{RustFSTestEnvironment, TEST_BUCKET, awscurl_get, init_logging};

/// Regression test for data usage accuracy (issue #1012).
/// Launches rustfs, writes 1000 objects, then asserts admin data usage reports the full count.
#[tokio::test(flavor = "multi_thread")]
#[serial]
#[ignore = "Starts a rustfs server and requires awscurl; enable when running full E2E"]
async fn data_usage_reports_all_objects() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    init_logging();

    let mut env = RustFSTestEnvironment::new().await?;
    env.start_rustfs_server(vec![]).await?;

    let client = env.create_s3_client();

    // Create bucket and upload objects
    client.create_bucket().bucket(TEST_BUCKET).send().await?;

    for i in 0..1000 {
        let key = format!("obj-{i:04}");
        client
            .put_object()
            .bucket(TEST_BUCKET)
            .key(key)
            .body(ByteStream::from_static(b"hello-world"))
            .send()
            .await?;
    }

    // Query admin data usage API
    let url = format!("{}/rustfs/admin/v3/datausageinfo", env.url);
    let resp = awscurl_get(&url, &env.access_key, &env.secret_key).await?;
    let usage: DataUsageInfo = serde_json::from_str(&resp)?;

    // Assert total object count and per-bucket count are not truncated
    let bucket_usage = usage
        .buckets_usage
        .get(TEST_BUCKET)
        .cloned()
        .expect("bucket usage should exist");

    assert!(
        usage.objects_total_count >= 1000,
        "total object count should be at least 1000, got {}",
        usage.objects_total_count
    );
    assert!(
        bucket_usage.objects_count >= 1000,
        "bucket object count should be at least 1000, got {}",
        bucket_usage.objects_count
    );

    env.stop_server();
    Ok(())
}
@@ -18,6 +18,22 @@ mod reliant;
#[cfg(test)]
pub mod common;

// Data usage regression tests
#[cfg(test)]
mod data_usage_test;

// KMS-specific test modules
#[cfg(test)]
mod kms;

// Special characters in path test modules
#[cfg(test)]
mod special_chars_test;

// Content-Encoding header preservation test
#[cfg(test)]
mod content_encoding_test;

// Policy variables tests
#[cfg(test)]
mod policy;

39 crates/e2e_test/src/policy/README.md Normal file
@@ -0,0 +1,39 @@
# RustFS Policy Variables Tests

This directory contains comprehensive end-to-end tests for AWS IAM policy variables in RustFS.

## Test Overview

The tests cover the following AWS policy variable scenarios (an example policy follows the list):

1. **Single-value variables** - Basic variable resolution like `${aws:username}`
2. **Multi-value variables** - Variables that can have multiple values
3. **Variable concatenation** - Combining variables with static text like `prefix-${aws:username}-suffix`
4. **Nested variables** - Complex nested variable patterns like `${${aws:username}-test}`
5. **Deny scenarios** - Testing deny policies with variables

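For orientation, a minimal sketch of a single-value-variable policy of the shape these tests install through the admin API (actions and bucket naming are illustrative):

```json
{
  "Version": "2012-10-17",
  "Statement": [
    {
      "Effect": "Allow",
      "Action": ["s3:CreateBucket", "s3:ListBucket"],
      "Resource": ["arn:aws:s3:::${aws:username}-*"]
    }
  ]
}
```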
## Prerequisites

- RustFS server binary
- `awscurl` utility for admin API calls
- AWS SDK for Rust (included in the project)

## Running Tests

### Run All Policy Tests Using Unified Test Runner

```bash
# Run all policy tests with comprehensive reporting
# Note: Requires a RustFS server running on localhost:9000
cargo test -p e2e_test policy::test_runner::test_policy_full_suite -- --nocapture --ignored --test-threads=1

# Run only critical policy tests
cargo test -p e2e_test policy::test_runner::test_policy_critical_suite -- --nocapture --ignored --test-threads=1
```

### Run All Policy Tests

```bash
# From the project root directory
cargo test -p e2e_test policy:: -- --nocapture --ignored --test-threads=1
```
@@ -11,3 +11,12 @@
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Policy-specific tests for RustFS
//!
//! This module provides comprehensive tests for AWS IAM policy variables
//! including single-value, multi-value, and nested variable scenarios.

mod policy_variables_test;
mod test_env;
mod test_runner;
798 crates/e2e_test/src/policy/policy_variables_test.rs Normal file
@@ -0,0 +1,798 @@
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Tests for AWS IAM policy variables with single-value, multi-value, and nested scenarios
|
||||
|
||||
use crate::common::{awscurl_put, init_logging};
|
||||
use crate::policy::test_env::PolicyTestEnvironment;
|
||||
use aws_sdk_s3::primitives::ByteStream;
|
||||
use serial_test::serial;
|
||||
use tracing::info;
|
||||
|
||||
/// Helper function to create a regular user with given credentials
|
||||
async fn create_user(
|
||||
env: &PolicyTestEnvironment,
|
||||
username: &str,
|
||||
password: &str,
|
||||
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
let create_user_body = serde_json::json!({
|
||||
"secretKey": password,
|
||||
"status": "enabled"
|
||||
})
|
||||
.to_string();
|
||||
|
||||
let create_user_url = format!("{}/rustfs/admin/v3/add-user?accessKey={}", env.url, username);
|
||||
awscurl_put(&create_user_url, &create_user_body, &env.access_key, &env.secret_key).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Helper function to create an STS user with given credentials
|
||||
async fn create_sts_user(
|
||||
env: &PolicyTestEnvironment,
|
||||
username: &str,
|
||||
password: &str,
|
||||
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
// For STS, we create a regular user first, then use it to assume roles
|
||||
create_user(env, username, password).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Helper function to create and attach a policy
|
||||
async fn create_and_attach_policy(
|
||||
env: &PolicyTestEnvironment,
|
||||
policy_name: &str,
|
||||
username: &str,
|
||||
policy_document: serde_json::Value,
|
||||
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
let policy_string = policy_document.to_string();
|
||||
|
||||
// Create policy
|
||||
let add_policy_url = format!("{}/rustfs/admin/v3/add-canned-policy?name={}", env.url, policy_name);
|
||||
awscurl_put(&add_policy_url, &policy_string, &env.access_key, &env.secret_key).await?;
|
||||
|
||||
// Attach policy to user
|
||||
let attach_policy_url = format!(
|
||||
"{}/rustfs/admin/v3/set-user-or-group-policy?policyName={}&userOrGroup={}&isGroup=false",
|
||||
env.url, policy_name, username
|
||||
);
|
||||
awscurl_put(&attach_policy_url, "", &env.access_key, &env.secret_key).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Helper function to clean up test resources
|
||||
async fn cleanup_user_and_policy(env: &PolicyTestEnvironment, username: &str, policy_name: &str) {
|
||||
// Create admin client for cleanup
|
||||
let admin_client = env.create_s3_client(&env.access_key, &env.secret_key);
|
||||
|
||||
// Delete buckets that might have been created by this user
|
||||
let bucket_patterns = [
|
||||
format!("{username}-test-bucket"),
|
||||
format!("{username}-bucket1"),
|
||||
format!("{username}-bucket2"),
|
||||
format!("{username}-bucket3"),
|
||||
format!("prefix-{username}-suffix"),
|
||||
format!("{username}-test"),
|
||||
format!("{username}-sts-bucket"),
|
||||
format!("{username}-service-bucket"),
|
||||
"private-test-bucket".to_string(), // For deny test
|
||||
];
|
||||
|
||||
// Try to delete objects and buckets
|
||||
for bucket_name in &bucket_patterns {
|
||||
let _ = admin_client
|
||||
.delete_object()
|
||||
.bucket(bucket_name)
|
||||
.key("test-object.txt")
|
||||
.send()
|
||||
.await;
|
||||
let _ = admin_client
|
||||
.delete_object()
|
||||
.bucket(bucket_name)
|
||||
.key("test-sts-object.txt")
|
||||
.send()
|
||||
.await;
|
||||
let _ = admin_client
|
||||
.delete_object()
|
||||
.bucket(bucket_name)
|
||||
.key("test-service-object.txt")
|
||||
.send()
|
||||
.await;
|
||||
let _ = admin_client.delete_bucket().bucket(bucket_name).send().await;
|
||||
}
|
||||
|
||||
// Remove user
|
||||
let remove_user_url = format!("{}/rustfs/admin/v3/remove-user?accessKey={}", env.url, username);
|
||||
let _ = awscurl_put(&remove_user_url, "", &env.access_key, &env.secret_key).await;
|
||||
|
||||
// Remove policy
|
||||
let remove_policy_url = format!("{}/rustfs/admin/v3/remove-canned-policy?name={}", env.url, policy_name);
|
||||
let _ = awscurl_put(&remove_policy_url, "", &env.access_key, &env.secret_key).await;
|
||||
}
|
||||
|
||||
/// Test AWS policy variables with single-value scenarios
|
||||
#[tokio::test(flavor = "multi_thread")]
|
||||
#[serial]
|
||||
#[ignore = "Starts a rustfs server; enable when running full E2E"]
|
||||
pub async fn test_aws_policy_variables_single_value() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
test_aws_policy_variables_single_value_impl().await
|
||||
}
|
||||
|
||||
/// Implementation function for single-value policy variables test
|
||||
pub async fn test_aws_policy_variables_single_value_impl() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
info!("Starting AWS policy variables single-value test");
|
||||
|
||||
let env = PolicyTestEnvironment::with_address("127.0.0.1:9000").await?;
|
||||
|
||||
test_aws_policy_variables_single_value_impl_with_env(&env).await
|
||||
}
|
||||
|
||||
/// Implementation function for single-value policy variables test with shared environment
|
||||
pub async fn test_aws_policy_variables_single_value_impl_with_env(
|
||||
env: &PolicyTestEnvironment,
|
||||
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
// Create test user
|
||||
let test_user = "testuser1";
|
||||
let test_password = "testpassword123";
|
||||
let policy_name = "test-single-value-policy";
|
||||
|
||||
// Create cleanup function
|
||||
let cleanup = || async {
|
||||
cleanup_user_and_policy(env, test_user, policy_name).await;
|
||||
};
|
||||
|
||||
let create_user_body = serde_json::json!({
|
||||
"secretKey": test_password,
|
||||
"status": "enabled"
|
||||
})
|
||||
.to_string();
|
||||
|
||||
let create_user_url = format!("{}/rustfs/admin/v3/add-user?accessKey={}", env.url, test_user);
|
||||
awscurl_put(&create_user_url, &create_user_body, &env.access_key, &env.secret_key).await?;
|
||||
|
||||
// Create policy with single-value AWS variables
|
||||
let policy_document = serde_json::json!({
|
||||
"Version": "2012-10-17",
|
||||
"Statement": [
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": ["s3:ListAllMyBuckets"],
|
||||
"Resource": ["arn:aws:s3:::*"]
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": ["s3:CreateBucket"],
|
||||
"Resource": [format!("arn:aws:s3:::{}-*", "${aws:username}")]
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": ["s3:ListBucket"],
|
||||
"Resource": [format!("arn:aws:s3:::{}-*", "${aws:username}")]
|
||||
},
|
||||
{
|
||||
"Effect": "Allow",
|
||||
"Action": ["s3:PutObject", "s3:GetObject"],
|
||||
"Resource": [format!("arn:aws:s3:::{}-*/*", "${aws:username}")]
|
||||
}
|
||||
]
|
||||
})
|
||||
.to_string();
|
||||
|
||||
    let add_policy_url = format!("{}/rustfs/admin/v3/add-canned-policy?name={}", env.url, policy_name);
    awscurl_put(&add_policy_url, &policy_document, &env.access_key, &env.secret_key).await?;

    // Attach policy to user
    let attach_policy_url = format!(
        "{}/rustfs/admin/v3/set-user-or-group-policy?policyName={}&userOrGroup={}&isGroup=false",
        env.url, policy_name, test_user
    );
    awscurl_put(&attach_policy_url, "", &env.access_key, &env.secret_key).await?;

    // Create S3 client for test user
    let test_client = env.create_s3_client(test_user, test_password);

    tokio::time::sleep(std::time::Duration::from_millis(500)).await;

    // Test 1: User should be able to list buckets (allowed by policy)
    info!("Test 1: User listing buckets");
    let list_result = test_client.list_buckets().send().await;
    if let Err(e) = list_result {
        cleanup().await;
        return Err(format!("User should be able to list buckets: {e}").into());
    }

    // Test 2: User should be able to create bucket matching username pattern
    info!("Test 2: User creating bucket matching pattern");
    let bucket_name = format!("{test_user}-test-bucket");
    let create_result = test_client.create_bucket().bucket(&bucket_name).send().await;
    if let Err(e) = create_result {
        cleanup().await;
        return Err(format!("User should be able to create bucket matching username pattern: {e}").into());
    }

    // Test 3: User should be able to list objects in their own bucket
    info!("Test 3: User listing objects in their bucket");
    let list_objects_result = test_client.list_objects_v2().bucket(&bucket_name).send().await;
    if let Err(e) = list_objects_result {
        cleanup().await;
        return Err(format!("User should be able to list objects in their own bucket: {e}").into());
    }

    // Test 4: User should be able to put object in their own bucket
    info!("Test 4: User putting object in their bucket");
    let put_result = test_client
        .put_object()
        .bucket(&bucket_name)
        .key("test-object.txt")
        .body(ByteStream::from_static(b"Hello, Policy Variables!"))
        .send()
        .await;
    if let Err(e) = put_result {
        cleanup().await;
        return Err(format!("User should be able to put object in their own bucket: {e}").into());
    }

    // Test 5: User should be able to get object from their own bucket
    info!("Test 5: User getting object from their bucket");
    let get_result = test_client
        .get_object()
        .bucket(&bucket_name)
        .key("test-object.txt")
        .send()
        .await;
    if let Err(e) = get_result {
        cleanup().await;
        return Err(format!("User should be able to get object from their own bucket: {e}").into());
    }

    // Test 6: User should NOT be able to create bucket NOT matching username pattern
    info!("Test 6: User attempting to create bucket NOT matching pattern");
    let other_bucket_name = "other-user-bucket";
    let create_other_result = test_client.create_bucket().bucket(other_bucket_name).send().await;
    if create_other_result.is_ok() {
        cleanup().await;
        return Err("User should NOT be able to create bucket NOT matching username pattern".into());
    }

    // Cleanup
    info!("Cleaning up test resources");
    cleanup().await;

    info!("AWS policy variables single-value test completed successfully");
    Ok(())
}

/// Test AWS policy variables with multi-value scenarios
#[tokio::test(flavor = "multi_thread")]
#[serial]
#[ignore = "Starts a rustfs server; enable when running full E2E"]
pub async fn test_aws_policy_variables_multi_value() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    test_aws_policy_variables_multi_value_impl().await
}

/// Implementation function for multi-value policy variables test
pub async fn test_aws_policy_variables_multi_value_impl() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    init_logging();
    info!("Starting AWS policy variables multi-value test");

    let env = PolicyTestEnvironment::with_address("127.0.0.1:9000").await?;

    test_aws_policy_variables_multi_value_impl_with_env(&env).await
}

/// Implementation function for multi-value policy variables test with shared environment
pub async fn test_aws_policy_variables_multi_value_impl_with_env(
    env: &PolicyTestEnvironment,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Create test user
    let test_user = "testuser2";
    let test_password = "testpassword123";
    let policy_name = "test-multi-value-policy";

    // Create cleanup function
    let cleanup = || async {
        cleanup_user_and_policy(env, test_user, policy_name).await;
    };

    // Create user
    create_user(env, test_user, test_password).await?;

    // Create policy with multi-value AWS variables
    let policy_document = serde_json::json!({
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Action": ["s3:ListAllMyBuckets"],
                "Resource": ["arn:aws:s3:::*"]
            },
            {
                "Effect": "Allow",
                "Action": ["s3:CreateBucket"],
                "Resource": [
                    format!("arn:aws:s3:::{}-bucket1", "${aws:username}"),
                    format!("arn:aws:s3:::{}-bucket2", "${aws:username}"),
                    format!("arn:aws:s3:::{}-bucket3", "${aws:username}")
                ]
            },
            {
                "Effect": "Allow",
                "Action": ["s3:ListBucket"],
                "Resource": [
                    format!("arn:aws:s3:::{}-bucket1", "${aws:username}"),
                    format!("arn:aws:s3:::{}-bucket2", "${aws:username}"),
                    format!("arn:aws:s3:::{}-bucket3", "${aws:username}")
                ]
            }
        ]
    });

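    // For user "testuser2" the three resources above resolve to exactly
    // testuser2-bucket1/2/3; any name outside that list (for example the
    // "{test_user}-other-bucket" used in Test 4 below) falls through to the
    // implicit default deny.
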
    create_and_attach_policy(env, policy_name, test_user, policy_document).await?;

    // Create S3 client for test user
    let test_client = env.create_s3_client(test_user, test_password);

    // Test 1: User should be able to create buckets matching any of the multi-value patterns
    info!("Test 1: User creating first bucket matching multi-value pattern");
    let bucket1_name = format!("{test_user}-bucket1");
    let create_result1 = test_client.create_bucket().bucket(&bucket1_name).send().await;
    if let Err(e) = create_result1 {
        cleanup().await;
        return Err(format!("User should be able to create first bucket matching multi-value pattern: {e}").into());
    }

    info!("Test 2: User creating second bucket matching multi-value pattern");
    let bucket2_name = format!("{test_user}-bucket2");
    let create_result2 = test_client.create_bucket().bucket(&bucket2_name).send().await;
    if let Err(e) = create_result2 {
        cleanup().await;
        return Err(format!("User should be able to create second bucket matching multi-value pattern: {e}").into());
    }

    info!("Test 3: User creating third bucket matching multi-value pattern");
    let bucket3_name = format!("{test_user}-bucket3");
    let create_result3 = test_client.create_bucket().bucket(&bucket3_name).send().await;
    if let Err(e) = create_result3 {
        cleanup().await;
        return Err(format!("User should be able to create third bucket matching multi-value pattern: {e}").into());
    }

    // Test 4: User should NOT be able to create bucket NOT matching any multi-value pattern
    info!("Test 4: User attempting to create bucket NOT matching any pattern");
    let other_bucket_name = format!("{test_user}-other-bucket");
    let create_other_result = test_client.create_bucket().bucket(&other_bucket_name).send().await;
    if create_other_result.is_ok() {
        cleanup().await;
        return Err("User should NOT be able to create bucket NOT matching any multi-value pattern".into());
    }

    // Test 5: User should be able to list objects in their allowed buckets
    info!("Test 5: User listing objects in allowed buckets");
    let list_objects_result1 = test_client.list_objects_v2().bucket(&bucket1_name).send().await;
    if let Err(e) = list_objects_result1 {
        cleanup().await;
        return Err(format!("User should be able to list objects in first allowed bucket: {e}").into());
    }

    let list_objects_result2 = test_client.list_objects_v2().bucket(&bucket2_name).send().await;
    if let Err(e) = list_objects_result2 {
        cleanup().await;
        return Err(format!("User should be able to list objects in second allowed bucket: {e}").into());
    }

    // Cleanup
    info!("Cleaning up test resources");
    cleanup().await;

    info!("AWS policy variables multi-value test completed successfully");
    Ok(())
}

/// Test AWS policy variables with variable concatenation
#[tokio::test(flavor = "multi_thread")]
#[serial]
#[ignore = "Starts a rustfs server; enable when running full E2E"]
pub async fn test_aws_policy_variables_concatenation() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    test_aws_policy_variables_concatenation_impl().await
}

/// Implementation function for concatenation policy variables test
pub async fn test_aws_policy_variables_concatenation_impl() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    init_logging();
    info!("Starting AWS policy variables concatenation test");

    let env = PolicyTestEnvironment::with_address("127.0.0.1:9000").await?;

    test_aws_policy_variables_concatenation_impl_with_env(&env).await
}

/// Implementation function for concatenation policy variables test with shared environment
pub async fn test_aws_policy_variables_concatenation_impl_with_env(
    env: &PolicyTestEnvironment,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Create test user
    let test_user = "testuser3";
    let test_password = "testpassword123";
    let policy_name = "test-concatenation-policy";

    // Create cleanup function
    let cleanup = || async {
        cleanup_user_and_policy(env, test_user, policy_name).await;
    };

    // Create user
    create_user(env, test_user, test_password).await?;

    // Create policy with variable concatenation
    let policy_document = serde_json::json!({
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Action": ["s3:ListAllMyBuckets"],
                "Resource": ["arn:aws:s3:::*"]
            },
            {
                "Effect": "Allow",
                "Action": ["s3:CreateBucket"],
                "Resource": [format!("arn:aws:s3:::prefix-{}-suffix", "${aws:username}")]
            },
            {
                "Effect": "Allow",
                "Action": ["s3:ListBucket"],
                "Resource": [format!("arn:aws:s3:::prefix-{}-suffix", "${aws:username}")]
            }
        ]
    });

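    // The variable is embedded mid-string here: for "testuser3" the resource
    // resolves to "arn:aws:s3:::prefix-testuser3-suffix", so only that exact
    // bucket name should be creatable and listable under this policy.
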
    create_and_attach_policy(env, policy_name, test_user, policy_document).await?;

    // Create S3 client for test user
    let test_client = env.create_s3_client(test_user, test_password);

    // Add a small delay to allow policy to propagate
    tokio::time::sleep(std::time::Duration::from_millis(500)).await;

    // Test: User should be able to create bucket matching concatenated pattern
    info!("Test: User creating bucket matching concatenated pattern");
    let bucket_name = format!("prefix-{test_user}-suffix");
    let create_result = test_client.create_bucket().bucket(&bucket_name).send().await;
    if let Err(e) = create_result {
        cleanup().await;
        return Err(format!("User should be able to create bucket matching concatenated pattern: {e}").into());
    }

    // Test: User should be able to list objects in the concatenated pattern bucket
    info!("Test: User listing objects in concatenated pattern bucket");
    let list_objects_result = test_client.list_objects_v2().bucket(&bucket_name).send().await;
    if let Err(e) = list_objects_result {
        cleanup().await;
        return Err(format!("User should be able to list objects in concatenated pattern bucket: {e}").into());
    }

    // Cleanup
    info!("Cleaning up test resources");
    cleanup().await;

    info!("AWS policy variables concatenation test completed successfully");
    Ok(())
}

/// Test AWS policy variables with nested scenarios
#[tokio::test(flavor = "multi_thread")]
#[serial]
#[ignore = "Starts a rustfs server; enable when running full E2E"]
pub async fn test_aws_policy_variables_nested() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    test_aws_policy_variables_nested_impl().await
}

/// Implementation function for nested policy variables test
pub async fn test_aws_policy_variables_nested_impl() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    init_logging();
    info!("Starting AWS policy variables nested test");

    let env = PolicyTestEnvironment::with_address("127.0.0.1:9000").await?;

    test_aws_policy_variables_nested_impl_with_env(&env).await
}

/// Test AWS policy variables with STS temporary credentials
#[tokio::test(flavor = "multi_thread")]
#[serial]
#[ignore = "Starts a rustfs server; enable when running full E2E"]
pub async fn test_aws_policy_variables_sts() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    test_aws_policy_variables_sts_impl().await
}

/// Implementation function for STS policy variables test
pub async fn test_aws_policy_variables_sts_impl() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    init_logging();
    info!("Starting AWS policy variables STS test");

    let env = PolicyTestEnvironment::with_address("127.0.0.1:9000").await?;

    test_aws_policy_variables_sts_impl_with_env(&env).await
}

/// Implementation function for nested policy variables test with shared environment
pub async fn test_aws_policy_variables_nested_impl_with_env(
    env: &PolicyTestEnvironment,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Create test user
    let test_user = "testuser4";
    let test_password = "testpassword123";
    let policy_name = "test-nested-policy";

    // Create cleanup function
    let cleanup = || async {
        cleanup_user_and_policy(env, test_user, policy_name).await;
    };

    // Create user
    create_user(env, test_user, test_password).await?;

    // Create policy with nested variables - this tests complex variable resolution
    let policy_document = serde_json::json!({
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Action": ["s3:ListAllMyBuckets"],
                "Resource": ["arn:aws:s3:::*"]
            },
            {
                "Effect": "Allow",
                "Action": ["s3:CreateBucket"],
                "Resource": ["arn:aws:s3:::${${aws:username}-test}"]
            },
            {
                "Effect": "Allow",
                "Action": ["s3:ListBucket"],
                "Resource": ["arn:aws:s3:::${${aws:username}-test}"]
            }
        ]
    });

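    // Expected resolution, as encoded by the assertions below: the inner
    // ${aws:username} expands first, and the resolved resource must end up
    // matching the plain name "testuser4-test", while a bucket carrying the
    // still-wrapped literal form "${testuser4-test}" must be rejected.
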
    create_and_attach_policy(env, policy_name, test_user, policy_document).await?;

    // Create S3 client for test user
    let test_client = env.create_s3_client(test_user, test_password);

    // Add a small delay to allow policy to propagate
    tokio::time::sleep(std::time::Duration::from_millis(500)).await;

    // Test nested variable resolution
    info!("Test: Nested variable resolution");

    // Create bucket with expected resolved name
    let expected_bucket = format!("{test_user}-test");

    // Attempt to create bucket with resolved name
    let create_result = test_client.create_bucket().bucket(&expected_bucket).send().await;

    // Verify bucket creation succeeds (nested variable resolved correctly)
    if let Err(e) = create_result {
        cleanup().await;
        return Err(format!("User should be able to create bucket with nested variable: {e}").into());
    }

    // Verify bucket creation fails when the name keeps the unresolved wrapper,
    // i.e. the literal "${testuser4-test}" rather than "testuser4-test"
    let unresolved_bucket = format!("${{{test_user}-test}}");
    let create_unresolved = test_client.create_bucket().bucket(&unresolved_bucket).send().await;

    if create_unresolved.is_ok() {
        cleanup().await;
        return Err("User should NOT be able to create bucket with unresolved variable".into());
    }

    // Cleanup
    info!("Cleaning up test resources");
    cleanup().await;

    info!("AWS policy variables nested test completed successfully");
    Ok(())
}

/// Implementation function for STS policy variables test with shared environment
pub async fn test_aws_policy_variables_sts_impl_with_env(
    env: &PolicyTestEnvironment,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Create test user for STS
    let test_user = "testuser-sts";
    let test_password = "testpassword123";
    let policy_name = "test-sts-policy";

    // Create cleanup function
    let cleanup = || async {
        cleanup_user_and_policy(env, test_user, policy_name).await;
    };

    // Create STS user
    create_sts_user(env, test_user, test_password).await?;

    // Create policy with STS-compatible variables
    let policy_document = serde_json::json!({
        "Version": "2012-10-17",
        "Statement": [
            {
                "Effect": "Allow",
                "Action": ["s3:ListAllMyBuckets"],
                "Resource": ["arn:aws:s3:::*"]
            },
            {
                "Effect": "Allow",
                "Action": ["s3:CreateBucket"],
                "Resource": [format!("arn:aws:s3:::{}-sts-bucket", "${aws:username}")]
            },
            {
                "Effect": "Allow",
                "Action": ["s3:ListBucket", "s3:PutObject", "s3:GetObject"],
                "Resource": [format!("arn:aws:s3:::{}-sts-bucket/*", "${aws:username}")]
            }
        ]
    });

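    // Illustration (assuming ${aws:username} resolves from the user behind the
    // credentials, as in the non-STS tests): for "testuser-sts" the bucket
    // resource above resolves to "testuser-sts-sts-bucket", which is exactly
    // the name the assertions below create and operate on.
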
    create_and_attach_policy(env, policy_name, test_user, policy_document).await?;

    // Create S3 client for test user
    let test_client = env.create_s3_client(test_user, test_password);

    // Add a small delay to allow policy to propagate
    tokio::time::sleep(std::time::Duration::from_millis(500)).await;

    // Test: User should be able to create bucket matching STS pattern
    info!("Test: User creating bucket matching STS pattern");
    let bucket_name = format!("{test_user}-sts-bucket");
    let create_result = test_client.create_bucket().bucket(&bucket_name).send().await;
    if let Err(e) = create_result {
        cleanup().await;
        return Err(format!("User should be able to create STS bucket: {e}").into());
    }

    // Test: User should be able to put object in STS bucket
    info!("Test: User putting object in STS bucket");
    let put_result = test_client
        .put_object()
        .bucket(&bucket_name)
        .key("test-sts-object.txt")
        .body(ByteStream::from_static(b"STS Test Object"))
        .send()
        .await;
    if let Err(e) = put_result {
        cleanup().await;
        return Err(format!("User should be able to put object in STS bucket: {e}").into());
    }

    // Test: User should be able to get object from STS bucket
    info!("Test: User getting object from STS bucket");
    let get_result = test_client
        .get_object()
        .bucket(&bucket_name)
        .key("test-sts-object.txt")
        .send()
        .await;
    if let Err(e) = get_result {
        cleanup().await;
        return Err(format!("User should be able to get object from STS bucket: {e}").into());
    }

    // Test: User should be able to list objects in STS bucket
    info!("Test: User listing objects in STS bucket");
    let list_result = test_client.list_objects_v2().bucket(&bucket_name).send().await;
    if let Err(e) = list_result {
        cleanup().await;
        return Err(format!("User should be able to list objects in STS bucket: {e}").into());
    }

    // Cleanup
    info!("Cleaning up test resources");
    cleanup().await;

    info!("AWS policy variables STS test completed successfully");
    Ok(())
}

/// Test AWS policy variables with deny scenarios
#[tokio::test(flavor = "multi_thread")]
#[serial]
#[ignore = "Starts a rustfs server; enable when running full E2E"]
pub async fn test_aws_policy_variables_deny() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    test_aws_policy_variables_deny_impl().await
}

/// Implementation function for deny policy variables test
pub async fn test_aws_policy_variables_deny_impl() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    init_logging();
    info!("Starting AWS policy variables deny test");

    let env = PolicyTestEnvironment::with_address("127.0.0.1:9000").await?;

    test_aws_policy_variables_deny_impl_with_env(&env).await
}

/// Implementation function for deny policy variables test with shared environment
pub async fn test_aws_policy_variables_deny_impl_with_env(
    env: &PolicyTestEnvironment,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    // Create test user
    let test_user = "testuser5";
    let test_password = "testpassword123";
    let policy_name = "test-deny-policy";

    // Create cleanup function
    let cleanup = || async {
        cleanup_user_and_policy(env, test_user, policy_name).await;
    };

    // Create user
    create_user(env, test_user, test_password).await?;

    // Create policy with both allow and deny statements
    let policy_document = serde_json::json!({
        "Version": "2012-10-17",
        "Statement": [
            // Allow general access
            {
                "Effect": "Allow",
                "Action": ["s3:ListAllMyBuckets"],
                "Resource": ["arn:aws:s3:::*"]
            },
            // Allow creating buckets matching username pattern
            {
                "Effect": "Allow",
                "Action": ["s3:CreateBucket"],
                "Resource": [format!("arn:aws:s3:::{}-*", "${aws:username}")]
            },
            // Deny creating buckets with "private" in the name
            {
                "Effect": "Deny",
                "Action": ["s3:CreateBucket"],
                "Resource": ["arn:aws:s3:::*private*"]
            }
        ]
    });

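    // In AWS-style policy evaluation an explicit Deny always overrides any
    // Allow, so even a name that matches the user's Allow pattern (say,
    // "testuser5-private-data") would be rejected by the "*private*" Deny;
    // Test 2 below exercises that Deny path.
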
    create_and_attach_policy(env, policy_name, test_user, policy_document).await?;

    // Create S3 client for test user
    let test_client = env.create_s3_client(test_user, test_password);

    // Add a small delay to allow policy to propagate
    tokio::time::sleep(std::time::Duration::from_millis(500)).await;

    // Test 1: User should be able to create bucket matching username pattern
    info!("Test 1: User creating bucket matching username pattern");
    let bucket_name = format!("{test_user}-test-bucket");
    let create_result = test_client.create_bucket().bucket(&bucket_name).send().await;
    if let Err(e) = create_result {
        cleanup().await;
        return Err(format!("User should be able to create bucket matching username pattern: {e}").into());
    }

    // Test 2: User should NOT be able to create bucket with "private" in the name (deny rule)
    info!("Test 2: User attempting to create bucket with 'private' in name (should be denied)");
    let private_bucket_name = "private-test-bucket";
    let create_private_result = test_client.create_bucket().bucket(private_bucket_name).send().await;
    if create_private_result.is_ok() {
        cleanup().await;
        return Err("User should NOT be able to create bucket with 'private' in name due to deny rule".into());
    }

    // Cleanup
    info!("Cleaning up test resources");
    cleanup().await;

    info!("AWS policy variables deny test completed successfully");
    Ok(())
}

crates/e2e_test/src/policy/test_env.rs (new file, 100 lines)
@@ -0,0 +1,100 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Custom test environment for policy variables tests
//!
//! This module provides a custom test environment that does not automatically
//! stop the server when dropped, so a shared, externally started server can be
//! reused across tests.

use aws_sdk_s3::Client;
use aws_sdk_s3::config::{Config, Credentials, Region};
use std::net::TcpStream;
use std::time::Duration;
use tokio::time::sleep;
use tracing::{info, warn};

// Default credentials
const DEFAULT_ACCESS_KEY: &str = "rustfsadmin";
const DEFAULT_SECRET_KEY: &str = "rustfsadmin";

/// Custom test environment that doesn't automatically stop servers
pub struct PolicyTestEnvironment {
    pub temp_dir: String,
    pub address: String,
    pub url: String,
    pub access_key: String,
    pub secret_key: String,
}

impl PolicyTestEnvironment {
    /// Create a new test environment with specific address
    /// This environment won't stop any server when dropped
    pub async fn with_address(address: &str) -> Result<Self, Box<dyn std::error::Error + Send + Sync>> {
        let temp_dir = format!("/tmp/rustfs_policy_test_{}", uuid::Uuid::new_v4());
        tokio::fs::create_dir_all(&temp_dir).await?;

        let url = format!("http://{address}");

        Ok(Self {
            temp_dir,
            address: address.to_string(),
            url,
            access_key: DEFAULT_ACCESS_KEY.to_string(),
            secret_key: DEFAULT_SECRET_KEY.to_string(),
        })
    }

    /// Create an AWS S3 client configured for this RustFS instance
    pub fn create_s3_client(&self, access_key: &str, secret_key: &str) -> Client {
        let credentials = Credentials::new(access_key, secret_key, None, None, "policy-test");
        let config = Config::builder()
            .credentials_provider(credentials)
            .region(Region::new("us-east-1"))
            .endpoint_url(&self.url)
            .force_path_style(true)
            .behavior_version_latest()
            .build();
        Client::from_conf(config)
    }

    /// Wait for RustFS server to be ready by checking TCP connectivity
    pub async fn wait_for_server_ready(&self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        info!("Waiting for RustFS server to be ready on {}", self.address);

        for i in 0..30 {
            if TcpStream::connect(&self.address).is_ok() {
                info!("✅ RustFS server is ready after {} attempts", i + 1);
                return Ok(());
            }

            if i == 29 {
                return Err("RustFS server failed to become ready within 30 seconds".into());
            }

            sleep(Duration::from_secs(1)).await;
        }

        Ok(())
    }
}

// Implement Drop trait that doesn't stop servers
impl Drop for PolicyTestEnvironment {
    fn drop(&mut self) {
        // Clean up temp directory only, don't stop any server
        if let Err(e) = std::fs::remove_dir_all(&self.temp_dir) {
            warn!("Failed to clean up temp directory {}: {}", self.temp_dir, e);
        }
    }
}
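
// Typical usage (a sketch; assumes a rustfs server is already listening on
// the given address, since this environment never starts or stops one):
//
//     let env = PolicyTestEnvironment::with_address("127.0.0.1:9000").await?;
//     env.wait_for_server_ready().await?;
//     let client = env.create_s3_client("rustfsadmin", "rustfsadmin");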

crates/e2e_test/src/policy/test_runner.rs (new file, 247 lines)
@@ -0,0 +1,247 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::common::init_logging;
use crate::policy::test_env::PolicyTestEnvironment;
use serial_test::serial;
use std::time::Instant;
use tokio::time::{Duration, sleep};
use tracing::{error, info};

/// Core test categories
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum TestCategory {
    SingleValue,
    MultiValue,
    Concatenation,
    Nested,
    DenyScenarios,
}

/// Test case definition
#[derive(Debug, Clone)]
pub struct TestDefinition {
    pub name: String,
    #[allow(dead_code)]
    pub category: TestCategory,
    pub is_critical: bool,
}

impl TestDefinition {
    pub fn new(name: impl Into<String>, category: TestCategory, is_critical: bool) -> Self {
        Self {
            name: name.into(),
            category,
            is_critical,
        }
    }
}

/// Test result
#[derive(Debug, Clone)]
pub struct TestResult {
    pub test_name: String,
    pub success: bool,
    pub error_message: Option<String>,
}

impl TestResult {
    pub fn success(test_name: String) -> Self {
        Self {
            test_name,
            success: true,
            error_message: None,
        }
    }

    pub fn failure(test_name: String, error: String) -> Self {
        Self {
            test_name,
            success: false,
            error_message: Some(error),
        }
    }
}

/// Test suite configuration
#[derive(Debug, Clone, Default)]
pub struct TestSuiteConfig {
    pub include_critical_only: bool,
}

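// With `include_critical_only: true` the runner schedules only definitions
// created with `is_critical = true`; the derived Default (false) runs the
// whole suite.
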
/// Policy test suite
pub struct PolicyTestSuite {
    tests: Vec<TestDefinition>,
    config: TestSuiteConfig,
}

impl PolicyTestSuite {
    /// Create default test suite
    pub fn new() -> Self {
        let tests = vec![
            TestDefinition::new("test_aws_policy_variables_single_value", TestCategory::SingleValue, true),
            TestDefinition::new("test_aws_policy_variables_multi_value", TestCategory::MultiValue, true),
            TestDefinition::new("test_aws_policy_variables_concatenation", TestCategory::Concatenation, true),
            TestDefinition::new("test_aws_policy_variables_nested", TestCategory::Nested, true),
            TestDefinition::new("test_aws_policy_variables_deny", TestCategory::DenyScenarios, true),
            TestDefinition::new("test_aws_policy_variables_sts", TestCategory::SingleValue, true),
        ];

        Self {
            tests,
            config: TestSuiteConfig::default(),
        }
    }

    /// Configure test suite
    pub fn with_config(mut self, config: TestSuiteConfig) -> Self {
        self.config = config;
        self
    }

    /// Run test suite
    pub async fn run_test_suite(&self) -> Vec<TestResult> {
        init_logging();
        info!("Starting Policy Variables test suite");

        let start_time = Instant::now();
        let mut results = Vec::new();

        // Create test environment
        let env = match PolicyTestEnvironment::with_address("127.0.0.1:9000").await {
            Ok(env) => env,
            Err(e) => {
                error!("Failed to create test environment: {}", e);
                return vec![TestResult::failure("env_creation".into(), e.to_string())];
            }
        };

        // Wait for server to be ready
        if env.wait_for_server_ready().await.is_err() {
            error!("Server is not ready");
            return vec![TestResult::failure("server_check".into(), "Server not ready".into())];
        }

        // Filter tests
        let tests_to_run: Vec<&TestDefinition> = self
            .tests
            .iter()
            .filter(|test| !self.config.include_critical_only || test.is_critical)
            .collect();

        info!("Scheduled {} tests", tests_to_run.len());

        // Run tests
        for (i, test_def) in tests_to_run.iter().enumerate() {
            info!("Running test {}/{}: {}", i + 1, tests_to_run.len(), test_def.name);
            let test_start = Instant::now();

            let result = self.run_single_test(test_def, &env).await;
            let test_duration = test_start.elapsed();

            match result {
                Ok(_) => {
                    info!("Test passed: {} ({:.2}s)", test_def.name, test_duration.as_secs_f64());
                    results.push(TestResult::success(test_def.name.clone()));
                }
                Err(e) => {
                    error!("Test failed: {} ({:.2}s): {}", test_def.name, test_duration.as_secs_f64(), e);
                    results.push(TestResult::failure(test_def.name.clone(), e.to_string()));
                }
            }

            // Delay between tests to avoid resource conflicts
            if i < tests_to_run.len() - 1 {
                sleep(Duration::from_secs(2)).await;
            }
        }

        // Print summary
        self.print_summary(&results, start_time.elapsed());

        results
    }

    /// Run a single test
    async fn run_single_test(
        &self,
        test_def: &TestDefinition,
        env: &PolicyTestEnvironment,
    ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        match test_def.name.as_str() {
            "test_aws_policy_variables_single_value" => {
                super::policy_variables_test::test_aws_policy_variables_single_value_impl_with_env(env).await
            }
            "test_aws_policy_variables_multi_value" => {
                super::policy_variables_test::test_aws_policy_variables_multi_value_impl_with_env(env).await
            }
            "test_aws_policy_variables_concatenation" => {
                super::policy_variables_test::test_aws_policy_variables_concatenation_impl_with_env(env).await
            }
            "test_aws_policy_variables_nested" => {
                super::policy_variables_test::test_aws_policy_variables_nested_impl_with_env(env).await
            }
            "test_aws_policy_variables_deny" => {
                super::policy_variables_test::test_aws_policy_variables_deny_impl_with_env(env).await
            }
            "test_aws_policy_variables_sts" => {
                super::policy_variables_test::test_aws_policy_variables_sts_impl_with_env(env).await
            }
            _ => Err(format!("Test {} not implemented", test_def.name).into()),
        }
    }

    /// Print test summary
    fn print_summary(&self, results: &[TestResult], total_duration: Duration) {
        info!("=== Test Suite Summary ===");
        info!("Total duration: {:.2}s", total_duration.as_secs_f64());
        info!("Total tests: {}", results.len());

        let passed = results.iter().filter(|r| r.success).count();
        let failed = results.len() - passed;
        let success_rate = (passed as f64 / results.len() as f64) * 100.0;

        info!("Passed: {} | Failed: {}", passed, failed);
        info!("Success rate: {:.1}%", success_rate);

        if failed > 0 {
            error!("Failed tests:");
            for result in results.iter().filter(|r| !r.success) {
                error!("  - {}: {}", result.test_name, result.error_message.as_ref().unwrap());
            }
        }
    }
}

/// Test suite
#[tokio::test]
#[serial]
#[ignore = "Connects to existing rustfs server"]
async fn test_policy_critical_suite() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let config = TestSuiteConfig {
        include_critical_only: true,
    };
    let suite = PolicyTestSuite::new().with_config(config);
    let results = suite.run_test_suite().await;

    let failed = results.iter().filter(|r| !r.success).count();
    if failed > 0 {
        return Err(format!("Critical tests failed: {failed} failures").into());
    }

    info!("All critical tests passed");
    Ok(())
}

crates/e2e_test/src/reliant/get_deleted_object_test.rs (new file, 283 lines)
@@ -0,0 +1,283 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Test for GetObject on deleted objects
//!
//! This test reproduces the issue where getting a deleted object returns
//! a networking error instead of NoSuchKey.
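//!
//! For reference, S3-compatible servers signal this case with an HTTP 404
//! whose XML body carries the `NoSuchKey` code (a sketch of the standard
//! shape, not necessarily RustFS's byte-exact output):
//!
//! ```text
//! <Error>
//!   <Code>NoSuchKey</Code>
//!   <Message>The specified key does not exist.</Message>
//!   <Key>test-file-to-delete.txt</Key>
//! </Error>
//! ```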

#![cfg(test)]

use aws_config::meta::region::RegionProviderChain;
use aws_sdk_s3::Client;
use aws_sdk_s3::config::{Credentials, Region};
use aws_sdk_s3::error::SdkError;
use bytes::Bytes;
use serial_test::serial;
use std::error::Error;
use tracing::info;

const ENDPOINT: &str = "http://localhost:9000";
const ACCESS_KEY: &str = "rustfsadmin";
const SECRET_KEY: &str = "rustfsadmin";
const BUCKET: &str = "test-get-deleted-bucket";

async fn create_aws_s3_client() -> Result<Client, Box<dyn Error>> {
    let region_provider = RegionProviderChain::default_provider().or_else(Region::new("us-east-1"));
    let shared_config = aws_config::defaults(aws_config::BehaviorVersion::latest())
        .region(region_provider)
        .credentials_provider(Credentials::new(ACCESS_KEY, SECRET_KEY, None, None, "static"))
        .endpoint_url(ENDPOINT)
        .load()
        .await;

    let client = Client::from_conf(
        aws_sdk_s3::Config::from(&shared_config)
            .to_builder()
            .force_path_style(true)
            .build(),
    );
    Ok(client)
}

/// Setup test bucket, creating it if it doesn't exist
async fn setup_test_bucket(client: &Client) -> Result<(), Box<dyn Error>> {
    match client.create_bucket().bucket(BUCKET).send().await {
        Ok(_) => {}
        Err(SdkError::ServiceError(e)) => {
            let e = e.into_err();
            let error_code = e.meta().code().unwrap_or("");
            if !error_code.eq("BucketAlreadyExists") && !error_code.eq("BucketAlreadyOwnedByYou") {
                return Err(e.into());
            }
        }
        Err(e) => {
            return Err(e.into());
        }
    }
    Ok(())
}

#[tokio::test]
#[serial]
#[ignore = "requires running RustFS server at localhost:9000"]
async fn test_get_deleted_object_returns_nosuchkey() -> Result<(), Box<dyn std::error::Error>> {
    // Initialize logging
    let _ = tracing_subscriber::fmt()
        .with_max_level(tracing::Level::INFO)
        .with_test_writer()
        .try_init();

    info!("🧪 Starting test_get_deleted_object_returns_nosuchkey");

    let client = create_aws_s3_client().await?;
    setup_test_bucket(&client).await?;

    // Upload a test object
    let key = "test-file-to-delete.txt";
    let content = b"This will be deleted soon!";

    info!("Uploading object: {}", key);
    client
        .put_object()
        .bucket(BUCKET)
        .key(key)
        .body(Bytes::from_static(content).into())
        .send()
        .await?;

    // Verify object exists
    info!("Verifying object exists");
    let get_result = client.get_object().bucket(BUCKET).key(key).send().await;

    assert!(get_result.is_ok(), "Object should exist after upload");

    // Delete the object
    info!("Deleting object: {}", key);
    client.delete_object().bucket(BUCKET).key(key).send().await?;

    // Try to get the deleted object - should return NoSuchKey error
    info!("Attempting to get deleted object - expecting NoSuchKey error");
    let get_result = client.get_object().bucket(BUCKET).key(key).send().await;

    // Check that we get an error
    assert!(get_result.is_err(), "Getting deleted object should return an error");

    // Check that the error is NoSuchKey, not a networking error
    let err = get_result.unwrap_err();

    // Print the error for debugging
    info!("Error received: {:?}", err);

    // Check if it's a service error
    match err {
        SdkError::ServiceError(service_err) => {
            let s3_err = service_err.into_err();
            info!("Service error code: {:?}", s3_err.meta().code());

            // The error should be NoSuchKey
            assert!(s3_err.is_no_such_key(), "Error should be NoSuchKey, got: {s3_err:?}");

            info!("✅ Test passed: GetObject on deleted object correctly returns NoSuchKey");
        }
        other_err => {
            panic!("Expected ServiceError with NoSuchKey, but got: {other_err:?}");
        }
    }

    // Cleanup
    let _ = client.delete_object().bucket(BUCKET).key(key).send().await;

    Ok(())
}

/// Test that HeadObject on a deleted object also returns NoSuchKey
#[tokio::test]
#[serial]
#[ignore = "requires running RustFS server at localhost:9000"]
async fn test_head_deleted_object_returns_nosuchkey() -> Result<(), Box<dyn std::error::Error>> {
    let _ = tracing_subscriber::fmt()
        .with_max_level(tracing::Level::INFO)
        .with_test_writer()
        .try_init();

    info!("🧪 Starting test_head_deleted_object_returns_nosuchkey");

    let client = create_aws_s3_client().await?;
    setup_test_bucket(&client).await?;

    let key = "test-head-deleted.txt";
    let content = b"Test content for HeadObject";

    // Upload and verify
    client
        .put_object()
        .bucket(BUCKET)
        .key(key)
        .body(Bytes::from_static(content).into())
        .send()
        .await?;

    // Delete the object
    client.delete_object().bucket(BUCKET).key(key).send().await?;

    // Try to head the deleted object
    let head_result = client.head_object().bucket(BUCKET).key(key).send().await;

    assert!(head_result.is_err(), "HeadObject on deleted object should return an error");

    match head_result.unwrap_err() {
        SdkError::ServiceError(service_err) => {
            let s3_err = service_err.into_err();
            assert!(
                s3_err.meta().code() == Some("NoSuchKey") || s3_err.meta().code() == Some("NotFound"),
                "Error should be NoSuchKey or NotFound, got: {s3_err:?}"
            );
            info!("✅ HeadObject correctly returns NoSuchKey/NotFound");
        }
        other_err => {
            panic!("Expected ServiceError but got: {other_err:?}");
        }
    }

    Ok(())
}

/// Test GetObject with non-existent key (never existed)
#[tokio::test]
#[serial]
#[ignore = "requires running RustFS server at localhost:9000"]
async fn test_get_nonexistent_object_returns_nosuchkey() -> Result<(), Box<dyn std::error::Error>> {
    let _ = tracing_subscriber::fmt()
        .with_max_level(tracing::Level::INFO)
        .with_test_writer()
        .try_init();

    info!("🧪 Starting test_get_nonexistent_object_returns_nosuchkey");

    let client = create_aws_s3_client().await?;
    setup_test_bucket(&client).await?;

    // Try to get an object that never existed
    let key = "this-key-never-existed.txt";

    let get_result = client.get_object().bucket(BUCKET).key(key).send().await;

    assert!(get_result.is_err(), "Getting non-existent object should return an error");

    match get_result.unwrap_err() {
        SdkError::ServiceError(service_err) => {
            let s3_err = service_err.into_err();
            assert!(s3_err.is_no_such_key(), "Error should be NoSuchKey, got: {s3_err:?}");
            info!("✅ GetObject correctly returns NoSuchKey for non-existent object");
        }
        other_err => {
            panic!("Expected ServiceError with NoSuchKey, but got: {other_err:?}");
        }
    }

    Ok(())
}

/// Test multiple consecutive GetObject calls on deleted object
/// This ensures the fix is stable and doesn't have race conditions
#[tokio::test]
#[serial]
#[ignore = "requires running RustFS server at localhost:9000"]
async fn test_multiple_gets_deleted_object() -> Result<(), Box<dyn std::error::Error>> {
    let _ = tracing_subscriber::fmt()
        .with_max_level(tracing::Level::INFO)
        .with_test_writer()
        .try_init();

    info!("🧪 Starting test_multiple_gets_deleted_object");

    let client = create_aws_s3_client().await?;
    setup_test_bucket(&client).await?;

    let key = "test-multiple-gets.txt";
    let content = b"Test content";

    // Upload and delete
    client
        .put_object()
        .bucket(BUCKET)
        .key(key)
        .body(Bytes::from_static(content).into())
        .send()
        .await?;

    client.delete_object().bucket(BUCKET).key(key).send().await?;

    // Try multiple consecutive GetObject calls
    for i in 1..=5 {
        info!("Attempt {} to get deleted object", i);
        let get_result = client.get_object().bucket(BUCKET).key(key).send().await;

        assert!(get_result.is_err(), "Attempt {i}: should return error");

        match get_result.unwrap_err() {
            SdkError::ServiceError(service_err) => {
                let s3_err = service_err.into_err();
                assert!(s3_err.is_no_such_key(), "Attempt {i}: Error should be NoSuchKey, got: {s3_err:?}");
            }
            other_err => {
                panic!("Attempt {i}: Expected ServiceError but got: {other_err:?}");
            }
        }
    }

    info!("✅ All 5 attempts correctly returned NoSuchKey");
    Ok(())
}

crates/e2e_test/src/reliant/head_deleted_object_versioning_test.rs (new file, 138 lines)
@@ -0,0 +1,138 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Test for HeadObject on deleted objects with versioning enabled
//!
//! This test reproduces the issue where getting a deleted object returns
//! 200 OK instead of 404 NoSuchKey when versioning is enabled.

#![cfg(test)]

use aws_config::meta::region::RegionProviderChain;
use aws_sdk_s3::Client;
use aws_sdk_s3::config::{Credentials, Region};
use aws_sdk_s3::error::SdkError;
use aws_sdk_s3::types::{BucketVersioningStatus, VersioningConfiguration};
use bytes::Bytes;
use serial_test::serial;
use std::error::Error;
use tracing::info;

const ENDPOINT: &str = "http://localhost:9000";
const ACCESS_KEY: &str = "rustfsadmin";
const SECRET_KEY: &str = "rustfsadmin";
const BUCKET: &str = "test-head-deleted-versioning-bucket";

async fn create_aws_s3_client() -> Result<Client, Box<dyn Error>> {
    let region_provider = RegionProviderChain::default_provider().or_else(Region::new("us-east-1"));
    let shared_config = aws_config::defaults(aws_config::BehaviorVersion::latest())
        .region(region_provider)
        .credentials_provider(Credentials::new(ACCESS_KEY, SECRET_KEY, None, None, "static"))
        .endpoint_url(ENDPOINT)
        .load()
        .await;

    let client = Client::from_conf(
        aws_sdk_s3::Config::from(&shared_config)
            .to_builder()
            .force_path_style(true)
            .build(),
    );
    Ok(client)
}

/// Setup test bucket, creating it if it doesn't exist, and enable versioning
async fn setup_test_bucket(client: &Client) -> Result<(), Box<dyn Error>> {
    match client.create_bucket().bucket(BUCKET).send().await {
        Ok(_) => {}
        Err(SdkError::ServiceError(e)) => {
            let e = e.into_err();
            let error_code = e.meta().code().unwrap_or("");
            if !error_code.eq("BucketAlreadyExists") && !error_code.eq("BucketAlreadyOwnedByYou") {
                return Err(e.into());
            }
        }
        Err(e) => {
            return Err(e.into());
        }
    }

    // Enable versioning
    client
        .put_bucket_versioning()
        .bucket(BUCKET)
        .versioning_configuration(
            VersioningConfiguration::builder()
                .status(BucketVersioningStatus::Enabled)
                .build(),
        )
        .send()
        .await?;

    Ok(())
}

/// Test that HeadObject on a deleted object returns NoSuchKey when versioning is enabled
#[tokio::test]
#[serial]
#[ignore = "requires running RustFS server at localhost:9000"]
async fn test_head_deleted_object_versioning_returns_nosuchkey() -> Result<(), Box<dyn std::error::Error>> {
    let _ = tracing_subscriber::fmt()
        .with_max_level(tracing::Level::INFO)
        .with_test_writer()
        .try_init();

    info!("🧪 Starting test_head_deleted_object_versioning_returns_nosuchkey");

    let client = create_aws_s3_client().await?;
    setup_test_bucket(&client).await?;

    let key = "test-head-deleted-versioning.txt";
    let content = b"Test content for HeadObject with versioning";

    // Upload and verify
    client
        .put_object()
        .bucket(BUCKET)
        .key(key)
        .body(Bytes::from_static(content).into())
        .send()
        .await?;

    // Delete the object (creates a delete marker)
    client.delete_object().bucket(BUCKET).key(key).send().await?;

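    // With versioning enabled, a DELETE without a versionId does not remove
    // data; it inserts a delete marker as the latest version. Standard S3
    // behavior is then a 404 on a plain HEAD/GET (typically accompanied by an
    // x-amz-delete-marker: true response header), which is what we assert.
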
    // Try to head the deleted object (latest version is delete marker)
    let head_result = client.head_object().bucket(BUCKET).key(key).send().await;

    assert!(head_result.is_err(), "HeadObject on deleted object should return an error");

    match head_result.unwrap_err() {
        SdkError::ServiceError(service_err) => {
            let s3_err = service_err.into_err();
            assert!(
                s3_err.meta().code() == Some("NoSuchKey")
                    || s3_err.meta().code() == Some("NotFound")
                    || s3_err.meta().code() == Some("404"),
                "Error should be NoSuchKey or NotFound, got: {s3_err:?}"
            );
            info!("✅ HeadObject correctly returns NoSuchKey/NotFound");
        }
        other_err => {
            panic!("Expected ServiceError but got: {other_err:?}");
        }
    }

    Ok(())
}

crates/e2e_test/src/reliant/mod.rs
@@ -13,6 +13,8 @@
 // limitations under the License.

 mod conditional_writes;
+mod get_deleted_object_test;
+mod head_deleted_object_versioning_test;
 mod lifecycle;
 mod lock;
 mod node_interact_test;

crates/e2e_test/src/special_chars_test.rs (new file, 799 lines)
@@ -0,0 +1,799 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! End-to-end tests for special characters in object paths
//!
//! This module tests the handling of various special characters in S3 object keys,
//! including spaces, plus signs, percent signs, and other URL-encoded characters.
//!
//! ## Test Scenarios
//!
//! 1. **Spaces in paths**: `a f+/b/c/README.md` (encoded as `a%20f+/b/c/README.md`)
//! 2. **Plus signs in paths**: `ES+net/file+name.txt`
//! 3. **Mixed special characters**: Combinations of spaces, plus, percent, etc.
//! 4. **Operations tested**: PUT, GET, LIST, DELETE
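//!
//! Background for these cases: in a URL *path*, a space must be encoded as
//! `%20` while `+` is an ordinary literal character; treating `+` as a space
//! (a query-string convention) or double-decoding `%20` corrupts the key,
//! which is exactly what the scenarios above are designed to catch.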

#[cfg(test)]
mod tests {
    use crate::common::{RustFSTestEnvironment, init_logging};
    use aws_sdk_s3::Client;
    use aws_sdk_s3::primitives::ByteStream;
    use serial_test::serial;
    use tracing::{debug, info};

    /// Helper function to create an S3 client for testing
    fn create_s3_client(env: &RustFSTestEnvironment) -> Client {
        env.create_s3_client()
    }

    /// Helper function to create a test bucket
    async fn create_bucket(client: &Client, bucket: &str) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        match client.create_bucket().bucket(bucket).send().await {
            Ok(_) => {
                info!("Bucket {} created successfully", bucket);
                Ok(())
            }
            Err(e) => {
                // Ignore if bucket already exists
                if e.to_string().contains("BucketAlreadyOwnedByYou") || e.to_string().contains("BucketAlreadyExists") {
                    info!("Bucket {} already exists", bucket);
                    Ok(())
                } else {
                    Err(Box::new(e))
                }
            }
        }
    }

    /// Test PUT and GET with space character in path
    ///
    /// This reproduces Part A of the issue:
    /// ```text
    /// mc cp README.md "local/dummy/a%20f+/b/c/3/README.md"
    /// ```
    #[tokio::test]
    #[serial]
    async fn test_object_with_space_in_path() {
        init_logging();
        info!("Starting test: object with space in path");

        let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
        env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");

        let client = create_s3_client(&env);
        let bucket = "test-special-chars";

        // Create bucket
        create_bucket(&client, bucket).await.expect("Failed to create bucket");

        // Test key with space: "a f+/b/c/3/README.md"
        // When URL-encoded by client: "a%20f+/b/c/3/README.md"
        let key = "a f+/b/c/3/README.md";
        let content = b"Test content with space in path";

        info!("Testing PUT object with key: {}", key);

        // PUT object
        let result = client
            .put_object()
            .bucket(bucket)
            .key(key)
            .body(ByteStream::from_static(content))
            .send()
            .await;

        assert!(result.is_ok(), "Failed to PUT object with space in path: {:?}", result.err());
        info!("✅ PUT object with space in path succeeded");

        // GET object
        info!("Testing GET object with key: {}", key);
        let result = client.get_object().bucket(bucket).key(key).send().await;

        assert!(result.is_ok(), "Failed to GET object with space in path: {:?}", result.err());

        let output = result.unwrap();
        let body_bytes = output.body.collect().await.unwrap().into_bytes();
        assert_eq!(body_bytes.as_ref(), content, "Content mismatch");
        info!("✅ GET object with space in path succeeded");

        // LIST objects with prefix containing space
        info!("Testing LIST objects with prefix: a f+/");
        let result = client.list_objects_v2().bucket(bucket).prefix("a f+/").send().await;

        assert!(result.is_ok(), "Failed to LIST objects with space in prefix: {:?}", result.err());

        let output = result.unwrap();
        let contents = output.contents();
        assert!(!contents.is_empty(), "LIST returned no objects");
        assert!(
            contents.iter().any(|obj| obj.key().unwrap() == key),
            "Object with space not found in LIST results"
        );
        info!("✅ LIST objects with space in prefix succeeded");

        // LIST objects with deeper prefix
        info!("Testing LIST objects with prefix: a f+/b/c/");
        let result = client.list_objects_v2().bucket(bucket).prefix("a f+/b/c/").send().await;

        assert!(result.is_ok(), "Failed to LIST objects with deeper prefix: {:?}", result.err());

        let output = result.unwrap();
        let contents = output.contents();
        assert!(!contents.is_empty(), "LIST with deeper prefix returned no objects");
        info!("✅ LIST objects with deeper prefix succeeded");

        // Cleanup
        env.stop_server();
        info!("Test completed successfully");
    }

    /// Test PUT and GET with a plus sign in the path
    ///
    /// This reproduces Part B of the issue:
    /// ```text
    /// /test/data/org_main-org/dashboards/ES+net/LHC+Data+Challenge/firefly-details.json
    /// ```
    #[tokio::test]
    #[serial]
    async fn test_object_with_plus_in_path() {
        init_logging();
        info!("Starting test: object with plus sign in path");

        let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
        env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");

        let client = create_s3_client(&env);
        let bucket = "test-plus-chars";

        // Create bucket
        create_bucket(&client, bucket).await.expect("Failed to create bucket");

        // Test key with plus signs
        let key = "dashboards/ES+net/LHC+Data+Challenge/firefly-details.json";
        let content = b"Test content with plus signs in path";

        info!("Testing PUT object with key: {}", key);

        // PUT object
        let result = client
            .put_object()
            .bucket(bucket)
            .key(key)
            .body(ByteStream::from_static(content))
            .send()
            .await;

        assert!(result.is_ok(), "Failed to PUT object with plus in path: {:?}", result.err());
        info!("✅ PUT object with plus in path succeeded");

        // GET object
        info!("Testing GET object with key: {}", key);
        let result = client.get_object().bucket(bucket).key(key).send().await;

        assert!(result.is_ok(), "Failed to GET object with plus in path: {:?}", result.err());

        let output = result.unwrap();
        let body_bytes = output.body.collect().await.unwrap().into_bytes();
        assert_eq!(body_bytes.as_ref(), content, "Content mismatch");
        info!("✅ GET object with plus in path succeeded");

        // LIST objects with a prefix containing a plus sign
        info!("Testing LIST objects with prefix: dashboards/ES+net/");
        let result = client
            .list_objects_v2()
            .bucket(bucket)
            .prefix("dashboards/ES+net/")
            .send()
            .await;

        assert!(result.is_ok(), "Failed to LIST objects with plus in prefix: {:?}", result.err());

        let output = result.unwrap();
        let contents = output.contents();
        assert!(!contents.is_empty(), "LIST returned no objects");
        assert!(
            contents.iter().any(|obj| obj.key().unwrap() == key),
            "Object with plus not found in LIST results"
        );
        info!("✅ LIST objects with plus in prefix succeeded");

        // Cleanup
        env.stop_server();
        info!("Test completed successfully");
    }

    /// Test with mixed special characters
    #[tokio::test]
    #[serial]
    async fn test_object_with_mixed_special_chars() {
        init_logging();
        info!("Starting test: object with mixed special characters");

        let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
        env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");

        let client = create_s3_client(&env);
        let bucket = "test-mixed-chars";

        // Create bucket
        create_bucket(&client, bucket).await.expect("Failed to create bucket");

        // Test various special characters
        let test_cases = vec![
            ("path/with spaces/file.txt", b"Content 1" as &[u8]),
            ("path/with+plus/file.txt", b"Content 2"),
            ("path/with spaces+and+plus/file.txt", b"Content 3"),
            ("ES+net/folder name/file.txt", b"Content 4"),
        ];

        for (key, content) in &test_cases {
            info!("Testing with key: {}", key);

            // PUT
            let result = client
                .put_object()
                .bucket(bucket)
                .key(*key)
                .body(ByteStream::from(content.to_vec()))
                .send()
                .await;
            assert!(result.is_ok(), "Failed to PUT object with key '{}': {:?}", key, result.err());

            // GET
            let result = client.get_object().bucket(bucket).key(*key).send().await;
            assert!(result.is_ok(), "Failed to GET object with key '{}': {:?}", key, result.err());

            let output = result.unwrap();
            let body_bytes = output.body.collect().await.unwrap().into_bytes();
            assert_eq!(body_bytes.as_ref(), *content, "Content mismatch for key '{key}'");

            info!("✅ PUT/GET succeeded for key: {}", key);
        }

        // LIST all objects
        let result = client.list_objects_v2().bucket(bucket).send().await;
        assert!(result.is_ok(), "Failed to LIST all objects");

        let output = result.unwrap();
        let contents = output.contents();
        assert_eq!(contents.len(), test_cases.len(), "Number of objects mismatch");

        // Cleanup
        env.stop_server();
        info!("Test completed successfully");
    }

    /// Test DELETE operation with special characters
    #[tokio::test]
    #[serial]
    async fn test_delete_object_with_special_chars() {
        init_logging();
        info!("Starting test: DELETE object with special characters");

        let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
        env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");

        let client = create_s3_client(&env);
        let bucket = "test-delete-special";

        // Create bucket
        create_bucket(&client, bucket).await.expect("Failed to create bucket");

        let key = "folder with spaces/ES+net/file.txt";
        let content = b"Test content";

        // PUT object
        client
            .put_object()
            .bucket(bucket)
            .key(key)
            .body(ByteStream::from_static(content))
            .send()
            .await
            .expect("Failed to PUT object");

        // Verify it exists
        let result = client.get_object().bucket(bucket).key(key).send().await;
        assert!(result.is_ok(), "Object should exist before DELETE");

        // DELETE object
        info!("Testing DELETE object with key: {}", key);
        let result = client.delete_object().bucket(bucket).key(key).send().await;
        assert!(result.is_ok(), "Failed to DELETE object with special chars: {:?}", result.err());
        info!("✅ DELETE object succeeded");

        // Verify it's deleted
        let result = client.get_object().bucket(bucket).key(key).send().await;
        assert!(result.is_err(), "Object should not exist after DELETE");

        // Cleanup
        env.stop_server();
        info!("Test completed successfully");
    }

    /// Test the exact scenario from the issue
    #[tokio::test]
    #[serial]
    async fn test_issue_scenario_exact() {
        init_logging();
        info!("Starting test: Exact scenario from GitHub issue");

        let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
        env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");

        let client = create_s3_client(&env);
        let bucket = "dummy";

        // Create bucket
        create_bucket(&client, bucket).await.expect("Failed to create bucket");

        // Exact key from the issue: "a%20f+/b/c/3/README.md"
        // The decoded form should be: "a f+/b/c/3/README.md"
        let key = "a f+/b/c/3/README.md";
        let content = b"README content";

        info!("Reproducing exact issue scenario with key: {}", key);

        // Step 1: Upload the file (like `mc cp README.md "local/dummy/a%20f+/b/c/3/README.md"`)
        let result = client
            .put_object()
            .bucket(bucket)
            .key(key)
            .body(ByteStream::from_static(content))
            .send()
            .await;
        assert!(result.is_ok(), "Failed to upload file: {:?}", result.err());
        info!("✅ File uploaded successfully");

        // Step 2: Navigate to the folder (like navigating to "%20f+/" in the UI)
        // This is equivalent to listing with prefix "a f+/"
        info!("Listing folder 'a f+/' (this should show subdirectories)");
        let result = client
            .list_objects_v2()
            .bucket(bucket)
            .prefix("a f+/")
            .delimiter("/")
            .send()
            .await;
        assert!(result.is_ok(), "Failed to list folder: {:?}", result.err());

        let output = result.unwrap();
        debug!("List result: {:?}", output);

        // Should show "b/" as a common prefix (subdirectory)
        let common_prefixes = output.common_prefixes();
        assert!(
            !common_prefixes.is_empty() || !output.contents().is_empty(),
            "Folder should show contents or subdirectories"
        );
        info!("✅ Folder listing succeeded");

        // Step 3: List deeper (like `mc ls "local/dummy/a%20f+/b/c/3/"`)
        info!("Listing deeper folder 'a f+/b/c/3/'");
        let result = client.list_objects_v2().bucket(bucket).prefix("a f+/b/c/3/").send().await;
        assert!(result.is_ok(), "Failed to list deep folder: {:?}", result.err());

        let output = result.unwrap();
        let contents = output.contents();
        assert!(!contents.is_empty(), "Deep folder should show the file");
        assert!(contents.iter().any(|obj| obj.key().unwrap() == key), "README.md should be in the list");
        info!("✅ Deep folder listing succeeded - file found");

        // Cleanup
        env.stop_server();
        info!("✅ Exact issue scenario test completed successfully");
    }

    /// Test HEAD object with special characters
    #[tokio::test]
    #[serial]
    async fn test_head_object_with_special_chars() {
        init_logging();
        info!("Starting test: HEAD object with special characters");

        let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
        env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");

        let client = create_s3_client(&env);
        let bucket = "test-head-special";

        // Create bucket
        create_bucket(&client, bucket).await.expect("Failed to create bucket");

        let key = "folder with spaces/ES+net/file.txt";
        let content = b"Test content for HEAD";

        // PUT object
        client
            .put_object()
            .bucket(bucket)
            .key(key)
            .body(ByteStream::from_static(content))
            .send()
            .await
            .expect("Failed to PUT object");

        info!("Testing HEAD object with key: {}", key);

        // HEAD object
        let result = client.head_object().bucket(bucket).key(key).send().await;
        assert!(result.is_ok(), "Failed to HEAD object with special chars: {:?}", result.err());

        let output = result.unwrap();
        assert_eq!(output.content_length().unwrap_or(0), content.len() as i64, "Content length mismatch");
        info!("✅ HEAD object with special characters succeeded");

        // Cleanup
        env.stop_server();
        info!("Test completed successfully");
    }

    /// Test COPY object with special characters in both source and destination
    #[tokio::test]
    #[serial]
    async fn test_copy_object_with_special_chars() {
        init_logging();
        info!("Starting test: COPY object with special characters");

        let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
        env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");

        let client = create_s3_client(&env);
        let bucket = "test-copy-special";

        // Create bucket
        create_bucket(&client, bucket).await.expect("Failed to create bucket");

        let src_key = "source/folder with spaces/file.txt";
        let dest_key = "dest/ES+net/copied file.txt";
        let content = b"Test content for COPY";

        // PUT source object
        client
            .put_object()
            .bucket(bucket)
            .key(src_key)
            .body(ByteStream::from_static(content))
            .send()
            .await
            .expect("Failed to PUT source object");

        info!("Testing COPY from '{}' to '{}'", src_key, dest_key);

        // COPY object
        let copy_source = format!("{bucket}/{src_key}");
        let result = client
            .copy_object()
            .bucket(bucket)
            .key(dest_key)
            .copy_source(&copy_source)
            .send()
            .await;

        assert!(result.is_ok(), "Failed to COPY object with special chars: {:?}", result.err());
        info!("✅ COPY operation succeeded");

        // Verify the destination exists
        let result = client.get_object().bucket(bucket).key(dest_key).send().await;
        assert!(result.is_ok(), "Failed to GET copied object");

        let output = result.unwrap();
        let body_bytes = output.body.collect().await.unwrap().into_bytes();
        assert_eq!(body_bytes.as_ref(), content, "Copied content mismatch");
        info!("✅ Copied object verified successfully");

        // Cleanup
        env.stop_server();
        info!("Test completed successfully");
    }

    /// Test Unicode characters in object keys
    #[tokio::test]
    #[serial]
    async fn test_unicode_characters_in_path() {
        init_logging();
        info!("Starting test: Unicode characters in object paths");

        let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
        env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");

        let client = create_s3_client(&env);
        let bucket = "test-unicode";

        // Create bucket
        create_bucket(&client, bucket).await.expect("Failed to create bucket");

        // Test various Unicode characters
        let test_cases = vec![
            ("测试/文件.txt", b"Chinese characters" as &[u8]),
            ("テスト/ファイル.txt", b"Japanese characters"),
            ("테스트/파일.txt", b"Korean characters"),
            ("тест/файл.txt", b"Cyrillic characters"),
            ("emoji/😀/file.txt", b"Emoji in path"),
            ("mixed/测试 test/file.txt", b"Mixed languages"),
        ];

        for (key, content) in &test_cases {
            info!("Testing Unicode key: {}", key);

            // PUT
            let result = client
                .put_object()
                .bucket(bucket)
                .key(*key)
                .body(ByteStream::from(content.to_vec()))
                .send()
                .await;
            assert!(result.is_ok(), "Failed to PUT object with Unicode key '{}': {:?}", key, result.err());

            // GET
            let result = client.get_object().bucket(bucket).key(*key).send().await;
            assert!(result.is_ok(), "Failed to GET object with Unicode key '{}': {:?}", key, result.err());

            let output = result.unwrap();
            let body_bytes = output.body.collect().await.unwrap().into_bytes();
            assert_eq!(body_bytes.as_ref(), *content, "Content mismatch for Unicode key '{key}'");

            info!("✅ PUT/GET succeeded for Unicode key: {}", key);
        }

        // LIST to verify all objects
        let result = client.list_objects_v2().bucket(bucket).send().await;
        assert!(result.is_ok(), "Failed to LIST objects with Unicode keys");

        let output = result.unwrap();
        let contents = output.contents();
        assert_eq!(contents.len(), test_cases.len(), "Number of Unicode objects mismatch");
        info!("✅ All Unicode objects listed successfully");

        // Cleanup
        env.stop_server();
        info!("Test completed successfully");
    }

    /// Test special characters in different parts of the path
    #[tokio::test]
    #[serial]
    async fn test_special_chars_in_different_path_positions() {
        init_logging();
        info!("Starting test: Special characters in different path positions");

        let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
        env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");

        let client = create_s3_client(&env);
        let bucket = "test-path-positions";

        // Create bucket
        create_bucket(&client, bucket).await.expect("Failed to create bucket");

        // Test special characters in different positions
        let test_cases = vec![
            ("start with space/file.txt", b"Space at start" as &[u8]),
            ("folder/end with space /file.txt", b"Space at end of folder"),
            ("multiple spaces/file.txt", b"Multiple consecutive spaces"),
            ("folder/file with space.txt", b"Space in filename"),
            ("a+b/c+d/e+f.txt", b"Plus signs throughout"),
            ("a%b/c%d/e%f.txt", b"Percent signs throughout"),
            ("folder/!@#$%^&*()/file.txt", b"Multiple special chars"),
            ("(parentheses)/[brackets]/file.txt", b"Parentheses and brackets"),
            ("'quotes'/\"double\"/file.txt", b"Quote characters"),
        ];

        for (key, content) in &test_cases {
            info!("Testing key: {}", key);

            // PUT
            let result = client
                .put_object()
                .bucket(bucket)
                .key(*key)
                .body(ByteStream::from(content.to_vec()))
                .send()
                .await;
            assert!(result.is_ok(), "Failed to PUT object with key '{}': {:?}", key, result.err());

            // GET
            let result = client.get_object().bucket(bucket).key(*key).send().await;
            assert!(result.is_ok(), "Failed to GET object with key '{}': {:?}", key, result.err());

            let output = result.unwrap();
            let body_bytes = output.body.collect().await.unwrap().into_bytes();
            assert_eq!(body_bytes.as_ref(), *content, "Content mismatch for key '{key}'");

            info!("✅ PUT/GET succeeded for key: {}", key);
        }

        // Cleanup
        env.stop_server();
        info!("Test completed successfully");
    }

    /// Test that control characters are properly rejected
    #[tokio::test]
    #[serial]
    async fn test_control_characters_rejected() {
        init_logging();
        info!("Starting test: Control characters should be rejected");

        let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
        env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");

        let client = create_s3_client(&env);
        let bucket = "test-control-chars";

        // Create bucket
        create_bucket(&client, bucket).await.expect("Failed to create bucket");

        // Test that control characters are rejected
        let invalid_keys = vec![
            "file\0with\0null.txt",
            "file\nwith\nnewline.txt",
            "file\rwith\rcarriage.txt",
            "file\twith\ttab.txt", // Tab might be allowed, but let's test
        ];

        for key in invalid_keys {
            info!("Testing rejection of control character in key: {:?}", key);

            let result = client
                .put_object()
                .bucket(bucket)
                .key(key)
                .body(ByteStream::from_static(b"test"))
                .send()
                .await;

            // Note: the validation happens on the server side, so we expect an error
            // for the null byte, newline, and carriage return
            if key.contains('\0') || key.contains('\n') || key.contains('\r') {
                assert!(result.is_err(), "Control character should be rejected for key: {key:?}");
                if let Err(e) = result {
                    info!("✅ Control character correctly rejected: {:?}", e);
                }
            }
        }

        // Cleanup
        env.stop_server();
        info!("Test completed successfully");
    }

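    // Editor's sketch of the server-side rule the test above exercises; this is a
    // hypothetical helper, not RustFS's actual validator. NUL, LF, and CR are the
    // characters the test expects to be rejected, while tab is left to pass.
    #[allow(dead_code)]
    fn key_has_forbidden_control_chars(key: &str) -> bool {
        key.chars().any(|c| matches!(c, '\0' | '\n' | '\r'))
    }
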
    /// Test LIST with various special character prefixes
    #[tokio::test]
    #[serial]
    async fn test_list_with_special_char_prefixes() {
        init_logging();
        info!("Starting test: LIST with special character prefixes");

        let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
        env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");

        let client = create_s3_client(&env);
        let bucket = "test-list-prefixes";

        // Create bucket
        create_bucket(&client, bucket).await.expect("Failed to create bucket");

        // Create objects with various special characters
        let test_objects = vec![
            "prefix with spaces/file1.txt",
            "prefix with spaces/file2.txt",
            "prefix+plus/file1.txt",
            "prefix+plus/file2.txt",
            "prefix%percent/file1.txt",
            "prefix%percent/file2.txt",
        ];

        for key in &test_objects {
            client
                .put_object()
                .bucket(bucket)
                .key(*key)
                .body(ByteStream::from_static(b"test"))
                .send()
                .await
                .expect("Failed to PUT object");
        }

        // Test LIST with different prefixes
        let prefix_tests = vec![
            ("prefix with spaces/", 2),
            ("prefix+plus/", 2),
            ("prefix%percent/", 2),
            ("prefix", 6), // Should match all six objects
        ];

        for (prefix, expected_count) in prefix_tests {
            info!("Testing LIST with prefix: '{}'", prefix);

            let result = client.list_objects_v2().bucket(bucket).prefix(prefix).send().await;
            assert!(result.is_ok(), "Failed to LIST with prefix '{}': {:?}", prefix, result.err());

            let output = result.unwrap();
            let contents = output.contents();
            assert_eq!(
                contents.len(),
                expected_count,
                "Expected {} objects with prefix '{}', got {}",
                expected_count,
                prefix,
                contents.len()
            );
            info!("✅ LIST with prefix '{}' returned {} objects", prefix, contents.len());
        }

        // Cleanup
        env.stop_server();
        info!("Test completed successfully");
    }

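    // Editor's sketch (hypothetical helper, not part of the suite) of the grouping
    // rule the next test checks: with delimiter "/", keys under a prefix collapse
    // into a common prefix at the first "/" past the prefix, regardless of spaces
    // or '+' in the path segments.
    #[allow(dead_code)]
    fn common_prefix_of(key: &str, prefix: &str, delimiter: char) -> Option<String> {
        let rest = key.strip_prefix(prefix)?;
        let idx = rest.find(delimiter)?;
        Some(format!("{prefix}{}{delimiter}", &rest[..idx]))
    }
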
    /// Test delimiter-based listing with special characters
    #[tokio::test]
    #[serial]
    async fn test_list_with_delimiter_and_special_chars() {
        init_logging();
        info!("Starting test: LIST with delimiter and special characters");

        let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
        env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");

        let client = create_s3_client(&env);
        let bucket = "test-delimiter-special";

        // Create bucket
        create_bucket(&client, bucket).await.expect("Failed to create bucket");

        // Create a hierarchical structure with special characters
        let test_objects = vec![
            "folder with spaces/subfolder1/file.txt",
            "folder with spaces/subfolder2/file.txt",
            "folder with spaces/file.txt",
            "folder+plus/subfolder1/file.txt",
            "folder+plus/file.txt",
        ];

        for key in &test_objects {
            client
                .put_object()
                .bucket(bucket)
                .key(*key)
                .body(ByteStream::from_static(b"test"))
                .send()
                .await
                .expect("Failed to PUT object");
        }

        // Test LIST with delimiter
        info!("Testing LIST with delimiter for 'folder with spaces/'");
        let result = client
            .list_objects_v2()
            .bucket(bucket)
            .prefix("folder with spaces/")
            .delimiter("/")
            .send()
            .await;

        assert!(result.is_ok(), "Failed to LIST with delimiter");

        let output = result.unwrap();
        let common_prefixes = output.common_prefixes();
        assert_eq!(common_prefixes.len(), 2, "Should have 2 common prefixes (subdirectories)");
        info!("✅ LIST with delimiter returned {} common prefixes", common_prefixes.len());

        // Cleanup
        env.stop_server();
        info!("Test completed successfully");
    }
}

@@ -108,17 +108,12 @@ google-cloud-auth = { workspace = true }
aws-config = { workspace = true }
faster-hex = { workspace = true }

[target.'cfg(not(windows))'.dependencies]
nix = { workspace = true }

[target.'cfg(windows)'.dependencies]
winapi = { workspace = true }

[dev-dependencies]
tokio = { workspace = true, features = ["rt-multi-thread", "macros"] }
criterion = { workspace = true, features = ["html_reports"] }
temp-env = { workspace = true }
tracing-subscriber = { workspace = true }

[build-dependencies]
shadow-rs = { workspace = true, features = ["build", "metadata"] }

@@ -8,7 +8,7 @@

<p align="center">
  <a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
-  <a href="https://docs.rustfs.com/en/">📖 Documentation</a>
+  <a href="https://docs.rustfs.com/">📖 Documentation</a>
  · <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
  · <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
</p>

@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");

@@ -23,7 +23,7 @@ use crate::{
};

use crate::data_usage::load_data_usage_cache;
-use rustfs_common::{globals::GLOBAL_Local_Node_Name, heal_channel::DriveState};
+use rustfs_common::{GLOBAL_LOCAL_NODE_NAME, heal_channel::DriveState};
use rustfs_madmin::{
    BackendDisks, Disk, ErasureSetInfo, ITEM_INITIALIZING, ITEM_OFFLINE, ITEM_ONLINE, InfoMessage, ServerProperties,
};

@@ -128,7 +128,7 @@ async fn is_server_resolvable(endpoint: &Endpoint) -> Result<()> {
}

pub async fn get_local_server_property() -> ServerProperties {
-    let addr = GLOBAL_Local_Node_Name.read().await.clone();
+    let addr = GLOBAL_LOCAL_NODE_NAME.read().await.clone();
    let mut pool_numbers = HashSet::new();
    let mut network = HashMap::new();

@@ -283,7 +283,17 @@ impl Lifecycle for BucketLifecycleConfiguration {
            "eval_inner: object={}, mod_time={:?}, now={:?}, is_latest={}, delete_marker={}",
            obj.name, obj.mod_time, now, obj.is_latest, obj.delete_marker
        );
-        if obj.mod_time.expect("err").unix_timestamp() == 0 {
+        // Gracefully handle a missing mod_time instead of panicking
+        let mod_time = match obj.mod_time {
+            Some(t) => t,
+            None => {
+                info!("eval_inner: mod_time is None for object={}, returning default event", obj.name);
+                return Event::default();
+            }
+        };
+
+        if mod_time.unix_timestamp() == 0 {
            info!("eval_inner: mod_time is 0, returning default event");
            return Event::default();
        }
@@ -323,7 +333,7 @@ impl Lifecycle for BucketLifecycleConfiguration {
        }

        if let Some(days) = expiration.days {
-            let expected_expiry = expected_expiry_time(obj.mod_time.unwrap(), days /*, date*/);
+            let expected_expiry = expected_expiry_time(mod_time, days /*, date*/);
            if now.unix_timestamp() >= expected_expiry.unix_timestamp() {
                events.push(Event {
                    action: IlmAction::DeleteVersionAction,
@@ -446,11 +456,11 @@ impl Lifecycle for BucketLifecycleConfiguration {
            });
        }
    } else if let Some(days) = expiration.days {
-        let expected_expiry: OffsetDateTime = expected_expiry_time(obj.mod_time.unwrap(), days);
+        let expected_expiry: OffsetDateTime = expected_expiry_time(mod_time, days);
        info!(
            "eval_inner: expiration check - days={}, obj_time={:?}, expiry_time={:?}, now={:?}, should_expire={}",
            days,
-            obj.mod_time.expect("err!"),
+            mod_time,
            expected_expiry,
            now,
            now.unix_timestamp() > expected_expiry.unix_timestamp()

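Editor's note: a minimal sketch of what `expected_expiry_time(mod_time, days)` plausibly computes, under the common S3 lifecycle convention that expiry lands on the midnight UTC boundary after mod_time + days. This is an assumption for illustration, not RustFS's actual implementation.

use time::{Duration, OffsetDateTime, Time};

fn expected_expiry_time_sketch(mod_time: OffsetDateTime, days: i32) -> OffsetDateTime {
    let t = mod_time + Duration::days(days as i64);
    // Round up to the next midnight UTC so expiry checks are day-granular (assumed).
    if t.time() == Time::MIDNIGHT {
        t
    } else {
        t.replace_time(Time::MIDNIGHT) + Duration::days(1)
    }
}
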
@@ -22,7 +22,7 @@ pub struct PolicySys {}
impl PolicySys {
    pub async fn is_allowed(args: &BucketPolicyArgs<'_>) -> bool {
        match Self::get(args.bucket).await {
-            Ok(cfg) => return cfg.is_allowed(args),
+            Ok(cfg) => return cfg.is_allowed(args).await,
            Err(err) => {
                if err != StorageError::ConfigNotFound {
                    info!("config get err {:?}", err);

@@ -16,7 +16,7 @@ use crate::disk::error::DiskError;
use crate::disk::{self, DiskAPI, DiskStore, WalkDirOptions};
use futures::future::join_all;
use rustfs_filemeta::{MetaCacheEntries, MetaCacheEntry, MetacacheReader, is_io_eof};
-use std::{future::Future, pin::Pin, sync::Arc};
+use std::{future::Future, pin::Pin};
use tokio::spawn;
use tokio_util::sync::CancellationToken;
use tracing::{error, info, warn};
@@ -71,14 +71,14 @@ pub async fn list_path_raw(rx: CancellationToken, opts: ListPathRawOptions) -> d

    let mut jobs: Vec<tokio::task::JoinHandle<std::result::Result<(), DiskError>>> = Vec::new();
    let mut readers = Vec::with_capacity(opts.disks.len());
-    let fds = Arc::new(opts.fallback_disks.clone());
+    let fds = opts.fallback_disks.iter().flatten().cloned().collect::<Vec<_>>();

    let cancel_rx = CancellationToken::new();

    for disk in opts.disks.iter() {
        let opdisk = disk.clone();
        let opts_clone = opts.clone();
-        let fds_clone = fds.clone();
+        let mut fds_clone = fds.clone();
        let cancel_rx_clone = cancel_rx.clone();
        let (rd, mut wr) = tokio::io::duplex(64);
        readers.push(MetacacheReader::new(rd));
@@ -113,21 +113,20 @@ pub async fn list_path_raw(rx: CancellationToken, opts: ListPathRawOptions) -> d
        }

        while need_fallback {
            // warn!("list_path_raw: while need_fallback start");
-            let disk = match fds_clone.iter().find(|d| d.is_some()) {
-                Some(d) => {
-                    if let Some(disk) = d.clone() {
-                        disk
-                    } else {
-                        warn!("list_path_raw: fallback disk is none");
-                        break;
-                    }
-                }
-                None => {
-                    warn!("list_path_raw: fallback disk is none2");
-                    break;
+            let disk_op = {
+                if fds_clone.is_empty() {
+                    None
+                } else {
+                    let disk = fds_clone.remove(0);
+                    if disk.is_online().await { Some(disk.clone()) } else { None }
                }
            };

+            let Some(disk) = disk_op else {
+                warn!("list_path_raw: fallback disk is none");
+                break;
+            };

            match disk
                .as_ref()
                .walk_dir(

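Editor's note: the hunk above replaces the Arc-wrapped fallback list with a flattened, per-task Vec consumed from the front. A minimal sketch of that selection step (hypothetical free function; `DiskStore` and `is_online` come from the imports shown above):

use crate::disk::{DiskAPI, DiskStore};

// Take the next fallback disk; returning None stops the caller's fallback loop.
async fn take_fallback(fds_clone: &mut Vec<DiskStore>) -> Option<DiskStore> {
    if fds_clone.is_empty() {
        return None;
    }
    let disk = fds_clone.remove(0);
    if disk.is_online().await { Some(disk) } else { None }
}
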
@@ -1,350 +0,0 @@
#![allow(clippy::map_entry)]
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(unused_imports)]
#![allow(unused_variables)]
#![allow(unused_mut)]
#![allow(unused_assignments)]
#![allow(unused_must_use)]
#![allow(clippy::all)]

use lazy_static::lazy_static;
use rustfs_checksums::ChecksumAlgorithm;
use std::collections::HashMap;

use crate::client::{api_put_object::PutObjectOptions, api_s3_datatypes::ObjectPart};
use crate::{disk::DiskAPI, store_api::GetObjectReader};
use rustfs_utils::crypto::{base64_decode, base64_encode};
use s3s::header::{
    X_AMZ_CHECKSUM_ALGORITHM, X_AMZ_CHECKSUM_CRC32, X_AMZ_CHECKSUM_CRC32C, X_AMZ_CHECKSUM_SHA1, X_AMZ_CHECKSUM_SHA256,
};

use enumset::{EnumSet, EnumSetType, enum_set};

#[derive(Debug, EnumSetType, Default)]
#[enumset(repr = "u8")]
pub enum ChecksumMode {
    #[default]
    ChecksumNone,
    ChecksumSHA256,
    ChecksumSHA1,
    ChecksumCRC32,
    ChecksumCRC32C,
    ChecksumCRC64NVME,
    ChecksumFullObject,
}

lazy_static! {
    static ref C_ChecksumMask: EnumSet<ChecksumMode> = {
        let mut s = EnumSet::all();
        s.remove(ChecksumMode::ChecksumFullObject);
        s
    };
    static ref C_ChecksumFullObjectCRC32: EnumSet<ChecksumMode> =
        enum_set!(ChecksumMode::ChecksumCRC32 | ChecksumMode::ChecksumFullObject);
    static ref C_ChecksumFullObjectCRC32C: EnumSet<ChecksumMode> =
        enum_set!(ChecksumMode::ChecksumCRC32C | ChecksumMode::ChecksumFullObject);
}

const AMZ_CHECKSUM_CRC64NVME: &str = "x-amz-checksum-crc64nvme";

impl ChecksumMode {
    //pub const CRC64_NVME_POLYNOMIAL: i64 = 0xad93d23594c93659;

    pub fn base(&self) -> ChecksumMode {
        let s = EnumSet::from(*self).intersection(*C_ChecksumMask);
        match s.as_u8() {
            1_u8 => ChecksumMode::ChecksumNone,
            2_u8 => ChecksumMode::ChecksumSHA256,
            4_u8 => ChecksumMode::ChecksumSHA1,
            8_u8 => ChecksumMode::ChecksumCRC32,
            16_u8 => ChecksumMode::ChecksumCRC32C,
            32_u8 => ChecksumMode::ChecksumCRC64NVME,
            _ => panic!("enum err."),
        }
    }

    pub fn is(&self, t: ChecksumMode) -> bool {
        *self & t == t
    }

    pub fn key(&self) -> String {
        match self {
            ChecksumMode::ChecksumCRC32 => X_AMZ_CHECKSUM_CRC32.to_string(),
            ChecksumMode::ChecksumCRC32C => X_AMZ_CHECKSUM_CRC32C.to_string(),
            ChecksumMode::ChecksumSHA1 => X_AMZ_CHECKSUM_SHA1.to_string(),
            ChecksumMode::ChecksumSHA256 => X_AMZ_CHECKSUM_SHA256.to_string(),
            ChecksumMode::ChecksumCRC64NVME => AMZ_CHECKSUM_CRC64NVME.to_string(),
            _ => "".to_string(),
        }
    }

    pub fn can_composite(&self) -> bool {
        let s = EnumSet::from(*self).intersection(*C_ChecksumMask);
        matches!(s.as_u8(), 2_u8 | 4_u8 | 8_u8 | 16_u8)
    }

    pub fn can_merge_crc(&self) -> bool {
        let s = EnumSet::from(*self).intersection(*C_ChecksumMask);
        matches!(s.as_u8(), 8_u8 | 16_u8 | 32_u8)
    }

    pub fn full_object_requested(&self) -> bool {
        let s = EnumSet::from(*self).intersection(*C_ChecksumMask);
        match s.as_u8() {
            //C_ChecksumFullObjectCRC32 as u8 => true,
            //C_ChecksumFullObjectCRC32C as u8 => true,
            32_u8 => true,
            _ => false,
        }
    }

    pub fn key_capitalized(&self) -> String {
        self.key()
    }

    pub fn raw_byte_len(&self) -> usize {
        let u = EnumSet::from(*self).intersection(*C_ChecksumMask).as_u8();
        if u == ChecksumMode::ChecksumCRC32 as u8 || u == ChecksumMode::ChecksumCRC32C as u8 {
            4
        } else if u == ChecksumMode::ChecksumSHA1 as u8 {
            use sha1::Digest;
            sha1::Sha1::output_size() as usize
        } else if u == ChecksumMode::ChecksumSHA256 as u8 {
            use sha2::Digest;
            sha2::Sha256::output_size() as usize
        } else if u == ChecksumMode::ChecksumCRC64NVME as u8 {
            8
        } else {
            0
        }
    }

    pub fn hasher(&self) -> Result<Box<dyn rustfs_checksums::http::HttpChecksum>, std::io::Error> {
        match self {
            ChecksumMode::ChecksumCRC32 => Ok(ChecksumAlgorithm::Crc32.into_impl()),
            ChecksumMode::ChecksumCRC32C => Ok(ChecksumAlgorithm::Crc32c.into_impl()),
            ChecksumMode::ChecksumSHA1 => Ok(ChecksumAlgorithm::Sha1.into_impl()),
            ChecksumMode::ChecksumSHA256 => Ok(ChecksumAlgorithm::Sha256.into_impl()),
            ChecksumMode::ChecksumCRC64NVME => Ok(ChecksumAlgorithm::Crc64Nvme.into_impl()),
            _ => Err(std::io::Error::other("unsupported checksum type")),
        }
    }

    pub fn is_set(&self) -> bool {
        let s = EnumSet::from(*self).intersection(*C_ChecksumMask);
        s.len() == 1
    }

    pub fn set_default(&mut self, t: ChecksumMode) {
        if !self.is_set() {
            *self = t;
        }
    }

    pub fn encode_to_string(&self, b: &[u8]) -> Result<String, std::io::Error> {
        if !self.is_set() {
            return Ok("".to_string());
        }
        let mut h = self.hasher()?;
        h.update(b);
        let hash = h.finalize();
        Ok(base64_encode(hash.as_ref()))
    }

    pub fn to_string(&self) -> String {
        match self {
            ChecksumMode::ChecksumCRC32 => "CRC32".to_string(),
            ChecksumMode::ChecksumCRC32C => "CRC32C".to_string(),
            ChecksumMode::ChecksumSHA1 => "SHA1".to_string(),
            ChecksumMode::ChecksumSHA256 => "SHA256".to_string(),
            ChecksumMode::ChecksumNone => "".to_string(),
            ChecksumMode::ChecksumCRC64NVME => "CRC64NVME".to_string(),
            _ => "<invalid>".to_string(),
        }
    }

    // pub fn check_sum_reader(&self, r: GetObjectReader) -> Result<Checksum, std::io::Error> {
    //     let mut h = self.hasher()?;
    //     Ok(Checksum::new(self.clone(), h.sum().as_bytes()))
    // }

    // pub fn check_sum_bytes(&self, b: &[u8]) -> Result<Checksum, std::io::Error> {
    //     let mut h = self.hasher()?;
    //     Ok(Checksum::new(self.clone(), h.sum().as_bytes()))
    // }

    pub fn composite_checksum(&self, p: &mut [ObjectPart]) -> Result<Checksum, std::io::Error> {
        if !self.can_composite() {
            return Err(std::io::Error::other("cannot do composite checksum"));
        }
        p.sort_by(|i, j| i.part_num.cmp(&j.part_num));
        let c = self.base();
        let crc_bytes = Vec::<u8>::with_capacity(p.len() * self.raw_byte_len());
        let mut h = self.hasher()?;
        h.update(crc_bytes.as_ref());
        let hash = h.finalize();
        Ok(Checksum {
            checksum_type: self.clone(),
            r: hash.as_ref().to_vec(),
            computed: false,
        })
    }

    pub fn full_object_checksum(&self, p: &mut [ObjectPart]) -> Result<Checksum, std::io::Error> {
        todo!();
    }
}

#[derive(Default)]
pub struct Checksum {
    checksum_type: ChecksumMode,
    r: Vec<u8>,
    computed: bool,
}

#[allow(dead_code)]
impl Checksum {
    fn new(t: ChecksumMode, b: &[u8]) -> Checksum {
        if t.is_set() && b.len() == t.raw_byte_len() {
            return Checksum {
                checksum_type: t,
                r: b.to_vec(),
                computed: false,
            };
        }
        Checksum::default()
    }

    #[allow(dead_code)]
    fn new_checksum_string(t: ChecksumMode, s: &str) -> Result<Checksum, std::io::Error> {
        let b = match base64_decode(s.as_bytes()) {
            Ok(b) => b,
            Err(err) => return Err(std::io::Error::other(err.to_string())),
        };
        if t.is_set() && b.len() == t.raw_byte_len() {
            return Ok(Checksum {
                checksum_type: t,
                r: b,
                computed: false,
            });
        }
        Ok(Checksum::default())
    }

    fn is_set(&self) -> bool {
        self.checksum_type.is_set() && self.r.len() == self.checksum_type.raw_byte_len()
    }

    fn encoded(&self) -> String {
        if !self.is_set() {
            return "".to_string();
        }
        base64_encode(&self.r)
    }

    #[allow(dead_code)]
    fn raw(&self) -> Option<Vec<u8>> {
        if !self.is_set() {
            return None;
        }
        Some(self.r.clone())
    }
}

pub fn add_auto_checksum_headers(opts: &mut PutObjectOptions) {
    opts.user_metadata
        .insert("X-Amz-Checksum-Algorithm".to_string(), opts.auto_checksum.to_string());
    if opts.auto_checksum.full_object_requested() {
        opts.user_metadata
            .insert("X-Amz-Checksum-Type".to_string(), "FULL_OBJECT".to_string());
    }
}

pub fn apply_auto_checksum(opts: &mut PutObjectOptions, all_parts: &mut [ObjectPart]) -> Result<(), std::io::Error> {
    if opts.auto_checksum.can_composite() && !opts.auto_checksum.is(ChecksumMode::ChecksumFullObject) {
        let crc = opts.auto_checksum.composite_checksum(all_parts)?;
        opts.user_metadata = {
            let mut hm = HashMap::new();
            hm.insert(opts.auto_checksum.key(), crc.encoded());
            hm
        };
    } else if opts.auto_checksum.can_merge_crc() {
        let crc = opts.auto_checksum.full_object_checksum(all_parts)?;
        opts.user_metadata = {
            let mut hm = HashMap::new();
            hm.insert(opts.auto_checksum.key_capitalized(), crc.encoded());
            hm.insert("X-Amz-Checksum-Type".to_string(), "FULL_OBJECT".to_string());
            hm
        };
    }

    Ok(())
}

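Editor's note: for context, a sketch of how the removed ChecksumMode API fit together, exercising only the methods defined above (illustrative only; this module was deleted in this commit range):

fn checksum_demo() -> Result<(), std::io::Error> {
    let mode = ChecksumMode::ChecksumCRC32C;
    // key() yields the matching "x-amz-checksum-*" header name;
    // encode_to_string() produces the base64 digest S3 expects as its value.
    let header = mode.key();
    let value = mode.encode_to_string(b"hello world")?;
    println!("{header}: {value}");
    Ok(())
}
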
@@ -1,270 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// use crate::error::StdError;
// use bytes::Bytes;
// use futures::pin_mut;
// use futures::stream::{Stream, StreamExt};
// use std::future::Future;
// use std::pin::Pin;
// use std::task::{Context, Poll};
// use transform_stream::AsyncTryStream;

// pub type SyncBoxFuture<'a, T> = Pin<Box<dyn Future<Output = T> + Send + Sync + 'a>>;

// pub struct ChunkedStream<'a> {
//     /// inner
//     inner: AsyncTryStream<Bytes, StdError, SyncBoxFuture<'a, Result<(), StdError>>>,
//
//     remaining_length: usize,
// }

// impl<'a> ChunkedStream<'a> {
//     pub fn new<S>(body: S, content_length: usize, chunk_size: usize, need_padding: bool) -> Self
//     where
//         S: Stream<Item = Result<Bytes, StdError>> + Send + Sync + 'a,
//     {
//         let inner = AsyncTryStream::<_, _, SyncBoxFuture<'a, Result<(), StdError>>>::new(|mut y| {
//             #[allow(clippy::shadow_same)] // necessary for `pin_mut!`
//             Box::pin(async move {
//                 pin_mut!(body);
//                 // Data left over from the previous call
//                 let mut prev_bytes = Bytes::new();
//                 let mut read_size = 0;
//
//                 loop {
//                     let data: Vec<Bytes> = {
//                         // Read a fixed-size chunk
//                         match Self::read_data(body.as_mut(), prev_bytes, chunk_size).await {
//                             None => break,
//                             Some(Err(e)) => return Err(e),
//                             Some(Ok((data, remaining_bytes))) => {
//                                 // debug!(
//                                 //     "content_length:{},read_size:{}, read_data data:{}, remaining_bytes: {} ",
//                                 //     content_length,
//                                 //     read_size,
//                                 //     data.len(),
//                                 //     remaining_bytes.len()
//                                 // );
//
//                                 prev_bytes = remaining_bytes;
//                                 data
//                             }
//                         }
//                     };
//
//                     for bytes in data {
//                         read_size += bytes.len();
//                         // debug!("read_size {}, content_length {}", read_size, content_length,);
//                         y.yield_ok(bytes).await;
//                     }
//
//                     if read_size + prev_bytes.len() >= content_length {
//                         // debug!(
//                         //     "Finished reading: read_size:{} + prev_bytes.len({}) == content_length {}",
//                         //     read_size,
//                         //     prev_bytes.len(),
//                         //     content_length,
//                         // );
//
//                         // Pad with zeros?
//                         if !need_padding {
//                             y.yield_ok(prev_bytes).await;
//                             break;
//                         }
//
//                         let mut bytes = vec![0u8; chunk_size];
//                         let (left, _) = bytes.split_at_mut(prev_bytes.len());
//                         left.copy_from_slice(&prev_bytes);
//
//                         y.yield_ok(Bytes::from(bytes)).await;
//
//                         break;
//                     }
//                 }
//
//                 // debug!("chunked stream exit");
//
//                 Ok(())
//             })
//         });
//         Self {
//             inner,
//             remaining_length: content_length,
//         }
//     }
//     /// read data and return remaining bytes
//     async fn read_data<S>(
//         mut body: Pin<&mut S>,
//         prev_bytes: Bytes,
//         data_size: usize,
//     ) -> Option<Result<(Vec<Bytes>, Bytes), StdError>>
//     where
//         S: Stream<Item = Result<Bytes, StdError>> + Send,
//     {
//         let mut bytes_buffer = Vec::new();
//
//         // Run only once
//         let mut push_data_bytes = |mut bytes: Bytes| {
//             // debug!("read from body {} split per {}, prev_bytes: {}", bytes.len(), data_size, prev_bytes.len());
//
//             if bytes.is_empty() {
//                 return None;
//             }
//
//             if data_size == 0 {
//                 return Some(bytes);
//             }
//
//             // Merge with the previous data
//             if !prev_bytes.is_empty() {
//                 let need_size = data_size.wrapping_sub(prev_bytes.len());
//                 // debug!(
//                 //     "Previous leftover {}, take {} now, total: {}",
//                 //     prev_bytes.len(),
//                 //     need_size,
//                 //     prev_bytes.len() + need_size
//                 // );
//                 if bytes.len() >= need_size {
//                     let data = bytes.split_to(need_size);
//                     let mut combined = Vec::new();
//                     combined.extend_from_slice(&prev_bytes);
//                     combined.extend_from_slice(&data);
//
//                     // debug!(
//                     //     "Fetched more bytes than needed: {}, merged result {}, remaining bytes {}",
//                     //     need_size,
//                     //     combined.len(),
//                     //     bytes.len(),
//                     // );
//
//                     bytes_buffer.push(Bytes::from(combined));
//                 } else {
//                     let mut combined = Vec::new();
//                     combined.extend_from_slice(&prev_bytes);
//                     combined.extend_from_slice(&bytes);
//
//                     // debug!(
//                     //     "Fetched fewer bytes than needed: {}, merged result {}, remaining bytes {}, return immediately",
//                     //     need_size,
//                     //     combined.len(),
//                     //     bytes.len(),
//                     // );
//
//                     return Some(Bytes::from(combined));
//                 }
//             }
//
//             // If the fetched data exceeds the chunk, slice the required size
//             if data_size <= bytes.len() {
//                 let n = bytes.len() / data_size;
//
//                 for _ in 0..n {
//                     let data = bytes.split_to(data_size);
//
//                     // println!("bytes_buffer.push: {}, remaining: {}", data.len(), bytes.len());
//                     bytes_buffer.push(data);
//                 }
//
//                 Some(bytes)
//             } else {
//                 // Insufficient data
//                 Some(bytes)
//             }
//         };
//
//         // Remaining data
//         let remaining_bytes = 'outer: {
//             // // Exit if the previous data was sufficient
//             // if let Some(remaining_bytes) = push_data_bytes(prev_bytes) {
//             //     println!("Consuming leftovers");
//             //     break 'outer remaining_bytes;
//             // }
//
//             loop {
//                 match body.next().await? {
//                     Err(e) => return Some(Err(e)),
//                     Ok(bytes) => {
//                         if let Some(remaining_bytes) = push_data_bytes(bytes) {
//                             break 'outer remaining_bytes;
//                         }
//                     }
//                 }
//             }
//         };
//
//         Some(Ok((bytes_buffer, remaining_bytes)))
//     }
//
//     fn poll(mut self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Result<Bytes, StdError>>> {
//         let ans = Pin::new(&mut self.inner).poll_next(cx);
//         if let Poll::Ready(Some(Ok(ref bytes))) = ans {
//             self.remaining_length = self.remaining_length.saturating_sub(bytes.len());
//         }
//         ans
//     }
//
//     // pub fn exact_remaining_length(&self) -> usize {
//     //     self.remaining_length
//     // }
// }

// impl Stream for ChunkedStream<'_> {
//     type Item = Result<Bytes, StdError>;
//
//     fn poll_next(self: Pin<&mut Self>, cx: &mut Context<'_>) -> Poll<Option<Self::Item>> {
//         self.poll(cx)
//     }
//
//     fn size_hint(&self) -> (usize, Option<usize>) {
//         (0, None)
//     }
// }

// #[cfg(test)]
// mod test {
//
//     use super::*;
//
//     #[tokio::test]
//     async fn test_chunked_stream() {
//         let chunk_size = 4;
//
//         let data1 = vec![1u8; 7777]; // 65536
//         let data2 = vec![1u8; 7777]; // 65536
//
//         let content_length = data1.len() + data2.len();
//
//         let chunk1 = Bytes::from(data1);
//         let chunk2 = Bytes::from(data2);
//
//         let chunk_results: Vec<Result<Bytes, _>> = vec![Ok(chunk1), Ok(chunk2)];
//
//         let stream = futures::stream::iter(chunk_results);
//
//         let mut chunked_stream = ChunkedStream::new(stream, content_length, chunk_size, true);
//
//         loop {
//             let ans1 = chunked_stream.next().await;
//             if ans1.is_none() {
//                 break;
//             }
//
//             let bytes = ans1.unwrap().unwrap();
//             assert!(bytes.len() == chunk_size)
//         }
//
//         // assert_eq!(ans1.unwrap(), chunk1_data.as_slice());
//     }
// }

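Editor's note: the commented-out stream above re-sliced an incoming byte stream into fixed-size chunks, carrying leftovers forward and zero-padding the final chunk when requested. A synchronous sketch of that core idea (an editor's simplification, not the original API):

fn rechunk(input: &[u8], chunk_size: usize, need_padding: bool) -> Vec<Vec<u8>> {
    let mut out = Vec::new();
    for chunk in input.chunks(chunk_size) {
        if chunk.len() == chunk_size || !need_padding {
            out.push(chunk.to_vec());
        } else {
            // Final short chunk: zero-pad up to chunk_size, as the stream did.
            let mut padded = vec![0u8; chunk_size];
            padded[..chunk.len()].copy_from_slice(chunk);
            out.push(padded);
        }
    }
    out
}
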
Some files were not shown because too many files have changed in this diff.