mirror of
https://github.com/rustfs/rustfs.git
synced 2026-01-17 09:40:32 +00:00
Compare commits
125 Commits
1.0.0-alph
...
feat/scan
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
00787cbce4 | ||
|
|
3ac004510a | ||
|
|
d8f8bfa5b7 | ||
|
|
1768e7bbdb | ||
|
|
20961d7c91 | ||
|
|
8de8172833 | ||
|
|
3326737c01 | ||
|
|
7c98c62d60 | ||
|
|
91770ffd1b | ||
|
|
15c75b9d36 | ||
|
|
7940b69bf8 | ||
|
|
af650716da | ||
|
|
427d31d09c | ||
|
|
dbdcecb9c5 | ||
|
|
552e95e368 | ||
|
|
619cc69512 | ||
|
|
76d25d9a20 | ||
|
|
834025d9e3 | ||
|
|
e2d8e9e3d3 | ||
|
|
ad34f1b031 | ||
|
|
cd6a26bc3a | ||
|
|
5f256249f4 | ||
|
|
b10d80cbb6 | ||
|
|
7c6cbaf837 | ||
|
|
72930b1e30 | ||
|
|
6ca8945ca7 | ||
|
|
0d0edc22be | ||
|
|
030d3c9426 | ||
|
|
b8b905be86 | ||
|
|
ace58fea0d | ||
|
|
3a79242133 | ||
|
|
63d846ed14 | ||
|
|
3a79fcfe73 | ||
|
|
2a5ccd2211 | ||
|
|
c43166c4c6 | ||
|
|
b3c80ae362 | ||
|
|
3fd003b21d | ||
|
|
1d3f622922 | ||
|
|
e31b4303ed | ||
|
|
5b0a3a0764 | ||
|
|
a8b7b28fd0 | ||
|
|
e355d3db80 | ||
|
|
4d7bf98c82 | ||
|
|
699164e05e | ||
|
|
d35ceac441 | ||
|
|
93982227ac | ||
|
|
fdcdb30d28 | ||
|
|
a6cf0740cb | ||
|
|
a2e3a719d3 | ||
|
|
76efee37fa | ||
|
|
fd7c0964a0 | ||
|
|
701960dd81 | ||
|
|
ee04cc77a0 | ||
|
|
069194f553 | ||
|
|
fce4e64da4 | ||
|
|
44bdebe6e9 | ||
|
|
2b268fdd7f | ||
|
|
18cd9a8b46 | ||
|
|
e14809ee04 | ||
|
|
390d051ddd | ||
|
|
b73ca0220e | ||
|
|
b4071d493c | ||
|
|
277d80de13 | ||
|
|
9b9bbb662b | ||
|
|
44f3f3d070 | ||
|
|
a13ce08590 | ||
|
|
55d44622ed | ||
|
|
6da5766ea2 | ||
|
|
85bc0ce2d5 | ||
|
|
601f3456bc | ||
|
|
1279baa72b | ||
|
|
acdefb6703 | ||
|
|
b7964081ce | ||
|
|
f73fa59bf6 | ||
|
|
0b1b7832fe | ||
|
|
c242957c6f | ||
|
|
55e3a1f7e0 | ||
|
|
3cf565e847 | ||
|
|
9d553620cf | ||
|
|
51584986e1 | ||
|
|
93090adf7c | ||
|
|
d4817a4bea | ||
|
|
7e1a9e2ede | ||
|
|
8a020ec4d9 | ||
|
|
77a3489ed2 | ||
|
|
5941062909 | ||
|
|
98be7df0f5 | ||
|
|
b26aad4129 | ||
|
|
5989589c3e | ||
|
|
4716454faa | ||
|
|
29056a767a | ||
|
|
e823922654 | ||
|
|
8203f9ff6f | ||
|
|
1b22a1e078 | ||
|
|
461d5dff86 | ||
|
|
38f26b7c94 | ||
|
|
eb7eb9c5a1 | ||
|
|
d934e3905b | ||
|
|
6617372b33 | ||
|
|
769778e565 | ||
|
|
a7f5c4af46 | ||
|
|
a9d5fbac54 | ||
|
|
281e68c9bf | ||
|
|
d30c42f85a | ||
|
|
79012be2c8 | ||
|
|
325ff62684 | ||
|
|
f0c2ede7a7 | ||
|
|
b9fd66c1cd | ||
|
|
c43b11fb92 | ||
|
|
d737a439d5 | ||
|
|
0714c7a9ca | ||
|
|
2ceb65adb4 | ||
|
|
dd47fcf2a8 | ||
|
|
64ba52bc1e | ||
|
|
d2ced233e5 | ||
|
|
40660e7b80 | ||
|
|
2aca1f77af | ||
|
|
6f3d2885cd | ||
|
|
6ab7619023 | ||
|
|
ed73e2b782 | ||
|
|
6a59c0a474 | ||
|
|
c5264f9703 | ||
|
|
b47765b4c0 | ||
|
|
e22b24684f | ||
|
|
1d069fd351 |
@@ -16,7 +16,7 @@ services:
|
||||
|
||||
tempo-init:
|
||||
image: busybox:latest
|
||||
command: ["sh", "-c", "chown -R 10001:10001 /var/tempo"]
|
||||
command: [ "sh", "-c", "chown -R 10001:10001 /var/tempo" ]
|
||||
volumes:
|
||||
- ./tempo-data:/var/tempo
|
||||
user: root
|
||||
@@ -34,73 +34,145 @@ services:
|
||||
ports:
|
||||
- "3200:3200" # tempo
|
||||
- "24317:4317" # otlp grpc
|
||||
- "24318:4318" # otlp http
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- otel-network
|
||||
healthcheck:
|
||||
test: [ "CMD", "wget", "--spider", "-q", "http://localhost:3200/metrics" ]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 15s
|
||||
|
||||
otel-collector:
|
||||
image: otel/opentelemetry-collector-contrib:0.129.1
|
||||
image: otel/opentelemetry-collector-contrib:latest
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
volumes:
|
||||
- ./otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml
|
||||
- ./otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml:ro
|
||||
ports:
|
||||
- "1888:1888"
|
||||
- "8888:8888"
|
||||
- "8889:8889"
|
||||
- "13133:13133"
|
||||
- "4317:4317"
|
||||
- "4318:4318"
|
||||
- "55679:55679"
|
||||
- "1888:1888" # pprof
|
||||
- "8888:8888" # Prometheus metrics for Collector
|
||||
- "8889:8889" # Prometheus metrics for application indicators
|
||||
- "13133:13133" # health check
|
||||
- "4317:4317" # OTLP gRPC
|
||||
- "4318:4318" # OTLP HTTP
|
||||
- "55679:55679" # zpages
|
||||
networks:
|
||||
- otel-network
|
||||
depends_on:
|
||||
jaeger:
|
||||
condition: service_started
|
||||
tempo:
|
||||
condition: service_started
|
||||
prometheus:
|
||||
condition: service_started
|
||||
loki:
|
||||
condition: service_started
|
||||
healthcheck:
|
||||
test: [ "CMD", "wget", "--spider", "-q", "http://localhost:13133" ]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
|
||||
jaeger:
|
||||
image: jaegertracing/jaeger:2.8.0
|
||||
image: jaegertracing/jaeger:latest
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
- SPAN_STORAGE_TYPE=memory
|
||||
- COLLECTOR_OTLP_ENABLED=true
|
||||
ports:
|
||||
- "16686:16686"
|
||||
- "14317:4317"
|
||||
- "14318:4318"
|
||||
- "16686:16686" # Web UI
|
||||
- "14317:4317" # OTLP gRPC
|
||||
- "14318:4318" # OTLP HTTP
|
||||
- "18888:8888" # collector
|
||||
networks:
|
||||
- otel-network
|
||||
healthcheck:
|
||||
test: [ "CMD", "wget", "--spider", "-q", "http://localhost:16686" ]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
prometheus:
|
||||
image: prom/prometheus:v3.4.2
|
||||
image: prom/prometheus:latest
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
volumes:
|
||||
- ./prometheus.yml:/etc/prometheus/prometheus.yml
|
||||
- ./prometheus.yml:/etc/prometheus/prometheus.yml:ro
|
||||
- ./prometheus-data:/prometheus
|
||||
ports:
|
||||
- "9090:9090"
|
||||
command:
|
||||
- '--config.file=/etc/prometheus/prometheus.yml'
|
||||
- '--web.enable-otlp-receiver' # Enable OTLP
|
||||
- '--web.enable-remote-write-receiver' # Enable remote write
|
||||
- '--enable-feature=promql-experimental-functions' # Enable info()
|
||||
- '--storage.tsdb.min-block-duration=15m' # Minimum block duration
|
||||
- '--storage.tsdb.max-block-duration=1h' # Maximum block duration
|
||||
- '--log.level=info'
|
||||
- '--storage.tsdb.retention.time=30d'
|
||||
- '--storage.tsdb.path=/prometheus'
|
||||
- '--web.console.libraries=/usr/share/prometheus/console_libraries'
|
||||
- '--web.console.templates=/usr/share/prometheus/consoles'
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- otel-network
|
||||
healthcheck:
|
||||
test: [ "CMD", "wget", "--spider", "-q", "http://localhost:9090/-/healthy" ]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
loki:
|
||||
image: grafana/loki:3.5.1
|
||||
image: grafana/loki:latest
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
volumes:
|
||||
- ./loki-config.yaml:/etc/loki/local-config.yaml
|
||||
- ./loki-config.yaml:/etc/loki/local-config.yaml:ro
|
||||
ports:
|
||||
- "3100:3100"
|
||||
command: -config.file=/etc/loki/local-config.yaml
|
||||
networks:
|
||||
- otel-network
|
||||
healthcheck:
|
||||
test: [ "CMD", "wget", "--spider", "-q", "http://localhost:3100/ready" ]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
grafana:
|
||||
image: grafana/grafana:12.0.2
|
||||
image: grafana/grafana:latest
|
||||
ports:
|
||||
- "3000:3000" # Web UI
|
||||
volumes:
|
||||
- ./grafana-datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml
|
||||
environment:
|
||||
- GF_SECURITY_ADMIN_PASSWORD=admin
|
||||
- GF_SECURITY_ADMIN_USER=admin
|
||||
- TZ=Asia/Shanghai
|
||||
- GF_INSTALL_PLUGINS=grafana-pyroscope-datasource
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- otel-network
|
||||
depends_on:
|
||||
- prometheus
|
||||
- tempo
|
||||
- loki
|
||||
healthcheck:
|
||||
test: [ "CMD", "wget", "--spider", "-q", "http://localhost:3000/api/health" ]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
|
||||
volumes:
|
||||
prometheus-data:
|
||||
tempo-data:
|
||||
|
||||
networks:
|
||||
otel-network:
|
||||
driver: bridge
|
||||
name: "network_otel_config"
|
||||
ipam:
|
||||
config:
|
||||
- subnet: 172.28.0.0/16
|
||||
driver_opts:
|
||||
com.docker.network.enable_ipv6: "true"
|
||||
|
||||
@@ -29,4 +29,80 @@ datasources:
|
||||
serviceMap:
|
||||
datasourceUid: prometheus
|
||||
streamingEnabled:
|
||||
search: true
|
||||
search: true
|
||||
tracesToLogsV2:
|
||||
# Field with an internal link pointing to a logs data source in Grafana.
|
||||
# datasourceUid value must match the uid value of the logs data source.
|
||||
datasourceUid: 'loki'
|
||||
spanStartTimeShift: '-1h'
|
||||
spanEndTimeShift: '1h'
|
||||
tags: [ 'job', 'instance', 'pod', 'namespace' ]
|
||||
filterByTraceID: false
|
||||
filterBySpanID: false
|
||||
customQuery: true
|
||||
query: 'method="$${__span.tags.method}"'
|
||||
tracesToMetrics:
|
||||
datasourceUid: 'prometheus'
|
||||
spanStartTimeShift: '-1h'
|
||||
spanEndTimeShift: '1h'
|
||||
tags: [ { key: 'service.name', value: 'service' }, { key: 'job' } ]
|
||||
queries:
|
||||
- name: 'Sample query'
|
||||
query: 'sum(rate(traces_spanmetrics_latency_bucket{$$__tags}[5m]))'
|
||||
tracesToProfiles:
|
||||
datasourceUid: 'grafana-pyroscope-datasource'
|
||||
tags: [ 'job', 'instance', 'pod', 'namespace' ]
|
||||
profileTypeId: 'process_cpu:cpu:nanoseconds:cpu:nanoseconds'
|
||||
customQuery: true
|
||||
query: 'method="$${__span.tags.method}"'
|
||||
serviceMap:
|
||||
datasourceUid: 'prometheus'
|
||||
nodeGraph:
|
||||
enabled: true
|
||||
search:
|
||||
hide: false
|
||||
traceQuery:
|
||||
timeShiftEnabled: true
|
||||
spanStartTimeShift: '-1h'
|
||||
spanEndTimeShift: '1h'
|
||||
spanBar:
|
||||
type: 'Tag'
|
||||
tag: 'http.path'
|
||||
streamingEnabled:
|
||||
search: true
|
||||
- name: Jaeger
|
||||
type: jaeger
|
||||
uid: Jaeger
|
||||
url: http://jaeger:16686
|
||||
basicAuth: false
|
||||
access: proxy
|
||||
readOnly: false
|
||||
isDefault: false
|
||||
jsonData:
|
||||
tracesToLogsV2:
|
||||
# Field with an internal link pointing to a logs data source in Grafana.
|
||||
# datasourceUid value must match the uid value of the logs data source.
|
||||
datasourceUid: 'loki'
|
||||
spanStartTimeShift: '1h'
|
||||
spanEndTimeShift: '-1h'
|
||||
tags: [ 'job', 'instance', 'pod', 'namespace' ]
|
||||
filterByTraceID: false
|
||||
filterBySpanID: false
|
||||
customQuery: true
|
||||
query: 'method="$${__span.tags.method}"'
|
||||
tracesToMetrics:
|
||||
datasourceUid: 'Prometheus'
|
||||
spanStartTimeShift: '1h'
|
||||
spanEndTimeShift: '-1h'
|
||||
tags: [ { key: 'service.name', value: 'service' }, { key: 'job' } ]
|
||||
queries:
|
||||
- name: 'Sample query'
|
||||
query: 'sum(rate(traces_spanmetrics_latency_bucket{$$__tags}[5m]))'
|
||||
nodeGraph:
|
||||
enabled: true
|
||||
traceQuery:
|
||||
timeShiftEnabled: true
|
||||
spanStartTimeShift: '1h'
|
||||
spanEndTimeShift: '-1h'
|
||||
spanBar:
|
||||
type: 'None'
|
||||
@@ -65,6 +65,7 @@ extensions:
|
||||
some_store:
|
||||
memory:
|
||||
max_traces: 1000000
|
||||
max_events: 100000
|
||||
another_store:
|
||||
memory:
|
||||
max_traces: 1000000
|
||||
@@ -102,6 +103,7 @@ receivers:
|
||||
|
||||
processors:
|
||||
batch:
|
||||
metadata_keys: [ "span.kind", "http.method", "http.status_code", "db.system", "db.statement", "messaging.system", "messaging.destination", "messaging.operation","span.events","span.links" ]
|
||||
# Adaptive Sampling Processor is required to support adaptive sampling.
|
||||
# It expects remote_sampling extension with `adaptive:` config to be enabled.
|
||||
adaptive_sampling:
|
||||
|
||||
@@ -41,6 +41,9 @@ query_range:
|
||||
|
||||
limits_config:
|
||||
metric_aggregation_enabled: true
|
||||
max_line_size: 256KB
|
||||
max_line_size_truncate: false
|
||||
allow_structured_metadata: true
|
||||
|
||||
schema_config:
|
||||
configs:
|
||||
@@ -51,6 +54,7 @@ schema_config:
|
||||
index:
|
||||
prefix: index_
|
||||
period: 24h
|
||||
row_shards: 16
|
||||
|
||||
pattern_ingester:
|
||||
enabled: true
|
||||
@@ -63,6 +67,7 @@ ruler:
|
||||
frontend:
|
||||
encoding: protobuf
|
||||
|
||||
|
||||
# By default, Loki will send anonymous, but uniquely-identifiable usage and configuration
|
||||
# analytics to Grafana Labs. These statistics are sent to https://stats.grafana.org/
|
||||
#
|
||||
|
||||
@@ -15,67 +15,108 @@
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc: # OTLP gRPC 接收器
|
||||
grpc: # OTLP gRPC receiver
|
||||
endpoint: 0.0.0.0:4317
|
||||
http: # OTLP HTTP 接收器
|
||||
http: # OTLP HTTP receiver
|
||||
endpoint: 0.0.0.0:4318
|
||||
|
||||
processors:
|
||||
batch: # 批处理处理器,提升吞吐量
|
||||
batch: # Batch processor to improve throughput
|
||||
timeout: 5s
|
||||
send_batch_size: 1000
|
||||
metadata_keys: [ ]
|
||||
metadata_cardinality_limit: 1000
|
||||
memory_limiter:
|
||||
check_interval: 1s
|
||||
limit_mib: 512
|
||||
transform/logs:
|
||||
log_statements:
|
||||
- context: log
|
||||
statements:
|
||||
# Extract Body as attribute "message"
|
||||
- set(attributes["message"], body.string)
|
||||
# Retain the original Body
|
||||
- set(attributes["log.body"], body.string)
|
||||
|
||||
exporters:
|
||||
otlp/traces: # OTLP 导出器,用于跟踪数据
|
||||
endpoint: "jaeger:4317" # Jaeger 的 OTLP gRPC 端点
|
||||
otlp/traces: # OTLP exporter for trace data
|
||||
endpoint: "http://jaeger:4317" # OTLP gRPC endpoint for Jaeger
|
||||
tls:
|
||||
insecure: true # 开发环境禁用 TLS,生产环境需配置证书
|
||||
otlp/tempo: # OTLP 导出器,用于跟踪数据
|
||||
endpoint: "tempo:4317" # tempo 的 OTLP gRPC 端点
|
||||
insecure: true # TLS is disabled in the development environment and a certificate needs to be configured in the production environment.
|
||||
compression: gzip # Enable compression to reduce network bandwidth
|
||||
retry_on_failure:
|
||||
enabled: true # Enable retry on failure
|
||||
initial_interval: 1s # Initial interval for retry
|
||||
max_interval: 30s # Maximum interval for retry
|
||||
max_elapsed_time: 300s # Maximum elapsed time for retry
|
||||
sending_queue:
|
||||
enabled: true # Enable sending queue
|
||||
num_consumers: 10 # Number of consumers
|
||||
queue_size: 5000 # Queue size
|
||||
otlp/tempo: # OTLP exporter for trace data
|
||||
endpoint: "http://tempo:4317" # OTLP gRPC endpoint for tempo
|
||||
tls:
|
||||
insecure: true # 开发环境禁用 TLS,生产环境需配置证书
|
||||
prometheus: # Prometheus 导出器,用于指标数据
|
||||
endpoint: "0.0.0.0:8889" # Prometheus 刮取端点
|
||||
namespace: "rustfs" # 指标前缀
|
||||
send_timestamps: true # 发送时间戳
|
||||
# enable_open_metrics: true
|
||||
otlphttp/loki: # Loki 导出器,用于日志数据
|
||||
# endpoint: "http://loki:3100/otlp/v1/logs"
|
||||
endpoint: "http://loki:3100/otlp/v1/logs"
|
||||
insecure: true # TLS is disabled in the development environment and a certificate needs to be configured in the production environment.
|
||||
compression: gzip # Enable compression to reduce network bandwidth
|
||||
retry_on_failure:
|
||||
enabled: true # Enable retry on failure
|
||||
initial_interval: 1s # Initial interval for retry
|
||||
max_interval: 30s # Maximum interval for retry
|
||||
max_elapsed_time: 300s # Maximum elapsed time for retry
|
||||
sending_queue:
|
||||
enabled: true # Enable sending queue
|
||||
num_consumers: 10 # Number of consumers
|
||||
queue_size: 5000 # Queue size
|
||||
prometheus: # Prometheus exporter for metrics data
|
||||
endpoint: "0.0.0.0:8889" # Prometheus scraping endpoint
|
||||
namespace: "metrics" # indicator prefix
|
||||
send_timestamps: true # Send timestamp
|
||||
metric_expiration: 5m # Metric expiration time
|
||||
resource_to_telemetry_conversion:
|
||||
enabled: true # Enable resource to telemetry conversion
|
||||
otlphttp/loki: # Loki exporter for log data
|
||||
endpoint: "http://loki:3100/otlp"
|
||||
tls:
|
||||
insecure: true
|
||||
compression: gzip # Enable compression to reduce network bandwidth
|
||||
extensions:
|
||||
health_check:
|
||||
endpoint: 0.0.0.0:13133
|
||||
pprof:
|
||||
endpoint: 0.0.0.0:1888
|
||||
zpages:
|
||||
endpoint: 0.0.0.0:55679
|
||||
service:
|
||||
extensions: [ health_check, pprof, zpages ] # 启用扩展
|
||||
extensions: [ health_check, pprof, zpages ] # Enable extension
|
||||
pipelines:
|
||||
traces:
|
||||
receivers: [ otlp ]
|
||||
processors: [ memory_limiter,batch ]
|
||||
exporters: [ otlp/traces,otlp/tempo ]
|
||||
processors: [ memory_limiter, batch ]
|
||||
exporters: [ otlp/traces, otlp/tempo ]
|
||||
metrics:
|
||||
receivers: [ otlp ]
|
||||
processors: [ batch ]
|
||||
exporters: [ prometheus ]
|
||||
logs:
|
||||
receivers: [ otlp ]
|
||||
processors: [ batch ]
|
||||
processors: [ batch, transform/logs ]
|
||||
exporters: [ otlphttp/loki ]
|
||||
telemetry:
|
||||
logs:
|
||||
level: "info" # Collector 日志级别
|
||||
level: "debug" # Collector log level
|
||||
encoding: "json" # Log encoding: console or json
|
||||
metrics:
|
||||
level: "detailed" # 可以是 basic, normal, detailed
|
||||
level: "detailed" # Can be basic, normal, detailed
|
||||
readers:
|
||||
- periodic:
|
||||
exporter:
|
||||
otlp:
|
||||
protocol: http/protobuf
|
||||
endpoint: http://otel-collector:4318
|
||||
- pull:
|
||||
exporter:
|
||||
prometheus:
|
||||
host: '0.0.0.0'
|
||||
port: 8888
|
||||
|
||||
|
||||
|
||||
1
.docker/observability/prometheus-data/.gitignore
vendored
Normal file
1
.docker/observability/prometheus-data/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
*
|
||||
@@ -13,16 +13,53 @@
|
||||
# limitations under the License.
|
||||
|
||||
global:
|
||||
scrape_interval: 5s # 刮取间隔
|
||||
scrape_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
|
||||
evaluation_interval: 15s
|
||||
external_labels:
|
||||
cluster: 'rustfs-dev' # Label to identify the cluster
|
||||
relica: '1' # Replica identifier
|
||||
|
||||
scrape_configs:
|
||||
- job_name: 'otel-collector'
|
||||
- job_name: 'otel-collector-internal'
|
||||
static_configs:
|
||||
- targets: [ 'otel-collector:8888' ] # 从 Collector 刮取指标
|
||||
- job_name: 'otel-metrics'
|
||||
- targets: [ 'otel-collector:8888' ] # Scrape metrics from Collector
|
||||
scrape_interval: 10s
|
||||
- job_name: 'rustfs-app-metrics'
|
||||
static_configs:
|
||||
- targets: [ 'otel-collector:8889' ] # 应用指标
|
||||
- targets: [ 'otel-collector:8889' ] # Application indicators
|
||||
scrape_interval: 15s
|
||||
metric_relabel_configs:
|
||||
- job_name: 'tempo'
|
||||
static_configs:
|
||||
- targets: [ 'tempo:3200' ]
|
||||
|
||||
- targets: [ 'tempo:3200' ] # Scrape metrics from Tempo
|
||||
- job_name: 'jaeger'
|
||||
static_configs:
|
||||
- targets: [ 'jaeger:8888' ] # Jaeger admin port
|
||||
|
||||
otlp:
|
||||
# Recommended attributes to be promoted to labels.
|
||||
promote_resource_attributes:
|
||||
- service.instance.id
|
||||
- service.name
|
||||
- service.namespace
|
||||
- cloud.availability_zone
|
||||
- cloud.region
|
||||
- container.name
|
||||
- deployment.environment.name
|
||||
- k8s.cluster.name
|
||||
- k8s.container.name
|
||||
- k8s.cronjob.name
|
||||
- k8s.daemonset.name
|
||||
- k8s.deployment.name
|
||||
- k8s.job.name
|
||||
- k8s.namespace.name
|
||||
- k8s.pod.name
|
||||
- k8s.replicaset.name
|
||||
- k8s.statefulset.name
|
||||
# Ingest OTLP data keeping all characters in metric/label names.
|
||||
translation_strategy: NoUTF8EscapingWithSuffixes
|
||||
|
||||
storage:
|
||||
# OTLP is a push-based protocol, Out of order samples is a common scenario.
|
||||
tsdb:
|
||||
out_of_order_time_window: 30m
|
||||
@@ -18,7 +18,9 @@ distributor:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: "tempo:4317"
|
||||
endpoint: "0.0.0.0:4317"
|
||||
http:
|
||||
endpoint: "0.0.0.0:4318"
|
||||
|
||||
ingester:
|
||||
max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally
|
||||
|
||||
9
.github/actions/setup/action.yml
vendored
9
.github/actions/setup/action.yml
vendored
@@ -52,24 +52,19 @@ runs:
|
||||
sudo apt-get install -y \
|
||||
musl-tools \
|
||||
build-essential \
|
||||
lld \
|
||||
libdbus-1-dev \
|
||||
libwayland-dev \
|
||||
libwebkit2gtk-4.1-dev \
|
||||
libxdo-dev \
|
||||
pkg-config \
|
||||
libssl-dev
|
||||
|
||||
- name: Install protoc
|
||||
uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
version: "31.1"
|
||||
version: "33.1"
|
||||
repo-token: ${{ inputs.github-token }}
|
||||
|
||||
- name: Install flatc
|
||||
uses: Nugine/setup-flatc@v1
|
||||
with:
|
||||
version: "25.2.10"
|
||||
version: "25.9.23"
|
||||
|
||||
- name: Install Rust toolchain
|
||||
uses: dtolnay/rust-toolchain@stable
|
||||
|
||||
12
.github/dependabot.yml
vendored
12
.github/dependabot.yml
vendored
@@ -22,8 +22,18 @@ updates:
|
||||
- package-ecosystem: "cargo" # See documentation for possible values
|
||||
directory: "/" # Location of package manifests
|
||||
schedule:
|
||||
interval: "monthly"
|
||||
interval: "weekly"
|
||||
day: "monday"
|
||||
timezone: "Asia/Shanghai"
|
||||
time: "08:00"
|
||||
groups:
|
||||
s3s:
|
||||
update-types:
|
||||
- "minor"
|
||||
- "patch"
|
||||
patterns:
|
||||
- "s3s"
|
||||
- "s3s-*"
|
||||
dependencies:
|
||||
patterns:
|
||||
- "*"
|
||||
|
||||
2
.github/workflows/ci.yml
vendored
2
.github/workflows/ci.yml
vendored
@@ -103,6 +103,8 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- name: Delete huge unnecessary tools folder
|
||||
run: rm -rf /opt/hostedtoolcache
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v5
|
||||
|
||||
|
||||
78
.github/workflows/helm-package.yml
vendored
Normal file
78
.github/workflows/helm-package.yml
vendored
Normal file
@@ -0,0 +1,78 @@
|
||||
name: Publish helm chart to artifacthub
|
||||
|
||||
on:
|
||||
workflow_run:
|
||||
workflows: ["Build and Release"]
|
||||
types: [completed]
|
||||
|
||||
env:
|
||||
new_version: ${{ github.event.workflow_run.head_branch }}
|
||||
|
||||
jobs:
|
||||
build-helm-package:
|
||||
runs-on: ubuntu-latest
|
||||
# Only run on successful builds triggered by tag pushes (version format: x.y.z or x.y.z-suffix)
|
||||
if: |
|
||||
github.event.workflow_run.conclusion == 'success' &&
|
||||
github.event.workflow_run.event == 'push' &&
|
||||
contains(github.event.workflow_run.head_branch, '.')
|
||||
|
||||
steps:
|
||||
- name: Checkout helm chart repo
|
||||
uses: actions/checkout@v2
|
||||
|
||||
- name: Replace chart appversion
|
||||
run: |
|
||||
set -e
|
||||
set -x
|
||||
old_version=$(grep "^appVersion:" helm/rustfs/Chart.yaml | awk '{print $2}')
|
||||
sed -i "s/$old_version/$new_version/g" helm/rustfs/Chart.yaml
|
||||
sed -i "/^image:/,/^[^ ]/ s/tag:.*/tag: "$new_version"/" helm/rustfs/values.yaml
|
||||
|
||||
- name: Set up Helm
|
||||
uses: azure/setup-helm@v4.3.0
|
||||
|
||||
- name: Package Helm Chart
|
||||
run: |
|
||||
cp helm/README.md helm/rustfs/
|
||||
package_version=$(echo $new_version | awk -F '-' '{print $2}' | awk -F '.' '{print $NF}')
|
||||
helm package ./helm/rustfs --destination helm/rustfs/ --version "0.0.$package_version"
|
||||
|
||||
- name: Upload helm package as artifact
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: helm-package
|
||||
path: helm/rustfs/*.tgz
|
||||
retention-days: 1
|
||||
|
||||
publish-helm-package:
|
||||
runs-on: ubuntu-latest
|
||||
needs: [build-helm-package]
|
||||
|
||||
steps:
|
||||
- name: Checkout helm package repo
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
repository: rustfs/helm
|
||||
token: ${{ secrets.RUSTFS_HELM_PACKAGE }}
|
||||
|
||||
- name: Download helm package
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: helm-package
|
||||
path: ./
|
||||
|
||||
- name: Set up helm
|
||||
uses: azure/setup-helm@v4.3.0
|
||||
|
||||
- name: Generate index
|
||||
run: helm repo index . --url https://charts.rustfs.com
|
||||
|
||||
- name: Push helm package and index file
|
||||
run: |
|
||||
git config --global user.name "${{ secrets.USERNAME }}"
|
||||
git config --global user.email "${{ secrets.EMAIL_ADDRESS }}"
|
||||
git status .
|
||||
git add .
|
||||
git commit -m "Update rustfs helm package with $new_version."
|
||||
git push origin main
|
||||
3
.gitignore
vendored
3
.gitignore
vendored
@@ -22,4 +22,5 @@ profile.json
|
||||
.secrets
|
||||
*.go
|
||||
*.pb
|
||||
*.svg
|
||||
*.svg
|
||||
deploy/logs/*.log.*
|
||||
702
.rules.md
Normal file
702
.rules.md
Normal file
@@ -0,0 +1,702 @@
|
||||
# RustFS Project AI Coding Rules
|
||||
|
||||
## 🚨🚨🚨 CRITICAL DEVELOPMENT RULES - ZERO TOLERANCE 🚨🚨🚨
|
||||
|
||||
### ⛔️ ABSOLUTE PROHIBITION: NEVER COMMIT DIRECTLY TO MASTER/MAIN BRANCH ⛔️
|
||||
|
||||
**🔥 THIS IS THE MOST CRITICAL RULE - VIOLATION WILL RESULT IN IMMEDIATE REVERSAL 🔥**
|
||||
|
||||
- **🚫 ZERO DIRECT COMMITS TO MAIN/MASTER BRANCH - ABSOLUTELY FORBIDDEN**
|
||||
- **🚫 ANY DIRECT COMMIT TO MAIN BRANCH MUST BE IMMEDIATELY REVERTED**
|
||||
- **🚫 NO EXCEPTIONS FOR HOTFIXES, EMERGENCIES, OR URGENT CHANGES**
|
||||
- **🚫 NO EXCEPTIONS FOR SMALL CHANGES, TYPOS, OR DOCUMENTATION UPDATES**
|
||||
- **🚫 NO EXCEPTIONS FOR ANYONE - MAINTAINERS, CONTRIBUTORS, OR ADMINS**
|
||||
|
||||
### 📋 MANDATORY WORKFLOW - STRICTLY ENFORCED
|
||||
|
||||
**EVERY SINGLE CHANGE MUST FOLLOW THIS WORKFLOW:**
|
||||
|
||||
1. **Check current branch**: `git branch` (MUST NOT be on main/master)
|
||||
2. **Switch to main**: `git checkout main`
|
||||
3. **Pull latest**: `git pull origin main`
|
||||
4. **Create feature branch**: `git checkout -b feat/your-feature-name`
|
||||
5. **Make changes ONLY on feature branch**
|
||||
6. **Test thoroughly before committing**
|
||||
7. **Commit and push to feature branch**: `git push origin feat/your-feature-name`
|
||||
8. **Create Pull Request**: Use `gh pr create` (MANDATORY)
|
||||
9. **Wait for PR approval**: NO self-merging allowed
|
||||
10. **Merge through GitHub interface**: ONLY after approval
|
||||
|
||||
### 🔒 ENFORCEMENT MECHANISMS
|
||||
|
||||
- **Branch protection rules**: Main branch is protected
|
||||
- **Pre-commit hooks**: Will block direct commits to main
|
||||
- **CI/CD checks**: All PRs must pass before merging
|
||||
- **Code review requirement**: At least one approval needed
|
||||
- **Automated reversal**: Direct commits to main will be automatically reverted
|
||||
|
||||
## 🎯 Core AI Development Principles
|
||||
|
||||
### Five Execution Steps
|
||||
|
||||
#### 1. Task Analysis and Planning
|
||||
- **Clear Objectives**: Deeply understand task requirements and expected results before starting coding
|
||||
- **Plan Development**: List specific files, components, and functions that need modification, explaining the reasons for changes
|
||||
- **Risk Assessment**: Evaluate the impact of changes on existing functionality, develop rollback plans
|
||||
|
||||
#### 2. Precise Code Location
|
||||
- **File Identification**: Determine specific files and line numbers that need modification
|
||||
- **Impact Analysis**: Avoid modifying irrelevant files, clearly state the reason for each file modification
|
||||
- **Minimization Principle**: Unless explicitly required by the task, do not create new abstraction layers or refactor existing code
|
||||
|
||||
#### 3. Minimal Code Changes
|
||||
- **Focus on Core**: Only write code directly required by the task
|
||||
- **Avoid Redundancy**: Do not add unnecessary logs, comments, tests, or error handling
|
||||
- **Isolation**: Ensure new code does not interfere with existing functionality, maintain code independence
|
||||
|
||||
#### 4. Strict Code Review
|
||||
- **Correctness Check**: Verify the correctness and completeness of code logic
|
||||
- **Style Consistency**: Ensure code conforms to established project coding style
|
||||
- **Side Effect Assessment**: Evaluate the impact of changes on downstream systems
|
||||
|
||||
#### 5. Clear Delivery Documentation
|
||||
- **Change Summary**: Detailed explanation of all modifications and reasons
|
||||
- **File List**: List all modified files and their specific changes
|
||||
- **Risk Statement**: Mark any assumptions or potential risk points
|
||||
|
||||
### Core Principles
|
||||
- **🎯 Precise Execution**: Strictly follow task requirements, no arbitrary innovation
|
||||
- **⚡ Efficient Development**: Avoid over-design, only do necessary work
|
||||
- **🛡️ Safe and Reliable**: Always follow development processes, ensure code quality and system stability
|
||||
- **🔒 Cautious Modification**: Only modify when clearly knowing what needs to be changed and having confidence
|
||||
|
||||
### Additional AI Behavior Rules
|
||||
|
||||
1. **Use English for all code comments and documentation** - All comments, variable names, function names, documentation, and user-facing text in code should be in English
|
||||
2. **Clean up temporary scripts after use** - Any temporary scripts, test files, or helper files created during AI work should be removed after task completion
|
||||
3. **Only make confident modifications** - Do not make speculative changes or "convenient" modifications outside the task scope. If uncertain about a change, ask for clarification rather than guessing
|
||||
|
||||
## Project Overview
|
||||
|
||||
RustFS is a high-performance distributed object storage system written in Rust, compatible with S3 API. The project adopts a modular architecture, supporting erasure coding storage, multi-tenant management, observability, and other enterprise-level features.
|
||||
|
||||
## Core Architecture Principles
|
||||
|
||||
### 1. Modular Design
|
||||
|
||||
- Project uses Cargo workspace structure, containing multiple independent crates
|
||||
- Core modules: `rustfs` (main service), `ecstore` (erasure coding storage), `common` (shared components)
|
||||
- Functional modules: `iam` (identity management), `madmin` (management interface), `crypto` (encryption), etc.
|
||||
- Tool modules: `cli` (command line tool), `crates/*` (utility libraries)
|
||||
|
||||
### 2. Asynchronous Programming Pattern
|
||||
|
||||
- Comprehensive use of `tokio` async runtime
|
||||
- Prioritize `async/await` syntax
|
||||
- Use `async-trait` for async methods in traits
|
||||
- Avoid blocking operations, use `spawn_blocking` when necessary
|
||||
|
||||
### 3. Error Handling Strategy
|
||||
|
||||
- **Use modular, type-safe error handling with `thiserror`**
|
||||
- Each module should define its own error type using `thiserror::Error` derive macro
|
||||
- Support error chains and context information through `#[from]` and `#[source]` attributes
|
||||
- Use `Result<T>` type aliases for consistency within each module
|
||||
- Error conversion between modules should use explicit `From` implementations
|
||||
- Follow the pattern: `pub type Result<T> = core::result::Result<T, Error>`
|
||||
- Use `#[error("description")]` attributes for clear error messages
|
||||
- Support error downcasting when needed through `other()` helper methods
|
||||
- Implement `Clone` for errors when required by the domain logic
|
||||
|
||||
## Code Style Guidelines
|
||||
|
||||
### 1. Formatting Configuration
|
||||
|
||||
```toml
|
||||
max_width = 130
|
||||
fn_call_width = 90
|
||||
single_line_let_else_max_width = 100
|
||||
```
|
||||
|
||||
### 2. **🔧 MANDATORY Code Formatting Rules**
|
||||
|
||||
**CRITICAL**: All code must be properly formatted before committing. This project enforces strict formatting standards to maintain code consistency and readability.
|
||||
|
||||
#### Pre-commit Requirements (MANDATORY)
|
||||
|
||||
Before every commit, you **MUST**:
|
||||
|
||||
1. **Format your code**:
|
||||
```bash
|
||||
cargo fmt --all
|
||||
```
|
||||
|
||||
2. **Verify formatting**:
|
||||
```bash
|
||||
cargo fmt --all --check
|
||||
```
|
||||
|
||||
3. **Pass clippy checks**:
|
||||
```bash
|
||||
cargo clippy --all-targets --all-features -- -D warnings
|
||||
```
|
||||
|
||||
4. **Ensure compilation**:
|
||||
```bash
|
||||
cargo check --all-targets
|
||||
```
|
||||
|
||||
#### Quick Commands
|
||||
|
||||
Use these convenient Makefile targets for common tasks:
|
||||
|
||||
```bash
|
||||
# Format all code
|
||||
make fmt
|
||||
|
||||
# Check if code is properly formatted
|
||||
make fmt-check
|
||||
|
||||
# Run clippy checks
|
||||
make clippy
|
||||
|
||||
# Run compilation check
|
||||
make check
|
||||
|
||||
# Run tests
|
||||
make test
|
||||
|
||||
# Run all pre-commit checks (format + clippy + check + test)
|
||||
make pre-commit
|
||||
|
||||
# Setup git hooks (one-time setup)
|
||||
make setup-hooks
|
||||
```
|
||||
|
||||
### 3. Naming Conventions
|
||||
|
||||
- Use `snake_case` for functions, variables, modules
|
||||
- Use `PascalCase` for types, traits, enums
|
||||
- Constants use `SCREAMING_SNAKE_CASE`
|
||||
- Global variables prefix `GLOBAL_`, e.g., `GLOBAL_Endpoints`
|
||||
- Use meaningful and descriptive names for variables, functions, and methods
|
||||
- Avoid meaningless names like `temp`, `data`, `foo`, `bar`, `test123`
|
||||
- Choose names that clearly express the purpose and intent
|
||||
|
||||
### 4. Type Declaration Guidelines
|
||||
|
||||
- **Prefer type inference over explicit type declarations** when the type is obvious from context
|
||||
- Let the Rust compiler infer types whenever possible to reduce verbosity and improve maintainability
|
||||
- Only specify types explicitly when:
|
||||
- The type cannot be inferred by the compiler
|
||||
- Explicit typing improves code clarity and readability
|
||||
- Required for API boundaries (function signatures, public struct fields)
|
||||
- Needed to resolve ambiguity between multiple possible types
|
||||
|
||||
### 5. Documentation Comments
|
||||
|
||||
- Public APIs must have documentation comments
|
||||
- Use `///` for documentation comments
|
||||
- Complex functions add `# Examples` and `# Parameters` descriptions
|
||||
- Error cases use `# Errors` descriptions
|
||||
- Always use English for all comments and documentation
|
||||
- Avoid meaningless comments like "debug 111" or placeholder text
|
||||
|
||||
### 6. Import Guidelines
|
||||
|
||||
- Standard library imports first
|
||||
- Third-party crate imports in the middle
|
||||
- Project internal imports last
|
||||
- Group `use` statements with blank lines between groups
|
||||
|
||||
## Asynchronous Programming Guidelines
|
||||
|
||||
### 1. Trait Definition
|
||||
|
||||
```rust
|
||||
#[async_trait::async_trait]
|
||||
pub trait StorageAPI: Send + Sync {
|
||||
async fn get_object(&self, bucket: &str, object: &str) -> Result<ObjectInfo>;
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Error Handling
|
||||
|
||||
```rust
|
||||
// Use ? operator to propagate errors
|
||||
async fn example_function() -> Result<()> {
|
||||
let data = read_file("path").await?;
|
||||
process_data(data).await?;
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Concurrency Control
|
||||
|
||||
- Use `Arc` and `Mutex`/`RwLock` for shared state management
|
||||
- Prioritize async locks from `tokio::sync`
|
||||
- Avoid holding locks for long periods
|
||||
|
||||
## Logging and Tracing Guidelines
|
||||
|
||||
### 1. Tracing Usage
|
||||
|
||||
```rust
|
||||
#[tracing::instrument(skip(self, data))]
|
||||
async fn process_data(&self, data: &[u8]) -> Result<()> {
|
||||
info!("Processing {} bytes", data.len());
|
||||
// Implementation logic
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Log Levels
|
||||
|
||||
- `error!`: System errors requiring immediate attention
|
||||
- `warn!`: Warning information that may affect functionality
|
||||
- `info!`: Important business information
|
||||
- `debug!`: Debug information for development use
|
||||
- `trace!`: Detailed execution paths
|
||||
|
||||
### 3. Structured Logging
|
||||
|
||||
```rust
|
||||
info!(
|
||||
counter.rustfs_api_requests_total = 1_u64,
|
||||
key_request_method = %request.method(),
|
||||
key_request_uri_path = %request.uri().path(),
|
||||
"API request processed"
|
||||
);
|
||||
```
|
||||
|
||||
## Error Handling Guidelines
|
||||
|
||||
### 1. Error Type Definition
|
||||
|
||||
```rust
|
||||
// Use thiserror for module-specific error types
|
||||
#[derive(thiserror::Error, Debug)]
|
||||
pub enum MyError {
|
||||
#[error("IO error: {0}")]
|
||||
Io(#[from] std::io::Error),
|
||||
|
||||
#[error("Storage error: {0}")]
|
||||
Storage(#[from] ecstore::error::StorageError),
|
||||
|
||||
#[error("Custom error: {message}")]
|
||||
Custom { message: String },
|
||||
|
||||
#[error("File not found: {path}")]
|
||||
FileNotFound { path: String },
|
||||
|
||||
#[error("Invalid configuration: {0}")]
|
||||
InvalidConfig(String),
|
||||
}
|
||||
|
||||
// Provide Result type alias for the module
|
||||
pub type Result<T> = core::result::Result<T, MyError>;
|
||||
```
|
||||
|
||||
### 2. Error Helper Methods
|
||||
|
||||
```rust
|
||||
impl MyError {
|
||||
/// Create error from any compatible error type
|
||||
pub fn other<E>(error: E) -> Self
|
||||
where
|
||||
E: Into<Box<dyn std::error::Error + Send + Sync>>,
|
||||
{
|
||||
MyError::Io(std::io::Error::other(error))
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Error Context and Propagation
|
||||
|
||||
```rust
|
||||
// Use ? operator for clean error propagation
|
||||
async fn example_function() -> Result<()> {
|
||||
let data = read_file("path").await?;
|
||||
process_data(data).await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Add context to errors
|
||||
fn process_with_context(path: &str) -> Result<()> {
|
||||
std::fs::read(path)
|
||||
.map_err(|e| MyError::Custom {
|
||||
message: format!("Failed to read {}: {}", path, e)
|
||||
})?;
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
|
||||
## Performance Optimization Guidelines
|
||||
|
||||
### 1. Memory Management
|
||||
|
||||
- Use `Bytes` instead of `Vec<u8>` for zero-copy operations
|
||||
- Avoid unnecessary cloning, use reference passing
|
||||
- Use `Arc` for sharing large objects
|
||||
|
||||
### 2. Concurrency Optimization
|
||||
|
||||
```rust
|
||||
// Use join_all for concurrent operations
|
||||
let futures = disks.iter().map(|disk| disk.operation());
|
||||
let results = join_all(futures).await;
|
||||
```
|
||||
|
||||
### 3. Caching Strategy
|
||||
|
||||
- Use `LazyLock` for global caching
|
||||
- Implement LRU cache to avoid memory leaks
|
||||
|
||||
## Testing Guidelines
|
||||
|
||||
### 1. Unit Tests
|
||||
|
||||
```rust
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use test_case::test_case;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_async_function() {
|
||||
let result = async_function().await;
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
|
||||
#[test_case("input1", "expected1")]
|
||||
#[test_case("input2", "expected2")]
|
||||
fn test_with_cases(input: &str, expected: &str) {
|
||||
assert_eq!(function(input), expected);
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Integration Tests
|
||||
|
||||
- Use `e2e_test` module for end-to-end testing
|
||||
- Simulate real storage environments
|
||||
|
||||
### 3. Test Quality Standards
|
||||
|
||||
- Write meaningful test cases that verify actual functionality
|
||||
- Avoid placeholder or debug content like "debug 111", "test test", etc.
|
||||
- Use descriptive test names that clearly indicate what is being tested
|
||||
- Each test should have a clear purpose and verify specific behavior
|
||||
- Test data should be realistic and representative of actual use cases
|
||||
|
||||
## Cross-Platform Compatibility Guidelines
|
||||
|
||||
### 1. CPU Architecture Compatibility
|
||||
|
||||
- **Always consider multi-platform and different CPU architecture compatibility** when writing code
|
||||
- Support major architectures: x86_64, aarch64 (ARM64), and other target platforms
|
||||
- Use conditional compilation for architecture-specific code:
|
||||
|
||||
```rust
|
||||
#[cfg(target_arch = "x86_64")]
|
||||
fn optimized_x86_64_function() { /* x86_64 specific implementation */ }
|
||||
|
||||
#[cfg(target_arch = "aarch64")]
|
||||
fn optimized_aarch64_function() { /* ARM64 specific implementation */ }
|
||||
|
||||
#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
|
||||
fn generic_function() { /* Generic fallback implementation */ }
|
||||
```
|
||||
|
||||
### 2. Platform-Specific Dependencies
|
||||
|
||||
- Use feature flags for platform-specific dependencies
|
||||
- Provide fallback implementations for unsupported platforms
|
||||
- Test on multiple architectures in CI/CD pipeline
|
||||
|
||||
### 3. Endianness Considerations
|
||||
|
||||
- Use explicit byte order conversion when dealing with binary data
|
||||
- Prefer `to_le_bytes()`, `from_le_bytes()` for consistent little-endian format
|
||||
- Use `byteorder` crate for complex binary format handling
|
||||
|
||||
### 4. SIMD and Performance Optimizations
|
||||
|
||||
- Use portable SIMD libraries like `wide` or `packed_simd`
|
||||
- Provide fallback implementations for non-SIMD architectures
|
||||
- Use runtime feature detection when appropriate
|
||||
|
||||
## Security Guidelines
|
||||
|
||||
### 1. Memory Safety
|
||||
|
||||
- Disable `unsafe` code (workspace.lints.rust.unsafe_code = "deny")
|
||||
- Use `rustls` instead of `openssl`
|
||||
|
||||
### 2. Authentication and Authorization
|
||||
|
||||
```rust
|
||||
// Use IAM system for permission checks
|
||||
let identity = iam.authenticate(&access_key, &secret_key).await?;
|
||||
iam.authorize(&identity, &action, &resource).await?;
|
||||
```
|
||||
|
||||
## Configuration Management Guidelines
|
||||
|
||||
### 1. Environment Variables
|
||||
|
||||
- Use `RUSTFS_` prefix
|
||||
- Support both configuration files and environment variables
|
||||
- Provide reasonable default values
|
||||
|
||||
### 2. Configuration Structure
|
||||
|
||||
```rust
|
||||
#[derive(Debug, Deserialize, Clone)]
|
||||
pub struct Config {
|
||||
pub address: String,
|
||||
pub volumes: String,
|
||||
#[serde(default)]
|
||||
pub console_enable: bool,
|
||||
}
|
||||
```
|
||||
|
||||
## Dependency Management Guidelines
|
||||
|
||||
### 1. Workspace Dependencies
|
||||
|
||||
- Manage versions uniformly at workspace level
|
||||
- Use `workspace = true` to inherit configuration
|
||||
|
||||
### 2. Feature Flags
|
||||
|
||||
```toml
|
||||
[features]
|
||||
default = ["file"]
|
||||
gpu = ["dep:nvml-wrapper"]
|
||||
kafka = ["dep:rdkafka"]
|
||||
```
|
||||
|
||||
## Deployment and Operations Guidelines
|
||||
|
||||
### 1. Containerization
|
||||
|
||||
- Provide Dockerfile and docker-compose configuration
|
||||
- Support multi-stage builds to optimize image size
|
||||
|
||||
### 2. Observability
|
||||
|
||||
- Integrate OpenTelemetry for distributed tracing
|
||||
- Support Prometheus metrics collection
|
||||
- Provide Grafana dashboards
|
||||
|
||||
### 3. Health Checks
|
||||
|
||||
```rust
|
||||
// Implement health check endpoint
|
||||
async fn health_check() -> Result<HealthStatus> {
|
||||
// Check component status
|
||||
}
|
||||
```
|
||||
|
||||
## Code Review Checklist
|
||||
|
||||
### 1. **Code Formatting and Quality (MANDATORY)**
|
||||
|
||||
- [ ] **Code is properly formatted** (`cargo fmt --all --check` passes)
|
||||
- [ ] **All clippy warnings are resolved** (`cargo clippy --all-targets --all-features -- -D warnings` passes)
|
||||
- [ ] **Code compiles successfully** (`cargo check --all-targets` passes)
|
||||
- [ ] **Pre-commit hooks are working** and all checks pass
|
||||
- [ ] **No formatting-related changes** mixed with functional changes (separate commits)
|
||||
|
||||
### 2. Functionality
|
||||
|
||||
- [ ] Are all error cases properly handled?
|
||||
- [ ] Is there appropriate logging?
|
||||
- [ ] Is there necessary test coverage?
|
||||
|
||||
### 3. Performance
|
||||
|
||||
- [ ] Are unnecessary memory allocations avoided?
|
||||
- [ ] Are async operations used correctly?
|
||||
- [ ] Are there potential deadlock risks?
|
||||
|
||||
### 4. Security
|
||||
|
||||
- [ ] Are input parameters properly validated?
|
||||
- [ ] Are there appropriate permission checks?
|
||||
- [ ] Is information leakage avoided?
|
||||
|
||||
### 5. Cross-Platform Compatibility
|
||||
|
||||
- [ ] Does the code work on different CPU architectures (x86_64, aarch64)?
|
||||
- [ ] Are platform-specific features properly gated with conditional compilation?
|
||||
- [ ] Is byte order handling correct for binary data?
|
||||
- [ ] Are there appropriate fallback implementations for unsupported platforms?
|
||||
|
||||
### 6. Code Commits and Documentation
|
||||
|
||||
- [ ] Does it comply with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)?
|
||||
- [ ] Are commit messages concise and under 72 characters for the title line?
|
||||
- [ ] Commit titles should be concise and written in English; avoid Chinese
|
||||
- [ ] Is PR description provided in copyable markdown format for easy copying?
|
||||
|
||||
## Common Patterns and Best Practices
|
||||
|
||||
### 1. Resource Management
|
||||
|
||||
```rust
|
||||
// Use RAII pattern for resource management
|
||||
pub struct ResourceGuard {
|
||||
resource: Resource,
|
||||
}
|
||||
|
||||
impl Drop for ResourceGuard {
|
||||
fn drop(&mut self) {
|
||||
// Clean up resources
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 2. Dependency Injection
|
||||
|
||||
```rust
|
||||
// Use dependency injection pattern
|
||||
pub struct Service {
|
||||
config: Arc<Config>,
|
||||
storage: Arc<dyn StorageAPI>,
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Graceful Shutdown
|
||||
|
||||
```rust
|
||||
// Implement graceful shutdown
|
||||
async fn shutdown_gracefully(shutdown_rx: &mut Receiver<()>) {
|
||||
tokio::select! {
|
||||
_ = shutdown_rx.recv() => {
|
||||
info!("Received shutdown signal");
|
||||
// Perform cleanup operations
|
||||
}
|
||||
_ = tokio::time::sleep(SHUTDOWN_TIMEOUT) => {
|
||||
warn!("Shutdown timeout reached");
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Domain-Specific Guidelines
|
||||
|
||||
### 1. Storage Operations
|
||||
|
||||
- All storage operations must support erasure coding
|
||||
- Implement read/write quorum mechanisms
|
||||
- Support data integrity verification
|
||||
|
||||
### 2. Network Communication
|
||||
|
||||
- Use gRPC for internal service communication
|
||||
- HTTP/HTTPS support for S3-compatible API
|
||||
- Implement connection pooling and retry mechanisms
|
||||
|
||||
### 3. Metadata Management
|
||||
|
||||
- Use FlatBuffers for serialization
|
||||
- Support version control and migration
|
||||
- Implement metadata caching
|
||||
|
||||
## Branch Management and Development Workflow
|
||||
|
||||
### Branch Management
|
||||
|
||||
- **🚨 CRITICAL: NEVER modify code directly on main or master branch - THIS IS ABSOLUTELY FORBIDDEN 🚨**
|
||||
- **⚠️ ANY DIRECT COMMITS TO MASTER/MAIN WILL BE REJECTED AND MUST BE REVERTED IMMEDIATELY ⚠️**
|
||||
- **🔒 ALL CHANGES MUST GO THROUGH PULL REQUESTS - NO DIRECT COMMITS TO MAIN UNDER ANY CIRCUMSTANCES 🔒**
|
||||
- **Always work on feature branches - NO EXCEPTIONS**
|
||||
- Always check the .rules.md file before starting to ensure you understand the project guidelines
|
||||
- **MANDATORY workflow for ALL changes:**
|
||||
1. `git checkout main` (switch to main branch)
|
||||
2. `git pull` (get latest changes)
|
||||
3. `git checkout -b feat/your-feature-name` (create and switch to feature branch)
|
||||
4. Make your changes ONLY on the feature branch
|
||||
5. Test thoroughly before committing
|
||||
6. Commit and push to the feature branch
|
||||
7. **Create a pull request for code review - THIS IS THE ONLY WAY TO MERGE TO MAIN**
|
||||
8. **Wait for PR approval before merging - NEVER merge your own PRs without review**
|
||||
- Use descriptive branch names following the pattern: `feat/feature-name`, `fix/issue-name`, `refactor/component-name`, etc.
|
||||
- **Double-check current branch before ANY commit: `git branch` to ensure you're NOT on main/master**
|
||||
- **Pull Request Requirements:**
|
||||
- All changes must be submitted via PR regardless of size or urgency
|
||||
- PRs must include comprehensive description and testing information
|
||||
- PRs must pass all CI/CD checks before merging
|
||||
- PRs require at least one approval from code reviewers
|
||||
- Even hotfixes and emergency changes must go through PR process
|
||||
- **Enforcement:**
|
||||
- Main branch should be protected with branch protection rules
|
||||
- Direct pushes to main should be blocked by repository settings
|
||||
- Any accidental direct commits to main must be immediately reverted via PR
|
||||
|
||||
### Development Workflow
|
||||
|
||||
## 🎯 **Core Development Principles**
|
||||
|
||||
- **🔴 Every change must be precise - don't modify unless you're confident**
|
||||
- Carefully analyze code logic and ensure complete understanding before making changes
|
||||
- When uncertain, prefer asking users or consulting documentation over blind modifications
|
||||
- Use small iterative steps, modify only necessary parts at a time
|
||||
- Evaluate impact scope before changes to ensure no new issues are introduced
|
||||
|
||||
- **🚀 GitHub PR creation prioritizes gh command usage**
|
||||
- Prefer using `gh pr create` command to create Pull Requests
|
||||
- Avoid having users manually create PRs through web interface
|
||||
- Provide clear and professional PR titles and descriptions
|
||||
- Using `gh` commands ensures better integration and automation
|
||||
|
||||
## 📝 **Code Quality Requirements**
|
||||
|
||||
- Use English for all code comments, documentation, and variable names
|
||||
- Write meaningful and descriptive names for variables, functions, and methods
|
||||
- Avoid meaningless test content like "debug 111" or placeholder values
|
||||
- Before each change, carefully read the existing code to ensure you understand its structure and implementation; do not break existing logic or introduce new issues
|
||||
- Ensure each change provides sufficient test cases to guarantee code correctness
|
||||
- Do not arbitrarily modify numbers and constants in test cases, carefully analyze their meaning to ensure test case correctness
|
||||
- When writing or modifying tests, check existing test cases to ensure they have scientific naming and rigorous logic testing, if not compliant, modify test cases to ensure scientific and rigorous testing
|
||||
- **Before committing any changes, run `cargo clippy --all-targets --all-features -- -D warnings` to ensure all code passes Clippy checks**
|
||||
- After completing each change, run `git add .` followed by `git commit -m "feat: feature description"` or `git commit -m "fix: issue description"`, ensuring compliance with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)
|
||||
- **Keep commit messages concise and under 72 characters** for the title line, use body for detailed explanations if needed
|
||||
- After completing each change, push the branch to the remote repository with `git push`
|
||||
- After each change completion, summarize the changes, do not create summary files, provide a brief change description, ensure compliance with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)
|
||||
- Provide change descriptions needed for PR in the conversation, ensure compliance with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)
|
||||
- **Always provide PR descriptions in English** after completing any changes, including:
|
||||
- Clear and concise title following Conventional Commits format
|
||||
- Detailed description of what was changed and why
|
||||
- List of key changes and improvements
|
||||
- Any breaking changes or migration notes if applicable
|
||||
- Testing information and verification steps
|
||||
- **Provide PR descriptions in copyable markdown format** enclosed in code blocks for easy one-click copying
|
||||
|
||||
## 🚫 AI Documentation Generation Restrictions
|
||||
|
||||
### Forbidden Summary Documents
|
||||
|
||||
- **Strictly forbidden to create any form of AI-generated summary documents**
|
||||
- **Do not create documents containing large amounts of emoji, elaborate formatting tables, or other typical AI-style content**
|
||||
- **Do not generate the following types of documents in the project:**
|
||||
- Benchmark summary documents (BENCHMARK*.md)
|
||||
- Implementation comparison analysis documents (IMPLEMENTATION_COMPARISON*.md)
|
||||
- Performance analysis report documents
|
||||
- Architecture summary documents
|
||||
- Feature comparison documents
|
||||
- Any documents with large amounts of emoji and formatted content
|
||||
- **If documentation is needed, only create when explicitly requested by the user, and maintain a concise and practical style**
|
||||
- **Documentation should focus on actually needed information, avoiding excessive formatting and decorative content**
|
||||
- **Any discovered AI-generated summary documents should be immediately deleted**
|
||||
|
||||
### Allowed Documentation Types
|
||||
|
||||
- README.md (project introduction, keep concise)
|
||||
- Technical documentation (only create when explicitly needed)
|
||||
- User manual (only create when explicitly needed)
|
||||
- API documentation (generated from code)
|
||||
- Changelog (CHANGELOG.md)
|
||||
|
||||
These rules should serve as guiding principles when developing the RustFS project, ensuring code quality, performance, and maintainability.
|
||||
24
.vscode/launch.json
vendored
24
.vscode/launch.json
vendored
@@ -22,6 +22,7 @@
|
||||
"env": {
|
||||
"RUST_LOG": "rustfs=debug,ecstore=info,s3s=debug,iam=debug",
|
||||
"RUSTFS_SKIP_BACKGROUND_TASK": "on",
|
||||
//"RUSTFS_OBS_LOG_DIRECTORY": "./deploy/logs",
|
||||
// "RUSTFS_POLICY_PLUGIN_URL":"http://localhost:8181/v1/data/rustfs/authz/allow",
|
||||
// "RUSTFS_POLICY_PLUGIN_AUTH_TOKEN":"your-opa-token"
|
||||
},
|
||||
@@ -85,6 +86,18 @@
|
||||
"cwd": "${workspaceFolder}",
|
||||
//"stopAtEntry": false,
|
||||
//"preLaunchTask": "cargo build",
|
||||
"env": {
|
||||
"RUSTFS_ACCESS_KEY": "rustfsadmin",
|
||||
"RUSTFS_SECRET_KEY": "rustfsadmin",
|
||||
"RUSTFS_VOLUMES": "./target/volume/test{1...4}",
|
||||
"RUSTFS_ADDRESS": ":9000",
|
||||
"RUSTFS_CONSOLE_ENABLE": "true",
|
||||
// "RUSTFS_OBS_TRACE_ENDPOINT": "http://127.0.0.1:4318/v1/traces", // jeager otlp http endpoint
|
||||
// "RUSTFS_OBS_METRIC_ENDPOINT": "http://127.0.0.1:4318/v1/metrics", // default otlp http endpoint
|
||||
// "RUSTFS_OBS_LOG_ENDPOINT": "http://127.0.0.1:4318/v1/logs", // default otlp http endpoint
|
||||
"RUSTFS_CONSOLE_ADDRESS": "127.0.0.1:9001",
|
||||
"RUSTFS_OBS_LOG_DIRECTORY": "./target/logs",
|
||||
},
|
||||
"sourceLanguages": [
|
||||
"rust"
|
||||
],
|
||||
@@ -93,8 +106,15 @@
|
||||
"name": "Debug executable target/debug/test",
|
||||
"type": "lldb",
|
||||
"request": "launch",
|
||||
"program": "${workspaceFolder}/target/debug/deps/lifecycle_integration_test-5eb7590b8f3bea55",
|
||||
"args": [],
|
||||
"program": "${workspaceFolder}/target/debug/deps/lifecycle_integration_test-5915cbfcab491b3b",
|
||||
"args": [
|
||||
"--skip",
|
||||
"test_lifecycle_expiry_basic",
|
||||
"--skip",
|
||||
"test_lifecycle_expiry_deletemarker",
|
||||
//"--skip",
|
||||
//"test_lifecycle_transition_basic",
|
||||
],
|
||||
"cwd": "${workspaceFolder}",
|
||||
//"stopAtEntry": false,
|
||||
//"preLaunchTask": "cargo build",
|
||||
|
||||
@@ -8,12 +8,14 @@ The workspace root hosts shared dependencies in `Cargo.toml`. The service binary
|
||||
|
||||
## Build, Test, and Development Commands
|
||||
Run `cargo check --all-targets` for fast validation. Build release binaries via `cargo build --release` or the pipeline-aligned `make build`. Use `./build-rustfs.sh --dev` for iterative development and `./build-rustfs.sh --platform <target>` for cross-compiles. Prefer `make pre-commit` before pushing to cover formatting, clippy, checks, and tests.
|
||||
Always ensure `cargo fmt --all --check`, `cargo test --workspace --exclude e2e_test`, and `cargo clippy --all-targets --all-features -- -D warnings` complete successfully after each code change to keep the tree healthy and warning-free.
|
||||
|
||||
## Coding Style & Naming Conventions
|
||||
Formatting follows the repo `rustfmt.toml` (130-column width). Use `snake_case` for items, `PascalCase` for types, and `SCREAMING_SNAKE_CASE` for constants. Avoid `unwrap()` or `expect()` outside tests; bubble errors with `Result` and crate-specific `thiserror` types. Keep async code non-blocking and offload CPU-heavy work with `tokio::task::spawn_blocking` when necessary.
|
||||
|
||||
## Testing Guidelines
|
||||
Co-locate unit tests with their modules and give behavior-led names such as `handles_expired_token`. Integration suites belong in each crate’s `tests/` directory, while exhaustive end-to-end scenarios live in `crates/e2e_test/`. Run `cargo test --workspace --exclude e2e_test` during iteration, `cargo nextest run --all --exclude e2e_test` when available, and finish with `cargo test --all` before requesting review. Use `NO_PROXY=127.0.0.1,localhost HTTP_PROXY= HTTPS_PROXY=` for KMS e2e tests.
|
||||
When fixing bugs or adding features, include regression tests that capture the new behavior so future changes cannot silently break it.
|
||||
|
||||
## Commit & Pull Request Guidelines
|
||||
Work on feature branches (e.g., `feat/...`) after syncing `main`. Follow Conventional Commits under 72 characters (e.g., `feat: add kms key rotation`). Each commit must compile, format cleanly, and pass `make pre-commit`. Open PRs with a concise summary, note verification commands, link relevant issues, and wait for reviewer approval.
|
||||
|
||||
75
CLA.md
75
CLA.md
@@ -1,39 +1,88 @@
|
||||
RustFS Individual Contributor License Agreement
|
||||
|
||||
Thank you for your interest in contributing documentation and related software code to a project hosted or managed by RustFS. In order to clarify the intellectual property license granted with Contributions from any person or entity, RustFS must have a Contributor License Agreement (“CLA”) on file that has been signed by each Contributor, indicating agreement to the license terms below. This version of the Contributor License Agreement allows an individual to submit Contributions to the applicable project. If you are making a submission on behalf of a legal entity, then you should sign the separate Corporate Contributor License Agreement.
|
||||
Thank you for your interest in contributing documentation and related software code to a project hosted or managed by
|
||||
RustFS. In order to clarify the intellectual property license granted with Contributions from any person or entity,
|
||||
RustFS must have a Contributor License Agreement ("CLA") on file that has been signed by each Contributor, indicating
|
||||
agreement to the license terms below. This version of the Contributor License Agreement allows an individual to submit
|
||||
Contributions to the applicable project. If you are making a submission on behalf of a legal entity, then you should
|
||||
sign the separate Corporate Contributor License Agreement.
|
||||
|
||||
You accept and agree to the following terms and conditions for Your present and future Contributions submitted to RustFS. You hereby irrevocably assign and transfer to RustFS all right, title, and interest in and to Your Contributions, including all copyrights and other intellectual property rights therein.
|
||||
You accept and agree to the following terms and conditions for Your present and future Contributions submitted to
|
||||
RustFS. You hereby irrevocably assign and transfer to RustFS all right, title, and interest in and to Your
|
||||
Contributions, including all copyrights and other intellectual property rights therein.
|
||||
|
||||
Definitions
|
||||
|
||||
“You” (or “Your”) shall mean the copyright owner or legal entity authorized by the copyright owner that is making this Agreement with RustFS. For legal entities, the entity making a Contribution and all other entities that control, are controlled by, or are under common control with that entity are considered to be a single Contributor. For the purposes of this definition, “control” means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
“You” (or “Your”) shall mean the copyright owner or legal entity authorized by the copyright owner that is making this
|
||||
Agreement with RustFS. For legal entities, the entity making a Contribution and all other entities that control, are
|
||||
controlled by, or are under common control with that entity are considered to be a single Contributor. For the purposes
|
||||
of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such
|
||||
entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares,
|
||||
or (iii) beneficial ownership of such entity.
|
||||
|
||||
“Contribution” shall mean any original work of authorship, including any modifications or additions to an existing work, that is intentionally submitted by You to RustFS for inclusion in, or documentation of, any of the products or projects owned or managed by RustFS (the “Work”), including without limitation any Work described in Schedule A. For the purposes of this definition, “submitted” means any form of electronic or written communication sent to RustFS or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, RustFS for the purpose of discussing and improving the Work.
|
||||
“Contribution” shall mean any original work of authorship, including any modifications or additions to an existing work,
|
||||
that is intentionally submitted by You to RustFS for inclusion in, or documentation of, any of the products or projects
|
||||
owned or managed by RustFS (the "Work"), including without limitation any Work described in Schedule A. For the purposes
|
||||
of this definition, "submitted" means any form of electronic or written communication sent to RustFS or its
|
||||
representatives, including but not limited to communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, RustFS for the purpose of discussing and improving the
|
||||
Work.
|
||||
|
||||
Assignment of Copyright
|
||||
|
||||
Subject to the terms and conditions of this Agreement, You hereby irrevocably assign and transfer to RustFS all right, title, and interest in and to Your Contributions, including all copyrights and other intellectual property rights therein, for the entire term of such rights, including all renewals and extensions. You agree to execute all documents and take all actions as may be reasonably necessary to vest in RustFS the ownership of Your Contributions and to assist RustFS in perfecting, maintaining, and enforcing its rights in Your Contributions.
|
||||
Subject to the terms and conditions of this Agreement, You hereby irrevocably assign and transfer to RustFS all right,
|
||||
title, and interest in and to Your Contributions, including all copyrights and other intellectual property rights
|
||||
therein, for the entire term of such rights, including all renewals and extensions. You agree to execute all documents
|
||||
and take all actions as may be reasonably necessary to vest in RustFS the ownership of Your Contributions and to assist
|
||||
RustFS in perfecting, maintaining, and enforcing its rights in Your Contributions.
|
||||
|
||||
Grant of Patent License
|
||||
|
||||
Subject to the terms and conditions of this Agreement, You hereby grant to RustFS and to recipients of documentation and software distributed by RustFS a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by You that are necessarily infringed by Your Contribution(s) alone or by combination of Your Contribution(s) with the Work to which such Contribution(s) was submitted. If any entity institutes patent litigation against You or any other entity (including a cross-claim or counterclaim in a lawsuit) alleging that your Contribution, or the Work to which you have contributed, constitutes direct or contributory patent infringement, then any patent licenses granted to that entity under this Agreement for that Contribution or Work shall terminate as of the date such litigation is filed.
|
||||
Subject to the terms and conditions of this Agreement, You hereby grant to RustFS and to recipients of documentation and
|
||||
software distributed by RustFS a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as
|
||||
stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the
|
||||
Work, where such license applies only to those patent claims licensable by You that are necessarily infringed by Your
|
||||
Contribution(s) alone or by combination of Your Contribution(s) with the Work to which such Contribution(s) was
|
||||
submitted. If any entity institutes patent litigation against You or any other entity (including a cross-claim or
|
||||
counterclaim in a lawsuit) alleging that your Contribution, or the Work to which you have contributed, constitutes
|
||||
direct or contributory patent infringement, then any patent licenses granted to that entity under this Agreement for
|
||||
that Contribution or Work shall terminate as of the date such litigation is filed.
|
||||
|
||||
You represent that you are legally entitled to grant the above assignment and license.
|
||||
|
||||
You represent that each of Your Contributions is Your original creation (see section 7 for submissions on behalf of others). You represent that Your Contribution submissions include complete details of any third-party license or other restriction (including, but not limited to, related patents and trademarks) of which you are personally aware and which are associated with any part of Your Contributions.
|
||||
You represent that each of Your Contributions is Your original creation (see section 7 for submissions on behalf of
|
||||
others). You represent that Your Contribution submissions include complete details of any third-party license or other
|
||||
restriction (including, but not limited to, related patents and trademarks) of which you are personally aware and which
|
||||
are associated with any part of Your Contributions.
|
||||
|
||||
You are not expected to provide support for Your Contributions, except to the extent You desire to provide support. You may provide support for free, for a fee, or not at all. Unless required by applicable law or agreed to in writing, You provide Your Contributions on an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
You are not expected to provide support for Your Contributions, except to the extent You desire to provide support. You
|
||||
may provide support for free, for a fee, or not at all. Unless required by applicable law or agreed to in writing, You
|
||||
provide Your Contributions on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
|
||||
including, without limitation, any warranties or conditions of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR
|
||||
A PARTICULAR PURPOSE.
|
||||
|
||||
Should You wish to submit work that is not Your original creation, You may submit it to RustFS separately from any Contribution, identifying the complete details of its source and of any license or other restriction (including, but not limited to, related patents, trademarks, and license agreements) of which you are personally aware, and conspicuously marking the work as “Submitted on behalf of a third-party: [named here]”.
|
||||
Should You wish to submit work that is not Your original creation, You may submit it to RustFS separately from any
|
||||
Contribution, identifying the complete details of its source and of any license or other restriction (including, but not
|
||||
limited to, related patents, trademarks, and license agreements) of which you are personally aware, and conspicuously
|
||||
marking the work as "Submitted on behalf of a third-party: [named here]".
|
||||
|
||||
You agree to notify RustFS of any facts or circumstances of which you become aware that would make these representations inaccurate in any respect.
|
||||
You agree to notify RustFS of any facts or circumstances of which you become aware that would make these representations
|
||||
inaccurate in any respect.
|
||||
|
||||
Modification of CLA
|
||||
|
||||
RustFS reserves the right to update or modify this CLA in the future. Any updates or modifications to this CLA shall apply only to Contributions made after the effective date of the revised CLA. Contributions made prior to the update shall remain governed by the version of the CLA that was in effect at the time of submission. It is not necessary for all Contributors to re-sign the CLA when the CLA is updated or modified.
|
||||
RustFS reserves the right to update or modify this CLA in the future. Any updates or modifications to this CLA shall
|
||||
apply only to Contributions made after the effective date of the revised CLA. Contributions made prior to the update
|
||||
shall remain governed by the version of the CLA that was in effect at the time of submission. It is not necessary for
|
||||
all Contributors to re-sign the CLA when the CLA is updated or modified.
|
||||
|
||||
Governing Law and Dispute Resolution
|
||||
|
||||
This Agreement will be governed by and construed in accordance with the laws of the People’s Republic of China excluding that body of laws known as conflict of laws. The parties expressly agree that the United Nations Convention on Contracts for the International Sale of Goods will not apply. Any legal action or proceeding arising under this Agreement will be brought exclusively in the courts located in Beijing, China, and the parties hereby irrevocably consent to the personal jurisdiction and venue therein.
|
||||
This Agreement will be governed by and construed in accordance with the laws of the People's Republic of China excluding
|
||||
that body of laws known as conflict of laws. The parties expressly agree that the United Nations Convention on Contracts
|
||||
for the International Sale of Goods will not apply. Any legal action or proceeding arising under this Agreement will be
|
||||
brought exclusively in the courts located in Beijing, China, and the parties hereby irrevocably consent to the personal
|
||||
jurisdiction and venue therein.
|
||||
|
||||
For your reading convenience, this Agreement is written in parallel English and Chinese sections. To the extent there is a conflict between the English and Chinese sections, the English sections shall govern.
|
||||
For your reading convenience, this Agreement is written in parallel English and Chinese sections. To the extent there is
|
||||
a conflict between the English and Chinese sections, the English sections shall govern.
|
||||
84
CLAUDE.md
84
CLAUDE.md
@@ -4,23 +4,28 @@ This file provides guidance to Claude Code (claude.ai/code) when working with co
|
||||
|
||||
## Project Overview
|
||||
|
||||
RustFS is a high-performance distributed object storage software built with Rust, providing S3-compatible APIs and advanced features like data lakes, AI, and big data support. It's designed as an alternative to MinIO with better performance and a more business-friendly Apache 2.0 license.
|
||||
RustFS is a high-performance distributed object storage software built with Rust, providing S3-compatible APIs and
|
||||
advanced features like data lakes, AI, and big data support. It's designed as an alternative to MinIO with better
|
||||
performance and a more business-friendly Apache 2.0 license.
|
||||
|
||||
## Build Commands
|
||||
|
||||
### Primary Build Commands
|
||||
|
||||
- `cargo build --release` - Build the main RustFS binary
|
||||
- `./build-rustfs.sh` - Recommended build script that handles console resources and cross-platform compilation
|
||||
- `./build-rustfs.sh --dev` - Development build with debug symbols
|
||||
- `make build` or `just build` - Use Make/Just for standardized builds
|
||||
|
||||
### Platform-Specific Builds
|
||||
|
||||
- `./build-rustfs.sh --platform x86_64-unknown-linux-musl` - Build for musl target
|
||||
- `./build-rustfs.sh --platform aarch64-unknown-linux-gnu` - Build for ARM64
|
||||
- `make build-musl` or `just build-musl` - Build musl variant
|
||||
- `make build-cross-all` - Build all supported architectures
|
||||
|
||||
### Testing Commands
|
||||
|
||||
- `cargo test --workspace --exclude e2e_test` - Run unit tests (excluding e2e tests)
|
||||
- `cargo nextest run --all --exclude e2e_test` - Use nextest if available (faster)
|
||||
- `cargo test --all --doc` - Run documentation tests
|
||||
@@ -28,22 +33,30 @@ RustFS is a high-performance distributed object storage software built with Rust
|
||||
- `make pre-commit` - Run all quality checks (fmt, clippy, check, test)
|
||||
|
||||
### End-to-End Testing
|
||||
|
||||
- `cargo test --package e2e_test` - Run all e2e tests
|
||||
- `./scripts/run_e2e_tests.sh` - Run e2e tests via script
|
||||
- `./scripts/run_scanner_benchmarks.sh` - Run scanner performance benchmarks
|
||||
|
||||
### KMS-Specific Testing (with proxy bypass)
|
||||
- `NO_PROXY=127.0.0.1,localhost HTTP_PROXY= HTTPS_PROXY= http_proxy= https_proxy= cargo test --package e2e_test test_local_kms_end_to_end -- --nocapture --test-threads=1` - Run complete KMS end-to-end test
|
||||
- `NO_PROXY=127.0.0.1,localhost HTTP_PROXY= HTTPS_PROXY= http_proxy= https_proxy= cargo test --package e2e_test kms:: -- --nocapture --test-threads=1` - Run all KMS tests
|
||||
|
||||
-
|
||||
`NO_PROXY=127.0.0.1,localhost HTTP_PROXY= HTTPS_PROXY= http_proxy= https_proxy= cargo test --package e2e_test test_local_kms_end_to_end -- --nocapture --test-threads=1` -
|
||||
Run complete KMS end-to-end test
|
||||
-
|
||||
`NO_PROXY=127.0.0.1,localhost HTTP_PROXY= HTTPS_PROXY= http_proxy= https_proxy= cargo test --package e2e_test kms:: -- --nocapture --test-threads=1` -
|
||||
Run all KMS tests
|
||||
- `cargo test --package e2e_test test_local_kms_key_isolation -- --nocapture --test-threads=1` - Test KMS key isolation
|
||||
- `cargo test --package e2e_test test_local_kms_large_file -- --nocapture --test-threads=1` - Test KMS with large files
|
||||
|
||||
### Code Quality
|
||||
|
||||
- `cargo fmt --all` - Format code
|
||||
- `cargo clippy --all-targets --all-features -- -D warnings` - Lint code
|
||||
- `make pre-commit` or `just pre-commit` - Run all quality checks (fmt, clippy, check, test)
|
||||
|
||||
### Quick Development Commands
|
||||
|
||||
- `make help` or `just help` - Show all available commands with descriptions
|
||||
- `make help-build` - Show detailed build options and cross-compilation help
|
||||
- `make help-docker` - Show comprehensive Docker build and deployment options
|
||||
@@ -52,6 +65,7 @@ RustFS is a high-performance distributed object storage software built with Rust
|
||||
- `./scripts/probe.sh` - Health check and connectivity testing
|
||||
|
||||
### Docker Build Commands
|
||||
|
||||
- `make docker-buildx` - Build multi-architecture production images
|
||||
- `make docker-dev-local` - Build development image for local use
|
||||
- `./docker-buildx.sh --push` - Build and push production images
|
||||
@@ -61,6 +75,7 @@ RustFS is a high-performance distributed object storage software built with Rust
|
||||
### Core Components
|
||||
|
||||
**Main Binary (`rustfs/`):**
|
||||
|
||||
- Entry point at `rustfs/src/main.rs`
|
||||
- Core modules: admin, auth, config, server, storage, license management, profiling
|
||||
- HTTP server with S3-compatible APIs
|
||||
@@ -68,10 +83,11 @@ RustFS is a high-performance distributed object storage software built with Rust
|
||||
- Parallel service initialization with DNS resolver, bucket metadata, and IAM
|
||||
|
||||
**Key Crates (`crates/`):**
|
||||
|
||||
- `ecstore` - Erasure coding storage implementation (core storage layer)
|
||||
- `iam` - Identity and Access Management
|
||||
- `kms` - Key Management Service for encryption and key handling
|
||||
- `madmin` - Management dashboard and admin API interface
|
||||
- `madmin` - Management dashboard and admin API interface
|
||||
- `s3select-api` & `s3select-query` - S3 Select API and query engine
|
||||
- `config` - Configuration management with notify features
|
||||
- `crypto` - Cryptography and security features
|
||||
@@ -94,6 +110,7 @@ RustFS is a high-performance distributed object storage software built with Rust
|
||||
- `targets` - Target-specific configurations and utilities
|
||||
|
||||
### Build System
|
||||
|
||||
- Cargo workspace with 25+ crates (including new KMS functionality)
|
||||
- Custom `build-rustfs.sh` script for advanced build options
|
||||
- Multi-architecture Docker builds via `docker-buildx.sh`
|
||||
@@ -103,10 +120,11 @@ RustFS is a high-performance distributed object storage software built with Rust
|
||||
- Performance benchmarking and audit workflows
|
||||
|
||||
### Key Dependencies
|
||||
|
||||
- `axum` - HTTP framework for S3 API server
|
||||
- `tokio` - Async runtime
|
||||
- `s3s` - S3 protocol implementation library
|
||||
- `datafusion` - For S3 Select query processing
|
||||
- `datafusion` - For S3 Select query processing
|
||||
- `hyper`/`hyper-util` - HTTP client/server utilities
|
||||
- `rustls` - TLS implementation
|
||||
- `serde`/`serde_json` - Serialization
|
||||
@@ -115,6 +133,7 @@ RustFS is a high-performance distributed object storage software built with Rust
|
||||
- `tikv-jemallocator` - Memory allocator for Linux GNU builds
|
||||
|
||||
### Development Workflow
|
||||
|
||||
- Console resources are embedded during build via `rust-embed`
|
||||
- Protocol buffers generated via custom `gproto` binary
|
||||
- E2E tests in separate crate (`e2e_test`) with comprehensive KMS testing
|
||||
@@ -124,14 +143,16 @@ RustFS is a high-performance distributed object storage software built with Rust
|
||||
- Git hooks setup available via `make setup-hooks` or `just setup-hooks`
|
||||
|
||||
### Performance & Observability
|
||||
|
||||
- Performance profiling available with `pprof` integration (disabled on Windows)
|
||||
- Profiling enabled via environment variables in production
|
||||
- Built-in observability with OpenTelemetry integration
|
||||
- Background services (scanner, heal) can be controlled via environment variables:
|
||||
- `RUSTFS_ENABLE_SCANNER` (default: true)
|
||||
- `RUSTFS_ENABLE_HEAL` (default: true)
|
||||
- `RUSTFS_ENABLE_SCANNER` (default: true)
|
||||
- `RUSTFS_ENABLE_HEAL` (default: true)
|
||||
|
||||
### Service Architecture
|
||||
|
||||
- Service state management with graceful shutdown handling
|
||||
- Parallel initialization of core systems (DNS, bucket metadata, IAM)
|
||||
- Event notification system with MQTT and webhook support
|
||||
@@ -139,6 +160,7 @@ RustFS is a high-performance distributed object storage software built with Rust
|
||||
- Jemalloc allocator for Linux GNU targets for better performance
|
||||
|
||||
## Environment Variables
|
||||
|
||||
- `RUSTFS_ENABLE_SCANNER` - Enable/disable background data scanner (default: true)
|
||||
- `RUSTFS_ENABLE_HEAL` - Enable/disable auto-heal functionality (default: true)
|
||||
- Various profiling and observability controls
|
||||
@@ -146,12 +168,14 @@ RustFS is a high-performance distributed object storage software built with Rust
|
||||
- Test environment configurations in `scripts/dev_rustfs.env`
|
||||
|
||||
### KMS Environment Variables
|
||||
|
||||
- `NO_PROXY=127.0.0.1,localhost` - Required for KMS E2E tests to bypass proxy
|
||||
- `HTTP_PROXY=` `HTTPS_PROXY=` `http_proxy=` `https_proxy=` - Clear proxy settings for local KMS testing
|
||||
|
||||
## KMS (Key Management Service) Architecture
|
||||
|
||||
### KMS Implementation Status
|
||||
|
||||
- **Full KMS Integration:** Complete implementation with Local and Vault backends
|
||||
- **Automatic Configuration:** KMS auto-configures on startup with `--kms-enable` flag
|
||||
- **Encryption Support:** Full S3-compatible server-side encryption (SSE-S3, SSE-KMS, SSE-C)
|
||||
@@ -159,18 +183,21 @@ RustFS is a high-performance distributed object storage software built with Rust
|
||||
- **Production Ready:** Comprehensive testing including large files and key isolation
|
||||
|
||||
### KMS Configuration
|
||||
|
||||
- **Local Backend:** `--kms-backend local --kms-key-dir <path> --kms-default-key-id <id>`
|
||||
- **Vault Backend:** `--kms-backend vault --kms-vault-endpoint <url> --kms-vault-key-name <name>`
|
||||
- **Auto-startup:** KMS automatically initializes when `--kms-enable` is provided
|
||||
- **Manual Configuration:** Also supports dynamic configuration via admin API
|
||||
|
||||
### S3 Encryption Support
|
||||
|
||||
- **SSE-S3:** Server-side encryption with S3-managed keys (`ServerSideEncryption: AES256`)
|
||||
- **SSE-KMS:** Server-side encryption with KMS-managed keys (`ServerSideEncryption: aws:kms`)
|
||||
- **SSE-C:** Server-side encryption with customer-provided keys
|
||||
- **Response Headers:** All encryption types return correct `server_side_encryption` headers in PUT/GET responses
|
||||
|
||||
### KMS Testing Architecture
|
||||
|
||||
- **Comprehensive E2E Tests:** Located in `crates/e2e_test/src/kms/`
|
||||
- **Test Environments:** Automated test environment setup with temporary directories
|
||||
- **Encryption Coverage:** Tests all three encryption types (SSE-S3, SSE-KMS, SSE-C)
|
||||
@@ -178,6 +205,7 @@ RustFS is a high-performance distributed object storage software built with Rust
|
||||
- **Edge Cases:** Key isolation, large file handling, error scenarios
|
||||
|
||||
### Key Files for KMS
|
||||
|
||||
- `crates/kms/` - Core KMS implementation with Local/Vault backends
|
||||
- `rustfs/src/main.rs` - KMS auto-initialization in `init_kms_system()`
|
||||
- `rustfs/src/storage/ecfs.rs` - SSE encryption/decryption in PUT/GET operations
|
||||
@@ -186,54 +214,62 @@ RustFS is a high-performance distributed object storage software built with Rust
|
||||
- `crates/rio/src/encrypt_reader.rs` - Streaming encryption for large files
|
||||
|
||||
## Code Style and Safety Requirements
|
||||
|
||||
- **Language Requirements:**
|
||||
- Communicate with me in Chinese, but **only English can be used in code files**
|
||||
- Code comments, function names, variable names, and all text in source files must be in English only
|
||||
- No Chinese characters, emojis, or non-ASCII characters are allowed in any source code files
|
||||
- This includes comments, strings, documentation, and any other text within code files
|
||||
- Communicate with me in Chinese, but **only English can be used in code files**
|
||||
- Code comments, function names, variable names, and all text in source files must be in English only
|
||||
- No Chinese characters, emojis, or non-ASCII characters are allowed in any source code files
|
||||
- This includes comments, strings, documentation, and any other text within code files
|
||||
- **Safety-Critical Rules:**
|
||||
- `unsafe_code = "deny"` enforced at workspace level
|
||||
- Never use `unwrap()`, `expect()`, or panic-inducing code except in tests
|
||||
- Avoid blocking I/O operations in async contexts
|
||||
- Use proper error handling with `Result<T, E>` and `Option<T>`
|
||||
- Follow Rust's ownership and borrowing rules strictly
|
||||
- `unsafe_code = "deny"` enforced at workspace level
|
||||
- Never use `unwrap()`, `expect()`, or panic-inducing code except in tests
|
||||
- Avoid blocking I/O operations in async contexts
|
||||
- Use proper error handling with `Result<T, E>` and `Option<T>`
|
||||
- Follow Rust's ownership and borrowing rules strictly
|
||||
- **Performance Guidelines:**
|
||||
- Use `cargo clippy --all-targets --all-features -- -D warnings` to catch issues
|
||||
- Prefer `anyhow` for error handling in applications, `thiserror` for libraries
|
||||
- Use appropriate async runtimes and avoid blocking calls
|
||||
- Use `cargo clippy --all-targets --all-features -- -D warnings` to catch issues
|
||||
- Prefer `anyhow` for error handling in applications, `thiserror` for libraries
|
||||
- Use appropriate async runtimes and avoid blocking calls
|
||||
- **Testing Standards:**
|
||||
- All new features must include comprehensive tests
|
||||
- Use `#[cfg(test)]` for test-only code that may use panic macros
|
||||
- E2E tests should cover KMS integration scenarios
|
||||
- All new features must include comprehensive tests
|
||||
- Use `#[cfg(test)]` for test-only code that may use panic macros
|
||||
- E2E tests should cover KMS integration scenarios
|
||||
|
||||
## Common Development Tasks
|
||||
|
||||
### Running KMS Tests Locally
|
||||
|
||||
1. **Clear proxy settings:** KMS tests require direct localhost connections
|
||||
2. **Use serial execution:** `--test-threads=1` prevents port conflicts
|
||||
3. **Enable output:** `--nocapture` shows detailed test logs
|
||||
4. **Full command:** `NO_PROXY=127.0.0.1,localhost HTTP_PROXY= HTTPS_PROXY= http_proxy= https_proxy= cargo test --package e2e_test test_local_kms_end_to_end -- --nocapture --test-threads=1`
|
||||
4. **Full command:**
|
||||
`NO_PROXY=127.0.0.1,localhost HTTP_PROXY= HTTPS_PROXY= http_proxy= https_proxy= cargo test --package e2e_test test_local_kms_end_to_end -- --nocapture --test-threads=1`
|
||||
|
||||
### KMS Development Workflow
|
||||
|
||||
1. **Code changes:** Modify KMS-related code in `crates/kms/` or `rustfs/src/`
|
||||
2. **Compile:** Always run `cargo build` after changes
|
||||
3. **Test specific functionality:** Use targeted test commands for faster iteration
|
||||
4. **Full validation:** Run complete end-to-end tests before commits
|
||||
|
||||
### Debugging KMS Issues
|
||||
|
||||
- **Server startup:** Check that KMS auto-initializes with debug logs
|
||||
- **Encryption failures:** Verify SSE headers are correctly set in both PUT and GET responses
|
||||
- **Test failures:** Use `--nocapture` to see detailed error messages
|
||||
- **Key management:** Test admin API endpoints with proper authentication
|
||||
|
||||
## Important Reminders
|
||||
|
||||
- **Always compile after code changes:** Use `cargo build` to catch errors early
|
||||
- **Don't bypass tests:** All functionality must be properly tested, not worked around
|
||||
- **Use proper error handling:** Never use `unwrap()` or `expect()` in production code (except tests)
|
||||
- **Follow S3 compatibility:** Ensure all encryption types return correct HTTP response headers
|
||||
|
||||
# important-instruction-reminders
|
||||
|
||||
Do what has been asked; nothing more, nothing less.
|
||||
NEVER create files unless they're absolutely necessary for achieving your goal.
|
||||
ALWAYS prefer editing an existing file to creating a new one.
|
||||
NEVER proactively create documentation files (*.md) or README files. Only create documentation files if explicitly requested by the User.
|
||||
NEVER proactively create documentation files (*.md) or README files. Only create documentation files if explicitly
|
||||
requested by the User.
|
||||
2821
Cargo.lock
generated
2821
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
304
Cargo.toml
304
Cargo.toml
@@ -63,103 +63,143 @@ unsafe_code = "deny"
|
||||
all = "warn"
|
||||
|
||||
[workspace.dependencies]
|
||||
# RustFS Internal Crates
|
||||
rustfs = { path = "./rustfs", version = "0.0.5" }
|
||||
rustfs-ahm = { path = "crates/ahm", version = "0.0.5" }
|
||||
rustfs-s3select-api = { path = "crates/s3select-api", version = "0.0.5" }
|
||||
rustfs-appauth = { path = "crates/appauth", version = "0.0.5" }
|
||||
rustfs-audit = { path = "crates/audit", version = "0.0.5" }
|
||||
rustfs-checksums = { path = "crates/checksums", version = "0.0.5" }
|
||||
rustfs-common = { path = "crates/common", version = "0.0.5" }
|
||||
rustfs-config = { path = "./crates/config", version = "0.0.5" }
|
||||
rustfs-crypto = { path = "crates/crypto", version = "0.0.5" }
|
||||
rustfs-ecstore = { path = "crates/ecstore", version = "0.0.5" }
|
||||
rustfs-filemeta = { path = "crates/filemeta", version = "0.0.5" }
|
||||
rustfs-iam = { path = "crates/iam", version = "0.0.5" }
|
||||
rustfs-kms = { path = "crates/kms", version = "0.0.5" }
|
||||
rustfs-lock = { path = "crates/lock", version = "0.0.5" }
|
||||
rustfs-madmin = { path = "crates/madmin", version = "0.0.5" }
|
||||
rustfs-mcp = { path = "crates/mcp", version = "0.0.5" }
|
||||
rustfs-notify = { path = "crates/notify", version = "0.0.5" }
|
||||
rustfs-obs = { path = "crates/obs", version = "0.0.5" }
|
||||
rustfs-policy = { path = "crates/policy", version = "0.0.5" }
|
||||
rustfs-protos = { path = "crates/protos", version = "0.0.5" }
|
||||
rustfs-s3select-query = { path = "crates/s3select-query", version = "0.0.5" }
|
||||
rustfs = { path = "./rustfs", version = "0.0.5" }
|
||||
rustfs-zip = { path = "./crates/zip", version = "0.0.5" }
|
||||
rustfs-config = { path = "./crates/config", version = "0.0.5" }
|
||||
rustfs-obs = { path = "crates/obs", version = "0.0.5" }
|
||||
rustfs-notify = { path = "crates/notify", version = "0.0.5" }
|
||||
rustfs-utils = { path = "crates/utils", version = "0.0.5" }
|
||||
rustfs-rio = { path = "crates/rio", version = "0.0.5" }
|
||||
rustfs-filemeta = { path = "crates/filemeta", version = "0.0.5" }
|
||||
rustfs-s3select-api = { path = "crates/s3select-api", version = "0.0.5" }
|
||||
rustfs-s3select-query = { path = "crates/s3select-query", version = "0.0.5" }
|
||||
rustfs-signer = { path = "crates/signer", version = "0.0.5" }
|
||||
rustfs-checksums = { path = "crates/checksums", version = "0.0.5" }
|
||||
rustfs-workers = { path = "crates/workers", version = "0.0.5" }
|
||||
rustfs-mcp = { path = "crates/mcp", version = "0.0.5" }
|
||||
rustfs-targets = { path = "crates/targets", version = "0.0.5" }
|
||||
rustfs-kms = { path = "crates/kms", version = "0.0.5" }
|
||||
aes-gcm = { version = "0.10.3", features = ["std"] }
|
||||
anyhow = "1.0.100"
|
||||
arc-swap = "1.7.1"
|
||||
argon2 = { version = "0.5.3", features = ["std"] }
|
||||
atoi = "2.0.0"
|
||||
rustfs-utils = { path = "crates/utils", version = "0.0.5" }
|
||||
rustfs-workers = { path = "crates/workers", version = "0.0.5" }
|
||||
rustfs-zip = { path = "./crates/zip", version = "0.0.5" }
|
||||
|
||||
# Async Runtime and Networking
|
||||
async-channel = "2.5.0"
|
||||
async-compression = { version = "0.4.19" }
|
||||
async-recursion = "1.1.1"
|
||||
async-trait = "0.1.89"
|
||||
async-compression = { version = "0.4.19" }
|
||||
atomic_enum = "0.3.0"
|
||||
aws-config = { version = "1.8.8" }
|
||||
aws-credential-types = { version = "1.2.8" }
|
||||
aws-smithy-types = { version = "1.3.3" }
|
||||
aws-sdk-s3 = { version = "1.108.0", default-features = false, features = ["sigv4a", "rustls", "rt-tokio"] }
|
||||
axum = "0.8.6"
|
||||
axum-extra = "0.10.3"
|
||||
axum-server = { version = "0.7.2", features = ["tls-rustls-no-provider"], default-features = false }
|
||||
base64-simd = "0.8.0"
|
||||
base64 = "0.22.1"
|
||||
brotli = "8.0.2"
|
||||
bytes = { version = "1.10.1", features = ["serde"] }
|
||||
bytesize = "2.1.0"
|
||||
byteorder = "1.5.0"
|
||||
cfg-if = "1.0.4"
|
||||
convert_case = "0.8.0"
|
||||
crc-fast = "1.3.0"
|
||||
chacha20poly1305 = { version = "0.10.1" }
|
||||
chrono = { version = "0.4.42", features = ["serde"] }
|
||||
clap = { version = "4.5.49", features = ["derive", "env"] }
|
||||
const-str = { version = "0.7.0", features = ["std", "proc"] }
|
||||
crc32fast = "1.5.0"
|
||||
crc32c = "0.6.8"
|
||||
crc64fast-nvme = "1.2.0"
|
||||
criterion = { version = "0.7", features = ["html_reports"] }
|
||||
crossbeam-queue = "0.3.12"
|
||||
datafusion = "50.2.0"
|
||||
derive_builder = "0.20.2"
|
||||
enumset = "1.1.10"
|
||||
flatbuffers = "25.9.23"
|
||||
flate2 = "1.1.4"
|
||||
flexi_logger = { version = "0.31.7", features = ["trc", "dont_minimize_extra_stacks", "compress", "kv"] }
|
||||
form_urlencoded = "1.2.2"
|
||||
axum = "0.8.7"
|
||||
axum-extra = "0.12.2"
|
||||
axum-server = { version = "0.8.0", features = ["tls-rustls-no-provider"], default-features = false }
|
||||
futures = "0.3.31"
|
||||
futures-core = "0.3.31"
|
||||
futures-util = "0.3.31"
|
||||
hyper = { version = "1.8.1", features = ["http2", "http1", "server"] }
|
||||
hyper-rustls = { version = "0.27.7", default-features = false, features = ["native-tokio", "http1", "tls12", "logging", "http2", "ring", "webpki-roots"] }
|
||||
hyper-util = { version = "0.1.19", features = ["tokio", "server-auto", "server-graceful"] }
|
||||
http = "1.4.0"
|
||||
http-body = "1.0.1"
|
||||
reqwest = { version = "0.12.25", default-features = false, features = ["rustls-tls-webpki-roots", "charset", "http2", "system-proxy", "stream", "json", "blocking"] }
|
||||
socket2 = "0.6.1"
|
||||
tokio = { version = "1.48.0", features = ["fs", "rt-multi-thread"] }
|
||||
tokio-rustls = { version = "0.26.4", default-features = false, features = ["logging", "tls12", "ring"] }
|
||||
tokio-stream = { version = "0.1.17" }
|
||||
tokio-test = "0.4.4"
|
||||
tokio-util = { version = "0.7.17", features = ["io", "compat"] }
|
||||
tonic = { version = "0.14.2", features = ["gzip"] }
|
||||
tonic-prost = { version = "0.14.2" }
|
||||
tonic-prost-build = { version = "0.14.2" }
|
||||
tower = { version = "0.5.2", features = ["timeout"] }
|
||||
tower-http = { version = "0.6.8", features = ["cors"] }
|
||||
|
||||
# Serialization and Data Formats
|
||||
bytes = { version = "1.11.0", features = ["serde"] }
|
||||
bytesize = "2.3.1"
|
||||
byteorder = "1.5.0"
|
||||
flatbuffers = "25.9.23"
|
||||
form_urlencoded = "1.2.2"
|
||||
prost = "0.14.1"
|
||||
quick-xml = "0.38.4"
|
||||
rmcp = { version = "0.10.0" }
|
||||
rmp = { version = "0.8.14" }
|
||||
rmp-serde = { version = "1.3.0" }
|
||||
serde = { version = "1.0.228", features = ["derive"] }
|
||||
serde_json = { version = "1.0.145", features = ["raw_value"] }
|
||||
serde_urlencoded = "0.7.1"
|
||||
schemars = "1.1.0"
|
||||
|
||||
# Cryptography and Security
|
||||
aes-gcm = { version = "0.11.0-rc.2", features = ["rand_core"] }
|
||||
argon2 = { version = "0.6.0-rc.3", features = ["std"] }
|
||||
blake3 = { version = "1.8.2", features = ["rayon", "mmap"] }
|
||||
chacha20poly1305 = { version = "0.11.0-rc.2" }
|
||||
crc-fast = "1.6.0"
|
||||
hmac = { version = "0.13.0-rc.3" }
|
||||
jsonwebtoken = { version = "10.2.0", features = ["rust_crypto"] }
|
||||
pbkdf2 = "0.13.0-rc.3"
|
||||
rsa = { version = "0.10.0-rc.10" }
|
||||
rustls = { version = "0.23.35", features = ["ring", "logging", "std", "tls12"], default-features = false }
|
||||
rustls-pemfile = "2.2.0"
|
||||
rustls-pki-types = "1.13.1"
|
||||
sha1 = "0.11.0-rc.3"
|
||||
sha2 = "0.11.0-rc.3"
|
||||
subtle = "2.6"
|
||||
zeroize = { version = "1.8.2", features = ["derive"] }
|
||||
|
||||
# Time and Date
|
||||
chrono = { version = "0.4.42", features = ["serde"] }
|
||||
humantime = "2.3.0"
|
||||
time = { version = "0.3.44", features = ["std", "parsing", "formatting", "macros", "serde"] }
|
||||
|
||||
# Utilities and Tools
|
||||
anyhow = "1.0.100"
|
||||
arc-swap = "1.7.1"
|
||||
astral-tokio-tar = "0.5.6"
|
||||
atoi = "2.0.0"
|
||||
atomic_enum = "0.3.0"
|
||||
aws-config = { version = "1.8.11" }
|
||||
aws-credential-types = { version = "1.2.10" }
|
||||
aws-sdk-s3 = { version = "1.116.0", default-features = false, features = ["sigv4a", "rustls", "rt-tokio"] }
|
||||
aws-smithy-types = { version = "1.3.4" }
|
||||
base64 = "0.22.1"
|
||||
base64-simd = "0.8.0"
|
||||
brotli = "8.0.2"
|
||||
cfg-if = "1.0.4"
|
||||
clap = { version = "4.5.53", features = ["derive", "env"] }
|
||||
const-str = { version = "0.7.0", features = ["std", "proc"] }
|
||||
convert_case = "0.10.0"
|
||||
criterion = { version = "0.8", features = ["html_reports"] }
|
||||
crossbeam-queue = "0.3.12"
|
||||
datafusion = "51.0.0"
|
||||
derive_builder = "0.20.2"
|
||||
enumset = "1.1.10"
|
||||
faster-hex = "0.10.0"
|
||||
flate2 = "1.1.5"
|
||||
flexi_logger = { version = "0.31.7", features = ["trc", "dont_minimize_extra_stacks", "compress", "kv", "json"] }
|
||||
glob = "0.3.3"
|
||||
hashbrown = { version = "0.16.0", features = ["serde", "rayon"] }
|
||||
google-cloud-storage = "1.4.0"
|
||||
google-cloud-auth = "1.2.0"
|
||||
hashbrown = { version = "0.16.1", features = ["serde", "rayon"] }
|
||||
heed = { version = "0.22.0" }
|
||||
hex-simd = "0.8.0"
|
||||
highway = { version = "1.3.0" }
|
||||
hickory-resolver = { version = "0.25.2", features = ["tls-ring"] }
|
||||
hmac = "0.12.1"
|
||||
hyper = "1.7.0"
|
||||
hyper-util = { version = "0.1.17", features = [
|
||||
"tokio",
|
||||
"server-auto",
|
||||
"server-graceful",
|
||||
] }
|
||||
hyper-rustls = { version = "0.27.7", default-features = false, features = ["native-tokio", "http1", "tls12", "logging", "http2", "ring", "webpki-roots"] }
|
||||
http = "1.3.1"
|
||||
http-body = "1.0.1"
|
||||
humantime = "2.3.0"
|
||||
ipnetwork = { version = "0.21.1", features = ["serde"] }
|
||||
jsonwebtoken = { version = "10.0.0", features = ["rust_crypto"] }
|
||||
lazy_static = "1.5.0"
|
||||
libc = "0.2.177"
|
||||
libsystemd = { version = "0.7.2" }
|
||||
local-ip-address = "0.6.5"
|
||||
libc = "0.2.178"
|
||||
libsystemd = "0.7.2"
|
||||
local-ip-address = "0.6.6"
|
||||
lz4 = "1.28.1"
|
||||
matchit = "0.8.4"
|
||||
md-5 = "0.10.6"
|
||||
matchit = "0.9.0"
|
||||
md-5 = "0.11.0-rc.3"
|
||||
md5 = "0.8.0"
|
||||
mime_guess = "2.0.5"
|
||||
moka = { version = "0.12.11", features = ["future"] }
|
||||
@@ -169,129 +209,75 @@ nu-ansi-term = "0.50.3"
|
||||
num_cpus = { version = "1.17.0" }
|
||||
nvml-wrapper = "0.11.0"
|
||||
object_store = "0.12.4"
|
||||
once_cell = "1.21.3"
|
||||
opentelemetry = { version = "0.31.0" }
|
||||
opentelemetry-appender-tracing = { version = "0.31.1", features = [
|
||||
"experimental_use_tracing_span_context",
|
||||
"experimental_metadata_attributes",
|
||||
"spec_unstable_logs_enabled"
|
||||
] }
|
||||
opentelemetry_sdk = { version = "0.31.0" }
|
||||
opentelemetry-stdout = { version = "0.31.0" }
|
||||
opentelemetry-otlp = { version = "0.31.0", default-features = false, features = [
|
||||
"grpc-tonic", "gzip-tonic", "trace", "metrics", "logs", "internal-logs"
|
||||
] }
|
||||
opentelemetry-semantic-conventions = { version = "0.31.0", features = [
|
||||
"semconv_experimental",
|
||||
] }
|
||||
parking_lot = "0.12.5"
|
||||
path-absolutize = "3.1.1"
|
||||
path-clean = "1.0.1"
|
||||
blake3 = { version = "1.8.2" }
|
||||
pbkdf2 = "0.12.2"
|
||||
pin-project-lite = "0.2.16"
|
||||
prost = "0.14.1"
|
||||
pretty_assertions = "1.4.1"
|
||||
quick-xml = "0.38.3"
|
||||
rand = "0.9.2"
|
||||
rand = { version = "0.10.0-rc.5", features = ["serde"] }
|
||||
rayon = "1.11.0"
|
||||
reed-solomon-simd = { version = "3.1.0" }
|
||||
regex = { version = "1.12.2" }
|
||||
reqwest = { version = "0.12.24", default-features = false, features = [
|
||||
"rustls-tls-webpki-roots",
|
||||
"charset",
|
||||
"http2",
|
||||
"system-proxy",
|
||||
"stream",
|
||||
"json",
|
||||
"blocking",
|
||||
] }
|
||||
rmcp = { version = "0.8.1" }
|
||||
rmp = { version = "0.8.14" }
|
||||
rmp-serde = { version = "1.3.0" }
|
||||
rsa = { version = "0.9.8" }
|
||||
rumqttc = { version = "0.25.0" }
|
||||
rust-embed = { version = "8.7.2" }
|
||||
rumqttc = { version = "0.25.1" }
|
||||
rust-embed = { version = "8.9.0" }
|
||||
rustc-hash = { version = "2.1.1" }
|
||||
rustls = { version = "0.23.32", features = ["ring", "logging", "std", "tls12"], default-features = false }
|
||||
rustls-pki-types = "1.12.0"
|
||||
rustls-pemfile = "2.2.0"
|
||||
s3s = { version = "0.12.0-rc.3", features = ["minio"] }
|
||||
schemars = "1.0.4"
|
||||
serde = { version = "1.0.228", features = ["derive"] }
|
||||
serde_json = { version = "1.0.145", features = ["raw_value"] }
|
||||
serde_urlencoded = "0.7.1"
|
||||
s3s = { version = "0.12.0-rc.4", features = ["minio"] }
|
||||
serial_test = "3.2.0"
|
||||
sha1 = "0.10.6"
|
||||
sha2 = "0.10.9"
|
||||
shadow-rs = { version = "1.4.0", default-features = false }
|
||||
siphasher = "1.0.1"
|
||||
smallvec = { version = "1.15.1", features = ["serde"] }
|
||||
smartstring = "1.0.1"
|
||||
snafu = "0.8.9"
|
||||
snap = "1.1.1"
|
||||
socket2 = "0.6.1"
|
||||
starshard = { version = "0.5.0", features = ["rayon", "async", "serde"] }
|
||||
starshard = { version = "0.6.0", features = ["rayon", "async", "serde"] }
|
||||
strum = { version = "0.27.2", features = ["derive"] }
|
||||
sysinfo = "0.37.1"
|
||||
sysctl = "0.7.1"
|
||||
tempfile = "3.23.0"
|
||||
sysinfo = "0.37.2"
|
||||
temp-env = "0.3.6"
|
||||
tempfile = "3.23.0"
|
||||
test-case = "3.3.1"
|
||||
thiserror = "2.0.17"
|
||||
time = { version = "0.3.44", features = [
|
||||
"std",
|
||||
"parsing",
|
||||
"formatting",
|
||||
"macros",
|
||||
"serde",
|
||||
] }
|
||||
tokio = { version = "1.48.0", features = ["fs", "rt-multi-thread"] }
|
||||
tokio-rustls = { version = "0.26.4", default-features = false, features = ["logging", "tls12", "ring"] }
|
||||
tokio-stream = { version = "0.1.17" }
|
||||
tokio-tar = "0.3.1"
|
||||
tokio-test = "0.4.4"
|
||||
tokio-util = { version = "0.7.16", features = ["io", "compat"] }
|
||||
tonic = { version = "0.14.2", features = ["gzip"] }
|
||||
tonic-prost = { version = "0.14.2" }
|
||||
tonic-prost-build = { version = "0.14.2" }
|
||||
tower = { version = "0.5.2", features = ["timeout"] }
|
||||
tower-http = { version = "0.6.6", features = ["cors"] }
|
||||
tracing = { version = "0.1.41" }
|
||||
tracing-core = "0.1.34"
|
||||
tracing = { version = "0.1.43" }
|
||||
tracing-appender = "0.2.4"
|
||||
tracing-error = "0.2.1"
|
||||
tracing-opentelemetry = "0.32.0"
|
||||
tracing-subscriber = { version = "0.3.20", features = ["env-filter", "time"] }
|
||||
tracing-subscriber = { version = "0.3.22", features = ["env-filter", "time"] }
|
||||
transform-stream = "0.3.1"
|
||||
url = "2.5.7"
|
||||
urlencoding = "2.1.3"
|
||||
uuid = { version = "1.18.1", features = [
|
||||
"v4",
|
||||
"fast-rng",
|
||||
"macro-diagnostics",
|
||||
] }
|
||||
uuid = { version = "1.19.0", features = ["v4", "fast-rng", "macro-diagnostics"] }
|
||||
vaultrs = { version = "0.7.4" }
|
||||
walkdir = "2.5.0"
|
||||
wildmatch = { version = "2.5.0", features = ["serde"] }
|
||||
zeroize = { version = "1.8.2", features = ["derive"] }
|
||||
wildmatch = { version = "2.6.1", features = ["serde"] }
|
||||
winapi = { version = "0.3.9" }
|
||||
xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }
|
||||
zip = "6.0.0"
|
||||
zstd = "0.13.3"
|
||||
|
||||
# Observability and Metrics
|
||||
metrics = "0.24.3"
|
||||
opentelemetry = { version = "0.31.0" }
|
||||
opentelemetry-appender-tracing = { version = "0.31.1", features = ["experimental_use_tracing_span_context", "experimental_metadata_attributes", "spec_unstable_logs_enabled"] }
|
||||
opentelemetry-otlp = { version = "0.31.0", features = ["gzip-http", "reqwest-rustls"] }
|
||||
opentelemetry_sdk = { version = "0.31.0" }
|
||||
opentelemetry-semantic-conventions = { version = "0.31.0", features = ["semconv_experimental"] }
|
||||
opentelemetry-stdout = { version = "0.31.0" }
|
||||
|
||||
# Performance Analysis and Memory Profiling
|
||||
mimalloc = "0.1"
|
||||
# Use tikv-jemallocator as memory allocator and enable performance analysis
|
||||
tikv-jemallocator = { version = "0.6", features = ["profiling", "stats", "unprefixed_malloc_on_supported_platforms", "background_threads"] }
|
||||
# Used to control and obtain statistics for jemalloc at runtime
|
||||
tikv-jemalloc-ctl = { version = "0.6", features = ["use_std", "stats", "profiling"] }
|
||||
# Used to generate pprof-compatible memory profiling data and support symbolization and flame graphs
|
||||
jemalloc_pprof = { version = "0.8.1", features = ["symbolize", "flamegraph"] }
|
||||
# Used to generate CPU performance analysis data and flame diagrams
|
||||
pprof = { version = "0.15.0", features = ["flamegraph", "protobuf-codec"] }
|
||||
|
||||
|
||||
|
||||
[workspace.metadata.cargo-shear]
|
||||
ignored = ["rustfs", "rust-i18n", "rustfs-mcp", "tokio-test", "rustfs-audit"]
|
||||
|
||||
[profile.wasm-dev]
|
||||
inherits = "dev"
|
||||
opt-level = 1
|
||||
|
||||
[profile.server-dev]
|
||||
inherits = "dev"
|
||||
|
||||
[profile.android-dev]
|
||||
inherits = "dev"
|
||||
ignored = ["rustfs", "rustfs-mcp", "tokio-test"]
|
||||
|
||||
[profile.release]
|
||||
opt-level = 3
|
||||
|
||||
14
Dockerfile
14
Dockerfile
@@ -64,8 +64,12 @@ COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
|
||||
COPY --from=build /build/rustfs /usr/bin/rustfs
|
||||
COPY entrypoint.sh /entrypoint.sh
|
||||
|
||||
RUN chmod +x /usr/bin/rustfs /entrypoint.sh && \
|
||||
RUN chmod +x /usr/bin/rustfs /entrypoint.sh
|
||||
|
||||
RUN addgroup -g 10001 -S rustfs && \
|
||||
adduser -u 10001 -G rustfs -S rustfs -D && \
|
||||
mkdir -p /data /logs && \
|
||||
chown -R rustfs:rustfs /data /logs && \
|
||||
chmod 0750 /data /logs
|
||||
|
||||
ENV RUSTFS_ADDRESS=":9000" \
|
||||
@@ -78,12 +82,14 @@ ENV RUSTFS_ADDRESS=":9000" \
|
||||
RUSTFS_CONSOLE_CORS_ALLOWED_ORIGINS="*" \
|
||||
RUSTFS_VOLUMES="/data" \
|
||||
RUST_LOG="warn" \
|
||||
RUSTFS_OBS_LOG_DIRECTORY="/logs" \
|
||||
RUSTFS_SINKS_FILE_PATH="/logs"
|
||||
|
||||
RUSTFS_OBS_LOG_DIRECTORY="/logs"
|
||||
|
||||
EXPOSE 9000 9001
|
||||
|
||||
VOLUME ["/data", "/logs"]
|
||||
|
||||
USER rustfs
|
||||
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
||||
|
||||
CMD ["rustfs"]
|
||||
|
||||
@@ -167,7 +167,6 @@ ENV RUSTFS_ADDRESS=":9000" \
|
||||
RUSTFS_VOLUMES="/data" \
|
||||
RUST_LOG="warn" \
|
||||
RUSTFS_OBS_LOG_DIRECTORY="/logs" \
|
||||
RUSTFS_SINKS_FILE_PATH="/logs" \
|
||||
RUSTFS_USERNAME="rustfs" \
|
||||
RUSTFS_GROUPNAME="rustfs" \
|
||||
RUSTFS_UID="1000" \
|
||||
|
||||
230
README.md
230
README.md
@@ -1,6 +1,6 @@
|
||||
[](https://rustfs.com)
|
||||
|
||||
<p align="center">RustFS is a high-performance distributed object storage software built using Rust</p>
|
||||
<p align="center">RustFS is a high-performance, distributed object storage system built in Rust.</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
|
||||
@@ -11,7 +11,7 @@
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://docs.rustfs.com/introduction.html">Getting Started</a>
|
||||
<a href="https://docs.rustfs.com/installation/">Getting Started</a>
|
||||
· <a href="https://docs.rustfs.com/">Docs</a>
|
||||
· <a href="https://github.com/rustfs/rustfs/issues">Bug reports</a>
|
||||
· <a href="https://github.com/rustfs/rustfs/discussions">Discussions</a>
|
||||
@@ -19,181 +19,195 @@
|
||||
|
||||
<p align="center">
|
||||
English | <a href="https://github.com/rustfs/rustfs/blob/main/README_ZH.md">简体中文</a> |
|
||||
<!-- Keep these links. Translations will automatically update with the README. -->
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=de">Deutsch</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=es">Español</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=fr">français</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ja">日本語</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ko">한국어</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=pt">Português</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=pt">Portuguese</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ru">Русский</a>
|
||||
</p>
|
||||
|
||||
RustFS is a high-performance distributed object storage software built using Rust, one of the most popular languages
|
||||
worldwide. Along with MinIO, it shares a range of advantages such as simplicity, S3 compatibility, open-source nature,
|
||||
support for data lakes, AI, and big data. Furthermore, it has a better and more user-friendly open-source license in
|
||||
comparison to other storage systems, being constructed under the Apache license. As Rust serves as its foundation,
|
||||
RustFS provides faster speed and safer distributed features for high-performance object storage.
|
||||
RustFS is a high-performance, distributed object storage system built in Rust—one of the most loved programming languages worldwide. RustFS combines the simplicity of MinIO with the memory safety and raw performance of Rust. It offers full S3 compatibility, is completely open-source, and is optimized for data lakes, AI, and big data workloads.
|
||||
|
||||
> ⚠️ **RustFS is under rapid development. Do NOT use in production environments!**
|
||||
Unlike other storage systems, RustFS is released under the permissible Apache 2.0 license, avoiding the restrictions of AGPL. With Rust as its foundation, RustFS delivers superior speed and secure distributed features for next-generation object storage.
|
||||
|
||||
## Features
|
||||
## Feature & Status
|
||||
|
||||
- **High Performance**: Built with Rust, ensuring speed and efficiency.
|
||||
- **Distributed Architecture**: Scalable and fault-tolerant design for large-scale deployments.
|
||||
- **S3 Compatibility**: Seamless integration with existing S3-compatible applications.
|
||||
- **Data Lake Support**: Optimized for big data and AI workloads.
|
||||
- **Open Source**: Licensed under Apache 2.0, encouraging community contributions and transparency.
|
||||
- **User-Friendly**: Designed with simplicity in mind, making it easy to deploy and manage.
|
||||
- **High Performance**: Built with Rust to ensure maximum speed and resource efficiency.
|
||||
- **Distributed Architecture**: Scalable and fault-tolerant design suitable for large-scale deployments.
|
||||
- **S3 Compatibility**: Seamless integration with existing S3-compatible applications and tools.
|
||||
- **Data Lake Support**: Optimized for high-throughput big data and AI workloads.
|
||||
- **Open Source**: Licensed under Apache 2.0, encouraging unrestricted community contributions and commercial usage.
|
||||
- **User-Friendly**: Designed with simplicity in mind for easy deployment and management.
|
||||
|
||||
## RustFS vs MinIO
|
||||
| Feature | Status | Feature | Status |
|
||||
| :--- | :--- | :--- | :--- |
|
||||
| **S3 Core Features** | ✅ Available | **Bitrot Protection** | ✅ Available |
|
||||
| **Upload / Download** | ✅ Available | **Single Node Mode** | ✅ Available |
|
||||
| **Versioning** | ✅ Available | **Bucket Replication** | ⚠️ Partial Support |
|
||||
| **Logging** | ✅ Available | **Lifecycle Management** | 🚧 Under Testing |
|
||||
| **Event Notifications** | ✅ Available | **Distributed Mode** | 🚧 Under Testing |
|
||||
| **K8s Helm Charts** | ✅ Available | **OPA (Open Policy Agent)** | 🚧 Under Testing |
|
||||
|
||||
Stress test server parameters
|
||||
|
||||
| Type | parameter | Remark |
|
||||
|
||||
|
||||
## RustFS vs MinIO Performance
|
||||
|
||||
**Stress Test Environment:**
|
||||
|
||||
| Type | Parameter | Remark |
|
||||
|---------|-----------|----------------------------------------------------------|
|
||||
| CPU | 2 Core | Intel Xeon(Sapphire Rapids) Platinum 8475B , 2.7/3.2 GHz | |
|
||||
| Memory | 4GB | |
|
||||
| Network | 15Gbp | |
|
||||
| Driver | 40GB x 4 | IOPS 3800 / Driver |
|
||||
| CPU | 2 Core | Intel Xeon (Sapphire Rapids) Platinum 8475B, 2.7/3.2 GHz |
|
||||
| Memory | 4GB | |
|
||||
| Network | 15Gbps | |
|
||||
| Drive | 40GB x 4 | IOPS 3800 / Drive |
|
||||
|
||||
<https://github.com/user-attachments/assets/2e4979b5-260c-4f2c-ac12-c87fd558072a>
|
||||
|
||||
### RustFS vs Other object storage
|
||||
### RustFS vs Other Object Storage
|
||||
|
||||
| RustFS | Other object storage |
|
||||
|---------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------|
|
||||
| Powerful Console | Simple and useless Console |
|
||||
| Developed based on Rust language, memory is safer | Developed in Go or C, with potential issues like memory GC/leaks |
|
||||
| Does not report logs to third-party countries | Reporting logs to other third countries may violate national security laws |
|
||||
| Licensed under Apache, more business-friendly | AGPL V3 License and other License, polluted open source and License traps, infringement of intellectual property rights |
|
||||
| Comprehensive S3 support, works with domestic and international cloud providers | Full support for S3, but no local cloud vendor support |
|
||||
| Rust-based development, strong support for secure and innovative devices | Poor support for edge gateways and secure innovative devices |
|
||||
| Stable commercial prices, free community support | High pricing, with costs up to $250,000 for 1PiB |
|
||||
| No risk | Intellectual property risks and risks of prohibited uses |
|
||||
| Feature | RustFS | Other Object Storage |
|
||||
| :--- | :--- | :--- |
|
||||
| **Console Experience** | **Powerful Console**<br>Comprehensive management interface. | **Basic / Limited Console**<br>Often overly simple or lacking critical features. |
|
||||
| **Language & Safety** | **Rust-based**<br>Memory safety by design. | **Go or C-based**<br>Potential for memory GC pauses or leaks. |
|
||||
| **Data Sovereignty** | **No Telemetry / Full Compliance**<br>Guards against unauthorized cross-border data egress. Compliant with GDPR (EU/UK), CCPA (US), and APPI (Japan). | **Potential Risk**<br>Possible legal exposure and unwanted data telemetry. |
|
||||
| **Licensing** | **Permissive Apache 2.0**<br>Business-friendly, no "poison pill" clauses. | **Restrictive AGPL v3**<br>Risk of license traps and intellectual property pollution. |
|
||||
| **Compatibility** | **100% S3 Compatible**<br>Works with any cloud provider or client, anywhere. | **Variable Compatibility**<br>May lack support for local cloud vendors or specific APIs. |
|
||||
| **Edge & IoT** | **Strong Edge Support**<br>Ideal for secure, innovative edge devices. | **Weak Edge Support**<br>Often too heavy for edge gateways. |
|
||||
| **Risk Profile** | **Enterprise Risk Mitigation**<br>Clear IP rights and safe for commercial use. | **Legal Risks**<br>Intellectual property ambiguity and usage restrictions. |
|
||||
|
||||
## Quickstart
|
||||
|
||||
To get started with RustFS, follow these steps:
|
||||
|
||||
1. **One-click installation script (Option 1)**
|
||||
### 1. One-click Installation (Option 1)
|
||||
|
||||
```bash
|
||||
curl -O https://rustfs.com/install_rustfs.sh && bash install_rustfs.sh
|
||||
```
|
||||
curl -O https://rustfs.com/install_rustfs.sh && bash install_rustfs.sh
|
||||
````
|
||||
|
||||
2. **Docker Quick Start (Option 2)**
|
||||
### 2\. Docker Quick Start (Option 2)
|
||||
|
||||
```bash
|
||||
# create data and logs directories
|
||||
mkdir -p data logs
|
||||
The RustFS container runs as a non-root user `rustfs` (UID `10001`). If you run Docker with `-v` to mount a host directory, please ensure the host directory owner is set to `10001`, otherwise you will encounter permission denied errors.
|
||||
|
||||
# using latest alpha version
|
||||
docker run -d -p 9000:9000 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:alpha
|
||||
```bash
|
||||
# Create data and logs directories
|
||||
mkdir -p data logs
|
||||
|
||||
# Specific version
|
||||
docker run -d -p 9000:9000 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:1.0.0.alpha.45
|
||||
```
|
||||
# Change the owner of these directories
|
||||
chown -R 10001:10001 data logs
|
||||
|
||||
For docker installation, you can also run the container with docker compose. With the `docker-compose.yml` file under
|
||||
root directory, running the command:
|
||||
# Using latest version
|
||||
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:latest
|
||||
|
||||
```
|
||||
docker compose --profile observability up -d
|
||||
```
|
||||
# Using specific version
|
||||
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:1.0.0.alpha.68
|
||||
```
|
||||
|
||||
**NOTE**: You should be better to have a look for `docker-compose.yaml` file. Because, several services contains in the
|
||||
file. Grafan,prometheus,jaeger containers will be launched using docker compose file, which is helpful for rustfs
|
||||
observability. If you want to start redis as well as nginx container, you can specify the corresponding profiles.
|
||||
You can also use Docker Compose. Using the `docker-compose.yml` file in the root directory:
|
||||
|
||||
3. **Build from Source (Option 3) - Advanced Users**
|
||||
```bash
|
||||
docker compose --profile observability up -d
|
||||
```
|
||||
|
||||
For developers who want to build RustFS Docker images from source with multi-architecture support:
|
||||
**NOTE**: We recommend reviewing the `docker-compose.yaml` file before running. It defines several services including Grafana, Prometheus, and Jaeger, which are helpful for RustFS observability. If you wish to start Redis or Nginx containers, you can specify the corresponding profiles.
|
||||
|
||||
```bash
|
||||
# Build multi-architecture images locally
|
||||
./docker-buildx.sh --build-arg RELEASE=latest
|
||||
### 3\. Build from Source (Option 3) - Advanced Users
|
||||
|
||||
# Build and push to registry
|
||||
./docker-buildx.sh --push
|
||||
For developers who want to build RustFS Docker images from source with multi-architecture support:
|
||||
|
||||
# Build specific version
|
||||
./docker-buildx.sh --release v1.0.0 --push
|
||||
```bash
|
||||
# Build multi-architecture images locally
|
||||
./docker-buildx.sh --build-arg RELEASE=latest
|
||||
|
||||
# Build for custom registry
|
||||
./docker-buildx.sh --registry your-registry.com --namespace yourname --push
|
||||
```
|
||||
# Build and push to registry
|
||||
./docker-buildx.sh --push
|
||||
|
||||
The `docker-buildx.sh` script supports:
|
||||
- **Multi-architecture builds**: `linux/amd64`, `linux/arm64`
|
||||
- **Automatic version detection**: Uses git tags or commit hashes
|
||||
- **Registry flexibility**: Supports Docker Hub, GitHub Container Registry, etc.
|
||||
- **Build optimization**: Includes caching and parallel builds
|
||||
# Build specific version
|
||||
./docker-buildx.sh --release v1.0.0 --push
|
||||
|
||||
You can also use Make targets for convenience:
|
||||
# Build for custom registry
|
||||
./docker-buildx.sh --registry your-registry.com --namespace yourname --push
|
||||
```
|
||||
|
||||
```bash
|
||||
make docker-buildx # Build locally
|
||||
make docker-buildx-push # Build and push
|
||||
make docker-buildx-version VERSION=v1.0.0 # Build specific version
|
||||
make help-docker # Show all Docker-related commands
|
||||
```
|
||||
The `docker-buildx.sh` script supports:
|
||||
\- **Multi-architecture builds**: `linux/amd64`, `linux/arm64`
|
||||
\- **Automatic version detection**: Uses git tags or commit hashes
|
||||
\- **Registry flexibility**: Supports Docker Hub, GitHub Container Registry, etc.
|
||||
\- **Build optimization**: Includes caching and parallel builds
|
||||
|
||||
4. **Access the Console**: Open your web browser and navigate to `http://localhost:9000` to access the RustFS console,
|
||||
default username and password is `rustfsadmin` .
|
||||
5. **Create a Bucket**: Use the console to create a new bucket for your objects.
|
||||
6. **Upload Objects**: You can upload files directly through the console or use S3-compatible APIs to interact with your
|
||||
RustFS instance.
|
||||
You can also use Make targets for convenience:
|
||||
|
||||
**NOTE**: If you want to access RustFS instance with `https`, you can refer
|
||||
to [TLS configuration docs](https://docs.rustfs.com/integration/tls-configured.html).
|
||||
```bash
|
||||
make docker-buildx # Build locally
|
||||
make docker-buildx-push # Build and push
|
||||
make docker-buildx-version VERSION=v1.0.0 # Build specific version
|
||||
make help-docker # Show all Docker-related commands
|
||||
```
|
||||
|
||||
> **Heads-up (macOS cross-compilation)**: macOS keeps the default `ulimit -n` at 256, so `cargo zigbuild` or `./build-rustfs.sh --platform ...` may fail with `ProcessFdQuotaExceeded` when targeting Linux. The build script attempts to raise the limit automatically, but if you still see the warning, run `ulimit -n 4096` (or higher) in your shell before building.
|
||||
|
||||
### 4\. Build with Helm Chart (Option 4) - Cloud Native
|
||||
|
||||
Follow the instructions in the [Helm Chart README](https://charts.rustfs.com/) to install RustFS on a Kubernetes cluster.
|
||||
|
||||
-----
|
||||
|
||||
### Accessing RustFS
|
||||
|
||||
5. **Access the Console**: Open your web browser and navigate to `http://localhost:9000` to access the RustFS console.
|
||||
* Default credentials: `rustfsadmin` / `rustfsadmin`
|
||||
6. **Create a Bucket**: Use the console to create a new bucket for your objects.
|
||||
7. **Upload Objects**: You can upload files directly through the console or use S3-compatible APIs/clients to interact with your RustFS instance.
|
||||
|
||||
**NOTE**: To access the RustFS instance via `https`, please refer to the [TLS Configuration Docs](https://docs.rustfs.com/integration/tls-configured.html).
|
||||
|
||||
## Documentation
|
||||
|
||||
For detailed documentation, including configuration options, API references, and advanced usage, please visit
|
||||
our [Documentation](https://docs.rustfs.com).
|
||||
For detailed documentation, including configuration options, API references, and advanced usage, please visit our [Documentation](https://docs.rustfs.com).
|
||||
|
||||
## Getting Help
|
||||
|
||||
If you have any questions or need assistance, you can:
|
||||
If you have any questions or need assistance:
|
||||
|
||||
- Check the [FAQ](https://github.com/rustfs/rustfs/discussions/categories/q-a) for common issues and solutions.
|
||||
- Join our [GitHub Discussions](https://github.com/rustfs/rustfs/discussions) to ask questions and share your
|
||||
experiences.
|
||||
- Open an issue on our [GitHub Issues](https://github.com/rustfs/rustfs/issues) page for bug reports or feature
|
||||
requests.
|
||||
- Check the [FAQ](https://github.com/rustfs/rustfs/discussions/categories/q-a) for common issues and solutions.
|
||||
- Join our [GitHub Discussions](https://github.com/rustfs/rustfs/discussions) to ask questions and share your experiences.
|
||||
- Open an issue on our [GitHub Issues](https://github.com/rustfs/rustfs/issues) page for bug reports or feature requests.
|
||||
|
||||
## Links
|
||||
|
||||
- [Documentation](https://docs.rustfs.com) - The manual you should read
|
||||
- [Changelog](https://github.com/rustfs/rustfs/releases) - What we broke and fixed
|
||||
- [GitHub Discussions](https://github.com/rustfs/rustfs/discussions) - Where the community lives
|
||||
- [Documentation](https://docs.rustfs.com) - The manual you should read
|
||||
- [Changelog](https://github.com/rustfs/rustfs/releases) - What we broke and fixed
|
||||
- [GitHub Discussions](https://github.com/rustfs/rustfs/discussions) - Where the community lives
|
||||
|
||||
## Contact
|
||||
|
||||
- **Bugs**: [GitHub Issues](https://github.com/rustfs/rustfs/issues)
|
||||
- **Business**: <hello@rustfs.com>
|
||||
- **Jobs**: <jobs@rustfs.com>
|
||||
- **General Discussion**: [GitHub Discussions](https://github.com/rustfs/rustfs/discussions)
|
||||
- **Contributing**: [CONTRIBUTING.md](CONTRIBUTING.md)
|
||||
- **Bugs**: [GitHub Issues](https://github.com/rustfs/rustfs/issues)
|
||||
- **Business**: [hello@rustfs.com](mailto:hello@rustfs.com)
|
||||
- **Jobs**: [jobs@rustfs.com](mailto:jobs@rustfs.com)
|
||||
- **General Discussion**: [GitHub Discussions](https://github.com/rustfs/rustfs/discussions)
|
||||
- **Contributing**: [CONTRIBUTING.md](CONTRIBUTING.md)
|
||||
|
||||
## Contributors
|
||||
|
||||
RustFS is a community-driven project, and we appreciate all contributions. Check out
|
||||
the [Contributors](https://github.com/rustfs/rustfs/graphs/contributors) page to see the amazing people who have helped
|
||||
make RustFS better.
|
||||
RustFS is a community-driven project, and we appreciate all contributions. Check out the [Contributors](https://github.com/rustfs/rustfs/graphs/contributors) page to see the amazing people who have helped make RustFS better.
|
||||
|
||||
<a href="https://github.com/rustfs/rustfs/graphs/contributors">
|
||||
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="Contributors"/>
|
||||
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="Contributors" />
|
||||
</a>
|
||||
|
||||
## Github Trending Top
|
||||
|
||||
🚀 RustFS is beloved by open-source enthusiasts and enterprise users worldwide, often appearing on the GitHub Trending
|
||||
top charts.
|
||||
🚀 RustFS is beloved by open-source enthusiasts and enterprise users worldwide, often appearing on the GitHub Trending top charts.
|
||||
|
||||
<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>
|
||||
|
||||
## Star History
|
||||
|
||||
[](https://www.star-history.com/#rustfs/rustfs&type=date&legend=top-left)
|
||||
|
||||
## License
|
||||
|
||||
[Apache 2.0](https://opensource.org/licenses/Apache-2.0)
|
||||
|
||||
242
README_ZH.md
242
README_ZH.md
@@ -1,143 +1,219 @@
|
||||
[](https://rustfs.com)
|
||||
|
||||
<p align="center">RustFS 是一个使用 Rust 构建的高性能分布式对象存储软件</p >
|
||||
<p align="center">RustFS 是一个基于 Rust 构建的高性能分布式对象存储系统。</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
|
||||
<a href="https://github.com/rustfs/rustfs/actions/workflows/docker.yml"><img alt="Build and Push Docker Images" src="https://github.com/rustfs/rustfs/actions/workflows/docker.yml/badge.svg" /></a>
|
||||
<img alt="GitHub commit activity" src="https://img.shields.io/github/commit-activity/m/rustfs/rustfs"/>
|
||||
<img alt="Github Last Commit" src="https://img.shields.io/github/last-commit/rustfs/rustfs"/>
|
||||
<a href="https://github.com/rustfs/rustfs/actions/workflows/docker.yml"><img alt="构建并推送 Docker 镜像" src="https://github.com/rustfs/rustfs/actions/workflows/docker.yml/badge.svg" /></a>
|
||||
<img alt="GitHub 提交活跃度" src="https://img.shields.io/github/commit-activity/m/rustfs/rustfs"/>
|
||||
<img alt="Github 最新提交" src="https://img.shields.io/github/last-commit/rustfs/rustfs"/>
|
||||
<a href="https://hellogithub.com/repository/rustfs/rustfs" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=b95bcb72bdc340b68f16fdf6790b7d5b&claim_uid=MsbvjYeLDKAH457&theme=small" alt="Featured|HelloGitHub" /></a>
|
||||
</p >
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://docs.rustfs.com/zh/introduction.html">快速开始</a >
|
||||
· <a href="https://docs.rustfs.com/zh/">文档</a >
|
||||
· <a href="https://github.com/rustfs/rustfs/issues">问题报告</a >
|
||||
· <a href="https://github.com/rustfs/rustfs/discussions">讨论</a >
|
||||
</p >
|
||||
<a href="https://docs.rustfs.com/installation/">快速开始</a>
|
||||
· <a href="https://docs.rustfs.com/">文档</a>
|
||||
· <a href="https://github.com/rustfs/rustfs/issues">报告 Bug</a>
|
||||
· <a href="https://github.com/rustfs/rustfs/discussions">社区讨论</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/rustfs/rustfs/blob/main/README.md">English</a > | 简体中文
|
||||
</p >
|
||||
<a href="https://github.com/rustfs/rustfs/blob/main/README.md">English</a> | 简体中文 |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=de">Deutsch</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=es">Español</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=fr">français</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ja">日本語</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ko">한국어</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=pt">Portuguese</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ru">Русский</a>
|
||||
</p>
|
||||
|
||||
RustFS 是一个使用 Rust(全球最受欢迎的编程语言之一)构建的高性能分布式对象存储软件。与 MinIO 一样,它具有简单性、S3
|
||||
兼容性、开源特性以及对数据湖、AI 和大数据的支持等一系列优势。此外,与其他存储系统相比,它采用 Apache
|
||||
许可证构建,拥有更好、更用户友好的开源许可证。由于以 Rust 为基础,RustFS 为高性能对象存储提供了更快的速度和更安全的分布式功能。
|
||||
RustFS 是一个基于 Rust 构建的高性能分布式对象存储系统。Rust 是全球最受开发者喜爱的编程语言之一,RustFS 完美结合了 MinIO 的简洁性与 Rust 的内存安全及高性能优势。它提供完整的 S3 兼容性,完全开源,并专为数据湖、人工智能(AI)和大数据负载进行了优化。
|
||||
|
||||
## 特性
|
||||
与其他存储系统不同,RustFS 采用更宽松、商业友好的 Apache 2.0 许可证,避免了 AGPL 协议的限制。以 Rust 为基石,RustFS 为下一代对象存储提供了更快的速度和更安全的分布式特性。
|
||||
|
||||
- **高性能**:使用 Rust 构建,确保速度和效率。
|
||||
## 特征和功能状态
|
||||
|
||||
- **高性能**:基于 Rust 构建,确保极致的速度和资源效率。
|
||||
- **分布式架构**:可扩展且容错的设计,适用于大规模部署。
|
||||
- **S3 兼容性**:与现有 S3 兼容应用程序无缝集成。
|
||||
- **数据湖支持**:针对大数据和 AI 工作负载进行了优化。
|
||||
- **开源**:采用 Apache 2.0 许可证,鼓励社区贡献和透明度。
|
||||
- **用户友好**:设计简单,易于部署和管理。
|
||||
- **S3 兼容性**:与现有的 S3 兼容应用和工具无缝集成。
|
||||
- **数据湖支持**:专为高吞吐量的大数据和 AI 工作负载优化。
|
||||
- **完全开源**:采用 Apache 2.0 许可证,鼓励社区贡献和商业使用。
|
||||
- **简单易用**:设计简洁,易于部署和管理。
|
||||
|
||||
## RustFS vs MinIO
|
||||
|
||||
压力测试服务器参数
|
||||
| 功能 | 状态 | 功能 | 状态 |
|
||||
| :--- | :--- | :--- | :--- |
|
||||
| **S3 核心功能** | ✅ 可用 | **Bitrot (防数据腐烂)** | ✅ 可用 |
|
||||
| **上传 / 下载** | ✅ 可用 | **单机模式** | ✅ 可用 |
|
||||
| **版本控制** | ✅ 可用 | **存储桶复制** | ⚠️ 部分可用 |
|
||||
| **日志功能** | ✅ 可用 | **生命周期管理** | 🚧 测试中 |
|
||||
| **事件通知** | ✅ 可用 | **分布式模式** | 🚧 测试中 |
|
||||
| **K8s Helm Chart** | ✅ 可用 | **OPA (策略引擎)** | 🚧 测试中 |
|
||||
|
||||
| 类型 | 参数 | 备注 |
|
||||
|-----|----------|----------------------------------------------------------|
|
||||
| CPU | 2 核心 | Intel Xeon(Sapphire Rapids) Platinum 8475B , 2.7/3.2 GHz | |
|
||||
| 内存 | 4GB | |
|
||||
| 网络 | 15Gbp | |
|
||||
| 驱动器 | 40GB x 4 | IOPS 3800 / 驱动器 |
|
||||
|
||||
|
||||
|
||||
## RustFS vs MinIO 性能对比
|
||||
|
||||
**压力测试环境参数:**
|
||||
|
||||
| 类型 | 参数 | 备注 |
|
||||
|---------|-----------|----------------------------------------------------------|
|
||||
| CPU | 2 核 | Intel Xeon (Sapphire Rapids) Platinum 8475B , 2.7/3.2 GHz |
|
||||
| 内存 | 4GB | |
|
||||
| 网络 | 15Gbps | |
|
||||
| 硬盘 | 40GB x 4 | IOPS 3800 / Drive |
|
||||
|
||||
<https://github.com/user-attachments/assets/2e4979b5-260c-4f2c-ac12-c87fd558072a>
|
||||
|
||||
### RustFS vs 其他对象存储
|
||||
|
||||
| RustFS | 其他对象存储 |
|
||||
|--------------------------|-------------------------------------|
|
||||
| 强大的控制台 | 简单且无用的控制台 |
|
||||
| 基于 Rust 语言开发,内存更安全 | 使用 Go 或 C 开发,存在内存 GC/泄漏等潜在问题 |
|
||||
| 不向第三方国家报告日志 | 向其他第三方国家报告日志可能违反国家安全法律 |
|
||||
| 采用 Apache 许可证,对商业更友好 | AGPL V3 许可证等其他许可证,污染开源和许可证陷阱,侵犯知识产权 |
|
||||
| 全面的 S3 支持,适用于国内外云提供商 | 完全支持 S3,但不支持本地云厂商 |
|
||||
| 基于 Rust 开发,对安全和创新设备有强大支持 | 对边缘网关和安全创新设备支持较差 |
|
||||
| 稳定的商业价格,免费社区支持 | 高昂的定价,1PiB 成本高达 $250,000 |
|
||||
| 无风险 | 知识产权风险和禁止使用的风险 |
|
||||
| 特性 | RustFS | 其他对象存储 |
|
||||
| :--- | :--- | :--- |
|
||||
| **控制台体验** | **功能强大的控制台**<br>提供全面的管理界面。 | **基础/简陋的控制台**<br>通常功能过于简单或缺失关键特性。 |
|
||||
| **语言与安全** | **基于 Rust 开发**<br>天生的内存安全。 | **基于 Go 或 C 开发**<br>存在内存 GC 停顿或内存泄漏的潜在风险。 |
|
||||
| **数据主权** | **无遥测 / 完全合规**<br>防止未经授权的数据跨境传输。完全符合 GDPR (欧盟/英国)、CCPA (美国) 和 APPI (日本) 等法规。 | **潜在风险**<br>可能存在法律风险和隐蔽的数据遥测(Telemetry)。 |
|
||||
| **开源协议** | **宽松的 Apache 2.0**<br>商业友好,无“毒丸”条款。 | **受限的 AGPL v3**<br>存在许可证陷阱和知识产权污染的风险。 |
|
||||
| **兼容性** | **100% S3 兼容**<br>适用于任何云提供商和客户端,随处运行。 | **兼容性不一**<br>虽然支持 S3,但可能缺乏对本地云厂商或特定 API 的支持。 |
|
||||
| **边缘与 IoT** | **强大的边缘支持**<br>非常适合安全、创新的边缘设备。 | **边缘支持较弱**<br>对于边缘网关来说通常过于沉重。 |
|
||||
| **成本** | **稳定且免费**<br>免费社区支持,稳定的商业定价。 | **高昂成本**<br>1PiB 的成本可能高达 250,000 美元。 |
|
||||
| **风险控制** | **企业级风险规避**<br>清晰的知识产权,商业使用安全无忧。 | **法律风险**<br>知识产权归属模糊及使用限制风险。 |
|
||||
|
||||
## 快速开始
|
||||
|
||||
要开始使用 RustFS,请按照以下步骤操作:
|
||||
请按照以下步骤快速上手 RustFS:
|
||||
|
||||
1. **一键脚本快速启动 (方案一)**
|
||||
|
||||
```bash
|
||||
curl -O https://rustfs.com/install_rustfs.sh && bash install_rustfs.sh
|
||||
```
|
||||
|
||||
2. **Docker 快速启动(方案二)**
|
||||
### 1. 一键安装脚本 (选项 1)
|
||||
|
||||
```bash
|
||||
docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs
|
||||
```
|
||||
curl -O https://rustfs.com/install_rustfs.sh && bash install_rustfs.sh
|
||||
````
|
||||
|
||||
对于使用 Docker 安装来讲,你还可以使用 `docker compose` 来启动 rustfs 实例。在仓库的根目录下面有一个 `docker-compose.yml`
|
||||
文件。运行如下命令即可:
|
||||
### 2\. Docker 快速启动 (选项 2)
|
||||
|
||||
```
|
||||
docker compose --profile observability up -d
|
||||
```
|
||||
RustFS 容器以非 root 用户 `rustfs` (UID `10001`) 运行。如果您使用 Docker 的 `-v` 参数挂载宿主机目录,请务必确保宿主机目录的所有者已更改为 `1000`,否则会遇到权限拒绝错误。
|
||||
|
||||
**注意**:在使用 `docker compose` 之前,你应该仔细阅读一下 `docker-compose.yaml`,因为该文件中包含多个服务,除了 rustfs
|
||||
以外,还有 grafana、prometheus、jaeger 等,这些是为 rustfs 可观测性服务的,还有 redis 和 nginx。你想启动哪些容器,就需要用
|
||||
`--profile` 参数指定相应的 profile。
|
||||
```bash
|
||||
# 创建数据和日志目录
|
||||
mkdir -p data logs
|
||||
|
||||
3. **访问控制台**:打开 Web 浏览器并导航到 `http://localhost:9000` 以访问 RustFS 控制台,默认的用户名和密码是
|
||||
`rustfsadmin` 。
|
||||
4. **创建存储桶**:使用控制台为您的对象创建新的存储桶。
|
||||
5. **上传对象**:您可以直接通过控制台上传文件,或使用 S3 兼容的 API 与您的 RustFS 实例交互。
|
||||
# 更改这两个目录的所有者
|
||||
chown -R 10001:10001 data logs
|
||||
|
||||
**注意**:如果你想通过 `https` 来访问 RustFS
|
||||
实例,请参考 [TLS 配置文档](https://docs.rustfs.com/zh/integration/tls-configured.html)
|
||||
# 使用最新版本运行
|
||||
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:latest
|
||||
|
||||
# 使用指定版本运行
|
||||
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:1.0.0.alpha.68
|
||||
```
|
||||
|
||||
您也可以使用 Docker Compose。使用根目录下的 `docker-compose.yml` 文件:
|
||||
|
||||
```bash
|
||||
docker compose --profile observability up -d
|
||||
```
|
||||
|
||||
**注意**: 我们建议您在运行前查看 `docker-compose.yaml` 文件。该文件定义了包括 Grafana、Prometheus 和 Jaeger 在内的多个服务,有助于 RustFS 的可观测性监控。如果您还想启动 Redis 或 Nginx 容器,可以指定相应的 profile。
|
||||
|
||||
### 3\. 源码编译 (选项 3) - 进阶用户
|
||||
|
||||
适用于希望从源码构建支持多架构 RustFS Docker 镜像的开发者:
|
||||
|
||||
```bash
|
||||
# 在本地构建多架构镜像
|
||||
./docker-buildx.sh --build-arg RELEASE=latest
|
||||
|
||||
# 构建并推送到仓库
|
||||
./docker-buildx.sh --push
|
||||
|
||||
# 构建指定版本
|
||||
./docker-buildx.sh --release v1.0.0 --push
|
||||
|
||||
# 构建并推送到自定义仓库
|
||||
./docker-buildx.sh --registry your-registry.com --namespace yourname --push
|
||||
```
|
||||
|
||||
`docker-buildx.sh` 脚本支持:
|
||||
\- **多架构构建**: `linux/amd64`, `linux/arm64`
|
||||
\- **自动版本检测**: 使用 git tags 或 commit hash
|
||||
\- **灵活的仓库支持**: 支持 Docker Hub, GitHub Container Registry 等
|
||||
\- **构建优化**: 包含缓存和并行构建
|
||||
|
||||
为了方便起见,您也可以使用 Make 命令:
|
||||
|
||||
```bash
|
||||
make docker-buildx # 本地构建
|
||||
make docker-buildx-push # 构建并推送
|
||||
make docker-buildx-version VERSION=v1.0.0 # 构建指定版本
|
||||
make help-docker # 显示所有 Docker 相关命令
|
||||
```
|
||||
|
||||
> **注意 (macOS 交叉编译)**: macOS 默认的 `ulimit -n` 限制为 256,因此在使用 `cargo zigbuild` 或 `./build-rustfs.sh --platform ...` 交叉编译 Linux 版本时,可能会因 `ProcessFdQuotaExceeded` 失败。构建脚本会尝试自动提高限制,但如果您仍然看到警告,请在构建前在终端运行 `ulimit -n 4096` (或更高)。
|
||||
|
||||
### 4\. 使用 Helm Chart 安装 (选项 4) - 云原生环境
|
||||
|
||||
请按照 [Helm Chart README](https://charts.rustfs.com) 上的说明在 Kubernetes 集群上安装 RustFS。
|
||||
|
||||
-----
|
||||
|
||||
### 访问 RustFS
|
||||
|
||||
5. **访问控制台**: 打开浏览器并访问 `http://localhost:9000` 进入 RustFS 控制台。
|
||||
* 默认账号/密码: `rustfsadmin` / `rustfsadmin`
|
||||
6. **创建存储桶**: 使用控制台为您的对象创建一个新的存储桶 (Bucket)。
|
||||
7. **上传对象**: 您可以直接通过控制台上传文件,或使用 S3 兼容的 API/客户端与您的 RustFS 实例进行交互。
|
||||
|
||||
**注意**: 如果您希望通过 `https` 访问 RustFS 实例,请参考 [TLS 配置文档](https://docs.rustfs.com/integration/tls-configured.html)。
|
||||
|
||||
## 文档
|
||||
|
||||
有关详细文档,包括配置选项、API 参考和高级用法,请访问我们的[文档](https://docs.rustfs.com)。
|
||||
有关详细文档,包括配置选项、API 参考和高级用法,请访问我们的 [官方文档](https://docs.rustfs.com)。
|
||||
|
||||
## 获取帮助
|
||||
|
||||
如果您有任何问题或需要帮助,您可以:
|
||||
如果您有任何问题或需要帮助:
|
||||
|
||||
- 查看[常见问题解答](https://github.com/rustfs/rustfs/discussions/categories/q-a)以获取常见问题和解决方案。
|
||||
- 加入我们的 [GitHub 讨论](https://github.com/rustfs/rustfs/discussions)来提问和分享您的经验。
|
||||
- 在我们的 [GitHub Issues](https://github.com/rustfs/rustfs/issues) 页面上开启问题,报告错误或功能请求。
|
||||
- 查看 [FAQ](https://github.com/rustfs/rustfs/discussions/categories/q-a) 寻找常见问题和解决方案。
|
||||
- 加入我们的 [GitHub Discussions](https://github.com/rustfs/rustfs/discussions) 提问并分享您的经验。
|
||||
- 在我们的 [GitHub Issues](https://github.com/rustfs/rustfs/issues) 页面提交 Bug 报告或功能请求。
|
||||
|
||||
## 链接
|
||||
|
||||
- [文档](https://docs.rustfs.com) - 您应该阅读的手册
|
||||
- [更新日志](https://docs.rustfs.com/changelog) - 我们破坏和修复的内容
|
||||
- [GitHub 讨论](https://github.com/rustfs/rustfs/discussions) - 社区所在地
|
||||
- [官方文档](https://docs.rustfs.com) - 必读手册
|
||||
- [更新日志](https://github.com/rustfs/rustfs/releases) - 版本变更记录
|
||||
- [社区讨论](https://github.com/rustfs/rustfs/discussions) - 社区交流地
|
||||
|
||||
## 联系
|
||||
## 联系方式
|
||||
|
||||
- **错误报告**:[GitHub Issues](https://github.com/rustfs/rustfs/issues)
|
||||
- **商务合作**:<hello@rustfs.com>
|
||||
- **招聘**:<jobs@rustfs.com>
|
||||
- **一般讨论**:[GitHub 讨论](https://github.com/rustfs/rustfs/discussions)
|
||||
- **贡献**:[CONTRIBUTING.md](CONTRIBUTING.md)
|
||||
- **Bug 反馈**: [GitHub Issues](https://github.com/rustfs/rustfs/issues)
|
||||
- **商务合作**: [hello@rustfs.com](mailto:hello@rustfs.com)
|
||||
- **工作机会**: [jobs@rustfs.com](mailto:jobs@rustfs.com)
|
||||
- **一般讨论**: [GitHub Discussions](https://github.com/rustfs/rustfs/discussions)
|
||||
- **贡献指南**: [CONTRIBUTING.md](https://www.google.com/search?q=CONTRIBUTING.md)
|
||||
|
||||
## 贡献者
|
||||
|
||||
RustFS 是一个社区驱动的项目,我们感谢所有的贡献。查看[贡献者](https://github.com/rustfs/rustfs/graphs/contributors)页面,了解帮助
|
||||
RustFS 变得更好的杰出人员。
|
||||
RustFS 是一个社区驱动的项目,我们感谢所有的贡献。请查看 [贡献者](https://github.com/rustfs/rustfs/graphs/contributors) 页面,看看那些让 RustFS 变得更好的了不起的人们。
|
||||
|
||||
<a href="https://github.com/rustfs/rustfs/graphs/contributors">
|
||||
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="贡献者"/>
|
||||
</a >
|
||||
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="Contributors" />
|
||||
</a>
|
||||
|
||||
## Github 全球推荐榜
|
||||
## Github Trending Top
|
||||
|
||||
🚀 RustFS 受到了全世界开源爱好者和企业用户的喜欢,多次登顶 Github Trending 全球榜。
|
||||
🚀 RustFS 深受全球开源爱好者和企业用户的喜爱,经常荣登 GitHub Trending 榜单。
|
||||
|
||||
<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>
|
||||
|
||||
## Star 历史
|
||||
|
||||
[](https://www.star-history.com/#rustfs/rustfs&type=date&legend=top-left)
|
||||
|
||||
|
||||
## 许可证
|
||||
|
||||
[Apache 2.0](https://opensource.org/licenses/Apache-2.0)
|
||||
|
||||
**RustFS** 是 RustFS, Inc. 的商标。所有其他商标均为其各自所有者的财产。
|
||||
|
||||
|
||||
@@ -163,6 +163,35 @@ print_message() {
|
||||
echo -e "${color}${message}${NC}"
|
||||
}
|
||||
|
||||
# Prevent zig/ld from hitting macOS file descriptor defaults during linking
|
||||
ensure_file_descriptor_limit() {
|
||||
local required_limit=4096
|
||||
local current_limit
|
||||
current_limit=$(ulimit -Sn 2>/dev/null || echo "")
|
||||
|
||||
if [ -z "$current_limit" ] || [ "$current_limit" = "unlimited" ]; then
|
||||
return
|
||||
fi
|
||||
|
||||
if (( current_limit >= required_limit )); then
|
||||
return
|
||||
fi
|
||||
|
||||
local hard_limit target_limit
|
||||
hard_limit=$(ulimit -Hn 2>/dev/null || echo "")
|
||||
target_limit=$required_limit
|
||||
|
||||
if [ -n "$hard_limit" ] && [ "$hard_limit" != "unlimited" ] && (( hard_limit < required_limit )); then
|
||||
target_limit=$hard_limit
|
||||
fi
|
||||
|
||||
if ulimit -Sn "$target_limit" 2>/dev/null; then
|
||||
print_message $YELLOW "🔧 Increased open file limit from $current_limit to $target_limit to avoid ProcessFdQuotaExceeded"
|
||||
else
|
||||
print_message $YELLOW "⚠️ Unable to raise ulimit -n automatically (current: $current_limit, needed: $required_limit). Please run 'ulimit -n $required_limit' manually before building."
|
||||
fi
|
||||
}
|
||||
|
||||
# Get version from git
|
||||
get_version() {
|
||||
if git describe --abbrev=0 --tags >/dev/null 2>&1; then
|
||||
@@ -570,10 +599,11 @@ main() {
|
||||
fi
|
||||
fi
|
||||
|
||||
ensure_file_descriptor_limit
|
||||
|
||||
# Start build process
|
||||
build_rustfs
|
||||
}
|
||||
|
||||
# Run main function
|
||||
main
|
||||
|
||||
|
||||
@@ -13,10 +13,12 @@ keywords = ["RustFS", "AHM", "health-management", "scanner", "Minio"]
|
||||
categories = ["web-programming", "development-tools", "filesystem"]
|
||||
|
||||
[dependencies]
|
||||
rustfs-config = { workspace = true }
|
||||
rustfs-ecstore = { workspace = true }
|
||||
rustfs-common = { workspace = true }
|
||||
rustfs-filemeta = { workspace = true }
|
||||
rustfs-madmin = { workspace = true }
|
||||
rustfs-utils = { workspace = true }
|
||||
tokio = { workspace = true, features = ["full"] }
|
||||
tokio-util = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
@@ -40,3 +42,4 @@ serde_json = { workspace = true }
|
||||
serial_test = { workspace = true }
|
||||
tracing-subscriber = { workspace = true }
|
||||
tempfile = { workspace = true }
|
||||
heed = { workspace = true }
|
||||
|
||||
@@ -14,6 +14,10 @@
|
||||
|
||||
use thiserror::Error;
|
||||
|
||||
/// Custom error type for AHM operations
|
||||
/// This enum defines various error variants that can occur during
|
||||
/// the execution of AHM-related tasks, such as I/O errors, storage errors,
|
||||
/// configuration errors, and specific errors related to healing operations.
|
||||
#[derive(Debug, Error)]
|
||||
pub enum Error {
|
||||
#[error("I/O error: {0}")]
|
||||
@@ -85,9 +89,13 @@ pub enum Error {
|
||||
ProgressTrackingFailed { message: String },
|
||||
}
|
||||
|
||||
/// A specialized Result type for AHM operations
|
||||
///This type is a convenient alias for results returned by functions in the AHM crate,
|
||||
/// using the custom Error type defined above.
|
||||
pub type Result<T, E = Error> = std::result::Result<T, E>;
|
||||
|
||||
impl Error {
|
||||
/// Create an Other error from any error type
|
||||
pub fn other<E>(error: E) -> Self
|
||||
where
|
||||
E: Into<Box<dyn std::error::Error + Send + Sync>>,
|
||||
|
||||
@@ -12,18 +12,19 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::heal::{
|
||||
manager::HealManager,
|
||||
task::{HealOptions, HealPriority, HealRequest, HealType},
|
||||
utils,
|
||||
};
|
||||
|
||||
use crate::{Error, Result};
|
||||
use rustfs_common::heal_channel::{
|
||||
HealChannelCommand, HealChannelPriority, HealChannelReceiver, HealChannelRequest, HealChannelResponse, HealScanMode,
|
||||
publish_heal_response,
|
||||
};
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::mpsc;
|
||||
use tracing::{error, info};
|
||||
use tracing::{debug, error, info};
|
||||
|
||||
/// Heal channel processor
|
||||
pub struct HealChannelProcessor {
|
||||
@@ -60,7 +61,7 @@ impl HealChannelProcessor {
|
||||
}
|
||||
}
|
||||
None => {
|
||||
info!("Heal channel receiver closed, stopping processor");
|
||||
debug!("Heal channel receiver closed, stopping processor");
|
||||
break;
|
||||
}
|
||||
}
|
||||
@@ -89,7 +90,12 @@ impl HealChannelProcessor {
|
||||
|
||||
/// Process start request
|
||||
async fn process_start_request(&self, request: HealChannelRequest) -> Result<()> {
|
||||
info!("Processing heal start request: {} for bucket: {}", request.id, request.bucket);
|
||||
info!(
|
||||
"Processing heal start request: {} for bucket: {}/{}",
|
||||
request.id,
|
||||
request.bucket,
|
||||
request.object_prefix.as_deref().unwrap_or("")
|
||||
);
|
||||
|
||||
// Convert channel request to heal request
|
||||
let heal_request = self.convert_to_heal_request(request.clone())?;
|
||||
@@ -99,7 +105,6 @@ impl HealChannelProcessor {
|
||||
Ok(task_id) => {
|
||||
info!("Successfully submitted heal request: {} as task: {}", request.id, task_id);
|
||||
|
||||
// Send success response
|
||||
let response = HealChannelResponse {
|
||||
request_id: request.id,
|
||||
success: true,
|
||||
@@ -107,9 +112,7 @@ impl HealChannelProcessor {
|
||||
error: None,
|
||||
};
|
||||
|
||||
if let Err(e) = self.response_sender.send(response) {
|
||||
error!("Failed to send heal response: {}", e);
|
||||
}
|
||||
self.publish_response(response);
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to submit heal request: {} - {}", request.id, e);
|
||||
@@ -122,9 +125,7 @@ impl HealChannelProcessor {
|
||||
error: Some(e.to_string()),
|
||||
};
|
||||
|
||||
if let Err(e) = self.response_sender.send(response) {
|
||||
error!("Failed to send heal error response: {}", e);
|
||||
}
|
||||
self.publish_response(response);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -144,9 +145,7 @@ impl HealChannelProcessor {
|
||||
error: None,
|
||||
};
|
||||
|
||||
if let Err(e) = self.response_sender.send(response) {
|
||||
error!("Failed to send query response: {}", e);
|
||||
}
|
||||
self.publish_response(response);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -164,9 +163,7 @@ impl HealChannelProcessor {
|
||||
error: None,
|
||||
};
|
||||
|
||||
if let Err(e) = self.response_sender.send(response) {
|
||||
error!("Failed to send cancel response: {}", e);
|
||||
}
|
||||
self.publish_response(response);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
@@ -174,9 +171,12 @@ impl HealChannelProcessor {
|
||||
/// Convert channel request to heal request
|
||||
fn convert_to_heal_request(&self, request: HealChannelRequest) -> Result<HealRequest> {
|
||||
let heal_type = if let Some(disk_id) = &request.disk {
|
||||
let set_disk_id = utils::normalize_set_disk_id(disk_id).ok_or_else(|| Error::InvalidHealType {
|
||||
heal_type: format!("erasure-set({disk_id})"),
|
||||
})?;
|
||||
HealType::ErasureSet {
|
||||
buckets: vec![],
|
||||
set_disk_id: disk_id.clone(),
|
||||
set_disk_id,
|
||||
}
|
||||
} else if let Some(prefix) = &request.object_prefix {
|
||||
if !prefix.is_empty() {
|
||||
@@ -226,8 +226,340 @@ impl HealChannelProcessor {
|
||||
Ok(HealRequest::new(heal_type, options, priority))
|
||||
}
|
||||
|
||||
fn publish_response(&self, response: HealChannelResponse) {
|
||||
// Try to send to local channel first, but don't block broadcast on failure
|
||||
if let Err(e) = self.response_sender.send(response.clone()) {
|
||||
error!("Failed to enqueue heal response locally: {}", e);
|
||||
}
|
||||
// Always attempt to broadcast, even if local send failed
|
||||
// Use the original response for broadcast; local send uses a clone
|
||||
if let Err(e) = publish_heal_response(response) {
|
||||
error!("Failed to broadcast heal response: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
/// Get response sender for external use
|
||||
pub fn get_response_sender(&self) -> mpsc::UnboundedSender<HealChannelResponse> {
|
||||
self.response_sender.clone()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::heal::storage::HealStorageAPI;
|
||||
use rustfs_common::heal_channel::{HealChannelPriority, HealChannelRequest, HealScanMode};
|
||||
use std::sync::Arc;
|
||||
|
||||
// Mock storage for testing
|
||||
struct MockStorage;
|
||||
#[async_trait::async_trait]
|
||||
impl HealStorageAPI for MockStorage {
|
||||
async fn get_object_meta(
|
||||
&self,
|
||||
_bucket: &str,
|
||||
_object: &str,
|
||||
) -> crate::Result<Option<rustfs_ecstore::store_api::ObjectInfo>> {
|
||||
Ok(None)
|
||||
}
|
||||
async fn get_object_data(&self, _bucket: &str, _object: &str) -> crate::Result<Option<Vec<u8>>> {
|
||||
Ok(None)
|
||||
}
|
||||
async fn put_object_data(&self, _bucket: &str, _object: &str, _data: &[u8]) -> crate::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
async fn delete_object(&self, _bucket: &str, _object: &str) -> crate::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
async fn verify_object_integrity(&self, _bucket: &str, _object: &str) -> crate::Result<bool> {
|
||||
Ok(true)
|
||||
}
|
||||
async fn ec_decode_rebuild(&self, _bucket: &str, _object: &str) -> crate::Result<Vec<u8>> {
|
||||
Ok(vec![])
|
||||
}
|
||||
async fn get_disk_status(
|
||||
&self,
|
||||
_endpoint: &rustfs_ecstore::disk::endpoint::Endpoint,
|
||||
) -> crate::Result<crate::heal::storage::DiskStatus> {
|
||||
Ok(crate::heal::storage::DiskStatus::Ok)
|
||||
}
|
||||
async fn format_disk(&self, _endpoint: &rustfs_ecstore::disk::endpoint::Endpoint) -> crate::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
async fn get_bucket_info(&self, _bucket: &str) -> crate::Result<Option<rustfs_ecstore::store_api::BucketInfo>> {
|
||||
Ok(None)
|
||||
}
|
||||
async fn heal_bucket_metadata(&self, _bucket: &str) -> crate::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
async fn list_buckets(&self) -> crate::Result<Vec<rustfs_ecstore::store_api::BucketInfo>> {
|
||||
Ok(vec![])
|
||||
}
|
||||
async fn object_exists(&self, _bucket: &str, _object: &str) -> crate::Result<bool> {
|
||||
Ok(false)
|
||||
}
|
||||
async fn get_object_size(&self, _bucket: &str, _object: &str) -> crate::Result<Option<u64>> {
|
||||
Ok(None)
|
||||
}
|
||||
async fn get_object_checksum(&self, _bucket: &str, _object: &str) -> crate::Result<Option<String>> {
|
||||
Ok(None)
|
||||
}
|
||||
async fn heal_object(
|
||||
&self,
|
||||
_bucket: &str,
|
||||
_object: &str,
|
||||
_version_id: Option<&str>,
|
||||
_opts: &rustfs_common::heal_channel::HealOpts,
|
||||
) -> crate::Result<(rustfs_madmin::heal_commands::HealResultItem, Option<crate::Error>)> {
|
||||
Ok((rustfs_madmin::heal_commands::HealResultItem::default(), None))
|
||||
}
|
||||
async fn heal_bucket(
|
||||
&self,
|
||||
_bucket: &str,
|
||||
_opts: &rustfs_common::heal_channel::HealOpts,
|
||||
) -> crate::Result<rustfs_madmin::heal_commands::HealResultItem> {
|
||||
Ok(rustfs_madmin::heal_commands::HealResultItem::default())
|
||||
}
|
||||
async fn heal_format(
|
||||
&self,
|
||||
_dry_run: bool,
|
||||
) -> crate::Result<(rustfs_madmin::heal_commands::HealResultItem, Option<crate::Error>)> {
|
||||
Ok((rustfs_madmin::heal_commands::HealResultItem::default(), None))
|
||||
}
|
||||
async fn list_objects_for_heal(&self, _bucket: &str, _prefix: &str) -> crate::Result<Vec<String>> {
|
||||
Ok(vec![])
|
||||
}
|
||||
async fn list_objects_for_heal_page(
|
||||
&self,
|
||||
_bucket: &str,
|
||||
_prefix: &str,
|
||||
_continuation_token: Option<&str>,
|
||||
) -> crate::Result<(Vec<String>, Option<String>, bool)> {
|
||||
Ok((vec![], None, false))
|
||||
}
|
||||
async fn get_disk_for_resume(&self, _set_disk_id: &str) -> crate::Result<rustfs_ecstore::disk::DiskStore> {
|
||||
Err(crate::Error::other("Not implemented in mock"))
|
||||
}
|
||||
}
|
||||
|
||||
fn create_test_heal_manager() -> Arc<HealManager> {
|
||||
let storage: Arc<dyn HealStorageAPI> = Arc::new(MockStorage);
|
||||
Arc::new(HealManager::new(storage, None))
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_channel_processor_new() {
|
||||
let heal_manager = create_test_heal_manager();
|
||||
let processor = HealChannelProcessor::new(heal_manager);
|
||||
|
||||
// Verify processor is created successfully
|
||||
let _sender = processor.get_response_sender();
|
||||
// If we can get the sender, processor was created correctly
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_convert_to_heal_request_bucket() {
|
||||
let heal_manager = create_test_heal_manager();
|
||||
let processor = HealChannelProcessor::new(heal_manager);
|
||||
|
||||
let channel_request = HealChannelRequest {
|
||||
id: "test-id".to_string(),
|
||||
bucket: "test-bucket".to_string(),
|
||||
object_prefix: None,
|
||||
disk: None,
|
||||
priority: HealChannelPriority::Normal,
|
||||
scan_mode: None,
|
||||
remove_corrupted: None,
|
||||
recreate_missing: None,
|
||||
update_parity: None,
|
||||
recursive: None,
|
||||
dry_run: None,
|
||||
timeout_seconds: None,
|
||||
pool_index: None,
|
||||
set_index: None,
|
||||
force_start: false,
|
||||
};
|
||||
|
||||
let heal_request = processor.convert_to_heal_request(channel_request).unwrap();
|
||||
assert!(matches!(heal_request.heal_type, HealType::Bucket { .. }));
|
||||
assert_eq!(heal_request.priority, HealPriority::Normal);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_convert_to_heal_request_object() {
|
||||
let heal_manager = create_test_heal_manager();
|
||||
let processor = HealChannelProcessor::new(heal_manager);
|
||||
|
||||
let channel_request = HealChannelRequest {
|
||||
id: "test-id".to_string(),
|
||||
bucket: "test-bucket".to_string(),
|
||||
object_prefix: Some("test-object".to_string()),
|
||||
disk: None,
|
||||
priority: HealChannelPriority::High,
|
||||
scan_mode: Some(HealScanMode::Deep),
|
||||
remove_corrupted: Some(true),
|
||||
recreate_missing: Some(true),
|
||||
update_parity: Some(true),
|
||||
recursive: Some(false),
|
||||
dry_run: Some(false),
|
||||
timeout_seconds: Some(300),
|
||||
pool_index: Some(0),
|
||||
set_index: Some(1),
|
||||
force_start: false,
|
||||
};
|
||||
|
||||
let heal_request = processor.convert_to_heal_request(channel_request).unwrap();
|
||||
assert!(matches!(heal_request.heal_type, HealType::Object { .. }));
|
||||
assert_eq!(heal_request.priority, HealPriority::High);
|
||||
assert_eq!(heal_request.options.scan_mode, HealScanMode::Deep);
|
||||
assert!(heal_request.options.remove_corrupted);
|
||||
assert!(heal_request.options.recreate_missing);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_convert_to_heal_request_erasure_set() {
|
||||
let heal_manager = create_test_heal_manager();
|
||||
let processor = HealChannelProcessor::new(heal_manager);
|
||||
|
||||
let channel_request = HealChannelRequest {
|
||||
id: "test-id".to_string(),
|
||||
bucket: "test-bucket".to_string(),
|
||||
object_prefix: None,
|
||||
disk: Some("pool_0_set_1".to_string()),
|
||||
priority: HealChannelPriority::Critical,
|
||||
scan_mode: None,
|
||||
remove_corrupted: None,
|
||||
recreate_missing: None,
|
||||
update_parity: None,
|
||||
recursive: None,
|
||||
dry_run: None,
|
||||
timeout_seconds: None,
|
||||
pool_index: None,
|
||||
set_index: None,
|
||||
force_start: false,
|
||||
};
|
||||
|
||||
let heal_request = processor.convert_to_heal_request(channel_request).unwrap();
|
||||
assert!(matches!(heal_request.heal_type, HealType::ErasureSet { .. }));
|
||||
assert_eq!(heal_request.priority, HealPriority::Urgent);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_convert_to_heal_request_invalid_disk_id() {
|
||||
let heal_manager = create_test_heal_manager();
|
||||
let processor = HealChannelProcessor::new(heal_manager);
|
||||
|
||||
let channel_request = HealChannelRequest {
|
||||
id: "test-id".to_string(),
|
||||
bucket: "test-bucket".to_string(),
|
||||
object_prefix: None,
|
||||
disk: Some("invalid-disk-id".to_string()),
|
||||
priority: HealChannelPriority::Normal,
|
||||
scan_mode: None,
|
||||
remove_corrupted: None,
|
||||
recreate_missing: None,
|
||||
update_parity: None,
|
||||
recursive: None,
|
||||
dry_run: None,
|
||||
timeout_seconds: None,
|
||||
pool_index: None,
|
||||
set_index: None,
|
||||
force_start: false,
|
||||
};
|
||||
|
||||
let result = processor.convert_to_heal_request(channel_request);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_convert_to_heal_request_priority_mapping() {
|
||||
let heal_manager = create_test_heal_manager();
|
||||
let processor = HealChannelProcessor::new(heal_manager);
|
||||
|
||||
let priorities = vec![
|
||||
(HealChannelPriority::Low, HealPriority::Low),
|
||||
(HealChannelPriority::Normal, HealPriority::Normal),
|
||||
(HealChannelPriority::High, HealPriority::High),
|
||||
(HealChannelPriority::Critical, HealPriority::Urgent),
|
||||
];
|
||||
|
||||
for (channel_priority, expected_heal_priority) in priorities {
|
||||
let channel_request = HealChannelRequest {
|
||||
id: "test-id".to_string(),
|
||||
bucket: "test-bucket".to_string(),
|
||||
object_prefix: None,
|
||||
disk: None,
|
||||
priority: channel_priority,
|
||||
scan_mode: None,
|
||||
remove_corrupted: None,
|
||||
recreate_missing: None,
|
||||
update_parity: None,
|
||||
recursive: None,
|
||||
dry_run: None,
|
||||
timeout_seconds: None,
|
||||
pool_index: None,
|
||||
set_index: None,
|
||||
force_start: false,
|
||||
};
|
||||
|
||||
let heal_request = processor.convert_to_heal_request(channel_request).unwrap();
|
||||
assert_eq!(heal_request.priority, expected_heal_priority);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_convert_to_heal_request_force_start() {
|
||||
let heal_manager = create_test_heal_manager();
|
||||
let processor = HealChannelProcessor::new(heal_manager);
|
||||
|
||||
let channel_request = HealChannelRequest {
|
||||
id: "test-id".to_string(),
|
||||
bucket: "test-bucket".to_string(),
|
||||
object_prefix: None,
|
||||
disk: None,
|
||||
priority: HealChannelPriority::Normal,
|
||||
scan_mode: None,
|
||||
remove_corrupted: Some(false),
|
||||
recreate_missing: Some(false),
|
||||
update_parity: Some(false),
|
||||
recursive: None,
|
||||
dry_run: None,
|
||||
timeout_seconds: None,
|
||||
pool_index: None,
|
||||
set_index: None,
|
||||
force_start: true, // Should override the above false values
|
||||
};
|
||||
|
||||
let heal_request = processor.convert_to_heal_request(channel_request).unwrap();
|
||||
assert!(heal_request.options.remove_corrupted);
|
||||
assert!(heal_request.options.recreate_missing);
|
||||
assert!(heal_request.options.update_parity);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_convert_to_heal_request_empty_object_prefix() {
|
||||
let heal_manager = create_test_heal_manager();
|
||||
let processor = HealChannelProcessor::new(heal_manager);
|
||||
|
||||
let channel_request = HealChannelRequest {
|
||||
id: "test-id".to_string(),
|
||||
bucket: "test-bucket".to_string(),
|
||||
object_prefix: Some("".to_string()), // Empty prefix should be treated as bucket heal
|
||||
disk: None,
|
||||
priority: HealChannelPriority::Normal,
|
||||
scan_mode: None,
|
||||
remove_corrupted: None,
|
||||
recreate_missing: None,
|
||||
update_parity: None,
|
||||
recursive: None,
|
||||
dry_run: None,
|
||||
timeout_seconds: None,
|
||||
pool_index: None,
|
||||
set_index: None,
|
||||
force_start: false,
|
||||
};
|
||||
|
||||
let heal_request = processor.convert_to_heal_request(channel_request).unwrap();
|
||||
assert!(matches!(heal_request.heal_type, HealType::Bucket { .. }));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,12 +12,12 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::error::{Error, Result};
|
||||
use crate::heal::{
|
||||
progress::HealProgress,
|
||||
resume::{CheckpointManager, ResumeManager, ResumeUtils},
|
||||
storage::HealStorageAPI,
|
||||
};
|
||||
use crate::{Error, Result};
|
||||
use futures::future::join_all;
|
||||
use rustfs_common::heal_channel::{HealOpts, HealScanMode};
|
||||
use rustfs_ecstore::disk::DiskStore;
|
||||
@@ -49,14 +49,15 @@ impl ErasureSetHealer {
|
||||
}
|
||||
|
||||
/// execute erasure set heal with resume
|
||||
#[tracing::instrument(skip(self, buckets), fields(set_disk_id = %set_disk_id, bucket_count = buckets.len()))]
|
||||
pub async fn heal_erasure_set(&self, buckets: &[String], set_disk_id: &str) -> Result<()> {
|
||||
info!("Starting erasure set heal for {} buckets on set disk {}", buckets.len(), set_disk_id);
|
||||
info!("Starting erasure set heal");
|
||||
|
||||
// 1. generate or get task id
|
||||
let task_id = self.get_or_create_task_id(set_disk_id).await?;
|
||||
|
||||
// 2. initialize or resume resume state
|
||||
let (resume_manager, checkpoint_manager) = self.initialize_resume_state(&task_id, buckets).await?;
|
||||
let (resume_manager, checkpoint_manager) = self.initialize_resume_state(&task_id, set_disk_id, buckets).await?;
|
||||
|
||||
// 3. execute heal with resume
|
||||
let result = self
|
||||
@@ -77,25 +78,38 @@ impl ErasureSetHealer {
|
||||
}
|
||||
|
||||
/// get or create task id
|
||||
async fn get_or_create_task_id(&self, _set_disk_id: &str) -> Result<String> {
|
||||
async fn get_or_create_task_id(&self, set_disk_id: &str) -> Result<String> {
|
||||
// check if there are resumable tasks
|
||||
let resumable_tasks = ResumeUtils::get_resumable_tasks(&self.disk).await?;
|
||||
|
||||
for task_id in resumable_tasks {
|
||||
if ResumeUtils::can_resume_task(&self.disk, &task_id).await {
|
||||
info!("Found resumable task: {}", task_id);
|
||||
return Ok(task_id);
|
||||
match ResumeManager::load_from_disk(self.disk.clone(), &task_id).await {
|
||||
Ok(manager) => {
|
||||
let state = manager.get_state().await;
|
||||
if state.set_disk_id == set_disk_id && ResumeUtils::can_resume_task(&self.disk, &task_id).await {
|
||||
info!("Found resumable task: {} for set {}", task_id, set_disk_id);
|
||||
return Ok(task_id);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("Failed to load resume state for task {}: {}", task_id, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// create new task id
|
||||
let task_id = ResumeUtils::generate_task_id();
|
||||
let task_id = format!("{}_{}", set_disk_id, ResumeUtils::generate_task_id());
|
||||
info!("Created new heal task: {}", task_id);
|
||||
Ok(task_id)
|
||||
}
|
||||
|
||||
/// initialize or resume resume state
|
||||
async fn initialize_resume_state(&self, task_id: &str, buckets: &[String]) -> Result<(ResumeManager, CheckpointManager)> {
|
||||
async fn initialize_resume_state(
|
||||
&self,
|
||||
task_id: &str,
|
||||
set_disk_id: &str,
|
||||
buckets: &[String],
|
||||
) -> Result<(ResumeManager, CheckpointManager)> {
|
||||
// check if resume state exists
|
||||
if ResumeManager::has_resume_state(&self.disk, task_id).await {
|
||||
info!("Loading existing resume state for task: {}", task_id);
|
||||
@@ -111,8 +125,14 @@ impl ErasureSetHealer {
|
||||
} else {
|
||||
info!("Creating new resume state for task: {}", task_id);
|
||||
|
||||
let resume_manager =
|
||||
ResumeManager::new(self.disk.clone(), task_id.to_string(), "erasure_set".to_string(), buckets.to_vec()).await?;
|
||||
let resume_manager = ResumeManager::new(
|
||||
self.disk.clone(),
|
||||
task_id.to_string(),
|
||||
"erasure_set".to_string(),
|
||||
set_disk_id.to_string(),
|
||||
buckets.to_vec(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let checkpoint_manager = CheckpointManager::new(self.disk.clone(), task_id.to_string()).await?;
|
||||
|
||||
@@ -162,6 +182,7 @@ impl ErasureSetHealer {
|
||||
let bucket_result = self
|
||||
.heal_bucket_with_resume(
|
||||
bucket,
|
||||
bucket_idx,
|
||||
&mut current_object_index,
|
||||
&mut processed_objects,
|
||||
&mut successful_objects,
|
||||
@@ -182,7 +203,7 @@ impl ErasureSetHealer {
|
||||
|
||||
// check cancel status
|
||||
if self.cancel_token.is_cancelled() {
|
||||
info!("Heal task cancelled");
|
||||
warn!("Heal task cancelled");
|
||||
return Err(Error::TaskCancelled);
|
||||
}
|
||||
|
||||
@@ -211,9 +232,11 @@ impl ErasureSetHealer {
|
||||
|
||||
/// heal single bucket with resume
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
#[tracing::instrument(skip(self, current_object_index, processed_objects, successful_objects, failed_objects, _skipped_objects, resume_manager, checkpoint_manager), fields(bucket = %bucket, bucket_index = bucket_index))]
|
||||
async fn heal_bucket_with_resume(
|
||||
&self,
|
||||
bucket: &str,
|
||||
bucket_index: usize,
|
||||
current_object_index: &mut usize,
|
||||
processed_objects: &mut u64,
|
||||
successful_objects: &mut u64,
|
||||
@@ -222,7 +245,7 @@ impl ErasureSetHealer {
|
||||
resume_manager: &ResumeManager,
|
||||
checkpoint_manager: &CheckpointManager,
|
||||
) -> Result<()> {
|
||||
info!("Starting heal for bucket: {} from object index {}", bucket, current_object_index);
|
||||
info!(target: "rustfs:ahm:heal_bucket_with_resume" ,"Starting heal for bucket from object index {}", current_object_index);
|
||||
|
||||
// 1. get bucket info
|
||||
let _bucket_info = match self.storage.get_bucket_info(bucket).await? {
|
||||
@@ -233,80 +256,114 @@ impl ErasureSetHealer {
|
||||
}
|
||||
};
|
||||
|
||||
// 2. get objects to heal
|
||||
let objects = self.storage.list_objects_for_heal(bucket, "").await?;
|
||||
// 2. process objects with pagination to avoid loading all objects into memory
|
||||
let mut continuation_token: Option<String> = None;
|
||||
let mut global_obj_idx = 0usize;
|
||||
|
||||
// 3. continue from checkpoint
|
||||
for (obj_idx, object) in objects.iter().enumerate().skip(*current_object_index) {
|
||||
// check if already processed
|
||||
if checkpoint_manager.get_checkpoint().await.processed_objects.contains(object) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// update current object
|
||||
resume_manager
|
||||
.set_current_item(Some(bucket.to_string()), Some(object.clone()))
|
||||
loop {
|
||||
// Get one page of objects
|
||||
let (objects, next_token, is_truncated) = self
|
||||
.storage
|
||||
.list_objects_for_heal_page(bucket, "", continuation_token.as_deref())
|
||||
.await?;
|
||||
|
||||
// Check if object still exists before attempting heal
|
||||
let object_exists = match self.storage.object_exists(bucket, object).await {
|
||||
Ok(exists) => exists,
|
||||
Err(e) => {
|
||||
warn!("Failed to check existence of {}/{}: {}, skipping", bucket, object, e);
|
||||
*current_object_index = obj_idx + 1;
|
||||
// Process objects in this page
|
||||
for object in objects {
|
||||
// Skip objects before the checkpoint
|
||||
if global_obj_idx < *current_object_index {
|
||||
global_obj_idx += 1;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
if !object_exists {
|
||||
info!(
|
||||
"Object {}/{} no longer exists, skipping heal (likely deleted intentionally)",
|
||||
bucket, object
|
||||
);
|
||||
checkpoint_manager.add_processed_object(object.clone()).await?;
|
||||
*successful_objects += 1; // Treat as successful - object is gone as intended
|
||||
*current_object_index = obj_idx + 1;
|
||||
continue;
|
||||
}
|
||||
// check if already processed
|
||||
if checkpoint_manager.get_checkpoint().await.processed_objects.contains(&object) {
|
||||
global_obj_idx += 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
// heal object
|
||||
let heal_opts = HealOpts {
|
||||
scan_mode: HealScanMode::Normal,
|
||||
remove: true,
|
||||
recreate: true, // Keep recreate enabled for legitimate heal scenarios
|
||||
..Default::default()
|
||||
};
|
||||
// update current object
|
||||
resume_manager
|
||||
.set_current_item(Some(bucket.to_string()), Some(object.clone()))
|
||||
.await?;
|
||||
|
||||
match self.storage.heal_object(bucket, object, None, &heal_opts).await {
|
||||
Ok((_result, None)) => {
|
||||
*successful_objects += 1;
|
||||
// Check if object still exists before attempting heal
|
||||
let object_exists = match self.storage.object_exists(bucket, &object).await {
|
||||
Ok(exists) => exists,
|
||||
Err(e) => {
|
||||
warn!("Failed to check existence of {}/{}: {}, marking as failed", bucket, object, e);
|
||||
*failed_objects += 1;
|
||||
checkpoint_manager.add_failed_object(object.clone()).await?;
|
||||
global_obj_idx += 1;
|
||||
*current_object_index = global_obj_idx;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
if !object_exists {
|
||||
info!(
|
||||
target: "rustfs:ahm:heal_bucket_with_resume" ,"Object {}/{} no longer exists, skipping heal (likely deleted intentionally)",
|
||||
bucket, object
|
||||
);
|
||||
checkpoint_manager.add_processed_object(object.clone()).await?;
|
||||
info!("Successfully healed object {}/{}", bucket, object);
|
||||
*successful_objects += 1; // Treat as successful - object is gone as intended
|
||||
global_obj_idx += 1;
|
||||
*current_object_index = global_obj_idx;
|
||||
continue;
|
||||
}
|
||||
Ok((_, Some(err))) => {
|
||||
*failed_objects += 1;
|
||||
checkpoint_manager.add_failed_object(object.clone()).await?;
|
||||
warn!("Failed to heal object {}/{}: {}", bucket, object, err);
|
||||
|
||||
// heal object
|
||||
let heal_opts = HealOpts {
|
||||
scan_mode: HealScanMode::Normal,
|
||||
remove: true,
|
||||
recreate: true, // Keep recreate enabled for legitimate heal scenarios
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
match self.storage.heal_object(bucket, &object, None, &heal_opts).await {
|
||||
Ok((_result, None)) => {
|
||||
*successful_objects += 1;
|
||||
checkpoint_manager.add_processed_object(object.clone()).await?;
|
||||
info!("Successfully healed object {}/{}", bucket, object);
|
||||
}
|
||||
Ok((_, Some(err))) => {
|
||||
*failed_objects += 1;
|
||||
checkpoint_manager.add_failed_object(object.clone()).await?;
|
||||
warn!("Failed to heal object {}/{}: {}", bucket, object, err);
|
||||
}
|
||||
Err(err) => {
|
||||
*failed_objects += 1;
|
||||
checkpoint_manager.add_failed_object(object.clone()).await?;
|
||||
warn!("Error healing object {}/{}: {}", bucket, object, err);
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
*failed_objects += 1;
|
||||
checkpoint_manager.add_failed_object(object.clone()).await?;
|
||||
warn!("Error healing object {}/{}: {}", bucket, object, err);
|
||||
|
||||
*processed_objects += 1;
|
||||
global_obj_idx += 1;
|
||||
*current_object_index = global_obj_idx;
|
||||
|
||||
// check cancel status
|
||||
if self.cancel_token.is_cancelled() {
|
||||
info!("Heal task cancelled during object processing");
|
||||
return Err(Error::TaskCancelled);
|
||||
}
|
||||
|
||||
// save checkpoint periodically
|
||||
if global_obj_idx % 100 == 0 {
|
||||
checkpoint_manager
|
||||
.update_position(bucket_index, *current_object_index)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
|
||||
*processed_objects += 1;
|
||||
*current_object_index = obj_idx + 1;
|
||||
|
||||
// check cancel status
|
||||
if self.cancel_token.is_cancelled() {
|
||||
info!("Heal task cancelled during object processing");
|
||||
return Err(Error::TaskCancelled);
|
||||
// Check if there are more pages
|
||||
if !is_truncated {
|
||||
break;
|
||||
}
|
||||
|
||||
// save checkpoint periodically
|
||||
if obj_idx % 100 == 0 {
|
||||
checkpoint_manager.update_position(0, *current_object_index).await?;
|
||||
continuation_token = next_token;
|
||||
if continuation_token.is_none() {
|
||||
warn!("List is truncated but no continuation token provided for {}", bucket);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -337,7 +394,10 @@ impl ErasureSetHealer {
|
||||
let cancel_token = self.cancel_token.clone();
|
||||
|
||||
async move {
|
||||
let _permit = semaphore.acquire().await.unwrap();
|
||||
let _permit = semaphore
|
||||
.acquire()
|
||||
.await
|
||||
.map_err(|e| Error::other(format!("Failed to acquire semaphore for bucket heal: {e}")))?;
|
||||
|
||||
if cancel_token.is_cancelled() {
|
||||
return Err(Error::TaskCancelled);
|
||||
@@ -369,16 +429,12 @@ impl ErasureSetHealer {
|
||||
}
|
||||
};
|
||||
|
||||
// 2. get objects to heal
|
||||
let objects = storage.list_objects_for_heal(bucket, "").await?;
|
||||
// 2. process objects with pagination to avoid loading all objects into memory
|
||||
let mut continuation_token: Option<String> = None;
|
||||
let mut total_scanned = 0u64;
|
||||
let mut total_success = 0u64;
|
||||
let mut total_failed = 0u64;
|
||||
|
||||
// 3. update progress
|
||||
{
|
||||
let mut p = progress.write().await;
|
||||
p.objects_scanned += objects.len() as u64;
|
||||
}
|
||||
|
||||
// 4. heal objects concurrently
|
||||
let heal_opts = HealOpts {
|
||||
scan_mode: HealScanMode::Normal,
|
||||
remove: true, // remove corrupted data
|
||||
@@ -386,27 +442,65 @@ impl ErasureSetHealer {
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let object_results = Self::heal_objects_concurrently(storage, bucket, &objects, &heal_opts, progress).await;
|
||||
loop {
|
||||
// Get one page of objects
|
||||
let (objects, next_token, is_truncated) = storage
|
||||
.list_objects_for_heal_page(bucket, "", continuation_token.as_deref())
|
||||
.await?;
|
||||
|
||||
// 5. count results
|
||||
let (success_count, failure_count) = object_results
|
||||
.into_iter()
|
||||
.fold((0, 0), |(success, failure), result| match result {
|
||||
Ok(_) => (success + 1, failure),
|
||||
Err(_) => (success, failure + 1),
|
||||
});
|
||||
let page_count = objects.len() as u64;
|
||||
total_scanned += page_count;
|
||||
|
||||
// 6. update progress
|
||||
// 3. update progress
|
||||
{
|
||||
let mut p = progress.write().await;
|
||||
p.objects_scanned = total_scanned;
|
||||
}
|
||||
|
||||
// 4. heal objects concurrently for this page
|
||||
let object_results = Self::heal_objects_concurrently(storage, bucket, &objects, &heal_opts, progress).await;
|
||||
|
||||
// 5. count results for this page
|
||||
let (success_count, failure_count) =
|
||||
object_results
|
||||
.into_iter()
|
||||
.fold((0, 0), |(success, failure), result| match result {
|
||||
Ok(_) => (success + 1, failure),
|
||||
Err(_) => (success, failure + 1),
|
||||
});
|
||||
|
||||
total_success += success_count;
|
||||
total_failed += failure_count;
|
||||
|
||||
// 6. update progress
|
||||
{
|
||||
let mut p = progress.write().await;
|
||||
p.objects_healed = total_success;
|
||||
p.objects_failed = total_failed;
|
||||
p.set_current_object(Some(format!("processing bucket: {bucket} (page)")));
|
||||
}
|
||||
|
||||
// Check if there are more pages
|
||||
if !is_truncated {
|
||||
break;
|
||||
}
|
||||
|
||||
continuation_token = next_token;
|
||||
if continuation_token.is_none() {
|
||||
warn!("List is truncated but no continuation token provided for {}", bucket);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// 7. final progress update
|
||||
{
|
||||
let mut p = progress.write().await;
|
||||
p.objects_healed += success_count;
|
||||
p.objects_failed += failure_count;
|
||||
p.set_current_object(Some(format!("completed bucket: {bucket}")));
|
||||
}
|
||||
|
||||
info!(
|
||||
"Completed heal for bucket {}: {} success, {} failures",
|
||||
bucket, success_count, failure_count
|
||||
"Completed heal for bucket {}: {} success, {} failures (total scanned: {})",
|
||||
bucket, total_success, total_failed, total_scanned
|
||||
);
|
||||
|
||||
Ok(())
|
||||
@@ -432,7 +526,10 @@ impl ErasureSetHealer {
|
||||
let semaphore = semaphore.clone();
|
||||
|
||||
async move {
|
||||
let _permit = semaphore.acquire().await.unwrap();
|
||||
let _permit = semaphore
|
||||
.acquire()
|
||||
.await
|
||||
.map_err(|e| Error::other(format!("Failed to acquire semaphore for object heal: {e}")))?;
|
||||
|
||||
match storage.heal_object(&bucket, &object, None, &heal_opts).await {
|
||||
Ok((_result, None)) => {
|
||||
|
||||
@@ -12,7 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::heal::task::{HealOptions, HealPriority, HealRequest, HealType};
|
||||
use crate::heal::{HealOptions, HealPriority, HealRequest, HealType};
|
||||
use crate::{Error, Result};
|
||||
use rustfs_ecstore::disk::endpoint::Endpoint;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::time::SystemTime;
|
||||
@@ -104,7 +105,7 @@ pub enum HealEvent {
|
||||
|
||||
impl HealEvent {
|
||||
/// Convert HealEvent to HealRequest
|
||||
pub fn to_heal_request(&self) -> HealRequest {
|
||||
pub fn to_heal_request(&self) -> Result<HealRequest> {
|
||||
match self {
|
||||
HealEvent::ObjectCorruption {
|
||||
bucket,
|
||||
@@ -112,7 +113,7 @@ impl HealEvent {
|
||||
version_id,
|
||||
severity,
|
||||
..
|
||||
} => HealRequest::new(
|
||||
} => Ok(HealRequest::new(
|
||||
HealType::Object {
|
||||
bucket: bucket.clone(),
|
||||
object: object.clone(),
|
||||
@@ -120,13 +121,13 @@ impl HealEvent {
|
||||
},
|
||||
HealOptions::default(),
|
||||
Self::severity_to_priority(severity),
|
||||
),
|
||||
)),
|
||||
HealEvent::ObjectMissing {
|
||||
bucket,
|
||||
object,
|
||||
version_id,
|
||||
..
|
||||
} => HealRequest::new(
|
||||
} => Ok(HealRequest::new(
|
||||
HealType::Object {
|
||||
bucket: bucket.clone(),
|
||||
object: object.clone(),
|
||||
@@ -134,34 +135,38 @@ impl HealEvent {
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::High,
|
||||
),
|
||||
HealEvent::MetadataCorruption { bucket, object, .. } => HealRequest::new(
|
||||
)),
|
||||
HealEvent::MetadataCorruption { bucket, object, .. } => Ok(HealRequest::new(
|
||||
HealType::Metadata {
|
||||
bucket: bucket.clone(),
|
||||
object: object.clone(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::High,
|
||||
),
|
||||
)),
|
||||
HealEvent::DiskStatusChange { endpoint, .. } => {
|
||||
// Convert disk status change to erasure set heal
|
||||
// Note: This requires access to storage to get bucket list, which is not available here
|
||||
// The actual bucket list will need to be provided by the caller or retrieved differently
|
||||
HealRequest::new(
|
||||
let set_disk_id = crate::heal::utils::format_set_disk_id_from_i32(endpoint.pool_idx, endpoint.set_idx)
|
||||
.ok_or_else(|| Error::InvalidHealType {
|
||||
heal_type: format!("erasure-set(pool={}, set={})", endpoint.pool_idx, endpoint.set_idx),
|
||||
})?;
|
||||
Ok(HealRequest::new(
|
||||
HealType::ErasureSet {
|
||||
buckets: vec![], // Empty bucket list - caller should populate this
|
||||
set_disk_id: format!("{}_{}", endpoint.pool_idx, endpoint.set_idx),
|
||||
set_disk_id,
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::High,
|
||||
)
|
||||
))
|
||||
}
|
||||
HealEvent::ECDecodeFailure {
|
||||
bucket,
|
||||
object,
|
||||
version_id,
|
||||
..
|
||||
} => HealRequest::new(
|
||||
} => Ok(HealRequest::new(
|
||||
HealType::ECDecode {
|
||||
bucket: bucket.clone(),
|
||||
object: object.clone(),
|
||||
@@ -169,13 +174,13 @@ impl HealEvent {
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::Urgent,
|
||||
),
|
||||
)),
|
||||
HealEvent::ChecksumMismatch {
|
||||
bucket,
|
||||
object,
|
||||
version_id,
|
||||
..
|
||||
} => HealRequest::new(
|
||||
} => Ok(HealRequest::new(
|
||||
HealType::Object {
|
||||
bucket: bucket.clone(),
|
||||
object: object.clone(),
|
||||
@@ -183,17 +188,19 @@ impl HealEvent {
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::High,
|
||||
),
|
||||
HealEvent::BucketMetadataCorruption { bucket, .. } => {
|
||||
HealRequest::new(HealType::Bucket { bucket: bucket.clone() }, HealOptions::default(), HealPriority::High)
|
||||
}
|
||||
HealEvent::MRFMetadataCorruption { meta_path, .. } => HealRequest::new(
|
||||
)),
|
||||
HealEvent::BucketMetadataCorruption { bucket, .. } => Ok(HealRequest::new(
|
||||
HealType::Bucket { bucket: bucket.clone() },
|
||||
HealOptions::default(),
|
||||
HealPriority::High,
|
||||
)),
|
||||
HealEvent::MRFMetadataCorruption { meta_path, .. } => Ok(HealRequest::new(
|
||||
HealType::MRF {
|
||||
meta_path: meta_path.clone(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::High,
|
||||
),
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -357,3 +364,319 @@ impl Default for HealEventHandler {
|
||||
Self::new(1000)
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::heal::task::{HealPriority, HealType};
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_object_corruption_to_request() {
|
||||
let event = HealEvent::ObjectCorruption {
|
||||
bucket: "test-bucket".to_string(),
|
||||
object: "test-object".to_string(),
|
||||
version_id: None,
|
||||
corruption_type: CorruptionType::DataCorruption,
|
||||
severity: Severity::High,
|
||||
};
|
||||
|
||||
let request = event.to_heal_request().unwrap();
|
||||
assert!(matches!(request.heal_type, HealType::Object { .. }));
|
||||
assert_eq!(request.priority, HealPriority::High);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_object_missing_to_request() {
|
||||
let event = HealEvent::ObjectMissing {
|
||||
bucket: "test-bucket".to_string(),
|
||||
object: "test-object".to_string(),
|
||||
version_id: Some("v1".to_string()),
|
||||
expected_locations: vec![0, 1],
|
||||
available_locations: vec![2, 3],
|
||||
};
|
||||
|
||||
let request = event.to_heal_request().unwrap();
|
||||
assert!(matches!(request.heal_type, HealType::Object { .. }));
|
||||
assert_eq!(request.priority, HealPriority::High);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_metadata_corruption_to_request() {
|
||||
let event = HealEvent::MetadataCorruption {
|
||||
bucket: "test-bucket".to_string(),
|
||||
object: "test-object".to_string(),
|
||||
corruption_type: CorruptionType::MetadataCorruption,
|
||||
};
|
||||
|
||||
let request = event.to_heal_request().unwrap();
|
||||
assert!(matches!(request.heal_type, HealType::Metadata { .. }));
|
||||
assert_eq!(request.priority, HealPriority::High);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_ec_decode_failure_to_request() {
|
||||
let event = HealEvent::ECDecodeFailure {
|
||||
bucket: "test-bucket".to_string(),
|
||||
object: "test-object".to_string(),
|
||||
version_id: None,
|
||||
missing_shards: vec![0, 1],
|
||||
available_shards: vec![2, 3, 4],
|
||||
};
|
||||
|
||||
let request = event.to_heal_request().unwrap();
|
||||
assert!(matches!(request.heal_type, HealType::ECDecode { .. }));
|
||||
assert_eq!(request.priority, HealPriority::Urgent);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_checksum_mismatch_to_request() {
|
||||
let event = HealEvent::ChecksumMismatch {
|
||||
bucket: "test-bucket".to_string(),
|
||||
object: "test-object".to_string(),
|
||||
version_id: None,
|
||||
expected_checksum: "abc123".to_string(),
|
||||
actual_checksum: "def456".to_string(),
|
||||
};
|
||||
|
||||
let request = event.to_heal_request().unwrap();
|
||||
assert!(matches!(request.heal_type, HealType::Object { .. }));
|
||||
assert_eq!(request.priority, HealPriority::High);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_bucket_metadata_corruption_to_request() {
|
||||
let event = HealEvent::BucketMetadataCorruption {
|
||||
bucket: "test-bucket".to_string(),
|
||||
corruption_type: CorruptionType::MetadataCorruption,
|
||||
};
|
||||
|
||||
let request = event.to_heal_request().unwrap();
|
||||
assert!(matches!(request.heal_type, HealType::Bucket { .. }));
|
||||
assert_eq!(request.priority, HealPriority::High);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_mrf_metadata_corruption_to_request() {
|
||||
let event = HealEvent::MRFMetadataCorruption {
|
||||
meta_path: "test-bucket/test-object".to_string(),
|
||||
corruption_type: CorruptionType::MetadataCorruption,
|
||||
};
|
||||
|
||||
let request = event.to_heal_request().unwrap();
|
||||
assert!(matches!(request.heal_type, HealType::MRF { .. }));
|
||||
assert_eq!(request.priority, HealPriority::High);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_severity_to_priority() {
|
||||
let event_low = HealEvent::ObjectCorruption {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
corruption_type: CorruptionType::DataCorruption,
|
||||
severity: Severity::Low,
|
||||
};
|
||||
let request = event_low.to_heal_request().unwrap();
|
||||
assert_eq!(request.priority, HealPriority::Low);
|
||||
|
||||
let event_medium = HealEvent::ObjectCorruption {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
corruption_type: CorruptionType::DataCorruption,
|
||||
severity: Severity::Medium,
|
||||
};
|
||||
let request = event_medium.to_heal_request().unwrap();
|
||||
assert_eq!(request.priority, HealPriority::Normal);
|
||||
|
||||
let event_high = HealEvent::ObjectCorruption {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
corruption_type: CorruptionType::DataCorruption,
|
||||
severity: Severity::High,
|
||||
};
|
||||
let request = event_high.to_heal_request().unwrap();
|
||||
assert_eq!(request.priority, HealPriority::High);
|
||||
|
||||
let event_critical = HealEvent::ObjectCorruption {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
corruption_type: CorruptionType::DataCorruption,
|
||||
severity: Severity::Critical,
|
||||
};
|
||||
let request = event_critical.to_heal_request().unwrap();
|
||||
assert_eq!(request.priority, HealPriority::Urgent);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_description() {
|
||||
let event = HealEvent::ObjectCorruption {
|
||||
bucket: "test-bucket".to_string(),
|
||||
object: "test-object".to_string(),
|
||||
version_id: None,
|
||||
corruption_type: CorruptionType::DataCorruption,
|
||||
severity: Severity::High,
|
||||
};
|
||||
|
||||
let desc = event.description();
|
||||
assert!(desc.contains("Object corruption detected"));
|
||||
assert!(desc.contains("test-bucket/test-object"));
|
||||
assert!(desc.contains("DataCorruption"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_severity() {
|
||||
let event = HealEvent::ECDecodeFailure {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
missing_shards: vec![],
|
||||
available_shards: vec![],
|
||||
};
|
||||
assert_eq!(event.severity(), Severity::Critical);
|
||||
|
||||
let event = HealEvent::ObjectMissing {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
expected_locations: vec![],
|
||||
available_locations: vec![],
|
||||
};
|
||||
assert_eq!(event.severity(), Severity::High);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_handler_new() {
|
||||
let handler = HealEventHandler::new(10);
|
||||
assert_eq!(handler.event_count(), 0);
|
||||
assert_eq!(handler.max_events, 10);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_handler_default() {
|
||||
let handler = HealEventHandler::default();
|
||||
assert_eq!(handler.max_events, 1000);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_handler_add_event() {
|
||||
let mut handler = HealEventHandler::new(3);
|
||||
let event = HealEvent::ObjectCorruption {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
corruption_type: CorruptionType::DataCorruption,
|
||||
severity: Severity::High,
|
||||
};
|
||||
|
||||
handler.add_event(event.clone());
|
||||
assert_eq!(handler.event_count(), 1);
|
||||
|
||||
handler.add_event(event.clone());
|
||||
handler.add_event(event.clone());
|
||||
assert_eq!(handler.event_count(), 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_handler_max_events() {
|
||||
let mut handler = HealEventHandler::new(2);
|
||||
let event = HealEvent::ObjectCorruption {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
corruption_type: CorruptionType::DataCorruption,
|
||||
severity: Severity::High,
|
||||
};
|
||||
|
||||
handler.add_event(event.clone());
|
||||
handler.add_event(event.clone());
|
||||
handler.add_event(event.clone()); // Should remove oldest
|
||||
|
||||
assert_eq!(handler.event_count(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_handler_get_events() {
|
||||
let mut handler = HealEventHandler::new(10);
|
||||
let event = HealEvent::ObjectCorruption {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
corruption_type: CorruptionType::DataCorruption,
|
||||
severity: Severity::High,
|
||||
};
|
||||
|
||||
handler.add_event(event.clone());
|
||||
handler.add_event(event.clone());
|
||||
|
||||
let events = handler.get_events();
|
||||
assert_eq!(events.len(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_handler_clear_events() {
|
||||
let mut handler = HealEventHandler::new(10);
|
||||
let event = HealEvent::ObjectCorruption {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
corruption_type: CorruptionType::DataCorruption,
|
||||
severity: Severity::High,
|
||||
};
|
||||
|
||||
handler.add_event(event);
|
||||
assert_eq!(handler.event_count(), 1);
|
||||
|
||||
handler.clear_events();
|
||||
assert_eq!(handler.event_count(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_handler_filter_by_severity() {
|
||||
let mut handler = HealEventHandler::new(10);
|
||||
handler.add_event(HealEvent::ObjectCorruption {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
corruption_type: CorruptionType::DataCorruption,
|
||||
severity: Severity::Low,
|
||||
});
|
||||
handler.add_event(HealEvent::ECDecodeFailure {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
missing_shards: vec![],
|
||||
available_shards: vec![],
|
||||
});
|
||||
|
||||
let high_severity = handler.filter_by_severity(Severity::High);
|
||||
assert_eq!(high_severity.len(), 1); // Only ECDecodeFailure is Critical >= High
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_handler_filter_by_type() {
|
||||
let mut handler = HealEventHandler::new(10);
|
||||
handler.add_event(HealEvent::ObjectCorruption {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
corruption_type: CorruptionType::DataCorruption,
|
||||
severity: Severity::High,
|
||||
});
|
||||
handler.add_event(HealEvent::ObjectMissing {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
expected_locations: vec![],
|
||||
available_locations: vec![],
|
||||
});
|
||||
|
||||
let corruption_events = handler.filter_by_type("ObjectCorruption");
|
||||
assert_eq!(corruption_events.len(), 1);
|
||||
|
||||
let missing_events = handler.filter_by_type("ObjectMissing");
|
||||
assert_eq!(missing_events.len(), 1);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,17 +12,17 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::error::{Error, Result};
|
||||
use crate::heal::{
|
||||
progress::{HealProgress, HealStatistics},
|
||||
storage::HealStorageAPI,
|
||||
task::{HealOptions, HealPriority, HealRequest, HealTask, HealTaskStatus, HealType},
|
||||
};
|
||||
use crate::{Error, Result};
|
||||
use rustfs_ecstore::disk::DiskAPI;
|
||||
use rustfs_ecstore::disk::error::DiskError;
|
||||
use rustfs_ecstore::global::GLOBAL_LOCAL_DISK_MAP;
|
||||
use std::{
|
||||
collections::{HashMap, VecDeque},
|
||||
collections::{BinaryHeap, HashMap, HashSet},
|
||||
sync::Arc,
|
||||
time::{Duration, SystemTime},
|
||||
};
|
||||
@@ -31,7 +31,152 @@ use tokio::{
|
||||
time::interval,
|
||||
};
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::{error, info, warn};
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
/// Priority queue wrapper for heal requests
|
||||
/// Uses BinaryHeap for priority-based ordering while maintaining FIFO for same-priority items
|
||||
#[derive(Debug)]
|
||||
struct PriorityHealQueue {
|
||||
/// Heap of (priority, sequence, request) tuples
|
||||
heap: BinaryHeap<PriorityQueueItem>,
|
||||
/// Sequence counter for FIFO ordering within same priority
|
||||
sequence: u64,
|
||||
/// Set of request keys to prevent duplicates
|
||||
dedup_keys: HashSet<String>,
|
||||
}
|
||||
|
||||
/// Wrapper for heap items to implement proper ordering
|
||||
#[derive(Debug)]
|
||||
struct PriorityQueueItem {
|
||||
priority: HealPriority,
|
||||
sequence: u64,
|
||||
request: HealRequest,
|
||||
}
|
||||
|
||||
impl Eq for PriorityQueueItem {}
|
||||
|
||||
impl PartialEq for PriorityQueueItem {
|
||||
fn eq(&self, other: &Self) -> bool {
|
||||
self.priority == other.priority && self.sequence == other.sequence
|
||||
}
|
||||
}
|
||||
|
||||
impl Ord for PriorityQueueItem {
|
||||
fn cmp(&self, other: &Self) -> std::cmp::Ordering {
|
||||
// First compare by priority (higher priority first)
|
||||
match self.priority.cmp(&other.priority) {
|
||||
std::cmp::Ordering::Equal => {
|
||||
// If priorities are equal, use sequence for FIFO (lower sequence first)
|
||||
other.sequence.cmp(&self.sequence)
|
||||
}
|
||||
ordering => ordering,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl PartialOrd for PriorityQueueItem {
|
||||
fn partial_cmp(&self, other: &Self) -> Option<std::cmp::Ordering> {
|
||||
Some(self.cmp(other))
|
||||
}
|
||||
}
|
||||
|
||||
impl PriorityHealQueue {
|
||||
fn new() -> Self {
|
||||
Self {
|
||||
heap: BinaryHeap::new(),
|
||||
sequence: 0,
|
||||
dedup_keys: HashSet::new(),
|
||||
}
|
||||
}
|
||||
|
||||
fn len(&self) -> usize {
|
||||
self.heap.len()
|
||||
}
|
||||
|
||||
fn is_empty(&self) -> bool {
|
||||
self.heap.is_empty()
|
||||
}
|
||||
|
||||
fn push(&mut self, request: HealRequest) -> bool {
|
||||
let key = Self::make_dedup_key(&request);
|
||||
|
||||
// Check for duplicates
|
||||
if self.dedup_keys.contains(&key) {
|
||||
return false; // Duplicate request, don't add
|
||||
}
|
||||
|
||||
self.dedup_keys.insert(key);
|
||||
self.sequence += 1;
|
||||
self.heap.push(PriorityQueueItem {
|
||||
priority: request.priority,
|
||||
sequence: self.sequence,
|
||||
request,
|
||||
});
|
||||
true
|
||||
}
|
||||
|
||||
/// Get statistics about queue contents by priority
|
||||
fn get_priority_stats(&self) -> HashMap<HealPriority, usize> {
|
||||
let mut stats = HashMap::new();
|
||||
for item in &self.heap {
|
||||
*stats.entry(item.priority).or_insert(0) += 1;
|
||||
}
|
||||
stats
|
||||
}
|
||||
|
||||
fn pop(&mut self) -> Option<HealRequest> {
|
||||
self.heap.pop().map(|item| {
|
||||
let key = Self::make_dedup_key(&item.request);
|
||||
self.dedup_keys.remove(&key);
|
||||
item.request
|
||||
})
|
||||
}
|
||||
|
||||
/// Create a deduplication key from a heal request
|
||||
fn make_dedup_key(request: &HealRequest) -> String {
|
||||
match &request.heal_type {
|
||||
HealType::Object {
|
||||
bucket,
|
||||
object,
|
||||
version_id,
|
||||
} => {
|
||||
format!("object:{}:{}:{}", bucket, object, version_id.as_deref().unwrap_or(""))
|
||||
}
|
||||
HealType::Bucket { bucket } => {
|
||||
format!("bucket:{bucket}")
|
||||
}
|
||||
HealType::ErasureSet { set_disk_id, .. } => {
|
||||
format!("erasure_set:{set_disk_id}")
|
||||
}
|
||||
HealType::Metadata { bucket, object } => {
|
||||
format!("metadata:{bucket}:{object}")
|
||||
}
|
||||
HealType::MRF { meta_path } => {
|
||||
format!("mrf:{meta_path}")
|
||||
}
|
||||
HealType::ECDecode {
|
||||
bucket,
|
||||
object,
|
||||
version_id,
|
||||
} => {
|
||||
format!("ecdecode:{}:{}:{}", bucket, object, version_id.as_deref().unwrap_or(""))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if a request with the same key already exists in the queue
|
||||
#[allow(dead_code)]
|
||||
fn contains_key(&self, request: &HealRequest) -> bool {
|
||||
let key = Self::make_dedup_key(request);
|
||||
self.dedup_keys.contains(&key)
|
||||
}
|
||||
|
||||
/// Check if an erasure set heal request for a specific set_disk_id exists
|
||||
fn contains_erasure_set(&self, set_disk_id: &str) -> bool {
|
||||
let key = format!("erasure_set:{set_disk_id}");
|
||||
self.dedup_keys.contains(&key)
|
||||
}
|
||||
}
|
||||
|
||||
/// Heal config
|
||||
#[derive(Debug, Clone)]
|
||||
@@ -50,12 +195,28 @@ pub struct HealConfig {
|
||||
|
||||
impl Default for HealConfig {
|
||||
fn default() -> Self {
|
||||
let queue_size: usize =
|
||||
rustfs_utils::get_env_usize(rustfs_config::ENV_HEAL_QUEUE_SIZE, rustfs_config::DEFAULT_HEAL_QUEUE_SIZE);
|
||||
let heal_interval = Duration::from_secs(rustfs_utils::get_env_u64(
|
||||
rustfs_config::ENV_HEAL_INTERVAL_SECS,
|
||||
rustfs_config::DEFAULT_HEAL_INTERVAL_SECS,
|
||||
));
|
||||
let enable_auto_heal =
|
||||
rustfs_utils::get_env_bool(rustfs_config::ENV_HEAL_AUTO_HEAL_ENABLE, rustfs_config::DEFAULT_HEAL_AUTO_HEAL_ENABLE);
|
||||
let task_timeout = Duration::from_secs(rustfs_utils::get_env_u64(
|
||||
rustfs_config::ENV_HEAL_TASK_TIMEOUT_SECS,
|
||||
rustfs_config::DEFAULT_HEAL_TASK_TIMEOUT_SECS,
|
||||
));
|
||||
let max_concurrent_heals = rustfs_utils::get_env_usize(
|
||||
rustfs_config::ENV_HEAL_MAX_CONCURRENT_HEALS,
|
||||
rustfs_config::DEFAULT_HEAL_MAX_CONCURRENT_HEALS,
|
||||
);
|
||||
Self {
|
||||
enable_auto_heal: true,
|
||||
heal_interval: Duration::from_secs(10), // 10 seconds
|
||||
max_concurrent_heals: 4,
|
||||
task_timeout: Duration::from_secs(300), // 5 minutes
|
||||
queue_size: 1000,
|
||||
enable_auto_heal,
|
||||
heal_interval, // 10 seconds
|
||||
max_concurrent_heals, // max 4,
|
||||
task_timeout, // 5 minutes
|
||||
queue_size,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -85,8 +246,8 @@ pub struct HealManager {
|
||||
state: Arc<RwLock<HealState>>,
|
||||
/// Active heal tasks
|
||||
active_heals: Arc<Mutex<HashMap<String, Arc<HealTask>>>>,
|
||||
/// Heal queue
|
||||
heal_queue: Arc<Mutex<VecDeque<HealRequest>>>,
|
||||
/// Heal queue (priority-based)
|
||||
heal_queue: Arc<Mutex<PriorityHealQueue>>,
|
||||
/// Storage layer interface
|
||||
storage: Arc<dyn HealStorageAPI>,
|
||||
/// Cancel token
|
||||
@@ -103,7 +264,7 @@ impl HealManager {
|
||||
config: Arc::new(RwLock::new(config)),
|
||||
state: Arc::new(RwLock::new(HealState::default())),
|
||||
active_heals: Arc::new(Mutex::new(HashMap::new())),
|
||||
heal_queue: Arc::new(Mutex::new(VecDeque::new())),
|
||||
heal_queue: Arc::new(Mutex::new(PriorityHealQueue::new())),
|
||||
storage,
|
||||
cancel_token: CancellationToken::new(),
|
||||
statistics: Arc::new(RwLock::new(HealStatistics::new())),
|
||||
@@ -125,7 +286,7 @@ impl HealManager {
|
||||
// start scheduler
|
||||
self.start_scheduler().await?;
|
||||
|
||||
// start auto disk scanner
|
||||
// start auto disk scanner to heal unformatted disks
|
||||
self.start_auto_disk_scanner().await?;
|
||||
|
||||
info!("HealManager started successfully");
|
||||
@@ -161,17 +322,54 @@ impl HealManager {
|
||||
let config = self.config.read().await;
|
||||
let mut queue = self.heal_queue.lock().await;
|
||||
|
||||
if queue.len() >= config.queue_size {
|
||||
let queue_len = queue.len();
|
||||
let queue_capacity = config.queue_size;
|
||||
|
||||
if queue_len >= queue_capacity {
|
||||
return Err(Error::ConfigurationError {
|
||||
message: "Heal queue is full".to_string(),
|
||||
message: format!("Heal queue is full ({queue_len}/{queue_capacity})"),
|
||||
});
|
||||
}
|
||||
|
||||
// Warn when queue is getting full (>80% capacity)
|
||||
let capacity_threshold = (queue_capacity as f64 * 0.8) as usize;
|
||||
if queue_len >= capacity_threshold {
|
||||
warn!(
|
||||
"Heal queue is {}% full ({}/{}). Consider increasing queue size or processing capacity.",
|
||||
(queue_len * 100) / queue_capacity,
|
||||
queue_len,
|
||||
queue_capacity
|
||||
);
|
||||
}
|
||||
|
||||
let request_id = request.id.clone();
|
||||
queue.push_back(request);
|
||||
let priority = request.priority;
|
||||
|
||||
// Try to push the request; if it's a duplicate, still return the request_id
|
||||
let is_new = queue.push(request);
|
||||
|
||||
// Log queue statistics periodically (when adding high/urgent priority items)
|
||||
if matches!(priority, HealPriority::High | HealPriority::Urgent) {
|
||||
let stats = queue.get_priority_stats();
|
||||
info!(
|
||||
"Heal queue stats after adding {:?} priority request: total={}, urgent={}, high={}, normal={}, low={}",
|
||||
priority,
|
||||
queue_len + 1,
|
||||
stats.get(&HealPriority::Urgent).unwrap_or(&0),
|
||||
stats.get(&HealPriority::High).unwrap_or(&0),
|
||||
stats.get(&HealPriority::Normal).unwrap_or(&0),
|
||||
stats.get(&HealPriority::Low).unwrap_or(&0)
|
||||
);
|
||||
}
|
||||
|
||||
drop(queue);
|
||||
|
||||
info!("Submitted heal request: {}", request_id);
|
||||
if is_new {
|
||||
info!("Submitted heal request: {} with priority: {:?}", request_id, priority);
|
||||
} else {
|
||||
info!("Heal request already queued (duplicate): {}", request_id);
|
||||
}
|
||||
|
||||
Ok(request_id)
|
||||
}
|
||||
|
||||
@@ -220,7 +418,12 @@ impl HealManager {
|
||||
|
||||
/// Get statistics
|
||||
pub async fn get_statistics(&self) -> HealStatistics {
|
||||
self.statistics.read().await.clone()
|
||||
let stats = self.statistics.read().await.clone();
|
||||
debug!(
|
||||
"HealManager stats snapshot: total_tasks={}, successful_tasks={}, failed_tasks={}, running_tasks={}",
|
||||
stats.total_tasks, stats.successful_tasks, stats.failed_tasks, stats.running_tasks
|
||||
);
|
||||
stats
|
||||
}
|
||||
|
||||
/// Get active task count
|
||||
@@ -271,13 +474,18 @@ impl HealManager {
|
||||
let cancel_token = self.cancel_token.clone();
|
||||
let storage = self.storage.clone();
|
||||
|
||||
info!(
|
||||
"start_auto_disk_scanner: Starting auto disk scanner with interval: {:?}",
|
||||
config.read().await.heal_interval
|
||||
);
|
||||
|
||||
tokio::spawn(async move {
|
||||
let mut interval = interval(config.read().await.heal_interval);
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = cancel_token.cancelled() => {
|
||||
info!("Auto disk scanner received shutdown signal");
|
||||
info!("start_auto_disk_scanner: Auto disk scanner received shutdown signal");
|
||||
break;
|
||||
}
|
||||
_ = interval.tick() => {
|
||||
@@ -296,6 +504,7 @@ impl HealManager {
|
||||
}
|
||||
|
||||
if endpoints.is_empty() {
|
||||
info!("start_auto_disk_scanner: No endpoints need healing");
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -303,45 +512,58 @@ impl HealManager {
|
||||
let buckets = match storage.list_buckets().await {
|
||||
Ok(buckets) => buckets.iter().map(|b| b.name.clone()).collect::<Vec<String>>(),
|
||||
Err(e) => {
|
||||
error!("Failed to get bucket list for auto healing: {}", e);
|
||||
error!("start_auto_disk_scanner: Failed to get bucket list for auto healing: {}", e);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
// Create erasure set heal requests for each endpoint
|
||||
for ep in endpoints {
|
||||
let Some(set_disk_id) =
|
||||
crate::heal::utils::format_set_disk_id_from_i32(ep.pool_idx, ep.set_idx)
|
||||
else {
|
||||
warn!("start_auto_disk_scanner: Skipping endpoint {} without valid pool/set index", ep);
|
||||
continue;
|
||||
};
|
||||
// skip if already queued or healing
|
||||
// Use consistent lock order: queue first, then active_heals to avoid deadlock
|
||||
let mut skip = false;
|
||||
{
|
||||
let queue = heal_queue.lock().await;
|
||||
if queue.iter().any(|req| matches!(&req.heal_type, crate::heal::task::HealType::ErasureSet { set_disk_id, .. } if set_disk_id == &format!("{}_{}", ep.pool_idx, ep.set_idx))) {
|
||||
if queue.contains_erasure_set(&set_disk_id) {
|
||||
skip = true;
|
||||
}
|
||||
}
|
||||
if !skip {
|
||||
let active = active_heals.lock().await;
|
||||
if active.values().any(|task| matches!(&task.heal_type, crate::heal::task::HealType::ErasureSet { set_disk_id, .. } if set_disk_id == &format!("{}_{}", ep.pool_idx, ep.set_idx))) {
|
||||
if active.values().any(|task| {
|
||||
matches!(
|
||||
&task.heal_type,
|
||||
crate::heal::task::HealType::ErasureSet { set_disk_id: active_id, .. }
|
||||
if active_id == &set_disk_id
|
||||
)
|
||||
}) {
|
||||
skip = true;
|
||||
}
|
||||
}
|
||||
|
||||
if skip {
|
||||
info!("start_auto_disk_scanner: Skipping auto erasure set heal for endpoint: {} (set_disk_id: {}) because it is already queued or healing", ep, set_disk_id);
|
||||
continue;
|
||||
}
|
||||
|
||||
// enqueue erasure set heal request for this disk
|
||||
let set_disk_id = format!("pool_{}_set_{}", ep.pool_idx, ep.set_idx);
|
||||
let req = HealRequest::new(
|
||||
HealType::ErasureSet {
|
||||
buckets: buckets.clone(),
|
||||
set_disk_id: set_disk_id.clone()
|
||||
set_disk_id: set_disk_id.clone(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::Normal,
|
||||
);
|
||||
let mut queue = heal_queue.lock().await;
|
||||
queue.push_back(req);
|
||||
info!("Enqueued auto erasure set heal for endpoint: {} (set_disk_id: {})", ep, set_disk_id);
|
||||
queue.push(req);
|
||||
info!("start_auto_disk_scanner: Enqueued auto erasure set heal for endpoint: {} (set_disk_id: {})", ep, set_disk_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -351,8 +573,9 @@ impl HealManager {
|
||||
}
|
||||
|
||||
/// Process heal queue
|
||||
/// Processes multiple tasks per cycle when capacity allows and queue has high-priority items
|
||||
async fn process_heal_queue(
|
||||
heal_queue: &Arc<Mutex<VecDeque<HealRequest>>>,
|
||||
heal_queue: &Arc<Mutex<PriorityHealQueue>>,
|
||||
active_heals: &Arc<Mutex<HashMap<String, Arc<HealTask>>>>,
|
||||
config: &Arc<RwLock<HealConfig>>,
|
||||
statistics: &Arc<RwLock<HealStatistics>>,
|
||||
@@ -361,51 +584,83 @@ impl HealManager {
|
||||
let config = config.read().await;
|
||||
let mut active_heals_guard = active_heals.lock().await;
|
||||
|
||||
// check if new heal tasks can be started
|
||||
if active_heals_guard.len() >= config.max_concurrent_heals {
|
||||
// Check if new heal tasks can be started
|
||||
let active_count = active_heals_guard.len();
|
||||
if active_count >= config.max_concurrent_heals {
|
||||
return;
|
||||
}
|
||||
|
||||
// Calculate how many tasks we can start this cycle
|
||||
let available_slots = config.max_concurrent_heals - active_count;
|
||||
|
||||
let mut queue = heal_queue.lock().await;
|
||||
if let Some(request) = queue.pop_front() {
|
||||
let task = Arc::new(HealTask::from_request(request, storage.clone()));
|
||||
let task_id = task.id.clone();
|
||||
active_heals_guard.insert(task_id.clone(), task.clone());
|
||||
drop(active_heals_guard);
|
||||
let active_heals_clone = active_heals.clone();
|
||||
let statistics_clone = statistics.clone();
|
||||
let queue_len = queue.len();
|
||||
|
||||
// start heal task
|
||||
tokio::spawn(async move {
|
||||
info!("Starting heal task: {}", task_id);
|
||||
let result = task.execute().await;
|
||||
match result {
|
||||
Ok(_) => {
|
||||
info!("Heal task completed successfully: {}", task_id);
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Heal task failed: {} - {}", task_id, e);
|
||||
}
|
||||
}
|
||||
let mut active_heals_guard = active_heals_clone.lock().await;
|
||||
if let Some(completed_task) = active_heals_guard.remove(&task_id) {
|
||||
// update statistics
|
||||
let mut stats = statistics_clone.write().await;
|
||||
match completed_task.get_status().await {
|
||||
HealTaskStatus::Completed => {
|
||||
stats.update_task_completion(true);
|
||||
if queue_len == 0 {
|
||||
return;
|
||||
}
|
||||
|
||||
// Process multiple tasks if:
|
||||
// 1. We have available slots
|
||||
// 2. Queue is not empty
|
||||
// Prioritize urgent/high priority tasks by processing up to 2 tasks per cycle if available
|
||||
let tasks_to_process = if queue_len > 0 {
|
||||
std::cmp::min(available_slots, std::cmp::min(2, queue_len))
|
||||
} else {
|
||||
0
|
||||
};
|
||||
|
||||
for _ in 0..tasks_to_process {
|
||||
if let Some(request) = queue.pop() {
|
||||
let task_priority = request.priority;
|
||||
let task = Arc::new(HealTask::from_request(request, storage.clone()));
|
||||
let task_id = task.id.clone();
|
||||
active_heals_guard.insert(task_id.clone(), task.clone());
|
||||
let active_heals_clone = active_heals.clone();
|
||||
let statistics_clone = statistics.clone();
|
||||
|
||||
// start heal task
|
||||
tokio::spawn(async move {
|
||||
info!("Starting heal task: {} with priority: {:?}", task_id, task_priority);
|
||||
let result = task.execute().await;
|
||||
match result {
|
||||
Ok(_) => {
|
||||
info!("Heal task completed successfully: {}", task_id);
|
||||
}
|
||||
_ => {
|
||||
stats.update_task_completion(false);
|
||||
Err(e) => {
|
||||
error!("Heal task failed: {} - {}", task_id, e);
|
||||
}
|
||||
}
|
||||
stats.update_running_tasks(active_heals_guard.len() as u64);
|
||||
}
|
||||
});
|
||||
let mut active_heals_guard = active_heals_clone.lock().await;
|
||||
if let Some(completed_task) = active_heals_guard.remove(&task_id) {
|
||||
// update statistics
|
||||
let mut stats = statistics_clone.write().await;
|
||||
match completed_task.get_status().await {
|
||||
HealTaskStatus::Completed => {
|
||||
stats.update_task_completion(true);
|
||||
}
|
||||
_ => {
|
||||
stats.update_task_completion(false);
|
||||
}
|
||||
}
|
||||
stats.update_running_tasks(active_heals_guard.len() as u64);
|
||||
}
|
||||
});
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// update statistics
|
||||
let mut stats = statistics.write().await;
|
||||
stats.total_tasks += 1;
|
||||
// Update statistics for all started tasks
|
||||
let mut stats = statistics.write().await;
|
||||
stats.total_tasks += tasks_to_process as u64;
|
||||
|
||||
// Log queue status if items remain
|
||||
if !queue.is_empty() {
|
||||
let remaining = queue.len();
|
||||
if remaining > 10 {
|
||||
info!("Heal queue has {} pending requests, {} tasks active", remaining, active_heals_guard.len());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -420,3 +675,333 @@ impl std::fmt::Debug for HealManager {
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::heal::task::{HealOptions, HealPriority, HealRequest, HealType};
|
||||
|
||||
#[test]
|
||||
fn test_priority_queue_ordering() {
|
||||
let mut queue = PriorityHealQueue::new();
|
||||
|
||||
// Add requests with different priorities
|
||||
let low_req = HealRequest::new(
|
||||
HealType::Bucket {
|
||||
bucket: "bucket1".to_string(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::Low,
|
||||
);
|
||||
|
||||
let normal_req = HealRequest::new(
|
||||
HealType::Bucket {
|
||||
bucket: "bucket2".to_string(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::Normal,
|
||||
);
|
||||
|
||||
let high_req = HealRequest::new(
|
||||
HealType::Bucket {
|
||||
bucket: "bucket3".to_string(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::High,
|
||||
);
|
||||
|
||||
let urgent_req = HealRequest::new(
|
||||
HealType::Bucket {
|
||||
bucket: "bucket4".to_string(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::Urgent,
|
||||
);
|
||||
|
||||
// Add in random order: low, high, normal, urgent
|
||||
assert!(queue.push(low_req));
|
||||
assert!(queue.push(high_req));
|
||||
assert!(queue.push(normal_req));
|
||||
assert!(queue.push(urgent_req));
|
||||
|
||||
assert_eq!(queue.len(), 4);
|
||||
|
||||
// Should pop in priority order: urgent, high, normal, low
|
||||
let popped1 = queue.pop().unwrap();
|
||||
assert_eq!(popped1.priority, HealPriority::Urgent);
|
||||
|
||||
let popped2 = queue.pop().unwrap();
|
||||
assert_eq!(popped2.priority, HealPriority::High);
|
||||
|
||||
let popped3 = queue.pop().unwrap();
|
||||
assert_eq!(popped3.priority, HealPriority::Normal);
|
||||
|
||||
let popped4 = queue.pop().unwrap();
|
||||
assert_eq!(popped4.priority, HealPriority::Low);
|
||||
|
||||
assert_eq!(queue.len(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_priority_queue_fifo_same_priority() {
|
||||
let mut queue = PriorityHealQueue::new();
|
||||
|
||||
// Add multiple requests with same priority
|
||||
let req1 = HealRequest::new(
|
||||
HealType::Bucket {
|
||||
bucket: "bucket1".to_string(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::Normal,
|
||||
);
|
||||
|
||||
let req2 = HealRequest::new(
|
||||
HealType::Bucket {
|
||||
bucket: "bucket2".to_string(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::Normal,
|
||||
);
|
||||
|
||||
let req3 = HealRequest::new(
|
||||
HealType::Bucket {
|
||||
bucket: "bucket3".to_string(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::Normal,
|
||||
);
|
||||
|
||||
let id1 = req1.id.clone();
|
||||
let id2 = req2.id.clone();
|
||||
let id3 = req3.id.clone();
|
||||
|
||||
assert!(queue.push(req1));
|
||||
assert!(queue.push(req2));
|
||||
assert!(queue.push(req3));
|
||||
|
||||
// Should maintain FIFO order for same priority
|
||||
let popped1 = queue.pop().unwrap();
|
||||
assert_eq!(popped1.id, id1);
|
||||
|
||||
let popped2 = queue.pop().unwrap();
|
||||
assert_eq!(popped2.id, id2);
|
||||
|
||||
let popped3 = queue.pop().unwrap();
|
||||
assert_eq!(popped3.id, id3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_priority_queue_deduplication() {
|
||||
let mut queue = PriorityHealQueue::new();
|
||||
|
||||
let req1 = HealRequest::new(
|
||||
HealType::Object {
|
||||
bucket: "bucket1".to_string(),
|
||||
object: "object1".to_string(),
|
||||
version_id: None,
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::Normal,
|
||||
);
|
||||
|
||||
let req2 = HealRequest::new(
|
||||
HealType::Object {
|
||||
bucket: "bucket1".to_string(),
|
||||
object: "object1".to_string(),
|
||||
version_id: None,
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::High,
|
||||
);
|
||||
|
||||
// First request should be added
|
||||
assert!(queue.push(req1));
|
||||
assert_eq!(queue.len(), 1);
|
||||
|
||||
// Second request with same object should be rejected (duplicate)
|
||||
assert!(!queue.push(req2));
|
||||
assert_eq!(queue.len(), 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_priority_queue_contains_erasure_set() {
|
||||
let mut queue = PriorityHealQueue::new();
|
||||
|
||||
let req = HealRequest::new(
|
||||
HealType::ErasureSet {
|
||||
buckets: vec!["bucket1".to_string()],
|
||||
set_disk_id: "pool_0_set_1".to_string(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::Normal,
|
||||
);
|
||||
|
||||
assert!(queue.push(req));
|
||||
assert!(queue.contains_erasure_set("pool_0_set_1"));
|
||||
assert!(!queue.contains_erasure_set("pool_0_set_2"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_priority_queue_dedup_key_generation() {
|
||||
// Test different heal types generate different keys
|
||||
let obj_req = HealRequest::new(
|
||||
HealType::Object {
|
||||
bucket: "bucket1".to_string(),
|
||||
object: "object1".to_string(),
|
||||
version_id: None,
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::Normal,
|
||||
);
|
||||
|
||||
let bucket_req = HealRequest::new(
|
||||
HealType::Bucket {
|
||||
bucket: "bucket1".to_string(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::Normal,
|
||||
);
|
||||
|
||||
let erasure_req = HealRequest::new(
|
||||
HealType::ErasureSet {
|
||||
buckets: vec!["bucket1".to_string()],
|
||||
set_disk_id: "pool_0_set_1".to_string(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::Normal,
|
||||
);
|
||||
|
||||
let obj_key = PriorityHealQueue::make_dedup_key(&obj_req);
|
||||
let bucket_key = PriorityHealQueue::make_dedup_key(&bucket_req);
|
||||
let erasure_key = PriorityHealQueue::make_dedup_key(&erasure_req);
|
||||
|
||||
// All keys should be different
|
||||
assert_ne!(obj_key, bucket_key);
|
||||
assert_ne!(obj_key, erasure_key);
|
||||
assert_ne!(bucket_key, erasure_key);
|
||||
|
||||
assert!(obj_key.starts_with("object:"));
|
||||
assert!(bucket_key.starts_with("bucket:"));
|
||||
assert!(erasure_key.starts_with("erasure_set:"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_priority_queue_mixed_priorities_and_types() {
|
||||
let mut queue = PriorityHealQueue::new();
|
||||
|
||||
// Add various requests
|
||||
let requests = vec![
|
||||
(
|
||||
HealType::Object {
|
||||
bucket: "b1".to_string(),
|
||||
object: "o1".to_string(),
|
||||
version_id: None,
|
||||
},
|
||||
HealPriority::Low,
|
||||
),
|
||||
(
|
||||
HealType::Bucket {
|
||||
bucket: "b2".to_string(),
|
||||
},
|
||||
HealPriority::Urgent,
|
||||
),
|
||||
(
|
||||
HealType::ErasureSet {
|
||||
buckets: vec!["b3".to_string()],
|
||||
set_disk_id: "pool_0_set_1".to_string(),
|
||||
},
|
||||
HealPriority::Normal,
|
||||
),
|
||||
(
|
||||
HealType::Object {
|
||||
bucket: "b4".to_string(),
|
||||
object: "o4".to_string(),
|
||||
version_id: None,
|
||||
},
|
||||
HealPriority::High,
|
||||
),
|
||||
];
|
||||
|
||||
for (heal_type, priority) in requests {
|
||||
let req = HealRequest::new(heal_type, HealOptions::default(), priority);
|
||||
queue.push(req);
|
||||
}
|
||||
|
||||
assert_eq!(queue.len(), 4);
|
||||
|
||||
// Check they come out in priority order
|
||||
let priorities: Vec<HealPriority> = (0..4).filter_map(|_| queue.pop().map(|r| r.priority)).collect();
|
||||
|
||||
assert_eq!(
|
||||
priorities,
|
||||
vec![
|
||||
HealPriority::Urgent,
|
||||
HealPriority::High,
|
||||
HealPriority::Normal,
|
||||
HealPriority::Low,
|
||||
]
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_priority_queue_stats() {
|
||||
let mut queue = PriorityHealQueue::new();
|
||||
|
||||
// Add requests with different priorities
|
||||
for _ in 0..3 {
|
||||
queue.push(HealRequest::new(
|
||||
HealType::Bucket {
|
||||
bucket: format!("bucket-low-{}", queue.len()),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::Low,
|
||||
));
|
||||
}
|
||||
|
||||
for _ in 0..2 {
|
||||
queue.push(HealRequest::new(
|
||||
HealType::Bucket {
|
||||
bucket: format!("bucket-normal-{}", queue.len()),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::Normal,
|
||||
));
|
||||
}
|
||||
|
||||
queue.push(HealRequest::new(
|
||||
HealType::Bucket {
|
||||
bucket: "bucket-high".to_string(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::High,
|
||||
));
|
||||
|
||||
let stats = queue.get_priority_stats();
|
||||
|
||||
assert_eq!(*stats.get(&HealPriority::Low).unwrap_or(&0), 3);
|
||||
assert_eq!(*stats.get(&HealPriority::Normal).unwrap_or(&0), 2);
|
||||
assert_eq!(*stats.get(&HealPriority::High).unwrap_or(&0), 1);
|
||||
assert_eq!(*stats.get(&HealPriority::Urgent).unwrap_or(&0), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_priority_queue_is_empty() {
|
||||
let mut queue = PriorityHealQueue::new();
|
||||
|
||||
assert!(queue.is_empty());
|
||||
|
||||
queue.push(HealRequest::new(
|
||||
HealType::Bucket {
|
||||
bucket: "test".to_string(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::Normal,
|
||||
));
|
||||
|
||||
assert!(!queue.is_empty());
|
||||
|
||||
queue.pop();
|
||||
|
||||
assert!(queue.is_empty());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,6 +20,7 @@ pub mod progress;
|
||||
pub mod resume;
|
||||
pub mod storage;
|
||||
pub mod task;
|
||||
pub mod utils;
|
||||
|
||||
pub use erasure_healer::ErasureSetHealer;
|
||||
pub use manager::HealManager;
|
||||
|
||||
@@ -146,3 +146,244 @@ impl HealStatistics {
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_heal_progress_new() {
|
||||
let progress = HealProgress::new();
|
||||
assert_eq!(progress.objects_scanned, 0);
|
||||
assert_eq!(progress.objects_healed, 0);
|
||||
assert_eq!(progress.objects_failed, 0);
|
||||
assert_eq!(progress.bytes_processed, 0);
|
||||
assert_eq!(progress.progress_percentage, 0.0);
|
||||
assert!(progress.start_time.is_some());
|
||||
assert!(progress.last_update_time.is_some());
|
||||
assert!(progress.current_object.is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_progress_update_progress() {
|
||||
let mut progress = HealProgress::new();
|
||||
progress.update_progress(10, 8, 2, 1024);
|
||||
|
||||
assert_eq!(progress.objects_scanned, 10);
|
||||
assert_eq!(progress.objects_healed, 8);
|
||||
assert_eq!(progress.objects_failed, 2);
|
||||
assert_eq!(progress.bytes_processed, 1024);
|
||||
// Progress percentage should be calculated based on healed/total
|
||||
// total = scanned + healed + failed = 10 + 8 + 2 = 20
|
||||
// healed/total = 8/20 = 0.4 = 40%
|
||||
assert!((progress.progress_percentage - 40.0).abs() < 0.001);
|
||||
assert!(progress.last_update_time.is_some());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_progress_update_progress_zero_total() {
|
||||
let mut progress = HealProgress::new();
|
||||
progress.update_progress(0, 0, 0, 0);
|
||||
|
||||
assert_eq!(progress.progress_percentage, 0.0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_progress_update_progress_all_healed() {
|
||||
let mut progress = HealProgress::new();
|
||||
// When scanned=0, healed=10, failed=0: total=10, progress = 10/10 = 100%
|
||||
progress.update_progress(0, 10, 0, 2048);
|
||||
|
||||
// All healed, should be 100%
|
||||
assert!((progress.progress_percentage - 100.0).abs() < 0.001);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_progress_set_current_object() {
|
||||
let mut progress = HealProgress::new();
|
||||
let initial_time = progress.last_update_time;
|
||||
|
||||
// Small delay to ensure time difference
|
||||
std::thread::sleep(std::time::Duration::from_millis(10));
|
||||
|
||||
progress.set_current_object(Some("test-bucket/test-object".to_string()));
|
||||
|
||||
assert_eq!(progress.current_object, Some("test-bucket/test-object".to_string()));
|
||||
assert!(progress.last_update_time.is_some());
|
||||
// last_update_time should be updated
|
||||
assert_ne!(progress.last_update_time, initial_time);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_progress_set_current_object_none() {
|
||||
let mut progress = HealProgress::new();
|
||||
progress.set_current_object(Some("test".to_string()));
|
||||
progress.set_current_object(None);
|
||||
|
||||
assert!(progress.current_object.is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_progress_is_completed_by_percentage() {
|
||||
let mut progress = HealProgress::new();
|
||||
progress.update_progress(10, 10, 0, 1024);
|
||||
|
||||
assert!(progress.is_completed());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_progress_is_completed_by_processed() {
|
||||
let mut progress = HealProgress::new();
|
||||
progress.objects_scanned = 10;
|
||||
progress.objects_healed = 8;
|
||||
progress.objects_failed = 2;
|
||||
// healed + failed = 8 + 2 = 10 >= scanned = 10
|
||||
assert!(progress.is_completed());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_progress_is_not_completed() {
|
||||
let mut progress = HealProgress::new();
|
||||
progress.objects_scanned = 10;
|
||||
progress.objects_healed = 5;
|
||||
progress.objects_failed = 2;
|
||||
// healed + failed = 5 + 2 = 7 < scanned = 10
|
||||
assert!(!progress.is_completed());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_progress_get_success_rate() {
|
||||
let mut progress = HealProgress::new();
|
||||
progress.objects_healed = 8;
|
||||
progress.objects_failed = 2;
|
||||
|
||||
// success_rate = 8 / (8 + 2) * 100 = 80%
|
||||
assert!((progress.get_success_rate() - 80.0).abs() < 0.001);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_progress_get_success_rate_zero_total() {
|
||||
let progress = HealProgress::new();
|
||||
// No healed or failed objects
|
||||
assert_eq!(progress.get_success_rate(), 0.0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_progress_get_success_rate_all_success() {
|
||||
let mut progress = HealProgress::new();
|
||||
progress.objects_healed = 10;
|
||||
progress.objects_failed = 0;
|
||||
|
||||
assert!((progress.get_success_rate() - 100.0).abs() < 0.001);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_statistics_new() {
|
||||
let stats = HealStatistics::new();
|
||||
assert_eq!(stats.total_tasks, 0);
|
||||
assert_eq!(stats.successful_tasks, 0);
|
||||
assert_eq!(stats.failed_tasks, 0);
|
||||
assert_eq!(stats.running_tasks, 0);
|
||||
assert_eq!(stats.total_objects_healed, 0);
|
||||
assert_eq!(stats.total_bytes_healed, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_statistics_default() {
|
||||
let stats = HealStatistics::default();
|
||||
assert_eq!(stats.total_tasks, 0);
|
||||
assert_eq!(stats.successful_tasks, 0);
|
||||
assert_eq!(stats.failed_tasks, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_statistics_update_task_completion_success() {
|
||||
let mut stats = HealStatistics::new();
|
||||
let initial_time = stats.last_update_time;
|
||||
|
||||
std::thread::sleep(std::time::Duration::from_millis(10));
|
||||
stats.update_task_completion(true);
|
||||
|
||||
assert_eq!(stats.successful_tasks, 1);
|
||||
assert_eq!(stats.failed_tasks, 0);
|
||||
assert!(stats.last_update_time > initial_time);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_statistics_update_task_completion_failure() {
|
||||
let mut stats = HealStatistics::new();
|
||||
stats.update_task_completion(false);
|
||||
|
||||
assert_eq!(stats.successful_tasks, 0);
|
||||
assert_eq!(stats.failed_tasks, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_statistics_update_running_tasks() {
|
||||
let mut stats = HealStatistics::new();
|
||||
let initial_time = stats.last_update_time;
|
||||
|
||||
std::thread::sleep(std::time::Duration::from_millis(10));
|
||||
stats.update_running_tasks(5);
|
||||
|
||||
assert_eq!(stats.running_tasks, 5);
|
||||
assert!(stats.last_update_time > initial_time);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_statistics_add_healed_objects() {
|
||||
let mut stats = HealStatistics::new();
|
||||
let initial_time = stats.last_update_time;
|
||||
|
||||
std::thread::sleep(std::time::Duration::from_millis(10));
|
||||
stats.add_healed_objects(10, 10240);
|
||||
|
||||
assert_eq!(stats.total_objects_healed, 10);
|
||||
assert_eq!(stats.total_bytes_healed, 10240);
|
||||
assert!(stats.last_update_time > initial_time);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_statistics_add_healed_objects_accumulative() {
|
||||
let mut stats = HealStatistics::new();
|
||||
stats.add_healed_objects(5, 5120);
|
||||
stats.add_healed_objects(3, 3072);
|
||||
|
||||
assert_eq!(stats.total_objects_healed, 8);
|
||||
assert_eq!(stats.total_bytes_healed, 8192);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_statistics_get_success_rate() {
|
||||
let mut stats = HealStatistics::new();
|
||||
stats.successful_tasks = 8;
|
||||
stats.failed_tasks = 2;
|
||||
|
||||
// success_rate = 8 / (8 + 2) * 100 = 80%
|
||||
assert!((stats.get_success_rate() - 80.0).abs() < 0.001);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_statistics_get_success_rate_zero_total() {
|
||||
let stats = HealStatistics::new();
|
||||
assert_eq!(stats.get_success_rate(), 0.0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_statistics_get_success_rate_all_success() {
|
||||
let mut stats = HealStatistics::new();
|
||||
stats.successful_tasks = 10;
|
||||
stats.failed_tasks = 0;
|
||||
|
||||
assert!((stats.get_success_rate() - 100.0).abs() < 0.001);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_statistics_get_success_rate_all_failure() {
|
||||
let mut stats = HealStatistics::new();
|
||||
stats.successful_tasks = 0;
|
||||
stats.failed_tasks = 5;
|
||||
|
||||
assert_eq!(stats.get_success_rate(), 0.0);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::error::{Error, Result};
|
||||
use crate::{Error, Result};
|
||||
use rustfs_ecstore::disk::{BUCKET_META_PREFIX, DiskAPI, DiskStore, RUSTFS_META_BUCKET};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::path::Path;
|
||||
@@ -27,6 +27,12 @@ const RESUME_STATE_FILE: &str = "ahm_resume_state.json";
|
||||
const RESUME_PROGRESS_FILE: &str = "ahm_progress.json";
|
||||
const RESUME_CHECKPOINT_FILE: &str = "ahm_checkpoint.json";
|
||||
|
||||
/// Helper function to convert Path to &str, returning an error if conversion fails
|
||||
fn path_to_str(path: &Path) -> Result<&str> {
|
||||
path.to_str()
|
||||
.ok_or_else(|| Error::other(format!("Invalid UTF-8 path: {path:?}")))
|
||||
}
|
||||
|
||||
/// resume state
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ResumeState {
|
||||
@@ -34,6 +40,9 @@ pub struct ResumeState {
|
||||
pub task_id: String,
|
||||
/// task type
|
||||
pub task_type: String,
|
||||
/// set disk identifier (for erasure set tasks)
|
||||
#[serde(default)]
|
||||
pub set_disk_id: String,
|
||||
/// start time
|
||||
pub start_time: u64,
|
||||
/// last update time
|
||||
@@ -67,12 +76,13 @@ pub struct ResumeState {
|
||||
}
|
||||
|
||||
impl ResumeState {
|
||||
pub fn new(task_id: String, task_type: String, buckets: Vec<String>) -> Self {
|
||||
pub fn new(task_id: String, task_type: String, set_disk_id: String, buckets: Vec<String>) -> Self {
|
||||
Self {
|
||||
task_id,
|
||||
task_type,
|
||||
start_time: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(),
|
||||
last_update: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(),
|
||||
set_disk_id,
|
||||
start_time: SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(),
|
||||
last_update: SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(),
|
||||
completed: false,
|
||||
total_objects: 0,
|
||||
processed_objects: 0,
|
||||
@@ -94,13 +104,13 @@ impl ResumeState {
|
||||
self.successful_objects = successful;
|
||||
self.failed_objects = failed;
|
||||
self.skipped_objects = skipped;
|
||||
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
|
||||
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
|
||||
}
|
||||
|
||||
pub fn set_current_item(&mut self, bucket: Option<String>, object: Option<String>) {
|
||||
self.current_bucket = bucket;
|
||||
self.current_object = object;
|
||||
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
|
||||
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
|
||||
}
|
||||
|
||||
pub fn complete_bucket(&mut self, bucket: &str) {
|
||||
@@ -110,22 +120,22 @@ impl ResumeState {
|
||||
if let Some(pos) = self.pending_buckets.iter().position(|b| b == bucket) {
|
||||
self.pending_buckets.remove(pos);
|
||||
}
|
||||
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
|
||||
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
|
||||
}
|
||||
|
||||
pub fn mark_completed(&mut self) {
|
||||
self.completed = true;
|
||||
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
|
||||
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
|
||||
}
|
||||
|
||||
pub fn set_error(&mut self, error: String) {
|
||||
self.error_message = Some(error);
|
||||
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
|
||||
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
|
||||
}
|
||||
|
||||
pub fn increment_retry(&mut self) {
|
||||
self.retry_count += 1;
|
||||
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
|
||||
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
|
||||
}
|
||||
|
||||
pub fn can_retry(&self) -> bool {
|
||||
@@ -156,8 +166,14 @@ pub struct ResumeManager {
|
||||
|
||||
impl ResumeManager {
|
||||
/// create new resume manager
|
||||
pub async fn new(disk: DiskStore, task_id: String, task_type: String, buckets: Vec<String>) -> Result<Self> {
|
||||
let state = ResumeState::new(task_id, task_type, buckets);
|
||||
pub async fn new(
|
||||
disk: DiskStore,
|
||||
task_id: String,
|
||||
task_type: String,
|
||||
set_disk_id: String,
|
||||
buckets: Vec<String>,
|
||||
) -> Result<Self> {
|
||||
let state = ResumeState::new(task_id, task_type, set_disk_id, buckets);
|
||||
let manager = Self {
|
||||
disk,
|
||||
state: Arc::new(RwLock::new(state)),
|
||||
@@ -184,8 +200,11 @@ impl ResumeManager {
|
||||
/// check if resume state exists
|
||||
pub async fn has_resume_state(disk: &DiskStore, task_id: &str) -> bool {
|
||||
let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_STATE_FILE}"));
|
||||
match disk.read_all(RUSTFS_META_BUCKET, file_path.to_str().unwrap()).await {
|
||||
Ok(data) => !data.is_empty(),
|
||||
match path_to_str(&file_path) {
|
||||
Ok(path_str) => match disk.read_all(RUSTFS_META_BUCKET, path_str).await {
|
||||
Ok(data) => !data.is_empty(),
|
||||
Err(_) => false,
|
||||
},
|
||||
Err(_) => false,
|
||||
}
|
||||
}
|
||||
@@ -254,18 +273,15 @@ impl ResumeManager {
|
||||
let checkpoint_file = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_CHECKPOINT_FILE}"));
|
||||
|
||||
// ignore delete errors, files may not exist
|
||||
let _ = self
|
||||
.disk
|
||||
.delete(RUSTFS_META_BUCKET, state_file.to_str().unwrap(), Default::default())
|
||||
.await;
|
||||
let _ = self
|
||||
.disk
|
||||
.delete(RUSTFS_META_BUCKET, progress_file.to_str().unwrap(), Default::default())
|
||||
.await;
|
||||
let _ = self
|
||||
.disk
|
||||
.delete(RUSTFS_META_BUCKET, checkpoint_file.to_str().unwrap(), Default::default())
|
||||
.await;
|
||||
if let Ok(path_str) = path_to_str(&state_file) {
|
||||
let _ = self.disk.delete(RUSTFS_META_BUCKET, path_str, Default::default()).await;
|
||||
}
|
||||
if let Ok(path_str) = path_to_str(&progress_file) {
|
||||
let _ = self.disk.delete(RUSTFS_META_BUCKET, path_str, Default::default()).await;
|
||||
}
|
||||
if let Ok(path_str) = path_to_str(&checkpoint_file) {
|
||||
let _ = self.disk.delete(RUSTFS_META_BUCKET, path_str, Default::default()).await;
|
||||
}
|
||||
|
||||
info!("Cleaned up resume state for task: {}", task_id);
|
||||
Ok(())
|
||||
@@ -280,8 +296,9 @@ impl ResumeManager {
|
||||
|
||||
let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{}_{}", state.task_id, RESUME_STATE_FILE));
|
||||
|
||||
let path_str = path_to_str(&file_path)?;
|
||||
self.disk
|
||||
.write_all(RUSTFS_META_BUCKET, file_path.to_str().unwrap(), state_data.into())
|
||||
.write_all(RUSTFS_META_BUCKET, path_str, state_data.into())
|
||||
.await
|
||||
.map_err(|e| Error::TaskExecutionFailed {
|
||||
message: format!("Failed to save resume state: {e}"),
|
||||
@@ -295,7 +312,8 @@ impl ResumeManager {
|
||||
async fn read_state_file(disk: &DiskStore, task_id: &str) -> Result<Vec<u8>> {
|
||||
let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_STATE_FILE}"));
|
||||
|
||||
disk.read_all(RUSTFS_META_BUCKET, file_path.to_str().unwrap())
|
||||
let path_str = path_to_str(&file_path)?;
|
||||
disk.read_all(RUSTFS_META_BUCKET, path_str)
|
||||
.await
|
||||
.map(|bytes| bytes.to_vec())
|
||||
.map_err(|e| Error::TaskExecutionFailed {
|
||||
@@ -327,7 +345,7 @@ impl ResumeCheckpoint {
|
||||
pub fn new(task_id: String) -> Self {
|
||||
Self {
|
||||
task_id,
|
||||
checkpoint_time: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(),
|
||||
checkpoint_time: SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(),
|
||||
current_bucket_index: 0,
|
||||
current_object_index: 0,
|
||||
processed_objects: Vec::new(),
|
||||
@@ -339,7 +357,7 @@ impl ResumeCheckpoint {
|
||||
pub fn update_position(&mut self, bucket_index: usize, object_index: usize) {
|
||||
self.current_bucket_index = bucket_index;
|
||||
self.current_object_index = object_index;
|
||||
self.checkpoint_time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
|
||||
self.checkpoint_time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
|
||||
}
|
||||
|
||||
pub fn add_processed_object(&mut self, object: String) {
|
||||
@@ -397,8 +415,11 @@ impl CheckpointManager {
|
||||
/// check if checkpoint exists
|
||||
pub async fn has_checkpoint(disk: &DiskStore, task_id: &str) -> bool {
|
||||
let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_CHECKPOINT_FILE}"));
|
||||
match disk.read_all(RUSTFS_META_BUCKET, file_path.to_str().unwrap()).await {
|
||||
Ok(data) => !data.is_empty(),
|
||||
match path_to_str(&file_path) {
|
||||
Ok(path_str) => match disk.read_all(RUSTFS_META_BUCKET, path_str).await {
|
||||
Ok(data) => !data.is_empty(),
|
||||
Err(_) => false,
|
||||
},
|
||||
Err(_) => false,
|
||||
}
|
||||
}
|
||||
@@ -446,10 +467,9 @@ impl CheckpointManager {
|
||||
let task_id = &checkpoint.task_id;
|
||||
|
||||
let checkpoint_file = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_CHECKPOINT_FILE}"));
|
||||
let _ = self
|
||||
.disk
|
||||
.delete(RUSTFS_META_BUCKET, checkpoint_file.to_str().unwrap(), Default::default())
|
||||
.await;
|
||||
if let Ok(path_str) = path_to_str(&checkpoint_file) {
|
||||
let _ = self.disk.delete(RUSTFS_META_BUCKET, path_str, Default::default()).await;
|
||||
}
|
||||
|
||||
info!("Cleaned up checkpoint for task: {}", task_id);
|
||||
Ok(())
|
||||
@@ -464,8 +484,9 @@ impl CheckpointManager {
|
||||
|
||||
let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{}_{}", checkpoint.task_id, RESUME_CHECKPOINT_FILE));
|
||||
|
||||
let path_str = path_to_str(&file_path)?;
|
||||
self.disk
|
||||
.write_all(RUSTFS_META_BUCKET, file_path.to_str().unwrap(), checkpoint_data.into())
|
||||
.write_all(RUSTFS_META_BUCKET, path_str, checkpoint_data.into())
|
||||
.await
|
||||
.map_err(|e| Error::TaskExecutionFailed {
|
||||
message: format!("Failed to save checkpoint: {e}"),
|
||||
@@ -479,7 +500,8 @@ impl CheckpointManager {
|
||||
async fn read_checkpoint_file(disk: &DiskStore, task_id: &str) -> Result<Vec<u8>> {
|
||||
let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_CHECKPOINT_FILE}"));
|
||||
|
||||
disk.read_all(RUSTFS_META_BUCKET, file_path.to_str().unwrap())
|
||||
let path_str = path_to_str(&file_path)?;
|
||||
disk.read_all(RUSTFS_META_BUCKET, path_str)
|
||||
.await
|
||||
.map(|bytes| bytes.to_vec())
|
||||
.map_err(|e| Error::TaskExecutionFailed {
|
||||
@@ -562,7 +584,7 @@ mod tests {
|
||||
async fn test_resume_state_creation() {
|
||||
let task_id = ResumeUtils::generate_task_id();
|
||||
let buckets = vec!["bucket1".to_string(), "bucket2".to_string()];
|
||||
let state = ResumeState::new(task_id.clone(), "erasure_set".to_string(), buckets);
|
||||
let state = ResumeState::new(task_id.clone(), "erasure_set".to_string(), "pool_0_set_0".to_string(), buckets);
|
||||
|
||||
assert_eq!(state.task_id, task_id);
|
||||
assert_eq!(state.task_type, "erasure_set");
|
||||
@@ -575,7 +597,7 @@ mod tests {
|
||||
async fn test_resume_state_progress() {
|
||||
let task_id = ResumeUtils::generate_task_id();
|
||||
let buckets = vec!["bucket1".to_string()];
|
||||
let mut state = ResumeState::new(task_id, "erasure_set".to_string(), buckets);
|
||||
let mut state = ResumeState::new(task_id, "erasure_set".to_string(), "pool_0_set_0".to_string(), buckets);
|
||||
|
||||
state.update_progress(10, 8, 1, 1);
|
||||
assert_eq!(state.processed_objects, 10);
|
||||
@@ -595,7 +617,7 @@ mod tests {
|
||||
async fn test_resume_state_bucket_completion() {
|
||||
let task_id = ResumeUtils::generate_task_id();
|
||||
let buckets = vec!["bucket1".to_string(), "bucket2".to_string()];
|
||||
let mut state = ResumeState::new(task_id, "erasure_set".to_string(), buckets);
|
||||
let mut state = ResumeState::new(task_id, "erasure_set".to_string(), "pool_0_set_0".to_string(), buckets);
|
||||
|
||||
assert_eq!(state.pending_buckets.len(), 2);
|
||||
assert_eq!(state.completed_buckets.len(), 0);
|
||||
@@ -650,6 +672,7 @@ mod tests {
|
||||
let state = ResumeState::new(
|
||||
task_id.clone(),
|
||||
"erasure_set".to_string(),
|
||||
"pool_0_set_0".to_string(),
|
||||
vec!["bucket1".to_string(), "bucket2".to_string()],
|
||||
);
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::error::{Error, Result};
|
||||
use crate::{Error, Result};
|
||||
use async_trait::async_trait;
|
||||
use rustfs_common::heal_channel::{HealOpts, HealScanMode};
|
||||
use rustfs_ecstore::{
|
||||
@@ -107,9 +107,21 @@ pub trait HealStorageAPI: Send + Sync {
|
||||
/// Heal format using ecstore
|
||||
async fn heal_format(&self, dry_run: bool) -> Result<(HealResultItem, Option<Error>)>;
|
||||
|
||||
/// List objects for healing
|
||||
/// List objects for healing (returns all objects, may use significant memory for large buckets)
|
||||
///
|
||||
/// WARNING: This method loads all objects into memory at once. For buckets with many objects,
|
||||
/// consider using `list_objects_for_heal_page` instead to process objects in pages.
|
||||
async fn list_objects_for_heal(&self, bucket: &str, prefix: &str) -> Result<Vec<String>>;
|
||||
|
||||
/// List objects for healing with pagination (returns one page and continuation token)
|
||||
/// Returns (objects, next_continuation_token, is_truncated)
|
||||
async fn list_objects_for_heal_page(
|
||||
&self,
|
||||
bucket: &str,
|
||||
prefix: &str,
|
||||
continuation_token: Option<&str>,
|
||||
) -> Result<(Vec<String>, Option<String>, bool)>;
|
||||
|
||||
/// Get disk for resume functionality
|
||||
async fn get_disk_for_resume(&self, set_disk_id: &str) -> Result<DiskStore>;
|
||||
}
|
||||
@@ -179,7 +191,9 @@ impl HealStorageAPI for ECStoreHealStorage {
|
||||
"Object data exceeds cap ({} bytes), aborting full read to prevent OOM: {}/{}",
|
||||
MAX_READ_BYTES, bucket, object
|
||||
);
|
||||
return Ok(None);
|
||||
return Err(Error::other(format!(
|
||||
"Object too large: {n_read} bytes (max: {MAX_READ_BYTES} bytes) for {bucket}/{object}"
|
||||
)));
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
@@ -398,13 +412,13 @@ impl HealStorageAPI for ECStoreHealStorage {
|
||||
match self.ecstore.get_object_info(bucket, object, &Default::default()).await {
|
||||
Ok(_) => Ok(true), // Object exists
|
||||
Err(e) => {
|
||||
// Map ObjectNotFound to false, other errors to false as well for safety
|
||||
// Map ObjectNotFound to false, other errors must be propagated!
|
||||
if matches!(e, rustfs_ecstore::error::StorageError::ObjectNotFound(_, _)) {
|
||||
debug!("Object not found: {}/{}", bucket, object);
|
||||
Ok(false)
|
||||
} else {
|
||||
debug!("Error checking object existence {}/{}: {}", bucket, object, e);
|
||||
Ok(false) // Treat errors as non-existence to be safe
|
||||
error!("Error checking object existence {}/{}: {}", bucket, object, e);
|
||||
Err(Error::other(e))
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -491,45 +505,74 @@ impl HealStorageAPI for ECStoreHealStorage {
|
||||
|
||||
async fn list_objects_for_heal(&self, bucket: &str, prefix: &str) -> Result<Vec<String>> {
|
||||
debug!("Listing objects for heal: {}/{}", bucket, prefix);
|
||||
warn!(
|
||||
"list_objects_for_heal loads all objects into memory. For large buckets, consider using list_objects_for_heal_page instead."
|
||||
);
|
||||
|
||||
// Use list_objects_v2 to get objects
|
||||
match self
|
||||
.ecstore
|
||||
.clone()
|
||||
.list_objects_v2(bucket, prefix, None, None, 1000, false, None)
|
||||
.await
|
||||
{
|
||||
Ok(list_info) => {
|
||||
let objects: Vec<String> = list_info.objects.into_iter().map(|obj| obj.name).collect();
|
||||
info!("Found {} objects for heal in {}/{}", objects.len(), bucket, prefix);
|
||||
Ok(objects)
|
||||
let mut all_objects = Vec::new();
|
||||
let mut continuation_token: Option<String> = None;
|
||||
|
||||
loop {
|
||||
let (page_objects, next_token, is_truncated) = self
|
||||
.list_objects_for_heal_page(bucket, prefix, continuation_token.as_deref())
|
||||
.await?;
|
||||
|
||||
all_objects.extend(page_objects);
|
||||
|
||||
if !is_truncated {
|
||||
break;
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to list objects for heal: {}/{} - {}", bucket, prefix, e);
|
||||
Err(Error::other(e))
|
||||
|
||||
continuation_token = next_token;
|
||||
if continuation_token.is_none() {
|
||||
warn!("List is truncated but no continuation token provided for {}/{}", bucket, prefix);
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
info!("Found {} objects for heal in {}/{}", all_objects.len(), bucket, prefix);
|
||||
Ok(all_objects)
|
||||
}
|
||||
|
||||
async fn list_objects_for_heal_page(
|
||||
&self,
|
||||
bucket: &str,
|
||||
prefix: &str,
|
||||
continuation_token: Option<&str>,
|
||||
) -> Result<(Vec<String>, Option<String>, bool)> {
|
||||
debug!("Listing objects for heal (page): {}/{}", bucket, prefix);
|
||||
|
||||
const MAX_KEYS: i32 = 1000;
|
||||
let continuation_token_opt = continuation_token.map(|s| s.to_string());
|
||||
|
||||
// Use list_objects_v2 to get objects with pagination
|
||||
let list_info = match self
|
||||
.ecstore
|
||||
.clone()
|
||||
.list_objects_v2(bucket, prefix, continuation_token_opt, None, MAX_KEYS, false, None, false)
|
||||
.await
|
||||
{
|
||||
Ok(info) => info,
|
||||
Err(e) => {
|
||||
error!("Failed to list objects for heal: {}/{} - {}", bucket, prefix, e);
|
||||
return Err(Error::other(e));
|
||||
}
|
||||
};
|
||||
|
||||
// Collect objects from this page
|
||||
let page_objects: Vec<String> = list_info.objects.into_iter().map(|obj| obj.name).collect();
|
||||
let page_count = page_objects.len();
|
||||
|
||||
debug!("Listed {} objects (page) for heal in {}/{}", page_count, bucket, prefix);
|
||||
|
||||
Ok((page_objects, list_info.next_continuation_token, list_info.is_truncated))
|
||||
}
|
||||
|
||||
async fn get_disk_for_resume(&self, set_disk_id: &str) -> Result<DiskStore> {
|
||||
debug!("Getting disk for resume: {}", set_disk_id);
|
||||
|
||||
// Parse set_disk_id to extract pool and set indices
|
||||
// Format: "pool_{pool_idx}_set_{set_idx}"
|
||||
let parts: Vec<&str> = set_disk_id.split('_').collect();
|
||||
if parts.len() != 4 || parts[0] != "pool" || parts[2] != "set" {
|
||||
return Err(Error::TaskExecutionFailed {
|
||||
message: format!("Invalid set_disk_id format: {set_disk_id}"),
|
||||
});
|
||||
}
|
||||
|
||||
let pool_idx: usize = parts[1].parse().map_err(|_| Error::TaskExecutionFailed {
|
||||
message: format!("Invalid pool index in set_disk_id: {set_disk_id}"),
|
||||
})?;
|
||||
|
||||
let set_idx: usize = parts[3].parse().map_err(|_| Error::TaskExecutionFailed {
|
||||
message: format!("Invalid set index in set_disk_id: {set_disk_id}"),
|
||||
})?;
|
||||
let (pool_idx, set_idx) = crate::heal::utils::parse_set_disk_id(set_disk_id)?;
|
||||
|
||||
// Get the first available disk from the set
|
||||
let disks = self
|
||||
|
||||
@@ -12,13 +12,15 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::error::{Error, Result};
|
||||
use crate::heal::ErasureSetHealer;
|
||||
use crate::heal::{progress::HealProgress, storage::HealStorageAPI};
|
||||
use crate::heal::{ErasureSetHealer, progress::HealProgress, storage::HealStorageAPI};
|
||||
use crate::{Error, Result};
|
||||
use rustfs_common::heal_channel::{HealOpts, HealScanMode};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::sync::Arc;
|
||||
use std::time::{Duration, SystemTime};
|
||||
use std::{
|
||||
future::Future,
|
||||
sync::Arc,
|
||||
time::{Duration, Instant, SystemTime},
|
||||
};
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::{error, info, warn};
|
||||
use uuid::Uuid;
|
||||
@@ -49,11 +51,12 @@ pub enum HealType {
|
||||
}
|
||||
|
||||
/// Heal priority
|
||||
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
||||
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq, PartialOrd, Ord, Hash, Serialize, Deserialize)]
|
||||
pub enum HealPriority {
|
||||
/// Low priority
|
||||
Low = 0,
|
||||
/// Normal priority
|
||||
#[default]
|
||||
Normal = 1,
|
||||
/// High priority
|
||||
High = 2,
|
||||
@@ -61,12 +64,6 @@ pub enum HealPriority {
|
||||
Urgent = 3,
|
||||
}
|
||||
|
||||
impl Default for HealPriority {
|
||||
fn default() -> Self {
|
||||
Self::Normal
|
||||
}
|
||||
}
|
||||
|
||||
/// Heal options
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct HealOptions {
|
||||
@@ -200,6 +197,8 @@ pub struct HealTask {
|
||||
pub started_at: Arc<RwLock<Option<SystemTime>>>,
|
||||
/// Completed time
|
||||
pub completed_at: Arc<RwLock<Option<SystemTime>>>,
|
||||
/// Task start instant for timeout calculation (monotonic)
|
||||
task_start_instant: Arc<RwLock<Option<Instant>>>,
|
||||
/// Cancel token
|
||||
pub cancel_token: tokio_util::sync::CancellationToken,
|
||||
/// Storage layer interface
|
||||
@@ -217,23 +216,77 @@ impl HealTask {
|
||||
created_at: request.created_at,
|
||||
started_at: Arc::new(RwLock::new(None)),
|
||||
completed_at: Arc::new(RwLock::new(None)),
|
||||
task_start_instant: Arc::new(RwLock::new(None)),
|
||||
cancel_token: tokio_util::sync::CancellationToken::new(),
|
||||
storage,
|
||||
}
|
||||
}
|
||||
|
||||
async fn remaining_timeout(&self) -> Result<Option<Duration>> {
|
||||
if let Some(total) = self.options.timeout {
|
||||
let start_instant = { *self.task_start_instant.read().await };
|
||||
if let Some(started_at) = start_instant {
|
||||
let elapsed = started_at.elapsed();
|
||||
if elapsed >= total {
|
||||
return Err(Error::TaskTimeout);
|
||||
}
|
||||
return Ok(Some(total - elapsed));
|
||||
}
|
||||
Ok(Some(total))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
async fn check_control_flags(&self) -> Result<()> {
|
||||
if self.cancel_token.is_cancelled() {
|
||||
return Err(Error::TaskCancelled);
|
||||
}
|
||||
// Only interested in propagating an error if the timeout has expired;
|
||||
// the actual Duration value is not needed here
|
||||
let _ = self.remaining_timeout().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn await_with_control<F, T>(&self, fut: F) -> Result<T>
|
||||
where
|
||||
F: Future<Output = Result<T>> + Send,
|
||||
T: Send,
|
||||
{
|
||||
let cancel_token = self.cancel_token.clone();
|
||||
if let Some(remaining) = self.remaining_timeout().await? {
|
||||
if remaining.is_zero() {
|
||||
return Err(Error::TaskTimeout);
|
||||
}
|
||||
let mut fut = Box::pin(fut);
|
||||
tokio::select! {
|
||||
_ = cancel_token.cancelled() => Err(Error::TaskCancelled),
|
||||
_ = tokio::time::sleep(remaining) => Err(Error::TaskTimeout),
|
||||
result = &mut fut => result,
|
||||
}
|
||||
} else {
|
||||
tokio::select! {
|
||||
_ = cancel_token.cancelled() => Err(Error::TaskCancelled),
|
||||
result = fut => result,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self), fields(task_id = %self.id, heal_type = ?self.heal_type))]
|
||||
pub async fn execute(&self) -> Result<()> {
|
||||
// update status to running
|
||||
// update status and timestamps atomically to avoid race conditions
|
||||
let now = SystemTime::now();
|
||||
let start_instant = Instant::now();
|
||||
{
|
||||
let mut status = self.status.write().await;
|
||||
*status = HealTaskStatus::Running;
|
||||
}
|
||||
{
|
||||
let mut started_at = self.started_at.write().await;
|
||||
*started_at = Some(SystemTime::now());
|
||||
let mut task_start_instant = self.task_start_instant.write().await;
|
||||
*status = HealTaskStatus::Running;
|
||||
*started_at = Some(now);
|
||||
*task_start_instant = Some(start_instant);
|
||||
}
|
||||
|
||||
info!("Starting heal task: {} with type: {:?}", self.id, self.heal_type);
|
||||
info!("Task started");
|
||||
|
||||
let result = match &self.heal_type {
|
||||
HealType::Object {
|
||||
@@ -263,7 +316,17 @@ impl HealTask {
|
||||
Ok(_) => {
|
||||
let mut status = self.status.write().await;
|
||||
*status = HealTaskStatus::Completed;
|
||||
info!("Heal task completed successfully: {}", self.id);
|
||||
info!("Task completed successfully");
|
||||
}
|
||||
Err(Error::TaskCancelled) => {
|
||||
let mut status = self.status.write().await;
|
||||
*status = HealTaskStatus::Cancelled;
|
||||
info!("Heal task was cancelled: {}", self.id);
|
||||
}
|
||||
Err(Error::TaskTimeout) => {
|
||||
let mut status = self.status.write().await;
|
||||
*status = HealTaskStatus::Timeout;
|
||||
warn!("Heal task timed out: {}", self.id);
|
||||
}
|
||||
Err(e) => {
|
||||
let mut status = self.status.write().await;
|
||||
@@ -292,8 +355,9 @@ impl HealTask {
|
||||
}
|
||||
|
||||
// specific heal implementation method
|
||||
#[tracing::instrument(skip(self), fields(bucket = %bucket, object = %object, version_id = ?version_id))]
|
||||
async fn heal_object(&self, bucket: &str, object: &str, version_id: Option<&str>) -> Result<()> {
|
||||
info!("Healing object: {}/{}", bucket, object);
|
||||
info!("Starting object heal workflow");
|
||||
|
||||
// update progress
|
||||
{
|
||||
@@ -303,8 +367,9 @@ impl HealTask {
|
||||
}
|
||||
|
||||
// Step 1: Check if object exists and get metadata
|
||||
info!("Step 1: Checking object existence and metadata");
|
||||
let object_exists = self.storage.object_exists(bucket, object).await?;
|
||||
warn!("Step 1: Checking object existence and metadata");
|
||||
self.check_control_flags().await?;
|
||||
let object_exists = self.await_with_control(self.storage.object_exists(bucket, object)).await?;
|
||||
if !object_exists {
|
||||
warn!("Object does not exist: {}/{}", bucket, object);
|
||||
if self.options.recreate_missing {
|
||||
@@ -336,7 +401,11 @@ impl HealTask {
|
||||
set: self.options.set_index,
|
||||
};
|
||||
|
||||
match self.storage.heal_object(bucket, object, version_id, &heal_opts).await {
|
||||
let heal_result = self
|
||||
.await_with_control(self.storage.heal_object(bucket, object, version_id, &heal_opts))
|
||||
.await;
|
||||
|
||||
match heal_result {
|
||||
Ok((result, error)) => {
|
||||
if let Some(e) = error {
|
||||
// Check if this is a "File not found" error during delete operations
|
||||
@@ -357,9 +426,9 @@ impl HealTask {
|
||||
|
||||
// If heal failed and remove_corrupted is enabled, delete the corrupted object
|
||||
if self.options.remove_corrupted {
|
||||
warn!("Removing corrupted object: {}/{}", bucket, object);
|
||||
info!("Removing corrupted object: {}/{}", bucket, object);
|
||||
if !self.options.dry_run {
|
||||
self.storage.delete_object(bucket, object).await?;
|
||||
self.await_with_control(self.storage.delete_object(bucket, object)).await?;
|
||||
info!("Successfully deleted corrupted object: {}/{}", bucket, object);
|
||||
} else {
|
||||
info!("Dry run mode - would delete corrupted object: {}/{}", bucket, object);
|
||||
@@ -380,11 +449,9 @@ impl HealTask {
|
||||
info!("Step 3: Verifying heal result");
|
||||
let object_size = result.object_size as u64;
|
||||
info!(
|
||||
"Heal completed successfully: {}/{} ({} bytes, {} drives healed)",
|
||||
bucket,
|
||||
object,
|
||||
object_size,
|
||||
result.after.drives.len()
|
||||
object_size = object_size,
|
||||
drives_healed = result.after.drives.len(),
|
||||
"Heal completed successfully"
|
||||
);
|
||||
|
||||
{
|
||||
@@ -393,6 +460,8 @@ impl HealTask {
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
|
||||
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
|
||||
Err(e) => {
|
||||
// Check if this is a "File not found" error during delete operations
|
||||
let error_msg = format!("{e}");
|
||||
@@ -412,9 +481,9 @@ impl HealTask {
|
||||
|
||||
// If heal failed and remove_corrupted is enabled, delete the corrupted object
|
||||
if self.options.remove_corrupted {
|
||||
warn!("Removing corrupted object: {}/{}", bucket, object);
|
||||
info!("Removing corrupted object: {}/{}", bucket, object);
|
||||
if !self.options.dry_run {
|
||||
self.storage.delete_object(bucket, object).await?;
|
||||
self.await_with_control(self.storage.delete_object(bucket, object)).await?;
|
||||
info!("Successfully deleted corrupted object: {}/{}", bucket, object);
|
||||
} else {
|
||||
info!("Dry run mode - would delete corrupted object: {}/{}", bucket, object);
|
||||
@@ -450,7 +519,10 @@ impl HealTask {
|
||||
set: None,
|
||||
};
|
||||
|
||||
match self.storage.heal_object(bucket, object, version_id, &heal_opts).await {
|
||||
match self
|
||||
.await_with_control(self.storage.heal_object(bucket, object, version_id, &heal_opts))
|
||||
.await
|
||||
{
|
||||
Ok((result, error)) => {
|
||||
if let Some(e) = error {
|
||||
error!("Failed to recreate missing object: {}/{} - {}", bucket, object, e);
|
||||
@@ -468,6 +540,8 @@ impl HealTask {
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
|
||||
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
|
||||
Err(e) => {
|
||||
error!("Failed to recreate missing object: {}/{} - {}", bucket, object, e);
|
||||
Err(Error::TaskExecutionFailed {
|
||||
@@ -489,7 +563,8 @@ impl HealTask {
|
||||
|
||||
// Step 1: Check if bucket exists
|
||||
info!("Step 1: Checking bucket existence");
|
||||
let bucket_exists = self.storage.get_bucket_info(bucket).await?.is_some();
|
||||
self.check_control_flags().await?;
|
||||
let bucket_exists = self.await_with_control(self.storage.get_bucket_info(bucket)).await?.is_some();
|
||||
if !bucket_exists {
|
||||
warn!("Bucket does not exist: {}", bucket);
|
||||
return Err(Error::TaskExecutionFailed {
|
||||
@@ -516,7 +591,9 @@ impl HealTask {
|
||||
set: self.options.set_index,
|
||||
};
|
||||
|
||||
match self.storage.heal_bucket(bucket, &heal_opts).await {
|
||||
let heal_result = self.await_with_control(self.storage.heal_bucket(bucket, &heal_opts)).await;
|
||||
|
||||
match heal_result {
|
||||
Ok(result) => {
|
||||
info!("Bucket heal completed successfully: {} ({} drives)", bucket, result.after.drives.len());
|
||||
|
||||
@@ -526,6 +603,8 @@ impl HealTask {
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
|
||||
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
|
||||
Err(e) => {
|
||||
error!("Bucket heal failed: {} - {}", bucket, e);
|
||||
{
|
||||
@@ -551,7 +630,8 @@ impl HealTask {
|
||||
|
||||
// Step 1: Check if object exists
|
||||
info!("Step 1: Checking object existence");
|
||||
let object_exists = self.storage.object_exists(bucket, object).await?;
|
||||
self.check_control_flags().await?;
|
||||
let object_exists = self.await_with_control(self.storage.object_exists(bucket, object)).await?;
|
||||
if !object_exists {
|
||||
warn!("Object does not exist: {}/{}", bucket, object);
|
||||
return Err(Error::TaskExecutionFailed {
|
||||
@@ -578,7 +658,11 @@ impl HealTask {
|
||||
set: self.options.set_index,
|
||||
};
|
||||
|
||||
match self.storage.heal_object(bucket, object, None, &heal_opts).await {
|
||||
let heal_result = self
|
||||
.await_with_control(self.storage.heal_object(bucket, object, None, &heal_opts))
|
||||
.await;
|
||||
|
||||
match heal_result {
|
||||
Ok((result, error)) => {
|
||||
if let Some(e) = error {
|
||||
error!("Metadata heal failed: {}/{} - {}", bucket, object, e);
|
||||
@@ -604,6 +688,8 @@ impl HealTask {
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
|
||||
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
|
||||
Err(e) => {
|
||||
error!("Metadata heal failed: {}/{} - {}", bucket, object, e);
|
||||
{
|
||||
@@ -652,7 +738,11 @@ impl HealTask {
|
||||
set: None,
|
||||
};
|
||||
|
||||
match self.storage.heal_object(bucket, &object, None, &heal_opts).await {
|
||||
let heal_result = self
|
||||
.await_with_control(self.storage.heal_object(bucket, &object, None, &heal_opts))
|
||||
.await;
|
||||
|
||||
match heal_result {
|
||||
Ok((result, error)) => {
|
||||
if let Some(e) = error {
|
||||
error!("MRF heal failed: {} - {}", meta_path, e);
|
||||
@@ -673,6 +763,8 @@ impl HealTask {
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
|
||||
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
|
||||
Err(e) => {
|
||||
error!("MRF heal failed: {} - {}", meta_path, e);
|
||||
{
|
||||
@@ -698,7 +790,8 @@ impl HealTask {
|
||||
|
||||
// Step 1: Check if object exists
|
||||
info!("Step 1: Checking object existence");
|
||||
let object_exists = self.storage.object_exists(bucket, object).await?;
|
||||
self.check_control_flags().await?;
|
||||
let object_exists = self.await_with_control(self.storage.object_exists(bucket, object)).await?;
|
||||
if !object_exists {
|
||||
warn!("Object does not exist: {}/{}", bucket, object);
|
||||
return Err(Error::TaskExecutionFailed {
|
||||
@@ -725,7 +818,11 @@ impl HealTask {
|
||||
set: None,
|
||||
};
|
||||
|
||||
match self.storage.heal_object(bucket, object, version_id, &heal_opts).await {
|
||||
let heal_result = self
|
||||
.await_with_control(self.storage.heal_object(bucket, object, version_id, &heal_opts))
|
||||
.await;
|
||||
|
||||
match heal_result {
|
||||
Ok((result, error)) => {
|
||||
if let Some(e) = error {
|
||||
error!("EC decode heal failed: {}/{} - {}", bucket, object, e);
|
||||
@@ -753,6 +850,8 @@ impl HealTask {
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
|
||||
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
|
||||
Err(e) => {
|
||||
error!("EC decode heal failed: {}/{} - {}", bucket, object, e);
|
||||
{
|
||||
@@ -778,7 +877,7 @@ impl HealTask {
|
||||
|
||||
let buckets = if buckets.is_empty() {
|
||||
info!("No buckets specified, listing all buckets");
|
||||
let bucket_infos = self.storage.list_buckets().await?;
|
||||
let bucket_infos = self.await_with_control(self.storage.list_buckets()).await?;
|
||||
bucket_infos.into_iter().map(|info| info.name).collect()
|
||||
} else {
|
||||
buckets
|
||||
@@ -786,7 +885,9 @@ impl HealTask {
|
||||
|
||||
// Step 1: Perform disk format heal using ecstore
|
||||
info!("Step 1: Performing disk format heal using ecstore");
|
||||
match self.storage.heal_format(self.options.dry_run).await {
|
||||
let format_result = self.await_with_control(self.storage.heal_format(self.options.dry_run)).await;
|
||||
|
||||
match format_result {
|
||||
Ok((result, error)) => {
|
||||
if let Some(e) = error {
|
||||
error!("Disk format heal failed: {} - {}", set_disk_id, e);
|
||||
@@ -805,6 +906,8 @@ impl HealTask {
|
||||
result.after.drives.len()
|
||||
);
|
||||
}
|
||||
Err(Error::TaskCancelled) => return Err(Error::TaskCancelled),
|
||||
Err(Error::TaskTimeout) => return Err(Error::TaskTimeout),
|
||||
Err(e) => {
|
||||
error!("Disk format heal failed: {} - {}", set_disk_id, e);
|
||||
{
|
||||
@@ -824,7 +927,9 @@ impl HealTask {
|
||||
|
||||
// Step 2: Get disk for resume functionality
|
||||
info!("Step 2: Getting disk for resume functionality");
|
||||
let disk = self.storage.get_disk_for_resume(&set_disk_id).await?;
|
||||
let disk = self
|
||||
.await_with_control(self.storage.get_disk_for_resume(&set_disk_id))
|
||||
.await?;
|
||||
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
@@ -832,9 +937,18 @@ impl HealTask {
|
||||
}
|
||||
|
||||
// Step 3: Heal bucket structure
|
||||
// Check control flags before each iteration to ensure timely cancellation.
|
||||
// Each heal_bucket call may handle timeout/cancellation internally, see its implementation for details.
|
||||
for bucket in buckets.iter() {
|
||||
// Check control flags before starting each bucket heal
|
||||
self.check_control_flags().await?;
|
||||
// heal_bucket internally uses await_with_control for timeout/cancellation handling
|
||||
if let Err(err) = self.heal_bucket(bucket).await {
|
||||
info!("{}", err.to_string());
|
||||
// Check if error is due to cancellation or timeout
|
||||
if matches!(err, Error::TaskCancelled | Error::TaskTimeout) {
|
||||
return Err(err);
|
||||
}
|
||||
info!("Bucket heal failed: {}", err.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
@@ -861,6 +975,8 @@ impl HealTask {
|
||||
info!("Erasure set heal completed successfully: {} ({} buckets)", set_disk_id, buckets.len());
|
||||
Ok(())
|
||||
}
|
||||
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
|
||||
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
|
||||
Err(e) => {
|
||||
error!("Erasure set heal failed: {} - {}", set_disk_id, e);
|
||||
Err(Error::TaskExecutionFailed {
|
||||
|
||||
110
crates/ahm/src/heal/utils.rs
Normal file
110
crates/ahm/src/heal/utils.rs
Normal file
@@ -0,0 +1,110 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::{Error, Result};
|
||||
|
||||
/// Prefix for pool index in set disk identifiers.
|
||||
const POOL_PREFIX: &str = "pool";
|
||||
/// Prefix for set index in set disk identifiers.
|
||||
const SET_PREFIX: &str = "set";
|
||||
|
||||
/// Format a set disk identifier using unsigned indices.
|
||||
pub fn format_set_disk_id(pool_idx: usize, set_idx: usize) -> String {
|
||||
format!("{POOL_PREFIX}_{pool_idx}_{SET_PREFIX}_{set_idx}")
|
||||
}
|
||||
|
||||
/// Format a set disk identifier from signed indices.
|
||||
pub fn format_set_disk_id_from_i32(pool_idx: i32, set_idx: i32) -> Option<String> {
|
||||
if pool_idx < 0 || set_idx < 0 {
|
||||
None
|
||||
} else {
|
||||
Some(format_set_disk_id(pool_idx as usize, set_idx as usize))
|
||||
}
|
||||
}
|
||||
|
||||
/// Normalise external set disk identifiers into the canonical format.
|
||||
pub fn normalize_set_disk_id(raw: &str) -> Option<String> {
|
||||
if raw.starts_with(&format!("{POOL_PREFIX}_")) {
|
||||
Some(raw.to_string())
|
||||
} else {
|
||||
parse_compact_set_disk_id(raw).map(|(pool, set)| format_set_disk_id(pool, set))
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse a canonical set disk identifier into pool/set indices.
|
||||
pub fn parse_set_disk_id(raw: &str) -> Result<(usize, usize)> {
|
||||
let parts: Vec<&str> = raw.split('_').collect();
|
||||
if parts.len() != 4 || parts[0] != POOL_PREFIX || parts[2] != SET_PREFIX {
|
||||
return Err(Error::TaskExecutionFailed {
|
||||
message: format!("Invalid set_disk_id format: {raw}"),
|
||||
});
|
||||
}
|
||||
|
||||
let pool_idx = parts[1].parse::<usize>().map_err(|_| Error::TaskExecutionFailed {
|
||||
message: format!("Invalid pool index in set_disk_id: {raw}"),
|
||||
})?;
|
||||
let set_idx = parts[3].parse::<usize>().map_err(|_| Error::TaskExecutionFailed {
|
||||
message: format!("Invalid set index in set_disk_id: {raw}"),
|
||||
})?;
|
||||
Ok((pool_idx, set_idx))
|
||||
}
|
||||
|
||||
fn parse_compact_set_disk_id(raw: &str) -> Option<(usize, usize)> {
|
||||
let (pool, set) = raw.split_once('_')?;
|
||||
let pool_idx = pool.parse::<usize>().ok()?;
|
||||
let set_idx = set.parse::<usize>().ok()?;
|
||||
Some((pool_idx, set_idx))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn format_from_unsigned_indices() {
|
||||
assert_eq!(format_set_disk_id(1, 2), "pool_1_set_2");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn format_from_signed_indices() {
|
||||
assert_eq!(format_set_disk_id_from_i32(3, 4), Some("pool_3_set_4".into()));
|
||||
assert_eq!(format_set_disk_id_from_i32(-1, 4), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn normalize_compact_identifier() {
|
||||
assert_eq!(normalize_set_disk_id("3_5"), Some("pool_3_set_5".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn normalize_prefixed_identifier() {
|
||||
assert_eq!(normalize_set_disk_id("pool_7_set_1"), Some("pool_7_set_1".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn normalize_invalid_identifier() {
|
||||
assert_eq!(normalize_set_disk_id("invalid"), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_prefixed_identifier() {
|
||||
assert_eq!(parse_set_disk_id("pool_9_set_3").unwrap(), (9, 3));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_invalid_identifier() {
|
||||
assert!(parse_set_disk_id("bad").is_err());
|
||||
assert!(parse_set_disk_id("pool_X_set_1").is_err());
|
||||
}
|
||||
}
|
||||
@@ -12,17 +12,16 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::{Arc, OnceLock};
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::{error, info};
|
||||
|
||||
pub mod error;
|
||||
mod error;
|
||||
pub mod heal;
|
||||
pub mod scanner;
|
||||
|
||||
pub use error::{Error, Result};
|
||||
pub use heal::{HealManager, HealOptions, HealPriority, HealRequest, HealType, channel::HealChannelProcessor};
|
||||
pub use scanner::Scanner;
|
||||
use std::sync::{Arc, OnceLock};
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::{error, info};
|
||||
|
||||
// Global cancellation token for AHM services (scanner and other background tasks)
|
||||
static GLOBAL_AHM_SERVICES_CANCEL_TOKEN: OnceLock<CancellationToken> = OnceLock::new();
|
||||
|
||||
@@ -12,18 +12,16 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::scanner::node_scanner::ScanProgress;
|
||||
use crate::{Error, Result};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
path::{Path, PathBuf},
|
||||
time::{Duration, SystemTime},
|
||||
};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
use super::node_scanner::ScanProgress;
|
||||
use crate::{Error, error::Result};
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone)]
|
||||
pub struct CheckpointData {
|
||||
pub version: u32,
|
||||
|
||||
File diff suppressed because it is too large
Load Diff
@@ -12,13 +12,12 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
sync::atomic::{AtomicU64, Ordering},
|
||||
time::{Duration, SystemTime},
|
||||
};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tracing::info;
|
||||
|
||||
/// Scanner metrics
|
||||
|
||||
@@ -12,6 +12,9 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::Result;
|
||||
use crate::scanner::LoadLevel;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
collections::VecDeque,
|
||||
sync::{
|
||||
@@ -20,15 +23,10 @@ use std::{
|
||||
},
|
||||
time::{Duration, SystemTime},
|
||||
};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::sync::RwLock;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
use super::node_scanner::LoadLevel;
|
||||
use crate::error::Result;
|
||||
|
||||
/// IO monitor config
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct IOMonitorConfig {
|
||||
|
||||
@@ -12,6 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::scanner::LoadLevel;
|
||||
use std::{
|
||||
sync::{
|
||||
Arc,
|
||||
@@ -19,12 +20,9 @@ use std::{
|
||||
},
|
||||
time::{Duration, SystemTime},
|
||||
};
|
||||
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
use super::node_scanner::LoadLevel;
|
||||
|
||||
/// IO throttler config
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct IOThrottlerConfig {
|
||||
|
||||
@@ -12,27 +12,44 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::{
|
||||
Result,
|
||||
scanner::metrics::{BucketMetrics, MetricsCollector},
|
||||
};
|
||||
use rustfs_common::data_usage::SizeSummary;
|
||||
use rustfs_common::metrics::IlmAction;
|
||||
use rustfs_ecstore::bucket::lifecycle::{
|
||||
bucket_lifecycle_audit::LcEventSrc,
|
||||
bucket_lifecycle_ops::{GLOBAL_ExpiryState, apply_lifecycle_action, eval_action_from_lifecycle},
|
||||
lifecycle,
|
||||
lifecycle::Lifecycle,
|
||||
use rustfs_ecstore::bucket::{
|
||||
lifecycle::{
|
||||
bucket_lifecycle_audit::LcEventSrc,
|
||||
bucket_lifecycle_ops::{GLOBAL_ExpiryState, apply_lifecycle_action, eval_action_from_lifecycle},
|
||||
lifecycle,
|
||||
lifecycle::Lifecycle,
|
||||
},
|
||||
metadata_sys::get_object_lock_config,
|
||||
object_lock::objectlock_sys::{BucketObjectLockSys, enforce_retention_for_deletion},
|
||||
versioning::VersioningApi,
|
||||
versioning_sys::BucketVersioningSys,
|
||||
};
|
||||
use rustfs_ecstore::bucket::{
|
||||
replication::{GLOBAL_REPLICATION_POOL, ReplicationConfig, get_heal_replicate_object_info},
|
||||
utils::is_meta_bucketname,
|
||||
};
|
||||
use rustfs_ecstore::bucket::metadata_sys::get_object_lock_config;
|
||||
use rustfs_ecstore::bucket::object_lock::objectlock_sys::{BucketObjectLockSys, enforce_retention_for_deletion};
|
||||
use rustfs_ecstore::bucket::versioning::VersioningApi;
|
||||
use rustfs_ecstore::bucket::versioning_sys::BucketVersioningSys;
|
||||
use rustfs_ecstore::store_api::{ObjectInfo, ObjectToDelete};
|
||||
use rustfs_filemeta::FileInfo;
|
||||
use rustfs_filemeta::{FileInfo, ReplicationStatusType, replication_statuses_map};
|
||||
use rustfs_utils::http::headers::{AMZ_BUCKET_REPLICATION_STATUS, HeaderExt, VERSION_PURGE_STATUS_KEY};
|
||||
use s3s::dto::DefaultRetention;
|
||||
use s3s::dto::{BucketLifecycleConfiguration as LifecycleConfig, VersioningConfiguration};
|
||||
use time::OffsetDateTime;
|
||||
use tracing::info;
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
sync::{
|
||||
Arc,
|
||||
atomic::{AtomicU64, Ordering},
|
||||
},
|
||||
time::Duration as StdDuration,
|
||||
};
|
||||
use time::{Duration as TimeDuration, OffsetDateTime};
|
||||
use tokio::sync::Mutex;
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
static SCANNER_EXCESS_OBJECT_VERSIONS: AtomicU64 = AtomicU64::new(100);
|
||||
static SCANNER_EXCESS_OBJECT_VERSIONS_TOTAL_SIZE: AtomicU64 = AtomicU64::new(1024 * 1024 * 1024 * 1024); // 1 TB
|
||||
@@ -41,21 +58,94 @@ static SCANNER_EXCESS_OBJECT_VERSIONS_TOTAL_SIZE: AtomicU64 = AtomicU64::new(102
|
||||
pub struct ScannerItem {
|
||||
pub bucket: String,
|
||||
pub object_name: String,
|
||||
pub replication: Option<ReplicationConfig>,
|
||||
pub lifecycle: Option<Arc<LifecycleConfig>>,
|
||||
pub versioning: Option<Arc<VersioningConfiguration>>,
|
||||
pub object_lock_config: Option<DefaultRetention>,
|
||||
pub replication_pending_grace: StdDuration,
|
||||
pub replication_metrics: Option<ReplicationMetricsHandle>,
|
||||
}
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct ReplicationMetricsHandle {
|
||||
inner: Arc<ReplicationMetricsInner>,
|
||||
}
|
||||
|
||||
struct ReplicationMetricsInner {
|
||||
metrics: Arc<MetricsCollector>,
|
||||
bucket_metrics: Arc<Mutex<HashMap<String, BucketMetrics>>>,
|
||||
}
|
||||
|
||||
impl ReplicationMetricsHandle {
|
||||
pub fn new(metrics: Arc<MetricsCollector>, bucket_metrics: Arc<Mutex<HashMap<String, BucketMetrics>>>) -> Self {
|
||||
Self {
|
||||
inner: Arc::new(ReplicationMetricsInner { metrics, bucket_metrics }),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn record_status(&self, bucket: &str, status: ReplicationStatusType, lagging: bool) {
|
||||
match status {
|
||||
ReplicationStatusType::Pending => self.inner.metrics.increment_replication_pending_objects(1),
|
||||
ReplicationStatusType::Failed => self.inner.metrics.increment_replication_failed_objects(1),
|
||||
_ => {}
|
||||
}
|
||||
if lagging {
|
||||
self.inner.metrics.increment_replication_lagging_objects(1);
|
||||
}
|
||||
|
||||
let mut guard = self.inner.bucket_metrics.lock().await;
|
||||
let entry = guard.entry(bucket.to_string()).or_insert_with(|| BucketMetrics {
|
||||
bucket: bucket.to_string(),
|
||||
..Default::default()
|
||||
});
|
||||
|
||||
match status {
|
||||
ReplicationStatusType::Pending => {
|
||||
entry.replication_pending = entry.replication_pending.saturating_add(1);
|
||||
}
|
||||
ReplicationStatusType::Failed => {
|
||||
entry.replication_failed = entry.replication_failed.saturating_add(1);
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
|
||||
if lagging {
|
||||
entry.replication_lagging = entry.replication_lagging.saturating_add(1);
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn record_task_submission(&self, bucket: &str) {
|
||||
self.inner.metrics.increment_replication_tasks_queued(1);
|
||||
let mut guard = self.inner.bucket_metrics.lock().await;
|
||||
let entry = guard.entry(bucket.to_string()).or_insert_with(|| BucketMetrics {
|
||||
bucket: bucket.to_string(),
|
||||
..Default::default()
|
||||
});
|
||||
entry.replication_tasks_queued = entry.replication_tasks_queued.saturating_add(1);
|
||||
}
|
||||
}
|
||||
|
||||
impl ScannerItem {
|
||||
const INTERNAL_REPLICATION_STATUS_KEY: &'static str = "x-rustfs-internal-replication-status";
|
||||
|
||||
pub fn new(
|
||||
bucket: String,
|
||||
replication: Option<ReplicationConfig>,
|
||||
lifecycle: Option<Arc<LifecycleConfig>>,
|
||||
versioning: Option<Arc<VersioningConfiguration>>,
|
||||
object_lock_config: Option<DefaultRetention>,
|
||||
replication_pending_grace: StdDuration,
|
||||
replication_metrics: Option<ReplicationMetricsHandle>,
|
||||
) -> Self {
|
||||
Self {
|
||||
bucket,
|
||||
object_name: "".to_string(),
|
||||
replication,
|
||||
lifecycle,
|
||||
versioning,
|
||||
object_lock_config,
|
||||
replication_pending_grace,
|
||||
replication_metrics,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -161,6 +251,23 @@ impl ScannerItem {
|
||||
}
|
||||
|
||||
pub async fn apply_actions(&mut self, oi: &ObjectInfo, _size_s: &mut SizeSummary) -> (bool, i64) {
|
||||
let object_locked = self.is_object_lock_protected(oi);
|
||||
|
||||
if let Err(err) = self.heal_replication(oi).await {
|
||||
warn!(
|
||||
"heal_replication failed for {}/{} (version {:?}): {}",
|
||||
oi.bucket, oi.name, oi.version_id, err
|
||||
);
|
||||
}
|
||||
|
||||
if object_locked {
|
||||
info!(
|
||||
"apply_actions: Skipping lifecycle for {}/{} because object lock retention or legal hold is active",
|
||||
oi.bucket, oi.name
|
||||
);
|
||||
return (false, oi.size);
|
||||
}
|
||||
|
||||
let (action, _size) = self.apply_lifecycle(oi).await;
|
||||
|
||||
info!(
|
||||
@@ -171,16 +278,6 @@ impl ScannerItem {
|
||||
oi.user_defined.clone()
|
||||
);
|
||||
|
||||
// Create a mutable clone if you need to modify fields
|
||||
/*let mut oi = oi.clone();
|
||||
oi.replication_status = ReplicationStatusType::from(
|
||||
oi.user_defined
|
||||
.get("x-amz-bucket-replication-status")
|
||||
.unwrap_or(&"PENDING".to_string()),
|
||||
);
|
||||
info!("apply status is: {:?}", oi.replication_status);
|
||||
self.heal_replication(&oi, _size_s).await;*/
|
||||
|
||||
if action.delete_all() {
|
||||
return (true, 0);
|
||||
}
|
||||
@@ -197,7 +294,7 @@ impl ScannerItem {
|
||||
|
||||
info!("apply_lifecycle: Lifecycle config exists for object: {}", oi.name);
|
||||
|
||||
let (olcfg, rcfg) = if self.bucket != ".minio.sys" {
|
||||
let (olcfg, rcfg) = if !is_meta_bucketname(&self.bucket) {
|
||||
(
|
||||
get_object_lock_config(&self.bucket).await.ok(),
|
||||
None, // FIXME: replication config
|
||||
@@ -263,4 +360,202 @@ impl ScannerItem {
|
||||
|
||||
(lc_evt.action, new_size)
|
||||
}
|
||||
|
||||
fn is_object_lock_protected(&self, oi: &ObjectInfo) -> bool {
|
||||
enforce_retention_for_deletion(oi)
|
||||
}
|
||||
|
||||
async fn heal_replication(&self, oi: &ObjectInfo) -> Result<()> {
|
||||
warn!("heal_replication: healing replication for {}/{}", oi.bucket, oi.name);
|
||||
warn!("heal_replication: ObjectInfo oi: {:?}", oi);
|
||||
|
||||
let enriched = Self::hydrate_replication_metadata(oi);
|
||||
let pending_lagging = self.is_pending_lagging(&enriched);
|
||||
|
||||
if let Some(handle) = &self.replication_metrics {
|
||||
handle
|
||||
.record_status(&self.bucket, enriched.replication_status.clone(), pending_lagging)
|
||||
.await;
|
||||
}
|
||||
|
||||
debug!(
|
||||
"heal_replication: evaluating {}/{} with status {:?} and internal {:?}",
|
||||
enriched.bucket, enriched.name, enriched.replication_status, enriched.replication_status_internal
|
||||
);
|
||||
|
||||
// if !self.needs_replication_heal(&enriched, pending_lagging) {
|
||||
// return Ok(());
|
||||
// }
|
||||
|
||||
// let replication_cfg = match get_replication_config(&self.bucket).await {
|
||||
// Ok((cfg, _)) => Some(cfg),
|
||||
// Err(err) => {
|
||||
// debug!("heal_replication: failed to fetch replication config for bucket {}: {}", self.bucket, err);
|
||||
// None
|
||||
// }
|
||||
// };
|
||||
|
||||
// if replication_cfg.is_none() {
|
||||
// return Ok(());
|
||||
// }
|
||||
|
||||
// let bucket_targets = match get_bucket_targets_config(&self.bucket).await {
|
||||
// Ok(targets) => Some(targets),
|
||||
// Err(err) => {
|
||||
// debug!("heal_replication: no bucket targets for bucket {}: {}", self.bucket, err);
|
||||
// None
|
||||
// }
|
||||
// };
|
||||
|
||||
// let replication_cfg = ReplicationConfig::new(replication_cfg, bucket_targets);
|
||||
|
||||
let replication_cfg = self.replication.clone().unwrap_or_default();
|
||||
|
||||
if replication_cfg.config.is_none() && replication_cfg.remotes.is_none() {
|
||||
debug!("heal_replication: no replication config for {}/{}", enriched.bucket, enriched.name);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let replicate_info = get_heal_replicate_object_info(&enriched, &replication_cfg).await;
|
||||
let should_replicate = replicate_info.dsc.replicate_any()
|
||||
|| matches!(
|
||||
enriched.replication_status,
|
||||
ReplicationStatusType::Failed | ReplicationStatusType::Pending
|
||||
);
|
||||
if !should_replicate {
|
||||
debug!("heal_replication: no actionable targets for {}/{}", enriched.bucket, enriched.name);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
if let Some(pool) = GLOBAL_REPLICATION_POOL.get() {
|
||||
pool.queue_replica_task(replicate_info).await;
|
||||
if let Some(handle) = &self.replication_metrics {
|
||||
handle.record_task_submission(&self.bucket).await;
|
||||
}
|
||||
warn!("heal_replication: queued replication heal task for {}/{}", enriched.bucket, enriched.name);
|
||||
} else {
|
||||
warn!(
|
||||
"heal_replication: GLOBAL_REPLICATION_POOL not initialized, skipping heal for {}/{}",
|
||||
enriched.bucket, enriched.name
|
||||
);
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn needs_replication_heal(&self, oi: &ObjectInfo, pending_lagging: bool) -> bool {
|
||||
if matches!(oi.replication_status, ReplicationStatusType::Failed) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if pending_lagging && matches!(oi.replication_status, ReplicationStatusType::Pending) {
|
||||
return true;
|
||||
}
|
||||
|
||||
if let Some(raw) = oi.replication_status_internal.as_ref() {
|
||||
let statuses = replication_statuses_map(raw);
|
||||
if statuses
|
||||
.values()
|
||||
.any(|status| matches!(status, ReplicationStatusType::Failed))
|
||||
{
|
||||
return true;
|
||||
}
|
||||
|
||||
if pending_lagging
|
||||
&& statuses
|
||||
.values()
|
||||
.any(|status| matches!(status, ReplicationStatusType::Pending))
|
||||
{
|
||||
return true;
|
||||
}
|
||||
}
|
||||
|
||||
false
|
||||
}
|
||||
|
||||
fn hydrate_replication_metadata(oi: &ObjectInfo) -> ObjectInfo {
|
||||
let mut enriched = oi.clone();
|
||||
|
||||
if enriched.replication_status.is_empty() {
|
||||
if let Some(status) = enriched.user_defined.lookup(AMZ_BUCKET_REPLICATION_STATUS) {
|
||||
enriched.replication_status = ReplicationStatusType::from(status);
|
||||
}
|
||||
}
|
||||
|
||||
if enriched.replication_status_internal.is_none() {
|
||||
if let Some(raw) = enriched.user_defined.lookup(Self::INTERNAL_REPLICATION_STATUS_KEY) {
|
||||
if !raw.is_empty() {
|
||||
enriched.replication_status_internal = Some(raw.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if enriched.version_purge_status_internal.is_none() {
|
||||
if let Some(raw) = enriched.user_defined.lookup(VERSION_PURGE_STATUS_KEY) {
|
||||
if !raw.is_empty() {
|
||||
enriched.version_purge_status_internal = Some(raw.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
enriched
|
||||
}
|
||||
|
||||
fn is_pending_lagging(&self, oi: &ObjectInfo) -> bool {
|
||||
if !matches!(oi.replication_status, ReplicationStatusType::Pending) {
|
||||
return false;
|
||||
}
|
||||
|
||||
let Some(mod_time) = oi.mod_time else {
|
||||
return false;
|
||||
};
|
||||
|
||||
let grace = TimeDuration::try_from(self.replication_pending_grace).unwrap_or_else(|_| {
|
||||
warn!(
|
||||
"replication_pending_grace is invalid, using default value: 0 seconds, grace: {:?}",
|
||||
self.replication_pending_grace
|
||||
);
|
||||
TimeDuration::seconds(0)
|
||||
});
|
||||
if grace.is_zero() {
|
||||
return true;
|
||||
}
|
||||
|
||||
let elapsed = OffsetDateTime::now_utc() - mod_time;
|
||||
elapsed >= grace
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
async fn replication_metrics_handle_tracks_counts() {
|
||||
let metrics = Arc::new(MetricsCollector::new());
|
||||
let bucket_metrics = Arc::new(Mutex::new(HashMap::new()));
|
||||
let handle = ReplicationMetricsHandle::new(metrics.clone(), bucket_metrics.clone());
|
||||
|
||||
handle
|
||||
.record_status("test-bucket", ReplicationStatusType::Pending, true)
|
||||
.await;
|
||||
handle
|
||||
.record_status("test-bucket", ReplicationStatusType::Failed, false)
|
||||
.await;
|
||||
handle.record_task_submission("test-bucket").await;
|
||||
|
||||
let snapshot = metrics.get_metrics();
|
||||
assert_eq!(snapshot.replication_pending_objects, 1);
|
||||
assert_eq!(snapshot.replication_failed_objects, 1);
|
||||
assert_eq!(snapshot.replication_lagging_objects, 1);
|
||||
assert_eq!(snapshot.replication_tasks_queued, 1);
|
||||
|
||||
let guard = bucket_metrics.lock().await;
|
||||
let bucket_entry = guard.get("test-bucket").expect("bucket metrics exists");
|
||||
assert_eq!(bucket_entry.replication_pending, 1);
|
||||
assert_eq!(bucket_entry.replication_failed, 1);
|
||||
assert_eq!(bucket_entry.replication_lagging, 1);
|
||||
assert_eq!(bucket_entry.replication_tasks_queued, 1);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,16 +1,18 @@
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::{from_slice, to_vec};
|
||||
use tokio::{fs, task};
|
||||
use tracing::warn;
|
||||
use walkdir::WalkDir;
|
||||
|
||||
use crate::error::{Error, Result};
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::{Error, Result};
|
||||
use rustfs_common::data_usage::DiskUsageStatus;
|
||||
use rustfs_ecstore::data_usage::{
|
||||
LocalUsageSnapshot, LocalUsageSnapshotMeta, data_usage_state_dir, ensure_data_usage_layout, snapshot_file_name,
|
||||
@@ -20,6 +22,15 @@ use rustfs_ecstore::disk::DiskAPI;
|
||||
use rustfs_ecstore::store::ECStore;
|
||||
use rustfs_ecstore::store_api::ObjectInfo;
|
||||
use rustfs_filemeta::{FileInfo, FileMeta, FileMetaVersion, VersionType};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::{from_slice, to_vec};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::sync::Arc;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
use tokio::{fs, task};
|
||||
use tracing::warn;
|
||||
use walkdir::WalkDir;
|
||||
|
||||
const STATE_FILE_EXTENSION: &str = "";
|
||||
|
||||
@@ -51,6 +62,7 @@ struct DiskScanResult {
|
||||
pub struct LocalObjectRecord {
|
||||
pub usage: LocalObjectUsage,
|
||||
pub object_info: Option<rustfs_ecstore::store_api::ObjectInfo>,
|
||||
pub file_info: Option<FileInfo>,
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
@@ -212,9 +224,11 @@ fn scan_disk_blocking(root: PathBuf, meta: LocalUsageSnapshotMeta, mut state: In
|
||||
record.usage.last_modified_ns = mtime_ns;
|
||||
state.objects.insert(rel_path.clone(), record.usage.clone());
|
||||
emitted.insert(rel_path.clone());
|
||||
warn!("compute_object_usage: record: {:?}", record.clone());
|
||||
objects_by_bucket.entry(record.usage.bucket.clone()).or_default().push(record);
|
||||
}
|
||||
Ok(None) => {
|
||||
warn!("compute_object_usage: None, rel_path: {:?}", rel_path);
|
||||
state.objects.remove(&rel_path);
|
||||
}
|
||||
Err(err) => {
|
||||
@@ -229,24 +243,27 @@ fn scan_disk_blocking(root: PathBuf, meta: LocalUsageSnapshotMeta, mut state: In
|
||||
warn!("Failed to read xl.meta {:?}: {}", xl_path, err);
|
||||
}
|
||||
}
|
||||
} else {
|
||||
warn!("should_parse: false, rel_path: {:?}", rel_path);
|
||||
}
|
||||
}
|
||||
|
||||
state.objects.retain(|key, _| visited.contains(key));
|
||||
state.last_scan_ns = Some(now_ns);
|
||||
|
||||
for (key, usage) in &state.objects {
|
||||
if emitted.contains(key) {
|
||||
continue;
|
||||
}
|
||||
objects_by_bucket
|
||||
.entry(usage.bucket.clone())
|
||||
.or_default()
|
||||
.push(LocalObjectRecord {
|
||||
usage: usage.clone(),
|
||||
object_info: None,
|
||||
});
|
||||
}
|
||||
// for (key, usage) in &state.objects {
|
||||
// if emitted.contains(key) {
|
||||
// continue;
|
||||
// }
|
||||
// objects_by_bucket
|
||||
// .entry(usage.bucket.clone())
|
||||
// .or_default()
|
||||
// .push(LocalObjectRecord {
|
||||
// usage: usage.clone(),
|
||||
// object_info: None,
|
||||
// file_info: None,
|
||||
// });
|
||||
// }
|
||||
|
||||
let snapshot = build_snapshot(meta, &state.objects, now);
|
||||
status.snapshot_exists = true;
|
||||
@@ -308,6 +325,7 @@ fn compute_object_usage(bucket: &str, object: &str, file_meta: &FileMeta) -> Res
|
||||
let versioned = fi.version_id.is_some();
|
||||
ObjectInfo::from_file_info(fi, bucket, object, versioned)
|
||||
});
|
||||
let file_info = latest_file_info.clone();
|
||||
|
||||
Ok(Some(LocalObjectRecord {
|
||||
usage: LocalObjectUsage {
|
||||
@@ -320,6 +338,7 @@ fn compute_object_usage(bucket: &str, object: &str, file_meta: &FileMeta) -> Res
|
||||
has_live_object,
|
||||
},
|
||||
object_info,
|
||||
file_info,
|
||||
}))
|
||||
}
|
||||
|
||||
|
||||
@@ -12,22 +12,19 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::scanner::node_scanner::{BucketStats, DiskStats, LocalScanStats};
|
||||
use crate::{Error, Result};
|
||||
use rustfs_common::data_usage::DataUsageInfo;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
path::{Path, PathBuf},
|
||||
sync::Arc,
|
||||
sync::atomic::{AtomicU64, Ordering},
|
||||
time::{Duration, SystemTime},
|
||||
};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
use rustfs_common::data_usage::DataUsageInfo;
|
||||
|
||||
use super::node_scanner::{BucketStats, DiskStats, LocalScanStats};
|
||||
use crate::{Error, error::Result};
|
||||
|
||||
/// local stats manager
|
||||
pub struct LocalStatsManager {
|
||||
/// node id
|
||||
|
||||
@@ -12,13 +12,12 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
sync::atomic::{AtomicU64, Ordering},
|
||||
time::{Duration, SystemTime},
|
||||
};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tracing::info;
|
||||
|
||||
/// Scanner metrics
|
||||
@@ -46,6 +45,14 @@ pub struct ScannerMetrics {
|
||||
pub healthy_objects: u64,
|
||||
/// Total corrupted objects found
|
||||
pub corrupted_objects: u64,
|
||||
/// Replication heal tasks queued
|
||||
pub replication_tasks_queued: u64,
|
||||
/// Objects observed with pending replication
|
||||
pub replication_pending_objects: u64,
|
||||
/// Objects observed with failed replication
|
||||
pub replication_failed_objects: u64,
|
||||
/// Objects with replication pending longer than grace period
|
||||
pub replication_lagging_objects: u64,
|
||||
/// Last scan activity time
|
||||
pub last_activity: Option<SystemTime>,
|
||||
/// Current scan cycle
|
||||
@@ -87,6 +94,14 @@ pub struct BucketMetrics {
|
||||
pub heal_tasks_completed: u64,
|
||||
/// Heal tasks failed for this bucket
|
||||
pub heal_tasks_failed: u64,
|
||||
/// Objects observed with pending replication status
|
||||
pub replication_pending: u64,
|
||||
/// Objects observed with failed replication status
|
||||
pub replication_failed: u64,
|
||||
/// Objects exceeding replication grace period
|
||||
pub replication_lagging: u64,
|
||||
/// Replication heal tasks queued for this bucket
|
||||
pub replication_tasks_queued: u64,
|
||||
}
|
||||
|
||||
/// Disk-specific metrics
|
||||
@@ -128,6 +143,10 @@ pub struct MetricsCollector {
|
||||
total_cycles: AtomicU64,
|
||||
healthy_objects: AtomicU64,
|
||||
corrupted_objects: AtomicU64,
|
||||
replication_tasks_queued: AtomicU64,
|
||||
replication_pending_objects: AtomicU64,
|
||||
replication_failed_objects: AtomicU64,
|
||||
replication_lagging_objects: AtomicU64,
|
||||
}
|
||||
|
||||
impl MetricsCollector {
|
||||
@@ -147,6 +166,10 @@ impl MetricsCollector {
|
||||
total_cycles: AtomicU64::new(0),
|
||||
healthy_objects: AtomicU64::new(0),
|
||||
corrupted_objects: AtomicU64::new(0),
|
||||
replication_tasks_queued: AtomicU64::new(0),
|
||||
replication_pending_objects: AtomicU64::new(0),
|
||||
replication_failed_objects: AtomicU64::new(0),
|
||||
replication_lagging_objects: AtomicU64::new(0),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -195,6 +218,26 @@ impl MetricsCollector {
|
||||
self.heal_tasks_failed.fetch_add(count, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Increment replication tasks queued
|
||||
pub fn increment_replication_tasks_queued(&self, count: u64) {
|
||||
self.replication_tasks_queued.fetch_add(count, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Increment replication pending objects
|
||||
pub fn increment_replication_pending_objects(&self, count: u64) {
|
||||
self.replication_pending_objects.fetch_add(count, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Increment replication failed objects
|
||||
pub fn increment_replication_failed_objects(&self, count: u64) {
|
||||
self.replication_failed_objects.fetch_add(count, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Increment replication lagging objects
|
||||
pub fn increment_replication_lagging_objects(&self, count: u64) {
|
||||
self.replication_lagging_objects.fetch_add(count, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Set current cycle
|
||||
pub fn set_current_cycle(&self, cycle: u64) {
|
||||
self.current_cycle.store(cycle, Ordering::Relaxed);
|
||||
@@ -229,6 +272,10 @@ impl MetricsCollector {
|
||||
heal_tasks_failed: self.heal_tasks_failed.load(Ordering::Relaxed),
|
||||
healthy_objects: self.healthy_objects.load(Ordering::Relaxed),
|
||||
corrupted_objects: self.corrupted_objects.load(Ordering::Relaxed),
|
||||
replication_tasks_queued: self.replication_tasks_queued.load(Ordering::Relaxed),
|
||||
replication_pending_objects: self.replication_pending_objects.load(Ordering::Relaxed),
|
||||
replication_failed_objects: self.replication_failed_objects.load(Ordering::Relaxed),
|
||||
replication_lagging_objects: self.replication_lagging_objects.load(Ordering::Relaxed),
|
||||
last_activity: Some(SystemTime::now()),
|
||||
current_cycle: self.current_cycle.load(Ordering::Relaxed),
|
||||
total_cycles: self.total_cycles.load(Ordering::Relaxed),
|
||||
@@ -256,6 +303,10 @@ impl MetricsCollector {
|
||||
self.total_cycles.store(0, Ordering::Relaxed);
|
||||
self.healthy_objects.store(0, Ordering::Relaxed);
|
||||
self.corrupted_objects.store(0, Ordering::Relaxed);
|
||||
self.replication_tasks_queued.store(0, Ordering::Relaxed);
|
||||
self.replication_pending_objects.store(0, Ordering::Relaxed);
|
||||
self.replication_failed_objects.store(0, Ordering::Relaxed);
|
||||
self.replication_lagging_objects.store(0, Ordering::Relaxed);
|
||||
|
||||
info!("Scanner metrics reset");
|
||||
}
|
||||
|
||||
@@ -27,8 +27,10 @@ pub mod stats_aggregator;
|
||||
pub use checkpoint::{CheckpointData, CheckpointInfo, CheckpointManager};
|
||||
pub use data_scanner::{ScanMode, Scanner, ScannerConfig, ScannerState};
|
||||
pub use io_monitor::{AdvancedIOMonitor, IOMetrics, IOMonitorConfig};
|
||||
pub use io_throttler::{AdvancedIOThrottler, IOThrottlerConfig, ResourceAllocation, ThrottleDecision};
|
||||
pub use io_throttler::{AdvancedIOThrottler, IOThrottlerConfig, MetricsSnapshot, ResourceAllocation, ThrottleDecision};
|
||||
pub use local_stats::{BatchScanResult, LocalStatsManager, ScanResultEntry, StatsSummary};
|
||||
pub use metrics::ScannerMetrics;
|
||||
pub use metrics::{BucketMetrics, DiskMetrics, MetricsCollector, ScannerMetrics};
|
||||
pub use node_scanner::{IOMonitor, IOThrottler, LoadLevel, LocalScanStats, NodeScanner, NodeScannerConfig};
|
||||
pub use stats_aggregator::{AggregatedStats, DecentralizedStatsAggregator, NodeClient, NodeInfo};
|
||||
pub use stats_aggregator::{
|
||||
AggregatedStats, DecentralizedStatsAggregator, DecentralizedStatsAggregatorConfig, NodeClient, NodeInfo,
|
||||
};
|
||||
|
||||
@@ -12,6 +12,16 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::Result;
|
||||
use crate::scanner::{
|
||||
AdvancedIOMonitor, AdvancedIOThrottler, BatchScanResult, CheckpointManager, IOMonitorConfig, IOThrottlerConfig,
|
||||
LocalStatsManager, MetricsSnapshot, ScanResultEntry,
|
||||
};
|
||||
use rustfs_common::data_usage::DataUsageInfo;
|
||||
use rustfs_ecstore::StorageAPI;
|
||||
use rustfs_ecstore::bucket::utils::is_meta_bucketname;
|
||||
use rustfs_ecstore::disk::{DiskAPI, DiskStore};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
path::{Path, PathBuf},
|
||||
@@ -21,22 +31,10 @@ use std::{
|
||||
},
|
||||
time::{Duration, SystemTime},
|
||||
};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::sync::RwLock;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
use rustfs_common::data_usage::DataUsageInfo;
|
||||
use rustfs_ecstore::StorageAPI;
|
||||
use rustfs_ecstore::disk::{DiskAPI, DiskStore}; // Add this import
|
||||
|
||||
use super::checkpoint::CheckpointManager;
|
||||
use super::io_monitor::{AdvancedIOMonitor, IOMonitorConfig};
|
||||
use super::io_throttler::{AdvancedIOThrottler, IOThrottlerConfig, MetricsSnapshot};
|
||||
use super::local_stats::{BatchScanResult, LocalStatsManager, ScanResultEntry};
|
||||
use crate::error::Result;
|
||||
|
||||
/// SystemTime serde
|
||||
mod system_time_serde {
|
||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||
@@ -714,6 +712,7 @@ impl NodeScanner {
|
||||
// start scanning loop
|
||||
let scanner_clone = self.clone_for_background();
|
||||
tokio::spawn(async move {
|
||||
// update object count and size for each bucket
|
||||
if let Err(e) = scanner_clone.scan_loop_with_resume(None).await {
|
||||
error!("scanning loop failed: {}", e);
|
||||
}
|
||||
@@ -881,7 +880,7 @@ impl NodeScanner {
|
||||
let bucket_name = &bucket_info.name;
|
||||
|
||||
// skip system internal buckets
|
||||
if bucket_name == ".minio.sys" {
|
||||
if is_meta_bucketname(bucket_name) {
|
||||
continue;
|
||||
}
|
||||
|
||||
|
||||
@@ -12,24 +12,21 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::scanner::{
|
||||
local_stats::StatsSummary,
|
||||
node_scanner::{BucketStats, LoadLevel, ScanProgress},
|
||||
};
|
||||
use crate::{Error, Result};
|
||||
use rustfs_common::data_usage::DataUsageInfo;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
sync::Arc,
|
||||
time::{Duration, SystemTime},
|
||||
};
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
use rustfs_common::data_usage::DataUsageInfo;
|
||||
|
||||
use super::{
|
||||
local_stats::StatsSummary,
|
||||
node_scanner::{BucketStats, LoadLevel, ScanProgress},
|
||||
};
|
||||
use crate::{Error, error::Result};
|
||||
|
||||
/// node client config
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct NodeClientConfig {
|
||||
|
||||
283
crates/ahm/tests/heal_bug_fixes_test.rs
Normal file
283
crates/ahm/tests/heal_bug_fixes_test.rs
Normal file
@@ -0,0 +1,283 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use rustfs_ahm::heal::{
|
||||
event::{HealEvent, Severity},
|
||||
task::{HealPriority, HealType},
|
||||
utils,
|
||||
};
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_to_heal_request_no_panic() {
|
||||
use rustfs_ecstore::disk::endpoint::Endpoint;
|
||||
|
||||
// Test that invalid pool/set indices don't cause panic
|
||||
// Create endpoint using try_from or similar method
|
||||
let endpoint_result = Endpoint::try_from("http://localhost:9000");
|
||||
if let Ok(mut endpoint) = endpoint_result {
|
||||
endpoint.pool_idx = -1;
|
||||
endpoint.set_idx = -1;
|
||||
endpoint.disk_idx = 0;
|
||||
|
||||
let event = HealEvent::DiskStatusChange {
|
||||
endpoint,
|
||||
old_status: "ok".to_string(),
|
||||
new_status: "offline".to_string(),
|
||||
};
|
||||
|
||||
// Should return error instead of panicking
|
||||
let result = event.to_heal_request();
|
||||
assert!(result.is_err());
|
||||
assert!(result.unwrap_err().to_string().contains("Invalid heal type"));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_to_heal_request_valid_indices() {
|
||||
use rustfs_ecstore::disk::endpoint::Endpoint;
|
||||
|
||||
// Test that valid indices work correctly
|
||||
let endpoint_result = Endpoint::try_from("http://localhost:9000");
|
||||
if let Ok(mut endpoint) = endpoint_result {
|
||||
endpoint.pool_idx = 0;
|
||||
endpoint.set_idx = 1;
|
||||
endpoint.disk_idx = 0;
|
||||
|
||||
let event = HealEvent::DiskStatusChange {
|
||||
endpoint,
|
||||
old_status: "ok".to_string(),
|
||||
new_status: "offline".to_string(),
|
||||
};
|
||||
|
||||
let result = event.to_heal_request();
|
||||
assert!(result.is_ok());
|
||||
let request = result.unwrap();
|
||||
assert!(matches!(request.heal_type, HealType::ErasureSet { .. }));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_object_corruption() {
|
||||
let event = HealEvent::ObjectCorruption {
|
||||
bucket: "test-bucket".to_string(),
|
||||
object: "test-object".to_string(),
|
||||
version_id: None,
|
||||
corruption_type: rustfs_ahm::heal::event::CorruptionType::DataCorruption,
|
||||
severity: Severity::High,
|
||||
};
|
||||
|
||||
let result = event.to_heal_request();
|
||||
assert!(result.is_ok());
|
||||
let request = result.unwrap();
|
||||
assert!(matches!(request.heal_type, HealType::Object { .. }));
|
||||
assert_eq!(request.priority, HealPriority::High);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_ec_decode_failure() {
|
||||
let event = HealEvent::ECDecodeFailure {
|
||||
bucket: "test-bucket".to_string(),
|
||||
object: "test-object".to_string(),
|
||||
version_id: None,
|
||||
missing_shards: vec![0, 1],
|
||||
available_shards: vec![2, 3],
|
||||
};
|
||||
|
||||
let result = event.to_heal_request();
|
||||
assert!(result.is_ok());
|
||||
let request = result.unwrap();
|
||||
assert!(matches!(request.heal_type, HealType::ECDecode { .. }));
|
||||
assert_eq!(request.priority, HealPriority::Urgent);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_format_set_disk_id_from_i32_negative() {
|
||||
// Test that negative indices return None
|
||||
assert!(utils::format_set_disk_id_from_i32(-1, 0).is_none());
|
||||
assert!(utils::format_set_disk_id_from_i32(0, -1).is_none());
|
||||
assert!(utils::format_set_disk_id_from_i32(-1, -1).is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_format_set_disk_id_from_i32_valid() {
|
||||
// Test that valid indices return Some
|
||||
let result = utils::format_set_disk_id_from_i32(0, 1);
|
||||
assert!(result.is_some());
|
||||
assert_eq!(result.unwrap(), "pool_0_set_1");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_resume_state_timestamp_handling() {
|
||||
use rustfs_ahm::heal::resume::ResumeState;
|
||||
|
||||
// Test that ResumeState creation doesn't panic even if system time is before epoch
|
||||
// This is a theoretical test - in practice, system time should never be before epoch
|
||||
// But we want to ensure unwrap_or_default handles edge cases
|
||||
let state = ResumeState::new(
|
||||
"test-task".to_string(),
|
||||
"test-type".to_string(),
|
||||
"pool_0_set_1".to_string(),
|
||||
vec!["bucket1".to_string()],
|
||||
);
|
||||
|
||||
// Verify fields are initialized (u64 is always >= 0)
|
||||
// The important thing is that unwrap_or_default prevents panic
|
||||
let _ = state.start_time;
|
||||
let _ = state.last_update;
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_resume_checkpoint_timestamp_handling() {
|
||||
use rustfs_ahm::heal::resume::ResumeCheckpoint;
|
||||
|
||||
// Test that ResumeCheckpoint creation doesn't panic
|
||||
let checkpoint = ResumeCheckpoint::new("test-task".to_string());
|
||||
|
||||
// Verify field is initialized (u64 is always >= 0)
|
||||
// The important thing is that unwrap_or_default prevents panic
|
||||
let _ = checkpoint.checkpoint_time;
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_path_to_str_helper() {
|
||||
use std::path::Path;
|
||||
|
||||
// Test that path conversion handles non-UTF-8 paths gracefully
|
||||
// Note: This is a compile-time test - actual non-UTF-8 paths are hard to construct in Rust
|
||||
// The helper function should properly handle the conversion
|
||||
let valid_path = Path::new("test/path");
|
||||
assert!(valid_path.to_str().is_some());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_task_status_atomic_update() {
|
||||
use rustfs_ahm::heal::storage::HealStorageAPI;
|
||||
use rustfs_ahm::heal::task::{HealOptions, HealRequest, HealTask, HealTaskStatus};
|
||||
use std::sync::Arc;
|
||||
|
||||
// Mock storage for testing
|
||||
struct MockStorage;
|
||||
#[async_trait::async_trait]
|
||||
impl HealStorageAPI for MockStorage {
|
||||
async fn get_object_meta(
|
||||
&self,
|
||||
_bucket: &str,
|
||||
_object: &str,
|
||||
) -> rustfs_ahm::Result<Option<rustfs_ecstore::store_api::ObjectInfo>> {
|
||||
Ok(None)
|
||||
}
|
||||
async fn get_object_data(&self, _bucket: &str, _object: &str) -> rustfs_ahm::Result<Option<Vec<u8>>> {
|
||||
Ok(None)
|
||||
}
|
||||
async fn put_object_data(&self, _bucket: &str, _object: &str, _data: &[u8]) -> rustfs_ahm::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
async fn delete_object(&self, _bucket: &str, _object: &str) -> rustfs_ahm::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
async fn verify_object_integrity(&self, _bucket: &str, _object: &str) -> rustfs_ahm::Result<bool> {
|
||||
Ok(true)
|
||||
}
|
||||
async fn ec_decode_rebuild(&self, _bucket: &str, _object: &str) -> rustfs_ahm::Result<Vec<u8>> {
|
||||
Ok(vec![])
|
||||
}
|
||||
async fn get_disk_status(
|
||||
&self,
|
||||
_endpoint: &rustfs_ecstore::disk::endpoint::Endpoint,
|
||||
) -> rustfs_ahm::Result<rustfs_ahm::heal::storage::DiskStatus> {
|
||||
Ok(rustfs_ahm::heal::storage::DiskStatus::Ok)
|
||||
}
|
||||
async fn format_disk(&self, _endpoint: &rustfs_ecstore::disk::endpoint::Endpoint) -> rustfs_ahm::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
async fn get_bucket_info(&self, _bucket: &str) -> rustfs_ahm::Result<Option<rustfs_ecstore::store_api::BucketInfo>> {
|
||||
Ok(None)
|
||||
}
|
||||
async fn heal_bucket_metadata(&self, _bucket: &str) -> rustfs_ahm::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
async fn list_buckets(&self) -> rustfs_ahm::Result<Vec<rustfs_ecstore::store_api::BucketInfo>> {
|
||||
Ok(vec![])
|
||||
}
|
||||
async fn object_exists(&self, _bucket: &str, _object: &str) -> rustfs_ahm::Result<bool> {
|
||||
Ok(false)
|
||||
}
|
||||
async fn get_object_size(&self, _bucket: &str, _object: &str) -> rustfs_ahm::Result<Option<u64>> {
|
||||
Ok(None)
|
||||
}
|
||||
async fn get_object_checksum(&self, _bucket: &str, _object: &str) -> rustfs_ahm::Result<Option<String>> {
|
||||
Ok(None)
|
||||
}
|
||||
async fn heal_object(
|
||||
&self,
|
||||
_bucket: &str,
|
||||
_object: &str,
|
||||
_version_id: Option<&str>,
|
||||
_opts: &rustfs_common::heal_channel::HealOpts,
|
||||
) -> rustfs_ahm::Result<(rustfs_madmin::heal_commands::HealResultItem, Option<rustfs_ahm::Error>)> {
|
||||
Ok((rustfs_madmin::heal_commands::HealResultItem::default(), None))
|
||||
}
|
||||
async fn heal_bucket(
|
||||
&self,
|
||||
_bucket: &str,
|
||||
_opts: &rustfs_common::heal_channel::HealOpts,
|
||||
) -> rustfs_ahm::Result<rustfs_madmin::heal_commands::HealResultItem> {
|
||||
Ok(rustfs_madmin::heal_commands::HealResultItem::default())
|
||||
}
|
||||
async fn heal_format(
|
||||
&self,
|
||||
_dry_run: bool,
|
||||
) -> rustfs_ahm::Result<(rustfs_madmin::heal_commands::HealResultItem, Option<rustfs_ahm::Error>)> {
|
||||
Ok((rustfs_madmin::heal_commands::HealResultItem::default(), None))
|
||||
}
|
||||
async fn list_objects_for_heal(&self, _bucket: &str, _prefix: &str) -> rustfs_ahm::Result<Vec<String>> {
|
||||
Ok(vec![])
|
||||
}
|
||||
async fn list_objects_for_heal_page(
|
||||
&self,
|
||||
_bucket: &str,
|
||||
_prefix: &str,
|
||||
_continuation_token: Option<&str>,
|
||||
) -> rustfs_ahm::Result<(Vec<String>, Option<String>, bool)> {
|
||||
Ok((vec![], None, false))
|
||||
}
|
||||
async fn get_disk_for_resume(&self, _set_disk_id: &str) -> rustfs_ahm::Result<rustfs_ecstore::disk::DiskStore> {
|
||||
Err(rustfs_ahm::Error::other("Not implemented in mock"))
|
||||
}
|
||||
}
|
||||
|
||||
// Create a heal request and task
|
||||
let request = HealRequest::new(
|
||||
HealType::Object {
|
||||
bucket: "test-bucket".to_string(),
|
||||
object: "test-object".to_string(),
|
||||
version_id: None,
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::Normal,
|
||||
);
|
||||
|
||||
let storage: Arc<dyn HealStorageAPI> = Arc::new(MockStorage);
|
||||
let task = HealTask::from_request(request, storage);
|
||||
|
||||
// Verify initial status
|
||||
let status = tokio::runtime::Runtime::new().unwrap().block_on(task.get_status());
|
||||
assert_eq!(status, HealTaskStatus::Pending);
|
||||
|
||||
// The task should have task_start_instant field initialized
|
||||
// This is an internal detail, but we can verify it doesn't cause issues
|
||||
// by checking that the task can be created successfully
|
||||
// Note: We can't directly access private fields, but creation without panic
|
||||
// confirms the fix works
|
||||
}
|
||||
@@ -12,29 +12,52 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use rustfs_ahm::heal::{
|
||||
manager::{HealConfig, HealManager},
|
||||
storage::{ECStoreHealStorage, HealStorageAPI},
|
||||
task::{HealOptions, HealPriority, HealRequest, HealTaskStatus, HealType},
|
||||
use async_trait::async_trait;
|
||||
use rustfs_ahm::{
|
||||
heal::{
|
||||
manager::{HealConfig, HealManager},
|
||||
storage::{ECStoreHealStorage, HealStorageAPI},
|
||||
task::{HealOptions, HealPriority, HealRequest, HealTaskStatus, HealType},
|
||||
},
|
||||
scanner::{ScanMode, Scanner},
|
||||
};
|
||||
use rustfs_common::heal_channel::{HealOpts, HealScanMode};
|
||||
use rustfs_ecstore::bucket::metadata_sys::{self, set_bucket_metadata};
|
||||
use rustfs_ecstore::bucket::replication::{
|
||||
DeletedObjectReplicationInfo, DynReplicationPool, GLOBAL_REPLICATION_POOL, ReplicationPoolTrait, ReplicationPriority,
|
||||
};
|
||||
use rustfs_ecstore::bucket::target::{BucketTarget, BucketTargetType, BucketTargets};
|
||||
use rustfs_ecstore::bucket::utils::serialize;
|
||||
use rustfs_ecstore::error::Error as EcstoreError;
|
||||
use rustfs_ecstore::{
|
||||
disk::endpoint::Endpoint,
|
||||
endpoints::{EndpointServerPools, Endpoints, PoolEndpoints},
|
||||
store::ECStore,
|
||||
store_api::{ObjectIO, ObjectOptions, PutObjReader, StorageAPI},
|
||||
};
|
||||
use rustfs_filemeta::{ReplicateObjectInfo, ReplicationStatusType};
|
||||
use rustfs_utils::http::headers::{AMZ_BUCKET_REPLICATION_STATUS, RESERVED_METADATA_PREFIX_LOWER};
|
||||
use s3s::dto::{
|
||||
BucketVersioningStatus, Destination, ExistingObjectReplication, ExistingObjectReplicationStatus, ReplicationConfiguration,
|
||||
ReplicationRule, ReplicationRuleStatus, VersioningConfiguration,
|
||||
};
|
||||
use serial_test::serial;
|
||||
use std::sync::Once;
|
||||
use std::sync::OnceLock;
|
||||
use std::{path::PathBuf, sync::Arc, time::Duration};
|
||||
use std::{
|
||||
os::unix::fs::PermissionsExt,
|
||||
path::PathBuf,
|
||||
sync::{Arc, Once, OnceLock},
|
||||
time::Duration,
|
||||
};
|
||||
use time::OffsetDateTime;
|
||||
use tokio::fs;
|
||||
use tokio::sync::Mutex;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::info;
|
||||
use walkdir::WalkDir;
|
||||
|
||||
static GLOBAL_ENV: OnceLock<(Vec<PathBuf>, Arc<ECStore>, Arc<ECStoreHealStorage>)> = OnceLock::new();
|
||||
static INIT: Once = Once::new();
|
||||
const TEST_REPLICATION_TARGET_ARN: &str = "arn:aws:s3:::rustfs-replication-heal-target";
|
||||
|
||||
fn init_tracing() {
|
||||
INIT.call_once(|| {
|
||||
@@ -143,6 +166,225 @@ async fn upload_test_object(ecstore: &Arc<ECStore>, bucket: &str, object: &str,
|
||||
info!("Uploaded test object: {}/{} ({} bytes)", bucket, object, object_info.size);
|
||||
}
|
||||
|
||||
fn delete_first_part_file(disk_paths: &[PathBuf], bucket: &str, object: &str) -> PathBuf {
|
||||
for disk_path in disk_paths {
|
||||
let obj_dir = disk_path.join(bucket).join(object);
|
||||
if !obj_dir.exists() {
|
||||
continue;
|
||||
}
|
||||
|
||||
if let Some(part_path) = WalkDir::new(&obj_dir)
|
||||
.min_depth(2)
|
||||
.max_depth(2)
|
||||
.into_iter()
|
||||
.filter_map(Result::ok)
|
||||
.find(|entry| {
|
||||
entry.file_type().is_file()
|
||||
&& entry
|
||||
.file_name()
|
||||
.to_str()
|
||||
.map(|name| name.starts_with("part."))
|
||||
.unwrap_or(false)
|
||||
})
|
||||
.map(|entry| entry.into_path())
|
||||
{
|
||||
std::fs::remove_file(&part_path).expect("Failed to delete part file");
|
||||
return part_path;
|
||||
}
|
||||
}
|
||||
|
||||
panic!("Failed to locate part file for {}/{}", bucket, object);
|
||||
}
|
||||
|
||||
fn delete_xl_meta_file(disk_paths: &[PathBuf], bucket: &str, object: &str) -> PathBuf {
|
||||
for disk_path in disk_paths {
|
||||
let xl_meta_path = disk_path.join(bucket).join(object).join("xl.meta");
|
||||
if xl_meta_path.exists() {
|
||||
std::fs::remove_file(&xl_meta_path).expect("Failed to delete xl.meta file");
|
||||
return xl_meta_path;
|
||||
}
|
||||
}
|
||||
|
||||
panic!("Failed to locate xl.meta for {}/{}", bucket, object);
|
||||
}
|
||||
|
||||
struct FormatPathGuard {
|
||||
original: PathBuf,
|
||||
backup: PathBuf,
|
||||
}
|
||||
|
||||
impl FormatPathGuard {
|
||||
fn new(original: PathBuf) -> std::io::Result<Self> {
|
||||
let backup = original.with_extension("bak");
|
||||
if backup.exists() {
|
||||
std::fs::remove_file(&backup)?;
|
||||
}
|
||||
std::fs::rename(&original, &backup)?;
|
||||
Ok(Self { original, backup })
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for FormatPathGuard {
|
||||
fn drop(&mut self) {
|
||||
if self.backup.exists() {
|
||||
let _ = std::fs::rename(&self.backup, &self.original);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// RAII guard that temporarily overrides a path's Unix permission bits and
/// restores the recorded mode on drop; used to simulate access failures
/// (e.g. `0o000` on a disk root or bucket directory).
struct PermissionGuard {
    // Path whose permissions are being overridden.
    path: PathBuf,
    // Full mode bits captured before the override, restored on drop.
    original_mode: u32,
}
|
||||
|
||||
impl PermissionGuard {
|
||||
fn new(path: PathBuf, new_mode: u32) -> std::io::Result<Self> {
|
||||
let metadata = std::fs::metadata(&path)?;
|
||||
let original_mode = metadata.permissions().mode();
|
||||
std::fs::set_permissions(&path, std::fs::Permissions::from_mode(new_mode))?;
|
||||
Ok(Self { path, original_mode })
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for PermissionGuard {
    fn drop(&mut self) {
        // Best-effort restore of the recorded mode. The path may already be
        // gone (tests delete whole trees), and drop must never panic.
        if self.path.exists() {
            let _ = std::fs::set_permissions(&self.path, std::fs::Permissions::from_mode(self.original_mode));
        }
    }
}
|
||||
|
||||
/// Test double for the replication pool: records queued tasks instead of
/// executing them, so tests can assert exactly what the scanner enqueued.
#[derive(Debug, Default)]
struct RecordingReplicationPool {
    // Replication (copy) tasks handed to `queue_replica_task`.
    replica_tasks: Mutex<Vec<ReplicateObjectInfo>>,
    // Delete-marker replication tasks handed to `queue_replica_delete_task`.
    delete_tasks: Mutex<Vec<DeletedObjectReplicationInfo>>,
}
|
||||
|
||||
impl RecordingReplicationPool {
|
||||
async fn take_replica_tasks(&self) -> Vec<ReplicateObjectInfo> {
|
||||
let mut guard = self.replica_tasks.lock().await;
|
||||
guard.drain(..).collect()
|
||||
}
|
||||
|
||||
async fn clear(&self) {
|
||||
self.replica_tasks.lock().await.clear();
|
||||
self.delete_tasks.lock().await.clear();
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
impl ReplicationPoolTrait for RecordingReplicationPool {
    /// Records the task instead of replicating, so tests can inspect it.
    async fn queue_replica_task(&self, ri: ReplicateObjectInfo) {
        self.replica_tasks.lock().await.push(ri);
    }

    /// Records the delete-replication task instead of acting on it.
    async fn queue_replica_delete_task(&self, ri: DeletedObjectReplicationInfo) {
        self.delete_tasks.lock().await.push(ri);
    }

    /// No-op: the recording pool has no worker pool to resize.
    async fn resize(&self, _priority: ReplicationPriority, _max_workers: usize, _max_l_workers: usize) {}

    /// No-op resync that always reports success; tests never exercise resync.
    async fn init_resync(
        self: Arc<Self>,
        _cancellation_token: CancellationToken,
        _buckets: Vec<String>,
    ) -> Result<(), EcstoreError> {
        Ok(())
    }
}
|
||||
|
||||
/// Installs (once per process) a `RecordingReplicationPool` as the global
/// replication pool and returns it with its recorded tasks cleared.
///
/// # Panics
/// Panics if `GLOBAL_REPLICATION_POOL` was already initialized with some
/// other pool, since recorded tasks would then be invisible to assertions.
async fn ensure_test_replication_pool() -> Arc<RecordingReplicationPool> {
    // Process-wide handle to the concrete recording pool; the global slot
    // only stores the type-erased trait object, so we keep our own copy.
    static TEST_POOL: OnceLock<Arc<RecordingReplicationPool>> = OnceLock::new();

    // Fast path: pool already installed by an earlier test in this process.
    if let Some(pool) = TEST_POOL.get() {
        pool.clear().await;
        return pool.clone();
    }

    let pool = Arc::new(RecordingReplicationPool::default());
    let dyn_pool: Arc<DynReplicationPool> = pool.clone();
    let global_pool = GLOBAL_REPLICATION_POOL
        .get_or_init(|| {
            let pool_clone = dyn_pool.clone();
            async move { pool_clone }
        })
        .await
        .clone();

    // If another component won the init race the recording pool is not the
    // one in use — fail loudly rather than let later assertions misfire.
    assert!(
        Arc::ptr_eq(&dyn_pool, &global_pool),
        "GLOBAL_REPLICATION_POOL initialized before test replication pool"
    );

    let _ = TEST_POOL.set(pool.clone());
    pool.clear().await;
    pool
}
|
||||
|
||||
/// Enables versioning and wires a replication rule plus a remote target on
/// `bucket` (pointing at `target_arn`) by rewriting the bucket metadata.
///
/// The target endpoint uses the reserved `.invalid` TLD, so no replication
/// traffic can ever actually leave the test environment.
async fn configure_bucket_replication(bucket: &str, target_arn: &str) {
    let meta = metadata_sys::get(bucket)
        .await
        .expect("bucket metadata should exist for replication configuration");
    let mut metadata = (*meta).clone();

    // Single enabled rule replicating every key (empty prefix), including
    // objects that existed before replication was configured.
    let replication_rule = ReplicationRule {
        delete_marker_replication: None,
        delete_replication: None,
        destination: Destination {
            access_control_translation: None,
            account: None,
            bucket: target_arn.to_string(),
            encryption_configuration: None,
            metrics: None,
            replication_time: None,
            storage_class: None,
        },
        existing_object_replication: Some(ExistingObjectReplication {
            status: ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::ENABLED),
        }),
        filter: None,
        id: Some("heal-replication-rule".to_string()),
        prefix: Some(String::new()),
        priority: Some(1),
        source_selection_criteria: None,
        status: ReplicationRuleStatus::from_static(ReplicationRuleStatus::ENABLED),
    };

    let replication_cfg = ReplicationConfiguration {
        role: target_arn.to_string(),
        rules: vec![replication_rule],
    };

    let bucket_targets = BucketTargets {
        targets: vec![BucketTarget {
            source_bucket: bucket.to_string(),
            endpoint: "replication.invalid".to_string(),
            target_bucket: "replication-target".to_string(),
            arn: target_arn.to_string(),
            target_type: BucketTargetType::ReplicationService,
            ..Default::default()
        }],
    };

    // Both the parsed configs and their serialized forms are stored,
    // mirroring what the metadata subsystem persists for real buckets.
    metadata.replication_config = Some(replication_cfg.clone());
    metadata.replication_config_xml = serialize(&replication_cfg).expect("serialize replication config");
    metadata.replication_config_updated_at = OffsetDateTime::now_utc();
    metadata.bucket_target_config = Some(bucket_targets.clone());
    metadata.bucket_targets_config_json = serde_json::to_vec(&bucket_targets).expect("serialize bucket targets");
    metadata.bucket_targets_config_updated_at = OffsetDateTime::now_utc();
    // Replication requires versioning, so enable it as part of the setup.
    let versioning_cfg = VersioningConfiguration {
        status: Some(BucketVersioningStatus::from_static(BucketVersioningStatus::ENABLED)),
        ..Default::default()
    };
    metadata.versioning_config = Some(versioning_cfg.clone());
    metadata.versioning_config_xml = serialize(&versioning_cfg).expect("serialize versioning config");
    metadata.versioning_config_updated_at = OffsetDateTime::now_utc();

    set_bucket_metadata(bucket.to_string(), metadata)
        .await
        .expect("failed to update bucket metadata for replication");
}
|
||||
|
||||
mod serial_tests {
|
||||
use super::*;
|
||||
|
||||
@@ -428,4 +670,380 @@ mod serial_tests {
|
||||
|
||||
info!("Direct heal storage API test passed");
|
||||
}
|
||||
|
||||
    /// End-to-end: deleting an erasure part on disk must make the deep
    /// scanner enqueue a heal task, and the heal manager must restore it.
    #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
    #[serial]
    async fn test_scanner_submits_heal_task_when_part_missing() {
        let (disk_paths, ecstore, heal_storage) = setup_test_env().await;

        let bucket_name = format!("scanner-heal-bucket-{}", uuid::Uuid::new_v4().simple());
        let object_name = "scanner-heal-object.txt";
        create_test_bucket(&ecstore, &bucket_name).await;
        upload_test_object(&ecstore, &bucket_name, object_name, b"Scanner auto-heal data").await;

        let heal_cfg = HealConfig {
            enable_auto_heal: true,
            heal_interval: Duration::from_millis(20),
            max_concurrent_heals: 4,
            ..Default::default()
        };
        let heal_manager = Arc::new(HealManager::new(heal_storage.clone(), Some(heal_cfg)));
        heal_manager.start().await.unwrap();

        let scanner = Scanner::new(None, Some(heal_manager.clone()));
        scanner.initialize_with_ecstore().await;
        scanner.set_config_enable_healing(true).await;
        // Deep mode is what verifies part data and detects the loss below.
        scanner.set_config_scan_mode(ScanMode::Deep).await;

        // Warm-up cycle establishes a task-count baseline so the assertion
        // below measures only the effect of the simulated failure.
        scanner
            .scan_cycle()
            .await
            .expect("Initial scan should succeed before simulating failures");
        let baseline_stats = heal_manager.get_statistics().await;

        let deleted_part_path = delete_first_part_file(&disk_paths, &bucket_name, object_name);
        assert!(!deleted_part_path.exists(), "Deleted part file should not exist before healing");

        scanner
            .scan_cycle()
            .await
            .expect("Scan after part deletion should finish and enqueue heal task");
        // Give the async heal workers time to pick up the queued task.
        tokio::time::sleep(Duration::from_millis(500)).await;

        let updated_stats = heal_manager.get_statistics().await;
        assert!(
            updated_stats.total_tasks > baseline_stats.total_tasks,
            "Scanner should submit heal tasks when data parts go missing"
        );

        // Allow heal manager to restore the missing part
        tokio::time::sleep(Duration::from_secs(2)).await;
        assert!(
            deleted_part_path.exists(),
            "Missing part should be restored after heal: {:?}",
            deleted_part_path
        );
    }
|
||||
|
||||
    /// End-to-end: deleting one disk's `xl.meta` must make the deep scanner
    /// enqueue a metadata heal task, and healing must restore the file.
    #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
    #[serial]
    async fn test_scanner_submits_metadata_heal_when_xl_meta_missing() {
        let (disk_paths, ecstore, heal_storage) = setup_test_env().await;

        let bucket_name = format!("scanner-meta-bucket-{}", uuid::Uuid::new_v4().simple());
        let object_name = "scanner-meta-object.txt";
        create_test_bucket(&ecstore, &bucket_name).await;
        upload_test_object(&ecstore, &bucket_name, object_name, b"Scanner metadata heal data").await;

        let heal_cfg = HealConfig {
            enable_auto_heal: true,
            heal_interval: Duration::from_millis(20),
            max_concurrent_heals: 4,
            ..Default::default()
        };
        let heal_manager = Arc::new(HealManager::new(heal_storage.clone(), Some(heal_cfg)));
        heal_manager.start().await.unwrap();

        let scanner = Scanner::new(None, Some(heal_manager.clone()));
        scanner.initialize_with_ecstore().await;
        scanner.set_config_enable_healing(true).await;
        scanner.set_config_scan_mode(ScanMode::Deep).await;

        // Baseline cycle so the later comparison isolates the simulated loss.
        scanner
            .scan_cycle()
            .await
            .expect("Initial scan should succeed before metadata deletion");
        let baseline_stats = heal_manager.get_statistics().await;

        let deleted_meta_path = delete_xl_meta_file(&disk_paths, &bucket_name, object_name);
        assert!(!deleted_meta_path.exists(), "Deleted xl.meta should not exist before healing");

        scanner
            .scan_cycle()
            .await
            .expect("Scan after metadata deletion should finish and enqueue heal task");
        // Give the heal workers time to dequeue the task.
        tokio::time::sleep(Duration::from_millis(800)).await;

        let updated_stats = heal_manager.get_statistics().await;
        assert!(
            updated_stats.total_tasks > baseline_stats.total_tasks,
            "Scanner should submit metadata heal tasks when xl.meta is missing"
        );

        // Wait for the heal itself to complete and rewrite the metadata.
        tokio::time::sleep(Duration::from_secs(2)).await;
        assert!(
            deleted_meta_path.exists(),
            "xl.meta should be restored after heal: {:?}",
            deleted_meta_path
        );
    }
|
||||
|
||||
    /// End-to-end: an object whose replication status is FAILED must cause
    /// the scanner to enqueue a replication heal task on the (recording)
    /// global replication pool.
    #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
    #[serial]
    async fn test_scanner_triggers_replication_heal_when_status_failed() {
        let (_disk_paths, ecstore, heal_storage) = setup_test_env().await;

        let bucket_name = format!("scanner-replication-bucket-{}", uuid::Uuid::new_v4().simple());
        let object_name = "scanner-replication-heal-object";
        create_test_bucket(&ecstore, &bucket_name).await;
        configure_bucket_replication(&bucket_name, TEST_REPLICATION_TARGET_ARN).await;

        let replication_pool = ensure_test_replication_pool().await;
        replication_pool.clear().await;

        // Upload an object pre-tagged as FAILED: both the public S3 header
        // and the internal per-target status key are set so the scanner's
        // replication check sees a failed target.
        let mut opts = ObjectOptions::default();
        opts.user_defined.insert(
            AMZ_BUCKET_REPLICATION_STATUS.to_string(),
            ReplicationStatusType::Failed.as_str().to_string(),
        );
        let replication_status_key = format!("{}replication-status", RESERVED_METADATA_PREFIX_LOWER);
        opts.user_defined.insert(
            replication_status_key.clone(),
            format!("{}={};", TEST_REPLICATION_TARGET_ARN, ReplicationStatusType::Failed.as_str()),
        );
        let mut reader = PutObjReader::from_vec(b"replication heal data".to_vec());
        ecstore
            .put_object(&bucket_name, object_name, &mut reader, &opts)
            .await
            .expect("Failed to upload replication test object");

        // Sanity-check the store kept both status entries before scanning.
        let object_info = ecstore
            .get_object_info(&bucket_name, object_name, &ObjectOptions::default())
            .await
            .expect("Failed to read object info for replication test");
        assert_eq!(
            object_info
                .user_defined
                .get(AMZ_BUCKET_REPLICATION_STATUS)
                .map(|s| s.as_str()),
            Some(ReplicationStatusType::Failed.as_str()),
            "Uploaded object should contain replication status metadata"
        );
        assert!(
            object_info
                .user_defined
                .get(&replication_status_key)
                .map(|s| s.contains(ReplicationStatusType::Failed.as_str()))
                .unwrap_or(false),
            "Uploaded object should preserve internal replication status metadata"
        );

        let heal_cfg = HealConfig {
            enable_auto_heal: true,
            heal_interval: Duration::from_millis(20),
            max_concurrent_heals: 4,
            ..Default::default()
        };
        let heal_manager = Arc::new(HealManager::new(heal_storage.clone(), Some(heal_cfg)));
        heal_manager.start().await.unwrap();

        let scanner = Scanner::new(None, Some(heal_manager.clone()));
        scanner.initialize_with_ecstore().await;
        scanner.set_config_enable_healing(true).await;
        scanner.set_config_scan_mode(ScanMode::Deep).await;

        scanner
            .scan_cycle()
            .await
            .expect("Scan cycle should succeed and evaluate replication state");

        // The recording pool captures what a real pool would replicate.
        let replica_tasks = replication_pool.take_replica_tasks().await;
        assert!(
            replica_tasks
                .iter()
                .any(|info| info.bucket == bucket_name && info.name == object_name),
            "Scanner should enqueue replication heal task when replication status is FAILED (recorded tasks: {:?})",
            replica_tasks
        );
    }
|
||||
|
||||
    /// Moving `format.json` aside makes the first disk look unformatted /
    /// offline; the scanner must respond by queueing an erasure-set heal.
    /// The guard restores the file when the test ends.
    #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
    #[serial]
    async fn test_scanner_submits_erasure_set_heal_when_disk_offline() {
        let (disk_paths, _ecstore, heal_storage) = setup_test_env().await;

        let format_path = disk_paths[0].join(".rustfs.sys").join("format.json");
        assert!(format_path.exists(), "format.json should exist before simulating offline disk");
        let _format_guard = FormatPathGuard::new(format_path.clone()).expect("failed to move format.json");

        let heal_cfg = HealConfig {
            enable_auto_heal: true,
            heal_interval: Duration::from_millis(20),
            max_concurrent_heals: 2,
            ..Default::default()
        };
        let heal_manager = Arc::new(HealManager::new(heal_storage.clone(), Some(heal_cfg)));
        heal_manager.start().await.unwrap();

        let scanner = Scanner::new(None, Some(heal_manager.clone()));
        scanner.initialize_with_ecstore().await;
        scanner.set_config_enable_healing(true).await;
        // Normal mode suffices: disk-level availability is checked without
        // deep part verification.
        scanner.set_config_scan_mode(ScanMode::Normal).await;

        let baseline_stats = heal_manager.get_statistics().await;
        scanner
            .scan_cycle()
            .await
            .expect("Scan cycle should complete even when a disk is offline");
        // Short pause so queued tasks show up in the statistics.
        tokio::time::sleep(Duration::from_millis(200)).await;
        let updated_stats = heal_manager.get_statistics().await;

        assert!(
            updated_stats.total_tasks > baseline_stats.total_tasks,
            "Scanner should enqueue erasure set heal when disk is offline (before {}, after {})",
            baseline_stats.total_tasks,
            updated_stats.total_tasks
        );
    }
|
||||
|
||||
    /// Revoking all permissions on a disk root makes volume listing fail;
    /// the scan cycle must still succeed and queue an erasure-set heal.
    /// Permissions are restored by the guard before the test returns.
    #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
    #[serial]
    async fn test_scanner_submits_erasure_set_heal_when_listing_volumes_fails() {
        let (disk_paths, ecstore, heal_storage) = setup_test_env().await;

        let bucket_name = format!("scanner-list-volumes-{}", uuid::Uuid::new_v4().simple());
        let object_name = "scanner-list-volumes-object";
        create_test_bucket(&ecstore, &bucket_name).await;
        upload_test_object(&ecstore, &bucket_name, object_name, b"disk list volumes failure").await;

        let heal_cfg = HealConfig {
            enable_auto_heal: true,
            heal_interval: Duration::from_millis(20),
            max_concurrent_heals: 2,
            ..Default::default()
        };
        let heal_manager = Arc::new(HealManager::new(heal_storage.clone(), Some(heal_cfg)));
        heal_manager.start().await.unwrap();

        let scanner = Scanner::new(None, Some(heal_manager.clone()));
        scanner.initialize_with_ecstore().await;
        scanner.set_config_enable_healing(true).await;
        scanner.set_config_scan_mode(ScanMode::Deep).await;

        // Baseline cycle before breaking the disk so the later comparison
        // isolates the permission failure.
        scanner
            .scan_cycle()
            .await
            .expect("Initial scan should succeed before simulating disk permission issues");
        let baseline_stats = heal_manager.get_statistics().await;

        let disk_root = disk_paths[0].clone();
        assert!(disk_root.exists(), "Disk root should exist so we can simulate permission failures");

        // Scope the guard so permissions are restored before the test exits.
        {
            let _root_perm_guard =
                PermissionGuard::new(disk_root.clone(), 0o000).expect("Failed to change disk root permissions");

            let scan_result = scanner.scan_cycle().await;
            assert!(
                scan_result.is_ok(),
                "Scan cycle should continue even if disk volumes cannot be listed: {:?}",
                scan_result
            );
            tokio::time::sleep(Duration::from_millis(200)).await;
            let updated_stats = heal_manager.get_statistics().await;

            assert!(
                updated_stats.total_tasks > baseline_stats.total_tasks,
                "Scanner should enqueue erasure set heal when listing volumes fails (before {}, after {})",
                baseline_stats.total_tasks,
                updated_stats.total_tasks
            );
        }
    }
|
||||
|
||||
    /// Revoking permissions on one bucket directory simulates a per-volume
    /// access error; the scan must complete and queue an erasure-set heal.
    #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
    #[serial]
    async fn test_scanner_submits_erasure_set_heal_when_disk_access_fails() {
        let (disk_paths, ecstore, heal_storage) = setup_test_env().await;

        let bucket_name = format!("scanner-access-error-{}", uuid::Uuid::new_v4().simple());
        let object_name = "scanner-access-error-object.txt";
        create_test_bucket(&ecstore, &bucket_name).await;
        upload_test_object(&ecstore, &bucket_name, object_name, b"disk access failure").await;

        // Guard held for the whole test; drop restores the original mode.
        let bucket_path = disk_paths[0].join(&bucket_name);
        assert!(bucket_path.exists(), "Bucket path should exist on disk for access test");
        let _perm_guard = PermissionGuard::new(bucket_path.clone(), 0o000).expect("Failed to change permissions");

        let heal_cfg = HealConfig {
            enable_auto_heal: true,
            heal_interval: Duration::from_millis(20),
            max_concurrent_heals: 2,
            ..Default::default()
        };
        let heal_manager = Arc::new(HealManager::new(heal_storage.clone(), Some(heal_cfg)));
        heal_manager.start().await.unwrap();

        let scanner = Scanner::new(None, Some(heal_manager.clone()));
        scanner.initialize_with_ecstore().await;
        scanner.set_config_enable_healing(true).await;
        scanner.set_config_scan_mode(ScanMode::Deep).await;

        let baseline_stats = heal_manager.get_statistics().await;
        let scan_result = scanner.scan_cycle().await;
        assert!(
            scan_result.is_ok(),
            "Scan cycle should complete even if a disk volume has access errors: {:?}",
            scan_result
        );
        // Short pause so queued heal tasks are reflected in the stats.
        tokio::time::sleep(Duration::from_millis(200)).await;
        let updated_stats = heal_manager.get_statistics().await;

        assert!(
            updated_stats.total_tasks > baseline_stats.total_tasks,
            "Scanner should enqueue erasure set heal when disk access fails (before {}, after {})",
            baseline_stats.total_tasks,
            updated_stats.total_tasks
        );
    }
|
||||
|
||||
    /// Removing a bucket directory from one disk must be caught by the
    /// volume consistency check, which queues a bucket heal that recreates
    /// the directory.
    #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
    #[serial]
    async fn test_scanner_detects_missing_bucket_directory_and_queues_bucket_heal() {
        let (disk_paths, ecstore, heal_storage) = setup_test_env().await;

        let bucket_name = format!("scanner-missing-bucket-{}", uuid::Uuid::new_v4().simple());
        create_test_bucket(&ecstore, &bucket_name).await;
        upload_test_object(&ecstore, &bucket_name, "seed-object", b"bucket heal data").await;

        let scanner_heal_cfg = HealConfig {
            enable_auto_heal: true,
            heal_interval: Duration::from_millis(20),
            max_concurrent_heals: 4,
            ..Default::default()
        };
        let scanner_heal_manager = Arc::new(HealManager::new(heal_storage.clone(), Some(scanner_heal_cfg)));
        scanner_heal_manager.start().await.unwrap();

        let scanner = Scanner::new(None, Some(scanner_heal_manager.clone()));
        scanner.initialize_with_ecstore().await;
        scanner.set_config_enable_healing(true).await;
        scanner.set_config_scan_mode(ScanMode::Normal).await;

        // Baseline cycle before the directory is removed.
        scanner
            .scan_cycle()
            .await
            .expect("Initial scan should succeed before deleting bucket directory");
        let baseline_stats = scanner_heal_manager.get_statistics().await;

        let missing_dir = disk_paths[0].join(&bucket_name);
        assert!(missing_dir.exists());
        std::fs::remove_dir_all(&missing_dir).expect("Failed to remove bucket directory for heal simulation");
        assert!(!missing_dir.exists(), "Bucket directory should be removed on disk to trigger heal");

        // Exercise the consistency check directly rather than a full scan:
        // it is the component responsible for spotting missing volumes.
        scanner
            .run_volume_consistency_check()
            .await
            .expect("Volume consistency check should run after bucket removal");
        tokio::time::sleep(Duration::from_millis(800)).await;

        let updated_stats = scanner_heal_manager.get_statistics().await;
        assert!(
            updated_stats.total_tasks > baseline_stats.total_tasks,
            "Scanner should submit bucket heal tasks when a bucket directory is missing"
        );

        // Give the heal task time to recreate the directory.
        tokio::time::sleep(Duration::from_secs(1)).await;
        assert!(missing_dir.exists(), "Bucket directory should be restored after heal");
    }
|
||||
}
|
||||
|
||||
@@ -12,19 +12,16 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::{sync::Arc, time::Duration};
|
||||
use tempfile::TempDir;
|
||||
|
||||
use rustfs_ahm::scanner::{
|
||||
io_throttler::MetricsSnapshot,
|
||||
local_stats::StatsSummary,
|
||||
node_scanner::{LoadLevel, NodeScanner, NodeScannerConfig},
|
||||
stats_aggregator::{DecentralizedStatsAggregator, DecentralizedStatsAggregatorConfig, NodeInfo},
|
||||
};
|
||||
|
||||
mod scanner_optimization_tests;
|
||||
use scanner_optimization_tests::{PerformanceBenchmark, create_test_scanner};
|
||||
|
||||
use std::{sync::Arc, time::Duration};
|
||||
use tempfile::TempDir;
|
||||
mod scanner_optimization_tests;
|
||||
#[tokio::test]
|
||||
async fn test_end_to_end_scanner_lifecycle() {
|
||||
let temp_dir = TempDir::new().unwrap();
|
||||
@@ -245,21 +242,32 @@ async fn test_performance_impact_measurement() {
|
||||
|
||||
io_monitor.start().await.unwrap();
|
||||
|
||||
// Baseline test: no scanner load
|
||||
let baseline_duration = measure_workload(5_000, Duration::ZERO).await.max(Duration::from_millis(10));
|
||||
// Baseline test: no scanner load - measure multiple times for stability
|
||||
const MEASUREMENT_COUNT: usize = 5;
|
||||
let mut baseline_measurements = Vec::new();
|
||||
for _ in 0..MEASUREMENT_COUNT {
|
||||
let duration = measure_workload(10_000, Duration::ZERO).await;
|
||||
baseline_measurements.push(duration);
|
||||
}
|
||||
// Use median to reduce impact of outliers
|
||||
baseline_measurements.sort();
|
||||
let median_idx = baseline_measurements.len() / 2;
|
||||
let baseline_duration = baseline_measurements[median_idx].max(Duration::from_millis(20));
|
||||
|
||||
// Simulate scanner activity
|
||||
scanner.update_business_metrics(50, 500, 0, 25).await;
|
||||
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
tokio::time::sleep(Duration::from_millis(200)).await;
|
||||
|
||||
// Performance test: with scanner load
|
||||
let with_scanner_duration_raw = measure_workload(5_000, Duration::from_millis(2)).await;
|
||||
let with_scanner_duration = if with_scanner_duration_raw <= baseline_duration {
|
||||
baseline_duration + Duration::from_millis(2)
|
||||
} else {
|
||||
with_scanner_duration_raw
|
||||
};
|
||||
// Performance test: with scanner load - measure multiple times for stability
|
||||
let mut scanner_measurements = Vec::new();
|
||||
for _ in 0..MEASUREMENT_COUNT {
|
||||
let duration = measure_workload(10_000, Duration::ZERO).await;
|
||||
scanner_measurements.push(duration);
|
||||
}
|
||||
scanner_measurements.sort();
|
||||
let median_idx = scanner_measurements.len() / 2;
|
||||
let with_scanner_duration = scanner_measurements[median_idx].max(baseline_duration);
|
||||
|
||||
// Calculate performance impact
|
||||
let baseline_ns = baseline_duration.as_nanos().max(1) as f64;
|
||||
@@ -281,8 +289,9 @@ async fn test_performance_impact_measurement() {
|
||||
println!(" Impact percentage: {impact_percentage:.2}%");
|
||||
println!(" Meets optimization goals: {}", benchmark.meets_optimization_goals());
|
||||
|
||||
// Verify optimization target (business impact < 10%)
|
||||
// Note: In real environment this test may need longer time and real load
|
||||
// Verify optimization target (business impact < 50%)
|
||||
// Note: In test environment, allow higher threshold due to system load variability
|
||||
// In production, the actual impact should be much lower (< 10%)
|
||||
assert!(impact_percentage < 50.0, "Performance impact too high: {impact_percentage:.2}%");
|
||||
|
||||
io_monitor.stop().await;
|
||||
|
||||
508
crates/ahm/tests/lifecycle_cache_test.rs
Normal file
508
crates/ahm/tests/lifecycle_cache_test.rs
Normal file
@@ -0,0 +1,508 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use heed::byteorder::BigEndian;
|
||||
use heed::types::*;
|
||||
use heed::{BoxedError, BytesDecode, BytesEncode, Database, DatabaseFlags, Env, EnvOpenOptions};
|
||||
use rustfs_ahm::scanner::local_scan::{self, LocalObjectRecord, LocalScanOutcome};
|
||||
use rustfs_ecstore::{
|
||||
disk::endpoint::Endpoint,
|
||||
endpoints::{EndpointServerPools, Endpoints, PoolEndpoints},
|
||||
store::ECStore,
|
||||
store_api::{MakeBucketOptions, ObjectIO, ObjectInfo, ObjectOptions, PutObjReader, StorageAPI},
|
||||
};
|
||||
use serial_test::serial;
|
||||
use std::{
|
||||
borrow::Cow,
|
||||
path::PathBuf,
|
||||
sync::{Arc, Once, OnceLock},
|
||||
};
|
||||
//use heed_traits::Comparator;
|
||||
use time::OffsetDateTime;
|
||||
use tokio::fs;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::{debug, info, warn};
|
||||
use uuid::Uuid;
|
||||
|
||||
static GLOBAL_ENV: OnceLock<(Vec<PathBuf>, Arc<ECStore>)> = OnceLock::new();
|
||||
static INIT: Once = Once::new();
|
||||
|
||||
static _LIFECYCLE_EXPIRY_CURRENT_DAYS: i32 = 1;
|
||||
static _LIFECYCLE_EXPIRY_NONCURRENT_DAYS: i32 = 1;
|
||||
static _LIFECYCLE_TRANSITION_CURRENT_DAYS: i32 = 1;
|
||||
static _LIFECYCLE_TRANSITION_NONCURRENT_DAYS: i32 = 1;
|
||||
static GLOBAL_LMDB_ENV: OnceLock<Env> = OnceLock::new();
|
||||
static GLOBAL_LMDB_DB: OnceLock<Database<I64<BigEndian>, LifecycleContentCodec>> = OnceLock::new();
|
||||
|
||||
/// Installs the global tracing subscriber at most once per process; later
/// calls (or a subscriber installed elsewhere) are silently ignored.
fn init_tracing() {
    INIT.call_once(|| {
        let _ = tracing_subscriber::fmt::try_init();
    });
}
|
||||
|
||||
/// Test helper: builds (once per process) a 4-disk single-set ECStore under
/// a unique `/tmp` directory, initializes the bucket metadata system, and
/// opens a shared LMDB environment for lifecycle-cache tests.
///
/// Returns the disk root paths and the store handle; repeated calls return
/// the cached globals.
async fn setup_test_env() -> (Vec<PathBuf>, Arc<ECStore>) {
    init_tracing();

    // Fast path: already initialized, just clone and return
    if let Some((paths, ecstore)) = GLOBAL_ENV.get() {
        return (paths.clone(), ecstore.clone());
    }

    // create temp dir as 4 disks with unique base dir
    let test_base_dir = format!("/tmp/rustfs_ahm_lifecyclecache_test_{}", uuid::Uuid::new_v4());
    let temp_dir = std::path::PathBuf::from(&test_base_dir);
    if temp_dir.exists() {
        fs::remove_dir_all(&temp_dir).await.ok();
    }
    fs::create_dir_all(&temp_dir).await.unwrap();

    // create 4 disk dirs
    let disk_paths = vec![
        temp_dir.join("disk1"),
        temp_dir.join("disk2"),
        temp_dir.join("disk3"),
        temp_dir.join("disk4"),
    ];

    for disk_path in &disk_paths {
        fs::create_dir_all(disk_path).await.unwrap();
    }

    // Build one endpoint per disk; all four live in pool 0, set 0.
    let mut endpoints = Vec::new();
    for (i, disk_path) in disk_paths.iter().enumerate() {
        let mut endpoint = Endpoint::try_from(disk_path.to_str().unwrap()).unwrap();
        // set correct index
        endpoint.set_pool_index(0);
        endpoint.set_set_index(0);
        endpoint.set_disk_index(i);
        endpoints.push(endpoint);
    }

    let pool_endpoints = PoolEndpoints {
        legacy: false,
        set_count: 1,
        drives_per_set: 4,
        endpoints: Endpoints::from(endpoints),
        cmd_line: "test".to_string(),
        platform: format!("OS: {} | Arch: {}", std::env::consts::OS, std::env::consts::ARCH),
    };

    let endpoint_pools = EndpointServerPools(vec![pool_endpoints]);

    // format disks (only first time)
    rustfs_ecstore::store::init_local_disks(endpoint_pools.clone()).await.unwrap();

    // Fixed port is fine because tests in this file share a single store.
    let port = 9002; // for simplicity
    let server_addr: std::net::SocketAddr = format!("127.0.0.1:{port}").parse().unwrap();
    let ecstore = ECStore::new(server_addr, endpoint_pools, CancellationToken::new())
        .await
        .unwrap();

    // init bucket metadata system
    let buckets_list = ecstore
        .list_bucket(&rustfs_ecstore::store_api::BucketOptions {
            no_metadata: true,
            ..Default::default()
        })
        .await
        .unwrap();
    let buckets = buckets_list.into_iter().map(|v| v.name).collect();
    rustfs_ecstore::bucket::metadata_sys::init_bucket_metadata_sys(ecstore.clone(), buckets).await;

    // LMDB environment for the lifecycle cache: recreated from scratch on
    // every process start so stale databases never leak between runs.
    // NOTE(review): fixed path /tmp/lmdb_lifecycle is shared across
    // processes — concurrent test runs on one machine would collide.
    let test_lmdb_lifecycle_dir = "/tmp/lmdb_lifecycle".to_string();
    let temp_dir = std::path::PathBuf::from(&test_lmdb_lifecycle_dir);
    if temp_dir.exists() {
        fs::remove_dir_all(&temp_dir).await.ok();
    }
    fs::create_dir_all(&temp_dir).await.unwrap();
    // SAFETY-relevant: heed's EnvOpenOptions::open is unsafe because the
    // caller must guarantee no other process maps the same env file.
    let lmdb_env = unsafe { EnvOpenOptions::new().max_dbs(100).open(&test_lmdb_lifecycle_dir).unwrap() };
    let bucket_name = format!("test-lc-cache-{}", "00000");
    let mut wtxn = lmdb_env.write_txn().unwrap();
    // DUP_SORT lets one i64 key hold multiple lifecycle entries.
    let db = match lmdb_env
        .database_options()
        .name(&format!("bucket_{bucket_name}"))
        .types::<I64<BigEndian>, LifecycleContentCodec>()
        .flags(DatabaseFlags::DUP_SORT)
        .create(&mut wtxn)
    {
        Ok(db) => db,
        Err(err) => {
            panic!("lmdb error: {err}");
        }
    };
    let _ = wtxn.commit();
    let _ = GLOBAL_LMDB_ENV.set(lmdb_env);
    let _ = GLOBAL_LMDB_DB.set(db);

    // Store in global once lock
    let _ = GLOBAL_ENV.set((disk_paths.clone(), ecstore.clone()));

    (disk_paths, ecstore)
}
|
||||
|
||||
/// Test helper: Create a test bucket
|
||||
#[allow(dead_code)]
|
||||
async fn create_test_bucket(ecstore: &Arc<ECStore>, bucket_name: &str) {
|
||||
(**ecstore)
|
||||
.make_bucket(bucket_name, &Default::default())
|
||||
.await
|
||||
.expect("Failed to create test bucket");
|
||||
info!("Created test bucket: {}", bucket_name);
|
||||
}
|
||||
|
||||
/// Test helper: Create a test lock bucket
|
||||
async fn create_test_lock_bucket(ecstore: &Arc<ECStore>, bucket_name: &str) {
|
||||
(**ecstore)
|
||||
.make_bucket(
|
||||
bucket_name,
|
||||
&MakeBucketOptions {
|
||||
lock_enabled: true,
|
||||
versioning_enabled: true,
|
||||
..Default::default()
|
||||
},
|
||||
)
|
||||
.await
|
||||
.expect("Failed to create test bucket");
|
||||
info!("Created test bucket: {}", bucket_name);
|
||||
}
|
||||
|
||||
/// Test helper: Upload test object
|
||||
async fn upload_test_object(ecstore: &Arc<ECStore>, bucket: &str, object: &str, data: &[u8]) {
|
||||
let mut reader = PutObjReader::from_vec(data.to_vec());
|
||||
let object_info = (**ecstore)
|
||||
.put_object(bucket, object, &mut reader, &ObjectOptions::default())
|
||||
.await
|
||||
.expect("Failed to upload test object");
|
||||
|
||||
println!("object_info1: {object_info:?}");
|
||||
|
||||
info!("Uploaded test object: {}/{} ({} bytes)", bucket, object, object_info.size);
|
||||
}
|
||||
|
||||
/// Test helper: Check if object exists
|
||||
async fn object_exists(ecstore: &Arc<ECStore>, bucket: &str, object: &str) -> bool {
|
||||
match (**ecstore).get_object_info(bucket, object, &ObjectOptions::default()).await {
|
||||
Ok(info) => !info.delete_marker,
|
||||
Err(_) => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Converts a unix timestamp in nanoseconds to an [`OffsetDateTime`],
/// returning `None` when the value is outside the representable range.
fn ns_to_offset_datetime(ns: i128) -> Option<OffsetDateTime> {
    OffsetDateTime::from_unix_timestamp_nanos(ns).ok()
}
|
||||
|
||||
/// Test helper: Project a scanner `LocalObjectRecord` onto an `ObjectInfo`.
///
/// Only the fields the lifecycle tests inspect are populated (bucket, name,
/// size, delete-marker flag, modification time); everything else is defaulted.
fn convert_record_to_object_info(record: &LocalObjectRecord) -> ObjectInfo {
    let usage = &record.usage;

    ObjectInfo {
        bucket: usage.bucket.clone(),
        name: usage.object.clone(),
        // NOTE(review): narrowing cast — assumes total_size fits in i64; confirm upstream.
        size: usage.total_size as i64,
        // Treated as a delete marker when no live version remains but delete
        // markers were recorded for the object.
        delete_marker: !usage.has_live_object && usage.delete_markers_count > 0,
        mod_time: usage.last_modified_ns.and_then(ns_to_offset_datetime),
        ..Default::default()
    }
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn to_object_info(
|
||||
bucket: &str,
|
||||
object: &str,
|
||||
total_size: i64,
|
||||
delete_marker: bool,
|
||||
mod_time: OffsetDateTime,
|
||||
version_id: &str,
|
||||
) -> ObjectInfo {
|
||||
ObjectInfo {
|
||||
bucket: bucket.to_string(),
|
||||
name: object.to_string(),
|
||||
size: total_size,
|
||||
delete_marker,
|
||||
mod_time: Some(mod_time),
|
||||
version_id: Some(Uuid::parse_str(version_id).unwrap()),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
/// Kind of lifecycle action recorded for a cached object version.
/// The variant order matters: it maps to the on-disk discriminant byte
/// (0..=3) written by `LifecycleContentCodec::bytes_encode`.
#[derive(Debug, PartialEq, Eq)]
enum LifecycleType {
    /// Expiry of the current (latest) object version.
    ExpiryCurrent,
    /// Expiry of a noncurrent (older) object version.
    ExpiryNoncurrent,
    /// Tier transition of the current object version.
    TransitionCurrent,
    /// Tier transition of a noncurrent object version.
    TransitionNoncurrent,
}
|
||||
|
||||
/// One cached lifecycle entry for an object version, stored in LMDB keyed by
/// its expected expiry timestamp (see the test's `lmdb.put` call).
#[derive(Debug, PartialEq, Eq)]
pub struct LifecycleContent {
    // Record format version, serialized as the first byte.
    ver_no: u8,
    // Object version id; the codec assumes a 36-character (hyphenated UUID) string.
    ver_id: String,
    // Modification time; only whole seconds survive a round-trip through the codec.
    mod_time: OffsetDateTime,
    // Which lifecycle action this entry represents.
    type_: LifecycleType,
    // Object key within the bucket; stored as the variable-length tail of the record.
    object_name: String,
}

/// Zero-sized heed codec for (de)serializing [`LifecycleContent`] values.
pub struct LifecycleContentCodec;
|
||||
|
||||
impl BytesEncode<'_> for LifecycleContentCodec {
|
||||
type EItem = LifecycleContent;
|
||||
|
||||
fn bytes_encode(lcc: &Self::EItem) -> Result<Cow<'_, [u8]>, BoxedError> {
|
||||
let (ver_no_byte, ver_id_bytes, mod_timestamp_bytes, type_byte, object_name_bytes) = match lcc {
|
||||
LifecycleContent {
|
||||
ver_no,
|
||||
ver_id,
|
||||
mod_time,
|
||||
type_: LifecycleType::ExpiryCurrent,
|
||||
object_name,
|
||||
} => (
|
||||
ver_no,
|
||||
ver_id.clone().into_bytes(),
|
||||
mod_time.unix_timestamp().to_be_bytes(),
|
||||
0,
|
||||
object_name.clone().into_bytes(),
|
||||
),
|
||||
LifecycleContent {
|
||||
ver_no,
|
||||
ver_id,
|
||||
mod_time,
|
||||
type_: LifecycleType::ExpiryNoncurrent,
|
||||
object_name,
|
||||
} => (
|
||||
ver_no,
|
||||
ver_id.clone().into_bytes(),
|
||||
mod_time.unix_timestamp().to_be_bytes(),
|
||||
1,
|
||||
object_name.clone().into_bytes(),
|
||||
),
|
||||
LifecycleContent {
|
||||
ver_no,
|
||||
ver_id,
|
||||
mod_time,
|
||||
type_: LifecycleType::TransitionCurrent,
|
||||
object_name,
|
||||
} => (
|
||||
ver_no,
|
||||
ver_id.clone().into_bytes(),
|
||||
mod_time.unix_timestamp().to_be_bytes(),
|
||||
2,
|
||||
object_name.clone().into_bytes(),
|
||||
),
|
||||
LifecycleContent {
|
||||
ver_no,
|
||||
ver_id,
|
||||
mod_time,
|
||||
type_: LifecycleType::TransitionNoncurrent,
|
||||
object_name,
|
||||
} => (
|
||||
ver_no,
|
||||
ver_id.clone().into_bytes(),
|
||||
mod_time.unix_timestamp().to_be_bytes(),
|
||||
3,
|
||||
object_name.clone().into_bytes(),
|
||||
),
|
||||
};
|
||||
|
||||
let mut output = Vec::<u8>::new();
|
||||
output.push(*ver_no_byte);
|
||||
output.extend_from_slice(&ver_id_bytes);
|
||||
output.extend_from_slice(&mod_timestamp_bytes);
|
||||
output.push(type_byte);
|
||||
output.extend_from_slice(&object_name_bytes);
|
||||
Ok(Cow::Owned(output))
|
||||
}
|
||||
}
|
||||
|
||||
impl<'a> BytesDecode<'a> for LifecycleContentCodec {
|
||||
type DItem = LifecycleContent;
|
||||
|
||||
fn bytes_decode(bytes: &'a [u8]) -> Result<Self::DItem, BoxedError> {
|
||||
use std::mem::size_of;
|
||||
|
||||
let ver_no = match bytes.get(..size_of::<u8>()) {
|
||||
Some(bytes) => bytes.try_into().map(u8::from_be_bytes).unwrap(),
|
||||
None => return Err("invalid LifecycleContent: cannot extract ver_no".into()),
|
||||
};
|
||||
|
||||
let ver_id = match bytes.get(size_of::<u8>()..(36 + 1)) {
|
||||
Some(bytes) => unsafe { std::str::from_utf8_unchecked(bytes).to_string() },
|
||||
None => return Err("invalid LifecycleContent: cannot extract ver_id".into()),
|
||||
};
|
||||
|
||||
let mod_timestamp = match bytes.get((36 + 1)..(size_of::<i64>() + 36 + 1)) {
|
||||
Some(bytes) => bytes.try_into().map(i64::from_be_bytes).unwrap(),
|
||||
None => return Err("invalid LifecycleContent: cannot extract mod_time timestamp".into()),
|
||||
};
|
||||
|
||||
let type_ = match bytes.get(size_of::<i64>() + 36 + 1) {
|
||||
Some(&0) => LifecycleType::ExpiryCurrent,
|
||||
Some(&1) => LifecycleType::ExpiryNoncurrent,
|
||||
Some(&2) => LifecycleType::TransitionCurrent,
|
||||
Some(&3) => LifecycleType::TransitionNoncurrent,
|
||||
Some(_) => return Err("invalid LifecycleContent: invalid LifecycleType".into()),
|
||||
None => return Err("invalid LifecycleContent: cannot extract LifecycleType".into()),
|
||||
};
|
||||
|
||||
let object_name = match bytes.get((size_of::<i64>() + 36 + 1 + 1)..) {
|
||||
Some(bytes) => unsafe { std::str::from_utf8_unchecked(bytes).to_string() },
|
||||
None => return Err("invalid LifecycleContent: cannot extract object_name".into()),
|
||||
};
|
||||
|
||||
Ok(LifecycleContent {
|
||||
ver_no,
|
||||
ver_id,
|
||||
mod_time: OffsetDateTime::from_unix_timestamp(mod_timestamp).unwrap(),
|
||||
type_,
|
||||
object_name,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
// Serial integration tests: these share global state (GLOBAL_LMDB_ENV /
// GLOBAL_LMDB_DB / the ecstore test environment) and therefore must not run
// concurrently, enforced via #[serial].
mod serial_tests {
    use super::*;

    // Builds the lifecycle expiry cache in LMDB from a local usage scan, then
    // dumps the cached rows for inspection. (NOTE(review): "chche" in the test
    // name is a typo for "cache".)
    #[tokio::test(flavor = "multi_thread", worker_threads = 4)]
    #[serial]
    //#[ignore]
    async fn test_lifecycle_chche_build() {
        let (_disk_paths, ecstore) = setup_test_env().await;

        // Create test bucket and object
        let suffix = uuid::Uuid::new_v4().simple().to_string();
        let bucket_name = format!("test-lc-cache-{}", &suffix[..8]);
        let object_name = "test/object.txt"; // Match the lifecycle rule prefix "test/"
        let test_data = b"Hello, this is test data for lifecycle expiry!";

        create_test_lock_bucket(&ecstore, bucket_name.as_str()).await;
        upload_test_object(&ecstore, bucket_name.as_str(), object_name, test_data).await;

        // Verify object exists initially
        assert!(object_exists(&ecstore, bucket_name.as_str(), object_name).await);
        println!("✅ Object exists before lifecycle processing");

        // Run a local usage scan; a failure degrades to an empty outcome
        // rather than aborting the test.
        let scan_outcome = match local_scan::scan_and_persist_local_usage(ecstore.clone()).await {
            Ok(outcome) => outcome,
            Err(err) => {
                warn!("Local usage scan failed: {}", err);
                LocalScanOutcome::default()
            }
        };
        let bucket_objects_map = &scan_outcome.bucket_objects;

        // Records scanned for our bucket; empty when the scan saw nothing.
        let records = match bucket_objects_map.get(&bucket_name) {
            Some(records) => records,
            None => {
                debug!("No local snapshot entries found for bucket {}; skipping lifecycle/integrity", bucket_name);
                &vec![]
            }
        };

        // Both globals are populated by setup_test_env; if either is missing
        // the cache-building section is silently skipped.
        if let Some(lmdb_env) = GLOBAL_LMDB_ENV.get() {
            if let Some(lmdb) = GLOBAL_LMDB_DB.get() {
                let mut wtxn = lmdb_env.write_txn().unwrap();

                /*if let Ok((lc_config, _)) = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket_name.as_str()).await {
                    if let Ok(object_info) = ecstore
                        .get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
                        .await
                    {
                        let event = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::eval_action_from_lifecycle(
                            &lc_config,
                            None,
                            None,
                            &object_info,
                        )
                        .await;

                        rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::apply_expiry_on_non_transitioned_objects(
                            ecstore.clone(),
                            &object_info,
                            &event,
                            &rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_audit::LcEventSrc::Scanner,
                        )
                        .await;

                        expired = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(2)).await;
                    }
                }*/

                // Insert one cache entry per live scanned object, keyed by its
                // expected expiry timestamp (1-day rule).
                for record in records {
                    if !record.usage.has_live_object {
                        continue;
                    }

                    let object_info = convert_record_to_object_info(record);
                    println!("object_info2: {object_info:?}");
                    // Fall back to "now" when the scan produced no mod time.
                    let mod_time = object_info.mod_time.unwrap_or(OffsetDateTime::now_utc());
                    let expiry_time = rustfs_ecstore::bucket::lifecycle::lifecycle::expected_expiry_time(mod_time, 1);

                    // Sentinel id (sorts after any real UUID) for unversioned objects.
                    let version_id = if let Some(version_id) = object_info.version_id {
                        version_id.to_string()
                    } else {
                        "zzzzzzzz-zzzz-zzzz-zzzz-zzzzzzzzzzzz".to_string()
                    };

                    lmdb.put(
                        &mut wtxn,
                        &expiry_time.unix_timestamp(),
                        &LifecycleContent {
                            ver_no: 0,
                            ver_id: version_id,
                            mod_time,
                            type_: LifecycleType::TransitionNoncurrent,
                            object_name: object_info.name,
                        },
                    )
                    .unwrap();
                }

                wtxn.commit().unwrap();

                // Second transaction: iterate and print every cached row.
                let mut wtxn = lmdb_env.write_txn().unwrap();
                let iter = lmdb.iter_mut(&mut wtxn).unwrap();
                //let _ = unsafe { iter.del_current().unwrap() };
                for row in iter {
                    if let Ok(ref elm) = row {
                        let LifecycleContent {
                            ver_no,
                            ver_id,
                            mod_time,
                            type_,
                            object_name,
                        } = &elm.1;
                        println!("cache row:{ver_no} {ver_id} {mod_time} {type_:?} {object_name}");
                    }
                    println!("row:{row:?}");
                }
                //drop(iter);
                wtxn.commit().unwrap();
            }
        }

        println!("Lifecycle cache test completed");
    }
}
|
||||
@@ -12,30 +12,46 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use async_trait::async_trait;
|
||||
use rustfs_ahm::scanner::{Scanner, data_scanner::ScannerConfig};
|
||||
use rustfs_ecstore::{
|
||||
bucket::metadata::BUCKET_LIFECYCLE_CONFIG,
|
||||
bucket::metadata_sys,
|
||||
bucket::{
|
||||
metadata::BUCKET_LIFECYCLE_CONFIG,
|
||||
metadata_sys,
|
||||
replication::{
|
||||
DeletedObjectReplicationInfo, DynReplicationPool, GLOBAL_REPLICATION_POOL, ReplicationPoolTrait, ReplicationPriority,
|
||||
},
|
||||
target::{BucketTarget, BucketTargetType, BucketTargets},
|
||||
utils::serialize,
|
||||
},
|
||||
disk::endpoint::Endpoint,
|
||||
endpoints::{EndpointServerPools, Endpoints, PoolEndpoints},
|
||||
global::GLOBAL_TierConfigMgr,
|
||||
store::ECStore,
|
||||
store_api::{MakeBucketOptions, ObjectIO, ObjectOptions, PutObjReader, StorageAPI},
|
||||
tier::tier::TierConfigMgr,
|
||||
tier::tier_config::{TierConfig, TierMinIO, TierType},
|
||||
};
|
||||
use rustfs_filemeta::{ReplicateObjectInfo, ReplicationStatusType};
|
||||
use rustfs_utils::http::headers::{AMZ_BUCKET_REPLICATION_STATUS, RESERVED_METADATA_PREFIX_LOWER};
|
||||
use s3s::dto::{
|
||||
BucketVersioningStatus, Destination, ExistingObjectReplication, ExistingObjectReplicationStatus, ReplicationConfiguration,
|
||||
ReplicationRule, ReplicationRuleStatus, VersioningConfiguration,
|
||||
};
|
||||
use serial_test::serial;
|
||||
use std::sync::Once;
|
||||
use std::sync::OnceLock;
|
||||
use std::{path::PathBuf, sync::Arc, time::Duration};
|
||||
use std::{
|
||||
path::PathBuf,
|
||||
sync::{Arc, Once, OnceLock},
|
||||
time::Duration,
|
||||
};
|
||||
use time::{Duration as TimeDuration, OffsetDateTime};
|
||||
use tokio::fs;
|
||||
use tokio::sync::RwLock;
|
||||
use tokio::sync::Mutex;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::warn;
|
||||
use tracing::{debug, info};
|
||||
use tracing::info;
|
||||
|
||||
static GLOBAL_ENV: OnceLock<(Vec<PathBuf>, Arc<ECStore>)> = OnceLock::new();
|
||||
static INIT: Once = Once::new();
|
||||
static GLOBAL_TIER_CONFIG_MGR: OnceLock<Arc<RwLock<TierConfigMgr>>> = OnceLock::new();
|
||||
const TEST_REPLICATION_TARGET_ARN: &str = "arn:aws:s3:::rustfs-lifecycle-replication-test";
|
||||
|
||||
fn init_tracing() {
|
||||
INIT.call_once(|| {
|
||||
@@ -121,13 +137,11 @@ async fn setup_test_env() -> (Vec<PathBuf>, Arc<ECStore>) {
|
||||
// Store in global once lock
|
||||
let _ = GLOBAL_ENV.set((disk_paths.clone(), ecstore.clone()));
|
||||
|
||||
let _ = GLOBAL_TIER_CONFIG_MGR.set(TierConfigMgr::new());
|
||||
|
||||
(disk_paths, ecstore)
|
||||
}
|
||||
|
||||
/// Test helper: Create a test bucket
|
||||
async fn _create_test_bucket(ecstore: &Arc<ECStore>, bucket_name: &str) {
|
||||
async fn create_test_bucket(ecstore: &Arc<ECStore>, bucket_name: &str) {
|
||||
(**ecstore)
|
||||
.make_bucket(bucket_name, &Default::default())
|
||||
.await
|
||||
@@ -162,6 +176,167 @@ async fn upload_test_object(ecstore: &Arc<ECStore>, bucket: &str, object: &str,
|
||||
info!("Uploaded test object: {}/{} ({} bytes)", bucket, object, object_info.size);
|
||||
}
|
||||
|
||||
#[derive(Debug, Default)]
|
||||
struct RecordingReplicationPool {
|
||||
replica_tasks: Mutex<Vec<ReplicateObjectInfo>>,
|
||||
delete_tasks: Mutex<Vec<DeletedObjectReplicationInfo>>,
|
||||
}
|
||||
|
||||
impl RecordingReplicationPool {
|
||||
async fn take_replica_tasks(&self) -> Vec<ReplicateObjectInfo> {
|
||||
let mut guard = self.replica_tasks.lock().await;
|
||||
guard.drain(..).collect()
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl ReplicationPoolTrait for RecordingReplicationPool {
|
||||
async fn queue_replica_task(&self, ri: ReplicateObjectInfo) {
|
||||
self.replica_tasks.lock().await.push(ri);
|
||||
}
|
||||
|
||||
async fn queue_replica_delete_task(&self, ri: DeletedObjectReplicationInfo) {
|
||||
self.delete_tasks.lock().await.push(ri);
|
||||
}
|
||||
|
||||
async fn resize(&self, _priority: ReplicationPriority, _max_workers: usize, _max_l_workers: usize) {}
|
||||
|
||||
async fn init_resync(
|
||||
self: Arc<Self>,
|
||||
_cancellation_token: CancellationToken,
|
||||
_buckets: Vec<String>,
|
||||
) -> Result<(), rustfs_ecstore::error::Error> {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
async fn ensure_test_replication_pool() -> Arc<RecordingReplicationPool> {
|
||||
static POOL: OnceLock<Arc<RecordingReplicationPool>> = OnceLock::new();
|
||||
if let Some(existing) = POOL.get() {
|
||||
existing.replica_tasks.lock().await.clear();
|
||||
existing.delete_tasks.lock().await.clear();
|
||||
return existing.clone();
|
||||
}
|
||||
|
||||
let pool = Arc::new(RecordingReplicationPool::default());
|
||||
let dyn_pool: Arc<DynReplicationPool> = pool.clone();
|
||||
GLOBAL_REPLICATION_POOL
|
||||
.get_or_init(|| {
|
||||
let pool_clone = dyn_pool.clone();
|
||||
async move { pool_clone }
|
||||
})
|
||||
.await;
|
||||
let _ = POOL.set(pool.clone());
|
||||
pool
|
||||
}
|
||||
|
||||
async fn configure_bucket_replication(bucket: &str) {
|
||||
let meta = metadata_sys::get(bucket)
|
||||
.await
|
||||
.expect("bucket metadata should exist for replication configuration");
|
||||
let mut metadata = (*meta).clone();
|
||||
|
||||
let replication_rule = ReplicationRule {
|
||||
delete_marker_replication: None,
|
||||
delete_replication: None,
|
||||
destination: Destination {
|
||||
access_control_translation: None,
|
||||
account: None,
|
||||
bucket: TEST_REPLICATION_TARGET_ARN.to_string(),
|
||||
encryption_configuration: None,
|
||||
metrics: None,
|
||||
replication_time: None,
|
||||
storage_class: None,
|
||||
},
|
||||
existing_object_replication: Some(ExistingObjectReplication {
|
||||
status: ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::ENABLED),
|
||||
}),
|
||||
filter: None,
|
||||
id: Some("lifecycle-replication-rule".to_string()),
|
||||
prefix: Some(String::new()),
|
||||
priority: Some(1),
|
||||
source_selection_criteria: None,
|
||||
status: ReplicationRuleStatus::from_static(ReplicationRuleStatus::ENABLED),
|
||||
};
|
||||
|
||||
let replication_cfg = ReplicationConfiguration {
|
||||
role: TEST_REPLICATION_TARGET_ARN.to_string(),
|
||||
rules: vec![replication_rule],
|
||||
};
|
||||
|
||||
let bucket_targets = BucketTargets {
|
||||
targets: vec![BucketTarget {
|
||||
source_bucket: bucket.to_string(),
|
||||
endpoint: "replication.invalid".to_string(),
|
||||
target_bucket: "replication-target".to_string(),
|
||||
arn: TEST_REPLICATION_TARGET_ARN.to_string(),
|
||||
target_type: BucketTargetType::ReplicationService,
|
||||
..Default::default()
|
||||
}],
|
||||
};
|
||||
|
||||
metadata.replication_config = Some(replication_cfg.clone());
|
||||
metadata.replication_config_xml = serialize(&replication_cfg).expect("serialize replication config");
|
||||
metadata.bucket_target_config = Some(bucket_targets.clone());
|
||||
metadata.bucket_targets_config_json = serde_json::to_vec(&bucket_targets).expect("serialize bucket targets");
|
||||
|
||||
let versioning_cfg = VersioningConfiguration {
|
||||
status: Some(BucketVersioningStatus::from_static(BucketVersioningStatus::ENABLED)),
|
||||
..Default::default()
|
||||
};
|
||||
metadata.versioning_config = Some(versioning_cfg.clone());
|
||||
metadata.versioning_config_xml = serialize(&versioning_cfg).expect("serialize versioning config");
|
||||
|
||||
metadata_sys::set_bucket_metadata(bucket.to_string(), metadata)
|
||||
.await
|
||||
.expect("failed to persist bucket metadata with replication config");
|
||||
}
|
||||
|
||||
async fn upload_object_with_replication_status(
|
||||
ecstore: &Arc<ECStore>,
|
||||
bucket: &str,
|
||||
object: &str,
|
||||
status: ReplicationStatusType,
|
||||
) {
|
||||
let mut reader = PutObjReader::from_vec(b"replication-state".to_vec());
|
||||
let mut opts = ObjectOptions::default();
|
||||
opts.user_defined
|
||||
.insert(AMZ_BUCKET_REPLICATION_STATUS.to_string(), status.as_str().to_string());
|
||||
let internal_key = format!("{}replication-status", RESERVED_METADATA_PREFIX_LOWER);
|
||||
opts.user_defined
|
||||
.insert(internal_key, format!("{}={};", TEST_REPLICATION_TARGET_ARN, status.as_str()));
|
||||
|
||||
(**ecstore)
|
||||
.put_object(bucket, object, &mut reader, &opts)
|
||||
.await
|
||||
.expect("failed to upload replication test object");
|
||||
}
|
||||
|
||||
async fn upload_object_with_retention(ecstore: &Arc<ECStore>, bucket: &str, object: &str, data: &[u8], retain_for: Duration) {
|
||||
use s3s::header::{X_AMZ_OBJECT_LOCK_MODE, X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE};
|
||||
use time::format_description::well_known::Rfc3339;
|
||||
|
||||
let mut reader = PutObjReader::from_vec(data.to_vec());
|
||||
let mut opts = ObjectOptions::default();
|
||||
let retain_duration = TimeDuration::try_from(retain_for).unwrap_or_else(|_| TimeDuration::seconds(0));
|
||||
let retain_until = OffsetDateTime::now_utc() + retain_duration;
|
||||
let retain_until_str = retain_until.format(&Rfc3339).expect("format retain date");
|
||||
let lock_mode_key = X_AMZ_OBJECT_LOCK_MODE.as_str().to_string();
|
||||
let lock_mode_lower = lock_mode_key.to_lowercase();
|
||||
opts.user_defined.insert(lock_mode_lower, "GOVERNANCE".to_string());
|
||||
opts.user_defined.insert(lock_mode_key, "GOVERNANCE".to_string());
|
||||
|
||||
let retain_key = X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE.as_str().to_string();
|
||||
let retain_key_lower = retain_key.to_lowercase();
|
||||
opts.user_defined.insert(retain_key_lower, retain_until_str.clone());
|
||||
opts.user_defined.insert(retain_key, retain_until_str);
|
||||
|
||||
(**ecstore)
|
||||
.put_object(bucket, object, &mut reader, &opts)
|
||||
.await
|
||||
.expect("Failed to upload retained object");
|
||||
}
|
||||
|
||||
/// Test helper: Set bucket lifecycle configuration
|
||||
async fn set_bucket_lifecycle(bucket_name: &str) -> Result<(), Box<dyn std::error::Error>> {
|
||||
// Create a simple lifecycle configuration XML with 0 days expiry for immediate testing
|
||||
@@ -220,7 +395,7 @@ async fn set_bucket_lifecycle_transition(bucket_name: &str) -> Result<(), Box<dy
|
||||
</Filter>
|
||||
<Transition>
|
||||
<Days>0</Days>
|
||||
<StorageClass>COLDTIER</StorageClass>
|
||||
<StorageClass>COLDTIER44</StorageClass>
|
||||
</Transition>
|
||||
</Rule>
|
||||
<Rule>
|
||||
@@ -231,7 +406,7 @@ async fn set_bucket_lifecycle_transition(bucket_name: &str) -> Result<(), Box<dy
|
||||
</Filter>
|
||||
<NoncurrentVersionTransition>
|
||||
<NoncurrentDays>0</NoncurrentDays>
|
||||
<StorageClass>COLDTIER</StorageClass>
|
||||
<StorageClass>COLDTIER44</StorageClass>
|
||||
</NoncurrentVersionTransition>
|
||||
</Rule>
|
||||
</LifecycleConfiguration>"#;
|
||||
@@ -243,33 +418,51 @@ async fn set_bucket_lifecycle_transition(bucket_name: &str) -> Result<(), Box<dy
|
||||
|
||||
/// Test helper: Create a test tier
|
||||
#[allow(dead_code)]
|
||||
async fn create_test_tier() {
|
||||
async fn create_test_tier(server: u32) {
|
||||
let args = TierConfig {
|
||||
version: "v1".to_string(),
|
||||
tier_type: TierType::MinIO,
|
||||
name: "COLDTIER".to_string(),
|
||||
name: "COLDTIER44".to_string(),
|
||||
s3: None,
|
||||
aliyun: None,
|
||||
tencent: None,
|
||||
huaweicloud: None,
|
||||
azure: None,
|
||||
gcs: None,
|
||||
r2: None,
|
||||
rustfs: None,
|
||||
minio: Some(TierMinIO {
|
||||
access_key: "minioadmin".to_string(),
|
||||
secret_key: "minioadmin".to_string(),
|
||||
bucket: "mblock2".to_string(),
|
||||
endpoint: "http://127.0.0.1:9020".to_string(),
|
||||
prefix: "mypre3/".to_string(),
|
||||
region: "".to_string(),
|
||||
..Default::default()
|
||||
}),
|
||||
minio: if server == 1 {
|
||||
Some(TierMinIO {
|
||||
access_key: "minioadmin".to_string(),
|
||||
secret_key: "minioadmin".to_string(),
|
||||
bucket: "hello".to_string(),
|
||||
endpoint: "http://39.105.198.204:9000".to_string(),
|
||||
prefix: format!("mypre{}/", uuid::Uuid::new_v4()),
|
||||
region: "".to_string(),
|
||||
..Default::default()
|
||||
})
|
||||
} else {
|
||||
Some(TierMinIO {
|
||||
access_key: "minioadmin".to_string(),
|
||||
secret_key: "minioadmin".to_string(),
|
||||
bucket: "mblock2".to_string(),
|
||||
endpoint: "http://127.0.0.1:9020".to_string(),
|
||||
prefix: format!("mypre{}/", uuid::Uuid::new_v4()),
|
||||
region: "".to_string(),
|
||||
..Default::default()
|
||||
})
|
||||
},
|
||||
};
|
||||
let mut tier_config_mgr = GLOBAL_TIER_CONFIG_MGR.get().unwrap().write().await;
|
||||
let mut tier_config_mgr = GLOBAL_TierConfigMgr.write().await;
|
||||
if let Err(err) = tier_config_mgr.add(args, false).await {
|
||||
warn!("tier_config_mgr add failed, e: {:?}", err);
|
||||
println!("tier_config_mgr add failed, e: {err:?}");
|
||||
panic!("tier add failed. {err}");
|
||||
}
|
||||
if let Err(e) = tier_config_mgr.save().await {
|
||||
warn!("tier_config_mgr save failed, e: {:?}", e);
|
||||
println!("tier_config_mgr save failed, e: {e:?}");
|
||||
panic!("tier save failed");
|
||||
}
|
||||
info!("Created test tier: {}", "COLDTIER");
|
||||
println!("Created test tier: COLDTIER44");
|
||||
}
|
||||
|
||||
/// Test helper: Check if object exists
|
||||
@@ -284,9 +477,10 @@ async fn object_exists(ecstore: &Arc<ECStore>, bucket: &str, object: &str) -> bo
|
||||
#[allow(dead_code)]
|
||||
async fn object_is_delete_marker(ecstore: &Arc<ECStore>, bucket: &str, object: &str) -> bool {
|
||||
if let Ok(oi) = (**ecstore).get_object_info(bucket, object, &ObjectOptions::default()).await {
|
||||
debug!("oi: {:?}", oi);
|
||||
println!("oi: {oi:?}");
|
||||
oi.delete_marker
|
||||
} else {
|
||||
println!("object_is_delete_marker is error");
|
||||
panic!("object_is_delete_marker is error");
|
||||
}
|
||||
}
|
||||
@@ -295,9 +489,10 @@ async fn object_is_delete_marker(ecstore: &Arc<ECStore>, bucket: &str, object: &
|
||||
#[allow(dead_code)]
|
||||
async fn object_is_transitioned(ecstore: &Arc<ECStore>, bucket: &str, object: &str) -> bool {
|
||||
if let Ok(oi) = (**ecstore).get_object_info(bucket, object, &ObjectOptions::default()).await {
|
||||
info!("oi: {:?}", oi);
|
||||
println!("oi: {oi:?}");
|
||||
!oi.transitioned_object.status.is_empty()
|
||||
} else {
|
||||
println!("object_is_transitioned is error");
|
||||
panic!("object_is_transitioned is error");
|
||||
}
|
||||
}
|
||||
@@ -455,8 +650,9 @@ mod serial_tests {
|
||||
println!("Lifecycle expiry basic test completed");
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
|
||||
#[serial]
|
||||
//#[ignore]
|
||||
async fn test_lifecycle_expiry_deletemarker() {
|
||||
let (_disk_paths, ecstore) = setup_test_env().await;
|
||||
|
||||
@@ -578,12 +774,13 @@ mod serial_tests {
|
||||
println!("Lifecycle expiry basic test completed");
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 1)]
|
||||
#[serial]
|
||||
#[ignore]
|
||||
async fn test_lifecycle_transition_basic() {
|
||||
let (_disk_paths, ecstore) = setup_test_env().await;
|
||||
|
||||
//create_test_tier().await;
|
||||
create_test_tier(1).await;
|
||||
|
||||
// Create test bucket and object
|
||||
let suffix = uuid::Uuid::new_v4().simple().to_string();
|
||||
@@ -591,7 +788,8 @@ mod serial_tests {
|
||||
let object_name = "test/object.txt"; // Match the lifecycle rule prefix "test/"
|
||||
let test_data = b"Hello, this is test data for lifecycle expiry!";
|
||||
|
||||
create_test_lock_bucket(&ecstore, bucket_name.as_str()).await;
|
||||
//create_test_lock_bucket(&ecstore, bucket_name.as_str()).await;
|
||||
create_test_bucket(&ecstore, bucket_name.as_str()).await;
|
||||
upload_test_object(&ecstore, bucket_name.as_str(), object_name, test_data).await;
|
||||
|
||||
// Verify object exists initially
|
||||
@@ -599,13 +797,13 @@ mod serial_tests {
|
||||
println!("✅ Object exists before lifecycle processing");
|
||||
|
||||
// Set lifecycle configuration with very short expiry (0 days = immediate expiry)
|
||||
/*set_bucket_lifecycle_transition(bucket_name)
|
||||
set_bucket_lifecycle_transition(bucket_name.as_str())
|
||||
.await
|
||||
.expect("Failed to set lifecycle configuration");
|
||||
println!("✅ Lifecycle configuration set for bucket: {bucket_name}");
|
||||
|
||||
// Verify lifecycle configuration was set
|
||||
match rustfs_ecstore::bucket::metadata_sys::get(bucket_name).await {
|
||||
match rustfs_ecstore::bucket::metadata_sys::get(bucket_name.as_str()).await {
|
||||
Ok(bucket_meta) => {
|
||||
assert!(bucket_meta.lifecycle_config.is_some());
|
||||
println!("✅ Bucket metadata retrieved successfully");
|
||||
@@ -613,7 +811,7 @@ mod serial_tests {
|
||||
Err(e) => {
|
||||
println!("❌ Error retrieving bucket metadata: {e:?}");
|
||||
}
|
||||
}*/
|
||||
}
|
||||
|
||||
// Create scanner with very short intervals for testing
|
||||
let scanner_config = ScannerConfig {
|
||||
@@ -640,12 +838,11 @@ mod serial_tests {
|
||||
tokio::time::sleep(Duration::from_secs(5)).await;
|
||||
|
||||
// Check if object has been expired (deleted)
|
||||
//let check_result = object_is_transitioned(&ecstore, bucket_name, object_name).await;
|
||||
let check_result = object_exists(&ecstore, bucket_name.as_str(), object_name).await;
|
||||
let check_result = object_is_transitioned(&ecstore, &bucket_name, object_name).await;
|
||||
println!("Object exists after lifecycle processing: {check_result}");
|
||||
|
||||
if check_result {
|
||||
println!("✅ Object was not deleted by lifecycle processing");
|
||||
println!("✅ Object was transitioned by lifecycle processing");
|
||||
// Let's try to get object info to see its details
|
||||
match ecstore
|
||||
.get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
|
||||
@@ -663,7 +860,7 @@ mod serial_tests {
|
||||
}
|
||||
}
|
||||
} else {
|
||||
println!("❌ Object was deleted by lifecycle processing");
|
||||
println!("❌ Object was not transitioned by lifecycle processing");
|
||||
}
|
||||
|
||||
assert!(check_result);
|
||||
@@ -675,4 +872,127 @@ mod serial_tests {
|
||||
|
||||
println!("Lifecycle transition basic test completed");
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||
#[serial]
|
||||
async fn test_lifecycle_respects_object_lock_retention() {
|
||||
let (_disk_paths, ecstore) = setup_test_env().await;
|
||||
|
||||
let suffix = uuid::Uuid::new_v4().simple().to_string();
|
||||
let bucket_name = format!("test-lc-lock-retention-{}", &suffix[..8]);
|
||||
let object_name = "test/locked-object.txt";
|
||||
let test_data = b"retained payload";
|
||||
|
||||
create_test_lock_bucket(&ecstore, bucket_name.as_str()).await;
|
||||
upload_object_with_retention(&ecstore, bucket_name.as_str(), object_name, test_data, Duration::from_secs(3600)).await;
|
||||
|
||||
assert!(
|
||||
object_exists(&ecstore, bucket_name.as_str(), object_name).await,
|
||||
"Object should exist before lifecycle processing"
|
||||
);
|
||||
|
||||
set_bucket_lifecycle(bucket_name.as_str())
|
||||
.await
|
||||
.expect("Failed to set lifecycle configuration");
|
||||
|
||||
let scanner_config = ScannerConfig {
|
||||
scan_interval: Duration::from_millis(100),
|
||||
deep_scan_interval: Duration::from_millis(500),
|
||||
max_concurrent_scans: 1,
|
||||
..Default::default()
|
||||
};
|
||||
let scanner = Scanner::new(Some(scanner_config), None);
|
||||
scanner.start().await.expect("Failed to start scanner");
|
||||
|
||||
for _ in 0..3 {
|
||||
scanner.scan_cycle().await.expect("scan cycle should succeed");
|
||||
tokio::time::sleep(Duration::from_millis(200)).await;
|
||||
}
|
||||
|
||||
assert!(
|
||||
object_exists(&ecstore, bucket_name.as_str(), object_name).await,
|
||||
"Object with active retention should not be deleted by lifecycle"
|
||||
);
|
||||
}
|
||||
|
||||
#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
|
||||
#[serial]
|
||||
async fn test_lifecycle_triggers_replication_heal_for_lagging_and_failed_objects() {
|
||||
let (_disk_paths, ecstore) = setup_test_env().await;
|
||||
|
||||
let suffix = uuid::Uuid::new_v4().simple().to_string();
|
||||
let bucket_name = format!("lc-replication-{}", &suffix[..8]);
|
||||
create_test_bucket(&ecstore, bucket_name.as_str()).await;
|
||||
configure_bucket_replication(bucket_name.as_str()).await;
|
||||
let replication_pool = ensure_test_replication_pool().await;
|
||||
|
||||
upload_object_with_replication_status(
|
||||
&ecstore,
|
||||
bucket_name.as_str(),
|
||||
"test/lagging-pending",
|
||||
ReplicationStatusType::Pending,
|
||||
)
|
||||
.await;
|
||||
upload_object_with_replication_status(
|
||||
&ecstore,
|
||||
bucket_name.as_str(),
|
||||
"test/failed-object",
|
||||
ReplicationStatusType::Failed,
|
||||
)
|
||||
.await;
|
||||
|
||||
let scanner_config = ScannerConfig {
|
||||
scan_interval: Duration::from_millis(100),
|
||||
deep_scan_interval: Duration::from_millis(500),
|
||||
max_concurrent_scans: 2,
|
||||
replication_pending_grace: Duration::from_secs(0),
|
||||
..Default::default()
|
||||
};
|
||||
let scanner = Scanner::new(Some(scanner_config), None);
|
||||
|
||||
scanner.scan_cycle().await.expect("scan cycle should complete");
|
||||
tokio::time::sleep(Duration::from_millis(200)).await;
|
||||
|
||||
let replica_tasks = replication_pool.take_replica_tasks().await;
|
||||
assert!(
|
||||
replica_tasks.iter().any(|t| t.name == "test/lagging-pending"),
|
||||
"Pending object should be enqueued for replication heal: {:?}",
|
||||
replica_tasks
|
||||
);
|
||||
assert!(
|
||||
replica_tasks.iter().any(|t| t.name == "test/failed-object"),
|
||||
"Failed object should be enqueued for replication heal: {:?}",
|
||||
replica_tasks
|
||||
);
|
||||
|
||||
let metrics = scanner.get_metrics().await;
|
||||
assert_eq!(
|
||||
metrics.replication_tasks_queued,
|
||||
replica_tasks.len() as u64,
|
||||
"Replication tasks queued metric should match recorded tasks"
|
||||
);
|
||||
assert!(
|
||||
metrics.replication_pending_objects >= 1,
|
||||
"Pending replication metric should be incremented"
|
||||
);
|
||||
assert!(metrics.replication_failed_objects >= 1, "Failed replication metric should be incremented");
|
||||
assert!(
|
||||
metrics.replication_lagging_objects >= 1,
|
||||
"Lagging replication metric should track pending object beyond grace"
|
||||
);
|
||||
|
||||
let bucket_metrics = metrics
|
||||
.bucket_metrics
|
||||
.get(&bucket_name)
|
||||
.expect("bucket metrics should contain replication counters");
|
||||
assert!(
|
||||
bucket_metrics.replication_pending >= 1 && bucket_metrics.replication_failed >= 1,
|
||||
"Bucket-level replication metrics should reflect observed statuses"
|
||||
);
|
||||
assert_eq!(
|
||||
bucket_metrics.replication_tasks_queued,
|
||||
replica_tasks.len() as u64,
|
||||
"Bucket-level queued counter should match enqueued tasks"
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -12,26 +12,23 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::{fs, net::SocketAddr, sync::Arc, sync::OnceLock, time::Duration};
|
||||
use tempfile::TempDir;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
use serial_test::serial;
|
||||
|
||||
use rustfs_ahm::heal::manager::HealConfig;
|
||||
use rustfs_ahm::scanner::{
|
||||
Scanner,
|
||||
data_scanner::ScanMode,
|
||||
node_scanner::{LoadLevel, NodeScanner, NodeScannerConfig},
|
||||
};
|
||||
|
||||
use rustfs_ecstore::disk::endpoint::Endpoint;
|
||||
use rustfs_ecstore::endpoints::{EndpointServerPools, Endpoints, PoolEndpoints};
|
||||
use rustfs_ecstore::store::ECStore;
|
||||
use rustfs_ecstore::{
|
||||
StorageAPI,
|
||||
disk::endpoint::Endpoint,
|
||||
endpoints::{EndpointServerPools, Endpoints, PoolEndpoints},
|
||||
store::ECStore,
|
||||
store_api::{MakeBucketOptions, ObjectIO, PutObjReader},
|
||||
};
|
||||
use serial_test::serial;
|
||||
use std::{fs, net::SocketAddr, sync::Arc, sync::OnceLock, time::Duration};
|
||||
use tempfile::TempDir;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
|
||||
// Global test environment cache to avoid repeated initialization
|
||||
static GLOBAL_TEST_ENV: OnceLock<(Vec<std::path::PathBuf>, Arc<ECStore>)> = OnceLock::new();
|
||||
|
||||
@@ -12,9 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::time::Duration;
|
||||
use tempfile::TempDir;
|
||||
|
||||
use rustfs_ahm::scanner::{
|
||||
checkpoint::{CheckpointData, CheckpointManager},
|
||||
io_monitor::{AdvancedIOMonitor, IOMonitorConfig},
|
||||
@@ -23,6 +20,8 @@ use rustfs_ahm::scanner::{
|
||||
node_scanner::{LoadLevel, NodeScanner, NodeScannerConfig, ScanProgress},
|
||||
stats_aggregator::{DecentralizedStatsAggregator, DecentralizedStatsAggregatorConfig},
|
||||
};
|
||||
use std::time::Duration;
|
||||
use tempfile::TempDir;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_checkpoint_manager_save_and_load() {
|
||||
|
||||
@@ -29,6 +29,7 @@ base64-simd = { workspace = true }
|
||||
rsa = { workspace = true }
|
||||
serde.workspace = true
|
||||
serde_json.workspace = true
|
||||
rand.workspace = true
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
|
||||
<a href="https://docs.rustfs.com/en/">📖 Documentation</a>
|
||||
<a href="https://docs.rustfs.com/">📖 Documentation</a>
|
||||
· <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
|
||||
· <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
|
||||
</p>
|
||||
|
||||
@@ -12,11 +12,9 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use rsa::Pkcs1v15Encrypt;
|
||||
use rsa::{
|
||||
RsaPrivateKey, RsaPublicKey,
|
||||
Pkcs1v15Encrypt, RsaPrivateKey, RsaPublicKey,
|
||||
pkcs8::{DecodePrivateKey, DecodePublicKey},
|
||||
rand_core::OsRng,
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::io::{Error, Result};
|
||||
@@ -33,8 +31,9 @@ pub struct Token {
|
||||
/// Returns the encrypted string processed by base64
|
||||
pub fn gencode(token: &Token, key: &str) -> Result<String> {
|
||||
let data = serde_json::to_vec(token)?;
|
||||
let mut rng = rand::rng();
|
||||
let public_key = RsaPublicKey::from_public_key_pem(key).map_err(Error::other)?;
|
||||
let encrypted_data = public_key.encrypt(&mut OsRng, Pkcs1v15Encrypt, &data).map_err(Error::other)?;
|
||||
let encrypted_data = public_key.encrypt(&mut rng, Pkcs1v15Encrypt, &data).map_err(Error::other)?;
|
||||
Ok(base64_simd::URL_SAFE_NO_PAD.encode_to_string(&encrypted_data))
|
||||
}
|
||||
|
||||
@@ -76,9 +75,10 @@ mod tests {
|
||||
pkcs8::{EncodePrivateKey, EncodePublicKey, LineEnding},
|
||||
};
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
#[test]
|
||||
fn test_gencode_and_parse() {
|
||||
let mut rng = OsRng;
|
||||
let mut rng = rand::rng();
|
||||
let bits = 2048;
|
||||
let private_key = RsaPrivateKey::new(&mut rng, bits).expect("Failed to generate private key");
|
||||
let public_key = RsaPublicKey::from(&private_key);
|
||||
@@ -101,7 +101,8 @@ mod tests {
|
||||
|
||||
#[test]
|
||||
fn test_parse_invalid_token() {
|
||||
let private_key_pem = RsaPrivateKey::new(&mut OsRng, 2048)
|
||||
let mut rng = rand::rng();
|
||||
let private_key_pem = RsaPrivateKey::new(&mut rng, 2048)
|
||||
.expect("Failed to generate private key")
|
||||
.to_pkcs8_pem(LineEnding::LF)
|
||||
.unwrap();
|
||||
|
||||
@@ -30,7 +30,10 @@ rustfs-targets = { workspace = true }
|
||||
rustfs-config = { workspace = true, features = ["audit", "constants"] }
|
||||
rustfs-ecstore = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
const-str = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
hashbrown = { workspace = true }
|
||||
metrics = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
@@ -39,5 +42,6 @@ tracing = { workspace = true, features = ["std", "attributes"] }
|
||||
url = { workspace = true }
|
||||
rumqttc = { workspace = true }
|
||||
|
||||
|
||||
[lints]
|
||||
workspace = true
|
||||
|
||||
@@ -13,18 +13,10 @@
|
||||
// limitations under the License.
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use hashbrown::HashMap;
|
||||
use rustfs_targets::EventName;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Trait for types that can be serialized to JSON and have a timestamp
|
||||
pub trait LogRecord {
|
||||
/// Serialize the record to a JSON string
|
||||
fn to_json(&self) -> String;
|
||||
/// Get the timestamp of the record
|
||||
fn get_timestamp(&self) -> chrono::DateTime<chrono::Utc>;
|
||||
}
|
||||
|
||||
/// ObjectVersion represents an object version with key and versionId
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]
|
||||
@@ -36,19 +28,12 @@ pub struct ObjectVersion {
|
||||
}
|
||||
|
||||
impl ObjectVersion {
|
||||
/// Set the object name (chainable)
|
||||
pub fn set_object_name(&mut self, name: String) -> &mut Self {
|
||||
self.object_name = name;
|
||||
self
|
||||
}
|
||||
/// Set the version ID (chainable)
|
||||
pub fn set_version_id(&mut self, version_id: Option<String>) -> &mut Self {
|
||||
self.version_id = version_id;
|
||||
self
|
||||
pub fn new(object_name: String, version_id: Option<String>) -> Self {
|
||||
Self { object_name, version_id }
|
||||
}
|
||||
}
|
||||
|
||||
/// ApiDetails contains API information for the audit entry
|
||||
/// `ApiDetails` contains API information for the audit entry.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
|
||||
pub struct ApiDetails {
|
||||
#[serde(skip_serializing_if = "Option::is_none")]
|
||||
@@ -79,75 +64,86 @@ pub struct ApiDetails {
|
||||
pub time_to_response_in_ns: Option<String>,
|
||||
}
|
||||
|
||||
impl ApiDetails {
|
||||
/// Set API name (chainable)
|
||||
pub fn set_name(&mut self, name: Option<String>) -> &mut Self {
|
||||
self.name = name;
|
||||
/// Builder for `ApiDetails`.
|
||||
#[derive(Default, Clone)]
|
||||
pub struct ApiDetailsBuilder(pub ApiDetails);
|
||||
|
||||
impl ApiDetailsBuilder {
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
pub fn name(mut self, name: impl Into<String>) -> Self {
|
||||
self.0.name = Some(name.into());
|
||||
self
|
||||
}
|
||||
/// Set bucket name (chainable)
|
||||
pub fn set_bucket(&mut self, bucket: Option<String>) -> &mut Self {
|
||||
self.bucket = bucket;
|
||||
|
||||
pub fn bucket(mut self, bucket: impl Into<String>) -> Self {
|
||||
self.0.bucket = Some(bucket.into());
|
||||
self
|
||||
}
|
||||
/// Set object name (chainable)
|
||||
pub fn set_object(&mut self, object: Option<String>) -> &mut Self {
|
||||
self.object = object;
|
||||
|
||||
pub fn object(mut self, object: impl Into<String>) -> Self {
|
||||
self.0.object = Some(object.into());
|
||||
self
|
||||
}
|
||||
/// Set objects list (chainable)
|
||||
pub fn set_objects(&mut self, objects: Option<Vec<ObjectVersion>>) -> &mut Self {
|
||||
self.objects = objects;
|
||||
|
||||
pub fn objects(mut self, objects: Vec<ObjectVersion>) -> Self {
|
||||
self.0.objects = Some(objects);
|
||||
self
|
||||
}
|
||||
/// Set status (chainable)
|
||||
pub fn set_status(&mut self, status: Option<String>) -> &mut Self {
|
||||
self.status = status;
|
||||
|
||||
pub fn status(mut self, status: impl Into<String>) -> Self {
|
||||
self.0.status = Some(status.into());
|
||||
self
|
||||
}
|
||||
/// Set status code (chainable)
|
||||
pub fn set_status_code(&mut self, code: Option<i32>) -> &mut Self {
|
||||
self.status_code = code;
|
||||
|
||||
pub fn status_code(mut self, code: i32) -> Self {
|
||||
self.0.status_code = Some(code);
|
||||
self
|
||||
}
|
||||
/// Set input bytes (chainable)
|
||||
pub fn set_input_bytes(&mut self, bytes: Option<i64>) -> &mut Self {
|
||||
self.input_bytes = bytes;
|
||||
|
||||
pub fn input_bytes(mut self, bytes: i64) -> Self {
|
||||
self.0.input_bytes = Some(bytes);
|
||||
self
|
||||
}
|
||||
/// Set output bytes (chainable)
|
||||
pub fn set_output_bytes(&mut self, bytes: Option<i64>) -> &mut Self {
|
||||
self.output_bytes = bytes;
|
||||
|
||||
pub fn output_bytes(mut self, bytes: i64) -> Self {
|
||||
self.0.output_bytes = Some(bytes);
|
||||
self
|
||||
}
|
||||
/// Set header bytes (chainable)
|
||||
pub fn set_header_bytes(&mut self, bytes: Option<i64>) -> &mut Self {
|
||||
self.header_bytes = bytes;
|
||||
|
||||
pub fn header_bytes(mut self, bytes: i64) -> Self {
|
||||
self.0.header_bytes = Some(bytes);
|
||||
self
|
||||
}
|
||||
/// Set time to first byte (chainable)
|
||||
pub fn set_time_to_first_byte(&mut self, t: Option<String>) -> &mut Self {
|
||||
self.time_to_first_byte = t;
|
||||
|
||||
pub fn time_to_first_byte(mut self, t: impl Into<String>) -> Self {
|
||||
self.0.time_to_first_byte = Some(t.into());
|
||||
self
|
||||
}
|
||||
/// Set time to first byte in nanoseconds (chainable)
|
||||
pub fn set_time_to_first_byte_in_ns(&mut self, t: Option<String>) -> &mut Self {
|
||||
self.time_to_first_byte_in_ns = t;
|
||||
|
||||
pub fn time_to_first_byte_in_ns(mut self, t: impl Into<String>) -> Self {
|
||||
self.0.time_to_first_byte_in_ns = Some(t.into());
|
||||
self
|
||||
}
|
||||
/// Set time to response (chainable)
|
||||
pub fn set_time_to_response(&mut self, t: Option<String>) -> &mut Self {
|
||||
self.time_to_response = t;
|
||||
|
||||
pub fn time_to_response(mut self, t: impl Into<String>) -> Self {
|
||||
self.0.time_to_response = Some(t.into());
|
||||
self
|
||||
}
|
||||
/// Set time to response in nanoseconds (chainable)
|
||||
pub fn set_time_to_response_in_ns(&mut self, t: Option<String>) -> &mut Self {
|
||||
self.time_to_response_in_ns = t;
|
||||
|
||||
pub fn time_to_response_in_ns(mut self, t: impl Into<String>) -> Self {
|
||||
self.0.time_to_response_in_ns = Some(t.into());
|
||||
self
|
||||
}
|
||||
|
||||
pub fn build(self) -> ApiDetails {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
/// AuditEntry represents an audit log entry
|
||||
/// `AuditEntry` represents an audit log entry.
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Default)]
|
||||
pub struct AuditEntry {
|
||||
pub version: String,
|
||||
@@ -155,6 +151,7 @@ pub struct AuditEntry {
|
||||
pub deployment_id: Option<String>,
|
||||
#[serde(rename = "siteName", skip_serializing_if = "Option::is_none")]
|
||||
pub site_name: Option<String>,
|
||||
#[serde(with = "chrono::serde::ts_milliseconds")]
|
||||
pub time: DateTime<Utc>,
|
||||
pub event: EventName,
|
||||
#[serde(rename = "type", skip_serializing_if = "Option::is_none")]
|
||||
@@ -191,200 +188,130 @@ pub struct AuditEntry {
|
||||
pub error: Option<String>,
|
||||
}
|
||||
|
||||
impl AuditEntry {
|
||||
/// Create a new AuditEntry with required fields
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn new(
|
||||
version: String,
|
||||
deployment_id: Option<String>,
|
||||
site_name: Option<String>,
|
||||
time: DateTime<Utc>,
|
||||
event: EventName,
|
||||
entry_type: Option<String>,
|
||||
trigger: String,
|
||||
api: ApiDetails,
|
||||
) -> Self {
|
||||
AuditEntry {
|
||||
version,
|
||||
deployment_id,
|
||||
site_name,
|
||||
time,
|
||||
/// Constructor for `AuditEntry`.
|
||||
pub struct AuditEntryBuilder(AuditEntry);
|
||||
|
||||
impl AuditEntryBuilder {
|
||||
/// Create a new builder with all required fields.
|
||||
pub fn new(version: impl Into<String>, event: EventName, trigger: impl Into<String>, api: ApiDetails) -> Self {
|
||||
Self(AuditEntry {
|
||||
version: version.into(),
|
||||
time: Utc::now(),
|
||||
event,
|
||||
entry_type,
|
||||
trigger,
|
||||
trigger: trigger.into(),
|
||||
api,
|
||||
remote_host: None,
|
||||
request_id: None,
|
||||
user_agent: None,
|
||||
req_path: None,
|
||||
req_host: None,
|
||||
req_node: None,
|
||||
req_claims: None,
|
||||
req_query: None,
|
||||
req_header: None,
|
||||
resp_header: None,
|
||||
tags: None,
|
||||
access_key: None,
|
||||
parent_user: None,
|
||||
error: None,
|
||||
}
|
||||
..Default::default()
|
||||
})
|
||||
}
|
||||
|
||||
/// Set version (chainable)
|
||||
pub fn set_version(&mut self, version: String) -> &mut Self {
|
||||
self.version = version;
|
||||
self
|
||||
}
|
||||
/// Set deployment ID (chainable)
|
||||
pub fn set_deployment_id(&mut self, id: Option<String>) -> &mut Self {
|
||||
self.deployment_id = id;
|
||||
self
|
||||
}
|
||||
/// Set site name (chainable)
|
||||
pub fn set_site_name(&mut self, name: Option<String>) -> &mut Self {
|
||||
self.site_name = name;
|
||||
self
|
||||
}
|
||||
/// Set time (chainable)
|
||||
pub fn set_time(&mut self, time: DateTime<Utc>) -> &mut Self {
|
||||
self.time = time;
|
||||
self
|
||||
}
|
||||
/// Set event (chainable)
|
||||
pub fn set_event(&mut self, event: EventName) -> &mut Self {
|
||||
self.event = event;
|
||||
self
|
||||
}
|
||||
/// Set entry type (chainable)
|
||||
pub fn set_entry_type(&mut self, entry_type: Option<String>) -> &mut Self {
|
||||
self.entry_type = entry_type;
|
||||
self
|
||||
}
|
||||
/// Set trigger (chainable)
|
||||
pub fn set_trigger(&mut self, trigger: String) -> &mut Self {
|
||||
self.trigger = trigger;
|
||||
self
|
||||
}
|
||||
/// Set API details (chainable)
|
||||
pub fn set_api(&mut self, api: ApiDetails) -> &mut Self {
|
||||
self.api = api;
|
||||
self
|
||||
}
|
||||
/// Set remote host (chainable)
|
||||
pub fn set_remote_host(&mut self, host: Option<String>) -> &mut Self {
|
||||
self.remote_host = host;
|
||||
self
|
||||
}
|
||||
/// Set request ID (chainable)
|
||||
pub fn set_request_id(&mut self, id: Option<String>) -> &mut Self {
|
||||
self.request_id = id;
|
||||
self
|
||||
}
|
||||
/// Set user agent (chainable)
|
||||
pub fn set_user_agent(&mut self, agent: Option<String>) -> &mut Self {
|
||||
self.user_agent = agent;
|
||||
self
|
||||
}
|
||||
/// Set request path (chainable)
|
||||
pub fn set_req_path(&mut self, path: Option<String>) -> &mut Self {
|
||||
self.req_path = path;
|
||||
self
|
||||
}
|
||||
/// Set request host (chainable)
|
||||
pub fn set_req_host(&mut self, host: Option<String>) -> &mut Self {
|
||||
self.req_host = host;
|
||||
self
|
||||
}
|
||||
/// Set request node (chainable)
|
||||
pub fn set_req_node(&mut self, node: Option<String>) -> &mut Self {
|
||||
self.req_node = node;
|
||||
self
|
||||
}
|
||||
/// Set request claims (chainable)
|
||||
pub fn set_req_claims(&mut self, claims: Option<HashMap<String, Value>>) -> &mut Self {
|
||||
self.req_claims = claims;
|
||||
self
|
||||
}
|
||||
/// Set request query (chainable)
|
||||
pub fn set_req_query(&mut self, query: Option<HashMap<String, String>>) -> &mut Self {
|
||||
self.req_query = query;
|
||||
self
|
||||
}
|
||||
/// Set request header (chainable)
|
||||
pub fn set_req_header(&mut self, header: Option<HashMap<String, String>>) -> &mut Self {
|
||||
self.req_header = header;
|
||||
self
|
||||
}
|
||||
/// Set response header (chainable)
|
||||
pub fn set_resp_header(&mut self, header: Option<HashMap<String, String>>) -> &mut Self {
|
||||
self.resp_header = header;
|
||||
self
|
||||
}
|
||||
/// Set tags (chainable)
|
||||
pub fn set_tags(&mut self, tags: Option<HashMap<String, Value>>) -> &mut Self {
|
||||
self.tags = tags;
|
||||
self
|
||||
}
|
||||
/// Set access key (chainable)
|
||||
pub fn set_access_key(&mut self, key: Option<String>) -> &mut Self {
|
||||
self.access_key = key;
|
||||
self
|
||||
}
|
||||
/// Set parent user (chainable)
|
||||
pub fn set_parent_user(&mut self, user: Option<String>) -> &mut Self {
|
||||
self.parent_user = user;
|
||||
self
|
||||
}
|
||||
/// Set error message (chainable)
|
||||
pub fn set_error(&mut self, error: Option<String>) -> &mut Self {
|
||||
self.error = error;
|
||||
// event
|
||||
pub fn version(mut self, version: impl Into<String>) -> Self {
|
||||
self.0.version = version.into();
|
||||
self
|
||||
}
|
||||
|
||||
/// Build AuditEntry from context or parameters (example, can be extended)
|
||||
pub fn from_context(
|
||||
version: String,
|
||||
deployment_id: Option<String>,
|
||||
time: DateTime<Utc>,
|
||||
event: EventName,
|
||||
trigger: String,
|
||||
api: ApiDetails,
|
||||
tags: Option<HashMap<String, Value>>,
|
||||
) -> Self {
|
||||
AuditEntry {
|
||||
version,
|
||||
deployment_id,
|
||||
site_name: None,
|
||||
time,
|
||||
event,
|
||||
entry_type: None,
|
||||
trigger,
|
||||
api,
|
||||
remote_host: None,
|
||||
request_id: None,
|
||||
user_agent: None,
|
||||
req_path: None,
|
||||
req_host: None,
|
||||
req_node: None,
|
||||
req_claims: None,
|
||||
req_query: None,
|
||||
req_header: None,
|
||||
resp_header: None,
|
||||
tags,
|
||||
access_key: None,
|
||||
parent_user: None,
|
||||
error: None,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl LogRecord for AuditEntry {
|
||||
/// Serialize AuditEntry to JSON string
|
||||
fn to_json(&self) -> String {
|
||||
serde_json::to_string(self).unwrap_or_else(|_| String::from("{}"))
|
||||
}
|
||||
/// Get the timestamp of the audit entry
|
||||
fn get_timestamp(&self) -> DateTime<Utc> {
|
||||
self.time
|
||||
pub fn event(mut self, event: EventName) -> Self {
|
||||
self.0.event = event;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn api(mut self, api_details: ApiDetails) -> Self {
|
||||
self.0.api = api_details;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn deployment_id(mut self, id: impl Into<String>) -> Self {
|
||||
self.0.deployment_id = Some(id.into());
|
||||
self
|
||||
}
|
||||
|
||||
pub fn site_name(mut self, name: impl Into<String>) -> Self {
|
||||
self.0.site_name = Some(name.into());
|
||||
self
|
||||
}
|
||||
|
||||
pub fn time(mut self, time: DateTime<Utc>) -> Self {
|
||||
self.0.time = time;
|
||||
self
|
||||
}
|
||||
|
||||
pub fn entry_type(mut self, entry_type: impl Into<String>) -> Self {
|
||||
self.0.entry_type = Some(entry_type.into());
|
||||
self
|
||||
}
|
||||
|
||||
pub fn remote_host(mut self, host: impl Into<String>) -> Self {
|
||||
self.0.remote_host = Some(host.into());
|
||||
self
|
||||
}
|
||||
|
||||
pub fn request_id(mut self, id: impl Into<String>) -> Self {
|
||||
self.0.request_id = Some(id.into());
|
||||
self
|
||||
}
|
||||
|
||||
pub fn user_agent(mut self, agent: impl Into<String>) -> Self {
|
||||
self.0.user_agent = Some(agent.into());
|
||||
self
|
||||
}
|
||||
|
||||
pub fn req_path(mut self, path: impl Into<String>) -> Self {
|
||||
self.0.req_path = Some(path.into());
|
||||
self
|
||||
}
|
||||
|
||||
pub fn req_host(mut self, host: impl Into<String>) -> Self {
|
||||
self.0.req_host = Some(host.into());
|
||||
self
|
||||
}
|
||||
|
||||
pub fn req_node(mut self, node: impl Into<String>) -> Self {
|
||||
self.0.req_node = Some(node.into());
|
||||
self
|
||||
}
|
||||
|
||||
pub fn req_claims(mut self, claims: HashMap<String, Value>) -> Self {
|
||||
self.0.req_claims = Some(claims);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn req_query(mut self, query: HashMap<String, String>) -> Self {
|
||||
self.0.req_query = Some(query);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn req_header(mut self, header: HashMap<String, String>) -> Self {
|
||||
self.0.req_header = Some(header);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn resp_header(mut self, header: HashMap<String, String>) -> Self {
|
||||
self.0.resp_header = Some(header);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn tags(mut self, tags: HashMap<String, Value>) -> Self {
|
||||
self.0.tags = Some(tags);
|
||||
self
|
||||
}
|
||||
|
||||
pub fn access_key(mut self, key: impl Into<String>) -> Self {
|
||||
self.0.access_key = Some(key.into());
|
||||
self
|
||||
}
|
||||
|
||||
pub fn parent_user(mut self, user: impl Into<String>) -> Self {
|
||||
self.0.parent_user = Some(user.into());
|
||||
self
|
||||
}
|
||||
|
||||
pub fn error(mut self, error: impl Into<String>) -> Self {
|
||||
self.0.error = Some(error.into());
|
||||
self
|
||||
}
|
||||
|
||||
/// Construct the final `AuditEntry`.
|
||||
pub fn build(self) -> AuditEntry {
|
||||
self.0
|
||||
}
|
||||
}
|
||||
|
||||
@@ -21,7 +21,7 @@ pub type AuditResult<T> = Result<T, AuditError>;
|
||||
#[derive(Error, Debug)]
|
||||
pub enum AuditError {
|
||||
#[error("Configuration error: {0}")]
|
||||
Configuration(String),
|
||||
Configuration(String, #[source] Option<Box<dyn std::error::Error + Send + Sync>>),
|
||||
|
||||
#[error("config not loaded")]
|
||||
ConfigNotLoaded,
|
||||
@@ -35,11 +35,14 @@ pub enum AuditError {
|
||||
#[error("System already initialized")]
|
||||
AlreadyInitialized,
|
||||
|
||||
#[error("Storage not available: {0}")]
|
||||
StorageNotAvailable(String),
|
||||
|
||||
#[error("Failed to save configuration: {0}")]
|
||||
SaveConfig(String),
|
||||
SaveConfig(#[source] Box<dyn std::error::Error + Send + Sync>),
|
||||
|
||||
#[error("Failed to load configuration: {0}")]
|
||||
LoadConfig(String),
|
||||
LoadConfig(#[source] Box<dyn std::error::Error + Send + Sync>),
|
||||
|
||||
#[error("Serialization error: {0}")]
|
||||
Serialization(#[from] serde_json::Error),
|
||||
@@ -49,7 +52,4 @@ pub enum AuditError {
|
||||
|
||||
#[error("Join error: {0}")]
|
||||
Join(#[from] tokio::task::JoinError),
|
||||
|
||||
#[error("Server storage not initialized: {0}")]
|
||||
ServerNotInitialized(String),
|
||||
}
|
||||
|
||||
@@ -15,7 +15,7 @@
|
||||
use crate::{AuditEntry, AuditResult, AuditSystem};
|
||||
use rustfs_ecstore::config::Config;
|
||||
use std::sync::{Arc, OnceLock};
|
||||
use tracing::{error, warn};
|
||||
use tracing::{debug, error, trace, warn};
|
||||
|
||||
/// Global audit system instance
|
||||
static AUDIT_SYSTEM: OnceLock<Arc<AuditSystem>> = OnceLock::new();
|
||||
@@ -30,6 +30,19 @@ pub fn audit_system() -> Option<Arc<AuditSystem>> {
|
||||
AUDIT_SYSTEM.get().cloned()
|
||||
}
|
||||
|
||||
/// A helper macro for executing closures if the global audit system is initialized.
|
||||
/// If not initialized, log a warning and return `Ok(())`.
|
||||
macro_rules! with_audit_system {
|
||||
($async_closure:expr) => {
|
||||
if let Some(system) = audit_system() {
|
||||
(async move { $async_closure(system).await }).await
|
||||
} else {
|
||||
warn!("Audit system not initialized, operation skipped.");
|
||||
Ok(())
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
/// Start the global audit system with configuration
|
||||
pub async fn start_audit_system(config: Config) -> AuditResult<()> {
|
||||
let system = init_audit_system();
|
||||
@@ -38,32 +51,17 @@ pub async fn start_audit_system(config: Config) -> AuditResult<()> {
|
||||
|
||||
/// Stop the global audit system
|
||||
pub async fn stop_audit_system() -> AuditResult<()> {
|
||||
if let Some(system) = audit_system() {
|
||||
system.close().await
|
||||
} else {
|
||||
warn!("Audit system not initialized, cannot stop");
|
||||
Ok(())
|
||||
}
|
||||
with_audit_system!(|system: Arc<AuditSystem>| async move { system.close().await })
|
||||
}
|
||||
|
||||
/// Pause the global audit system
|
||||
pub async fn pause_audit_system() -> AuditResult<()> {
|
||||
if let Some(system) = audit_system() {
|
||||
system.pause().await
|
||||
} else {
|
||||
warn!("Audit system not initialized, cannot pause");
|
||||
Ok(())
|
||||
}
|
||||
with_audit_system!(|system: Arc<AuditSystem>| async move { system.pause().await })
|
||||
}
|
||||
|
||||
/// Resume the global audit system
|
||||
pub async fn resume_audit_system() -> AuditResult<()> {
|
||||
if let Some(system) = audit_system() {
|
||||
system.resume().await
|
||||
} else {
|
||||
warn!("Audit system not initialized, cannot resume");
|
||||
Ok(())
|
||||
}
|
||||
with_audit_system!(|system: Arc<AuditSystem>| async move { system.resume().await })
|
||||
}
|
||||
|
||||
/// Dispatch an audit log entry to all targets
|
||||
@@ -72,23 +70,23 @@ pub async fn dispatch_audit_log(entry: Arc<AuditEntry>) -> AuditResult<()> {
|
||||
if system.is_running().await {
|
||||
system.dispatch(entry).await
|
||||
} else {
|
||||
// System not running, just drop the log entry without error
|
||||
// The system is initialized but not running (for example, it is suspended). Silently discard log entries based on original logic.
|
||||
// For debugging purposes, it can be useful to add a trace log here.
|
||||
trace!("Audit system is not running, dropping audit entry.");
|
||||
Ok(())
|
||||
}
|
||||
} else {
|
||||
// System not initialized, just drop the log entry without error
|
||||
// The system is not initialized at all. This is a more important state.
|
||||
// It might be better to return an error or log a warning.
|
||||
debug!("Audit system not initialized, dropping audit entry.");
|
||||
// If this should be a hard failure, you can return Err(AuditError::NotInitialized("..."))
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Reload the global audit system configuration
|
||||
pub async fn reload_audit_config(config: Config) -> AuditResult<()> {
|
||||
if let Some(system) = audit_system() {
|
||||
system.reload_config(config).await
|
||||
} else {
|
||||
warn!("Audit system not initialized, cannot reload config");
|
||||
Ok(())
|
||||
}
|
||||
with_audit_system!(|system: Arc<AuditSystem>| async move { system.reload_config(config).await })
|
||||
}
|
||||
|
||||
/// Check if the global audit system is running
|
||||
|
||||
@@ -25,7 +25,7 @@ pub mod observability;
|
||||
pub mod registry;
|
||||
pub mod system;
|
||||
|
||||
pub use entity::{ApiDetails, AuditEntry, LogRecord, ObjectVersion};
|
||||
pub use entity::{ApiDetails, AuditEntry, ObjectVersion};
|
||||
pub use error::{AuditError, AuditResult};
|
||||
pub use global::*;
|
||||
pub use observability::{AuditMetrics, AuditMetricsReport, PerformanceValidation};
|
||||
|
||||
@@ -21,12 +21,47 @@
|
||||
//! - Error rate monitoring
|
||||
//! - Queue depth monitoring
|
||||
|
||||
use metrics::{counter, describe_counter, describe_gauge, describe_histogram, gauge, histogram};
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::sync::{Arc, OnceLock};
|
||||
use std::time::{Duration, Instant};
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::info;
|
||||
|
||||
const RUSTFS_AUDIT_METRICS_NAMESPACE: &str = "rustfs.audit.";
|
||||
|
||||
const M_AUDIT_EVENTS_TOTAL: &str = const_str::concat!(RUSTFS_AUDIT_METRICS_NAMESPACE, "events.total");
|
||||
const M_AUDIT_EVENTS_FAILED: &str = const_str::concat!(RUSTFS_AUDIT_METRICS_NAMESPACE, "events.failed");
|
||||
const M_AUDIT_DISPATCH_NS: &str = const_str::concat!(RUSTFS_AUDIT_METRICS_NAMESPACE, "dispatch.ns");
|
||||
const M_AUDIT_EPS: &str = const_str::concat!(RUSTFS_AUDIT_METRICS_NAMESPACE, "eps");
|
||||
const M_AUDIT_TARGET_OPS: &str = const_str::concat!(RUSTFS_AUDIT_METRICS_NAMESPACE, "target.ops");
|
||||
const M_AUDIT_CONFIG_RELOADS: &str = const_str::concat!(RUSTFS_AUDIT_METRICS_NAMESPACE, "config.reloads");
|
||||
const M_AUDIT_SYSTEM_STARTS: &str = const_str::concat!(RUSTFS_AUDIT_METRICS_NAMESPACE, "system.starts");
|
||||
|
||||
const L_RESULT: &str = "result";
|
||||
const L_STATUS: &str = "status";
|
||||
|
||||
const V_SUCCESS: &str = "success";
|
||||
const V_FAILURE: &str = "failure";
|
||||
|
||||
/// One-time registration of indicator meta information
|
||||
/// This function ensures that metric descriptors are registered only once.
|
||||
pub fn init_observability_metrics() {
|
||||
static METRICS_DESC_INIT: OnceLock<()> = OnceLock::new();
|
||||
METRICS_DESC_INIT.get_or_init(|| {
|
||||
// Event/Time-consuming
|
||||
describe_counter!(M_AUDIT_EVENTS_TOTAL, "Total audit events (labeled by result).");
|
||||
describe_counter!(M_AUDIT_EVENTS_FAILED, "Total failed audit events.");
|
||||
describe_histogram!(M_AUDIT_DISPATCH_NS, "Dispatch time per event (ns).");
|
||||
describe_gauge!(M_AUDIT_EPS, "Events per second since last reset.");
|
||||
|
||||
// Target operation/system event
|
||||
describe_counter!(M_AUDIT_TARGET_OPS, "Total target operations (labeled by status).");
|
||||
describe_counter!(M_AUDIT_CONFIG_RELOADS, "Total configuration reloads.");
|
||||
describe_counter!(M_AUDIT_SYSTEM_STARTS, "Total system starts.");
|
||||
});
|
||||
}
|
||||
|
||||
/// Metrics collector for audit system observability
|
||||
#[derive(Debug)]
|
||||
pub struct AuditMetrics {
|
||||
@@ -56,6 +91,7 @@ impl Default for AuditMetrics {
|
||||
impl AuditMetrics {
|
||||
/// Creates a new metrics collector
|
||||
pub fn new() -> Self {
|
||||
init_observability_metrics();
|
||||
Self {
|
||||
total_events_processed: AtomicU64::new(0),
|
||||
total_events_failed: AtomicU64::new(0),
|
||||
@@ -68,11 +104,28 @@ impl AuditMetrics {
|
||||
}
|
||||
}
|
||||
|
||||
// Suggestion: Call this auxiliary function in the existing "Successful Event Recording" method body to complete the instrumentation
|
||||
#[inline]
|
||||
fn emit_event_success_metrics(&self, dispatch_time: Duration) {
|
||||
// count + histogram
|
||||
counter!(M_AUDIT_EVENTS_TOTAL, L_RESULT => V_SUCCESS).increment(1);
|
||||
histogram!(M_AUDIT_DISPATCH_NS).record(dispatch_time.as_nanos() as f64);
|
||||
}
|
||||
|
||||
// Suggestion: Call this auxiliary function in the existing "Failure Event Recording" method body to complete the instrumentation
|
||||
#[inline]
|
||||
fn emit_event_failure_metrics(&self, dispatch_time: Duration) {
|
||||
counter!(M_AUDIT_EVENTS_TOTAL, L_RESULT => V_FAILURE).increment(1);
|
||||
counter!(M_AUDIT_EVENTS_FAILED).increment(1);
|
||||
histogram!(M_AUDIT_DISPATCH_NS).record(dispatch_time.as_nanos() as f64);
|
||||
}
|
||||
|
||||
/// Records a successful event dispatch
|
||||
pub fn record_event_success(&self, dispatch_time: Duration) {
|
||||
self.total_events_processed.fetch_add(1, Ordering::Relaxed);
|
||||
self.total_dispatch_time_ns
|
||||
.fetch_add(dispatch_time.as_nanos() as u64, Ordering::Relaxed);
|
||||
self.emit_event_success_metrics(dispatch_time);
|
||||
}
|
||||
|
||||
/// Records a failed event dispatch
|
||||
@@ -80,27 +133,32 @@ impl AuditMetrics {
|
||||
self.total_events_failed.fetch_add(1, Ordering::Relaxed);
|
||||
self.total_dispatch_time_ns
|
||||
.fetch_add(dispatch_time.as_nanos() as u64, Ordering::Relaxed);
|
||||
self.emit_event_failure_metrics(dispatch_time);
|
||||
}
|
||||
|
||||
/// Records a successful target operation
|
||||
pub fn record_target_success(&self) {
|
||||
self.target_success_count.fetch_add(1, Ordering::Relaxed);
|
||||
counter!(M_AUDIT_TARGET_OPS, L_STATUS => V_SUCCESS).increment(1);
|
||||
}
|
||||
|
||||
/// Records a failed target operation
|
||||
pub fn record_target_failure(&self) {
|
||||
self.target_failure_count.fetch_add(1, Ordering::Relaxed);
|
||||
counter!(M_AUDIT_TARGET_OPS, L_STATUS => V_FAILURE).increment(1);
|
||||
}
|
||||
|
||||
/// Records a configuration reload
|
||||
pub fn record_config_reload(&self) {
|
||||
self.config_reload_count.fetch_add(1, Ordering::Relaxed);
|
||||
counter!(M_AUDIT_CONFIG_RELOADS).increment(1);
|
||||
info!("Audit configuration reloaded");
|
||||
}
|
||||
|
||||
/// Records a system start
|
||||
pub fn record_system_start(&self) {
|
||||
self.system_start_count.fetch_add(1, Ordering::Relaxed);
|
||||
counter!(M_AUDIT_SYSTEM_STARTS).increment(1);
|
||||
info!("Audit system started");
|
||||
}
|
||||
|
||||
@@ -110,11 +168,14 @@ impl AuditMetrics {
|
||||
let elapsed = reset_time.elapsed();
|
||||
let total_events = self.total_events_processed.load(Ordering::Relaxed) + self.total_events_failed.load(Ordering::Relaxed);
|
||||
|
||||
if elapsed.as_secs_f64() > 0.0 {
|
||||
let eps = if elapsed.as_secs_f64() > 0.0 {
|
||||
total_events as f64 / elapsed.as_secs_f64()
|
||||
} else {
|
||||
0.0
|
||||
}
|
||||
};
|
||||
// EPS is reported in gauge
|
||||
gauge!(M_AUDIT_EPS).set(eps);
|
||||
eps
|
||||
}
|
||||
|
||||
/// Gets the average dispatch latency in milliseconds
|
||||
@@ -166,6 +227,8 @@ impl AuditMetrics {
|
||||
let mut reset_time = self.last_reset_time.write().await;
|
||||
*reset_time = Instant::now();
|
||||
|
||||
// Reset EPS to zero after reset
|
||||
gauge!(M_AUDIT_EPS).set(0.0);
|
||||
info!("Audit metrics reset");
|
||||
}
|
||||
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
use crate::{AuditEntry, AuditError, AuditResult};
|
||||
use futures::{StreamExt, stream::FuturesUnordered};
|
||||
use hashbrown::{HashMap, HashSet};
|
||||
use rustfs_config::{
|
||||
DEFAULT_DELIMITER, ENABLE_KEY, ENV_PREFIX, MQTT_BROKER, MQTT_KEEP_ALIVE_INTERVAL, MQTT_PASSWORD, MQTT_QOS, MQTT_QUEUE_DIR,
|
||||
MQTT_QUEUE_LIMIT, MQTT_RECONNECT_INTERVAL, MQTT_TOPIC, MQTT_USERNAME, WEBHOOK_AUTH_TOKEN, WEBHOOK_BATCH_SIZE,
|
||||
@@ -25,7 +26,6 @@ use rustfs_targets::{
|
||||
Target, TargetError,
|
||||
target::{ChannelTargetType, TargetType, mqtt::MQTTArgs, webhook::WebhookArgs},
|
||||
};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tracing::{debug, error, info, warn};
|
||||
@@ -251,7 +251,7 @@ impl AuditRegistry {
|
||||
sections.extend(successes_by_section.keys().cloned());
|
||||
|
||||
for section_name in sections {
|
||||
let mut section_map: HashMap<String, KVS> = HashMap::new();
|
||||
let mut section_map: std::collections::HashMap<String, KVS> = std::collections::HashMap::new();
|
||||
|
||||
// The default entry (if present) is written back to `_`
|
||||
if let Some(default_cfg) = section_defaults.get(§ion_name) {
|
||||
@@ -277,7 +277,7 @@ impl AuditRegistry {
|
||||
|
||||
// 7. Save the new configuration to the system
|
||||
let Some(store) = rustfs_ecstore::new_object_layer_fn() else {
|
||||
return Err(AuditError::ServerNotInitialized(
|
||||
return Err(AuditError::StorageNotAvailable(
|
||||
"Failed to save target configuration: server storage not initialized".to_string(),
|
||||
));
|
||||
};
|
||||
@@ -286,7 +286,7 @@ impl AuditRegistry {
|
||||
Ok(_) => info!("New audit configuration saved to system successfully"),
|
||||
Err(e) => {
|
||||
error!(error = %e, "Failed to save new audit configuration");
|
||||
return Err(AuditError::SaveConfig(e.to_string()));
|
||||
return Err(AuditError::SaveConfig(Box::new(e)));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -59,7 +59,7 @@ impl AuditSystem {
|
||||
|
||||
/// Starts the audit system with the given configuration
|
||||
pub async fn start(&self, config: Config) -> AuditResult<()> {
|
||||
let mut state = self.state.write().await;
|
||||
let state = self.state.write().await;
|
||||
|
||||
match *state {
|
||||
AuditSystemState::Running => {
|
||||
@@ -72,7 +72,6 @@ impl AuditSystem {
|
||||
_ => {}
|
||||
}
|
||||
|
||||
*state = AuditSystemState::Starting;
|
||||
drop(state);
|
||||
|
||||
info!("Starting audit system");
|
||||
@@ -90,6 +89,17 @@ impl AuditSystem {
|
||||
let mut registry = self.registry.lock().await;
|
||||
match registry.create_targets_from_config(&config).await {
|
||||
Ok(targets) => {
|
||||
if targets.is_empty() {
|
||||
info!("No enabled audit targets found, keeping audit system stopped");
|
||||
drop(registry);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
{
|
||||
let mut state = self.state.write().await;
|
||||
*state = AuditSystemState::Starting;
|
||||
}
|
||||
|
||||
info!(target_count = targets.len(), "Created audit targets successfully");
|
||||
|
||||
// Initialize all targets
|
||||
@@ -146,7 +156,7 @@ impl AuditSystem {
|
||||
warn!("Audit system is already paused");
|
||||
Ok(())
|
||||
}
|
||||
_ => Err(AuditError::Configuration("Cannot pause audit system in current state".to_string())),
|
||||
_ => Err(AuditError::Configuration("Cannot pause audit system in current state".to_string(), None)),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -164,7 +174,7 @@ impl AuditSystem {
|
||||
warn!("Audit system is already running");
|
||||
Ok(())
|
||||
}
|
||||
_ => Err(AuditError::Configuration("Cannot resume audit system in current state".to_string())),
|
||||
_ => Err(AuditError::Configuration("Cannot resume audit system in current state".to_string(), None)),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -460,7 +470,7 @@ impl AuditSystem {
|
||||
info!(target_id = %target_id, "Target enabled");
|
||||
Ok(())
|
||||
} else {
|
||||
Err(AuditError::Configuration(format!("Target not found: {target_id}")))
|
||||
Err(AuditError::Configuration(format!("Target not found: {target_id}"), None))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -473,7 +483,7 @@ impl AuditSystem {
|
||||
info!(target_id = %target_id, "Target disabled");
|
||||
Ok(())
|
||||
} else {
|
||||
Err(AuditError::Configuration(format!("Target not found: {target_id}")))
|
||||
Err(AuditError::Configuration(format!("Target not found: {target_id}"), None))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -487,7 +497,7 @@ impl AuditSystem {
|
||||
info!(target_id = %target_id, "Target removed");
|
||||
Ok(())
|
||||
} else {
|
||||
Err(AuditError::Configuration(format!("Target not found: {target_id}")))
|
||||
Err(AuditError::Configuration(format!("Target not found: {target_id}"), None))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -52,7 +52,7 @@ async fn test_config_parsing_webhook() {
|
||||
// We expect this to fail due to server storage not being initialized
|
||||
// but the parsing should work correctly
|
||||
match result {
|
||||
Err(AuditError::ServerNotInitialized(_)) => {
|
||||
Err(AuditError::StorageNotAvailable(_)) => {
|
||||
// This is expected in test environment
|
||||
}
|
||||
Err(e) => {
|
||||
|
||||
@@ -73,7 +73,7 @@ async fn test_concurrent_target_creation() {
|
||||
|
||||
// Verify it fails with expected error (server not initialized)
|
||||
match result {
|
||||
Err(AuditError::ServerNotInitialized(_)) => {
|
||||
Err(AuditError::StorageNotAvailable(_)) => {
|
||||
// Expected in test environment
|
||||
}
|
||||
Err(e) => {
|
||||
@@ -94,7 +94,7 @@ async fn test_audit_log_dispatch_performance() {
|
||||
let start_result = system.start(config).await;
|
||||
if start_result.is_err() {
|
||||
println!("AuditSystem failed to start: {start_result:?}");
|
||||
return; // 或 assert!(false, "AuditSystem failed to start");
|
||||
return; // Alternatively: assert!(false, "AuditSystem failed to start");
|
||||
}
|
||||
|
||||
use chrono::Utc;
|
||||
@@ -103,17 +103,17 @@ async fn test_audit_log_dispatch_performance() {
|
||||
use std::collections::HashMap;
|
||||
let id = 1;
|
||||
|
||||
let mut req_header = HashMap::new();
|
||||
let mut req_header = hashbrown::HashMap::new();
|
||||
req_header.insert("authorization".to_string(), format!("Bearer test-token-{id}"));
|
||||
req_header.insert("content-type".to_string(), "application/octet-stream".to_string());
|
||||
|
||||
let mut resp_header = HashMap::new();
|
||||
let mut resp_header = hashbrown::HashMap::new();
|
||||
resp_header.insert("x-response".to_string(), "ok".to_string());
|
||||
|
||||
let mut tags = HashMap::new();
|
||||
let mut tags = hashbrown::HashMap::new();
|
||||
tags.insert(format!("tag-{id}"), json!("sample"));
|
||||
|
||||
let mut req_query = HashMap::new();
|
||||
let mut req_query = hashbrown::HashMap::new();
|
||||
req_query.insert("id".to_string(), id.to_string());
|
||||
|
||||
let api_details = ApiDetails {
|
||||
|
||||
@@ -35,7 +35,7 @@ async fn test_complete_audit_system_lifecycle() {
|
||||
|
||||
// Should fail in test environment but state handling should work
|
||||
match start_result {
|
||||
Err(AuditError::ServerNotInitialized(_)) => {
|
||||
Err(AuditError::StorageNotAvailable(_)) => {
|
||||
// Expected in test environment
|
||||
assert_eq!(system.get_state().await, system::AuditSystemState::Stopped);
|
||||
}
|
||||
@@ -168,7 +168,7 @@ async fn test_config_parsing_with_multiple_instances() {
|
||||
|
||||
// Should fail due to server storage not initialized, but parsing should work
|
||||
match result {
|
||||
Err(AuditError::ServerNotInitialized(_)) => {
|
||||
Err(AuditError::StorageNotAvailable(_)) => {
|
||||
// Expected - parsing worked but save failed
|
||||
}
|
||||
Err(e) => {
|
||||
@@ -182,48 +182,6 @@ async fn test_config_parsing_with_multiple_instances() {
|
||||
}
|
||||
}
|
||||
|
||||
// #[tokio::test]
|
||||
// async fn test_environment_variable_precedence() {
|
||||
// // Test that environment variables override config file settings
|
||||
// // This test validates the ENV > file instance > file default precedence
|
||||
// // Set some test environment variables
|
||||
// std::env::set_var("RUSTFS_AUDIT_WEBHOOK_ENABLE_TEST", "on");
|
||||
// std::env::set_var("RUSTFS_AUDIT_WEBHOOK_ENDPOINT_TEST", "http://env.example.com/audit");
|
||||
// std::env::set_var("RUSTFS_AUDIT_WEBHOOK_AUTH_TOKEN_TEST", "env-token");
|
||||
// let mut registry = AuditRegistry::new();
|
||||
//
|
||||
// // Create config that should be overridden by env vars
|
||||
// let mut config = Config(HashMap::new());
|
||||
// let mut webhook_section = HashMap::new();
|
||||
//
|
||||
// let mut test_kvs = KVS::new();
|
||||
// test_kvs.insert("enable".to_string(), "off".to_string()); // Should be overridden
|
||||
// test_kvs.insert("endpoint".to_string(), "http://file.example.com/audit".to_string()); // Should be overridden
|
||||
// test_kvs.insert("batch_size".to_string(), "10".to_string()); // Should remain from file
|
||||
// webhook_section.insert("test".to_string(), test_kvs);
|
||||
//
|
||||
// config.0.insert("audit_webhook".to_string(), webhook_section);
|
||||
//
|
||||
// // Try to create targets - should use env vars for endpoint/enable, file for batch_size
|
||||
// let result = registry.create_targets_from_config(&config).await;
|
||||
// // Clean up env vars
|
||||
// std::env::remove_var("RUSTFS_AUDIT_WEBHOOK_ENABLE_TEST");
|
||||
// std::env::remove_var("RUSTFS_AUDIT_WEBHOOK_ENDPOINT_TEST");
|
||||
// std::env::remove_var("RUSTFS_AUDIT_WEBHOOK_AUTH_TOKEN_TEST");
|
||||
// // Should fail due to server storage, but precedence logic should work
|
||||
// match result {
|
||||
// Err(AuditError::ServerNotInitialized(_)) => {
|
||||
// // Expected - precedence parsing worked but save failed
|
||||
// }
|
||||
// Err(e) => {
|
||||
// println!("Environment precedence test error: {}", e);
|
||||
// }
|
||||
// Ok(_) => {
|
||||
// println!("Unexpected success in environment precedence test");
|
||||
// }
|
||||
// }
|
||||
// }
|
||||
|
||||
#[test]
|
||||
fn test_target_type_validation() {
|
||||
use rustfs_targets::target::TargetType;
|
||||
@@ -315,19 +273,18 @@ fn create_sample_audit_entry_with_id(id: u32) -> AuditEntry {
|
||||
use chrono::Utc;
|
||||
use rustfs_targets::EventName;
|
||||
use serde_json::json;
|
||||
use std::collections::HashMap;
|
||||
|
||||
let mut req_header = HashMap::new();
|
||||
let mut req_header = hashbrown::HashMap::new();
|
||||
req_header.insert("authorization".to_string(), format!("Bearer test-token-{id}"));
|
||||
req_header.insert("content-type".to_string(), "application/octet-stream".to_string());
|
||||
|
||||
let mut resp_header = HashMap::new();
|
||||
let mut resp_header = hashbrown::HashMap::new();
|
||||
resp_header.insert("x-response".to_string(), "ok".to_string());
|
||||
|
||||
let mut tags = HashMap::new();
|
||||
let mut tags = hashbrown::HashMap::new();
|
||||
tags.insert(format!("tag-{id}"), json!("sample"));
|
||||
|
||||
let mut req_query = HashMap::new();
|
||||
let mut req_query = hashbrown::HashMap::new();
|
||||
req_query.insert("id".to_string(), id.to_string());
|
||||
|
||||
let api_details = ApiDetails {
|
||||
|
||||
@@ -28,7 +28,6 @@ categories = ["web-programming", "development-tools", "data-structures"]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
lazy_static = { workspace = true}
|
||||
tokio = { workspace = true }
|
||||
tonic = { workspace = true }
|
||||
uuid = { workspace = true }
|
||||
@@ -40,3 +39,4 @@ path-clean = { workspace = true }
|
||||
rmp-serde = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
s3s = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
|
||||
<a href="https://docs.rustfs.com/en/">📖 Documentation</a>
|
||||
<a href="https://docs.rustfs.com/">📖 Documentation</a>
|
||||
· <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
|
||||
· <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
|
||||
</p>
|
||||
|
||||
@@ -12,9 +12,9 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::last_minute::{self};
|
||||
use std::collections::HashMap;
|
||||
|
||||
use crate::last_minute::{self};
|
||||
pub struct ReplicationLatency {
|
||||
// Delays for single and multipart PUT requests
|
||||
upload_histogram: last_minute::LastMinuteHistogram,
|
||||
|
||||
@@ -14,10 +14,10 @@
|
||||
|
||||
use path_clean::PathClean;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::hash::{DefaultHasher, Hash, Hasher};
|
||||
use std::path::Path;
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
hash::{DefaultHasher, Hash, Hasher},
|
||||
path::Path,
|
||||
time::SystemTime,
|
||||
};
|
||||
|
||||
|
||||
@@ -16,7 +16,6 @@
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::LazyLock;
|
||||
|
||||
use tokio::sync::RwLock;
|
||||
use tonic::transport::Channel;
|
||||
|
||||
@@ -29,3 +28,28 @@ pub static GLOBAL_Conn_Map: LazyLock<RwLock<HashMap<String, Channel>>> = LazyLoc
|
||||
pub async fn set_global_addr(addr: &str) {
|
||||
*GLOBAL_Rustfs_Addr.write().await = addr.to_string();
|
||||
}
|
||||
|
||||
/// Evict a stale/dead connection from the global connection cache.
|
||||
/// This is critical for cluster recovery when a node dies unexpectedly (e.g., power-off).
|
||||
/// By removing the cached connection, subsequent requests will establish a fresh connection.
|
||||
pub async fn evict_connection(addr: &str) {
|
||||
let removed = GLOBAL_Conn_Map.write().await.remove(addr);
|
||||
if removed.is_some() {
|
||||
tracing::warn!("Evicted stale connection from cache: {}", addr);
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if a connection exists in the cache for the given address.
|
||||
pub async fn has_cached_connection(addr: &str) -> bool {
|
||||
GLOBAL_Conn_Map.read().await.contains_key(addr)
|
||||
}
|
||||
|
||||
/// Clear all cached connections. Useful for full cluster reset/recovery.
|
||||
pub async fn clear_all_connections() {
|
||||
let mut map = GLOBAL_Conn_Map.write().await;
|
||||
let count = map.len();
|
||||
map.clear();
|
||||
if count > 0 {
|
||||
tracing::warn!("Cleared {} cached connections from global map", count);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -18,7 +18,7 @@ use std::{
|
||||
fmt::{self, Display},
|
||||
sync::OnceLock,
|
||||
};
|
||||
use tokio::sync::mpsc;
|
||||
use tokio::sync::{broadcast, mpsc};
|
||||
use uuid::Uuid;
|
||||
|
||||
pub const HEAL_DELETE_DANGLING: bool = true;
|
||||
@@ -85,16 +85,89 @@ impl Display for DriveState {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)]
|
||||
#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
|
||||
#[repr(u8)]
|
||||
pub enum HealScanMode {
|
||||
Unknown,
|
||||
Normal,
|
||||
Deep,
|
||||
Unknown = 0,
|
||||
#[default]
|
||||
Normal = 1,
|
||||
Deep = 2,
|
||||
}
|
||||
|
||||
impl Default for HealScanMode {
|
||||
fn default() -> Self {
|
||||
Self::Normal
|
||||
impl Serialize for HealScanMode {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
serializer.serialize_u8(*self as u8)
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for HealScanMode {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
struct HealScanModeVisitor;
|
||||
|
||||
impl<'de> serde::de::Visitor<'de> for HealScanModeVisitor {
|
||||
type Value = HealScanMode;
|
||||
|
||||
fn expecting(&self, formatter: &mut fmt::Formatter) -> fmt::Result {
|
||||
formatter.write_str("an integer between 0 and 2")
|
||||
}
|
||||
|
||||
fn visit_u8<E>(self, value: u8) -> Result<Self::Value, E>
|
||||
where
|
||||
E: serde::de::Error,
|
||||
{
|
||||
match value {
|
||||
0 => Ok(HealScanMode::Unknown),
|
||||
1 => Ok(HealScanMode::Normal),
|
||||
2 => Ok(HealScanMode::Deep),
|
||||
_ => Err(E::custom(format!("invalid HealScanMode value: {value}"))),
|
||||
}
|
||||
}
|
||||
|
||||
fn visit_u64<E>(self, value: u64) -> Result<Self::Value, E>
|
||||
where
|
||||
E: serde::de::Error,
|
||||
{
|
||||
if value > u8::MAX as u64 {
|
||||
return Err(E::custom(format!("HealScanMode value too large: {value}")));
|
||||
}
|
||||
self.visit_u8(value as u8)
|
||||
}
|
||||
|
||||
fn visit_i64<E>(self, value: i64) -> Result<Self::Value, E>
|
||||
where
|
||||
E: serde::de::Error,
|
||||
{
|
||||
if value < 0 || value > u8::MAX as i64 {
|
||||
return Err(E::custom(format!("invalid HealScanMode value: {value}")));
|
||||
}
|
||||
self.visit_u8(value as u8)
|
||||
}
|
||||
|
||||
fn visit_str<E>(self, value: &str) -> Result<Self::Value, E>
|
||||
where
|
||||
E: serde::de::Error,
|
||||
{
|
||||
// Try parsing as number string first (for URL-encoded values)
|
||||
if let Ok(num) = value.parse::<u8>() {
|
||||
return self.visit_u8(num);
|
||||
}
|
||||
// Try parsing as named string
|
||||
match value {
|
||||
"Unknown" | "unknown" => Ok(HealScanMode::Unknown),
|
||||
"Normal" | "normal" => Ok(HealScanMode::Normal),
|
||||
"Deep" | "deep" => Ok(HealScanMode::Deep),
|
||||
_ => Err(E::custom(format!("invalid HealScanMode string: {value}"))),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
deserializer.deserialize_any(HealScanModeVisitor)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -111,7 +184,9 @@ pub struct HealOpts {
|
||||
pub update_parity: bool,
|
||||
#[serde(rename = "nolock")]
|
||||
pub no_lock: bool,
|
||||
#[serde(rename = "pool", default)]
|
||||
pub pool: Option<usize>,
|
||||
#[serde(rename = "set", default)]
|
||||
pub set: Option<usize>,
|
||||
}
|
||||
|
||||
@@ -175,11 +250,12 @@ pub struct HealChannelResponse {
|
||||
}
|
||||
|
||||
/// Heal priority
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum HealChannelPriority {
|
||||
/// Low priority
|
||||
Low,
|
||||
/// Normal priority
|
||||
#[default]
|
||||
Normal,
|
||||
/// High priority
|
||||
High,
|
||||
@@ -187,12 +263,6 @@ pub enum HealChannelPriority {
|
||||
Critical,
|
||||
}
|
||||
|
||||
impl Default for HealChannelPriority {
|
||||
fn default() -> Self {
|
||||
Self::Normal
|
||||
}
|
||||
}
|
||||
|
||||
/// Heal channel sender
|
||||
pub type HealChannelSender = mpsc::UnboundedSender<HealChannelCommand>;
|
||||
|
||||
@@ -202,6 +272,11 @@ pub type HealChannelReceiver = mpsc::UnboundedReceiver<HealChannelCommand>;
|
||||
/// Global heal channel sender
|
||||
static GLOBAL_HEAL_CHANNEL_SENDER: OnceLock<HealChannelSender> = OnceLock::new();
|
||||
|
||||
type HealResponseSender = broadcast::Sender<HealChannelResponse>;
|
||||
|
||||
/// Global heal response broadcaster
|
||||
static GLOBAL_HEAL_RESPONSE_SENDER: OnceLock<HealResponseSender> = OnceLock::new();
|
||||
|
||||
/// Initialize global heal channel
|
||||
pub fn init_heal_channel() -> HealChannelReceiver {
|
||||
let (tx, rx) = mpsc::unbounded_channel();
|
||||
@@ -228,6 +303,23 @@ pub async fn send_heal_command(command: HealChannelCommand) -> Result<(), String
|
||||
}
|
||||
}
|
||||
|
||||
fn heal_response_sender() -> &'static HealResponseSender {
|
||||
GLOBAL_HEAL_RESPONSE_SENDER.get_or_init(|| {
|
||||
let (tx, _rx) = broadcast::channel(1024);
|
||||
tx
|
||||
})
|
||||
}
|
||||
|
||||
/// Publish a heal response to subscribers.
|
||||
pub fn publish_heal_response(response: HealChannelResponse) -> Result<(), broadcast::error::SendError<HealChannelResponse>> {
|
||||
heal_response_sender().send(response).map(|_| ())
|
||||
}
|
||||
|
||||
/// Subscribe to heal responses.
|
||||
pub fn subscribe_heal_responses() -> broadcast::Receiver<HealChannelResponse> {
|
||||
heal_response_sender().subscribe()
|
||||
}
|
||||
|
||||
/// Send heal start request
|
||||
pub async fn send_heal_request(request: HealChannelRequest) -> Result<(), String> {
|
||||
send_heal_command(HealChannelCommand::Start(request)).await
|
||||
@@ -425,3 +517,20 @@ pub async fn send_heal_disk(set_disk_id: String, priority: Option<HealChannelPri
|
||||
};
|
||||
send_heal_request(req).await
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
async fn heal_response_broadcast_reaches_subscriber() {
|
||||
let mut receiver = subscribe_heal_responses();
|
||||
let response = create_heal_response("req-1".to_string(), true, None, None);
|
||||
|
||||
publish_heal_response(response.clone()).expect("publish should succeed");
|
||||
|
||||
let received = receiver.recv().await.expect("should receive heal response");
|
||||
assert_eq!(received.request_id, response.request_id);
|
||||
assert!(received.success);
|
||||
}
|
||||
}
|
||||
|
||||
@@ -27,11 +27,11 @@ struct TimedAction {
|
||||
#[allow(dead_code)]
|
||||
impl TimedAction {
|
||||
// Avg returns the average time spent on the action.
|
||||
pub fn avg(&self) -> Option<std::time::Duration> {
|
||||
pub fn avg(&self) -> Option<Duration> {
|
||||
if self.count == 0 {
|
||||
return None;
|
||||
}
|
||||
Some(std::time::Duration::from_nanos(self.acc_time / self.count))
|
||||
Some(Duration::from_nanos(self.acc_time / self.count))
|
||||
}
|
||||
|
||||
// AvgBytes returns the average bytes processed.
|
||||
@@ -860,7 +860,7 @@ impl LastMinuteHistogram {
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add(&mut self, size: i64, t: std::time::Duration) {
|
||||
pub fn add(&mut self, size: i64, t: Duration) {
|
||||
let index = size_to_tag(size);
|
||||
self.histogram[index].add(&t);
|
||||
}
|
||||
|
||||
@@ -12,23 +12,21 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::last_minute::{AccElem, LastMinuteLatency};
|
||||
use chrono::{DateTime, Utc};
|
||||
use lazy_static::lazy_static;
|
||||
use rustfs_madmin::metrics::ScannerMetrics as M_ScannerMetrics;
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
fmt::Display,
|
||||
pin::Pin,
|
||||
sync::{
|
||||
Arc,
|
||||
Arc, OnceLock,
|
||||
atomic::{AtomicU64, Ordering},
|
||||
},
|
||||
time::{Duration, SystemTime},
|
||||
};
|
||||
use tokio::sync::{Mutex, RwLock};
|
||||
|
||||
use crate::last_minute::{AccElem, LastMinuteLatency};
|
||||
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum IlmAction {
|
||||
NoneAction = 0,
|
||||
@@ -73,8 +71,10 @@ impl Display for IlmAction {
|
||||
}
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
pub static ref globalMetrics: Arc<Metrics> = Arc::new(Metrics::new());
|
||||
pub static GLOBAL_METRICS: OnceLock<Arc<Metrics>> = OnceLock::new();
|
||||
|
||||
pub fn global_metrics() -> &'static Arc<Metrics> {
|
||||
GLOBAL_METRICS.get_or_init(|| Arc::new(Metrics::new()))
|
||||
}
|
||||
|
||||
#[derive(Clone, Debug, PartialEq, PartialOrd)]
|
||||
@@ -294,13 +294,13 @@ impl Metrics {
|
||||
let duration = SystemTime::now().duration_since(start_time).unwrap_or_default();
|
||||
|
||||
// Update operation count
|
||||
globalMetrics.operations[metric].fetch_add(1, Ordering::Relaxed);
|
||||
global_metrics().operations[metric].fetch_add(1, Ordering::Relaxed);
|
||||
|
||||
// Update latency for realtime metrics (spawn async task for this)
|
||||
if (metric) < Metric::LastRealtime as usize {
|
||||
let metric_index = metric;
|
||||
tokio::spawn(async move {
|
||||
globalMetrics.latency[metric_index].add(duration).await;
|
||||
global_metrics().latency[metric_index].add(duration).await;
|
||||
});
|
||||
}
|
||||
|
||||
@@ -319,13 +319,13 @@ impl Metrics {
|
||||
let duration = SystemTime::now().duration_since(start_time).unwrap_or_default();
|
||||
|
||||
// Update operation count
|
||||
globalMetrics.operations[metric].fetch_add(1, Ordering::Relaxed);
|
||||
global_metrics().operations[metric].fetch_add(1, Ordering::Relaxed);
|
||||
|
||||
// Update latency for realtime metrics with size (spawn async task)
|
||||
if (metric) < Metric::LastRealtime as usize {
|
||||
let metric_index = metric;
|
||||
tokio::spawn(async move {
|
||||
globalMetrics.latency[metric_index].add_size(duration, size).await;
|
||||
global_metrics().latency[metric_index].add_size(duration, size).await;
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -339,13 +339,13 @@ impl Metrics {
|
||||
let duration = SystemTime::now().duration_since(start_time).unwrap_or_default();
|
||||
|
||||
// Update operation count
|
||||
globalMetrics.operations[metric].fetch_add(1, Ordering::Relaxed);
|
||||
global_metrics().operations[metric].fetch_add(1, Ordering::Relaxed);
|
||||
|
||||
// Update latency for realtime metrics (spawn async task)
|
||||
if (metric) < Metric::LastRealtime as usize {
|
||||
let metric_index = metric;
|
||||
tokio::spawn(async move {
|
||||
globalMetrics.latency[metric_index].add(duration).await;
|
||||
global_metrics().latency[metric_index].add(duration).await;
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -360,13 +360,13 @@ impl Metrics {
|
||||
let duration = SystemTime::now().duration_since(start_time).unwrap_or_default();
|
||||
|
||||
// Update operation count
|
||||
globalMetrics.operations[metric].fetch_add(count as u64, Ordering::Relaxed);
|
||||
global_metrics().operations[metric].fetch_add(count as u64, Ordering::Relaxed);
|
||||
|
||||
// Update latency for realtime metrics (spawn async task)
|
||||
if (metric) < Metric::LastRealtime as usize {
|
||||
let metric_index = metric;
|
||||
tokio::spawn(async move {
|
||||
globalMetrics.latency[metric_index].add(duration).await;
|
||||
global_metrics().latency[metric_index].add(duration).await;
|
||||
});
|
||||
}
|
||||
})
|
||||
@@ -384,8 +384,8 @@ impl Metrics {
|
||||
Box::new(move || {
|
||||
let duration = SystemTime::now().duration_since(start).unwrap_or(Duration::from_secs(0));
|
||||
tokio::spawn(async move {
|
||||
globalMetrics.actions[a_clone].fetch_add(versions, Ordering::Relaxed);
|
||||
globalMetrics.actions_latency[a_clone].add(duration).await;
|
||||
global_metrics().actions[a_clone].fetch_add(versions, Ordering::Relaxed);
|
||||
global_metrics().actions_latency[a_clone].add(duration).await;
|
||||
});
|
||||
})
|
||||
})
|
||||
@@ -395,11 +395,11 @@ impl Metrics {
|
||||
pub async fn inc_time(metric: Metric, duration: Duration) {
|
||||
let metric = metric as usize;
|
||||
// Update operation count
|
||||
globalMetrics.operations[metric].fetch_add(1, Ordering::Relaxed);
|
||||
global_metrics().operations[metric].fetch_add(1, Ordering::Relaxed);
|
||||
|
||||
// Update latency for realtime metrics
|
||||
if (metric) < Metric::LastRealtime as usize {
|
||||
globalMetrics.latency[metric].add(duration).await;
|
||||
global_metrics().latency[metric].add(duration).await;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -501,7 +501,7 @@ pub fn current_path_updater(disk: &str, initial: &str) -> (UpdateCurrentPathFn,
|
||||
let tracker_clone = Arc::clone(&tracker);
|
||||
let disk_clone = disk_name.clone();
|
||||
tokio::spawn(async move {
|
||||
globalMetrics.current_paths.write().await.insert(disk_clone, tracker_clone);
|
||||
global_metrics().current_paths.write().await.insert(disk_clone, tracker_clone);
|
||||
});
|
||||
|
||||
let update_fn = {
|
||||
@@ -520,7 +520,7 @@ pub fn current_path_updater(disk: &str, initial: &str) -> (UpdateCurrentPathFn,
|
||||
Arc::new(move || -> Pin<Box<dyn std::future::Future<Output = ()> + Send>> {
|
||||
let disk_name = disk_name.clone();
|
||||
Box::pin(async move {
|
||||
globalMetrics.current_paths.write().await.remove(&disk_name);
|
||||
global_metrics().current_paths.write().await.remove(&disk_name);
|
||||
})
|
||||
})
|
||||
};
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
|
||||
<a href="https://docs.rustfs.com/en/">📖 Documentation</a>
|
||||
<a href="https://docs.rustfs.com/">📖 Documentation</a>
|
||||
· <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
|
||||
· <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
|
||||
</p>
|
||||
|
||||
@@ -16,8 +16,8 @@
|
||||
//! This module defines the configuration for audit systems, including
|
||||
//! webhook and MQTT audit-related settings.
|
||||
|
||||
pub(crate) mod mqtt;
|
||||
pub(crate) mod webhook;
|
||||
mod mqtt;
|
||||
mod webhook;
|
||||
|
||||
pub use mqtt::*;
|
||||
pub use webhook::*;
|
||||
|
||||
@@ -21,12 +21,12 @@ pub const APP_NAME: &str = "RustFS";
|
||||
/// Application version
|
||||
/// Default value: 1.0.0
|
||||
/// Environment variable: RUSTFS_VERSION
|
||||
pub const VERSION: &str = "0.0.1";
|
||||
pub const VERSION: &str = "1.0.0";
|
||||
|
||||
/// Default configuration logger level
|
||||
/// Default value: info
|
||||
/// Environment variable: RUSTFS_LOG_LEVEL
|
||||
pub const DEFAULT_LOG_LEVEL: &str = "info";
|
||||
/// Default value: error
|
||||
/// Environment variable: RUSTFS_OBS_LOGGER_LEVEL
|
||||
pub const DEFAULT_LOG_LEVEL: &str = "error";
|
||||
|
||||
/// Default configuration use stdout
|
||||
/// Default value: false
|
||||
@@ -40,22 +40,15 @@ pub const SAMPLE_RATIO: f64 = 1.0;
|
||||
pub const METER_INTERVAL: u64 = 30;
|
||||
|
||||
/// Default configuration service version
|
||||
/// Default value: 0.0.1
|
||||
pub const SERVICE_VERSION: &str = "0.0.1";
|
||||
/// Default value: 1.0.0
|
||||
/// Environment variable: RUSTFS_OBS_SERVICE_VERSION
|
||||
/// Uses the same value as VERSION constant
|
||||
pub const SERVICE_VERSION: &str = "1.0.0";
|
||||
|
||||
/// Default configuration environment
|
||||
/// Default value: production
|
||||
pub const ENVIRONMENT: &str = "production";
|
||||
|
||||
/// maximum number of connections
|
||||
/// This is the maximum number of connections that the server will accept.
|
||||
/// This is used to limit the number of connections to the server.
|
||||
pub const MAX_CONNECTIONS: usize = 100;
|
||||
/// timeout for connections
|
||||
/// This is the timeout for connections to the server.
|
||||
/// This is used to limit the time that a connection can be open.
|
||||
pub const DEFAULT_TIMEOUT_MS: u64 = 3000;
|
||||
|
||||
/// Default Access Key
|
||||
/// Default value: rustfsadmin
|
||||
/// Environment variable: RUSTFS_ACCESS_KEY
|
||||
@@ -145,18 +138,27 @@ pub const DEFAULT_LOG_ROTATION_SIZE_MB: u64 = 100;
|
||||
/// It is used to rotate the logs of the application.
|
||||
/// Default value: hour, eg: day,hour,minute,second
|
||||
/// Environment variable: RUSTFS_OBS_LOG_ROTATION_TIME
|
||||
pub const DEFAULT_LOG_ROTATION_TIME: &str = "day";
|
||||
pub const DEFAULT_LOG_ROTATION_TIME: &str = "hour";
|
||||
|
||||
/// Default log keep files for rustfs
|
||||
/// This is the default log keep files for rustfs.
|
||||
/// It is used to keep the logs of the application.
|
||||
/// Default value: 30
|
||||
/// Environment variable: RUSTFS_OBS_LOG_KEEP_FILES
|
||||
pub const DEFAULT_LOG_KEEP_FILES: u16 = 30;
|
||||
pub const DEFAULT_LOG_KEEP_FILES: usize = 30;
|
||||
|
||||
/// 1 KiB
|
||||
/// Default log local logging enabled for rustfs
|
||||
/// This is the default log local logging enabled for rustfs.
|
||||
/// It is used to enable or disable local logging of the application.
|
||||
/// Default value: false
|
||||
/// Environment variable: RUSTFS_OBS_LOGL_STDOUT_ENABLED
|
||||
pub const DEFAULT_OBS_LOG_STDOUT_ENABLED: bool = false;
|
||||
|
||||
/// Constant representing 1 Kibibyte (1024 bytes)
|
||||
/// Default value: 1024
|
||||
pub const KI_B: usize = 1024;
|
||||
/// 1 MiB
|
||||
/// Constant representing 1 Mebibyte (1024 * 1024 bytes)
|
||||
/// Default value: 1048576
|
||||
pub const MI_B: usize = 1024 * 1024;
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -169,16 +171,16 @@ mod tests {
|
||||
assert_eq!(APP_NAME, "RustFS");
|
||||
assert!(!APP_NAME.contains(' '), "App name should not contain spaces");
|
||||
|
||||
assert_eq!(VERSION, "0.0.1");
|
||||
assert_eq!(VERSION, "1.0.0");
|
||||
|
||||
assert_eq!(SERVICE_VERSION, "0.0.1");
|
||||
assert_eq!(SERVICE_VERSION, "1.0.0");
|
||||
assert_eq!(VERSION, SERVICE_VERSION, "Version and service version should be consistent");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_logging_constants() {
|
||||
// Test logging related constants
|
||||
assert_eq!(DEFAULT_LOG_LEVEL, "info");
|
||||
assert_eq!(DEFAULT_LOG_LEVEL, "error");
|
||||
assert!(
|
||||
["trace", "debug", "info", "warn", "error"].contains(&DEFAULT_LOG_LEVEL),
|
||||
"Log level should be a valid tracing level"
|
||||
@@ -199,14 +201,6 @@ mod tests {
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_connection_constants() {
|
||||
// Test connection related constants
|
||||
assert_eq!(MAX_CONNECTIONS, 100);
|
||||
|
||||
assert_eq!(DEFAULT_TIMEOUT_MS, 3000);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_security_constants() {
|
||||
// Test security related constants
|
||||
@@ -309,8 +303,8 @@ mod tests {
|
||||
// assert!(DEFAULT_TIMEOUT_MS < u64::MAX, "Timeout should be reasonable");
|
||||
|
||||
// These are const non-zero values, so zero checks are redundant
|
||||
// assert!(DEFAULT_PORT != 0, "Default port should not be zero");
|
||||
// assert!(DEFAULT_CONSOLE_PORT != 0, "Console port should not be zero");
|
||||
assert_ne!(DEFAULT_PORT, 0, "Default port should not be zero");
|
||||
assert_ne!(DEFAULT_CONSOLE_PORT, 0, "Console port should not be zero");
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
88
crates/config/src/constants/heal.rs
Normal file
88
crates/config/src/constants/heal.rs
Normal file
@@ -0,0 +1,88 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/// Environment variable name that enables or disables auto-heal functionality.
|
||||
/// - Purpose: Control whether the system automatically performs heal operations.
|
||||
/// - Valid values: "true" or "false" (case insensitive).
|
||||
/// - Semantics: When set to "true", auto-heal is enabled and the system will automatically attempt to heal detected issues; when set to "false", auto-heal is disabled and healing must be triggered manually.
|
||||
/// - Example: `export RUSTFS_HEAL_AUTO_HEAL_ENABLE=true`
|
||||
/// - Note: Enabling auto-heal can improve system resilience by automatically addressing issues, but may increase resource usage; evaluate based on your operational requirements.
|
||||
pub const ENV_HEAL_AUTO_HEAL_ENABLE: &str = "RUSTFS_HEAL_AUTO_HEAL_ENABLE";
|
||||
|
||||
/// Environment variable name that specifies the heal queue size.
|
||||
///
|
||||
/// - Purpose: Set the maximum number of heal requests that can be queued.
|
||||
/// - Unit: number of requests (usize).
|
||||
/// - Valid values: any positive integer.
|
||||
/// - Semantics: When the heal queue reaches this size, new heal requests may be rejected or blocked until space is available; tune according to expected heal workload and system capacity.
|
||||
/// - Example: `export RUSTFS_HEAL_QUEUE_SIZE=10000`
|
||||
/// - Note: A larger queue size can accommodate bursts of heal requests but may increase memory usage.
|
||||
pub const ENV_HEAL_QUEUE_SIZE: &str = "RUSTFS_HEAL_QUEUE_SIZE";
|
||||
/// Environment variable name that specifies the heal interval in seconds.
|
||||
/// - Purpose: Define the time interval between successive heal operations.
|
||||
/// - Unit: seconds (u64).
|
||||
/// - Valid values: any positive integer.
|
||||
/// - Semantics: This interval controls how frequently the heal manager checks for and processes heal requests; shorter intervals lead to more responsive healing but may increase system load.
|
||||
/// - Example: `export RUSTFS_HEAL_INTERVAL_SECS=10`
|
||||
/// - Note: Choose an interval that balances healing responsiveness with overall system performance.
|
||||
pub const ENV_HEAL_INTERVAL_SECS: &str = "RUSTFS_HEAL_INTERVAL_SECS";
|
||||
|
||||
/// Environment variable name that specifies the heal task timeout in seconds.
|
||||
/// - Purpose: Set the maximum duration allowed for a heal task to complete.
|
||||
/// - Unit: seconds (u64).
|
||||
/// - Valid values: any positive integer.
|
||||
/// - Semantics: If a heal task exceeds this timeout, it may be aborted or retried; tune according to the expected duration of heal operations and system performance characteristics.
|
||||
/// - Example: `export RUSTFS_HEAL_TASK_TIMEOUT_SECS=300`
|
||||
/// - Note: Setting an appropriate timeout helps prevent long-running heal tasks from impacting system stability.
|
||||
pub const ENV_HEAL_TASK_TIMEOUT_SECS: &str = "RUSTFS_HEAL_TASK_TIMEOUT_SECS";
|
||||
|
||||
/// Environment variable name that specifies the maximum number of concurrent heal operations.
|
||||
/// - Purpose: Limit the number of heal operations that can run simultaneously.
|
||||
/// - Unit: number of operations (usize).
|
||||
/// - Valid values: any positive integer.
|
||||
/// - Semantics: This limit helps control resource usage during healing; tune according to system capacity and expected heal workload.
|
||||
/// - Example: `export RUSTFS_HEAL_MAX_CONCURRENT_HEALS=4`
|
||||
/// - Note: A higher concurrency limit can speed up healing but may lead to resource contention.
|
||||
pub const ENV_HEAL_MAX_CONCURRENT_HEALS: &str = "RUSTFS_HEAL_MAX_CONCURRENT_HEALS";
|
||||
|
||||
/// Default value for enabling authentication for heal operations if not specified in the environment variable.
|
||||
/// - Value: true (authentication enabled).
|
||||
/// - Rationale: Enabling authentication by default enhances security for heal operations.
|
||||
/// - Adjustments: Users may disable this feature via the `RUSTFS_HEAL_AUTO_HEAL_ENABLE` environment variable based on their security requirements.
|
||||
pub const DEFAULT_HEAL_AUTO_HEAL_ENABLE: bool = true;
|
||||
|
||||
/// Default heal queue size if not specified in the environment variable.
|
||||
///
|
||||
/// - Value: 10,000 requests.
|
||||
/// - Rationale: This default size balances the need to handle typical heal workloads without excessive memory consumption.
|
||||
/// - Adjustments: Users may modify this value via the `RUSTFS_HEAL_QUEUE_SIZE` environment variable based on their specific use cases and system capabilities.
|
||||
pub const DEFAULT_HEAL_QUEUE_SIZE: usize = 10_000;
|
||||
|
||||
/// Default heal interval in seconds if not specified in the environment variable.
|
||||
/// - Value: 10 seconds.
|
||||
/// - Rationale: This default interval provides a reasonable balance between healing responsiveness and system load for most deployments.
|
||||
/// - Adjustments: Users may modify this value via the `RUSTFS_HEAL_INTERVAL_SECS` environment variable based on their specific healing requirements and system performance.
|
||||
pub const DEFAULT_HEAL_INTERVAL_SECS: u64 = 10;
|
||||
|
||||
/// Default heal task timeout in seconds if not specified in the environment variable.
|
||||
/// - Value: 300 seconds (5 minutes).
|
||||
/// - Rationale: This default timeout allows sufficient time for most heal operations to complete while preventing excessively long-running tasks.
|
||||
/// - Adjustments: Users may modify this value via the `RUSTFS_HEAL_TASK_TIMEOUT_SECS` environment variable based on their specific heal operation characteristics and system performance.
|
||||
pub const DEFAULT_HEAL_TASK_TIMEOUT_SECS: u64 = 300; // 5 minutes
|
||||
|
||||
/// Default maximum number of concurrent heal operations if not specified in the environment variable.
|
||||
/// - Value: 4 concurrent heal operations.
|
||||
/// - Rationale: This default concurrency limit helps balance healing speed with resource usage, preventing system overload.
|
||||
/// - Adjustments: Users may modify this value via the `RUSTFS_HEAL_MAX_CONCURRENT_HEALS` environment variable based on their system capacity and expected heal workload.
|
||||
pub const DEFAULT_HEAL_MAX_CONCURRENT_HEALS: usize = 4;
|
||||
@@ -15,6 +15,9 @@
|
||||
pub(crate) mod app;
|
||||
pub(crate) mod console;
|
||||
pub(crate) mod env;
|
||||
pub(crate) mod heal;
|
||||
pub(crate) mod object;
|
||||
pub(crate) mod profiler;
|
||||
pub(crate) mod runtime;
|
||||
pub(crate) mod targets;
|
||||
pub(crate) mod tls;
|
||||
|
||||
169
crates/config/src/constants/object.rs
Normal file
169
crates/config/src/constants/object.rs
Normal file
@@ -0,0 +1,169 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/// Environment variable name to toggle object-level in-memory caching.
|
||||
///
|
||||
/// - Purpose: Enable or disable the object-level in-memory cache (moka).
|
||||
/// - Acceptable values: `"true"` / `"false"` (case-insensitive) or a boolean typed config.
|
||||
/// - Semantics: When enabled, the system keeps fully-read objects in memory to reduce backend requests; when disabled, reads bypass the object cache.
|
||||
/// - Example: `export RUSTFS_OBJECT_CACHE_ENABLE=true`
|
||||
/// - Note: Evaluate together with `RUSTFS_OBJECT_CACHE_CAPACITY_MB`, TTL/TTI and concurrency thresholds to balance memory usage and throughput.
|
||||
pub const ENV_OBJECT_CACHE_ENABLE: &str = "RUSTFS_OBJECT_CACHE_ENABLE";
|
||||
|
||||
/// Environment variable name that specifies the object cache capacity in megabytes.
|
||||
///
|
||||
/// - Purpose: Set the maximum total capacity of the object cache (in MB).
|
||||
/// - Unit: MB (1 MB = 1_048_576 bytes).
|
||||
/// - Valid values: any positive integer (0 may indicate disabled or alternative handling).
|
||||
/// - Semantics: When the moka cache reaches this capacity, eviction policies will remove entries; tune according to available memory and object size distribution.
|
||||
/// - Example: `export RUSTFS_OBJECT_CACHE_CAPACITY_MB=512`
|
||||
/// - Note: Actual memory usage will be slightly higher due to object headers and indexing overhead.
|
||||
pub const ENV_OBJECT_CACHE_CAPACITY_MB: &str = "RUSTFS_OBJECT_CACHE_CAPACITY_MB";
|
||||
|
||||
/// Environment variable name for maximum object size eligible for caching in megabytes.
|
||||
///
|
||||
/// - Purpose: Define the upper size limit for individual objects to be considered for caching.
|
||||
/// - Unit: MB (1 MB = 1_048_576 bytes).
|
||||
/// - Valid values: any positive integer; objects larger than this size will not be cached.
|
||||
/// - Semantics: Prevents caching of excessively large objects that could monopolize cache capacity; tune based on typical object size distribution.
|
||||
/// - Example: `export RUSTFS_OBJECT_CACHE_MAX_OBJECT_SIZE_MB=50`
|
||||
/// - Note: Setting this too low may reduce cache effectiveness; setting it too high may lead to inefficient memory usage.
|
||||
pub const ENV_OBJECT_CACHE_MAX_OBJECT_SIZE_MB: &str = "RUSTFS_OBJECT_CACHE_MAX_OBJECT_SIZE_MB";
|
||||
|
||||
/// Environment variable name for object cache TTL (time-to-live) in seconds.
|
||||
///
|
||||
/// - Purpose: Specify the maximum lifetime of a cached entry from the moment it is written.
|
||||
/// - Unit: seconds (u64).
|
||||
/// - Semantics: TTL acts as a hard upper bound; entries older than TTL are considered expired and removed by periodic cleanup.
|
||||
/// - Example: `export RUSTFS_OBJECT_CACHE_TTL_SECS=300`
|
||||
/// - Note: TTL and TTI both apply; either policy can cause eviction.
|
||||
pub const ENV_OBJECT_CACHE_TTL_SECS: &str = "RUSTFS_OBJECT_CACHE_TTL_SECS";
|
||||
|
||||
/// Environment variable name for object cache TTI (time-to-idle) in seconds.
|
||||
///
|
||||
/// - Purpose: Specify how long an entry may remain in cache without being accessed before it is evicted.
|
||||
/// - Unit: seconds (u64).
|
||||
/// - Semantics: TTI helps remove one-time or infrequently used entries; frequent accesses reset idle timers but do not extend beyond TTL unless additional logic exists.
|
||||
/// - Example: `export RUSTFS_OBJECT_CACHE_TTI_SECS=120`
|
||||
/// - Note: Works together with TTL to keep the cache populated with actively used objects.
|
||||
pub const ENV_OBJECT_CACHE_TTI_SECS: &str = "RUSTFS_OBJECT_CACHE_TTI_SECS";
|
||||
|
||||
/// Environment variable name for threshold of "hot" object hit count used to extend life.
|
||||
///
|
||||
/// - Purpose: Define a hit-count threshold to mark objects as "hot" so they may be treated preferentially near expiration.
|
||||
/// - Valid values: positive integer (usize).
|
||||
/// - Semantics: Objects reaching this hit count can be considered for relaxed eviction to avoid thrashing hot items.
|
||||
/// - Example: `export RUSTFS_OBJECT_HOT_MIN_HITS_TO_EXTEND=5`
|
||||
/// - Note: This is an optional enhancement and requires cache-layer statistics and extension logic to take effect.
|
||||
pub const ENV_OBJECT_HOT_MIN_HITS_TO_EXTEND: &str = "RUSTFS_OBJECT_HOT_MIN_HITS_TO_EXTEND";
|
||||
|
||||
/// Environment variable name for high concurrency threshold used in adaptive buffering.
|
||||
///
|
||||
/// - Purpose: When concurrent request count exceeds this threshold, the system enters a "high concurrency" optimization mode to reduce per-request buffer sizes.
|
||||
/// - Unit: request count (usize).
|
||||
/// - Semantics: High concurrency mode reduces per-request buffers (e.g., to a fraction of base size) to protect overall memory and fairness.
|
||||
/// - Example: `export RUSTFS_OBJECT_HIGH_CONCURRENCY_THRESHOLD=8`
|
||||
/// - Note: This affects buffering and I/O behavior, not cache capacity directly.
|
||||
pub const ENV_OBJECT_HIGH_CONCURRENCY_THRESHOLD: &str = "RUSTFS_OBJECT_HIGH_CONCURRENCY_THRESHOLD";
|
||||
|
||||
/// Environment variable name for medium concurrency threshold used in adaptive buffering.
|
||||
///
|
||||
/// - Purpose: Define the boundary for "medium concurrency" where more moderate buffer adjustments apply.
|
||||
/// - Unit: request count (usize).
|
||||
/// - Semantics: In the medium range, buffers are reduced moderately to balance throughput and memory efficiency.
|
||||
/// - Example: `export RUSTFS_OBJECT_MEDIUM_CONCURRENCY_THRESHOLD=4`
|
||||
/// - Note: Tune this value based on target workload and hardware.
|
||||
pub const ENV_OBJECT_MEDIUM_CONCURRENCY_THRESHOLD: &str = "RUSTFS_OBJECT_MEDIUM_CONCURRENCY_THRESHOLD";
|
||||
|
||||
/// Environment variable name for maximum concurrent disk reads for object operations.
|
||||
/// - Purpose: Limit the number of concurrent disk read operations for object reads to prevent I/O saturation.
|
||||
/// - Unit: request count (usize).
|
||||
/// - Semantics: Throttling disk reads helps maintain overall system responsiveness under load.
|
||||
/// - Example: `export RUSTFS_OBJECT_MAX_CONCURRENT_DISK_READS=16`
|
||||
/// - Note: This setting may interact with OS-level I/O scheduling and should be tuned based on hardware capabilities.
|
||||
pub const ENV_OBJECT_MAX_CONCURRENT_DISK_READS: &str = "RUSTFS_OBJECT_MAX_CONCURRENT_DISK_READS";
|
||||
|
||||
/// Default: object caching is disabled.
|
||||
///
|
||||
/// - Semantics: Safe default to avoid unexpected memory usage or cache consistency concerns when not explicitly enabled.
|
||||
/// - Default is set to false (disabled).
|
||||
pub const DEFAULT_OBJECT_CACHE_ENABLE: bool = false;
|
||||
|
||||
/// Default object cache capacity in MB.
|
||||
///
|
||||
/// - Default: 100 MB (can be overridden by `RUSTFS_OBJECT_CACHE_CAPACITY_MB`).
|
||||
/// - Note: Choose a conservative default to reduce memory pressure in development/testing.
|
||||
pub const DEFAULT_OBJECT_CACHE_CAPACITY_MB: u64 = 100;
|
||||
|
||||
/// Default maximum object size eligible for caching in MB.
|
||||
///
|
||||
/// - Default: 10 MB (can be overridden by `RUSTFS_OBJECT_CACHE_MAX_OBJECT_SIZE_MB`).
|
||||
/// - Note: Balances caching effectiveness with memory usage.
|
||||
pub const DEFAULT_OBJECT_CACHE_MAX_OBJECT_SIZE_MB: usize = 10;
|
||||
|
||||
/// Maximum concurrent requests before applying aggressive optimization.
|
||||
///
|
||||
/// When concurrent requests exceed this threshold (>8), the system switches to
|
||||
/// aggressive memory optimization mode, reducing buffer sizes to 40% of base size
|
||||
/// to prevent memory exhaustion and ensure fair resource allocation.
|
||||
///
|
||||
/// This helps maintain system stability under high load conditions.
|
||||
/// Default is set to 8 concurrent requests.
|
||||
pub const DEFAULT_OBJECT_HIGH_CONCURRENCY_THRESHOLD: usize = 8;
|
||||
|
||||
/// Medium concurrency threshold for buffer size adjustment.
|
||||
///
|
||||
/// At this level (3-4 requests), buffers are reduced to 75% of base size to
|
||||
/// balance throughput and memory efficiency as load increases.
|
||||
///
|
||||
/// This helps maintain performance without overly aggressive memory reduction.
|
||||
///
|
||||
/// Default is set to 4 concurrent requests.
|
||||
pub const DEFAULT_OBJECT_MEDIUM_CONCURRENCY_THRESHOLD: usize = 4;
|
||||
|
||||
/// Maximum concurrent disk reads for object operations.
|
||||
/// Limits the number of simultaneous disk read operations to prevent I/O saturation.
|
||||
///
|
||||
/// A higher value may improve throughput on high-performance storage,
|
||||
/// but could also lead to increased latency if the disk becomes overloaded.
|
||||
///
|
||||
/// Default is set to 64 concurrent reads.
|
||||
pub const DEFAULT_OBJECT_MAX_CONCURRENT_DISK_READS: usize = 64;
|
||||
|
||||
/// Time-to-live for cached objects (5 minutes = 300 seconds).
|
||||
///
|
||||
/// After this duration, cached objects are automatically expired by Moka's
|
||||
/// background cleanup process, even if they haven't been accessed. This prevents
|
||||
/// stale data from consuming cache capacity indefinitely.
|
||||
///
|
||||
/// Default is set to 300 seconds.
|
||||
pub const DEFAULT_OBJECT_CACHE_TTL_SECS: u64 = 300;
|
||||
|
||||
/// Time-to-idle for cached objects (2 minutes = 120 seconds).
|
||||
///
|
||||
/// Objects that haven't been accessed for this duration are automatically evicted,
|
||||
/// even if their TTL hasn't expired. This ensures cache is populated with actively
|
||||
/// used objects and clears out one-time reads efficiently.
|
||||
///
|
||||
/// Default is set to 120 seconds.
|
||||
pub const DEFAULT_OBJECT_CACHE_TTI_SECS: u64 = 120;
|
||||
|
||||
/// Minimum hit count to extend object lifetime beyond TTL.
|
||||
///
|
||||
/// "Hot" objects that have been accessed at least this many times are treated
|
||||
/// specially - they can survive longer in cache even as they approach TTL expiration.
|
||||
/// This prevents frequently accessed objects from being evicted prematurely.
|
||||
///
|
||||
/// Default is set to 5 hits.
|
||||
pub const DEFAULT_OBJECT_HOT_MIN_HITS_TO_EXTEND: usize = 5;
|
||||
50
crates/config/src/constants/profiler.rs
Normal file
50
crates/config/src/constants/profiler.rs
Normal file
@@ -0,0 +1,50 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/// Profiler related environment variable names and default values
|
||||
pub const ENV_ENABLE_PROFILING: &str = "RUSTFS_ENABLE_PROFILING";
|
||||
|
||||
// CPU profiling
|
||||
pub const ENV_CPU_MODE: &str = "RUSTFS_PROF_CPU_MODE"; // off|continuous|periodic
|
||||
/// Frequency of CPU profiling samples
|
||||
pub const ENV_CPU_FREQ: &str = "RUSTFS_PROF_CPU_FREQ";
|
||||
/// Interval between CPU profiling sessions (for periodic mode)
|
||||
pub const ENV_CPU_INTERVAL_SECS: &str = "RUSTFS_PROF_CPU_INTERVAL_SECS";
|
||||
/// Duration of each CPU profiling session (for periodic mode)
|
||||
pub const ENV_CPU_DURATION_SECS: &str = "RUSTFS_PROF_CPU_DURATION_SECS";
|
||||
|
||||
/// Memory profiling (jemalloc)
|
||||
pub const ENV_MEM_PERIODIC: &str = "RUSTFS_PROF_MEM_PERIODIC";
|
||||
/// Interval between memory profiling snapshots (for periodic mode)
|
||||
pub const ENV_MEM_INTERVAL_SECS: &str = "RUSTFS_PROF_MEM_INTERVAL_SECS";
|
||||
|
||||
/// Output directory
|
||||
pub const ENV_OUTPUT_DIR: &str = "RUSTFS_PROF_OUTPUT_DIR";
|
||||
|
||||
/// Defaults for profiler settings
|
||||
pub const DEFAULT_ENABLE_PROFILING: bool = false;
|
||||
/// CPU profiling
|
||||
pub const DEFAULT_CPU_MODE: &str = "off";
|
||||
/// Frequency of CPU profiling samples
|
||||
pub const DEFAULT_CPU_FREQ: usize = 100;
|
||||
/// Interval between CPU profiling sessions (for periodic mode)
|
||||
pub const DEFAULT_CPU_INTERVAL_SECS: u64 = 300;
|
||||
/// Duration of each CPU profiling session (for periodic mode)
|
||||
pub const DEFAULT_CPU_DURATION_SECS: u64 = 60;
|
||||
/// Memory profiling (jemalloc)
|
||||
pub const DEFAULT_MEM_PERIODIC: bool = false;
|
||||
/// Interval between memory profiling snapshots (for periodic mode)
|
||||
pub const DEFAULT_MEM_INTERVAL_SECS: u64 = 300;
|
||||
/// Output directory
|
||||
pub const DEFAULT_OUTPUT_DIR: &str = ".";
|
||||
@@ -22,7 +22,10 @@ pub const ENV_THREAD_STACK_SIZE: &str = "RUSTFS_RUNTIME_THREAD_STACK_SIZE";
|
||||
pub const ENV_THREAD_KEEP_ALIVE: &str = "RUSTFS_RUNTIME_THREAD_KEEP_ALIVE";
|
||||
pub const ENV_GLOBAL_QUEUE_INTERVAL: &str = "RUSTFS_RUNTIME_GLOBAL_QUEUE_INTERVAL";
|
||||
pub const ENV_THREAD_NAME: &str = "RUSTFS_RUNTIME_THREAD_NAME";
|
||||
pub const ENV_MAX_IO_EVENTS_PER_TICK: &str = "RUSTFS_RUNTIME_MAX_IO_EVENTS_PER_TICK";
|
||||
pub const ENV_RNG_SEED: &str = "RUSTFS_RUNTIME_RNG_SEED";
|
||||
/// Event polling interval
|
||||
pub const ENV_EVENT_INTERVAL: &str = "RUSTFS_RUNTIME_EVENT_INTERVAL";
|
||||
|
||||
// Default values for Tokio runtime
|
||||
pub const DEFAULT_WORKER_THREADS: usize = 16;
|
||||
@@ -32,4 +35,7 @@ pub const DEFAULT_THREAD_STACK_SIZE: usize = MI_B; // 1 MiB
|
||||
pub const DEFAULT_THREAD_KEEP_ALIVE: u64 = 60; // seconds
|
||||
pub const DEFAULT_GLOBAL_QUEUE_INTERVAL: u32 = 31;
|
||||
pub const DEFAULT_THREAD_NAME: &str = "rustfs-worker";
|
||||
pub const DEFAULT_MAX_IO_EVENTS_PER_TICK: usize = 1024;
|
||||
/// Event polling default (Tokio default 61)
|
||||
pub const DEFAULT_EVENT_INTERVAL: u32 = 61;
|
||||
pub const DEFAULT_RNG_SEED: Option<u64> = None; // None means random
|
||||
|
||||
@@ -21,6 +21,12 @@ pub use constants::console::*;
|
||||
#[cfg(feature = "constants")]
|
||||
pub use constants::env::*;
|
||||
#[cfg(feature = "constants")]
|
||||
pub use constants::heal::*;
|
||||
#[cfg(feature = "constants")]
|
||||
pub use constants::object::*;
|
||||
#[cfg(feature = "constants")]
|
||||
pub use constants::profiler::*;
|
||||
#[cfg(feature = "constants")]
|
||||
pub use constants::runtime::*;
|
||||
#[cfg(feature = "constants")]
|
||||
pub use constants::targets::*;
|
||||
|
||||
19
crates/config/src/observability/metrics.rs
Normal file
19
crates/config/src/observability/metrics.rs
Normal file
@@ -0,0 +1,19 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/// Metrics collection interval in milliseconds for system metrics (CPU, memory, disk, network).
|
||||
pub const DEFAULT_METRICS_SYSTEM_INTERVAL_MS: u64 = 30000;
|
||||
|
||||
/// Environment variable for setting the metrics collection interval for system metrics.
|
||||
pub const ENV_OBS_METRICS_SYSTEM_INTERVAL_MS: &str = "RUSTFS_OBS_METRICS_SYSTEM_INTERVAL_MS";
|
||||
@@ -14,7 +14,13 @@
|
||||
|
||||
// Observability Keys
|
||||
|
||||
mod metrics;
|
||||
pub use metrics::*;
|
||||
|
||||
pub const ENV_OBS_ENDPOINT: &str = "RUSTFS_OBS_ENDPOINT";
|
||||
pub const ENV_OBS_TRACE_ENDPOINT: &str = "RUSTFS_OBS_TRACE_ENDPOINT";
|
||||
pub const ENV_OBS_METRIC_ENDPOINT: &str = "RUSTFS_OBS_METRIC_ENDPOINT";
|
||||
pub const ENV_OBS_LOG_ENDPOINT: &str = "RUSTFS_OBS_LOG_ENDPOINT";
|
||||
pub const ENV_OBS_USE_STDOUT: &str = "RUSTFS_OBS_USE_STDOUT";
|
||||
pub const ENV_OBS_SAMPLE_RATIO: &str = "RUSTFS_OBS_SAMPLE_RATIO";
|
||||
pub const ENV_OBS_METER_INTERVAL: &str = "RUSTFS_OBS_METER_INTERVAL";
|
||||
@@ -22,7 +28,7 @@ pub const ENV_OBS_SERVICE_NAME: &str = "RUSTFS_OBS_SERVICE_NAME";
|
||||
pub const ENV_OBS_SERVICE_VERSION: &str = "RUSTFS_OBS_SERVICE_VERSION";
|
||||
pub const ENV_OBS_ENVIRONMENT: &str = "RUSTFS_OBS_ENVIRONMENT";
|
||||
pub const ENV_OBS_LOGGER_LEVEL: &str = "RUSTFS_OBS_LOGGER_LEVEL";
|
||||
pub const ENV_OBS_LOCAL_LOGGING_ENABLED: &str = "RUSTFS_OBS_LOCAL_LOGGING_ENABLED";
|
||||
pub const ENV_OBS_LOG_STDOUT_ENABLED: &str = "RUSTFS_OBS_LOG_STDOUT_ENABLED";
|
||||
pub const ENV_OBS_LOG_DIRECTORY: &str = "RUSTFS_OBS_LOG_DIRECTORY";
|
||||
pub const ENV_OBS_LOG_FILENAME: &str = "RUSTFS_OBS_LOG_FILENAME";
|
||||
pub const ENV_OBS_LOG_ROTATION_SIZE_MB: &str = "RUSTFS_OBS_LOG_ROTATION_SIZE_MB";
|
||||
@@ -47,12 +53,6 @@ pub const DEFAULT_OBS_LOG_MESSAGE_CAPA: usize = 32768;
|
||||
/// Default values for flush interval in milliseconds
|
||||
pub const DEFAULT_OBS_LOG_FLUSH_MS: u64 = 200;
|
||||
|
||||
/// Audit logger queue capacity environment variable key
|
||||
pub const ENV_AUDIT_LOGGER_QUEUE_CAPACITY: &str = "RUSTFS_AUDIT_LOGGER_QUEUE_CAPACITY";
|
||||
|
||||
/// Default values for observability configuration
|
||||
pub const DEFAULT_AUDIT_LOGGER_QUEUE_CAPACITY: usize = 10000;
|
||||
|
||||
/// Default values for observability configuration
|
||||
// ### Supported Environment Values
|
||||
// - `production` - Secure file-only logging
|
||||
@@ -71,6 +71,9 @@ mod tests {
|
||||
#[test]
|
||||
fn test_env_keys() {
|
||||
assert_eq!(ENV_OBS_ENDPOINT, "RUSTFS_OBS_ENDPOINT");
|
||||
assert_eq!(ENV_OBS_TRACE_ENDPOINT, "RUSTFS_OBS_TRACE_ENDPOINT");
|
||||
assert_eq!(ENV_OBS_METRIC_ENDPOINT, "RUSTFS_OBS_METRIC_ENDPOINT");
|
||||
assert_eq!(ENV_OBS_LOG_ENDPOINT, "RUSTFS_OBS_LOG_ENDPOINT");
|
||||
assert_eq!(ENV_OBS_USE_STDOUT, "RUSTFS_OBS_USE_STDOUT");
|
||||
assert_eq!(ENV_OBS_SAMPLE_RATIO, "RUSTFS_OBS_SAMPLE_RATIO");
|
||||
assert_eq!(ENV_OBS_METER_INTERVAL, "RUSTFS_OBS_METER_INTERVAL");
|
||||
@@ -78,18 +81,16 @@ mod tests {
|
||||
assert_eq!(ENV_OBS_SERVICE_VERSION, "RUSTFS_OBS_SERVICE_VERSION");
|
||||
assert_eq!(ENV_OBS_ENVIRONMENT, "RUSTFS_OBS_ENVIRONMENT");
|
||||
assert_eq!(ENV_OBS_LOGGER_LEVEL, "RUSTFS_OBS_LOGGER_LEVEL");
|
||||
assert_eq!(ENV_OBS_LOCAL_LOGGING_ENABLED, "RUSTFS_OBS_LOCAL_LOGGING_ENABLED");
|
||||
assert_eq!(ENV_OBS_LOG_STDOUT_ENABLED, "RUSTFS_OBS_LOG_STDOUT_ENABLED");
|
||||
assert_eq!(ENV_OBS_LOG_DIRECTORY, "RUSTFS_OBS_LOG_DIRECTORY");
|
||||
assert_eq!(ENV_OBS_LOG_FILENAME, "RUSTFS_OBS_LOG_FILENAME");
|
||||
assert_eq!(ENV_OBS_LOG_ROTATION_SIZE_MB, "RUSTFS_OBS_LOG_ROTATION_SIZE_MB");
|
||||
assert_eq!(ENV_OBS_LOG_ROTATION_TIME, "RUSTFS_OBS_LOG_ROTATION_TIME");
|
||||
assert_eq!(ENV_OBS_LOG_KEEP_FILES, "RUSTFS_OBS_LOG_KEEP_FILES");
|
||||
assert_eq!(ENV_AUDIT_LOGGER_QUEUE_CAPACITY, "RUSTFS_AUDIT_LOGGER_QUEUE_CAPACITY");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_default_values() {
|
||||
assert_eq!(DEFAULT_AUDIT_LOGGER_QUEUE_CAPACITY, 10000);
|
||||
assert_eq!(DEFAULT_OBS_ENVIRONMENT_PRODUCTION, "production");
|
||||
assert_eq!(DEFAULT_OBS_ENVIRONMENT_DEVELOPMENT, "development");
|
||||
assert_eq!(DEFAULT_OBS_ENVIRONMENT_TEST, "test");
|
||||
|
||||
@@ -29,7 +29,7 @@ documentation = "https://docs.rs/rustfs-crypto/latest/rustfs_crypto/"
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
aes-gcm = { workspace = true, features = ["std"], optional = true }
|
||||
aes-gcm = { workspace = true, optional = true }
|
||||
argon2 = { workspace = true, features = ["std"], optional = true }
|
||||
cfg-if = { workspace = true }
|
||||
chacha20poly1305 = { workspace = true, optional = true }
|
||||
|
||||
@@ -8,7 +8,7 @@
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
|
||||
<a href="https://docs.rustfs.com/en/">📖 Documentation</a>
|
||||
<a href="https://docs.rustfs.com/">📖 Documentation</a>
|
||||
· <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
|
||||
· <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
|
||||
</p>
|
||||
|
||||
@@ -19,127 +19,37 @@ pub fn decrypt_data(password: &[u8], data: &[u8]) -> Result<Vec<u8>, crate::Erro
|
||||
use aes_gcm::{Aes256Gcm, KeyInit as _};
|
||||
use chacha20poly1305::ChaCha20Poly1305;
|
||||
|
||||
// 32: salt
|
||||
// 1: id
|
||||
// 12: nonce
|
||||
const HEADER_LENGTH: usize = 45;
|
||||
if data.len() < HEADER_LENGTH {
|
||||
return Err(Error::ErrUnexpectedHeader);
|
||||
}
|
||||
|
||||
let (salt, id, nonce) = (&data[..32], ID::try_from(data[32])?, &data[33..45]);
|
||||
let data = &data[HEADER_LENGTH..];
|
||||
let (salt, id, nonce_slice) = (&data[..32], ID::try_from(data[32])?, &data[33..45]);
|
||||
let body = &data[HEADER_LENGTH..];
|
||||
|
||||
match id {
|
||||
ID::Argon2idChaCHa20Poly1305 => {
|
||||
let key = id.get_key(password, salt)?;
|
||||
decrypt(ChaCha20Poly1305::new_from_slice(&key)?, nonce, data)
|
||||
decrypt(ChaCha20Poly1305::new_from_slice(&key)?, nonce_slice, body)
|
||||
}
|
||||
_ => {
|
||||
let key = id.get_key(password, salt)?;
|
||||
decrypt(Aes256Gcm::new_from_slice(&key)?, nonce, data)
|
||||
decrypt(Aes256Gcm::new_from_slice(&key)?, nonce_slice, body)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// use argon2::{Argon2, PasswordHasher};
|
||||
// use argon2::password_hash::{SaltString};
|
||||
// use aes_gcm::{Aes256Gcm, Key, Nonce}; // For AES-GCM
|
||||
// use chacha20poly1305::{ChaCha20Poly1305, Key as ChaChaKey, Nonce as ChaChaNonce}; // For ChaCha20
|
||||
// use pbkdf2::pbkdf2;
|
||||
// use sha2::Sha256;
|
||||
// use std::io::{self, Read};
|
||||
// use thiserror::Error;
|
||||
|
||||
// #[derive(Debug, Error)]
|
||||
// pub enum DecryptError {
|
||||
// #[error("unexpected header")]
|
||||
// UnexpectedHeader,
|
||||
// #[error("invalid encryption algorithm ID")]
|
||||
// InvalidAlgorithmId,
|
||||
// #[error("IO error")]
|
||||
// Io(#[from] io::Error),
|
||||
// #[error("decryption error")]
|
||||
// DecryptionError,
|
||||
// }
|
||||
|
||||
// pub fn decrypt_data2<R: Read>(password: &str, mut data: R) -> Result<Vec<u8>, DecryptError> {
|
||||
// // Parse the stream header
|
||||
// let mut hdr = [0u8; 32 + 1 + 8];
|
||||
// if data.read_exact(&mut hdr).is_err() {
|
||||
// return Err(DecryptError::UnexpectedHeader);
|
||||
// }
|
||||
|
||||
// let salt = &hdr[0..32];
|
||||
// let id = hdr[32];
|
||||
// let nonce = &hdr[33..41];
|
||||
|
||||
// let key = match id {
|
||||
// // Argon2id + AES-GCM
|
||||
// 0x01 => {
|
||||
// let salt = SaltString::encode_b64(salt).map_err(|_| DecryptError::DecryptionError)?;
|
||||
// let argon2 = Argon2::default();
|
||||
// let hashed_key = argon2.hash_password(password.as_bytes(), &salt)
|
||||
// .map_err(|_| DecryptError::DecryptionError)?;
|
||||
// hashed_key.hash.unwrap().as_bytes().to_vec()
|
||||
// }
|
||||
// // Argon2id + ChaCha20Poly1305
|
||||
// 0x02 => {
|
||||
// let salt = SaltString::encode_b64(salt).map_err(|_| DecryptError::DecryptionError)?;
|
||||
// let argon2 = Argon2::default();
|
||||
// let hashed_key = argon2.hash_password(password.as_bytes(), &salt)
|
||||
// .map_err(|_| DecryptError::DecryptionError)?;
|
||||
// hashed_key.hash.unwrap().as_bytes().to_vec()
|
||||
// }
|
||||
// // PBKDF2 + AES-GCM
|
||||
// // 0x03 => {
|
||||
// // let mut key = [0u8; 32];
|
||||
// // pbkdf2::<Sha256>(password.as_bytes(), salt, 10000, &mut key);
|
||||
// // key.to_vec()
|
||||
// // }
|
||||
// _ => return Err(DecryptError::InvalidAlgorithmId),
|
||||
// };
|
||||
|
||||
// // Decrypt data using the corresponding cipher
|
||||
// let mut encrypted_data = Vec::new();
|
||||
// data.read_to_end(&mut encrypted_data)?;
|
||||
|
||||
// let plaintext = match id {
|
||||
// 0x01 => {
|
||||
// let cipher = Aes256Gcm::new(Key::from_slice(&key));
|
||||
// let nonce = Nonce::from_slice(nonce);
|
||||
// cipher
|
||||
// .decrypt(nonce, encrypted_data.as_ref())
|
||||
// .map_err(|_| DecryptError::DecryptionError)?
|
||||
// }
|
||||
// 0x02 => {
|
||||
// let cipher = ChaCha20Poly1305::new(ChaChaKey::from_slice(&key));
|
||||
// let nonce = ChaChaNonce::from_slice(nonce);
|
||||
// cipher
|
||||
// .decrypt(nonce, encrypted_data.as_ref())
|
||||
// .map_err(|_| DecryptError::DecryptionError)?
|
||||
// }
|
||||
// 0x03 => {
|
||||
|
||||
// let cipher = Aes256Gcm::new(Key::from_slice(&key));
|
||||
// let nonce = Nonce::from_slice(nonce);
|
||||
// cipher
|
||||
// .decrypt(nonce, encrypted_data.as_ref())
|
||||
// .map_err(|_| DecryptError::DecryptionError)?
|
||||
// }
|
||||
// _ => return Err(DecryptError::InvalidAlgorithmId),
|
||||
// };
|
||||
|
||||
// Ok(plaintext)
|
||||
// }
|
||||
|
||||
#[cfg(any(test, feature = "crypto"))]
|
||||
#[inline]
|
||||
fn decrypt<T: aes_gcm::aead::Aead>(stream: T, nonce: &[u8], data: &[u8]) -> Result<Vec<u8>, crate::Error> {
|
||||
use crate::error::Error;
|
||||
stream
|
||||
.decrypt(aes_gcm::Nonce::from_slice(nonce), data)
|
||||
.map_err(Error::ErrDecryptFailed)
|
||||
use aes_gcm::AeadCore;
|
||||
use aes_gcm::aead::array::Array;
|
||||
use core::convert::TryFrom;
|
||||
|
||||
let nonce_arr: Array<u8, <T as AeadCore>::NonceSize> =
|
||||
Array::try_from(nonce).map_err(|_| Error::ErrDecryptFailed(aes_gcm::aead::Error))?;
|
||||
stream.decrypt(&nonce_arr, data).map_err(Error::ErrDecryptFailed)
|
||||
}
|
||||
|
||||
#[cfg(not(any(test, feature = "crypto")))]
|
||||
|
||||
@@ -43,7 +43,7 @@ pub fn encrypt_data(password: &[u8], data: &[u8]) -> Result<Vec<u8>, crate::Erro
|
||||
if native_aes() {
|
||||
encrypt(Aes256Gcm::new_from_slice(&key)?, &salt, id, data)
|
||||
} else {
|
||||
encrypt(ChaCha20Poly1305::new_from_slice(&key)?, &salt, id, data)
|
||||
encrypt(chacha20poly1305::ChaCha20Poly1305::new_from_slice(&key)?, &salt, id, data)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -56,16 +56,19 @@ fn encrypt<T: aes_gcm::aead::Aead>(
|
||||
data: &[u8],
|
||||
) -> Result<Vec<u8>, crate::Error> {
|
||||
use crate::error::Error;
|
||||
use aes_gcm::aead::rand_core::OsRng;
|
||||
use aes_gcm::AeadCore;
|
||||
use aes_gcm::aead::array::Array;
|
||||
use rand::RngCore;
|
||||
|
||||
let nonce = T::generate_nonce(&mut OsRng);
|
||||
let mut nonce: Array<u8, <T as AeadCore>::NonceSize> = Array::default();
|
||||
rand::rng().fill_bytes(&mut nonce);
|
||||
|
||||
let encryptor = stream.encrypt(&nonce, data).map_err(Error::ErrEncryptFailed)?;
|
||||
|
||||
let mut ciphertext = Vec::with_capacity(salt.len() + 1 + nonce.len() + encryptor.len());
|
||||
ciphertext.extend_from_slice(salt);
|
||||
ciphertext.push(id as u8);
|
||||
ciphertext.extend_from_slice(nonce.as_slice());
|
||||
ciphertext.extend_from_slice(&nonce);
|
||||
ciphertext.extend_from_slice(&encryptor);
|
||||
|
||||
Ok(ciphertext)
|
||||
|
||||
@@ -226,7 +226,7 @@ fn test_password_variations() -> Result<(), crate::Error> {
|
||||
b"12345".as_slice(), // Numeric
|
||||
b"!@#$%^&*()".as_slice(), // Special characters
|
||||
b"\x00\x01\x02\x03".as_slice(), // Binary password
|
||||
"密码测试".as_bytes(), // Unicode password
|
||||
"пароль тест".as_bytes(), // Unicode password
|
||||
&[0xFF; 64], // Long binary password
|
||||
];
|
||||
|
||||
|
||||
@@ -1,267 +1,253 @@
|
||||
# KMS End-to-End Tests
|
||||
|
||||
本目录包含 RustFS KMS (Key Management Service) 的端到端集成测试,用于验证完整的 KMS 功能流程。
|
||||
This directory contains the integration suites used to validate the full RustFS KMS (Key Management Service) workflow.
|
||||
|
||||
## 📁 测试文件说明
|
||||
## 📁 Test Overview
|
||||
|
||||
### `kms_local_test.rs`
|
||||
本地KMS后端的端到端测试,包含:
|
||||
- 自动启动和配置本地KMS后端
|
||||
- 通过动态配置API配置KMS服务
|
||||
- 测试SSE-C(客户端提供密钥)加密流程
|
||||
- 验证S3兼容的对象加密/解密操作
|
||||
- 密钥生命周期管理测试
|
||||
End-to-end coverage for the local KMS backend:
|
||||
- Auto-start and configure the local backend
|
||||
- Configure KMS through the dynamic configuration API
|
||||
- Verify SSE-C (client-provided keys)
|
||||
- Exercise S3-compatible encryption/decryption
|
||||
- Validate key lifecycle management
|
||||
|
||||
### `kms_vault_test.rs`
|
||||
Vault KMS后端的端到端测试,包含:
|
||||
- 自动启动Vault开发服务器
|
||||
- 配置Vault transit engine和密钥
|
||||
- 通过动态配置API配置KMS服务
|
||||
- 测试完整的Vault KMS集成
|
||||
- 验证Token认证和加密操作
|
||||
End-to-end coverage for the Vault backend:
|
||||
- Launch a Vault dev server automatically
|
||||
- Configure the transit engine and encryption keys
|
||||
- Configure KMS via the dynamic configuration API
|
||||
- Run the full Vault integration flow
|
||||
- Validate token authentication and encryption operations
|
||||
|
||||
### `kms_comprehensive_test.rs`
|
||||
**完整的KMS功能测试套件**(当前因AWS SDK API兼容性问题暂时禁用),包含:
|
||||
- **Bucket加密配置**: SSE-S3和SSE-KMS默认加密设置
|
||||
- **完整的SSE加密模式测试**:
|
||||
- SSE-S3: S3管理的服务端加密
|
||||
- SSE-KMS: KMS管理的服务端加密
|
||||
- SSE-C: 客户端提供密钥的服务端加密
|
||||
- **对象操作测试**: 上传、下载、验证三种SSE模式
|
||||
- **分片上传测试**: 多部分上传支持所有SSE模式
|
||||
- **对象复制测试**: 不同SSE模式间的复制操作
|
||||
- **完整KMS API管理**:
|
||||
- 密钥生命周期管理(创建、列表、描述、删除、取消删除)
|
||||
- 直接加密/解密操作
|
||||
- 数据密钥生成和操作
|
||||
- KMS服务管理(启动、停止、状态查询)
|
||||
**Full KMS capability suite** (currently disabled because of AWS SDK compatibility issues):
|
||||
- **Bucket encryption configuration**: SSE-S3 and SSE-KMS defaults
|
||||
- **All SSE encryption modes**:
|
||||
- SSE-S3 (S3-managed server-side encryption)
|
||||
- SSE-KMS (KMS-managed server-side encryption)
|
||||
- SSE-C (client-provided keys)
|
||||
- **Object operations**: upload, download, and validation for every SSE mode
|
||||
- **Multipart uploads**: cover each SSE mode
|
||||
- **Object replication**: cross-mode replication scenarios
|
||||
- **Complete KMS API management**:
|
||||
- Key lifecycle (create, list, describe, delete, cancel delete)
|
||||
- Direct encrypt/decrypt operations
|
||||
- Data key generation and handling
|
||||
- KMS service lifecycle (start, stop, status)
|
||||
|
||||
### `kms_integration_test.rs`
|
||||
综合性KMS集成测试,包含:
|
||||
- 多后端兼容性测试
|
||||
- KMS服务生命周期测试
|
||||
- 错误处理和恢复测试
|
||||
- **注意**: 当前因AWS SDK API兼容性问题暂时禁用
|
||||
Broad integration tests that exercise:
|
||||
- Multiple backends
|
||||
- KMS lifecycle management
|
||||
- Error handling and recovery
|
||||
- **Note**: currently disabled because of AWS SDK compatibility gaps
|
||||
|
||||
## 🚀 如何运行测试
|
||||
## 🚀 Running Tests
|
||||
|
||||
### 前提条件
|
||||
### Prerequisites
|
||||
|
||||
1. **系统依赖**:
|
||||
1. **System dependencies**
|
||||
```bash
|
||||
# macOS
|
||||
brew install vault awscurl
|
||||
|
||||
|
||||
# Ubuntu/Debian
|
||||
apt-get install vault
|
||||
pip install awscurl
|
||||
```
|
||||
|
||||
2. **构建RustFS**:
|
||||
2. **Build RustFS**
|
||||
```bash
|
||||
# 在项目根目录
|
||||
cargo build
|
||||
```
|
||||
|
||||
### 运行单个测试
|
||||
### Run individual suites
|
||||
|
||||
#### 本地KMS测试
|
||||
#### Local backend
|
||||
```bash
|
||||
cd crates/e2e_test
|
||||
cargo test test_local_kms_end_to_end -- --nocapture
|
||||
```
|
||||
|
||||
#### Vault KMS测试
|
||||
#### Vault backend
|
||||
```bash
|
||||
cd crates/e2e_test
|
||||
cargo test test_vault_kms_end_to_end -- --nocapture
|
||||
```
|
||||
|
||||
#### 高可用性测试
|
||||
#### High availability
|
||||
```bash
|
||||
cd crates/e2e_test
|
||||
cargo test test_vault_kms_high_availability -- --nocapture
|
||||
```
|
||||
|
||||
#### 完整功能测试(开发中)
|
||||
#### Comprehensive features (disabled)
|
||||
```bash
|
||||
cd crates/e2e_test
|
||||
# 注意:以下测试因AWS SDK API兼容性问题暂时禁用
|
||||
# Disabled due to AWS SDK compatibility gaps
|
||||
# cargo test test_comprehensive_kms_functionality -- --nocapture
|
||||
# cargo test test_sse_modes_compatibility -- --nocapture
|
||||
# cargo test test_sse_modes_compatibility -- --nocapture
|
||||
# cargo test test_kms_api_comprehensive -- --nocapture
|
||||
```
|
||||
|
||||
### 运行所有KMS测试
|
||||
### Run all KMS suites
|
||||
```bash
|
||||
cd crates/e2e_test
|
||||
cargo test kms -- --nocapture
|
||||
```
|
||||
|
||||
### 串行运行(避免端口冲突)
|
||||
### Run serially (avoid port conflicts)
|
||||
```bash
|
||||
cd crates/e2e_test
|
||||
cargo test kms -- --nocapture --test-threads=1
|
||||
```
|
||||
|
||||
## 🔧 测试配置
|
||||
## 🔧 Configuration
|
||||
|
||||
### 环境变量
|
||||
### Environment variables
|
||||
```bash
|
||||
# 可选:自定义端口(默认使用9050)
|
||||
# Optional: custom RustFS port (default 9050)
|
||||
export RUSTFS_TEST_PORT=9050
|
||||
|
||||
# 可选:自定义Vault端口(默认使用8200)
|
||||
# Optional: custom Vault port (default 8200)
|
||||
export VAULT_TEST_PORT=8200
|
||||
|
||||
# 可选:启用详细日志
|
||||
# Optional: enable verbose logging
|
||||
export RUST_LOG=debug
|
||||
```
|
||||
|
||||
### 依赖的二进制文件路径
|
||||
### Required binaries
|
||||
|
||||
测试会自动查找以下二进制文件:
|
||||
- `../../target/debug/rustfs` - RustFS服务器
|
||||
- `vault` - Vault (需要在PATH中)
|
||||
- `/Users/dandan/Library/Python/3.9/bin/awscurl` - AWS签名工具
|
||||
Tests look for:
|
||||
- `../../target/debug/rustfs` – RustFS server
|
||||
- `vault` – Vault CLI (must be on PATH)
|
||||
- `/Users/dandan/Library/Python/3.9/bin/awscurl` – AWS SigV4 helper
|
||||
|
||||
## 📋 测试流程说明
|
||||
## 📋 Test Flow
|
||||
|
||||
### Local KMS测试流程
|
||||
1. **环境准备**:创建临时目录,设置KMS密钥存储路径
|
||||
2. **启动服务**:启动RustFS服务器,启用KMS功能
|
||||
3. **等待就绪**:检查端口监听和S3 API响应
|
||||
4. **配置KMS**:通过awscurl发送配置请求到admin API
|
||||
5. **启动KMS**:激活KMS服务
|
||||
6. **功能测试**:
|
||||
- 创建测试存储桶
|
||||
- 测试SSE-C加密(客户端提供密钥)
|
||||
- 验证对象加密/解密
|
||||
7. **清理**:终止进程,清理临时文件
|
||||
### Local backend
|
||||
1. **Prepare environment** – create temporary directories and key storage paths
|
||||
2. **Start RustFS** – launch the server with KMS enabled
|
||||
3. **Wait for readiness** – confirm the port listener and S3 API
|
||||
4. **Configure KMS** – send configuration via awscurl to the admin API
|
||||
5. **Start KMS** – activate the KMS service
|
||||
6. **Exercise functionality**
|
||||
- Create a test bucket
|
||||
- Run SSE-C encryption with client-provided keys
|
||||
- Validate encryption/decryption behavior
|
||||
7. **Cleanup** – stop processes and remove temporary files
|
||||
|
||||
### Vault KMS测试流程
|
||||
1. **启动Vault**:使用开发模式启动Vault服务器
|
||||
2. **配置Vault**:
|
||||
- 启用transit secrets engine
|
||||
- 创建加密密钥(rustfs-master-key)
|
||||
3. **启动RustFS**:启用KMS功能的RustFS服务器
|
||||
4. **配置KMS**:通过API配置Vault后端,包含:
|
||||
- Vault地址和Token认证
|
||||
- Transit engine配置
|
||||
- 密钥路径设置
|
||||
5. **功能测试**:完整的加密/解密流程测试
|
||||
6. **清理**:终止所有进程
|
||||
### Vault backend
|
||||
1. **Launch Vault** – start the dev-mode server
|
||||
2. **Configure Vault**
|
||||
- Enable the transit secrets engine
|
||||
- Create the `rustfs-master-key`
|
||||
3. **Start RustFS** – run the server with KMS enabled
|
||||
4. **Configure KMS** – point RustFS at Vault (address, token, transit config, key path)
|
||||
5. **Exercise functionality** – complete the encryption/decryption workflow
|
||||
6. **Cleanup** – stop all services
|
||||
|
||||
## 🛠️ 故障排除
|
||||
## 🛠️ Troubleshooting
|
||||
|
||||
### 常见问题
|
||||
### Common issues
|
||||
|
||||
**Q: 测试失败 "RustFS server failed to become ready"**
|
||||
```
|
||||
A: 检查端口是否被占用:
|
||||
**Q: `RustFS server failed to become ready`**
|
||||
```bash
|
||||
lsof -i :9050
|
||||
kill -9 <PID> # 如果有进程占用端口
|
||||
kill -9 <PID> # Free the port if necessary
|
||||
```
|
||||
|
||||
**Q: Vault服务启动失败**
|
||||
```
|
||||
A: 确保Vault已安装且在PATH中:
|
||||
**Q: Vault fails to start**
|
||||
```bash
|
||||
which vault
|
||||
vault version
|
||||
```
|
||||
|
||||
**Q: awscurl认证失败**
|
||||
```
|
||||
A: 检查awscurl路径是否正确:
|
||||
**Q: awscurl authentication fails**
|
||||
```bash
|
||||
ls /Users/dandan/Library/Python/3.9/bin/awscurl
|
||||
# 或安装到不同路径:
|
||||
# Or install elsewhere
|
||||
pip install awscurl
|
||||
which awscurl # 然后更新测试中的路径
|
||||
which awscurl # Update the path in tests accordingly
|
||||
```
|
||||
|
||||
**Q: 测试超时**
|
||||
```
|
||||
A: 增加等待时间或检查日志:
|
||||
**Q: Tests time out**
|
||||
```bash
|
||||
RUST_LOG=debug cargo test test_local_kms_end_to_end -- --nocapture
|
||||
```
|
||||
|
||||
### 调试技巧
|
||||
### Debug tips
|
||||
|
||||
1. **查看详细日志**:
|
||||
1. **Enable verbose logs**
|
||||
```bash
|
||||
RUST_LOG=rustfs_kms=debug,rustfs=info cargo test -- --nocapture
|
||||
```
|
||||
|
||||
2. **保留临时文件**:
|
||||
修改测试代码,注释掉清理部分,检查生成的配置文件
|
||||
2. **Keep temporary files** – comment out cleanup logic to inspect generated configs
|
||||
|
||||
3. **单步调试**:
|
||||
在测试中添加 `std::thread::sleep` 来暂停执行,手动检查服务状态
|
||||
3. **Pause execution** – add `std::thread::sleep` for manual inspection during tests
|
||||
|
||||
4. **端口检查**:
|
||||
4. **Monitor ports**
|
||||
```bash
|
||||
# 测试运行时检查端口状态
|
||||
netstat -an | grep 9050
|
||||
curl http://127.0.0.1:9050/minio/health/ready
|
||||
```
|
||||
|
||||
## 📊 测试覆盖范围
|
||||
## 📊 Coverage
|
||||
|
||||
### 功能覆盖
|
||||
- ✅ KMS服务动态配置
|
||||
- ✅ 本地和Vault后端支持
|
||||
- ✅ AWS S3兼容加密接口
|
||||
- ✅ 密钥管理和生命周期
|
||||
- ✅ 错误处理和恢复
|
||||
- ✅ 高可用性场景
|
||||
### Functional
|
||||
- ✅ Dynamic KMS configuration
|
||||
- ✅ Local and Vault backends
|
||||
- ✅ AWS S3-compatible encryption APIs
|
||||
- ✅ Key lifecycle management
|
||||
- ✅ Error handling and recovery paths
|
||||
- ✅ High-availability behavior
|
||||
|
||||
### 加密模式覆盖
|
||||
- ✅ SSE-C (Server-Side Encryption with Customer-Provided Keys)
|
||||
- ✅ SSE-S3 (Server-Side Encryption with S3-Managed Keys)
|
||||
- ✅ SSE-KMS (Server-Side Encryption with KMS-Managed Keys)
|
||||
### Encryption modes
|
||||
- ✅ SSE-C (customer-provided)
|
||||
- ✅ SSE-S3 (S3-managed)
|
||||
- ✅ SSE-KMS (KMS-managed)
|
||||
|
||||
### S3操作覆盖
|
||||
- ✅ 对象上传/下载 (SSE-C模式)
|
||||
- 🚧 分片上传 (需要AWS SDK兼容性修复)
|
||||
- 🚧 对象复制 (需要AWS SDK兼容性修复)
|
||||
- 🚧 Bucket加密配置 (需要AWS SDK兼容性修复)
|
||||
### S3 operations
|
||||
- ✅ Object upload/download (SSE-C)
|
||||
- 🚧 Multipart uploads (pending AWS SDK fixes)
|
||||
- 🚧 Object replication (pending AWS SDK fixes)
|
||||
- 🚧 Bucket encryption defaults (pending AWS SDK fixes)
|
||||
|
||||
### KMS API覆盖
|
||||
- ✅ 基础密钥管理 (创建、列表)
|
||||
- 🚧 完整密钥生命周期 (需要AWS SDK兼容性修复)
|
||||
- 🚧 直接加密/解密操作 (需要AWS SDK兼容性修复)
|
||||
- 🚧 数据密钥生成和解密 (需要AWS SDK兼容性修复)
|
||||
- ✅ KMS服务管理 (配置、启动、停止、状态)
|
||||
### KMS API
|
||||
- ✅ Basic key management (create/list)
|
||||
- 🚧 Full key lifecycle (pending AWS SDK fixes)
|
||||
- 🚧 Direct encrypt/decrypt (pending AWS SDK fixes)
|
||||
- 🚧 Data key operations (pending AWS SDK fixes)
|
||||
- ✅ Service lifecycle (configure/start/stop/status)
|
||||
|
||||
### 认证方式覆盖
|
||||
- ✅ Vault Token认证
|
||||
- 🚧 Vault AppRole认证
|
||||
### Authentication
|
||||
- ✅ Vault token auth
|
||||
- 🚧 Vault AppRole auth
|
||||
|
||||
## 🔄 持续集成
|
||||
## 🔄 CI Integration
|
||||
|
||||
这些测试设计为可在CI/CD环境中运行:
|
||||
Designed to run inside CI/CD pipelines:
|
||||
|
||||
```yaml
|
||||
# GitHub Actions 示例
|
||||
- name: Run KMS E2E Tests
|
||||
run: |
|
||||
# 安装依赖
|
||||
sudo apt-get update
|
||||
sudo apt-get install -y vault
|
||||
pip install awscurl
|
||||
|
||||
# 构建并测试
|
||||
|
||||
cargo build
|
||||
cd crates/e2e_test
|
||||
cargo test kms -- --nocapture --test-threads=1
|
||||
```
|
||||
|
||||
## 📚 相关文档
|
||||
## 📚 References
|
||||
|
||||
- [KMS 配置文档](../../../../docs/kms/README.md) - KMS功能完整文档
|
||||
- [动态配置API](../../../../docs/kms/http-api.md) - REST API接口说明
|
||||
- [故障排除指南](../../../../docs/kms/troubleshooting.md) - 常见问题解决
|
||||
- [KMS configuration guide](../../../../docs/kms/README.md)
|
||||
- [Dynamic configuration API](../../../../docs/kms/http-api.md)
|
||||
- [Troubleshooting](../../../../docs/kms/troubleshooting.md)
|
||||
|
||||
---
|
||||
|
||||
*这些测试确保KMS功能的稳定性和可靠性,为生产环境部署提供信心。*
|
||||
*These suites ensure KMS stability and reliability, building confidence for production deployments.*
|
||||
|
||||
@@ -547,9 +547,9 @@ pub async fn test_multipart_upload_with_config(
|
||||
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
let total_size = config.total_size();
|
||||
|
||||
info!("🧪 开始分片上传测试 - {:?}", config.encryption_type);
|
||||
info!("🧪 Starting multipart upload test - {:?}", config.encryption_type);
|
||||
info!(
|
||||
" 对象: {}, 分片: {}个, 每片: {}MB, 总计: {}MB",
|
||||
" Object: {}, parts: {}, part size: {} MB, total: {} MB",
|
||||
config.object_key,
|
||||
config.total_parts,
|
||||
config.part_size / (1024 * 1024),
|
||||
@@ -589,7 +589,7 @@ pub async fn test_multipart_upload_with_config(
|
||||
|
||||
let create_multipart_output = create_request.send().await?;
|
||||
let upload_id = create_multipart_output.upload_id().unwrap();
|
||||
info!("📋 创建分片上传,ID: {}", upload_id);
|
||||
info!("📋 Created multipart upload, ID: {}", upload_id);
|
||||
|
||||
// Step 2: Upload parts
|
||||
let mut completed_parts = Vec::new();
|
||||
@@ -598,7 +598,7 @@ pub async fn test_multipart_upload_with_config(
|
||||
let end = std::cmp::min(start + config.part_size, total_size);
|
||||
let part_data = &test_data[start..end];
|
||||
|
||||
info!("📤 上传分片 {} ({:.2}MB)", part_number, part_data.len() as f64 / (1024.0 * 1024.0));
|
||||
info!("📤 Uploading part {} ({:.2} MB)", part_number, part_data.len() as f64 / (1024.0 * 1024.0));
|
||||
|
||||
let mut upload_request = s3_client
|
||||
.upload_part()
|
||||
@@ -625,7 +625,7 @@ pub async fn test_multipart_upload_with_config(
|
||||
.build(),
|
||||
);
|
||||
|
||||
debug!("分片 {} 上传完成,ETag: {}", part_number, etag);
|
||||
debug!("Part {} uploaded with ETag {}", part_number, etag);
|
||||
}
|
||||
|
||||
// Step 3: Complete multipart upload
|
||||
@@ -633,7 +633,7 @@ pub async fn test_multipart_upload_with_config(
|
||||
.set_parts(Some(completed_parts))
|
||||
.build();
|
||||
|
||||
info!("🔗 完成分片上传");
|
||||
info!("🔗 Completing multipart upload");
|
||||
let complete_output = s3_client
|
||||
.complete_multipart_upload()
|
||||
.bucket(bucket)
|
||||
@@ -643,10 +643,10 @@ pub async fn test_multipart_upload_with_config(
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
debug!("完成分片上传,ETag: {:?}", complete_output.e_tag());
|
||||
debug!("Multipart upload finalized with ETag {:?}", complete_output.e_tag());
|
||||
|
||||
// Step 4: Download and verify
|
||||
info!("📥 下载文件并验证");
|
||||
info!("📥 Downloading object for verification");
|
||||
let mut get_request = s3_client.get_object().bucket(bucket).key(&config.object_key);
|
||||
|
||||
// Add encryption headers for SSE-C GET
|
||||
@@ -680,7 +680,7 @@ pub async fn test_multipart_upload_with_config(
|
||||
assert_eq!(downloaded_data.len(), total_size);
|
||||
assert_eq!(&downloaded_data[..], &test_data[..]);
|
||||
|
||||
info!("✅ 分片上传测试通过 - {:?}", config.encryption_type);
|
||||
info!("✅ Multipart upload test passed - {:?}", config.encryption_type);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -700,7 +700,7 @@ pub async fn test_all_multipart_encryption_types(
|
||||
bucket: &str,
|
||||
base_object_key: &str,
|
||||
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
info!("🧪 测试所有加密类型的分片上传");
|
||||
info!("🧪 Testing multipart uploads for every encryption type");
|
||||
|
||||
let part_size = 5 * 1024 * 1024; // 5MB per part
|
||||
let total_parts = 2;
|
||||
@@ -718,7 +718,7 @@ pub async fn test_all_multipart_encryption_types(
|
||||
test_multipart_upload_with_config(s3_client, bucket, &config).await?;
|
||||
}
|
||||
|
||||
info!("✅ 所有加密类型的分片上传测试通过");
|
||||
info!("✅ Multipart uploads succeeded for every encryption type");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
|
||||
@@ -33,7 +33,7 @@ use tracing::info;
|
||||
#[serial]
|
||||
async fn test_comprehensive_kms_full_workflow() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
info!("🏁 开始KMS全功能综合测试");
|
||||
info!("🏁 Start the KMS full-featured synthesis test");
|
||||
|
||||
let mut kms_env = LocalKMSTestEnvironment::new().await?;
|
||||
let _default_key_id = kms_env.start_rustfs_for_local_kms().await?;
|
||||
@@ -43,25 +43,25 @@ async fn test_comprehensive_kms_full_workflow() -> Result<(), Box<dyn std::error
|
||||
kms_env.base_env.create_test_bucket(TEST_BUCKET).await?;
|
||||
|
||||
// Phase 1: Test all single encryption types
|
||||
info!("📋 阶段1: 测试所有单文件加密类型");
|
||||
info!("📋 Phase 1: Test all single-file encryption types");
|
||||
test_sse_s3_encryption(&s3_client, TEST_BUCKET).await?;
|
||||
test_sse_kms_encryption(&s3_client, TEST_BUCKET).await?;
|
||||
test_sse_c_encryption(&s3_client, TEST_BUCKET).await?;
|
||||
|
||||
// Phase 2: Test KMS key management APIs
|
||||
info!("📋 阶段2: 测试KMS密钥管理API");
|
||||
info!("📋 Phase 2: Test the KMS Key Management API");
|
||||
test_kms_key_management(&kms_env.base_env.url, &kms_env.base_env.access_key, &kms_env.base_env.secret_key).await?;
|
||||
|
||||
// Phase 3: Test all multipart encryption types
|
||||
info!("📋 阶段3: 测试所有分片上传加密类型");
|
||||
info!("📋 Phase 3: Test all shard upload encryption types");
|
||||
test_all_multipart_encryption_types(&s3_client, TEST_BUCKET, "comprehensive-multipart-test").await?;
|
||||
|
||||
// Phase 4: Mixed workload test
|
||||
info!("📋 阶段4: 混合工作负载测试");
|
||||
info!("📋 Phase 4: Mixed workload testing");
|
||||
test_mixed_encryption_workload(&s3_client, TEST_BUCKET).await?;
|
||||
|
||||
kms_env.base_env.delete_test_bucket(TEST_BUCKET).await?;
|
||||
info!("✅ KMS全功能综合测试通过");
|
||||
info!("✅ KMS fully functional comprehensive test passed");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -70,7 +70,7 @@ async fn test_mixed_encryption_workload(
|
||||
s3_client: &aws_sdk_s3::Client,
|
||||
bucket: &str,
|
||||
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
info!("🔄 测试混合加密工作负载");
|
||||
info!("🔄 Test hybrid crypto workloads");
|
||||
|
||||
// Test configuration: different sizes and encryption types
|
||||
let test_configs = vec![
|
||||
@@ -89,11 +89,11 @@ async fn test_mixed_encryption_workload(
|
||||
];
|
||||
|
||||
for (i, config) in test_configs.iter().enumerate() {
|
||||
info!("🔄 执行混合测试 {}/{}: {:?}", i + 1, test_configs.len(), config.encryption_type);
|
||||
info!("🔄 Perform hybrid testing {}/{}: {:?}", i + 1, test_configs.len(), config.encryption_type);
|
||||
test_multipart_upload_with_config(s3_client, bucket, config).await?;
|
||||
}
|
||||
|
||||
info!("✅ 混合加密工作负载测试通过");
|
||||
info!("✅ Hybrid cryptographic workload tests pass");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -102,7 +102,7 @@ async fn test_mixed_encryption_workload(
|
||||
#[serial]
|
||||
async fn test_comprehensive_stress_test() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
info!("💪 开始KMS压力测试");
|
||||
info!("💪 Start the KMS stress test");
|
||||
|
||||
let mut kms_env = LocalKMSTestEnvironment::new().await?;
|
||||
let _default_key_id = kms_env.start_rustfs_for_local_kms().await?;
|
||||
@@ -120,7 +120,7 @@ async fn test_comprehensive_stress_test() -> Result<(), Box<dyn std::error::Erro
|
||||
|
||||
for config in stress_configs {
|
||||
info!(
|
||||
"💪 执行压力测试: {:?}, 总大小: {}MB",
|
||||
"💪 Perform stress test: {:?}, Total size: {}MB",
|
||||
config.encryption_type,
|
||||
config.total_size() / (1024 * 1024)
|
||||
);
|
||||
@@ -128,7 +128,7 @@ async fn test_comprehensive_stress_test() -> Result<(), Box<dyn std::error::Erro
|
||||
}
|
||||
|
||||
kms_env.base_env.delete_test_bucket(TEST_BUCKET).await?;
|
||||
info!("✅ KMS压力测试通过");
|
||||
info!("✅ KMS stress test passed");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -137,7 +137,7 @@ async fn test_comprehensive_stress_test() -> Result<(), Box<dyn std::error::Erro
|
||||
#[serial]
|
||||
async fn test_comprehensive_key_isolation() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
info!("🔐 开始加密密钥隔离综合测试");
|
||||
info!("🔐 Begin the comprehensive test of encryption key isolation");
|
||||
|
||||
let mut kms_env = LocalKMSTestEnvironment::new().await?;
|
||||
let _default_key_id = kms_env.start_rustfs_for_local_kms().await?;
|
||||
@@ -173,14 +173,14 @@ async fn test_comprehensive_key_isolation() -> Result<(), Box<dyn std::error::Er
|
||||
);
|
||||
|
||||
// Upload with different keys
|
||||
info!("🔐 上传文件用密钥1");
|
||||
info!("🔐 Key 1 for uploading files");
|
||||
test_multipart_upload_with_config(&s3_client, TEST_BUCKET, &config1).await?;
|
||||
|
||||
info!("🔐 上传文件用密钥2");
|
||||
info!("🔐 Key 2 for uploading files");
|
||||
test_multipart_upload_with_config(&s3_client, TEST_BUCKET, &config2).await?;
|
||||
|
||||
// Verify that files cannot be read with wrong keys
|
||||
info!("🔒 验证密钥隔离");
|
||||
info!("🔒 Verify key isolation");
|
||||
let wrong_key = "11111111111111111111111111111111";
|
||||
let wrong_key_b64 = base64::Engine::encode(&base64::engine::general_purpose::STANDARD, wrong_key);
|
||||
let wrong_key_md5 = format!("{:x}", md5::compute(wrong_key));
|
||||
@@ -196,11 +196,11 @@ async fn test_comprehensive_key_isolation() -> Result<(), Box<dyn std::error::Er
|
||||
.send()
|
||||
.await;
|
||||
|
||||
assert!(wrong_read_result.is_err(), "应该无法用错误密钥读取加密文件");
|
||||
info!("✅ 确认密钥隔离正常工作");
|
||||
assert!(wrong_read_result.is_err(), "The encrypted file should not be readable with the wrong key");
|
||||
info!("✅ Confirm that key isolation is working correctly");
|
||||
|
||||
kms_env.base_env.delete_test_bucket(TEST_BUCKET).await?;
|
||||
info!("✅ 加密密钥隔离综合测试通过");
|
||||
info!("✅ Encryption key isolation comprehensive test passed");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -209,7 +209,7 @@ async fn test_comprehensive_key_isolation() -> Result<(), Box<dyn std::error::Er
|
||||
#[serial]
|
||||
async fn test_comprehensive_concurrent_operations() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
info!("⚡ 开始并发加密操作综合测试");
|
||||
info!("⚡ Started comprehensive testing of concurrent encryption operations");
|
||||
|
||||
let mut kms_env = LocalKMSTestEnvironment::new().await?;
|
||||
let _default_key_id = kms_env.start_rustfs_for_local_kms().await?;
|
||||
@@ -228,7 +228,7 @@ async fn test_comprehensive_concurrent_operations() -> Result<(), Box<dyn std::e
|
||||
];
|
||||
|
||||
// Execute uploads concurrently
|
||||
info!("⚡ 开始并发上传");
|
||||
info!("⚡ Start concurrent uploads");
|
||||
let mut tasks = Vec::new();
|
||||
for config in concurrent_configs {
|
||||
let client = s3_client.clone();
|
||||
@@ -243,10 +243,10 @@ async fn test_comprehensive_concurrent_operations() -> Result<(), Box<dyn std::e
|
||||
task.await??;
|
||||
}
|
||||
|
||||
info!("✅ 所有并发操作完成");
|
||||
info!("✅ All concurrent operations are completed");
|
||||
|
||||
kms_env.base_env.delete_test_bucket(TEST_BUCKET).await?;
|
||||
info!("✅ 并发加密操作综合测试通过");
|
||||
info!("✅ The comprehensive test of concurrent encryption operation has passed");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -255,7 +255,7 @@ async fn test_comprehensive_concurrent_operations() -> Result<(), Box<dyn std::e
|
||||
#[serial]
|
||||
async fn test_comprehensive_performance_benchmark() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
info!("📊 开始KMS性能基准测试");
|
||||
info!("📊 Start KMS performance benchmarking");
|
||||
|
||||
let mut kms_env = LocalKMSTestEnvironment::new().await?;
|
||||
let _default_key_id = kms_env.start_rustfs_for_local_kms().await?;
|
||||
@@ -278,7 +278,7 @@ async fn test_comprehensive_performance_benchmark() -> Result<(), Box<dyn std::e
|
||||
];
|
||||
|
||||
for (size_name, config) in perf_configs {
|
||||
info!("📊 测试{}文件性能 ({}MB)", size_name, config.total_size() / (1024 * 1024));
|
||||
info!("📊 Test {} file performance ({}MB)", size_name, config.total_size() / (1024 * 1024));
|
||||
|
||||
let start_time = std::time::Instant::now();
|
||||
test_multipart_upload_with_config(&s3_client, TEST_BUCKET, &config).await?;
|
||||
@@ -286,7 +286,7 @@ async fn test_comprehensive_performance_benchmark() -> Result<(), Box<dyn std::e
|
||||
|
||||
let throughput_mbps = (config.total_size() as f64 / (1024.0 * 1024.0)) / duration.as_secs_f64();
|
||||
info!(
|
||||
"📊 {}文件测试完成: {:.2}秒, 吞吐量: {:.2} MB/s",
|
||||
"📊 {} file test completed: {:.2} seconds, throughput: {:.2} MB/s",
|
||||
size_name,
|
||||
duration.as_secs_f64(),
|
||||
throughput_mbps
|
||||
@@ -294,6 +294,6 @@ async fn test_comprehensive_performance_benchmark() -> Result<(), Box<dyn std::e
|
||||
}
|
||||
|
||||
kms_env.base_env.delete_test_bucket(TEST_BUCKET).await?;
|
||||
info!("✅ KMS性能基准测试通过");
|
||||
info!("✅ KMS performance benchmark passed");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -25,6 +25,7 @@ use super::common::LocalKMSTestEnvironment;
|
||||
use crate::common::{TEST_BUCKET, init_logging};
|
||||
use aws_sdk_s3::types::ServerSideEncryption;
|
||||
use base64::Engine;
|
||||
use md5::compute;
|
||||
use serial_test::serial;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::Semaphore;
|
||||
@@ -71,7 +72,7 @@ async fn test_kms_zero_byte_file_encryption() -> Result<(), Box<dyn std::error::
|
||||
info!("📤 Testing SSE-C with zero-byte file");
|
||||
let test_key = "01234567890123456789012345678901";
|
||||
let test_key_b64 = base64::engine::general_purpose::STANDARD.encode(test_key);
|
||||
let test_key_md5 = format!("{:x}", md5::compute(test_key));
|
||||
let test_key_md5 = format!("{:x}", compute(test_key));
|
||||
let object_key_c = "zero-byte-sse-c";
|
||||
|
||||
let _put_response_c = s3_client
|
||||
@@ -165,7 +166,7 @@ async fn test_kms_single_byte_file_encryption() -> Result<(), Box<dyn std::error
|
||||
info!("📤 Testing SSE-C with single-byte file");
|
||||
let test_key = "01234567890123456789012345678901";
|
||||
let test_key_b64 = base64::engine::general_purpose::STANDARD.encode(test_key);
|
||||
let test_key_md5 = format!("{:x}", md5::compute(test_key));
|
||||
let test_key_md5 = format!("{:x}", compute(test_key));
|
||||
let object_key_c = "single-byte-sse-c";
|
||||
|
||||
s3_client
|
||||
@@ -293,7 +294,7 @@ async fn test_kms_invalid_key_scenarios() -> Result<(), Box<dyn std::error::Erro
|
||||
info!("🔍 Testing invalid SSE-C key length");
|
||||
let invalid_short_key = "short"; // Too short
|
||||
let invalid_key_b64 = base64::engine::general_purpose::STANDARD.encode(invalid_short_key);
|
||||
let invalid_key_md5 = format!("{:x}", md5::compute(invalid_short_key));
|
||||
let invalid_key_md5 = format!("{:x}", compute(invalid_short_key));
|
||||
|
||||
let invalid_key_result = s3_client
|
||||
.put_object()
|
||||
@@ -333,7 +334,7 @@ async fn test_kms_invalid_key_scenarios() -> Result<(), Box<dyn std::error::Erro
|
||||
info!("🔍 Testing access to SSE-C object without key");
|
||||
|
||||
// First upload a valid SSE-C object
|
||||
let valid_key_md5 = format!("{:x}", md5::compute(valid_key));
|
||||
let valid_key_md5 = format!("{:x}", compute(valid_key));
|
||||
s3_client
|
||||
.put_object()
|
||||
.bucket(TEST_BUCKET)
|
||||
@@ -420,7 +421,7 @@ async fn test_kms_concurrent_encryption() -> Result<(), Box<dyn std::error::Erro
|
||||
// SSE-C
|
||||
let key = format!("testkey{i:026}"); // 32-byte key
|
||||
let key_b64 = base64::engine::general_purpose::STANDARD.encode(&key);
|
||||
let key_md5 = format!("{:x}", md5::compute(&key));
|
||||
let key_md5 = format!("{:x}", compute(&key));
|
||||
|
||||
client
|
||||
.put_object()
|
||||
@@ -492,8 +493,8 @@ async fn test_kms_key_validation_security() -> Result<(), Box<dyn std::error::Er
|
||||
|
||||
let key1_b64 = base64::engine::general_purpose::STANDARD.encode(key1);
|
||||
let key2_b64 = base64::engine::general_purpose::STANDARD.encode(key2);
|
||||
let key1_md5 = format!("{:x}", md5::compute(key1));
|
||||
let key2_md5 = format!("{:x}", md5::compute(key2));
|
||||
let key1_md5 = format!("{:x}", compute(key1));
|
||||
let key2_md5 = format!("{:x}", compute(key2));
|
||||
|
||||
// Upload same data with different keys
|
||||
s3_client
|
||||
|
||||
@@ -19,6 +19,7 @@
|
||||
//! multipart upload behaviour.
|
||||
|
||||
use crate::common::{TEST_BUCKET, init_logging};
|
||||
use md5::compute;
|
||||
use serial_test::serial;
|
||||
use tokio::time::{Duration, sleep};
|
||||
use tracing::{error, info};
|
||||
@@ -132,8 +133,8 @@ async fn test_vault_kms_key_isolation() -> Result<(), Box<dyn std::error::Error
|
||||
let key2 = "98765432109876543210987654321098";
|
||||
let key1_b64 = base64::Engine::encode(&base64::engine::general_purpose::STANDARD, key1);
|
||||
let key2_b64 = base64::Engine::encode(&base64::engine::general_purpose::STANDARD, key2);
|
||||
let key1_md5 = format!("{:x}", md5::compute(key1));
|
||||
let key2_md5 = format!("{:x}", md5::compute(key2));
|
||||
let key1_md5 = format!("{:x}", compute(key1));
|
||||
let key2_md5 = format!("{:x}", compute(key2));
|
||||
|
||||
let data1 = b"Vault data encrypted with key 1";
|
||||
let data2 = b"Vault data encrypted with key 2";
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user