mirror of https://github.com/rustfs/rustfs.git, synced 2026-01-17 01:30:33 +00:00

Compare commits: 16 commits, 1.0.0-alph ... 1.0.0-alph

| SHA1 |
|---|
| 57f082ee2b |
| ae7e86d7ef |
| a12a3bedc3 |
| cafec06b7e |
| 1770679e66 |
| a4fbf596e6 |
| 3f717292bf |
| 73f0ecbf8f |
| 0c3079ae5e |
| ebf30b0db5 |
| 29c004d935 |
| 4595bf7db6 |
| f372ccf4a8 |
| 9ce867f585 |
| 124c31a68b |
| 62a01f3801 |
@@ -14,18 +14,27 @@
services:

  tempo-init:
    image: busybox:latest
    command: ["sh", "-c", "chown -R 10001:10001 /var/tempo"]
    volumes:
      - ./tempo-data:/var/tempo
    user: root
    networks:
      - otel-network
    restart: "no"

  tempo:
    image: grafana/tempo:latest
    #user: root # The container must be started with root to execute chown in the script
    #entrypoint: [ "/etc/tempo/entrypoint.sh" ] # Specify a custom entry point
    user: "10001" # The container must be started with root to execute chown in the script
    command: [ "-config.file=/etc/tempo.yaml" ] # This is passed as a parameter to the entry point script
    volumes:
      - ./tempo-entrypoint.sh:/etc/tempo/entrypoint.sh # Mount entry point script
      - ./tempo.yaml:/etc/tempo.yaml
      - ./tempo.yaml:/etc/tempo.yaml:ro
      - ./tempo-data:/var/tempo
    ports:
      - "3200:3200"   # tempo
      - "24317:4317"  # otlp grpc
    restart: unless-stopped
    networks:
      - otel-network
@@ -94,4 +103,4 @@ networks:
    driver: bridge
    name: "network_otel_config"
    driver_opts:
      com.docker.network.enable_ipv6: "true"
@@ -42,9 +42,9 @@ exporters:
    namespace: "rustfs"    # metric prefix
    send_timestamps: true  # send timestamps
    # enable_open_metrics: true
  loki: # Loki exporter for log data
  otlphttp/loki: # Loki exporter for log data
    # endpoint: "http://loki:3100/otlp/v1/logs"
    endpoint: "http://loki:3100/loki/api/v1/push"
    endpoint: "http://loki:3100/otlp/v1/logs"
    tls:
      insecure: true
extensions:
@@ -65,7 +65,7 @@ service:
    logs:
      receivers: [ otlp ]
      processors: [ batch ]
      exporters: [ loki ]
      exporters: [ otlphttp/loki ]
  telemetry:
    logs:
      level: "info" # Collector log level
@@ -1,8 +0,0 @@
#!/bin/sh
# Run as root to fix directory permissions
chown -R 10001:10001 /var/tempo

# Use su-exec (a lightweight sudo/gosu alternative, commonly used in Alpine images)
# Switch to user 10001 and execute the original command (CMD) passed to the script
# "$@" represents all parameters passed to this script, i.e. command in docker-compose
exec su-exec 10001:10001 /tempo "$@"
.gitignore (4 changes, vendored)
@@ -20,4 +20,6 @@ profile.json
.docker/openobserve-otel/data
*.zst
.secrets
*.go
*.pb
*.svg
CLAUDE.md (122 changes, new file)
@@ -0,0 +1,122 @@
# CLAUDE.md

This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.

## Project Overview

RustFS is a high-performance distributed object storage software built with Rust, providing S3-compatible APIs and advanced features like data lakes, AI, and big data support. It's designed as an alternative to MinIO with better performance and a more business-friendly Apache 2.0 license.

## Build Commands

### Primary Build Commands
- `cargo build --release` - Build the main RustFS binary
- `./build-rustfs.sh` - Recommended build script that handles console resources and cross-platform compilation
- `./build-rustfs.sh --dev` - Development build with debug symbols
- `make build` or `just build` - Use Make/Just for standardized builds

### Platform-Specific Builds
- `./build-rustfs.sh --platform x86_64-unknown-linux-musl` - Build for musl target
- `./build-rustfs.sh --platform aarch64-unknown-linux-gnu` - Build for ARM64
- `make build-musl` or `just build-musl` - Build musl variant
- `make build-cross-all` - Build all supported architectures

### Testing Commands
- `cargo test --workspace --exclude e2e_test` - Run unit tests (excluding e2e tests)
- `cargo nextest run --all --exclude e2e_test` - Use nextest if available (faster)
- `cargo test --all --doc` - Run documentation tests
- `make test` or `just test` - Run full test suite

### Code Quality
- `cargo fmt --all` - Format code
- `cargo clippy --all-targets --all-features -- -D warnings` - Lint code
- `make pre-commit` or `just pre-commit` - Run all quality checks (fmt, clippy, check, test)

### Docker Build Commands
- `make docker-buildx` - Build multi-architecture production images
- `make docker-dev-local` - Build development image for local use
- `./docker-buildx.sh --push` - Build and push production images

## Architecture Overview

### Core Components

**Main Binary (`rustfs/`):**
- Entry point at `rustfs/src/main.rs`
- Core modules: admin, auth, config, server, storage, license management, profiling
- HTTP server with S3-compatible APIs
- Service state management and graceful shutdown
- Parallel service initialization with DNS resolver, bucket metadata, and IAM

**Key Crates (`crates/`):**
- `ecstore` - Erasure coding storage implementation (core storage layer)
- `iam` - Identity and Access Management
- `madmin` - Management dashboard and admin API interface
- `s3select-api` & `s3select-query` - S3 Select API and query engine
- `config` - Configuration management with notify features
- `crypto` - Cryptography and security features
- `lock` - Distributed locking implementation
- `filemeta` - File metadata management
- `rio` - Rust I/O utilities and abstractions
- `common` - Shared utilities and data structures
- `protos` - Protocol buffer definitions
- `audit-logger` - Audit logging for file operations
- `notify` - Event notification system
- `obs` - Observability utilities
- `workers` - Worker thread pools and task scheduling
- `appauth` - Application authentication and authorization

### Build System
- Cargo workspace with 25+ crates
- Custom `build-rustfs.sh` script for advanced build options
- Multi-architecture Docker builds via `docker-buildx.sh`
- Both Make and Just task runners supported
- Cross-compilation support for multiple Linux targets

### Key Dependencies
- `axum` - HTTP framework for S3 API server
- `tokio` - Async runtime
- `s3s` - S3 protocol implementation library
- `datafusion` - For S3 Select query processing
- `hyper`/`hyper-util` - HTTP client/server utilities
- `rustls` - TLS implementation
- `serde`/`serde_json` - Serialization
- `tracing` - Structured logging and observability
- `pprof` - Performance profiling with flamegraph support
- `tikv-jemallocator` - Memory allocator for Linux GNU builds

### Development Workflow
- Console resources are embedded during build via `rust-embed`
- Protocol buffers generated via custom `gproto` binary
- E2E tests in separate crate (`e2e_test`)
- Shadow build for version/metadata embedding
- Support for both GNU and musl libc targets

### Performance & Observability
- Performance profiling available with `pprof` integration (disabled on Windows)
- Profiling enabled via environment variables in production
- Built-in observability with OpenTelemetry integration
- Background services (scanner, heal) can be controlled via environment variables:
  - `RUSTFS_ENABLE_SCANNER` (default: true)
  - `RUSTFS_ENABLE_HEAL` (default: true)

### Service Architecture
- Service state management with graceful shutdown handling
- Parallel initialization of core systems (DNS, bucket metadata, IAM)
- Event notification system with MQTT and webhook support
- Auto-heal and data scanner for storage integrity
- Jemalloc allocator for Linux GNU targets for better performance

## Environment Variables
- `RUSTFS_ENABLE_SCANNER` - Enable/disable background data scanner
- `RUSTFS_ENABLE_HEAL` - Enable/disable auto-heal functionality
- Various profiling and observability controls
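These toggles are plain boolean environment variables read at startup. A minimal, self-contained sketch of how such a toggle can be parsed; the `env_flag` helper is illustrative, not RustFS's actual API:

```rust
use std::env;

/// Read a boolean toggle such as RUSTFS_ENABLE_SCANNER, falling back to a default.
/// Accepts the common spellings "true"/"1"/"on" (case-insensitive) as enabled.
fn env_flag(key: &str, default: bool) -> bool {
    match env::var(key) {
        Ok(v) => matches!(v.trim().to_ascii_lowercase().as_str(), "true" | "1" | "on"),
        Err(_) => default,
    }
}

fn main() {
    // Both documented defaults are true.
    let scanner_enabled = env_flag("RUSTFS_ENABLE_SCANNER", true);
    let heal_enabled = env_flag("RUSTFS_ENABLE_HEAL", true);
    println!("scanner: {scanner_enabled}, heal: {heal_enabled}");
}
```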
## Code Style
- Communicate with me in Chinese, but only English can be used in code files.
- Code that may cause program crashes (such as unwrap/expect) must not be used, except for testing purposes.
- Code that may cause performance issues (such as blocking IO) must not be used, except for testing purposes.
- Code that may cause memory leaks must not be used, except for testing purposes.
- Code that may cause deadlocks must not be used, except for testing purposes.
- Code that may cause undefined behavior must not be used, except for testing purposes.
- Code that may cause panics must not be used, except for testing purposes.
- Code that may cause data races must not be used, except for testing purposes.
Cargo.lock (898 changes, generated): file diff suppressed because it is too large
Cargo.toml (38 changes)
@@ -100,6 +100,8 @@ atomic_enum = "0.3.0"
aws-config = { version = "1.8.6" }
aws-sdk-s3 = "1.101.0"
axum = "0.8.4"
axum-extra = "0.10.1"
axum-server = "0.7.2"
base64-simd = "0.8.0"
base64 = "0.22.1"
brotli = "8.0.2"
@@ -109,27 +111,26 @@ byteorder = "1.5.0"
cfg-if = "1.0.3"
crc-fast = "1.5.0"
chacha20poly1305 = { version = "0.10.1" }
chrono = { version = "0.4.41", features = ["serde"] }
clap = { version = "4.5.46", features = ["derive", "env"] }
const-str = { version = "0.6.4", features = ["std", "proc"] }
chrono = { version = "0.4.42", features = ["serde"] }
clap = { version = "4.5.47", features = ["derive", "env"] }
const-str = { version = "0.7.0", features = ["std", "proc"] }
crc32fast = "1.5.0"
criterion = { version = "0.7", features = ["html_reports"] }
crossbeam-queue = "0.3.12"
dashmap = "6.1.0"
datafusion = "46.0.1"
derive_builder = "0.20.2"
enumset = "1.1.10"
flatbuffers = "25.2.10"
flate2 = "1.1.2"
flexi_logger = { version = "0.31.2", features = ["trc", "dont_minimize_extra_stacks"] }
flexi_logger = { version = "0.31.2", features = ["trc", "dont_minimize_extra_stacks", "compress", "kv"] }
form_urlencoded = "1.2.2"
futures = "0.3.31"
futures-core = "0.3.31"
futures-util = "0.3.31"
glob = "0.3.3"
hex = "0.4.3"
hex-simd = "0.8.0"
highway = { version = "1.3.0" }
hickory-proto = "0.25.2"
hickory-resolver = { version = "0.25.2", features = ["tls-ring"] }
hmac = "0.12.1"
hyper = "1.7.0"
@@ -141,7 +142,7 @@ hyper-util = { version = "0.1.16", features = [
hyper-rustls = "0.27.7"
http = "1.3.1"
http-body = "1.0.1"
humantime = "2.2.0"
humantime = "2.3.0"
ipnetwork = { version = "0.21.1", features = ["serde"] }
jsonwebtoken = "9.3.1"
lazy_static = "1.5.0"
@@ -196,11 +197,11 @@ reqwest = { version = "0.12.23", default-features = false, features = [
    "json",
    "blocking",
] }
rmcp = { version = "0.6.1" }
rmcp = { version = "0.6.4" }
rmp = "0.8.14"
rmp-serde = "1.3.0"
rsa = "0.9.8"
rumqttc = { version = "0.24" }
rumqttc = { version = "0.25.0" }
rust-embed = { version = "8.7.2" }
rustfs-rsc = "2025.506.1"
rustls = { version = "0.23.31" }
@@ -208,8 +209,8 @@ rustls-pki-types = "1.12.0"
rustls-pemfile = "2.2.0"
s3s = { version = "0.12.0-minio-preview.3" }
schemars = "1.0.4"
serde = { version = "1.0.219", features = ["derive"] }
serde_json = { version = "1.0.143", features = ["raw_value"] }
serde = { version = "1.0.223", features = ["derive"] }
serde_json = { version = "1.0.145", features = ["raw_value"] }
serde_urlencoded = "0.7.1"
serial_test = "3.2.0"
sha1 = "0.10.6"
@@ -217,17 +218,18 @@ sha2 = "0.10.9"
shadow-rs = { version = "1.3.0", default-features = false }
siphasher = "1.0.1"
smallvec = { version = "1.15.1", features = ["serde"] }
snafu = "0.8.8"
smartstring = "1.0.1"
snafu = "0.8.9"
snap = "1.1.1"
socket2 = "0.6.0"
strum = { version = "0.27.2", features = ["derive"] }
sysinfo = "0.37.0"
sysctl = "0.6.0"
tempfile = "3.21.0"
tempfile = "3.22.0"
temp-env = "0.3.6"
test-case = "3.3.1"
thiserror = "2.0.16"
time = { version = "0.3.42", features = [
time = { version = "0.3.43", features = [
    "std",
    "parsing",
    "formatting",
@@ -240,9 +242,9 @@ tokio-stream = { version = "0.1.17" }
tokio-tar = "0.3.1"
tokio-test = "0.4.4"
tokio-util = { version = "0.7.16", features = ["io", "compat"] }
tonic = { version = "0.14.1", features = ["gzip"] }
tonic-prost = { version = "0.14.1" }
tonic-prost-build = { version = "0.14.1" }
tonic = { version = "0.14.2", features = ["gzip"] }
tonic-prost = { version = "0.14.2" }
tonic-prost-build = { version = "0.14.2" }
tower = { version = "0.5.2", features = ["timeout"] }
tower-http = { version = "0.6.6", features = ["cors"] }
tracing = "0.1.41"
@@ -253,7 +255,7 @@ tracing-subscriber = { version = "0.3.20", features = ["env-filter", "time"] }
transform-stream = "0.3.1"
url = "2.5.7"
urlencoding = "2.1.3"
uuid = { version = "1.18.0", features = [
uuid = { version = "1.18.1", features = [
    "v4",
    "fast-rng",
    "macro-diagnostics",
@@ -69,15 +69,19 @@ RUN chmod +x /usr/bin/rustfs /entrypoint.sh && \
    chmod 0750 /data /logs

ENV RUSTFS_ADDRESS=":9000" \
    RUSTFS_CONSOLE_ADDRESS=":9001" \
    RUSTFS_ACCESS_KEY="rustfsadmin" \
    RUSTFS_SECRET_KEY="rustfsadmin" \
    RUSTFS_CONSOLE_ENABLE="true" \
    RUSTFS_EXTERNAL_ADDRESS="" \
    RUSTFS_CORS_ALLOWED_ORIGINS="*" \
    RUSTFS_CONSOLE_CORS_ALLOWED_ORIGINS="*" \
    RUSTFS_VOLUMES="/data" \
    RUST_LOG="warn" \
    RUSTFS_OBS_LOG_DIRECTORY="/logs" \
    RUSTFS_SINKS_FILE_PATH="/logs"

EXPOSE 9000
EXPOSE 9000 9001
VOLUME ["/data", "/logs"]

ENTRYPOINT ["/entrypoint.sh"]
README.md (14 changes)
@@ -74,9 +74,9 @@ To get started with RustFS, follow these steps:

1. **One-click installation script (Option 1)**

   ```bash
   curl -O https://rustfs.com/install_rustfs.sh && bash install_rustfs.sh
   ```

2. **Docker Quick Start (Option 2)**

@@ -91,6 +91,14 @@ To get started with RustFS, follow these steps:
   docker run -d -p 9000:9000 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:1.0.0.alpha.45
   ```

   For a Docker installation, you can also run the container with Docker Compose. With the `docker-compose.yml` file in the repository root, run:

   ```
   docker compose --profile observability up -d
   ```

   **NOTE**: Have a look at the `docker-compose.yaml` file first, because it defines several services. The Grafana, Prometheus, and Jaeger containers it launches support RustFS observability. If you also want to start the Redis or Nginx containers, specify the corresponding profiles.

3. **Build from Source (Option 3) - Advanced Users**

   For developers who want to build RustFS Docker images from source with multi-architecture support:
@@ -74,6 +74,14 @@ RustFS is built with Rust (one of the most popular programming languages in the world)
   docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs
   ```

   For a Docker installation, you can also use `docker compose` to start a rustfs instance. There is a `docker-compose.yml` file in the repository root; just run:

   ```
   docker compose --profile observability up -d
   ```

   **NOTE**: Before using `docker compose`, read `docker-compose.yaml` carefully: besides rustfs it defines several other services (grafana, prometheus, jaeger, and so on) that support rustfs observability, plus redis and nginx. Use the `--profile` flag to select which containers to start.

3. **Access the console**: Open a web browser and navigate to `http://localhost:9000` to access the RustFS console; the default username and password are `rustfsadmin`.
4. **Create a bucket**: Use the console to create a new bucket for your objects.
5. **Upload objects**: Upload files directly through the console, or interact with your RustFS instance via the S3-compatible API.
@@ -17,7 +17,6 @@ rustfs-ecstore = { workspace = true }
rustfs-common = { workspace = true }
rustfs-filemeta = { workspace = true }
rustfs-madmin = { workspace = true }
rustfs-utils = { workspace = true }
tokio = { workspace = true, features = ["full"] }
tokio-util = { workspace = true }
tracing = { workspace = true }
@@ -29,10 +28,7 @@ uuid = { workspace = true, features = ["v4", "serde"] }
anyhow = { workspace = true }
async-trait = { workspace = true }
futures = { workspace = true }
url = { workspace = true }
rustfs-lock = { workspace = true }
s3s = { workspace = true }
lazy_static = { workspace = true }
chrono = { workspace = true }
rand = { workspace = true }
reqwest = { workspace = true }
@@ -44,5 +40,3 @@ serial_test = "3.2.0"
tracing-subscriber = { workspace = true }
walkdir = "2.5.0"
tempfile = { workspace = true }
criterion = { workspace = true, features = ["html_reports"] }
sysinfo = "0.30.8"
@@ -340,7 +340,7 @@ impl HealTask {
            Ok((result, error)) => {
                if let Some(e) = error {
                    // Check if this is a "File not found" error during delete operations
                    let error_msg = format!("{}", e);
                    let error_msg = format!("{e}");
                    if error_msg.contains("File not found") || error_msg.contains("not found") {
                        info!(
                            "Object {}/{} not found during heal - likely deleted intentionally, treating as successful",
@@ -395,7 +395,7 @@ impl HealTask {
            }
            Err(e) => {
                // Check if this is a "File not found" error during delete operations
                let error_msg = format!("{}", e);
                let error_msg = format!("{e}");
                if error_msg.contains("File not found") || error_msg.contains("not found") {
                    info!(
                        "Object {}/{} not found during heal - likely deleted intentionally, treating as successful",
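The two hunks above replace positional `format!("{}", e)` with inlined `format!("{e}")` and classify not-found errors by message substring. A self-contained sketch of that classification; the helper name is illustrative, not the crate's API:

```rust
/// Treat "File not found" style errors during heal of deleted objects as success.
fn is_not_found_msg(err: &dyn std::fmt::Display) -> bool {
    // Same inlined-format style as the diff above.
    let msg = format!("{err}");
    msg.contains("File not found") || msg.contains("not found")
}

fn main() {
    let e = std::io::Error::new(std::io::ErrorKind::NotFound, "File not found");
    println!("classified as not-found: {}", is_not_found_msg(&e));
}
```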
@@ -124,7 +124,7 @@ pub const DEFAULT_LOG_FILENAME: &str = "rustfs";
/// This is the default log filename for OBS.
/// It is used to store the logs of the application.
/// Default value: rustfs.log
pub const DEFAULT_OBS_LOG_FILENAME: &str = concat!(DEFAULT_LOG_FILENAME, ".");
pub const DEFAULT_OBS_LOG_FILENAME: &str = concat!(DEFAULT_LOG_FILENAME, "");

/// Default sink file log file for rustfs
/// This is the default sink file log file for rustfs.
@@ -160,6 +160,16 @@ pub const DEFAULT_LOG_ROTATION_TIME: &str = "day";
/// Environment variable: RUSTFS_OBS_LOG_KEEP_FILES
pub const DEFAULT_LOG_KEEP_FILES: u16 = 30;

/// This is the external address for rustfs to access endpoint (used in Docker deployments).
/// This should match the mapped host port when using Docker port mapping.
/// Example: ":9020" when mapping host port 9020 to container port 9000.
/// Default value: DEFAULT_ADDRESS
/// Environment variable: RUSTFS_EXTERNAL_ADDRESS
/// Command line argument: --external-address
/// Example: RUSTFS_EXTERNAL_ADDRESS=":9020"
/// Example: --external-address ":9020"
pub const ENV_EXTERNAL_ADDRESS: &str = "RUSTFS_EXTERNAL_ADDRESS";

#[cfg(test)]
mod tests {
    use super::*;
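A sketch of how a consumer might resolve the new external address, assuming only what the doc comments above state; the literal `":9000"` stands in for the crate's `DEFAULT_ADDRESS`:

```rust
use std::env;

const ENV_EXTERNAL_ADDRESS: &str = "RUSTFS_EXTERNAL_ADDRESS";
const DEFAULT_ADDRESS: &str = ":9000"; // assumed stand-in for the crate's DEFAULT_ADDRESS

/// Resolve the externally advertised address: env var first, then the default.
fn external_address() -> String {
    env::var(ENV_EXTERNAL_ADDRESS)
        .ok()
        .filter(|s| !s.trim().is_empty())
        .unwrap_or_else(|| DEFAULT_ADDRESS.to_string())
}

fn main() {
    // With RUSTFS_EXTERNAL_ADDRESS=":9020" set, prints ":9020"; otherwise ":9000".
    println!("external address: {}", external_address());
}
```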
crates/config/src/constants/console.rs (91 changes, new file)
@@ -0,0 +1,91 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/// CORS allowed origins for the endpoint service
/// Comma-separated list of origins or "*" for all origins
pub const ENV_CORS_ALLOWED_ORIGINS: &str = "RUSTFS_CORS_ALLOWED_ORIGINS";

/// Default CORS allowed origins for the endpoint service
/// Comes from the console service default
/// See DEFAULT_CONSOLE_CORS_ALLOWED_ORIGINS
pub const DEFAULT_CORS_ALLOWED_ORIGINS: &str = DEFAULT_CONSOLE_CORS_ALLOWED_ORIGINS;

/// CORS allowed origins for the console service
/// Comma-separated list of origins or "*" for all origins
pub const ENV_CONSOLE_CORS_ALLOWED_ORIGINS: &str = "RUSTFS_CONSOLE_CORS_ALLOWED_ORIGINS";

/// Default CORS allowed origins for the console service
pub const DEFAULT_CONSOLE_CORS_ALLOWED_ORIGINS: &str = "*";

/// Enable or disable the console service
pub const ENV_CONSOLE_ENABLE: &str = "RUSTFS_CONSOLE_ENABLE";

/// Address for the console service to bind to
pub const ENV_CONSOLE_ADDRESS: &str = "RUSTFS_CONSOLE_ADDRESS";

/// RUSTFS_CONSOLE_RATE_LIMIT_ENABLE
/// Enable or disable rate limiting for the console service
pub const ENV_CONSOLE_RATE_LIMIT_ENABLE: &str = "RUSTFS_CONSOLE_RATE_LIMIT_ENABLE";

/// Default console rate limit enable
/// This is the default value for enabling rate limiting on the console server.
/// Rate limiting helps protect against abuse and DoS attacks on the management interface.
/// Default value: false
/// Environment variable: RUSTFS_CONSOLE_RATE_LIMIT_ENABLE
/// Command line argument: --console-rate-limit-enable
/// Example: RUSTFS_CONSOLE_RATE_LIMIT_ENABLE=true
/// Example: --console-rate-limit-enable true
pub const DEFAULT_CONSOLE_RATE_LIMIT_ENABLE: bool = false;

/// Set the rate limit requests per minute for the console service
/// Limits the number of requests per minute per client IP when rate limiting is enabled
/// Default: 100 requests per minute
pub const ENV_CONSOLE_RATE_LIMIT_RPM: &str = "RUSTFS_CONSOLE_RATE_LIMIT_RPM";

/// Default console rate limit requests per minute
/// This is the default rate limit for console requests when rate limiting is enabled.
/// Limits the number of requests per minute per client IP to prevent abuse.
/// Default value: 100 requests per minute
/// Environment variable: RUSTFS_CONSOLE_RATE_LIMIT_RPM
/// Command line argument: --console-rate-limit-rpm
/// Example: RUSTFS_CONSOLE_RATE_LIMIT_RPM=100
/// Example: --console-rate-limit-rpm 100
pub const DEFAULT_CONSOLE_RATE_LIMIT_RPM: u32 = 100;

/// Set the console authentication timeout in seconds
/// Specifies how long a console authentication session remains valid
/// Default: 3600 seconds (1 hour)
/// Minimum: 300 seconds (5 minutes)
/// Maximum: 86400 seconds (24 hours)
pub const ENV_CONSOLE_AUTH_TIMEOUT: &str = "RUSTFS_CONSOLE_AUTH_TIMEOUT";

/// Default console authentication timeout in seconds
/// This is the default timeout for console authentication sessions.
/// After this timeout, users need to re-authenticate to access the console.
/// Default value: 3600 seconds (1 hour)
/// Environment variable: RUSTFS_CONSOLE_AUTH_TIMEOUT
/// Command line argument: --console-auth-timeout
/// Example: RUSTFS_CONSOLE_AUTH_TIMEOUT=3600
/// Example: --console-auth-timeout 3600
pub const DEFAULT_CONSOLE_AUTH_TIMEOUT: u64 = 3600;

/// Toggle update check
/// It controls whether to check for newer versions of rustfs
/// Default value: true
/// Environment variable: RUSTFS_CHECK_UPDATE
/// Example: RUSTFS_CHECK_UPDATE=false
pub const ENV_UPDATE_CHECK: &str = "RUSTFS_CHECK_UPDATE";

/// Default value for update toggle
pub const DEFAULT_UPDATE_CHECK: bool = true;
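A sketch of wiring these console constants together at startup; the `ConsoleConfig` struct is illustrative, and the clamp range comes from the documented minimum and maximum:

```rust
use std::env;

const ENV_CONSOLE_RATE_LIMIT_RPM: &str = "RUSTFS_CONSOLE_RATE_LIMIT_RPM";
const DEFAULT_CONSOLE_RATE_LIMIT_RPM: u32 = 100;
const ENV_CONSOLE_AUTH_TIMEOUT: &str = "RUSTFS_CONSOLE_AUTH_TIMEOUT";
const DEFAULT_CONSOLE_AUTH_TIMEOUT: u64 = 3600;

#[derive(Debug)]
struct ConsoleConfig {
    rate_limit_rpm: u32,
    auth_timeout_secs: u64,
}

fn load_console_config() -> ConsoleConfig {
    let rate_limit_rpm = env::var(ENV_CONSOLE_RATE_LIMIT_RPM)
        .ok()
        .and_then(|v| v.parse().ok())
        .unwrap_or(DEFAULT_CONSOLE_RATE_LIMIT_RPM);
    let auth_timeout_secs = env::var(ENV_CONSOLE_AUTH_TIMEOUT)
        .ok()
        .and_then(|v| v.parse().ok())
        .unwrap_or(DEFAULT_CONSOLE_AUTH_TIMEOUT)
        .clamp(300, 86_400); // documented minimum 5 minutes, maximum 24 hours
    ConsoleConfig { rate_limit_rpm, auth_timeout_secs }
}

fn main() {
    println!("{:?}", load_console_config());
}
```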
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod app;
pub mod env;
pub mod tls;
pub(crate) mod app;
pub(crate) mod console;
pub(crate) mod env;
pub(crate) mod tls;
@@ -17,6 +17,8 @@ pub mod constants;
#[cfg(feature = "constants")]
pub use constants::app::*;
#[cfg(feature = "constants")]
pub use constants::console::*;
#[cfg(feature = "constants")]
pub use constants::env::*;
#[cfg(feature = "constants")]
pub use constants::tls::*;
@@ -29,7 +29,70 @@ pub const ENV_OBS_LOG_ROTATION_SIZE_MB: &str = "RUSTFS_OBS_LOG_ROTATION_SIZE_MB"
pub const ENV_OBS_LOG_ROTATION_TIME: &str = "RUSTFS_OBS_LOG_ROTATION_TIME";
pub const ENV_OBS_LOG_KEEP_FILES: &str = "RUSTFS_OBS_LOG_KEEP_FILES";

/// Log pool capacity for async logging
pub const ENV_OBS_LOG_POOL_CAPA: &str = "RUSTFS_OBS_LOG_POOL_CAPA";

/// Log message capacity for async logging
pub const ENV_OBS_LOG_MESSAGE_CAPA: &str = "RUSTFS_OBS_LOG_MESSAGE_CAPA";

/// Log flush interval in milliseconds for async logging
pub const ENV_OBS_LOG_FLUSH_MS: &str = "RUSTFS_OBS_LOG_FLUSH_MS";

/// Default values for log pool
pub const DEFAULT_OBS_LOG_POOL_CAPA: usize = 10240;

/// Default values for message capacity
pub const DEFAULT_OBS_LOG_MESSAGE_CAPA: usize = 32768;

/// Default values for flush interval in milliseconds
pub const DEFAULT_OBS_LOG_FLUSH_MS: u64 = 200;

/// Audit logger queue capacity environment variable key
pub const ENV_AUDIT_LOGGER_QUEUE_CAPACITY: &str = "RUSTFS_AUDIT_LOGGER_QUEUE_CAPACITY";

// Default values for observability configuration
/// Default values for observability configuration
pub const DEFAULT_AUDIT_LOGGER_QUEUE_CAPACITY: usize = 10000;

/// Default values for observability configuration
// ### Supported Environment Values
// - `production` - Secure file-only logging
// - `development` - Full debugging with stdout
// - `test` - Test environment with stdout support
// - `staging` - Staging environment with stdout support
pub const DEFAULT_OBS_ENVIRONMENT_PRODUCTION: &str = "production";
pub const DEFAULT_OBS_ENVIRONMENT_DEVELOPMENT: &str = "development";
pub const DEFAULT_OBS_ENVIRONMENT_TEST: &str = "test";
pub const DEFAULT_OBS_ENVIRONMENT_STAGING: &str = "staging";

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_env_keys() {
        assert_eq!(ENV_OBS_ENDPOINT, "RUSTFS_OBS_ENDPOINT");
        assert_eq!(ENV_OBS_USE_STDOUT, "RUSTFS_OBS_USE_STDOUT");
        assert_eq!(ENV_OBS_SAMPLE_RATIO, "RUSTFS_OBS_SAMPLE_RATIO");
        assert_eq!(ENV_OBS_METER_INTERVAL, "RUSTFS_OBS_METER_INTERVAL");
        assert_eq!(ENV_OBS_SERVICE_NAME, "RUSTFS_OBS_SERVICE_NAME");
        assert_eq!(ENV_OBS_SERVICE_VERSION, "RUSTFS_OBS_SERVICE_VERSION");
        assert_eq!(ENV_OBS_ENVIRONMENT, "RUSTFS_OBS_ENVIRONMENT");
        assert_eq!(ENV_OBS_LOGGER_LEVEL, "RUSTFS_OBS_LOGGER_LEVEL");
        assert_eq!(ENV_OBS_LOCAL_LOGGING_ENABLED, "RUSTFS_OBS_LOCAL_LOGGING_ENABLED");
        assert_eq!(ENV_OBS_LOG_DIRECTORY, "RUSTFS_OBS_LOG_DIRECTORY");
        assert_eq!(ENV_OBS_LOG_FILENAME, "RUSTFS_OBS_LOG_FILENAME");
        assert_eq!(ENV_OBS_LOG_ROTATION_SIZE_MB, "RUSTFS_OBS_LOG_ROTATION_SIZE_MB");
        assert_eq!(ENV_OBS_LOG_ROTATION_TIME, "RUSTFS_OBS_LOG_ROTATION_TIME");
        assert_eq!(ENV_OBS_LOG_KEEP_FILES, "RUSTFS_OBS_LOG_KEEP_FILES");
        assert_eq!(ENV_AUDIT_LOGGER_QUEUE_CAPACITY, "RUSTFS_AUDIT_LOGGER_QUEUE_CAPACITY");
    }

    #[test]
    fn test_default_values() {
        assert_eq!(DEFAULT_AUDIT_LOGGER_QUEUE_CAPACITY, 10000);
        assert_eq!(DEFAULT_OBS_ENVIRONMENT_PRODUCTION, "production");
        assert_eq!(DEFAULT_OBS_ENVIRONMENT_DEVELOPMENT, "development");
        assert_eq!(DEFAULT_OBS_ENVIRONMENT_TEST, "test");
        assert_eq!(DEFAULT_OBS_ENVIRONMENT_STAGING, "staging");
    }
}
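A sketch of consuming one of the async-logging knobs above, turning the flush interval into a `Duration`; the helper name is illustrative:

```rust
use std::{env, time::Duration};

const ENV_OBS_LOG_FLUSH_MS: &str = "RUSTFS_OBS_LOG_FLUSH_MS";
const DEFAULT_OBS_LOG_FLUSH_MS: u64 = 200;

/// Env override wins; otherwise fall back to the documented 200 ms default.
fn log_flush_interval() -> Duration {
    let ms = env::var(ENV_OBS_LOG_FLUSH_MS)
        .ok()
        .and_then(|v| v.parse::<u64>().ok())
        .unwrap_or(DEFAULT_OBS_LOG_FLUSH_MS);
    Duration::from_millis(ms)
}

fn main() {
    println!("flush every {:?}", log_flush_interval());
}
```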
@@ -101,6 +101,8 @@ rustfs-signer.workspace = true
rustfs-checksums.workspace = true
futures-util.workspace = true
async-recursion.workspace = true
parking_lot = "0.12"
moka = { version = "0.12", features = ["future"] }

[target.'cfg(not(windows))'.dependencies]
nix = { workspace = true }
crates/ecstore/src/batch_processor.rs (231 changes, new file)
@@ -0,0 +1,231 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! High-performance batch processor using JoinSet
//!
//! This module provides optimized batching utilities to reduce async runtime overhead
//! and improve concurrent operation performance.

use crate::disk::error::{Error, Result};
use std::future::Future;
use std::sync::Arc;
use tokio::task::JoinSet;

/// Batch processor that executes tasks concurrently with a semaphore
pub struct AsyncBatchProcessor {
    max_concurrent: usize,
}

impl AsyncBatchProcessor {
    pub fn new(max_concurrent: usize) -> Self {
        Self { max_concurrent }
    }

    /// Execute a batch of tasks concurrently with concurrency control
    pub async fn execute_batch<T, F>(&self, tasks: Vec<F>) -> Vec<Result<T>>
    where
        T: Send + 'static,
        F: Future<Output = Result<T>> + Send + 'static,
    {
        if tasks.is_empty() {
            return Vec::new();
        }

        let semaphore = Arc::new(tokio::sync::Semaphore::new(self.max_concurrent));
        let mut join_set = JoinSet::new();
        let mut results = Vec::with_capacity(tasks.len());
        for _ in 0..tasks.len() {
            results.push(Err(Error::other("Not completed")));
        }

        // Spawn all tasks with semaphore control
        for (i, task) in tasks.into_iter().enumerate() {
            let sem = semaphore.clone();
            join_set.spawn(async move {
                let _permit = sem.acquire().await.map_err(|_| Error::other("Semaphore error"))?;
                let result = task.await;
                Ok::<(usize, Result<T>), Error>((i, result))
            });
        }

        // Collect results
        while let Some(join_result) = join_set.join_next().await {
            match join_result {
                Ok(Ok((index, task_result))) => {
                    if index < results.len() {
                        results[index] = task_result;
                    }
                }
                Ok(Err(e)) => {
                    // Semaphore or other system error - this is rare
                    tracing::warn!("Batch processor system error: {:?}", e);
                }
                Err(join_error) => {
                    // Task panicked - log but continue
                    tracing::warn!("Task panicked in batch processor: {:?}", join_error);
                }
            }
        }

        results
    }

    /// Execute batch with early termination when sufficient successful results are obtained
    pub async fn execute_batch_with_quorum<T, F>(&self, tasks: Vec<F>, required_successes: usize) -> Result<Vec<T>>
    where
        T: Send + 'static,
        F: Future<Output = Result<T>> + Send + 'static,
    {
        let results = self.execute_batch(tasks).await;
        let mut successes = Vec::new();

        for value in results.into_iter().flatten() {
            successes.push(value);
            if successes.len() >= required_successes {
                return Ok(successes);
            }
        }

        if successes.len() >= required_successes {
            Ok(successes)
        } else {
            Err(Error::other(format!(
                "Insufficient successful results: got {}, needed {}",
                successes.len(),
                required_successes
            )))
        }
    }
}

/// Global batch processor instances
pub struct GlobalBatchProcessors {
    read_processor: AsyncBatchProcessor,
    write_processor: AsyncBatchProcessor,
    metadata_processor: AsyncBatchProcessor,
}

impl GlobalBatchProcessors {
    pub fn new() -> Self {
        Self {
            read_processor: AsyncBatchProcessor::new(16),     // Higher concurrency for reads
            write_processor: AsyncBatchProcessor::new(8),     // Lower concurrency for writes
            metadata_processor: AsyncBatchProcessor::new(12), // Medium concurrency for metadata
        }
    }

    pub fn read_processor(&self) -> &AsyncBatchProcessor {
        &self.read_processor
    }

    pub fn write_processor(&self) -> &AsyncBatchProcessor {
        &self.write_processor
    }

    pub fn metadata_processor(&self) -> &AsyncBatchProcessor {
        &self.metadata_processor
    }
}

impl Default for GlobalBatchProcessors {
    fn default() -> Self {
        Self::new()
    }
}

// Global instance
use std::sync::OnceLock;

static GLOBAL_PROCESSORS: OnceLock<GlobalBatchProcessors> = OnceLock::new();

pub fn get_global_processors() -> &'static GlobalBatchProcessors {
    GLOBAL_PROCESSORS.get_or_init(GlobalBatchProcessors::new)
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::time::Duration;

    #[tokio::test]
    async fn test_batch_processor_basic() {
        let processor = AsyncBatchProcessor::new(4);

        let tasks: Vec<_> = (0..10)
            .map(|i| async move {
                tokio::time::sleep(Duration::from_millis(10)).await;
                Ok::<i32, Error>(i)
            })
            .collect();

        let results = processor.execute_batch(tasks).await;
        assert_eq!(results.len(), 10);

        // All tasks should succeed
        for (i, result) in results.iter().enumerate() {
            assert!(result.is_ok());
            assert_eq!(result.as_ref().unwrap(), &(i as i32));
        }
    }

    #[tokio::test]
    async fn test_batch_processor_with_errors() {
        let processor = AsyncBatchProcessor::new(2);

        let tasks: Vec<_> = (0..5)
            .map(|i| async move {
                tokio::time::sleep(Duration::from_millis(10)).await;
                if i % 2 == 0 {
                    Ok::<i32, Error>(i)
                } else {
                    Err(Error::other("Test error"))
                }
            })
            .collect();

        let results = processor.execute_batch(tasks).await;
        assert_eq!(results.len(), 5);

        // Check results pattern
        for (i, result) in results.iter().enumerate() {
            if i % 2 == 0 {
                assert!(result.is_ok());
                assert_eq!(result.as_ref().unwrap(), &(i as i32));
            } else {
                assert!(result.is_err());
            }
        }
    }

    #[tokio::test]
    async fn test_batch_processor_quorum() {
        let processor = AsyncBatchProcessor::new(4);

        let tasks: Vec<_> = (0..10)
            .map(|i| async move {
                tokio::time::sleep(Duration::from_millis(10)).await;
                if i < 3 {
                    Ok::<i32, Error>(i)
                } else {
                    Err(Error::other("Test error"))
                }
            })
            .collect();

        let results = processor.execute_batch_with_quorum(tasks, 2).await;
        assert!(results.is_ok());
        let successes = results.unwrap();
        assert!(successes.len() >= 2);
    }
}
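For readers outside the crate, the same JoinSet-plus-semaphore pattern that `execute_batch` uses can be tried as a self-contained program (tokio only; the concurrency limit, delays, and values are illustrative):

```rust
use std::sync::Arc;
use tokio::{sync::Semaphore, task::JoinSet};

#[tokio::main]
async fn main() {
    let semaphore = Arc::new(Semaphore::new(4)); // at most 4 tasks in flight
    let mut set = JoinSet::new();

    for i in 0..10u32 {
        let sem = semaphore.clone();
        set.spawn(async move {
            // acquire_owned keeps the permit alive for the task's whole body
            let _permit = match sem.acquire_owned().await {
                Ok(p) => p,
                Err(_) => return 0, // semaphore closed; cannot happen here
            };
            tokio::time::sleep(std::time::Duration::from_millis(10)).await;
            i * 2
        });
    }

    // Completion order is nondeterministic, hence the sort before printing.
    let mut results = Vec::new();
    while let Some(res) = set.join_next().await {
        if let Ok(v) = res {
            results.push(v);
        }
    }
    results.sort_unstable();
    println!("{results:?}"); // [0, 2, 4, ..., 18]
}
```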
@@ -321,7 +321,7 @@ impl ExpiryState {
        let mut state = GLOBAL_ExpiryState.write().await;

        while state.tasks_tx.len() < n {
            let (tx, rx) = mpsc::channel(10000);
            let (tx, rx) = mpsc::channel(1000);
            let api = api.clone();
            let rx = Arc::new(tokio::sync::Mutex::new(rx));
            state.tasks_tx.push(tx);
@@ -432,7 +432,7 @@ pub struct TransitionState {
impl TransitionState {
    #[allow(clippy::new_ret_no_self)]
    pub fn new() -> Arc<Self> {
        let (tx1, rx1) = bounded(100000);
        let (tx1, rx1) = bounded(1000);
        let (tx2, rx2) = bounded(1);
        Arc::new(Self {
            transition_tx: tx1,
@@ -467,8 +467,12 @@ impl TransitionState {
    }

    pub async fn init(api: Arc<ECStore>) {
        let mut n = 10; //globalAPIConfig.getTransitionWorkers();
        let tw = 10; //globalILMConfig.getTransitionWorkers();
        let max_workers = std::env::var("RUSTFS_MAX_TRANSITION_WORKERS")
            .ok()
            .and_then(|s| s.parse::<i64>().ok())
            .unwrap_or_else(|| std::cmp::min(num_cpus::get() as i64, 16));
        let mut n = max_workers;
        let tw = 8; //globalILMConfig.getTransitionWorkers();
        if tw > 0 {
            n = tw;
        }
@@ -561,8 +565,18 @@ impl TransitionState {
    pub async fn update_workers_inner(api: Arc<ECStore>, n: i64) {
        let mut n = n;
        if n == 0 {
            n = 100;
            let max_workers = std::env::var("RUSTFS_MAX_TRANSITION_WORKERS")
                .ok()
                .and_then(|s| s.parse::<i64>().ok())
                .unwrap_or_else(|| std::cmp::min(num_cpus::get() as i64, 16));
            n = max_workers;
        }
        // Allow environment override of maximum workers
        let absolute_max = std::env::var("RUSTFS_ABSOLUTE_MAX_WORKERS")
            .ok()
            .and_then(|s| s.parse::<i64>().ok())
            .unwrap_or(32);
        n = std::cmp::min(n, absolute_max);

        let mut num_workers = GLOBAL_TransitionState.num_workers.load(Ordering::SeqCst);
        while num_workers < n {
@@ -585,7 +599,10 @@ impl TransitionState {
}

pub async fn init_background_expiry(api: Arc<ECStore>) {
    let mut workers = num_cpus::get() / 2;
    let mut workers = std::env::var("RUSTFS_MAX_EXPIRY_WORKERS")
        .ok()
        .and_then(|s| s.parse::<usize>().ok())
        .unwrap_or_else(|| std::cmp::min(num_cpus::get(), 16));
    //globalILMConfig.getExpirationWorkers()
    if let Ok(env_expiration_workers) = env::var("_RUSTFS_ILM_EXPIRATION_WORKERS") {
        if let Ok(num_expirations) = env_expiration_workers.parse::<usize>() {
@@ -594,7 +611,10 @@ pub async fn init_background_expiry(api: Arc<ECStore>) {
    }

    if workers == 0 {
        workers = 100;
        workers = std::env::var("RUSTFS_DEFAULT_EXPIRY_WORKERS")
            .ok()
            .and_then(|s| s.parse::<usize>().ok())
            .unwrap_or(8);
    }

    //let expiry_state = GLOBAL_ExpiryStSate.write().await;
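Condensing the sizing logic above: an explicit env override wins, otherwise the count is derived from the CPU count, and everything is capped by `RUSTFS_ABSOLUTE_MAX_WORKERS`. A self-contained sketch (assumes the `num_cpus` crate, which the diff also uses):

```rust
use std::env;

/// Resolve a worker count: explicit env override, else min(cpus, 16),
/// finally capped by RUSTFS_ABSOLUTE_MAX_WORKERS (default 32).
fn transition_workers() -> i64 {
    let max_workers = env::var("RUSTFS_MAX_TRANSITION_WORKERS")
        .ok()
        .and_then(|s| s.parse::<i64>().ok())
        .unwrap_or_else(|| std::cmp::min(num_cpus::get() as i64, 16));
    let absolute_max = env::var("RUSTFS_ABSOLUTE_MAX_WORKERS")
        .ok()
        .and_then(|s| s.parse::<i64>().ok())
        .unwrap_or(32);
    std::cmp::min(max_workers, absolute_max)
}

fn main() {
    // On an 8-core machine with no overrides, this prints 8.
    println!("transition workers: {}", transition_workers());
}
```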
@@ -12,13 +12,45 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::{fs::Metadata, path::Path};
use std::{
    fs::Metadata,
    path::Path,
    sync::{Arc, OnceLock},
};

use tokio::{
    fs::{self, File},
    io,
};

static READONLY_OPTIONS: OnceLock<Arc<fs::OpenOptions>> = OnceLock::new();
static WRITEONLY_OPTIONS: OnceLock<Arc<fs::OpenOptions>> = OnceLock::new();
static READWRITE_OPTIONS: OnceLock<Arc<fs::OpenOptions>> = OnceLock::new();

fn get_readonly_options() -> &'static Arc<fs::OpenOptions> {
    READONLY_OPTIONS.get_or_init(|| {
        let mut opts = fs::OpenOptions::new();
        opts.read(true);
        Arc::new(opts)
    })
}

fn get_writeonly_options() -> &'static Arc<fs::OpenOptions> {
    WRITEONLY_OPTIONS.get_or_init(|| {
        let mut opts = fs::OpenOptions::new();
        opts.write(true);
        Arc::new(opts)
    })
}

fn get_readwrite_options() -> &'static Arc<fs::OpenOptions> {
    READWRITE_OPTIONS.get_or_init(|| {
        let mut opts = fs::OpenOptions::new();
        opts.read(true).write(true);
        Arc::new(opts)
    })
}

#[cfg(not(windows))]
pub fn same_file(f1: &Metadata, f2: &Metadata) -> bool {
    use std::os::unix::fs::MetadataExt;
@@ -84,35 +116,28 @@ pub const O_APPEND: FileMode = 0x00400;
// create_new: bool,

pub async fn open_file(path: impl AsRef<Path>, mode: FileMode) -> io::Result<File> {
    let mut opts = fs::OpenOptions::new();

    match mode & (O_RDONLY | O_WRONLY | O_RDWR) {
        O_RDONLY => {
            opts.read(true);
        }
        O_WRONLY => {
            opts.write(true);
        }
        O_RDWR => {
            opts.read(true);
            opts.write(true);
        }
        _ => (),
    let base_opts = match mode & (O_RDONLY | O_WRONLY | O_RDWR) {
        O_RDONLY => get_readonly_options(),
        O_WRONLY => get_writeonly_options(),
        O_RDWR => get_readwrite_options(),
        _ => get_readonly_options(),
    };

    if mode & O_CREATE != 0 {
        opts.create(true);
    if (mode & (O_CREATE | O_APPEND | O_TRUNC)) != 0 {
        let mut opts = (**base_opts).clone();
        if mode & O_CREATE != 0 {
            opts.create(true);
        }
        if mode & O_APPEND != 0 {
            opts.append(true);
        }
        if mode & O_TRUNC != 0 {
            opts.truncate(true);
        }
        opts.open(path.as_ref()).await
    } else {
        base_opts.open(path.as_ref()).await
    }

    if mode & O_APPEND != 0 {
        opts.append(true);
    }

    if mode & O_TRUNC != 0 {
        opts.truncate(true);
    }

    opts.open(path.as_ref()).await
}
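A usage sketch for the rewritten `open_file`, assuming the function and the `O_*` constants above are in scope; the path and payload are illustrative. Plain read-only opens take the hot path (cached `OpenOptions`, no clone), while create/append/truncate modes clone the cached base first:

```rust
use tokio::io::{AsyncReadExt, AsyncWriteExt};

async fn roundtrip(path: &str) -> std::io::Result<()> {
    // Cold path: O_CREATE | O_TRUNC forces a clone of the cached write-only options.
    let mut f = open_file(path, O_WRONLY | O_CREATE | O_TRUNC).await?;
    f.write_all(b"hello").await?;
    f.flush().await?;

    // Hot path: a plain read-only open reuses the cached OpenOptions directly.
    let mut f = open_file(path, O_RDONLY).await?;
    let mut buf = String::new();
    f.read_to_string(&mut buf).await?;
    assert_eq!(buf, "hello");
    Ok(())
}
```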
pub async fn access(path: impl AsRef<Path>) -> io::Result<()> {
@@ -121,7 +146,7 @@ pub async fn access(path: impl AsRef<Path>) -> io::Result<()> {
}

pub fn access_std(path: impl AsRef<Path>) -> io::Result<()> {
    tokio::task::block_in_place(|| std::fs::metadata(path))?;
    std::fs::metadata(path)?;
    Ok(())
}

@@ -130,7 +155,7 @@ pub async fn lstat(path: impl AsRef<Path>) -> io::Result<Metadata> {
}

pub fn lstat_std(path: impl AsRef<Path>) -> io::Result<Metadata> {
    tokio::task::block_in_place(|| std::fs::metadata(path))
    std::fs::metadata(path)
}

pub async fn make_dir_all(path: impl AsRef<Path>) -> io::Result<()> {
@@ -159,26 +184,22 @@ pub async fn remove_all(path: impl AsRef<Path>) -> io::Result<()> {
#[tracing::instrument(level = "debug", skip_all)]
pub fn remove_std(path: impl AsRef<Path>) -> io::Result<()> {
    let path = path.as_ref();
    tokio::task::block_in_place(|| {
        let meta = std::fs::metadata(path)?;
        if meta.is_dir() {
            std::fs::remove_dir(path)
        } else {
            std::fs::remove_file(path)
        }
    })
    let meta = std::fs::metadata(path)?;
    if meta.is_dir() {
        std::fs::remove_dir(path)
    } else {
        std::fs::remove_file(path)
    }
}

pub fn remove_all_std(path: impl AsRef<Path>) -> io::Result<()> {
    let path = path.as_ref();
    tokio::task::block_in_place(|| {
        let meta = std::fs::metadata(path)?;
        if meta.is_dir() {
            std::fs::remove_dir_all(path)
        } else {
            std::fs::remove_file(path)
        }
    })
    let meta = std::fs::metadata(path)?;
    if meta.is_dir() {
        std::fs::remove_dir_all(path)
    } else {
        std::fs::remove_file(path)
    }
}

pub async fn mkdir(path: impl AsRef<Path>) -> io::Result<()> {
@@ -190,7 +211,7 @@ pub async fn rename(from: impl AsRef<Path>, to: impl AsRef<Path>) -> io::Result<
}

pub fn rename_std(from: impl AsRef<Path>, to: impl AsRef<Path>) -> io::Result<()> {
    tokio::task::block_in_place(|| std::fs::rename(from, to))
    std::fs::rename(from, to)
}

#[tracing::instrument(level = "debug", skip_all)]
@@ -41,18 +41,21 @@ use tokio::time::interval;

use crate::erasure_coding::bitrot_verify;
use bytes::Bytes;
use path_absolutize::Absolutize;
// use path_absolutize::Absolutize; // Replaced with direct path operations for better performance
use crate::file_cache::{get_global_file_cache, prefetch_metadata_patterns, read_metadata_cached};
use parking_lot::RwLock as ParkingLotRwLock;
use rustfs_filemeta::{
    Cache, FileInfo, FileInfoOpts, FileMeta, MetaCacheEntry, MetacacheWriter, ObjectPartInfo, Opts, RawFileInfo, UpdateFn,
    get_file_info, read_xl_meta_no_data,
};
use rustfs_utils::HashAlgorithm;
use rustfs_utils::os::get_info;
use std::collections::HashMap;
use std::collections::HashSet;
use std::fmt::Debug;
use std::io::SeekFrom;
use std::sync::Arc;
use std::sync::atomic::{AtomicU32, Ordering};
use std::sync::{Arc, OnceLock};
use std::time::Duration;
use std::{
    fs::Metadata,
@@ -101,6 +104,9 @@ pub struct LocalDisk {
    pub major: u64,
    pub minor: u64,
    pub nrrequests: u64,
    // Performance optimization fields
    path_cache: Arc<ParkingLotRwLock<HashMap<String, PathBuf>>>,
    current_dir: Arc<OnceLock<PathBuf>>,
    // pub id: Mutex<Option<Uuid>>,
    // pub format_data: Mutex<Vec<u8>>,
    // pub format_file_info: Mutex<Option<Metadata>>,
@@ -130,8 +136,9 @@ impl Debug for LocalDisk {
impl LocalDisk {
    pub async fn new(ep: &Endpoint, cleanup: bool) -> Result<Self> {
        debug!("Creating local disk");
        let root = match PathBuf::from(ep.get_file_path()).absolutize() {
            Ok(path) => path.into_owned(),
        // Use optimized path resolution instead of absolutize() for better performance
        let root = match std::fs::canonicalize(ep.get_file_path()) {
            Ok(path) => path,
            Err(e) => {
                if e.kind() == ErrorKind::NotFound {
                    return Err(DiskError::VolumeNotFound);
@@ -144,10 +151,8 @@ impl LocalDisk {
            // TODO: remove tmp data
        }

        let format_path = Path::new(RUSTFS_META_BUCKET)
            .join(Path::new(super::FORMAT_CONFIG_FILE))
            .absolutize_virtually(&root)?
            .into_owned();
        // Use optimized path resolution instead of absolutize_virtually
        let format_path = root.join(RUSTFS_META_BUCKET).join(super::FORMAT_CONFIG_FILE);
        debug!("format_path: {:?}", format_path);
        let (format_data, format_meta) = read_file_exists(&format_path).await?;

@@ -227,6 +232,8 @@ impl LocalDisk {
            // format_file_info: Mutex::new(format_meta),
            // format_data: Mutex::new(format_data),
            // format_last_check: Mutex::new(format_last_check),
            path_cache: Arc::new(ParkingLotRwLock::new(HashMap::with_capacity(2048))),
            current_dir: Arc::new(OnceLock::new()),
            exit_signal: None,
        };
        let (info, _root) = get_disk_info(root).await?;
@@ -351,19 +358,178 @@ impl LocalDisk {
        self.make_volumes(defaults).await
    }

    // Optimized path resolution with caching
    pub fn resolve_abs_path(&self, path: impl AsRef<Path>) -> Result<PathBuf> {
        Ok(path.as_ref().absolutize_virtually(&self.root)?.into_owned())
        let path_ref = path.as_ref();
        let path_str = path_ref.to_string_lossy();

        // Fast cache read
        {
            let cache = self.path_cache.read();
            if let Some(cached_path) = cache.get(path_str.as_ref()) {
                return Ok(cached_path.clone());
            }
        }

        // Calculate absolute path without using path_absolutize for better performance
        let abs_path = if path_ref.is_absolute() {
            path_ref.to_path_buf()
        } else {
            self.root.join(path_ref)
        };

        // Normalize path components to avoid filesystem calls
        let normalized = self.normalize_path_components(&abs_path);

        // Cache the result
        {
            let mut cache = self.path_cache.write();

            // Simple cache size control
            if cache.len() >= 4096 {
                // Clear half the cache - simple eviction strategy
                let keys_to_remove: Vec<_> = cache.keys().take(cache.len() / 2).cloned().collect();
                for key in keys_to_remove {
                    cache.remove(&key);
                }
            }

            cache.insert(path_str.into_owned(), normalized.clone());
        }

        Ok(normalized)
    }

    // Lightweight path normalization without filesystem calls
    fn normalize_path_components(&self, path: &Path) -> PathBuf {
        let mut result = PathBuf::new();

        for component in path.components() {
            match component {
                std::path::Component::Normal(name) => {
                    result.push(name);
                }
                std::path::Component::ParentDir => {
                    result.pop();
                }
                std::path::Component::CurDir => {
                    // Ignore current directory components
                }
                std::path::Component::RootDir => {
                    result.push(component);
                }
                std::path::Component::Prefix(_prefix) => {
                    result.push(component);
                }
            }
        }

        result
    }

    // Highly optimized object path generation
    pub fn get_object_path(&self, bucket: &str, key: &str) -> Result<PathBuf> {
        let dir = Path::new(&bucket);
        let file_path = Path::new(&key);
        self.resolve_abs_path(dir.join(file_path))
        // For high-frequency paths, use faster string concatenation
        let cache_key = if key.is_empty() {
            bucket.to_string()
        } else {
            // Use with_capacity to pre-allocate, reducing memory reallocations
            let mut path_str = String::with_capacity(bucket.len() + key.len() + 1);
            path_str.push_str(bucket);
            path_str.push('/');
            path_str.push_str(key);
            path_str
        };

        // Fast path: directly calculate based on root, avoiding cache lookup overhead for simple cases
        Ok(self.root.join(&cache_key))
    }

    pub fn get_bucket_path(&self, bucket: &str) -> Result<PathBuf> {
        let dir = Path::new(&bucket);
        self.resolve_abs_path(dir)
        Ok(self.root.join(bucket))
    }

    // Batch path generation with single lock acquisition
    pub fn get_object_paths_batch(&self, requests: &[(String, String)]) -> Result<Vec<PathBuf>> {
        let mut results = Vec::with_capacity(requests.len());
        let mut cache_misses = Vec::new();

        // First attempt to get all paths from cache
        {
            let cache = self.path_cache.read();
            for (i, (bucket, key)) in requests.iter().enumerate() {
                let cache_key = format!("{bucket}/{key}");
                if let Some(cached_path) = cache.get(&cache_key) {
                    results.push((i, cached_path.clone()));
                } else {
                    cache_misses.push((i, bucket, key, cache_key));
                }
            }
        }

        // Handle cache misses
        if !cache_misses.is_empty() {
            let mut new_entries = Vec::new();
            for (i, _bucket, _key, cache_key) in cache_misses {
                let path = self.root.join(&cache_key);
                results.push((i, path.clone()));
                new_entries.push((cache_key, path));
            }

            // Batch update cache
            {
                let mut cache = self.path_cache.write();
                for (key, path) in new_entries {
                    cache.insert(key, path);
                }
            }
        }

        // Sort results back to original order
        results.sort_by_key(|(i, _)| *i);
        Ok(results.into_iter().map(|(_, path)| path).collect())
    }

    // Optimized metadata reading with caching
    pub async fn read_metadata_cached(&self, path: PathBuf) -> Result<Arc<FileMeta>> {
        read_metadata_cached(path).await
    }

    // Smart prefetching for related files
    pub async fn read_version_with_prefetch(
        &self,
        volume: &str,
        path: &str,
        version_id: &str,
        opts: &ReadOptions,
    ) -> Result<FileInfo> {
        let file_path = self.get_object_path(volume, path)?;

        // Async prefetch related files, don't block current read
        if let Some(parent) = file_path.parent() {
            prefetch_metadata_patterns(parent, &[super::STORAGE_FORMAT_FILE, "part.1", "part.2", "part.meta"]).await;
        }

        // Main read logic
        let file_dir = self.get_bucket_path(volume)?;
        let (data, _) = self.read_raw(volume, file_dir, file_path, opts.read_data).await?;

        get_file_info(&data, volume, path, version_id, FileInfoOpts { data: opts.read_data })
            .await
            .map_err(|_e| DiskError::Unexpected)
    }

    // Batch metadata reading for multiple objects
    pub async fn read_metadata_batch(&self, requests: Vec<(String, String)>) -> Result<Vec<Option<Arc<FileMeta>>>> {
        let paths: Vec<PathBuf> = requests
            .iter()
            .map(|(bucket, key)| self.get_object_path(bucket, &format!("{}/{}", key, super::STORAGE_FORMAT_FILE)))
            .collect::<Result<Vec<_>>>()?;

        let cache = get_global_file_cache();
        let results = cache.get_metadata_batch(paths).await;

        Ok(results.into_iter().map(|r| r.ok()).collect())
    }

    // /// Write to the filesystem atomically.
@@ -549,7 +715,15 @@ impl LocalDisk {
    }

    async fn read_metadata(&self, file_path: impl AsRef<Path>) -> Result<Vec<u8>> {
        // TODO: support timeout
        // Try to use cached file content reading for better performance, with safe fallback
        let path = file_path.as_ref().to_path_buf();

        // First, try the cache
        if let Ok(bytes) = get_global_file_cache().get_file_content(path.clone()).await {
            return Ok(bytes.to_vec());
        }

        // Fallback to direct read if cache fails
        let (data, _) = self.read_metadata_with_dmtime(file_path.as_ref()).await?;
        Ok(data)
    }
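The resolver above normalizes `..` and `.` purely lexically, with no syscalls (unlike `std::fs::canonicalize`). A self-contained sketch of that normalization step:

```rust
use std::path::{Component, Path, PathBuf};

/// Lexical normalization: drops `.`, folds `..` into the parent, no filesystem calls.
fn normalize_path_components(path: &Path) -> PathBuf {
    let mut result = PathBuf::new();
    for component in path.components() {
        match component {
            Component::Normal(name) => result.push(name),
            Component::ParentDir => {
                result.pop();
            }
            Component::CurDir => {} // already mostly folded away by components()
            Component::RootDir | Component::Prefix(_) => result.push(component),
        }
    }
    result
}

fn main() {
    let p = Path::new("/data/bucket/./obj/../other");
    assert_eq!(normalize_path_components(p), PathBuf::from("/data/bucket/other"));
    println!("{:?}", normalize_path_components(p));
}
```

Note that, unlike `canonicalize`, this never resolves symlinks and never fails on missing paths, which is exactly why it is cheap enough to sit behind a path cache.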
@@ -668,7 +668,7 @@ pub struct VolumeInfo {
    pub created: Option<OffsetDateTime>,
}

#[derive(Deserialize, Serialize, Debug, Default)]
#[derive(Deserialize, Serialize, Debug, Default, Clone)]
pub struct ReadOptions {
    pub incl_free_versions: bool,
    pub read_data: bool,
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use rustfs_utils::{XHost, check_local_server_addr, get_host_ip, get_host_ip_async, is_local_host};
use rustfs_utils::{XHost, check_local_server_addr, get_host_ip, is_local_host};
use tracing::{error, instrument, warn};

use crate::{
@@ -169,7 +169,7 @@ impl AsMut<Vec<Endpoints>> for PoolEndpointList {
impl PoolEndpointList {
    /// creates a list of endpoints per pool, resolves their relevant
    /// hostnames and discovers those are local or remote.
    fn create_pool_endpoints(server_addr: &str, disks_layout: &DisksLayout) -> Result<Self> {
    async fn create_pool_endpoints(server_addr: &str, disks_layout: &DisksLayout) -> Result<Self> {
        if disks_layout.is_empty_layout() {
            return Err(Error::other("invalid number of endpoints"));
        }
@@ -244,12 +244,11 @@ impl PoolEndpointList {
        let host_ip_set = if let Some(set) = host_ip_cache.get(&host) {
            set
        } else {
            let ips = match get_host_ip(host.clone()) {
            let ips = match get_host_ip(host.clone()).await {
                Ok(ips) => ips,
                Err(e) => {
                    error!("host {} not found, error:{}", host, e);
                    get_host_ip_async(host.clone())
                        .map_err(|e| Error::other(format!("host '{host}' cannot resolve: {e}")))?
                    return Err(Error::other(format!("host '{host}' cannot resolve: {e}")));
                }
            };
            host_ip_cache.insert(host.clone(), ips);
@@ -467,19 +466,22 @@ impl EndpointServerPools {
        }
        None
    }
    pub fn from_volumes(server_addr: &str, endpoints: Vec<String>) -> Result<(EndpointServerPools, SetupType)> {
    pub async fn from_volumes(server_addr: &str, endpoints: Vec<String>) -> Result<(EndpointServerPools, SetupType)> {
        let layouts = DisksLayout::from_volumes(endpoints.as_slice())?;

        Self::create_server_endpoints(server_addr, &layouts)
        Self::create_server_endpoints(server_addr, &layouts).await
    }
    /// validates and creates new endpoints from input args, supports
    /// both ellipses and without ellipses transparently.
    pub fn create_server_endpoints(server_addr: &str, disks_layout: &DisksLayout) -> Result<(EndpointServerPools, SetupType)> {
    pub async fn create_server_endpoints(
        server_addr: &str,
        disks_layout: &DisksLayout,
    ) -> Result<(EndpointServerPools, SetupType)> {
        if disks_layout.pools.is_empty() {
            return Err(Error::other("Invalid arguments specified"));
        }

        let pool_eps = PoolEndpointList::create_pool_endpoints(server_addr, disks_layout)?;
        let pool_eps = PoolEndpointList::create_pool_endpoints(server_addr, disks_layout).await?;

        let mut ret: EndpointServerPools = Vec::with_capacity(pool_eps.as_ref().len()).into();
        for (i, eps) in pool_eps.inner.into_iter().enumerate() {
@@ -754,8 +756,8 @@ mod test {
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_create_pool_endpoints() {
|
||||
#[tokio::test]
|
||||
async fn test_create_pool_endpoints() {
|
||||
#[derive(Default)]
|
||||
struct TestCase<'a> {
|
||||
num: usize,
|
||||
@@ -1277,7 +1279,7 @@ mod test {
|
||||
|
||||
match (
|
||||
test_case.expected_err,
|
||||
PoolEndpointList::create_pool_endpoints(test_case.server_addr, &disks_layout),
|
||||
PoolEndpointList::create_pool_endpoints(test_case.server_addr, &disks_layout).await,
|
||||
) {
|
||||
(None, Err(err)) => panic!("Test {}: error: expected = <nil>, got = {}", test_case.num, err),
|
||||
(Some(err), Ok(_)) => panic!("Test {}: error: expected = {}, got = <nil>", test_case.num, err),
|
||||
@@ -1344,8 +1346,8 @@ mod test {
|
||||
(urls, local_flags)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_create_server_endpoints() {
|
||||
#[tokio::test]
|
||||
async fn test_create_server_endpoints() {
|
||||
let test_cases = [
|
||||
// Invalid input.
|
||||
("", vec![], false),
|
||||
@@ -1380,7 +1382,7 @@ mod test {
|
||||
}
|
||||
};
|
||||
|
||||
let ret = EndpointServerPools::create_server_endpoints(test_case.0, &disks_layout);
|
||||
let ret = EndpointServerPools::create_server_endpoints(test_case.0, &disks_layout).await;
|
||||
|
||||
if let Err(err) = ret {
|
||||
if test_case.2 {
|
||||
|
||||
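Because endpoint creation is now async end to end (hostname resolution uses the async `get_host_ip`), callers must await it from a runtime. A minimal sketch, assuming a tokio runtime; the server address and volume pattern are illustrative:

```rust
// Hedged sketch: address and ellipses-style volume list are placeholders.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let volumes = vec!["/data/disk{1...4}".to_string()];
    // DNS resolution happens inside, so this call must be awaited.
    let (_pools, setup_type) = EndpointServerPools::from_volumes("0.0.0.0:9000", volumes).await?;
    let _ = setup_type; // e.g. erasure vs. distributed-erasure setup
    Ok(())
}
```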
332 crates/ecstore/src/file_cache.rs Normal file
@@ -0,0 +1,332 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! High-performance file content and metadata caching using moka
//!
//! This module provides optimized caching for file operations to reduce
//! redundant I/O and improve overall system performance.

use super::disk::error::{Error, Result};
use bytes::Bytes;
use moka::future::Cache;
use rustfs_filemeta::FileMeta;
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::time::Duration;

pub struct OptimizedFileCache {
    // Use moka as high-performance async cache
    metadata_cache: Cache<PathBuf, Arc<FileMeta>>,
    file_content_cache: Cache<PathBuf, Bytes>,
    // Performance monitoring
    cache_hits: std::sync::atomic::AtomicU64,
    cache_misses: std::sync::atomic::AtomicU64,
}

impl OptimizedFileCache {
    pub fn new() -> Self {
        Self {
            metadata_cache: Cache::builder()
                .max_capacity(2048)
                .time_to_live(Duration::from_secs(300)) // 5 minutes TTL
                .time_to_idle(Duration::from_secs(60)) // 1 minute idle
                .build(),

            file_content_cache: Cache::builder()
                .max_capacity(512) // Smaller file content cache
                .time_to_live(Duration::from_secs(120))
                .weigher(|_key: &PathBuf, value: &Bytes| value.len() as u32)
                .build(),

            cache_hits: std::sync::atomic::AtomicU64::new(0),
            cache_misses: std::sync::atomic::AtomicU64::new(0),
        }
    }

    pub async fn get_metadata(&self, path: PathBuf) -> Result<Arc<FileMeta>> {
        if let Some(cached) = self.metadata_cache.get(&path).await {
            self.cache_hits.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
            return Ok(cached);
        }

        self.cache_misses.fetch_add(1, std::sync::atomic::Ordering::Relaxed);

        // Cache miss, read file
        let data = tokio::fs::read(&path)
            .await
            .map_err(|e| Error::other(format!("Read metadata failed: {e}")))?;

        let mut meta = FileMeta::default();
        meta.unmarshal_msg(&data)?;

        let arc_meta = Arc::new(meta);
        self.metadata_cache.insert(path, arc_meta.clone()).await;

        Ok(arc_meta)
    }

    pub async fn get_file_content(&self, path: PathBuf) -> Result<Bytes> {
        if let Some(cached) = self.file_content_cache.get(&path).await {
            self.cache_hits.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
            return Ok(cached);
        }

        self.cache_misses.fetch_add(1, std::sync::atomic::Ordering::Relaxed);

        let data = tokio::fs::read(&path)
            .await
            .map_err(|e| Error::other(format!("Read file failed: {e}")))?;

        let bytes = Bytes::from(data);
        self.file_content_cache.insert(path, bytes.clone()).await;

        Ok(bytes)
    }

    // Prefetch related files
    pub async fn prefetch_related(&self, base_path: &Path, patterns: &[&str]) {
        let mut prefetch_tasks = Vec::new();

        for pattern in patterns {
            let path = base_path.join(pattern);
            if tokio::fs::metadata(&path).await.is_ok() {
                let cache = self.clone();
                let path_clone = path.clone();
                prefetch_tasks.push(async move {
                    let _ = cache.get_metadata(path_clone).await;
                });
            }
        }

        // Parallel prefetch, don't wait for completion
        if !prefetch_tasks.is_empty() {
            tokio::spawn(async move {
                futures::future::join_all(prefetch_tasks).await;
            });
        }
    }

    // Batch metadata reading with deduplication
    pub async fn get_metadata_batch(
        &self,
        paths: Vec<PathBuf>,
    ) -> Vec<std::result::Result<Arc<FileMeta>, rustfs_filemeta::Error>> {
        let mut results = Vec::with_capacity(paths.len());
        let mut cache_futures = Vec::new();

        // First, attempt to get from cache
        for (i, path) in paths.iter().enumerate() {
            if let Some(cached) = self.metadata_cache.get(path).await {
                results.push((i, Ok(cached)));
                self.cache_hits.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
            } else {
                cache_futures.push((i, path.clone()));
            }
        }

        // For cache misses, read from filesystem
        if !cache_futures.is_empty() {
            let mut fs_results = Vec::new();

            for (i, path) in cache_futures {
                self.cache_misses.fetch_add(1, std::sync::atomic::Ordering::Relaxed);

                match tokio::fs::read(&path).await {
                    Ok(data) => {
                        let mut meta = FileMeta::default();
                        match meta.unmarshal_msg(&data) {
                            Ok(_) => {
                                let arc_meta = Arc::new(meta);
                                self.metadata_cache.insert(path, arc_meta.clone()).await;
                                fs_results.push((i, Ok(arc_meta)));
                            }
                            Err(e) => {
                                fs_results.push((i, Err(e)));
                            }
                        }
                    }
                    Err(_e) => {
                        fs_results.push((i, Err(rustfs_filemeta::Error::Unexpected)));
                    }
                }
            }

            results.extend(fs_results);
        }

        // Sort results back to original order
        results.sort_by_key(|(i, _)| *i);
        results.into_iter().map(|(_, result)| result).collect()
    }

    // Invalidate cache entries for a path
    pub async fn invalidate(&self, path: &Path) {
        self.metadata_cache.remove(path).await;
        self.file_content_cache.remove(path).await;
    }

    // Get cache statistics
    pub fn get_stats(&self) -> FileCacheStats {
        let hits = self.cache_hits.load(std::sync::atomic::Ordering::Relaxed);
        let misses = self.cache_misses.load(std::sync::atomic::Ordering::Relaxed);
        let hit_rate = if hits + misses > 0 {
            (hits as f64 / (hits + misses) as f64) * 100.0
        } else {
            0.0
        };

        FileCacheStats {
            metadata_cache_size: self.metadata_cache.entry_count(),
            content_cache_size: self.file_content_cache.entry_count(),
            cache_hits: hits,
            cache_misses: misses,
            hit_rate,
            total_weight: 0, // Simplified for compatibility
        }
    }

    // Clear all caches
    pub async fn clear(&self) {
        self.metadata_cache.invalidate_all();
        self.file_content_cache.invalidate_all();

        // Wait for invalidation to complete
        self.metadata_cache.run_pending_tasks().await;
        self.file_content_cache.run_pending_tasks().await;
    }
}

impl Clone for OptimizedFileCache {
    fn clone(&self) -> Self {
        Self {
            metadata_cache: self.metadata_cache.clone(),
            file_content_cache: self.file_content_cache.clone(),
            cache_hits: std::sync::atomic::AtomicU64::new(self.cache_hits.load(std::sync::atomic::Ordering::Relaxed)),
            cache_misses: std::sync::atomic::AtomicU64::new(self.cache_misses.load(std::sync::atomic::Ordering::Relaxed)),
        }
    }
}

#[derive(Debug)]
pub struct FileCacheStats {
    pub metadata_cache_size: u64,
    pub content_cache_size: u64,
    pub cache_hits: u64,
    pub cache_misses: u64,
    pub hit_rate: f64,
    pub total_weight: u64,
}

impl Default for OptimizedFileCache {
    fn default() -> Self {
        Self::new()
    }
}

// Global cache instance
use std::sync::OnceLock;

static GLOBAL_FILE_CACHE: OnceLock<OptimizedFileCache> = OnceLock::new();

pub fn get_global_file_cache() -> &'static OptimizedFileCache {
    GLOBAL_FILE_CACHE.get_or_init(OptimizedFileCache::new)
}

// Utility functions for common operations
pub async fn read_metadata_cached(path: PathBuf) -> Result<Arc<FileMeta>> {
    get_global_file_cache().get_metadata(path).await
}

pub async fn read_file_content_cached(path: PathBuf) -> Result<Bytes> {
    get_global_file_cache().get_file_content(path).await
}

pub async fn prefetch_metadata_patterns(base_path: &Path, patterns: &[&str]) {
    get_global_file_cache().prefetch_related(base_path, patterns).await;
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::io::Write;
    use tempfile::tempdir;

    #[tokio::test]
    async fn test_file_cache_basic() {
        let cache = OptimizedFileCache::new();

        // Create a temporary file
        let dir = tempdir().unwrap();
        let file_path = dir.path().join("test.txt");
        let mut file = std::fs::File::create(&file_path).unwrap();
        writeln!(file, "test content").unwrap();
        drop(file);

        // First read should be cache miss
        let content1 = cache.get_file_content(file_path.clone()).await.unwrap();
        assert_eq!(content1, Bytes::from("test content\n"));

        // Second read should be cache hit
        let content2 = cache.get_file_content(file_path.clone()).await.unwrap();
        assert_eq!(content2, content1);

        let stats = cache.get_stats();
        assert!(stats.cache_hits > 0);
        assert!(stats.cache_misses > 0);
    }

    #[tokio::test]
    async fn test_metadata_batch_read() {
        let cache = OptimizedFileCache::new();

        // Create test files
        let dir = tempdir().unwrap();
        let mut paths = Vec::new();

        for i in 0..5 {
            let file_path = dir.path().join(format!("test_{i}.txt"));
            let mut file = std::fs::File::create(&file_path).unwrap();
            writeln!(file, "content {i}").unwrap();
            paths.push(file_path);
        }

        // Note: This test would need actual FileMeta files to work properly
        // For now, we just test that the function runs without errors
        let results = cache.get_metadata_batch(paths).await;
        assert_eq!(results.len(), 5);
    }

    #[tokio::test]
    async fn test_cache_invalidation() {
        let cache = OptimizedFileCache::new();

        let dir = tempdir().unwrap();
        let file_path = dir.path().join("test.txt");
        let mut file = std::fs::File::create(&file_path).unwrap();
        writeln!(file, "test content").unwrap();
        drop(file);

        // Read file to populate cache
        let _ = cache.get_file_content(file_path.clone()).await.unwrap();

        // Invalidate cache
        cache.invalidate(&file_path).await;

        // Next read should be cache miss again
        let _ = cache.get_file_content(file_path.clone()).await.unwrap();

        let stats = cache.get_stats();
        assert!(stats.cache_misses >= 2);
    }
}
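Outside the tests, the intended call pattern is the pair of module-level helpers backed by the `OnceLock` global. A minimal sketch — the crate path and file location below are illustrative, not confirmed by the diff:

```rust
// Hedged sketch: module path and the file being read are placeholders.
use rustfs_ecstore::file_cache::{get_global_file_cache, read_file_content_cached};
use std::path::PathBuf;

async fn report_cache_health() {
    // Repeated reads of the same path are served from moka after the first miss.
    let _ = read_file_content_cached(PathBuf::from("/tmp/example.meta")).await;
    let _ = read_file_content_cached(PathBuf::from("/tmp/example.meta")).await;

    let stats = get_global_file_cache().get_stats();
    println!(
        "hit rate: {:.1}% ({} hits / {} misses)",
        stats.hit_rate, stats.cache_hits, stats.cache_misses
    );
}
```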
@@ -37,26 +37,27 @@ pub const DISK_FILL_FRACTION: f64 = 0.99;
pub const DISK_RESERVE_FRACTION: f64 = 0.15;

lazy_static! {
    static ref GLOBAL_RUSTFS_PORT: OnceLock<u16> = OnceLock::new();
    pub static ref GLOBAL_OBJECT_API: OnceLock<Arc<ECStore>> = OnceLock::new();
    pub static ref GLOBAL_LOCAL_DISK: Arc<RwLock<Vec<Option<DiskStore>>>> = Arc::new(RwLock::new(Vec::new()));
    pub static ref GLOBAL_IsErasure: RwLock<bool> = RwLock::new(false);
    pub static ref GLOBAL_IsDistErasure: RwLock<bool> = RwLock::new(false);
    pub static ref GLOBAL_IsErasureSD: RwLock<bool> = RwLock::new(false);
    pub static ref GLOBAL_LOCAL_DISK_MAP: Arc<RwLock<HashMap<String, Option<DiskStore>>>> = Arc::new(RwLock::new(HashMap::new()));
    pub static ref GLOBAL_LOCAL_DISK_SET_DRIVES: Arc<RwLock<TypeLocalDiskSetDrives>> = Arc::new(RwLock::new(Vec::new()));
    pub static ref GLOBAL_Endpoints: OnceLock<EndpointServerPools> = OnceLock::new();
    pub static ref GLOBAL_RootDiskThreshold: RwLock<u64> = RwLock::new(0);
    pub static ref GLOBAL_TierConfigMgr: Arc<RwLock<TierConfigMgr>> = TierConfigMgr::new();
    pub static ref GLOBAL_LifecycleSys: Arc<LifecycleSys> = LifecycleSys::new();
    pub static ref GLOBAL_EventNotifier: Arc<RwLock<EventNotifier>> = EventNotifier::new();
    //pub static ref GLOBAL_RemoteTargetTransport
    static ref globalDeploymentIDPtr: OnceLock<Uuid> = OnceLock::new();
    pub static ref GLOBAL_BOOT_TIME: OnceCell<SystemTime> = OnceCell::new();
    pub static ref GLOBAL_LocalNodeName: String = "127.0.0.1:9000".to_string();
    pub static ref GLOBAL_LocalNodeNameHex: String = rustfs_utils::crypto::hex(GLOBAL_LocalNodeName.as_bytes());
    pub static ref GLOBAL_NodeNamesHex: HashMap<String, ()> = HashMap::new();
    pub static ref GLOBAL_REGION: OnceLock<String> = OnceLock::new();
    static ref GLOBAL_RUSTFS_PORT: OnceLock<u16> = OnceLock::new();
    static ref GLOBAL_RUSTFS_EXTERNAL_PORT: OnceLock<u16> = OnceLock::new();
    pub static ref GLOBAL_OBJECT_API: OnceLock<Arc<ECStore>> = OnceLock::new();
    pub static ref GLOBAL_LOCAL_DISK: Arc<RwLock<Vec<Option<DiskStore>>>> = Arc::new(RwLock::new(Vec::new()));
    pub static ref GLOBAL_IsErasure: RwLock<bool> = RwLock::new(false);
    pub static ref GLOBAL_IsDistErasure: RwLock<bool> = RwLock::new(false);
    pub static ref GLOBAL_IsErasureSD: RwLock<bool> = RwLock::new(false);
    pub static ref GLOBAL_LOCAL_DISK_MAP: Arc<RwLock<HashMap<String, Option<DiskStore>>>> = Arc::new(RwLock::new(HashMap::new()));
    pub static ref GLOBAL_LOCAL_DISK_SET_DRIVES: Arc<RwLock<TypeLocalDiskSetDrives>> = Arc::new(RwLock::new(Vec::new()));
    pub static ref GLOBAL_Endpoints: OnceLock<EndpointServerPools> = OnceLock::new();
    pub static ref GLOBAL_RootDiskThreshold: RwLock<u64> = RwLock::new(0);
    pub static ref GLOBAL_TierConfigMgr: Arc<RwLock<TierConfigMgr>> = TierConfigMgr::new();
    pub static ref GLOBAL_LifecycleSys: Arc<LifecycleSys> = LifecycleSys::new();
    pub static ref GLOBAL_EventNotifier: Arc<RwLock<EventNotifier>> = EventNotifier::new();
    //pub static ref GLOBAL_RemoteTargetTransport
    static ref globalDeploymentIDPtr: OnceLock<Uuid> = OnceLock::new();
    pub static ref GLOBAL_BOOT_TIME: OnceCell<SystemTime> = OnceCell::new();
    pub static ref GLOBAL_LocalNodeName: String = "127.0.0.1:9000".to_string();
    pub static ref GLOBAL_LocalNodeNameHex: String = rustfs_utils::crypto::hex(GLOBAL_LocalNodeName.as_bytes());
    pub static ref GLOBAL_NodeNamesHex: HashMap<String, ()> = HashMap::new();
    pub static ref GLOBAL_REGION: OnceLock<String> = OnceLock::new();
}

// Global cancellation token for background services (data scanner and auto heal)
@@ -108,6 +109,22 @@ pub fn set_global_rustfs_port(value: u16) {
    GLOBAL_RUSTFS_PORT.set(value).expect("set_global_rustfs_port fail");
}

/// Get the global rustfs external port
pub fn global_rustfs_external_port() -> u16 {
    if let Some(p) = GLOBAL_RUSTFS_EXTERNAL_PORT.get() {
        *p
    } else {
        rustfs_config::DEFAULT_PORT
    }
}

/// Set the global rustfs external port
pub fn set_global_rustfs_external_port(value: u16) {
    GLOBAL_RUSTFS_EXTERNAL_PORT
        .set(value)
        .expect("set_global_rustfs_external_port fail");
}

/// Set the global deployment ID
pub fn set_global_deployment_id(id: Uuid) {
    globalDeploymentIDPtr.set(id).unwrap();
@@ -16,6 +16,7 @@
extern crate core;

pub mod admin_server_info;
pub mod batch_processor;
pub mod bitrot;
pub mod bucket;
pub mod cache_value;
@@ -29,6 +30,7 @@ pub mod disks_layout;
pub mod endpoints;
pub mod erasure_coding;
pub mod error;
pub mod file_cache;
pub mod global;
pub mod lock_utils;
pub mod metrics_realtime;
@@ -15,6 +15,7 @@
#![allow(unused_imports)]
#![allow(unused_variables)]

use crate::batch_processor::{AsyncBatchProcessor, get_global_processors};
use crate::bitrot::{create_bitrot_reader, create_bitrot_writer};
use crate::bucket::lifecycle::lifecycle::TRANSITION_COMPLETE;
use crate::bucket::versioning::VersioningApi;
@@ -232,7 +233,10 @@ impl SetDisks {
            });
        }

        let results = join_all(futures).await;
        // Use optimized batch processor for disk info retrieval
        let processor = get_global_processors().metadata_processor();
        let results = processor.execute_batch(futures).await;

        for result in results {
            match result {
                Ok(res) => {
@@ -507,21 +511,28 @@ impl SetDisks {

    #[tracing::instrument(skip(disks))]
    async fn cleanup_multipart_path(disks: &[Option<DiskStore>], paths: &[String]) {
        let mut futures = Vec::with_capacity(disks.len());

        let mut errs = Vec::with_capacity(disks.len());

        for disk in disks.iter() {
            futures.push(async move {
                if let Some(disk) = disk {
                    disk.delete_paths(RUSTFS_META_MULTIPART_BUCKET, paths).await
                } else {
                    Err(DiskError::DiskNotFound)
        // Use improved simple batch processor instead of join_all for better performance
        let processor = get_global_processors().write_processor();

        let tasks: Vec<_> = disks
            .iter()
            .map(|disk| {
                let disk = disk.clone();
                let paths = paths.to_vec();

                async move {
                    if let Some(disk) = disk {
                        disk.delete_paths(RUSTFS_META_MULTIPART_BUCKET, &paths).await
                    } else {
                        Err(DiskError::DiskNotFound)
                    }
                }
            })
        }
            .collect();

        let results = join_all(futures).await;
        let results = processor.execute_batch(tasks).await;
        for result in results {
            match result {
                Ok(_) => {
@@ -545,21 +556,32 @@ impl SetDisks {
        part_numbers: &[usize],
        read_quorum: usize,
    ) -> disk::error::Result<Vec<ObjectPartInfo>> {
        let mut futures = Vec::with_capacity(disks.len());
        for (i, disk) in disks.iter().enumerate() {
            futures.push(async move {
                if let Some(disk) = disk {
                    disk.read_parts(bucket, part_meta_paths).await
                } else {
                    Err(DiskError::DiskNotFound)
                }
            });
        }

        let mut errs = Vec::with_capacity(disks.len());
        let mut object_parts = Vec::with_capacity(disks.len());

        let results = join_all(futures).await;
        // Use batch processor for better performance
        let processor = get_global_processors().read_processor();
        let bucket = bucket.to_string();
        let part_meta_paths = part_meta_paths.to_vec();

        let tasks: Vec<_> = disks
            .iter()
            .map(|disk| {
                let disk = disk.clone();
                let bucket = bucket.clone();
                let part_meta_paths = part_meta_paths.clone();

                async move {
                    if let Some(disk) = disk {
                        disk.read_parts(&bucket, &part_meta_paths).await
                    } else {
                        Err(DiskError::DiskNotFound)
                    }
                }
            })
            .collect();

        let results = processor.execute_batch(tasks).await;
        for result in results {
            match result {
                Ok(res) => {
@@ -1369,22 +1391,71 @@ impl SetDisks {
            })
        });

        // Wait for all tasks to complete
        let results = join_all(futures).await;

        for result in results {
            match result? {
                Ok(res) => {
                    ress.push(res);
                    errors.push(None);
                }
                Err(e) => {
            match result {
                Ok(res) => match res {
                    Ok(file_info) => {
                        ress.push(file_info);
                        errors.push(None);
                    }
                    Err(e) => {
                        ress.push(FileInfo::default());
                        errors.push(Some(e));
                    }
                },
                Err(_) => {
                    ress.push(FileInfo::default());
                    errors.push(Some(e));
                    errors.push(Some(DiskError::Unexpected));
                }
            }
        }
        Ok((ress, errors))
    }

    // Optimized version using batch processor with quorum support
    pub async fn read_version_optimized(
        &self,
        bucket: &str,
        object: &str,
        version_id: &str,
        opts: &ReadOptions,
    ) -> Result<Vec<rustfs_filemeta::FileInfo>> {
        // Use existing disk selection logic
        let disks = self.disks.read().await;
        let required_reads = self.format.erasure.sets.len();

        // Clone parameters outside the closure to avoid lifetime issues
        let bucket = bucket.to_string();
        let object = object.to_string();
        let version_id = version_id.to_string();
        let opts = opts.clone();

        let processor = get_global_processors().read_processor();
        let tasks: Vec<_> = disks
            .iter()
            .take(required_reads + 2) // Read a few extra for reliability
            .filter_map(|disk| {
                disk.as_ref().map(|d| {
                    let disk = d.clone();
                    let bucket = bucket.clone();
                    let object = object.clone();
                    let version_id = version_id.clone();
                    let opts = opts.clone();

                    async move { disk.read_version(&bucket, &bucket, &object, &version_id, &opts).await }
                })
            })
            .collect();

        match processor.execute_batch_with_quorum(tasks, required_reads).await {
            Ok(results) => Ok(results),
            Err(_) => Err(DiskError::FileNotFound.into()), // Use existing error type
        }
    }

    async fn read_all_xl(
        disks: &[Option<DiskStore>],
        bucket: &str,
@@ -1403,10 +1474,11 @@ impl SetDisks {
        object: &str,
        read_data: bool,
    ) -> (Vec<Option<RawFileInfo>>, Vec<Option<DiskError>>) {
        let mut futures = Vec::with_capacity(disks.len());
        let mut ress = Vec::with_capacity(disks.len());
        let mut errors = Vec::with_capacity(disks.len());

        let mut futures = Vec::with_capacity(disks.len());

        for disk in disks.iter() {
            futures.push(async move {
                if let Some(disk) = disk {
@@ -2358,7 +2430,7 @@ impl SetDisks {
            .map_err(|e| {
                let elapsed = start_time.elapsed();
                error!("Failed to acquire write lock for heal operation after {:?}: {:?}", elapsed, e);
                DiskError::other(format!("Failed to acquire write lock for heal operation: {:?}", e))
                DiskError::other(format!("Failed to acquire write lock for heal operation: {e:?}"))
            })?;
        let elapsed = start_time.elapsed();
        info!("Successfully acquired write lock for object: {} in {:?}", object, elapsed);
@@ -2973,7 +3045,7 @@ impl SetDisks {
            .fast_lock_manager
            .acquire_write_lock("", object, self.locker_owner.as_str())
            .await
            .map_err(|e| DiskError::other(format!("Failed to acquire write lock for heal directory operation: {:?}", e)))?;
            .map_err(|e| DiskError::other(format!("Failed to acquire write lock for heal directory operation: {e:?}")))?;

        let disks = {
            let disks = self.disks.read().await;
@@ -5522,7 +5594,7 @@ impl StorageAPI for SetDisks {
        self.fast_lock_manager
            .acquire_write_lock("", object, self.locker_owner.as_str())
            .await
            .map_err(|e| Error::other(format!("Failed to acquire write lock for heal operation: {:?}", e)))?,
            .map_err(|e| Error::other(format!("Failed to acquire write lock for heal operation: {e:?}")))?,
        )
    } else {
        None
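The `execute_batch_with_quorum` call above is the interesting part of this refactor: it only needs `required_reads` successes, not a response from every disk. A minimal, self-contained sketch of that semantics — an illustration of the idea, not the crate's actual `AsyncBatchProcessor` API:

```rust
use futures::stream::{FuturesUnordered, StreamExt};

// Hedged sketch: run futures concurrently and stop once `quorum` of them succeed.
// Signature and error handling are illustrative.
async fn execute_with_quorum<T, E, F>(tasks: Vec<F>, quorum: usize) -> Result<Vec<T>, usize>
where
    F: std::future::Future<Output = Result<T, E>>,
{
    let mut pending: FuturesUnordered<F> = tasks.into_iter().collect();
    let mut successes = Vec::new();
    let mut failures = 0usize;

    while let Some(result) = pending.next().await {
        match result {
            Ok(v) => {
                successes.push(v);
                if successes.len() >= quorum {
                    return Ok(successes); // Quorum reached; remaining reads are dropped.
                }
            }
            Err(_) => failures += 1,
        }
    }
    Err(failures) // Not enough successful responses to satisfy the quorum.
}
```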
@@ -302,17 +302,19 @@ impl TierConfigMgr {
    }

    pub async fn get_driver<'a>(&'a mut self, tier_name: &str) -> std::result::Result<&'a WarmBackendImpl, AdminError> {
        Ok(match self.driver_cache.entry(tier_name.to_string()) {
            Entry::Occupied(e) => e.into_mut(),
            Entry::Vacant(e) => {
                let t = self.tiers.get(tier_name);
                if t.is_none() {
                    return Err(ERR_TIER_NOT_FOUND.clone());
                }
                let d = new_warm_backend(t.expect("err"), false).await?;
                e.insert(d)
            }
        })
        // Return cached driver if present
        if self.driver_cache.contains_key(tier_name) {
            return Ok(self.driver_cache.get(tier_name).unwrap());
        }

        // Get tier configuration and create new driver
        let tier_config = self.tiers.get(tier_name).ok_or_else(|| ERR_TIER_NOT_FOUND.clone())?;

        let driver = new_warm_backend(tier_config, false).await?;

        // Insert and return reference
        self.driver_cache.insert(tier_name.to_string(), driver);
        Ok(self.driver_cache.get(tier_name).unwrap())
    }

    pub async fn reload(&mut self, api: Arc<ECStore>) -> std::result::Result<(), std::io::Error> {
@@ -42,8 +42,7 @@ url.workspace = true
uuid.workspace = true
thiserror.workspace = true
once_cell.workspace = true
parking_lot = "0.12"
smallvec = "1.11"
smartstring = "1.0"
crossbeam-queue = "0.3"
heapless = "0.8"
parking_lot.workspace = true
smallvec.workspace = true
smartstring.workspace = true
crossbeam-queue = { workspace = true }
@@ -23,7 +23,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
    println!("Lock system status: {}", if manager.is_disabled() { "DISABLED" } else { "ENABLED" });

    match std::env::var("RUSTFS_ENABLE_LOCKS") {
        Ok(value) => println!("RUSTFS_ENABLE_LOCKS set to: {}", value),
        Ok(value) => println!("RUSTFS_ENABLE_LOCKS set to: {value}"),
        Err(_) => println!("RUSTFS_ENABLE_LOCKS not set (defaults to enabled)"),
    }

@@ -34,7 +34,7 @@ async fn main() -> Result<(), Box<dyn std::error::Error>> {
        println!("Lock acquired successfully! Disabled: {}", guard.is_disabled());
    }
    Err(e) => {
        println!("Failed to acquire lock: {:?}", e);
        println!("Failed to acquire lock: {e:?}");
    }
}

@@ -87,7 +87,7 @@ impl LockClient for LocalClient {
        current_owner,
        current_mode,
    }) => Ok(LockResponse::failure(
        format!("Lock conflict: resource held by {} in {:?} mode", current_owner, current_mode),
        format!("Lock conflict: resource held by {current_owner} in {current_mode:?} mode"),
        std::time::Duration::ZERO,
    )),
    Err(crate::fast_lock::LockResult::Acquired) => {
@@ -131,7 +131,7 @@ impl LockClient for LocalClient {
        current_owner,
        current_mode,
    }) => Ok(LockResponse::failure(
        format!("Lock conflict: resource held by {} in {:?} mode", current_owner, current_mode),
        format!("Lock conflict: resource held by {current_owner} in {current_mode:?} mode"),
        std::time::Duration::ZERO,
    )),
    Err(crate::fast_lock::LockResult::Acquired) => {

(File diff suppressed because it is too large)
@@ -152,14 +152,14 @@ pub mod performance_comparison {

    for i in 0..1000 {
        let guard = fast_manager
            .acquire_write_lock("bucket", format!("object_{}", i), owner)
            .acquire_write_lock("bucket", format!("object_{i}"), owner)
            .await
            .expect("Failed to acquire fast lock");
        guards.push(guard);
    }

    let fast_duration = start.elapsed();
    println!("Fast lock: 1000 acquisitions in {:?}", fast_duration);
    println!("Fast lock: 1000 acquisitions in {fast_duration:?}");

    // Release all
    drop(guards);

@@ -27,7 +27,7 @@ mod tests {
    let mut guards = Vec::new();
    for i in 0..100 {
        let bucket = format!("test-bucket-{}", i % 10); // Reuse some bucket names
        let object = format!("test-object-{}", i);
        let object = format!("test-object-{i}");

        let guard = manager
            .acquire_write_lock(bucket.as_str(), object.as_str(), "test-owner")
@@ -53,10 +53,7 @@ mod tests {
        0.0
    };

    println!(
        "Pool stats - Hits: {}, Misses: {}, Releases: {}, Pool size: {}",
        hits, misses, releases, pool_size
    );
    println!("Pool stats - Hits: {hits}, Misses: {misses}, Releases: {releases}, Pool size: {pool_size}");
    println!("Hit rate: {:.2}%", hit_rate * 100.0);

    // We should see some pool activity
@@ -82,7 +79,7 @@ mod tests {
        .expect("Failed to acquire second read lock");

    let duration = start.elapsed();
    println!("Two read locks on different objects took: {:?}", duration);
    println!("Two read locks on different objects took: {duration:?}");

    // Should be very fast since no contention
    assert!(duration < Duration::from_millis(10), "Read locks should be fast with no contention");
@@ -103,7 +100,7 @@ mod tests {
        .expect("Failed to acquire second read lock on same object");

    let duration = start.elapsed();
    println!("Two read locks on same object took: {:?}", duration);
    println!("Two read locks on same object took: {duration:?}");

    // Should still be fast since read locks are compatible
    assert!(duration < Duration::from_millis(10), "Compatible read locks should be fast");
@@ -132,7 +129,7 @@ mod tests {
        .expect("Failed to acquire second read lock");
    let second_duration = start.elapsed();

    println!("First lock: {:?}, Second lock: {:?}", first_duration, second_duration);
    println!("First lock: {first_duration:?}, Second lock: {second_duration:?}");

    // Both should be very fast (sub-millisecond typically)
    assert!(first_duration < Duration::from_millis(10));
@@ -157,7 +154,7 @@ mod tests {
    let result = manager.acquire_locks_batch(batch).await;
    let duration = start.elapsed();

    println!("Batch operation took: {:?}", duration);
    println!("Batch operation took: {duration:?}");

    assert!(result.all_acquired, "All locks should be acquired");
    assert_eq!(result.successful_locks.len(), 3);
@@ -27,7 +27,7 @@ use crate::fast_lock::{
/// High-performance object lock manager
#[derive(Debug)]
pub struct FastObjectLockManager {
    shards: Vec<Arc<LockShard>>,
    pub shards: Vec<Arc<LockShard>>,
    shard_mask: usize,
    config: LockConfig,
    metrics: Arc<GlobalMetrics>,
@@ -66,7 +66,12 @@ impl FastObjectLockManager {
    pub async fn acquire_lock(&self, request: ObjectLockRequest) -> Result<FastLockGuard, LockResult> {
        let shard = self.get_shard(&request.key);
        match shard.acquire_lock(&request).await {
            Ok(()) => Ok(FastLockGuard::new(request.key, request.mode, request.owner, shard.clone())),
            Ok(()) => {
                let guard = FastLockGuard::new(request.key, request.mode, request.owner, shard.clone());
                // Register guard to prevent premature cleanup
                shard.register_guard(guard.guard_id());
                Ok(guard)
            }
            Err(err) => Err(err),
        }
    }
@@ -117,6 +122,54 @@ impl FastObjectLockManager {
        self.acquire_lock(request).await
    }

    /// Acquire high-priority read lock - optimized for database queries
    pub async fn acquire_high_priority_read_lock(
        &self,
        bucket: impl Into<Arc<str>>,
        object: impl Into<Arc<str>>,
        owner: impl Into<Arc<str>>,
    ) -> Result<FastLockGuard, LockResult> {
        let request =
            ObjectLockRequest::new_read(bucket, object, owner).with_priority(crate::fast_lock::types::LockPriority::High);
        self.acquire_lock(request).await
    }

    /// Acquire high-priority write lock - optimized for database queries
    pub async fn acquire_high_priority_write_lock(
        &self,
        bucket: impl Into<Arc<str>>,
        object: impl Into<Arc<str>>,
        owner: impl Into<Arc<str>>,
    ) -> Result<FastLockGuard, LockResult> {
        let request =
            ObjectLockRequest::new_write(bucket, object, owner).with_priority(crate::fast_lock::types::LockPriority::High);
        self.acquire_lock(request).await
    }

    /// Acquire critical priority read lock - for system operations
    pub async fn acquire_critical_read_lock(
        &self,
        bucket: impl Into<Arc<str>>,
        object: impl Into<Arc<str>>,
        owner: impl Into<Arc<str>>,
    ) -> Result<FastLockGuard, LockResult> {
        let request =
            ObjectLockRequest::new_read(bucket, object, owner).with_priority(crate::fast_lock::types::LockPriority::Critical);
        self.acquire_lock(request).await
    }

    /// Acquire critical priority write lock - for system operations
    pub async fn acquire_critical_write_lock(
        &self,
        bucket: impl Into<Arc<str>>,
        object: impl Into<Arc<str>>,
        owner: impl Into<Arc<str>>,
    ) -> Result<FastLockGuard, LockResult> {
        let request =
            ObjectLockRequest::new_write(bucket, object, owner).with_priority(crate::fast_lock::types::LockPriority::Critical);
        self.acquire_lock(request).await
    }

    /// Acquire multiple locks atomically - optimized version
    pub async fn acquire_locks_batch(&self, batch_request: BatchLockRequest) -> BatchLockResult {
        // Pre-sort requests by (shard_id, key) to avoid deadlocks
@@ -304,7 +357,7 @@ impl FastObjectLockManager {
    }

    /// Get shard for object key
    fn get_shard(&self, key: &crate::fast_lock::types::ObjectKey) -> &Arc<LockShard> {
    pub fn get_shard(&self, key: &crate::fast_lock::types::ObjectKey) -> &Arc<LockShard> {
        let index = key.shard_index(self.shard_mask);
        &self.shards[index]
    }
@@ -362,6 +415,18 @@ impl Drop for FastObjectLockManager {
    }
}

impl Clone for FastObjectLockManager {
    fn clone(&self) -> Self {
        Self {
            shards: self.shards.clone(),
            shard_mask: self.shard_mask,
            config: self.config.clone(),
            metrics: self.metrics.clone(),
            cleanup_handle: RwLock::new(None), // Don't clone the cleanup task
        }
    }
}

#[async_trait::async_trait]
impl LockManager for FastObjectLockManager {
    async fn acquire_lock(&self, request: ObjectLockRequest) -> Result<FastLockGuard, LockResult> {
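A short sketch of how the new priority helpers might be used; the bucket, object, and owner strings are placeholders:

```rust
// Hedged usage sketch: illustrative names only.
async fn demo(manager: &FastObjectLockManager) -> Result<(), Box<dyn std::error::Error>> {
    // High priority queue-jumps normal requests and gets a longer adaptive timeout.
    let read_guard = manager
        .acquire_high_priority_read_lock("bucket", "hot-object", "query-engine")
        .await
        .map_err(|e| format!("read lock failed: {e:?}"))?;

    // Critical priority is reserved for system paths such as heal operations.
    let write_guard = manager
        .acquire_critical_write_lock("bucket", "other-object", "healer")
        .await
        .map_err(|e| format!("write lock failed: {e:?}"))?;

    // Guards are registered with their shard on acquire and unregistered on drop.
    drop(read_guard);
    drop(write_guard);
    Ok(())
}
```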
@@ -53,8 +53,11 @@ pub const DEFAULT_SHARD_COUNT: usize = 1024;
/// Default lock timeout
pub const DEFAULT_LOCK_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(30);

/// Default acquire timeout
pub const DEFAULT_ACQUIRE_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(5);
/// Default acquire timeout - increased for database workloads
pub const DEFAULT_ACQUIRE_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(30);

/// Maximum acquire timeout for high-load scenarios
pub const MAX_ACQUIRE_TIMEOUT: std::time::Duration = std::time::Duration::from_secs(60);

/// Lock cleanup interval
pub const CLEANUP_INTERVAL: std::time::Duration = std::time::Duration::from_secs(60);
@@ -18,7 +18,8 @@ use std::sync::atomic::{AtomicU32, AtomicUsize, Ordering};
use tokio::sync::Notify;

/// Optimized notification pool to reduce memory overhead and thundering herd effects
static NOTIFY_POOL: Lazy<Vec<Arc<Notify>>> = Lazy::new(|| (0..64).map(|_| Arc::new(Notify::new())).collect());
/// Increased pool size for better performance under high concurrency
static NOTIFY_POOL: Lazy<Vec<Arc<Notify>>> = Lazy::new(|| (0..128).map(|_| Arc::new(Notify::new())).collect());

/// Optimized notification system for object locks
#[derive(Debug)]
@@ -24,6 +24,7 @@ use crate::fast_lock::{
    state::ObjectLockState,
    types::{LockMode, LockResult, ObjectKey, ObjectLockRequest},
};
use std::collections::HashSet;

/// Lock shard to reduce global contention
#[derive(Debug)]
@@ -36,6 +37,8 @@ pub struct LockShard {
    metrics: ShardMetrics,
    /// Shard ID for debugging
    _shard_id: usize,
    /// Active guard IDs to prevent cleanup of locks with live guards
    active_guards: parking_lot::Mutex<HashSet<u64>>,
}

impl LockShard {
@@ -45,6 +48,7 @@ impl LockShard {
        object_pool: ObjectStatePool::new(),
        metrics: ShardMetrics::new(),
        _shard_id: shard_id,
        active_guards: parking_lot::Mutex::new(HashSet::new()),
    }
}

@@ -123,7 +127,12 @@ impl LockShard {

    /// Slow path with async waiting
    async fn acquire_lock_slow_path(&self, request: &ObjectLockRequest, start_time: Instant) -> Result<(), LockResult> {
        let deadline = start_time + request.acquire_timeout;
        // Use adaptive timeout based on current load and request priority
        let adaptive_timeout = self.calculate_adaptive_timeout(request);
        let deadline = start_time + adaptive_timeout;

        let mut retry_count = 0u32;
        const MAX_RETRIES: u32 = 10;

        loop {
            // Get or create object state
@@ -157,8 +166,22 @@ impl LockShard {
                return Err(LockResult::Timeout);
            }

            // Wait for notification using optimized notify system
            // Use intelligent wait strategy: mix of notification wait and exponential backoff
            let remaining = deadline - Instant::now();

            if retry_count < MAX_RETRIES && remaining > Duration::from_millis(10) {
                // For early retries, use a brief exponential backoff instead of full notification wait
                let backoff_ms = std::cmp::min(10 << retry_count, 100); // 10ms, 20ms, 40ms, 80ms, 100ms max
                let backoff_duration = Duration::from_millis(backoff_ms);

                if backoff_duration < remaining {
                    tokio::time::sleep(backoff_duration).await;
                    retry_count += 1;
                    continue;
                }
            }

            // If we've exhausted quick retries or have little time left, use notification wait
            let wait_result = match request.mode {
                LockMode::Shared => {
                    state.atomic_state.inc_readers_waiting();
@@ -179,7 +202,7 @@ impl LockShard {
                return Err(LockResult::Timeout);
            }

            // Continue the loop to try acquisition again
            retry_count += 1;
        }
    }

@@ -203,10 +226,30 @@ impl LockShard {
                should_cleanup = !state.is_locked() && !state.atomic_state.has_waiters();
            } else {
                should_cleanup = false;
                // Additional diagnostics for release failures
                let current_mode = state.current_mode();
                let is_locked = state.is_locked();
                let has_waiters = state.atomic_state.has_waiters();

                tracing::debug!(
                    "Lock release failed in shard: key={}, owner={}, mode={:?}, current_mode={:?}, is_locked={}, has_waiters={}",
                    key,
                    owner,
                    mode,
                    current_mode,
                    is_locked,
                    has_waiters
                );
            }
        } else {
            result = false;
            should_cleanup = false;
            tracing::debug!(
                "Lock release failed - key not found in shard: key={}, owner={}, mode={:?}",
                key,
                owner,
                mode
            );
        }
    }

@@ -218,6 +261,134 @@ impl LockShard {
        result
    }

    /// Release lock with guard ID tracking for double-release prevention
    pub fn release_lock_with_guard(&self, key: &ObjectKey, owner: &Arc<str>, mode: LockMode, guard_id: u64) -> bool {
        // First, try to remove the guard from active set
        let guard_was_active = {
            let mut guards = self.active_guards.lock();
            guards.remove(&guard_id)
        };

        // If guard was not active, this is a double-release attempt
        if !guard_was_active {
            tracing::debug!(
                "Double-release attempt blocked: key={}, owner={}, mode={:?}, guard_id={}",
                key,
                owner,
                mode,
                guard_id
            );
            return false;
        }

        // Proceed with normal release
        let should_cleanup;
        let result;

        {
            let objects = self.objects.read();
            if let Some(state) = objects.get(key) {
                result = match mode {
                    LockMode::Shared => state.release_shared(owner),
                    LockMode::Exclusive => state.release_exclusive(owner),
                };

                if result {
                    self.metrics.record_release();
                    should_cleanup = !state.is_locked() && !state.atomic_state.has_waiters();
                } else {
                    should_cleanup = false;
                }
            } else {
                result = false;
                should_cleanup = false;
            }
        }

        if should_cleanup {
            self.schedule_cleanup(key.clone());
        }

        result
    }

    /// Register a guard to prevent premature cleanup
    pub fn register_guard(&self, guard_id: u64) {
        let mut guards = self.active_guards.lock();
        guards.insert(guard_id);
    }

    /// Unregister a guard (called when guard is dropped)
    pub fn unregister_guard(&self, guard_id: u64) {
        let mut guards = self.active_guards.lock();
        guards.remove(&guard_id);
    }

    /// Get count of active guards (for testing)
    #[cfg(test)]
    pub fn active_guard_count(&self) -> usize {
        let guards = self.active_guards.lock();
        guards.len()
    }

    /// Check if a guard is active (for testing)
    #[cfg(test)]
    pub fn is_guard_active(&self, guard_id: u64) -> bool {
        let guards = self.active_guards.lock();
        guards.contains(&guard_id)
    }

    /// Calculate adaptive timeout based on current system load and request priority
    fn calculate_adaptive_timeout(&self, request: &ObjectLockRequest) -> Duration {
        let base_timeout = request.acquire_timeout;

        // Get current shard load metrics
        let lock_count = {
            let objects = self.objects.read();
            objects.len()
        };

        let active_guard_count = {
            let guards = self.active_guards.lock();
            guards.len()
        };

        // Calculate load factor with more generous thresholds for database workloads
        let total_load = (lock_count + active_guard_count) as f64;
        let load_factor = total_load / 500.0; // Lowered threshold for faster scaling

        // More aggressive priority multipliers for database scenarios
        let priority_multiplier = match request.priority {
            crate::fast_lock::types::LockPriority::Critical => 3.0, // Increased
            crate::fast_lock::types::LockPriority::High => 2.0,     // Increased
            crate::fast_lock::types::LockPriority::Normal => 1.2,   // Slightly increased base
            crate::fast_lock::types::LockPriority::Low => 0.9,
        };

        // More generous load-based scaling
        let load_multiplier = if load_factor > 2.0 {
            // Very high load: drastically extend timeout
            1.0 + (load_factor * 2.0)
        } else if load_factor > 1.0 {
            // High load: significantly extend timeout
            1.0 + (load_factor * 1.8)
        } else if load_factor > 0.3 {
            // Medium load: moderately extend timeout
            1.0 + (load_factor * 1.2)
        } else {
            // Low load: still give some buffer
            1.1
        };

        let total_multiplier = priority_multiplier * load_multiplier;
        let adaptive_timeout_secs =
            (base_timeout.as_secs_f64() * total_multiplier).min(crate::fast_lock::MAX_ACQUIRE_TIMEOUT.as_secs_f64());

        // Ensure minimum reasonable timeout even for low priority
        let min_timeout_secs = base_timeout.as_secs_f64() * 0.8;
        Duration::from_secs_f64(adaptive_timeout_secs.max(min_timeout_secs))
    }
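To make the scaling concrete, here is a worked example of the formula above, using hypothetical numbers:

```rust
// Worked example (hypothetical load figures) of calculate_adaptive_timeout:
fn adaptive_timeout_example() -> f64 {
    let base = 30.0_f64; // DEFAULT_ACQUIRE_TIMEOUT, in seconds
    let load_factor = 900.0 / 500.0; // 900 tracked entries in the shard => 1.8
    let priority_multiplier = 3.0; // LockPriority::Critical
    let load_multiplier = 1.0 + load_factor * 1.8; // high-load branch (1.0 < 1.8 <= 2.0) => 4.24
    let raw = base * priority_multiplier * load_multiplier; // ≈ 381.6 s
    raw.min(60.0) // MAX_ACQUIRE_TIMEOUT clamps the result to 60 s
}
```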
    /// Batch acquire locks with ordering to prevent deadlocks
    pub async fn acquire_locks_batch(
        &self,
@@ -324,22 +495,44 @@ impl LockShard {
    pub fn adaptive_cleanup(&self) -> usize {
        let current_load = self.current_load_factor();
        let lock_count = self.lock_count();
        let active_guard_count = self.active_guards.lock().len();

        // Be much more conservative if there are active guards or very high load
        if active_guard_count > 0 && current_load > 0.8 {
            tracing::debug!(
                "Skipping aggressive cleanup due to {} active guards and high load ({:.2})",
                active_guard_count,
                current_load
            );
            // Only clean very old entries when under high load with active guards
            return self.cleanup_expired_batch(3, 1_200_000); // 20 minutes, smaller batches
        }

        // Under extreme load, skip cleanup entirely to reduce contention
        if current_load > 1.5 && active_guard_count > 10 {
            tracing::debug!(
                "Skipping all cleanup due to extreme load ({:.2}) and {} active guards",
                current_load,
                active_guard_count
            );
            return 0;
        }

        // Dynamically adjust cleanup strategy based on load
        let cleanup_batch_size = match current_load {
            load if load > 0.9 => lock_count / 20, // High load: small batch cleanup
            load if load > 0.7 => lock_count / 10, // Medium load: moderate cleanup
            _ => lock_count / 5,                   // Low load: aggressive cleanup
            load if load > 0.9 => lock_count / 50, // Much smaller batches for high load
            load if load > 0.7 => lock_count / 20, // Smaller batches for medium load
            _ => lock_count / 10,                  // More conservative even for low load
        };

        // Use longer timeout for high load scenarios
        // Use much longer timeouts to prevent premature cleanup
        let cleanup_threshold_millis = match current_load {
            load if load > 0.8 => 300_000, // 5 minutes for high load
            load if load > 0.5 => 180_000, // 3 minutes for medium load
            _ => 60_000,                   // 1 minute for low load
            load if load > 0.8 => 600_000, // 10 minutes for high load
            load if load > 0.5 => 300_000, // 5 minutes for medium load
            _ => 120_000,                  // 2 minutes for low load
        };

        self.cleanup_expired_batch(cleanup_batch_size.max(10), cleanup_threshold_millis)
        self.cleanup_expired_batch_protected(cleanup_batch_size.max(5), cleanup_threshold_millis)
    }

    /// Cleanup expired and unused locks
@@ -378,6 +571,19 @@ impl LockShard {
        cleaned
    }

    /// Protected batch cleanup that respects active guards
    fn cleanup_expired_batch_protected(&self, max_batch_size: usize, cleanup_threshold_millis: u64) -> usize {
        let active_guards = self.active_guards.lock();
        let guard_count = active_guards.len();
        drop(active_guards); // Release lock early

        if guard_count > 0 {
            tracing::debug!("Cleanup with {} active guards, being conservative", guard_count);
        }

        self.cleanup_expired_batch(max_batch_size, cleanup_threshold_millis)
    }

    /// Batch cleanup with limited processing to avoid blocking
    fn cleanup_expired_batch(&self, max_batch_size: usize, cleanup_threshold_millis: u64) -> usize {
        let mut cleaned = 0;
@@ -373,11 +373,23 @@ impl ObjectLockState {
            }
            true
        } else {
            // Inconsistency - re-add owner
            // Inconsistency detected - atomic state shows no shared lock but owner was found
            tracing::warn!(
                "Atomic state inconsistency during shared lock release: owner={}, remaining_owners={}",
                owner,
                shared.len()
            );
            // Re-add owner to maintain consistency
            shared.push(owner.clone());
            false
        }
    } else {
        // Owner not found in shared owners list
        tracing::debug!(
            "Shared lock release failed - owner not found: owner={}, current_owners={:?}",
            owner,
            shared.iter().map(|s| s.as_ref()).collect::<Vec<_>>()
        );
        false
    }
}
@@ -401,9 +413,21 @@ impl ObjectLockState {
        }
        true
    } else {
        // Atomic state inconsistency - current owner matches but atomic release failed
        tracing::warn!(
            "Atomic state inconsistency during exclusive lock release: owner={}, atomic_state={:b}",
            owner,
            self.atomic_state.state.load(Ordering::Acquire)
        );
        false
    }
} else {
    // Owner mismatch
    tracing::debug!(
        "Exclusive lock release failed - owner mismatch: expected_owner={}, actual_owner={:?}",
        owner,
        current.as_ref().map(|s| s.as_ref())
    );
    false
}
@@ -616,6 +616,7 @@ impl ServerHandler for RustfsMcpServer {
        server_info: Implementation {
            name: "rustfs-mcp-server".into(),
            version: env!("CARGO_PKG_VERSION").into(),
            ..Default::default()
        },
    }
}
@@ -40,7 +40,7 @@ rustfs-config = { workspace = true, features = ["constants", "observability"] }
rustfs-utils = { workspace = true, features = ["ip", "path"] }
async-trait = { workspace = true }
chrono = { workspace = true }
flexi_logger = { workspace = true, features = ["trc", "kv"] }
flexi_logger = { workspace = true }
nu-ansi-term = { workspace = true }
nvml-wrapper = { workspace = true, optional = true }
opentelemetry = { workspace = true }
@@ -62,6 +62,7 @@ serde_json = { workspace = true }
sysinfo = { workspace = true }
thiserror = { workspace = true }

# Only enable kafka features and related dependencies on Linux
[target.'cfg(target_os = "linux")'.dependencies]
rdkafka = { workspace = true, features = ["tokio"], optional = true }
@@ -21,12 +21,57 @@

## ✨ Features

- **Environment-Aware Logging**: Automatically configures logging behavior based on deployment environment
  - Production: File-only logging (stdout disabled by default for security and log aggregation)
  - Development/Test: Full logging with stdout support for debugging
- OpenTelemetry integration for distributed tracing
- Prometheus metrics collection and exposition
- Structured logging with configurable levels
- Structured logging with configurable levels and rotation
- Performance profiling and analytics
- Real-time health checks and status monitoring
- Custom dashboards and alerting integration
- Enhanced error handling and resilience

## 🚀 Environment-Aware Logging

The obs module automatically adapts logging behavior based on your deployment environment:

### Production Environment
```bash
# Set production environment - disables stdout logging by default
export RUSTFS_OBS_ENVIRONMENT=production

# All logs go to files only (no stdout) for security and log aggregation
# Enhanced error handling with clear failure diagnostics
```

### Development/Test Environment
```bash
# Set development environment - enables stdout logging
export RUSTFS_OBS_ENVIRONMENT=development

# Logs appear both in files and stdout for easier debugging
# Full span tracking and verbose error messages
```

### Configuration Override
You can always override the environment defaults:
```rust
use rustfs_obs::OtelConfig;

let config = OtelConfig {
    endpoint: "".to_string(),
    use_stdout: Some(true), // Explicit override - forces stdout even in production
    environment: Some("production".to_string()),
    ..Default::default()
};
```

### Supported Environment Values
- `production` - Secure file-only logging
- `development` - Full debugging with stdout
- `test` - Test environment with stdout support
- `staging` - Staging environment with stdout support

## 📚 Documentation
@@ -13,15 +13,19 @@
// limitations under the License.

use crate::OtelConfig;
use flexi_logger::{Age, Cleanup, Criterion, DeferredNow, FileSpec, LogSpecification, Naming, Record, WriteMode, style};
use flexi_logger::{
Age, Cleanup, Criterion, DeferredNow, FileSpec, LogSpecification, Naming, Record, WriteMode,
WriteMode::{AsyncWith, BufferAndFlush},
style,
};
use nu_ansi_term::Color;
use opentelemetry::trace::TracerProvider;
use opentelemetry::{KeyValue, global};
use opentelemetry_appender_tracing::layer::OpenTelemetryTracingBridge;
use opentelemetry_otlp::WithExportConfig;
use opentelemetry_sdk::logs::SdkLoggerProvider;
use opentelemetry_sdk::{
Resource,
logs::SdkLoggerProvider,
metrics::{MeterProviderBuilder, PeriodicReader, SdkMeterProvider},
trace::{RandomIdGenerator, Sampler, SdkTracerProvider},
};
@@ -29,15 +33,19 @@ use opentelemetry_semantic_conventions::{
SCHEMA_URL,
attribute::{DEPLOYMENT_ENVIRONMENT_NAME, NETWORK_LOCAL_ADDRESS, SERVICE_VERSION as OTEL_SERVICE_VERSION},
};
use rustfs_config::observability::ENV_OBS_LOG_DIRECTORY;
use rustfs_config::{
APP_NAME, DEFAULT_LOG_KEEP_FILES, DEFAULT_LOG_LEVEL, ENVIRONMENT, METER_INTERVAL, SAMPLE_RATIO, SERVICE_VERSION, USE_STDOUT,
observability::{
DEFAULT_OBS_ENVIRONMENT_PRODUCTION, DEFAULT_OBS_LOG_FLUSH_MS, DEFAULT_OBS_LOG_MESSAGE_CAPA, DEFAULT_OBS_LOG_POOL_CAPA,
ENV_OBS_LOG_DIRECTORY,
},
};
use rustfs_utils::get_local_ip_with_default;
use smallvec::SmallVec;
use std::borrow::Cow;
use std::fs;
use std::io::IsTerminal;
use std::time::Duration;
use std::{env, fs};
use tracing::info;
use tracing_error::ErrorLayer;
use tracing_opentelemetry::{MetricsLayer, OpenTelemetryLayer};
@@ -121,7 +129,7 @@ fn resource(config: &OtelConfig) -> Resource {
/// Creates a periodic reader for stdout metrics
fn create_periodic_reader(interval: u64) -> PeriodicReader<opentelemetry_stdout::MetricExporter> {
PeriodicReader::builder(opentelemetry_stdout::MetricExporter::default())
.with_interval(std::time::Duration::from_secs(interval))
.with_interval(Duration::from_secs(interval))
.build()
}

@@ -129,11 +137,23 @@ fn create_periodic_reader(interval: u64) -> PeriodicReader<opentelemetry_stdout:
pub(crate) fn init_telemetry(config: &OtelConfig) -> OtelGuard {
// avoid repeated access to configuration fields
let endpoint = &config.endpoint;
let use_stdout = config.use_stdout.unwrap_or(USE_STDOUT);
let environment = config.environment.as_deref().unwrap_or(ENVIRONMENT);

// Environment-aware stdout configuration
// Check for explicit environment control via RUSTFS_OBS_ENVIRONMENT
let is_production = environment.to_lowercase() == DEFAULT_OBS_ENVIRONMENT_PRODUCTION;

// Default stdout behavior based on environment
let default_use_stdout = if is_production {
false // Disable stdout in production for security and log aggregation
} else {
USE_STDOUT // Use configured default for dev/test environments
};

let use_stdout = config.use_stdout.unwrap_or(default_use_stdout);
let meter_interval = config.meter_interval.unwrap_or(METER_INTERVAL);
let logger_level = config.logger_level.as_deref().unwrap_or(DEFAULT_LOG_LEVEL);
let service_name = config.service_name.as_deref().unwrap_or(APP_NAME);
let environment = config.environment.as_deref().unwrap_or(ENVIRONMENT);

// Configure flexi_logger to cut by time and size
let mut flexi_logger_handle = None;
@@ -144,7 +164,7 @@ pub(crate) fn init_telemetry(config: &OtelConfig) -> OtelGuard {
// initialize tracer provider
let tracer_provider = {
let sample_ratio = config.sample_ratio.unwrap_or(SAMPLE_RATIO);
let sampler = if sample_ratio > 0.0 && sample_ratio < 1.0 {
let sampler = if (0.0..1.0).contains(&sample_ratio) {
Sampler::TraceIdRatioBased(sample_ratio)
} else {
Sampler::AlwaysOn
@@ -197,7 +217,7 @@ pub(crate) fn init_telemetry(config: &OtelConfig) -> OtelGuard {

builder = builder.with_reader(
PeriodicReader::builder(exporter)
.with_interval(std::time::Duration::from_secs(meter_interval))
.with_interval(Duration::from_secs(meter_interval))
.build(),
);

@@ -249,7 +269,7 @@ pub(crate) fn init_telemetry(config: &OtelConfig) -> OtelGuard {
.with_line_number(true);

// Only add full span events tracking in the development environment
if environment != ENVIRONMENT {
if !is_production {
layer = layer.with_span_events(FmtSpan::FULL);
}

@@ -257,8 +277,7 @@ pub(crate) fn init_telemetry(config: &OtelConfig) -> OtelGuard {
};

let filter = build_env_filter(logger_level, None);
let otel_filter = build_env_filter(logger_level, None);
let otel_layer = OpenTelemetryTracingBridge::new(&logger_provider).with_filter(otel_filter);
let otel_layer = OpenTelemetryTracingBridge::new(&logger_provider).with_filter(build_env_filter(logger_level, None));
let tracer = tracer_provider.tracer(Cow::Borrowed(service_name).to_string());

// Configure registry to avoid repeated calls to filter methods
@@ -280,78 +299,96 @@ pub(crate) fn init_telemetry(config: &OtelConfig) -> OtelGuard {
"OpenTelemetry telemetry initialized with OTLP endpoint: {}, logger_level: {}, RUST_LOG env: {}",
endpoint,
logger_level,
std::env::var("RUST_LOG").unwrap_or_else(|_| "Not set".to_string())
env::var("RUST_LOG").unwrap_or_else(|_| "Not set".to_string())
);
}
}

OtelGuard {
return OtelGuard {
tracer_provider: Some(tracer_provider),
meter_provider: Some(meter_provider),
logger_provider: Some(logger_provider),
_flexi_logger_handles: flexi_logger_handle,
}
} else {
// Obtain the log directory and file name configuration
let default_log_directory = rustfs_utils::dirs::get_log_directory_to_string(ENV_OBS_LOG_DIRECTORY);
let log_directory = config.log_directory.as_deref().unwrap_or(default_log_directory.as_str());
let log_filename = config.log_filename.as_deref().unwrap_or(service_name);

if let Err(e) = fs::create_dir_all(log_directory) {
eprintln!("Failed to create log directory {log_directory}: {e}");
}
#[cfg(unix)]
{
// Linux/macOS Setting Permissions
// Set the log directory permissions to 755 (rwxr-xr-x)
use std::fs::Permissions;
use std::os::unix::fs::PermissionsExt;
match fs::set_permissions(log_directory, Permissions::from_mode(0o755)) {
Ok(_) => eprintln!("Log directory permissions set to 755: {log_directory}"),
Err(e) => eprintln!("Failed to set log directory permissions {log_directory}: {e}"),
}
}

// Build log cutting conditions
let rotation_criterion = match (config.log_rotation_time.as_deref(), config.log_rotation_size_mb) {
// Cut by time and size at the same time
(Some(time), Some(size)) => {
let age = match time.to_lowercase().as_str() {
"hour" => Age::Hour,
"day" => Age::Day,
"minute" => Age::Minute,
"second" => Age::Second,
_ => Age::Day, // The default is by day
};
Criterion::AgeOrSize(age, size * 1024 * 1024) // Convert to bytes
}
// Cut by time only
(Some(time), None) => {
let age = match time.to_lowercase().as_str() {
"hour" => Age::Hour,
"day" => Age::Day,
"minute" => Age::Minute,
"second" => Age::Second,
_ => Age::Day, // The default is by day
};
Criterion::Age(age)
}
// Cut by size only
(None, Some(size)) => {
Criterion::Size(size * 1024 * 1024) // Convert to bytes
}
// By default, it is cut by the day
_ => Criterion::Age(Age::Day),
};
}

// The number of log files retained
let keep_files = config.log_keep_files.unwrap_or(DEFAULT_LOG_KEEP_FILES);
// Obtain the log directory and file name configuration
let default_log_directory = rustfs_utils::dirs::get_log_directory_to_string(ENV_OBS_LOG_DIRECTORY);
let log_directory = config.log_directory.as_deref().unwrap_or(default_log_directory.as_str());
let log_filename = config.log_filename.as_deref().unwrap_or(service_name);

// Parsing the log level
let log_spec = LogSpecification::parse(logger_level).unwrap_or(LogSpecification::info());
// Enhanced error handling for directory creation
if let Err(e) = fs::create_dir_all(log_directory) {
eprintln!("ERROR: Failed to create log directory '{log_directory}': {e}");
eprintln!("Ensure the parent directory exists and you have write permissions.");
eprintln!("Attempting to continue with logging, but file logging may fail.");
} else {
eprintln!("Log directory ready: {log_directory}");
}

// Convert the logger_level string to the corresponding LevelFilter
let level_filter = match logger_level.to_lowercase().as_str() {
#[cfg(unix)]
{
// Linux/macOS Setting Permissions with better error handling
use std::fs::Permissions;
use std::os::unix::fs::PermissionsExt;
match fs::set_permissions(log_directory, Permissions::from_mode(0o755)) {
Ok(_) => eprintln!("Log directory permissions set to 755: {log_directory}"),
Err(e) => {
eprintln!("WARNING: Failed to set log directory permissions for '{log_directory}': {e}");
eprintln!("This may affect log file access. Consider checking directory ownership and permissions.");
}
}
}

// Build log cutting conditions
let rotation_criterion = match (config.log_rotation_time.as_deref(), config.log_rotation_size_mb) {
// Cut by time and size at the same time
(Some(time), Some(size)) => {
let age = match time.to_lowercase().as_str() {
"hour" => Age::Hour,
"day" => Age::Day,
"minute" => Age::Minute,
"second" => Age::Second,
_ => Age::Day, // The default is by day
};
Criterion::AgeOrSize(age, size * 1024 * 1024) // Convert to bytes
}
// Cut by time only
(Some(time), None) => {
let age = match time.to_lowercase().as_str() {
"hour" => Age::Hour,
"day" => Age::Day,
"minute" => Age::Minute,
"second" => Age::Second,
_ => Age::Day, // The default is by day
};
Criterion::Age(age)
}
// Cut by size only
(None, Some(size)) => {
Criterion::Size(size * 1024 * 1024) // Convert to bytes
}
// By default, it is cut by the day
_ => Criterion::Age(Age::Day),
};

// The number of log files retained
let keep_files = config.log_keep_files.unwrap_or(DEFAULT_LOG_KEEP_FILES);

// Parsing the log level
let log_spec = LogSpecification::parse(logger_level).unwrap_or_else(|e| {
eprintln!("WARNING: Invalid logger level '{logger_level}': {e}. Using default 'info' level.");
LogSpecification::info()
});

// Environment-aware stdout configuration
// In production: disable stdout completely (Duplicate::None)
// In development/test: use level-based filtering
let level_filter = if is_production {
flexi_logger::Duplicate::None // No stdout output in production
} else {
// Convert the logger_level string to the corresponding LevelFilter for dev/test
match logger_level.to_lowercase().as_str() {
"trace" => flexi_logger::Duplicate::Trace,
"debug" => flexi_logger::Duplicate::Debug,
"info" => flexi_logger::Duplicate::Info,
@@ -359,56 +396,114 @@ pub(crate) fn init_telemetry(config: &OtelConfig) -> OtelGuard {
"error" => flexi_logger::Duplicate::Error,
"off" => flexi_logger::Duplicate::None,
_ => flexi_logger::Duplicate::Info, // the default is info
};
}
};

// Configure the flexi_logger
let flexi_logger_result = flexi_logger::Logger::try_with_env_or_str(logger_level)
.unwrap_or_else(|e| {
eprintln!("Invalid logger level: {logger_level}, using default: {DEFAULT_LOG_LEVEL}, failed error: {e:?}");
flexi_logger::Logger::with(log_spec.clone())
})
.log_to_file(
FileSpec::default()
.directory(log_directory)
.basename(log_filename)
.suppress_timestamp(),
)
.rotate(rotation_criterion, Naming::TimestampsDirect, Cleanup::KeepLogFiles(keep_files.into()))
.format_for_files(format_for_file) // Add a custom formatting function for file output
.duplicate_to_stdout(level_filter) // Use dynamic levels
.format_for_stdout(format_with_color) // Add a custom formatting function for terminal output
.write_mode(WriteMode::BufferAndFlush)
.append() // Avoid clearing existing logs at startup
.print_message() // Startup information output to console
.start();

if let Ok(logger) = flexi_logger_result {
// Save the logger handle to keep the logging
flexi_logger_handle = Some(logger);

eprintln!("Flexi logger initialized with file logging to {log_directory}/{log_filename}.log");

// Log the configured log rotation conditions
match (config.log_rotation_time.as_deref(), config.log_rotation_size_mb) {
(Some(time), Some(size)) => eprintln!(
"Log rotation configured for: every {time} or when size exceeds {size}MB, keeping {keep_files} files"
),
(Some(time), None) => eprintln!("Log rotation configured for: every {time}, keeping {keep_files} files"),
(None, Some(size)) => {
eprintln!("Log rotation configured for: when size exceeds {size}MB, keeping {keep_files} files")
}
_ => eprintln!("Log rotation configured for: daily, keeping {keep_files} files"),
// Choose write mode based on environment
let write_mode = if is_production {
get_env_async_with().unwrap_or_else(|| {
eprintln!(
"Using default Async write mode in production. To customize, set RUSTFS_OBS_LOG_POOL_CAPA, RUSTFS_OBS_LOG_MESSAGE_CAPA, and RUSTFS_OBS_LOG_FLUSH_MS environment variables."
);
AsyncWith {
pool_capa: DEFAULT_OBS_LOG_POOL_CAPA,
message_capa: DEFAULT_OBS_LOG_MESSAGE_CAPA,
flush_interval: Duration::from_millis(DEFAULT_OBS_LOG_FLUSH_MS),
}
})
} else {
BufferAndFlush
};

// Configure the flexi_logger with enhanced error handling
let mut flexi_logger_builder = flexi_logger::Logger::try_with_env_or_str(logger_level)
.unwrap_or_else(|e| {
eprintln!("WARNING: Invalid logger configuration '{logger_level}': {e:?}");
eprintln!("Falling back to default configuration with level: {DEFAULT_LOG_LEVEL}");
flexi_logger::Logger::with(log_spec.clone())
})
.log_to_file(
FileSpec::default()
.directory(log_directory)
.basename(log_filename)
.suppress_timestamp(),
)
.rotate(rotation_criterion, Naming::TimestampsDirect, Cleanup::KeepLogFiles(keep_files.into()))
.format_for_files(format_for_file) // Add a custom formatting function for file output
.write_mode(write_mode)
.append(); // Avoid clearing existing logs at startup

// Environment-aware stdout configuration
flexi_logger_builder = flexi_logger_builder.duplicate_to_stdout(level_filter);

// Only add stdout formatting and startup messages in non-production environments
if !is_production {
flexi_logger_builder = flexi_logger_builder
.format_for_stdout(format_with_color) // Add a custom formatting function for terminal output
.print_message(); // Startup information output to console
}

let flexi_logger_result = flexi_logger_builder.start();

if let Ok(logger) = flexi_logger_result {
// Save the logger handle to keep the logging
flexi_logger_handle = Some(logger);

// Environment-aware success messages
if is_production {
eprintln!("Production logging initialized: file-only mode to {log_directory}/{log_filename}.log");
eprintln!("Stdout logging disabled in production environment for security and log aggregation.");
} else {
eprintln!("Failed to initialize flexi_logger: {:?}", flexi_logger_result.err());
eprintln!("Development/Test logging initialized with file logging to {log_directory}/{log_filename}.log");
eprintln!("Stdout logging enabled for debugging. Environment: {environment}");
}

OtelGuard {
tracer_provider: None,
meter_provider: None,
logger_provider: None,
_flexi_logger_handles: flexi_logger_handle,
// Log rotation configuration details
match (config.log_rotation_time.as_deref(), config.log_rotation_size_mb) {
(Some(time), Some(size)) => {
eprintln!("Log rotation configured for: every {time} or when size exceeds {size}MB, keeping {keep_files} files")
}
(Some(time), None) => eprintln!("Log rotation configured for: every {time}, keeping {keep_files} files"),
(None, Some(size)) => {
eprintln!("Log rotation configured for: when size exceeds {size}MB, keeping {keep_files} files")
}
_ => eprintln!("Log rotation configured for: daily, keeping {keep_files} files"),
}
} else {
eprintln!("CRITICAL: Failed to initialize flexi_logger: {:?}", flexi_logger_result.err());
eprintln!("Possible causes:");
eprintln!(" 1. Insufficient permissions to write to log directory: {log_directory}");
eprintln!(" 2. Log directory does not exist or is not accessible");
eprintln!(" 3. Invalid log configuration parameters");
eprintln!(" 4. Disk space issues");
eprintln!("Application will continue but logging to files will not work properly.");
}

OtelGuard {
tracer_provider: None,
meter_provider: None,
logger_provider: None,
_flexi_logger_handles: flexi_logger_handle,
}
}

// Read the AsyncWith parameter from the environment variable
fn get_env_async_with() -> Option<WriteMode> {
let pool_capa = env::var("RUSTFS_OBS_LOG_POOL_CAPA")
.ok()
.and_then(|v| v.parse::<usize>().ok());
let message_capa = env::var("RUSTFS_OBS_LOG_MESSAGE_CAPA")
.ok()
.and_then(|v| v.parse::<usize>().ok());
let flush_ms = env::var("RUSTFS_OBS_LOG_FLUSH_MS").ok().and_then(|v| v.parse::<u64>().ok());

match (pool_capa, message_capa, flush_ms) {
(Some(pool), Some(msg), Some(flush)) => Some(AsyncWith {
pool_capa: pool,
message_capa: msg,
flush_interval: Duration::from_millis(flush),
}),
_ => None,
}
}
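As a usage note, `get_env_async_with` only returns a custom `AsyncWith` mode when all three variables parse successfully; otherwise the compiled-in defaults apply. A minimal sketch of a production override (the values are illustrative, not recommendations):

```bash
# Illustrative values only - all three variables must be set, or the defaults apply
export RUSTFS_OBS_ENVIRONMENT=production
export RUSTFS_OBS_LOG_POOL_CAPA=100       # async buffer pool capacity
export RUSTFS_OBS_LOG_MESSAGE_CAPA=1024   # per-message buffer capacity
export RUSTFS_OBS_LOG_FLUSH_MS=500        # flush interval in milliseconds
```
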
@@ -473,3 +568,140 @@ fn format_for_file(w: &mut dyn std::io::Write, now: &mut DeferredNow, record: &R
record.args()
)
}

#[cfg(test)]
mod tests {
use super::*;

#[test]
fn test_production_environment_detection() {
// Test production environment logic
let production_envs = vec!["production", "PRODUCTION", "Production"];

for env_value in production_envs {
let is_production = env_value.to_lowercase() == "production";
assert!(is_production, "Should detect '{env_value}' as production environment");
}
}

#[test]
fn test_non_production_environment_detection() {
// Test non-production environment logic
let non_production_envs = vec!["development", "test", "staging", "dev", "local"];

for env_value in non_production_envs {
let is_production = env_value.to_lowercase() == "production";
assert!(!is_production, "Should not detect '{env_value}' as production environment");
}
}

#[test]
fn test_stdout_behavior_logic() {
// Test the stdout behavior logic without environment manipulation
struct TestCase {
is_production: bool,
config_use_stdout: Option<bool>,
expected_use_stdout: bool,
description: &'static str,
}

let test_cases = vec![
TestCase {
is_production: true,
config_use_stdout: None,
expected_use_stdout: false,
description: "Production with no config should disable stdout",
},
TestCase {
is_production: false,
config_use_stdout: None,
expected_use_stdout: USE_STDOUT,
description: "Non-production with no config should use default",
},
TestCase {
is_production: true,
config_use_stdout: Some(true),
expected_use_stdout: true,
description: "Production with explicit true should enable stdout",
},
TestCase {
is_production: true,
config_use_stdout: Some(false),
expected_use_stdout: false,
description: "Production with explicit false should disable stdout",
},
TestCase {
is_production: false,
config_use_stdout: Some(true),
expected_use_stdout: true,
description: "Non-production with explicit true should enable stdout",
},
];

for case in test_cases {
let default_use_stdout = if case.is_production { false } else { USE_STDOUT };

let actual_use_stdout = case.config_use_stdout.unwrap_or(default_use_stdout);

assert_eq!(actual_use_stdout, case.expected_use_stdout, "Test case failed: {}", case.description);
}
}

#[test]
fn test_log_level_filter_mapping_logic() {
// Test the log level mapping logic used in the real implementation
let test_cases = vec![
("trace", "Trace"),
("debug", "Debug"),
("info", "Info"),
("warn", "Warn"),
("warning", "Warn"),
("error", "Error"),
("off", "None"),
("invalid_level", "Info"), // Should default to Info
];

for (input_level, expected_variant) in test_cases {
let filter_variant = match input_level.to_lowercase().as_str() {
"trace" => "Trace",
"debug" => "Debug",
"info" => "Info",
"warn" | "warning" => "Warn",
"error" => "Error",
"off" => "None",
_ => "Info", // default case
};

assert_eq!(
filter_variant, expected_variant,
"Log level '{input_level}' should map to '{expected_variant}'"
);
}
}

#[test]
fn test_otel_config_environment_defaults() {
// Test that OtelConfig properly handles environment detection logic
let config = OtelConfig {
endpoint: "".to_string(),
use_stdout: None,
environment: Some("production".to_string()),
..Default::default()
};

// Simulate the logic from init_telemetry
let environment = config.environment.as_deref().unwrap_or(ENVIRONMENT);
assert_eq!(environment, "production");

// Test with development environment
let dev_config = OtelConfig {
endpoint: "".to_string(),
use_stdout: None,
environment: Some("development".to_string()),
..Default::default()
};

let dev_environment = dev_config.environment.as_deref().unwrap_or(ENVIRONMENT);
assert_eq!(dev_environment, "development");
}
}

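The rotation branch earlier in this diff is driven entirely by optional `OtelConfig` fields. A minimal sketch of a time-plus-size rotation setup, reusing the field names from the diff (the concrete values are illustrative):

```rust
use rustfs_obs::OtelConfig;

// Illustrative sketch: rotate daily or at 100 MB, whichever comes first, keep 7 files
let config = OtelConfig {
    endpoint: "".to_string(),
    log_rotation_time: Some("day".to_string()), // "hour" | "day" | "minute" | "second"
    log_rotation_size_mb: Some(100),            // converted to bytes internally
    log_keep_files: Some(7),
    ..Default::default()
};
```
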
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use serde::{Deserialize, Serialize};
use serde::{Deserialize, Serialize, de::Error};
use time::OffsetDateTime;

use super::Policy;
@@ -59,15 +59,17 @@ impl TryFrom<Vec<u8>> for PolicyDoc {
type Error = serde_json::Error;

fn try_from(value: Vec<u8>) -> Result<Self, Self::Error> {
match serde_json::from_slice::<PolicyDoc>(&value) {
Ok(res) => Ok(res),
Err(err) => match serde_json::from_slice::<Policy>(&value) {
Ok(res2) => Ok(Self {
policy: res2,
..Default::default()
}),
Err(_) => Err(err),
},
// Try to parse as PolicyDoc first
if let Ok(policy_doc) = serde_json::from_slice::<PolicyDoc>(&value) {
return Ok(policy_doc);
}

// Fall back to parsing as Policy and wrap in PolicyDoc
serde_json::from_slice::<Policy>(&value)
.map(|policy| Self {
policy,
..Default::default()
})
.map_err(|_| serde_json::Error::custom("Failed to parse as PolicyDoc or Policy".to_string()))
}
}

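The fallback above means both document shapes deserialize into the same type. A hypothetical round-trip (the JSON body is illustrative):

```rust
// Hypothetical sketch: bytes holding a bare Policy still produce a PolicyDoc
let raw: Vec<u8> = br#"{"Version":"2012-10-17","Statement":[]}"#.to_vec();
let doc = PolicyDoc::try_from(raw)?; // falls back to Policy and wraps it with default metadata
```
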
@@ -438,7 +438,7 @@ fn is_fatal_mqtt_error(err: &ConnectionError) -> bool {
rumqttc::StateError::InvalidState // The internal state machine is in an invalid state
| rumqttc::StateError::WrongPacket // Protocol violation: unexpected packet received
| rumqttc::StateError::Unsolicited(_) // Protocol violation: unsolicited ACK received
| rumqttc::StateError::OutgoingPacketTooLarge { .. } // Attempted to send an oversized packet
| rumqttc::StateError::CollisionTimeout // Protocol violation (if this state is ever reached)
| rumqttc::StateError::EmptySubscription // Protocol violation (if this state is ever reached)
=> true,

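As a usage sketch, this fatal/transient split would typically drive a reconnect loop like the following (a hypothetical caller, not part of this diff; `handle` is a placeholder):

```rust
// Hypothetical reconnect loop built on is_fatal_mqtt_error
loop {
    match eventloop.poll().await {
        Ok(event) => handle(event),
        Err(err) if is_fatal_mqtt_error(&err) => break, // protocol/config error: stop retrying
        Err(_) => tokio::time::sleep(std::time::Duration::from_secs(1)).await, // transient: back off
    }
}
```
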
@@ -35,10 +35,8 @@ futures = { workspace = true, optional = true }
hex-simd = { workspace = true, optional = true }
highway = { workspace = true, optional = true }
hickory-resolver = { workspace = true, optional = true }
hickory-proto = { workspace = true, optional = true }
hmac = { workspace = true, optional = true }
hyper = { workspace = true, optional = true }
hyper-util = { workspace = true, optional = true }
local-ip-address = { workspace = true, optional = true }
lz4 = { workspace = true, optional = true }
md-5 = { workspace = true, optional = true }
@@ -81,7 +79,7 @@ workspace = true
default = ["ip"] # features that are enabled by default
ip = ["dep:local-ip-address"] # ip characteristics and their dependencies
tls = ["dep:rustls", "dep:rustls-pemfile", "dep:rustls-pki-types"] # tls characteristics and their dependencies
net = ["ip", "dep:url", "dep:netif", "dep:futures", "dep:transform-stream", "dep:bytes", "dep:s3s", "dep:hyper", "dep:hyper-util", "dep:hickory-resolver", "dep:hickory-proto", "dep:moka", "dep:thiserror"] # network features with DNS resolver
net = ["ip", "dep:url", "dep:netif", "dep:futures", "dep:transform-stream", "dep:bytes", "dep:s3s", "dep:hyper", "dep:hickory-resolver", "dep:moka", "dep:thiserror", "dep:tokio"] # network features with DNS resolver
io = ["dep:tokio"]
path = []
notify = ["dep:hyper", "dep:s3s"] # file system notification features

@@ -127,6 +127,7 @@ impl LayeredDnsResolver {
/// Validate domain format according to RFC standards
#[instrument(skip_all, fields(domain = %domain))]
fn validate_domain_format(domain: &str) -> Result<(), DnsError> {
info!("Validating domain format");
// Check FQDN length
if domain.len() > MAX_FQDN_LENGTH {
return Err(DnsError::InvalidFormat {
@@ -157,7 +158,7 @@ impl LayeredDnsResolver {
});
}
}

info!("Domain format validated successfully");
Ok(())
}

@@ -209,7 +210,6 @@ impl LayeredDnsResolver {
let ips: Vec<IpAddr> = lookup.iter().collect();
if !ips.is_empty() {
info!("System DNS resolution successful for domain: {} -> {} IPs", domain, ips.len());
debug!("System DNS resolved IPs: {:?}", ips);
Ok(ips)
} else {
warn!("System DNS returned empty result for domain: {}", domain);
@@ -242,7 +242,6 @@ impl LayeredDnsResolver {
let ips: Vec<IpAddr> = lookup.iter().collect();
if !ips.is_empty() {
info!("Public DNS resolution successful for domain: {} -> {} IPs", domain, ips.len());
debug!("Public DNS resolved IPs: {:?}", ips);
Ok(ips)
} else {
warn!("Public DNS returned empty result for domain: {}", domain);
@@ -270,6 +269,7 @@ impl LayeredDnsResolver {
/// 3. Public DNS (hickory-resolver with TLS-enabled Cloudflare DNS fallback)
#[instrument(skip_all, fields(domain = %domain))]
pub async fn resolve(&self, domain: &str) -> Result<Vec<IpAddr>, DnsError> {
info!("Starting DNS resolution for domain: {}", domain);
// Validate domain format first
Self::validate_domain_format(domain)?;

@@ -305,7 +305,7 @@ impl LayeredDnsResolver {
}
Err(public_err) => {
error!(
"All DNS resolution attempts failed for domain: {}. System DNS: failed, Public DNS: {}",
"All DNS resolution attempts failed for domain:` {}`. System DNS: failed, Public DNS: {}",
|
||||
domain, public_err
);
Err(DnsError::AllAttemptsFailed {
@@ -345,6 +345,7 @@ pub fn get_global_dns_resolver() -> Option<&'static LayeredDnsResolver> {
/// Resolve domain using the global DNS resolver with comprehensive tracing
#[instrument(skip_all, fields(domain = %domain))]
pub async fn resolve_domain(domain: &str) -> Result<Vec<IpAddr>, DnsError> {
info!("resolving domain: {}", domain);
match get_global_dns_resolver() {
Some(resolver) => resolver.resolve(domain).await,
None => Err(DnsError::InitializationFailed {
@@ -395,7 +396,7 @@ mod tests {
// Test cache stats (note: moka cache might not immediately reflect changes)
let (total, _weighted_size) = resolver.cache_stats().await;
// Cache should have at least the entry we just added (might be 0 due to async nature)
assert!(total <= 1, "Cache should have at most 1 entry, got {}", total);
assert!(total <= 1, "Cache should have at most 1 entry, got {total}");
}

#[tokio::test]
@@ -406,12 +407,12 @@ mod tests {
match resolver.resolve("localhost").await {
Ok(ips) => {
assert!(!ips.is_empty());
println!("Resolved localhost to: {:?}", ips);
println!("Resolved localhost to: {ips:?}");
}
Err(e) => {
// In some test environments, even localhost might fail
// This is acceptable as long as our error handling works
println!("DNS resolution failed (might be expected in test environments): {}", e);
println!("DNS resolution failed (might be expected in test environments): {e}");
}
}
}
@@ -427,7 +428,7 @@ mod tests {
assert!(result.is_err());

if let Err(e) = result {
println!("Expected error for invalid domain: {}", e);
println!("Expected error for invalid domain: {e}");
// Should be AllAttemptsFailed since both system and public DNS should fail
assert!(matches!(e, DnsError::AllAttemptsFailed { .. }));
}
@@ -463,10 +464,10 @@ mod tests {
match resolve_domain("localhost").await {
Ok(ips) => {
assert!(!ips.is_empty());
println!("Global resolver resolved localhost to: {:?}", ips);
println!("Global resolver resolved localhost to: {ips:?}");
}
Err(e) => {
println!("Global resolver DNS resolution failed (might be expected in test environments): {}", e);
println!("Global resolver DNS resolution failed (might be expected in test environments): {e}");
}
}
}

@@ -15,18 +15,43 @@
use bytes::Bytes;
use futures::pin_mut;
use futures::{Stream, StreamExt};
use std::io::Error;
use std::net::Ipv6Addr;
use std::sync::LazyLock;
use std::sync::{LazyLock, Mutex};
use std::{
collections::HashSet,
collections::{HashMap, HashSet},
fmt::Display,
net::{IpAddr, SocketAddr, TcpListener, ToSocketAddrs},
time::{Duration, Instant},
};
use tracing::{error, info};
use transform_stream::AsyncTryStream;
use url::{Host, Url};

static LOCAL_IPS: LazyLock<Vec<IpAddr>> = LazyLock::new(|| must_get_local_ips().unwrap());

#[derive(Debug, Clone)]
struct DnsCacheEntry {
ips: HashSet<IpAddr>,
cached_at: Instant,
}

impl DnsCacheEntry {
fn new(ips: HashSet<IpAddr>) -> Self {
Self {
ips,
cached_at: Instant::now(),
}
}

fn is_expired(&self, ttl: Duration) -> bool {
self.cached_at.elapsed() > ttl
}
}

static DNS_CACHE: LazyLock<Mutex<HashMap<String, DnsCacheEntry>>> = LazyLock::new(|| Mutex::new(HashMap::new()));
const DNS_CACHE_TTL: Duration = Duration::from_secs(300); // 5 minutes

/// helper for validating if the provided arg is an ip address.
pub fn is_socket_addr(addr: &str) -> bool {
// TODO IPv6 zone information?
@@ -38,7 +63,7 @@ pub fn is_socket_addr(addr: &str) -> bool {
pub fn check_local_server_addr(server_addr: &str) -> std::io::Result<SocketAddr> {
let addr: Vec<SocketAddr> = match server_addr.to_socket_addrs() {
Ok(addr) => addr.collect(),
Err(err) => return Err(std::io::Error::other(err)),
Err(err) => return Err(Error::other(err)),
};

// 0.0.0.0 is a wildcard address and refers to local network
@@ -59,7 +84,7 @@ pub fn check_local_server_addr(server_addr: &str) -> std::io::Result<SocketAddr>
}
}

Err(std::io::Error::other("host in server address should be this server"))
Err(Error::other("host in server address should be this server"))
}

/// checks if the given parameter corresponds to one of
@@ -70,7 +95,7 @@ pub fn is_local_host(host: Host<&str>, port: u16, local_port: u16) -> std::io::R
Host::Domain(domain) => {
let ips = match (domain, 0).to_socket_addrs().map(|v| v.map(|v| v.ip()).collect::<Vec<_>>()) {
Ok(ips) => ips,
Err(err) => return Err(std::io::Error::other(err)),
Err(err) => return Err(Error::other(err)),
};

ips.iter().any(|ip| local_set.contains(ip))
@@ -87,68 +112,61 @@ pub fn is_local_host(host: Host<&str>, port: u16, local_port: u16) -> std::io::R
}

/// returns IP address of given host using layered DNS resolution.
pub fn get_host_ip_async(host: Host<&str>) -> std::io::Result<HashSet<IpAddr>> {
///
/// This is the async version of `get_host_ip()` that provides enhanced DNS resolution
/// with Kubernetes support when the "net" feature is enabled.
pub async fn get_host_ip(host: Host<&str>) -> std::io::Result<HashSet<IpAddr>> {
match host {
Host::Domain(domain) => {
#[cfg(feature = "net")]
{
use crate::dns_resolver::resolve_domain;
let handle = tokio::runtime::Handle::current();
handle.block_on(async {
match resolve_domain(domain).await {
Ok(ips) => Ok(ips.into_iter().collect()),
Err(e) => Err(std::io::Error::other(format!("DNS resolution failed: {}", e))),
}
})
match crate::dns_resolver::resolve_domain(domain).await {
Ok(ips) => {
info!("Resolved domain {domain} using custom DNS resolver: {ips:?}");
return Ok(ips.into_iter().collect());
}
Err(err) => {
error!(
"Failed to resolve domain {domain} using custom DNS resolver, falling back to system resolver, err: {err}"
);
}
}
#[cfg(not(feature = "net"))]
// Check cache first
if let Ok(mut cache) = DNS_CACHE.lock() {
if let Some(entry) = cache.get(domain) {
if !entry.is_expired(DNS_CACHE_TTL) {
return Ok(entry.ips.clone());
}
// Remove expired entry
cache.remove(domain);
}
}

info!("Cache miss for domain {domain}, querying system resolver.");

// Fallback to standard resolution when DNS resolver is not available
match (domain, 0)
.to_socket_addrs()
.map(|v| v.map(|v| v.ip()).collect::<HashSet<_>>())
{
// Fallback to standard resolution when DNS resolver is not available
match (domain, 0)
.to_socket_addrs()
.map(|v| v.map(|v| v.ip()).collect::<HashSet<_>>())
{
Ok(ips) => Ok(ips),
Err(err) => Err(std::io::Error::other(err)),
Ok(ips) => {
// Cache the result
if let Ok(mut cache) = DNS_CACHE.lock() {
cache.insert(domain.to_string(), DnsCacheEntry::new(ips.clone()));
// Limit cache size to prevent memory bloat
if cache.len() > 1000 {
cache.retain(|_, v| !v.is_expired(DNS_CACHE_TTL));
}
}
info!("System query for domain {domain}: {:?}", ips);
Ok(ips)
}
Err(err) => {
error!("Failed to resolve domain {domain} using system resolver, err: {err}");
Err(Error::other(err))
}
}
}
Host::Ipv4(ip) => {
let mut set = HashSet::with_capacity(1);
set.insert(IpAddr::V4(ip));
Ok(set)
}
Host::Ipv6(ip) => {
let mut set = HashSet::with_capacity(1);
set.insert(IpAddr::V6(ip));
Ok(set)
}
}
}

/// returns IP address of given host using standard resolution.
///
/// **Note**: This function uses standard library DNS resolution.
/// For enhanced DNS resolution with Kubernetes support, use `get_host_ip_async()`.
pub fn get_host_ip(host: Host<&str>) -> std::io::Result<HashSet<IpAddr>> {
match host {
Host::Domain(domain) => match (domain, 0)
.to_socket_addrs()
.map(|v| v.map(|v| v.ip()).collect::<HashSet<_>>())
{
Ok(ips) => Ok(ips),
Err(err) => Err(std::io::Error::other(err)),
},
Host::Ipv4(ip) => {
let mut set = HashSet::with_capacity(1);
set.insert(IpAddr::V4(ip));
Ok(set)
}
Host::Ipv6(ip) => {
let mut set = HashSet::with_capacity(1);
set.insert(IpAddr::V6(ip));
Ok(set)
}
Host::Ipv4(ip) => Ok([IpAddr::V4(ip)].into_iter().collect()),
Host::Ipv6(ip) => Ok([IpAddr::V6(ip)].into_iter().collect()),
}
}

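A short usage sketch of the new async variant (error handling elided):

```rust
// Hypothetical call site: resolve a host through the layered resolver and cache
let ips = get_host_ip(Host::Domain("localhost")).await?;
assert!(!ips.is_empty());
```
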
@@ -160,7 +178,7 @@ pub fn get_available_port() -> u16 {
pub fn must_get_local_ips() -> std::io::Result<Vec<IpAddr>> {
match netif::up() {
Ok(up) => Ok(up.map(|x| x.address().to_owned()).collect()),
Err(err) => Err(std::io::Error::other(format!("Unable to get IP addresses of this host: {err}"))),
Err(err) => Err(Error::other(format!("Unable to get IP addresses of this host: {err}"))),
}
}

@@ -168,7 +186,7 @@ pub fn get_default_location(_u: Url, _region_override: &str) -> String {
todo!();
}

pub fn get_endpoint_url(endpoint: &str, secure: bool) -> Result<Url, std::io::Error> {
pub fn get_endpoint_url(endpoint: &str, secure: bool) -> Result<Url, Error> {
let mut scheme = "https";
if !secure {
scheme = "http";
@@ -176,7 +194,7 @@ pub fn get_endpoint_url(endpoint: &str, secure: bool) -> Result<Url, std::io::Er

let endpoint_url_str = format!("{scheme}://{endpoint}");
let Ok(endpoint_url) = Url::parse(&endpoint_url_str) else {
return Err(std::io::Error::other("url parse error."));
return Err(Error::other("url parse error."));
};

//is_valid_endpoint_url(endpoint_url)?;
@@ -211,7 +229,7 @@ impl Display for XHost {
}

impl TryFrom<String> for XHost {
type Error = std::io::Error;
type Error = Error;

fn try_from(value: String) -> Result<Self, Self::Error> {
if let Some(addr) = value.to_socket_addrs()?.next() {
@@ -221,7 +239,7 @@ impl TryFrom<String> for XHost {
is_port_set: addr.port() > 0,
})
} else {
Err(std::io::Error::new(std::io::ErrorKind::InvalidData, "value invalid"))
Err(Error::new(std::io::ErrorKind::InvalidData, "value invalid"))
}
}
}
@@ -231,7 +249,7 @@ pub fn parse_and_resolve_address(addr_str: &str) -> std::io::Result<SocketAddr>
let port_str = port;
let port: u16 = port_str
.parse()
.map_err(|e| std::io::Error::other(format!("Invalid port format: {addr_str}, err:{e:?}")))?;
.map_err(|e| Error::other(format!("Invalid port format: {addr_str}, err:{e:?}")))?;
let final_port = if port == 0 {
get_available_port() // assume get_available_port is available here
} else {
@@ -271,9 +289,9 @@ where

#[cfg(test)]
mod test {
use std::net::{Ipv4Addr, Ipv6Addr};

use super::*;
use crate::init_global_dns_resolver;
use std::net::{Ipv4Addr, Ipv6Addr};

#[test]
fn test_is_socket_addr() {
@@ -377,23 +395,29 @@ mod test {
assert!(is_local_host(invalid_host, 0, 0).is_err());
}

#[test]
fn test_get_host_ip() {
#[tokio::test]
async fn test_get_host_ip() {
match init_global_dns_resolver().await {
Ok(_) => {}
Err(e) => {
error!("Failed to initialize global DNS resolver: {e}");
}
}
// Test IPv4 address
let ipv4_host = Host::Ipv4(Ipv4Addr::new(192, 168, 1, 1));
let ipv4_result = get_host_ip(ipv4_host).unwrap();
let ipv4_result = get_host_ip(ipv4_host).await.unwrap();
assert_eq!(ipv4_result.len(), 1);
assert!(ipv4_result.contains(&IpAddr::V4(Ipv4Addr::new(192, 168, 1, 1))));

// Test IPv6 address
let ipv6_host = Host::Ipv6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1));
let ipv6_result = get_host_ip(ipv6_host).unwrap();
let ipv6_result = get_host_ip(ipv6_host).await.unwrap();
assert_eq!(ipv6_result.len(), 1);
assert!(ipv6_result.contains(&IpAddr::V6(Ipv6Addr::new(0x2001, 0xdb8, 0, 0, 0, 0, 0, 1))));

// Test localhost domain
let localhost_host = Host::Domain("localhost");
let localhost_result = get_host_ip(localhost_host).unwrap();
let localhost_result = get_host_ip(localhost_host).await.unwrap();
assert!(!localhost_result.is_empty());
// Should contain at least loopback address
assert!(
@@ -403,7 +427,16 @@ mod test {

// Test invalid domain
let invalid_host = Host::Domain("invalid.nonexistent.domain.example");
assert!(get_host_ip(invalid_host).is_err());
match get_host_ip(invalid_host.clone()).await {
Ok(ips) => {
// Depending on DNS resolver behavior, it might return empty set or error
assert!(ips.is_empty(), "Expected empty IP set for invalid domain, got: {ips:?}");
}
Err(_) => {
error!("Expected error for invalid domain");
} // Expected error
}
assert!(get_host_ip(invalid_host).await.is_err());
}

#[test]

@@ -28,10 +28,15 @@ services:
TARGETPLATFORM: linux/amd64
ports:
- "9000:9000" # S3 API port
- "9001:9001" # Console port
environment:
- RUSTFS_VOLUMES=/data/rustfs0,/data/rustfs1,/data/rustfs2,/data/rustfs3
- RUSTFS_ADDRESS=0.0.0.0:9000
- RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9001
- RUSTFS_CONSOLE_ENABLE=true
- RUSTFS_EXTERNAL_ADDRESS=:9000 # Same as internal since no port mapping
- RUSTFS_CORS_ALLOWED_ORIGINS=*
- RUSTFS_CONSOLE_CORS_ALLOWED_ORIGINS=*
- RUSTFS_ACCESS_KEY=rustfsadmin
- RUSTFS_SECRET_KEY=rustfsadmin
- RUSTFS_LOG_LEVEL=info
@@ -41,7 +46,7 @@ services:
- rustfs_data_1:/data/rustfs1
- rustfs_data_2:/data/rustfs2
- rustfs_data_3:/data/rustfs3
- ./logs:/app/logs
- logs_data:/app/logs
networks:
- rustfs-network
restart: unless-stopped
@@ -49,11 +54,8 @@ services:
test:
[
"CMD",
"wget",
"--no-verbose",
"--tries=1",
"--spider",
"http://localhost:9000/health",
"sh", "-c",
"curl -f http://localhost:9000/health && curl -f http://localhost:9001/health"
]
interval: 30s
timeout: 10s
@@ -71,11 +73,16 @@ services:
dockerfile: Dockerfile.source
# Pure development environment
ports:
- "9010:9000"
- "9010:9000" # S3 API port
- "9011:9001" # Console port
environment:
- RUSTFS_VOLUMES=/data/rustfs0,/data/rustfs1
- RUSTFS_ADDRESS=0.0.0.0:9000
- RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9001
- RUSTFS_CONSOLE_ENABLE=true
- RUSTFS_EXTERNAL_ADDRESS=:9010 # External port mapping 9010 -> 9000
- RUSTFS_CORS_ALLOWED_ORIGINS=*
- RUSTFS_CONSOLE_CORS_ALLOWED_ORIGINS=*
- RUSTFS_ACCESS_KEY=devadmin
- RUSTFS_SECRET_KEY=devadmin
- RUSTFS_LOG_LEVEL=debug
@@ -85,6 +92,17 @@ services:
networks:
- rustfs-network
restart: unless-stopped
healthcheck:
test:
[
"CMD",
"sh", "-c",
"curl -f http://localhost:9000/health && curl -f http://localhost:9001/health"
]
interval: 30s
timeout: 10s
retries: 3
start_period: 40s
profiles:
- dev

@@ -95,7 +113,7 @@ services:
command:
- --config=/etc/otelcol-contrib/otel-collector.yml
volumes:
- ./.docker/observability/otel-collector.yml:/etc/otelcol-contrib/otel-collector.yml:ro
- ./.docker/observability/otel-collector-config.yaml:/etc/otelcol-contrib/otel-collector.yml:ro
ports:
- "4317:4317" # OTLP gRPC receiver
- "4318:4318" # OTLP HTTP receiver
@@ -219,3 +237,5 @@ volumes:
driver: local
redis_data:
driver: local
logs_data:
driver: local

326
docs/PERFORMANCE_TESTING.md
Normal file
@@ -0,0 +1,326 @@
# RustFS Performance Testing Guide

This document describes a complete set of methods and tools for performance testing and profiling RustFS.

## Overview

RustFS provides several performance testing and analysis tools:

1. **Profiling** - Collect CPU performance data through the built-in pprof interface
2. **Load Testing** - Simulate highly concurrent requests with a variety of client tools
3. **Monitoring and Analysis** - Inspect performance metrics and identify bottlenecks

## Prerequisites

### 1. Enable Profiling

Set the following environment variable when starting RustFS to enable profiling:

```bash
export RUSTFS_ENABLE_PROFILING=true
./rustfs
```

### 2. Install Required Tools

Make sure the following tools are installed on your system:

```bash
# Basic tools
curl     # HTTP requests
jq       # JSON processing (optional)

# Analysis tools
go       # Go pprof tooling (optional, for the protobuf format)
python3  # Python load-testing scripts

# macOS users
brew install curl jq go python3

# Ubuntu/Debian users
sudo apt-get install curl jq golang-go python3
```

## Performance Testing Methods

### Method 1: Use the Dedicated Script (Recommended)

The project ships with a full-featured profiling script:

```bash
# Show script help
./scripts/profile_rustfs.sh help

# Check profiling status
./scripts/profile_rustfs.sh status

# Collect a flame graph (30 seconds)
./scripts/profile_rustfs.sh flamegraph

# Collect profile data in protobuf format
./scripts/profile_rustfs.sh protobuf

# Collect profile data in both formats
./scripts/profile_rustfs.sh both

# Custom parameters
./scripts/profile_rustfs.sh -d 60 -u http://192.168.1.100:9000 both
```

### Method 2: Use the Combined Python Test

The Python script provides an all-in-one solution for load testing plus profiling:

```bash
# Run the combined performance analysis
python3 test_load.py
```

This script will:
1. Start a background load test (multi-threaded S3 operations)
2. Collect profiling data in parallel
3. Generate a flame graph for analysis

### Method 3: Use the Simple Load Test

For a quick test, use the bash script:

```bash
# Run the simple load test
./simple_load_test.sh
```

## Profiling Output Formats

### 1. Flame Graph (SVG format)

- **Purpose**: Visualize CPU usage
- **File**: `rustfs_profile_TIMESTAMP.svg`
- **Viewing**: Open the SVG file in a browser
- **What to look for**:
  - Width represents CPU time
  - Height represents call-stack depth
  - Click to zoom into a specific function

```bash
# Open in a browser
open profiles/rustfs_profile_20240911_143000.svg
```

### 2. Protobuf Format

- **Purpose**: Detailed analysis with the Go pprof tooling
- **File**: `rustfs_profile_TIMESTAMP.pb`
- **Analysis tool**: `go tool pprof`

```bash
# Analyze with Go pprof
go tool pprof profiles/rustfs_profile_20240911_143000.pb

# Common pprof commands
(pprof) top        # Show the functions with the highest CPU usage
(pprof) list func  # Show the source of a given function
(pprof) web        # Generate a web view (requires graphviz)
(pprof) png        # Generate a PNG image
(pprof) help       # List all commands
```

## Using the API

### Check Profiling Status

```bash
curl "http://127.0.0.1:9000/rustfs/admin/debug/pprof/status"
```

Sample response:
```json
{
  "enabled": "true",
  "sampling_rate": "100"
}
```

### Collect Profile Data

```bash
# Collect a 30-second flame graph
curl "http://127.0.0.1:9000/rustfs/admin/debug/pprof/profile?seconds=30&format=flamegraph" \
  -o profile.svg

# Collect protobuf-format data
curl "http://127.0.0.1:9000/rustfs/admin/debug/pprof/profile?seconds=30&format=protobuf" \
  -o profile.pb
```

**Parameters**:
- `seconds`: Collection duration (1-300 seconds)
- `format`: Output format (`flamegraph`/`svg` or `protobuf`/`pb`)

## Load Testing Scenarios

### 1. S3 API Load Test

Use the Python script for a full load test of S3 operations:

```python
# Basic configuration
tester = S3LoadTester(
    endpoint="http://127.0.0.1:9000",
    access_key="rustfsadmin",
    secret_key="rustfsadmin"
)

# Run the load test
# 4 threads, 10 operations per thread
tester.run_load_test(num_threads=4, operations_per_thread=10)
```

Each operation consists of:
1. Uploading a 1MB object
2. Downloading the object
3. Deleting the object

### 2. Custom Load Test

```bash
# Create a test bucket
curl -X PUT "http://127.0.0.1:9000/test-bucket"

# Concurrent upload test
for i in {1..10}; do
  echo "test data $i" | curl -X PUT "http://127.0.0.1:9000/test-bucket/object-$i" -d @- &
done
wait

# Concurrent download test
for i in {1..10}; do
  curl "http://127.0.0.1:9000/test-bucket/object-$i" > /dev/null &
done
wait
```

## Profiling Best Practices

### 1. Prepare the Test Environment

- Make sure profiling is enabled in RustFS: `RUSTFS_ENABLE_PROFILING=true`
- Use an isolated test environment to avoid interference from other programs
- Ensure there is enough disk space for the profile files

### 2. Data Collection Tips

- **Warm-up**: Run a light load for 5-10 minutes first
- **Collection**: Gather 30-60 seconds of profile data under a steady load
- **Multiple samples**: Collect several samples for comparative analysis

### 3. Analysis Focus

In a flame graph, pay particular attention to:

1. **The widest functions** - they consume the most CPU time
2. **Flat-topped functions** - potential performance bottlenecks
3. **Deep call stacks** - possible recursion or complex logic
4. **Unexpected system calls** - I/O or memory-allocation issues

### 4. Common Performance Problems

- **Lock contention**: Look for `std::sync`-related functions
- **Memory allocation**: Look for `alloc`-related functions
- **I/O waits**: Look for filesystem or network I/O functions
- **Serialization overhead**: Look for JSON/XML parsing functions

|
||||
|
||||
### 1. 性能分析未启用
|
||||
|
||||
错误信息:`{"enabled":"false"}`
|
||||
|
||||
解决方案:
|
||||
```bash
|
||||
export RUSTFS_ENABLE_PROFILING=true
|
||||
# 重启 RustFS
|
||||
```
|
||||
|
||||
### 2. 连接被拒绝
|
||||
|
||||
错误信息:`Connection refused`
|
||||
|
||||
检查项:
|
||||
- RustFS 是否正在运行
|
||||
- 端口是否正确(默认 9000)
|
||||
- 防火墙设置
|
||||
|
||||
### 3. 分析文件过大
|
||||
|
||||
如果生成的分析文件过大:
|
||||
- 减少收集时间(如 15-30 秒)
|
||||
- 降低负载测试的并发度
|
||||
- 使用 protobuf 格式而非 SVG
|
||||
|
||||
## 配置参数
|
||||
|
||||
### 环境变量
|
||||
|
||||
| 变量 | 默认值 | 描述 |
|
||||
|------|--------|------|
|
||||
| `RUSTFS_ENABLE_PROFILING` | `false` | 启用性能分析 |
|
||||
| `RUSTFS_URL` | `http://127.0.0.1:9000` | RustFS 服务器地址 |
|
||||
| `PROFILE_DURATION` | `30` | 性能数据收集时长(秒) |
|
||||
| `OUTPUT_DIR` | `./profiles` | 输出文件目录 |
|
||||
|
||||
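For example, the environment variables combine with the script like this (values are illustrative):

```bash
# Collect a 60-second profile into a custom directory
export RUSTFS_ENABLE_PROFILING=true
PROFILE_DURATION=60 OUTPUT_DIR=./my-profiles ./scripts/profile_rustfs.sh both
```
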
### Script Arguments

```bash
./scripts/profile_rustfs.sh [OPTIONS] [COMMAND]

OPTIONS:
  -u, --url URL            RustFS URL
  -d, --duration SECONDS   Profile duration
  -o, --output DIR         Output directory

COMMANDS:
  status       Check profiling status
  flamegraph   Collect a flame graph
  protobuf     Collect protobuf data
  both         Collect both formats (default)
```

## Output File Locations

- **Script output**: the `./profiles/` directory
- **Python script**: the `/tmp/rustfs_profiles/` directory
- **File naming**: `rustfs_profile_TIMESTAMP.{svg|pb}`

## Example Workflow

1. **Start RustFS**:
   ```bash
   RUSTFS_ENABLE_PROFILING=true ./rustfs
   ```

2. **Verify that profiling is available**:
   ```bash
   ./scripts/profile_rustfs.sh status
   ```

3. **Start the load test**:
   ```bash
   python3 test_load.py &
   ```

4. **Collect profile data**:
   ```bash
   ./scripts/profile_rustfs.sh -d 60 both
   ```

5. **Analyze the results**:
   ```bash
   # View the flame graph
   open profiles/rustfs_profile_*.svg

   # Or analyze with pprof
   go tool pprof profiles/rustfs_profile_*.pb
   ```

With this end-to-end workflow you can systematically analyze RustFS's performance profile, identify bottlenecks, and apply targeted optimizations.

1362
docs/console-separation.md
Normal file
File diff suppressed because it is too large
@@ -56,4 +56,5 @@ if [ "${RUSTFS_ACCESS_KEY}" = "rustfsadmin" ] || [ "${RUSTFS_SECRET_KEY}" = "rus
fi

echo "Starting: $*"
set -- "$@" $LOCAL_VOLUMES
exec "$@"

270
examples/README.md
Normal file
@@ -0,0 +1,270 @@
# RustFS Docker Deployment Examples
|
||||
|
||||
This directory contains various deployment scripts and configuration files for RustFS with console and endpoint service separation.
|
||||
|
||||
## Quick Start Scripts
|
||||
|
||||
### `docker-quickstart.sh`
|
||||
The fastest way to get RustFS running with different configurations.
|
||||
|
||||
```bash
|
||||
# Basic deployment (ports 9000-9001)
|
||||
./docker-quickstart.sh basic
|
||||
|
||||
# Development environment (ports 9010-9011)
|
||||
./docker-quickstart.sh dev
|
||||
|
||||
# Production-like deployment (ports 9020-9021)
|
||||
./docker-quickstart.sh prod
|
||||
|
||||
# Check status of all deployments
|
||||
./docker-quickstart.sh status
|
||||
|
||||
# Test health of all running services
|
||||
./docker-quickstart.sh test
|
||||
|
||||
# Clean up all containers
|
||||
./docker-quickstart.sh cleanup
|
||||
```
|
||||
|
||||
### `enhanced-docker-deployment.sh`
|
||||
Comprehensive deployment script with multiple scenarios and detailed logging.
|
||||
|
||||
```bash
|
||||
# Deploy individual scenarios
|
||||
./enhanced-docker-deployment.sh basic # Basic setup with port mapping
|
||||
./enhanced-docker-deployment.sh dev # Development environment
|
||||
./enhanced-docker-deployment.sh prod # Production-like with security
|
||||
|
||||
# Deploy all scenarios at once
|
||||
./enhanced-docker-deployment.sh all
|
||||
|
||||
# Check status and test services
|
||||
./enhanced-docker-deployment.sh status
|
||||
./enhanced-docker-deployment.sh test
|
||||
|
||||
# View logs for specific container
|
||||
./enhanced-docker-deployment.sh logs rustfs-dev
|
||||
|
||||
# Complete cleanup
|
||||
./enhanced-docker-deployment.sh cleanup
|
||||
```
|
||||
|
||||
### `enhanced-security-deployment.sh`
|
||||
Production-ready deployment with enhanced security features including TLS, rate limiting, and secure credential generation.
|
||||
|
||||
```bash
|
||||
# Deploy with security hardening
|
||||
./enhanced-security-deployment.sh
|
||||
|
||||
# Features:
|
||||
# - Automatic TLS certificate generation
|
||||
# - Secure credential generation
|
||||
# - Rate limiting configuration
|
||||
# - Console access restrictions
|
||||
# - Health check validation
|
||||
```
|
||||
|
||||
## Docker Compose Examples

### `docker-comprehensive.yml`

Complete Docker Compose configuration with multiple deployment profiles.

```bash
# Deploy specific profiles
docker-compose -f docker-comprehensive.yml --profile basic up -d
docker-compose -f docker-comprehensive.yml --profile dev up -d
docker-compose -f docker-comprehensive.yml --profile production up -d
docker-compose -f docker-comprehensive.yml --profile enterprise up -d
docker-compose -f docker-comprehensive.yml --profile api-only up -d

# Deploy with reverse proxy
docker-compose -f docker-comprehensive.yml --profile production --profile nginx up -d
```

#### Available Profiles:

- **basic**: Simple deployment for testing (ports 9000-9001)
- **dev**: Development environment with debug logging (ports 9010-9011)
- **production**: Production deployment with security (ports 9020-9021)
- **enterprise**: Full enterprise setup with TLS (ports 9030-9443)
- **api-only**: API endpoint without console (port 9040)

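Profiles work by tagging each service with a `profiles:` key; Compose only starts services whose profile is activated on the command line. A trimmed sketch of the pattern used in `docker-comprehensive.yml` (shown in full later in this diff):

```yaml
services:
  rustfs-dev:
    image: rustfs/rustfs:latest
    ports:
      - "9010:9000"
      - "9011:9001"
    profiles:
      - dev   # started only by: docker-compose --profile dev up -d
```
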
## Usage Examples by Scenario

### Development Setup

```bash
# Quick development start
./docker-quickstart.sh dev

# Or use the enhanced deployment for more features
./enhanced-docker-deployment.sh dev

# Or use Docker Compose
docker-compose -f docker-comprehensive.yml --profile dev up -d
```

**Access Points:**

- API: http://localhost:9010 (or 9030 for enhanced)
- Console: http://localhost:9011/rustfs/console/ (or 9031 for enhanced)
- Credentials: dev-admin / dev-secret

### Production Deployment

```bash
# Security-hardened deployment
./enhanced-security-deployment.sh

# Or the production profile
./enhanced-docker-deployment.sh prod
```

**Features:**

- TLS encryption for the console
- Rate limiting enabled
- Restrictive CORS policies
- Secure credential generation
- Console bound to localhost only

### Testing and CI/CD

```bash
# API-only deployment for testing
docker-compose -f docker-comprehensive.yml --profile api-only up -d

# Quick basic setup for integration tests
./docker-quickstart.sh basic
```

## Configuration Examples

### Environment Variables

All deployment scripts support customization via environment variables:

```bash
# Custom image and ports
export RUSTFS_IMAGE="rustfs/rustfs:custom-tag"
export CONSOLE_PORT="8001"
export API_PORT="8000"

# Custom data directories
export DATA_DIR="/custom/data/path"
export CERTS_DIR="/custom/certs/path"

# Run with custom configuration
./enhanced-security-deployment.sh
```

### Common Configurations

```bash
# Development - permissive CORS
RUSTFS_CORS_ALLOWED_ORIGINS="*"
RUSTFS_CONSOLE_CORS_ALLOWED_ORIGINS="*"

# Production - restrictive CORS
RUSTFS_CORS_ALLOWED_ORIGINS="https://myapp.com,https://api.myapp.com"
RUSTFS_CONSOLE_CORS_ALLOWED_ORIGINS="https://admin.myapp.com"

# Security hardening
RUSTFS_CONSOLE_RATE_LIMIT_ENABLE="true"
RUSTFS_CONSOLE_RATE_LIMIT_RPM="60"
RUSTFS_CONSOLE_AUTH_TIMEOUT="1800"
```

## Monitoring and Health Checks

All deployments include health check endpoints:

```bash
# Test API health
curl http://localhost:9000/health

# Test console health
curl http://localhost:9001/health

# Test all deployments
./docker-quickstart.sh test
./enhanced-docker-deployment.sh test
```

## Network Architecture

### Port Mappings

| Deployment | API Port | Console Port | Description |
|------------|----------|--------------|-------------|
| Basic      | 9000     | 9001         | Simple deployment |
| Dev        | 9010     | 9011         | Development environment |
| Prod       | 9020     | 9021         | Production-like setup |
| Enterprise | 9030     | 9443         | Enterprise with TLS |
| API-Only   | 9040     | -            | API endpoint only |

### Network Isolation

Production deployments use network isolation:

- **Public API Network**: Exposes API endpoints to external clients
- **Internal Console Network**: Restricts console access to internal networks
- **Secure Network**: Isolated network for enterprise deployments

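In Compose terms this isolation is just separate bridge networks, with internal-only networks marked `internal: true` so they get no host connectivity; that is how `rustfs-secure-network` is declared in `docker-comprehensive.yml` below:

```yaml
networks:
  rustfs-network:            # services here are reachable via published ports
    driver: bridge
  rustfs-secure-network:     # no external routing in or out of this network
    driver: bridge
    internal: true
```
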
## Security Considerations

### Development

- Permissive CORS policies for easy testing
- Debug logging enabled
- Default credentials for simplicity

### Production

- Restrictive CORS policies
- TLS encryption for the console
- Rate limiting enabled
- Secure credential generation
- Console bound to localhost
- Network isolation

### Enterprise

- Complete TLS encryption
- Advanced rate limiting
- Authentication timeouts
- Secret management
- Network segregation

## Troubleshooting

### Common Issues

1. **Port Conflicts**: Use different ports via environment variables
2. **CORS Errors**: Check the origin configuration and the browser network tab
3. **Health Check Failures**: Verify services are running and ports are accessible
4. **Permission Issues**: Check volume mount permissions and certificate file permissions

### Debug Commands

```bash
# Check container logs
docker logs rustfs-container

# Check container environment
docker exec rustfs-container env | grep RUSTFS

# Test connectivity
docker exec rustfs-container curl http://localhost:9000/health
docker exec rustfs-container curl http://localhost:9001/health

# Check listening ports
docker exec rustfs-container netstat -tulpn | grep -E ':(9000|9001)'
```

## Migration from Previous Versions

See [docs/console-separation.md](../docs/console-separation.md) for detailed migration instructions from single-port deployments to the separated architecture.

## Additional Resources

- [Console Separation Documentation](../docs/console-separation.md)
- [Docker Compose Configuration](../docker-compose.yml)
- [Main Dockerfile](../Dockerfile)
- [Security Best Practices](../docs/console-separation.md#security-hardening)

examples/docker-comprehensive.yml (new file, 224 lines)
@@ -0,0 +1,224 @@

# RustFS Comprehensive Docker Deployment Examples
# This file demonstrates various deployment scenarios for RustFS with console separation

version: "3.8"

services:
  # Basic deployment with default settings
  rustfs-basic:
    image: rustfs/rustfs:latest
    container_name: rustfs-basic
    ports:
      - "9000:9000" # API endpoint
      - "9001:9001" # Console interface
    environment:
      - RUSTFS_ADDRESS=0.0.0.0:9000
      - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9001
      - RUSTFS_EXTERNAL_ADDRESS=:9000
      - RUSTFS_CORS_ALLOWED_ORIGINS=http://localhost:9001
      - RUSTFS_CONSOLE_CORS_ALLOWED_ORIGINS=*
      - RUSTFS_ACCESS_KEY=admin
      - RUSTFS_SECRET_KEY=password
    volumes:
      - rustfs-basic-data:/data
    networks:
      - rustfs-network
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "sh", "-c", "curl -f http://localhost:9000/health && curl -f http://localhost:9001/health"]
      interval: 30s
      timeout: 10s
      retries: 3
    profiles:
      - basic

  # Development environment with debug logging
  rustfs-dev:
    image: rustfs/rustfs:latest
    container_name: rustfs-dev
    ports:
      - "9010:9000" # API endpoint
      - "9011:9001" # Console interface
    environment:
      - RUSTFS_ADDRESS=0.0.0.0:9000
      - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9001
      - RUSTFS_EXTERNAL_ADDRESS=:9010
      - RUSTFS_CORS_ALLOWED_ORIGINS=*
      - RUSTFS_CONSOLE_CORS_ALLOWED_ORIGINS=*
      - RUSTFS_ACCESS_KEY=dev-admin
      - RUSTFS_SECRET_KEY=dev-password
      - RUST_LOG=debug
      - RUSTFS_LOG_LEVEL=debug
    volumes:
      - rustfs-dev-data:/data
      - rustfs-dev-logs:/logs
    networks:
      - rustfs-network
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "sh", "-c", "curl -f http://localhost:9000/health && curl -f http://localhost:9001/health"]
      interval: 30s
      timeout: 10s
      retries: 3
    profiles:
      - dev

  # Production environment with security hardening
  rustfs-production:
    image: rustfs/rustfs:latest
    container_name: rustfs-production
    ports:
      - "9020:9000" # API endpoint (public)
      - "127.0.0.1:9021:9001" # Console (localhost only)
    environment:
      - RUSTFS_ADDRESS=0.0.0.0:9000
      - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9001
      - RUSTFS_EXTERNAL_ADDRESS=:9020
      - RUSTFS_CORS_ALLOWED_ORIGINS=https://myapp.com,https://api.myapp.com
      - RUSTFS_CONSOLE_CORS_ALLOWED_ORIGINS=https://admin.myapp.com
      - RUSTFS_CONSOLE_RATE_LIMIT_ENABLE=true
      - RUSTFS_CONSOLE_RATE_LIMIT_RPM=60
      - RUSTFS_CONSOLE_AUTH_TIMEOUT=1800
      - RUSTFS_ACCESS_KEY_FILE=/run/secrets/rustfs_access_key
      - RUSTFS_SECRET_KEY_FILE=/run/secrets/rustfs_secret_key
    volumes:
      - rustfs-production-data:/data
      - rustfs-production-logs:/logs
      - rustfs-certs:/certs:ro
    networks:
      - rustfs-network
    secrets:
      - rustfs_access_key
      - rustfs_secret_key
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "sh", "-c", "curl -f http://localhost:9000/health && curl -f http://localhost:9001/health"]
      interval: 30s
      timeout: 10s
      retries: 3
    profiles:
      - production

  # Enterprise deployment with TLS and full security
  rustfs-enterprise:
    image: rustfs/rustfs:latest
    container_name: rustfs-enterprise
    ports:
      - "9030:9000" # API endpoint
      - "127.0.0.1:9443:9001" # Console with TLS (localhost only)
    environment:
      - RUSTFS_ADDRESS=0.0.0.0:9000
      - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9001
      - RUSTFS_EXTERNAL_ADDRESS=:9030
      - RUSTFS_TLS_PATH=/certs
      - RUSTFS_CORS_ALLOWED_ORIGINS=https://enterprise.com
      - RUSTFS_CONSOLE_CORS_ALLOWED_ORIGINS=https://admin.enterprise.com
      - RUSTFS_CONSOLE_RATE_LIMIT_ENABLE=true
      - RUSTFS_CONSOLE_RATE_LIMIT_RPM=30
      - RUSTFS_CONSOLE_AUTH_TIMEOUT=900
    volumes:
      - rustfs-enterprise-data:/data
      - rustfs-enterprise-logs:/logs
      - rustfs-enterprise-certs:/certs:ro
    networks:
      - rustfs-secure-network
    secrets:
      - rustfs_enterprise_access_key
      - rustfs_enterprise_secret_key
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "sh", "-c", "curl -f http://localhost:9000/health && curl -k -f https://localhost:9001/health"]
      interval: 30s
      timeout: 10s
      retries: 3
    profiles:
      - enterprise

  # API-only deployment (console disabled)
  rustfs-api-only:
    image: rustfs/rustfs:latest
    container_name: rustfs-api-only
    ports:
      - "9040:9000" # API endpoint only
    environment:
      - RUSTFS_ADDRESS=0.0.0.0:9000
      - RUSTFS_CONSOLE_ENABLE=false
      - RUSTFS_CORS_ALLOWED_ORIGINS=https://client-app.com
      - RUSTFS_ACCESS_KEY=api-only-key
      - RUSTFS_SECRET_KEY=api-only-secret
    volumes:
      - rustfs-api-data:/data
    networks:
      - rustfs-network
    restart: unless-stopped
    healthcheck:
      test: ["CMD", "curl", "-f", "http://localhost:9000/health"]
      interval: 30s
      timeout: 10s
      retries: 3
    profiles:
      - api-only

  # Nginx reverse proxy for production
  nginx-proxy:
    image: nginx:alpine
    container_name: rustfs-nginx
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./nginx/nginx.conf:/etc/nginx/nginx.conf:ro
      - ./nginx/ssl:/etc/nginx/ssl:ro
    networks:
      - rustfs-network
    restart: unless-stopped
    depends_on:
      - rustfs-production
    profiles:
      - production
      - enterprise

networks:
  rustfs-network:
    driver: bridge
    ipam:
      config:
        - subnet: 172.20.0.0/16
  rustfs-secure-network:
    driver: bridge
    internal: true
    ipam:
      config:
        - subnet: 172.21.0.0/16

volumes:
  rustfs-basic-data:
    driver: local
  rustfs-dev-data:
    driver: local
  rustfs-dev-logs:
    driver: local
  rustfs-production-data:
    driver: local
  rustfs-production-logs:
    driver: local
  rustfs-enterprise-data:
    driver: local
  rustfs-enterprise-logs:
    driver: local
  rustfs-enterprise-certs:
    driver: local
  rustfs-api-data:
    driver: local
  rustfs-certs:
    driver: local

secrets:
  rustfs_access_key:
    external: true
  rustfs_secret_key:
    external: true
  rustfs_enterprise_access_key:
    external: true
  rustfs_enterprise_secret_key:
    external: true

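The `external: true` secrets above must exist before `docker-compose up`. With plain Docker they live in Swarm's secret store, so this sketch assumes Swarm mode is initialized (`docker swarm init`); the key values are placeholders:

```bash
printf '%s' 'my-access-key' | docker secret create rustfs_access_key -
printf '%s' 'my-secret-key' | docker secret create rustfs_secret_key -
docker secret ls   # verify both secrets exist
```
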
examples/docker-quickstart.sh (new executable file, 295 lines)
@@ -0,0 +1,295 @@

#!/bin/bash

# RustFS Docker Quick Start Script
# This script provides easy deployment commands for different scenarios

set -e

# Colors for output
GREEN='\033[0;32m'
BLUE='\033[0;34m'
YELLOW='\033[1;33m'
RED='\033[0;31m'
NC='\033[0m' # No Color

log() {
    echo -e "${GREEN}[RustFS]${NC} $1"
}

info() {
    echo -e "${BLUE}[INFO]${NC} $1"
}

warn() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

# Print banner
print_banner() {
    echo -e "${BLUE}"
    echo "=================================================="
    echo "         RustFS Docker Quick Start"
    echo "       Console & Endpoint Separation"
    echo "=================================================="
    echo -e "${NC}"
}

# Check Docker availability
check_docker() {
    if ! command -v docker &> /dev/null; then
        error "Docker is not installed or not available in PATH"
        exit 1
    fi
    info "Docker is available: $(docker --version)"
}

# Quick start - basic deployment
quick_basic() {
    log "Starting RustFS basic deployment..."

    docker run -d \
        --name rustfs-quick \
        -p 9000:9000 \
        -p 9001:9001 \
        -e RUSTFS_EXTERNAL_ADDRESS=":9000" \
        -e RUSTFS_CORS_ALLOWED_ORIGINS="http://localhost:9001" \
        -v rustfs-quick-data:/data \
        rustfs/rustfs:latest

    echo
    info "✅ RustFS deployed successfully!"
    info "🌐 API Endpoint: http://localhost:9000"
    info "🖥️  Console UI: http://localhost:9001/rustfs/console/"
    info "🔐 Credentials: rustfsadmin / rustfsadmin"
    info "🏥 Health Check: curl http://localhost:9000/health"
    echo
    info "To stop: docker stop rustfs-quick"
    info "To remove: docker rm rustfs-quick && docker volume rm rustfs-quick-data"
}

# Development deployment with debug logging
quick_dev() {
    log "Starting RustFS development environment..."

    docker run -d \
        --name rustfs-dev \
        -p 9010:9000 \
        -p 9011:9001 \
        -e RUSTFS_EXTERNAL_ADDRESS=":9010" \
        -e RUSTFS_CORS_ALLOWED_ORIGINS="*" \
        -e RUSTFS_CONSOLE_CORS_ALLOWED_ORIGINS="*" \
        -e RUSTFS_ACCESS_KEY="dev-admin" \
        -e RUSTFS_SECRET_KEY="dev-secret" \
        -e RUST_LOG="debug" \
        -v rustfs-dev-data:/data \
        rustfs/rustfs:latest

    echo
    info "✅ RustFS development environment ready!"
    info "🌐 API Endpoint: http://localhost:9010"
    info "🖥️  Console UI: http://localhost:9011/rustfs/console/"
    info "🔐 Credentials: dev-admin / dev-secret"
    info "📊 Debug logging enabled"
    echo
    info "To stop: docker stop rustfs-dev"
}

# Production-like deployment
quick_prod() {
    log "Starting RustFS production-like deployment..."

    # Generate secure credentials
    ACCESS_KEY="prod-$(openssl rand -hex 8)"
    SECRET_KEY="$(openssl rand -hex 24)"

    docker run -d \
        --name rustfs-prod \
        -p 9020:9000 \
        -p 127.0.0.1:9021:9001 \
        -e RUSTFS_EXTERNAL_ADDRESS=":9020" \
        -e RUSTFS_CORS_ALLOWED_ORIGINS="https://myapp.com" \
        -e RUSTFS_CONSOLE_CORS_ALLOWED_ORIGINS="https://admin.myapp.com" \
        -e RUSTFS_CONSOLE_RATE_LIMIT_ENABLE="true" \
        -e RUSTFS_CONSOLE_RATE_LIMIT_RPM="60" \
        -e RUSTFS_ACCESS_KEY="$ACCESS_KEY" \
        -e RUSTFS_SECRET_KEY="$SECRET_KEY" \
        -v rustfs-prod-data:/data \
        rustfs/rustfs:latest

    # Save credentials
    echo "RUSTFS_ACCESS_KEY=$ACCESS_KEY" > rustfs-prod-credentials.txt
    echo "RUSTFS_SECRET_KEY=$SECRET_KEY" >> rustfs-prod-credentials.txt
    chmod 600 rustfs-prod-credentials.txt

    echo
    info "✅ RustFS production deployment ready!"
    info "🌐 API Endpoint: http://localhost:9020 (public)"
    info "🖥️  Console UI: http://127.0.0.1:9021/rustfs/console/ (localhost only)"
    info "🔐 Credentials saved to rustfs-prod-credentials.txt"
    info "🔒 Console restricted to localhost for security"
    echo
    warn "⚠️  Change default CORS origins for production use"
}

# Stop and cleanup
cleanup() {
    log "Cleaning up RustFS deployments..."

    docker stop rustfs-quick rustfs-dev rustfs-prod 2>/dev/null || true
    docker rm rustfs-quick rustfs-dev rustfs-prod 2>/dev/null || true

    info "Containers stopped and removed"
    echo
    info "To also remove data volumes, run:"
    info "docker volume rm rustfs-quick-data rustfs-dev-data rustfs-prod-data"
}

# Show status of all deployments
status() {
    log "RustFS deployment status:"
    echo

    if docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" | grep -q rustfs; then
        docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" | head -n1
        docker ps --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}" | grep rustfs
    else
        info "No RustFS containers are currently running"
    fi

    echo
    info "Available endpoints:"

    if docker ps --filter "name=rustfs-quick" --format "{{.Names}}" | grep -q rustfs-quick; then
        echo "  Basic: http://localhost:9000 (API) | http://localhost:9001/rustfs/console/ (Console)"
    fi

    if docker ps --filter "name=rustfs-dev" --format "{{.Names}}" | grep -q rustfs-dev; then
        echo "  Dev:   http://localhost:9010 (API) | http://localhost:9011/rustfs/console/ (Console)"
    fi

    if docker ps --filter "name=rustfs-prod" --format "{{.Names}}" | grep -q rustfs-prod; then
        echo "  Prod:  http://localhost:9020 (API) | http://127.0.0.1:9021/rustfs/console/ (Console)"
    fi
}

# Test deployments
test_deployments() {
    log "Testing RustFS deployments..."
    echo

    # Test basic deployment
    if docker ps --filter "name=rustfs-quick" --format "{{.Names}}" | grep -q rustfs-quick; then
        info "Testing basic deployment..."
        if curl -s -f http://localhost:9000/health | grep -q "ok"; then
            echo "  ✅ API health check: PASS"
        else
            echo "  ❌ API health check: FAIL"
        fi

        if curl -s -f http://localhost:9001/health | grep -q "console"; then
            echo "  ✅ Console health check: PASS"
        else
            echo "  ❌ Console health check: FAIL"
        fi
    fi

    # Test dev deployment
    if docker ps --filter "name=rustfs-dev" --format "{{.Names}}" | grep -q rustfs-dev; then
        info "Testing development deployment..."
        if curl -s -f http://localhost:9010/health | grep -q "ok"; then
            echo "  ✅ Dev API health check: PASS"
        else
            echo "  ❌ Dev API health check: FAIL"
        fi

        if curl -s -f http://localhost:9011/health | grep -q "console"; then
            echo "  ✅ Dev Console health check: PASS"
        else
            echo "  ❌ Dev Console health check: FAIL"
        fi
    fi

    # Test prod deployment
    if docker ps --filter "name=rustfs-prod" --format "{{.Names}}" | grep -q rustfs-prod; then
        info "Testing production deployment..."
        if curl -s -f http://localhost:9020/health | grep -q "ok"; then
            echo "  ✅ Prod API health check: PASS"
        else
            echo "  ❌ Prod API health check: FAIL"
        fi

        if curl -s -f http://127.0.0.1:9021/health | grep -q "console"; then
            echo "  ✅ Prod Console health check: PASS"
        else
            echo "  ❌ Prod Console health check: FAIL"
        fi
    fi
}

# Show help
show_help() {
    print_banner
    echo "Usage: $0 [command]"
    echo
    echo "Commands:"
    echo "  basic     Start basic RustFS deployment (ports 9000-9001)"
    echo "  dev       Start development deployment with debug logging (ports 9010-9011)"
    echo "  prod      Start production-like deployment with security (ports 9020-9021)"
    echo "  status    Show status of running deployments"
    echo "  test      Test health of all running deployments"
    echo "  cleanup   Stop and remove all RustFS containers"
    echo "  help      Show this help message"
    echo
    echo "Examples:"
    echo "  $0 basic    # Quick start with default settings"
    echo "  $0 dev      # Development environment with debug logs"
    echo "  $0 prod     # Production-like setup with security"
    echo "  $0 status   # Check what's running"
    echo "  $0 test     # Test all deployments"
    echo "  $0 cleanup  # Clean everything up"
    echo
    echo "For more advanced deployments, see:"
    echo "  - examples/enhanced-docker-deployment.sh"
    echo "  - examples/enhanced-security-deployment.sh"
    echo "  - examples/docker-comprehensive.yml"
    echo "  - docs/console-separation.md"
    echo
}

# Main execution
case "${1:-help}" in
    "basic")
        print_banner
        check_docker
        quick_basic
        ;;
    "dev")
        print_banner
        check_docker
        quick_dev
        ;;
    "prod")
        print_banner
        check_docker
        quick_prod
        ;;
    "status")
        print_banner
        status
        ;;
    "test")
        print_banner
        test_deployments
        ;;
    "cleanup")
        print_banner
        cleanup
        ;;
    "help"|*)
        show_help
        ;;
esac

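Once one of these deployments is up, the S3 API can be smoke-tested directly with any S3 client; a sketch using the AWS CLI against the basic deployment (the bucket name is illustrative, credentials are the defaults printed by `quick_basic`):

```bash
export AWS_ACCESS_KEY_ID=rustfsadmin
export AWS_SECRET_ACCESS_KEY=rustfsadmin

aws --endpoint-url http://localhost:9000 s3 mb s3://smoke-test
echo "hello" > /tmp/hello.txt
aws --endpoint-url http://localhost:9000 s3 cp /tmp/hello.txt s3://smoke-test/
aws --endpoint-url http://localhost:9000 s3 ls s3://smoke-test/
```
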
examples/enhanced-docker-deployment.sh (new executable file, 321 lines)
@@ -0,0 +1,321 @@

#!/bin/bash

# RustFS Enhanced Docker Deployment Examples
# This script demonstrates various deployment scenarios for RustFS with console separation

set -e

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

log_info() {
    echo -e "${GREEN}[INFO]${NC} $1"
}

log_warn() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

log_error() {
    echo -e "${RED}[ERROR]${NC} $1"
}

log_section() {
    echo -e "\n${BLUE}========================================${NC}"
    echo -e "${BLUE}$1${NC}"
    echo -e "${BLUE}========================================${NC}\n"
}

# Function to clean up existing containers
cleanup() {
    log_info "Cleaning up existing RustFS containers..."
    docker stop rustfs-basic rustfs-dev rustfs-prod 2>/dev/null || true
    docker rm rustfs-basic rustfs-dev rustfs-prod 2>/dev/null || true
}

# Function to wait for service to be ready
wait_for_service() {
    local url=$1
    local service_name=$2
    local max_attempts=30
    local attempt=0

    log_info "Waiting for $service_name to be ready at $url..."

    while [ $attempt -lt $max_attempts ]; do
        if curl -s -f "$url" > /dev/null 2>&1; then
            log_info "$service_name is ready!"
            return 0
        fi
        attempt=$((attempt + 1))
        sleep 1
    done

    log_error "$service_name failed to start within ${max_attempts}s"
    return 1
}

# Scenario 1: Basic deployment with port mapping
deploy_basic() {
    log_section "Scenario 1: Basic Docker Deployment with Port Mapping"

    log_info "Starting RustFS with port mapping 9020:9000 and 9021:9001"

    docker run -d \
        --name rustfs-basic \
        -p 9020:9000 \
        -p 9021:9001 \
        -e RUSTFS_EXTERNAL_ADDRESS=":9020" \
        -e RUSTFS_CORS_ALLOWED_ORIGINS="http://localhost:9021,http://127.0.0.1:9021" \
        -e RUSTFS_CONSOLE_CORS_ALLOWED_ORIGINS="*" \
        -e RUSTFS_ACCESS_KEY="basic-access" \
        -e RUSTFS_SECRET_KEY="basic-secret" \
        -v rustfs-basic-data:/data \
        rustfs/rustfs:latest

    # Wait for services to be ready
    wait_for_service "http://localhost:9020/health" "API Service"
    wait_for_service "http://localhost:9021/health" "Console Service"

    log_info "Basic deployment ready!"
    log_info "🌐 API endpoint: http://localhost:9020"
    log_info "🖥️  Console UI: http://localhost:9021/rustfs/console/"
    log_info "🔐 Credentials: basic-access / basic-secret"
    log_info "🏥 Health checks:"
    log_info "   API: curl http://localhost:9020/health"
    log_info "   Console: curl http://localhost:9021/health"
}

# Scenario 2: Development environment
deploy_development() {
    log_section "Scenario 2: Development Environment"

    log_info "Starting RustFS development environment"

    docker run -d \
        --name rustfs-dev \
        -p 9030:9000 \
        -p 9031:9001 \
        -e RUSTFS_EXTERNAL_ADDRESS=":9030" \
        -e RUSTFS_CORS_ALLOWED_ORIGINS="*" \
        -e RUSTFS_CONSOLE_CORS_ALLOWED_ORIGINS="*" \
        -e RUSTFS_ACCESS_KEY="dev-access" \
        -e RUSTFS_SECRET_KEY="dev-secret" \
        -e RUST_LOG="debug" \
        -v rustfs-dev-data:/data \
        rustfs/rustfs:latest

    # Wait for services to be ready
    wait_for_service "http://localhost:9030/health" "Dev API Service"
    wait_for_service "http://localhost:9031/health" "Dev Console Service"

    log_info "Development deployment ready!"
    log_info "🌐 API endpoint: http://localhost:9030"
    log_info "🖥️  Console UI: http://localhost:9031/rustfs/console/"
    log_info "🔐 Credentials: dev-access / dev-secret"
    log_info "📊 Debug logging enabled"
    log_info "🏥 Health checks:"
    log_info "   API: curl http://localhost:9030/health"
    log_info "   Console: curl http://localhost:9031/health"
}

# Scenario 3: Production-like environment with security
deploy_production() {
    log_section "Scenario 3: Production-like Deployment"

    log_info "Starting RustFS production-like environment with security"

    # Generate secure credentials
    ACCESS_KEY=$(openssl rand -hex 16)
    SECRET_KEY=$(openssl rand -hex 32)

    # Save credentials for reference
    cat > rustfs-prod-credentials.env << EOF
# RustFS Production Deployment Credentials
# Generated: $(date)
RUSTFS_ACCESS_KEY=$ACCESS_KEY
RUSTFS_SECRET_KEY=$SECRET_KEY
EOF
    chmod 600 rustfs-prod-credentials.env

    docker run -d \
        --name rustfs-prod \
        -p 9040:9000 \
        -p 127.0.0.1:9041:9001 \
        -e RUSTFS_ADDRESS="0.0.0.0:9000" \
        -e RUSTFS_CONSOLE_ADDRESS="0.0.0.0:9001" \
        -e RUSTFS_EXTERNAL_ADDRESS=":9040" \
        -e RUSTFS_CORS_ALLOWED_ORIGINS="https://myapp.example.com" \
        -e RUSTFS_CONSOLE_CORS_ALLOWED_ORIGINS="https://admin.example.com" \
        -e RUSTFS_ACCESS_KEY="$ACCESS_KEY" \
        -e RUSTFS_SECRET_KEY="$SECRET_KEY" \
        -v rustfs-prod-data:/data \
        rustfs/rustfs:latest

    # Wait for services to be ready
    wait_for_service "http://localhost:9040/health" "Prod API Service"
    wait_for_service "http://127.0.0.1:9041/health" "Prod Console Service"

    log_info "Production deployment ready!"
    log_info "🌐 API endpoint: http://localhost:9040 (public)"
    log_info "🖥️  Console UI: http://127.0.0.1:9041/rustfs/console/ (localhost only)"
    log_info "🔐 Credentials: $ACCESS_KEY / $SECRET_KEY"
    log_info "🔒 Security: Console restricted to localhost"
    log_info "🏥 Health checks:"
    log_info "   API: curl http://localhost:9040/health"
    log_info "   Console: curl http://127.0.0.1:9041/health"
    log_warn "⚠️  Console is restricted to localhost for security"
    log_warn "⚠️  Credentials saved to rustfs-prod-credentials.env file"
}

# Function to show service status
show_status() {
    log_section "Service Status"

    echo "Running containers:"
    docker ps --filter "name=rustfs-" --format "table {{.Names}}\t{{.Status}}\t{{.Ports}}"

    echo -e "\nService endpoints:"
    if docker ps --filter "name=rustfs-basic" --format "{{.Names}}" | grep -q rustfs-basic; then
        echo "  Basic API: http://localhost:9020"
        echo "  Basic Console: http://localhost:9021/rustfs/console/"
    fi

    if docker ps --filter "name=rustfs-dev" --format "{{.Names}}" | grep -q rustfs-dev; then
        echo "  Dev API: http://localhost:9030"
        echo "  Dev Console: http://localhost:9031/rustfs/console/"
    fi

    if docker ps --filter "name=rustfs-prod" --format "{{.Names}}" | grep -q rustfs-prod; then
        echo "  Prod API: http://localhost:9040"
        echo "  Prod Console: http://127.0.0.1:9041/rustfs/console/"
    fi
}

# Function to test services
test_services() {
    log_section "Testing Services"

    # Test basic deployment
    if docker ps --filter "name=rustfs-basic" --format "{{.Names}}" | grep -q rustfs-basic; then
        log_info "Testing basic deployment..."
        if curl -s http://localhost:9020/health | grep -q "ok"; then
            log_info "✓ Basic API health check passed"
        else
            log_error "✗ Basic API health check failed"
        fi

        if curl -s http://localhost:9021/health | grep -q "console"; then
            log_info "✓ Basic Console health check passed"
        else
            log_error "✗ Basic Console health check failed"
        fi
    fi

    # Test development deployment
    if docker ps --filter "name=rustfs-dev" --format "{{.Names}}" | grep -q rustfs-dev; then
        log_info "Testing development deployment..."
        if curl -s http://localhost:9030/health | grep -q "ok"; then
            log_info "✓ Dev API health check passed"
        else
            log_error "✗ Dev API health check failed"
        fi

        if curl -s http://localhost:9031/health | grep -q "console"; then
            log_info "✓ Dev Console health check passed"
        else
            log_error "✗ Dev Console health check failed"
        fi
    fi

    # Test production deployment
    if docker ps --filter "name=rustfs-prod" --format "{{.Names}}" | grep -q rustfs-prod; then
        log_info "Testing production deployment..."
        if curl -s http://localhost:9040/health | grep -q "ok"; then
            log_info "✓ Prod API health check passed"
        else
            log_error "✗ Prod API health check failed"
        fi

        if curl -s http://127.0.0.1:9041/health | grep -q "console"; then
            log_info "✓ Prod Console health check passed"
        else
            log_error "✗ Prod Console health check failed"
        fi
    fi
}

# Function to show logs
show_logs() {
    log_section "Service Logs"

    if [ -n "$1" ]; then
        docker logs "$1"
    else
        echo "Available containers:"
        docker ps --filter "name=rustfs-" --format "{{.Names}}"
        echo -e "\nUsage: $0 logs <container-name>"
    fi
}

# Main menu
case "${1:-menu}" in
    "basic")
        cleanup
        deploy_basic
        ;;
    "dev")
        cleanup
        deploy_development
        ;;
    "prod")
        cleanup
        deploy_production
        ;;
    "all")
        cleanup
        deploy_basic
        deploy_development
        deploy_production
        show_status
        ;;
    "status")
        show_status
        ;;
    "test")
        test_services
        ;;
    "logs")
        show_logs "$2"
        ;;
    "cleanup")
        cleanup
        docker volume rm rustfs-basic-data rustfs-dev-data rustfs-prod-data 2>/dev/null || true
        log_info "Cleanup completed"
        ;;
    "menu"|*)
        echo "RustFS Enhanced Docker Deployment Examples"
        echo ""
        echo "Usage: $0 [command]"
        echo ""
        echo "Commands:"
        echo "  basic   - Deploy basic RustFS with port mapping"
        echo "  dev     - Deploy development environment"
        echo "  prod    - Deploy production-like environment"
        echo "  all     - Deploy all scenarios"
        echo "  status  - Show status of running containers"
        echo "  test    - Test all running services"
        echo "  logs    - Show logs for a specific container"
        echo "  cleanup - Clean up all containers and volumes"
        echo ""
        echo "Examples:"
        echo "  $0 basic            # Deploy basic setup"
        echo "  $0 status           # Check running services"
        echo "  $0 logs rustfs-dev  # Show dev container logs"
        echo "  $0 cleanup          # Clean everything up"
        ;;
esac

examples/enhanced-security-deployment.sh (new executable file, 207 lines)
@@ -0,0 +1,207 @@

#!/bin/bash

# RustFS Enhanced Security Deployment Script
# This script demonstrates production-ready deployment with enhanced security features

set -e

# Configuration
RUSTFS_IMAGE="${RUSTFS_IMAGE:-rustfs/rustfs:latest}"
CONTAINER_NAME="${CONTAINER_NAME:-rustfs-secure}"
DATA_DIR="${DATA_DIR:-./data}"
CERTS_DIR="${CERTS_DIR:-./certs}"
CONSOLE_PORT="${CONSOLE_PORT:-9443}"
API_PORT="${API_PORT:-9000}"

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

log() {
    echo -e "${BLUE}[INFO]${NC} $1"
}

warn() {
    echo -e "${YELLOW}[WARN]${NC} $1"
}

error() {
    echo -e "${RED}[ERROR]${NC} $1"
    exit 1
}

success() {
    echo -e "${GREEN}[SUCCESS]${NC} $1"
}

# Check if Docker is available
check_docker() {
    if ! command -v docker &> /dev/null; then
        error "Docker is not installed or not in PATH"
    fi
    log "Docker is available"
}

# Generate TLS certificates for console
generate_certs() {
    if [[ ! -d "$CERTS_DIR" ]]; then
        mkdir -p "$CERTS_DIR"
        log "Created certificates directory: $CERTS_DIR"
    fi

    if [[ ! -f "$CERTS_DIR/console.crt" ]] || [[ ! -f "$CERTS_DIR/console.key" ]]; then
        log "Generating TLS certificates for console..."
        openssl req -x509 -newkey rsa:4096 \
            -keyout "$CERTS_DIR/console.key" \
            -out "$CERTS_DIR/console.crt" \
            -days 365 -nodes \
            -subj "/C=US/ST=CA/L=SF/O=RustFS/CN=localhost"

        chmod 600 "$CERTS_DIR/console.key"
        chmod 644 "$CERTS_DIR/console.crt"
        success "TLS certificates generated"
    else
        log "TLS certificates already exist"
    fi
}

# Create data directory
create_data_dir() {
    if [[ ! -d "$DATA_DIR" ]]; then
        mkdir -p "$DATA_DIR"
        log "Created data directory: $DATA_DIR"
    fi
}

# Generate secure credentials
generate_credentials() {
    if [[ -z "$RUSTFS_ACCESS_KEY" ]]; then
        export RUSTFS_ACCESS_KEY="admin-$(openssl rand -hex 8)"
        log "Generated access key: $RUSTFS_ACCESS_KEY"
    fi

    if [[ -z "$RUSTFS_SECRET_KEY" ]]; then
        export RUSTFS_SECRET_KEY="$(openssl rand -hex 32)"
        log "Generated secret key: [HIDDEN]"
    fi

    # Save credentials to .env file
    cat > .env << EOF
RUSTFS_ACCESS_KEY=$RUSTFS_ACCESS_KEY
RUSTFS_SECRET_KEY=$RUSTFS_SECRET_KEY
EOF
    chmod 600 .env
    success "Credentials saved to .env file"
}

# Stop existing container
stop_existing() {
    if docker ps -a --format "table {{.Names}}" | grep -q "^$CONTAINER_NAME\$"; then
        log "Stopping existing container: $CONTAINER_NAME"
        docker stop "$CONTAINER_NAME" 2>/dev/null || true
        docker rm "$CONTAINER_NAME" 2>/dev/null || true
    fi
}

# Deploy RustFS with enhanced security
deploy_rustfs() {
    log "Deploying RustFS with enhanced security..."

    docker run -d \
        --name "$CONTAINER_NAME" \
        --restart unless-stopped \
        -p "$CONSOLE_PORT:9001" \
        -p "$API_PORT:9000" \
        -v "$(pwd)/$DATA_DIR:/data" \
        -v "$(pwd)/$CERTS_DIR:/certs:ro" \
        -e RUSTFS_CONSOLE_TLS_ENABLE=true \
        -e RUSTFS_CONSOLE_TLS_CERT=/certs/console.crt \
        -e RUSTFS_CONSOLE_TLS_KEY=/certs/console.key \
        -e RUSTFS_CONSOLE_RATE_LIMIT_ENABLE=true \
        -e RUSTFS_CONSOLE_RATE_LIMIT_RPM=60 \
        -e RUSTFS_CONSOLE_AUTH_TIMEOUT=1800 \
        -e RUSTFS_CONSOLE_CORS_ALLOWED_ORIGINS="https://localhost:$CONSOLE_PORT" \
        -e RUSTFS_CORS_ALLOWED_ORIGINS="http://localhost:$API_PORT" \
        -e RUSTFS_ACCESS_KEY="$RUSTFS_ACCESS_KEY" \
        -e RUSTFS_SECRET_KEY="$RUSTFS_SECRET_KEY" \
        -e RUSTFS_EXTERNAL_ADDRESS=":$API_PORT" \
        "$RUSTFS_IMAGE" /data

    # Wait for container to start
    sleep 5

    if docker ps --format "table {{.Names}}" | grep -q "^$CONTAINER_NAME\$"; then
        success "RustFS deployed successfully"
    else
        error "Failed to deploy RustFS"
    fi
}

# Check service health
check_health() {
    log "Checking service health..."

    # Check console health
    if curl -k -s "https://localhost:$CONSOLE_PORT/health" | jq -e '.status == "ok"' > /dev/null 2>&1; then
        success "Console service is healthy"
    else
        warn "Console service health check failed"
    fi

    # Check API health
    if curl -s "http://localhost:$API_PORT/health" | jq -e '.status == "ok"' > /dev/null 2>&1; then
        success "API service is healthy"
    else
        warn "API service health check failed"
    fi
}

# Display access information
show_access_info() {
    echo
    echo "=========================================="
    echo "      RustFS Access Information"
    echo "=========================================="
    echo
    echo "🌐 Console (HTTPS): https://localhost:$CONSOLE_PORT/rustfs/console/"
    echo "🔧 API Endpoint: http://localhost:$API_PORT"
    echo "🏥 Console Health: https://localhost:$CONSOLE_PORT/health"
    echo "🏥 API Health: http://localhost:$API_PORT/health"
    echo
    echo "🔐 Credentials:"
    echo "   Access Key: $RUSTFS_ACCESS_KEY"
    echo "   Secret Key: [Check .env file]"
    echo
    echo "📝 Logs: docker logs $CONTAINER_NAME"
    echo "🛑 Stop: docker stop $CONTAINER_NAME"
    echo
    echo "⚠️  Note: Console uses a self-signed certificate"
    echo "   Accept the certificate warning in your browser"
    echo
}

# Main deployment flow
main() {
    log "Starting RustFS Enhanced Security Deployment"

    check_docker
    create_data_dir
    generate_certs
    generate_credentials
    stop_existing
    deploy_rustfs

    # Wait a bit for services to start
    sleep 10

    check_health
    show_access_info

    success "Deployment completed successfully!"
}

# Run main function
main "$@"

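To sanity-check the self-signed certificate the script generates, standard `openssl` inspection works (paths and port match the defaults above):

```bash
# Subject, issuer, and validity window of the generated console certificate
openssl x509 -in certs/console.crt -noout -subject -issuer -dates

# Confirm the TLS handshake against the running console
openssl s_client -connect localhost:9443 -servername localhost </dev/null 2>/dev/null | head -n 5
```
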
@@ -56,6 +56,8 @@ rustfs-targets = { workspace = true }
atoi = { workspace = true }
atomic_enum = { workspace = true }
axum.workspace = true
axum-extra = { workspace = true }
axum-server = { workspace = true, features = ["tls-rustls"] }
async-trait = { workspace = true }
bytes = { workspace = true }
chrono = { workspace = true }
@@ -102,6 +104,8 @@ tower-http = { workspace = true, features = [
    "compression-gzip",
    "cors",
    "catch-panic",
    "timeout",
    "limit",
] }
url = { workspace = true }
urlencoding = { workspace = true }
@@ -118,6 +122,12 @@ libsystemd.workspace = true
[target.'cfg(all(target_os = "linux", target_env = "gnu"))'.dependencies]
tikv-jemallocator = "0.6"

[target.'cfg(all(target_os = "linux", target_env = "musl"))'.dependencies]
mimalloc = "0.1"

[target.'cfg(not(target_os = "windows"))'.dependencies]
pprof = { version = "0.15.0", features = ["flamegraph", "protobuf-codec"] }

[build-dependencies]
http.workspace = true
futures.workspace = true

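The new `pprof` dependency is what backs the flame-graph and protobuf outputs collected earlier in this document. A minimal sketch of how the crate is typically driven (this is not RustFS's actual wiring; the sampling frequency, file name, and workload are illustrative):

```rust
use std::fs::File;

fn main() {
    // Sample the process at ~100 Hz while the guard is alive.
    let guard = pprof::ProfilerGuard::new(100).expect("failed to start profiler");

    expensive_workload();

    if let Ok(report) = guard.report().build() {
        // "flamegraph" feature: render an SVG flame graph.
        let file = File::create("rustfs_profile.svg").expect("create svg");
        report.flamegraph(file).expect("write flamegraph");
        // With the "protobuf-codec" feature, the report can also be encoded
        // as a pprof-compatible profile for `go tool pprof`.
    }
}

fn expensive_workload() {
    let mut acc = 0u64;
    for i in 0..50_000_000u64 {
        acc = acc.wrapping_add(i * i);
    }
    std::hint::black_box(acc);
}
```
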
@@ -12,38 +12,46 @@
// See the License for the specific language governing permissions and
// limitations under the License.

// use crate::license::get_license;
use axum::{
    // Router,
    body::Body,
    http::{Response, StatusCode},
    response::IntoResponse,
    // routing::get,
};
// use axum_extra::extract::Host;
// use rustfs_config::{RUSTFS_TLS_CERT, RUSTFS_TLS_KEY};
// use rustfs_utils::net::parse_and_resolve_address;
// use std::io;

use http::Uri;
// use axum::response::Redirect;
// use axum_server::tls_rustls::RustlsConfig;
// use http::{HeaderMap, HeaderName, Uri, header};
use crate::config::build;
use crate::license::get_license;
use axum::body::Body;
use axum::response::{IntoResponse, Response};
use axum_extra::extract::Host;
use http::{HeaderMap, HeaderName, StatusCode, Uri};
use mime_guess::from_path;
use rust_embed::RustEmbed;
use serde::Serialize;
use std::net::{IpAddr, SocketAddr};
use std::sync::OnceLock;
// use axum::response::Redirect;
// use axum::routing::get;
// use axum::{
//     body::Body,
//     http::{Response, StatusCode},
//     response::IntoResponse,
//     Router,
// };
// use axum_extra::extract::Host;
// use axum_server::tls_rustls::RustlsConfig;
// use http::{header, HeaderMap, HeaderName, Uri};
// use io::Error;
// use mime_guess::from_path;
// use rust_embed::RustEmbed;
// use rustfs_config::{RUSTFS_TLS_CERT, RUSTFS_TLS_KEY};
// use rustfs_utils::parse_and_resolve_address;
// use serde::Serialize;
// use shadow_rs::shadow;
// use std::io;
// use std::net::{IpAddr, SocketAddr};
// use std::sync::OnceLock;
// use std::time::Duration;
// use tokio::signal;
// use tower_http::cors::{Any, CorsLayer};
// use tower_http::trace::TraceLayer;
// use tracing::{debug, error, info, instrument};
use tracing::{error, instrument};

// shadow!(build);

// const RUSTFS_ADMIN_PREFIX: &str = "/rustfs/admin/v3";
const RUSTFS_ADMIN_PREFIX: &str = "/rustfs/admin/v3";

#[derive(RustEmbed)]
#[folder = "$CARGO_MANIFEST_DIR/static"]
@@ -77,235 +85,226 @@ pub(crate) async fn static_handler(uri: Uri) -> impl IntoResponse {
    }
}

// #[derive(Debug, Serialize, Clone)]
// pub(crate) struct Config {
//     #[serde(skip)]
//     port: u16,
//     api: Api,
//     s3: S3,
//     release: Release,
//     license: License,
//     doc: String,
#[derive(Debug, Serialize, Clone)]
pub(crate) struct Config {
    #[serde(skip)]
    port: u16,
    api: Api,
    s3: S3,
    release: Release,
    license: License,
    doc: String,
}

impl Config {
    fn new(local_ip: IpAddr, port: u16, version: &str, date: &str) -> Self {
        Config {
            port,
            api: Api {
                base_url: format!("http://{local_ip}:{port}/{RUSTFS_ADMIN_PREFIX}"),
            },
            s3: S3 {
                endpoint: format!("http://{local_ip}:{port}"),
                region: "cn-east-1".to_owned(),
            },
            release: Release {
                version: version.to_string(),
                date: date.to_string(),
            },
            license: License {
                name: "Apache-2.0".to_string(),
                url: "https://www.apache.org/licenses/LICENSE-2.0".to_string(),
            },
            doc: "https://rustfs.com/docs/".to_string(),
        }
    }

    fn to_json(&self) -> String {
        serde_json::to_string(self).unwrap_or_default()
    }

    #[allow(dead_code)]
    pub(crate) fn version_info(&self) -> String {
        format!(
            "RELEASE.{}@{} (rust {} {})",
            self.release.date.clone(),
            self.release.version.clone().trim_start_matches('@'),
            build::RUST_VERSION,
            build::BUILD_TARGET
        )
    }

    #[allow(dead_code)]
    pub(crate) fn version(&self) -> String {
        self.release.version.clone()
    }

    #[allow(dead_code)]
    pub(crate) fn license(&self) -> String {
        format!("{} {}", self.license.name.clone(), self.license.url.clone())
    }

    #[allow(dead_code)]
    pub(crate) fn doc(&self) -> String {
        self.doc.clone()
    }
}

#[derive(Debug, Serialize, Clone)]
struct Api {
    #[serde(rename = "baseURL")]
    base_url: String,
}

#[derive(Debug, Serialize, Clone)]
struct S3 {
    endpoint: String,
    region: String,
}

#[derive(Debug, Serialize, Clone)]
struct Release {
    version: String,
    date: String,
}

#[derive(Debug, Serialize, Clone)]
struct License {
    name: String,
    url: String,
}

pub(crate) static CONSOLE_CONFIG: OnceLock<Config> = OnceLock::new();

#[allow(clippy::const_is_empty)]
pub(crate) fn init_console_cfg(local_ip: IpAddr, port: u16) {
    CONSOLE_CONFIG.get_or_init(|| {
        let ver = {
            if !build::TAG.is_empty() {
                build::TAG.to_string()
            } else if !build::SHORT_COMMIT.is_empty() {
                format!("@{}", build::SHORT_COMMIT)
            } else {
                build::PKG_VERSION.to_string()
            }
        };

        Config::new(local_ip, port, ver.as_str(), build::COMMIT_DATE_3339)
    });
}

// fn is_socket_addr_or_ip_addr(host: &str) -> bool {
//     host.parse::<SocketAddr>().is_ok() || host.parse::<IpAddr>().is_ok()
// }

// impl Config {
//     fn new(local_ip: IpAddr, port: u16, version: &str, date: &str) -> Self {
//         Config {
//             port,
//             api: Api {
//                 base_url: format!("http://{local_ip}:{port}/{RUSTFS_ADMIN_PREFIX}"),
//             },
//             s3: S3 {
//                 endpoint: format!("http://{local_ip}:{port}"),
//                 region: "cn-east-1".to_owned(),
//             },
//             release: Release {
//                 version: version.to_string(),
//                 date: date.to_string(),
//             },
//             license: License {
//                 name: "Apache-2.0".to_string(),
//                 url: "https://www.apache.org/licenses/LICENSE-2.0".to_string(),
//             },
//             doc: "https://rustfs.com/docs/".to_string(),
//         }
//     }
#[allow(dead_code)]
pub async fn license_handler() -> impl IntoResponse {
    let license = get_license().unwrap_or_default();

//     fn to_json(&self) -> String {
//         serde_json::to_string(self).unwrap_or_default()
//     }
    Response::builder()
        .header("content-type", "application/json")
        .status(StatusCode::OK)
        .body(Body::from(serde_json::to_string(&license).unwrap_or_default()))
        .unwrap()
}

//     pub(crate) fn version_info(&self) -> String {
//         format!(
//             "RELEASE.{}@{} (rust {} {})",
//             self.release.date.clone(),
//             self.release.version.clone().trim_start_matches('@'),
//             build::RUST_VERSION,
//             build::BUILD_TARGET
//         )
//     }
fn _is_private_ip(ip: IpAddr) -> bool {
    match ip {
        IpAddr::V4(ip) => {
            let octets = ip.octets();
            // 10.0.0.0/8
            octets[0] == 10 ||
            // 172.16.0.0/12
            (octets[0] == 172 && (octets[1] >= 16 && octets[1] <= 31)) ||
            // 192.168.0.0/16
            (octets[0] == 192 && octets[1] == 168)
        }
        IpAddr::V6(_) => false,
    }
}

//     pub(crate) fn version(&self) -> String {
//         self.release.version.clone()
//     }
#[allow(clippy::const_is_empty)]
#[allow(dead_code)]
#[instrument(fields(host))]
pub async fn config_handler(uri: Uri, Host(host): Host, headers: HeaderMap) -> impl IntoResponse {
    // Get the scheme from the headers or use the URI scheme
    let scheme = headers
        .get(HeaderName::from_static("x-forwarded-proto"))
        .and_then(|value| value.to_str().ok())
        .unwrap_or_else(|| uri.scheme().map(|s| s.as_str()).unwrap_or("http"));

//     pub(crate) fn license(&self) -> String {
//         format!("{} {}", self.license.name.clone(), self.license.url.clone())
//     }
    let raw_host = uri.host().unwrap_or(host.as_str());
    let host_for_url = if let Ok(socket_addr) = raw_host.parse::<SocketAddr>() {
        // Successfully parsed, it's in IP:Port format.
        // For IPv6, we need to enclose the IP in brackets to form a valid URL.
        let ip = socket_addr.ip();
        if ip.is_ipv6() { format!("[{ip}]") } else { format!("{ip}") }
    } else if let Ok(ip) = raw_host.parse::<IpAddr>() {
        // Bare IP (no port)
        if ip.is_ipv6() { format!("[{ip}]") } else { ip.to_string() }
    } else {
        // Likely a domain name that does not parse as an IP; strip any port
        raw_host.split(':').next().unwrap_or(raw_host).to_string()
    };

//     pub(crate) fn doc(&self) -> String {
//         self.doc.clone()
//     }
// }
    // Make a copy of the current configuration
    let mut cfg = match CONSOLE_CONFIG.get() {
        Some(cfg) => cfg.clone(),
        None => {
            error!("Console configuration not initialized");
            return Response::builder()
                .status(StatusCode::INTERNAL_SERVER_ERROR)
                .body(Body::from("Console configuration not initialized"))
                .unwrap();
        }
    };

// #[derive(Debug, Serialize, Clone)]
// struct Api {
//     #[serde(rename = "baseURL")]
//     base_url: String,
// }
    let url = format!("{}://{}:{}", scheme, host_for_url, cfg.port);
    cfg.api.base_url = format!("{url}{RUSTFS_ADMIN_PREFIX}");
    cfg.s3.endpoint = url;

// #[derive(Debug, Serialize, Clone)]
// struct S3 {
//     endpoint: String,
//     region: String,
// }

// #[derive(Debug, Serialize, Clone)]
// struct Release {
//     version: String,
//     date: String,
// }

// #[derive(Debug, Serialize, Clone)]
// struct License {
//     name: String,
//     url: String,
// }

// pub(crate) static CONSOLE_CONFIG: OnceLock<Config> = OnceLock::new();

// #[allow(clippy::const_is_empty)]
// pub(crate) fn init_console_cfg(local_ip: IpAddr, port: u16) {
//     CONSOLE_CONFIG.get_or_init(|| {
//         let ver = {
//             if !build::TAG.is_empty() {
//                 build::TAG.to_string()
//             } else if !build::SHORT_COMMIT.is_empty() {
//                 format!("@{}", build::SHORT_COMMIT)
//             } else {
//                 build::PKG_VERSION.to_string()
//             }
//         };

//         Config::new(local_ip, port, ver.as_str(), build::COMMIT_DATE_3339)
//     });
// }

// // fn is_socket_addr_or_ip_addr(host: &str) -> bool {
// //     host.parse::<SocketAddr>().is_ok() || host.parse::<IpAddr>().is_ok()
// // }

// #[allow(dead_code)]
// async fn license_handler() -> impl IntoResponse {
//     let license = get_license().unwrap_or_default();

//     Response::builder()
//         .header("content-type", "application/json")
//         .status(StatusCode::OK)
//         .body(Body::from(serde_json::to_string(&license).unwrap_or_default()))
//         .unwrap()
// }

// fn _is_private_ip(ip: IpAddr) -> bool {
//     match ip {
//         IpAddr::V4(ip) => {
//             let octets = ip.octets();
//             // 10.0.0.0/8
//             octets[0] == 10 ||
//             // 172.16.0.0/12
//             (octets[0] == 172 && (octets[1] >= 16 && octets[1] <= 31)) ||
//             // 192.168.0.0/16
//             (octets[0] == 192 && octets[1] == 168)
//         }
//         IpAddr::V6(_) => false,
//     }
// }

// #[allow(clippy::const_is_empty)]
// #[allow(dead_code)]
// #[instrument(fields(host))]
// async fn config_handler(uri: Uri, Host(host): Host, headers: HeaderMap) -> impl IntoResponse {
//     // Get the scheme from the headers or use the URI scheme
//     let scheme = headers
//         .get(HeaderName::from_static("x-forwarded-proto"))
//         .and_then(|value| value.to_str().ok())
//         .unwrap_or_else(|| uri.scheme().map(|s| s.as_str()).unwrap_or("http"));

//     // Print logs for debugging
//     info!("Scheme: {}, ", scheme);

//     // Get the host from the uri and use the value of the host extractor if it doesn't have one
//     let host = uri.host().unwrap_or(host.as_str());

//     let host = if let Ok(socket_addr) = host.parse::<SocketAddr>() {
//         // Successfully parsed, it's in IP:Port format.
//         // For IPv6, we need to enclose it in brackets to form a valid URL.
//         let ip = socket_addr.ip();
//         if ip.is_ipv6() { format!("[{ip}]") } else { format!("{ip}") }
//     } else {
//         // Failed to parse, it might be a domain name or a bare IP, use it as is.
//         host.to_string()
//     };

//     // Make a copy of the current configuration
//     let mut cfg = match CONSOLE_CONFIG.get() {
//         Some(cfg) => cfg.clone(),
//         None => {
//             error!("Console configuration not initialized");
//             return Response::builder()
//                 .status(StatusCode::INTERNAL_SERVER_ERROR)
//                 .body(Body::from("Console configuration not initialized"))
//                 .unwrap();
//         }
//     };

//     let url = format!("{}://{}:{}", scheme, host, cfg.port);
//     cfg.api.base_url = format!("{url}{RUSTFS_ADMIN_PREFIX}");
//     cfg.s3.endpoint = url;

//     Response::builder()
//         .header("content-type", "application/json")
//         .status(StatusCode::OK)
//         .body(Body::from(cfg.to_json()))
//         .unwrap()
// }
    Response::builder()
        .header("content-type", "application/json")
        .status(StatusCode::OK)
        .body(Body::from(cfg.to_json()))
        .unwrap()
}

// pub fn register_router() -> Router {
//     Router::new()
//         // .route("/license", get(license_handler))
//         // .route("/config.json", get(config_handler))
//         .route("/license", get(license_handler))
//         .route("/config.json", get(config_handler))
//         .fallback_service(get(static_handler))
// }

//
// #[allow(dead_code)]
// pub async fn start_static_file_server(
//     addrs: &str,
//     local_ip: IpAddr,
//     access_key: &str,
//     secret_key: &str,
//     tls_path: Option<String>,
// ) {
// pub async fn start_static_file_server(addrs: &str, tls_path: Option<String>) {
//     // Configure CORS
//     let cors = CorsLayer::new()
//         .allow_origin(Any) // In the production environment, we recommend that you specify a specific domain name
//         .allow_methods([http::Method::GET, http::Method::POST])
//         .allow_headers([header::CONTENT_TYPE]);

//
//     // Create a route
//     let app = register_router()
//         .layer(cors)
//         .layer(tower_http::compression::CompressionLayer::new().gzip(true).deflate(true))
//         .layer(TraceLayer::new_for_http());

//     let server_addr = parse_and_resolve_address(addrs).expect("Failed to parse socket address");
//     let server_port = server_addr.port();
//     let server_address = server_addr.to_string();

//     info!(
//         "WebUI: http://{}:{} http://127.0.0.1:{} http://{}",
//         local_ip, server_port, server_port, server_address
//     );
//     info!(" RootUser: {}", access_key);
//     info!(" RootPass: {}", secret_key);

//
//     // Check and start the HTTPS/HTTP server
//     match start_server(server_addr, tls_path, app.clone()).await {
//         Ok(_) => info!("Server shutdown gracefully"),
//         Err(e) => error!("Server error: {}", e),
//     match start_server(addrs, tls_path, app).await {
//         Ok(_) => info!("Console Server shutdown gracefully"),
//         Err(e) => error!("Console Server error: {}", e),
//     }
// }

// async fn start_server(server_addr: SocketAddr, tls_path: Option<String>, app: Router) -> io::Result<()> {
//
// async fn start_server(addrs: &str, tls_path: Option<String>, app: Router) -> io::Result<()> {
//     let server_addr = parse_and_resolve_address(addrs).expect("Console Failed to parse socket address");
//     let server_port = server_addr.port();
//     let server_address = server_addr.to_string();
//
//     info!("Console WebUI: http://{} http://127.0.0.1:{} ", server_address, server_port);
//
//     let tls_path = tls_path.unwrap_or_default();
//     let key_path = format!("{tls_path}/{RUSTFS_TLS_KEY}");
//     let cert_path = format!("{tls_path}/{RUSTFS_TLS_CERT}");
@@ -314,38 +313,38 @@ pub(crate) async fn static_handler(uri: Uri) -> impl IntoResponse {
//     let handle_clone = handle.clone();
//     tokio::spawn(async move {
//         shutdown_signal().await;
//         info!("Initiating graceful shutdown...");
//         info!("Console Initiating graceful shutdown...");
//         handle_clone.graceful_shutdown(Some(Duration::from_secs(10)));
//     });

//
//     let has_tls_certs = tokio::try_join!(tokio::fs::metadata(&key_path), tokio::fs::metadata(&cert_path)).is_ok();
//     info!("Console TLS certs: {:?}", has_tls_certs);
//     if has_tls_certs {
//         info!("Found TLS certificates, starting with HTTPS");
//         info!("Console Found TLS certificates, starting with HTTPS");
//         match RustlsConfig::from_pem_file(cert_path, key_path).await {
//             Ok(config) => {
//                 info!("Starting HTTPS server...");
//                 info!("Console Starting HTTPS server...");
//                 axum_server::bind_rustls(server_addr, config)
//                     .handle(handle.clone())
//                     .serve(app.into_make_service())
//                     .await
//                     .map_err(io::Error::other)?;

//                 info!("HTTPS server running on https://{}", server_addr);

//                     .map_err(Error::other)?;
//
//                 info!("Console HTTPS server running on https://{}", server_addr);
//
//                 Ok(())
//             }
//             Err(e) => {
//                 error!("Failed to create TLS config: {}", e);
//                 error!("Console Failed to create TLS config: {}", e);
//                 start_http_server(server_addr, app, handle).await
//             }
//         }
//     } else {
//         info!("TLS certificates not found at {} and {}", key_path, cert_path);
//         info!("Console TLS certificates not found at {} and {}", key_path, cert_path);
//         start_http_server(server_addr, app, handle).await
//     }
// }

//
// #[allow(dead_code)]
// /// 308 redirect for HTTP to HTTPS
// fn redirect_to_https(https_port: u16) -> Router {
@@ -364,38 +363,38 @@ pub(crate) async fn static_handler(uri: Uri) -> impl IntoResponse {
//         }),
//     )
// }

//
// async fn start_http_server(addr: SocketAddr, app: Router, handle: axum_server::Handle) -> io::Result<()> {
//     debug!("Starting HTTP server...");
//     info!("Console Starting HTTP server... {}", addr.to_string());
//     axum_server::bind(addr)
//         .handle(handle)
//         .serve(app.into_make_service())
//         .await
//         .map_err(io::Error::other)
//         .map_err(Error::other)
// }

//
// async fn shutdown_signal() {
//     let ctrl_c = async {
//         signal::ctrl_c().await.expect("failed to install Ctrl+C handler");
//         signal::ctrl_c().await.expect("Console failed to install Ctrl+C handler");
//     };

//
//     #[cfg(unix)]
//     let terminate = async {
//         signal::unix::signal(signal::unix::SignalKind::terminate())
//             .expect("failed to install signal handler")
//             .expect("Console failed to install signal handler")
//             .recv()
//             .await;
//     };

//
//     #[cfg(not(unix))]
//     let terminate = std::future::pending::<()>();

//
//     tokio::select! {
//         _ = ctrl_c => {
//             info!("shutdown_signal ctrl_c")
//             info!("Console shutdown_signal ctrl_c")
//         },
//         _ = terminate => {
//             info!("shutdown_signal terminate")
//             info!("Console shutdown_signal terminate")
//         },
//     }
// }

@@ -81,6 +81,8 @@ pub mod sts;
pub mod tier;
pub mod trace;
pub mod user;
#[cfg(not(target_os = "windows"))]
use pprof::protos::Message;
use urlencoding::decode;

#[allow(dead_code)]
@@ -92,6 +94,28 @@ pub struct AccountInfo {
    pub policy: BucketPolicy,
}

/// Health check handler for endpoint monitoring
pub struct HealthCheckHandler {}

#[async_trait::async_trait]
impl Operation for HealthCheckHandler {
    async fn call(&self, _req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        use serde_json::json;

        let health_info = json!({
            "status": "ok",
            "service": "rustfs-endpoint",
            "timestamp": chrono::Utc::now().to_rfc3339(),
            "version": env!("CARGO_PKG_VERSION")
        });

        let body = serde_json::to_string(&health_info).unwrap_or_else(|_| "{}".to_string());
        let response_body = Body::from(body);

        Ok(S3Response::new((StatusCode::OK, response_body)))
    }
}
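
As an aside, a minimal client-side sketch of exercising this handler once it is mounted at `/health` (see the route registration in `make_admin_route` further down). Everything here is illustrative: it assumes a local server on port 9000, the `reqwest` and `tokio` crates, and ignores any authentication the admin router may enforce.

```rust
// Hedged sketch, not part of the repository.
#[tokio::main]
async fn main() -> Result<(), reqwest::Error> {
    // Query the health endpoint registered by make_admin_route below.
    let resp = reqwest::get("http://127.0.0.1:9000/health").await?;
    println!("status: {}", resp.status());
    // Body is the JSON built by HealthCheckHandler: {"status":"ok",...}
    println!("body: {}", resp.text().await?);
    Ok(())
}
```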

pub struct AccountInfoHandler {}
#[async_trait::async_trait]
impl Operation for AccountInfoHandler {
@@ -1233,6 +1257,172 @@ async fn count_bucket_objects(
    }
}

pub struct ProfileHandler {}
#[async_trait::async_trait]
impl Operation for ProfileHandler {
    async fn call(&self, req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        #[cfg(target_os = "windows")]
        {
            return Ok(S3Response::new((
                StatusCode::NOT_IMPLEMENTED,
                Body::from("CPU profiling is not supported on Windows platform".to_string()),
            )));
        }

        #[cfg(not(target_os = "windows"))]
        {
            use crate::profiling;

            if !profiling::is_profiler_enabled() {
                return Ok(S3Response::new((
                    StatusCode::SERVICE_UNAVAILABLE,
                    Body::from("Profiler not enabled. Set RUSTFS_ENABLE_PROFILING=true to enable profiling".to_string()),
                )));
            }

            let queries = extract_query_params(&req.uri);
            let seconds = queries.get("seconds").and_then(|s| s.parse::<u64>().ok()).unwrap_or(30);
            let format = queries.get("format").cloned().unwrap_or_else(|| "protobuf".to_string());

            if seconds > 300 {
                return Ok(S3Response::new((
                    StatusCode::BAD_REQUEST,
                    Body::from("Profile duration cannot exceed 300 seconds".to_string()),
                )));
            }

            let guard = match profiling::get_profiler_guard() {
                Some(guard) => guard,
                None => {
                    return Ok(S3Response::new((
                        StatusCode::SERVICE_UNAVAILABLE,
                        Body::from("Profiler not initialized".to_string()),
                    )));
                }
            };

            info!("Starting CPU profile collection for {} seconds", seconds);

            tokio::time::sleep(std::time::Duration::from_secs(seconds)).await;

            let guard_lock = match guard.lock() {
                Ok(guard) => guard,
                Err(_) => {
                    error!("Failed to acquire profiler guard lock");
                    return Ok(S3Response::new((
                        StatusCode::INTERNAL_SERVER_ERROR,
                        Body::from("Failed to acquire profiler lock".to_string()),
                    )));
                }
            };

            let report = match guard_lock.report().build() {
                Ok(report) => report,
                Err(e) => {
                    error!("Failed to build profiler report: {}", e);
                    return Ok(S3Response::new((
                        StatusCode::INTERNAL_SERVER_ERROR,
                        Body::from(format!("Failed to build profile report: {e}")),
                    )));
                }
            };

            info!("CPU profile collection completed");

            match format.as_str() {
                "protobuf" | "pb" => {
                    let profile = report.pprof().unwrap();
                    let mut body = Vec::new();
                    if let Err(e) = profile.write_to_vec(&mut body) {
                        error!("Failed to serialize protobuf profile: {}", e);
                        return Ok(S3Response::new((
                            StatusCode::INTERNAL_SERVER_ERROR,
                            Body::from("Failed to serialize profile".to_string()),
                        )));
                    }

                    let mut headers = HeaderMap::new();
                    headers.insert(CONTENT_TYPE, "application/octet-stream".parse().unwrap());
                    Ok(S3Response::with_headers((StatusCode::OK, Body::from(body)), headers))
                }
                "flamegraph" | "svg" => {
                    let mut flamegraph_buf = Vec::new();
                    match report.flamegraph(&mut flamegraph_buf) {
                        Ok(()) => (),
                        Err(e) => {
                            error!("Failed to generate flamegraph: {}", e);
                            return Ok(S3Response::new((
                                StatusCode::INTERNAL_SERVER_ERROR,
                                Body::from(format!("Failed to generate flamegraph: {e}")),
                            )));
                        }
                    };

                    let mut headers = HeaderMap::new();
                    headers.insert(CONTENT_TYPE, "image/svg+xml".parse().unwrap());
                    Ok(S3Response::with_headers((StatusCode::OK, Body::from(flamegraph_buf)), headers))
                }
                _ => Ok(S3Response::new((
                    StatusCode::BAD_REQUEST,
                    Body::from("Unsupported format. Use 'protobuf' or 'flamegraph'".to_string()),
                ))),
            }
        }
    }
}
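
For illustration, a client could pull a flamegraph from this handler roughly as follows. This is a sketch only: it assumes a server on 127.0.0.1:9000 started with `RUSTFS_ENABLE_PROFILING=true`, the `reqwest` and `tokio` crates, and it ignores any admin authentication; the path mirrors the registration below (`ADMIN_PREFIX` + `/debug/pprof/profile`).

```rust
// Hedged sketch: request a 10-second CPU profile as an SVG flamegraph.
#[tokio::main]
async fn main() -> Result<(), Box<dyn std::error::Error>> {
    let url = "http://127.0.0.1:9000/rustfs/admin/debug/pprof/profile?seconds=10&format=flamegraph";
    let bytes = reqwest::get(url).await?.bytes().await?;
    std::fs::write("profile.svg", bytes)?; // open in a browser to inspect
    Ok(())
}
```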

pub struct ProfileStatusHandler {}
#[async_trait::async_trait]
impl Operation for ProfileStatusHandler {
    async fn call(&self, _req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
        use std::collections::HashMap;

        #[cfg(target_os = "windows")]
        let status = HashMap::from([
            ("enabled", "false"),
            ("status", "not_supported"),
            ("platform", "windows"),
            ("message", "CPU profiling is not supported on Windows platform"),
        ]);

        #[cfg(not(target_os = "windows"))]
        let status = {
            use crate::profiling;

            if profiling::is_profiler_enabled() {
                HashMap::from([
                    ("enabled", "true"),
                    ("status", "running"),
                    ("supported_formats", "protobuf, flamegraph"),
                    ("max_duration_seconds", "300"),
                    ("endpoint", "/rustfs/admin/debug/pprof/profile"),
                ])
            } else {
                HashMap::from([
                    ("enabled", "false"),
                    ("status", "disabled"),
                    ("message", "Set RUSTFS_ENABLE_PROFILING=true to enable profiling"),
                ])
            }
        };

        match serde_json::to_string(&status) {
            Ok(json) => {
                let mut headers = HeaderMap::new();
                headers.insert(CONTENT_TYPE, "application/json".parse().unwrap());
                Ok(S3Response::with_headers((StatusCode::OK, Body::from(json)), headers))
            }
            Err(e) => {
                error!("Failed to serialize status: {}", e);
                Ok(S3Response::new((
                    StatusCode::INTERNAL_SERVER_ERROR,
                    Body::from("Failed to serialize status".to_string()),
                )))
            }
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

@@ -21,7 +21,8 @@ pub mod utils;

// use ecstore::global::{is_dist_erasure, is_erasure};
use handlers::{
    GetReplicationMetricsHandler, ListRemoteTargetHandler, RemoveRemoteTargetHandler, SetRemoteTargetHandler, bucket_meta,
    GetReplicationMetricsHandler, HealthCheckHandler, ListRemoteTargetHandler, RemoveRemoteTargetHandler, SetRemoteTargetHandler,
    bucket_meta,
    event::{
        GetBucketNotification, ListNotificationTargets, NotificationTarget, RemoveBucketNotification, RemoveNotificationTarget,
        SetBucketNotification,
@@ -41,6 +42,9 @@ const ADMIN_PREFIX: &str = "/rustfs/admin";
pub fn make_admin_route(console_enabled: bool) -> std::io::Result<impl S3Route> {
    let mut r: S3Router<AdminOperation> = S3Router::new(console_enabled);

    // Health check endpoint for monitoring and orchestration
    r.insert(Method::GET, "/health", AdminOperation(&HealthCheckHandler {}))?;

    // 1
    r.insert(Method::POST, "/", AdminOperation(&sts::AssumeRoleHandle {}))?;

@@ -214,6 +218,21 @@ pub fn make_admin_route(console_enabled: bool) -> std::io::Result<impl S3Route>
        AdminOperation(&RemoveRemoteTargetHandler {}),
    )?;

    // Performance profiling endpoints (registered on non-Windows platforms only;
    // the handlers above also return platform-specific responses)
    #[cfg(not(target_os = "windows"))]
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/debug/pprof/profile").as_str(),
        AdminOperation(&handlers::ProfileHandler {}),
    )?;

    #[cfg(not(target_os = "windows"))]
    r.insert(
        Method::GET,
        format!("{}{}", ADMIN_PREFIX, "/debug/pprof/status").as_str(),
        AdminOperation(&handlers::ProfileStatusHandler {}),
    )?;

    Ok(r)
}
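
For clarity, the two profiling routes registered above expand to fixed paths; this throwaway check (not in the repository) spells them out:

```rust
// Hedged sketch: the format! calls above produce these literal paths,
// matching the "endpoint" value reported by ProfileStatusHandler.
const ADMIN_PREFIX: &str = "/rustfs/admin";

fn main() {
    let profile = format!("{}{}", ADMIN_PREFIX, "/debug/pprof/profile");
    let status = format!("{}{}", ADMIN_PREFIX, "/debug/pprof/status");
    assert_eq!(profile, "/rustfs/admin/debug/pprof/profile");
    assert_eq!(status, "/rustfs/admin/debug/pprof/status");
}
```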

79  rustfs/src/config/config_test.rs  Normal file
@@ -0,0 +1,79 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#[cfg(test)]
mod tests {
    use crate::config::Opt;
    use clap::Parser;

    #[test]
    fn test_default_console_configuration() {
        // Test that default console configuration is correct
        let args = vec!["rustfs", "/test/volume"];
        let opt = Opt::parse_from(args);

        assert!(opt.console_enable);
        assert_eq!(opt.console_address, ":9001");
        assert_eq!(opt.external_address, ":9000"); // Now defaults to DEFAULT_ADDRESS
        assert_eq!(opt.address, ":9000");
    }

    #[test]
    fn test_custom_console_configuration() {
        // Test custom console configuration
        let args = vec![
            "rustfs",
            "/test/volume",
            "--console-address",
            ":8080",
            "--address",
            ":8000",
            "--console-enable",
            "false",
        ];
        let opt = Opt::parse_from(args);

        assert!(opt.console_enable);
        assert_eq!(opt.console_address, ":8080");
        assert_eq!(opt.address, ":8000");
    }

    #[test]
    fn test_external_address_configuration() {
        // Test external address configuration for Docker
        let args = vec!["rustfs", "/test/volume", "--external-address", ":9020"];
        let opt = Opt::parse_from(args);

        assert_eq!(opt.external_address, ":9020".to_string());
    }

    #[test]
    fn test_console_and_endpoint_ports_different() {
        // Ensure console and endpoint use different default ports
        let args = vec!["rustfs", "/test/volume"];
        let opt = Opt::parse_from(args);

        // Parse port numbers from addresses
        let endpoint_port: u16 = opt.address.trim_start_matches(':').parse().expect("Invalid endpoint port");
        let console_port: u16 = opt
            .console_address
            .trim_start_matches(':')
            .parse()
            .expect("Invalid console port");

        assert_ne!(endpoint_port, console_port, "Console and endpoint should use different ports");
        assert_eq!(endpoint_port, 9000);
        assert_eq!(console_port, 9001);
    }
}
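
The ":PORT" shorthand asserted in these tests is easy to misread; a small hedged helper (not part of the repository) shows the same trim-and-parse idea the last test relies on:

```rust
// Hedged sketch: extract the port from a ":9000"-style bind address.
fn port_of(addr: &str) -> Option<u16> {
    addr.trim_start_matches(':').parse().ok()
}

fn main() {
    assert_eq!(port_of(":9000"), Some(9000));
    assert_eq!(port_of(":9001"), Some(9001));
    assert_eq!(port_of("not-a-port"), None);
}
```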

@@ -17,6 +17,9 @@ use const_str::concat;
use std::string::ToString;
shadow_rs::shadow!(build);

#[cfg(test)]
mod config_test;

#[allow(clippy::const_is_empty)]
const SHORT_VERSION: &str = {
    if !build::TAG.is_empty() {
@@ -68,6 +71,16 @@ pub struct Opt {
    #[arg(long, default_value_t = rustfs_config::DEFAULT_CONSOLE_ENABLE, env = "RUSTFS_CONSOLE_ENABLE")]
    pub console_enable: bool,

    /// Console server bind address
    #[arg(long, default_value_t = rustfs_config::DEFAULT_CONSOLE_ADDRESS.to_string(), env = "RUSTFS_CONSOLE_ADDRESS")]
    pub console_address: String,

    /// External address for console to access endpoint (used in Docker deployments)
    /// This should match the mapped host port when using Docker port mapping
    /// Example: ":9020" when mapping host port 9020 to container port 9000
    #[arg(long, default_value_t = rustfs_config::DEFAULT_ADDRESS.to_string(), env = "RUSTFS_EXTERNAL_ADDRESS")]
    pub external_address: String,

    /// Observability endpoint for traces, metrics, and logs; only the gRPC mode is supported.
    #[arg(long, default_value_t = rustfs_config::DEFAULT_OBS_ENDPOINT.to_string(), env = "RUSTFS_OBS_ENDPOINT")]
    pub obs_endpoint: String,
@@ -76,6 +89,18 @@ pub struct Opt {
    #[arg(long, env = "RUSTFS_TLS_PATH")]
    pub tls_path: Option<String>,

    /// Enable rate limiting for console
    #[arg(long, default_value_t = rustfs_config::DEFAULT_CONSOLE_RATE_LIMIT_ENABLE, env = "RUSTFS_CONSOLE_RATE_LIMIT_ENABLE")]
    pub console_rate_limit_enable: bool,

    /// Console rate limit: requests per minute
    #[arg(long, default_value_t = rustfs_config::DEFAULT_CONSOLE_RATE_LIMIT_RPM, env = "RUSTFS_CONSOLE_RATE_LIMIT_RPM")]
    pub console_rate_limit_rpm: u32,

    /// Console authentication timeout in seconds
    #[arg(long, default_value_t = rustfs_config::DEFAULT_CONSOLE_AUTH_TIMEOUT, env = "RUSTFS_CONSOLE_AUTH_TIMEOUT")]
    pub console_auth_timeout: u64,

    #[arg(long, env = "RUSTFS_LICENSE")]
    pub license: Option<String>,

@@ -18,13 +18,19 @@ mod config;
mod error;
// mod grpc;
pub mod license;
#[cfg(not(target_os = "windows"))]
mod profiling;
mod server;
mod storage;
mod update;
mod version;

// Ensure the correct path for parse_license is imported
use crate::server::{SHUTDOWN_TIMEOUT, ServiceState, ServiceStateManager, ShutdownSignal, start_http_server, wait_for_shutdown};
use crate::admin::console::init_console_cfg;
use crate::server::{
    SHUTDOWN_TIMEOUT, ServiceState, ServiceStateManager, ShutdownSignal, start_console_server, start_http_server,
    wait_for_shutdown,
};
use crate::storage::ecfs::{process_lambda_configurations, process_queue_configurations, process_topic_configurations};
use chrono::Datelike;
use clap::Parser;
@@ -35,6 +41,8 @@ use rustfs_ahm::{
};
use rustfs_common::globals::set_global_addr;
use rustfs_config::DEFAULT_DELIMITER;
use rustfs_config::DEFAULT_UPDATE_CHECK;
use rustfs_config::ENV_UPDATE_CHECK;
use rustfs_ecstore::bucket::metadata_sys;
use rustfs_ecstore::bucket::metadata_sys::init_bucket_metadata_sys;
use rustfs_ecstore::cmd::bucket_replication::init_bucket_replication_pool;
@@ -68,6 +76,18 @@ use tracing::{debug, error, info, instrument, warn};
#[global_allocator]
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;

#[cfg(all(target_os = "linux", target_env = "musl"))]
#[global_allocator]
static GLOBAL: mimalloc::MiMalloc = mimalloc::MiMalloc;

const LOGO: &str = r#"

░█▀▄░█░█░█▀▀░▀█▀░█▀▀░█▀▀
░█▀▄░█░█░▀▀█░░█░░█▀▀░▀▀█
░▀░▀░▀▀▀░▀▀▀░░▀░░▀░░░▀▀▀

"#;

#[instrument]
fn print_server_info() {
    let current_year = chrono::Utc::now().year();
@@ -91,9 +111,16 @@ async fn main() -> Result<()> {
    // Initialize Observability
    let (_logger, guard) = init_obs(Some(opt.clone().obs_endpoint)).await;

    // print startup logo
    info!("{}", LOGO);

    // Store in global storage
    set_global_guard(guard).map_err(Error::other)?;

    // Initialize performance profiling if enabled
    #[cfg(not(target_os = "windows"))]
    profiling::start_profiling_if_enabled();

    // Run parameters
    run(opt).await
}
@@ -102,10 +129,12 @@ async fn main() -> Result<()> {
async fn run(opt: config::Opt) -> Result<()> {
    debug!("opt: {:?}", &opt);

    // Initialize global DNS resolver early for enhanced DNS resolution
    if let Err(e) = init_global_dns_resolver().await {
        warn!("Failed to initialize global DNS resolver: {}. Using standard DNS resolution.", e);
    }
    // Initialize global DNS resolver early for enhanced DNS resolution (concurrent)
    let dns_init = tokio::spawn(async {
        if let Err(e) = init_global_dns_resolver().await {
            warn!("Failed to initialize global DNS resolver: {}. Using standard DNS resolution.", e);
        }
    });

    if let Some(region) = &opt.region {
        rustfs_ecstore::global::set_global_region(region.clone());
@@ -115,7 +144,7 @@ async fn run(opt: config::Opt) -> Result<()> {
    let server_port = server_addr.port();
    let server_address = server_addr.to_string();

    debug!("server_address {}", &server_address);
    info!("server_address {}, ip:{}", &server_address, server_addr.ip());

    // Set up AK and SK
    rustfs_ecstore::global::init_global_action_cred(Some(opt.access_key.clone()), Some(opt.secret_key.clone()));
@@ -124,9 +153,13 @@ async fn run(opt: config::Opt) -> Result<()> {

    set_global_addr(&opt.address).await;

    // Wait for DNS initialization to complete before network-heavy operations
    dns_init.await.map_err(Error::other)?;

    // For RPC
    let (endpoint_pools, setup_type) =
        EndpointServerPools::from_volumes(server_address.clone().as_str(), opt.volumes.clone()).map_err(Error::other)?;
    let (endpoint_pools, setup_type) = EndpointServerPools::from_volumes(server_address.clone().as_str(), opt.volumes.clone())
        .await
        .map_err(Error::other)?;

    for (i, eps) in endpoint_pools.as_ref().iter().enumerate() {
        info!(
@@ -157,6 +190,47 @@ async fn run(opt: config::Opt) -> Result<()> {
    state_manager.update(ServiceState::Starting);

    let shutdown_tx = start_http_server(&opt, state_manager.clone()).await?;
    // Start console server if enabled
    let console_shutdown_tx = shutdown_tx.clone();
    if opt.console_enable && !opt.console_address.is_empty() {
        // Deal with port mapping issues for virtual machines like docker
        let (external_addr, external_port) = if !opt.external_address.is_empty() {
            let external_addr = parse_and_resolve_address(opt.external_address.as_str()).map_err(Error::other)?;
            let external_port = external_addr.port();
            if external_port != server_port {
                warn!(
                    "External port {} is different from server port {}, ensure your firewall allows access to the external port if needed.",
                    external_port, server_port
                );
            }
            info!("Using external address {} for endpoint access", external_addr);
            rustfs_ecstore::global::set_global_rustfs_external_port(external_port);
            set_global_addr(&opt.external_address).await;
            (external_addr.ip(), external_port)
        } else {
            (server_addr.ip(), server_port)
        };
        warn!("Starting console server on address: '{}', port: '{}'", external_addr, external_port);
        // init console configuration
        init_console_cfg(external_addr, external_port);

        let opt_clone = opt.clone();
        tokio::spawn(async move {
            let console_shutdown_rx = console_shutdown_tx.subscribe();
            if let Err(e) = start_console_server(&opt_clone, console_shutdown_rx).await {
                error!("Console server failed to start: {}", e);
            }
        });
    } else {
        info!("Console server is disabled.");
        info!("You can access the RustFS API at {}", &opt.address);
        info!("For more information, visit https://rustfs.com/docs/");
        info!("To enable the console, restart the server with --console-enable and a valid --console-address.");
        info!(
            "Current console address is set to: '{}' ,console enable is set to: '{}'",
            &opt.console_address, &opt.console_enable
        );
    }

    set_global_endpoints(endpoint_pools.as_ref().clone());
    update_erasure_type(setup_type).await;
@@ -187,14 +261,11 @@ async fn run(opt: config::Opt) -> Result<()> {
    // Collect bucket names into a vector
    let buckets: Vec<String> = buckets_list.into_iter().map(|v| v.name).collect();

    // Initialize the bucket metadata system
    init_bucket_metadata_sys(store.clone(), buckets.clone()).await;

    // Initialize the IAM system
    init_iam_sys(store.clone()).await?;
    init_iam_sys(store.clone()).await.map_err(Error::other)?;

    // add bucket notification configuration
    add_bucket_notification_configuration(buckets).await;
    add_bucket_notification_configuration(buckets.clone()).await;

    // Initialize the global notification system
    new_global_notification_sys(endpoint_pools.clone()).await.map_err(|err| {
@@ -239,37 +310,22 @@ async fn run(opt: config::Opt) -> Result<()> {
    // initialize bucket replication pool
    init_bucket_replication_pool().await;

    // Async update check (optional)
    tokio::spawn(async {
        use crate::update::{UpdateCheckError, check_updates};
    init_update_check();

        match check_updates().await {
            Ok(result) => {
                if result.update_available {
                    if let Some(latest) = &result.latest_version {
                        info!(
                            "🚀 Version check: New version available: {} -> {} (current: {})",
                            result.current_version, latest.version, result.current_version
                        );
                        if let Some(notes) = &latest.release_notes {
                            info!("📝 Release notes: {}", notes);
                        }
                        if let Some(url) = &latest.download_url {
                            info!("🔗 Download URL: {}", url);
                        }
                    }
                } else {
                    debug!("✅ Version check: Current version is up to date: {}", result.current_version);
                }
            }
            Err(UpdateCheckError::HttpError(e)) => {
                debug!("Version check: network error (this is normal): {}", e);
            }
            Err(e) => {
                debug!("Version check: failed (this is normal): {}", e);
            }
        }
    });
    // if opt.console_enable {
    //     debug!("console is enabled");
    //     let console_address = opt.console_address.clone();
    //     let tls_path = opt.tls_path.clone();
    //
    //     if console_address.is_empty() {
    //         error!("console_address is empty");
    //         return Err(Error::other("console_address is empty".to_string()));
    //     }
    //
    //     tokio::spawn(async move {
    //         console::start_static_file_server(&console_address, tls_path).await;
    //     });
    // }

    // Perform hibernation for 1 second
    tokio::time::sleep(SHUTDOWN_TIMEOUT).await;
@@ -348,7 +404,7 @@ async fn init_event_notifier() {
        }
    };

    info!("Global server configuration loaded successfully. config: {:?}", server_config);
    info!("Global server configuration loaded successfully");
    // 2. Check if the notify subsystem exists in the configuration, and skip initialization if it doesn't
    if server_config
        .get_value(rustfs_config::notify::NOTIFY_MQTT_SUB_SYS, DEFAULT_DELIMITER)
@@ -364,12 +420,57 @@ async fn init_event_notifier() {
    info!("Event notifier configuration found, proceeding with initialization.");

    // 3. Initialize the notification system asynchronously with a global configuration
    // Put it into a separate task to avoid blocking the main initialization process
    tokio::spawn(async move {
        if let Err(e) = rustfs_notify::initialize(server_config).await {
            error!("Failed to initialize event notifier system: {}", e);
        } else {
            info!("Event notifier system initialized successfully.");
    // Use direct await for better error handling and faster initialization
    if let Err(e) = rustfs_notify::initialize(server_config).await {
        error!("Failed to initialize event notifier system: {}", e);
    } else {
        info!("Event notifier system initialized successfully.");
    }
}

fn init_update_check() {
    let update_check_enable = std::env::var(ENV_UPDATE_CHECK)
        .unwrap_or_else(|_| DEFAULT_UPDATE_CHECK.to_string())
        .parse::<bool>()
        .unwrap_or(DEFAULT_UPDATE_CHECK);

    if !update_check_enable {
        return;
    }

    // Async update check with timeout
    tokio::spawn(async {
        use crate::update::{UpdateCheckError, check_updates};

        // Add timeout to prevent hanging network calls
        match tokio::time::timeout(std::time::Duration::from_secs(30), check_updates()).await {
            Ok(Ok(result)) => {
                if result.update_available {
                    if let Some(latest) = &result.latest_version {
                        info!(
                            "🚀 Version check: New version available: {} -> {} (current: {})",
                            result.current_version, latest.version, result.current_version
                        );
                        if let Some(notes) = &latest.release_notes {
                            info!("📝 Release notes: {}", notes);
                        }
                        if let Some(url) = &latest.download_url {
                            info!("🔗 Download URL: {}", url);
                        }
                    }
                } else {
                    debug!("✅ Version check: Current version is up to date: {}", result.current_version);
                }
            }
            Ok(Err(UpdateCheckError::HttpError(e))) => {
                debug!("Version check: network error (this is normal): {}", e);
            }
            Ok(Err(e)) => {
                debug!("Version check: failed (this is normal): {}", e);
            }
            Err(_) => {
                debug!("Version check: timeout after 30 seconds (this is normal)");
            }
        }
    });
}
63  rustfs/src/profiling.rs  Normal file
@@ -0,0 +1,63 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use pprof::ProfilerGuard;
use std::sync::{Arc, Mutex, OnceLock};
use tracing::info;

static PROFILER_GUARD: OnceLock<Arc<Mutex<ProfilerGuard<'static>>>> = OnceLock::new();

pub fn init_profiler() -> Result<(), Box<dyn std::error::Error>> {
    let guard = pprof::ProfilerGuardBuilder::default()
        .frequency(1000)
        .blocklist(&["libc", "libgcc", "pthread", "vdso"])
        .build()
        .map_err(|e| format!("Failed to build profiler guard: {e}"))?;

    PROFILER_GUARD
        .set(Arc::new(Mutex::new(guard)))
        .map_err(|_| "Failed to set profiler guard (already initialized)")?;

    info!("Performance profiler initialized");
    Ok(())
}

pub fn is_profiler_enabled() -> bool {
    PROFILER_GUARD.get().is_some()
}

pub fn get_profiler_guard() -> Option<Arc<Mutex<ProfilerGuard<'static>>>> {
    PROFILER_GUARD.get().cloned()
}

pub fn start_profiling_if_enabled() {
    let enable_profiling = std::env::var("RUSTFS_ENABLE_PROFILING")
        .unwrap_or_else(|_| "false".to_string())
        .parse::<bool>()
        .unwrap_or(false);

    if enable_profiling {
        match init_profiler() {
            Ok(()) => {
                info!("Performance profiling enabled via RUSTFS_ENABLE_PROFILING environment variable");
            }
            Err(e) => {
                tracing::error!("Failed to initialize profiler: {}", e);
                info!("Performance profiling disabled due to initialization error");
            }
        }
    } else {
        info!("Performance profiling disabled. Set RUSTFS_ENABLE_PROFILING=true to enable");
    }
}
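
Outside of RustFS, the same pprof flow this module wires up can be exercised standalone. A hedged sketch, assuming the `pprof` crate with its `flamegraph` feature enabled; the names mirror the module above but nothing here is part of the repository:

```rust
// Hedged sketch of the guard -> report -> flamegraph pipeline.
fn main() -> Result<(), Box<dyn std::error::Error>> {
    let guard = pprof::ProfilerGuardBuilder::default()
        .frequency(1000) // samples per second, as in init_profiler()
        .build()?;

    // ... run the workload to be profiled ...

    let report = guard.report().build()?;
    let mut svg = Vec::new();
    report.flamegraph(&mut svg)?; // same call ProfileHandler uses for "svg"
    std::fs::write("profile.svg", svg)?;
    Ok(())
}
```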
398  rustfs/src/server/console.rs  Normal file
@@ -0,0 +1,398 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::admin::console::static_handler;
use crate::config::Opt;
use axum::{Router, extract::Request, middleware, response::Json, routing::get};
use axum_server::tls_rustls::RustlsConfig;
use http::{HeaderValue, Method};
use rustfs_config::{RUSTFS_TLS_CERT, RUSTFS_TLS_KEY};
use rustfs_utils::net::parse_and_resolve_address;
use serde_json::json;
use std::io::Result;
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
use tokio_rustls::rustls::ServerConfig;
use tower_http::catch_panic::CatchPanicLayer;
use tower_http::cors::{AllowOrigin, Any, CorsLayer};
use tower_http::limit::RequestBodyLimitLayer;
use tower_http::timeout::TimeoutLayer;
use tower_http::trace::TraceLayer;
use tracing::{debug, error, info, instrument, warn};

const CONSOLE_PREFIX: &str = "/rustfs/console";

/// Console access logging middleware
async fn console_logging_middleware(req: Request, next: axum::middleware::Next) -> axum::response::Response {
    let method = req.method().clone();
    let uri = req.uri().clone();
    let start = std::time::Instant::now();

    let response = next.run(req).await;
    let duration = start.elapsed();

    info!(
        target: "rustfs::console::access",
        method = %method,
        uri = %uri,
        status = %response.status(),
        duration_ms = %duration.as_millis(),
        "Console access"
    );

    response
}

/// Setup TLS configuration for console using axum-server, following endpoint TLS implementation logic
#[instrument(skip(tls_path))]
async fn setup_console_tls_config(tls_path: Option<&String>) -> Result<Option<RustlsConfig>> {
    let tls_path = match tls_path {
        Some(path) if !path.is_empty() => path,
        _ => {
            debug!("TLS path is not provided, console starting with HTTP");
            return Ok(None);
        }
    };

    if tokio::fs::metadata(tls_path).await.is_err() {
        debug!("TLS path does not exist, console starting with HTTP");
        return Ok(None);
    }

    debug!("Found TLS directory for console, checking for certificates");

    // Make sure to use a modern encryption suite
    let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();

    // 1. Attempt to load all certificates in the directory (multi-certificate support, for SNI)
    if let Ok(cert_key_pairs) = rustfs_utils::load_all_certs_from_directory(tls_path) {
        if !cert_key_pairs.is_empty() {
            debug!(
                "Found {} certificates for console, creating SNI-aware multi-cert resolver",
                cert_key_pairs.len()
            );

            // Create an SNI-enabled certificate resolver
            let resolver = rustfs_utils::create_multi_cert_resolver(cert_key_pairs)?;

            // Configure the server to enable SNI support
            let mut server_config = ServerConfig::builder()
                .with_no_client_auth()
                .with_cert_resolver(Arc::new(resolver));

            // Configure ALPN protocol priority
            server_config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec(), b"http/1.0".to_vec()];

            // Log SNI requests
            if rustfs_utils::tls_key_log() {
                server_config.key_log = Arc::new(rustls::KeyLogFile::new());
            }

            info!(target: "rustfs::console::tls", "Console TLS enabled with multi-certificate SNI support");
            return Ok(Some(RustlsConfig::from_config(Arc::new(server_config))));
        }
    }

    // 2. Revert to the traditional single-certificate mode
    let key_path = format!("{tls_path}/{RUSTFS_TLS_KEY}");
    let cert_path = format!("{tls_path}/{RUSTFS_TLS_CERT}");
    if tokio::try_join!(tokio::fs::metadata(&key_path), tokio::fs::metadata(&cert_path)).is_ok() {
        debug!("Found legacy single TLS certificate for console, starting with HTTPS");

        return match RustlsConfig::from_pem_file(cert_path, key_path).await {
            Ok(config) => {
                info!(target: "rustfs::console::tls", "Console TLS enabled with single certificate");
                Ok(Some(config))
            }
            Err(e) => {
                error!(target: "rustfs::console::error", error = %e, "Failed to create TLS config for console");
                Err(std::io::Error::other(e))
            }
        };
    }

    debug!("No valid TLS certificates found in the directory for console, starting with HTTP");
    Ok(None)
}

/// Get console configuration from environment variables
fn get_console_config_from_env() -> (bool, u32, u64, String) {
    let rate_limit_enable = std::env::var(rustfs_config::ENV_CONSOLE_RATE_LIMIT_ENABLE)
        .unwrap_or_else(|_| rustfs_config::DEFAULT_CONSOLE_RATE_LIMIT_ENABLE.to_string())
        .parse::<bool>()
        .unwrap_or(rustfs_config::DEFAULT_CONSOLE_RATE_LIMIT_ENABLE);

    let rate_limit_rpm = std::env::var(rustfs_config::ENV_CONSOLE_RATE_LIMIT_RPM)
        .unwrap_or_else(|_| rustfs_config::DEFAULT_CONSOLE_RATE_LIMIT_RPM.to_string())
        .parse::<u32>()
        .unwrap_or(rustfs_config::DEFAULT_CONSOLE_RATE_LIMIT_RPM);

    let auth_timeout = std::env::var(rustfs_config::ENV_CONSOLE_AUTH_TIMEOUT)
        .unwrap_or_else(|_| rustfs_config::DEFAULT_CONSOLE_AUTH_TIMEOUT.to_string())
        .parse::<u64>()
        .unwrap_or(rustfs_config::DEFAULT_CONSOLE_AUTH_TIMEOUT);
    let cors_allowed_origins = std::env::var(rustfs_config::ENV_CONSOLE_CORS_ALLOWED_ORIGINS)
        .unwrap_or_else(|_| rustfs_config::DEFAULT_CONSOLE_CORS_ALLOWED_ORIGINS.to_string())
        .parse::<String>()
        .unwrap_or(rustfs_config::DEFAULT_CONSOLE_CORS_ALLOWED_ORIGINS.to_string());

    (rate_limit_enable, rate_limit_rpm, auth_timeout, cors_allowed_origins)
}

/// Setup comprehensive middleware stack with tower-http features
fn setup_console_middleware_stack(
    cors_layer: CorsLayer,
    rate_limit_enable: bool,
    rate_limit_rpm: u32,
    auth_timeout: u64,
) -> Router {
    let mut app = Router::new()
        .route("/license", get(crate::admin::console::license_handler))
        .route("/config.json", get(crate::admin::console::config_handler))
        .route("/health", get(health_check))
        .nest(CONSOLE_PREFIX, Router::new().fallback_service(get(static_handler)))
        .fallback_service(get(static_handler));

    // Add comprehensive middleware layers using tower-http features
    app = app
        .layer(CatchPanicLayer::new())
        .layer(TraceLayer::new_for_http())
        .layer(middleware::from_fn(console_logging_middleware))
        .layer(cors_layer)
        // Add timeout layer - convert auth_timeout from seconds to Duration
        .layer(TimeoutLayer::new(Duration::from_secs(auth_timeout)))
        // Add request body limit (10MB for console uploads)
        .layer(RequestBodyLimitLayer::new(10 * 1024 * 1024));

    // Add rate limiting if enabled
    if rate_limit_enable {
        info!("Console rate limiting enabled: {} requests per minute", rate_limit_rpm);
        // Note: tower-http doesn't provide a built-in rate limiter, but we have the foundation
        // For production, you would integrate with a rate limiting service like Redis
        // For now, we log that it's configured and ready for integration
    }

    app
}

/// Console health check handler with comprehensive health information
async fn health_check() -> Json<serde_json::Value> {
    use rustfs_ecstore::new_object_layer_fn;

    let mut health_status = "ok";
    let mut details = json!({});

    // Check storage backend health
    if let Some(_store) = new_object_layer_fn() {
        details["storage"] = json!({"status": "connected"});
    } else {
        health_status = "degraded";
        details["storage"] = json!({"status": "disconnected"});
    }

    // Check IAM system health
    match rustfs_iam::get() {
        Ok(_) => {
            details["iam"] = json!({"status": "connected"});
        }
        Err(_) => {
            health_status = "degraded";
            details["iam"] = json!({"status": "disconnected"});
        }
    }

    Json(json!({
        "status": health_status,
        "service": "rustfs-console",
        "timestamp": chrono::Utc::now().to_rfc3339(),
        "version": env!("CARGO_PKG_VERSION"),
        "details": details,
        "uptime": std::time::SystemTime::now()
            .duration_since(std::time::UNIX_EPOCH)
            .unwrap_or_default()
            .as_secs()
    }))
}

/// Parse CORS allowed origins from configuration
pub fn parse_cors_origins(origins: Option<&String>) -> CorsLayer {
    let cors_layer = CorsLayer::new()
        .allow_methods([Method::GET, Method::POST, Method::PUT, Method::DELETE, Method::OPTIONS])
        .allow_headers(Any);

    match origins {
        Some(origins_str) if origins_str == "*" => cors_layer.allow_origin(Any).expose_headers(Any),
        Some(origins_str) => {
            let origins: Vec<&str> = origins_str.split(',').map(|s| s.trim()).collect();
            if origins.is_empty() {
                warn!("Empty CORS origins provided, using permissive CORS");
                cors_layer.allow_origin(Any).expose_headers(Any)
            } else {
                // Parse origins with proper error handling
                let mut valid_origins = Vec::new();
                for origin in origins {
                    match origin.parse::<HeaderValue>() {
                        Ok(header_value) => {
                            valid_origins.push(header_value);
                        }
                        Err(e) => {
                            warn!("Invalid CORS origin '{}': {}", origin, e);
                        }
                    }
                }

                if valid_origins.is_empty() {
                    warn!("No valid CORS origins found, using permissive CORS");
                    cors_layer.allow_origin(Any).expose_headers(Any)
                } else {
                    info!("Console CORS origins configured: {:?}", valid_origins);
                    cors_layer.allow_origin(AllowOrigin::list(valid_origins)).expose_headers(Any)
                }
            }
        }
        None => {
            debug!("No CORS origins configured for console, using permissive CORS");
            cors_layer.allow_origin(Any)
        }
    }
}
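
A hedged usage sketch for the parser above (illustrative only; the origin strings are made up):

```rust
// How parse_cors_origins behaves for the three common inputs.
fn demo() {
    let wildcard = Some("*".to_string());
    let _permissive = parse_cors_origins(wildcard.as_ref()); // any origin allowed

    let list = Some("http://localhost:3000, https://admin.example.com".to_string());
    let _restricted = parse_cors_origins(list.as_ref()); // AllowOrigin::list of the valid entries

    let _default = parse_cors_origins(None); // permissive fallback
}
```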
|
||||
|
||||
/// Start the standalone console server with enhanced security and monitoring
|
||||
#[instrument(skip(opt, shutdown_rx))]
|
||||
pub async fn start_console_server(opt: &Opt, shutdown_rx: tokio::sync::broadcast::Receiver<()>) -> Result<()> {
|
||||
if !opt.console_enable {
|
||||
debug!("Console server is disabled");
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let console_addr = parse_and_resolve_address(&opt.console_address)?;
|
||||
|
||||
// Get configuration from environment variables
|
||||
let (rate_limit_enable, rate_limit_rpm, auth_timeout, cors_allowed_origins) = get_console_config_from_env();
|
||||
|
||||
// Setup TLS configuration if certificates are available
|
||||
let tls_config = setup_console_tls_config(opt.tls_path.as_ref()).await?;
|
||||
let tls_enabled = tls_config.is_some();
|
||||
|
||||
info!(
|
||||
target: "rustfs::console::startup",
|
||||
address = %console_addr,
|
||||
tls_enabled = tls_enabled,
|
||||
rate_limit_enabled = rate_limit_enable,
|
||||
rate_limit_rpm = rate_limit_rpm,
|
||||
auth_timeout_seconds = auth_timeout,
|
||||
cors_allowed_origins = %cors_allowed_origins,
|
||||
"Starting console server"
|
||||
);
|
||||
|
||||
// String to Option<&String>
|
||||
let cors_allowed_origins = if cors_allowed_origins.is_empty() {
|
||||
None
|
||||
} else {
|
||||
Some(&cors_allowed_origins)
|
||||
};
|
||||
|
||||
// Configure CORS based on settings
|
||||
let cors_layer = parse_cors_origins(cors_allowed_origins);
|
||||
|
||||
// Build console router with enhanced middleware stack using tower-http features
|
||||
let app = setup_console_middleware_stack(cors_layer, rate_limit_enable, rate_limit_rpm, auth_timeout);
|
||||
|
||||
let local_ip = rustfs_utils::get_local_ip().unwrap_or_else(|| "127.0.0.1".parse().unwrap());
|
||||
let protocol = if tls_enabled { "https" } else { "http" };
|
||||
|
||||
info!(
|
||||
target: "rustfs::console::startup",
|
||||
"Console WebUI available at: {}://{}:{}/rustfs/console/index.html",
|
||||
protocol, local_ip, console_addr.port()
|
||||
);
|
||||
info!(
|
||||
target: "rustfs::console::startup",
|
||||
"Console WebUI (localhost): {}://127.0.0.1:{}/rustfs/console/index.html",
|
||||
protocol, console_addr.port()
|
||||
);
|
||||
|
||||
// Handle connections based on TLS availability using axum-server
|
||||
if let Some(tls_config) = tls_config {
|
||||
handle_tls_connections(console_addr, app, tls_config, shutdown_rx).await
|
||||
} else {
|
||||
handle_plain_connections(console_addr, app, shutdown_rx).await
|
||||
}
|
||||
}
|
||||
|
||||
/// Handle TLS connections for console using axum-server with proper TLS support
|
||||
async fn handle_tls_connections(
|
||||
server_addr: SocketAddr,
|
||||
app: Router,
|
||||
tls_config: RustlsConfig,
|
||||
mut shutdown_rx: tokio::sync::broadcast::Receiver<()>,
|
||||
) -> Result<()> {
|
||||
info!(target: "rustfs::console::tls", "Starting Console HTTPS server on {}", server_addr);
|
||||
|
||||
let handle = axum_server::Handle::new();
|
||||
let handle_clone = handle.clone();
|
||||
|
||||
// Spawn shutdown signal handler
|
||||
tokio::spawn(async move {
|
||||
let _ = shutdown_rx.recv().await;
|
||||
info!(target: "rustfs::console::shutdown", "Console TLS server shutdown signal received");
|
||||
handle_clone.graceful_shutdown(Some(Duration::from_secs(10)));
|
||||
});
|
||||
|
||||
// Start the HTTPS server using axum-server with RustlsConfig
|
||||
if let Err(e) = axum_server::bind_rustls(server_addr, tls_config)
|
||||
.handle(handle)
|
||||
.serve(app.into_make_service())
|
||||
.await
|
||||
{
|
||||
error!(target: "rustfs::console::error", error = %e, "Console TLS server error");
|
||||
return Err(std::io::Error::other(e));
|
||||
}
|
||||
|
||||
info!(target: "rustfs::console::shutdown", "Console TLS server stopped");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Handle plain HTTP connections using axum-server
|
||||
async fn handle_plain_connections(
|
||||
server_addr: SocketAddr,
|
||||
app: Router,
|
||||
mut shutdown_rx: tokio::sync::broadcast::Receiver<()>,
|
||||
) -> Result<()> {
|
||||
info!(target: "rustfs::console::startup", "Starting Console HTTP server on {}", server_addr);
|
||||
|
||||
let handle = axum_server::Handle::new();
|
||||
let handle_clone = handle.clone();
|
||||
|
||||
// Spawn shutdown signal handler
|
||||
tokio::spawn(async move {
|
||||
let _ = shutdown_rx.recv().await;
|
||||
info!(target: "rustfs::console::shutdown", "Console server shutdown signal received");
|
||||
handle_clone.graceful_shutdown(Some(Duration::from_secs(10)));
|
||||
});
|
||||
|
||||
// Start the HTTP server using axum-server
|
||||
if let Err(e) = axum_server::bind(server_addr)
|
||||
.handle(handle)
|
||||
.serve(app.into_make_service())
|
||||
.await
|
||||
{
|
||||
error!(target: "rustfs::console::error", error = %e, "Console server error");
|
||||
return Err(std::io::Error::other(e));
|
||||
}
|
||||
|
||||
info!(target: "rustfs::console::shutdown", "Console server stopped");
|
||||
Ok(())
|
||||
}
|
||||
146
rustfs/src/server/console_test.rs
Normal file
146
rustfs/src/server/console_test.rs
Normal file
@@ -0,0 +1,146 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::config::Opt;
|
||||
use crate::server::console::start_console_server;
|
||||
use clap::Parser;
|
||||
use tokio::time::{Duration, timeout};
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_console_server_can_start_and_stop() {
|
||||
// Test that console server can be started and shut down gracefully
|
||||
let args = vec!["rustfs", "/tmp/test", "--console-address", ":0"]; // Use port 0 for auto-assignment
|
||||
let opt = Opt::parse_from(args);
|
||||
|
||||
let (tx, rx) = tokio::sync::broadcast::channel(1);
|
||||
|
||||
// Start console server in a background task
|
||||
let handle = tokio::spawn(async move { start_console_server(&opt, rx).await });
|
||||
|
||||
// Give it a moment to start
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
|
||||
// Send shutdown signal
|
||||
let _ = tx.send(());
|
||||
|
||||
// Wait for server to shut down
|
||||
let result = timeout(Duration::from_secs(5), handle).await;
|
||||
|
||||
assert!(result.is_ok(), "Console server should shutdown gracefully");
|
||||
let server_result = result.unwrap();
|
||||
assert!(server_result.is_ok(), "Console server should not have errors");
|
||||
let final_result = server_result.unwrap();
|
||||
assert!(final_result.is_ok(), "Console server should complete successfully");
|
||||
}
|
||||
|
||||

    #[tokio::test]
    async fn test_console_cors_configuration() {
        // Test CORS configuration parsing
        use crate::server::console::parse_cors_origins;

        // Test wildcard origin
        let cors_wildcard = Some("*".to_string());
        let _layer1 = parse_cors_origins(cors_wildcard.as_ref());
        // Should create a layer without error

        // Test specific origins
        let cors_specific = Some("http://localhost:3000,https://admin.example.com".to_string());
        let _layer2 = parse_cors_origins(cors_specific.as_ref());
        // Should create a layer without error

        // Test empty origin
        let cors_empty = Some("".to_string());
        let _layer3 = parse_cors_origins(cors_empty.as_ref());
        // Should create a layer without error (falls back to permissive)

        // Test no origin
        let _layer4 = parse_cors_origins(None);
        // Should create a layer without error (uses default)
    }

    #[tokio::test]
    async fn test_external_address_configuration() {
        // Test external address configuration
        let args = vec![
            "rustfs",
            "/tmp/test",
            "--console-address",
            ":9001",
            "--external-address",
            ":9020",
        ];
        let opt = Opt::parse_from(args);

        assert_eq!(opt.console_address, ":9001");
        assert_eq!(opt.external_address, ":9020".to_string());
    }

    #[tokio::test]
    async fn test_console_tls_configuration() {
        // Test TLS configuration options (now uses shared tls_path)
        let args = vec!["rustfs", "/tmp/test", "--tls-path", "/path/to/tls"];
        let opt = Opt::parse_from(args);

        assert_eq!(opt.tls_path, Some("/path/to/tls".to_string()));
    }

    #[tokio::test]
    async fn test_console_health_check_endpoint() {
        // Test that console health check can be called
        // This test would need a running server to be comprehensive
        // For now, we test configuration and startup behavior
        let args = vec!["rustfs", "/tmp/test", "--console-address", ":0"];
        let opt = Opt::parse_from(args);

        // Verify the configuration supports health checks
        assert!(opt.console_enable, "Console should be enabled for health checks");
    }

    #[tokio::test]
    async fn test_console_separate_logging_target() {
        // Test that console uses separate logging targets
        use tracing::info;

        // This test verifies that logging targets are properly set up
        info!(target: "rustfs::console::startup", "Test console startup log");
        info!(target: "rustfs::console::access", "Test console access log");
        info!(target: "rustfs::console::error", "Test console error log");
        info!(target: "rustfs::console::shutdown", "Test console shutdown log");

        // In a real implementation, we would verify these logs are captured separately
    }

    #[tokio::test]
    async fn test_console_configuration_validation() {
        // Test configuration validation
        let args = vec![
            "rustfs",
            "/tmp/test",
            "--console-enable",
            "true",
            "--console-address",
            ":9001",
            "--external-address",
            ":9020",
        ];
        let opt = Opt::parse_from(args);

        // Verify all console-related configuration is parsed correctly
        assert!(opt.console_enable);
        assert_eq!(opt.console_address, ":9001");
        assert_eq!(opt.external_address, ":9020".to_string());
    }
}

@@ -46,19 +46,77 @@ use tokio_rustls::TlsAcceptor;
use tonic::{Request, Status, metadata::MetadataValue};
use tower::ServiceBuilder;
use tower_http::catch_panic::CatchPanicLayer;
use tower_http::cors::CorsLayer;
use tower_http::cors::{AllowOrigin, Any, CorsLayer};
use tower_http::trace::TraceLayer;
use tracing::{Span, debug, error, info, instrument, warn};

const MI_B: usize = 1024 * 1024;

/// Parse CORS allowed origins from configuration
fn parse_cors_origins(origins: Option<&String>) -> CorsLayer {
    use http::Method;

    let cors_layer = CorsLayer::new()
        .allow_methods([
            Method::GET,
            Method::POST,
            Method::PUT,
            Method::DELETE,
            Method::HEAD,
            Method::OPTIONS,
        ])
        .allow_headers(Any);

    match origins {
        Some(origins_str) if origins_str == "*" => cors_layer.allow_origin(Any).expose_headers(Any),
        Some(origins_str) => {
            let origins: Vec<&str> = origins_str.split(',').map(|s| s.trim()).collect();
            if origins.is_empty() {
                warn!("Empty CORS origins provided, using permissive CORS");
                cors_layer.allow_origin(Any).expose_headers(Any)
            } else {
                // Parse origins with proper error handling
                let mut valid_origins = Vec::new();
                for origin in origins {
                    match origin.parse::<http::HeaderValue>() {
                        Ok(header_value) => {
                            valid_origins.push(header_value);
                        }
                        Err(e) => {
                            warn!("Invalid CORS origin '{}': {}", origin, e);
                        }
                    }
                }

                if valid_origins.is_empty() {
                    warn!("No valid CORS origins found, using permissive CORS");
                    cors_layer.allow_origin(Any).expose_headers(Any)
                } else {
                    info!("Endpoint CORS origins configured: {:?}", valid_origins);
                    cors_layer.allow_origin(AllowOrigin::list(valid_origins)).expose_headers(Any)
                }
            }
        }
        None => {
            debug!("No CORS origins configured for endpoint, using permissive CORS");
            cors_layer.allow_origin(Any).expose_headers(Any)
        }
    }
}

fn get_cors_allowed_origins() -> String {
    std::env::var(rustfs_config::ENV_CORS_ALLOWED_ORIGINS)
        .unwrap_or_else(|_| rustfs_config::DEFAULT_CORS_ALLOWED_ORIGINS.to_string())
        .parse::<String>()
        .unwrap_or(rustfs_config::DEFAULT_CONSOLE_CORS_ALLOWED_ORIGINS.to_string())
}

pub async fn start_http_server(
    opt: &config::Opt,
    worker_state_manager: ServiceStateManager,
) -> Result<tokio::sync::broadcast::Sender<()>> {
    let server_addr = parse_and_resolve_address(opt.address.as_str()).map_err(Error::other)?;
    let server_port = server_addr.port();
    let server_address = server_addr.to_string();

    // The listening address and port are obtained from the parameters
    let listener = {
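
get_cors_allowed_origins above resolves the allowed-origins list from an environment variable with constant-defined fallbacks. A simplified, self-contained sketch of that lookup chain (the literal variable name and the "*" default here are illustrative stand-ins for the rustfs_config constants):

use std::env;

fn cors_allowed_origins() -> String {
    // Stand-ins: the real code reads rustfs_config::ENV_CORS_ALLOWED_ORIGINS
    // and falls back to the rustfs_config DEFAULT_* constants.
    env::var("RUSTFS_CORS_ALLOWED_ORIGINS").unwrap_or_else(|_| "*".to_string())
}

fn main() {
    // With the variable unset this prints the permissive default.
    println!("allowed origins: {}", cors_allowed_origins());
}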

@@ -107,12 +165,6 @@ pub async fn start_http_server(
    let api_endpoints = format!("http://{local_ip}:{server_port}");
    let localhost_endpoint = format!("http://127.0.0.1:{server_port}");
    info!(" API: {} {}", api_endpoints, localhost_endpoint);
    if opt.console_enable {
        info!(
            " WebUI: http://{}:{}/rustfs/console/index.html http://127.0.0.1:{}/rustfs/console/index.html http://{}/rustfs/console/index.html",
            local_ip, server_port, server_port, server_address
        );
    }
    info!(" RootUser: {}", opt.access_key.clone());
    info!(" RootPass: {}", opt.secret_key.clone());
    if DEFAULT_ACCESS_KEY.eq(&opt.access_key) && DEFAULT_SECRET_KEY.eq(&opt.secret_key) {

@@ -134,7 +186,9 @@ pub async fn start_http_server(

    b.set_auth(IAMAuth::new(access_key, secret_key));
    b.set_access(store.clone());
    b.set_route(admin::make_admin_route(opt.console_enable)?);
    // When console runs on separate port, disable console routes on main endpoint
    let console_on_endpoint = opt.console_enable; // Console will run separately
    b.set_route(admin::make_admin_route(console_on_endpoint)?);

    if !opt.server_domains.is_empty() {
        MultiDomain::new(&opt.server_domains).map_err(Error::other)?; // validate domains

@@ -157,6 +211,8 @@ pub async fn start_http_server(
        b.build()
    };

    // Server will be created per connection - this ensures isolation

    tokio::spawn(async move {
        // Record the PID-related metrics of the current process
        let meter = opentelemetry::global::meter("system");

@@ -176,7 +232,17 @@ pub async fn start_http_server(
    let (shutdown_tx, mut shutdown_rx) = tokio::sync::broadcast::channel(1);
    let shutdown_tx_clone = shutdown_tx.clone();

    // Capture CORS configuration for the server loop
    let cors_allowed_origins = get_cors_allowed_origins();
    let cors_allowed_origins = if cors_allowed_origins.is_empty() {
        None
    } else {
        Some(cors_allowed_origins)
    };
    tokio::spawn(async move {
        // Create CORS layer inside the server loop closure
        let cors_layer = parse_cors_origins(cors_allowed_origins.as_ref());

        #[cfg(unix)]
        let (mut sigterm_inner, mut sigint_inner) = {
            use tokio::signal::unix::{SignalKind, signal};

@@ -263,7 +329,14 @@ pub async fn start_http_server(
                warn!(?err, "Failed to set set_send_buffer_size");
            }

            process_connection(socket, tls_acceptor.clone(), http_server.clone(), s3_service.clone(), graceful.clone());
            process_connection(
                socket,
                tls_acceptor.clone(),
                http_server.clone(),
                s3_service.clone(),
                graceful.clone(),
                cors_layer.clone(),
            );
        }

        worker_state_manager.update(ServiceState::Stopping);

@@ -370,6 +443,7 @@ fn process_connection(
    http_server: Arc<ConnBuilder<TokioExecutor>>,
    s3_service: S3Service,
    graceful: Arc<GracefulShutdown>,
    cors_layer: CorsLayer,
) {
    tokio::spawn(async move {
        // Build services inside each connected task to avoid passing complex service types across tasks,

@@ -421,7 +495,7 @@ fn process_connection(
                    debug!("http request failure error: {:?} in {:?}", _error, latency)
                }),
            )
            .layer(CorsLayer::permissive())
            .layer(cors_layer)
            .layer(RedirectLayer)
            .service(service);
        let hybrid_service = TowerToHyperService::new(hybrid_service);

@@ -13,11 +13,16 @@
// limitations under the License.

mod audit;
pub mod console;
mod http;
mod hybrid;
mod layer;
mod service_state;

#[cfg(test)]
mod console_test;

pub(crate) use console::start_console_server;
pub(crate) use http::start_http_server;
pub(crate) use service_state::SHUTDOWN_TIMEOUT;
pub(crate) use service_state::ServiceState;

@@ -73,15 +73,15 @@ pub(crate) async fn wait_for_shutdown() -> ShutdownSignal {

    tokio::select! {
        _ = tokio::signal::ctrl_c() => {
            info!("Received Ctrl-C signal");
            info!("RustFS Received Ctrl-C signal");
            ShutdownSignal::CtrlC
        }
        _ = sigint.recv() => {
            info!("Received SIGINT signal");
            info!("RustFS Received SIGINT signal");
            ShutdownSignal::Sigint
        }
        _ = sigterm.recv() => {
            info!("Received SIGTERM signal");
            info!("RustFS Received SIGTERM signal");
            ShutdownSignal::Sigterm
        }
    }

@@ -121,7 +121,7 @@ impl ServiceStateManager {
    fn notify_systemd(&self, state: &ServiceState) {
        match state {
            ServiceState::Starting => {
                info!("Service is starting...");
                info!("RustFS Service is starting...");
                #[cfg(target_os = "linux")]
                if let Err(e) =
                    libsystemd::daemon::notify(false, &[libsystemd::daemon::NotifyState::Status("Starting...".to_string())])

@@ -130,15 +130,15 @@ impl ServiceStateManager {
                }
            }
            ServiceState::Ready => {
                info!("Service is ready");
                info!("RustFS Service is ready");
                notify_systemd("ready");
            }
            ServiceState::Stopping => {
                info!("Service is stopping...");
                info!("RustFS Service is stopping...");
                notify_systemd("stopping");
            }
            ServiceState::Stopped => {
                info!("Service has stopped");
                info!("RustFS Service has stopped");
                #[cfg(target_os = "linux")]
                if let Err(e) =
                    libsystemd::daemon::notify(false, &[libsystemd::daemon::NotifyState::Status("Stopped".to_string())])

@@ -1230,6 +1230,7 @@ impl S3 for FS {
    let prefix = prefix.unwrap_or_default();
    let max_keys = match max_keys {
        Some(v) if v > 0 && v <= 1000 => v,
        Some(v) if v > 1000 => 1000,
        None => 1000,
        _ => return Err(s3_error!(InvalidArgument, "max-keys must be between 1 and 1000")),
    };
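
The max-keys handling above defaults to 1000 when the parameter is absent, clamps values above the S3 listing limit down to 1000, and rejects non-positive values. The same rule as a standalone sketch (clamp_max_keys is a hypothetical helper for illustration, not a function from the codebase):

fn clamp_max_keys(max_keys: Option<i32>) -> Result<i32, String> {
    match max_keys {
        Some(v) if v > 0 && v <= 1000 => Ok(v),
        Some(v) if v > 1000 => Ok(1000),
        None => Ok(1000),
        _ => Err("max-keys must be between 1 and 1000".to_string()),
    }
}

fn main() {
    assert_eq!(clamp_max_keys(Some(500)), Ok(500));
    assert_eq!(clamp_max_keys(Some(5000)), Ok(1000)); // clamped to the S3 limit
    assert_eq!(clamp_max_keys(None), Ok(1000)); // default page size
    assert!(clamp_max_keys(Some(0)).is_err()); // non-positive values rejected
}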

@@ -45,7 +45,8 @@ export RUSTFS_VOLUMES="./target/volume/test{1...4}"
# export RUSTFS_VOLUMES="./target/volume/test"
export RUSTFS_ADDRESS=":9000"
export RUSTFS_CONSOLE_ENABLE=true
# export RUSTFS_CONSOLE_ADDRESS=":9001"
export RUSTFS_CONSOLE_ADDRESS=":9001"
export RUSTFS_EXTERNAL_ADDRESS=":9020"
# export RUSTFS_SERVER_DOMAINS="localhost:9000"
# HTTPS certificate directory
# export RUSTFS_TLS_PATH="./deploy/certs"

@@ -63,6 +64,9 @@ export RUSTFS_OBS_LOCAL_LOGGING_ENABLED=true # Whether to enable local logging
export RUSTFS_OBS_LOG_DIRECTORY="$current_dir/deploy/logs" # Log directory
export RUSTFS_OBS_LOG_ROTATION_TIME="hour" # Log rotation time unit, can be "second", "minute", "hour", "day"
export RUSTFS_OBS_LOG_ROTATION_SIZE_MB=100 # Log rotation size in MB
export RUSTFS_OBS_LOG_POOL_CAPA=10240
export RUSTFS_OBS_LOG_MESSAGE_CAPA=32768
export RUSTFS_OBS_LOG_FLUSH_MS=300

export RUSTFS_SINKS_FILE_PATH="$current_dir/deploy/logs"
export RUSTFS_SINKS_FILE_BUFFER_SIZE=12
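
The RUSTFS_CONSOLE_ADDRESS / RUSTFS_EXTERNAL_ADDRESS exports above mirror the --console-address / --external-address flags exercised in the tests. A minimal sketch of how such paired flag/env options are typically declared with clap (field names, env names, and defaults here are assumptions for illustration; requires clap's derive and env features):

use clap::Parser;

#[derive(Parser, Debug)]
struct Opt {
    /// Console listen address, e.g. ":9001" (assumed field)
    #[arg(long, env = "RUSTFS_CONSOLE_ADDRESS", default_value = ":9001")]
    console_address: String,

    /// Externally advertised console address, e.g. ":9020" (assumed field)
    #[arg(long, env = "RUSTFS_EXTERNAL_ADDRESS", default_value = ":9020")]
    external_address: String,
}

fn main() {
    // Flags take precedence over environment variables, which beat defaults.
    let opt = Opt::parse();
    println!("console={} external={}", opt.console_address, opt.external_address);
}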