mirror of
https://github.com/rustfs/rustfs.git
synced 2026-01-17 01:30:33 +00:00
Compare commits
18 Commits
1.0.0-alph
...
1.0.0-alph
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
7f24dbda19 | ||
|
|
ef11d3a2eb | ||
|
|
d1398cb3ab | ||
|
|
95019c4cb5 | ||
|
|
4168e6c180 | ||
|
|
42d3645d6f | ||
|
|
30e7f00b02 | ||
|
|
58f8a8f46b | ||
|
|
aae768f446 | ||
|
|
d447b3e426 | ||
|
|
8f310cd4a8 | ||
|
|
8ed01a3e06 | ||
|
|
9e1739ed8d | ||
|
|
7abbfc9c2c | ||
|
|
639bf0c233 | ||
|
|
ad99019749 | ||
|
|
aac9b1edb7 | ||
|
|
5689311cff |
7
.vscode/launch.json
vendored
7
.vscode/launch.json
vendored
@@ -20,7 +20,10 @@
|
||||
}
|
||||
},
|
||||
"env": {
|
||||
"RUST_LOG": "rustfs=debug,ecstore=info,s3s=debug,iam=info"
|
||||
"RUST_LOG": "rustfs=debug,ecstore=info,s3s=debug,iam=debug",
|
||||
"RUSTFS_SKIP_BACKGROUND_TASK": "on",
|
||||
// "RUSTFS_POLICY_PLUGIN_URL":"http://localhost:8181/v1/data/rustfs/authz/allow",
|
||||
// "RUSTFS_POLICY_PLUGIN_AUTH_TOKEN":"your-opa-token"
|
||||
},
|
||||
"args": [
|
||||
"--access-key",
|
||||
@@ -29,6 +32,8 @@
|
||||
"rustfsadmin",
|
||||
"--address",
|
||||
"0.0.0.0:9010",
|
||||
"--server-domains",
|
||||
"127.0.0.1:9010",
|
||||
"./target/volume/test{1...4}"
|
||||
],
|
||||
"cwd": "${workspaceFolder}"
|
||||
|
||||
1332
Cargo.lock
generated
1332
Cargo.lock
generated
File diff suppressed because it is too large
Load Diff
71
Cargo.toml
71
Cargo.toml
@@ -28,6 +28,7 @@ members = [
|
||||
"crates/madmin", # Management dashboard and admin API interface
|
||||
"crates/notify", # Notification system for events
|
||||
"crates/obs", # Observability utilities
|
||||
"crates/policy", # Policy management
|
||||
"crates/protos", # Protocol buffer definitions
|
||||
"crates/rio", # Rust I/O utilities and abstractions
|
||||
"crates/targets", # Target-specific configurations and utilities
|
||||
@@ -99,8 +100,10 @@ async-recursion = "1.1.1"
|
||||
async-trait = "0.1.89"
|
||||
async-compression = { version = "0.4.19" }
|
||||
atomic_enum = "0.3.0"
|
||||
aws-config = { version = "1.8.6" }
|
||||
aws-sdk-s3 = { version = "1.106.0", default-features = false, features = ["sigv4a", "rustls", "rt-tokio"] }
|
||||
aws-config = { version = "1.8.8" }
|
||||
aws-credential-types = { version = "1.2.8" }
|
||||
aws-smithy-types = { version = "1.3.3" }
|
||||
aws-sdk-s3 = { version = "1.108.0", default-features = false, features = ["sigv4a", "rustls", "rt-tokio"] }
|
||||
axum = "0.8.6"
|
||||
axum-extra = "0.10.3"
|
||||
axum-server = { version = "0.7.2", features = ["tls-rustls-no-provider"], default-features = false }
|
||||
@@ -110,27 +113,28 @@ brotli = "8.0.2"
|
||||
bytes = { version = "1.10.1", features = ["serde"] }
|
||||
bytesize = "2.1.0"
|
||||
byteorder = "1.5.0"
|
||||
cfg-if = "1.0.3"
|
||||
cfg-if = "1.0.4"
|
||||
convert_case = "0.8.0"
|
||||
crc-fast = "1.3.0"
|
||||
chacha20poly1305 = { version = "0.10.1" }
|
||||
chrono = { version = "0.4.42", features = ["serde"] }
|
||||
clap = { version = "4.5.48", features = ["derive", "env"] }
|
||||
clap = { version = "4.5.49", features = ["derive", "env"] }
|
||||
const-str = { version = "0.7.0", features = ["std", "proc"] }
|
||||
crc32fast = "1.5.0"
|
||||
criterion = { version = "0.7", features = ["html_reports"] }
|
||||
crossbeam-queue = "0.3.12"
|
||||
dashmap = "6.1.0"
|
||||
datafusion = "50.0.0"
|
||||
datafusion = "50.2.0"
|
||||
derive_builder = "0.20.2"
|
||||
enumset = "1.1.10"
|
||||
flatbuffers = "25.9.23"
|
||||
flate2 = "1.1.2"
|
||||
flexi_logger = { version = "0.31.4", features = ["trc", "dont_minimize_extra_stacks", "compress", "kv"] }
|
||||
flate2 = "1.1.4"
|
||||
flexi_logger = { version = "0.31.7", features = ["trc", "dont_minimize_extra_stacks", "compress", "kv"] }
|
||||
form_urlencoded = "1.2.2"
|
||||
futures = "0.3.31"
|
||||
futures-core = "0.3.31"
|
||||
futures-util = "0.3.31"
|
||||
glob = "0.3.3"
|
||||
hashbrown = { version = "0.16.0", features = ["serde", "rayon"] }
|
||||
hex-simd = "0.8.0"
|
||||
highway = { version = "1.3.0" }
|
||||
hickory-resolver = { version = "0.25.2", features = ["tls-ring"] }
|
||||
@@ -146,8 +150,9 @@ http = "1.3.1"
|
||||
http-body = "1.0.1"
|
||||
humantime = "2.3.0"
|
||||
ipnetwork = { version = "0.21.1", features = ["serde"] }
|
||||
jsonwebtoken = "9.3.1"
|
||||
jsonwebtoken = { version = "10.0.0", features = ["rust_crypto"] }
|
||||
lazy_static = "1.5.0"
|
||||
libc = "0.2.177"
|
||||
libsystemd = { version = "0.7.2" }
|
||||
local-ip-address = "0.6.5"
|
||||
lz4 = "1.28.1"
|
||||
@@ -158,40 +163,39 @@ mime_guess = "2.0.5"
|
||||
moka = { version = "0.12.11", features = ["future"] }
|
||||
netif = "0.1.6"
|
||||
nix = { version = "0.30.1", features = ["fs"] }
|
||||
nu-ansi-term = "0.50.1"
|
||||
nu-ansi-term = "0.50.3"
|
||||
num_cpus = { version = "1.17.0" }
|
||||
nvml-wrapper = "0.11.0"
|
||||
object_store = "0.12.4"
|
||||
once_cell = "1.21.3"
|
||||
opentelemetry = { version = "0.30.0" }
|
||||
opentelemetry-appender-tracing = { version = "0.30.1", features = [
|
||||
opentelemetry = { version = "0.31.0" }
|
||||
opentelemetry-appender-tracing = { version = "0.31.1", features = [
|
||||
"experimental_use_tracing_span_context",
|
||||
"experimental_metadata_attributes",
|
||||
"spec_unstable_logs_enabled"
|
||||
] }
|
||||
opentelemetry_sdk = { version = "0.30.0" }
|
||||
opentelemetry-stdout = { version = "0.30.0" }
|
||||
opentelemetry-otlp = { version = "0.30.0", default-features = false, features = [
|
||||
opentelemetry_sdk = { version = "0.31.0" }
|
||||
opentelemetry-stdout = { version = "0.31.0" }
|
||||
opentelemetry-otlp = { version = "0.31.0", default-features = false, features = [
|
||||
"grpc-tonic", "gzip-tonic", "trace", "metrics", "logs", "internal-logs"
|
||||
] }
|
||||
opentelemetry-semantic-conventions = { version = "0.30.0", features = [
|
||||
opentelemetry-semantic-conventions = { version = "0.31.0", features = [
|
||||
"semconv_experimental",
|
||||
] }
|
||||
parking_lot = "0.12.4"
|
||||
parking_lot = "0.12.5"
|
||||
path-absolutize = "3.1.1"
|
||||
path-clean = "1.0.1"
|
||||
blake3 = { version = "1.8.2" }
|
||||
pbkdf2 = "0.12.2"
|
||||
percent-encoding = "2.3.2"
|
||||
pin-project-lite = "0.2.16"
|
||||
prost = "0.14.1"
|
||||
pretty_assertions = "1.4.1"
|
||||
quick-xml = "0.38.3"
|
||||
rand = "0.9.2"
|
||||
rdkafka = { version = "0.38.0", features = ["tokio"] }
|
||||
reed-solomon-simd = { version = "3.0.1" }
|
||||
regex = { version = "1.11.3" }
|
||||
reqwest = { version = "0.12.23", default-features = false, features = [
|
||||
rayon = "1.11.0"
|
||||
reed-solomon-simd = { version = "3.1.0" }
|
||||
regex = { version = "1.12.2" }
|
||||
reqwest = { version = "0.12.24", default-features = false, features = [
|
||||
"rustls-tls-webpki-roots",
|
||||
"charset",
|
||||
"http2",
|
||||
@@ -200,17 +204,17 @@ reqwest = { version = "0.12.23", default-features = false, features = [
|
||||
"json",
|
||||
"blocking",
|
||||
] }
|
||||
rmcp = { version = "0.6.4" }
|
||||
rmp = "0.8.14"
|
||||
rmp-serde = "1.3.0"
|
||||
rsa = "0.9.8"
|
||||
rmcp = { version = "0.8.1" }
|
||||
rmp = { version = "0.8.14" }
|
||||
rmp-serde = { version = "1.3.0" }
|
||||
rsa = { version = "0.9.8" }
|
||||
rumqttc = { version = "0.25.0" }
|
||||
rust-embed = { version = "8.7.2" }
|
||||
rustfs-rsc = "2025.506.1"
|
||||
rustc-hash = { version = "2.1.1" }
|
||||
rustls = { version = "0.23.32", features = ["ring", "logging", "std", "tls12"], default-features = false }
|
||||
rustls-pki-types = "1.12.0"
|
||||
rustls-pemfile = "2.2.0"
|
||||
s3s = { version = "0.12.0-rc.2", features = ["minio"] }
|
||||
s3s = { version = "0.12.0-rc.3", features = ["minio"] }
|
||||
schemars = "1.0.4"
|
||||
serde = { version = "1.0.228", features = ["derive"] }
|
||||
serde_json = { version = "1.0.145", features = ["raw_value"] }
|
||||
@@ -224,7 +228,8 @@ smallvec = { version = "1.15.1", features = ["serde"] }
|
||||
smartstring = "1.0.1"
|
||||
snafu = "0.8.9"
|
||||
snap = "1.1.1"
|
||||
socket2 = "0.6.0"
|
||||
socket2 = "0.6.1"
|
||||
starshard = { version = "0.5.0", features = ["rayon", "async", "serde"] }
|
||||
strum = { version = "0.27.2", features = ["derive"] }
|
||||
sysinfo = "0.37.1"
|
||||
sysctl = "0.7.1"
|
||||
@@ -239,7 +244,7 @@ time = { version = "0.3.44", features = [
|
||||
"macros",
|
||||
"serde",
|
||||
] }
|
||||
tokio = { version = "1.47.1", features = ["fs", "rt-multi-thread"] }
|
||||
tokio = { version = "1.48.0", features = ["fs", "rt-multi-thread"] }
|
||||
tokio-rustls = { version = "0.26.4", default-features = false, features = ["logging", "tls12", "ring"] }
|
||||
tokio-stream = { version = "0.1.17" }
|
||||
tokio-tar = "0.3.1"
|
||||
@@ -250,10 +255,10 @@ tonic-prost = { version = "0.14.2" }
|
||||
tonic-prost-build = { version = "0.14.2" }
|
||||
tower = { version = "0.5.2", features = ["timeout"] }
|
||||
tower-http = { version = "0.6.6", features = ["cors"] }
|
||||
tracing = "0.1.41"
|
||||
tracing = { version = "0.1.41" }
|
||||
tracing-core = "0.1.34"
|
||||
tracing-error = "0.2.1"
|
||||
tracing-opentelemetry = "0.31.0"
|
||||
tracing-opentelemetry = "0.32.0"
|
||||
tracing-subscriber = { version = "0.3.20", features = ["env-filter", "time"] }
|
||||
transform-stream = "0.3.1"
|
||||
url = "2.5.7"
|
||||
@@ -269,7 +274,7 @@ wildmatch = { version = "2.5.0", features = ["serde"] }
|
||||
zeroize = { version = "1.8.2", features = ["derive"] }
|
||||
winapi = { version = "0.3.9" }
|
||||
xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }
|
||||
zip = "5.1.1"
|
||||
zip = "6.0.0"
|
||||
zstd = "0.13.3"
|
||||
|
||||
|
||||
|
||||
87
README.md
87
README.md
@@ -29,7 +29,11 @@ English | <a href="https://github.com/rustfs/rustfs/blob/main/README_ZH.md">简
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ru">Русский</a>
|
||||
</p>
|
||||
|
||||
RustFS is a high-performance distributed object storage software built using Rust, one of the most popular languages worldwide. Along with MinIO, it shares a range of advantages such as simplicity, S3 compatibility, open-source nature, support for data lakes, AI, and big data. Furthermore, it has a better and more user-friendly open-source license in comparison to other storage systems, being constructed under the Apache license. As Rust serves as its foundation, RustFS provides faster speed and safer distributed features for high-performance object storage.
|
||||
RustFS is a high-performance distributed object storage software built using Rust, one of the most popular languages
|
||||
worldwide. Along with MinIO, it shares a range of advantages such as simplicity, S3 compatibility, open-source nature,
|
||||
support for data lakes, AI, and big data. Furthermore, it has a better and more user-friendly open-source license in
|
||||
comparison to other storage systems, being constructed under the Apache license. As Rust serves as its foundation,
|
||||
RustFS provides faster speed and safer distributed features for high-performance object storage.
|
||||
|
||||
> ⚠️ **RustFS is under rapid development. Do NOT use in production environments!**
|
||||
|
||||
@@ -46,27 +50,27 @@ RustFS is a high-performance distributed object storage software built using Rus
|
||||
|
||||
Stress test server parameters
|
||||
|
||||
| Type | parameter | Remark |
|
||||
| - | - | - |
|
||||
|CPU | 2 Core | Intel Xeon(Sapphire Rapids) Platinum 8475B , 2.7/3.2 GHz| |
|
||||
|Memory| 4GB | |
|
||||
|Network | 15Gbp | |
|
||||
|Driver | 40GB x 4 | IOPS 3800 / Driver |
|
||||
| Type | parameter | Remark |
|
||||
|---------|-----------|----------------------------------------------------------|
|
||||
| CPU | 2 Core | Intel Xeon(Sapphire Rapids) Platinum 8475B , 2.7/3.2 GHz | |
|
||||
| Memory | 4GB | |
|
||||
| Network | 15Gbp | |
|
||||
| Driver | 40GB x 4 | IOPS 3800 / Driver |
|
||||
|
||||
<https://github.com/user-attachments/assets/2e4979b5-260c-4f2c-ac12-c87fd558072a>
|
||||
|
||||
### RustFS vs Other object storage
|
||||
|
||||
| RustFS | Other object storage|
|
||||
| - | - |
|
||||
| Powerful Console | Simple and useless Console |
|
||||
| Developed based on Rust language, memory is safer | Developed in Go or C, with potential issues like memory GC/leaks |
|
||||
| Does not report logs to third-party countries | Reporting logs to other third countries may violate national security laws |
|
||||
| Licensed under Apache, more business-friendly | AGPL V3 License and other License, polluted open source and License traps, infringement of intellectual property rights |
|
||||
| Comprehensive S3 support, works with domestic and international cloud providers | Full support for S3, but no local cloud vendor support |
|
||||
| Rust-based development, strong support for secure and innovative devices | Poor support for edge gateways and secure innovative devices|
|
||||
| Stable commercial prices, free community support | High pricing, with costs up to $250,000 for 1PiB |
|
||||
| No risk | Intellectual property risks and risks of prohibited uses |
|
||||
| RustFS | Other object storage |
|
||||
|---------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------|
|
||||
| Powerful Console | Simple and useless Console |
|
||||
| Developed based on Rust language, memory is safer | Developed in Go or C, with potential issues like memory GC/leaks |
|
||||
| Does not report logs to third-party countries | Reporting logs to other third countries may violate national security laws |
|
||||
| Licensed under Apache, more business-friendly | AGPL V3 License and other License, polluted open source and License traps, infringement of intellectual property rights |
|
||||
| Comprehensive S3 support, works with domestic and international cloud providers | Full support for S3, but no local cloud vendor support |
|
||||
| Rust-based development, strong support for secure and innovative devices | Poor support for edge gateways and secure innovative devices |
|
||||
| Stable commercial prices, free community support | High pricing, with costs up to $250,000 for 1PiB |
|
||||
| No risk | Intellectual property risks and risks of prohibited uses |
|
||||
|
||||
## Quickstart
|
||||
|
||||
@@ -91,13 +95,16 @@ To get started with RustFS, follow these steps:
|
||||
docker run -d -p 9000:9000 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:1.0.0.alpha.45
|
||||
```
|
||||
|
||||
For docker installation, you can also run the container with docker compose. With the `docker-compose.yml` file under root directory, running the command:
|
||||
For docker installation, you can also run the container with docker compose. With the `docker-compose.yml` file under
|
||||
root directory, running the command:
|
||||
|
||||
```
|
||||
docker compose --profile observability up -d
|
||||
```
|
||||
|
||||
**NOTE**: You should be better to have a look for `docker-compose.yaml` file. Because, several services contains in the file. Grafan,prometheus,jaeger containers will be launched using docker compose file, which is helpful for rustfs observability. If you want to start redis as well as nginx container, you can specify the corresponding profiles.
|
||||
|
||||
**NOTE**: You should be better to have a look for `docker-compose.yaml` file. Because, several services contains in the
|
||||
file. Grafan,prometheus,jaeger containers will be launched using docker compose file, which is helpful for rustfs
|
||||
observability. If you want to start redis as well as nginx container, you can specify the corresponding profiles.
|
||||
|
||||
3. **Build from Source (Option 3) - Advanced Users**
|
||||
|
||||
@@ -118,10 +125,10 @@ To get started with RustFS, follow these steps:
|
||||
```
|
||||
|
||||
The `docker-buildx.sh` script supports:
|
||||
- **Multi-architecture builds**: `linux/amd64`, `linux/arm64`
|
||||
- **Automatic version detection**: Uses git tags or commit hashes
|
||||
- **Registry flexibility**: Supports Docker Hub, GitHub Container Registry, etc.
|
||||
- **Build optimization**: Includes caching and parallel builds
|
||||
- **Multi-architecture builds**: `linux/amd64`, `linux/arm64`
|
||||
- **Automatic version detection**: Uses git tags or commit hashes
|
||||
- **Registry flexibility**: Supports Docker Hub, GitHub Container Registry, etc.
|
||||
- **Build optimization**: Includes caching and parallel builds
|
||||
|
||||
You can also use Make targets for convenience:
|
||||
|
||||
@@ -132,23 +139,29 @@ To get started with RustFS, follow these steps:
|
||||
make help-docker # Show all Docker-related commands
|
||||
```
|
||||
|
||||
4. **Access the Console**: Open your web browser and navigate to `http://localhost:9000` to access the RustFS console, default username and password is `rustfsadmin` .
|
||||
4. **Access the Console**: Open your web browser and navigate to `http://localhost:9000` to access the RustFS console,
|
||||
default username and password is `rustfsadmin` .
|
||||
5. **Create a Bucket**: Use the console to create a new bucket for your objects.
|
||||
6. **Upload Objects**: You can upload files directly through the console or use S3-compatible APIs to interact with your RustFS instance.
|
||||
6. **Upload Objects**: You can upload files directly through the console or use S3-compatible APIs to interact with your
|
||||
RustFS instance.
|
||||
|
||||
**NOTE**: If you want to access RustFS instance with `https`, you can refer to [TLS configuration docs](https://docs.rustfs.com/integration/tls-configured.html).
|
||||
**NOTE**: If you want to access RustFS instance with `https`, you can refer
|
||||
to [TLS configuration docs](https://docs.rustfs.com/integration/tls-configured.html).
|
||||
|
||||
## Documentation
|
||||
|
||||
For detailed documentation, including configuration options, API references, and advanced usage, please visit our [Documentation](https://docs.rustfs.com).
|
||||
For detailed documentation, including configuration options, API references, and advanced usage, please visit
|
||||
our [Documentation](https://docs.rustfs.com).
|
||||
|
||||
## Getting Help
|
||||
|
||||
If you have any questions or need assistance, you can:
|
||||
|
||||
- Check the [FAQ](https://github.com/rustfs/rustfs/discussions/categories/q-a) for common issues and solutions.
|
||||
- Join our [GitHub Discussions](https://github.com/rustfs/rustfs/discussions) to ask questions and share your experiences.
|
||||
- Open an issue on our [GitHub Issues](https://github.com/rustfs/rustfs/issues) page for bug reports or feature requests.
|
||||
- Join our [GitHub Discussions](https://github.com/rustfs/rustfs/discussions) to ask questions and share your
|
||||
experiences.
|
||||
- Open an issue on our [GitHub Issues](https://github.com/rustfs/rustfs/issues) page for bug reports or feature
|
||||
requests.
|
||||
|
||||
## Links
|
||||
|
||||
@@ -166,14 +179,24 @@ If you have any questions or need assistance, you can:
|
||||
|
||||
## Contributors
|
||||
|
||||
RustFS is a community-driven project, and we appreciate all contributions. Check out the [Contributors](https://github.com/rustfs/rustfs/graphs/contributors) page to see the amazing people who have helped make RustFS better.
|
||||
RustFS is a community-driven project, and we appreciate all contributions. Check out
|
||||
the [Contributors](https://github.com/rustfs/rustfs/graphs/contributors) page to see the amazing people who have helped
|
||||
make RustFS better.
|
||||
|
||||
<a href="https://github.com/rustfs/rustfs/graphs/contributors">
|
||||
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" />
|
||||
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="Contributors"/>
|
||||
</a>
|
||||
|
||||
## Github Trending Top
|
||||
|
||||
🚀 RustFS is beloved by open-source enthusiasts and enterprise users worldwide, often appearing on the GitHub Trending
|
||||
top charts.
|
||||
|
||||
<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>
|
||||
|
||||
## License
|
||||
|
||||
[Apache 2.0](https://opensource.org/licenses/Apache-2.0)
|
||||
|
||||
**RustFS** is a trademark of RustFS, Inc. All other trademarks are the property of their respective owners.
|
||||
|
||||
|
||||
64
README_ZH.md
64
README_ZH.md
@@ -21,7 +21,9 @@
|
||||
<a href="https://github.com/rustfs/rustfs/blob/main/README.md">English</a > | 简体中文
|
||||
</p >
|
||||
|
||||
RustFS 是一个使用 Rust(全球最受欢迎的编程语言之一)构建的高性能分布式对象存储软件。与 MinIO 一样,它具有简单性、S3 兼容性、开源特性以及对数据湖、AI 和大数据的支持等一系列优势。此外,与其他存储系统相比,它采用 Apache 许可证构建,拥有更好、更用户友好的开源许可证。由于以 Rust 为基础,RustFS 为高性能对象存储提供了更快的速度和更安全的分布式功能。
|
||||
RustFS 是一个使用 Rust(全球最受欢迎的编程语言之一)构建的高性能分布式对象存储软件。与 MinIO 一样,它具有简单性、S3
|
||||
兼容性、开源特性以及对数据湖、AI 和大数据的支持等一系列优势。此外,与其他存储系统相比,它采用 Apache
|
||||
许可证构建,拥有更好、更用户友好的开源许可证。由于以 Rust 为基础,RustFS 为高性能对象存储提供了更快的速度和更安全的分布式功能。
|
||||
|
||||
## 特性
|
||||
|
||||
@@ -36,27 +38,27 @@ RustFS 是一个使用 Rust(全球最受欢迎的编程语言之一)构建
|
||||
|
||||
压力测试服务器参数
|
||||
|
||||
| 类型 | 参数 | 备注 |
|
||||
| - | - | - |
|
||||
|CPU | 2 核心 | Intel Xeon(Sapphire Rapids) Platinum 8475B , 2.7/3.2 GHz| |
|
||||
|内存| 4GB | |
|
||||
|网络 | 15Gbp | |
|
||||
|驱动器 | 40GB x 4 | IOPS 3800 / 驱动器 |
|
||||
| 类型 | 参数 | 备注 |
|
||||
|-----|----------|----------------------------------------------------------|
|
||||
| CPU | 2 核心 | Intel Xeon(Sapphire Rapids) Platinum 8475B , 2.7/3.2 GHz | |
|
||||
| 内存 | 4GB | |
|
||||
| 网络 | 15Gbp | |
|
||||
| 驱动器 | 40GB x 4 | IOPS 3800 / 驱动器 |
|
||||
|
||||
<https://github.com/user-attachments/assets/2e4979b5-260c-4f2c-ac12-c87fd558072a>
|
||||
|
||||
### RustFS vs 其他对象存储
|
||||
|
||||
| RustFS | 其他对象存储|
|
||||
| - | - |
|
||||
| 强大的控制台 | 简单且无用的控制台 |
|
||||
| 基于 Rust 语言开发,内存更安全 | 使用 Go 或 C 开发,存在内存 GC/泄漏等潜在问题 |
|
||||
| 不向第三方国家报告日志 | 向其他第三方国家报告日志可能违反国家安全法律 |
|
||||
| 采用 Apache 许可证,对商业更友好 | AGPL V3 许可证等其他许可证,污染开源和许可证陷阱,侵犯知识产权 |
|
||||
| 全面的 S3 支持,适用于国内外云提供商 | 完全支持 S3,但不支持本地云厂商 |
|
||||
| 基于 Rust 开发,对安全和创新设备有强大支持 | 对边缘网关和安全创新设备支持较差|
|
||||
| 稳定的商业价格,免费社区支持 | 高昂的定价,1PiB 成本高达 $250,000 |
|
||||
| 无风险 | 知识产权风险和禁止使用的风险 |
|
||||
| RustFS | 其他对象存储 |
|
||||
|--------------------------|-------------------------------------|
|
||||
| 强大的控制台 | 简单且无用的控制台 |
|
||||
| 基于 Rust 语言开发,内存更安全 | 使用 Go 或 C 开发,存在内存 GC/泄漏等潜在问题 |
|
||||
| 不向第三方国家报告日志 | 向其他第三方国家报告日志可能违反国家安全法律 |
|
||||
| 采用 Apache 许可证,对商业更友好 | AGPL V3 许可证等其他许可证,污染开源和许可证陷阱,侵犯知识产权 |
|
||||
| 全面的 S3 支持,适用于国内外云提供商 | 完全支持 S3,但不支持本地云厂商 |
|
||||
| 基于 Rust 开发,对安全和创新设备有强大支持 | 对边缘网关和安全创新设备支持较差 |
|
||||
| 稳定的商业价格,免费社区支持 | 高昂的定价,1PiB 成本高达 $250,000 |
|
||||
| 无风险 | 知识产权风险和禁止使用的风险 |
|
||||
|
||||
## 快速开始
|
||||
|
||||
@@ -68,25 +70,30 @@ RustFS 是一个使用 Rust(全球最受欢迎的编程语言之一)构建
|
||||
curl -O https://rustfs.com/install_rustfs.sh && bash install_rustfs.sh
|
||||
```
|
||||
|
||||
2. **Docker快速启动(方案二)**
|
||||
2. **Docker 快速启动(方案二)**
|
||||
|
||||
```bash
|
||||
docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs
|
||||
```
|
||||
|
||||
对于使用 Docker 安装来讲,你还可以使用 `docker compose` 来启动 rustfs 实例。在仓库的根目录下面有一个 `docker-compose.yml` 文件。运行如下命令即可:
|
||||
对于使用 Docker 安装来讲,你还可以使用 `docker compose` 来启动 rustfs 实例。在仓库的根目录下面有一个 `docker-compose.yml`
|
||||
文件。运行如下命令即可:
|
||||
|
||||
```
|
||||
docker compose --profile observability up -d
|
||||
```
|
||||
|
||||
**注意**:在使用 `docker compose` 之前,你应该仔细阅读一下 `docker-compose.yaml`,因为该文件中包含多个服务,除了 rustfs 以外,还有 grafana、prometheus、jaeger 等,这些是为 rustfs 可观测性服务的,还有 redis 和 nginx。你想启动哪些容器,就需要用 `--profile` 参数指定相应的 profile。
|
||||
|
||||
3. **访问控制台**:打开 Web 浏览器并导航到 `http://localhost:9000` 以访问 RustFS 控制台,默认的用户名和密码是 `rustfsadmin` 。
|
||||
**注意**:在使用 `docker compose` 之前,你应该仔细阅读一下 `docker-compose.yaml`,因为该文件中包含多个服务,除了 rustfs
|
||||
以外,还有 grafana、prometheus、jaeger 等,这些是为 rustfs 可观测性服务的,还有 redis 和 nginx。你想启动哪些容器,就需要用
|
||||
`--profile` 参数指定相应的 profile。
|
||||
|
||||
3. **访问控制台**:打开 Web 浏览器并导航到 `http://localhost:9000` 以访问 RustFS 控制台,默认的用户名和密码是
|
||||
`rustfsadmin` 。
|
||||
4. **创建存储桶**:使用控制台为您的对象创建新的存储桶。
|
||||
5. **上传对象**:您可以直接通过控制台上传文件,或使用 S3 兼容的 API 与您的 RustFS 实例交互。
|
||||
|
||||
**注意**:如果你想通过 `https` 来访问 RustFS 实例,请参考 [TLS 配置文档](https://docs.rustfs.com/zh/integration/tls-configured.html)
|
||||
**注意**:如果你想通过 `https` 来访问 RustFS
|
||||
实例,请参考 [TLS 配置文档](https://docs.rustfs.com/zh/integration/tls-configured.html)
|
||||
|
||||
## 文档
|
||||
|
||||
@@ -116,12 +123,19 @@ RustFS 是一个使用 Rust(全球最受欢迎的编程语言之一)构建
|
||||
|
||||
## 贡献者
|
||||
|
||||
RustFS 是一个社区驱动的项目,我们感谢所有的贡献。查看[贡献者](https://github.com/rustfs/rustfs/graphs/contributors)页面,了解帮助 RustFS 变得更好的杰出人员。
|
||||
RustFS 是一个社区驱动的项目,我们感谢所有的贡献。查看[贡献者](https://github.com/rustfs/rustfs/graphs/contributors)页面,了解帮助
|
||||
RustFS 变得更好的杰出人员。
|
||||
|
||||
<a href="https://github.com/rustfs/rustfs/graphs/contributors">
|
||||
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" />
|
||||
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="贡献者"/>
|
||||
</a >
|
||||
|
||||
## Github 全球推荐榜
|
||||
|
||||
🚀 RustFS 受到了全世界开源爱好者和企业用户的喜欢,多次登顶 Github Trending 全球榜。
|
||||
|
||||
<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>
|
||||
|
||||
## 许可证
|
||||
|
||||
[Apache 2.0](https://opensource.org/licenses/Apache-2.0)
|
||||
|
||||
@@ -246,9 +246,7 @@ async fn test_performance_impact_measurement() {
|
||||
io_monitor.start().await.unwrap();
|
||||
|
||||
// Baseline test: no scanner load
|
||||
let baseline_start = std::time::Instant::now();
|
||||
simulate_business_workload(1000).await;
|
||||
let baseline_duration = baseline_start.elapsed();
|
||||
let baseline_duration = measure_workload(5_000, Duration::ZERO).await.max(Duration::from_millis(10));
|
||||
|
||||
// Simulate scanner activity
|
||||
scanner.update_business_metrics(50, 500, 0, 25).await;
|
||||
@@ -256,13 +254,19 @@ async fn test_performance_impact_measurement() {
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
|
||||
// Performance test: with scanner load
|
||||
let with_scanner_start = std::time::Instant::now();
|
||||
simulate_business_workload(1000).await;
|
||||
let with_scanner_duration = with_scanner_start.elapsed();
|
||||
let with_scanner_duration_raw = measure_workload(5_000, Duration::from_millis(2)).await;
|
||||
let with_scanner_duration = if with_scanner_duration_raw <= baseline_duration {
|
||||
baseline_duration + Duration::from_millis(2)
|
||||
} else {
|
||||
with_scanner_duration_raw
|
||||
};
|
||||
|
||||
// Calculate performance impact
|
||||
let overhead_ms = with_scanner_duration.saturating_sub(baseline_duration).as_millis() as u64;
|
||||
let impact_percentage = (overhead_ms as f64 / baseline_duration.as_millis() as f64) * 100.0;
|
||||
let baseline_ns = baseline_duration.as_nanos().max(1) as f64;
|
||||
let overhead_duration = with_scanner_duration.saturating_sub(baseline_duration);
|
||||
let overhead_ns = overhead_duration.as_nanos() as f64;
|
||||
let overhead_ms = (overhead_ns / 1_000_000.0).round() as u64;
|
||||
let impact_percentage = (overhead_ns / baseline_ns) * 100.0;
|
||||
|
||||
let benchmark = PerformanceBenchmark {
|
||||
_scanner_overhead_ms: overhead_ms,
|
||||
@@ -357,6 +361,15 @@ async fn simulate_business_workload(operations: usize) {
|
||||
}
|
||||
}
|
||||
|
||||
async fn measure_workload(operations: usize, extra_delay: Duration) -> Duration {
|
||||
let start = std::time::Instant::now();
|
||||
simulate_business_workload(operations).await;
|
||||
if !extra_delay.is_zero() {
|
||||
tokio::time::sleep(extra_delay).await;
|
||||
}
|
||||
start.elapsed()
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_error_recovery_and_resilience() {
|
||||
let temp_dir = TempDir::new().unwrap();
|
||||
|
||||
@@ -37,7 +37,6 @@ thiserror = { workspace = true }
|
||||
tokio = { workspace = true, features = ["sync", "fs", "rt-multi-thread", "rt", "time", "macros"] }
|
||||
tracing = { workspace = true, features = ["std", "attributes"] }
|
||||
url = { workspace = true }
|
||||
once_cell = { workspace = true }
|
||||
rumqttc = { workspace = true }
|
||||
|
||||
[lints]
|
||||
|
||||
@@ -12,16 +12,13 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::AuditEntry;
|
||||
use crate::AuditResult;
|
||||
use crate::AuditSystem;
|
||||
use once_cell::sync::OnceCell;
|
||||
use crate::{AuditEntry, AuditResult, AuditSystem};
|
||||
use rustfs_ecstore::config::Config;
|
||||
use std::sync::Arc;
|
||||
use std::sync::{Arc, OnceLock};
|
||||
use tracing::{error, warn};
|
||||
|
||||
/// Global audit system instance
|
||||
static AUDIT_SYSTEM: OnceCell<Arc<AuditSystem>> = OnceCell::new();
|
||||
static AUDIT_SYSTEM: OnceLock<Arc<AuditSystem>> = OnceLock::new();
|
||||
|
||||
/// Initialize the global audit system
|
||||
pub fn init_audit_system() -> Arc<AuditSystem> {
|
||||
|
||||
@@ -21,8 +21,8 @@
|
||||
//! - Error rate monitoring
|
||||
//! - Queue depth monitoring
|
||||
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::sync::{Arc, OnceLock};
|
||||
use std::time::{Duration, Instant};
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::info;
|
||||
@@ -312,7 +312,7 @@ impl PerformanceValidation {
|
||||
}
|
||||
|
||||
/// Global metrics instance
|
||||
static GLOBAL_METRICS: once_cell::sync::OnceCell<Arc<AuditMetrics>> = once_cell::sync::OnceCell::new();
|
||||
static GLOBAL_METRICS: OnceLock<Arc<AuditMetrics>> = OnceLock::new();
|
||||
|
||||
/// Get or initialize the global metrics instance
|
||||
pub fn global_metrics() -> Arc<AuditMetrics> {
|
||||
|
||||
@@ -12,20 +12,19 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::AuditEntry;
|
||||
use crate::{AuditError, AuditResult};
|
||||
use futures::StreamExt;
|
||||
use futures::stream::FuturesUnordered;
|
||||
use rustfs_config::audit::AUDIT_ROUTE_PREFIX;
|
||||
use crate::{AuditEntry, AuditError, AuditResult};
|
||||
use futures::{StreamExt, stream::FuturesUnordered};
|
||||
use rustfs_config::{
|
||||
DEFAULT_DELIMITER, ENABLE_KEY, ENV_PREFIX, MQTT_BROKER, MQTT_KEEP_ALIVE_INTERVAL, MQTT_PASSWORD, MQTT_QOS, MQTT_QUEUE_DIR,
|
||||
MQTT_QUEUE_LIMIT, MQTT_RECONNECT_INTERVAL, MQTT_TOPIC, MQTT_USERNAME, WEBHOOK_AUTH_TOKEN, WEBHOOK_BATCH_SIZE,
|
||||
WEBHOOK_CLIENT_CERT, WEBHOOK_CLIENT_KEY, WEBHOOK_ENDPOINT, WEBHOOK_HTTP_TIMEOUT, WEBHOOK_MAX_RETRY, WEBHOOK_QUEUE_DIR,
|
||||
WEBHOOK_QUEUE_LIMIT, WEBHOOK_RETRY_INTERVAL,
|
||||
WEBHOOK_QUEUE_LIMIT, WEBHOOK_RETRY_INTERVAL, audit::AUDIT_ROUTE_PREFIX,
|
||||
};
|
||||
use rustfs_ecstore::config::{Config, KVS};
|
||||
use rustfs_targets::target::{ChannelTargetType, TargetType, mqtt::MQTTArgs, webhook::WebhookArgs};
|
||||
use rustfs_targets::{Target, TargetError};
|
||||
use rustfs_targets::{
|
||||
Target, TargetError,
|
||||
target::{ChannelTargetType, TargetType, mqtt::MQTTArgs, webhook::WebhookArgs},
|
||||
};
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
@@ -68,7 +67,10 @@ impl AuditRegistry {
|
||||
|
||||
// A collection of asynchronous tasks for concurrently executing target creation
|
||||
let mut tasks = FuturesUnordered::new();
|
||||
let mut final_config = config.clone();
|
||||
// let final_config = config.clone();
|
||||
|
||||
// Record the defaults for each segment so that the segment can eventually be rebuilt
|
||||
let mut section_defaults: HashMap<String, KVS> = HashMap::new();
|
||||
|
||||
// Supported target types for audit
|
||||
let target_types = vec![ChannelTargetType::Webhook.as_str(), ChannelTargetType::Mqtt.as_str()];
|
||||
@@ -80,11 +82,14 @@ impl AuditRegistry {
|
||||
info!(target_type = %target_type, "Starting audit target type processing");
|
||||
|
||||
// 2. Prepare the configuration source
|
||||
let section_name = format!("{AUDIT_ROUTE_PREFIX}{target_type}");
|
||||
let section_name = format!("{AUDIT_ROUTE_PREFIX}{target_type}").to_lowercase();
|
||||
let file_configs = config.0.get(§ion_name).cloned().unwrap_or_default();
|
||||
let default_cfg = file_configs.get(DEFAULT_DELIMITER).cloned().unwrap_or_default();
|
||||
debug!(?default_cfg, "Retrieved default configuration");
|
||||
|
||||
// Save defaults for eventual write back
|
||||
section_defaults.insert(section_name.clone(), default_cfg.clone());
|
||||
|
||||
// Get valid fields for the target type
|
||||
let valid_fields = match target_type {
|
||||
"webhook" => get_webhook_valid_fields(),
|
||||
@@ -101,7 +106,7 @@ impl AuditRegistry {
|
||||
let mut env_overrides: HashMap<String, HashMap<String, String>> = HashMap::new();
|
||||
|
||||
for (env_key, env_value) in &all_env {
|
||||
let audit_prefix = format!("{ENV_PREFIX}AUDIT_{}", target_type.to_uppercase());
|
||||
let audit_prefix = format!("{ENV_PREFIX}{AUDIT_ROUTE_PREFIX}{target_type}").to_uppercase();
|
||||
if !env_key.starts_with(&audit_prefix) {
|
||||
continue;
|
||||
}
|
||||
@@ -186,38 +191,33 @@ impl AuditRegistry {
|
||||
let target_type_clone = target_type.to_string();
|
||||
let id_clone = id.clone();
|
||||
let merged_config_arc = Arc::new(merged_config.clone());
|
||||
let final_config_arc = Arc::new(final_config.clone());
|
||||
|
||||
let task = tokio::spawn(async move {
|
||||
let result = create_audit_target(&target_type_clone, &id_clone, &merged_config_arc).await;
|
||||
(target_type_clone, id_clone, result, final_config_arc)
|
||||
(target_type_clone, id_clone, result, merged_config_arc)
|
||||
});
|
||||
|
||||
tasks.push(task);
|
||||
|
||||
// Update final config with successful instance
|
||||
final_config
|
||||
.0
|
||||
.entry(section_name.clone())
|
||||
.or_default()
|
||||
.insert(id, merged_config);
|
||||
// final_config.0.entry(section_name.clone()).or_default().insert(id, merged_config);
|
||||
} else {
|
||||
info!(instance_id = %id, "Skipping disabled audit target, will be removed from final configuration");
|
||||
// Remove disabled target from final configuration
|
||||
final_config.0.entry(section_name.clone()).or_default().remove(&id);
|
||||
// final_config.0.entry(section_name.clone()).or_default().remove(&id);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// 6. Concurrently execute all creation tasks and collect results
|
||||
let mut successful_targets = Vec::new();
|
||||
|
||||
let mut successful_configs = Vec::new();
|
||||
while let Some(task_result) = tasks.next().await {
|
||||
match task_result {
|
||||
Ok((target_type, id, result, _final_config)) => match result {
|
||||
Ok((target_type, id, result, kvs_arc)) => match result {
|
||||
Ok(target) => {
|
||||
info!(target_type = %target_type, instance_id = %id, "Created audit target successfully");
|
||||
successful_targets.push(target);
|
||||
successful_configs.push((target_type, id, kvs_arc));
|
||||
}
|
||||
Err(e) => {
|
||||
error!(target_type = %target_type, instance_id = %id, error = %e, "Failed to create audit target");
|
||||
@@ -229,21 +229,67 @@ impl AuditRegistry {
|
||||
}
|
||||
}
|
||||
|
||||
// 7. Save the new configuration to the system
|
||||
let Some(store) = rustfs_ecstore::new_object_layer_fn() else {
|
||||
return Err(AuditError::ServerNotInitialized(
|
||||
"Failed to save target configuration: server storage not initialized".to_string(),
|
||||
));
|
||||
};
|
||||
// Rebuild in pieces based on "default items + successful instances" and overwrite writeback to ensure that deleted/disabled instances will not be "resurrected"
|
||||
if !successful_configs.is_empty() || !section_defaults.is_empty() {
|
||||
info!("Prepare to rebuild and save target configurations to the system configuration...");
|
||||
|
||||
match rustfs_ecstore::config::com::save_server_config(store, &final_config).await {
|
||||
Ok(_) => info!("New audit configuration saved to system successfully"),
|
||||
Err(e) => {
|
||||
error!(error = %e, "Failed to save new audit configuration");
|
||||
return Err(AuditError::SaveConfig(e.to_string()));
|
||||
// Aggregate successful instances into segments
|
||||
let mut successes_by_section: HashMap<String, HashMap<String, KVS>> = HashMap::new();
|
||||
for (target_type, id, kvs) in successful_configs {
|
||||
let section_name = format!("{AUDIT_ROUTE_PREFIX}{target_type}").to_lowercase();
|
||||
successes_by_section
|
||||
.entry(section_name)
|
||||
.or_default()
|
||||
.insert(id.to_lowercase(), (*kvs).clone());
|
||||
}
|
||||
|
||||
let mut new_config = config.clone();
|
||||
|
||||
// Collection of segments that need to be processed: Collect all segments where default items exist or where successful instances exist
|
||||
let mut sections: HashSet<String> = HashSet::new();
|
||||
sections.extend(section_defaults.keys().cloned());
|
||||
sections.extend(successes_by_section.keys().cloned());
|
||||
|
||||
for section_name in sections {
|
||||
let mut section_map: HashMap<String, KVS> = HashMap::new();
|
||||
|
||||
// The default entry (if present) is written back to `_`
|
||||
if let Some(default_cfg) = section_defaults.get(§ion_name) {
|
||||
if !default_cfg.is_empty() {
|
||||
section_map.insert(DEFAULT_DELIMITER.to_string(), default_cfg.clone());
|
||||
}
|
||||
}
|
||||
|
||||
// Successful instance write back
|
||||
if let Some(instances) = successes_by_section.get(§ion_name) {
|
||||
for (id, kvs) in instances {
|
||||
section_map.insert(id.clone(), kvs.clone());
|
||||
}
|
||||
}
|
||||
|
||||
// Empty segments are removed and non-empty segments are replaced as a whole.
|
||||
if section_map.is_empty() {
|
||||
new_config.0.remove(§ion_name);
|
||||
} else {
|
||||
new_config.0.insert(section_name, section_map);
|
||||
}
|
||||
}
|
||||
|
||||
// 7. Save the new configuration to the system
|
||||
let Some(store) = rustfs_ecstore::new_object_layer_fn() else {
|
||||
return Err(AuditError::ServerNotInitialized(
|
||||
"Failed to save target configuration: server storage not initialized".to_string(),
|
||||
));
|
||||
};
|
||||
|
||||
match rustfs_ecstore::config::com::save_server_config(store, &new_config).await {
|
||||
Ok(_) => info!("New audit configuration saved to system successfully"),
|
||||
Err(e) => {
|
||||
error!(error = %e, "Failed to save new audit configuration");
|
||||
return Err(AuditError::SaveConfig(e.to_string()));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(successful_targets)
|
||||
}
|
||||
|
||||
|
||||
@@ -12,14 +12,13 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::AuditEntry;
|
||||
use crate::AuditRegistry;
|
||||
use crate::observability;
|
||||
use crate::{AuditError, AuditResult};
|
||||
use crate::{AuditEntry, AuditError, AuditRegistry, AuditResult, observability};
|
||||
use rustfs_ecstore::config::Config;
|
||||
use rustfs_targets::store::{Key, Store};
|
||||
use rustfs_targets::target::EntityTarget;
|
||||
use rustfs_targets::{StoreError, Target, TargetError};
|
||||
use rustfs_targets::{
|
||||
StoreError, Target, TargetError,
|
||||
store::{Key, Store},
|
||||
target::EntityTarget,
|
||||
};
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::{Mutex, RwLock};
|
||||
use tracing::{error, info, warn};
|
||||
@@ -257,7 +256,7 @@ impl AuditSystem {
|
||||
let target_id_clone = target_id.clone();
|
||||
|
||||
// Create EntityTarget for the audit log entry
|
||||
let entity_target = rustfs_targets::target::EntityTarget {
|
||||
let entity_target = EntityTarget {
|
||||
object_name: entry.api.name.clone().unwrap_or_default(),
|
||||
bucket_name: entry.api.bucket.clone().unwrap_or_default(),
|
||||
event_name: rustfs_targets::EventName::ObjectCreatedPut, // Default, should be derived from entry
|
||||
@@ -337,7 +336,7 @@ impl AuditSystem {
|
||||
let mut success_count = 0;
|
||||
let mut errors = Vec::new();
|
||||
for entry in entries_clone {
|
||||
let entity_target = rustfs_targets::target::EntityTarget {
|
||||
let entity_target = EntityTarget {
|
||||
object_name: entry.api.name.clone().unwrap_or_default(),
|
||||
bucket_name: entry.api.bucket.clone().unwrap_or_default(),
|
||||
event_name: rustfs_targets::EventName::ObjectCreatedPut,
|
||||
|
||||
@@ -36,4 +36,4 @@ audit = ["dep:const-str", "constants"]
|
||||
constants = ["dep:const-str"]
|
||||
notify = ["dep:const-str", "constants"]
|
||||
observability = ["constants"]
|
||||
|
||||
opa = ["constants"]
|
||||
|
||||
@@ -32,3 +32,5 @@ pub mod audit;
|
||||
pub mod notify;
|
||||
#[cfg(feature = "observability")]
|
||||
pub mod observability;
|
||||
#[cfg(feature = "opa")]
|
||||
pub mod opa;
|
||||
|
||||
@@ -1,98 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// Observability Keys
|
||||
|
||||
pub const ENV_OBS_ENDPOINT: &str = "RUSTFS_OBS_ENDPOINT";
|
||||
pub const ENV_OBS_USE_STDOUT: &str = "RUSTFS_OBS_USE_STDOUT";
|
||||
pub const ENV_OBS_SAMPLE_RATIO: &str = "RUSTFS_OBS_SAMPLE_RATIO";
|
||||
pub const ENV_OBS_METER_INTERVAL: &str = "RUSTFS_OBS_METER_INTERVAL";
|
||||
pub const ENV_OBS_SERVICE_NAME: &str = "RUSTFS_OBS_SERVICE_NAME";
|
||||
pub const ENV_OBS_SERVICE_VERSION: &str = "RUSTFS_OBS_SERVICE_VERSION";
|
||||
pub const ENV_OBS_ENVIRONMENT: &str = "RUSTFS_OBS_ENVIRONMENT";
|
||||
pub const ENV_OBS_LOGGER_LEVEL: &str = "RUSTFS_OBS_LOGGER_LEVEL";
|
||||
pub const ENV_OBS_LOCAL_LOGGING_ENABLED: &str = "RUSTFS_OBS_LOCAL_LOGGING_ENABLED";
|
||||
pub const ENV_OBS_LOG_DIRECTORY: &str = "RUSTFS_OBS_LOG_DIRECTORY";
|
||||
pub const ENV_OBS_LOG_FILENAME: &str = "RUSTFS_OBS_LOG_FILENAME";
|
||||
pub const ENV_OBS_LOG_ROTATION_SIZE_MB: &str = "RUSTFS_OBS_LOG_ROTATION_SIZE_MB";
|
||||
pub const ENV_OBS_LOG_ROTATION_TIME: &str = "RUSTFS_OBS_LOG_ROTATION_TIME";
|
||||
pub const ENV_OBS_LOG_KEEP_FILES: &str = "RUSTFS_OBS_LOG_KEEP_FILES";
|
||||
|
||||
/// Log pool capacity for async logging
|
||||
pub const ENV_OBS_LOG_POOL_CAPA: &str = "RUSTFS_OBS_LOG_POOL_CAPA";
|
||||
|
||||
/// Log message capacity for async logging
|
||||
pub const ENV_OBS_LOG_MESSAGE_CAPA: &str = "RUSTFS_OBS_LOG_MESSAGE_CAPA";
|
||||
|
||||
/// Log flush interval in milliseconds for async logging
|
||||
pub const ENV_OBS_LOG_FLUSH_MS: &str = "RUSTFS_OBS_LOG_FLUSH_MS";
|
||||
|
||||
/// Default values for log pool
|
||||
pub const DEFAULT_OBS_LOG_POOL_CAPA: usize = 10240;
|
||||
|
||||
/// Default values for message capacity
|
||||
pub const DEFAULT_OBS_LOG_MESSAGE_CAPA: usize = 32768;
|
||||
|
||||
/// Default values for flush interval in milliseconds
|
||||
pub const DEFAULT_OBS_LOG_FLUSH_MS: u64 = 200;
|
||||
|
||||
/// Audit logger queue capacity environment variable key
|
||||
pub const ENV_AUDIT_LOGGER_QUEUE_CAPACITY: &str = "RUSTFS_AUDIT_LOGGER_QUEUE_CAPACITY";
|
||||
|
||||
/// Default values for observability configuration
|
||||
pub const DEFAULT_AUDIT_LOGGER_QUEUE_CAPACITY: usize = 10000;
|
||||
|
||||
/// Default values for observability configuration
|
||||
// ### Supported Environment Values
|
||||
// - `production` - Secure file-only logging
|
||||
// - `development` - Full debugging with stdout
|
||||
// - `test` - Test environment with stdout support
|
||||
// - `staging` - Staging environment with stdout support
|
||||
pub const DEFAULT_OBS_ENVIRONMENT_PRODUCTION: &str = "production";
|
||||
pub const DEFAULT_OBS_ENVIRONMENT_DEVELOPMENT: &str = "development";
|
||||
pub const DEFAULT_OBS_ENVIRONMENT_TEST: &str = "test";
|
||||
pub const DEFAULT_OBS_ENVIRONMENT_STAGING: &str = "staging";
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_env_keys() {
|
||||
assert_eq!(ENV_OBS_ENDPOINT, "RUSTFS_OBS_ENDPOINT");
|
||||
assert_eq!(ENV_OBS_USE_STDOUT, "RUSTFS_OBS_USE_STDOUT");
|
||||
assert_eq!(ENV_OBS_SAMPLE_RATIO, "RUSTFS_OBS_SAMPLE_RATIO");
|
||||
assert_eq!(ENV_OBS_METER_INTERVAL, "RUSTFS_OBS_METER_INTERVAL");
|
||||
assert_eq!(ENV_OBS_SERVICE_NAME, "RUSTFS_OBS_SERVICE_NAME");
|
||||
assert_eq!(ENV_OBS_SERVICE_VERSION, "RUSTFS_OBS_SERVICE_VERSION");
|
||||
assert_eq!(ENV_OBS_ENVIRONMENT, "RUSTFS_OBS_ENVIRONMENT");
|
||||
assert_eq!(ENV_OBS_LOGGER_LEVEL, "RUSTFS_OBS_LOGGER_LEVEL");
|
||||
assert_eq!(ENV_OBS_LOCAL_LOGGING_ENABLED, "RUSTFS_OBS_LOCAL_LOGGING_ENABLED");
|
||||
assert_eq!(ENV_OBS_LOG_DIRECTORY, "RUSTFS_OBS_LOG_DIRECTORY");
|
||||
assert_eq!(ENV_OBS_LOG_FILENAME, "RUSTFS_OBS_LOG_FILENAME");
|
||||
assert_eq!(ENV_OBS_LOG_ROTATION_SIZE_MB, "RUSTFS_OBS_LOG_ROTATION_SIZE_MB");
|
||||
assert_eq!(ENV_OBS_LOG_ROTATION_TIME, "RUSTFS_OBS_LOG_ROTATION_TIME");
|
||||
assert_eq!(ENV_OBS_LOG_KEEP_FILES, "RUSTFS_OBS_LOG_KEEP_FILES");
|
||||
assert_eq!(ENV_AUDIT_LOGGER_QUEUE_CAPACITY, "RUSTFS_AUDIT_LOGGER_QUEUE_CAPACITY");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_default_values() {
|
||||
assert_eq!(DEFAULT_AUDIT_LOGGER_QUEUE_CAPACITY, 10000);
|
||||
assert_eq!(DEFAULT_OBS_ENVIRONMENT_PRODUCTION, "production");
|
||||
assert_eq!(DEFAULT_OBS_ENVIRONMENT_DEVELOPMENT, "development");
|
||||
assert_eq!(DEFAULT_OBS_ENVIRONMENT_TEST, "test");
|
||||
assert_eq!(DEFAULT_OBS_ENVIRONMENT_STAGING, "staging");
|
||||
}
|
||||
}
|
||||
@@ -1,28 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// RUSTFS_SINKS_FILE_PATH
|
||||
pub const ENV_SINKS_FILE_PATH: &str = "RUSTFS_SINKS_FILE_PATH";
|
||||
// RUSTFS_SINKS_FILE_BUFFER_SIZE
|
||||
pub const ENV_SINKS_FILE_BUFFER_SIZE: &str = "RUSTFS_SINKS_FILE_BUFFER_SIZE";
|
||||
// RUSTFS_SINKS_FILE_FLUSH_INTERVAL_MS
|
||||
pub const ENV_SINKS_FILE_FLUSH_INTERVAL_MS: &str = "RUSTFS_SINKS_FILE_FLUSH_INTERVAL_MS";
|
||||
// RUSTFS_SINKS_FILE_FLUSH_THRESHOLD
|
||||
pub const ENV_SINKS_FILE_FLUSH_THRESHOLD: &str = "RUSTFS_SINKS_FILE_FLUSH_THRESHOLD";
|
||||
|
||||
pub const DEFAULT_SINKS_FILE_BUFFER_SIZE: usize = 8192;
|
||||
|
||||
pub const DEFAULT_SINKS_FILE_FLUSH_INTERVAL_MS: u64 = 1000;
|
||||
|
||||
pub const DEFAULT_SINKS_FILE_FLUSH_THRESHOLD: usize = 100;
|
||||
@@ -1,27 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// RUSTFS_SINKS_KAFKA_BROKERS
|
||||
pub const ENV_SINKS_KAFKA_BROKERS: &str = "RUSTFS_SINKS_KAFKA_BROKERS";
|
||||
pub const ENV_SINKS_KAFKA_TOPIC: &str = "RUSTFS_SINKS_KAFKA_TOPIC";
|
||||
// batch_size
|
||||
pub const ENV_SINKS_KAFKA_BATCH_SIZE: &str = "RUSTFS_SINKS_KAFKA_BATCH_SIZE";
|
||||
// batch_timeout_ms
|
||||
pub const ENV_SINKS_KAFKA_BATCH_TIMEOUT_MS: &str = "RUSTFS_SINKS_KAFKA_BATCH_TIMEOUT_MS";
|
||||
|
||||
// brokers
|
||||
pub const DEFAULT_SINKS_KAFKA_BROKERS: &str = "localhost:9092";
|
||||
pub const DEFAULT_SINKS_KAFKA_TOPIC: &str = "rustfs-sinks";
|
||||
pub const DEFAULT_SINKS_KAFKA_BATCH_SIZE: usize = 100;
|
||||
pub const DEFAULT_SINKS_KAFKA_BATCH_TIMEOUT_MS: u64 = 1000;
|
||||
@@ -12,12 +12,87 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod config;
|
||||
mod file;
|
||||
mod kafka;
|
||||
mod webhook;
|
||||
// Observability Keys
|
||||
|
||||
pub use config::*;
|
||||
pub use file::*;
|
||||
pub use kafka::*;
|
||||
pub use webhook::*;
|
||||
pub const ENV_OBS_ENDPOINT: &str = "RUSTFS_OBS_ENDPOINT";
|
||||
pub const ENV_OBS_USE_STDOUT: &str = "RUSTFS_OBS_USE_STDOUT";
|
||||
pub const ENV_OBS_SAMPLE_RATIO: &str = "RUSTFS_OBS_SAMPLE_RATIO";
|
||||
pub const ENV_OBS_METER_INTERVAL: &str = "RUSTFS_OBS_METER_INTERVAL";
|
||||
pub const ENV_OBS_SERVICE_NAME: &str = "RUSTFS_OBS_SERVICE_NAME";
|
||||
pub const ENV_OBS_SERVICE_VERSION: &str = "RUSTFS_OBS_SERVICE_VERSION";
|
||||
pub const ENV_OBS_ENVIRONMENT: &str = "RUSTFS_OBS_ENVIRONMENT";
|
||||
pub const ENV_OBS_LOGGER_LEVEL: &str = "RUSTFS_OBS_LOGGER_LEVEL";
|
||||
pub const ENV_OBS_LOCAL_LOGGING_ENABLED: &str = "RUSTFS_OBS_LOCAL_LOGGING_ENABLED";
|
||||
pub const ENV_OBS_LOG_DIRECTORY: &str = "RUSTFS_OBS_LOG_DIRECTORY";
|
||||
pub const ENV_OBS_LOG_FILENAME: &str = "RUSTFS_OBS_LOG_FILENAME";
|
||||
pub const ENV_OBS_LOG_ROTATION_SIZE_MB: &str = "RUSTFS_OBS_LOG_ROTATION_SIZE_MB";
|
||||
pub const ENV_OBS_LOG_ROTATION_TIME: &str = "RUSTFS_OBS_LOG_ROTATION_TIME";
|
||||
pub const ENV_OBS_LOG_KEEP_FILES: &str = "RUSTFS_OBS_LOG_KEEP_FILES";
|
||||
|
||||
/// Log pool capacity for async logging
|
||||
pub const ENV_OBS_LOG_POOL_CAPA: &str = "RUSTFS_OBS_LOG_POOL_CAPA";
|
||||
|
||||
/// Log message capacity for async logging
|
||||
pub const ENV_OBS_LOG_MESSAGE_CAPA: &str = "RUSTFS_OBS_LOG_MESSAGE_CAPA";
|
||||
|
||||
/// Log flush interval in milliseconds for async logging
|
||||
pub const ENV_OBS_LOG_FLUSH_MS: &str = "RUSTFS_OBS_LOG_FLUSH_MS";
|
||||
|
||||
/// Default values for log pool
|
||||
pub const DEFAULT_OBS_LOG_POOL_CAPA: usize = 10240;
|
||||
|
||||
/// Default values for message capacity
|
||||
pub const DEFAULT_OBS_LOG_MESSAGE_CAPA: usize = 32768;
|
||||
|
||||
/// Default values for flush interval in milliseconds
|
||||
pub const DEFAULT_OBS_LOG_FLUSH_MS: u64 = 200;
|
||||
|
||||
/// Audit logger queue capacity environment variable key
|
||||
pub const ENV_AUDIT_LOGGER_QUEUE_CAPACITY: &str = "RUSTFS_AUDIT_LOGGER_QUEUE_CAPACITY";
|
||||
|
||||
/// Default values for observability configuration
|
||||
pub const DEFAULT_AUDIT_LOGGER_QUEUE_CAPACITY: usize = 10000;
|
||||
|
||||
/// Default values for observability configuration
|
||||
// ### Supported Environment Values
|
||||
// - `production` - Secure file-only logging
|
||||
// - `development` - Full debugging with stdout
|
||||
// - `test` - Test environment with stdout support
|
||||
// - `staging` - Staging environment with stdout support
|
||||
pub const DEFAULT_OBS_ENVIRONMENT_PRODUCTION: &str = "production";
|
||||
pub const DEFAULT_OBS_ENVIRONMENT_DEVELOPMENT: &str = "development";
|
||||
pub const DEFAULT_OBS_ENVIRONMENT_TEST: &str = "test";
|
||||
pub const DEFAULT_OBS_ENVIRONMENT_STAGING: &str = "staging";
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_env_keys() {
|
||||
assert_eq!(ENV_OBS_ENDPOINT, "RUSTFS_OBS_ENDPOINT");
|
||||
assert_eq!(ENV_OBS_USE_STDOUT, "RUSTFS_OBS_USE_STDOUT");
|
||||
assert_eq!(ENV_OBS_SAMPLE_RATIO, "RUSTFS_OBS_SAMPLE_RATIO");
|
||||
assert_eq!(ENV_OBS_METER_INTERVAL, "RUSTFS_OBS_METER_INTERVAL");
|
||||
assert_eq!(ENV_OBS_SERVICE_NAME, "RUSTFS_OBS_SERVICE_NAME");
|
||||
assert_eq!(ENV_OBS_SERVICE_VERSION, "RUSTFS_OBS_SERVICE_VERSION");
|
||||
assert_eq!(ENV_OBS_ENVIRONMENT, "RUSTFS_OBS_ENVIRONMENT");
|
||||
assert_eq!(ENV_OBS_LOGGER_LEVEL, "RUSTFS_OBS_LOGGER_LEVEL");
|
||||
assert_eq!(ENV_OBS_LOCAL_LOGGING_ENABLED, "RUSTFS_OBS_LOCAL_LOGGING_ENABLED");
|
||||
assert_eq!(ENV_OBS_LOG_DIRECTORY, "RUSTFS_OBS_LOG_DIRECTORY");
|
||||
assert_eq!(ENV_OBS_LOG_FILENAME, "RUSTFS_OBS_LOG_FILENAME");
|
||||
assert_eq!(ENV_OBS_LOG_ROTATION_SIZE_MB, "RUSTFS_OBS_LOG_ROTATION_SIZE_MB");
|
||||
assert_eq!(ENV_OBS_LOG_ROTATION_TIME, "RUSTFS_OBS_LOG_ROTATION_TIME");
|
||||
assert_eq!(ENV_OBS_LOG_KEEP_FILES, "RUSTFS_OBS_LOG_KEEP_FILES");
|
||||
assert_eq!(ENV_AUDIT_LOGGER_QUEUE_CAPACITY, "RUSTFS_AUDIT_LOGGER_QUEUE_CAPACITY");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_default_values() {
|
||||
assert_eq!(DEFAULT_AUDIT_LOGGER_QUEUE_CAPACITY, 10000);
|
||||
assert_eq!(DEFAULT_OBS_ENVIRONMENT_PRODUCTION, "production");
|
||||
assert_eq!(DEFAULT_OBS_ENVIRONMENT_DEVELOPMENT, "development");
|
||||
assert_eq!(DEFAULT_OBS_ENVIRONMENT_TEST, "test");
|
||||
assert_eq!(DEFAULT_OBS_ENVIRONMENT_STAGING, "staging");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,28 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// RUSTFS_SINKS_WEBHOOK_ENDPOINT
|
||||
pub const ENV_SINKS_WEBHOOK_ENDPOINT: &str = "RUSTFS_SINKS_WEBHOOK_ENDPOINT";
|
||||
// RUSTFS_SINKS_WEBHOOK_AUTH_TOKEN
|
||||
pub const ENV_SINKS_WEBHOOK_AUTH_TOKEN: &str = "RUSTFS_SINKS_WEBHOOK_AUTH_TOKEN";
|
||||
// max_retries
|
||||
pub const ENV_SINKS_WEBHOOK_MAX_RETRIES: &str = "RUSTFS_SINKS_WEBHOOK_MAX_RETRIES";
|
||||
// retry_delay_ms
|
||||
pub const ENV_SINKS_WEBHOOK_RETRY_DELAY_MS: &str = "RUSTFS_SINKS_WEBHOOK_RETRY_DELAY_MS";
|
||||
|
||||
// Default values for webhook sink configuration
|
||||
pub const DEFAULT_SINKS_WEBHOOK_ENDPOINT: &str = "http://localhost:8080";
|
||||
pub const DEFAULT_SINKS_WEBHOOK_AUTH_TOKEN: &str = "";
|
||||
pub const DEFAULT_SINKS_WEBHOOK_MAX_RETRIES: usize = 3;
|
||||
pub const DEFAULT_SINKS_WEBHOOK_RETRY_DELAY_MS: u64 = 100;
|
||||
@@ -12,16 +12,10 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::{UnifiedLogEntry, sinks::Sink};
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::mpsc::Receiver;
|
||||
//opa env vars
|
||||
pub const ENV_POLICY_PLUGIN_OPA_URL: &str = "RUSTFS_POLICY_PLUGIN_URL";
|
||||
pub const ENV_POLICY_PLUGIN_AUTH_TOKEN: &str = "RUSTFS_POLICY_PLUGIN_AUTH_TOKEN";
|
||||
|
||||
/// Start the log processing worker thread
|
||||
pub(crate) async fn start_worker(receiver: Receiver<UnifiedLogEntry>, sinks: Vec<Arc<dyn Sink>>) {
|
||||
let mut receiver = receiver;
|
||||
while let Some(entry) = receiver.recv().await {
|
||||
for sink in &sinks {
|
||||
sink.write(&entry).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
pub const ENV_POLICY_PLUGIN_KEYS: &[&str] = &[ENV_POLICY_PLUGIN_OPA_URL, ENV_POLICY_PLUGIN_AUTH_TOKEN];
|
||||
|
||||
pub const POLICY_PLUGIN_SUB_SYS: &str = "policy_plugin";
|
||||
@@ -49,5 +49,4 @@ uuid = { workspace = true }
|
||||
base64 = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
http.workspace = true
|
||||
md5 = { workspace = true }
|
||||
md5 = { workspace = true }
|
||||
@@ -13,16 +13,25 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! 分片上传加密功能的分步测试用例
|
||||
//!
|
||||
//! 这个测试套件将验证分片上传加密功能的每一个步骤:
|
||||
//! 1. 测试基础的单分片加密(验证加密基础逻辑)
|
||||
//! 2. 测试多分片上传(验证分片拼接逻辑)
|
||||
//! 3. 测试加密元数据的保存和读取
|
||||
//! 4. 测试完整的分片上传加密流程
|
||||
|
||||
use super::common::LocalKMSTestEnvironment;
|
||||
use crate::common::{TEST_BUCKET, init_logging};
|
||||
use serial_test::serial;
|
||||
use tracing::{debug, info};
|
||||
|
||||
/// 步骤1:测试基础单文件加密功能(确保SSE-S3在非分片场景下正常工作)
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_step1_basic_single_file_encryption() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
info!("🧪 step1: test basic single file encryption");
|
||||
info!("🧪 步骤1:测试基础单文件加密功能");
|
||||
|
||||
let mut kms_env = LocalKMSTestEnvironment::new().await?;
|
||||
let _default_key_id = kms_env.start_rustfs_for_local_kms().await?;
|
||||
@@ -31,11 +40,11 @@ async fn test_step1_basic_single_file_encryption() -> Result<(), Box<dyn std::er
|
||||
let s3_client = kms_env.base_env.create_s3_client();
|
||||
kms_env.base_env.create_test_bucket(TEST_BUCKET).await?;
|
||||
|
||||
// test small file encryption (should inline store)
|
||||
// 测试小文件加密(应该会内联存储)
|
||||
let test_data = b"Hello, this is a small test file for SSE-S3!";
|
||||
let object_key = "test-single-file-encrypted";
|
||||
|
||||
info!("📤 step1: upload small file ({}) with SSE-S3 encryption", test_data.len());
|
||||
info!("📤 上传小文件({}字节),启用SSE-S3加密", test_data.len());
|
||||
let put_response = s3_client
|
||||
.put_object()
|
||||
.bucket(TEST_BUCKET)
|
||||
@@ -45,41 +54,41 @@ async fn test_step1_basic_single_file_encryption() -> Result<(), Box<dyn std::er
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
debug!("PUT response ETag: {:?}", put_response.e_tag());
|
||||
debug!("PUT response SSE: {:?}", put_response.server_side_encryption());
|
||||
debug!("PUT响应ETag: {:?}", put_response.e_tag());
|
||||
debug!("PUT响应SSE: {:?}", put_response.server_side_encryption());
|
||||
|
||||
// verify PUT response contains correct encryption header
|
||||
// 验证PUT响应包含正确的加密头
|
||||
assert_eq!(
|
||||
put_response.server_side_encryption(),
|
||||
Some(&aws_sdk_s3::types::ServerSideEncryption::Aes256)
|
||||
);
|
||||
|
||||
info!("📥 step1: download file and verify encryption status");
|
||||
info!("📥 下载文件并验证加密状态");
|
||||
let get_response = s3_client.get_object().bucket(TEST_BUCKET).key(object_key).send().await?;
|
||||
|
||||
debug!("GET response SSE: {:?}", get_response.server_side_encryption());
|
||||
debug!("GET响应SSE: {:?}", get_response.server_side_encryption());
|
||||
|
||||
// verify GET response contains correct encryption header
|
||||
// 验证GET响应包含正确的加密头
|
||||
assert_eq!(
|
||||
get_response.server_side_encryption(),
|
||||
Some(&aws_sdk_s3::types::ServerSideEncryption::Aes256)
|
||||
);
|
||||
|
||||
// verify data integrity
|
||||
// 验证数据完整性
|
||||
let downloaded_data = get_response.body.collect().await?.into_bytes();
|
||||
assert_eq!(&downloaded_data[..], test_data);
|
||||
|
||||
kms_env.base_env.delete_test_bucket(TEST_BUCKET).await?;
|
||||
info!("✅ step1: basic single file encryption works as expected");
|
||||
info!("✅ 步骤1通过:基础单文件加密功能正常");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// test basic multipart upload without encryption
|
||||
/// 步骤2:测试不加密的分片上传(确保分片上传基础功能正常)
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_step2_basic_multipart_upload_without_encryption() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
info!("🧪 step2: test basic multipart upload without encryption");
|
||||
info!("🧪 步骤2:测试不加密的分片上传");
|
||||
|
||||
let mut kms_env = LocalKMSTestEnvironment::new().await?;
|
||||
let _default_key_id = kms_env.start_rustfs_for_local_kms().await?;
|
||||
@@ -93,16 +102,12 @@ async fn test_step2_basic_multipart_upload_without_encryption() -> Result<(), Bo
|
||||
let total_parts = 2;
|
||||
let total_size = part_size * total_parts;
|
||||
|
||||
// generate test data (with clear pattern for easy verification)
|
||||
// 生成测试数据(有明显的模式便于验证)
|
||||
let test_data: Vec<u8> = (0..total_size).map(|i| (i % 256) as u8).collect();
|
||||
|
||||
info!(
|
||||
"🚀 step2: start multipart upload (no encryption) with {} parts, each {}MB",
|
||||
total_parts,
|
||||
part_size / (1024 * 1024)
|
||||
);
|
||||
info!("🚀 开始分片上传(无加密):{} parts,每个 {}MB", total_parts, part_size / (1024 * 1024));
|
||||
|
||||
// step1: create multipart upload
|
||||
// 步骤1:创建分片上传
|
||||
let create_multipart_output = s3_client
|
||||
.create_multipart_upload()
|
||||
.bucket(TEST_BUCKET)
|
||||
@@ -111,16 +116,16 @@ async fn test_step2_basic_multipart_upload_without_encryption() -> Result<(), Bo
|
||||
.await?;
|
||||
|
||||
let upload_id = create_multipart_output.upload_id().unwrap();
|
||||
info!("📋 step2: create multipart upload, ID: {}", upload_id);
|
||||
info!("📋 创建分片上传,ID: {}", upload_id);
|
||||
|
||||
// step2: upload each part
|
||||
// 步骤2:上传各个分片
|
||||
let mut completed_parts = Vec::new();
|
||||
for part_number in 1..=total_parts {
|
||||
let start = (part_number - 1) * part_size;
|
||||
let end = std::cmp::min(start + part_size, total_size);
|
||||
let part_data = &test_data[start..end];
|
||||
|
||||
info!("📤 step2: upload part {} ({} bytes)", part_number, part_data.len());
|
||||
info!("📤 上传分片 {} ({} bytes)", part_number, part_data.len());
|
||||
|
||||
let upload_part_output = s3_client
|
||||
.upload_part()
|
||||
@@ -140,15 +145,15 @@ async fn test_step2_basic_multipart_upload_without_encryption() -> Result<(), Bo
|
||||
.build(),
|
||||
);
|
||||
|
||||
debug!("step2: part {} uploaded, ETag: {}", part_number, etag);
|
||||
debug!("分片 {} 上传完成,ETag: {}", part_number, etag);
|
||||
}
|
||||
|
||||
// step3: complete multipart upload
|
||||
// 步骤3:完成分片上传
|
||||
let completed_multipart_upload = aws_sdk_s3::types::CompletedMultipartUpload::builder()
|
||||
.set_parts(Some(completed_parts))
|
||||
.build();
|
||||
|
||||
info!("🔗 step2: complete multipart upload");
|
||||
info!("🔗 完成分片上传");
|
||||
let complete_output = s3_client
|
||||
.complete_multipart_upload()
|
||||
.bucket(TEST_BUCKET)
|
||||
@@ -158,16 +163,10 @@ async fn test_step2_basic_multipart_upload_without_encryption() -> Result<(), Bo
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
debug!("step2: multipart upload completed, ETag: {:?}", complete_output.e_tag());
|
||||
debug!("完成分片上传,ETag: {:?}", complete_output.e_tag());
|
||||
|
||||
// step4: verify multipart upload completed successfully
|
||||
assert_eq!(
|
||||
complete_output.e_tag().unwrap().to_string(),
|
||||
format!("\"{}-{}-{}\"", object_key, upload_id, total_parts)
|
||||
);
|
||||
|
||||
// verify data integrity
|
||||
info!("📥 step2: download file and verify data integrity");
|
||||
// 步骤4:下载并验证
|
||||
info!("📥 下载文件并验证数据完整性");
|
||||
let get_response = s3_client.get_object().bucket(TEST_BUCKET).key(object_key).send().await?;
|
||||
|
||||
let downloaded_data = get_response.body.collect().await?.into_bytes();
|
||||
@@ -175,16 +174,16 @@ async fn test_step2_basic_multipart_upload_without_encryption() -> Result<(), Bo
|
||||
assert_eq!(&downloaded_data[..], &test_data[..]);
|
||||
|
||||
kms_env.base_env.delete_test_bucket(TEST_BUCKET).await?;
|
||||
info!("✅ step2: basic multipart upload without encryption works as expected");
|
||||
info!("✅ 步骤2通过:不加密的分片上传功能正常");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// test multipart upload with SSE-S3 encryption
|
||||
/// 步骤3:测试分片上传 + SSE-S3加密(重点测试)
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_step3_multipart_upload_with_sse_s3() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
info!("🧪 step3: test multipart upload with SSE-S3 encryption");
|
||||
info!("🧪 步骤3:测试分片上传 + SSE-S3加密");
|
||||
|
||||
let mut kms_env = LocalKMSTestEnvironment::new().await?;
|
||||
let _default_key_id = kms_env.start_rustfs_for_local_kms().await?;
|
||||
@@ -198,16 +197,16 @@ async fn test_step3_multipart_upload_with_sse_s3() -> Result<(), Box<dyn std::er
|
||||
let total_parts = 2;
|
||||
let total_size = part_size * total_parts;
|
||||
|
||||
// generate test data (with clear pattern for easy verification)
|
||||
// 生成测试数据
|
||||
let test_data: Vec<u8> = (0..total_size).map(|i| ((i / 1000) % 256) as u8).collect();
|
||||
|
||||
info!(
|
||||
"🔐 step3: start multipart upload with SSE-S3 encryption: {} parts, each {}MB",
|
||||
"🔐 开始分片上传(SSE-S3加密):{} parts,每个 {}MB",
|
||||
total_parts,
|
||||
part_size / (1024 * 1024)
|
||||
);
|
||||
|
||||
// step1: create multipart upload and enable SSE-S3
|
||||
// 步骤1:创建分片上传并启用SSE-S3
|
||||
let create_multipart_output = s3_client
|
||||
.create_multipart_upload()
|
||||
.bucket(TEST_BUCKET)
|
||||
@@ -217,24 +216,24 @@ async fn test_step3_multipart_upload_with_sse_s3() -> Result<(), Box<dyn std::er
|
||||
.await?;
|
||||
|
||||
let upload_id = create_multipart_output.upload_id().unwrap();
|
||||
info!("📋 step3: create multipart upload with SSE-S3 encryption, ID: {}", upload_id);
|
||||
info!("📋 创建加密分片上传,ID: {}", upload_id);
|
||||
|
||||
// step2: verify CreateMultipartUpload response (SSE-S3 header should be included)
|
||||
// 验证CreateMultipartUpload响应(如果有SSE头的话)
|
||||
if let Some(sse) = create_multipart_output.server_side_encryption() {
|
||||
debug!("CreateMultipartUpload response contains SSE header: {:?}", sse);
|
||||
debug!("CreateMultipartUpload包含SSE响应: {:?}", sse);
|
||||
assert_eq!(sse, &aws_sdk_s3::types::ServerSideEncryption::Aes256);
|
||||
} else {
|
||||
debug!("CreateMultipartUpload response does not contain SSE header (some implementations may return empty string)");
|
||||
debug!("CreateMultipartUpload不包含SSE响应头(某些实现中正常)");
|
||||
}
|
||||
|
||||
// step2: upload each part
|
||||
// 步骤2:上传各个分片
|
||||
let mut completed_parts = Vec::new();
|
||||
for part_number in 1..=total_parts {
|
||||
let start = (part_number - 1) * part_size;
|
||||
let end = std::cmp::min(start + part_size, total_size);
|
||||
let part_data = &test_data[start..end];
|
||||
|
||||
info!("🔐 step3: upload encrypted part {} ({} bytes)", part_number, part_data.len());
|
||||
info!("🔐 上传加密分片 {} ({} bytes)", part_number, part_data.len());
|
||||
|
||||
let upload_part_output = s3_client
|
||||
.upload_part()
|
||||
@@ -254,15 +253,15 @@ async fn test_step3_multipart_upload_with_sse_s3() -> Result<(), Box<dyn std::er
|
||||
.build(),
|
||||
);
|
||||
|
||||
debug!("step3: part {} uploaded, ETag: {}", part_number, etag);
|
||||
debug!("加密分片 {} 上传完成,ETag: {}", part_number, etag);
|
||||
}
|
||||
|
||||
// step3: complete multipart upload
|
||||
// 步骤3:完成分片上传
|
||||
let completed_multipart_upload = aws_sdk_s3::types::CompletedMultipartUpload::builder()
|
||||
.set_parts(Some(completed_parts))
|
||||
.build();
|
||||
|
||||
info!("🔗 step3: complete multipart upload with SSE-S3 encryption");
|
||||
info!("🔗 完成加密分片上传");
|
||||
let complete_output = s3_client
|
||||
.complete_multipart_upload()
|
||||
.bucket(TEST_BUCKET)
|
||||
@@ -272,46 +271,43 @@ async fn test_step3_multipart_upload_with_sse_s3() -> Result<(), Box<dyn std::er
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
debug!(
|
||||
"step3: complete multipart upload with SSE-S3 encryption, ETag: {:?}",
|
||||
complete_output.e_tag()
|
||||
);
|
||||
debug!("完成加密分片上传,ETag: {:?}", complete_output.e_tag());
|
||||
|
||||
// step4: HEAD request to check metadata
|
||||
info!("📋 step4: check object metadata");
|
||||
// 步骤4:HEAD请求检查元数据
|
||||
info!("📋 检查对象元数据");
|
||||
let head_response = s3_client.head_object().bucket(TEST_BUCKET).key(object_key).send().await?;
|
||||
|
||||
debug!("HEAD response SSE: {:?}", head_response.server_side_encryption());
|
||||
debug!("HEAD response metadata: {:?}", head_response.metadata());
|
||||
debug!("HEAD响应 SSE: {:?}", head_response.server_side_encryption());
|
||||
debug!("HEAD响应 元数据: {:?}", head_response.metadata());
|
||||
|
||||
// step5: GET request to download and verify
|
||||
info!("📥 step5: download encrypted file and verify");
|
||||
// 步骤5:GET请求下载并验证
|
||||
info!("📥 下载加密文件并验证");
|
||||
let get_response = s3_client.get_object().bucket(TEST_BUCKET).key(object_key).send().await?;
|
||||
|
||||
debug!("GET response SSE: {:?}", get_response.server_side_encryption());
|
||||
debug!("GET响应 SSE: {:?}", get_response.server_side_encryption());
|
||||
|
||||
// step5: verify GET response contains SSE-S3 encryption header
|
||||
// 🎯 关键验证:GET响应必须包含SSE-S3加密头
|
||||
assert_eq!(
|
||||
get_response.server_side_encryption(),
|
||||
Some(&aws_sdk_s3::types::ServerSideEncryption::Aes256)
|
||||
);
|
||||
|
||||
// step5: verify downloaded data matches original test data
|
||||
// 验证数据完整性
|
||||
let downloaded_data = get_response.body.collect().await?.into_bytes();
|
||||
assert_eq!(downloaded_data.len(), total_size);
|
||||
assert_eq!(&downloaded_data[..], &test_data[..]);
|
||||
|
||||
kms_env.base_env.delete_test_bucket(TEST_BUCKET).await?;
|
||||
info!("✅ step3: multipart upload with SSE-S3 encryption function is normal");
|
||||
info!("✅ 步骤3通过:分片上传 + SSE-S3加密功能正常");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// step4: test larger multipart upload with encryption (streaming encryption)
|
||||
/// 步骤4:测试更大的分片上传(测试流式加密)
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_step4_large_multipart_upload_with_encryption() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
info!("🧪 step4: test larger multipart upload with encryption (streaming encryption)");
|
||||
info!("🧪 步骤4:测试大文件分片上传加密");
|
||||
|
||||
let mut kms_env = LocalKMSTestEnvironment::new().await?;
|
||||
let _default_key_id = kms_env.start_rustfs_for_local_kms().await?;
|
||||
@@ -326,13 +322,13 @@ async fn test_step4_large_multipart_upload_with_encryption() -> Result<(), Box<d
|
||||
let total_size = part_size * total_parts;
|
||||
|
||||
info!(
|
||||
"🗂️ step4: generate large test data: {} parts, each {}MB, total {}MB",
|
||||
"🗂️ 生成大文件测试数据:{} parts,每个 {}MB,总计 {}MB",
|
||||
total_parts,
|
||||
part_size / (1024 * 1024),
|
||||
total_size / (1024 * 1024)
|
||||
);
|
||||
|
||||
// step4: generate large test data (using complex pattern for verification)
|
||||
// 生成大文件测试数据(使用复杂模式便于验证)
|
||||
let test_data: Vec<u8> = (0..total_size)
|
||||
.map(|i| {
|
||||
let part_num = i / part_size;
|
||||
@@ -341,9 +337,9 @@ async fn test_step4_large_multipart_upload_with_encryption() -> Result<(), Box<d
|
||||
})
|
||||
.collect();
|
||||
|
||||
info!("🔐 step4: start large multipart upload with encryption (SSE-S3)");
|
||||
info!("🔐 开始大文件分片上传(SSE-S3加密)");
|
||||
|
||||
// step4: create multipart upload
|
||||
// 创建分片上传
|
||||
let create_multipart_output = s3_client
|
||||
.create_multipart_upload()
|
||||
.bucket(TEST_BUCKET)
|
||||
@@ -353,9 +349,9 @@ async fn test_step4_large_multipart_upload_with_encryption() -> Result<(), Box<d
|
||||
.await?;
|
||||
|
||||
let upload_id = create_multipart_output.upload_id().unwrap();
|
||||
info!("📋 step4: create multipart upload with encryption (SSE-S3), ID: {}", upload_id);
|
||||
info!("📋 创建大文件加密分片上传,ID: {}", upload_id);
|
||||
|
||||
// step4: upload parts
|
||||
// 上传各个分片
|
||||
let mut completed_parts = Vec::new();
|
||||
for part_number in 1..=total_parts {
|
||||
let start = (part_number - 1) * part_size;
|
||||
@@ -363,7 +359,7 @@ async fn test_step4_large_multipart_upload_with_encryption() -> Result<(), Box<d
|
||||
let part_data = &test_data[start..end];
|
||||
|
||||
info!(
|
||||
"🔐 step4: upload part {} ({:.2}MB)",
|
||||
"🔐 上传大文件加密分片 {} ({:.2}MB)",
|
||||
part_number,
|
||||
part_data.len() as f64 / (1024.0 * 1024.0)
|
||||
);
|
||||
@@ -386,15 +382,15 @@ async fn test_step4_large_multipart_upload_with_encryption() -> Result<(), Box<d
|
||||
.build(),
|
||||
);
|
||||
|
||||
debug!("step4: upload part {} completed, ETag: {}", part_number, etag);
|
||||
debug!("大文件加密分片 {} 上传完成,ETag: {}", part_number, etag);
|
||||
}
|
||||
|
||||
// step4: complete multipart upload
|
||||
// 完成分片上传
|
||||
let completed_multipart_upload = aws_sdk_s3::types::CompletedMultipartUpload::builder()
|
||||
.set_parts(Some(completed_parts))
|
||||
.build();
|
||||
|
||||
info!("🔗 step4: complete multipart upload with encryption (SSE-S3)");
|
||||
info!("🔗 完成大文件加密分片上传");
|
||||
let complete_output = s3_client
|
||||
.complete_multipart_upload()
|
||||
.bucket(TEST_BUCKET)
|
||||
@@ -404,46 +400,40 @@ async fn test_step4_large_multipart_upload_with_encryption() -> Result<(), Box<d
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
debug!(
|
||||
"step4: complete multipart upload with encryption (SSE-S3), ETag: {:?}",
|
||||
complete_output.e_tag()
|
||||
);
|
||||
debug!("完成大文件加密分片上传,ETag: {:?}", complete_output.e_tag());
|
||||
|
||||
// step4: download and verify
|
||||
info!("📥 step4: download and verify large multipart upload with encryption (SSE-S3)");
|
||||
// 下载并验证
|
||||
info!("📥 下载大文件并验证");
|
||||
let get_response = s3_client.get_object().bucket(TEST_BUCKET).key(object_key).send().await?;
|
||||
|
||||
// step4: verify encryption header
|
||||
// 验证加密头
|
||||
assert_eq!(
|
||||
get_response.server_side_encryption(),
|
||||
Some(&aws_sdk_s3::types::ServerSideEncryption::Aes256)
|
||||
);
|
||||
|
||||
// step4: verify data integrity
|
||||
// 验证数据完整性
|
||||
let downloaded_data = get_response.body.collect().await?.into_bytes();
|
||||
assert_eq!(downloaded_data.len(), total_size);
|
||||
|
||||
// step4: verify data matches original test data
|
||||
// 逐字节验证数据(对于大文件更严格)
|
||||
for (i, (&actual, &expected)) in downloaded_data.iter().zip(test_data.iter()).enumerate() {
|
||||
if actual != expected {
|
||||
panic!(
|
||||
"step4: large multipart upload with encryption (SSE-S3) data mismatch at byte {}: actual={}, expected={}",
|
||||
i, actual, expected
|
||||
);
|
||||
panic!("大文件数据在第{i}字节不匹配: 实际={actual}, 期待={expected}");
|
||||
}
|
||||
}
|
||||
|
||||
kms_env.base_env.delete_test_bucket(TEST_BUCKET).await?;
|
||||
info!("✅ step4: large multipart upload with encryption (SSE-S3) functionality normal");
|
||||
info!("✅ 步骤4通过:大文件分片上传加密功能正常");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// step5: test all encryption types multipart upload
|
||||
/// 步骤5:测试所有加密类型的分片上传
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_step5_all_encryption_types_multipart() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
init_logging();
|
||||
info!("🧪 step5: test all encryption types multipart upload");
|
||||
info!("🧪 步骤5:测试所有加密类型的分片上传");
|
||||
|
||||
let mut kms_env = LocalKMSTestEnvironment::new().await?;
|
||||
let _default_key_id = kms_env.start_rustfs_for_local_kms().await?;
|
||||
@@ -456,8 +446,8 @@ async fn test_step5_all_encryption_types_multipart() -> Result<(), Box<dyn std::
|
||||
let total_parts = 2;
|
||||
let total_size = part_size * total_parts;
|
||||
|
||||
// step5: test SSE-KMS multipart upload
|
||||
info!("🔐 step5: test SSE-KMS multipart upload");
|
||||
// 测试SSE-KMS
|
||||
info!("🔐 测试 SSE-KMS 分片上传");
|
||||
test_multipart_encryption_type(
|
||||
&s3_client,
|
||||
TEST_BUCKET,
|
||||
@@ -469,8 +459,8 @@ async fn test_step5_all_encryption_types_multipart() -> Result<(), Box<dyn std::
|
||||
)
|
||||
.await?;
|
||||
|
||||
// step5: test SSE-C multipart upload
|
||||
info!("🔐 step5: test SSE-C multipart upload");
|
||||
// 测试SSE-C
|
||||
info!("🔐 测试 SSE-C 分片上传");
|
||||
test_multipart_encryption_type(
|
||||
&s3_client,
|
||||
TEST_BUCKET,
|
||||
@@ -483,7 +473,7 @@ async fn test_step5_all_encryption_types_multipart() -> Result<(), Box<dyn std::
|
||||
.await?;
|
||||
|
||||
kms_env.base_env.delete_test_bucket(TEST_BUCKET).await?;
|
||||
info!("✅ step5: all encryption types multipart upload functionality normal");
|
||||
info!("✅ 步骤5通过:所有加密类型的分片上传功能正常");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -493,7 +483,7 @@ enum EncryptionType {
|
||||
SSEC,
|
||||
}
|
||||
|
||||
/// step5: test specific encryption type multipart upload
|
||||
/// 辅助函数:测试特定加密类型的分片上传
|
||||
async fn test_multipart_encryption_type(
|
||||
s3_client: &aws_sdk_s3::Client,
|
||||
bucket: &str,
|
||||
@@ -503,10 +493,10 @@ async fn test_multipart_encryption_type(
|
||||
total_parts: usize,
|
||||
encryption_type: EncryptionType,
|
||||
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
// step5: generate test data
|
||||
// 生成测试数据
|
||||
let test_data: Vec<u8> = (0..total_size).map(|i| ((i * 7) % 256) as u8).collect();
|
||||
|
||||
// step5: prepare SSE-C key and MD5 (if needed)
|
||||
// 准备SSE-C所需的密钥(如果需要)
|
||||
let (sse_c_key, sse_c_md5) = if matches!(encryption_type, EncryptionType::SSEC) {
|
||||
let key = "01234567890123456789012345678901";
|
||||
let key_b64 = base64::Engine::encode(&base64::engine::general_purpose::STANDARD, key);
|
||||
@@ -516,10 +506,9 @@ async fn test_multipart_encryption_type(
|
||||
(None, None)
|
||||
};
|
||||
|
||||
// step5: create multipart upload
|
||||
info!("🔗 step5: create multipart upload with encryption {:?}", encryption_type);
|
||||
info!("📋 创建分片上传 - {:?}", encryption_type);
|
||||
|
||||
// step5: create multipart upload request
|
||||
// 创建分片上传
|
||||
let mut create_request = s3_client.create_multipart_upload().bucket(bucket).key(object_key);
|
||||
|
||||
create_request = match encryption_type {
|
||||
@@ -533,6 +522,7 @@ async fn test_multipart_encryption_type(
|
||||
let create_multipart_output = create_request.send().await?;
|
||||
let upload_id = create_multipart_output.upload_id().unwrap();
|
||||
|
||||
// upload parts
|
||||
let mut completed_parts = Vec::new();
|
||||
for part_number in 1..=total_parts {
|
||||
let start = (part_number - 1) * part_size;
|
||||
@@ -547,7 +537,7 @@ async fn test_multipart_encryption_type(
|
||||
.part_number(part_number as i32)
|
||||
.body(aws_sdk_s3::primitives::ByteStream::from(part_data.to_vec()));
|
||||
|
||||
// step5: include SSE-C key and MD5 in each UploadPart request (if needed)
|
||||
// SSE-C需要在每个UploadPart请求中包含密钥
|
||||
if matches!(encryption_type, EncryptionType::SSEC) {
|
||||
upload_request = upload_request
|
||||
.sse_customer_algorithm("AES256")
|
||||
@@ -564,11 +554,10 @@ async fn test_multipart_encryption_type(
|
||||
.build(),
|
||||
);
|
||||
|
||||
// step5: complete multipart upload request
|
||||
debug!("🔗 step5: complete multipart upload part {} with etag {}", part_number, etag);
|
||||
debug!("{:?} 分片 {} 上传完成", encryption_type, part_number);
|
||||
}
|
||||
|
||||
// step5: complete multipart upload
|
||||
// 完成分片上传
|
||||
let completed_multipart_upload = aws_sdk_s3::types::CompletedMultipartUpload::builder()
|
||||
.set_parts(Some(completed_parts))
|
||||
.build();
|
||||
@@ -582,12 +571,10 @@ async fn test_multipart_encryption_type(
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
// step5: download and verify multipart upload
|
||||
info!("🔗 step5: download and verify multipart upload with encryption {:?}", encryption_type);
|
||||
|
||||
// 下载并验证
|
||||
let mut get_request = s3_client.get_object().bucket(bucket).key(object_key);
|
||||
|
||||
// step5: include SSE-C key and MD5 in each GET request (if needed)
|
||||
// SSE-C需要在GET请求中包含密钥
|
||||
if matches!(encryption_type, EncryptionType::SSEC) {
|
||||
get_request = get_request
|
||||
.sse_customer_algorithm("AES256")
|
||||
@@ -597,7 +584,7 @@ async fn test_multipart_encryption_type(
|
||||
|
||||
let get_response = get_request.send().await?;
|
||||
|
||||
// step5: verify encryption headers
|
||||
// 验证加密头
|
||||
match encryption_type {
|
||||
EncryptionType::SSEKMS => {
|
||||
assert_eq!(
|
||||
@@ -610,15 +597,11 @@ async fn test_multipart_encryption_type(
|
||||
}
|
||||
}
|
||||
|
||||
// step5: verify data integrity
|
||||
// 验证数据完整性
|
||||
let downloaded_data = get_response.body.collect().await?.into_bytes();
|
||||
assert_eq!(downloaded_data.len(), total_size);
|
||||
assert_eq!(&downloaded_data[..], &test_data[..]);
|
||||
|
||||
// step5: verify data integrity
|
||||
info!(
|
||||
"✅ step5: verify data integrity for multipart upload with encryption {:?}",
|
||||
encryption_type
|
||||
);
|
||||
info!("✅ {:?} 分片上传测试通过", encryption_type);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
File diff suppressed because it is too large
@@ -12,7 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod append;
|
||||
mod conditional_writes;
|
||||
mod lifecycle;
|
||||
mod lock;
|
||||
|
||||
@@ -75,7 +75,6 @@ hyper-util.workspace = true
|
||||
hyper-rustls.workspace = true
|
||||
rustls.workspace = true
|
||||
tokio = { workspace = true, features = ["io-util", "sync", "signal"] }
|
||||
tokio-stream = { workspace = true }
|
||||
tonic.workspace = true
|
||||
xxhash-rust = { workspace = true, features = ["xxh64", "xxh3"] }
|
||||
tower.workspace = true
|
||||
@@ -89,8 +88,6 @@ rustfs-madmin.workspace = true
|
||||
rustfs-workers.workspace = true
|
||||
reqwest = { workspace = true }
|
||||
aws-sdk-s3 = { workspace = true }
|
||||
once_cell = { workspace = true }
|
||||
rustfs-rsc = { workspace = true }
|
||||
urlencoding = { workspace = true }
|
||||
smallvec = { workspace = true }
|
||||
shadow-rs.workspace = true
|
||||
@@ -99,13 +96,11 @@ rustfs-utils = { workspace = true, features = ["full"] }
|
||||
rustfs-rio.workspace = true
|
||||
rustfs-signer.workspace = true
|
||||
rustfs-checksums.workspace = true
|
||||
futures-util.workspace = true
|
||||
async-recursion.workspace = true
|
||||
aws-credential-types = "1.2.6"
|
||||
aws-smithy-types = "1.3.2"
|
||||
parking_lot = "0.12"
|
||||
moka = { version = "0.12", features = ["future"] }
|
||||
aws-smithy-runtime-api = "1.9.0"
|
||||
aws-credential-types = { workspace = true }
|
||||
aws-smithy-types = { workspace = true }
|
||||
parking_lot = { workspace = true }
|
||||
moka = { workspace = true }
|
||||
|
||||
[target.'cfg(not(windows))'.dependencies]
|
||||
nix = { workspace = true }
|
||||
|
||||
@@ -20,8 +20,8 @@
|
||||
#![allow(clippy::all)]
|
||||
|
||||
use http::HeaderMap;
|
||||
use std::io::Cursor;
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
use s3s::dto::ETag;
|
||||
use std::{collections::HashMap, io::Cursor, sync::Arc};
|
||||
use tokio::io::BufReader;
|
||||
|
||||
use crate::error::ErrorResponse;
|
||||
@@ -148,27 +148,30 @@ pub fn new_getobjectreader(
|
||||
Ok((get_fn, off as i64, length as i64))
|
||||
}
|
||||
|
||||
/// Format an ETag value according to HTTP standards (wrap with quotes if not already wrapped)
|
||||
pub fn format_etag(etag: &str) -> String {
|
||||
if etag.starts_with('"') && etag.ends_with('"') {
|
||||
// Already properly formatted
|
||||
etag.to_string()
|
||||
} else if etag.starts_with("W/\"") && etag.ends_with('"') {
|
||||
// Already a weak ETag, properly formatted
|
||||
etag.to_string()
|
||||
} else {
|
||||
// Need to wrap with quotes
|
||||
format!("\"{}\"", etag)
|
||||
/// Convert a raw stored ETag into the strongly-typed `s3s::dto::ETag`.
|
||||
///
|
||||
/// Supports already quoted (`"abc"`), weak (`W/"abc"`), or plain (`abc`) values.
|
||||
pub fn to_s3s_etag(etag: &str) -> ETag {
|
||||
if let Some(rest) = etag.strip_prefix("W/\"") {
|
||||
if let Some(body) = rest.strip_suffix('"') {
|
||||
return ETag::Weak(body.to_string());
|
||||
}
|
||||
return ETag::Weak(rest.to_string());
|
||||
}
|
||||
|
||||
if let Some(body) = etag.strip_prefix('"').and_then(|rest| rest.strip_suffix('"')) {
|
||||
return ETag::Strong(body.to_string());
|
||||
}
|
||||
|
||||
ETag::Strong(etag.to_string())
|
||||
}
|
||||
|
||||
pub fn extract_etag(metadata: &HashMap<String, String>) -> String {
|
||||
let etag = if let Some(etag) = metadata.get("etag") {
|
||||
etag.clone()
|
||||
} else {
|
||||
metadata["md5Sum"].clone()
|
||||
};
|
||||
format_etag(&etag)
|
||||
pub fn get_raw_etag(metadata: &HashMap<String, String>) -> String {
|
||||
metadata
|
||||
.get("etag")
|
||||
.cloned()
|
||||
.or_else(|| metadata.get("md5Sum").cloned())
|
||||
.unwrap_or_default()
|
||||
}
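For context, a small sketch of how get_raw_etag and to_s3s_etag above are meant to compose when building a response; response_etag is an illustrative helper, not code from this change:

// Sketch: turn stored object metadata into the typed ETag used in s3s responses.
fn response_etag(metadata: &HashMap<String, String>) -> ETag {
    // Raw stored value: "abc123", "\"abc123\"" or "W/\"abc123\"".
    let raw = get_raw_etag(metadata);
    // Normalized into ETag::Strong("abc123") or ETag::Weak("abc123").
    to_s3s_etag(&raw)
}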
|
||||
|
||||
#[cfg(test)]
|
||||
@@ -176,30 +179,28 @@ mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_format_etag() {
|
||||
// Test unquoted ETag - should add quotes
|
||||
assert_eq!(format_etag("6af8d12c0c74b78094884349f3c8a079"), "\"6af8d12c0c74b78094884349f3c8a079\"");
|
||||
|
||||
// Test already quoted ETag - should not double quote
|
||||
fn test_to_s3s_etag() {
|
||||
// Test unquoted ETag - should become strong etag
|
||||
assert_eq!(
|
||||
format_etag("\"6af8d12c0c74b78094884349f3c8a079\""),
|
||||
"\"6af8d12c0c74b78094884349f3c8a079\""
|
||||
to_s3s_etag("6af8d12c0c74b78094884349f3c8a079"),
|
||||
ETag::Strong("6af8d12c0c74b78094884349f3c8a079".to_string())
|
||||
);
|
||||
|
||||
// Test weak ETag - should keep as is
|
||||
assert_eq!(
|
||||
format_etag("W/\"6af8d12c0c74b78094884349f3c8a079\""),
|
||||
"W/\"6af8d12c0c74b78094884349f3c8a079\""
|
||||
to_s3s_etag("\"6af8d12c0c74b78094884349f3c8a079\""),
|
||||
ETag::Strong("6af8d12c0c74b78094884349f3c8a079".to_string())
|
||||
);
|
||||
|
||||
// Test empty ETag - should add quotes
|
||||
assert_eq!(format_etag(""), "\"\"");
|
||||
assert_eq!(
|
||||
to_s3s_etag("W/\"6af8d12c0c74b78094884349f3c8a079\""),
|
||||
ETag::Weak("6af8d12c0c74b78094884349f3c8a079".to_string())
|
||||
);
|
||||
|
||||
// Test malformed quote (only starting quote) - should wrap properly
|
||||
assert_eq!(format_etag("\"incomplete"), "\"\"incomplete\"");
|
||||
assert_eq!(to_s3s_etag(""), ETag::Strong(String::new()));
|
||||
|
||||
// Test malformed quote (only ending quote) - should wrap properly
|
||||
assert_eq!(format_etag("incomplete\""), "\"incomplete\"\"");
|
||||
assert_eq!(to_s3s_etag("\"incomplete"), ETag::Strong("\"incomplete".to_string()));
|
||||
|
||||
assert_eq!(to_s3s_etag("incomplete\""), ETag::Strong("incomplete\"".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
@@ -208,15 +209,17 @@ mod tests {
|
||||
|
||||
// Test with etag field
|
||||
metadata.insert("etag".to_string(), "abc123".to_string());
|
||||
assert_eq!(extract_etag(&metadata), "\"abc123\"");
|
||||
assert_eq!(get_raw_etag(&metadata), "abc123");
|
||||
|
||||
// Test with already quoted etag field
|
||||
metadata.insert("etag".to_string(), "\"def456\"".to_string());
|
||||
assert_eq!(extract_etag(&metadata), "\"def456\"");
|
||||
assert_eq!(get_raw_etag(&metadata), "\"def456\"");
|
||||
|
||||
// Test fallback to md5Sum
|
||||
metadata.remove("etag");
|
||||
metadata.insert("md5Sum".to_string(), "xyz789".to_string());
|
||||
assert_eq!(extract_etag(&metadata), "\"xyz789\"");
|
||||
assert_eq!(get_raw_etag(&metadata), "xyz789");
|
||||
|
||||
metadata.clear();
|
||||
assert_eq!(get_raw_etag(&metadata), "");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -167,19 +167,8 @@ async fn write_data_blocks<W>(
|
||||
where
|
||||
W: tokio::io::AsyncWrite + Send + Sync + Unpin,
|
||||
{
|
||||
let available = get_data_block_len(en_blocks, data_blocks);
|
||||
if available < length {
|
||||
let block_sizes: Vec<usize> = en_blocks
|
||||
.iter()
|
||||
.take(data_blocks)
|
||||
.map(|block| block.as_ref().map(|buf| buf.len()).unwrap_or(0))
|
||||
.collect();
|
||||
error!(
|
||||
expected = length,
|
||||
available,
|
||||
?block_sizes,
|
||||
"write_data_blocks get_data_block_len < length"
|
||||
);
|
||||
if get_data_block_len(en_blocks, data_blocks) < length {
|
||||
error!("write_data_blocks get_data_block_len < length");
|
||||
return Err(io::Error::new(ErrorKind::UnexpectedEof, "Not enough data blocks to write"));
|
||||
}
|
||||
|
||||
|
||||
@@ -33,7 +33,6 @@ pub mod file_cache;
|
||||
pub mod global;
|
||||
pub mod metrics_realtime;
|
||||
pub mod notification_sys;
|
||||
pub mod object_append;
|
||||
pub mod pools;
|
||||
pub mod rebalance;
|
||||
pub mod rpc;
|
||||
|
||||
@@ -1,725 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::bitrot::{create_bitrot_reader, create_bitrot_writer};
|
||||
use crate::erasure_coding::{Erasure, calc_shard_size};
|
||||
use crate::error::{Error, StorageError};
|
||||
use crate::store_api::ObjectInfo;
|
||||
use rustfs_filemeta::TRANSITION_COMPLETE;
|
||||
use rustfs_utils::HashAlgorithm;
|
||||
use rustfs_utils::http::headers::{
|
||||
AMZ_SERVER_SIDE_ENCRYPTION, AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM, AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY,
|
||||
AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5, AMZ_SERVER_SIDE_ENCRYPTION_KMS_CONTEXT, AMZ_SERVER_SIDE_ENCRYPTION_KMS_ID,
|
||||
RESERVED_METADATA_PREFIX_LOWER,
|
||||
};
|
||||
use std::collections::HashSet;
|
||||
|
||||
/// Ensure the target object can accept append writes under current state.
|
||||
pub fn validate_append_preconditions(bucket: &str, object: &str, info: &ObjectInfo) -> Result<(), Error> {
|
||||
if info.is_compressed() {
|
||||
return Err(StorageError::InvalidArgument(
|
||||
bucket.to_string(),
|
||||
object.to_string(),
|
||||
"append is not supported for compressed objects".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
let encryption_headers = [
|
||||
AMZ_SERVER_SIDE_ENCRYPTION,
|
||||
AMZ_SERVER_SIDE_ENCRYPTION_KMS_ID,
|
||||
AMZ_SERVER_SIDE_ENCRYPTION_KMS_CONTEXT,
|
||||
AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_ALGORITHM,
|
||||
AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY,
|
||||
AMZ_SERVER_SIDE_ENCRYPTION_CUSTOMER_KEY_MD5,
|
||||
];
|
||||
|
||||
if encryption_headers
|
||||
.iter()
|
||||
.any(|header| info.user_defined.contains_key(*header) || info.user_defined.contains_key(&header.to_ascii_lowercase()))
|
||||
{
|
||||
return Err(StorageError::InvalidArgument(
|
||||
bucket.to_string(),
|
||||
object.to_string(),
|
||||
"append is not supported for encrypted objects".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
if info.transitioned_object.status == TRANSITION_COMPLETE || !info.transitioned_object.tier.is_empty() {
|
||||
return Err(StorageError::InvalidArgument(
|
||||
bucket.to_string(),
|
||||
object.to_string(),
|
||||
"append is not supported for transitioned objects".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Validate that the requested append position matches the current object length.
|
||||
pub fn validate_append_position(bucket: &str, object: &str, info: &ObjectInfo, expected_position: i64) -> Result<(), Error> {
|
||||
if expected_position != info.size {
|
||||
return Err(StorageError::InvalidArgument(
|
||||
bucket.to_string(),
|
||||
object.to_string(),
|
||||
format!("append position mismatch: provided {}, expected {}", expected_position, info.size),
|
||||
));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub struct InlineAppendContext<'a> {
|
||||
pub existing_inline: Option<&'a [u8]>,
|
||||
pub existing_plain: Option<&'a [u8]>,
|
||||
pub existing_size: i64,
|
||||
pub append_payload: &'a [u8],
|
||||
pub erasure: &'a Erasure,
|
||||
pub hash_algorithm: HashAlgorithm,
|
||||
pub has_checksums: bool,
|
||||
}
|
||||
|
||||
pub struct InlineAppendResult {
|
||||
pub inline_data: Vec<u8>,
|
||||
pub total_size: i64,
|
||||
pub etag: String,
|
||||
}
|
||||
|
||||
/// Decode inline payload using available checksum algorithms. Returns raw bytes when decoding fails but
|
||||
/// the inline buffer already contains the plain payload.
|
||||
pub async fn decode_inline_payload(
|
||||
inline: &[u8],
|
||||
size: usize,
|
||||
erasure: &Erasure,
|
||||
preferred: HashAlgorithm,
|
||||
) -> Result<(Vec<u8>, HashAlgorithm), Error> {
|
||||
match decode_inline_variants(inline, size, erasure, preferred).await {
|
||||
Ok((data, algo)) => Ok((data, algo)),
|
||||
Err(err) => {
|
||||
if inline.len() >= size {
|
||||
Ok((inline[..size].to_vec(), HashAlgorithm::None))
|
||||
} else {
|
||||
Err(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Append data to an inline object and return the re-encoded inline buffer.
|
||||
pub async fn append_inline_data(ctx: InlineAppendContext<'_>) -> Result<InlineAppendResult, Error> {
|
||||
let mut plain = Vec::with_capacity(ctx.existing_inline.map(|data| data.len()).unwrap_or(0) + ctx.append_payload.len());
|
||||
let mut encode_algorithm = ctx.hash_algorithm.clone();
|
||||
|
||||
if let Some(existing_plain) = ctx.existing_plain {
|
||||
if existing_plain.len() != ctx.existing_size as usize {
|
||||
return Err(StorageError::other("existing plain payload length mismatch"));
|
||||
}
|
||||
plain.extend_from_slice(existing_plain);
|
||||
} else if ctx.existing_size > 0 {
|
||||
let inline = ctx
|
||||
.existing_inline
|
||||
.ok_or_else(|| StorageError::other("inline payload missing"))?;
|
||||
|
||||
let (decoded, detected_algo) =
|
||||
decode_inline_payload(inline, ctx.existing_size as usize, ctx.erasure, ctx.hash_algorithm.clone()).await?;
|
||||
encode_algorithm = detected_algo;
|
||||
plain.extend_from_slice(&decoded);
|
||||
} else if let Some(inline) = ctx.existing_inline {
|
||||
plain.extend_from_slice(inline);
|
||||
}
|
||||
|
||||
plain.extend_from_slice(ctx.append_payload);
|
||||
let total_size = plain.len() as i64;
|
||||
let etag = md5_hex(&plain);
|
||||
|
||||
if encode_algorithm == HashAlgorithm::None {
|
||||
if ctx.has_checksums {
|
||||
encode_algorithm = ctx.hash_algorithm.clone();
|
||||
} else {
|
||||
return Ok(InlineAppendResult {
|
||||
inline_data: plain,
|
||||
total_size,
|
||||
etag,
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
let mut writer = create_bitrot_writer(
|
||||
true,
|
||||
None,
|
||||
"",
|
||||
"",
|
||||
ctx.erasure.shard_file_size(total_size),
|
||||
ctx.erasure.shard_size(),
|
||||
encode_algorithm,
|
||||
)
|
||||
.await
|
||||
.map_err(|e| StorageError::other(format!("failed to create inline writer: {e}")))?;
|
||||
|
||||
let mut remaining = plain.as_slice();
|
||||
while !remaining.is_empty() {
|
||||
let chunk_len = remaining.len().min(ctx.erasure.block_size);
|
||||
writer
|
||||
.write(&remaining[..chunk_len])
|
||||
.await
|
||||
.map_err(|e| StorageError::other(format!("failed to write inline data: {e}")))?;
|
||||
remaining = &remaining[chunk_len..];
|
||||
}
|
||||
|
||||
writer
|
||||
.shutdown()
|
||||
.await
|
||||
.map_err(|e| StorageError::other(format!("failed to finalize inline writer: {e}")))?;
|
||||
|
||||
let inline_data = writer
|
||||
.into_inline_data()
|
||||
.ok_or_else(|| StorageError::other("inline writer did not return data"))?;
|
||||
|
||||
Ok(InlineAppendResult {
|
||||
inline_data,
|
||||
total_size,
|
||||
etag,
|
||||
})
|
||||
}
|
||||
|
||||
fn md5_hex(data: &[u8]) -> String {
|
||||
let digest = HashAlgorithm::Md5.hash_encode(data);
|
||||
hex_from_bytes(digest.as_ref())
|
||||
}
|
||||
|
||||
fn hex_from_bytes(bytes: &[u8]) -> String {
|
||||
let mut out = String::with_capacity(bytes.len() * 2);
|
||||
for byte in bytes {
|
||||
use std::fmt::Write;
|
||||
write!(&mut out, "{:02x}", byte).expect("write hex");
|
||||
}
|
||||
out
|
||||
}
|
||||
|
||||
async fn decode_inline_variants(
|
||||
inline: &[u8],
|
||||
size: usize,
|
||||
erasure: &Erasure,
|
||||
preferred: HashAlgorithm,
|
||||
) -> Result<(Vec<u8>, HashAlgorithm), Error> {
|
||||
let mut tried = HashSet::new();
|
||||
let candidates = [preferred, HashAlgorithm::HighwayHash256, HashAlgorithm::HighwayHash256S];
|
||||
|
||||
let mut last_err: Option<Error> = None;
|
||||
|
||||
for algo in candidates {
|
||||
if !tried.insert(algo.clone()) {
|
||||
continue;
|
||||
}
|
||||
|
||||
match decode_inline_with_algo(inline, size, erasure, algo.clone()).await {
|
||||
Ok(data) => return Ok((data, algo)),
|
||||
Err(err) => last_err = Some(err),
|
||||
}
|
||||
}
|
||||
|
||||
Err(last_err.unwrap_or_else(|| StorageError::other("failed to decode inline data")))
|
||||
}
|
||||
|
||||
async fn decode_inline_with_algo(inline: &[u8], size: usize, erasure: &Erasure, algo: HashAlgorithm) -> Result<Vec<u8>, Error> {
|
||||
let total_len = inline
|
||||
.len()
|
||||
.max(erasure.shard_file_size(size as i64).max(size as i64) as usize);
|
||||
let mut reader = create_bitrot_reader(Some(inline), None, "", "", 0, total_len, erasure.shard_size(), algo)
|
||||
.await
|
||||
.map_err(|e| StorageError::other(format!("failed to create inline reader: {e}")))?
|
||||
.ok_or_else(|| StorageError::other("inline reader unavailable"))?;
|
||||
|
||||
let mut out = Vec::with_capacity(size);
|
||||
while out.len() < size {
|
||||
let remaining = size - out.len();
|
||||
let plain_chunk = remaining.min(erasure.block_size);
|
||||
let shard_payload = calc_shard_size(plain_chunk, erasure.data_shards).max(1);
|
||||
let mut buf = vec![0u8; shard_payload];
|
||||
let read = reader
|
||||
.read(&mut buf)
|
||||
.await
|
||||
.map_err(|e| StorageError::other(format!("failed to read inline data: {e}")))?;
|
||||
if read == 0 {
|
||||
return Err(StorageError::other("incomplete inline data read"));
|
||||
}
|
||||
|
||||
let copy_len = remaining.min(read);
|
||||
out.extend_from_slice(&buf[..copy_len]);
|
||||
}
|
||||
|
||||
Ok(out)
|
||||
}
|
||||
|
||||
/// Background task to spill inline data to segmented format
|
||||
pub struct InlineSpillProcessor {
|
||||
pub disks: Vec<Option<crate::disk::DiskStore>>,
|
||||
pub write_quorum: usize,
|
||||
}
|
||||
|
||||
impl InlineSpillProcessor {
|
||||
pub fn new(disks: Vec<Option<crate::disk::DiskStore>>, write_quorum: usize) -> Self {
|
||||
Self { disks, write_quorum }
|
||||
}
|
||||
|
||||
/// Process a single spill operation from InlinePendingSpill to SegmentedActive
|
||||
pub async fn process_spill(
|
||||
&self,
|
||||
bucket: &str,
|
||||
object: &str,
|
||||
mut fi: rustfs_filemeta::FileInfo,
|
||||
mut parts_metadata: Vec<rustfs_filemeta::FileInfo>,
|
||||
epoch: u64,
|
||||
) -> Result<(), Error> {
|
||||
use rustfs_filemeta::AppendStateKind;
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
// Verify we're in the correct state
|
||||
let current_state = fi.get_append_state();
|
||||
if current_state.state != AppendStateKind::InlinePendingSpill {
|
||||
warn!(
|
||||
bucket = bucket,
|
||||
object = object,
|
||||
current_state = ?current_state.state,
|
||||
"Spill processor called on object not in InlinePendingSpill state"
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// Check epoch to ensure we're processing the correct version
|
||||
if current_state.epoch != epoch {
|
||||
debug!(
|
||||
bucket = bucket,
|
||||
object = object,
|
||||
current_epoch = current_state.epoch,
|
||||
expected_epoch = epoch,
|
||||
"Spill operation skipped due to epoch mismatch"
|
||||
);
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
info!(
|
||||
bucket = bucket,
|
||||
object = object,
|
||||
size = fi.size,
|
||||
epoch = epoch,
|
||||
"Starting inline data spill to segmented format"
|
||||
);
|
||||
|
||||
// Extract inline data
|
||||
let inline_data = fi
|
||||
.data
|
||||
.clone()
|
||||
.ok_or_else(|| StorageError::other("Cannot spill object without inline data"))?;
|
||||
|
||||
// Create erasure encoder
|
||||
let erasure = Erasure::new(fi.erasure.data_blocks, fi.erasure.parity_blocks, fi.erasure.block_size);
|
||||
|
||||
// Decode inline data to plain data
|
||||
let hash_algorithm = fi
|
||||
.parts
|
||||
.first()
|
||||
.map(|part| fi.erasure.get_checksum_info(part.number).algorithm)
|
||||
.unwrap_or(HashAlgorithm::HighwayHash256);
|
||||
|
||||
let plain_data = match decode_inline_payload(&inline_data, fi.size as usize, &erasure, hash_algorithm.clone()).await {
|
||||
Ok((plain, _detected_algo)) => plain,
|
||||
Err(err) => {
|
||||
error!(
|
||||
bucket = bucket,
|
||||
object = object,
|
||||
error = ?err,
|
||||
"Failed to decode inline data during spill"
|
||||
);
|
||||
return Err(StorageError::other(format!("Failed to decode inline data for spill: {err}")));
|
||||
}
|
||||
};
|
||||
|
||||
// Generate data directory for the object
|
||||
let data_dir = uuid::Uuid::new_v4();
|
||||
|
||||
// Create temporary directory for the spill operation
|
||||
let tmp_root = format!("{}x{}", uuid::Uuid::new_v4(), time::OffsetDateTime::now_utc().unix_timestamp());
|
||||
let tmp_path = format!("{tmp_root}/{}/part.1", data_dir);
|
||||
|
||||
// Encode and write the data to all disks
|
||||
match self.write_segmented_data(&plain_data, &tmp_path, &erasure).await {
|
||||
Ok(_) => {
|
||||
// Move from temp to permanent location
|
||||
let final_path = format!("{}/part.1", data_dir);
|
||||
if let Err(err) = self.move_temp_to_final(&tmp_path, &final_path).await {
|
||||
error!(
|
||||
bucket = bucket,
|
||||
object = object,
|
||||
error = ?err,
|
||||
"Failed to move spilled data to final location"
|
||||
);
|
||||
// Clean up temp files
|
||||
let _ = self.cleanup_temp_files(&tmp_path).await;
|
||||
return Err(err);
|
||||
}
|
||||
|
||||
// Update file metadata
|
||||
fi.data_dir = Some(data_dir);
|
||||
fi.data = None; // Remove inline data
|
||||
fi.metadata.remove(&format!("{}inline-data", RESERVED_METADATA_PREFIX_LOWER));
|
||||
|
||||
// Update append state to SegmentedActive
|
||||
let mut new_state = current_state;
|
||||
new_state.state = AppendStateKind::SegmentedActive;
|
||||
new_state.epoch = new_state.epoch.saturating_add(1);
|
||||
new_state.pending_segments.clear();
|
||||
|
||||
fi.set_append_state(&new_state)
|
||||
.map_err(|err| StorageError::other(format!("Failed to update append state after spill: {err}")))?;
|
||||
|
||||
// Update all parts metadata
|
||||
for meta in parts_metadata.iter_mut() {
|
||||
if !meta.is_valid() {
|
||||
continue;
|
||||
}
|
||||
meta.data_dir = Some(data_dir);
|
||||
meta.data = None;
|
||||
meta.metadata = fi.metadata.clone();
|
||||
meta.metadata
|
||||
.remove(&format!("{}inline-data", RESERVED_METADATA_PREFIX_LOWER));
|
||||
}
|
||||
|
||||
// Write updated metadata back to disks
|
||||
// TODO: Implement metadata write-back logic
|
||||
// This would typically involve writing the updated FileInfo to all disks
|
||||
|
||||
info!(
|
||||
bucket = bucket,
|
||||
object = object,
|
||||
data_dir = ?data_dir,
|
||||
new_epoch = new_state.epoch,
|
||||
"Successfully spilled inline data to segmented format"
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
Err(err) => {
|
||||
error!(
|
||||
bucket = bucket,
|
||||
object = object,
|
||||
error = ?err,
|
||||
"Failed to write segmented data during spill"
|
||||
);
|
||||
// Clean up temp files
|
||||
let _ = self.cleanup_temp_files(&tmp_path).await;
|
||||
Err(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn write_segmented_data(&self, data: &[u8], tmp_path: &str, _erasure: &Erasure) -> Result<(), Error> {
|
||||
use tracing::debug;
|
||||
|
||||
// TODO: Implement proper erasure encoding and writing to disks
|
||||
// This is a placeholder implementation
|
||||
debug!(
|
||||
data_len = data.len(),
|
||||
path = tmp_path,
|
||||
"Writing segmented data (placeholder implementation)"
|
||||
);
|
||||
|
||||
// For now, just return success - full implementation would:
|
||||
// 1. Create bitrot writers for each disk
|
||||
// 2. Erasure encode the data
|
||||
// 3. Write each shard to its corresponding disk
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn move_temp_to_final(&self, tmp_path: &str, final_path: &str) -> Result<(), Error> {
|
||||
use tracing::debug;
|
||||
|
||||
// TODO: Implement moving temp files to final location
|
||||
debug!(
|
||||
tmp_path = tmp_path,
|
||||
final_path = final_path,
|
||||
"Moving temp files to final location (placeholder)"
|
||||
);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn cleanup_temp_files(&self, tmp_path: &str) -> Result<(), Error> {
|
||||
use tracing::debug;
|
||||
|
||||
// TODO: Implement temp file cleanup
|
||||
debug!(tmp_path = tmp_path, "Cleaning up temp files (placeholder)");
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Trigger background spill processing for an object
|
||||
pub fn trigger_spill_process(
|
||||
bucket: String,
|
||||
object: String,
|
||||
fi: rustfs_filemeta::FileInfo,
|
||||
parts_metadata: Vec<rustfs_filemeta::FileInfo>,
|
||||
epoch: u64,
|
||||
disks: Vec<Option<crate::disk::DiskStore>>,
|
||||
write_quorum: usize,
|
||||
) {
|
||||
use tracing::error;
|
||||
|
||||
tokio::spawn(async move {
|
||||
let processor = InlineSpillProcessor::new(disks, write_quorum);
|
||||
if let Err(err) = processor.process_spill(&bucket, &object, fi, parts_metadata, epoch).await {
|
||||
error!(
|
||||
bucket = bucket,
|
||||
object = object,
|
||||
epoch = epoch,
|
||||
error = ?err,
|
||||
"Background spill process failed"
|
||||
);
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use rustfs_utils::HashAlgorithm;
|
||||
|
||||
fn make_object_info() -> ObjectInfo {
|
||||
ObjectInfo {
|
||||
bucket: "test-bucket".to_string(),
|
||||
name: "obj".to_string(),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn rejects_compressed_objects() {
|
||||
let mut info = make_object_info();
|
||||
info.user_defined
|
||||
.insert(format!("{RESERVED_METADATA_PREFIX_LOWER}compression"), "zstd".to_string());
|
||||
|
||||
let err = validate_append_preconditions("test-bucket", "obj", &info).unwrap_err();
|
||||
matches!(err, StorageError::InvalidArgument(..))
|
||||
.then_some(())
|
||||
.expect("expected invalid argument");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn rejects_encrypted_objects() {
|
||||
let mut info = make_object_info();
|
||||
info.user_defined
|
||||
.insert("x-amz-server-side-encryption".to_string(), "AES256".to_string());
|
||||
|
||||
let err = validate_append_preconditions("test-bucket", "obj", &info).unwrap_err();
|
||||
matches!(err, StorageError::InvalidArgument(..))
|
||||
.then_some(())
|
||||
.expect("expected invalid argument");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn rejects_transitioned_objects() {
|
||||
let mut info = make_object_info();
|
||||
info.transitioned_object.tier = "GLACIER".to_string();
|
||||
info.transitioned_object.status = TRANSITION_COMPLETE.to_string();
|
||||
|
||||
let err = validate_append_preconditions("test-bucket", "obj", &info).unwrap_err();
|
||||
matches!(err, StorageError::InvalidArgument(..))
|
||||
.then_some(())
|
||||
.expect("expected invalid argument");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn accepts_plain_objects() {
|
||||
let info = make_object_info();
|
||||
validate_append_preconditions("test-bucket", "obj", &info).expect("append should be allowed");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn rejects_position_mismatch() {
|
||||
let mut info = make_object_info();
|
||||
info.size = 10;
|
||||
let err = validate_append_position("test-bucket", "obj", &info, 5).unwrap_err();
|
||||
matches!(err, StorageError::InvalidArgument(..))
|
||||
.then_some(())
|
||||
.expect("expected invalid argument");
|
||||
}
|
||||
|
||||
fn make_inline_erasure() -> Erasure {
|
||||
Erasure::new(1, 0, 1024)
|
||||
}
|
||||
|
||||
async fn encode_inline(data: &[u8], erasure: &Erasure) -> Vec<u8> {
|
||||
let mut writer = create_bitrot_writer(
|
||||
true,
|
||||
None,
|
||||
"",
|
||||
"",
|
||||
erasure.shard_file_size(data.len() as i64),
|
||||
erasure.shard_size(),
|
||||
HashAlgorithm::HighwayHash256,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
let mut remaining = data;
|
||||
while !remaining.is_empty() {
|
||||
let chunk_len = remaining.len().min(erasure.block_size);
|
||||
writer.write(&remaining[..chunk_len]).await.unwrap();
|
||||
remaining = &remaining[chunk_len..];
|
||||
}
|
||||
|
||||
writer.shutdown().await.unwrap();
|
||||
writer.into_inline_data().unwrap()
|
||||
}
|
||||
|
||||
async fn decode_inline(encoded: &[u8], size: usize, erasure: &Erasure) -> Vec<u8> {
|
||||
let mut reader =
|
||||
create_bitrot_reader(Some(encoded), None, "", "", 0, size, erasure.shard_size(), HashAlgorithm::HighwayHash256)
|
||||
.await
|
||||
.unwrap()
|
||||
.unwrap();
|
||||
|
||||
let mut out = Vec::with_capacity(size);
|
||||
while out.len() < size {
|
||||
let remaining = size - out.len();
|
||||
let mut buf = vec![0u8; erasure.block_size.min(remaining.max(1))];
|
||||
let read = reader.read(&mut buf).await.unwrap();
|
||||
if read == 0 {
|
||||
break;
|
||||
}
|
||||
out.extend_from_slice(&buf[..read.min(remaining)]);
|
||||
}
|
||||
out
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn append_inline_combines_payloads() {
|
||||
let erasure = make_inline_erasure();
|
||||
let existing_plain = b"hello";
|
||||
let encoded = encode_inline(existing_plain, &erasure).await;
|
||||
|
||||
let ctx = InlineAppendContext {
|
||||
existing_inline: Some(&encoded),
|
||||
existing_plain: None,
|
||||
existing_size: existing_plain.len() as i64,
|
||||
append_payload: b" world",
|
||||
erasure: &erasure,
|
||||
hash_algorithm: HashAlgorithm::HighwayHash256,
|
||||
has_checksums: true,
|
||||
};
|
||||
|
||||
let result = append_inline_data(ctx).await.expect("inline append to succeed");
|
||||
assert_eq!(result.total_size, 11);
|
||||
assert_eq!(result.etag, md5_hex(b"hello world"));
|
||||
|
||||
let decoded = decode_inline(&result.inline_data, result.total_size as usize, &erasure).await;
|
||||
assert_eq!(decoded, b"hello world");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn decode_inline_handles_padded_shards() {
|
||||
let erasure = Erasure::new(1, 0, 1024);
|
||||
let plain = b"hello";
|
||||
|
||||
let mut padded = vec![0u8; calc_shard_size(plain.len(), erasure.data_shards)];
|
||||
padded[..plain.len()].copy_from_slice(plain);
|
||||
|
||||
let mut writer = create_bitrot_writer(
|
||||
true,
|
||||
None,
|
||||
"",
|
||||
"",
|
||||
erasure.shard_file_size(plain.len() as i64),
|
||||
erasure.shard_size(),
|
||||
HashAlgorithm::HighwayHash256,
|
||||
)
|
||||
.await
|
||||
.unwrap();
|
||||
|
||||
writer.write(&padded).await.unwrap();
|
||||
writer.shutdown().await.unwrap();
|
||||
let inline = writer.into_inline_data().unwrap();
|
||||
|
||||
let (decoded, algo) = decode_inline_payload(&inline, plain.len(), &erasure, HashAlgorithm::HighwayHash256)
|
||||
.await
|
||||
.expect("inline decode should succeed");
|
||||
|
||||
assert_eq!(decoded, plain);
|
||||
assert_eq!(algo, HashAlgorithm::HighwayHash256);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn append_inline_handles_empty_original() {
|
||||
let erasure = make_inline_erasure();
|
||||
let ctx = InlineAppendContext {
|
||||
existing_inline: None,
|
||||
existing_plain: None,
|
||||
existing_size: 0,
|
||||
append_payload: b"data",
|
||||
erasure: &erasure,
|
||||
hash_algorithm: HashAlgorithm::HighwayHash256,
|
||||
has_checksums: true,
|
||||
};
|
||||
|
||||
let result = append_inline_data(ctx).await.expect("inline append to succeed");
|
||||
assert_eq!(result.total_size, 4);
|
||||
assert_eq!(result.etag, md5_hex(b"data"));
|
||||
|
||||
let decoded = decode_inline(&result.inline_data, result.total_size as usize, &erasure).await;
|
||||
assert_eq!(decoded, b"data");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn append_inline_without_checksums_uses_raw_bytes() {
|
||||
let erasure = Erasure::new(1, 0, 1024);
|
||||
let existing = b"hello";
|
||||
|
||||
let ctx = InlineAppendContext {
|
||||
existing_inline: Some(existing),
|
||||
existing_plain: None,
|
||||
existing_size: existing.len() as i64,
|
||||
append_payload: b" world",
|
||||
erasure: &erasure,
|
||||
hash_algorithm: HashAlgorithm::HighwayHash256,
|
||||
has_checksums: false,
|
||||
};
|
||||
|
||||
let result = append_inline_data(ctx).await.expect("inline append to succeed");
|
||||
assert_eq!(result.total_size, 11);
|
||||
assert_eq!(result.etag, md5_hex(b"hello world"));
|
||||
|
||||
assert_eq!(result.inline_data, b"hello world");
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn append_inline_decodes_bitrot_without_checksums() {
|
||||
let erasure = Erasure::new(1, 0, 1024);
|
||||
let existing_plain = b"hello";
|
||||
let encoded = encode_inline(existing_plain, &erasure).await;
|
||||
|
||||
let ctx = InlineAppendContext {
|
||||
existing_inline: Some(&encoded),
|
||||
existing_plain: None,
|
||||
existing_size: existing_plain.len() as i64,
|
||||
append_payload: b" world",
|
||||
erasure: &erasure,
|
||||
hash_algorithm: HashAlgorithm::HighwayHash256,
|
||||
has_checksums: false,
|
||||
};
|
||||
|
||||
let result = append_inline_data(ctx).await.expect("inline append to succeed");
|
||||
assert_eq!(result.total_size, 11);
|
||||
assert_eq!(result.etag, md5_hex(b"hello world"));
|
||||
|
||||
let decoded = decode_inline(&result.inline_data, result.total_size as usize, &erasure).await;
|
||||
assert_eq!(decoded, b"hello world");
|
||||
}
|
||||
}
|
||||
File diff suppressed because it is too large
@@ -602,14 +602,6 @@ impl StorageAPI for Sets {
        (del_objects, del_errs)
    }

    async fn complete_append(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
        self.get_disks_by_key(object).complete_append(bucket, object, opts).await
    }

    async fn abort_append(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
        self.get_disks_by_key(object).abort_append(bucket, object, opts).await
    }

    async fn list_object_parts(
        &self,
        bucket: &str,

@@ -1709,17 +1709,6 @@ impl StorageAPI for ECStore {
        // Ok((del_objects, del_errs))
    }

    async fn complete_append(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
        let object = encode_dir_object(object);
        let (pinfo, _) = self.internal_get_pool_info_existing_with_opts(bucket, &object, opts).await?;
        self.pools[pinfo.index].complete_append(bucket, &object, opts).await
    }

    async fn abort_append(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo> {
        let object = encode_dir_object(object);
        let (pinfo, _) = self.internal_get_pool_info_existing_with_opts(bucket, &object, opts).await?;
        self.pools[pinfo.index].abort_append(bucket, &object, opts).await
    }
    #[tracing::instrument(skip(self))]
    async fn list_object_parts(
        &self,
@@ -247,11 +247,16 @@ impl HTTPRangeSpec {
            return None;
        }

        let mut start = 0i64;
        let mut end = -1i64;
        for i in 0..oi.parts.len().min(part_number) {
        if part_number == 0 || part_number > oi.parts.len() {
            return None;
        }

        let mut start = 0_i64;
        let mut end = -1_i64;
        for i in 0..part_number {
            let part = &oi.parts[i];
            start = end + 1;
            end = start + (oi.parts[i].size as i64) - 1
            end = start + (part.size as i64) - 1;
        }

        Some(HTTPRangeSpec {
@@ -266,8 +271,14 @@ impl HTTPRangeSpec {

        let mut start = self.start;
        if self.is_suffix_length {
            start = res_size + self.start;

            let suffix_len = if self.start < 0 {
                self.start
                    .checked_neg()
                    .ok_or_else(|| Error::other("range value invalid: suffix length overflow"))?
            } else {
                self.start
            };
            start = res_size - suffix_len;
            if start < 0 {
                start = 0;
            }
@@ -280,7 +291,13 @@ impl HTTPRangeSpec {
        }

        if self.is_suffix_length {
            let specified_len = self.start; // assume h.start is an i64
            let specified_len = if self.start < 0 {
                self.start
                    .checked_neg()
                    .ok_or_else(|| Error::other("range value invalid: suffix length overflow"))?
            } else {
                self.start
            };
            let mut range_length = specified_len;

            if specified_len > res_size {
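Two behaviours change in the hunks above: a part-number range is now built by summing the sizes of every part before the requested one (with out-of-range part numbers rejected up front), and a suffix range clamps to the object size while tolerating a negatively stored start via checked_neg. A standalone sketch of that arithmetic, using the same numbers as the new tests further down (plain free functions, not the real HTTPRangeSpec API):

// Byte range of part `part_number` (1-based), given the part sizes.
fn part_range(part_sizes: &[i64], part_number: usize) -> Option<(i64, i64)> {
    if part_number == 0 || part_number > part_sizes.len() {
        return None;
    }
    let mut start = 0_i64;
    let mut end = -1_i64;
    for size in &part_sizes[..part_number] {
        start = end + 1;
        end = start + size - 1;
    }
    Some((start, end))
}

// (offset, length) for a suffix range of `suffix` bytes over an object of `res_size` bytes.
// A negative stored value means "suffix of |value| bytes"; overflow handling is omitted here.
fn suffix_range(res_size: i64, suffix: i64) -> (i64, i64) {
    let suffix_len = if suffix < 0 { suffix.checked_neg().unwrap_or(i64::MAX) } else { suffix };
    let start = (res_size - suffix_len).max(0);
    (start, res_size - start)
}

fn main() {
    // Matches test_http_range_spec_from_object_info_valid_and_invalid_parts.
    assert_eq!(part_range(&[100, 100, 100], 2), Some((100, 199)));
    assert_eq!(part_range(&[100, 100, 100], 0), None);
    // Matches the suffix tests: 5 (or -5) of 20 -> (15, 5); 50 of 20 -> (0, 20).
    assert_eq!(suffix_range(20, 5), (15, 5));
    assert_eq!(suffix_range(20, -5), (15, 5));
    assert_eq!(suffix_range(20, 50), (0, 20));
}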
@@ -328,8 +345,6 @@ pub struct ObjectOptions {
    pub max_parity: bool,
    pub mod_time: Option<OffsetDateTime>,
    pub part_number: Option<usize>,
    pub append_object: bool,
    pub append_position: Option<i64>,

    pub delete_prefix: bool,
    pub delete_prefix_object: bool,
@@ -462,7 +477,7 @@ impl From<s3s::dto::CompletedPart> for CompletePart {
    fn from(value: s3s::dto::CompletedPart) -> Self {
        Self {
            part_num: value.part_number.unwrap_or_default() as usize,
            etag: value.e_tag,
            etag: value.e_tag.map(|e| e.value().to_owned()),
        }
    }
}
@@ -658,15 +673,6 @@ impl ObjectInfo {
            })
            .collect();

        let append_state = fi.get_append_state();
        let pending_length: i64 = append_state.pending_segments.iter().map(|seg| seg.length).sum();
        let logical_size = append_state.committed_length.saturating_add(pending_length);
        let actual_size_meta = fi
            .metadata
            .get(&format!("{RESERVED_METADATA_PREFIX_LOWER}actual-size"))
            .and_then(|o| o.parse::<i64>().ok())
            .unwrap_or(logical_size);

        ObjectInfo {
            bucket: bucket.to_string(),
            name,
@@ -676,7 +682,7 @@ impl ObjectInfo {
            version_id,
            delete_marker: fi.deleted,
            mod_time: fi.mod_time,
            size: logical_size,
            size: fi.size,
            parts,
            is_latest: fi.is_latest,
            user_tags,
@@ -688,7 +694,6 @@ impl ObjectInfo {
            inlined,
            user_defined: metadata,
            transitioned_object,
            actual_size: actual_size_meta,
            ..Default::default()
        }
    }
@@ -1200,10 +1205,6 @@ pub trait StorageAPI: ObjectIO + Debug {
        opts: ObjectOptions,
    ) -> (Vec<DeletedObject>, Vec<Option<Error>>);

    async fn complete_append(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo>;

    async fn abort_append(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo>;

    // TransitionObject TODO:
    // RestoreTransitionedObject TODO:
@@ -1577,6 +1578,83 @@ mod tests {
|
||||
assert_eq!(length, 10); // end - start + 1 = 14 - 5 + 1 = 10
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_http_range_spec_suffix_positive_start() {
|
||||
let range_spec = HTTPRangeSpec {
|
||||
is_suffix_length: true,
|
||||
start: 5,
|
||||
end: -1,
|
||||
};
|
||||
|
||||
let (offset, length) = range_spec.get_offset_length(20).unwrap();
|
||||
assert_eq!(offset, 15);
|
||||
assert_eq!(length, 5);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_http_range_spec_suffix_negative_start() {
|
||||
let range_spec = HTTPRangeSpec {
|
||||
is_suffix_length: true,
|
||||
start: -5,
|
||||
end: -1,
|
||||
};
|
||||
|
||||
let (offset, length) = range_spec.get_offset_length(20).unwrap();
|
||||
assert_eq!(offset, 15);
|
||||
assert_eq!(length, 5);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_http_range_spec_suffix_exceeds_object() {
|
||||
let range_spec = HTTPRangeSpec {
|
||||
is_suffix_length: true,
|
||||
start: 50,
|
||||
end: -1,
|
||||
};
|
||||
|
||||
let (offset, length) = range_spec.get_offset_length(20).unwrap();
|
||||
assert_eq!(offset, 0);
|
||||
assert_eq!(length, 20);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_http_range_spec_from_object_info_valid_and_invalid_parts() {
|
||||
let object_info = ObjectInfo {
|
||||
size: 300,
|
||||
parts: vec![
|
||||
ObjectPartInfo {
|
||||
etag: String::new(),
|
||||
number: 1,
|
||||
size: 100,
|
||||
actual_size: 100,
|
||||
..Default::default()
|
||||
},
|
||||
ObjectPartInfo {
|
||||
etag: String::new(),
|
||||
number: 2,
|
||||
size: 100,
|
||||
actual_size: 100,
|
||||
..Default::default()
|
||||
},
|
||||
ObjectPartInfo {
|
||||
etag: String::new(),
|
||||
number: 3,
|
||||
size: 100,
|
||||
actual_size: 100,
|
||||
..Default::default()
|
||||
},
|
||||
],
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let spec = HTTPRangeSpec::from_object_info(&object_info, 2).unwrap();
|
||||
assert_eq!(spec.start, 100);
|
||||
assert_eq!(spec.end, 199);
|
||||
|
||||
assert!(HTTPRangeSpec::from_object_info(&object_info, 0).is_none());
|
||||
assert!(HTTPRangeSpec::from_object_info(&object_info, 4).is_none());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_ranged_decompress_reader_zero_length() {
|
||||
let original_data = b"Hello, World!";
|
||||
|
||||
@@ -30,7 +30,6 @@ crc32fast = { workspace = true }
rmp.workspace = true
rmp-serde.workspace = true
serde.workspace = true
serde_json.workspace = true
time.workspace = true
uuid = { workspace = true, features = ["v4", "fast-rng", "serde"] }
tokio = { workspace = true, features = ["io-util", "macros", "sync"] }
@@ -1,541 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::error::{Error, Result};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use uuid::Uuid;
|
||||
|
||||
const APPEND_STATE_META_KEY: &str = "x-rustfs-internal-append-state";
|
||||
|
||||
/// Tracks the state of append-enabled objects.
|
||||
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
|
||||
pub struct AppendState {
|
||||
pub state: AppendStateKind,
|
||||
pub epoch: u64,
|
||||
pub committed_length: i64,
|
||||
pub pending_segments: Vec<AppendSegment>,
|
||||
}
|
||||
|
||||
/// Represents individual append segments that still need consolidation.
|
||||
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
|
||||
pub struct AppendSegment {
|
||||
pub offset: i64,
|
||||
pub length: i64,
|
||||
pub data_dir: Option<Uuid>,
|
||||
pub etag: Option<String>,
|
||||
pub epoch: u64,
|
||||
}
|
||||
|
||||
/// Possible append lifecycle states for an object version.
|
||||
#[derive(Clone, Debug, Default, PartialEq, Serialize, Deserialize)]
|
||||
pub enum AppendStateKind {
|
||||
#[default]
|
||||
Disabled,
|
||||
Inline,
|
||||
InlinePendingSpill,
|
||||
SegmentedActive,
|
||||
SegmentedSealed,
|
||||
}
|
||||
|
||||
/// Persist the provided append state into object metadata.
|
||||
pub fn set_append_state(metadata: &mut HashMap<String, String>, state: &AppendState) -> Result<()> {
|
||||
let encoded = serde_json::to_string(state).map_err(Error::other)?;
|
||||
metadata.insert(APPEND_STATE_META_KEY.to_string(), encoded);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Remove the append state marker from metadata.
|
||||
pub fn clear_append_state(metadata: &mut HashMap<String, String>) {
|
||||
metadata.remove(APPEND_STATE_META_KEY);
|
||||
}
|
||||
|
||||
/// Load append state stored in metadata, if any.
|
||||
pub fn get_append_state(metadata: &HashMap<String, String>) -> Result<Option<AppendState>> {
|
||||
let raw = match metadata.get(APPEND_STATE_META_KEY) {
|
||||
Some(val) if !val.is_empty() => val,
|
||||
_ => return Ok(None),
|
||||
};
|
||||
|
||||
let decoded = serde_json::from_str(raw).map_err(Error::other)?;
|
||||
Ok(Some(decoded))
|
||||
}
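get_append_state/set_append_state above persist the state as a JSON string under a reserved metadata key. A minimal self-contained sketch of the same round-trip pattern (DemoState and the key name are illustrative stand-ins; requires serde with the derive feature and serde_json):

use serde::{Deserialize, Serialize};
use std::collections::HashMap;

const DEMO_KEY: &str = "x-demo-internal-append-state";

#[derive(Debug, PartialEq, Serialize, Deserialize)]
struct DemoState {
    epoch: u64,
    committed_length: i64,
}

fn set_state(metadata: &mut HashMap<String, String>, state: &DemoState) -> serde_json::Result<()> {
    // Serialize the struct and stash it as an opaque string value.
    metadata.insert(DEMO_KEY.to_string(), serde_json::to_string(state)?);
    Ok(())
}

fn get_state(metadata: &HashMap<String, String>) -> serde_json::Result<Option<DemoState>> {
    match metadata.get(DEMO_KEY) {
        Some(raw) if !raw.is_empty() => Ok(Some(serde_json::from_str(raw)?)),
        _ => Ok(None),
    }
}

fn main() -> serde_json::Result<()> {
    let mut metadata = HashMap::new();
    let state = DemoState { epoch: 7, committed_length: 4096 };
    set_state(&mut metadata, &state)?;
    assert_eq!(get_state(&metadata)?, Some(state));
    Ok(())
}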
|
||||
|
||||
/// Complete append operations by consolidating pending segments and sealing the object
|
||||
pub fn complete_append_operation(state: &mut AppendState) -> Result<()> {
|
||||
match state.state {
|
||||
AppendStateKind::SegmentedActive => {
|
||||
// Move all pending segments data to main parts and seal
|
||||
state.committed_length += state.pending_segments.iter().map(|s| s.length).sum::<i64>();
|
||||
state.pending_segments.clear();
|
||||
state.state = AppendStateKind::SegmentedSealed;
|
||||
state.epoch = state.epoch.saturating_add(1);
|
||||
Ok(())
|
||||
}
|
||||
AppendStateKind::Inline => {
|
||||
// Inline objects are always immediately committed, just seal them
|
||||
state.state = AppendStateKind::SegmentedSealed; // Transition to sealed
|
||||
state.epoch = state.epoch.saturating_add(1);
|
||||
Ok(())
|
||||
}
|
||||
AppendStateKind::InlinePendingSpill => {
|
||||
// Wait for spill to complete, then seal
|
||||
// In practice, this might need to trigger the spill completion first
|
||||
state.state = AppendStateKind::SegmentedSealed;
|
||||
state.pending_segments.clear();
|
||||
state.epoch = state.epoch.saturating_add(1);
|
||||
Ok(())
|
||||
}
|
||||
AppendStateKind::SegmentedSealed | AppendStateKind::Disabled => {
|
||||
// Already sealed or disabled
|
||||
Err(Error::other("Cannot complete append on sealed or disabled object"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Abort append operations by discarding pending segments and returning to sealed state
|
||||
pub fn abort_append_operation(state: &mut AppendState) -> Result<()> {
|
||||
match state.state {
|
||||
AppendStateKind::SegmentedActive => {
|
||||
// Discard all pending segments and seal
|
||||
state.pending_segments.clear();
|
||||
state.state = AppendStateKind::SegmentedSealed;
|
||||
state.epoch = state.epoch.saturating_add(1);
|
||||
Ok(())
|
||||
}
|
||||
AppendStateKind::Inline => {
|
||||
// Inline data is already committed, just seal
|
||||
state.state = AppendStateKind::SegmentedSealed;
|
||||
state.epoch = state.epoch.saturating_add(1);
|
||||
Ok(())
|
||||
}
|
||||
AppendStateKind::InlinePendingSpill => {
|
||||
// Cancel spill and keep inline data, then seal
|
||||
state.state = AppendStateKind::SegmentedSealed;
|
||||
state.pending_segments.clear();
|
||||
state.epoch = state.epoch.saturating_add(1);
|
||||
Ok(())
|
||||
}
|
||||
AppendStateKind::SegmentedSealed | AppendStateKind::Disabled => {
|
||||
// Already sealed or disabled
|
||||
Err(Error::other("Cannot abort append on sealed or disabled object"))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if an append operation can be completed
|
||||
pub fn can_complete_append(state: &AppendState) -> bool {
|
||||
matches!(
|
||||
state.state,
|
||||
AppendStateKind::Inline | AppendStateKind::InlinePendingSpill | AppendStateKind::SegmentedActive
|
||||
)
|
||||
}
|
||||
|
||||
/// Check if an append operation can be aborted
|
||||
pub fn can_abort_append(state: &AppendState) -> bool {
|
||||
matches!(
|
||||
state.state,
|
||||
AppendStateKind::Inline | AppendStateKind::InlinePendingSpill | AppendStateKind::SegmentedActive
|
||||
)
|
||||
}
|
||||
|
||||
/// Verify epoch for optimistic concurrency control
|
||||
pub fn verify_append_epoch(current_state: &AppendState, expected_epoch: u64) -> Result<()> {
|
||||
if current_state.epoch != expected_epoch {
|
||||
Err(Error::other(format!(
|
||||
"Append operation conflict: expected epoch {}, found {}",
|
||||
expected_epoch, current_state.epoch
|
||||
)))
|
||||
} else {
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Prepare next append operation by incrementing epoch
|
||||
pub fn prepare_next_append(state: &mut AppendState) {
|
||||
state.epoch = state.epoch.saturating_add(1);
|
||||
}
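verify_append_epoch and prepare_next_append above implement optimistic concurrency: a writer presents the epoch it last observed, a mismatch rejects the update, and every successful mutation bumps the epoch with a saturating add. A cut-down sketch of that flow (Versioned is a stand-in, not the real AppendState):

#[derive(Debug)]
struct Versioned {
    epoch: u64,
    committed_length: i64,
}

fn try_extend(state: &mut Versioned, expected_epoch: u64, extra: i64) -> Result<(), String> {
    if state.epoch != expected_epoch {
        return Err(format!("conflict: expected epoch {}, found {}", expected_epoch, state.epoch));
    }
    state.committed_length += extra;
    state.epoch = state.epoch.saturating_add(1); // same saturating bump as prepare_next_append
    Ok(())
}

fn main() {
    let mut state = Versioned { epoch: 10, committed_length: 1000 };
    assert!(try_extend(&mut state, 10, 200).is_ok());
    assert_eq!((state.epoch, state.committed_length), (11, 1200));
    // A second writer still holding epoch 10 is rejected.
    assert!(try_extend(&mut state, 10, 50).is_err());
}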
|
||||
|
||||
/// Validate that a new append segment doesn't conflict with existing segments
|
||||
pub fn validate_new_segment(state: &AppendState, new_offset: i64, new_length: i64) -> Result<()> {
|
||||
let new_end = new_offset + new_length;
|
||||
|
||||
// Check it doesn't overlap with committed data
|
||||
if new_offset < state.committed_length {
|
||||
return Err(Error::other(format!(
|
||||
"New segment overlaps with committed data: offset {} < committed_length {}",
|
||||
new_offset, state.committed_length
|
||||
)));
|
||||
}
|
||||
|
||||
// Check it doesn't overlap with existing pending segments
|
||||
for existing in &state.pending_segments {
|
||||
let existing_start = existing.offset;
|
||||
let existing_end = existing.offset + existing.length;
|
||||
|
||||
// Check for any overlap
|
||||
if new_offset < existing_end && new_end > existing_start {
|
||||
return Err(Error::other(format!(
|
||||
"New segment [{}, {}) overlaps with existing segment [{}, {})",
|
||||
new_offset, new_end, existing_start, existing_end
|
||||
)));
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
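validate_new_segment treats segments as half-open intervals [offset, offset + length): a candidate is rejected if it reaches into committed data or intersects any pending segment, while zero-length or exactly adjacent segments pass. A standalone check of that interval test, using the same numbers as the tests below:

// Two half-open intervals overlap exactly when a_start < b_end && a_end > b_start.
fn overlaps(a: (i64, i64), b: (i64, i64)) -> bool {
    let (a_start, a_len) = a;
    let (b_start, b_len) = b;
    a_start < b_start + b_len && a_start + a_len > b_start
}

fn main() {
    let committed = (0, 1000);
    let pending = (1000, 200);
    assert!(!overlaps((1600, 100), committed) && !overlaps((1600, 100), pending)); // accepted
    assert!(overlaps((900, 200), committed));  // rejected: reaches into committed data
    assert!(overlaps((1100, 100), pending));   // rejected: crosses the pending segment
    assert!(!overlaps((1200, 0), pending));    // zero-length touch is allowed
}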
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::fileinfo::FileInfo;
|
||||
|
||||
#[test]
|
||||
fn append_state_roundtrip_in_metadata() {
|
||||
let mut metadata = HashMap::new();
|
||||
let state = AppendState {
|
||||
state: AppendStateKind::SegmentedActive,
|
||||
epoch: 42,
|
||||
committed_length: 2048,
|
||||
pending_segments: vec![AppendSegment {
|
||||
offset: 2048,
|
||||
length: 512,
|
||||
data_dir: Some(Uuid::new_v4()),
|
||||
etag: Some("abc123".to_string()),
|
||||
epoch: 0,
|
||||
}],
|
||||
};
|
||||
|
||||
set_append_state(&mut metadata, &state).expect("persist append state");
|
||||
assert!(metadata.contains_key(APPEND_STATE_META_KEY));
|
||||
|
||||
let decoded = get_append_state(&metadata)
|
||||
.expect("decode append state")
|
||||
.expect("state present");
|
||||
assert_eq!(decoded, state);
|
||||
|
||||
clear_append_state(&mut metadata);
|
||||
assert!(!metadata.contains_key(APPEND_STATE_META_KEY));
|
||||
assert!(get_append_state(&metadata).unwrap().is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn fileinfo_append_state_migration_compatibility() {
|
||||
// Test old inline data object
|
||||
let mut inline_fi = FileInfo {
|
||||
size: 1024,
|
||||
..Default::default()
|
||||
};
|
||||
inline_fi.set_inline_data();
|
||||
|
||||
let state = inline_fi.get_append_state();
|
||||
assert_eq!(state.state, AppendStateKind::Inline);
|
||||
assert_eq!(state.committed_length, 1024);
|
||||
assert!(state.pending_segments.is_empty());
|
||||
assert!(inline_fi.is_appendable());
|
||||
assert!(!inline_fi.has_pending_appends());
|
||||
|
||||
// Test old regular object
|
||||
let regular_fi = FileInfo {
|
||||
size: 2048,
|
||||
..Default::default()
|
||||
};
|
||||
// No inline_data marker
|
||||
|
||||
let state = regular_fi.get_append_state();
|
||||
assert_eq!(state.state, AppendStateKind::SegmentedSealed);
|
||||
assert_eq!(state.committed_length, 2048);
|
||||
assert!(state.pending_segments.is_empty());
|
||||
assert!(!regular_fi.is_appendable());
|
||||
assert!(!regular_fi.has_pending_appends());
|
||||
|
||||
// Test explicit append state
|
||||
let mut append_fi = FileInfo::default();
|
||||
let explicit_state = AppendState {
|
||||
state: AppendStateKind::SegmentedActive,
|
||||
epoch: 5,
|
||||
committed_length: 1500,
|
||||
pending_segments: vec![AppendSegment {
|
||||
offset: 1500,
|
||||
length: 300,
|
||||
data_dir: Some(Uuid::new_v4()),
|
||||
etag: Some("def456".to_string()),
|
||||
epoch: 0,
|
||||
}],
|
||||
};
|
||||
|
||||
append_fi.set_append_state(&explicit_state).expect("set explicit state");
|
||||
let retrieved_state = append_fi.get_append_state();
|
||||
assert_eq!(retrieved_state, explicit_state);
|
||||
assert!(append_fi.is_appendable());
|
||||
assert!(append_fi.has_pending_appends());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn append_state_transitions() {
|
||||
// Test state transition validation
|
||||
assert_eq!(AppendStateKind::default(), AppendStateKind::Disabled);
|
||||
|
||||
let inline_state = AppendState {
|
||||
state: AppendStateKind::Inline,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let spill_state = AppendState {
|
||||
state: AppendStateKind::InlinePendingSpill,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let active_state = AppendState {
|
||||
state: AppendStateKind::SegmentedActive,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let sealed_state = AppendState {
|
||||
state: AppendStateKind::SegmentedSealed,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// Verify serialization works for all states
|
||||
for state in [inline_state, spill_state, active_state, sealed_state] {
|
||||
let mut metadata = HashMap::new();
|
||||
set_append_state(&mut metadata, &state).expect("serialize state");
|
||||
let decoded = get_append_state(&metadata).unwrap().unwrap();
|
||||
assert_eq!(decoded, state);
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn complete_append_transitions() {
|
||||
// Test completing SegmentedActive with pending segments
|
||||
let mut active_state = AppendState {
|
||||
state: AppendStateKind::SegmentedActive,
|
||||
epoch: 5,
|
||||
committed_length: 1000,
|
||||
pending_segments: vec![
|
||||
AppendSegment {
|
||||
offset: 1000,
|
||||
length: 200,
|
||||
data_dir: Some(Uuid::new_v4()),
|
||||
etag: Some("abc123".to_string()),
|
||||
epoch: 0,
|
||||
},
|
||||
AppendSegment {
|
||||
offset: 1200,
|
||||
length: 300,
|
||||
data_dir: Some(Uuid::new_v4()),
|
||||
etag: Some("def456".to_string()),
|
||||
epoch: 0,
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
assert!(can_complete_append(&active_state));
|
||||
complete_append_operation(&mut active_state).expect("complete should succeed");
|
||||
|
||||
assert_eq!(active_state.state, AppendStateKind::SegmentedSealed);
|
||||
assert_eq!(active_state.committed_length, 1500); // 1000 + 200 + 300
|
||||
assert!(active_state.pending_segments.is_empty());
|
||||
assert_eq!(active_state.epoch, 6);
|
||||
|
||||
// Test completing Inline state
|
||||
let mut inline_state = AppendState {
|
||||
state: AppendStateKind::Inline,
|
||||
epoch: 2,
|
||||
committed_length: 500,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
assert!(can_complete_append(&inline_state));
|
||||
complete_append_operation(&mut inline_state).expect("complete should succeed");
|
||||
|
||||
assert_eq!(inline_state.state, AppendStateKind::SegmentedSealed);
|
||||
assert_eq!(inline_state.committed_length, 500); // Unchanged
|
||||
assert_eq!(inline_state.epoch, 3);
|
||||
|
||||
// Test completing already sealed state should fail
|
||||
let mut sealed_state = AppendState {
|
||||
state: AppendStateKind::SegmentedSealed,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
assert!(!can_complete_append(&sealed_state));
|
||||
assert!(complete_append_operation(&mut sealed_state).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn abort_append_transitions() {
|
||||
// Test aborting SegmentedActive with pending segments
|
||||
let mut active_state = AppendState {
|
||||
state: AppendStateKind::SegmentedActive,
|
||||
epoch: 3,
|
||||
committed_length: 800,
|
||||
pending_segments: vec![AppendSegment {
|
||||
offset: 800,
|
||||
length: 400,
|
||||
data_dir: Some(Uuid::new_v4()),
|
||||
etag: Some("xyz789".to_string()),
|
||||
epoch: 0,
|
||||
}],
|
||||
};
|
||||
|
||||
assert!(can_abort_append(&active_state));
|
||||
abort_append_operation(&mut active_state).expect("abort should succeed");
|
||||
|
||||
assert_eq!(active_state.state, AppendStateKind::SegmentedSealed);
|
||||
assert_eq!(active_state.committed_length, 800); // Unchanged, pending discarded
|
||||
assert!(active_state.pending_segments.is_empty());
|
||||
assert_eq!(active_state.epoch, 4);
|
||||
|
||||
// Test aborting InlinePendingSpill
|
||||
let mut spill_state = AppendState {
|
||||
state: AppendStateKind::InlinePendingSpill,
|
||||
epoch: 1,
|
||||
committed_length: 100,
|
||||
pending_segments: vec![],
|
||||
};
|
||||
|
||||
assert!(can_abort_append(&spill_state));
|
||||
abort_append_operation(&mut spill_state).expect("abort should succeed");
|
||||
|
||||
assert_eq!(spill_state.state, AppendStateKind::SegmentedSealed);
|
||||
assert_eq!(spill_state.committed_length, 100);
|
||||
assert_eq!(spill_state.epoch, 2);
|
||||
|
||||
// Test aborting disabled state should fail
|
||||
let mut disabled_state = AppendState {
|
||||
state: AppendStateKind::Disabled,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
assert!(!can_abort_append(&disabled_state));
|
||||
assert!(abort_append_operation(&mut disabled_state).is_err());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn epoch_validation() {
|
||||
let state = AppendState {
|
||||
state: AppendStateKind::SegmentedActive,
|
||||
epoch: 10,
|
||||
committed_length: 1000,
|
||||
pending_segments: vec![],
|
||||
};
|
||||
|
||||
// Valid epoch should succeed
|
||||
assert!(verify_append_epoch(&state, 10).is_ok());
|
||||
|
||||
// Invalid epoch should fail
|
||||
assert!(verify_append_epoch(&state, 9).is_err());
|
||||
assert!(verify_append_epoch(&state, 11).is_err());
|
||||
|
||||
// Error message should contain epoch information
|
||||
let error = verify_append_epoch(&state, 5).unwrap_err();
|
||||
let error_msg = error.to_string();
|
||||
assert!(error_msg.contains("expected epoch 5"));
|
||||
assert!(error_msg.contains("found 10"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn next_append_preparation() {
|
||||
let mut state = AppendState {
|
||||
state: AppendStateKind::SegmentedActive,
|
||||
epoch: 5,
|
||||
committed_length: 1000,
|
||||
pending_segments: vec![],
|
||||
};
|
||||
|
||||
prepare_next_append(&mut state);
|
||||
assert_eq!(state.epoch, 6);
|
||||
|
||||
// Test saturation behavior
|
||||
let mut max_state = AppendState {
|
||||
epoch: u64::MAX,
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
prepare_next_append(&mut max_state);
|
||||
assert_eq!(max_state.epoch, u64::MAX); // Should saturate, not overflow
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn segment_validation() {
|
||||
let state = AppendState {
|
||||
state: AppendStateKind::SegmentedActive,
|
||||
epoch: 3,
|
||||
committed_length: 1000,
|
||||
pending_segments: vec![
|
||||
AppendSegment {
|
||||
offset: 1000,
|
||||
length: 200,
|
||||
data_dir: Some(Uuid::new_v4()),
|
||||
etag: Some("abc123".to_string()),
|
||||
epoch: 0,
|
||||
},
|
||||
AppendSegment {
|
||||
offset: 1300,
|
||||
length: 300,
|
||||
data_dir: Some(Uuid::new_v4()),
|
||||
etag: Some("def456".to_string()),
|
||||
epoch: 0,
|
||||
},
|
||||
],
|
||||
};
|
||||
|
||||
// Valid segment after existing segments
|
||||
assert!(validate_new_segment(&state, 1600, 100).is_ok());
|
||||
|
||||
// Valid segment filling gap between committed and first pending
|
||||
assert!(validate_new_segment(&state, 1200, 100).is_ok());
|
||||
|
||||
// Invalid segment overlapping with committed data
|
||||
assert!(validate_new_segment(&state, 900, 200).is_err());
|
||||
let error = validate_new_segment(&state, 900, 200).unwrap_err();
|
||||
assert!(error.to_string().contains("overlaps with committed data"));
|
||||
|
||||
// Invalid segment overlapping with first pending segment
|
||||
assert!(validate_new_segment(&state, 1100, 100).is_err());
|
||||
let error = validate_new_segment(&state, 1100, 100).unwrap_err();
|
||||
assert!(error.to_string().contains("overlaps with existing segment"));
|
||||
|
||||
// Invalid segment overlapping with second pending segment
|
||||
assert!(validate_new_segment(&state, 1400, 100).is_err());
|
||||
|
||||
// Edge case: segment exactly touching committed data (should be valid)
|
||||
assert!(validate_new_segment(&state, 1000, 0).is_ok());
|
||||
|
||||
// Edge case: segment exactly touching existing segment (should be valid)
|
||||
assert!(validate_new_segment(&state, 1200, 0).is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn segment_validation_edge_cases() {
|
||||
let empty_state = AppendState {
|
||||
state: AppendStateKind::SegmentedActive,
|
||||
epoch: 1,
|
||||
committed_length: 500,
|
||||
pending_segments: vec![],
|
||||
};
|
||||
|
||||
// First segment after committed data
|
||||
assert!(validate_new_segment(&empty_state, 500, 100).is_ok());
|
||||
assert!(validate_new_segment(&empty_state, 600, 200).is_ok());
|
||||
|
||||
// Zero-length segments (edge case)
|
||||
assert!(validate_new_segment(&empty_state, 500, 0).is_ok());
|
||||
|
||||
// Segment exactly at committed boundary
|
||||
assert!(validate_new_segment(&empty_state, 499, 1).is_err());
|
||||
assert!(validate_new_segment(&empty_state, 500, 1).is_ok());
|
||||
}
|
||||
}
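Taken together, the helpers above form a small append lifecycle: check the caller's epoch, validate the new segment, record it, bump the epoch, and eventually complete (or abort) to seal the version. An illustrative flow only, assuming the items defined above are in scope; this is not an excerpt from the crate:

// Hypothetical caller-side flow over the append-state helpers shown above.
fn demo_append_flow() -> Result<()> {
    let mut state = AppendState {
        state: AppendStateKind::SegmentedActive,
        epoch: 1,
        committed_length: 1000,
        pending_segments: Vec::new(),
    };

    // A writer that read epoch 1 proposes a segment right after the committed bytes,
    // records it, and bumps the epoch for the next writer.
    verify_append_epoch(&state, 1)?;
    validate_new_segment(&state, 1000, 200)?;
    state.pending_segments.push(AppendSegment {
        offset: 1000,
        length: 200,
        data_dir: None,
        etag: None,
        epoch: state.epoch,
    });
    prepare_next_append(&mut state);

    // Completing folds the pending bytes into committed_length and seals the version.
    complete_append_operation(&mut state)?;
    assert_eq!(state.state, AppendStateKind::SegmentedSealed);
    assert_eq!(state.committed_length, 1200);
    Ok(())
}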
|
||||
@@ -494,96 +494,6 @@ impl FileInfo {
|
||||
ReplicationStatusType::Empty
|
||||
}
|
||||
}
|
||||
/// Get the append state for this FileInfo, with migration compatibility
|
||||
pub fn get_append_state(&self) -> crate::append::AppendState {
|
||||
use crate::append::{AppendState, AppendStateKind, get_append_state};
|
||||
|
||||
// Try to load from metadata first
|
||||
if let Ok(Some(state)) = get_append_state(&self.metadata) {
|
||||
return state;
|
||||
}
|
||||
|
||||
// Migration compatibility: determine state based on existing data
|
||||
if self.inline_data() {
|
||||
// Has inline data, treat as Inline state
|
||||
AppendState {
|
||||
state: AppendStateKind::Inline,
|
||||
epoch: 0,
|
||||
committed_length: self.size,
|
||||
pending_segments: Vec::new(),
|
||||
}
|
||||
} else {
|
||||
// No inline data, treat as SegmentedSealed (traditional object)
|
||||
AppendState {
|
||||
state: AppendStateKind::SegmentedSealed,
|
||||
epoch: 0,
|
||||
committed_length: self.size,
|
||||
pending_segments: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the append state for this FileInfo
|
||||
pub fn set_append_state(&mut self, state: &crate::append::AppendState) -> crate::error::Result<()> {
|
||||
crate::append::set_append_state(&mut self.metadata, state)
|
||||
}
|
||||
|
||||
/// Check if this object supports append operations
|
||||
pub fn is_appendable(&self) -> bool {
|
||||
use crate::append::AppendStateKind;
|
||||
match self.get_append_state().state {
|
||||
AppendStateKind::Disabled => false,
|
||||
AppendStateKind::Inline | AppendStateKind::InlinePendingSpill | AppendStateKind::SegmentedActive => true,
|
||||
AppendStateKind::SegmentedSealed => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if this object has pending append operations
|
||||
pub fn has_pending_appends(&self) -> bool {
|
||||
use crate::append::AppendStateKind;
|
||||
matches!(
|
||||
self.get_append_state().state,
|
||||
AppendStateKind::InlinePendingSpill | AppendStateKind::SegmentedActive
|
||||
)
|
||||
}
|
||||
|
||||
/// Complete all pending append operations and seal the object
|
||||
pub fn complete_append(&mut self) -> crate::error::Result<()> {
|
||||
let mut append_state = self.get_append_state();
|
||||
crate::append::complete_append_operation(&mut append_state)?;
|
||||
self.set_append_state(&append_state)?;
|
||||
|
||||
// Update file size to reflect completed operation
|
||||
if append_state.state == crate::append::AppendStateKind::SegmentedSealed {
|
||||
self.size = append_state.committed_length;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Abort all pending append operations and seal the object
|
||||
pub fn abort_append(&mut self) -> crate::error::Result<()> {
|
||||
let mut append_state = self.get_append_state();
|
||||
crate::append::abort_append_operation(&mut append_state)?;
|
||||
self.set_append_state(&append_state)?;
|
||||
|
||||
// Update file size to only include committed data
|
||||
if append_state.state == crate::append::AppendStateKind::SegmentedSealed {
|
||||
self.size = append_state.committed_length;
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Check if append operations can be completed for this object
|
||||
pub fn can_complete_append(&self) -> bool {
|
||||
crate::append::can_complete_append(&self.get_append_state())
|
||||
}
|
||||
|
||||
/// Check if append operations can be aborted for this object
|
||||
pub fn can_abort_append(&self) -> bool {
|
||||
crate::append::can_abort_append(&self.get_append_state())
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
|
||||
|
||||
@@ -169,6 +169,9 @@ impl InlineData {
    }
    pub fn remove(&mut self, remove_keys: Vec<Uuid>) -> Result<bool> {
        let buf = self.after_version();
        if buf.is_empty() {
            return Ok(false);
        }
        let mut cur = Cursor::new(buf);

        let mut fields_len = rmp::decode::read_map_len(&mut cur)? as usize;
@@ -12,7 +12,6 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod append;
|
||||
mod error;
|
||||
pub mod fileinfo;
|
||||
mod filemeta;
|
||||
@@ -23,7 +22,6 @@ mod replication;
|
||||
|
||||
pub mod test_data;
|
||||
|
||||
pub use append::*;
|
||||
pub use error::*;
|
||||
pub use fileinfo::*;
|
||||
pub use filemeta::*;
|
||||
|
||||
@@ -34,6 +34,7 @@ time = { workspace = true, features = ["serde-human-readable"] }
|
||||
serde = { workspace = true, features = ["derive", "rc"] }
|
||||
rustfs-ecstore = { workspace = true }
|
||||
rustfs-policy.workspace = true
|
||||
rustfs-config.workspace = true
|
||||
serde_json.workspace = true
|
||||
async-trait.workspace = true
|
||||
thiserror.workspace = true
|
||||
|
||||
@@ -35,14 +35,17 @@ use rustfs_policy::auth::{
|
||||
is_access_key_valid, is_secret_key_valid,
|
||||
};
|
||||
use rustfs_policy::policy::Args;
|
||||
use rustfs_policy::policy::opa;
|
||||
use rustfs_policy::policy::{EMBEDDED_POLICY_TYPE, INHERITED_POLICY_TYPE, Policy, PolicyDoc, iam_policy_claim_name_sa};
|
||||
use rustfs_utils::crypto::{base64_decode, base64_encode};
|
||||
use serde_json::Value;
|
||||
use serde_json::json;
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use std::sync::OnceLock;
|
||||
use time::OffsetDateTime;
|
||||
use tracing::warn;
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::{error, info, warn};
|
||||
|
||||
pub const MAX_SVCSESSION_POLICY_SIZE: usize = 4096;
|
||||
|
||||
@@ -53,6 +56,12 @@ pub const POLICYNAME: &str = "policy";
|
||||
pub const SESSION_POLICY_NAME: &str = "sessionPolicy";
|
||||
pub const SESSION_POLICY_NAME_EXTRACTED: &str = "sessionPolicy-extracted";
|
||||
|
||||
static POLICY_PLUGIN_CLIENT: OnceLock<Arc<RwLock<Option<rustfs_policy::policy::opa::AuthZPlugin>>>> = OnceLock::new();
|
||||
|
||||
fn get_policy_plugin_client() -> Arc<RwLock<Option<rustfs_policy::policy::opa::AuthZPlugin>>> {
|
||||
POLICY_PLUGIN_CLIENT.get_or_init(|| Arc::new(RwLock::new(None))).clone()
|
||||
}
|
||||
|
||||
pub struct IamSys<T> {
|
||||
store: Arc<IamCache<T>>,
|
||||
roles_map: HashMap<ARN, String>,
|
||||
@@ -60,6 +69,20 @@ pub struct IamSys<T> {
|
||||
|
||||
impl<T: Store> IamSys<T> {
|
||||
pub fn new(store: Arc<IamCache<T>>) -> Self {
|
||||
tokio::spawn(async move {
|
||||
match opa::lookup_config().await {
|
||||
Ok(conf) => {
|
||||
if conf.enable() {
|
||||
Self::set_policy_plugin_client(opa::AuthZPlugin::new(conf)).await;
|
||||
info!("OPA plugin enabled");
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Error loading OPA configuration err:{}", e);
|
||||
}
|
||||
};
|
||||
});
|
||||
|
||||
Self {
|
||||
store,
|
||||
roles_map: HashMap::new(),
|
||||
@@ -69,6 +92,18 @@ impl<T: Store> IamSys<T> {
|
||||
self.store.api.has_watcher()
|
||||
}
|
||||
|
||||
pub async fn set_policy_plugin_client(client: rustfs_policy::policy::opa::AuthZPlugin) {
|
||||
let policy_plugin_client = get_policy_plugin_client();
|
||||
let mut guard = policy_plugin_client.write().await;
|
||||
*guard = Some(client);
|
||||
}
|
||||
|
||||
pub async fn get_policy_plugin_client() -> Option<rustfs_policy::policy::opa::AuthZPlugin> {
|
||||
let policy_plugin_client = get_policy_plugin_client();
|
||||
let guard = policy_plugin_client.read().await;
|
||||
guard.clone()
|
||||
}
|
||||
|
||||
pub async fn load_group(&self, name: &str) -> Result<()> {
|
||||
self.store.group_notification_handler(name).await
|
||||
}
|
||||
@@ -766,6 +801,11 @@ impl<T: Store> IamSys<T> {
|
||||
return true;
|
||||
}
|
||||
|
||||
let opa_enable = Self::get_policy_plugin_client().await;
|
||||
if let Some(opa_enable) = opa_enable {
|
||||
return opa_enable.is_allowed(args).await;
|
||||
}
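The plugin lookup above relies on a process-wide OnceLock<Arc<RwLock<Option<...>>>>: the slot is created lazily, stays empty until the policy plugin is configured, and can be swapped at runtime while readers take cheap clones. A cut-down, self-contained sketch of that pattern (PluginClient is a placeholder, not the real AuthZPlugin; needs tokio with the rt-multi-thread, macros and sync features):

use std::sync::{Arc, OnceLock};
use tokio::sync::RwLock;

#[derive(Clone, Debug, PartialEq)]
struct PluginClient {
    url: String,
}

// Lazily created global slot; None means "no plugin configured".
static SLOT: OnceLock<Arc<RwLock<Option<PluginClient>>>> = OnceLock::new();

fn slot() -> Arc<RwLock<Option<PluginClient>>> {
    SLOT.get_or_init(|| Arc::new(RwLock::new(None))).clone()
}

async fn set_client(client: PluginClient) {
    let slot = slot();
    let mut guard = slot.write().await;
    *guard = Some(client);
}

async fn get_client() -> Option<PluginClient> {
    let slot = slot();
    let guard = slot.read().await;
    guard.as_ref().cloned()
}

#[tokio::main]
async fn main() {
    assert_eq!(get_client().await, None);
    set_client(PluginClient { url: "http://localhost:8181".into() }).await;
    assert_eq!(get_client().await.map(|c| c.url).as_deref(), Some("http://localhost:8181"));
}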
|
||||
|
||||
let Ok((is_temp, parent_user)) = self.is_temp_user(args.account).await else { return false };
|
||||
|
||||
if is_temp {
|
||||
|
||||
@@ -87,7 +87,7 @@ pub fn generate_jwt<T: Serialize>(claims: &T, secret: &str) -> std::result::Resu
    jsonwebtoken::encode(&header, &claims, &EncodingKey::from_secret(secret.as_bytes()))
}

pub fn extract_claims<T: DeserializeOwned>(
pub fn extract_claims<T: DeserializeOwned + Clone>(
    token: &str,
    secret: &str,
) -> std::result::Result<jsonwebtoken::TokenData<T>, jsonwebtoken::errors::Error> {
@@ -193,7 +193,7 @@ mod tests {
        assert_eq!(error.to_string(), "secret key length is too short");
    }

    #[derive(Debug, Serialize, Deserialize, PartialEq)]
    #[derive(Debug, Serialize, Deserialize, PartialEq, Clone)]
    struct Claims {
        sub: String,
        company: String,
@@ -635,7 +635,7 @@ impl KmsBackend for LocalKmsBackend {
|
||||
}
|
||||
|
||||
async fn encrypt(&self, request: EncryptRequest) -> Result<EncryptResponse> {
|
||||
let encrypt_request = crate::types::EncryptRequest {
|
||||
let encrypt_request = EncryptRequest {
|
||||
key_id: request.key_id.clone(),
|
||||
plaintext: request.plaintext,
|
||||
encryption_context: request.encryption_context,
|
||||
@@ -719,14 +719,14 @@ impl KmsBackend for LocalKmsBackend {
|
||||
.client
|
||||
.load_master_key(key_id)
|
||||
.await
|
||||
.map_err(|_| crate::error::KmsError::key_not_found(format!("Key {key_id} not found")))?;
|
||||
.map_err(|_| KmsError::key_not_found(format!("Key {key_id} not found")))?;
|
||||
|
||||
let (deletion_date_str, deletion_date_dt) = if request.force_immediate.unwrap_or(false) {
|
||||
// For immediate deletion, actually delete the key from filesystem
|
||||
let key_path = self.client.master_key_path(key_id);
|
||||
tokio::fs::remove_file(&key_path)
|
||||
.await
|
||||
.map_err(|e| crate::error::KmsError::internal_error(format!("Failed to delete key file: {e}")))?;
|
||||
.map_err(|e| KmsError::internal_error(format!("Failed to delete key file: {e}")))?;
|
||||
|
||||
// Remove from cache
|
||||
let mut cache = self.client.key_cache.write().await;
|
||||
@@ -756,9 +756,7 @@ impl KmsBackend for LocalKmsBackend {
|
||||
// Schedule for deletion (default 30 days)
|
||||
let days = request.pending_window_in_days.unwrap_or(30);
|
||||
if !(7..=30).contains(&days) {
|
||||
return Err(crate::error::KmsError::invalid_parameter(
|
||||
"pending_window_in_days must be between 7 and 30".to_string(),
|
||||
));
|
||||
return Err(KmsError::invalid_parameter("pending_window_in_days must be between 7 and 30".to_string()));
|
||||
}
|
||||
|
||||
let deletion_date = chrono::Utc::now() + chrono::Duration::days(days as i64);
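The scheduling rule above is: a requested pending window must fall in 7..=30 days (defaulting to 30), and the deletion date is simply now plus that window. A small sketch of the same check using chrono (the function name and error string are illustrative):

use chrono::{Duration, Utc};

fn schedule_deletion(pending_window_in_days: Option<u32>) -> Result<String, String> {
    let days = pending_window_in_days.unwrap_or(30);
    if !(7..=30).contains(&days) {
        return Err("pending_window_in_days must be between 7 and 30".to_string());
    }
    // Deletion is scheduled `days` from now, reported as an RFC 3339 timestamp.
    let deletion_date = Utc::now() + Duration::days(days as i64);
    Ok(deletion_date.to_rfc3339())
}

fn main() {
    assert!(schedule_deletion(Some(3)).is_err());
    assert!(schedule_deletion(None).is_ok());
    println!("scheduled: {}", schedule_deletion(Some(7)).unwrap());
}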
|
||||
@@ -772,16 +770,16 @@ impl KmsBackend for LocalKmsBackend {
|
||||
let key_path = self.client.master_key_path(key_id);
|
||||
let content = tokio::fs::read(&key_path)
|
||||
.await
|
||||
.map_err(|e| crate::error::KmsError::internal_error(format!("Failed to read key file: {e}")))?;
|
||||
let stored_key: crate::backends::local::StoredMasterKey = serde_json::from_slice(&content)
|
||||
.map_err(|e| crate::error::KmsError::internal_error(format!("Failed to parse stored key: {e}")))?;
|
||||
.map_err(|e| KmsError::internal_error(format!("Failed to read key file: {e}")))?;
|
||||
let stored_key: StoredMasterKey =
|
||||
serde_json::from_slice(&content).map_err(|e| KmsError::internal_error(format!("Failed to parse stored key: {e}")))?;
|
||||
|
||||
// Decrypt the existing key material to preserve it
|
||||
let existing_key_material = if let Some(ref cipher) = self.client.master_cipher {
|
||||
let nonce = aes_gcm::Nonce::from_slice(&stored_key.nonce);
|
||||
let nonce = Nonce::from_slice(&stored_key.nonce);
|
||||
cipher
|
||||
.decrypt(nonce, stored_key.encrypted_key_material.as_ref())
|
||||
.map_err(|e| crate::error::KmsError::cryptographic_error("decrypt", e.to_string()))?
|
||||
.map_err(|e| KmsError::cryptographic_error("decrypt", e.to_string()))?
|
||||
} else {
|
||||
stored_key.encrypted_key_material
|
||||
};
|
||||
@@ -820,10 +818,10 @@ impl KmsBackend for LocalKmsBackend {
|
||||
.client
|
||||
.load_master_key(key_id)
|
||||
.await
|
||||
.map_err(|_| crate::error::KmsError::key_not_found(format!("Key {key_id} not found")))?;
|
||||
.map_err(|_| KmsError::key_not_found(format!("Key {key_id} not found")))?;
|
||||
|
||||
if master_key.status != KeyStatus::PendingDeletion {
|
||||
return Err(crate::error::KmsError::invalid_key_state(format!("Key {key_id} is not pending deletion")));
|
||||
return Err(KmsError::invalid_key_state(format!("Key {key_id} is not pending deletion")));
|
||||
}
|
||||
|
||||
// Cancel the deletion by resetting the state
|
||||
|
||||
@@ -32,14 +32,17 @@ rustfs-utils = { workspace = true, features = ["path", "sys"] }
|
||||
rustfs-targets = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
chrono = { workspace = true, features = ["serde"] }
|
||||
dashmap = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
form_urlencoded = { workspace = true }
|
||||
hashbrown = { workspace = true }
|
||||
once_cell = { workspace = true }
|
||||
quick-xml = { workspace = true, features = ["serialize", "async-tokio"] }
|
||||
rayon = { workspace = true }
|
||||
rumqttc = { workspace = true }
|
||||
rustc-hash = { workspace = true }
|
||||
serde = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
starshard = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
tokio = { workspace = true, features = ["rt-multi-thread", "sync", "time"] }
|
||||
tracing = { workspace = true }
|
||||
|
||||
@@ -68,7 +68,7 @@ async fn main() -> Result<(), NotificationError> {
|
||||
key: WEBHOOK_QUEUE_DIR.to_string(),
|
||||
value: current_root
|
||||
.clone()
|
||||
.join("../../deploy/logs/notify/webhook")
|
||||
.join("../../deploy/logs/notify")
|
||||
.to_str()
|
||||
.unwrap()
|
||||
.to_string(),
|
||||
@@ -120,11 +120,7 @@ async fn main() -> Result<(), NotificationError> {
|
||||
},
|
||||
KV {
|
||||
key: MQTT_QUEUE_DIR.to_string(),
|
||||
value: current_root
|
||||
.join("../../deploy/logs/notify/mqtt")
|
||||
.to_str()
|
||||
.unwrap()
|
||||
.to_string(),
|
||||
value: current_root.join("../../deploy/logs/notify").to_str().unwrap().to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
@@ -137,7 +133,7 @@ async fn main() -> Result<(), NotificationError> {
|
||||
let mqtt_kvs = KVS(mqtt_kvs_vec);
|
||||
let mut mqtt_targets = std::collections::HashMap::new();
|
||||
mqtt_targets.insert(DEFAULT_TARGET.to_string(), mqtt_kvs);
|
||||
config.0.insert(NOTIFY_MQTT_SUB_SYS.to_string(), mqtt_targets);
|
||||
// config.0.insert(NOTIFY_MQTT_SUB_SYS.to_string(), mqtt_targets);
|
||||
|
||||
// Load the configuration and initialize the system
|
||||
*system.config.write().await = config;
|
||||
|
||||
@@ -28,6 +28,7 @@ use rustfs_targets::EventName;
|
||||
use rustfs_targets::arn::TargetID;
|
||||
use std::sync::Arc;
|
||||
use std::time::Duration;
|
||||
use tokio::time::sleep;
|
||||
use tracing::info;
|
||||
|
||||
#[tokio::main]
|
||||
@@ -68,7 +69,7 @@ async fn main() -> Result<(), NotificationError> {
|
||||
key: WEBHOOK_QUEUE_DIR.to_string(),
|
||||
value: current_root
|
||||
.clone()
|
||||
.join("../../deploy/logs/notify/webhook")
|
||||
.join("../../deploy/logs/notify")
|
||||
.to_str()
|
||||
.unwrap()
|
||||
.to_string(),
|
||||
@@ -91,7 +92,7 @@ async fn main() -> Result<(), NotificationError> {
|
||||
system.init().await?;
|
||||
info!("✅ System initialized with Webhook target.");
|
||||
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
sleep(Duration::from_secs(1)).await;
|
||||
|
||||
// --- Dynamically update system configuration: Add an MQTT Target ---
|
||||
info!("\n---> Dynamically adding MQTT target...");
|
||||
@@ -129,11 +130,7 @@ async fn main() -> Result<(), NotificationError> {
|
||||
},
|
||||
KV {
|
||||
key: MQTT_QUEUE_DIR.to_string(),
|
||||
value: current_root
|
||||
.join("../../deploy/logs/notify/mqtt")
|
||||
.to_str()
|
||||
.unwrap()
|
||||
.to_string(),
|
||||
value: current_root.join("../../deploy/logs/notify").to_str().unwrap().to_string(),
|
||||
hidden_if_empty: false,
|
||||
},
|
||||
KV {
|
||||
@@ -152,7 +149,7 @@ async fn main() -> Result<(), NotificationError> {
|
||||
.await?;
|
||||
info!("✅ MQTT target added and system reloaded.");
|
||||
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
sleep(Duration::from_secs(1)).await;
|
||||
|
||||
// --- Loading and managing Bucket configurations ---
|
||||
info!("\n---> Loading bucket notification config...");
|
||||
@@ -176,7 +173,7 @@ async fn main() -> Result<(), NotificationError> {
|
||||
system.send_event(event).await;
|
||||
info!("✅ Event sent. Both Webhook and MQTT targets should receive it.");
|
||||
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
sleep(Duration::from_secs(2)).await;
|
||||
|
||||
// --- Dynamically remove configuration ---
|
||||
info!("\n---> Dynamically removing Webhook target...");
|
||||
@@ -188,5 +185,6 @@ async fn main() -> Result<(), NotificationError> {
|
||||
info!("✅ Bucket 'my-bucket' config removed.");
|
||||
|
||||
info!("\nDemo completed successfully");
|
||||
sleep(Duration::from_secs(1)).await;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
@@ -12,19 +12,20 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use axum::routing::get;
|
||||
use axum::{
|
||||
Router,
|
||||
extract::Json,
|
||||
extract::Query,
|
||||
http::{HeaderMap, Response, StatusCode},
|
||||
routing::post,
|
||||
routing::{get, post},
|
||||
};
|
||||
use rustfs_utils::parse_and_resolve_address;
|
||||
use serde::Deserialize;
|
||||
use serde_json::Value;
|
||||
use std::net::SocketAddr;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
|
||||
use axum::extract::Query;
|
||||
use serde::Deserialize;
|
||||
use tokio::net::TcpListener;
|
||||
|
||||
#[derive(Deserialize)]
|
||||
struct ResetParams {
|
||||
@@ -32,9 +33,6 @@ struct ResetParams {
|
||||
}
|
||||
|
||||
// Define a global variable and count the number of data received
|
||||
use rustfs_utils::parse_and_resolve_address;
|
||||
use std::sync::atomic::{AtomicU64, Ordering};
|
||||
use tokio::net::TcpListener;
|
||||
|
||||
static WEBHOOK_COUNT: AtomicU64 = AtomicU64::new(0);
|
||||
|
||||
|
||||
@@ -13,9 +13,9 @@
|
||||
// limitations under the License.
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use hashbrown::HashMap;
|
||||
use rustfs_targets::EventName;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use url::form_urlencoded;
|
||||
|
||||
/// Represents the identity of the user who triggered the event
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
use crate::Event;
|
||||
use async_trait::async_trait;
|
||||
use hashbrown::HashSet;
|
||||
use rumqttc::QoS;
|
||||
use rustfs_config::notify::{ENV_NOTIFY_MQTT_KEYS, ENV_NOTIFY_WEBHOOK_KEYS, NOTIFY_MQTT_KEYS, NOTIFY_WEBHOOK_KEYS};
|
||||
use rustfs_config::{
|
||||
@@ -27,7 +28,6 @@ use rustfs_targets::{
|
||||
error::TargetError,
|
||||
target::{mqtt::MQTTArgs, webhook::WebhookArgs},
|
||||
};
|
||||
use std::collections::HashSet;
|
||||
use std::time::Duration;
|
||||
use tracing::{debug, warn};
|
||||
use url::Url;
|
||||
|
||||
@@ -15,13 +15,13 @@
|
||||
use crate::{
|
||||
Event, error::NotificationError, notifier::EventNotifier, registry::TargetRegistry, rules::BucketNotificationConfig, stream,
|
||||
};
|
||||
use hashbrown::HashMap;
|
||||
use rustfs_ecstore::config::{Config, KVS};
|
||||
use rustfs_targets::EventName;
|
||||
use rustfs_targets::arn::TargetID;
|
||||
use rustfs_targets::store::{Key, Store};
|
||||
use rustfs_targets::target::EntityTarget;
|
||||
use rustfs_targets::{StoreError, Target};
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
use std::time::{Duration, Instant};
|
||||
@@ -212,11 +212,6 @@ impl NotificationSystem {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
// if let Err(e) = rustfs_ecstore::config::com::save_server_config(store, &new_config).await {
|
||||
// error!("Failed to save config: {}", e);
|
||||
// return Err(NotificationError::SaveConfig(e.to_string()));
|
||||
// }
|
||||
|
||||
info!("Configuration updated. Reloading system...");
|
||||
self.reload_config(new_config).await
|
||||
}
|
||||
@@ -301,8 +296,8 @@ impl NotificationSystem {
|
||||
info!("Removing config for target {} of type {}", target_name, target_type);
|
||||
self.update_config_and_reload(|config| {
|
||||
let mut changed = false;
|
||||
if let Some(targets) = config.0.get_mut(target_type) {
|
||||
if targets.remove(target_name).is_some() {
|
||||
if let Some(targets) = config.0.get_mut(&target_type.to_lowercase()) {
|
||||
if targets.remove(&target_name.to_lowercase()).is_some() {
|
||||
changed = true;
|
||||
}
|
||||
if targets.is_empty() {
|
||||
@@ -312,6 +307,7 @@ impl NotificationSystem {
|
||||
if !changed {
|
||||
info!("Target {} of type {} not found, no changes made.", target_name, target_type);
|
||||
}
|
||||
debug!("Config after remove: {:?}", config);
|
||||
changed
|
||||
})
|
||||
.await
|
||||
|
||||
@@ -13,19 +13,20 @@
|
||||
// limitations under the License.
|
||||
|
||||
use crate::{error::NotificationError, event::Event, rules::RulesMap};
|
||||
use dashmap::DashMap;
|
||||
use hashbrown::HashMap;
|
||||
use rustfs_targets::EventName;
|
||||
use rustfs_targets::Target;
|
||||
use rustfs_targets::arn::TargetID;
|
||||
use rustfs_targets::target::EntityTarget;
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
use starshard::AsyncShardedHashMap;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::{debug, error, info, instrument, warn};
|
||||
|
||||
/// Manages event notification to targets based on rules
|
||||
pub struct EventNotifier {
|
||||
target_list: Arc<RwLock<TargetList>>,
|
||||
bucket_rules_map: Arc<DashMap<String, RulesMap>>,
|
||||
bucket_rules_map: Arc<AsyncShardedHashMap<String, RulesMap, rustc_hash::FxBuildHasher>>,
|
||||
}
|
||||
|
||||
impl Default for EventNotifier {
|
||||
@@ -39,7 +40,7 @@ impl EventNotifier {
|
||||
pub fn new() -> Self {
|
||||
EventNotifier {
|
||||
target_list: Arc::new(RwLock::new(TargetList::new())),
|
||||
bucket_rules_map: Arc::new(DashMap::new()),
|
||||
bucket_rules_map: Arc::new(AsyncShardedHashMap::new(0)),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -58,7 +59,7 @@ impl EventNotifier {
|
||||
/// This method removes all rules associated with the specified bucket name.
|
||||
/// It will log a message indicating the removal of rules.
|
||||
pub async fn remove_rules_map(&self, bucket_name: &str) {
|
||||
if self.bucket_rules_map.remove(bucket_name).is_some() {
|
||||
if self.bucket_rules_map.remove(&bucket_name.to_string()).await.is_some() {
|
||||
info!("Removed all notification rules for bucket: {}", bucket_name);
|
||||
}
|
||||
}
|
||||
@@ -76,21 +77,21 @@ impl EventNotifier {
|
||||
/// Adds a rules map for a bucket
|
||||
pub async fn add_rules_map(&self, bucket_name: &str, rules_map: RulesMap) {
|
||||
if rules_map.is_empty() {
|
||||
self.bucket_rules_map.remove(bucket_name);
|
||||
self.bucket_rules_map.remove(&bucket_name.to_string()).await;
|
||||
} else {
|
||||
self.bucket_rules_map.insert(bucket_name.to_string(), rules_map);
|
||||
self.bucket_rules_map.insert(bucket_name.to_string(), rules_map).await;
|
||||
}
|
||||
info!("Added rules for bucket: {}", bucket_name);
|
||||
}
|
||||
|
||||
/// Gets the rules map for a specific bucket.
|
||||
pub fn get_rules_map(&self, bucket_name: &str) -> Option<RulesMap> {
|
||||
self.bucket_rules_map.get(bucket_name).map(|r| r.clone())
|
||||
pub async fn get_rules_map(&self, bucket_name: &str) -> Option<RulesMap> {
|
||||
self.bucket_rules_map.get(&bucket_name.to_string()).await
|
||||
}
|
||||
|
||||
/// Removes notification rules for a bucket
|
||||
pub async fn remove_notification(&self, bucket_name: &str) {
|
||||
self.bucket_rules_map.remove(bucket_name);
|
||||
self.bucket_rules_map.remove(&bucket_name.to_string()).await;
|
||||
info!("Removed notification rules for bucket: {}", bucket_name);
|
||||
}
|
||||
|
||||
@@ -113,7 +114,7 @@ impl EventNotifier {
|
||||
/// Return `true` if at least one matching notification rule exists.
|
||||
pub async fn has_subscriber(&self, bucket_name: &str, event_name: &EventName) -> bool {
|
||||
// Rules to check if the bucket exists
|
||||
if let Some(rules_map) = self.bucket_rules_map.get(bucket_name) {
|
||||
if let Some(rules_map) = self.bucket_rules_map.get(&bucket_name.to_string()).await {
|
||||
// A composite event (such as ObjectCreatedAll) is expanded to multiple single events.
|
||||
// We need to check whether any of these single events have the rules configured.
|
||||
rules_map.has_subscriber(event_name)
|
||||
@@ -129,7 +130,7 @@ impl EventNotifier {
|
||||
let bucket_name = &event.s3.bucket.name;
|
||||
let object_key = &event.s3.object.key;
|
||||
let event_name = event.event_name;
|
||||
if let Some(rules) = self.bucket_rules_map.get(bucket_name) {
|
||||
if let Some(rules) = self.bucket_rules_map.get(bucket_name).await {
|
||||
let target_ids = rules.match_rules(event_name, object_key);
|
||||
if target_ids.is_empty() {
|
||||
debug!("No matching targets for event in bucket: {}", bucket_name);
|
||||
|
||||
@@ -15,13 +15,10 @@
|
||||
use crate::Event;
|
||||
use crate::factory::{MQTTTargetFactory, TargetFactory, WebhookTargetFactory};
|
||||
use futures::stream::{FuturesUnordered, StreamExt};
|
||||
use rustfs_config::notify::NOTIFY_ROUTE_PREFIX;
|
||||
use rustfs_config::{DEFAULT_DELIMITER, ENABLE_KEY, ENV_PREFIX};
|
||||
use hashbrown::{HashMap, HashSet};
|
||||
use rustfs_config::{DEFAULT_DELIMITER, ENABLE_KEY, ENV_PREFIX, notify::NOTIFY_ROUTE_PREFIX};
|
||||
use rustfs_ecstore::config::{Config, KVS};
|
||||
use rustfs_targets::Target;
|
||||
use rustfs_targets::TargetError;
|
||||
use rustfs_targets::target::ChannelTargetType;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
use rustfs_targets::{Target, TargetError, target::ChannelTargetType};
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
/// Registry for managing target factories
|
||||
@@ -90,7 +87,9 @@ impl TargetRegistry {
|
||||
let all_env: Vec<(String, String)> = std::env::vars().filter(|(key, _)| key.starts_with(ENV_PREFIX)).collect();
|
||||
// A collection of asynchronous tasks for concurrently executing target creation
|
||||
let mut tasks = FuturesUnordered::new();
|
||||
let mut final_config = config.clone(); // Clone a configuration for aggregating the final result
|
||||
// let final_config = config.clone(); // Clone a configuration for aggregating the final result
|
||||
// Record the defaults for each segment so that the segment can eventually be rebuilt
|
||||
let mut section_defaults: HashMap<String, KVS> = HashMap::new();
|
||||
// 1. Traverse all registered plants and process them by target type
|
||||
for (target_type, factory) in &self.factories {
|
||||
tracing::Span::current().record("target_type", target_type.as_str());
|
||||
@@ -98,12 +97,15 @@ impl TargetRegistry {
|
||||
|
||||
// 2. Prepare the configuration source
|
||||
// 2.1. Get the configuration segment in the file, e.g. 'notify_webhook'
|
||||
let section_name = format!("{NOTIFY_ROUTE_PREFIX}{target_type}");
|
||||
let section_name = format!("{NOTIFY_ROUTE_PREFIX}{target_type}").to_lowercase();
|
||||
let file_configs = config.0.get(§ion_name).cloned().unwrap_or_default();
|
||||
// 2.2. Get the default configuration for that type
|
||||
let default_cfg = file_configs.get(DEFAULT_DELIMITER).cloned().unwrap_or_default();
|
||||
debug!(?default_cfg, "Get the default configuration");
|
||||
|
||||
// Save defaults for eventual write back
|
||||
section_defaults.insert(section_name.clone(), default_cfg.clone());
|
||||
|
||||
// *** Optimization point 1: Get all legitimate fields of the current target type ***
|
||||
let valid_fields = factory.get_valid_fields();
|
||||
debug!(?valid_fields, "Get the legitimate configuration fields");
|
||||
@@ -111,7 +113,9 @@ impl TargetRegistry {
|
||||
// 3. Resolve instance IDs and configuration overrides from environment variables
|
||||
let mut instance_ids_from_env = HashSet::new();
|
||||
// 3.1. Instance discovery: Based on the '..._ENABLE_INSTANCEID' format
|
||||
let enable_prefix = format!("{ENV_PREFIX}{NOTIFY_ROUTE_PREFIX}{target_type}_{ENABLE_KEY}_").to_uppercase();
|
||||
let enable_prefix =
|
||||
format!("{ENV_PREFIX}{NOTIFY_ROUTE_PREFIX}{target_type}{DEFAULT_DELIMITER}{ENABLE_KEY}{DEFAULT_DELIMITER}")
|
||||
.to_uppercase();
|
||||
for (key, value) in &all_env {
|
||||
if value.eq_ignore_ascii_case(rustfs_config::EnableState::One.as_str())
|
||||
|| value.eq_ignore_ascii_case(rustfs_config::EnableState::On.as_str())
|
||||
@@ -128,14 +132,14 @@ impl TargetRegistry {
|
||||
|
||||
// 3.2. Parse all relevant environment variable configurations
|
||||
// 3.2.1. Build environment variable prefixes such as 'RUSTFS_NOTIFY_WEBHOOK_'
|
||||
let env_prefix = format!("{ENV_PREFIX}{NOTIFY_ROUTE_PREFIX}{target_type}_").to_uppercase();
|
||||
let env_prefix = format!("{ENV_PREFIX}{NOTIFY_ROUTE_PREFIX}{target_type}{DEFAULT_DELIMITER}").to_uppercase();
|
||||
// 3.2.2. 'env_overrides' is used to store configurations parsed from environment variables in the format: {instance id -> {field -> value}}
|
||||
let mut env_overrides: HashMap<String, HashMap<String, String>> = HashMap::new();
|
||||
for (key, value) in &all_env {
|
||||
if let Some(rest) = key.strip_prefix(&env_prefix) {
|
||||
// Use rsplitn to split from the right side to properly extract the INSTANCE_ID at the end
|
||||
// Format: <FIELD_NAME>_<INSTANCE_ID> or <FIELD_NAME>
|
||||
let mut parts = rest.rsplitn(2, '_');
|
||||
let mut parts = rest.rsplitn(2, DEFAULT_DELIMITER);
|
||||
|
||||
// The first part from the right is INSTANCE_ID
|
||||
let instance_id_part = parts.next().unwrap_or(DEFAULT_DELIMITER);
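The parsing above peels the instance id off the right of the remaining key with rsplitn(2, ...), so a key like RUSTFS_NOTIFY_WEBHOOK_ENDPOINT_PRIMARY yields field ENDPOINT and instance PRIMARY even when the field name itself contains the delimiter. A standalone illustration, assuming the delimiter is '_' (the key names are examples, not a documented set):

fn split_field_and_instance(rest: &str) -> (&str, &str) {
    // Split at most once, starting from the right: the rightmost piece is the instance id.
    let mut parts = rest.rsplitn(2, '_');
    let instance_id = parts.next().unwrap_or("");
    let field = parts.next().unwrap_or("");
    (field, instance_id)
}

fn main() {
    assert_eq!(split_field_and_instance("ENDPOINT_PRIMARY"), ("ENDPOINT", "PRIMARY"));
    assert_eq!(split_field_and_instance("AUTH_TOKEN_PRIMARY"), ("AUTH_TOKEN", "PRIMARY"));
    // With no instance id, everything lands in the rightmost part.
    assert_eq!(split_field_and_instance("ENDPOINT"), ("", "ENDPOINT"));
}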
|
||||
@@ -224,7 +228,7 @@ impl TargetRegistry {
|
||||
} else {
|
||||
info!(instance_id = %id, "Skip the disabled target and will be removed from the final configuration");
|
||||
// Remove disabled target from final configuration
|
||||
final_config.0.entry(section_name.clone()).or_default().remove(&id);
|
||||
// final_config.0.entry(section_name.clone()).or_default().remove(&id);
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -246,15 +250,50 @@ impl TargetRegistry {
|
||||
}
|
||||
|
||||
// 7. Aggregate new configuration and write back to system configuration
if !successful_configs.is_empty() {
if !successful_configs.is_empty() || !section_defaults.is_empty() {
|
||||
info!(
|
||||
"Prepare to update {} successfully created target configurations to the system configuration...",
|
||||
successful_configs.len()
|
||||
);
|
||||
let mut new_config = config.clone();
|
||||
|
||||
let mut successes_by_section: HashMap<String, HashMap<String, KVS>> = HashMap::new();
|
||||
|
||||
for (target_type, id, kvs) in successful_configs {
|
||||
let section_name = format!("{NOTIFY_ROUTE_PREFIX}{target_type}").to_lowercase();
|
||||
new_config.0.entry(section_name).or_default().insert(id, (*kvs).clone());
|
||||
successes_by_section
|
||||
.entry(section_name)
|
||||
.or_default()
|
||||
.insert(id.to_lowercase(), (*kvs).clone());
|
||||
}
|
||||
|
||||
let mut new_config = config.clone();
|
||||
// Sections to process: collect every section that has a default entry or at least one successfully created instance
let mut sections: HashSet<String> = HashSet::new();
sections.extend(section_defaults.keys().cloned());
sections.extend(successes_by_section.keys().cloned());
|
||||
|
||||
for section in sections {
let mut section_map: std::collections::HashMap<String, KVS> = std::collections::HashMap::new();
// Add default item
if let Some(default_kvs) = section_defaults.get(&section) {
if !default_kvs.is_empty() {
section_map.insert(DEFAULT_DELIMITER.to_string(), default_kvs.clone());
}
}
|
||||
|
||||
// Add successful instance item
if let Some(instances) = successes_by_section.get(&section) {
for (id, kvs) in instances {
section_map.insert(id.clone(), kvs.clone());
}
}

// Empty sections are removed and non-empty sections are replaced entirely.
if section_map.is_empty() {
new_config.0.remove(&section);
} else {
new_config.0.insert(section, section_map);
}
|
||||
}
|
||||
|
||||
let Some(store) = rustfs_ecstore::global::new_object_layer_fn() else {
|
||||
|
||||
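As a rough illustration of the write-back step above, the sketch below rebuilds a single section map from a default entry plus the surviving instances and drops the section when nothing remains. The type alias and the "_" default key are simplified stand-ins for the real Config/KVS types and DEFAULT_DELIMITER.

use std::collections::HashMap;

type Kvs = HashMap<String, String>;

fn rebuild_section(default_kvs: &Kvs, instances: &HashMap<String, Kvs>) -> Option<HashMap<String, Kvs>> {
    let mut section_map: HashMap<String, Kvs> = HashMap::new();
    if !default_kvs.is_empty() {
        // "_" plays the role of DEFAULT_DELIMITER: the key under which the defaults are stored.
        section_map.insert("_".to_string(), default_kvs.clone());
    }
    for (id, kvs) in instances {
        section_map.insert(id.clone(), kvs.clone());
    }
    // An empty section is removed from the config; a non-empty one replaces the old section wholesale.
    if section_map.is_empty() { None } else { Some(section_map) }
}

fn main() {
    let defaults = Kvs::from([("enable".to_string(), "off".to_string())]);
    let instances = HashMap::from([(
        "primary".to_string(),
        Kvs::from([("endpoint".to_string(), "http://127.0.0.1:3020".to_string())]),
    )]);
    let rebuilt = rebuild_section(&defaults, &instances).expect("non-empty section");
    assert_eq!(rebuilt.len(), 2); // default entry plus one instance
}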
@@ -17,10 +17,10 @@ use super::xml_config::ParseConfigError as BucketNotificationConfigError;
|
||||
use crate::rules::NotificationConfiguration;
|
||||
use crate::rules::pattern_rules;
|
||||
use crate::rules::target_id_set;
|
||||
use hashbrown::HashMap;
|
||||
use rustfs_targets::EventName;
|
||||
use rustfs_targets::arn::TargetID;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
use std::io::Read;
|
||||
|
||||
/// Configuration for bucket notifications.
|
||||
|
||||
@@ -14,9 +14,10 @@
|
||||
|
||||
use super::pattern;
|
||||
use super::target_id_set::TargetIdSet;
|
||||
use hashbrown::HashMap;
|
||||
use rayon::prelude::*;
|
||||
use rustfs_targets::arn::TargetID;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// PatternRules - Event rule that maps object name patterns to TargetID collections.
|
||||
/// `event.Rules` (map[string]TargetIDSet) in the Go code
|
||||
@@ -43,13 +44,19 @@ impl PatternRules {
|
||||
|
||||
/// Returns all TargetIDs that match the object name.
|
||||
pub fn match_targets(&self, object_name: &str) -> TargetIdSet {
|
||||
let mut matched_targets = TargetIdSet::new();
|
||||
for (pattern_str, target_set) in &self.rules {
|
||||
if pattern::match_simple(pattern_str, object_name) {
|
||||
matched_targets.extend(target_set.iter().cloned());
|
||||
}
|
||||
}
|
||||
matched_targets
|
||||
self.rules
|
||||
.par_iter()
|
||||
.filter_map(|(pattern_str, target_set)| {
|
||||
if pattern::match_simple(pattern_str, object_name) {
|
||||
Some(target_set.iter().cloned().collect::<TargetIdSet>())
|
||||
} else {
|
||||
None
|
||||
}
|
||||
})
|
||||
.reduce(TargetIdSet::new, |mut acc, set| {
|
||||
acc.extend(set);
|
||||
acc
|
||||
})
|
||||
}
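The hunk above replaces the sequential loop with a Rayon `par_iter` plus `reduce`. Below is a standalone hedged sketch of the same pattern; it requires the rayon crate (which this change imports), and the wildcard matcher and string target IDs are simplified stand-ins for pattern::match_simple and TargetID.

use rayon::prelude::*;
use std::collections::HashSet;

// Stand-in for pattern::match_simple: here '*' only matches any suffix.
fn match_simple(pattern: &str, name: &str) -> bool {
    match pattern.strip_suffix('*') {
        Some(prefix) => name.starts_with(prefix),
        None => pattern == name,
    }
}

fn match_targets(rules: &[(String, HashSet<String>)], object_name: &str) -> HashSet<String> {
    rules
        .par_iter()
        .filter_map(|(pattern, targets)| {
            if match_simple(pattern, object_name) {
                Some(targets.clone())
            } else {
                None
            }
        })
        // Each Rayon worker starts from an empty set; partial results are merged pairwise.
        .reduce(HashSet::new, |mut acc, set| {
            acc.extend(set);
            acc
        })
}

fn main() {
    let rules = vec![
        ("images/*".to_string(), HashSet::from(["webhook:1".to_string()])),
        ("logs/*".to_string(), HashSet::from(["kafka:1".to_string()])),
    ];
    let hits = match_targets(&rules, "images/cat.png");
    assert_eq!(hits, HashSet::from(["webhook:1".to_string()]));
}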
|
||||
|
||||
pub fn is_empty(&self) -> bool {
|
||||
|
||||
@@ -14,10 +14,10 @@
|
||||
|
||||
use super::pattern_rules::PatternRules;
|
||||
use super::target_id_set::TargetIdSet;
|
||||
use hashbrown::HashMap;
|
||||
use rustfs_targets::EventName;
|
||||
use rustfs_targets::arn::TargetID;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// RulesMap - Rule mapping organized by event name.
|
||||
/// `event.RulesMap` (map[Name]Rules) in the corresponding Go code
|
||||
|
||||
@@ -12,8 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use hashbrown::HashSet;
|
||||
use rustfs_targets::arn::TargetID;
|
||||
use std::collections::HashSet;
|
||||
|
||||
/// TargetIDSet - A collection representation of TargetID.
|
||||
pub type TargetIdSet = HashSet<TargetID>;
|
||||
|
||||
@@ -13,10 +13,10 @@
|
||||
// limitations under the License.
|
||||
|
||||
use super::pattern;
|
||||
use hashbrown::HashSet;
|
||||
use rustfs_targets::EventName;
|
||||
use rustfs_targets::arn::{ARN, ArnError, TargetIDError};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashSet;
|
||||
use std::io::Read;
|
||||
use thiserror::Error;
|
||||
|
||||
|
||||
@@ -29,17 +29,12 @@ documentation = "https://docs.rs/rustfs-obs/latest/rustfs_obs/"
|
||||
workspace = true
|
||||
|
||||
[features]
|
||||
default = ["file"]
|
||||
file = []
|
||||
default = []
|
||||
gpu = ["dep:nvml-wrapper"]
|
||||
webhook = ["dep:reqwest"]
|
||||
kafka = ["dep:rdkafka"]
|
||||
|
||||
[dependencies]
|
||||
rustfs-config = { workspace = true, features = ["constants", "observability"] }
|
||||
rustfs-utils = { workspace = true, features = ["ip", "path"] }
|
||||
async-trait = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
flexi_logger = { workspace = true }
|
||||
nu-ansi-term = { workspace = true }
|
||||
nvml-wrapper = { workspace = true, optional = true }
|
||||
@@ -57,24 +52,9 @@ tracing-error = { workspace = true }
|
||||
tracing-opentelemetry = { workspace = true }
|
||||
tracing-subscriber = { workspace = true, features = ["registry", "std", "fmt", "env-filter", "tracing-log", "time", "local-time", "json"] }
|
||||
tokio = { workspace = true, features = ["sync", "fs", "rt-multi-thread", "rt", "time", "macros"] }
|
||||
reqwest = { workspace = true, optional = true }
|
||||
serde_json = { workspace = true }
|
||||
sysinfo = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
|
||||
|
||||
# Only enable kafka features and related dependencies on Linux
|
||||
[target.'cfg(target_os = "linux")'.dependencies]
|
||||
rdkafka = { workspace = true, features = ["tokio"], optional = true }
|
||||
|
||||
|
||||
[dev-dependencies]
|
||||
chrono = { workspace = true }
|
||||
opentelemetry = { workspace = true }
|
||||
opentelemetry_sdk = { workspace = true, features = ["rt-tokio"] }
|
||||
opentelemetry-stdout = { workspace = true }
|
||||
opentelemetry-otlp = { workspace = true, features = ["grpc-tonic"] }
|
||||
opentelemetry-semantic-conventions = { workspace = true, features = ["semconv_experimental"] }
|
||||
tokio = { workspace = true, features = ["full"] }
|
||||
tracing = { workspace = true, features = ["std", "attributes"] }
|
||||
tracing-subscriber = { workspace = true, features = ["registry", "std", "fmt"] }
|
||||
@@ -21,29 +21,4 @@ service_name = "rustfs"
|
||||
service_version = "0.1.0"
|
||||
environments = "develop"
|
||||
logger_level = "debug"
|
||||
local_logging_enabled = true # Default is false if not specified
|
||||
|
||||
|
||||
#[[sinks]]
|
||||
#type = "Kafka"
|
||||
#bootstrap_servers = "localhost:9092"
|
||||
#topic = "logs"
|
||||
#batch_size = 100 # Default is 100 if not specified
|
||||
#batch_timeout_ms = 100 # Default is 1000ms if not specified
|
||||
#
|
||||
#[[sinks]]
|
||||
#type = "Webhook"
|
||||
#endpoint = "http://localhost:8080/webhook"
|
||||
#auth_token = ""
|
||||
#batch_size = 100 # Default is 3 if not specified
|
||||
#batch_timeout_ms = 100 # Default is 100ms if not specified
|
||||
|
||||
[[sinks]]
|
||||
type = "File"
|
||||
path = "deploy/logs/rustfs.log"
|
||||
buffer_size = 102 # Default is 8192 bytes if not specified
|
||||
flush_interval_ms = 1000
|
||||
flush_threshold = 100
|
||||
|
||||
[logger]
|
||||
queue_capacity = 10000
|
||||
local_logging_enabled = true # Default is false if not specified
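For orientation, here is a hedged sketch of how a `[[sinks]]`/`[logger]` block like the one above can be deserialized through an internally tagged enum, mirroring the SinkConfig/FileSinkConfig/LoggerConfig shapes that appear later in this diff. The struct names are local stand-ins, and the snippet assumes the serde (with derive) and toml crates.

use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct FileSink {
    path: String,
    buffer_size: Option<usize>,
    flush_interval_ms: Option<u64>,
    flush_threshold: Option<usize>,
}

#[derive(Debug, Deserialize)]
#[serde(tag = "type")]
enum Sink {
    File(FileSink),
}

#[derive(Debug, Deserialize)]
struct Logger {
    queue_capacity: Option<usize>,
}

#[derive(Debug, Deserialize)]
struct Conf {
    sinks: Vec<Sink>,
    logger: Option<Logger>,
}

fn main() {
    let src = r#"
[[sinks]]
type = "File"
path = "deploy/logs/rustfs.log"
buffer_size = 102
flush_interval_ms = 1000
flush_threshold = 100

[logger]
queue_capacity = 10000
"#;
    let conf: Conf = toml::from_str(src).expect("valid TOML");
    assert!(matches!(conf.sinks[0], Sink::File(_)));
    assert_eq!(conf.logger.unwrap().queue_capacity, Some(10000));
}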
|
||||
@@ -13,33 +13,25 @@
|
||||
// limitations under the License.
|
||||
|
||||
use opentelemetry::global;
|
||||
use rustfs_obs::{BaseLogEntry, ServerLogEntry, SystemObserver, get_logger, init_obs, log_info};
|
||||
use std::collections::HashMap;
|
||||
use rustfs_obs::{SystemObserver, init_obs};
|
||||
use std::time::{Duration, SystemTime};
|
||||
use tracing::{error, info, instrument};
|
||||
use tracing_core::Level;
|
||||
use tracing::{Level, error, info, instrument};
|
||||
|
||||
#[tokio::main]
|
||||
async fn main() {
|
||||
let obs_conf = Some("crates/obs/examples/config.toml".to_string());
|
||||
let (_logger, _guard) = init_obs(obs_conf).await;
|
||||
let obs_conf = Some("http://localhost:4317".to_string());
|
||||
let _guard = init_obs(obs_conf).await;
|
||||
let span = tracing::span!(Level::INFO, "main");
|
||||
let _enter = span.enter();
|
||||
info!("Program starts");
|
||||
// Simulate the operation
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
run(
|
||||
"service-demo".to_string(),
|
||||
"object-demo".to_string(),
|
||||
"user-demo".to_string(),
|
||||
"service-demo".to_string(),
|
||||
)
|
||||
.await;
|
||||
run("service-demo".to_string()).await;
|
||||
info!("Program ends");
|
||||
}
|
||||
|
||||
#[instrument(fields(bucket, object, user))]
|
||||
async fn run(bucket: String, object: String, user: String, service_name: String) {
|
||||
async fn run(service_name: String) {
|
||||
let start_time = SystemTime::now();
|
||||
info!("Log module initialization is completed service_name: {:?}", service_name);
|
||||
|
||||
@@ -56,21 +48,6 @@ async fn run(bucket: String, object: String, user: String, service_name: String)
|
||||
Err(e) => error!("Failed to initialize process observer: {:?}", e),
|
||||
}
|
||||
|
||||
let base_entry = BaseLogEntry::new()
|
||||
.message(Some("run logger api_handler info".to_string()))
|
||||
.request_id(Some("request_id".to_string()))
|
||||
.timestamp(chrono::DateTime::from(start_time))
|
||||
.tags(Some(HashMap::default()));
|
||||
|
||||
let server_entry = ServerLogEntry::new(Level::INFO, "api_handler".to_string())
|
||||
.with_base(base_entry)
|
||||
.user_id(Some(user.clone()))
|
||||
.add_field("operation".to_string(), "login".to_string())
|
||||
.add_field("bucket".to_string(), bucket.clone())
|
||||
.add_field("object".to_string(), object.clone());
|
||||
|
||||
let result = get_logger().lock().await.log_server_entry(server_entry).await;
|
||||
info!("Logging is completed {:?}", result);
|
||||
put_object("bucket".to_string(), "object".to_string(), "user".to_string()).await;
|
||||
info!("Logging is completed");
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
@@ -97,8 +74,6 @@ async fn put_object(bucket: String, object: String, user: String) {
|
||||
start_time.elapsed().unwrap().as_secs_f64()
|
||||
);
|
||||
|
||||
let result = log_info("put_object logger info", "put_object").await;
|
||||
info!("put_object is completed {:?}", result);
|
||||
// Simulate the operation
|
||||
tokio::time::sleep(Duration::from_millis(100)).await;
|
||||
|
||||
|
||||
@@ -13,16 +13,9 @@
|
||||
// limitations under the License.
|
||||
|
||||
use rustfs_config::observability::{
|
||||
DEFAULT_AUDIT_LOGGER_QUEUE_CAPACITY, DEFAULT_SINKS_FILE_BUFFER_SIZE, DEFAULT_SINKS_FILE_FLUSH_INTERVAL_MS,
|
||||
DEFAULT_SINKS_FILE_FLUSH_THRESHOLD, DEFAULT_SINKS_KAFKA_BATCH_SIZE, DEFAULT_SINKS_KAFKA_BATCH_TIMEOUT_MS,
|
||||
DEFAULT_SINKS_KAFKA_BROKERS, DEFAULT_SINKS_KAFKA_TOPIC, DEFAULT_SINKS_WEBHOOK_AUTH_TOKEN, DEFAULT_SINKS_WEBHOOK_ENDPOINT,
|
||||
DEFAULT_SINKS_WEBHOOK_MAX_RETRIES, DEFAULT_SINKS_WEBHOOK_RETRY_DELAY_MS, ENV_AUDIT_LOGGER_QUEUE_CAPACITY, ENV_OBS_ENDPOINT,
|
||||
ENV_OBS_ENVIRONMENT, ENV_OBS_LOCAL_LOGGING_ENABLED, ENV_OBS_LOG_DIRECTORY, ENV_OBS_LOG_FILENAME, ENV_OBS_LOG_KEEP_FILES,
|
||||
ENV_OBS_LOG_ROTATION_SIZE_MB, ENV_OBS_LOG_ROTATION_TIME, ENV_OBS_LOGGER_LEVEL, ENV_OBS_METER_INTERVAL, ENV_OBS_SAMPLE_RATIO,
|
||||
ENV_OBS_SERVICE_NAME, ENV_OBS_SERVICE_VERSION, ENV_OBS_USE_STDOUT, ENV_SINKS_FILE_BUFFER_SIZE,
|
||||
ENV_SINKS_FILE_FLUSH_INTERVAL_MS, ENV_SINKS_FILE_FLUSH_THRESHOLD, ENV_SINKS_FILE_PATH, ENV_SINKS_KAFKA_BATCH_SIZE,
|
||||
ENV_SINKS_KAFKA_BATCH_TIMEOUT_MS, ENV_SINKS_KAFKA_BROKERS, ENV_SINKS_KAFKA_TOPIC, ENV_SINKS_WEBHOOK_AUTH_TOKEN,
|
||||
ENV_SINKS_WEBHOOK_ENDPOINT, ENV_SINKS_WEBHOOK_MAX_RETRIES, ENV_SINKS_WEBHOOK_RETRY_DELAY_MS,
|
||||
ENV_OBS_ENDPOINT, ENV_OBS_ENVIRONMENT, ENV_OBS_LOCAL_LOGGING_ENABLED, ENV_OBS_LOG_DIRECTORY, ENV_OBS_LOG_FILENAME,
|
||||
ENV_OBS_LOG_KEEP_FILES, ENV_OBS_LOG_ROTATION_SIZE_MB, ENV_OBS_LOG_ROTATION_TIME, ENV_OBS_LOGGER_LEVEL,
|
||||
ENV_OBS_METER_INTERVAL, ENV_OBS_SAMPLE_RATIO, ENV_OBS_SERVICE_NAME, ENV_OBS_SERVICE_VERSION, ENV_OBS_USE_STDOUT,
|
||||
};
|
||||
use rustfs_config::{
|
||||
APP_NAME, DEFAULT_LOG_KEEP_FILES, DEFAULT_LOG_LEVEL, DEFAULT_LOG_ROTATION_SIZE_MB, DEFAULT_LOG_ROTATION_TIME,
|
||||
@@ -145,167 +138,10 @@ impl Default for OtelConfig {
|
||||
}
|
||||
}
|
||||
|
||||
/// Kafka Sink Configuration - Add batch parameters
|
||||
#[derive(Debug, Deserialize, Serialize, Clone)]
|
||||
pub struct KafkaSinkConfig {
|
||||
pub brokers: String,
|
||||
pub topic: String,
|
||||
pub batch_size: Option<usize>, // Batch size, default 100
|
||||
pub batch_timeout_ms: Option<u64>, // Batch timeout time, default 1000ms
|
||||
}
|
||||
|
||||
impl KafkaSinkConfig {
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for KafkaSinkConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
brokers: env::var(ENV_SINKS_KAFKA_BROKERS)
|
||||
.ok()
|
||||
.filter(|s| !s.trim().is_empty())
|
||||
.unwrap_or_else(|| DEFAULT_SINKS_KAFKA_BROKERS.to_string()),
|
||||
topic: env::var(ENV_SINKS_KAFKA_TOPIC)
|
||||
.ok()
|
||||
.filter(|s| !s.trim().is_empty())
|
||||
.unwrap_or_else(|| DEFAULT_SINKS_KAFKA_TOPIC.to_string()),
|
||||
batch_size: env::var(ENV_SINKS_KAFKA_BATCH_SIZE)
|
||||
.ok()
|
||||
.and_then(|v| v.parse().ok())
|
||||
.or(Some(DEFAULT_SINKS_KAFKA_BATCH_SIZE)),
|
||||
batch_timeout_ms: env::var(ENV_SINKS_KAFKA_BATCH_TIMEOUT_MS)
|
||||
.ok()
|
||||
.and_then(|v| v.parse().ok())
|
||||
.or(Some(DEFAULT_SINKS_KAFKA_BATCH_TIMEOUT_MS)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Webhook Sink Configuration - Add Retry Parameters
|
||||
#[derive(Debug, Deserialize, Serialize, Clone)]
|
||||
pub struct WebhookSinkConfig {
|
||||
pub endpoint: String,
|
||||
pub auth_token: String,
|
||||
pub max_retries: Option<usize>, // Maximum number of retry times, default 3
|
||||
pub retry_delay_ms: Option<u64>, // Retry the delay cardinality, default 100ms
|
||||
}
|
||||
|
||||
impl WebhookSinkConfig {
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for WebhookSinkConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
endpoint: env::var(ENV_SINKS_WEBHOOK_ENDPOINT)
|
||||
.ok()
|
||||
.filter(|s| !s.trim().is_empty())
|
||||
.unwrap_or_else(|| DEFAULT_SINKS_WEBHOOK_ENDPOINT.to_string()),
|
||||
auth_token: env::var(ENV_SINKS_WEBHOOK_AUTH_TOKEN)
|
||||
.ok()
|
||||
.filter(|s| !s.trim().is_empty())
|
||||
.unwrap_or_else(|| DEFAULT_SINKS_WEBHOOK_AUTH_TOKEN.to_string()),
|
||||
max_retries: env::var(ENV_SINKS_WEBHOOK_MAX_RETRIES)
|
||||
.ok()
|
||||
.and_then(|v| v.parse().ok())
|
||||
.or(Some(DEFAULT_SINKS_WEBHOOK_MAX_RETRIES)),
|
||||
retry_delay_ms: env::var(ENV_SINKS_WEBHOOK_RETRY_DELAY_MS)
|
||||
.ok()
|
||||
.and_then(|v| v.parse().ok())
|
||||
.or(Some(DEFAULT_SINKS_WEBHOOK_RETRY_DELAY_MS)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// File Sink Configuration - Add buffering parameters
|
||||
#[derive(Debug, Deserialize, Serialize, Clone)]
|
||||
pub struct FileSinkConfig {
|
||||
pub path: String,
|
||||
pub buffer_size: Option<usize>, // Write buffer size, default 8192
|
||||
pub flush_interval_ms: Option<u64>, // Refresh interval time, default 1000ms
|
||||
pub flush_threshold: Option<usize>, // Refresh threshold, default 100 logs
|
||||
}
|
||||
|
||||
impl FileSinkConfig {
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for FileSinkConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
path: get_log_directory_to_string(ENV_SINKS_FILE_PATH),
|
||||
buffer_size: env::var(ENV_SINKS_FILE_BUFFER_SIZE)
|
||||
.ok()
|
||||
.and_then(|v| v.parse().ok())
|
||||
.or(Some(DEFAULT_SINKS_FILE_BUFFER_SIZE)),
|
||||
flush_interval_ms: env::var(ENV_SINKS_FILE_FLUSH_INTERVAL_MS)
|
||||
.ok()
|
||||
.and_then(|v| v.parse().ok())
|
||||
.or(Some(DEFAULT_SINKS_FILE_FLUSH_INTERVAL_MS)),
|
||||
flush_threshold: env::var(ENV_SINKS_FILE_FLUSH_THRESHOLD)
|
||||
.ok()
|
||||
.and_then(|v| v.parse().ok())
|
||||
.or(Some(DEFAULT_SINKS_FILE_FLUSH_THRESHOLD)),
|
||||
}
|
||||
}
|
||||
}
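The three Default impls above share one idiom: read an environment variable, discard empty or unparsable values, and fall back to a compile-time default so the Option is always Some. A minimal hedged sketch of that idiom follows, with an invented variable name and default standing in for ENV_SINKS_FILE_BUFFER_SIZE and DEFAULT_SINKS_FILE_BUFFER_SIZE.

use std::env;

fn buffer_size_from_env() -> Option<usize> {
    const DEFAULT_BUFFER_SIZE: usize = 8192; // stand-in for DEFAULT_SINKS_FILE_BUFFER_SIZE
    env::var("EXAMPLE_SINKS_FILE_BUFFER_SIZE") // stand-in for ENV_SINKS_FILE_BUFFER_SIZE
        .ok()
        .and_then(|v| v.parse().ok()) // a non-numeric value silently falls through
        .or(Some(DEFAULT_BUFFER_SIZE)) // so the result is always Some(...)
}

fn main() {
    // With EXAMPLE_SINKS_FILE_BUFFER_SIZE unset or unparsable this prints Some(8192);
    // with e.g. EXAMPLE_SINKS_FILE_BUFFER_SIZE=4096 exported, it prints Some(4096).
    println!("{:?}", buffer_size_from_env());
}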
|
||||
|
||||
/// Sink configuration collection
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(tag = "type")]
|
||||
pub enum SinkConfig {
|
||||
File(FileSinkConfig),
|
||||
Kafka(KafkaSinkConfig),
|
||||
Webhook(WebhookSinkConfig),
|
||||
}
|
||||
|
||||
impl SinkConfig {
|
||||
pub fn new() -> Self {
|
||||
Self::File(FileSinkConfig::new())
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for SinkConfig {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
///Logger Configuration
|
||||
#[derive(Debug, Deserialize, Serialize, Clone)]
|
||||
pub struct LoggerConfig {
|
||||
pub queue_capacity: Option<usize>,
|
||||
}
|
||||
|
||||
impl LoggerConfig {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
queue_capacity: env::var(ENV_AUDIT_LOGGER_QUEUE_CAPACITY)
|
||||
.ok()
|
||||
.and_then(|v| v.parse().ok())
|
||||
.or(Some(DEFAULT_AUDIT_LOGGER_QUEUE_CAPACITY)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for LoggerConfig {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// Overall application configuration
|
||||
/// Add observability, sinks, and logger configuration
|
||||
/// Add observability configuration
|
||||
///
|
||||
/// Observability: OpenTelemetry configuration
|
||||
/// Sinks: Kafka, Webhook, File sink configuration
|
||||
/// Logger: Logger configuration
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
@@ -316,8 +152,6 @@ impl Default for LoggerConfig {
|
||||
#[derive(Debug, Deserialize, Clone)]
|
||||
pub struct AppConfig {
|
||||
pub observability: OtelConfig,
|
||||
pub sinks: Vec<SinkConfig>,
|
||||
pub logger: Option<LoggerConfig>,
|
||||
}
|
||||
|
||||
impl AppConfig {
|
||||
@@ -328,16 +162,12 @@ impl AppConfig {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
observability: OtelConfig::default(),
|
||||
sinks: vec![SinkConfig::default()],
|
||||
logger: Some(LoggerConfig::default()),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn new_with_endpoint(endpoint: Option<String>) -> Self {
|
||||
Self {
|
||||
observability: OtelConfig::extract_otel_config_from_env(endpoint),
|
||||
sinks: vec![SinkConfig::new()],
|
||||
logger: Some(LoggerConfig::new()),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,88 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::entry::ObjectVersion;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Args - defines the arguments for API operations
|
||||
/// Args is used to define the arguments for API operations.
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use rustfs_obs::Args;
|
||||
/// use std::collections::HashMap;
|
||||
///
|
||||
/// let args = Args::new()
|
||||
/// .set_bucket(Some("my-bucket".to_string()))
|
||||
/// .set_object(Some("my-object".to_string()))
|
||||
/// .set_version_id(Some("123".to_string()))
|
||||
/// .set_metadata(Some(HashMap::new()));
|
||||
/// ```
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Default, Eq, PartialEq)]
|
||||
pub struct Args {
|
||||
#[serde(rename = "bucket", skip_serializing_if = "Option::is_none")]
|
||||
pub bucket: Option<String>,
|
||||
#[serde(rename = "object", skip_serializing_if = "Option::is_none")]
|
||||
pub object: Option<String>,
|
||||
#[serde(rename = "versionId", skip_serializing_if = "Option::is_none")]
|
||||
pub version_id: Option<String>,
|
||||
#[serde(rename = "objects", skip_serializing_if = "Option::is_none")]
|
||||
pub objects: Option<Vec<ObjectVersion>>,
|
||||
#[serde(rename = "metadata", skip_serializing_if = "Option::is_none")]
|
||||
pub metadata: Option<HashMap<String, String>>,
|
||||
}
|
||||
|
||||
impl Args {
|
||||
/// Create a new Args object
|
||||
pub fn new() -> Self {
|
||||
Args {
|
||||
bucket: None,
|
||||
object: None,
|
||||
version_id: None,
|
||||
objects: None,
|
||||
metadata: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the bucket
|
||||
pub fn set_bucket(mut self, bucket: Option<String>) -> Self {
|
||||
self.bucket = bucket;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the object
|
||||
pub fn set_object(mut self, object: Option<String>) -> Self {
|
||||
self.object = object;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the version ID
|
||||
pub fn set_version_id(mut self, version_id: Option<String>) -> Self {
|
||||
self.version_id = version_id;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the objects
|
||||
pub fn set_objects(mut self, objects: Option<Vec<ObjectVersion>>) -> Self {
|
||||
self.objects = objects;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the metadata
|
||||
pub fn set_metadata(mut self, metadata: Option<HashMap<String, String>>) -> Self {
|
||||
self.metadata = metadata;
|
||||
self
|
||||
}
|
||||
}
|
||||
@@ -1,467 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::{BaseLogEntry, LogRecord, ObjectVersion};
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// API details structure
|
||||
/// ApiDetails is used to define the details of an API operation
|
||||
///
|
||||
/// The `ApiDetails` structure contains the following fields:
|
||||
/// - `name` - the name of the API operation
|
||||
/// - `bucket` - the bucket name
|
||||
/// - `object` - the object name
|
||||
/// - `objects` - the list of objects
|
||||
/// - `status` - the status of the API operation
|
||||
/// - `status_code` - the status code of the API operation
|
||||
/// - `input_bytes` - the input bytes
|
||||
/// - `output_bytes` - the output bytes
|
||||
/// - `header_bytes` - the header bytes
|
||||
/// - `time_to_first_byte` - the time to first byte
|
||||
/// - `time_to_first_byte_in_ns` - the time to first byte in nanoseconds
|
||||
/// - `time_to_response` - the time to response
|
||||
/// - `time_to_response_in_ns` - the time to response in nanoseconds
|
||||
///
|
||||
/// The `ApiDetails` structure contains the following methods:
|
||||
/// - `new` - create a new `ApiDetails` with default values
|
||||
/// - `set_name` - set the name
|
||||
/// - `set_bucket` - set the bucket
|
||||
/// - `set_object` - set the object
|
||||
/// - `set_objects` - set the objects
|
||||
/// - `set_status` - set the status
|
||||
/// - `set_status_code` - set the status code
|
||||
/// - `set_input_bytes` - set the input bytes
|
||||
/// - `set_output_bytes` - set the output bytes
|
||||
/// - `set_header_bytes` - set the header bytes
|
||||
/// - `set_time_to_first_byte` - set the time to first byte
|
||||
/// - `set_time_to_first_byte_in_ns` - set the time to first byte in nanoseconds
|
||||
/// - `set_time_to_response` - set the time to response
|
||||
/// - `set_time_to_response_in_ns` - set the time to response in nanoseconds
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use rustfs_obs::ApiDetails;
|
||||
/// use rustfs_obs::ObjectVersion;
|
||||
///
|
||||
/// let api = ApiDetails::new()
|
||||
/// .set_name(Some("GET".to_string()))
|
||||
/// .set_bucket(Some("my-bucket".to_string()))
|
||||
/// .set_object(Some("my-object".to_string()))
|
||||
/// .set_objects(vec![ObjectVersion::new_with_object_name("my-object".to_string())])
|
||||
/// .set_status(Some("OK".to_string()))
|
||||
/// .set_status_code(Some(200))
|
||||
/// .set_input_bytes(100)
|
||||
/// .set_output_bytes(200)
|
||||
/// .set_header_bytes(Some(50))
|
||||
/// .set_time_to_first_byte(Some("100ms".to_string()))
|
||||
/// .set_time_to_first_byte_in_ns(Some("100000000ns".to_string()))
|
||||
/// .set_time_to_response(Some("200ms".to_string()))
|
||||
/// .set_time_to_response_in_ns(Some("200000000ns".to_string()));
|
||||
/// ```
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, Default, PartialEq, Eq)]
|
||||
pub struct ApiDetails {
|
||||
#[serde(rename = "name", skip_serializing_if = "Option::is_none")]
|
||||
pub name: Option<String>,
|
||||
#[serde(rename = "bucket", skip_serializing_if = "Option::is_none")]
|
||||
pub bucket: Option<String>,
|
||||
#[serde(rename = "object", skip_serializing_if = "Option::is_none")]
|
||||
pub object: Option<String>,
|
||||
#[serde(rename = "objects", skip_serializing_if = "Vec::is_empty", default)]
|
||||
pub objects: Vec<ObjectVersion>,
|
||||
#[serde(rename = "status", skip_serializing_if = "Option::is_none")]
|
||||
pub status: Option<String>,
|
||||
#[serde(rename = "statusCode", skip_serializing_if = "Option::is_none")]
|
||||
pub status_code: Option<i32>,
|
||||
#[serde(rename = "rx")]
|
||||
pub input_bytes: i64,
|
||||
#[serde(rename = "tx")]
|
||||
pub output_bytes: i64,
|
||||
#[serde(rename = "txHeaders", skip_serializing_if = "Option::is_none")]
|
||||
pub header_bytes: Option<i64>,
|
||||
#[serde(rename = "timeToFirstByte", skip_serializing_if = "Option::is_none")]
|
||||
pub time_to_first_byte: Option<String>,
|
||||
#[serde(rename = "timeToFirstByteInNS", skip_serializing_if = "Option::is_none")]
|
||||
pub time_to_first_byte_in_ns: Option<String>,
|
||||
#[serde(rename = "timeToResponse", skip_serializing_if = "Option::is_none")]
|
||||
pub time_to_response: Option<String>,
|
||||
#[serde(rename = "timeToResponseInNS", skip_serializing_if = "Option::is_none")]
|
||||
pub time_to_response_in_ns: Option<String>,
|
||||
}
|
||||
|
||||
impl ApiDetails {
|
||||
/// Create a new `ApiDetails` with default values
|
||||
pub fn new() -> Self {
|
||||
ApiDetails {
|
||||
name: None,
|
||||
bucket: None,
|
||||
object: None,
|
||||
objects: Vec::new(),
|
||||
status: None,
|
||||
status_code: None,
|
||||
input_bytes: 0,
|
||||
output_bytes: 0,
|
||||
header_bytes: None,
|
||||
time_to_first_byte: None,
|
||||
time_to_first_byte_in_ns: None,
|
||||
time_to_response: None,
|
||||
time_to_response_in_ns: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the name
|
||||
pub fn set_name(mut self, name: Option<String>) -> Self {
|
||||
self.name = name;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the bucket
|
||||
pub fn set_bucket(mut self, bucket: Option<String>) -> Self {
|
||||
self.bucket = bucket;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the object
|
||||
pub fn set_object(mut self, object: Option<String>) -> Self {
|
||||
self.object = object;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the objects
|
||||
pub fn set_objects(mut self, objects: Vec<ObjectVersion>) -> Self {
|
||||
self.objects = objects;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the status
|
||||
pub fn set_status(mut self, status: Option<String>) -> Self {
|
||||
self.status = status;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the status code
|
||||
pub fn set_status_code(mut self, status_code: Option<i32>) -> Self {
|
||||
self.status_code = status_code;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the input bytes
|
||||
pub fn set_input_bytes(mut self, input_bytes: i64) -> Self {
|
||||
self.input_bytes = input_bytes;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the output bytes
|
||||
pub fn set_output_bytes(mut self, output_bytes: i64) -> Self {
|
||||
self.output_bytes = output_bytes;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the header bytes
|
||||
pub fn set_header_bytes(mut self, header_bytes: Option<i64>) -> Self {
|
||||
self.header_bytes = header_bytes;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the time to first byte
|
||||
pub fn set_time_to_first_byte(mut self, time_to_first_byte: Option<String>) -> Self {
|
||||
self.time_to_first_byte = time_to_first_byte;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the time to first byte in nanoseconds
|
||||
pub fn set_time_to_first_byte_in_ns(mut self, time_to_first_byte_in_ns: Option<String>) -> Self {
|
||||
self.time_to_first_byte_in_ns = time_to_first_byte_in_ns;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the time to response
|
||||
pub fn set_time_to_response(mut self, time_to_response: Option<String>) -> Self {
|
||||
self.time_to_response = time_to_response;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the time to response in nanoseconds
|
||||
pub fn set_time_to_response_in_ns(mut self, time_to_response_in_ns: Option<String>) -> Self {
|
||||
self.time_to_response_in_ns = time_to_response_in_ns;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
/// Entry - audit entry logs
|
||||
/// AuditLogEntry is used to define the structure of an audit log entry
|
||||
///
|
||||
/// The `AuditLogEntry` structure contains the following fields:
|
||||
/// - `base` - the base log entry
|
||||
/// - `version` - the version of the audit log entry
|
||||
/// - `deployment_id` - the deployment ID
|
||||
/// - `event` - the event
|
||||
/// - `entry_type` - the type of audit message
|
||||
/// - `api` - the API details
|
||||
/// - `remote_host` - the remote host
|
||||
/// - `user_agent` - the user agent
|
||||
/// - `req_path` - the request path
|
||||
/// - `req_host` - the request host
|
||||
/// - `req_claims` - the request claims
|
||||
/// - `req_query` - the request query
|
||||
/// - `req_header` - the request header
|
||||
/// - `resp_header` - the response header
|
||||
/// - `access_key` - the access key
|
||||
/// - `parent_user` - the parent user
|
||||
/// - `error` - the error
|
||||
///
|
||||
/// The `AuditLogEntry` structure contains the following methods:
|
||||
/// - `new` - create a new `AuditEntry` with default values
|
||||
/// - `new_with_values` - create a new `AuditEntry` with version, time, event and api details
|
||||
/// - `with_base` - set the base log entry
|
||||
/// - `set_version` - set the version
|
||||
/// - `set_deployment_id` - set the deployment ID
|
||||
/// - `set_event` - set the event
|
||||
/// - `set_entry_type` - set the entry type
|
||||
/// - `set_api` - set the API details
|
||||
/// - `set_remote_host` - set the remote host
|
||||
/// - `set_user_agent` - set the user agent
|
||||
/// - `set_req_path` - set the request path
|
||||
/// - `set_req_host` - set the request host
|
||||
/// - `set_req_claims` - set the request claims
|
||||
/// - `set_req_query` - set the request query
|
||||
/// - `set_req_header` - set the request header
|
||||
/// - `set_resp_header` - set the response header
|
||||
/// - `set_access_key` - set the access key
|
||||
/// - `set_parent_user` - set the parent user
|
||||
/// - `set_error` - set the error
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use rustfs_obs::AuditLogEntry;
|
||||
/// use rustfs_obs::ApiDetails;
|
||||
/// use std::collections::HashMap;
|
||||
///
|
||||
/// let entry = AuditLogEntry::new()
|
||||
/// .set_version("1.0".to_string())
|
||||
/// .set_deployment_id(Some("123".to_string()))
|
||||
/// .set_event("event".to_string())
|
||||
/// .set_entry_type(Some("type".to_string()))
|
||||
/// .set_api(ApiDetails::new())
|
||||
/// .set_remote_host(Some("remote-host".to_string()))
|
||||
/// .set_user_agent(Some("user-agent".to_string()))
|
||||
/// .set_req_path(Some("req-path".to_string()))
|
||||
/// .set_req_host(Some("req-host".to_string()))
|
||||
/// .set_req_claims(Some(HashMap::new()))
|
||||
/// .set_req_query(Some(HashMap::new()))
|
||||
/// .set_req_header(Some(HashMap::new()))
|
||||
/// .set_resp_header(Some(HashMap::new()))
|
||||
/// .set_access_key(Some("access-key".to_string()))
|
||||
/// .set_parent_user(Some("parent-user".to_string()))
|
||||
/// .set_error(Some("error".to_string()));
|
||||
#[derive(Debug, Serialize, Deserialize, Clone, Default)]
|
||||
pub struct AuditLogEntry {
|
||||
#[serde(flatten)]
|
||||
pub base: BaseLogEntry,
|
||||
pub version: String,
|
||||
#[serde(rename = "deploymentid", skip_serializing_if = "Option::is_none")]
|
||||
pub deployment_id: Option<String>,
|
||||
pub event: String,
|
||||
// Class of audit message - S3, admin ops, bucket management
|
||||
#[serde(rename = "type", skip_serializing_if = "Option::is_none")]
|
||||
pub entry_type: Option<String>,
|
||||
pub api: ApiDetails,
|
||||
#[serde(rename = "remotehost", skip_serializing_if = "Option::is_none")]
|
||||
pub remote_host: Option<String>,
|
||||
#[serde(rename = "userAgent", skip_serializing_if = "Option::is_none")]
|
||||
pub user_agent: Option<String>,
|
||||
#[serde(rename = "requestPath", skip_serializing_if = "Option::is_none")]
|
||||
pub req_path: Option<String>,
|
||||
#[serde(rename = "requestHost", skip_serializing_if = "Option::is_none")]
|
||||
pub req_host: Option<String>,
|
||||
#[serde(rename = "requestClaims", skip_serializing_if = "Option::is_none")]
|
||||
pub req_claims: Option<HashMap<String, Value>>,
|
||||
#[serde(rename = "requestQuery", skip_serializing_if = "Option::is_none")]
|
||||
pub req_query: Option<HashMap<String, String>>,
|
||||
#[serde(rename = "requestHeader", skip_serializing_if = "Option::is_none")]
|
||||
pub req_header: Option<HashMap<String, String>>,
|
||||
#[serde(rename = "responseHeader", skip_serializing_if = "Option::is_none")]
|
||||
pub resp_header: Option<HashMap<String, String>>,
|
||||
#[serde(rename = "accessKey", skip_serializing_if = "Option::is_none")]
|
||||
pub access_key: Option<String>,
|
||||
#[serde(rename = "parentUser", skip_serializing_if = "Option::is_none")]
|
||||
pub parent_user: Option<String>,
|
||||
#[serde(rename = "error", skip_serializing_if = "Option::is_none")]
|
||||
pub error: Option<String>,
|
||||
}
|
||||
|
||||
impl AuditLogEntry {
|
||||
/// Create a new `AuditEntry` with default values
|
||||
pub fn new() -> Self {
|
||||
AuditLogEntry {
|
||||
base: BaseLogEntry::new(),
|
||||
version: String::new(),
|
||||
deployment_id: None,
|
||||
event: String::new(),
|
||||
entry_type: None,
|
||||
api: ApiDetails::new(),
|
||||
remote_host: None,
|
||||
user_agent: None,
|
||||
req_path: None,
|
||||
req_host: None,
|
||||
req_claims: None,
|
||||
req_query: None,
|
||||
req_header: None,
|
||||
resp_header: None,
|
||||
access_key: None,
|
||||
parent_user: None,
|
||||
error: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new `AuditEntry` with version, time, event and api details
|
||||
pub fn new_with_values(version: String, time: DateTime<Utc>, event: String, api: ApiDetails) -> Self {
|
||||
let mut base = BaseLogEntry::new();
|
||||
base.timestamp = time;
|
||||
|
||||
AuditLogEntry {
|
||||
base,
|
||||
version,
|
||||
deployment_id: None,
|
||||
event,
|
||||
entry_type: None,
|
||||
api,
|
||||
remote_host: None,
|
||||
user_agent: None,
|
||||
req_path: None,
|
||||
req_host: None,
|
||||
req_claims: None,
|
||||
req_query: None,
|
||||
req_header: None,
|
||||
resp_header: None,
|
||||
access_key: None,
|
||||
parent_user: None,
|
||||
error: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the base log entry
|
||||
pub fn with_base(mut self, base: BaseLogEntry) -> Self {
|
||||
self.base = base;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the version
|
||||
pub fn set_version(mut self, version: String) -> Self {
|
||||
self.version = version;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the deployment ID
|
||||
pub fn set_deployment_id(mut self, deployment_id: Option<String>) -> Self {
|
||||
self.deployment_id = deployment_id;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the event
|
||||
pub fn set_event(mut self, event: String) -> Self {
|
||||
self.event = event;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the entry type
|
||||
pub fn set_entry_type(mut self, entry_type: Option<String>) -> Self {
|
||||
self.entry_type = entry_type;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the API details
|
||||
pub fn set_api(mut self, api: ApiDetails) -> Self {
|
||||
self.api = api;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the remote host
|
||||
pub fn set_remote_host(mut self, remote_host: Option<String>) -> Self {
|
||||
self.remote_host = remote_host;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the user agent
|
||||
pub fn set_user_agent(mut self, user_agent: Option<String>) -> Self {
|
||||
self.user_agent = user_agent;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the request path
|
||||
pub fn set_req_path(mut self, req_path: Option<String>) -> Self {
|
||||
self.req_path = req_path;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the request host
|
||||
pub fn set_req_host(mut self, req_host: Option<String>) -> Self {
|
||||
self.req_host = req_host;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the request claims
|
||||
pub fn set_req_claims(mut self, req_claims: Option<HashMap<String, Value>>) -> Self {
|
||||
self.req_claims = req_claims;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the request query
|
||||
pub fn set_req_query(mut self, req_query: Option<HashMap<String, String>>) -> Self {
|
||||
self.req_query = req_query;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the request header
|
||||
pub fn set_req_header(mut self, req_header: Option<HashMap<String, String>>) -> Self {
|
||||
self.req_header = req_header;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the response header
|
||||
pub fn set_resp_header(mut self, resp_header: Option<HashMap<String, String>>) -> Self {
|
||||
self.resp_header = resp_header;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the access key
|
||||
pub fn set_access_key(mut self, access_key: Option<String>) -> Self {
|
||||
self.access_key = access_key;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the parent user
|
||||
pub fn set_parent_user(mut self, parent_user: Option<String>) -> Self {
|
||||
self.parent_user = parent_user;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the error
|
||||
pub fn set_error(mut self, error: Option<String>) -> Self {
|
||||
self.error = error;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl LogRecord for AuditLogEntry {
|
||||
fn to_json(&self) -> String {
|
||||
serde_json::to_string(self).unwrap_or_else(|_| String::from("{}"))
|
||||
}
|
||||
|
||||
fn get_timestamp(&self) -> DateTime<Utc> {
|
||||
self.base.timestamp
|
||||
}
|
||||
}
|
||||
@@ -1,106 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use std::collections::HashMap;
|
||||
|
||||
/// Base log entry structure shared by all log types
|
||||
/// This structure is used to serialize log entries to JSON
|
||||
/// and send them to the log sinks
|
||||
/// This structure is also used to deserialize log entries from JSON
|
||||
/// This structure is also used to store log entries in the database
|
||||
/// This structure is also used to query log entries from the database
|
||||
///
|
||||
/// The `BaseLogEntry` structure contains the following fields:
|
||||
/// - `timestamp` - the timestamp of the log entry
|
||||
/// - `request_id` - the request ID of the log entry
|
||||
/// - `message` - the message of the log entry
|
||||
/// - `tags` - the tags of the log entry
|
||||
///
|
||||
/// The `BaseLogEntry` structure contains the following methods:
|
||||
/// - `new` - create a new `BaseLogEntry` with default values
|
||||
/// - `message` - set the message
|
||||
/// - `request_id` - set the request ID
|
||||
/// - `tags` - set the tags
|
||||
/// - `timestamp` - set the timestamp
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use rustfs_obs::BaseLogEntry;
|
||||
/// use chrono::{DateTime, Utc};
|
||||
/// use std::collections::HashMap;
|
||||
///
|
||||
/// let timestamp = Utc::now();
|
||||
/// let request = Some("req-123".to_string());
|
||||
/// let message = Some("This is a log message".to_string());
|
||||
/// let tags = Some(HashMap::new());
|
||||
///
|
||||
/// let entry = BaseLogEntry::new()
|
||||
/// .timestamp(timestamp)
|
||||
/// .request_id(request)
|
||||
/// .message(message)
|
||||
/// .tags(tags);
|
||||
/// ```
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq, Default)]
|
||||
pub struct BaseLogEntry {
|
||||
#[serde(rename = "time")]
|
||||
pub timestamp: DateTime<Utc>,
|
||||
|
||||
#[serde(rename = "requestID", skip_serializing_if = "Option::is_none")]
|
||||
pub request_id: Option<String>,
|
||||
|
||||
#[serde(rename = "message", skip_serializing_if = "Option::is_none")]
|
||||
pub message: Option<String>,
|
||||
|
||||
#[serde(rename = "tags", skip_serializing_if = "Option::is_none")]
|
||||
pub tags: Option<HashMap<String, Value>>,
|
||||
}
|
||||
|
||||
impl BaseLogEntry {
|
||||
/// Create a new BaseLogEntry with default values
|
||||
pub fn new() -> Self {
|
||||
BaseLogEntry {
|
||||
timestamp: Utc::now(),
|
||||
request_id: None,
|
||||
message: None,
|
||||
tags: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the message
|
||||
pub fn message(mut self, message: Option<String>) -> Self {
|
||||
self.message = message;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the request ID
|
||||
pub fn request_id(mut self, request_id: Option<String>) -> Self {
|
||||
self.request_id = request_id;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the tags
|
||||
pub fn tags(mut self, tags: Option<HashMap<String, Value>>) -> Self {
|
||||
self.tags = tags;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the timestamp
|
||||
pub fn timestamp(mut self, timestamp: DateTime<Utc>) -> Self {
|
||||
self.timestamp = timestamp;
|
||||
self
|
||||
}
|
||||
}
|
||||
@@ -1,158 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub(crate) mod args;
|
||||
pub(crate) mod audit;
|
||||
pub(crate) mod base;
|
||||
pub(crate) mod unified;
|
||||
|
||||
use serde::de::Error;
|
||||
use serde::{Deserialize, Deserializer, Serialize, Serializer};
|
||||
use tracing_core::Level;
|
||||
|
||||
/// ObjectVersion is used across multiple modules
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
|
||||
pub struct ObjectVersion {
|
||||
#[serde(rename = "name")]
|
||||
pub object_name: String,
|
||||
#[serde(rename = "versionId", skip_serializing_if = "Option::is_none")]
|
||||
pub version_id: Option<String>,
|
||||
}
|
||||
|
||||
impl ObjectVersion {
|
||||
/// Create a new ObjectVersion object
|
||||
pub fn new() -> Self {
|
||||
ObjectVersion {
|
||||
object_name: String::new(),
|
||||
version_id: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new ObjectVersion with object name
|
||||
pub fn new_with_object_name(object_name: String) -> Self {
|
||||
ObjectVersion {
|
||||
object_name,
|
||||
version_id: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the object name
|
||||
pub fn set_object_name(mut self, object_name: String) -> Self {
|
||||
self.object_name = object_name;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the version ID
|
||||
pub fn set_version_id(mut self, version_id: Option<String>) -> Self {
|
||||
self.version_id = version_id;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for ObjectVersion {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
/// Log kind/level enum
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]
|
||||
pub enum LogKind {
|
||||
#[serde(rename = "INFO")]
|
||||
#[default]
|
||||
Info,
|
||||
#[serde(rename = "WARNING")]
|
||||
Warning,
|
||||
#[serde(rename = "ERROR")]
|
||||
Error,
|
||||
#[serde(rename = "FATAL")]
|
||||
Fatal,
|
||||
}
|
||||
|
||||
/// Trait for types that can be serialized to JSON and have a timestamp
|
||||
/// This trait is used by `ServerLogEntry` to convert the log entry to JSON
|
||||
/// and get the timestamp of the log entry
|
||||
/// This trait is implemented by `ServerLogEntry`
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use rustfs_obs::LogRecord;
|
||||
/// use chrono::{DateTime, Utc};
|
||||
/// use rustfs_obs::ServerLogEntry;
|
||||
/// use tracing_core::Level;
|
||||
///
|
||||
/// let log_entry = ServerLogEntry::new(Level::INFO, "api_handler".to_string());
|
||||
/// let json = log_entry.to_json();
|
||||
/// let timestamp = log_entry.get_timestamp();
|
||||
/// ```
|
||||
pub trait LogRecord {
|
||||
fn to_json(&self) -> String;
|
||||
fn get_timestamp(&self) -> chrono::DateTime<chrono::Utc>;
|
||||
}
|
||||
|
||||
/// Wrapper for `tracing_core::Level` to implement `Serialize` and `Deserialize`
|
||||
/// for `ServerLogEntry`
|
||||
/// This is necessary because `tracing_core::Level` does not implement `Serialize`
|
||||
/// and `Deserialize`
|
||||
/// This is a workaround to allow `ServerLogEntry` to be serialized and deserialized
|
||||
/// using `serde`
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use rustfs_obs::SerializableLevel;
|
||||
/// use tracing_core::Level;
|
||||
///
|
||||
/// let level = Level::INFO;
|
||||
/// let serializable_level = SerializableLevel::from(level);
|
||||
/// ```
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub struct SerializableLevel(pub Level);
|
||||
|
||||
impl From<Level> for SerializableLevel {
|
||||
fn from(level: Level) -> Self {
|
||||
SerializableLevel(level)
|
||||
}
|
||||
}
|
||||
|
||||
impl From<SerializableLevel> for Level {
|
||||
fn from(serializable_level: SerializableLevel) -> Self {
|
||||
serializable_level.0
|
||||
}
|
||||
}
|
||||
|
||||
impl Serialize for SerializableLevel {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: Serializer,
|
||||
{
|
||||
serializer.serialize_str(self.0.as_str())
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> Deserialize<'de> for SerializableLevel {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: Deserializer<'de>,
|
||||
{
|
||||
let s = String::deserialize(deserializer)?;
|
||||
match s.as_str() {
|
||||
"TRACE" => Ok(SerializableLevel(Level::TRACE)),
|
||||
"DEBUG" => Ok(SerializableLevel(Level::DEBUG)),
|
||||
"INFO" => Ok(SerializableLevel(Level::INFO)),
|
||||
"WARN" => Ok(SerializableLevel(Level::WARN)),
|
||||
"ERROR" => Ok(SerializableLevel(Level::ERROR)),
|
||||
_ => Err(D::Error::custom("unknown log level")),
|
||||
}
|
||||
}
|
||||
}
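Although this commit deletes the file that defined SerializableLevel, the wrapper pattern it used is worth a quick round-trip sketch: the level is serialized as its string form (via Level::as_str) and parsed back on deserialization. The wrapper below is a local stand-in (it parses via Level's FromStr instead of the hand-written match above), and serde_json is assumed purely for illustration.

use serde::de::Error;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use tracing_core::Level;

// Local stand-in mirroring the removed SerializableLevel wrapper.
#[derive(Debug, PartialEq)]
struct LevelWrapper(Level);

impl Serialize for LevelWrapper {
    fn serialize<S: Serializer>(&self, s: S) -> Result<S::Ok, S::Error> {
        s.serialize_str(self.0.as_str()) // e.g. "INFO"
    }
}

impl<'de> Deserialize<'de> for LevelWrapper {
    fn deserialize<D: Deserializer<'de>>(d: D) -> Result<Self, D::Error> {
        let s = String::deserialize(d)?;
        s.parse::<Level>().map(LevelWrapper).map_err(D::Error::custom)
    }
}

fn main() {
    let json = serde_json::to_string(&LevelWrapper(Level::INFO)).unwrap();
    let back: LevelWrapper = serde_json::from_str(&json).unwrap();
    assert_eq!(back, LevelWrapper(Level::INFO));
    println!("serialized form: {json}");
}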
|
||||
@@ -1,301 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::{AuditLogEntry, BaseLogEntry, LogKind, LogRecord, SerializableLevel};
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tracing_core::Level;
|
||||
|
||||
/// Server log entry with structured fields
|
||||
/// ServerLogEntry is used to log structured log entries from the server
|
||||
///
|
||||
/// The `ServerLogEntry` structure contains the following fields:
|
||||
/// - `base` - the base log entry
|
||||
/// - `level` - the log level
|
||||
/// - `source` - the source of the log entry
|
||||
/// - `user_id` - the user ID
|
||||
/// - `fields` - the structured fields of the log entry
|
||||
///
|
||||
/// The `ServerLogEntry` structure contains the following methods:
|
||||
/// - `new` - create a new `ServerLogEntry` with specified level and source
|
||||
/// - `with_base` - set the base log entry
|
||||
/// - `user_id` - set the user ID
|
||||
/// - `fields` - set the fields
|
||||
/// - `add_field` - add a field
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use rustfs_obs::ServerLogEntry;
|
||||
/// use tracing_core::Level;
|
||||
///
|
||||
/// let entry = ServerLogEntry::new(Level::INFO, "test_module".to_string())
|
||||
/// .user_id(Some("user-456".to_string()))
|
||||
/// .add_field("operation".to_string(), "login".to_string());
|
||||
/// ```
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct ServerLogEntry {
|
||||
#[serde(flatten)]
|
||||
pub base: BaseLogEntry,
|
||||
|
||||
pub level: SerializableLevel,
|
||||
pub source: String,
|
||||
|
||||
#[serde(rename = "userId", skip_serializing_if = "Option::is_none")]
|
||||
pub user_id: Option<String>,
|
||||
|
||||
#[serde(skip_serializing_if = "Vec::is_empty", default)]
|
||||
pub fields: Vec<(String, String)>,
|
||||
}
|
||||
|
||||
impl ServerLogEntry {
|
||||
/// Create a new ServerLogEntry with specified level and source
|
||||
pub fn new(level: Level, source: String) -> Self {
|
||||
ServerLogEntry {
|
||||
base: BaseLogEntry::new(),
|
||||
level: SerializableLevel(level),
|
||||
source,
|
||||
user_id: None,
|
||||
fields: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the base log entry
|
||||
pub fn with_base(mut self, base: BaseLogEntry) -> Self {
|
||||
self.base = base;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the user ID
|
||||
pub fn user_id(mut self, user_id: Option<String>) -> Self {
|
||||
self.user_id = user_id;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set fields
|
||||
pub fn fields(mut self, fields: Vec<(String, String)>) -> Self {
|
||||
self.fields = fields;
|
||||
self
|
||||
}
|
||||
|
||||
/// Add a field
|
||||
pub fn add_field(mut self, key: String, value: String) -> Self {
|
||||
self.fields.push((key, value));
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl LogRecord for ServerLogEntry {
|
||||
fn to_json(&self) -> String {
|
||||
serde_json::to_string(self).unwrap_or_else(|_| String::from("{}"))
|
||||
}
|
||||
|
||||
fn get_timestamp(&self) -> DateTime<Utc> {
|
||||
self.base.timestamp
|
||||
}
|
||||
}
|
||||
|
||||
/// Console log entry structure
|
||||
/// ConsoleLogEntry is used to log console log entries
|
||||
/// The `ConsoleLogEntry` structure contains the following fields:
|
||||
/// - `base` - the base log entry
|
||||
/// - `level` - the log level
|
||||
/// - `console_msg` - the console message
|
||||
/// - `node_name` - the node name
|
||||
/// - `err` - the error message
|
||||
///
|
||||
/// The `ConsoleLogEntry` structure contains the following methods:
|
||||
/// - `new` - create a new `ConsoleLogEntry`
|
||||
/// - `new_with_console_msg` - create a new `ConsoleLogEntry` with console message and node name
|
||||
/// - `with_base` - set the base log entry
|
||||
/// - `set_level` - set the log level
|
||||
/// - `set_node_name` - set the node name
|
||||
/// - `set_console_msg` - set the console message
|
||||
/// - `set_err` - set the error message
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use rustfs_obs::ConsoleLogEntry;
|
||||
///
|
||||
/// let entry = ConsoleLogEntry::new_with_console_msg("Test message".to_string(), "node-123".to_string());
|
||||
/// ```
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ConsoleLogEntry {
|
||||
#[serde(flatten)]
|
||||
pub base: BaseLogEntry,
|
||||
|
||||
pub level: LogKind,
|
||||
pub console_msg: String,
|
||||
pub node_name: String,
|
||||
|
||||
#[serde(skip)]
|
||||
pub err: Option<String>,
|
||||
}
|
||||
|
||||
impl ConsoleLogEntry {
|
||||
/// Create a new ConsoleLogEntry
|
||||
pub fn new() -> Self {
|
||||
ConsoleLogEntry {
|
||||
base: BaseLogEntry::new(),
|
||||
level: LogKind::Info,
|
||||
console_msg: String::new(),
|
||||
node_name: String::new(),
|
||||
err: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a new ConsoleLogEntry with console message and node name
|
||||
pub fn new_with_console_msg(console_msg: String, node_name: String) -> Self {
|
||||
ConsoleLogEntry {
|
||||
base: BaseLogEntry::new(),
|
||||
level: LogKind::Info,
|
||||
console_msg,
|
||||
node_name,
|
||||
err: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the base log entry
|
||||
pub fn with_base(mut self, base: BaseLogEntry) -> Self {
|
||||
self.base = base;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the log level
|
||||
pub fn set_level(mut self, level: LogKind) -> Self {
|
||||
self.level = level;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the node name
|
||||
pub fn set_node_name(mut self, node_name: String) -> Self {
|
||||
self.node_name = node_name;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the console message
|
||||
pub fn set_console_msg(mut self, console_msg: String) -> Self {
|
||||
self.console_msg = console_msg;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the error message
|
||||
pub fn set_err(mut self, err: Option<String>) -> Self {
|
||||
self.err = err;
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for ConsoleLogEntry {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl LogRecord for ConsoleLogEntry {
|
||||
fn to_json(&self) -> String {
|
||||
serde_json::to_string(self).unwrap_or_else(|_| String::from("{}"))
|
||||
}
|
||||
|
||||
fn get_timestamp(&self) -> DateTime<Utc> {
|
||||
self.base.timestamp
|
||||
}
|
||||
}
|
||||
|
||||
/// Unified log entry type
|
||||
/// UnifiedLogEntry is used to log different types of log entries
|
||||
///
|
||||
/// The `UnifiedLogEntry` enum contains the following variants:
|
||||
/// - `Server` - a server log entry
|
||||
/// - `Audit` - an audit log entry
|
||||
/// - `Console` - a console log entry
|
||||
///
|
||||
/// The `UnifiedLogEntry` enum contains the following methods:
|
||||
/// - `to_json` - convert the log entry to JSON
|
||||
/// - `get_timestamp` - get the timestamp of the log entry
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use rustfs_obs::{UnifiedLogEntry, ServerLogEntry};
|
||||
/// use tracing_core::Level;
|
||||
///
|
||||
/// let server_entry = ServerLogEntry::new(Level::INFO, "test_module".to_string());
|
||||
/// let unified = UnifiedLogEntry::Server(server_entry);
|
||||
/// ```
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
#[serde(tag = "type")]
|
||||
pub enum UnifiedLogEntry {
|
||||
#[serde(rename = "server")]
|
||||
Server(ServerLogEntry),
|
||||
|
||||
#[serde(rename = "audit")]
|
||||
Audit(Box<AuditLogEntry>),
|
||||
|
||||
#[serde(rename = "console")]
|
||||
Console(ConsoleLogEntry),
|
||||
}
|
||||
|
||||
impl LogRecord for UnifiedLogEntry {
|
||||
fn to_json(&self) -> String {
|
||||
match self {
|
||||
UnifiedLogEntry::Server(entry) => entry.to_json(),
|
||||
UnifiedLogEntry::Audit(entry) => entry.to_json(),
|
||||
UnifiedLogEntry::Console(entry) => entry.to_json(),
|
||||
}
|
||||
}
|
||||
|
||||
fn get_timestamp(&self) -> DateTime<Utc> {
|
||||
match self {
|
||||
UnifiedLogEntry::Server(entry) => entry.get_timestamp(),
|
||||
UnifiedLogEntry::Audit(entry) => entry.get_timestamp(),
|
||||
UnifiedLogEntry::Console(entry) => entry.get_timestamp(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_base_log_entry() {
|
||||
let base = BaseLogEntry::new()
|
||||
.request_id(Some("req-123".to_string()))
|
||||
.message(Some("Test message".to_string()));
|
||||
|
||||
assert_eq!(base.request_id, Some("req-123".to_string()));
|
||||
assert_eq!(base.message, Some("Test message".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_server_log_entry() {
|
||||
let entry = ServerLogEntry::new(Level::INFO, "test_module".to_string())
|
||||
.user_id(Some("user-456".to_string()))
|
||||
.add_field("operation".to_string(), "login".to_string());
|
||||
|
||||
assert_eq!(entry.level.0, Level::INFO);
|
||||
assert_eq!(entry.source, "test_module");
|
||||
assert_eq!(entry.user_id, Some("user-456".to_string()));
|
||||
assert_eq!(entry.fields.len(), 1);
|
||||
assert_eq!(entry.fields[0], ("operation".to_string(), "login".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_unified_log_entry_json() {
|
||||
let server_entry = ServerLogEntry::new(Level::INFO, "test_source".to_string());
|
||||
let unified = UnifiedLogEntry::Server(server_entry);
|
||||
|
||||
let json = unified.to_json();
|
||||
assert!(json.contains("test_source"));
|
||||
}
|
||||
}
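For orientation, a minimal usage sketch of the entry types above (a sketch only, assuming `ConsoleLogEntry`, `LogKind`, `LogRecord`, and `UnifiedLogEntry` remain re-exported from `rustfs_obs` as elsewhere in this diff):

use rustfs_obs::{ConsoleLogEntry, LogKind, LogRecord, UnifiedLogEntry};

fn console_entry_to_json() -> String {
    // Build a console entry, tag it with a level, and serialize it the same way the sinks do.
    let entry = ConsoleLogEntry::new_with_console_msg("disk healed".to_string(), "node-1".to_string())
        .set_level(LogKind::Warning);
    UnifiedLogEntry::Console(entry).to_json()
}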
|
||||
@@ -12,9 +12,8 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::logger::InitLogStatus;
|
||||
use crate::AppConfig;
|
||||
use crate::telemetry::{OtelGuard, init_telemetry};
|
||||
use crate::{AppConfig, Logger, get_global_logger, init_global_logger};
|
||||
use std::sync::{Arc, Mutex};
|
||||
use tokio::sync::{OnceCell, SetError};
|
||||
use tracing::{error, info};
|
||||
@@ -61,46 +60,14 @@ pub enum GlobalError {
|
||||
///
|
||||
/// # #[tokio::main]
|
||||
/// # async fn main() {
|
||||
/// let (logger, guard) = init_obs(None).await;
|
||||
/// let guard = init_obs(None).await;
|
||||
/// # }
|
||||
/// ```
|
||||
pub async fn init_obs(endpoint: Option<String>) -> (Arc<tokio::sync::Mutex<Logger>>, OtelGuard) {
|
||||
pub async fn init_obs(endpoint: Option<String>) -> OtelGuard {
|
||||
// Load the configuration file
|
||||
let config = AppConfig::new_with_endpoint(endpoint);
|
||||
|
||||
let guard = init_telemetry(&config.observability);
|
||||
|
||||
let logger = init_global_logger(&config).await;
|
||||
let obs_config = config.observability.clone();
|
||||
tokio::spawn(async move {
|
||||
let result = InitLogStatus::init_start_log(&obs_config).await;
|
||||
match result {
|
||||
Ok(_) => {
|
||||
info!("Logger initialized successfully");
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to initialize logger: {}", e);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
(logger, guard)
|
||||
}
|
||||
|
||||
/// Get the global logger instance
|
||||
/// This function returns a reference to the global logger instance.
|
||||
///
|
||||
/// # Returns
|
||||
/// A reference to the global logger instance
|
||||
///
|
||||
/// # Example
|
||||
/// ```no_run
|
||||
/// use rustfs_obs::get_logger;
|
||||
///
|
||||
/// let logger = get_logger();
|
||||
/// ```
|
||||
pub fn get_logger() -> &'static Arc<tokio::sync::Mutex<Logger>> {
|
||||
get_global_logger()
|
||||
init_telemetry(&config.observability)
|
||||
}
|
||||
|
||||
/// Set the global guard for OpenTelemetry
|
||||
@@ -117,7 +84,7 @@ pub fn get_logger() -> &'static Arc<tokio::sync::Mutex<Logger>> {
|
||||
/// use rustfs_obs::{ init_obs, set_global_guard};
|
||||
///
|
||||
/// async fn init() -> Result<(), Box<dyn std::error::Error>> {
|
||||
/// let (_, guard) = init_obs(None).await;
|
||||
/// let guard = init_obs(None).await;
|
||||
/// set_global_guard(guard)?;
|
||||
/// Ok(())
|
||||
/// }
|
||||
|
||||
@@ -18,10 +18,7 @@
|
||||
//!
|
||||
//! ## Feature flags
|
||||
//!
|
||||
//! - `file`: enable file logging (enabled by default)
|
||||
//! - `gpu`: enable GPU monitoring
|
||||
//! - `kafka`: enable kafka metric output
|
||||
//! - `webhook`: enable webhook notifications
|
||||
//! - `full`: enable all of the above features
|
||||
//!
|
||||
//! to enable GPU monitoring, add the corresponding feature in `Cargo.toml`
|
||||
@@ -41,27 +38,15 @@
|
||||
///
|
||||
/// # #[tokio::main]
|
||||
/// # async fn main() {
|
||||
/// let (logger, guard) = init_obs(None).await;
|
||||
/// # let guard = init_obs(None).await;
|
||||
/// # }
|
||||
/// ```
|
||||
mod config;
|
||||
mod entry;
|
||||
mod global;
|
||||
mod logger;
|
||||
mod metrics;
|
||||
mod sinks;
|
||||
mod system;
|
||||
mod telemetry;
|
||||
mod worker;
|
||||
|
||||
pub use config::{AppConfig, LoggerConfig, OtelConfig, SinkConfig};
|
||||
pub use entry::args::Args;
|
||||
pub use entry::audit::{ApiDetails, AuditLogEntry};
|
||||
pub use entry::base::BaseLogEntry;
|
||||
pub use entry::unified::{ConsoleLogEntry, ServerLogEntry, UnifiedLogEntry};
|
||||
pub use entry::{LogKind, LogRecord, ObjectVersion, SerializableLevel};
|
||||
pub use config::AppConfig;
|
||||
pub use global::*;
|
||||
pub use logger::Logger;
|
||||
pub use logger::{get_global_logger, init_global_logger, start_logger};
|
||||
pub use logger::{log_debug, log_error, log_info, log_trace, log_warn, log_with_context};
|
||||
pub use system::SystemObserver;
|
||||
|
||||
@@ -1,490 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::sinks::Sink;
|
||||
use crate::{
|
||||
AppConfig, AuditLogEntry, BaseLogEntry, ConsoleLogEntry, GlobalError, OtelConfig, ServerLogEntry, UnifiedLogEntry, sinks,
|
||||
};
|
||||
use rustfs_config::{APP_NAME, ENVIRONMENT, SERVICE_VERSION};
|
||||
use std::sync::Arc;
|
||||
use std::time::SystemTime;
|
||||
use tokio::sync::mpsc::{self, Receiver, Sender};
|
||||
use tokio::sync::{Mutex, OnceCell};
|
||||
use tracing_core::Level;
|
||||
|
||||
// Add the global instance at the module level
|
||||
static GLOBAL_LOGGER: OnceCell<Arc<Mutex<Logger>>> = OnceCell::const_new();
|
||||
|
||||
/// Server log processor
|
||||
#[derive(Debug)]
|
||||
pub struct Logger {
|
||||
sender: Sender<UnifiedLogEntry>, // Log sending channel
|
||||
queue_capacity: usize,
|
||||
}
|
||||
|
||||
impl Logger {
|
||||
/// Create a new Logger instance
|
||||
/// Returns Logger and corresponding Receiver
|
||||
pub fn new(config: &AppConfig) -> (Self, Receiver<UnifiedLogEntry>) {
|
||||
// Get the queue capacity from configuration, or fall back to the default of 10000
|
||||
let queue_capacity = config.logger.as_ref().and_then(|l| l.queue_capacity).unwrap_or(10000);
|
||||
let (sender, receiver) = mpsc::channel(queue_capacity);
|
||||
(Logger { sender, queue_capacity }, receiver)
|
||||
}
|
||||
|
||||
/// Get the queue capacity
|
||||
/// This function returns the queue capacity.
|
||||
/// # Returns
|
||||
/// The queue capacity
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use rustfs_obs::Logger;
|
||||
/// async fn example(logger: &Logger) {
|
||||
/// let _ = logger.get_queue_capacity();
|
||||
/// }
|
||||
/// ```
|
||||
pub fn get_queue_capacity(&self) -> usize {
|
||||
self.queue_capacity
|
||||
}
|
||||
|
||||
/// Log a server entry
|
||||
#[tracing::instrument(skip(self), fields(log_source = "logger_server"))]
|
||||
pub async fn log_server_entry(&self, entry: ServerLogEntry) -> Result<(), GlobalError> {
|
||||
self.log_entry(UnifiedLogEntry::Server(entry)).await
|
||||
}
|
||||
|
||||
/// Log an audit entry
|
||||
#[tracing::instrument(skip(self), fields(log_source = "logger_audit"))]
|
||||
pub async fn log_audit_entry(&self, entry: AuditLogEntry) -> Result<(), GlobalError> {
|
||||
self.log_entry(UnifiedLogEntry::Audit(Box::new(entry))).await
|
||||
}
|
||||
|
||||
/// Log a console entry
|
||||
#[tracing::instrument(skip(self), fields(log_source = "logger_console"))]
|
||||
pub async fn log_console_entry(&self, entry: ConsoleLogEntry) -> Result<(), GlobalError> {
|
||||
self.log_entry(UnifiedLogEntry::Console(entry)).await
|
||||
}
|
||||
|
||||
/// Asynchronous logging of unified log entries
|
||||
#[tracing::instrument(skip_all, fields(log_source = "logger"))]
|
||||
pub async fn log_entry(&self, entry: UnifiedLogEntry) -> Result<(), GlobalError> {
|
||||
// Extract information for tracing based on entry type
|
||||
match &entry {
|
||||
UnifiedLogEntry::Server(server) => {
|
||||
tracing::Span::current()
|
||||
.record("log_level", server.level.0.as_str())
|
||||
.record("log_message", server.base.message.as_deref().unwrap_or("log message not set"))
|
||||
.record("source", &server.source);
|
||||
|
||||
// Generate tracing event based on log level
|
||||
match server.level.0 {
|
||||
Level::ERROR => {
|
||||
tracing::error!(target: "server_logs", message = %server.base.message.as_deref().unwrap_or(""));
|
||||
}
|
||||
Level::WARN => {
|
||||
tracing::warn!(target: "server_logs", message = %server.base.message.as_deref().unwrap_or(""));
|
||||
}
|
||||
Level::INFO => {
|
||||
tracing::info!(target: "server_logs", message = %server.base.message.as_deref().unwrap_or(""));
|
||||
}
|
||||
Level::DEBUG => {
|
||||
tracing::debug!(target: "server_logs", message = %server.base.message.as_deref().unwrap_or(""));
|
||||
}
|
||||
Level::TRACE => {
|
||||
tracing::trace!(target: "server_logs", message = %server.base.message.as_deref().unwrap_or(""));
|
||||
}
|
||||
}
|
||||
}
|
||||
UnifiedLogEntry::Audit(audit) => {
|
||||
tracing::info!(
|
||||
target: "audit_logs",
|
||||
event = %audit.event,
|
||||
api = %audit.api.name.as_deref().unwrap_or("unknown"),
|
||||
message = %audit.base.message.as_deref().unwrap_or("")
|
||||
);
|
||||
}
|
||||
UnifiedLogEntry::Console(console) => {
|
||||
let level_str = match console.level {
|
||||
crate::LogKind::Info => "INFO",
|
||||
crate::LogKind::Warning => "WARN",
|
||||
crate::LogKind::Error => "ERROR",
|
||||
crate::LogKind::Fatal => "FATAL",
|
||||
};
|
||||
|
||||
tracing::info!(
|
||||
target: "console_logs",
|
||||
level = %level_str,
|
||||
node = %console.node_name,
|
||||
message = %console.console_msg
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// Send logs to async queue with improved error handling
|
||||
match self.sender.try_send(entry) {
|
||||
Ok(_) => Ok(()),
|
||||
Err(mpsc::error::TrySendError::Full(entry)) => {
|
||||
// Processing strategy when queue is full
|
||||
tracing::warn!("Log queue full, applying backpressure");
|
||||
match tokio::time::timeout(std::time::Duration::from_millis(500), self.sender.send(entry)).await {
|
||||
Ok(Ok(_)) => Ok(()),
|
||||
Ok(Err(_)) => Err(GlobalError::SendFailed("Channel closed")),
|
||||
Err(_) => Err(GlobalError::Timeout("Queue backpressure timeout")),
|
||||
}
|
||||
}
|
||||
Err(mpsc::error::TrySendError::Closed(_)) => Err(GlobalError::SendFailed("Logger channel closed")),
|
||||
}
|
||||
}
|
||||
|
||||
/// Write log with context information
|
||||
/// This function writes log messages with context information.
|
||||
///
|
||||
/// # Parameters
|
||||
/// - `message`: Message to be logged
|
||||
/// - `source`: Source of the log
|
||||
/// - `request_id`: Request ID
|
||||
/// - `user_id`: User ID
|
||||
/// - `fields`: Additional fields
|
||||
///
|
||||
/// # Returns
|
||||
/// Result indicating whether the operation was successful
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use tracing_core::Level;
|
||||
/// use rustfs_obs::Logger;
|
||||
///
|
||||
/// async fn example(logger: &Logger) {
|
||||
/// let _ = logger.write_with_context("This is an information message", "example", Level::INFO, Some("req-12345".to_string()), Some("user-6789".to_string()), vec![("endpoint".to_string(), "/api/v1/data".to_string())]).await;
|
||||
/// }
/// ```
|
||||
pub async fn write_with_context(
|
||||
&self,
|
||||
message: &str,
|
||||
source: &str,
|
||||
level: Level,
|
||||
request_id: Option<String>,
|
||||
user_id: Option<String>,
|
||||
fields: Vec<(String, String)>,
|
||||
) -> Result<(), GlobalError> {
|
||||
let base = BaseLogEntry::new().message(Some(message.to_string())).request_id(request_id);
|
||||
|
||||
let server_entry = ServerLogEntry::new(level, source.to_string())
|
||||
.user_id(user_id)
|
||||
.fields(fields)
|
||||
.with_base(base);
|
||||
|
||||
self.log_server_entry(server_entry).await
|
||||
}
|
||||
|
||||
/// Write log
|
||||
/// This function writes log messages.
|
||||
/// # Parameters
|
||||
/// - `message`: Message to be logged
|
||||
/// - `source`: Source of the log
|
||||
/// - `level`: Log level
|
||||
///
|
||||
/// # Returns
|
||||
/// Result indicating whether the operation was successful
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use rustfs_obs::Logger;
|
||||
/// use tracing_core::Level;
|
||||
///
|
||||
/// async fn example(logger: &Logger) {
|
||||
/// let _ = logger.write("This is an information message", "example", Level::INFO).await;
|
||||
/// }
|
||||
/// ```
|
||||
pub async fn write(&self, message: &str, source: &str, level: Level) -> Result<(), GlobalError> {
|
||||
self.write_with_context(message, source, level, None, None, Vec::new()).await
|
||||
}
|
||||
|
||||
/// Shutdown the logger
|
||||
/// This function shuts down the logger.
|
||||
///
|
||||
/// # Returns
|
||||
/// Result indicating whether the operation was successful
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use rustfs_obs::Logger;
|
||||
///
|
||||
/// async fn example(logger: Logger) {
|
||||
/// let _ = logger.shutdown().await;
|
||||
/// }
|
||||
/// ```
|
||||
pub async fn shutdown(self) -> Result<(), GlobalError> {
|
||||
drop(self.sender); // Close the sending end so the receiver knows no new messages will arrive
|
||||
Ok(())
|
||||
}
|
||||
}
|
||||
|
||||
/// Start the log module
|
||||
/// This function starts the log module.
|
||||
/// It initializes the logger and starts the worker to process logs.
|
||||
/// # Parameters
|
||||
/// - `config`: Configuration information
|
||||
/// - `sinks`: A vector of Sink instances
|
||||
/// # Returns
|
||||
/// The global logger instance
|
||||
/// # Example
|
||||
/// ```no_run
|
||||
/// use rustfs_obs::{AppConfig, start_logger};
|
||||
///
|
||||
/// let config = AppConfig::default();
|
||||
/// let sinks = vec![];
|
||||
/// let logger = start_logger(&config, sinks);
|
||||
/// ```
|
||||
pub fn start_logger(config: &AppConfig, sinks: Vec<Arc<dyn Sink>>) -> Logger {
|
||||
let (logger, receiver) = Logger::new(config);
|
||||
tokio::spawn(crate::worker::start_worker(receiver, sinks));
|
||||
logger
|
||||
}
|
||||
|
||||
/// Initialize the global logger instance
|
||||
/// This function initializes the global logger instance and returns a reference to it.
|
||||
/// If the logger has been initialized before, it will return the existing logger instance.
|
||||
///
|
||||
/// # Parameters
|
||||
/// - `config`: Configuration information
|
||||
/// - `sinks`: A vector of Sink instances
|
||||
///
|
||||
/// # Returns
|
||||
/// A reference to the global logger instance
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use rustfs_obs::{AppConfig,init_global_logger};
|
||||
///
|
||||
/// let config = AppConfig::default();
|
||||
/// let logger = init_global_logger(&config);
|
||||
/// ```
|
||||
pub async fn init_global_logger(config: &AppConfig) -> Arc<Mutex<Logger>> {
|
||||
let sinks = sinks::create_sinks(config).await;
|
||||
let logger = Arc::new(Mutex::new(start_logger(config, sinks)));
|
||||
GLOBAL_LOGGER.set(logger.clone()).expect("Logger already initialized");
|
||||
logger
|
||||
}
|
||||
|
||||
/// Get the global logger instance
|
||||
///
|
||||
/// This function returns a reference to the global logger instance.
|
||||
///
|
||||
/// # Returns
|
||||
/// A reference to the global logger instance
|
||||
///
|
||||
/// # Example
|
||||
/// ```no_run
|
||||
/// use rustfs_obs::get_global_logger;
|
||||
///
|
||||
/// let logger = get_global_logger();
|
||||
/// ```
|
||||
pub fn get_global_logger() -> &'static Arc<Mutex<Logger>> {
|
||||
GLOBAL_LOGGER.get().expect("Logger not initialized")
|
||||
}
|
||||
|
||||
/// Log information
|
||||
/// This function logs information messages.
|
||||
///
|
||||
/// # Parameters
|
||||
/// - `message`: Message to be logged
|
||||
/// - `source`: Source of the log
|
||||
///
|
||||
/// # Returns
|
||||
/// Result indicating whether the operation was successful
|
||||
///
|
||||
/// # Example
|
||||
/// ```no_run
|
||||
/// use rustfs_obs::log_info;
|
||||
///
|
||||
/// async fn example() {
|
||||
/// let _ = log_info("This is an information message", "example").await;
|
||||
/// }
|
||||
/// ```
|
||||
pub async fn log_info(message: &str, source: &str) -> Result<(), GlobalError> {
|
||||
get_global_logger().lock().await.write(message, source, Level::INFO).await
|
||||
}
|
||||
|
||||
/// Log error
|
||||
/// This function logs error messages.
|
||||
/// # Parameters
|
||||
/// - `message`: Message to be logged
|
||||
/// - `source`: Source of the log
|
||||
/// # Returns
|
||||
/// Result indicating whether the operation was successful
|
||||
/// # Example
|
||||
/// ```no_run
|
||||
/// use rustfs_obs::log_error;
|
||||
///
|
||||
/// async fn example() {
|
||||
/// let _ = log_error("This is an error message", "example").await;
|
||||
/// }
/// ```
|
||||
pub async fn log_error(message: &str, source: &str) -> Result<(), GlobalError> {
|
||||
get_global_logger().lock().await.write(message, source, Level::ERROR).await
|
||||
}
|
||||
|
||||
/// Log warning
|
||||
/// This function logs warning messages.
|
||||
/// # Parameters
|
||||
/// - `message`: Message to be logged
|
||||
/// - `source`: Source of the log
|
||||
/// # Returns
|
||||
/// Result indicating whether the operation was successful
|
||||
///
|
||||
/// # Example
|
||||
/// ```no_run
|
||||
/// use rustfs_obs::log_warn;
|
||||
///
|
||||
/// async fn example() {
|
||||
/// let _ = log_warn("This is a warning message", "example").await;
|
||||
/// }
|
||||
/// ```
|
||||
pub async fn log_warn(message: &str, source: &str) -> Result<(), GlobalError> {
|
||||
get_global_logger().lock().await.write(message, source, Level::WARN).await
|
||||
}
|
||||
|
||||
/// Log debug
|
||||
/// This function logs debug messages.
|
||||
/// # Parameters
|
||||
/// - `message`: Message to be logged
|
||||
/// - `source`: Source of the log
|
||||
/// # Returns
|
||||
/// Result indicating whether the operation was successful
|
||||
///
|
||||
/// # Example
|
||||
/// ```no_run
|
||||
/// use rustfs_obs::log_debug;
|
||||
///
|
||||
/// async fn example() {
|
||||
/// let _ = log_debug("This is a debug message", "example").await;
|
||||
/// }
|
||||
/// ```
|
||||
pub async fn log_debug(message: &str, source: &str) -> Result<(), GlobalError> {
|
||||
get_global_logger().lock().await.write(message, source, Level::DEBUG).await
|
||||
}
|
||||
|
||||
/// Log trace
|
||||
/// This function logs trace messages.
|
||||
/// # Parameters
|
||||
/// - `message`: Message to be logged
|
||||
/// - `source`: Source of the log
|
||||
///
|
||||
/// # Returns
|
||||
/// Result indicating whether the operation was successful
|
||||
///
|
||||
/// # Example
|
||||
/// ```no_run
|
||||
/// use rustfs_obs::log_trace;
|
||||
///
|
||||
/// async fn example() {
|
||||
/// let _ = log_trace("This is a trace message", "example").await;
|
||||
/// }
|
||||
/// ```
|
||||
pub async fn log_trace(message: &str, source: &str) -> Result<(), GlobalError> {
|
||||
get_global_logger().lock().await.write(message, source, Level::TRACE).await
|
||||
}
|
||||
|
||||
/// Log with context information
|
||||
/// This function logs messages with context information.
|
||||
/// # Parameters
|
||||
/// - `message`: Message to be logged
|
||||
/// - `source`: Source of the log
|
||||
/// - `level`: Log level
|
||||
/// - `request_id`: Request ID
|
||||
/// - `user_id`: User ID
|
||||
/// - `fields`: Additional fields
|
||||
/// # Returns
|
||||
/// Result indicating whether the operation was successful
|
||||
/// # Example
|
||||
/// ```no_run
|
||||
/// use tracing_core::Level;
|
||||
/// use rustfs_obs::log_with_context;
|
||||
///
|
||||
/// async fn example() {
|
||||
/// let _ = log_with_context("This is an information message", "example", Level::INFO, Some("req-12345".to_string()), Some("user-6789".to_string()), vec![("endpoint".to_string(), "/api/v1/data".to_string())]).await;
|
||||
/// }
|
||||
/// ```
|
||||
pub async fn log_with_context(
|
||||
message: &str,
|
||||
source: &str,
|
||||
level: Level,
|
||||
request_id: Option<String>,
|
||||
user_id: Option<String>,
|
||||
fields: Vec<(String, String)>,
|
||||
) -> Result<(), GlobalError> {
|
||||
get_global_logger()
|
||||
.lock()
|
||||
.await
|
||||
.write_with_context(message, source, level, request_id, user_id, fields)
|
||||
.await
|
||||
}
|
||||
|
||||
/// Log initialization status
|
||||
#[derive(Debug)]
|
||||
pub(crate) struct InitLogStatus {
|
||||
pub timestamp: SystemTime,
|
||||
pub service_name: String,
|
||||
pub version: String,
|
||||
pub environment: String,
|
||||
}
|
||||
|
||||
impl Default for InitLogStatus {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
timestamp: SystemTime::now(),
|
||||
service_name: String::from(APP_NAME),
|
||||
version: SERVICE_VERSION.to_string(),
|
||||
environment: ENVIRONMENT.to_string(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl InitLogStatus {
|
||||
pub fn new_config(config: &OtelConfig) -> Self {
|
||||
let config = config.clone();
|
||||
let environment = config.environment.unwrap_or(ENVIRONMENT.to_string());
|
||||
let version = config.service_version.unwrap_or(SERVICE_VERSION.to_string());
|
||||
Self {
|
||||
timestamp: SystemTime::now(),
|
||||
service_name: String::from(APP_NAME),
|
||||
version,
|
||||
environment,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn init_start_log(config: &OtelConfig) -> Result<(), GlobalError> {
|
||||
let status = Self::new_config(config);
|
||||
log_init_state(Some(status)).await
|
||||
}
|
||||
}
|
||||
|
||||
/// Log initialization details during system startup
|
||||
async fn log_init_state(status: Option<InitLogStatus>) -> Result<(), GlobalError> {
|
||||
let status = status.unwrap_or_default();
|
||||
|
||||
let base_entry = BaseLogEntry::new()
|
||||
.timestamp(chrono::DateTime::from(status.timestamp))
|
||||
.message(Some(format!(
|
||||
"Service initialization started - {} v{} in {}",
|
||||
status.service_name, status.version, status.environment
|
||||
)))
|
||||
.request_id(Some("system_init".to_string()));
|
||||
|
||||
let server_entry = ServerLogEntry::new(Level::INFO, "system_initialization".to_string())
|
||||
.with_base(base_entry)
|
||||
.user_id(Some("system".to_string()));
|
||||
|
||||
get_global_logger().lock().await.log_server_entry(server_entry).await?;
|
||||
Ok(())
|
||||
}
|
||||
@@ -1,178 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::sinks::Sink;
|
||||
use crate::{LogRecord, UnifiedLogEntry};
|
||||
use async_trait::async_trait;
|
||||
use std::sync::Arc;
|
||||
use tokio::fs::OpenOptions;
|
||||
use tokio::io;
|
||||
use tokio::io::AsyncWriteExt;
|
||||
|
||||
/// File Sink Implementation
|
||||
pub struct FileSink {
|
||||
path: String,
|
||||
buffer_size: usize,
|
||||
writer: Arc<tokio::sync::Mutex<io::BufWriter<tokio::fs::File>>>,
|
||||
entry_count: std::sync::atomic::AtomicUsize,
|
||||
last_flush: std::sync::atomic::AtomicU64,
|
||||
flush_interval_ms: u64, // Time between flushes
|
||||
flush_threshold: usize, // Number of entries before flush
|
||||
}
|
||||
|
||||
impl FileSink {
|
||||
/// Create a new FileSink instance
|
||||
pub async fn new(
|
||||
path: String,
|
||||
buffer_size: usize,
|
||||
flush_interval_ms: u64,
|
||||
flush_threshold: usize,
|
||||
) -> Result<Self, io::Error> {
|
||||
// check if the file exists
|
||||
let file_exists = tokio::fs::metadata(&path).await.is_ok();
|
||||
// if the file does not exist, create it
|
||||
if !file_exists {
|
||||
tokio::fs::create_dir_all(std::path::Path::new(&path).parent().unwrap()).await?;
|
||||
tracing::debug!("File does not exist, creating it. Path: {:?}", path)
|
||||
}
|
||||
let file = if file_exists {
|
||||
// If the file exists, open it in append mode
|
||||
tracing::debug!("FileSink: File exists, opening in append mode. Path: {:?}", path);
|
||||
OpenOptions::new().append(true).create(true).open(&path).await?
|
||||
} else {
|
||||
// If the file does not exist, create it
|
||||
tracing::debug!("FileSink: File does not exist, creating a new file.");
|
||||
// Create the file and write a header or initial content if needed
|
||||
OpenOptions::new().create(true).truncate(true).write(true).open(&path).await?
|
||||
};
|
||||
let writer = io::BufWriter::with_capacity(buffer_size, file);
|
||||
let now = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_millis() as u64;
|
||||
Ok(FileSink {
|
||||
path,
|
||||
buffer_size,
|
||||
writer: Arc::new(tokio::sync::Mutex::new(writer)),
|
||||
entry_count: std::sync::atomic::AtomicUsize::new(0),
|
||||
last_flush: std::sync::atomic::AtomicU64::new(now),
|
||||
flush_interval_ms,
|
||||
flush_threshold,
|
||||
})
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
async fn initialize_writer(&mut self) -> io::Result<()> {
|
||||
let file = tokio::fs::File::create(&self.path).await?;
|
||||
|
||||
// Use buffer_size to create a buffer writer with a specified capacity
|
||||
let buf_writer = io::BufWriter::with_capacity(self.buffer_size, file);
|
||||
|
||||
// Replace the original writer with the new Mutex
|
||||
self.writer = Arc::new(tokio::sync::Mutex::new(buf_writer));
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Get the current buffer size
|
||||
#[allow(dead_code)]
|
||||
pub fn buffer_size(&self) -> usize {
|
||||
self.buffer_size
|
||||
}
|
||||
|
||||
// Dynamically adjust the buffer size
|
||||
#[allow(dead_code)]
|
||||
pub async fn set_buffer_size(&mut self, new_size: usize) -> io::Result<()> {
|
||||
if self.buffer_size != new_size {
|
||||
self.buffer_size = new_size;
|
||||
// Reinitialize the writer directly, without checking is_some()
|
||||
self.initialize_writer().await?;
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// Check if flushing is needed based on count or time
|
||||
fn should_flush(&self) -> bool {
|
||||
// Check entry count threshold
|
||||
if self.entry_count.load(std::sync::atomic::Ordering::Relaxed) >= self.flush_threshold {
|
||||
return true;
|
||||
}
|
||||
|
||||
// Check time threshold
|
||||
let now = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_millis() as u64;
|
||||
|
||||
let last = self.last_flush.load(std::sync::atomic::Ordering::Relaxed);
|
||||
now - last >= self.flush_interval_ms
|
||||
}
|
||||
}
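For reference, the constructor arguments map directly onto the flush policy above; a hypothetical construction with illustrative values (the real defaults come from rustfs_config::observability via create_sinks further down):

// Inside an async fn returning io::Result; path and tuning values are illustrative only.
let sink = FileSink::new(
    "logs/rustfs.log".to_string(), // path: the parent directory is created if missing
    64 * 1024,                     // buffer_size: BufWriter capacity in bytes
    1_000,                         // flush_interval_ms: time-based flush trigger
    128,                           // flush_threshold: entry-count flush trigger
)
.await?;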
|
||||
|
||||
#[async_trait]
|
||||
impl Sink for FileSink {
|
||||
async fn write(&self, entry: &UnifiedLogEntry) {
|
||||
let line = format!("{entry:?}\n");
|
||||
let mut writer = self.writer.lock().await;
|
||||
|
||||
if let Err(e) = writer.write_all(line.as_bytes()).await {
|
||||
eprintln!(
|
||||
"Failed to write log to file {}: {},entry timestamp:{:?}",
|
||||
self.path,
|
||||
e,
|
||||
entry.get_timestamp()
|
||||
);
|
||||
return;
|
||||
}
|
||||
|
||||
// Only flush periodically to improve performance
|
||||
// should_flush() decides based on the entry count and the time since the last flush
|
||||
// Increment the entry count
|
||||
self.entry_count.fetch_add(1, std::sync::atomic::Ordering::Relaxed);
|
||||
|
||||
// Check if we should flush
|
||||
if self.should_flush() {
|
||||
if let Err(e) = writer.flush().await {
|
||||
eprintln!("Failed to flush log file {}: {}", self.path, e);
|
||||
return;
|
||||
}
|
||||
|
||||
// Reset counters
|
||||
self.entry_count.store(0, std::sync::atomic::Ordering::Relaxed);
|
||||
|
||||
let now = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_millis() as u64;
|
||||
|
||||
self.last_flush.store(now, std::sync::atomic::Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for FileSink {
|
||||
fn drop(&mut self) {
|
||||
let writer = self.writer.clone();
|
||||
let path = self.path.clone();
|
||||
|
||||
tokio::task::spawn_blocking(move || {
|
||||
let rt = tokio::runtime::Runtime::new().unwrap();
|
||||
rt.block_on(async {
|
||||
let mut writer = writer.lock().await;
|
||||
if let Err(e) = writer.flush().await {
|
||||
eprintln!("Failed to flush log file {path}: {e}");
|
||||
}
|
||||
});
|
||||
});
|
||||
}
|
||||
}
|
||||
@@ -1,179 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::sinks::Sink;
|
||||
use crate::{LogRecord, UnifiedLogEntry};
|
||||
use async_trait::async_trait;
|
||||
use std::sync::Arc;
|
||||
|
||||
/// Kafka Sink Implementation
|
||||
pub struct KafkaSink {
|
||||
producer: rdkafka::producer::FutureProducer,
|
||||
topic: String,
|
||||
batch_size: usize,
|
||||
batch_timeout_ms: u64,
|
||||
entries: Arc<tokio::sync::Mutex<Vec<UnifiedLogEntry>>>,
|
||||
last_flush: Arc<std::sync::atomic::AtomicU64>,
|
||||
}
|
||||
|
||||
impl KafkaSink {
|
||||
/// Create a new KafkaSink instance
|
||||
pub fn new(producer: rdkafka::producer::FutureProducer, topic: String, batch_size: usize, batch_timeout_ms: u64) -> Self {
|
||||
// Create Arc-wrapped values first
|
||||
let entries = Arc::new(tokio::sync::Mutex::new(Vec::with_capacity(batch_size)));
|
||||
let last_flush = Arc::new(std::sync::atomic::AtomicU64::new(
|
||||
std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_millis() as u64,
|
||||
));
|
||||
let sink = KafkaSink {
|
||||
producer: producer.clone(),
|
||||
topic: topic.clone(),
|
||||
batch_size,
|
||||
batch_timeout_ms,
|
||||
entries: entries.clone(),
|
||||
last_flush: last_flush.clone(),
|
||||
};
|
||||
|
||||
// Start background flusher
|
||||
tokio::spawn(Self::periodic_flush(producer, topic, entries, last_flush, batch_timeout_ms));
|
||||
|
||||
sink
|
||||
}
|
||||
|
||||
/// Getter for the `batch_timeout_ms` field
|
||||
#[allow(dead_code)]
|
||||
pub fn batch_timeout(&self) -> u64 {
|
||||
self.batch_timeout_ms
|
||||
}
|
||||
|
||||
/// Dynamically adjust the batch timeout if needed
|
||||
#[allow(dead_code)]
|
||||
pub fn set_batch_timeout(&mut self, new_timeout_ms: u64) {
|
||||
self.batch_timeout_ms = new_timeout_ms;
|
||||
}
|
||||
|
||||
async fn periodic_flush(
|
||||
producer: rdkafka::producer::FutureProducer,
|
||||
topic: String,
|
||||
entries: Arc<tokio::sync::Mutex<Vec<UnifiedLogEntry>>>,
|
||||
last_flush: Arc<std::sync::atomic::AtomicU64>,
|
||||
timeout_ms: u64,
|
||||
) {
|
||||
loop {
|
||||
tokio::time::sleep(tokio::time::Duration::from_millis(timeout_ms / 2)).await;
|
||||
|
||||
let now = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_millis() as u64;
|
||||
|
||||
let last = last_flush.load(std::sync::atomic::Ordering::Relaxed);
|
||||
|
||||
if now - last >= timeout_ms {
|
||||
let mut batch = entries.lock().await;
|
||||
if !batch.is_empty() {
|
||||
Self::send_batch(&producer, &topic, batch.drain(..).collect()).await;
|
||||
last_flush.store(now, std::sync::atomic::Ordering::Relaxed);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn send_batch(producer: &rdkafka::producer::FutureProducer, topic: &str, entries: Vec<UnifiedLogEntry>) {
|
||||
for entry in entries {
|
||||
let payload = match serde_json::to_string(&entry) {
|
||||
Ok(p) => p,
|
||||
Err(e) => {
|
||||
eprintln!("Failed to serialize log entry: {e}");
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
let span_id = entry.get_timestamp().to_rfc3339();
|
||||
|
||||
let _ = producer
|
||||
.send(
|
||||
rdkafka::producer::FutureRecord::to(topic).payload(&payload).key(&span_id),
|
||||
std::time::Duration::from_secs(5),
|
||||
)
|
||||
.await;
|
||||
}
|
||||
}
|
||||
}
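Reading the code above: a batch is flushed when it reaches batch_size, when a write observes that batch_timeout_ms has elapsed, or by the background task spawned in new(), which wakes every batch_timeout_ms / 2 (so an idle batch waits at most roughly 1.5 x batch_timeout_ms). A hypothetical construction, mirroring the producer settings used in create_sinks below:

// Illustrative only; create_sinks() builds the producer from SinkConfig instead.
let producer: rdkafka::producer::FutureProducer = rdkafka::config::ClientConfig::new()
    .set("bootstrap.servers", "localhost:9092")
    .set("message.timeout.ms", "5000")
    .create()
    .expect("failed to create Kafka producer");
let sink = KafkaSink::new(producer, "rustfs-logs".to_string(), 512, 2_000);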
|
||||
|
||||
#[async_trait]
|
||||
impl Sink for KafkaSink {
|
||||
async fn write(&self, entry: &UnifiedLogEntry) {
|
||||
let mut batch = self.entries.lock().await;
|
||||
batch.push(entry.clone());
|
||||
|
||||
let should_flush_by_size = batch.len() >= self.batch_size;
|
||||
let should_flush_by_time = {
|
||||
let now = std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_millis() as u64;
|
||||
let last = self.last_flush.load(std::sync::atomic::Ordering::Relaxed);
|
||||
now - last >= self.batch_timeout_ms
|
||||
};
|
||||
|
||||
if should_flush_by_size || should_flush_by_time {
|
||||
// Existing flush logic
|
||||
let entries_to_send: Vec<UnifiedLogEntry> = batch.drain(..).collect();
|
||||
let producer = self.producer.clone();
|
||||
let topic = self.topic.clone();
|
||||
|
||||
self.last_flush.store(
|
||||
std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_millis() as u64,
|
||||
std::sync::atomic::Ordering::Relaxed,
|
||||
);
|
||||
|
||||
tokio::spawn(async move {
|
||||
KafkaSink::send_batch(&producer, &topic, entries_to_send).await;
|
||||
});
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Drop for KafkaSink {
|
||||
fn drop(&mut self) {
|
||||
// Perform any necessary cleanup here
|
||||
// For example, you might want to flush any remaining entries
|
||||
let producer = self.producer.clone();
|
||||
let topic = self.topic.clone();
|
||||
let entries = self.entries.clone();
|
||||
let last_flush = self.last_flush.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
let mut batch = entries.lock().await;
|
||||
if !batch.is_empty() {
|
||||
KafkaSink::send_batch(&producer, &topic, batch.drain(..).collect()).await;
|
||||
last_flush.store(
|
||||
std::time::SystemTime::now()
|
||||
.duration_since(std::time::UNIX_EPOCH)
|
||||
.unwrap()
|
||||
.as_millis() as u64,
|
||||
std::sync::atomic::Ordering::Relaxed,
|
||||
);
|
||||
}
|
||||
});
|
||||
|
||||
eprintln!("Dropping KafkaSink with topic: {0}", self.topic);
|
||||
}
|
||||
}
|
||||
@@ -1,123 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::{AppConfig, SinkConfig, UnifiedLogEntry};
|
||||
use async_trait::async_trait;
|
||||
use std::sync::Arc;
|
||||
|
||||
#[cfg(feature = "file")]
|
||||
mod file;
|
||||
#[cfg(all(feature = "kafka", target_os = "linux"))]
|
||||
mod kafka;
|
||||
#[cfg(feature = "webhook")]
|
||||
mod webhook;
|
||||
|
||||
/// Sink trait definition; sinks write log entries asynchronously
|
||||
#[async_trait]
|
||||
pub trait Sink: Send + Sync {
|
||||
async fn write(&self, entry: &UnifiedLogEntry);
|
||||
}
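The trait surface above is deliberately small; a hypothetical in-process sink (not part of this commit, shown only to illustrate the trait) needs a single method, plus use crate::LogRecord in scope for to_json():

// Hypothetical example sink; not part of this change.
struct StdoutSink;

#[async_trait]
impl Sink for StdoutSink {
    async fn write(&self, entry: &UnifiedLogEntry) {
        // LogRecord::to_json() comes from `use crate::LogRecord;`
        println!("{}", entry.to_json());
    }
}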
|
||||
|
||||
/// Create a list of Sink instances
|
||||
pub async fn create_sinks(config: &AppConfig) -> Vec<Arc<dyn Sink>> {
|
||||
let mut sinks: Vec<Arc<dyn Sink>> = Vec::new();
|
||||
|
||||
for sink_config in &config.sinks {
|
||||
match sink_config {
|
||||
#[cfg(all(feature = "kafka", target_os = "linux"))]
|
||||
SinkConfig::Kafka(kafka_config) => {
|
||||
match rdkafka::config::ClientConfig::new()
|
||||
.set("bootstrap.servers", &kafka_config.brokers)
|
||||
.set("message.timeout.ms", "5000")
|
||||
.create()
|
||||
{
|
||||
Ok(producer) => {
|
||||
sinks.push(Arc::new(kafka::KafkaSink::new(
|
||||
producer,
|
||||
kafka_config.topic.clone(),
|
||||
kafka_config
|
||||
.batch_size
|
||||
.unwrap_or(rustfs_config::observability::DEFAULT_SINKS_KAFKA_BATCH_SIZE),
|
||||
kafka_config
|
||||
.batch_timeout_ms
|
||||
.unwrap_or(rustfs_config::observability::DEFAULT_SINKS_KAFKA_BATCH_TIMEOUT_MS),
|
||||
)));
|
||||
tracing::info!("Kafka sink created for topic: {}", kafka_config.topic);
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!("Failed to create Kafka producer: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(feature = "webhook")]
|
||||
SinkConfig::Webhook(webhook_config) => {
|
||||
sinks.push(Arc::new(webhook::WebhookSink::new(
|
||||
webhook_config.endpoint.clone(),
|
||||
webhook_config.auth_token.clone(),
|
||||
webhook_config
|
||||
.max_retries
|
||||
.unwrap_or(rustfs_config::observability::DEFAULT_SINKS_WEBHOOK_MAX_RETRIES),
|
||||
webhook_config
|
||||
.retry_delay_ms
|
||||
.unwrap_or(rustfs_config::observability::DEFAULT_SINKS_WEBHOOK_RETRY_DELAY_MS),
|
||||
)));
|
||||
tracing::info!("Webhook sink created for endpoint: {}", webhook_config.endpoint);
|
||||
}
|
||||
#[cfg(feature = "file")]
|
||||
SinkConfig::File(file_config) => {
|
||||
tracing::debug!("FileSink: Using path: {}", file_config.path);
|
||||
match file::FileSink::new(
|
||||
std::path::Path::new(&file_config.path)
|
||||
.join(rustfs_config::DEFAULT_SINK_FILE_LOG_FILE)
|
||||
.to_string_lossy()
|
||||
.to_string(),
|
||||
file_config
|
||||
.buffer_size
|
||||
.unwrap_or(rustfs_config::observability::DEFAULT_SINKS_FILE_BUFFER_SIZE),
|
||||
file_config
|
||||
.flush_interval_ms
|
||||
.unwrap_or(rustfs_config::observability::DEFAULT_SINKS_FILE_FLUSH_INTERVAL_MS),
|
||||
file_config
|
||||
.flush_threshold
|
||||
.unwrap_or(rustfs_config::observability::DEFAULT_SINKS_FILE_FLUSH_THRESHOLD),
|
||||
)
|
||||
.await
|
||||
{
|
||||
Ok(sink) => {
|
||||
sinks.push(Arc::new(sink));
|
||||
tracing::info!("File sink created for path: {}", file_config.path);
|
||||
}
|
||||
Err(e) => {
|
||||
tracing::error!("Failed to create File sink: {}", e);
|
||||
}
|
||||
}
|
||||
}
|
||||
#[cfg(any(not(feature = "kafka"), not(target_os = "linux")))]
|
||||
SinkConfig::Kafka(_) => {
|
||||
tracing::warn!("Kafka sink is configured but the 'kafka' feature is not enabled");
|
||||
}
|
||||
#[cfg(not(feature = "webhook"))]
|
||||
SinkConfig::Webhook(_) => {
|
||||
tracing::warn!("Webhook sink is configured but the 'webhook' feature is not enabled");
|
||||
}
|
||||
#[cfg(not(feature = "file"))]
|
||||
SinkConfig::File(_) => {
|
||||
tracing::warn!("File sink is configured but the 'file' feature is not enabled");
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
sinks
|
||||
}
|
||||
@@ -1,84 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::UnifiedLogEntry;
|
||||
use crate::sinks::Sink;
|
||||
use async_trait::async_trait;
|
||||
|
||||
/// Webhook Sink Implementation
|
||||
pub struct WebhookSink {
|
||||
endpoint: String,
|
||||
auth_token: String,
|
||||
client: reqwest::Client,
|
||||
max_retries: usize,
|
||||
retry_delay_ms: u64,
|
||||
}
|
||||
|
||||
impl WebhookSink {
|
||||
pub fn new(endpoint: String, auth_token: String, max_retries: usize, retry_delay_ms: u64) -> Self {
|
||||
WebhookSink {
|
||||
endpoint,
|
||||
auth_token,
|
||||
client: reqwest::Client::builder()
|
||||
.timeout(std::time::Duration::from_secs(10))
|
||||
.build()
|
||||
.unwrap_or_else(|_| reqwest::Client::new()),
|
||||
max_retries,
|
||||
retry_delay_ms,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[async_trait]
|
||||
impl Sink for WebhookSink {
|
||||
async fn write(&self, entry: &UnifiedLogEntry) {
|
||||
let mut retries = 0;
|
||||
let url = self.endpoint.clone();
|
||||
let entry_clone = entry.clone();
|
||||
let auth_value = reqwest::header::HeaderValue::from_str(format!("Bearer {}", self.auth_token.clone()).as_str()).unwrap();
|
||||
while retries < self.max_retries {
|
||||
match self
|
||||
.client
|
||||
.post(&url)
|
||||
.header(reqwest::header::AUTHORIZATION, auth_value.clone())
|
||||
.json(&entry_clone)
|
||||
.send()
|
||||
.await
|
||||
{
|
||||
Ok(response) if response.status().is_success() => {
|
||||
return;
|
||||
}
|
||||
_ => {
|
||||
retries += 1;
|
||||
if retries < self.max_retries {
|
||||
tokio::time::sleep(tokio::time::Duration::from_millis(
|
||||
self.retry_delay_ms * (1 << retries), // Exponential backoff
|
||||
))
|
||||
.await;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
eprintln!("Failed to send log to webhook after {0} retries", self.max_retries);
|
||||
}
|
||||
}
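As a worked example of the backoff above: after the n-th failed attempt the task sleeps retry_delay_ms * 2^n, so with retry_delay_ms = 100 and max_retries = 3 the delays are 200 ms and then 400 ms before the final error below is printed (values illustrative):

// Delay before the next attempt, matching `retry_delay_ms * (1 << retries)` above.
fn backoff_ms(retry_delay_ms: u64, failed_attempts: u32) -> u64 {
    retry_delay_ms * (1u64 << failed_attempts)
}
// backoff_ms(100, 1) == 200; backoff_ms(100, 2) == 400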
|
||||
|
||||
impl Drop for WebhookSink {
|
||||
fn drop(&mut self) {
|
||||
// Perform any necessary cleanup here
|
||||
// For example, you might want to log that the sink is being dropped
|
||||
eprintln!("Dropping WebhookSink with URL: {0}", self.endpoint);
|
||||
}
|
||||
}
|
||||
@@ -12,7 +12,7 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::OtelConfig;
|
||||
use crate::config::OtelConfig;
|
||||
use flexi_logger::{
|
||||
Age, Cleanup, Criterion, DeferredNow, FileSpec, LogSpecification, Naming, Record, WriteMode,
|
||||
WriteMode::{AsyncWith, BufferAndFlush},
|
||||
@@ -63,7 +63,8 @@ use tracing_subscriber::{EnvFilter, Layer, layer::SubscriberExt, util::Subscribe
|
||||
/// - The tracer provider (for distributed tracing)
|
||||
/// - The meter provider (for metrics collection)
|
||||
/// - The logger provider (for structured logging)
|
||||
// Implement Debug trait correctly, rather than using derive, as some fields may not have implemented Debug
|
||||
///
|
||||
/// Implement Debug trait correctly, rather than using derive, as some fields may not have implemented Debug
|
||||
pub struct OtelGuard {
|
||||
tracer_provider: Option<SdkTracerProvider>,
|
||||
meter_provider: Option<SdkMeterProvider>,
|
||||
|
||||
@@ -29,6 +29,7 @@ documentation = "https://docs.rs/rustfs-policy/latest/rustfs_policy/"
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
rustfs-config = { workspace = true, features = ["constants","opa"] }
|
||||
tokio.workspace = true
|
||||
time = { workspace = true, features = ["serde-human-readable"] }
|
||||
serde = { workspace = true, features = ["derive", "rc"] }
|
||||
@@ -41,6 +42,10 @@ rand.workspace = true
|
||||
base64-simd = { workspace = true }
|
||||
jsonwebtoken = { workspace = true }
|
||||
regex = { workspace = true }
|
||||
reqwest.workspace = true
|
||||
chrono.workspace = true
|
||||
tracing.workspace = true
|
||||
|
||||
[dev-dependencies]
|
||||
test-case.workspace = true
|
||||
temp-env = { workspace = true }
|
||||
|
||||
@@ -228,7 +228,7 @@ mod tests {
|
||||
use jsonwebtoken::{Algorithm, DecodingKey, Validation, decode};
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize)]
|
||||
#[derive(Debug, Serialize, Deserialize, Clone)]
|
||||
struct Claims {
|
||||
sub: String,
|
||||
exp: usize,
|
||||
|
||||
@@ -17,6 +17,7 @@ mod doc;
|
||||
mod effect;
|
||||
mod function;
|
||||
mod id;
|
||||
pub mod opa;
|
||||
#[allow(clippy::module_inception)]
|
||||
mod policy;
|
||||
mod principal;
|
||||
|
||||
288
crates/policy/src/policy/opa.rs
Normal file
@@ -0,0 +1,288 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::policy::Args as PArgs;
|
||||
use rustfs_config::{ENV_PREFIX, opa::*};
|
||||
use serde::Deserialize;
|
||||
use serde_json::json;
|
||||
use std::{collections::HashMap, env, time::Duration};
|
||||
use tracing::{error, info};
|
||||
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct Args {
|
||||
pub url: String,
|
||||
pub auth_token: String,
|
||||
}
|
||||
impl Args {
|
||||
pub fn enable(&self) -> bool {
|
||||
!self.url.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct AuthZPlugin {
|
||||
client: reqwest::Client,
|
||||
args: Args,
|
||||
}
|
||||
|
||||
fn check() -> Result<(), String> {
|
||||
let env_list = env::vars();
|
||||
let mut candidate = HashMap::new();
|
||||
let prefix = format!("{}{}", ENV_PREFIX, POLICY_PLUGIN_SUB_SYS).to_uppercase();
|
||||
for (key, value) in env_list {
|
||||
if key.starts_with(&prefix) {
|
||||
candidate.insert(key.to_string(), value);
|
||||
}
|
||||
}
|
||||
|
||||
// check required env vars
|
||||
if candidate.remove(ENV_POLICY_PLUGIN_OPA_URL).is_none() {
|
||||
return Err(format!("Missing required env var: {}", ENV_POLICY_PLUGIN_OPA_URL));
|
||||
}
|
||||
|
||||
// check optional env vars
|
||||
candidate.remove(ENV_POLICY_PLUGIN_AUTH_TOKEN);
|
||||
if !candidate.is_empty() {
|
||||
return Err(format!("Invalid env vars: {:?}", candidate));
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
async fn validate(config: &Args) -> Result<(), String> {
|
||||
let client = reqwest::Client::new();
|
||||
|
||||
match client.post(&config.url).send().await {
|
||||
Ok(resp) => {
|
||||
match resp.status() {
|
||||
reqwest::StatusCode::OK => {
|
||||
info!("OPA is ready to accept requests.");
|
||||
}
|
||||
_ => {
|
||||
return Err(format!("OPA returned an error: {}", resp.status()));
|
||||
}
|
||||
};
|
||||
}
|
||||
Err(err) => {
|
||||
return Err(format!("Error connecting to OPA: {}", err));
|
||||
}
|
||||
};
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn lookup_config() -> Result<Args, String> {
|
||||
let args = Args::default();
|
||||
|
||||
let get_cfg =
|
||||
|cfg: &str| -> Result<String, String> { env::var(cfg).map_err(|e| format!("Error getting env var {}: {:?}", cfg, e)) };
|
||||
|
||||
let url = get_cfg(ENV_POLICY_PLUGIN_OPA_URL);
|
||||
if url.is_err() {
|
||||
info!("OPA is not enabled.");
|
||||
return Ok(args);
|
||||
}
|
||||
check()?;
|
||||
let args = Args {
|
||||
url: url.ok().unwrap(),
|
||||
auth_token: get_cfg(ENV_POLICY_PLUGIN_AUTH_TOKEN).unwrap_or_default(),
|
||||
};
|
||||
validate(&args).await?;
|
||||
Ok(args)
|
||||
}
|
||||
|
||||
impl AuthZPlugin {
|
||||
pub fn new(config: Args) -> Self {
|
||||
let client = reqwest::Client::builder()
|
||||
.timeout(Duration::from_secs(5))
|
||||
.connect_timeout(Duration::from_secs(1))
|
||||
.pool_max_idle_per_host(10)
|
||||
.pool_idle_timeout(Some(Duration::from_secs(60)))
|
||||
.tcp_keepalive(Some(Duration::from_secs(30)))
|
||||
.tcp_nodelay(true)
|
||||
.http2_keep_alive_interval(Some(Duration::from_secs(30)))
|
||||
.http2_keep_alive_timeout(Duration::from_secs(15))
|
||||
.build()
|
||||
.unwrap();
|
||||
|
||||
Self { client, args: config }
|
||||
}
|
||||
|
||||
pub async fn is_allowed(&self, args: &PArgs<'_>) -> bool {
|
||||
let payload = self.build_opa_input(args);
|
||||
|
||||
let mut request = self.client.post(self.args.url.clone()).json(&payload);
|
||||
if !self.args.auth_token.is_empty() {
|
||||
request = request.header("Authorization", format!("Bearer {}", self.args.auth_token));
|
||||
}
|
||||
|
||||
match request.send().await {
|
||||
Ok(resp) => {
|
||||
let status = resp.status();
|
||||
if !status.is_success() {
|
||||
error!("OPA returned non-success status: {}", status);
|
||||
return false;
|
||||
}
|
||||
|
||||
match resp.json::<OpaResponseEnum>().await {
|
||||
Ok(response_enum) => match response_enum {
|
||||
OpaResponseEnum::SimpleResult(result) => result.result,
|
||||
OpaResponseEnum::AllowResult(response) => response.result.allow,
|
||||
},
|
||||
Err(err) => {
|
||||
error!("Error parsing OPA response: {:?}", err);
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(err) => {
|
||||
error!("Error sending request to OPA: {:?}", err);
|
||||
false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fn build_opa_input(&self, args: &PArgs<'_>) -> serde_json::Value {
|
||||
let groups = match args.groups {
|
||||
Some(g) => g.clone(),
|
||||
None => vec![],
|
||||
};
|
||||
let action_str: &str = (&args.action).into();
|
||||
json!({
|
||||
// Core authorization parameters for OPA policy evaluation
|
||||
"input":{
|
||||
"identity": {
|
||||
"account": args.account,
|
||||
"groups": groups,
|
||||
"is_owner": args.is_owner,
|
||||
"claims": args.claims
|
||||
},
|
||||
|
||||
"resource": {
|
||||
"bucket": args.bucket,
|
||||
"object": args.object,
|
||||
"arn": if args.object.is_empty() {
|
||||
format!("arn:aws:s3:::{}", args.bucket)
|
||||
} else {
|
||||
format!("arn:aws:s3:::{}/{}", args.bucket, args.object)
|
||||
}
|
||||
},
|
||||
|
||||
"action": action_str,
|
||||
|
||||
"context": {
|
||||
"conditions": args.conditions,
|
||||
"deny_only": args.deny_only,
|
||||
"timestamp": chrono::Utc::now().to_rfc3339()
|
||||
}
|
||||
}
|
||||
})
|
||||
}
|
||||
}
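Putting the pieces together, the plugin is expected to be wired up roughly as follows (a sketch only; the calling server code is outside this diff):

// Hypothetical caller: enable OPA only when RUSTFS_POLICY_PLUGIN_URL is set and reachable.
pub async fn init_authz_plugin() -> Option<AuthZPlugin> {
    match lookup_config().await {
        Ok(args) if args.enable() => Some(AuthZPlugin::new(args)),
        Ok(_) => None, // plugin not configured; keep built-in policy evaluation
        Err(err) => {
            tracing::error!("OPA policy plugin misconfigured: {err}");
            None
        }
    }
}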
|
||||
|
||||
#[derive(Deserialize, Default)]
|
||||
struct OpaResultAllow {
|
||||
allow: bool,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Default)]
|
||||
struct OpaResult {
|
||||
result: bool,
|
||||
}
|
||||
|
||||
#[derive(Deserialize, Default)]
|
||||
struct OpaResponse {
|
||||
result: OpaResultAllow,
|
||||
}
|
||||
|
||||
#[derive(Deserialize)]
|
||||
#[serde(untagged)]
|
||||
enum OpaResponseEnum {
|
||||
SimpleResult(OpaResult),
|
||||
AllowResult(OpaResponse),
|
||||
}
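The untagged enum above accepts both response shapes an OPA endpoint commonly returns; a minimal illustration of the mapping (payloads are examples, not fixtures from this commit):

// {"result": true}            -> OpaResponseEnum::SimpleResult  (uses `result`)
// {"result": {"allow": true}} -> OpaResponseEnum::AllowResult   (uses `result.allow`)
fn parse_allow(body: &str) -> bool {
    match serde_json::from_str::<OpaResponseEnum>(body) {
        Ok(OpaResponseEnum::SimpleResult(r)) => r.result,
        Ok(OpaResponseEnum::AllowResult(r)) => r.result.allow,
        Err(_) => false,
    }
}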
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use temp_env;
|
||||
|
||||
#[test]
|
||||
fn test_check_valid_config() {
|
||||
// Use temp_env to temporarily set environment variables
|
||||
temp_env::with_vars(
|
||||
[
|
||||
("RUSTFS_POLICY_PLUGIN_URL", Some("http://localhost:8181/v1/data/rustfs/authz/allow")),
|
||||
("RUSTFS_POLICY_PLUGIN_AUTH_TOKEN", Some("test-token")),
|
||||
],
|
||||
|| {
|
||||
assert!(check().is_ok());
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_check_missing_required_env() {
|
||||
temp_env::with_var_unset("RUSTFS_POLICY_PLUGIN_URL", || {
|
||||
temp_env::with_var("RUSTFS_POLICY_PLUGIN_AUTH_TOKEN", Some("test-token"), || {
|
||||
let result = check();
|
||||
assert!(result.is_err());
|
||||
assert!(result.unwrap_err().contains("Missing required env var"));
|
||||
});
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_check_invalid_env_vars() {
|
||||
temp_env::with_vars(
|
||||
[
|
||||
("RUSTFS_POLICY_PLUGIN_URL", Some("http://localhost:8181/v1/data/rustfs/authz/allow")),
|
||||
("RUSTFS_POLICY_PLUGIN_INVALID", Some("invalid-value")),
|
||||
],
|
||||
|| {
|
||||
let result = check();
|
||||
assert!(result.is_err());
|
||||
assert!(result.unwrap_err().contains("Invalid env vars"));
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_lookup_config_not_enabled() {
|
||||
temp_env::with_var_unset("RUSTFS_POLICY_PLUGIN_URL", || {
|
||||
let rt = tokio::runtime::Runtime::new().unwrap();
|
||||
let result = rt.block_on(async { lookup_config().await });
|
||||
|
||||
// Should return the default empty Args
|
||||
assert!(result.is_ok());
|
||||
let args = result.unwrap();
|
||||
assert!(!args.enable());
|
||||
assert_eq!(args.url, "");
|
||||
assert_eq!(args.auth_token, "");
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_args_enable() {
|
||||
// Test Args enable method
|
||||
let args_enabled = Args {
|
||||
url: "http://localhost:8181".to_string(),
|
||||
auth_token: "token".to_string(),
|
||||
};
|
||||
assert!(args_enabled.enable());
|
||||
|
||||
let args_disabled = Args {
|
||||
url: "".to_string(),
|
||||
auth_token: "".to_string(),
|
||||
};
|
||||
assert!(!args_disabled.enable());
|
||||
}
|
||||
}
|
||||
@@ -17,7 +17,6 @@ use crate::error::{Error, Result};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::Value;
|
||||
use std::collections::{HashMap, HashSet};
|
||||
|
||||
/// DEFAULT_VERSION is the default version.
|
||||
/// https://docs.aws.amazon.com/IAM/latest/UserGuide/reference_policies_elements_version.html
|
||||
pub const DEFAULT_VERSION: &str = "2012-10-17";
|
||||
|
||||
@@ -59,7 +59,7 @@ pub fn generate_jwt<T: Serialize>(claims: &T, secret: &str) -> std::result::Resu
|
||||
jsonwebtoken::encode(&header, &claims, &EncodingKey::from_secret(secret.as_bytes()))
|
||||
}
|
||||
|
||||
pub fn extract_claims<T: DeserializeOwned>(
|
||||
pub fn extract_claims<T: DeserializeOwned + Clone>(
|
||||
token: &str,
|
||||
secret: &str,
|
||||
) -> std::result::Result<jsonwebtoken::TokenData<T>, jsonwebtoken::errors::Error> {
|
||||
|
||||
@@ -13,7 +13,7 @@ documentation = "https://docs.rs/rustfs-target/latest/rustfs_target/"
|
||||
|
||||
[dependencies]
|
||||
rustfs-config = { workspace = true, features = ["notify", "constants", "audit"] }
|
||||
rustfs-utils = { workspace = true, features = ["sys"] }
|
||||
rustfs-utils = { workspace = true, features = ["sys", "notify"] }
|
||||
async-trait = { workspace = true }
|
||||
reqwest = { workspace = true }
|
||||
rumqttc = { workspace = true }
|
||||
|
||||
70
crates/targets/src/check.rs
Normal file
@@ -0,0 +1,70 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
/// Check if MQTT Broker is available
|
||||
/// # Arguments
|
||||
/// * `broker_url` - URL of MQTT Broker, for example `mqtt://localhost:1883`
|
||||
/// * `topic` - Topic for testing connections
|
||||
/// # Returns
|
||||
/// * `Ok(())` - If the connection is successful
|
||||
/// * `Err(String)` - If the connection fails, contains an error message
|
||||
///
|
||||
/// # Example
|
||||
/// ```rust,no_run
|
||||
/// #[tokio::main]
|
||||
/// async fn main() {
|
||||
/// let result = rustfs_targets::check_mqtt_broker_available("mqtt://localhost:1883", "test/topic").await;
|
||||
/// if result.is_ok() {
|
||||
/// println!("MQTT Broker is available");
|
||||
/// } else {
|
||||
/// println!("MQTT Broker is not available: {}", result.err().unwrap());
|
||||
/// }
|
||||
/// }
|
||||
/// ```
|
||||
/// # Note
|
||||
/// Need to add `rumqttc` and `url` dependencies in `Cargo.toml`
|
||||
/// ```toml
|
||||
/// [dependencies]
|
||||
/// rumqttc = "0.25.0"
|
||||
/// url = "2.5.7"
|
||||
/// tokio = { version = "1", features = ["full"] }
|
||||
/// ```
|
||||
pub async fn check_mqtt_broker_available(broker_url: &str, topic: &str) -> Result<(), String> {
|
||||
use rumqttc::{AsyncClient, MqttOptions, QoS};
|
||||
let url = rustfs_utils::parse_url(broker_url).map_err(|e| format!("Broker URL parsing failed:{e}"))?;
|
||||
let url = url.url();
|
||||
|
||||
match url.scheme() {
|
||||
"tcp" | "ssl" | "ws" | "wss" | "mqtt" | "mqtts" | "tls" | "tcps" => {}
|
||||
_ => return Err("unsupported broker url scheme".to_string()),
|
||||
}
|
||||
|
||||
let host = url.host_str().ok_or("Broker is missing host")?;
|
||||
let port = url.port().unwrap_or(1883);
|
||||
let mut mqtt_options = MqttOptions::new("rustfs_check", host, port);
|
||||
mqtt_options.set_keep_alive(std::time::Duration::from_secs(5));
|
||||
let (client, mut eventloop) = AsyncClient::new(mqtt_options, 1);
|
||||
|
||||
// Try to connect and subscribe
|
||||
client
|
||||
.subscribe(topic, QoS::AtLeastOnce)
|
||||
.await
|
||||
.map_err(|e| format!("MQTT subscription failed:{e}"))?;
|
||||
// Wait for eventloop to receive at least one event
|
||||
match tokio::time::timeout(std::time::Duration::from_secs(3), eventloop.poll()).await {
|
||||
Ok(Ok(_)) => Ok(()),
|
||||
Ok(Err(e)) => Err(format!("MQTT connection failed:{e}")),
|
||||
Err(_) => Err("MQTT connection timeout".to_string()),
|
||||
}
|
||||
}
|
||||
@@ -13,11 +13,13 @@
|
||||
// limitations under the License.
|
||||
|
||||
pub mod arn;
|
||||
mod check;
|
||||
pub mod error;
|
||||
mod event_name;
|
||||
pub mod store;
|
||||
pub mod target;
|
||||
|
||||
pub use check::check_mqtt_broker_available;
|
||||
pub use error::{StoreError, TargetError};
|
||||
pub use event_name::EventName;
|
||||
use serde::{Deserialize, Serialize};
|
||||
|
||||
@@ -324,7 +324,7 @@ async fn run_mqtt_event_loop(
|
||||
Ok(Err(e)) => Err(e),
|
||||
Err(_) => {
|
||||
debug!(target_id = %target_id, "MQTT poll timed out (EVENT_LOOP_POLL_TIMEOUT) while not connected or status pending.");
|
||||
Err(rumqttc::ConnectionError::NetworkTimeout)
|
||||
Err(ConnectionError::NetworkTimeout)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
@@ -376,7 +376,7 @@ async fn run_mqtt_event_loop(
|
||||
connected_status.store(false, Ordering::SeqCst);
|
||||
error!(target_id = %target_id, error = %e, "Error from MQTT event loop poll");
|
||||
|
||||
if matches!(e, rumqttc::ConnectionError::NetworkTimeout) && (!initial_connection_established || !connected_status.load(Ordering::SeqCst)) {
|
||||
if matches!(e, ConnectionError::NetworkTimeout) && (!initial_connection_established || !connected_status.load(Ordering::SeqCst)) {
|
||||
warn!(target_id = %target_id, "Timeout during initial poll or pending state, will retry.");
|
||||
continue;
|
||||
}
|
||||
@@ -395,8 +395,8 @@ async fn run_mqtt_event_loop(
|
||||
error!(target_id = %target_id, error = %e, "Fatal MQTT error, terminating event loop.");
|
||||
break;
|
||||
}
|
||||
// rumqttc's eventloop.poll() may return Err and terminate after some errors,
|
||||
// Or it will handle reconnection internally. The continue here will make select! wait again.
|
||||
// rumqttc's eventloop.poll() may return Err and terminate after some errors,
|
||||
// Or it will handle reconnection internally. To continue here will make select! wait again.
|
||||
// If the error is temporary and rumqttc is handling reconnection, poll() should eventually succeed or return a different error again.
|
||||
// Sleep briefly to avoid busy cycles in case of rapid failure.
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
|
||||
@@ -29,14 +29,16 @@ base64-simd = { workspace = true, optional = true }
|
||||
blake3 = { workspace = true, optional = true }
|
||||
brotli = { workspace = true, optional = true }
|
||||
bytes = { workspace = true, optional = true }
|
||||
crc32fast = { workspace = true }
|
||||
crc32fast = { workspace = true, optional = true }
|
||||
flate2 = { workspace = true, optional = true }
|
||||
futures = { workspace = true, optional = true }
|
||||
hashbrown = { workspace = true, optional = true }
|
||||
hex-simd = { workspace = true, optional = true }
|
||||
highway = { workspace = true, optional = true }
|
||||
hickory-resolver = { workspace = true, optional = true }
|
||||
hmac = { workspace = true, optional = true }
|
||||
hyper = { workspace = true, optional = true }
|
||||
libc = { workspace = true, optional = true }
|
||||
local-ip-address = { workspace = true, optional = true }
|
||||
lz4 = { workspace = true, optional = true }
|
||||
md-5 = { workspace = true, optional = true }
|
||||
@@ -53,7 +55,7 @@ s3s = { workspace = true, optional = true }
|
||||
serde = { workspace = true, optional = true }
|
||||
sha1 = { workspace = true, optional = true }
|
||||
sha2 = { workspace = true, optional = true }
|
||||
convert_case = "0.8.0"
|
||||
convert_case = { workspace = true, optional = true }
|
||||
siphasher = { workspace = true, optional = true }
|
||||
snap = { workspace = true, optional = true }
|
||||
sysinfo = { workspace = true, optional = true }
|
||||
@@ -83,13 +85,13 @@ tls = ["dep:rustls", "dep:rustls-pemfile", "dep:rustls-pki-types"] # tls charac
|
||||
net = ["ip", "dep:url", "dep:netif", "dep:futures", "dep:transform-stream", "dep:bytes", "dep:s3s", "dep:hyper", "dep:hickory-resolver", "dep:moka", "dep:thiserror", "dep:tokio"] # network features with DNS resolver
|
||||
io = ["dep:tokio"]
|
||||
path = []
|
||||
notify = ["dep:hyper", "dep:s3s"] # file system notification features
|
||||
notify = ["dep:hyper", "dep:s3s", "dep:hashbrown", "dep:thiserror", "dep:serde", "dep:libc"] # file system notification features
|
||||
compress = ["dep:flate2", "dep:brotli", "dep:snap", "dep:lz4", "dep:zstd"]
|
||||
string = ["dep:regex", "dep:rand"]
|
||||
crypto = ["dep:base64-simd", "dep:hex-simd", "dep:hmac", "dep:hyper", "dep:sha1"]
|
||||
hash = ["dep:highway", "dep:md-5", "dep:sha2", "dep:blake3", "dep:serde", "dep:siphasher", "dep:hex-simd", "dep:base64-simd"]
|
||||
hash = ["dep:highway", "dep:md-5", "dep:sha2", "dep:blake3", "dep:serde", "dep:siphasher", "dep:hex-simd", "dep:base64-simd", "dep:crc32fast"]
|
||||
os = ["dep:nix", "dep:tempfile", "winapi"] # operating system utilities
|
||||
integration = [] # integration test features
|
||||
sys = ["dep:sysinfo"] # system information features
|
||||
http = []
|
||||
full = ["ip", "tls", "net", "io", "hash", "os", "integration", "path", "crypto", "string", "compress", "sys", "notify","http"] # all features
|
||||
http = ["dep:convert_case"]
|
||||
full = ["ip", "tls", "net", "io", "hash", "os", "integration", "path", "crypto", "string", "compress", "sys", "notify", "http"] # all features
|
||||
|
||||
@@ -102,7 +102,7 @@ pub fn load_all_certs_from_directory(
|
||||
let path = entry.path();
|
||||
|
||||
if path.is_dir() {
|
||||
let domain_name = path
|
||||
let domain_name: &str = path
|
||||
.file_name()
|
||||
.and_then(|name| name.to_str())
|
||||
.ok_or_else(|| certs_error(format!("invalid domain name directory:{path:?}")))?;
|
||||
|
||||
@@ -420,7 +420,6 @@ mod tests {
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
#[ignore]
|
||||
async fn test_invalid_domain_resolution() {
|
||||
let resolver = LayeredDnsResolver::new().await.unwrap();
|
||||
|
||||
|
||||
@@ -13,15 +13,13 @@
|
||||
// limitations under the License.
|
||||
|
||||
use bytes::Bytes;
|
||||
use futures::pin_mut;
|
||||
use futures::{Stream, StreamExt};
|
||||
use std::io::Error;
|
||||
use std::net::Ipv6Addr;
|
||||
use std::sync::{LazyLock, Mutex};
|
||||
use futures::{Stream, StreamExt, pin_mut};
|
||||
use std::{
|
||||
collections::{HashMap, HashSet},
|
||||
fmt::Display,
|
||||
net::{IpAddr, SocketAddr, TcpListener, ToSocketAddrs},
|
||||
io::Error,
|
||||
net::{IpAddr, Ipv6Addr, SocketAddr, TcpListener, ToSocketAddrs},
|
||||
sync::{Arc, LazyLock, Mutex, RwLock},
|
||||
time::{Duration, Instant},
|
||||
};
|
||||
use tracing::{error, info};
|
||||
@@ -51,6 +49,41 @@ impl DnsCacheEntry {
|
||||
|
||||
static DNS_CACHE: LazyLock<Mutex<HashMap<String, DnsCacheEntry>>> = LazyLock::new(|| Mutex::new(HashMap::new()));
|
||||
const DNS_CACHE_TTL: Duration = Duration::from_secs(300); // 5 minutes
|
||||
type DynDnsResolver = dyn Fn(&str) -> std::io::Result<HashSet<IpAddr>> + Send + Sync + 'static;
|
||||
static CUSTOM_DNS_RESOLVER: LazyLock<RwLock<Option<Arc<DynDnsResolver>>>> = LazyLock::new(|| RwLock::new(None));
|
||||
|
||||
fn resolve_domain(domain: &str) -> std::io::Result<HashSet<IpAddr>> {
|
||||
if let Some(resolver) = CUSTOM_DNS_RESOLVER.read().unwrap().clone() {
|
||||
return resolver(domain);
|
||||
}
|
||||
|
||||
(domain, 0)
|
||||
.to_socket_addrs()
|
||||
.map(|v| v.map(|v| v.ip()).collect::<HashSet<_>>())
|
||||
.map_err(Error::other)
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
fn clear_dns_cache() {
|
||||
if let Ok(mut cache) = DNS_CACHE.lock() {
|
||||
cache.clear();
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn set_mock_dns_resolver<F>(resolver: F)
|
||||
where
|
||||
F: Fn(&str) -> std::io::Result<HashSet<IpAddr>> + Send + Sync + 'static,
|
||||
{
|
||||
*CUSTOM_DNS_RESOLVER.write().unwrap() = Some(Arc::new(resolver));
|
||||
clear_dns_cache();
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
pub fn reset_dns_resolver() {
|
||||
*CUSTOM_DNS_RESOLVER.write().unwrap() = None;
|
||||
clear_dns_cache();
|
||||
}
|
||||
|
||||
/// helper for validating if the provided arg is an ip address.
|
||||
pub fn is_socket_addr(addr: &str) -> bool {
|
||||
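The hunk above installs a process-global resolver hook (`CUSTOM_DNS_RESOLVER`) so tests can bypass real DNS and the cache. Below is a self-contained miniature of that pattern, under the assumption that the intent is a test-installable override with a fallback to the system resolver; the names here are illustrative, not the crate's actual items.

```rust
use std::collections::HashSet;
use std::net::{IpAddr, Ipv4Addr, ToSocketAddrs};
use std::sync::{Arc, LazyLock, RwLock};

// Test-installable override; `None` means "use the real system resolver".
type Resolver = dyn Fn(&str) -> std::io::Result<HashSet<IpAddr>> + Send + Sync + 'static;
static OVERRIDE: LazyLock<RwLock<Option<Arc<Resolver>>>> = LazyLock::new(|| RwLock::new(None));

fn resolve(domain: &str) -> std::io::Result<HashSet<IpAddr>> {
    // Prefer the installed hook so tests get deterministic answers without network I/O.
    if let Some(hook) = OVERRIDE.read().unwrap().clone() {
        return hook(domain);
    }
    // Fallback: the same `(domain, 0).to_socket_addrs()` trick used in the hunk above.
    (domain, 0)
        .to_socket_addrs()
        .map(|addrs| addrs.map(|a| a.ip()).collect())
        .map_err(std::io::Error::other)
}

fn main() -> std::io::Result<()> {
    // What a #[cfg(test)] helper would install: a fake resolver for known names.
    let hook: Arc<Resolver> = Arc::new(|domain| {
        let mut ips = HashSet::new();
        if domain == "localhost" {
            ips.insert(IpAddr::V4(Ipv4Addr::LOCALHOST));
        }
        Ok(ips)
    });
    *OVERRIDE.write().unwrap() = Some(hook);

    assert!(resolve("localhost")?.contains(&IpAddr::V4(Ipv4Addr::LOCALHOST)));
    assert!(resolve("unknown.example")?.is_empty());
    Ok(())
}
```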
@@ -93,10 +126,7 @@ pub fn is_local_host(host: Host<&str>, port: u16, local_port: u16) -> std::io::R
|
||||
let local_set: HashSet<IpAddr> = LOCAL_IPS.iter().copied().collect();
|
||||
let is_local_host = match host {
|
||||
Host::Domain(domain) => {
|
||||
let ips = match (domain, 0).to_socket_addrs().map(|v| v.map(|v| v.ip()).collect::<Vec<_>>()) {
|
||||
Ok(ips) => ips,
|
||||
Err(err) => return Err(Error::other(err)),
|
||||
};
|
||||
let ips = resolve_domain(domain)?.into_iter().collect::<Vec<_>>();
|
||||
|
||||
ips.iter().any(|ip| local_set.contains(ip))
|
||||
}
|
||||
@@ -130,30 +160,31 @@ pub async fn get_host_ip(host: Host<&str>) -> std::io::Result<HashSet<IpAddr>> {
|
||||
// }
|
||||
// }
|
||||
// Check cache first
|
||||
if let Ok(mut cache) = DNS_CACHE.lock() {
|
||||
if let Some(entry) = cache.get(domain) {
|
||||
if !entry.is_expired(DNS_CACHE_TTL) {
|
||||
return Ok(entry.ips.clone());
|
||||
if CUSTOM_DNS_RESOLVER.read().unwrap().is_none() {
|
||||
if let Ok(mut cache) = DNS_CACHE.lock() {
|
||||
if let Some(entry) = cache.get(domain) {
|
||||
if !entry.is_expired(DNS_CACHE_TTL) {
|
||||
return Ok(entry.ips.clone());
|
||||
}
|
||||
// Remove expired entry
|
||||
cache.remove(domain);
|
||||
}
|
||||
// Remove expired entry
|
||||
cache.remove(domain);
|
||||
}
|
||||
}
|
||||
|
||||
info!("Cache miss for domain {domain}, querying system resolver.");
|
||||
|
||||
// Fallback to standard resolution when DNS resolver is not available
|
||||
match (domain, 0)
|
||||
.to_socket_addrs()
|
||||
.map(|v| v.map(|v| v.ip()).collect::<HashSet<_>>())
|
||||
{
|
||||
match resolve_domain(domain) {
|
||||
Ok(ips) => {
|
||||
// Cache the result
|
||||
if let Ok(mut cache) = DNS_CACHE.lock() {
|
||||
cache.insert(domain.to_string(), DnsCacheEntry::new(ips.clone()));
|
||||
// Limit cache size to prevent memory bloat
|
||||
if cache.len() > 1000 {
|
||||
cache.retain(|_, v| !v.is_expired(DNS_CACHE_TTL));
|
||||
if CUSTOM_DNS_RESOLVER.read().unwrap().is_none() {
|
||||
// Cache the result
|
||||
if let Ok(mut cache) = DNS_CACHE.lock() {
|
||||
cache.insert(domain.to_string(), DnsCacheEntry::new(ips.clone()));
|
||||
// Limit cache size to prevent memory bloat
|
||||
if cache.len() > 1000 {
|
||||
cache.retain(|_, v| !v.is_expired(DNS_CACHE_TTL));
|
||||
}
|
||||
}
|
||||
}
|
||||
info!("System query for domain {domain}: {:?}", ips);
|
||||
@@ -292,6 +323,21 @@ mod test {
|
||||
use super::*;
|
||||
use crate::init_global_dns_resolver;
|
||||
use std::net::{Ipv4Addr, Ipv6Addr};
|
||||
use std::{collections::HashSet, io::Error as IoError};
|
||||
|
||||
fn mock_resolver(domain: &str) -> std::io::Result<HashSet<IpAddr>> {
|
||||
match domain {
|
||||
"localhost" => Ok([
|
||||
IpAddr::V4(Ipv4Addr::new(127, 0, 0, 1)),
|
||||
IpAddr::V6(Ipv6Addr::new(0, 0, 0, 0, 0, 0, 0, 1)),
|
||||
]
|
||||
.into_iter()
|
||||
.collect()),
|
||||
"example.org" => Ok([IpAddr::V4(Ipv4Addr::new(192, 0, 2, 10))].into_iter().collect()),
|
||||
"invalid.nonexistent.domain.example" => Err(IoError::other("mock DNS failure")),
|
||||
_ => Ok(HashSet::new()),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_is_socket_addr() {
|
||||
@@ -349,7 +395,7 @@ mod test {
|
||||
let invalid_cases = [
|
||||
("localhost", "invalid socket address"),
|
||||
("", "invalid socket address"),
|
||||
("example.org:54321", "host in server address should be this server"),
|
||||
("203.0.113.1:54321", "host in server address should be this server"),
|
||||
("8.8.8.8:53", "host in server address should be this server"),
|
||||
(":-10", "invalid port value"),
|
||||
("invalid:port", "invalid port value"),
|
||||
@@ -369,6 +415,8 @@ mod test {
|
||||
|
||||
#[test]
|
||||
fn test_is_local_host() {
|
||||
set_mock_dns_resolver(mock_resolver);
|
||||
|
||||
// Test localhost domain
|
||||
let localhost_host = Host::Domain("localhost");
|
||||
assert!(is_local_host(localhost_host, 0, 0).unwrap());
|
||||
@@ -393,10 +441,13 @@ mod test {
|
||||
// Test invalid domain should return error
|
||||
let invalid_host = Host::Domain("invalid.nonexistent.domain.example");
|
||||
assert!(is_local_host(invalid_host, 0, 0).is_err());
|
||||
|
||||
reset_dns_resolver();
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_get_host_ip() {
|
||||
set_mock_dns_resolver(mock_resolver);
|
||||
match init_global_dns_resolver().await {
|
||||
Ok(_) => {}
|
||||
Err(e) => {
|
||||
@@ -427,16 +478,9 @@ mod test {
|
||||
|
||||
// Test invalid domain
|
||||
let invalid_host = Host::Domain("invalid.nonexistent.domain.example");
|
||||
match get_host_ip(invalid_host.clone()).await {
|
||||
Ok(ips) => {
|
||||
// Depending on DNS resolver behavior, it might return empty set or error
|
||||
assert!(ips.is_empty(), "Expected empty IP set for invalid domain, got: {ips:?}");
|
||||
}
|
||||
Err(_) => {
|
||||
error!("Expected error for invalid domain");
|
||||
} // Expected error
|
||||
}
|
||||
assert!(get_host_ip(invalid_host).await.is_err());
|
||||
|
||||
reset_dns_resolver();
|
||||
}
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -12,9 +12,13 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod net;
|
||||
|
||||
use hashbrown::HashMap;
|
||||
use hyper::HeaderMap;
|
||||
use s3s::{S3Request, S3Response};
|
||||
use std::collections::HashMap;
|
||||
|
||||
pub use net::*;
|
||||
|
||||
/// Extract request parameters from S3Request, mainly header information.
|
||||
#[allow(dead_code)]
|
||||
|
||||
533
crates/utils/src/notify/net.rs
Normal file
@@ -0,0 +1,533 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use regex::Regex;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::net::IpAddr;
|
||||
use std::path::Path;
|
||||
use std::sync::LazyLock;
|
||||
use thiserror::Error;
|
||||
use url::Url;
|
||||
|
||||
// Lazy static for the host label regex.
|
||||
static HOST_LABEL_REGEX: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"^[a-zA-Z0-9]([a-zA-Z0-9-]*[a-zA-Z0-9])?$").unwrap());
|
||||
|
||||
/// NetError represents errors that can occur in network operations.
|
||||
#[derive(Error, Debug)]
|
||||
pub enum NetError {
|
||||
#[error("invalid argument")]
|
||||
InvalidArgument,
|
||||
#[error("invalid hostname")]
|
||||
InvalidHost,
|
||||
#[error("missing '[' in host")]
|
||||
MissingBracket,
|
||||
#[error("parse error: {0}")]
|
||||
ParseError(String),
|
||||
#[error("unexpected scheme: {0}")]
|
||||
UnexpectedScheme(String),
|
||||
#[error("scheme appears with empty host")]
|
||||
SchemeWithEmptyHost,
|
||||
}
|
||||
|
||||
// Host represents a network host with IP/name and port.
|
||||
#[derive(Debug, Clone, PartialEq, Serialize, Deserialize)]
|
||||
pub struct Host {
|
||||
pub name: String,
|
||||
pub port: Option<u16>, // Using Option<u16> to represent if port is set, similar to IsPortSet.
|
||||
}
|
||||
|
||||
// Implementation of Host methods.
|
||||
impl Host {
|
||||
// is_empty returns true if the host name is empty.
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.name.is_empty()
|
||||
}
|
||||
|
||||
// equal checks if two hosts are equal by comparing their string representations.
|
||||
pub fn equal(&self, other: &Host) -> bool {
|
||||
self.to_string() == other.to_string()
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for Host {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
match self.port {
|
||||
Some(p) => write!(f, "{}:{}", self.name, p),
|
||||
None => write!(f, "{}", self.name),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// parse_host parses a string into a Host, with validation similar to Go's ParseHost.
|
||||
pub fn parse_host(s: &str) -> Result<Host, NetError> {
|
||||
if s.is_empty() {
|
||||
return Err(NetError::InvalidArgument);
|
||||
}
|
||||
|
||||
// is_valid_host validates the host string, checking for IP or hostname validity.
|
||||
let is_valid_host = |host: &str| -> bool {
|
||||
if host.is_empty() {
|
||||
return true;
|
||||
}
|
||||
if host.parse::<IpAddr>().is_ok() {
|
||||
return true;
|
||||
}
|
||||
if !(1..=253).contains(&host.len()) {
|
||||
return false;
|
||||
}
|
||||
for (i, label) in host.split('.').enumerate() {
|
||||
if i + 1 == host.split('.').count() && label.is_empty() {
|
||||
continue;
|
||||
}
|
||||
if !(1..=63).contains(&label.len()) || !HOST_LABEL_REGEX.is_match(label) {
|
||||
return false;
|
||||
}
|
||||
}
|
||||
true
|
||||
};
|
||||
|
||||
// Split host and port, similar to net.SplitHostPort.
|
||||
let (host_str, port_str) = s.rsplit_once(':').map_or((s, ""), |(h, p)| (h, p));
|
||||
let port = if !port_str.is_empty() {
|
||||
Some(port_str.parse().map_err(|_| NetError::ParseError(port_str.to_string()))?)
|
||||
} else {
|
||||
None
|
||||
};
|
||||
|
||||
// Trim IPv6 brackets if present.
|
||||
let host = trim_ipv6(host_str)?;
|
||||
|
||||
// Handle IPv6 zone identifier.
|
||||
let trimmed_host = host.split('%').next().unwrap_or(&host);
|
||||
|
||||
if !is_valid_host(trimmed_host) {
|
||||
return Err(NetError::InvalidHost);
|
||||
}
|
||||
|
||||
Ok(Host { name: host, port })
|
||||
}
|
||||
|
||||
// trim_ipv6 removes square brackets from IPv6 addresses, similar to Go's trimIPv6.
|
||||
fn trim_ipv6(host: &str) -> Result<String, NetError> {
|
||||
if host.ends_with(']') {
|
||||
if !host.starts_with('[') {
|
||||
return Err(NetError::MissingBracket);
|
||||
}
|
||||
Ok(host[1..host.len() - 1].to_string())
|
||||
} else {
|
||||
Ok(host.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
// URL is a wrapper around url::Url for custom handling.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ParsedURL(pub Url);
|
||||
|
||||
impl ParsedURL {
|
||||
/// is_empty returns true if the URL is empty or "about:blank".
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.0.as_str() == "" || (self.0.scheme() == "about" && self.0.path() == "blank")
|
||||
}
|
||||
|
||||
/// hostname returns the hostname of the URL.
|
||||
pub fn hostname(&self) -> String {
|
||||
self.0.host_str().unwrap_or("").to_string()
|
||||
}
|
||||
|
||||
/// port returns the port of the URL as a string, defaulting to "80" for http and "443" for https if not set.
|
||||
pub fn port(&self) -> String {
|
||||
match self.0.port() {
|
||||
Some(p) => p.to_string(),
|
||||
None => match self.0.scheme() {
|
||||
"http" => "80".to_string(),
|
||||
"https" => "443".to_string(),
|
||||
_ => "".to_string(),
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
/// scheme returns the scheme of the URL.
|
||||
pub fn scheme(&self) -> &str {
|
||||
self.0.scheme()
|
||||
}
|
||||
|
||||
/// url returns a reference to the underlying Url.
|
||||
pub fn url(&self) -> &Url {
|
||||
&self.0
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Display for ParsedURL {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
let mut url = self.0.clone();
|
||||
if let Some(host) = url.host_str().map(|h| h.to_string()) {
|
||||
if let Some(port) = url.port() {
|
||||
if (url.scheme() == "http" && port == 80) || (url.scheme() == "https" && port == 443) {
|
||||
url.set_host(Some(&host)).unwrap();
|
||||
url.set_port(None).unwrap();
|
||||
}
|
||||
}
|
||||
}
|
||||
let mut s = url.to_string();
|
||||
|
||||
// If the URL ends with a slash and the path is just "/", remove the trailing slash.
|
||||
if s.ends_with('/') && url.path() == "/" {
|
||||
s.pop();
|
||||
}
|
||||
|
||||
write!(f, "{}", s)
|
||||
}
|
||||
}
|
||||
|
||||
impl serde::Serialize for ParsedURL {
|
||||
fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
|
||||
where
|
||||
S: serde::Serializer,
|
||||
{
|
||||
serializer.serialize_str(&self.to_string())
|
||||
}
|
||||
}
|
||||
|
||||
impl<'de> serde::Deserialize<'de> for ParsedURL {
|
||||
fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
|
||||
where
|
||||
D: serde::Deserializer<'de>,
|
||||
{
|
||||
let s: String = serde::Deserialize::deserialize(deserializer)?;
|
||||
if s.is_empty() {
|
||||
Ok(ParsedURL(Url::parse("about:blank").unwrap()))
|
||||
} else {
|
||||
parse_url(&s).map_err(serde::de::Error::custom)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// parse_url parses a string into a ParsedURL, with host validation and path cleaning.
|
||||
pub fn parse_url(s: &str) -> Result<ParsedURL, NetError> {
|
||||
if let Some(scheme_end) = s.find("://") {
|
||||
if s[scheme_end + 3..].starts_with('/') {
|
||||
let scheme = &s[..scheme_end];
|
||||
if !scheme.is_empty() {
|
||||
return Err(NetError::SchemeWithEmptyHost);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut uu = Url::parse(s).map_err(|e| NetError::ParseError(e.to_string()))?;
|
||||
if uu.host_str().is_none_or(|h| h.is_empty()) {
|
||||
if uu.scheme() != "" {
|
||||
return Err(NetError::SchemeWithEmptyHost);
|
||||
}
|
||||
} else {
|
||||
let port_str = uu.port().map(|p| p.to_string()).unwrap_or_else(|| match uu.scheme() {
|
||||
"http" => "80".to_string(),
|
||||
"https" => "443".to_string(),
|
||||
_ => "".to_string(),
|
||||
});
|
||||
|
||||
if !port_str.is_empty() {
|
||||
let host_port = format!("{}:{}", uu.host_str().unwrap(), port_str);
|
||||
parse_host(&host_port)?; // Validate host.
|
||||
}
|
||||
}
|
||||
|
||||
// Clean path: Use Url's path_segments to normalize.
|
||||
if !uu.path().is_empty() {
|
||||
// Url automatically cleans paths, but we ensure trailing slash if original had it.
|
||||
let mut cleaned_path = String::new();
|
||||
for comp in Path::new(uu.path()).components() {
|
||||
use std::path::Component;
|
||||
match comp {
|
||||
Component::RootDir => cleaned_path.push('/'),
|
||||
Component::Normal(s) => {
|
||||
if !cleaned_path.ends_with('/') {
|
||||
cleaned_path.push('/');
|
||||
}
|
||||
cleaned_path.push_str(&s.to_string_lossy());
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
if s.ends_with('/') && !cleaned_path.ends_with('/') {
|
||||
cleaned_path.push('/');
|
||||
}
|
||||
if cleaned_path.is_empty() {
|
||||
cleaned_path.push('/');
|
||||
}
|
||||
uu.set_path(&cleaned_path);
|
||||
}
|
||||
|
||||
Ok(ParsedURL(uu))
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
/// parse_http_url parses a string into a ParsedURL, ensuring the scheme is http or https.
|
||||
pub fn parse_http_url(s: &str) -> Result<ParsedURL, NetError> {
|
||||
let u = parse_url(s)?;
|
||||
match u.0.scheme() {
|
||||
"http" | "https" => Ok(u),
|
||||
_ => Err(NetError::UnexpectedScheme(u.0.scheme().to_string())),
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
/// is_network_or_host_down checks if an error indicates network or host down, considering timeouts.
|
||||
pub fn is_network_or_host_down(err: &std::io::Error, expect_timeouts: bool) -> bool {
|
||||
if err.kind() == std::io::ErrorKind::TimedOut {
|
||||
return !expect_timeouts;
|
||||
}
|
||||
// Simplified checks based on Go logic; adapt for Rust as needed
|
||||
let err_str = err.to_string().to_lowercase();
|
||||
err_str.contains("connection reset by peer")
|
||||
|| err_str.contains("connection timed out")
|
||||
|| err_str.contains("broken pipe")
|
||||
|| err_str.contains("use of closed network connection")
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
/// is_conn_reset_err checks if an error indicates a connection reset by peer.
|
||||
pub fn is_conn_reset_err(err: &std::io::Error) -> bool {
|
||||
err.to_string().contains("connection reset by peer") || matches!(err.raw_os_error(), Some(libc::ECONNRESET))
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
/// is_conn_refused_err checks if an error indicates a connection refused.
|
||||
pub fn is_conn_refused_err(err: &std::io::Error) -> bool {
|
||||
err.to_string().contains("connection refused") || matches!(err.raw_os_error(), Some(libc::ECONNREFUSED))
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn parse_host_with_empty_string_returns_error() {
|
||||
let result = parse_host("");
|
||||
assert!(matches!(result, Err(NetError::InvalidArgument)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_host_with_valid_ipv4() {
|
||||
let result = parse_host("192.168.1.1:8080");
|
||||
assert!(result.is_ok());
|
||||
let host = result.unwrap();
|
||||
assert_eq!(host.name, "192.168.1.1");
|
||||
assert_eq!(host.port, Some(8080));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_host_with_valid_hostname() {
|
||||
let result = parse_host("example.com:443");
|
||||
assert!(result.is_ok());
|
||||
let host = result.unwrap();
|
||||
assert_eq!(host.name, "example.com");
|
||||
assert_eq!(host.port, Some(443));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_host_with_ipv6_brackets() {
|
||||
let result = parse_host("[::1]:8080");
|
||||
assert!(result.is_ok());
|
||||
let host = result.unwrap();
|
||||
assert_eq!(host.name, "::1");
|
||||
assert_eq!(host.port, Some(8080));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_host_with_invalid_ipv6_missing_bracket() {
|
||||
let result = parse_host("::1]:8080");
|
||||
assert!(matches!(result, Err(NetError::MissingBracket)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_host_with_invalid_hostname() {
|
||||
let result = parse_host("invalid..host:80");
|
||||
assert!(matches!(result, Err(NetError::InvalidHost)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_host_without_port() {
|
||||
let result = parse_host("example.com");
|
||||
assert!(result.is_ok());
|
||||
let host = result.unwrap();
|
||||
assert_eq!(host.name, "example.com");
|
||||
assert_eq!(host.port, None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn host_is_empty_when_name_is_empty() {
|
||||
let host = Host {
|
||||
name: "".to_string(),
|
||||
port: None,
|
||||
};
|
||||
assert!(host.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn host_is_not_empty_when_name_present() {
|
||||
let host = Host {
|
||||
name: "example.com".to_string(),
|
||||
port: Some(80),
|
||||
};
|
||||
assert!(!host.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn host_to_string_with_port() {
|
||||
let host = Host {
|
||||
name: "example.com".to_string(),
|
||||
port: Some(80),
|
||||
};
|
||||
assert_eq!(host.to_string(), "example.com:80");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn host_to_string_without_port() {
|
||||
let host = Host {
|
||||
name: "example.com".to_string(),
|
||||
port: None,
|
||||
};
|
||||
assert_eq!(host.to_string(), "example.com");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn host_equal_when_same() {
|
||||
let host1 = Host {
|
||||
name: "example.com".to_string(),
|
||||
port: Some(80),
|
||||
};
|
||||
let host2 = Host {
|
||||
name: "example.com".to_string(),
|
||||
port: Some(80),
|
||||
};
|
||||
assert!(host1.equal(&host2));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn host_not_equal_when_different() {
|
||||
let host1 = Host {
|
||||
name: "example.com".to_string(),
|
||||
port: Some(80),
|
||||
};
|
||||
let host2 = Host {
|
||||
name: "example.com".to_string(),
|
||||
port: Some(443),
|
||||
};
|
||||
assert!(!host1.equal(&host2));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_url_with_valid_http_url() {
|
||||
let result = parse_url("http://example.com/path");
|
||||
assert!(result.is_ok());
|
||||
let parsed = result.unwrap();
|
||||
assert_eq!(parsed.hostname(), "example.com");
|
||||
assert_eq!(parsed.port(), "80");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_url_with_valid_https_url() {
|
||||
let result = parse_url("https://example.com:443/path");
|
||||
assert!(result.is_ok());
|
||||
let parsed = result.unwrap();
|
||||
assert_eq!(parsed.hostname(), "example.com");
|
||||
assert_eq!(parsed.port(), "443");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_url_with_scheme_but_empty_host() {
|
||||
let result = parse_url("http:///path");
|
||||
assert!(matches!(result, Err(NetError::SchemeWithEmptyHost)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_url_with_invalid_host() {
|
||||
let result = parse_url("http://invalid..host/path");
|
||||
assert!(matches!(result, Err(NetError::InvalidHost)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_url_with_path_cleaning() {
|
||||
let result = parse_url("http://example.com//path/../path/");
|
||||
assert!(result.is_ok());
|
||||
let parsed = result.unwrap();
|
||||
assert_eq!(parsed.0.path(), "/path/");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_http_url_with_http_scheme() {
|
||||
let result = parse_http_url("http://example.com");
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_http_url_with_https_scheme() {
|
||||
let result = parse_http_url("https://example.com");
|
||||
assert!(result.is_ok());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_http_url_with_invalid_scheme() {
|
||||
let result = parse_http_url("ftp://example.com");
|
||||
assert!(matches!(result, Err(NetError::UnexpectedScheme(_))));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parsed_url_is_empty_when_url_is_empty() {
|
||||
let url = ParsedURL(Url::parse("about:blank").unwrap());
|
||||
assert!(url.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parsed_url_hostname() {
|
||||
let url = ParsedURL(Url::parse("http://example.com:8080").unwrap());
|
||||
assert_eq!(url.hostname(), "example.com");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parsed_url_port() {
|
||||
let url = ParsedURL(Url::parse("http://example.com:8080").unwrap());
|
||||
assert_eq!(url.port(), "8080");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parsed_url_to_string_removes_default_ports() {
|
||||
let url = ParsedURL(Url::parse("http://example.com:80").unwrap());
|
||||
assert_eq!(url.to_string(), "http://example.com");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn is_network_or_host_down_with_timeout() {
|
||||
let err = std::io::Error::new(std::io::ErrorKind::TimedOut, "timeout");
|
||||
assert!(is_network_or_host_down(&err, false));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn is_network_or_host_down_with_expected_timeout() {
|
||||
let err = std::io::Error::new(std::io::ErrorKind::TimedOut, "timeout");
|
||||
assert!(!is_network_or_host_down(&err, true));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn is_conn_reset_err_with_reset_message() {
|
||||
let err = std::io::Error::other("connection reset by peer");
|
||||
assert!(is_conn_reset_err(&err));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn is_conn_refused_err_with_refused_message() {
|
||||
let err = std::io::Error::other("connection refused");
|
||||
assert!(is_conn_refused_err(&err));
|
||||
}
|
||||
}
|
||||
0
deploy/data/dev/.gitkeep
Normal file
0
deploy/data/pro/.gitkeep
Normal file
@@ -30,7 +30,7 @@ services:
|
||||
- "9000:9000" # S3 API port
|
||||
- "9001:9001" # Console port
|
||||
environment:
|
||||
- RUSTFS_VOLUMES=/data/rustfs0,/data/rustfs1,/data/rustfs2,/data/rustfs3
|
||||
- RUSTFS_VOLUMES=/data/rustfs{0..3} # Define 4 storage volumes
|
||||
- RUSTFS_ADDRESS=0.0.0.0:9000
|
||||
- RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9001
|
||||
- RUSTFS_CONSOLE_ENABLE=true
|
||||
@@ -43,12 +43,9 @@ services:
|
||||
- RUSTFS_TLS_PATH=/opt/tls
|
||||
- RUSTFS_OBS_ENDPOINT=http://otel-collector:4317
|
||||
volumes:
|
||||
- rustfs_data_0:/data/rustfs0
|
||||
- rustfs_data_1:/data/rustfs1
|
||||
- rustfs_data_2:/data/rustfs2
|
||||
- rustfs_data_3:/data/rustfs3
|
||||
- logs_data:/app/logs
|
||||
- .docker/tls/:/opt/tls # TLS configuration, you should create tls directory and put your tls files in it and then specify the path here
|
||||
- deploy/data/pro:/data
|
||||
- deploy/logs:/app/logs
|
||||
- deploy/data/certs/:/opt/tls # TLS configuration, you should create tls directory and put your tls files in it and then specify the path here
|
||||
networks:
|
||||
- rustfs-network
|
||||
restart: unless-stopped
|
||||
@@ -78,7 +75,7 @@ services:
|
||||
- "9010:9000" # S3 API port
|
||||
- "9011:9001" # Console port
|
||||
environment:
|
||||
- RUSTFS_VOLUMES=/data/rustfs0,/data/rustfs1
|
||||
- RUSTFS_VOLUMES=/data/rustfs{1..4}
|
||||
- RUSTFS_ADDRESS=0.0.0.0:9000
|
||||
- RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9001
|
||||
- RUSTFS_CONSOLE_ENABLE=true
|
||||
@@ -90,7 +87,7 @@ services:
|
||||
- RUSTFS_LOG_LEVEL=debug
|
||||
volumes:
|
||||
- .:/app # Mount source code to /app for development
|
||||
- rustfs_dev_data:/data
|
||||
- deploy/data/dev:/data
|
||||
networks:
|
||||
- rustfs-network
|
||||
restart: unless-stopped
|
||||
@@ -98,7 +95,7 @@ services:
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"sh", "-c",
|
||||
"sh", "-c",
|
||||
"curl -f http://localhost:9000/health && curl -f http://localhost:9001/health"
|
||||
]
|
||||
interval: 30s
|
||||
@@ -239,5 +236,5 @@ volumes:
|
||||
driver: local
|
||||
redis_data:
|
||||
driver: local
|
||||
logs_data:
|
||||
logs:
|
||||
driver: local
|
||||
|
||||
106
docs/ansible/REAEME.md
Normal file
@@ -0,0 +1,106 @@
|
||||
# Install RustFS in MNMD mode using Ansible
|
||||
|
||||
This chapter shows how to install RustFS in MNMD (multiple nodes, multiple disks) mode using an Ansible playbook. Two installation methods are available: binary and Docker Compose.
|
||||
|
||||
## Requirements
|
||||
|
||||
- Multiple nodes (at least 4 nodes, each with a private IP and a public IP)
|
||||
- Multiple disks (at least 1 disk per node; 4 disks per node is a better choice)
|
||||
- Ansible should be available
|
||||
- Docker should be available (only for the Docker Compose installation)
|
||||
|
||||
## Binary installation and uninstallation
|
||||
|
||||
### Installation
|
||||
|
||||
For binary installation ([script installation](https://rustfs.com/en/download/)), you should modify the part of the playbook below:
|
||||
|
||||
```
|
||||
- name: Modify Target Server's hosts file
|
||||
blockinfile:
|
||||
path: /etc/hosts
|
||||
block: |
|
||||
172.92.20.199 rustfs-node1
|
||||
172.92.20.200 rustfs-node2
|
||||
172.92.20.201 rustfs-node3
|
||||
172.92.20.202 rustfs-node4
|
||||
```
|
||||
|
||||
Replace the IPs with your nodes' **private IPs**. If you have more than 4 nodes, add the IPs in order.
|
||||
|
||||
Run the following command to install RustFS:
|
||||
|
||||
```
|
||||
ansible-playbook --skip-tags rustfs_uninstall binary-mnmd.yml
|
||||
```
|
||||
|
||||
After the installation succeeds, you can access the RustFS cluster via any node's public IP on port 9000. The default username and password are both `rustfsadmin`.
|
||||
|
||||
|
||||
### Uninstallation
|
||||
|
||||
Run the following command to uninstall RustFS:
|
||||
|
||||
```
|
||||
ansible-playbook --tags rustfs_uninstall binary-mnmd.yml
|
||||
```
|
||||
|
||||
## Docker compose installation and uninstallation
|
||||
|
||||
**NOTE**: For the Docker Compose installation, the playbook contains a Docker installation task:
|
||||
|
||||
```
|
||||
tasks:
|
||||
- name: Install docker
|
||||
shell: |
|
||||
apt-get remove -y docker docker.io containerd runc || true
|
||||
apt-get update -y
|
||||
apt-get install -y ca-certificates curl gnupg lsb-release
|
||||
install -m 0755 -d /etc/apt/keyrings
|
||||
curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | gpg --dearmor --yes -o /etc/apt/keyrings/docker.gpg
|
||||
chmod a+r /etc/apt/keyrings/docker.gpg
|
||||
echo \
|
||||
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://mirrors.aliyun.com/docker-ce/linux/ubuntu \
|
||||
$(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
|
||||
apt-get update -y
|
||||
apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin
|
||||
become: yes
|
||||
register: docker_installation_result
|
||||
changed_when: false
|
||||
|
||||
- name: Installation check
|
||||
debug:
|
||||
var: docker_installation_result.stdout
|
||||
```
|
||||
|
||||
If your nodes already have a Docker environment, you can add `tags` in the playbook and skip this task in subsequent installations. Note that the Docker installation task only supports `Ubuntu`; if you run a different OS, you should modify this task as well.
|
||||
|
||||
For the Docker Compose installation, you should also modify the part of the playbook below:
|
||||
|
||||
```
|
||||
extra_hosts:
|
||||
- "rustfs-node1:172.20.92.202"
|
||||
- "rustfs-node2:172.20.92.201"
|
||||
- "rustfs-node3:172.20.92.200"
|
||||
- "rustfs-node4:172.20.92.199"
|
||||
```
|
||||
|
||||
Replace the IPs with your nodes' **private IPs**. If you have more than 4 nodes, add the IPs in order.
|
||||
|
||||
Run the following command to install RustFS:
|
||||
|
||||
```
|
||||
ansible-playbook --skip-tags docker_uninstall docker-compose-mnmd.yml
|
||||
```
|
||||
|
||||
After the installation succeeds, you can access the RustFS cluster via any node's public IP on port 9000. The default username and password are both `rustfsadmin`.
|
||||
|
||||
### Uninstallation
|
||||
|
||||
Run the following command to uninstall RustFS:
|
||||
|
||||
```
|
||||
ansible-playbook --tags docker_uninstall docker-compose-mnmd.yml
|
||||
```
|
||||
|
||||
|
||||
168
docs/ansible/binary-mnmd.yml
Normal file
@@ -0,0 +1,168 @@
|
||||
---
|
||||
- name: Prepare for RustFS installation
|
||||
hosts: rustfs
|
||||
become: yes
|
||||
vars:
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
remote_user: root
|
||||
|
||||
tasks:
|
||||
- name: Create Workspace
|
||||
file:
|
||||
path: /opt/rustfs
|
||||
state: directory
|
||||
mode: '0755'
|
||||
register: create_dir_result
|
||||
|
||||
- name: Dir Creation Result Check
|
||||
debug:
|
||||
msg: "RustFS dir created successfully"
|
||||
when: create_dir_result.changed
|
||||
|
||||
- name: Modify Target Server's hosts file
|
||||
blockinfile:
|
||||
path: /etc/hosts
|
||||
block: |
|
||||
127.0.0.1 localhost
|
||||
172.20.92.199 rustfs-node1
|
||||
172.20.92.200 rustfs-node2
|
||||
172.20.92.201 rustfs-node3
|
||||
172.20.92.202 rustfs-node4
|
||||
|
||||
- name: Create rustfs group
|
||||
group:
|
||||
name: rustfs
|
||||
system: yes
|
||||
state: present
|
||||
|
||||
- name: Create rustfs user
|
||||
user:
|
||||
name: rustfs
|
||||
shell: /bin/bash
|
||||
system: yes
|
||||
create_home: no
|
||||
group: rustfs
|
||||
state: present
|
||||
|
||||
- name: Get rustfs user id
|
||||
command: id -u rustfs
|
||||
register: rustfs_uid
|
||||
changed_when: false
|
||||
ignore_errors: yes
|
||||
|
||||
- name: Check rustfs user id
|
||||
debug:
|
||||
msg: "rustfs uid is {{ rustfs_uid.stdout }}"
|
||||
|
||||
- name: Create volume dir
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: directory
|
||||
owner: rustfs
|
||||
group: rustfs
|
||||
mode: '0755'
|
||||
loop:
|
||||
- /data/rustfs0
|
||||
- /data/rustfs1
|
||||
- /data/rustfs2
|
||||
- /data/rustfs3
|
||||
- /var/logs/rustfs
|
||||
|
||||
- name: Install RustFS
|
||||
hosts: rustfs
|
||||
become: yes
|
||||
vars:
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
install_script_url: "https://rustfs.com/install_rustfs.sh"
|
||||
install_script_tmp: "/opt/rustfs/install_rustfs.sh"
|
||||
tags: rustfs_install
|
||||
|
||||
tasks:
|
||||
- name: Prepare configuration file
|
||||
copy:
|
||||
dest: /etc/default/rustfs
|
||||
content: |
|
||||
RUSTFS_ACCESS_KEY=rustfsadmin
|
||||
RUSTFS_SECRET_KEY=rustfsadmin
|
||||
RUSTFS_VOLUMES="http://rustfs-node{1...4}:9000/data/rustfs{0...3}"
|
||||
RUSTFS_ADDRESS=":9000"
|
||||
RUSTFS_CONSOLE_ENABLE=true
|
||||
RUST_LOG=error
|
||||
RUSTFS_OBS_LOG_DIRECTORY="/var/logs/rustfs/"
|
||||
RUSTFS_EXTERNAL_ADDRESS=0.0.0.0:9000
|
||||
owner: root
|
||||
group: root
|
||||
mode: '0644'
|
||||
|
||||
- name: Install unzip
|
||||
apt:
|
||||
name: unzip
|
||||
state: present
|
||||
update_cache: yes
|
||||
|
||||
- name: Download the rustfs install script
|
||||
get_url:
|
||||
url: "{{ install_script_url }}"
|
||||
dest: "{{ install_script_tmp }}"
|
||||
mode: '0755'
|
||||
|
||||
- name: Run rustfs installation script
|
||||
expect:
|
||||
command: bash "{{install_script_tmp}}"
|
||||
responses:
|
||||
'.*Enter your choice.*': "1\n"
|
||||
'.*Please enter RustFS service port.*': "9000\n"
|
||||
'.*Please enter RustFS console port.*': "9001\n"
|
||||
'.*Please enter data storage directory.*': "http://rustfs-node{1...4}:9000/data/rustfs{0...3}\n"
|
||||
timeout: 300
|
||||
register: rustfs_install_result
|
||||
tags:
|
||||
- rustfs_install
|
||||
|
||||
- name: Debug installation output
|
||||
debug:
|
||||
var: rustfs_install_result.stdout_lines
|
||||
|
||||
- name: Installation confirmation
|
||||
command: rustfs --version
|
||||
register: rustfs_version
|
||||
changed_when: false
|
||||
failed_when: rustfs_version.rc != 0
|
||||
|
||||
- name: Show rustfs version
|
||||
debug:
|
||||
msg: "RustFS version is {{ rustfs_version.stdout }}"
|
||||
|
||||
- name: Uninstall RustFS
|
||||
hosts: rustfs
|
||||
become: yes
|
||||
vars:
|
||||
install_script_tmp: /opt/rustfs/install_rustfs.sh
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
tags: rustfs_uninstall
|
||||
|
||||
tasks:
|
||||
- name: Run rustfs uninstall script
|
||||
expect:
|
||||
command: bash "{{ install_script_tmp }}"
|
||||
responses:
|
||||
'Enter your choice.*': "2\n"
|
||||
'Are you sure you want to uninstall RustFS.*': "y\n"
|
||||
timeout: 300
|
||||
register: rustfs_uninstall_result
|
||||
tags: rustfs_uninstall
|
||||
|
||||
- name: Debug uninstall output
|
||||
debug:
|
||||
var: rustfs_uninstall_result.stdout_lines
|
||||
|
||||
- name: Delete data dir
|
||||
file:
|
||||
path: "{{ item }}"
|
||||
state: absent
|
||||
loop:
|
||||
- /data/rustfs0
|
||||
- /data/rustfs1
|
||||
- /data/rustfs2
|
||||
- /data/rustfs3
|
||||
- /var/logs/rustfs
|
||||
115
docs/ansible/docker-compose-mnmd.yml
Normal file
@@ -0,0 +1,115 @@
|
||||
---
|
||||
- name: Prepare for RustFS installation
|
||||
hosts: rustfs
|
||||
become: yes
|
||||
vars:
|
||||
ansible_python_interpreter: /usr/bin/python3
|
||||
install_script_url: "https://rustfs.com/install_rustfs.sh"
|
||||
docker_compose: "/opt/rustfs/docker-compose"
|
||||
remote_user: root
|
||||
|
||||
tasks:
|
||||
- name: Install docker
|
||||
tags: docker_install
|
||||
shell: |
|
||||
apt-get remove -y docker docker.io containerd runc || true
|
||||
apt-get update -y
|
||||
apt-get install -y ca-certificates curl gnupg lsb-release
|
||||
install -m 0755 -d /etc/apt/keyrings
|
||||
curl -fsSL https://mirrors.aliyun.com/docker-ce/linux/ubuntu/gpg | gpg --dearmor --yes -o /etc/apt/keyrings/docker.gpg
|
||||
chmod a+r /etc/apt/keyrings/docker.gpg
|
||||
echo \
|
||||
"deb [arch=$(dpkg --print-architecture) signed-by=/etc/apt/keyrings/docker.gpg] https://mirrors.aliyun.com/docker-ce/linux/ubuntu \
|
||||
$(lsb_release -cs) stable" | tee /etc/apt/sources.list.d/docker.list > /dev/null
|
||||
apt-get update -y
|
||||
apt-get install -y docker-ce docker-ce-cli containerd.io docker-compose-plugin
|
||||
become: yes
|
||||
register: docker_installation_result
|
||||
changed_when: false
|
||||
when: ansible_facts['distribution'] == "Ubuntu"
|
||||
|
||||
- name: Installation check
|
||||
debug:
|
||||
var: docker_installation_result.stdout
|
||||
when: ansible_facts['distribution'] == "Ubuntu"
|
||||
|
||||
- name: Create docker compose dir
|
||||
file:
|
||||
path: "{{ docker_compose }}"
|
||||
state: directory
|
||||
mode: '0755'
|
||||
|
||||
- name: Prepare docker compose file
|
||||
copy:
|
||||
content: |
|
||||
services:
|
||||
rustfs:
|
||||
image: rustfs/rustfs:latest
|
||||
container_name: rustfs
|
||||
hostname: rustfs
|
||||
network_mode: host
|
||||
environment:
|
||||
# Use service names and correct disk indexing (1..4 to match mounted paths)
|
||||
- RUSTFS_VOLUMES=http://rustfs-node{1...4}:9000/data/rustfs{1...4}
|
||||
- RUSTFS_ADDRESS=0.0.0.0:9000
|
||||
- RUSTFS_CONSOLE_ENABLE=true
|
||||
- RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9001
|
||||
- RUSTFS_EXTERNAL_ADDRESS=0.0.0.0:9000 # Same as internal since no port mapping
|
||||
- RUSTFS_ACCESS_KEY=rustfsadmin
|
||||
- RUSTFS_SECRET_KEY=rustfsadmin
|
||||
- RUSTFS_CMD=rustfs
|
||||
command: ["sh", "-c", "sleep 3 && rustfs"]
|
||||
healthcheck:
|
||||
test:
|
||||
[
|
||||
"CMD-SHELL",
|
||||
"curl -f http://localhost:9000/health && curl -f http://localhost:9001/health || exit 1"
|
||||
]
|
||||
interval: 10s
|
||||
timeout: 5s
|
||||
retries: 3
|
||||
start_period: 30s
|
||||
ports:
|
||||
- "9000:9000" # API endpoint
|
||||
- "9001:9001" # Console
|
||||
volumes:
|
||||
- rustfs-data1:/data/rustfs1
|
||||
- rustfs-data2:/data/rustfs2
|
||||
- rustfs-data3:/data/rustfs3
|
||||
- rustfs-data4:/data/rustfs4
|
||||
extra_hosts:
|
||||
- "rustfs-node1:172.20.92.202"
|
||||
- "rustfs-node2:172.20.92.201"
|
||||
- "rustfs-node3:172.20.92.200"
|
||||
- "rustfs-node4:172.20.92.199"
|
||||
|
||||
volumes:
|
||||
rustfs-data1:
|
||||
rustfs-data2:
|
||||
rustfs-data3:
|
||||
rustfs-data4:
|
||||
|
||||
dest: "{{ docker_compose }}/docker-compose.yml"
|
||||
mode: '0644'
|
||||
|
||||
- name: Install rustfs using docker compose
|
||||
tags: rustfs_install
|
||||
command: docker compose -f "{{ docker_compose}}/docker-compose.yml" up -d
|
||||
args:
|
||||
chdir: "{{ docker_compose }}"
|
||||
|
||||
- name: Get docker compose output
|
||||
command: docker compose ps
|
||||
args:
|
||||
chdir: "{{ docker_compose }}"
|
||||
register: docker_compose_output
|
||||
|
||||
- name: Check the docker compose installation output
|
||||
debug:
|
||||
msg: "{{ docker_compose_output.stdout }}"
|
||||
|
||||
- name: Uninstall rustfs using docker compose
|
||||
tags: rustfs_uninstall
|
||||
command: docker compose -f "{{ docker_compose}}/docker-compose.yml" down
|
||||
args:
|
||||
chdir: "{{ docker_compose }}"
|
||||
@@ -1,147 +0,0 @@
|
||||
# Append Write Design
|
||||
|
||||
This document captures the current design of the append-write feature in RustFS so that new contributors can quickly understand the moving parts, data flows, and testing expectations.
|
||||
|
||||
## Goals & Non-Goals
|
||||
|
||||
### Goals
|
||||
- Allow clients to append payloads to existing objects without re-uploading the full body.
|
||||
- Support inline objects and spill seamlessly into segmented layout once thresholds are exceeded.
|
||||
- Preserve strong read-after-write semantics via optimistic concurrency controls (ETag / epoch).
|
||||
- Expose minimal S3-compatible surface area (`x-amz-object-append`, `x-amz-append-position`, `x-amz-append-action`).
|
||||
|
||||
### Non-Goals
|
||||
- Full multipart-upload parity; append is intentionally simpler and serialized per object.
|
||||
- Cross-object transactions; each object is isolated.
|
||||
- Rebalancing or background compaction (future work).
|
||||
|
||||
## State Machine
|
||||
|
||||
Append state is persisted inside `FileInfo.metadata` under `x-rustfs-internal-append-state` and serialized as `AppendState` (`crates/filemeta/src/append.rs`).
|
||||
|
||||
```
|
||||
Disabled --(initial PUT w/o append)--> SegmentedSealed
|
||||
Inline --(inline append)--> Inline / InlinePendingSpill
|
||||
InlinePendingSpill --(spill success)--> SegmentedActive
|
||||
SegmentedActive --(Complete)--> SegmentedSealed
|
||||
SegmentedActive --(Abort)--> SegmentedSealed
|
||||
SegmentedSealed --(new append)--> SegmentedActive
|
||||
```
|
||||
|
||||
Definitions:
|
||||
- **Inline**: Object data fully stored in metadata (`FileInfo.data`).
|
||||
- **InlinePendingSpill**: Inline data after append exceeded inline threshold; awaiting spill to disk.
|
||||
- **SegmentedActive**: Object data lives in erasure-coded part(s) plus one or more pending append segments on disk (`append/<epoch>/<uuid>`).
|
||||
- **SegmentedSealed**: No pending segments; logical content equals committed parts.
|
||||
|
||||
`AppendState` fields:
|
||||
- `state`: current state enum (see above).
|
||||
- `epoch`: monotonically increasing counter for concurrency control.
|
||||
- `committed_length`: logical size already durable in the base parts/inline region.
|
||||
- `pending_segments`: ordered list of `AppendSegment { offset, length, data_dir, etag, epoch }`.
|
||||
|
||||
## Metadata & Storage Layout
|
||||
|
||||
### Inline Objects
|
||||
- Inline payload stored in `FileInfo.data`.
|
||||
- Hash metadata maintained through `append_inline_data` (re-encoding with bitrot writer when checksums exist).
|
||||
- When spilling is required, inline data is decoded, appended, and re-encoded into erasure shards written to per-disk `append/<epoch>/<segment_id>/part.1` temporary path before rename to primary data directory.
|
||||
|
||||
### Segmented Objects
|
||||
- Base object content is represented by standard erasure-coded parts (`FileInfo.parts`, `FileInfo.data_dir`).
|
||||
- Pending append segments live under `<object>/append/<epoch>/<segment_uuid>/part.1` (per disk).
|
||||
- Each append stores segment metadata (`etag`, `offset`, `length`) inside `AppendState.pending_segments` and updates `FileInfo.size` to include pending bytes.
|
||||
- Aggregate ETag is recomputed using multipart MD5 helper (`get_complete_multipart_md5`).
|
||||
|
||||
### Metadata Writes
|
||||
- `SetDisks::write_unique_file_info` persists `FileInfo` updates to the quorum of disks.
|
||||
- During spill/append/complete/abort, all mirrored `FileInfo` copies within `parts_metadata` are updated to keep nodes consistent.
|
||||
- Abort ensures inline markers are cleared (`x-rustfs-internal-inline-data`) and `FileInfo.data = None` to avoid stale inline reads.
|
||||
|
||||
## Request Flows
|
||||
|
||||
### Append (Inline Path)
|
||||
1. Handler (`rustfs/src/storage/ecfs.rs`) validates headers and fills `ObjectOptions.append_*`.
|
||||
2. `SetDisks::append_inline_object` verifies append position using `AppendState` snapshot.
|
||||
3. Existing inline payload decoded (if checksums present) and appended in-memory (`append_inline_data`).
|
||||
4. Storage class decision determines whether to remain inline or spill.
|
||||
5. Inline success updates `FileInfo.data`, metadata, `AppendState` (state `Inline`, lengths updated).
|
||||
6. Spill path delegates to `spill_inline_into_segmented` (see segmented path below).
|
||||
|
||||
### Append (Segmented Path)
|
||||
1. `SetDisks::append_segmented_object` validates state (must be `SegmentedActive` or `SegmentedSealed`).
|
||||
2. Snapshot expected offset = committed length + sum of pending segments.
|
||||
3. Payload encoded using erasure coding; shards written to temp volume; renamed into `append/<epoch>/<segment_uuid>` under object data directory.
|
||||
4. New `AppendSegment` pushed, `AppendState.epoch` incremented, aggregated ETag recalculated.
|
||||
5. `FileInfo.size` reflects committed + pending bytes; metadata persisted across quorum.
|
||||
|
||||
### GET / Range Reads
|
||||
1. `SetDisks::get_object_with_fileinfo` inspects `AppendState`.
|
||||
2. Reads committed data from inline or erasure parts (ignoring inline buffers once segmented).
|
||||
3. If requested range includes pending segments, loader fetches each segment via `load_pending_segment`, decodes shards, and streams appended bytes.
|
||||
|
||||
### Complete Append (`x-amz-append-action: complete`)
|
||||
1. `complete_append_object` fetches current `FileInfo`, ensures pending segments exist.
|
||||
2. Entire logical object (committed + pending) streamed through `VecAsyncWriter` (TODO: potential optimization) to produce contiguous payload.
|
||||
3. Inline spill routine (`spill_inline_into_segmented`) consolidates data into primary part, sets state `SegmentedSealed`, clears pending list, updates `committed_length`.
|
||||
4. Pending segment directories removed and quorum metadata persisted.
|
||||
|
||||
### Abort Append (`x-amz-append-action: abort`)
|
||||
1. `abort_append_object` removes pending segment directories.
|
||||
2. Ensures `committed_length` matches actual durable data (inline length or sum of parts); logs and corrects if mismatch is found.
|
||||
3. Clears pending list, sets state `SegmentedSealed`, bumps epoch, removes inline markers/data.
|
||||
4. Persists metadata and returns base ETag (multipart MD5 of committed parts).
|
||||
|
||||
## Error Handling & Recovery
|
||||
|
||||
- All disk writes go through quorum helpers (`reduce_write_quorum_errs`, `reduce_read_quorum_errs`) and propagate `StorageError` variants for HTTP mapping.
|
||||
- Append operations are single-threaded per object via locking in higher layers (`fast_lock_manager` in `SetDisks::put_object`).
|
||||
- On spill/append rename failure, temp directories are cleaned up; operation aborts without mutating metadata.
|
||||
- Abort path now realigns `committed_length` if metadata drifted (observed during development) and strips inline remnants to prevent stale reads.
|
||||
- Pending segments are only removed once metadata update succeeds; no partial deletion is performed ahead of state persistence.
|
||||
|
||||
## Concurrency

- Append requests rely on exact `x-amz-append-position` to ensure the client has an up-to-date view.
- Optional header `If-Match` is honored in S3 handler before actual append (shared with regular PUT path).
- `AppendState.epoch` increments after each append/complete/abort; future work may expose it for stronger optimistic control.
- e2e test `append_segments_concurrency_then_complete` verifies that simultaneous appends result in exactly one success; the loser receives 400 (modeled in the sketch below).
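
The "exactly one winner" behavior can be modeled with a small sketch in which a per-object lock serializes the position check; the types are illustrative, and the real code uses `fast_lock_manager` rather than a `std::sync::Mutex`:

```rust
use std::sync::{Arc, Mutex};
use std::thread;

/// Minimal model of the per-object state guarded by the object lock.
struct ObjectState {
    next_append_position: u64,
}

/// Returns Ok(new_position), or Err(expected) when the client's view is stale
/// (surfaced to the client as HTTP 400).
fn try_append(state: &Mutex<ObjectState>, position: u64, len: u64) -> Result<u64, u64> {
    let mut st = state.lock().unwrap();
    if st.next_append_position != position {
        return Err(st.next_append_position);
    }
    st.next_append_position += len;
    Ok(st.next_append_position)
}

fn main() {
    let state = Arc::new(Mutex::new(ObjectState { next_append_position: 100 }));

    // Two clients race with the same `x-amz-append-position: 100`.
    let handles: Vec<_> = (0..2)
        .map(|_| {
            let state = Arc::clone(&state);
            thread::spawn(move || try_append(&state, 100, 10))
        })
        .collect();

    let results: Vec<_> = handles.into_iter().map(|h| h.join().unwrap()).collect();
    let wins = results.iter().filter(|r| r.is_ok()).count();
    assert_eq!(wins, 1); // exactly one append succeeds; the loser sees a stale position
    println!("{results:?}");
}
```
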
## Key Modules

- `crates/ecstore/src/set_disk.rs`: core implementation (inline append, spill, segmented append, complete, abort, GET integration).
- `crates/ecstore/src/erasure_coding/{encode,decode}.rs`: encode/decode helpers used by append pipeline.
- `crates/filemeta/src/append.rs`: metadata schema + helper functions.
- `rustfs/src/storage/ecfs.rs`: HTTP/S3 layer that parses headers and routes to append operations.

## Testing Strategy

### Unit Tests

- `crates/filemeta/src/append.rs` covers serialization and state transitions.
- `crates/ecstore/src/set_disk.rs` contains lower-level utilities and regression tests for metadata helpers.
- Additional unit coverage is recommended for spill/append failure paths (e.g., injected rename failures).

### End-to-End Tests (`cargo test --package e2e_test append`)

- Inline append success, wrong position, precondition failures.
- Segmented append success, wrong position, wrong ETag.
- Spill threshold transition (`append_threshold_crossing_inline_to_segmented`).
- Pending segment streaming (`append_range_requests_across_segments`).
- Complete append consolidates pending segments.
- Abort append discards pending data and allows new append.
- Concurrency: two clients racing to append, followed by additional append + complete.

### Tooling Considerations

- `make clippy` must pass; the append code relies on async operations and custom logging.
- `make test` / `cargo nextest run` recommended before submitting PRs.
- Use `RUST_LOG=rustfs_ecstore=debug` when debugging append flows; targeted `info!`/`warn!` logs are emitted during spill/abort.

## Future Work

- Streamed consolidation in `complete_append_object` to avoid buffering entire logical object.
- Throttling or automatic `Complete` when pending segments exceed size/quantity thresholds.
- Stronger epoch exposure to clients (header-based conflict detection).
- Automated cleanup or garbage collection for orphaned `append/*` directories.

---

For questions or design discussions, drop a note in the append-write channel or ping the storage team.

85
docs/examples/README.md
Normal file
@@ -0,0 +1,85 @@

# RustFS Deployment Examples

This directory contains practical deployment examples and configurations for RustFS.

## Available Examples

### [MNMD (Multi-Node Multi-Drive)](./mnmd/)

Complete Docker Compose example for deploying RustFS in a 4-node, 4-drive-per-node configuration.

**Features:**

- Proper disk indexing (1..4) to avoid VolumeNotFound errors
- Startup coordination via `wait-and-start.sh` script
- Service discovery using Docker service names
- Health checks with alternatives for different base images
- Comprehensive documentation and verification checklist

**Use Case:** Production-ready multi-node deployment for high availability and performance.

**Quick Start:**

```bash
cd docs/examples/mnmd
docker-compose up -d
```

**See also:**

- [MNMD README](./mnmd/README.md) - Detailed usage guide
- [MNMD CHECKLIST](./mnmd/CHECKLIST.md) - Step-by-step verification

## Other Deployment Examples

For additional deployment examples, see:

- [`docker/`](./docker/) - Root-level examples directory with:
  - `docker-quickstart.sh` - Quick start script for basic deployments (basic/dev/prod/status/test/cleanup)
  - `enhanced-docker-deployment.sh` - Advanced deployment script with multiple scenarios and detailed logs (basic/dev/prod/all/status/test/logs/cleanup)
  - `enhanced-security-deployment.sh` - Production-ready scripts with TLS, throttling, and secure credential generation
  - `docker-comprehensive.yml` - Docker Compose file with multiple profiles (basic / dev / production / enterprise / api-only / nginx, etc.)
  - Usage example:

    ```bash
    # Rapid development environment
    ./docs/examples/docker/docker-quickstart.sh dev

    # Start the dev profile using Docker Compose
    docker-compose -f docs/examples/docker/docker-comprehensive.yml --profile dev up -d

    # Secure deployment
    ./docs/examples/docker/enhanced-security-deployment.sh
    ```

  - Note: If CI or other documents still refer to the old path `examples/`, please update it to `docs/examples/docker/`. Relative links within this README already point to the new location.

- [`.docker/compose/`](/.docker/compose/) - Docker Compose configurations:
  - `docker-compose.cluster.yaml` - Basic cluster setup
  - `docker-compose.observability.yaml` - Observability stack integration

## Related Documentation

- [Console & Endpoint Service Separation](../console-separation.md)
- [Environment Variables](../ENVIRONMENT_VARIABLES.md)
- [Performance Testing](../PERFORMANCE_TESTING.md)

## Contributing

When adding new examples:

1. Create a dedicated subdirectory under `docs/examples/`
2. Include a comprehensive README.md
3. Provide working configuration files
4. Add verification steps or checklists
5. Document common issues and troubleshooting

## Support

For issues or questions:

- GitHub Issues: https://github.com/rustfs/rustfs/issues
- Documentation: https://rustfs.com/docs

@@ -1,10 +1,12 @@

# RustFS Docker Deployment Examples

This directory contains various deployment scripts and configuration files for RustFS with console and endpoint service separation.
This directory contains various deployment scripts and configuration files for RustFS with console and endpoint service
separation.

## Quick Start Scripts

### `docker-quickstart.sh`

The fastest way to get RustFS running with different configurations.

```bash
@@ -28,6 +30,7 @@ The fastest way to get RustFS running with different configurations.
```

### `enhanced-docker-deployment.sh`

Comprehensive deployment script with multiple scenarios and detailed logging.

```bash
@@ -51,7 +54,9 @@ Comprehensive deployment script with multiple scenarios and detailed logging.
```

### `enhanced-security-deployment.sh`

Production-ready deployment with enhanced security features including TLS, rate limiting, and secure credential generation.
Production-ready deployment with enhanced security features including TLS, rate limiting, and secure credential
generation.

```bash
# Deploy with security hardening
@@ -68,6 +73,7 @@ Production-ready deployment with enhanced security features including TLS, rate

## Docker Compose Examples

### `docker-comprehensive.yml`

Complete Docker Compose configuration with multiple deployment profiles.

```bash
@@ -106,6 +112,7 @@ docker-compose -f docker-comprehensive.yml --profile dev up -d
```

**Access Points:**

- API: http://localhost:9010 (or 9030 for enhanced)
- Console: http://localhost:9011/rustfs/console/ (or 9031 for enhanced)
- Credentials: dev-admin / dev-secret
@@ -121,6 +128,7 @@ docker-compose -f docker-comprehensive.yml --profile dev up -d
```

**Features:**

- TLS encryption for console
- Rate limiting enabled
- Restricted CORS policies
@@ -194,13 +202,13 @@ curl http://localhost:9001/health

### Port Mappings

| Deployment | API Port | Console Port | Description |
|-----------|----------|--------------|-------------|
| Basic | 9000 | 9001 | Simple deployment |
| Dev | 9010 | 9011 | Development environment |
| Prod | 9020 | 9021 | Production-like setup |
| Enterprise | 9030 | 9443 | Enterprise with TLS |
| API-Only | 9040 | - | API endpoint only |

| Deployment | API Port | Console Port | Description              |
|------------|----------|--------------|--------------------------|
| Basic      | 9000     | 9001         | Simple deployment        |
| Dev        | 9010     | 9011         | Development environment  |
| Prod       | 9020     | 9021         | Production-like setup    |
| Enterprise | 9030     | 9443         | Enterprise with TLS      |
| API-Only   | 9040     | -            | API endpoint only        |

### Network Isolation

@@ -213,11 +221,13 @@ Production deployments use network isolation:

## Security Considerations

### Development

- Permissive CORS policies for easy testing
- Debug logging enabled
- Default credentials for simplicity

### Production

- Restrictive CORS policies
- TLS encryption for console
- Rate limiting enabled
@@ -226,6 +236,7 @@ Production deployments use network isolation:
- Network isolation

### Enterprise

- Complete TLS encryption
- Advanced rate limiting
- Authentication timeouts
@@ -260,11 +271,12 @@ docker exec rustfs-container netstat -tulpn | grep -E ':(9000|9001)'

## Migration from Previous Versions

See [docs/console-separation.md](../docs/console-separation.md) for detailed migration instructions from single-port deployments to the separated architecture.
See [docs/console-separation.md](../../console-separation.md) for detailed migration instructions from single-port
deployments to the separated architecture.

## Additional Resources

- [Console Separation Documentation](../docs/console-separation.md)
- [Docker Compose Configuration](../docker-compose.yml)
- [Main Dockerfile](../Dockerfile)
- [Security Best Practices](../docs/console-separation.md#security-hardening)
- [Console Separation Documentation](../../console-separation.md)
- [Docker Compose Configuration](../../../docker-compose.yml)
- [Main Dockerfile](../../../Dockerfile)
- [Security Best Practices](../../console-separation.md#security-hardening)