Mirror of https://github.com/rustfs/rustfs.git, synced 2026-01-17 09:40:32 +00:00

Compare commits: fix/window...fix/axum-t (58 commits)
| SHA1 |
|---|
| 0b3dfaa587 |
| 2fd57696de |
| ed4329d50c |
| 18b22eedd9 |
| 55e4cdec5d |
| bcc2831384 |
| e3ff5c073c |
| bc0f5292f3 |
| 5d617a0998 |
| dceb7aac8a |
| e3a7eb2d3d |
| b8a578c6cf |
| 1e683f12ef |
| 6a63fba5c2 |
| 5c4ade2a20 |
| 27293622eb |
| df502f2ac6 |
| 6f83f00bed |
| f6ffde0ec0 |
| b10cecb724 |
| e6a91fab05 |
| cb53ee13cd |
| 60d3374804 |
| 6928221b56 |
| 2d58eea702 |
| 8c00838398 |
| 109ca7a100 |
| 15e6d4dbd0 |
| 68c5c0b834 |
| 27480f7625 |
| f795299d53 |
| 650fae71fb |
| dc76e4472e |
| b5140f0098 |
| 5f2e594480 |
| bec51bb783 |
| 1fad8167af |
| f0da8ce216 |
| f9d3a908f0 |
| 29d86036b1 |
| 78b13f3ff2 |
| 91c613f2d7 |
| 01af6f2837 |
| 760cb1d734 |
| 6b2eebee1d |
| ddaa9e35ea |
| 66c6d4707e |
| 703d961168 |
| 7ac850d4ed |
| 9f060eae5e |
| 6d80190a10 |
| 805a567dfc |
| d72fef5ce6 |
| b78f0d0bf1 |
| 720919843d |
| bc676dd57c |
| 36db23b620 |
| 86f93abb13 |
.github/actions/setup/action.yml (vendored): 4 lines changed
@@ -52,6 +52,10 @@ runs:
         sudo apt-get install -y \
           musl-tools \
           build-essential \
+          cmake \
+          libclang-dev \
+          golang \
+          perl \
           pkg-config \
           libssl-dev
.github/dependabot.yml (vendored): 5 lines changed
@@ -26,6 +26,9 @@ updates:
       day: "monday"
       timezone: "Asia/Shanghai"
       time: "08:00"
+    ignore:
+      - dependency-name: "object_store"
+        versions: [ "0.13.x" ]
     groups:
       s3s:
         update-types:
@@ -36,4 +39,4 @@ updates:
           - "s3s-*"
       dependencies:
         patterns:
-          - "*"
+          - "*"
Cargo.lock (generated): 1152 lines changed (diff not shown: too large)
Cargo.toml: 61 lines changed
@@ -15,8 +15,10 @@
 [workspace]
 members = [
     "rustfs",                 # Core file system implementation
+    "crates/ahm",             # Asynchronous Hash Map for concurrent data structures
     "crates/appauth",         # Application authentication and authorization
+    "crates/audit",           # Audit target management system with multi-target fan-out
+    "crates/checksums",       # client checksums
     "crates/common",          # Shared utilities and data structures
     "crates/config",          # Configuration management
     "crates/credentials",     # Credential management system
@@ -25,8 +27,10 @@ members = [
     "crates/e2e_test",        # End-to-end test suite
     "crates/filemeta",        # File metadata management
     "crates/iam",             # Identity and Access Management
+    "crates/kms",             # Key Management Service
     "crates/lock",            # Distributed locking implementation
     "crates/madmin",          # Management dashboard and admin API interface
+    "crates/mcp",             # MCP server for S3 operations
     "crates/notify",          # Notification system for events
     "crates/obs",             # Observability utilities
     "crates/policy",          # Policy management
@@ -36,13 +40,10 @@ members = [
     "crates/s3select-api",    # S3 Select API interface
     "crates/s3select-query",  # S3 Select query engine
     "crates/signer",          # client signer
-    "crates/checksums",       # client checksums
     "crates/trusted-proxies", # Trusted proxies management
     "crates/utils",           # Utility functions and helpers
     "crates/workers",         # Worker thread pools and task scheduling
     "crates/zip",             # ZIP file handling and compression
-    "crates/ahm",             # Asynchronous Hash Map for concurrent data structures
-    "crates/mcp",             # MCP server for S3 operations
-    "crates/kms",             # Key Management Service
 ]
 resolver = "2"
@@ -89,6 +90,7 @@ rustfs-rio = { path = "crates/rio", version = "0.0.5" }
 rustfs-s3select-api = { path = "crates/s3select-api", version = "0.0.5" }
 rustfs-s3select-query = { path = "crates/s3select-query", version = "0.0.5" }
 rustfs-signer = { path = "crates/signer", version = "0.0.5" }
 rustfs-trusted-proxies = { path = "crates/trusted-proxies", version = "0.0.5" }
+rustfs-targets = { path = "crates/targets", version = "0.0.5" }
 rustfs-utils = { path = "crates/utils", version = "0.0.5" }
 rustfs-workers = { path = "crates/workers", version = "0.0.5" }
@@ -96,7 +98,7 @@ rustfs-zip = { path = "./crates/zip", version = "0.0.5" }

 # Async Runtime and Networking
 async-channel = "2.5.0"
-async-compression = { version = "0.4.19" }
+async-compression = { version = "0.4.37" }
 async-recursion = "1.1.1"
 async-trait = "0.1.89"
 axum = "0.8.8"
@@ -107,10 +109,11 @@ futures-util = "0.3.31"
 pollster = "0.4.0"
 hyper = { version = "1.8.1", features = ["http2", "http1", "server"] }
 hyper-rustls = { version = "0.27.7", default-features = false, features = ["native-tokio", "http1", "tls12", "logging", "http2", "aws-lc-rs", "webpki-roots"] }
-hyper-util = { version = "0.1.19", features = ["tokio", "server-auto", "server-graceful"] }
+hyper-util = { version = "0.1.19", features = ["tokio", "server-auto", "server-graceful", "tracing"] }
 http = "1.4.0"
 http-body = "1.0.1"
 http-body-util = "0.1.3"
+#reqwest = { version = "0.13.1", default-features = false, features = ["rustls", "charset", "http2", "system-proxy", "stream", "json", "blocking", "query", "form"] }
 reqwest = { version = "0.12.28", default-features = false, features = ["rustls-tls-no-provider", "charset", "http2", "system-proxy", "stream", "json", "blocking"] }
 socket2 = "0.6.1"
 tokio = { version = "1.49.0", features = ["fs", "rt-multi-thread"] }
@@ -121,7 +124,7 @@ tokio-util = { version = "0.7.18", features = ["io", "compat"] }
 tonic = { version = "0.14.2", features = ["gzip"] }
 tonic-prost = { version = "0.14.2" }
 tonic-prost-build = { version = "0.14.2" }
-tower = { version = "0.5.2", features = ["timeout"] }
+tower = { version = "0.5.3", features = ["timeout"] }
 tower-http = { version = "0.6.8", features = ["cors"] }

 # Serialization and Data Formats
@@ -130,8 +133,8 @@ bytesize = "2.3.1"
 byteorder = "1.5.0"
 flatbuffers = "25.12.19"
 form_urlencoded = "1.2.2"
-prost = "0.14.1"
-quick-xml = "0.38.4"
+prost = "0.14.3"
+quick-xml = "0.39.0"
 rmcp = { version = "0.12.0" }
 rmp = { version = "0.8.15" }
 rmp-serde = { version = "1.3.1" }
@@ -143,13 +146,13 @@ schemars = "1.2.0"
 # Cryptography and Security
 aes-gcm = { version = "0.11.0-rc.2", features = ["rand_core"] }
 argon2 = { version = "0.6.0-rc.5" }
-blake3 = { version = "1.8.2", features = ["rayon", "mmap"] }
+blake3 = { version = "1.8.3", features = ["rayon", "mmap"] }
 chacha20poly1305 = { version = "0.11.0-rc.2" }
-crc-fast = "1.6.0"
+crc-fast = "1.9.0"
 hmac = { version = "0.13.0-rc.3" }
-jsonwebtoken = { version = "10.2.0", features = ["rust_crypto"] }
-pbkdf2 = "0.13.0-rc.5"
-rsa = { version = "0.10.0-rc.11" }
+jsonwebtoken = { version = "10.2.0", features = ["aws_lc_rs"] }
+pbkdf2 = "0.13.0-rc.7"
+rsa = { version = "0.10.0-rc.12" }
 rustls = { version = "0.23.36", default-features = false, features = ["aws-lc-rs", "logging", "tls12", "prefer-post-quantum", "std"] }
 rustls-pemfile = "2.2.0"
 rustls-pki-types = "1.13.2"
@@ -159,9 +162,9 @@ subtle = "2.6"
 zeroize = { version = "1.8.2", features = ["derive"] }

 # Time and Date
-chrono = { version = "0.4.42", features = ["serde"] }
+chrono = { version = "0.4.43", features = ["serde"] }
 humantime = "2.3.0"
-time = { version = "0.3.44", features = ["std", "parsing", "formatting", "macros", "serde"] }
+time = { version = "0.3.45", features = ["std", "parsing", "formatting", "macros", "serde"] }

 # Utilities and Tools
 anyhow = "1.0.100"
@@ -172,7 +175,7 @@ atomic_enum = "0.3.0"
 aws-config = { version = "1.8.12" }
 aws-credential-types = { version = "1.2.11" }
 aws-sdk-s3 = { version = "1.119.0", default-features = false, features = ["sigv4a", "default-https-client", "rt-tokio"] }
-aws-smithy-types = { version = "1.3.5" }
+aws-smithy-types = { version = "1.3.6" }
 base64 = "0.22.1"
 base64-simd = "0.8.0"
 brotli = "8.0.2"
@@ -182,25 +185,25 @@ const-str = { version = "1.0.0", features = ["std", "proc"] }
 convert_case = "0.10.0"
 criterion = { version = "0.8", features = ["html_reports"] }
 crossbeam-queue = "0.3.12"
-datafusion = "51.0.0"
+datafusion = "52.0.0"
 derive_builder = "0.20.2"
 dunce = "1.0.5"
 enumset = "1.1.10"
 faster-hex = "0.10.0"
-flate2 = "1.1.5"
+flate2 = "1.1.8"
 flexi_logger = { version = "0.31.7", features = ["trc", "dont_minimize_extra_stacks", "compress", "kv", "json"] }
 glob = "0.3.3"
-google-cloud-storage = "1.5.0"
-google-cloud-auth = "1.3.0"
+google-cloud-storage = "1.6.0"
+google-cloud-auth = "1.4.0"
 hashbrown = { version = "0.16.1", features = ["serde", "rayon"] }
 heed = { version = "0.22.0" }
 hex-simd = "0.8.0"
 highway = { version = "1.3.0" }
 ipnetwork = { version = "0.21.1", features = ["serde"] }
 lazy_static = "1.5.0"
-libc = "0.2.179"
+libc = "0.2.180"
 libsystemd = "0.7.2"
-local-ip-address = "0.6.8"
+local-ip-address = "0.6.9"
 lz4 = "1.28.1"
 matchit = "0.9.1"
 md-5 = "0.11.0-rc.3"
@@ -223,9 +226,9 @@ rayon = "1.11.0"
 reed-solomon-simd = { version = "3.1.0" }
 regex = { version = "1.12.2" }
 rumqttc = { version = "0.25.1" }
-rust-embed = { version = "8.9.0" }
+rust-embed = { version = "8.11.0" }
 rustc-hash = { version = "2.1.1" }
-s3s = { version = "0.13.0-alpha", features = ["minio"], git = "https://github.com/s3s-project/s3s.git", branch = "main" }
+s3s = { version = "0.13.0-alpha.2", features = ["minio"] }
 serial_test = "3.3.1"
 shadow-rs = { version = "1.5.0", default-features = false }
 siphasher = "1.0.1"
@@ -243,7 +246,7 @@ thiserror = "2.0.17"
 tracing = { version = "0.1.44" }
 tracing-appender = "0.2.4"
 tracing-error = "0.2.1"
-tracing-opentelemetry = "0.32.0"
+tracing-opentelemetry = "0.32.1"
 tracing-subscriber = { version = "0.3.22", features = ["env-filter", "time"] }
 transform-stream = "0.3.1"
 url = "2.5.8"
@@ -254,7 +257,7 @@ walkdir = "2.5.0"
 wildmatch = { version = "2.6.1", features = ["serde"] }
 windows = { version = "0.62.2" }
 xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }
-zip = "7.0.0"
+zip = "7.1.0"
 zstd = "0.13.3"

 # Observability and Metrics
@@ -270,8 +273,8 @@ opentelemetry-stdout = { version = "0.31.0" }
 libunftp = "0.21.0"
 russh = { version = "0.56.0", features = ["aws-lc-rs", "rsa"], default-features = false }
 russh-sftp = "2.1.1"
-ssh-key = { version = "0.7.0-rc.4", features = ["std", "rsa", "ed25519"] }
-suppaftp = { version = "7.0.7", features = ["tokio", "tokio-rustls", "rustls"] }
+ssh-key = { version = "0.7.0-rc.6", features = ["std", "rsa", "ed25519"] }
+suppaftp = { version = "7.1.0", features = ["tokio", "tokio-rustls", "rustls"] }
 rcgen = "0.14.6"

 # Performance Analysis and Memory Profiling
@@ -35,6 +35,10 @@ RUN set -eux; \
        ca-certificates \
        curl \
        git \
+       cmake \
+       libclang-dev \
+       golang \
+       perl \
        pkg-config \
        libssl-dev \
        lld \
@@ -175,7 +175,7 @@ make help-docker   # Show all Docker-related commands

### Accessing RustFS

5. **Access the console**: Open a browser and visit `http://localhost:9000` to reach the RustFS console.
-   * Default account/password: `rustfsadmin` / `rustfsadmin`
+   * Default account/password：`rustfsadmin` / `rustfsadmin`
6. **Create a bucket**: Use the console to create a new bucket for your objects.
7. **Upload objects**: Upload files directly through the console, or interact with your RustFS instance using any S3-compatible API or client.
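For programmatic access with those defaults, a minimal sketch of the client setup with the AWS Rust SDK is shown below; the endpoint and credentials are the development defaults above, and the builder options mirror the e2e test helpers later in this diff.

```rust
// Minimal sketch: connect to a local RustFS instance with the AWS Rust SDK,
// using the development defaults above (http://localhost:9000, rustfsadmin).
use aws_sdk_s3::config::{Credentials, Region};
use aws_sdk_s3::{Client, Config};

fn local_rustfs_client() -> Client {
    let credentials = Credentials::new("rustfsadmin", "rustfsadmin", None, None, "local-dev");
    let config = Config::builder()
        .credentials_provider(credentials)
        .region(Region::new("us-east-1"))
        .endpoint_url("http://localhost:9000")
        .force_path_style(true) // path-style addressing, as the tests below use
        .behavior_version_latest()
        .build();
    Client::from_conf(config)
}
```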
@@ -96,6 +96,11 @@ pub enum Metric {
     ApplyNonCurrent,
     HealAbandonedVersion,

+    // Quota metrics:
+    QuotaCheck,
+    QuotaViolation,
+    QuotaSync,
+
     // START Trace metrics:
     StartTrace,
     ScanObject, // Scan object. All operations included.
@@ -131,6 +136,9 @@ impl Metric {
             Self::CleanAbandoned => "clean_abandoned",
             Self::ApplyNonCurrent => "apply_non_current",
             Self::HealAbandonedVersion => "heal_abandoned_version",
+            Self::QuotaCheck => "quota_check",
+            Self::QuotaViolation => "quota_violation",
+            Self::QuotaSync => "quota_sync",
             Self::StartTrace => "start_trace",
             Self::ScanObject => "scan_object",
             Self::HealAbandonedObject => "heal_abandoned_object",
@@ -163,15 +171,18 @@ impl Metric {
             10 => Some(Self::CleanAbandoned),
             11 => Some(Self::ApplyNonCurrent),
             12 => Some(Self::HealAbandonedVersion),
-            13 => Some(Self::StartTrace),
-            14 => Some(Self::ScanObject),
-            15 => Some(Self::HealAbandonedObject),
-            16 => Some(Self::LastRealtime),
-            17 => Some(Self::ScanFolder),
-            18 => Some(Self::ScanCycle),
-            19 => Some(Self::ScanBucketDrive),
-            20 => Some(Self::CompactFolder),
-            21 => Some(Self::Last),
+            13 => Some(Self::QuotaCheck),
+            14 => Some(Self::QuotaViolation),
+            15 => Some(Self::QuotaSync),
+            16 => Some(Self::StartTrace),
+            17 => Some(Self::ScanObject),
+            18 => Some(Self::HealAbandonedObject),
+            19 => Some(Self::LastRealtime),
+            20 => Some(Self::ScanFolder),
+            21 => Some(Self::ScanCycle),
+            22 => Some(Self::ScanBucketDrive),
+            23 => Some(Self::CompactFolder),
+            24 => Some(Self::Last),
             _ => None,
         }
     }
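Because the numeric decoding above is positional, inserting the three quota variants shifts every later index up by three; anything that persists or exchanges raw metric indices must agree on both sides of an upgrade. A minimal sketch, assuming the decoder shown above is exposed as `Metric::from_index` (the function name is not visible in this hunk):

```rust
// Sketch, assuming `Metric::from_index(u8) -> Option<Metric>` is the decoder above.
// After this change, index 13 decodes to QuotaCheck rather than StartTrace:
fn quota_index_shift() {
    assert!(matches!(Metric::from_index(13), Some(Metric::QuotaCheck))); // was StartTrace
    assert!(matches!(Metric::from_index(16), Some(Metric::StartTrace))); // shifted 13 -> 16
    assert!(matches!(Metric::from_index(24), Some(Metric::Last)));       // was 21
}
```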
@@ -21,6 +21,8 @@ pub(crate) mod heal;
 pub(crate) mod object;
 pub(crate) mod profiler;
 pub(crate) mod protocols;
+pub(crate) mod proxy;
+pub(crate) mod quota;
 pub(crate) mod runtime;
 pub(crate) mod targets;
 pub(crate) mod tls;
crates/config/src/constants/proxy.rs (new file): 28 lines
@@ -0,0 +1,28 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/// RUSTFS_HTTP_TRUSTED_PROXIES
/// Environment variable name for the trusted proxies configuration.
/// Example: RUSTFS_HTTP_TRUSTED_PROXIES="127.0.0.1,::1,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,fc00::/7"
/// If not set, defaults to local loopback and common private networks.
/// Used when loading the proxy configuration.
/// Refer to `TrustedProxiesConfig` for details.
pub const ENV_TRUSTED_PROXIES: &str = "RUSTFS_HTTP_TRUSTED_PROXIES";

/// Default trusted proxies: local loopback and common private networks.
/// Used when the environment variable is not set.
/// Format: comma-separated list of IPs and CIDR blocks.
/// Refer to `TrustedProxiesConfig` for details.
pub const DEFAULT_TRUSTED_PROXIES: &str = "127.0.0.1,::1,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,fc00::/7";
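A minimal sketch of how a consumer might resolve the list from these constants; the `parse_proxies` helper and the use of the `ipnetwork` crate (already a workspace dependency) are assumptions, not the crate's actual loader:

```rust
// Hypothetical helper: read the env var, fall back to the default, and parse
// each comma-separated entry as an IP or CIDR block.
use ipnetwork::IpNetwork;

fn parse_proxies() -> Vec<IpNetwork> {
    let raw = std::env::var(ENV_TRUSTED_PROXIES).unwrap_or_else(|_| DEFAULT_TRUSTED_PROXIES.to_string());
    raw.split(',')
        .filter_map(|s| s.trim().parse::<IpNetwork>().ok()) // "127.0.0.1" parses as a host network
        .collect()
}
```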
crates/config/src/constants/quota.rs (new file): 26 lines
@@ -0,0 +1,26 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub const QUOTA_CONFIG_FILE: &str = "quota.json";
pub const QUOTA_TYPE_HARD: &str = "HARD";

pub const QUOTA_EXCEEDED_ERROR_CODE: &str = "XRustfsQuotaExceeded";
pub const QUOTA_INVALID_CONFIG_ERROR_CODE: &str = "InvalidArgument";
pub const QUOTA_NOT_FOUND_ERROR_CODE: &str = "NoSuchBucket";
pub const QUOTA_INTERNAL_ERROR_CODE: &str = "InternalError";

pub const QUOTA_API_PATH: &str = "/rustfs/admin/v3/quota/{bucket}";

pub const QUOTA_INVALID_TYPE_ERROR_MSG: &str = "Only HARD quota type is supported";
pub const QUOTA_METADATA_SYSTEM_ERROR_MSG: &str = "Bucket metadata system not initialized";
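The endpoint template uses a `{bucket}` placeholder, so a caller can instantiate it with a simple replace. A sketch (the `quota_url` helper and `base` argument are illustrative, matching the URLs used by the e2e tests later in this diff):

```rust
// Hypothetical helper: expand QUOTA_API_PATH ("/rustfs/admin/v3/quota/{bucket}")
// against a server base URL.
fn quota_url(base: &str, bucket: &str) -> String {
    format!("{}{}", base, QUOTA_API_PATH.replace("{bucket}", bucket))
}

// quota_url("http://localhost:9000", "photos")
//   => "http://localhost:9000/rustfs/admin/v3/quota/photos"
```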
@@ -33,6 +33,10 @@ pub use constants::profiler::*;
 #[cfg(feature = "constants")]
 pub use constants::protocols::*;
+#[cfg(feature = "constants")]
+pub use constants::proxy::*;
+#[cfg(feature = "constants")]
+pub use constants::quota::*;
 #[cfg(feature = "constants")]
 pub use constants::runtime::*;
 #[cfg(feature = "constants")]
 pub use constants::targets::*;
@@ -38,13 +38,28 @@ impl TryFrom<u8> for ID {

 impl ID {
     pub(crate) fn get_key(&self, password: &[u8], salt: &[u8]) -> Result<[u8; 32], crate::Error> {
+        // Validate inputs for security
+        // if password.is_empty() {
+        //     return Err(crate::Error::ErrInvalidInput("Password cannot be empty".to_string()));
+        // }
+        // if salt.len() < 16 {
+        //     return Err(crate::Error::ErrInvalidInput("Salt must be at least 16 bytes".to_string()));
+        // }
+
         let mut key = [0u8; 32];
         match self {
-            ID::Pbkdf2AESGCM => pbkdf2_hmac::<Sha256>(password, salt, 8192, &mut key),
-            _ => {
-                let params = Params::new(64 * 1024, 1, 4, Some(32))?;
-                let argon_2id = Argon2::new(Algorithm::Argon2id, Version::V0x13, params);
-                argon_2id.hash_password_into(password, salt, &mut key)?;
+            ID::Pbkdf2AESGCM => {
+                pbkdf2_hmac::<Sha256>(password, salt, 8192, &mut key);
+            }
+            ID::Argon2idAESGCM | ID::Argon2idChaCHa20Poly1305 => {
+                const ARGON2_MEMORY: u32 = 64 * 1024;
+                const ARGON2_ITERATIONS: u32 = 1;
+                const ARGON2_PARALLELISM: u32 = 4;
+                const ARGON2_OUTPUT_LEN: usize = 32;
+
+                let params = Params::new(ARGON2_MEMORY, ARGON2_ITERATIONS, ARGON2_PARALLELISM, Some(ARGON2_OUTPUT_LEN))?;
+                let argon2 = Argon2::new(Algorithm::Argon2id, Version::V0x13, params);
+                argon2.hash_password_into(password, salt, &mut key)?;
+            }
         }
     }
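A standalone sketch of the two derivations above, using the pbkdf2 and argon2 crates directly (the enum and `get_key` are crate-private, so this is illustrative only). Parameter values mirror the diff: PBKDF2-HMAC-SHA256 with 8192 iterations, or Argon2id with 64 MiB memory, 1 iteration, parallelism 4, and a 32-byte output:

```rust
use argon2::{Algorithm, Argon2, Params, Version};
use pbkdf2::pbkdf2_hmac;
use sha2::Sha256;

// Derive a 32-byte key both ways, with the same parameters as get_key above.
fn derive_both(password: &[u8], salt: &[u8]) -> ([u8; 32], [u8; 32]) {
    let mut k1 = [0u8; 32];
    pbkdf2_hmac::<Sha256>(password, salt, 8192, &mut k1);

    let mut k2 = [0u8; 32];
    let params = Params::new(64 * 1024, 1, 4, Some(32)).expect("valid Argon2 params");
    Argon2::new(Algorithm::Argon2id, Version::V0x13, params)
        .hash_password_into(password, salt, &mut k2)
        .expect("argon2 derivation");
    (k1, k2)
}
```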
@@ -106,7 +106,7 @@ fn test_encrypt_decrypt_binary_data() -> Result<(), crate::Error> {
 #[test]
 fn test_encrypt_decrypt_unicode_data() -> Result<(), crate::Error> {
     let unicode_strings = [
-        "Hello, 世界! 🌍",
+        "Hello, 世界!🌍",
         "Тест на русском языке",
         "العربية اختبار",
         "🚀🔐💻🌟⭐",
@@ -20,6 +20,12 @@ pub enum Error {
     #[error("invalid encryption algorithm ID: {0}")]
     ErrInvalidAlgID(u8),

+    #[error("invalid input: {0}")]
+    ErrInvalidInput(String),
+
+    #[error("invalid key length")]
+    ErrInvalidKeyLength,
+
     #[cfg(any(test, feature = "crypto"))]
     #[error("{0}")]
     ErrInvalidLength(#[from] sha2::digest::InvalidLength),
@@ -38,4 +44,13 @@ pub enum Error {

     #[error("jwt err: {0}")]
     ErrJwt(#[from] jsonwebtoken::errors::Error),
+
+    #[error("io error: {0}")]
+    ErrIo(#[from] std::io::Error),
+
+    #[error("invalid signature")]
+    ErrInvalidSignature,
+
+    #[error("invalid token")]
+    ErrInvalidToken,
 }
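The `#[from]` conversions added above let `?` lift lower-level errors into this enum automatically. A minimal sketch, assuming the enum is in scope as `Error` (the helper itself is hypothetical):

```rust
// Hypothetical helper: std::io::Error converts to Error::ErrIo via #[from],
// and ErrInvalidInput carries a descriptive message.
fn read_key_material(path: &str) -> Result<Vec<u8>, Error> {
    let bytes = std::fs::read(path)?; // io::Error -> Error::ErrIo
    if bytes.is_empty() {
        return Err(Error::ErrInvalidInput("empty key file".to_string()));
    }
    Ok(bytes)
}
```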
crates/e2e_test/src/bucket_policy_check_test.rs (new file): 155 lines
@@ -0,0 +1,155 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Regression test for Issue #1423
//! Verifies that bucket policies are honored for authenticated users.

use crate::common::{RustFSTestEnvironment, init_logging};
use aws_sdk_s3::config::{Credentials, Region};
use aws_sdk_s3::{Client, Config};
use serial_test::serial;
use tracing::info;

async fn create_user(
    env: &RustFSTestEnvironment,
    username: &str,
    password: &str,
) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let create_user_body = serde_json::json!({
        "secretKey": password,
        "status": "enabled"
    })
    .to_string();

    let create_user_url = format!("{}/rustfs/admin/v3/add-user?accessKey={}", env.url, username);
    crate::common::awscurl_put(&create_user_url, &create_user_body, &env.access_key, &env.secret_key).await?;
    Ok(())
}

fn create_user_client(env: &RustFSTestEnvironment, access_key: &str, secret_key: &str) -> Client {
    let credentials = Credentials::new(access_key, secret_key, None, None, "test-user");
    let config = Config::builder()
        .credentials_provider(credentials)
        .region(Region::new("us-east-1"))
        .endpoint_url(&env.url)
        .force_path_style(true)
        .behavior_version_latest()
        .build();

    Client::from_conf(config)
}

#[tokio::test]
#[serial]
async fn test_bucket_policy_authenticated_user() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    init_logging();
    info!("Starting test_bucket_policy_authenticated_user...");

    let mut env = RustFSTestEnvironment::new().await?;
    env.start_rustfs_server(vec![]).await?;

    let admin_client = env.create_s3_client();
    let bucket_name = "bucket-policy-auth-test";
    let object_key = "test-object.txt";
    let user_access = "testuser";
    let user_secret = "testpassword";

    // 1. Create bucket (admin)
    admin_client.create_bucket().bucket(bucket_name).send().await?;

    // 2. Create user (admin API)
    create_user(&env, user_access, user_secret).await?;

    // 3. Create user client
    let user_client = create_user_client(&env, user_access, user_secret);

    // 4. Verify access is denied initially (no policy)
    let result = user_client.list_objects_v2().bucket(bucket_name).send().await;
    if result.is_ok() {
        return Err("Should be Access Denied initially".into());
    }

    // 5. Apply a bucket policy that allows the user
    let policy_json = serde_json::json!({
        "Version": "2012-10-17",
        "Statement": [
            {
                "Sid": "AllowTestUser",
                "Effect": "Allow",
                "Principal": {
                    "AWS": [user_access]
                },
                "Action": [
                    "s3:ListBucket",
                    "s3:GetObject",
                    "s3:PutObject",
                    "s3:DeleteObject"
                ],
                "Resource": [
                    format!("arn:aws:s3:::{}", bucket_name),
                    format!("arn:aws:s3:::{}/*", bucket_name)
                ]
            }
        ]
    })
    .to_string();

    admin_client
        .put_bucket_policy()
        .bucket(bucket_name)
        .policy(&policy_json)
        .send()
        .await?;

    // 6. Verify access is allowed (with bucket policy)
    info!("Verifying PutObject...");
    user_client
        .put_object()
        .bucket(bucket_name)
        .key(object_key)
        .body(aws_sdk_s3::primitives::ByteStream::from_static(b"hello world"))
        .send()
        .await
        .map_err(|e| format!("PutObject failed: {}", e))?;

    info!("Verifying ListObjects...");
    let list_res = user_client
        .list_objects_v2()
        .bucket(bucket_name)
        .send()
        .await
        .map_err(|e| format!("ListObjects failed: {}", e))?;
    assert_eq!(list_res.contents().len(), 1);

    info!("Verifying GetObject...");
    user_client
        .get_object()
        .bucket(bucket_name)
        .key(object_key)
        .send()
        .await
        .map_err(|e| format!("GetObject failed: {}", e))?;

    info!("Verifying DeleteObject...");
    user_client
        .delete_object()
        .bucket(bucket_name)
        .key(object_key)
        .send()
        .await
        .map_err(|e| format!("DeleteObject failed: {}", e))?;

    info!("Test passed!");
    Ok(())
}
@@ -176,12 +176,14 @@ impl RustFSTestEnvironment {
     /// Kill any existing RustFS processes
     pub async fn cleanup_existing_processes(&self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
         info!("Cleaning up any existing RustFS processes");
-        let output = Command::new("pkill").args(["-f", "rustfs"]).output();
+        let binary_path = rustfs_binary_path();
+        let binary_name = binary_path.to_string_lossy();
+        let output = Command::new("pkill").args(["-f", &binary_name]).output();

         if let Ok(output) = output
             && output.status.success()
         {
-            info!("Killed existing RustFS processes");
+            info!("Killed existing RustFS processes: {}", binary_name);
             sleep(Duration::from_millis(1000)).await;
         }
         Ok(())
@@ -363,3 +365,12 @@ pub async fn awscurl_put(
 ) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
     execute_awscurl(url, "PUT", Some(body), access_key, secret_key).await
 }
+
+/// Helper function for DELETE requests
+pub async fn awscurl_delete(
+    url: &str,
+    access_key: &str,
+    secret_key: &str,
+) -> Result<String, Box<dyn std::error::Error + Send + Sync>> {
+    execute_awscurl(url, "DELETE", None, access_key, secret_key).await
+}
@@ -29,6 +29,13 @@ mod data_usage_test;
 #[cfg(test)]
 mod kms;

+// Quota tests
+#[cfg(test)]
+mod quota_test;
+
+#[cfg(test)]
+mod bucket_policy_check_test;
+
 // Special characters in path test modules
 #[cfg(test)]
 mod special_chars_test;
crates/e2e_test/src/quota_test.rs (new file): 798 lines
@@ -0,0 +1,798 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::common::{RustFSTestEnvironment, awscurl_delete, awscurl_get, awscurl_post, awscurl_put, init_logging};
use aws_sdk_s3::Client;
use serial_test::serial;
use tracing::{debug, info};

/// Test environment setup for quota tests
pub struct QuotaTestEnv {
    pub env: RustFSTestEnvironment,
    pub client: Client,
    pub bucket_name: String,
}

impl QuotaTestEnv {
    pub async fn new() -> Result<Self, Box<dyn std::error::Error + Send + Sync>> {
        let bucket_name = format!("quota-test-{}", uuid::Uuid::new_v4());
        let mut env = RustFSTestEnvironment::new().await?;
        env.start_rustfs_server(vec![]).await?;
        let client = env.create_s3_client();

        Ok(Self {
            env,
            client,
            bucket_name,
        })
    }

    pub async fn create_bucket(&self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        self.env.create_test_bucket(&self.bucket_name).await?;
        Ok(())
    }

    pub async fn cleanup_bucket(&self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        let objects = self.client.list_objects_v2().bucket(&self.bucket_name).send().await?;
        for object in objects.contents() {
            self.client
                .delete_object()
                .bucket(&self.bucket_name)
                .key(object.key().unwrap_or_default())
                .send()
                .await?;
        }
        self.env.delete_test_bucket(&self.bucket_name).await?;
        Ok(())
    }

    pub async fn set_bucket_quota(&self, quota_bytes: u64) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        let url = format!("{}/rustfs/admin/v3/quota/{}", self.env.url, self.bucket_name);
        let quota_config = serde_json::json!({
            "quota": quota_bytes,
            "quota_type": "HARD"
        });

        let response = awscurl_put(&url, &quota_config.to_string(), &self.env.access_key, &self.env.secret_key).await?;
        if response.contains("error") {
            Err(format!("Failed to set quota: {}", response).into())
        } else {
            Ok(())
        }
    }

    pub async fn get_bucket_quota(&self) -> Result<Option<u64>, Box<dyn std::error::Error + Send + Sync>> {
        let url = format!("{}/rustfs/admin/v3/quota/{}", self.env.url, self.bucket_name);
        let response = awscurl_get(&url, &self.env.access_key, &self.env.secret_key).await?;

        if response.contains("error") {
            Err(format!("Failed to get quota: {}", response).into())
        } else {
            let quota_info: serde_json::Value = serde_json::from_str(&response)?;
            Ok(quota_info.get("quota").and_then(|v| v.as_u64()))
        }
    }

    pub async fn clear_bucket_quota(&self) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        let url = format!("{}/rustfs/admin/v3/quota/{}", self.env.url, self.bucket_name);
        let response = awscurl_delete(&url, &self.env.access_key, &self.env.secret_key).await?;

        if response.contains("error") {
            Err(format!("Failed to clear quota: {}", response).into())
        } else {
            Ok(())
        }
    }

    pub async fn get_bucket_quota_stats(&self) -> Result<serde_json::Value, Box<dyn std::error::Error + Send + Sync>> {
        let url = format!("{}/rustfs/admin/v3/quota-stats/{}", self.env.url, self.bucket_name);
        let response = awscurl_get(&url, &self.env.access_key, &self.env.secret_key).await?;

        if response.contains("error") {
            Err(format!("Failed to get quota stats: {}", response).into())
        } else {
            Ok(serde_json::from_str(&response)?)
        }
    }

    pub async fn check_bucket_quota(
        &self,
        operation_type: &str,
        operation_size: u64,
    ) -> Result<serde_json::Value, Box<dyn std::error::Error + Send + Sync>> {
        let url = format!("{}/rustfs/admin/v3/quota-check/{}", self.env.url, self.bucket_name);
        let check_request = serde_json::json!({
            "operation_type": operation_type,
            "operation_size": operation_size
        });

        let response = awscurl_post(&url, &check_request.to_string(), &self.env.access_key, &self.env.secret_key).await?;

        if response.contains("error") {
            Err(format!("Failed to check quota: {}", response).into())
        } else {
            Ok(serde_json::from_str(&response)?)
        }
    }

    pub async fn upload_object(&self, key: &str, size_bytes: usize) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        let data = vec![0u8; size_bytes];
        self.client
            .put_object()
            .bucket(&self.bucket_name)
            .key(key)
            .body(aws_sdk_s3::primitives::ByteStream::from(data))
            .send()
            .await?;
        Ok(())
    }

    pub async fn object_exists(&self, key: &str) -> Result<bool, Box<dyn std::error::Error + Send + Sync>> {
        match self.client.head_object().bucket(&self.bucket_name).key(key).send().await {
            Ok(_) => Ok(true),
            Err(e) => {
                // Check for any 404-related errors and return false instead of propagating
                let error_str = e.to_string();
                if error_str.contains("404") || error_str.contains("Not Found") || error_str.contains("NotFound") {
                    Ok(false)
                } else {
                    // Also check the error code directly
                    if let Some(service_err) = e.as_service_error()
                        && service_err.is_not_found()
                    {
                        return Ok(false);
                    }
                    Err(e.into())
                }
            }
        }
    }

    pub async fn get_bucket_usage(&self) -> Result<u64, Box<dyn std::error::Error + Send + Sync>> {
        let stats = self.get_bucket_quota_stats().await?;
        Ok(stats.get("current_usage").and_then(|v| v.as_u64()).unwrap_or(0))
    }

    pub async fn set_bucket_quota_for(
        &self,
        bucket: &str,
        quota_bytes: u64,
    ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        let url = format!("{}/rustfs/admin/v3/quota/{}", self.env.url, bucket);
        let quota_config = serde_json::json!({
            "quota": quota_bytes,
            "quota_type": "HARD"
        });

        let response = awscurl_put(&url, &quota_config.to_string(), &self.env.access_key, &self.env.secret_key).await?;
        if response.contains("error") {
            Err(format!("Failed to set quota: {}", response).into())
        } else {
            Ok(())
        }
    }

    /// Get bucket quota statistics for a specific bucket
    pub async fn get_bucket_quota_stats_for(
        &self,
        bucket: &str,
    ) -> Result<serde_json::Value, Box<dyn std::error::Error + Send + Sync>> {
        debug!("Getting quota stats for bucket: {}", bucket);

        let url = format!("{}/rustfs/admin/v3/quota-stats/{}", self.env.url, bucket);
        let response = awscurl_get(&url, &self.env.access_key, &self.env.secret_key).await?;

        if response.contains("error") {
            Err(format!("Failed to get quota stats: {}", response).into())
        } else {
            let stats: serde_json::Value = serde_json::from_str(&response)?;
            Ok(stats)
        }
    }

    /// Upload an object to a specific bucket
    pub async fn upload_object_to_bucket(
        &self,
        bucket: &str,
        key: &str,
        size_bytes: usize,
    ) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        debug!("Uploading object {} with size {} bytes to bucket {}", key, size_bytes, bucket);

        let data = vec![0u8; size_bytes];

        self.client
            .put_object()
            .bucket(bucket)
            .key(key)
            .body(aws_sdk_s3::primitives::ByteStream::from(data))
            .send()
            .await?;

        info!("Successfully uploaded object: {} ({} bytes) to bucket: {}", key, size_bytes, bucket);
        Ok(())
    }
}

#[cfg(test)]
mod integration_tests {
    use super::*;

    #[tokio::test]
    #[serial]
    async fn test_quota_basic_operations() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        init_logging();
        let env = QuotaTestEnv::new().await?;

        // Create test bucket
        env.create_bucket().await?;

        // Set quota of 1MB
        env.set_bucket_quota(1024 * 1024).await?;

        // Verify quota is set
        let quota = env.get_bucket_quota().await?;
        assert_eq!(quota, Some(1024 * 1024));

        // Upload a 512KB object (should succeed)
        env.upload_object("test1.txt", 512 * 1024).await?;
        assert!(env.object_exists("test1.txt").await?);

        // Upload another 512KB object (should succeed, total 1MB)
        env.upload_object("test2.txt", 512 * 1024).await?;
        assert!(env.object_exists("test2.txt").await?);

        // Try to upload 1KB more (should fail due to quota)
        let upload_result = env.upload_object("test3.txt", 1024).await;
        assert!(upload_result.is_err());
        assert!(!env.object_exists("test3.txt").await?);

        // Clean up
        env.clear_bucket_quota().await?;
        env.cleanup_bucket().await?;

        Ok(())
    }

    #[tokio::test]
    #[serial]
    async fn test_quota_update_and_clear() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        init_logging();
        let env = QuotaTestEnv::new().await?;

        env.create_bucket().await?;

        // Set initial quota
        env.set_bucket_quota(512 * 1024).await?;
        assert_eq!(env.get_bucket_quota().await?, Some(512 * 1024));

        // Update quota to a larger size
        env.set_bucket_quota(2 * 1024 * 1024).await?;
        assert_eq!(env.get_bucket_quota().await?, Some(2 * 1024 * 1024));

        // Upload 1MB object (should succeed with new quota)
        env.upload_object("large_file.txt", 1024 * 1024).await?;
        assert!(env.object_exists("large_file.txt").await?);

        // Clear quota
        env.clear_bucket_quota().await?;
        assert_eq!(env.get_bucket_quota().await?, None);

        // Upload another large object (should succeed with no quota)
        env.upload_object("unlimited_file.txt", 5 * 1024 * 1024).await?;
        assert!(env.object_exists("unlimited_file.txt").await?);

        env.cleanup_bucket().await?;

        Ok(())
    }

    #[tokio::test]
    #[serial]
    async fn test_quota_delete_operations() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        init_logging();
        let env = QuotaTestEnv::new().await?;

        env.create_bucket().await?;

        // Set quota of 1MB
        env.set_bucket_quota(1024 * 1024).await?;

        // Fill up to quota limit
        env.upload_object("file1.txt", 512 * 1024).await?;
        env.upload_object("file2.txt", 512 * 1024).await?;

        // Delete one file
        env.client
            .delete_object()
            .bucket(&env.bucket_name)
            .key("file1.txt")
            .send()
            .await?;

        assert!(!env.object_exists("file1.txt").await?);

        // Now we should be able to upload again (quota freed up)
        env.upload_object("file3.txt", 256 * 1024).await?;
        assert!(env.object_exists("file3.txt").await?);

        env.cleanup_bucket().await?;

        Ok(())
    }

    #[tokio::test]
    #[serial]
    async fn test_quota_usage_tracking() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        init_logging();
        let env = QuotaTestEnv::new().await?;

        env.create_bucket().await?;

        // Set quota
        env.set_bucket_quota(2 * 1024 * 1024).await?;

        // Upload some files
        env.upload_object("file1.txt", 512 * 1024).await?;
        env.upload_object("file2.txt", 256 * 1024).await?;

        // Check usage
        let usage = env.get_bucket_usage().await?;
        assert_eq!(usage, (512 + 256) * 1024);

        // Delete a file
        env.client
            .delete_object()
            .bucket(&env.bucket_name)
            .key("file1.txt")
            .send()
            .await?;

        // Check updated usage
        let updated_usage = env.get_bucket_usage().await?;
        assert_eq!(updated_usage, 256 * 1024);

        env.cleanup_bucket().await?;

        Ok(())
    }

    #[tokio::test]
    #[serial]
    async fn test_quota_statistics() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        init_logging();
        let env = QuotaTestEnv::new().await?;

        env.create_bucket().await?;

        // Set quota of 2MB
        env.set_bucket_quota(2 * 1024 * 1024).await?;

        // Upload files to use 1.5MB
        env.upload_object("file1.txt", 1024 * 1024).await?;
        env.upload_object("file2.txt", 512 * 1024).await?;

        // Get detailed quota statistics
        let stats = env.get_bucket_quota_stats().await?;

        assert_eq!(stats.get("bucket").unwrap().as_str().unwrap(), env.bucket_name);
        assert_eq!(stats.get("quota_limit").unwrap().as_u64().unwrap(), 2 * 1024 * 1024);
        assert_eq!(stats.get("current_usage").unwrap().as_u64().unwrap(), (1024 + 512) * 1024);
        assert_eq!(stats.get("remaining_quota").unwrap().as_u64().unwrap(), 512 * 1024);

        let usage_percentage = stats.get("usage_percentage").unwrap().as_f64().unwrap();
        assert!((usage_percentage - 75.0).abs() < 0.1);

        env.cleanup_bucket().await?;

        Ok(())
    }

    #[tokio::test]
    #[serial]
    async fn test_quota_check_api() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        init_logging();
        let env = QuotaTestEnv::new().await?;

        env.create_bucket().await?;

        // Set quota of 1MB
        env.set_bucket_quota(1024 * 1024).await?;

        // Upload 512KB file
        env.upload_object("existing_file.txt", 512 * 1024).await?;

        // Check if we can upload another 512KB (should succeed, exactly filling the quota)
        let check_result = env.check_bucket_quota("PUT", 512 * 1024).await?;
        assert!(check_result.get("allowed").unwrap().as_bool().unwrap());
        assert_eq!(check_result.get("remaining_quota").unwrap().as_u64().unwrap(), 0);

        // Note: we haven't actually uploaded the second file yet, so current_usage is still 512KB
        // Check if we can upload 1KB (should succeed - we haven't used the full quota yet)
        let check_result = env.check_bucket_quota("PUT", 1024).await?;
        assert!(check_result.get("allowed").unwrap().as_bool().unwrap());
        assert_eq!(check_result.get("remaining_quota").unwrap().as_u64().unwrap(), 512 * 1024 - 1024);

        // Check if we can upload 600KB (should fail - would exceed the quota)
        let check_result = env.check_bucket_quota("PUT", 600 * 1024).await?;
        assert!(!check_result.get("allowed").unwrap().as_bool().unwrap());

        // Check delete operation (should always be allowed)
        let check_result = env.check_bucket_quota("DELETE", 512 * 1024).await?;
        assert!(check_result.get("allowed").unwrap().as_bool().unwrap());

        env.cleanup_bucket().await?;

        Ok(())
    }

    #[tokio::test]
    #[serial]
    async fn test_quota_multiple_buckets() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        init_logging();
        let env = QuotaTestEnv::new().await?;

        // Create two buckets in the same environment
        let bucket1 = format!("quota-test-{}-1", uuid::Uuid::new_v4());
        let bucket2 = format!("quota-test-{}-2", uuid::Uuid::new_v4());

        env.env.create_test_bucket(&bucket1).await?;
        env.env.create_test_bucket(&bucket2).await?;

        // Set different quotas for each bucket
        env.set_bucket_quota_for(&bucket1, 1024 * 1024).await?; // 1MB
        env.set_bucket_quota_for(&bucket2, 2 * 1024 * 1024).await?; // 2MB

        // Fill first bucket to quota
        env.upload_object_to_bucket(&bucket1, "big_file.txt", 1024 * 1024).await?;

        // Should still be able to upload to the second bucket
        env.upload_object_to_bucket(&bucket2, "big_file.txt", 1024 * 1024).await?;
        env.upload_object_to_bucket(&bucket2, "another_file.txt", 512 * 1024).await?;

        // Verify statistics are independent
        let stats1 = env.get_bucket_quota_stats_for(&bucket1).await?;
        let stats2 = env.get_bucket_quota_stats_for(&bucket2).await?;

        assert_eq!(stats1.get("current_usage").unwrap().as_u64().unwrap(), 1024 * 1024);
        assert_eq!(stats2.get("current_usage").unwrap().as_u64().unwrap(), (1024 + 512) * 1024);

        // Clean up
        env.env.delete_test_bucket(&bucket1).await?;
        env.env.delete_test_bucket(&bucket2).await?;

        Ok(())
    }

    #[tokio::test]
    #[serial]
    async fn test_quota_error_handling() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        init_logging();
        let env = QuotaTestEnv::new().await?;

        env.create_bucket().await?;

        // Test invalid quota type
        let url = format!("{}/rustfs/admin/v3/quota/{}", env.env.url, env.bucket_name);

        let invalid_config = serde_json::json!({
            "quota": 1024,
            "quota_type": "SOFT" // Invalid type
        });

        let response = awscurl_put(&url, &invalid_config.to_string(), &env.env.access_key, &env.env.secret_key).await;
        assert!(response.is_err());
        let error_msg = response.unwrap_err().to_string();
        assert!(error_msg.contains("InvalidArgument"));

        // Test operations on a non-existent bucket
        let url = format!("{}/rustfs/admin/v3/quota/non-existent-bucket", env.env.url);
        let response = awscurl_get(&url, &env.env.access_key, &env.env.secret_key).await;
        assert!(response.is_err());
        let error_msg = response.unwrap_err().to_string();
        assert!(error_msg.contains("NoSuchBucket"));

        env.cleanup_bucket().await?;

        Ok(())
    }

    #[tokio::test]
    #[serial]
    async fn test_quota_http_endpoints() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        init_logging();
        let env = QuotaTestEnv::new().await?;

        env.create_bucket().await?;

        // Test 1: GET quota for a bucket without quota config
        let url = format!("{}/rustfs/admin/v3/quota/{}", env.env.url, env.bucket_name);
        let response = awscurl_get(&url, &env.env.access_key, &env.env.secret_key).await?;
        assert!(response.contains("quota") && response.contains("null"));

        // Test 2: PUT quota - valid config
        let quota_config = serde_json::json!({
            "quota": 1048576,
            "quota_type": "HARD"
        });
        let response = awscurl_put(&url, &quota_config.to_string(), &env.env.access_key, &env.env.secret_key).await?;
        assert!(response.contains("success") || !response.contains("error"));

        // Test 3: GET quota after setting
        let response = awscurl_get(&url, &env.env.access_key, &env.env.secret_key).await?;
        assert!(response.contains("1048576"));

        // Test 4: GET quota stats
        let stats_url = format!("{}/rustfs/admin/v3/quota-stats/{}", env.env.url, env.bucket_name);
        let response = awscurl_get(&stats_url, &env.env.access_key, &env.env.secret_key).await?;
        assert!(response.contains("quota_limit") && response.contains("current_usage"));

        // Test 5: POST quota check
        let check_url = format!("{}/rustfs/admin/v3/quota-check/{}", env.env.url, env.bucket_name);
        let check_request = serde_json::json!({
            "operation_type": "PUT",
            "operation_size": 1024
        });
        let response = awscurl_post(&check_url, &check_request.to_string(), &env.env.access_key, &env.env.secret_key).await?;
        assert!(response.contains("allowed"));

        // Test 6: DELETE quota
        let response = awscurl_delete(&url, &env.env.access_key, &env.env.secret_key).await?;
        assert!(!response.contains("error"));

        // Test 7: GET quota after deletion
        let response = awscurl_get(&url, &env.env.access_key, &env.env.secret_key).await?;
        assert!(response.contains("quota") && response.contains("null"));

        // Test 8: Invalid quota type
        let invalid_config = serde_json::json!({
            "quota": 1024,
            "quota_type": "SOFT"
        });
        let response = awscurl_put(&url, &invalid_config.to_string(), &env.env.access_key, &env.env.secret_key).await;
        assert!(response.is_err());
        let error_msg = response.unwrap_err().to_string();
        assert!(error_msg.contains("InvalidArgument"));

        env.cleanup_bucket().await?;

        Ok(())
    }

    #[tokio::test]
    #[serial]
    async fn test_quota_copy_operations() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        init_logging();
        let env = QuotaTestEnv::new().await?;

        env.create_bucket().await?;

        // Set quota of 2MB
        env.set_bucket_quota(2 * 1024 * 1024).await?;

        // Upload initial file
        env.upload_object("original.txt", 1024 * 1024).await?;

        // Copy file - should succeed (1MB each, total 2MB)
        env.client
            .copy_object()
            .bucket(&env.bucket_name)
            .key("copy1.txt")
            .copy_source(format!("{}/{}", env.bucket_name, "original.txt"))
            .send()
            .await?;

        assert!(env.object_exists("copy1.txt").await?);

        // Try to copy again - should fail (1MB each, total 3MB > 2MB quota)
        let copy_result = env
            .client
            .copy_object()
            .bucket(&env.bucket_name)
            .key("copy2.txt")
            .copy_source(format!("{}/{}", env.bucket_name, "original.txt"))
            .send()
            .await;

        assert!(copy_result.is_err());
        assert!(!env.object_exists("copy2.txt").await?);

        env.cleanup_bucket().await?;

        Ok(())
    }

    #[tokio::test]
    #[serial]
    async fn test_quota_batch_delete() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        init_logging();
        let env = QuotaTestEnv::new().await?;

        env.create_bucket().await?;

        // Set quota of 2MB
        env.set_bucket_quota(2 * 1024 * 1024).await?;

        // Upload files to fill quota
        env.upload_object("file1.txt", 1024 * 1024).await?;
        env.upload_object("file2.txt", 1024 * 1024).await?;

        // Verify quota is full
        let upload_result = env.upload_object("file3.txt", 1024).await;
        assert!(upload_result.is_err());

        // Delete multiple objects using batch delete
        let objects = vec![
            aws_sdk_s3::types::ObjectIdentifier::builder()
                .key("file1.txt")
                .build()
                .unwrap(),
            aws_sdk_s3::types::ObjectIdentifier::builder()
                .key("file2.txt")
                .build()
                .unwrap(),
        ];

        let delete_result = env
            .client
            .delete_objects()
            .bucket(&env.bucket_name)
            .delete(
                aws_sdk_s3::types::Delete::builder()
                    .set_objects(Some(objects))
                    .quiet(true)
                    .build()
                    .unwrap(),
            )
            .send()
            .await?;

        assert_eq!(delete_result.deleted().len(), 2);

        // Now should be able to upload again (quota freed up)
        env.upload_object("file3.txt", 256 * 1024).await?;
        assert!(env.object_exists("file3.txt").await?);

        env.cleanup_bucket().await?;

        Ok(())
    }

    #[tokio::test]
    #[serial]
    async fn test_quota_multipart_upload() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        init_logging();
        let env = QuotaTestEnv::new().await?;

        env.create_bucket().await?;

        // Set quota of 10MB
        env.set_bucket_quota(10 * 1024 * 1024).await?;

        let key = "multipart_test.txt";
        let part_size = 5 * 1024 * 1024; // 5MB: S3's minimum for all parts except the last

        // Test 1: Multipart upload within quota (single 5MB part)
        let create_result = env
            .client
            .create_multipart_upload()
            .bucket(&env.bucket_name)
            .key(key)
            .send()
            .await?;

        let upload_id = create_result.upload_id().unwrap();

        // Upload a single 5MB part (the only part is also the last, so any size is allowed)
        let part_data = vec![1u8; part_size];
        let part_result = env
            .client
            .upload_part()
            .bucket(&env.bucket_name)
            .key(key)
            .upload_id(upload_id)
            .part_number(1)
            .body(aws_sdk_s3::primitives::ByteStream::from(part_data))
            .send()
            .await?;

        let uploaded_parts = vec![
            aws_sdk_s3::types::CompletedPart::builder()
                .part_number(1)
                .e_tag(part_result.e_tag().unwrap())
                .build(),
        ];

        env.client
            .complete_multipart_upload()
            .bucket(&env.bucket_name)
            .key(key)
            .upload_id(upload_id)
            .multipart_upload(
                aws_sdk_s3::types::CompletedMultipartUpload::builder()
                    .set_parts(Some(uploaded_parts))
                    .build(),
            )
            .send()
            .await?;

        assert!(env.object_exists(key).await?);

        // Test 2: Multipart upload exceeds quota (should fail)
        // Upload 6MB filler (total would be: 5MB + 6MB = 11MB > 10MB quota)
        let upload_filler = env.upload_object("filler.txt", 6 * 1024 * 1024).await;
        // This should fail due to quota
        assert!(upload_filler.is_err());

        // Verify the filler doesn't exist
        assert!(!env.object_exists("filler.txt").await?);

        // Now try a multipart upload that exceeds quota
        // Current usage: 5MB (from Test 1), quota: 10MB
        // Trying to upload 2 x 5MB via multipart should fail at completion

        let create_result2 = env
            .client
            .create_multipart_upload()
            .bucket(&env.bucket_name)
            .key("over_quota.txt")
            .send()
            .await?;

        let upload_id2 = create_result2.upload_id().unwrap();

        let mut uploaded_parts2 = vec![];
        for part_num in 1..=2 {
            let part_data = vec![part_num as u8; part_size];
            let part_result = env
                .client
                .upload_part()
                .bucket(&env.bucket_name)
                .key("over_quota.txt")
                .upload_id(upload_id2)
                .part_number(part_num)
                .body(aws_sdk_s3::primitives::ByteStream::from(part_data))
                .send()
                .await?;

            uploaded_parts2.push(
                aws_sdk_s3::types::CompletedPart::builder()
                    .part_number(part_num)
                    .e_tag(part_result.e_tag().unwrap())
                    .build(),
            );
        }

        let complete_result = env
            .client
            .complete_multipart_upload()
            .bucket(&env.bucket_name)
            .key("over_quota.txt")
            .upload_id(upload_id2)
            .multipart_upload(
                aws_sdk_s3::types::CompletedMultipartUpload::builder()
                    .set_parts(Some(uploaded_parts2))
                    .build(),
            )
            .send()
            .await;

        assert!(complete_result.is_err());
        assert!(!env.object_exists("over_quota.txt").await?);

        env.cleanup_bucket().await?;

        Ok(())
    }
}
@@ -48,6 +48,7 @@ async-trait.workspace = true
 bytes.workspace = true
 byteorder = { workspace = true }
 chrono.workspace = true
+dunce.workspace = true
 glob = { workspace = true }
 thiserror.workspace = true
 flatbuffers.workspace = true
@@ -108,8 +109,6 @@ google-cloud-storage = { workspace = true }
 google-cloud-auth = { workspace = true }
 aws-config = { workspace = true }
 faster-hex = { workspace = true }
-dunce = { workspace = true }
-

 [dev-dependencies]
 tokio = { workspace = true, features = ["rt-multi-thread", "macros"] }
@@ -12,8 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use crate::disk::error::DiskError;
-use crate::disk::{self, DiskAPI as _, DiskStore};
+use crate::disk::{self, DiskAPI as _, DiskStore, error::DiskError};
use crate::erasure_coding::{BitrotReader, BitrotWriterWrapper, CustomWriter};
use rustfs_utils::HashAlgorithm;
use std::io::Cursor;

@@ -13,6 +13,14 @@
// limitations under the License.

use crate::bucket::metadata::BucketMetadata;
+use crate::bucket::metadata_sys::get_bucket_targets_config;
+use crate::bucket::metadata_sys::get_replication_config;
+use crate::bucket::replication::ObjectOpts;
+use crate::bucket::replication::ReplicationConfigurationExt;
+use crate::bucket::target::ARN;
+use crate::bucket::target::BucketTargetType;
+use crate::bucket::target::{self, BucketTarget, BucketTargets, Credentials};
+use crate::bucket::versioning_sys::BucketVersioningSys;
use aws_credential_types::Credentials as SdkCredentials;
use aws_sdk_s3::config::Region as SdkRegion;
use aws_sdk_s3::error::SdkError;
@@ -52,15 +60,6 @@ use tracing::warn;
use url::Url;
use uuid::Uuid;

-use crate::bucket::metadata_sys::get_bucket_targets_config;
-use crate::bucket::metadata_sys::get_replication_config;
-use crate::bucket::replication::ObjectOpts;
-use crate::bucket::replication::ReplicationConfigurationExt;
-use crate::bucket::target::ARN;
-use crate::bucket::target::BucketTargetType;
-use crate::bucket::target::{self, BucketTarget, BucketTargets, Credentials};
-use crate::bucket::versioning_sys::BucketVersioningSys;
-
const DEFAULT_HEALTH_CHECK_DURATION: Duration = Duration::from_secs(5);
const DEFAULT_HEALTH_CHECK_RELOAD_DURATION: Duration = Duration::from_secs(30 * 60);

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use super::lifecycle;
+use crate::bucket::lifecycle::lifecycle;

#[derive(Debug, Clone, Default)]
pub enum LcEventSrc {

@@ -18,6 +18,7 @@
#![allow(unused_must_use)]
#![allow(clippy::all)]

+use crate::bucket::lifecycle::rule::TransitionOps;
use s3s::dto::{
    BucketLifecycleConfiguration, ExpirationStatus, LifecycleExpiration, LifecycleRule, NoncurrentVersionTransition,
    ObjectLockConfiguration, ObjectLockEnabled, RestoreRequest, Transition,
@@ -30,8 +31,6 @@ use time::macros::{datetime, offset};
use time::{self, Duration, OffsetDateTime};
use tracing::info;

-use crate::bucket::lifecycle::rule::TransitionOps;
-
pub const TRANSITION_COMPLETE: &str = "complete";
pub const TRANSITION_PENDING: &str = "pending";

@@ -18,15 +18,13 @@
#![allow(unused_must_use)]
#![allow(clippy::all)]

+use rustfs_common::data_usage::TierStats;
use sha2::Sha256;

use std::collections::HashMap;
use std::ops::Sub;
use time::OffsetDateTime;
use tracing::{error, warn};

-use rustfs_common::data_usage::TierStats;
-
pub type DailyAllTierStats = HashMap<String, LastDayTierStats>;

#[derive(Clone)]

@@ -18,15 +18,14 @@
#![allow(unused_must_use)]
#![allow(clippy::all)]

+use crate::bucket::lifecycle::bucket_lifecycle_ops::{ExpiryOp, GLOBAL_ExpiryState, TransitionedObject};
+use crate::bucket::lifecycle::lifecycle::{self, ObjectOpts};
+use crate::global::GLOBAL_TierConfigMgr;
use sha2::{Digest, Sha256};
use std::any::Any;
use std::io::Write;
use xxhash_rust::xxh64;

-use super::bucket_lifecycle_ops::{ExpiryOp, GLOBAL_ExpiryState, TransitionedObject};
-use super::lifecycle::{self, ObjectOpts};
-use crate::global::GLOBAL_TierConfigMgr;
-
static XXHASH_SEED: u64 = 0;

#[derive(Default)]

@@ -12,20 +12,21 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use super::{quota::BucketQuota, target::BucketTargets};
-
use super::object_lock::ObjectLockApi;
use super::versioning::VersioningApi;
+use super::{quota::BucketQuota, target::BucketTargets};
use crate::bucket::utils::deserialize;
use crate::config::com::{read_config, save_config};
+use crate::disk::BUCKET_META_PREFIX;
use crate::error::{Error, Result};
use crate::new_object_layer_fn;
+use crate::store::ECStore;
use byteorder::{BigEndian, ByteOrder, LittleEndian};
use rmp_serde::Serializer as rmpSerializer;
use rustfs_policy::policy::BucketPolicy;
use s3s::dto::{
-    BucketLifecycleConfiguration, NotificationConfiguration, ObjectLockConfiguration, ReplicationConfiguration,
-    ServerSideEncryptionConfiguration, Tagging, VersioningConfiguration,
+    BucketLifecycleConfiguration, CORSConfiguration, NotificationConfiguration, ObjectLockConfiguration,
+    ReplicationConfiguration, ServerSideEncryptionConfiguration, Tagging, VersioningConfiguration,
};
use serde::Serializer;
use serde::{Deserialize, Serialize};
@@ -34,9 +35,6 @@ use std::sync::Arc;
use time::OffsetDateTime;
use tracing::error;

-use crate::disk::BUCKET_META_PREFIX;
-use crate::store::ECStore;
-
pub const BUCKET_METADATA_FILE: &str = ".metadata.bin";
pub const BUCKET_METADATA_FORMAT: u16 = 1;
pub const BUCKET_METADATA_VERSION: u16 = 1;
@@ -51,6 +49,7 @@ pub const OBJECT_LOCK_CONFIG: &str = "object-lock.xml";
pub const BUCKET_VERSIONING_CONFIG: &str = "versioning.xml";
pub const BUCKET_REPLICATION_CONFIG: &str = "replication.xml";
pub const BUCKET_TARGETS_FILE: &str = "bucket-targets.json";
+pub const BUCKET_CORS_CONFIG: &str = "cors.xml";

#[derive(Debug, Deserialize, Serialize, Clone)]
#[serde(rename_all = "PascalCase", default)]
@@ -69,6 +68,7 @@ pub struct BucketMetadata {
    pub replication_config_xml: Vec<u8>,
    pub bucket_targets_config_json: Vec<u8>,
    pub bucket_targets_config_meta_json: Vec<u8>,
+    pub cors_config_xml: Vec<u8>,

    pub policy_config_updated_at: OffsetDateTime,
    pub object_lock_config_updated_at: OffsetDateTime,
@@ -81,6 +81,7 @@ pub struct BucketMetadata {
    pub notification_config_updated_at: OffsetDateTime,
    pub bucket_targets_config_updated_at: OffsetDateTime,
    pub bucket_targets_config_meta_updated_at: OffsetDateTime,
+    pub cors_config_updated_at: OffsetDateTime,

    #[serde(skip)]
    pub new_field_updated_at: OffsetDateTime,
@@ -107,6 +108,8 @@ pub struct BucketMetadata {
    pub bucket_target_config: Option<BucketTargets>,
    #[serde(skip)]
    pub bucket_target_config_meta: Option<HashMap<String, String>>,
+    #[serde(skip)]
+    pub cors_config: Option<CORSConfiguration>,
}

impl Default for BucketMetadata {
@@ -126,6 +129,7 @@ impl Default for BucketMetadata {
            replication_config_xml: Default::default(),
            bucket_targets_config_json: Default::default(),
            bucket_targets_config_meta_json: Default::default(),
+            cors_config_xml: Default::default(),
            policy_config_updated_at: OffsetDateTime::UNIX_EPOCH,
            object_lock_config_updated_at: OffsetDateTime::UNIX_EPOCH,
            encryption_config_updated_at: OffsetDateTime::UNIX_EPOCH,
@@ -137,6 +141,7 @@ impl Default for BucketMetadata {
            notification_config_updated_at: OffsetDateTime::UNIX_EPOCH,
            bucket_targets_config_updated_at: OffsetDateTime::UNIX_EPOCH,
            bucket_targets_config_meta_updated_at: OffsetDateTime::UNIX_EPOCH,
+            cors_config_updated_at: OffsetDateTime::UNIX_EPOCH,
            new_field_updated_at: OffsetDateTime::UNIX_EPOCH,
            policy_config: Default::default(),
            notification_config: Default::default(),
@@ -149,6 +154,7 @@ impl Default for BucketMetadata {
            replication_config: Default::default(),
            bucket_target_config: Default::default(),
            bucket_target_config_meta: Default::default(),
+            cors_config: Default::default(),
        }
    }
}
@@ -297,6 +303,10 @@ impl BucketMetadata {
                self.bucket_targets_config_json = data.clone();
                self.bucket_targets_config_updated_at = updated;
            }
+            BUCKET_CORS_CONFIG => {
+                self.cors_config_xml = data;
+                self.cors_config_updated_at = updated;
+            }
            _ => return Err(Error::other(format!("config file not found : {config_file}"))),
        }

@@ -355,7 +365,7 @@ impl BucketMetadata {
            self.tagging_config = Some(deserialize::<Tagging>(&self.tagging_config_xml)?);
        }
        if !self.quota_config_json.is_empty() {
-            self.quota_config = Some(BucketQuota::unmarshal(&self.quota_config_json)?);
+            self.quota_config = Some(serde_json::from_slice(&self.quota_config_json)?);
        }
        if !self.replication_config_xml.is_empty() {
            self.replication_config = Some(deserialize::<ReplicationConfiguration>(&self.replication_config_xml)?);
@@ -367,6 +377,9 @@ impl BucketMetadata {
        } else {
            self.bucket_target_config = Some(BucketTargets::default())
        }
+        if !self.cors_config_xml.is_empty() {
+            self.cors_config = Some(deserialize::<CORSConfiguration>(&self.cors_config_xml)?);
+        }

        Ok(())
    }
@@ -487,7 +500,8 @@ mod test {
        bm.tagging_config_updated_at = OffsetDateTime::now_utc();

        // Add quota configuration
-        let quota_json = r#"{"quota":1073741824,"quotaType":"hard"}"#; // 1GB quota
+        let quota_json =
+            r#"{"quota":1073741824,"quota_type":"Hard","created_at":"2024-01-01T00:00:00Z","updated_at":"2024-01-01T00:00:00Z"}"#; // 1GB quota
        bm.quota_config_json = quota_json.as_bytes().to_vec();
        bm.quota_config_updated_at = OffsetDateTime::now_utc();

@@ -12,6 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.

+use super::metadata::{BucketMetadata, load_bucket_metadata};
+use super::quota::BucketQuota;
+use super::target::BucketTargets;
use crate::StorageAPI as _;
use crate::bucket::bucket_target_sys::BucketTargetSys;
use crate::bucket::metadata::{BUCKET_LIFECYCLE_CONFIG, load_bucket_metadata_parse};
@@ -20,12 +23,13 @@ use crate::error::{Error, Result, is_err_bucket_not_found};
use crate::global::{GLOBAL_Endpoints, is_dist_erasure, is_erasure, new_object_layer_fn};
use crate::store::ECStore;
use futures::future::join_all;
+use lazy_static::lazy_static;
use rustfs_common::heal_channel::HealOpts;
use rustfs_policy::policy::BucketPolicy;
use s3s::dto::ReplicationConfiguration;
use s3s::dto::{
-    BucketLifecycleConfiguration, NotificationConfiguration, ObjectLockConfiguration, ServerSideEncryptionConfiguration, Tagging,
-    VersioningConfiguration,
+    BucketLifecycleConfiguration, CORSConfiguration, NotificationConfiguration, ObjectLockConfiguration,
+    ServerSideEncryptionConfiguration, Tagging, VersioningConfiguration,
};
use std::collections::HashSet;
use std::sync::OnceLock;
@@ -36,12 +40,6 @@ use tokio::sync::RwLock;
use tokio::time::sleep;
use tracing::error;

-use super::metadata::{BucketMetadata, load_bucket_metadata};
-use super::quota::BucketQuota;
-use super::target::BucketTargets;
-
-use lazy_static::lazy_static;
-
lazy_static! {
    pub static ref GLOBAL_BucketMetadataSys: OnceLock<Arc<RwLock<BucketMetadataSys>>> = OnceLock::new();
}
@@ -112,6 +110,13 @@ pub async fn get_bucket_targets_config(bucket: &str) -> Result<BucketTargets> {
    bucket_meta_sys.get_bucket_targets_config(bucket).await
}

+pub async fn get_cors_config(bucket: &str) -> Result<(CORSConfiguration, OffsetDateTime)> {
+    let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
+    let bucket_meta_sys = bucket_meta_sys_lock.read().await;
+
+    bucket_meta_sys.get_cors_config(bucket).await
+}
+
pub async fn get_tagging_config(bucket: &str) -> Result<(Tagging, OffsetDateTime)> {
    let bucket_meta_sys_lock = get_bucket_metadata_sys()?;
    let bucket_meta_sys = bucket_meta_sys_lock.read().await;
@@ -502,6 +507,16 @@ impl BucketMetadataSys {
        }
    }

+    pub async fn get_cors_config(&self, bucket: &str) -> Result<(CORSConfiguration, OffsetDateTime)> {
+        let (bm, _) = self.get_config(bucket).await?;
+
+        if let Some(config) = &bm.cors_config {
+            Ok((config.clone(), bm.cors_config_updated_at))
+        } else {
+            Err(Error::ConfigNotFound)
+        }
+    }
+
    pub async fn created_at(&self, bucket: &str) -> Result<OffsetDateTime> {
        let bm = match self.get_config(bucket).await {
            Ok((bm, _)) => bm.created,

@@ -12,11 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use std::collections::HashMap;
-use time::{OffsetDateTime, format_description};
-
use s3s::dto::{Date, ObjectLockLegalHold, ObjectLockLegalHoldStatus, ObjectLockRetention, ObjectLockRetentionMode};
use s3s::header::{X_AMZ_OBJECT_LOCK_LEGAL_HOLD, X_AMZ_OBJECT_LOCK_MODE, X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE};
+use std::collections::HashMap;
+use time::{OffsetDateTime, format_description};

const _ERR_MALFORMED_BUCKET_OBJECT_CONFIG: &str = "invalid bucket object lock config";
const _ERR_INVALID_RETENTION_DATE: &str = "date must be provided in ISO 8601 format";

@@ -12,16 +12,13 @@
// See the License for the specific language governing permissions and
// limitations under the License.

+use crate::bucket::metadata_sys::get_object_lock_config;
+use crate::bucket::object_lock::objectlock;
+use crate::store_api::ObjectInfo;
+use s3s::dto::{DefaultRetention, ObjectLockLegalHoldStatus, ObjectLockRetentionMode};
use std::sync::Arc;
use time::OffsetDateTime;

-use s3s::dto::{DefaultRetention, ObjectLockLegalHoldStatus, ObjectLockRetentionMode};
-
-use crate::bucket::metadata_sys::get_object_lock_config;
-use crate::store_api::ObjectInfo;
-
-use super::objectlock;
-
pub struct BucketObjectLockSys {}

impl BucketObjectLockSys {

crates/ecstore/src/bucket/quota/checker.rs (new file, 195 lines)
@@ -0,0 +1,195 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use super::{BucketQuota, QuotaCheckResult, QuotaError, QuotaOperation};
use crate::bucket::metadata_sys::{BucketMetadataSys, update};
use crate::data_usage::get_bucket_usage_memory;
use rustfs_common::metrics::Metric;
use rustfs_config::QUOTA_CONFIG_FILE;
use std::sync::Arc;
use std::time::Instant;
use tokio::sync::RwLock;
use tracing::{debug, warn};

pub struct QuotaChecker {
    metadata_sys: Arc<RwLock<BucketMetadataSys>>,
}

impl QuotaChecker {
    pub fn new(metadata_sys: Arc<RwLock<BucketMetadataSys>>) -> Self {
        Self { metadata_sys }
    }

    pub async fn check_quota(
        &self,
        bucket: &str,
        operation: QuotaOperation,
        operation_size: u64,
    ) -> Result<QuotaCheckResult, QuotaError> {
        let start_time = Instant::now();
        let quota_config = self.get_quota_config(bucket).await?;

        // If no quota limit is set, allow operation
        let quota_limit = match quota_config.quota {
            None => {
                let current_usage = self.get_real_time_usage(bucket).await?;
                return Ok(QuotaCheckResult {
                    allowed: true,
                    current_usage,
                    quota_limit: None,
                    operation_size,
                    remaining: None,
                });
            }
            Some(q) => q,
        };

        let current_usage = self.get_real_time_usage(bucket).await?;

        let expected_usage = match operation {
            QuotaOperation::PutObject | QuotaOperation::CopyObject => current_usage + operation_size,
            QuotaOperation::DeleteObject => current_usage.saturating_sub(operation_size),
        };

        let allowed = match operation {
            QuotaOperation::PutObject | QuotaOperation::CopyObject => {
                quota_config.check_operation_allowed(current_usage, operation_size)
            }
            QuotaOperation::DeleteObject => true,
        };

        let remaining = if quota_limit >= expected_usage {
            Some(quota_limit - expected_usage)
        } else {
            Some(0)
        };

        if !allowed {
            warn!(
                "Quota exceeded for bucket: {}, current: {}, limit: {}, attempted: {}",
                bucket, current_usage, quota_limit, operation_size
            );
        }

        let result = QuotaCheckResult {
            allowed,
            current_usage,
            quota_limit: Some(quota_limit),
            operation_size,
            remaining,
        };

        let duration = start_time.elapsed();
        rustfs_common::metrics::Metrics::inc_time(Metric::QuotaCheck, duration).await;
        if !allowed {
            rustfs_common::metrics::Metrics::inc_time(Metric::QuotaViolation, duration).await;
        }

        Ok(result)
    }

    pub async fn get_quota_config(&self, bucket: &str) -> Result<BucketQuota, QuotaError> {
        let meta = self
            .metadata_sys
            .read()
            .await
            .get(bucket)
            .await
            .map_err(QuotaError::StorageError)?;

        if meta.quota_config_json.is_empty() {
            debug!("No quota config found for bucket: {}, using default", bucket);
            return Ok(BucketQuota::new(None));
        }

        let quota: BucketQuota = serde_json::from_slice(&meta.quota_config_json).map_err(|e| QuotaError::InvalidConfig {
            reason: format!("Failed to parse quota config: {}", e),
        })?;

        Ok(quota)
    }

    pub async fn set_quota_config(&mut self, bucket: &str, quota: BucketQuota) -> Result<(), QuotaError> {
        let json_data = serde_json::to_vec(&quota).map_err(|e| QuotaError::InvalidConfig {
            reason: format!("Failed to serialize quota config: {}", e),
        })?;
        let start_time = Instant::now();

        update(bucket, QUOTA_CONFIG_FILE, json_data)
            .await
            .map_err(QuotaError::StorageError)?;

        rustfs_common::metrics::Metrics::inc_time(Metric::QuotaSync, start_time.elapsed()).await;
        Ok(())
    }

    pub async fn get_quota_stats(&self, bucket: &str) -> Result<(BucketQuota, Option<u64>), QuotaError> {
        // If bucket doesn't exist, return ConfigNotFound error
        if !self.bucket_exists(bucket).await {
            return Err(QuotaError::ConfigNotFound {
                bucket: bucket.to_string(),
            });
        }

        let quota = self.get_quota_config(bucket).await?;
        let current_usage = self.get_real_time_usage(bucket).await.unwrap_or(0);

        Ok((quota, Some(current_usage)))
    }

    pub async fn bucket_exists(&self, bucket: &str) -> bool {
        self.metadata_sys.read().await.get(bucket).await.is_ok()
    }

    pub async fn get_real_time_usage(&self, bucket: &str) -> Result<u64, QuotaError> {
        Ok(get_bucket_usage_memory(bucket).await.unwrap_or(0))
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_quota_check_no_limit() {
        let result = QuotaCheckResult {
            allowed: true,
            current_usage: 0,
            quota_limit: None,
            operation_size: 1024,
            remaining: None,
        };

        assert!(result.allowed);
        assert_eq!(result.quota_limit, None);
    }

    #[tokio::test]
    async fn test_quota_check_within_limit() {
        let quota = BucketQuota::new(Some(2048)); // 2KB

        // Current usage 512, trying to add 1024
        let allowed = quota.check_operation_allowed(512, 1024);
        assert!(allowed);
    }

    #[tokio::test]
    async fn test_quota_check_exceeds_limit() {
        let quota = BucketQuota::new(Some(1024)); // 1KB

        // Current usage 512, trying to add 1024
        let allowed = quota.check_operation_allowed(512, 1024);
        assert!(!allowed);
    }
}
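A minimal call-site sketch for the checker above (the guard_put helper is hypothetical; the diff does not show how the S3 handlers wire this in):

// Reject a PUT up front when it would blow the bucket's hard quota.
async fn guard_put(checker: &QuotaChecker, bucket: &str, size: u64) -> Result<(), QuotaError> {
    let check = checker.check_quota(bucket, QuotaOperation::PutObject, size).await?;
    if !check.allowed {
        // Map the check result back onto the structured error type.
        return Err(QuotaError::QuotaExceeded {
            current: check.current_usage,
            limit: check.quota_limit.unwrap_or(0),
            operation: check.operation_size,
        });
    }
    Ok(())
}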
@@ -12,36 +12,37 @@
// See the License for the specific language governing permissions and
// limitations under the License.

+pub mod checker;
+
use crate::error::Result;
use rmp_serde::Serializer as rmpSerializer;
+use rustfs_config::{
+    QUOTA_API_PATH, QUOTA_EXCEEDED_ERROR_CODE, QUOTA_INTERNAL_ERROR_CODE, QUOTA_INVALID_CONFIG_ERROR_CODE,
+    QUOTA_NOT_FOUND_ERROR_CODE,
+};
use serde::{Deserialize, Serialize};
+use thiserror::Error;
use time::OffsetDateTime;

// Define the QuotaType enum
-#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)]
+#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)]
pub enum QuotaType {
+    /// Hard quota: reject immediately when exceeded
+    #[default]
    Hard,
}

// Define the BucketQuota structure
-#[derive(Debug, Deserialize, Serialize, Default, Clone)]
+#[derive(Debug, Deserialize, Serialize, Default, Clone, PartialEq)]
pub struct BucketQuota {
-    quota: Option<u64>, // Use Option to represent optional fields
-
-    size: u64,
-
-    rate: u64,
-
-    requests: u64,
-
-    quota_type: Option<QuotaType>,
+    pub quota: Option<u64>,
+    pub quota_type: QuotaType,
+    /// Timestamp when this quota configuration was set (for audit purposes)
+    pub created_at: Option<OffsetDateTime>,
}

impl BucketQuota {
    pub fn marshal_msg(&self) -> Result<Vec<u8>> {
        let mut buf = Vec::new();

        self.serialize(&mut rmpSerializer::new(&mut buf).with_struct_map())?;

        Ok(buf)
    }

@@ -49,4 +50,107 @@ impl BucketQuota {
        let t: BucketQuota = rmp_serde::from_slice(buf)?;
        Ok(t)
    }

    pub fn new(quota: Option<u64>) -> Self {
        let now = OffsetDateTime::now_utc();
        Self {
            quota,
            quota_type: QuotaType::Hard,
            created_at: Some(now),
        }
    }

    pub fn get_quota_limit(&self) -> Option<u64> {
        self.quota
    }

    pub fn check_operation_allowed(&self, current_usage: u64, operation_size: u64) -> bool {
        if let Some(quota_limit) = self.quota {
            current_usage.saturating_add(operation_size) <= quota_limit
        } else {
            true // No quota limit
        }
    }

    pub fn get_remaining_quota(&self, current_usage: u64) -> Option<u64> {
        self.quota.map(|limit| limit.saturating_sub(current_usage))
    }
}
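A quick behavior sketch for the helpers above (not part of the diff; the values mirror the unit tests in checker.rs):

fn main() {
    let quota = BucketQuota::new(Some(10 * 1024)); // 10 KiB hard quota
    assert!(quota.check_operation_allowed(4 * 1024, 6 * 1024)); // lands exactly on the limit: allowed
    assert!(!quota.check_operation_allowed(4 * 1024, 7 * 1024)); // one KiB over: rejected
    assert_eq!(quota.get_remaining_quota(4 * 1024), Some(6 * 1024));
    assert_eq!(BucketQuota::new(None).get_remaining_quota(4 * 1024), None); // unlimited bucket
}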

#[derive(Debug)]
pub struct QuotaCheckResult {
    pub allowed: bool,
    pub current_usage: u64,
    /// quota_limit: None means unlimited
    pub quota_limit: Option<u64>,
    pub operation_size: u64,
    pub remaining: Option<u64>,
}

#[derive(Debug)]
pub enum QuotaOperation {
    PutObject,
    CopyObject,
    DeleteObject,
}

#[derive(Debug, Error)]
pub enum QuotaError {
    #[error("Bucket quota exceeded: current={current}, limit={limit}, operation={operation}")]
    QuotaExceeded { current: u64, limit: u64, operation: u64 },
    #[error("Quota configuration not found for bucket: {bucket}")]
    ConfigNotFound { bucket: String },
    #[error("Invalid quota configuration: {reason}")]
    InvalidConfig { reason: String },
    #[error("Storage error: {0}")]
    StorageError(#[from] crate::error::StorageError),
}

#[derive(Debug, Serialize)]
pub struct QuotaErrorResponse {
    #[serde(rename = "Code")]
    pub code: String,
    #[serde(rename = "Message")]
    pub message: String,
    #[serde(rename = "Resource")]
    pub resource: String,
    #[serde(rename = "RequestId")]
    pub request_id: String,
    #[serde(rename = "HostId")]
    pub host_id: String,
}

impl QuotaErrorResponse {
    pub fn new(quota_error: &QuotaError, request_id: &str, host_id: &str) -> Self {
        match quota_error {
            QuotaError::QuotaExceeded { .. } => Self {
                code: QUOTA_EXCEEDED_ERROR_CODE.to_string(),
                message: quota_error.to_string(),
                resource: QUOTA_API_PATH.to_string(),
                request_id: request_id.to_string(),
                host_id: host_id.to_string(),
            },
            QuotaError::ConfigNotFound { .. } => Self {
                code: QUOTA_NOT_FOUND_ERROR_CODE.to_string(),
                message: quota_error.to_string(),
                resource: QUOTA_API_PATH.to_string(),
                request_id: request_id.to_string(),
                host_id: host_id.to_string(),
            },
            QuotaError::InvalidConfig { .. } => Self {
                code: QUOTA_INVALID_CONFIG_ERROR_CODE.to_string(),
                message: quota_error.to_string(),
                resource: QUOTA_API_PATH.to_string(),
                request_id: request_id.to_string(),
                host_id: host_id.to_string(),
            },
            QuotaError::StorageError(_) => Self {
                code: QUOTA_INTERNAL_ERROR_CODE.to_string(),
                message: quota_error.to_string(),
                resource: QUOTA_API_PATH.to_string(),
                request_id: request_id.to_string(),
                host_id: host_id.to_string(),
            },
        }
    }
}
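To make the wire shape concrete, a small usage sketch (serde_json is my choice for illustration; the diff does not show which serializer the handlers use, and the error-code constants' literal values live in rustfs_config):

fn main() {
    const MB: u64 = 1024 * 1024;
    let err = QuotaError::QuotaExceeded { current: 11 * MB, limit: 10 * MB, operation: 6 * MB };
    let body = QuotaErrorResponse::new(&err, "req-123", "host-1");
    // Field names serialize in PascalCase per the serde(rename) attributes, e.g.
    // {"Code":"...","Message":"Bucket quota exceeded: ...","Resource":"...","RequestId":"req-123","HostId":"host-1"}
    println!("{}", serde_json::to_string(&body).unwrap());
}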
@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use super::ReplicationRuleExt as _;
+use crate::bucket::replication::ReplicationRuleExt as _;
use crate::bucket::tagging::decode_tags_to_map;
use rustfs_filemeta::ReplicationType;
use s3s::dto::DeleteMarkerReplicationStatus;

@@ -1,22 +1,30 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

+use crate::StorageAPI;
+use crate::bucket::replication::ResyncOpts;
+use crate::bucket::replication::ResyncStatusType;
+use crate::bucket::replication::replicate_delete;
+use crate::bucket::replication::replicate_object;
+use crate::disk::BUCKET_META_PREFIX;
-use std::any::Any;
-use std::sync::Arc;
-use std::sync::atomic::AtomicI32;
-use std::sync::atomic::Ordering;
-
use crate::bucket::replication::replication_resyncer::{
    BucketReplicationResyncStatus, DeletedObjectReplicationInfo, ReplicationResyncer,
};
use crate::bucket::replication::replication_state::ReplicationStats;
use crate::config::com::read_config;
-use crate::disk::BUCKET_META_PREFIX;
use crate::error::Error as EcstoreError;
use crate::store_api::ObjectInfo;

use lazy_static::lazy_static;
use rustfs_filemeta::MrfReplicateEntry;
use rustfs_filemeta::ReplicateDecision;
@@ -29,6 +37,10 @@ use rustfs_filemeta::ResyncDecision;
use rustfs_filemeta::replication_statuses_map;
use rustfs_filemeta::version_purge_statuses_map;
use rustfs_utils::http::RESERVED_METADATA_PREFIX_LOWER;
+use std::any::Any;
+use std::sync::Arc;
+use std::sync::atomic::AtomicI32;
+use std::sync::atomic::Ordering;
use time::OffsetDateTime;
use time::format_description::well_known::Rfc3339;
use tokio::sync::Mutex;

@@ -1,3 +1,17 @@
+// Copyright 2024 RustFS Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
use crate::bucket::bucket_target_sys::{
    AdvancedPutOptions, BucketTargetSys, PutObjectOptions, PutObjectPartOptions, RemoveObjectOptions, TargetClient,
};
@@ -16,7 +30,6 @@ use crate::event_notification::{EventArgs, send_event};
use crate::global::GLOBAL_LocalNodeName;
use crate::store_api::{DeletedObject, ObjectInfo, ObjectOptions, ObjectToDelete, WalkOptions};
use crate::{StorageAPI, new_object_layer_fn};
-
use aws_sdk_s3::error::SdkError;
use aws_sdk_s3::operation::head_object::HeadObjectOutput;
use aws_sdk_s3::primitives::ByteStream;
@@ -24,7 +37,6 @@ use aws_sdk_s3::types::{CompletedPart, ObjectLockLegalHoldStatus};
use byteorder::ByteOrder;
use futures::future::join_all;
use http::HeaderMap;
-
use regex::Regex;
use rustfs_filemeta::{
    MrfReplicateEntry, REPLICATE_EXISTING, REPLICATE_EXISTING_DELETE, REPLICATION_RESET, ReplicateDecision, ReplicateObjectInfo,

@@ -1,3 +1,17 @@
+// Copyright 2024 RustFS Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
use crate::error::Error;
use rustfs_filemeta::{ReplicatedTargetInfo, ReplicationStatusType, ReplicationType};
use serde::{Deserialize, Serialize};

@@ -12,11 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.

+use crate::bucket::replication::ObjectOpts;
use s3s::dto::ReplicaModificationsStatus;
use s3s::dto::ReplicationRule;

-use super::ObjectOpts;
-
pub trait ReplicationRuleExt {
    fn prefix(&self) -> &str;
    fn metadata_replicate(&self, obj: &ObjectOpts) -> bool;

@@ -12,9 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use std::collections::HashMap;
-
use s3s::dto::Tag;
+use std::collections::HashMap;
use url::form_urlencoded;

pub fn decode_tags(tags: &str) -> Vec<Tag> {

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use super::BucketTargetType;
+use crate::bucket::target::BucketTargetType;
use std::fmt::Display;
use std::str::FromStr;

@@ -14,16 +14,15 @@

use crate::disk::RUSTFS_META_BUCKET;
use crate::error::{Error, Result, StorageError};
-use rustfs_utils::path::SLASH_SEPARATOR;
+use regex::Regex;
+use rustfs_utils::path::SLASH_SEPARATOR_STR;
use s3s::xml;
+use tracing::instrument;

pub fn is_meta_bucketname(name: &str) -> bool {
    name.starts_with(RUSTFS_META_BUCKET)
}

-use regex::Regex;
-use tracing::instrument;
-
lazy_static::lazy_static! {
    static ref VALID_BUCKET_NAME: Regex = Regex::new(r"^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$").unwrap();
    static ref VALID_BUCKET_NAME_STRICT: Regex = Regex::new(r"^[a-z0-9][a-z0-9\.\-]{1,61}[a-z0-9]$").unwrap();
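For reference, a few illustrative inputs against the two patterns above (the assertions are mine, derived from the regexes themselves, not taken from the diff):

// Standard form admits uppercase, '_' and ':'; the strict form is lowercase-only.
assert!(VALID_BUCKET_NAME.is_match("My.Bucket_01"));
assert!(!VALID_BUCKET_NAME_STRICT.is_match("My.Bucket_01"));
assert!(VALID_BUCKET_NAME_STRICT.is_match("my.bucket-01"));
assert!(!VALID_BUCKET_NAME.is_match("ab")); // too short: the middle run needs at least one character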
@@ -194,7 +193,7 @@ pub fn is_valid_object_name(object: &str) -> bool {
        return false;
    }

-    if object.ends_with(SLASH_SEPARATOR) {
+    if object.ends_with(SLASH_SEPARATOR_STR) {
        return false;
    }

@@ -206,7 +205,7 @@ pub fn check_object_name_for_length_and_slash(bucket: &str, object: &str) -> Res
        return Err(StorageError::ObjectNameTooLong(bucket.to_owned(), object.to_owned()));
    }

-    if object.starts_with(SLASH_SEPARATOR) {
+    if object.starts_with(SLASH_SEPARATOR_STR) {
        return Err(StorageError::ObjectNamePrefixAsSlash(bucket.to_owned(), object.to_owned()));
    }

@@ -12,9 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use s3s::dto::{BucketVersioningStatus, VersioningConfiguration};
-
use rustfs_utils::string::match_simple;
+use s3s::dto::{BucketVersioningStatus, VersioningConfiguration};

pub trait VersioningApi {
    fn enabled(&self) -> bool;

@@ -12,9 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use std::sync::Arc;
-
use lazy_static::lazy_static;
+use std::sync::Arc;
use tokio_util::sync::CancellationToken;

pub mod metacache_set;

@@ -12,8 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use rustfs_utils::string::has_pattern;
-use rustfs_utils::string::has_string_suffix_in_slice;
+use rustfs_utils::string::{has_pattern, has_string_suffix_in_slice};
use std::env;
use tracing::error;

@@ -18,7 +18,7 @@ use crate::error::{Error, Result};
use crate::store_api::{ObjectInfo, ObjectOptions, PutObjReader, StorageAPI};
use http::HeaderMap;
use rustfs_config::DEFAULT_DELIMITER;
-use rustfs_utils::path::SLASH_SEPARATOR;
+use rustfs_utils::path::SLASH_SEPARATOR_STR;
use std::collections::HashSet;
use std::sync::Arc;
use std::sync::LazyLock;
@@ -29,7 +29,7 @@ const CONFIG_FILE: &str = "config.json";

pub const STORAGE_CLASS_SUB_SYS: &str = "storage_class";

-static CONFIG_BUCKET: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_META_BUCKET}{SLASH_SEPARATOR}{CONFIG_PREFIX}"));
+static CONFIG_BUCKET: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_META_BUCKET}{SLASH_SEPARATOR_STR}{CONFIG_PREFIX}"));

static SUB_SYSTEMS_DYNAMIC: LazyLock<HashSet<String>> = LazyLock::new(|| {
    let mut h = HashSet::new();
@@ -129,7 +129,7 @@ async fn new_and_save_server_config<S: StorageAPI>(api: Arc<S>) -> Result<Config
}

fn get_config_file() -> String {
-    format!("{CONFIG_PREFIX}{SLASH_SEPARATOR}{CONFIG_FILE}")
+    format!("{CONFIG_PREFIX}{SLASH_SEPARATOR_STR}{CONFIG_FILE}")
}

/// Handle the situation where the configuration file does not exist, create and save a new configuration

@@ -12,52 +12,66 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use std::{
-    collections::{HashMap, hash_map::Entry},
-    sync::Arc,
-    time::SystemTime,
-};
-
+pub mod local_snapshot;
+
-use crate::{
-    bucket::metadata_sys::get_replication_config, config::com::read_config, disk::DiskAPI, error::Error, store::ECStore,
-    store_api::StorageAPI,
-};
+pub use local_snapshot::{
+    DATA_USAGE_DIR, DATA_USAGE_STATE_DIR, LOCAL_USAGE_SNAPSHOT_VERSION, LocalUsageSnapshot, LocalUsageSnapshotMeta,
+    data_usage_dir, data_usage_state_dir, ensure_data_usage_layout, read_snapshot as read_local_snapshot, snapshot_file_name,
+    snapshot_object_path, snapshot_path, write_snapshot as write_local_snapshot,
+};

+use crate::{
+    bucket::metadata_sys::get_replication_config, config::com::read_config, disk::DiskAPI, store::ECStore, store_api::StorageAPI,
+};
use rustfs_common::data_usage::{
    BucketTargetUsageInfo, BucketUsageInfo, DataUsageCache, DataUsageEntry, DataUsageInfo, DiskUsageStatus, SizeSummary,
};
-use rustfs_utils::path::SLASH_SEPARATOR;
+use rustfs_utils::path::SLASH_SEPARATOR_STR;
+use std::{
+    collections::{HashMap, hash_map::Entry},
+    sync::{Arc, OnceLock},
+    time::{Duration, SystemTime},
+};
use tokio::fs;
-use tracing::{error, info, warn};

+use crate::error::Error;
+use tokio::sync::RwLock;
+use tracing::{debug, error, info, warn};

// Data usage storage constants
-pub const DATA_USAGE_ROOT: &str = SLASH_SEPARATOR;
+pub const DATA_USAGE_ROOT: &str = SLASH_SEPARATOR_STR;
const DATA_USAGE_OBJ_NAME: &str = ".usage.json";
const DATA_USAGE_BLOOM_NAME: &str = ".bloomcycle.bin";
pub const DATA_USAGE_CACHE_NAME: &str = ".usage-cache.bin";
+const DATA_USAGE_CACHE_TTL_SECS: u64 = 30;

+type UsageMemoryCache = Arc<RwLock<HashMap<String, (u64, SystemTime)>>>;
+type CacheUpdating = Arc<RwLock<bool>>;
+
+static USAGE_MEMORY_CACHE: OnceLock<UsageMemoryCache> = OnceLock::new();
+static USAGE_CACHE_UPDATING: OnceLock<CacheUpdating> = OnceLock::new();
+
+fn memory_cache() -> &'static UsageMemoryCache {
+    USAGE_MEMORY_CACHE.get_or_init(|| Arc::new(RwLock::new(HashMap::new())))
+}
+
+fn cache_updating() -> &'static CacheUpdating {
+    USAGE_CACHE_UPDATING.get_or_init(|| Arc::new(RwLock::new(false)))
+}
+
// Data usage storage paths
lazy_static::lazy_static! {
    pub static ref DATA_USAGE_BUCKET: String = format!("{}{}{}",
        crate::disk::RUSTFS_META_BUCKET,
-        SLASH_SEPARATOR,
+        SLASH_SEPARATOR_STR,
        crate::disk::BUCKET_META_PREFIX
    );
    pub static ref DATA_USAGE_OBJ_NAME_PATH: String = format!("{}{}{}",
        crate::disk::BUCKET_META_PREFIX,
-        SLASH_SEPARATOR,
+        SLASH_SEPARATOR_STR,
        DATA_USAGE_OBJ_NAME
    );
    pub static ref DATA_USAGE_BLOOM_NAME_PATH: String = format!("{}{}{}",
        crate::disk::BUCKET_META_PREFIX,
-        SLASH_SEPARATOR,
+        SLASH_SEPARATOR_STR,
        DATA_USAGE_BLOOM_NAME
    );
}
@@ -94,8 +108,8 @@ pub async fn load_data_usage_from_backend(store: Arc<ECStore>) -> Result<DataUsa
        Ok(data) => data,
        Err(e) => {
            error!("Failed to read data usage info from backend: {}", e);
-            if e == crate::error::Error::ConfigNotFound {
-                warn!("Data usage config not found, building basic statistics");
+            if e == Error::ConfigNotFound {
+                info!("Data usage config not found, building basic statistics");
                return build_basic_data_usage_info(store).await;
            }
            return Err(Error::other(e));
@@ -128,7 +142,7 @@ pub async fn load_data_usage_from_backend(store: Arc<ECStore>) -> Result<DataUsa
            .map(|(bucket, &size)| {
                (
                    bucket.clone(),
-                    rustfs_common::data_usage::BucketUsageInfo {
+                    BucketUsageInfo {
                        size,
                        ..Default::default()
                    },
@@ -245,7 +259,7 @@ pub async fn aggregate_local_snapshots(store: Arc<ECStore>) -> Result<(Vec<DiskU

        // If a snapshot is corrupted or unreadable, skip it but keep processing others
        if let Err(err) = &snapshot_result {
-            warn!(
+            info!(
                "Failed to read data usage snapshot for disk {} (pool {}, set {}, disk {}): {}",
                disk_id, pool_idx, set_disks.set_index, disk_index, err
            );
@@ -254,7 +268,7 @@ pub async fn aggregate_local_snapshots(store: Arc<ECStore>) -> Result<(Vec<DiskU
            if let Err(remove_err) = fs::remove_file(&snapshot_file).await
                && remove_err.kind() != std::io::ErrorKind::NotFound
            {
-                warn!("Failed to remove corrupted snapshot {:?}: {}", snapshot_file, remove_err);
+                info!("Failed to remove corrupted snapshot {:?}: {}", snapshot_file, remove_err);
            }
        }

@@ -341,7 +355,7 @@ pub async fn compute_bucket_usage(store: Arc<ECStore>, bucket_name: &str) -> Res

        continuation = result.next_continuation_token.clone();
        if continuation.is_none() {
-            warn!(
+            info!(
                "Bucket {} listing marked truncated but no continuation token returned; stopping early",
                bucket_name
            );
@@ -364,8 +378,120 @@ pub async fn compute_bucket_usage(store: Arc<ECStore>, bucket_name: &str) -> Res
    Ok(usage)
}

/// Fast in-memory increment for immediate quota consistency
pub async fn increment_bucket_usage_memory(bucket: &str, size_increment: u64) {
    let mut cache = memory_cache().write().await;
    let current = cache.entry(bucket.to_string()).or_insert_with(|| (0, SystemTime::now()));
    current.0 += size_increment;
    current.1 = SystemTime::now();
}

/// Fast in-memory decrement for immediate quota consistency
pub async fn decrement_bucket_usage_memory(bucket: &str, size_decrement: u64) {
    let mut cache = memory_cache().write().await;
    if let Some(current) = cache.get_mut(bucket) {
        current.0 = current.0.saturating_sub(size_decrement);
        current.1 = SystemTime::now();
    }
}

/// Get bucket usage from in-memory cache
pub async fn get_bucket_usage_memory(bucket: &str) -> Option<u64> {
    update_usage_cache_if_needed().await;

    let cache = memory_cache().read().await;
    cache.get(bucket).map(|(usage, _)| *usage)
}

async fn update_usage_cache_if_needed() {
    let ttl = Duration::from_secs(DATA_USAGE_CACHE_TTL_SECS);
    let double_ttl = ttl * 2;
    let now = SystemTime::now();

    let cache = memory_cache().read().await;
    let earliest_timestamp = cache.values().map(|(_, ts)| *ts).min();
    drop(cache);

    let age = match earliest_timestamp {
        Some(ts) => now.duration_since(ts).unwrap_or_default(),
        None => double_ttl,
    };

    if age < ttl {
        return;
    }

    let mut updating = cache_updating().write().await;
    if age < double_ttl {
        if *updating {
            return;
        }
        *updating = true;
        drop(updating);

        let cache_clone = (*memory_cache()).clone();
        let updating_clone = (*cache_updating()).clone();
        tokio::spawn(async move {
            if let Some(store) = crate::global::GLOBAL_OBJECT_API.get()
                && let Ok(data_usage_info) = load_data_usage_from_backend(store.clone()).await
            {
                let mut cache = cache_clone.write().await;
                for (bucket_name, bucket_usage) in data_usage_info.buckets_usage.iter() {
                    cache.insert(bucket_name.clone(), (bucket_usage.size, SystemTime::now()));
                }
            }
            let mut updating = updating_clone.write().await;
            *updating = false;
        });
        return;
    }

    for retry in 0..10 {
        if !*updating {
            break;
        }
        drop(updating);
        let delay = Duration::from_millis(1 << retry);
        tokio::time::sleep(delay).await;
        updating = cache_updating().write().await;
    }

    *updating = true;
    drop(updating);

    if let Some(store) = crate::global::GLOBAL_OBJECT_API.get()
        && let Ok(data_usage_info) = load_data_usage_from_backend(store.clone()).await
    {
        let mut cache = memory_cache().write().await;
        for (bucket_name, bucket_usage) in data_usage_info.buckets_usage.iter() {
            cache.insert(bucket_name.clone(), (bucket_usage.size, SystemTime::now()));
        }
    }

    let mut updating = cache_updating().write().await;
    *updating = false;
}
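The freshness policy above splits into three regimes; a compact restatement of the code's decision logic (my summary, with hypothetical names; not part of the diff):

enum RefreshMode {
    UseCache,          // age < TTL: serve the cached number as-is
    BackgroundRefresh, // TTL <= age < 2*TTL: serve the stale value, reload via tokio::spawn
    BlockingRefresh,   // age >= 2*TTL, or cache empty: wait (exponential backoff) and reload inline
}

fn classify(age_secs: u64, ttl_secs: u64) -> RefreshMode {
    if age_secs < ttl_secs {
        RefreshMode::UseCache
    } else if age_secs < 2 * ttl_secs {
        RefreshMode::BackgroundRefresh
    } else {
        RefreshMode::BlockingRefresh
    }
}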
/// Sync memory cache with backend data (called by scanner)
pub async fn sync_memory_cache_with_backend() -> Result<(), Error> {
    if let Some(store) = crate::global::GLOBAL_OBJECT_API.get() {
        match load_data_usage_from_backend(store.clone()).await {
            Ok(data_usage_info) => {
                let mut cache = memory_cache().write().await;
                for (bucket, bucket_usage) in data_usage_info.buckets_usage.iter() {
                    cache.insert(bucket.clone(), (bucket_usage.size, SystemTime::now()));
                }
            }
            Err(e) => {
                debug!("Failed to sync memory cache with backend: {}", e);
            }
        }
    }
    Ok(())
}

/// Build basic data usage info with real object counts
-async fn build_basic_data_usage_info(store: Arc<ECStore>) -> Result<DataUsageInfo, Error> {
+pub async fn build_basic_data_usage_info(store: Arc<ECStore>) -> Result<DataUsageInfo, Error> {
    let mut data_usage_info = DataUsageInfo::default();

    // Get bucket list
@@ -437,7 +563,7 @@ pub fn cache_to_data_usage_info(cache: &DataUsageCache, path: &str, buckets: &[c
            None => continue,
        };
        let flat = cache.flatten(&e);
-        let mut bui = rustfs_common::data_usage::BucketUsageInfo {
+        let mut bui = BucketUsageInfo {
            size: flat.size as u64,
            versions_count: flat.versions as u64,
            objects_count: flat.objects as u64,
@@ -515,7 +641,7 @@ pub async fn load_data_usage_cache(store: &crate::set_disk::SetDisks, name: &str
                break;
            }
            Err(err) => match err {
-                crate::error::Error::FileNotFound | crate::error::Error::VolumeNotFound => {
+                Error::FileNotFound | Error::VolumeNotFound => {
                    match store
                        .get_object_reader(
                            RUSTFS_META_BUCKET,
@@ -536,7 +662,7 @@ pub async fn load_data_usage_cache(store: &crate::set_disk::SetDisks, name: &str
                            break;
                        }
                        Err(_) => match err {
-                            crate::error::Error::FileNotFound | crate::error::Error::VolumeNotFound => {
+                            Error::FileNotFound | Error::VolumeNotFound => {
                                break;
                            }
                            _ => {}
@@ -565,9 +691,9 @@ pub async fn save_data_usage_cache(cache: &DataUsageCache, name: &str) -> crate:
    use std::path::Path;

    let Some(store) = new_object_layer_fn() else {
-        return Err(crate::error::Error::other("errServerNotInitialized"));
+        return Err(Error::other("errServerNotInitialized"));
    };
-    let buf = cache.marshal_msg().map_err(crate::error::Error::other)?;
+    let buf = cache.marshal_msg().map_err(Error::other)?;
    let buf_clone = buf.clone();

    let store_clone = store.clone();

@@ -1,13 +1,25 @@
-use std::collections::HashMap;
-use std::path::{Path, PathBuf};
-use std::time::SystemTime;
-
-use serde::{Deserialize, Serialize};
-use tokio::fs;
+// Copyright 2024 RustFS Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use crate::data_usage::BucketUsageInfo;
+use crate::disk::RUSTFS_META_BUCKET;
+use crate::error::{Error, Result};
+use serde::{Deserialize, Serialize};
+use std::collections::HashMap;
+use std::path::{Path, PathBuf};
+use std::time::SystemTime;
+use tokio::fs;

/// Directory used to store per-disk usage snapshots under the metadata bucket.
pub const DATA_USAGE_DIR: &str = "datausage";

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use super::error::{Error, Result};
+use crate::disk::error::{Error, Result};
use path_absolutize::Absolutize;
use rustfs_utils::{is_local_host, is_socket_addr};
use std::{fmt::Display, path::Path};

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-// use crate::quorum::CheckErrorFn;
use std::hash::{Hash, Hasher};
use std::io::{self};
use std::path::PathBuf;

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use super::error::DiskError;
+use crate::disk::error::DiskError;

pub fn to_file_error(io_err: std::io::Error) -> std::io::Error {
    match io_err.kind() {

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use super::error::Error;
+use crate::disk::error::Error;

pub static OBJECT_OP_IGNORED_ERRS: &[Error] = &[
    Error::DiskNotFound,

@@ -12,8 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use super::error::{Error, Result};
-use super::{DiskInfo, error::DiskError};
+use crate::disk::error::{Error, Result};
+use crate::disk::{DiskInfo, error::DiskError};
use serde::{Deserialize, Serialize};
use serde_json::Error as JsonError;
use uuid::Uuid;

@@ -17,7 +17,6 @@ use std::{
    path::Path,
    sync::{Arc, OnceLock},
};
-
use tokio::{
    fs::{self, File},
    io,

@@ -12,39 +12,26 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use super::error::{Error, Result};
-use super::os::{is_root_disk, rename_all};
-use super::{
-    BUCKET_META_PREFIX, CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskMetrics,
-    FileInfoVersions, RUSTFS_META_BUCKET, ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp,
-    STORAGE_FORMAT_FILE_BACKUP, UpdateMetadataOpts, VolumeInfo, WalkDirOptions, os,
-};
-use super::{endpoint::Endpoint, error::DiskError, format::FormatV3};
-
use crate::config::storageclass::DEFAULT_INLINE_BLOCK;
use crate::data_usage::local_snapshot::ensure_data_usage_layout;
-use crate::disk::error::FileAccessDeniedWithContext;
-use crate::disk::error_conv::{to_access_error, to_file_error, to_unformatted_disk_error, to_volume_error};
-use crate::disk::fs::{
-    O_APPEND, O_CREATE, O_RDONLY, O_TRUNC, O_WRONLY, access, lstat, lstat_std, remove, remove_all_std, remove_std, rename,
-};
-use crate::disk::os::{check_path_length, is_empty_dir};
use crate::disk::{
-    CHECK_PART_FILE_CORRUPT, CHECK_PART_FILE_NOT_FOUND, CHECK_PART_SUCCESS, CHECK_PART_UNKNOWN, CHECK_PART_VOLUME_NOT_FOUND,
-    FileReader, RUSTFS_META_TMP_DELETED_BUCKET, conv_part_err_to_int,
+    BUCKET_META_PREFIX, CHECK_PART_FILE_CORRUPT, CHECK_PART_FILE_NOT_FOUND, CHECK_PART_SUCCESS, CHECK_PART_UNKNOWN,
+    CHECK_PART_VOLUME_NOT_FOUND, CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskMetrics,
+    FileInfoVersions, FileReader, FileWriter, RUSTFS_META_BUCKET, RUSTFS_META_TMP_DELETED_BUCKET, ReadMultipleReq,
+    ReadMultipleResp, ReadOptions, RenameDataResp, STORAGE_FORMAT_FILE, STORAGE_FORMAT_FILE_BACKUP, UpdateMetadataOpts,
+    VolumeInfo, WalkDirOptions, conv_part_err_to_int,
+    endpoint::Endpoint,
+    error::{DiskError, Error, FileAccessDeniedWithContext, Result},
+    error_conv::{to_access_error, to_file_error, to_unformatted_disk_error, to_volume_error},
+    format::FormatV3,
+    fs::{O_APPEND, O_CREATE, O_RDONLY, O_TRUNC, O_WRONLY, access, lstat, lstat_std, remove, remove_all_std, remove_std, rename},
+    os,
+    os::{check_path_length, is_empty_dir, is_root_disk, rename_all},
};
-use crate::disk::{FileWriter, STORAGE_FORMAT_FILE};
-use crate::global::{GLOBAL_IsErasureSD, GLOBAL_RootDiskThreshold};
-use rustfs_utils::path::{
-    GLOBAL_DIR_SUFFIX, GLOBAL_DIR_SUFFIX_WITH_SLASH, SLASH_SEPARATOR, clean, decode_dir_object, encode_dir_object, has_suffix,
-    path_join, path_join_buf,
-};
-use tokio::time::interval;

use crate::erasure_coding::bitrot_verify;
-use bytes::Bytes;
// use path_absolutize::Absolutize; // Replaced with direct path operations for better performance
use crate::file_cache::{get_global_file_cache, prefetch_metadata_patterns, read_metadata_cached};
+use crate::global::{GLOBAL_IsErasureSD, GLOBAL_RootDiskThreshold};
+use bytes::Bytes;
use parking_lot::RwLock as ParkingLotRwLock;
use rustfs_filemeta::{
    Cache, FileInfo, FileInfoOpts, FileMeta, MetaCacheEntry, MetacacheWriter, ObjectPartInfo, Opts, RawFileInfo, UpdateFn,
@@ -52,6 +39,10 @@ use rustfs_filemeta::{
};
use rustfs_utils::HashAlgorithm;
use rustfs_utils::os::get_info;
+use rustfs_utils::path::{
+    GLOBAL_DIR_SUFFIX, GLOBAL_DIR_SUFFIX_WITH_SLASH, SLASH_SEPARATOR_STR, clean, decode_dir_object, encode_dir_object,
+    has_suffix, path_join, path_join_buf,
+};
use std::collections::HashMap;
use std::collections::HashSet;
use std::fmt::Debug;
@@ -67,6 +58,7 @@ use time::OffsetDateTime;
use tokio::fs::{self, File};
use tokio::io::{AsyncReadExt, AsyncSeekExt, AsyncWrite, AsyncWriteExt, ErrorKind};
use tokio::sync::RwLock;
+use tokio::time::interval;
use tracing::{debug, error, info, warn};
use uuid::Uuid;

@@ -484,7 +476,7 @@ impl LocalDisk {

        // Async prefetch related files, don't block current read
        if let Some(parent) = file_path.parent() {
-            prefetch_metadata_patterns(parent, &[super::STORAGE_FORMAT_FILE, "part.1", "part.2", "part.meta"]).await;
+            prefetch_metadata_patterns(parent, &[STORAGE_FORMAT_FILE, "part.1", "part.2", "part.meta"]).await;
        }

        // Main read logic
@@ -508,7 +500,7 @@ impl LocalDisk {
    async fn read_metadata_batch(&self, requests: Vec<(String, String)>) -> Result<Vec<Option<Arc<FileMeta>>>> {
        let paths: Vec<PathBuf> = requests
            .iter()
-            .map(|(bucket, key)| self.get_object_path(bucket, &format!("{}/{}", key, super::STORAGE_FORMAT_FILE)))
+            .map(|(bucket, key)| self.get_object_path(bucket, &format!("{}/{}", key, STORAGE_FORMAT_FILE)))
            .collect::<Result<Vec<_>>>()?;

        let cache = get_global_file_cache();
@@ -545,7 +537,7 @@ impl LocalDisk {

        // TODO: async notifications for disk space checks and trash cleanup

-        let trash_path = self.get_object_path(super::RUSTFS_META_TMP_DELETED_BUCKET, Uuid::new_v4().to_string().as_str())?;
+        let trash_path = self.get_object_path(RUSTFS_META_TMP_DELETED_BUCKET, Uuid::new_v4().to_string().as_str())?;
        // if let Some(parent) = trash_path.parent() {
        //     if !parent.exists() {
        //         fs::create_dir_all(parent).await?;
@@ -553,7 +545,7 @@ impl LocalDisk {
        // }

        let err = if recursive {
-            rename_all(delete_path, trash_path, self.get_bucket_path(super::RUSTFS_META_TMP_DELETED_BUCKET)?)
+            rename_all(delete_path, trash_path, self.get_bucket_path(RUSTFS_META_TMP_DELETED_BUCKET)?)
                .await
                .err()
        } else {
@@ -563,12 +555,12 @@ impl LocalDisk {
                .err()
        };

-        if immediate_purge || delete_path.to_string_lossy().ends_with(SLASH_SEPARATOR) {
-            let trash_path2 = self.get_object_path(super::RUSTFS_META_TMP_DELETED_BUCKET, Uuid::new_v4().to_string().as_str())?;
+        if immediate_purge || delete_path.to_string_lossy().ends_with(SLASH_SEPARATOR_STR) {
+            let trash_path2 = self.get_object_path(RUSTFS_META_TMP_DELETED_BUCKET, Uuid::new_v4().to_string().as_str())?;
            let _ = rename_all(
                encode_dir_object(delete_path.to_string_lossy().as_ref()),
                trash_path2,
-                self.get_bucket_path(super::RUSTFS_META_TMP_DELETED_BUCKET)?,
+                self.get_bucket_path(RUSTFS_META_TMP_DELETED_BUCKET)?,
            )
            .await;
        }
@@ -852,7 +844,11 @@ impl LocalDisk {
        self.write_all_internal(&tmp_file_path, InternalBuf::Ref(buf), sync, &tmp_volume_dir)
            .await?;

-        rename_all(tmp_file_path, file_path, volume_dir).await
+        rename_all(tmp_file_path, &file_path, volume_dir).await?;
+
+        // Invalidate cache after successful write
+        get_global_file_cache().invalidate(&file_path).await;
+        Ok(())
    }

    // write_all_public for trail
@@ -917,7 +913,7 @@ impl LocalDisk {
        }

        if let Some(parent) = path.as_ref().parent() {
-            super::os::make_dir_all(parent, skip_parent).await?;
+            os::make_dir_all(parent, skip_parent).await?;
        }

        let f = super::fs::open_file(path.as_ref(), mode).await.map_err(to_file_error)?;
@@ -943,7 +939,7 @@ impl LocalDisk {
        let meta = file.metadata().await.map_err(to_file_error)?;
        let file_size = meta.len() as usize;

-        bitrot_verify(Box::new(file), file_size, part_size, algo, bytes::Bytes::copy_from_slice(sum), shard_size)
+        bitrot_verify(Box::new(file), file_size, part_size, algo, Bytes::copy_from_slice(sum), shard_size)
            .await
            .map_err(to_file_error)?;

@@ -1039,15 +1035,16 @@ impl LocalDisk {
|
||||
continue;
|
||||
}
|
||||
|
||||
if entry.ends_with(SLASH_SEPARATOR) {
|
||||
if entry.ends_with(SLASH_SEPARATOR_STR) {
|
||||
if entry.ends_with(GLOBAL_DIR_SUFFIX_WITH_SLASH) {
|
||||
let entry = format!("{}{}", entry.as_str().trim_end_matches(GLOBAL_DIR_SUFFIX_WITH_SLASH), SLASH_SEPARATOR);
|
||||
let entry =
|
||||
format!("{}{}", entry.as_str().trim_end_matches(GLOBAL_DIR_SUFFIX_WITH_SLASH), SLASH_SEPARATOR_STR);
|
||||
dir_objes.insert(entry.clone());
|
||||
*item = entry;
|
||||
continue;
|
||||
}
|
||||
|
||||
*item = entry.trim_end_matches(SLASH_SEPARATOR).to_owned();
|
||||
*item = entry.trim_end_matches(SLASH_SEPARATOR_STR).to_owned();
|
||||
continue;
|
||||
}
|
||||
|
||||
@@ -1059,7 +1056,7 @@ impl LocalDisk {
|
||||
.await?;
|
||||
|
||||
let entry = entry.strip_suffix(STORAGE_FORMAT_FILE).unwrap_or_default().to_owned();
|
||||
let name = entry.trim_end_matches(SLASH_SEPARATOR);
|
||||
let name = entry.trim_end_matches(SLASH_SEPARATOR_STR);
|
||||
let name = decode_dir_object(format!("{}/{}", &current, &name).as_str());

// if opts.limit > 0
@@ -1142,7 +1139,7 @@ impl LocalDisk {
Ok(res) => {
if is_dir_obj {
meta.name = meta.name.trim_end_matches(GLOBAL_DIR_SUFFIX_WITH_SLASH).to_owned();
meta.name.push_str(SLASH_SEPARATOR);
meta.name.push_str(SLASH_SEPARATOR_STR);
}

meta.metadata = res;
@@ -1160,7 +1157,7 @@ impl LocalDisk {
// NOT an object, append to stack (with slash)
// If dirObject, but no metadata (which is unexpected) we skip it.
if !is_dir_obj && !is_empty_dir(self.get_object_path(&opts.bucket, &meta.name)?).await {
meta.name.push_str(SLASH_SEPARATOR);
meta.name.push_str(SLASH_SEPARATOR_STR);
dir_stack.push(meta.name);
}
}
@@ -1235,7 +1232,7 @@ async fn read_file_metadata(p: impl AsRef<Path>) -> Result<Metadata> {

fn skip_access_checks(p: impl AsRef<str>) -> bool {
let vols = [
super::RUSTFS_META_TMP_DELETED_BUCKET,
RUSTFS_META_TMP_DELETED_BUCKET,
super::RUSTFS_META_TMP_BUCKET,
super::RUSTFS_META_MULTIPART_BUCKET,
RUSTFS_META_BUCKET,
@@ -1629,8 +1626,8 @@ impl DiskAPI for LocalDisk {
super::fs::access_std(&dst_volume_dir).map_err(|e| to_access_error(e, DiskError::VolumeAccessDenied))?
}

let src_is_dir = has_suffix(src_path, SLASH_SEPARATOR);
let dst_is_dir = has_suffix(dst_path, SLASH_SEPARATOR);
let src_is_dir = has_suffix(src_path, SLASH_SEPARATOR_STR);
let dst_is_dir = has_suffix(dst_path, SLASH_SEPARATOR_STR);

if !src_is_dir && dst_is_dir || src_is_dir && !dst_is_dir {
warn!(
@@ -1696,8 +1693,8 @@ impl DiskAPI for LocalDisk {
.map_err(|e| to_access_error(e, DiskError::VolumeAccessDenied))?;
}

let src_is_dir = has_suffix(src_path, SLASH_SEPARATOR);
let dst_is_dir = has_suffix(dst_path, SLASH_SEPARATOR);
let src_is_dir = has_suffix(src_path, SLASH_SEPARATOR_STR);
let dst_is_dir = has_suffix(dst_path, SLASH_SEPARATOR_STR);
if (dst_is_dir || src_is_dir) && (!dst_is_dir || !src_is_dir) {
return Err(Error::from(DiskError::FileAccessDenied));
}
@@ -1848,12 +1845,12 @@ impl DiskAPI for LocalDisk {
}

let volume_dir = self.get_bucket_path(volume)?;
let dir_path_abs = self.get_object_path(volume, dir_path.trim_start_matches(SLASH_SEPARATOR))?;
let dir_path_abs = self.get_object_path(volume, dir_path.trim_start_matches(SLASH_SEPARATOR_STR))?;

let entries = match os::read_dir(&dir_path_abs, count).await {
Ok(res) => res,
Err(e) => {
if e.kind() == std::io::ErrorKind::NotFound
if e.kind() == ErrorKind::NotFound
&& !skip_access_checks(volume)
&& let Err(e) = access(&volume_dir).await
{
@@ -1884,11 +1881,11 @@ impl DiskAPI for LocalDisk {

let mut objs_returned = 0;

if opts.base_dir.ends_with(SLASH_SEPARATOR) {
if opts.base_dir.ends_with(SLASH_SEPARATOR_STR) {
let fpath = self.get_object_path(
&opts.bucket,
path_join_buf(&[
format!("{}{}", opts.base_dir.trim_end_matches(SLASH_SEPARATOR), GLOBAL_DIR_SUFFIX).as_str(),
format!("{}{}", opts.base_dir.trim_end_matches(SLASH_SEPARATOR_STR), GLOBAL_DIR_SUFFIX).as_str(),
STORAGE_FORMAT_FILE,
])
.as_str(),
@@ -2120,7 +2117,7 @@ impl DiskAPI for LocalDisk {
let volume_dir = self.get_bucket_path(volume)?;

if let Err(e) = access(&volume_dir).await {
if e.kind() == std::io::ErrorKind::NotFound {
if e.kind() == ErrorKind::NotFound {
os::make_dir_all(&volume_dir, self.root.as_path()).await?;
return Ok(());
}
@@ -2138,7 +2135,7 @@ impl DiskAPI for LocalDisk {
let entries = os::read_dir(&self.root, -1).await.map_err(to_volume_error)?;

for entry in entries {
if !has_suffix(&entry, SLASH_SEPARATOR) || !Self::is_valid_volname(clean(&entry).as_str()) {
if !has_suffix(&entry, SLASH_SEPARATOR_STR) || !Self::is_valid_volname(clean(&entry).as_str()) {
continue;
}

@@ -2360,7 +2357,7 @@ impl DiskAPI for LocalDisk {
force_del_marker: bool,
opts: DeleteOptions,
) -> Result<()> {
if path.starts_with(SLASH_SEPARATOR) {
if path.starts_with(SLASH_SEPARATOR_STR) {
return self
.delete(
volume,
@@ -2421,7 +2418,7 @@ impl DiskAPI for LocalDisk {
if !meta.versions.is_empty() {
let buf = meta.marshal_msg()?;
return self
.write_all_meta(volume, format!("{path}{SLASH_SEPARATOR}{STORAGE_FORMAT_FILE}").as_str(), &buf, true)
.write_all_meta(volume, format!("{path}{SLASH_SEPARATOR_STR}{STORAGE_FORMAT_FILE}").as_str(), &buf, true)
.await;
}

@@ -2431,11 +2428,11 @@ impl DiskAPI for LocalDisk {
{
let src_path = path_join(&[
file_path.as_path(),
Path::new(format!("{old_data_dir}{SLASH_SEPARATOR}{STORAGE_FORMAT_FILE_BACKUP}").as_str()),
Path::new(format!("{old_data_dir}{SLASH_SEPARATOR_STR}{STORAGE_FORMAT_FILE_BACKUP}").as_str()),
]);
let dst_path = path_join(&[
file_path.as_path(),
Path::new(format!("{path}{SLASH_SEPARATOR}{STORAGE_FORMAT_FILE}").as_str()),
Path::new(format!("{path}{SLASH_SEPARATOR_STR}{STORAGE_FORMAT_FILE}").as_str()),
]);
return rename_all(src_path, dst_path, file_path).await;
}
@@ -2564,7 +2561,7 @@ async fn get_disk_info(drive_path: PathBuf) -> Result<(rustfs_utils::os::DiskInf
if root_disk_threshold > 0 {
disk_info.total <= root_disk_threshold
} else {
is_root_disk(&drive_path, SLASH_SEPARATOR).unwrap_or_default()
is_root_disk(&drive_path, SLASH_SEPARATOR_STR).unwrap_or_default()
}
} else {
false
@@ -2582,7 +2579,7 @@ mod test {
// let arr = Vec::new();

let vols = [
super::super::RUSTFS_META_TMP_DELETED_BUCKET,
RUSTFS_META_TMP_DELETED_BUCKET,
super::super::RUSTFS_META_TMP_BUCKET,
super::super::RUSTFS_META_MULTIPART_BUCKET,
RUSTFS_META_BUCKET,
@@ -2610,9 +2607,7 @@ mod test {

let disk = LocalDisk::new(&ep, false).await.unwrap();

let tmpp = disk
.resolve_abs_path(Path::new(super::super::RUSTFS_META_TMP_DELETED_BUCKET))
.unwrap();
let tmpp = disk.resolve_abs_path(Path::new(RUSTFS_META_TMP_DELETED_BUCKET)).unwrap();

println!("ppp :{:?}", &tmpp);

@@ -2640,9 +2635,7 @@ mod test {

let disk = LocalDisk::new(&ep, false).await.unwrap();

let tmpp = disk
.resolve_abs_path(Path::new(super::super::RUSTFS_META_TMP_DELETED_BUCKET))
.unwrap();
let tmpp = disk.resolve_abs_path(Path::new(RUSTFS_META_TMP_DELETED_BUCKET)).unwrap();

println!("ppp :{:?}", &tmpp);


@@ -12,19 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::disk::error::DiskError;
use crate::disk::error::Result;
use crate::disk::error_conv::to_file_error;
use rustfs_utils::path::SLASH_SEPARATOR_STR;
use std::{
io,
path::{Component, Path},
};

use super::error::Result;
use crate::disk::error_conv::to_file_error;
use rustfs_utils::path::SLASH_SEPARATOR;
use tokio::fs;
use tracing::warn;

use super::error::DiskError;

/// Check path length according to OS limits.
pub fn check_path_length(path_name: &str) -> Result<()> {
// Apple OS X path length is limited to 1016
@@ -118,7 +116,7 @@ pub async fn read_dir(path: impl AsRef<Path>, count: i32) -> std::io::Result<Vec
if file_type.is_file() {
volumes.push(name);
} else if file_type.is_dir() {
volumes.push(format!("{name}{SLASH_SEPARATOR}"));
volumes.push(format!("{name}{SLASH_SEPARATOR_STR}"));
}
count -= 1;
if count == 0 {

@@ -12,19 +12,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use rustfs_utils::{XHost, check_local_server_addr, get_host_ip, is_local_host};
use tracing::{error, info, instrument, warn};

use crate::{
disk::endpoint::{Endpoint, EndpointType},
disks_layout::DisksLayout,
global::global_rustfs_port,
};
use std::io::{Error, Result};
use rustfs_utils::{XHost, check_local_server_addr, get_host_ip, is_local_host};
use std::{
collections::{HashMap, HashSet, hash_map::Entry},
io::{Error, Result},
net::IpAddr,
};
use tracing::{error, info, instrument, warn};

/// enum for setup type.
#[derive(PartialEq, Eq, Debug, Clone)]

@@ -12,10 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use super::BitrotReader;
use super::Erasure;
use crate::disk::error::Error;
use crate::disk::error_reduce::reduce_errs;
use crate::erasure_coding::{BitrotReader, Erasure};
use futures::stream::{FuturesUnordered, StreamExt};
use pin_project_lite::pin_project;
use std::io;
@@ -312,11 +311,12 @@ impl Erasure {

#[cfg(test)]
mod tests {
use rustfs_utils::HashAlgorithm;

use crate::{disk::error::DiskError, erasure_coding::BitrotWriter};

use super::*;
use crate::{
disk::error::DiskError,
erasure_coding::{BitrotReader, BitrotWriter},
};
use rustfs_utils::HashAlgorithm;
use std::io::Cursor;

#[tokio::test]

@@ -12,11 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use super::BitrotWriterWrapper;
use super::Erasure;
use crate::disk::error::Error;
use crate::disk::error_reduce::count_errs;
use crate::disk::error_reduce::{OBJECT_OP_IGNORED_ERRS, reduce_write_quorum_errs};
use crate::erasure_coding::BitrotWriterWrapper;
use crate::erasure_coding::Erasure;
use bytes::Bytes;
use futures::StreamExt;
use futures::stream::FuturesUnordered;

@@ -12,10 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use super::BitrotReader;
use super::BitrotWriterWrapper;
use super::decode::ParallelReader;
use crate::disk::error::{Error, Result};
use crate::erasure_coding::BitrotReader;
use crate::erasure_coding::BitrotWriterWrapper;
use crate::erasure_coding::decode::ParallelReader;
use crate::erasure_coding::encode::MultiWriter;
use bytes::Bytes;
use tokio::io::AsyncRead;

@@ -12,12 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.

mod bitrot;
pub mod decode;
pub mod encode;
pub mod erasure;
pub mod heal;

mod bitrot;
pub use bitrot::*;

pub use erasure::{Erasure, ReedSolomonEncoder, calc_shard_size};

@@ -12,12 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use s3s::{S3Error, S3ErrorCode};

use rustfs_utils::path::decode_dir_object;

use crate::bucket::error::BucketMetadataError;
use crate::disk::error::DiskError;
use rustfs_utils::path::decode_dir_object;
use s3s::{S3Error, S3ErrorCode};

pub type Error = StorageError;
pub type Result<T> = core::result::Result<T, Error>;

@@ -12,10 +12,9 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::event::targetid::TargetID;
use std::sync::atomic::AtomicI64;

use super::targetid::TargetID;

#[derive(Default)]
pub struct TargetList {
pub current_send_calls: AtomicI64,

@@ -14,15 +14,14 @@
// limitations under the License.
#![allow(unused_variables)]

use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;

use crate::bucket::metadata::BucketMetadata;
use crate::event::name::EventName;
use crate::event::targetlist::TargetList;
use crate::store::ECStore;
use crate::store_api::ObjectInfo;
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::RwLock;

pub struct EventNotifier {
target_list: TargetList,

@@ -12,12 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::{
admin_server_info::get_local_server_property,
new_object_layer_fn,
store_api::StorageAPI,
// utils::os::get_drive_stats,
};
use crate::{admin_server_info::get_local_server_property, new_object_layer_fn, store_api::StorageAPI};
use chrono::Utc;
use rustfs_common::{GLOBAL_LOCAL_NODE_NAME, GLOBAL_RUSTFS_ADDR, heal_channel::DriveState, metrics::global_metrics};
use rustfs_madmin::metrics::{DiskIOStats, DiskMetric, RealtimeMetrics};

@@ -38,7 +38,7 @@ use rustfs_common::defer;
use rustfs_common::heal_channel::HealOpts;
use rustfs_filemeta::{MetaCacheEntries, MetaCacheEntry, MetadataResolutionParams};
use rustfs_rio::{HashReader, WarpReader};
use rustfs_utils::path::{SLASH_SEPARATOR, encode_dir_object, path_join};
use rustfs_utils::path::{SLASH_SEPARATOR_STR, encode_dir_object, path_join};
use rustfs_workers::workers::Workers;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;
@@ -451,10 +451,10 @@ fn path2_bucket_object_with_base_path(base_path: &str, path: &str) -> (String, S
let trimmed_path = path
.strip_prefix(base_path)
.unwrap_or(path)
.strip_prefix(SLASH_SEPARATOR)
.strip_prefix(SLASH_SEPARATOR_STR)
.unwrap_or(path);
// Find the position of the first '/'
let pos = trimmed_path.find(SLASH_SEPARATOR).unwrap_or(trimmed_path.len());
let pos = trimmed_path.find(SLASH_SEPARATOR_STR).unwrap_or(trimmed_path.len());
// Split into bucket and prefix
let bucket = &trimmed_path[0..pos];
let prefix = &trimmed_path[pos + 1..]; // +1 to skip the '/' character if it exists

@@ -12,16 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::error::Error;

use crate::rpc::{TONIC_RPC_PREFIX, gen_signature_headers};
use http::Method;
use rustfs_common::GLOBAL_CONN_MAP;
use rustfs_protos::{create_new_channel, proto_gen::node_service::node_service_client::NodeServiceClient};
use std::error::Error;
use tonic::{service::interceptor::InterceptedService, transport::Channel};
use tracing::debug;

use crate::rpc::{TONIC_RPC_PREFIX, gen_signature_headers};

/// 3. Subsequent calls will attempt fresh connections
/// 4. If node is still down, connection will fail fast (3s timeout)
pub async fn node_service_time_out_client(

@@ -108,14 +108,19 @@ pub fn verify_rpc_signature(url: &str, method: &Method, headers: &HeaderMap) ->
}

// Generate expected signature

let expected_signature = generate_signature(&secret, url, method, timestamp);

// Compare signatures
if signature != expected_signature {
error!(
"verify_rpc_signature: Invalid signature: secret {}, url {}, method {}, timestamp {}, signature {}, expected_signature {}",
secret, url, method, timestamp, signature, expected_signature
"verify_rpc_signature: Invalid signature: url {}, method {}, timestamp {}, signature {}, expected_signature: {}***{}|{}",
url,
method,
timestamp,
signature,
expected_signature.chars().next().unwrap_or('*'),
expected_signature.chars().last().unwrap_or('*'),
expected_signature.len()
);

return Err(std::io::Error::other("Invalid signature"));
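
The rewritten log line above stops printing the shared secret and the full expected signature, emitting only the signature's first and last characters plus its length. A minimal standalone sketch of that masking logic (the helper name is hypothetical; the diff inlines the same expression at the call site):

```rust
/// Mask a sensitive value for logging: first char, last char, and length only.
/// Hypothetical helper mirroring the inlined `{}***{}|{}` format used above.
fn mask_for_log(value: &str) -> String {
    let first = value.chars().next().unwrap_or('*');
    let last = value.chars().last().unwrap_or('*');
    format!("{first}***{last}|{}", value.len())
}

fn main() {
    // "deadbeef" -> "d***f|8": enough to correlate log entries without leaking the value.
    assert_eq!(mask_for_log("deadbeef"), "d***f|8");
}
```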

@@ -27,7 +27,6 @@ use rustfs_madmin::{
net::NetInfo,
};
use rustfs_protos::evict_failed_connection;
use rustfs_protos::proto_gen::node_service::node_service_client::NodeServiceClient;
use rustfs_protos::proto_gen::node_service::{
DeleteBucketMetadataRequest, DeletePolicyRequest, DeleteServiceAccountRequest, DeleteUserRequest, GetCpusRequest,
GetMemInfoRequest, GetMetricsRequest, GetNetInfoRequest, GetOsInfoRequest, GetPartitionsRequest, GetProcInfoRequest,
@@ -35,6 +34,7 @@ use rustfs_protos::proto_gen::node_service::{
LoadPolicyMappingRequest, LoadPolicyRequest, LoadRebalanceMetaRequest, LoadServiceAccountRequest,
LoadTransitionTierConfigRequest, LoadUserRequest, LocalStorageInfoRequest, Mss, ReloadPoolMetaRequest,
ReloadSiteReplicationConfigRequest, ServerInfoRequest, SignalServiceRequest, StartProfilingRequest, StopRebalanceRequest,
node_service_client::NodeServiceClient,
};
use rustfs_utils::XHost;
use serde::{Deserialize, Serialize as _};

@@ -12,15 +12,29 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::{
path::PathBuf,
sync::{Arc, atomic::Ordering},
time::Duration,
use crate::{
disk::{
CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskOption, FileInfoVersions,
FileReader, FileWriter, ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, UpdateMetadataOpts, VolumeInfo,
WalkDirOptions,
disk_store::{
CHECK_EVERY, CHECK_TIMEOUT_DURATION, ENV_RUSTFS_DRIVE_ACTIVE_MONITORING, SKIP_IF_SUCCESS_BEFORE,
get_max_timeout_duration,
},
endpoint::Endpoint,
{
disk_store::DiskHealthTracker,
error::{DiskError, Error, Result},
},
},
rpc::build_auth_headers,
rpc::client::{TonicInterceptor, gen_tonic_signature_interceptor, node_service_time_out_client},
};

use bytes::Bytes;
use futures::lock::Mutex;
use http::{HeaderMap, HeaderValue, Method, header::CONTENT_TYPE};
use rustfs_filemeta::{FileInfo, ObjectPartInfo, RawFileInfo};
use rustfs_protos::proto_gen::node_service::RenamePartRequest;
use rustfs_protos::proto_gen::node_service::{
CheckPartsRequest, DeletePathsRequest, DeleteRequest, DeleteVersionRequest, DeleteVersionsRequest, DeleteVolumeRequest,
DiskInfoRequest, ListDirRequest, ListVolumesRequest, MakeVolumeRequest, MakeVolumesRequest, ReadAllRequest,
@@ -28,37 +42,18 @@ use rustfs_protos::proto_gen::node_service::{
StatVolumeRequest, UpdateMetadataRequest, VerifyFileRequest, WriteAllRequest, WriteMetadataRequest,
node_service_client::NodeServiceClient,
};
use rustfs_utils::string::parse_bool_with_default;
use tokio::time;
use tokio_util::sync::CancellationToken;
use tracing::{debug, info, warn};

use crate::disk::{disk_store::DiskHealthTracker, error::DiskError};
use crate::{
disk::error::{Error, Result},
rpc::build_auth_headers,
};
use crate::{
disk::{
CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskOption, FileInfoVersions,
ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, UpdateMetadataOpts, VolumeInfo, WalkDirOptions,
disk_store::{
CHECK_EVERY, CHECK_TIMEOUT_DURATION, ENV_RUSTFS_DRIVE_ACTIVE_MONITORING, SKIP_IF_SUCCESS_BEFORE,
get_max_timeout_duration,
},
endpoint::Endpoint,
},
rpc::client::gen_tonic_signature_interceptor,
};
use crate::{
disk::{FileReader, FileWriter},
rpc::client::{TonicInterceptor, node_service_time_out_client},
};
use rustfs_filemeta::{FileInfo, ObjectPartInfo, RawFileInfo};
use rustfs_protos::proto_gen::node_service::RenamePartRequest;
use rustfs_rio::{HttpReader, HttpWriter};
use rustfs_utils::string::parse_bool_with_default;
use std::{
path::PathBuf,
sync::{Arc, atomic::Ordering},
time::Duration,
};
use tokio::time;
use tokio::{io::AsyncWrite, net::TcpStream, time::timeout};
use tokio_util::sync::CancellationToken;
use tonic::{Request, service::interceptor::InterceptedService, transport::Channel};
use tracing::{debug, info, warn};
use uuid::Uuid;

#[derive(Debug)]

@@ -14,9 +14,10 @@

use crate::rpc::client::{TonicInterceptor, gen_tonic_signature_interceptor, node_service_time_out_client};
use async_trait::async_trait;
use rustfs_lock::types::{LockId, LockMetadata, LockPriority};
use rustfs_lock::{LockClient, LockError, LockInfo, LockResponse, LockStats, LockStatus, Result};
use rustfs_lock::{LockRequest, LockType};
use rustfs_lock::{
LockClient, LockError, LockInfo, LockRequest, LockResponse, LockStats, LockStatus, LockType, Result,
types::{LockId, LockMetadata, LockPriority},
};
use rustfs_protos::proto_gen::node_service::node_service_client::NodeServiceClient;
use rustfs_protos::proto_gen::node_service::{GenerallyLockRequest, PingRequest};
use std::collections::HashMap;

@@ -82,7 +82,7 @@ use rustfs_utils::http::headers::{AMZ_OBJECT_TAGGING, RESERVED_METADATA_PREFIX,
use rustfs_utils::{
HashAlgorithm,
crypto::hex,
path::{SLASH_SEPARATOR, encode_dir_object, has_suffix, path_join_buf},
path::{SLASH_SEPARATOR_STR, encode_dir_object, has_suffix, path_join_buf},
};
use rustfs_workers::workers::Workers;
use s3s::header::X_AMZ_RESTORE;
@@ -4614,7 +4614,9 @@ impl StorageAPI for SetDisks {
.await
.map_err(|e| to_object_err(e, vec![bucket, object]))?;

Ok(ObjectInfo::from_file_info(&dfi, bucket, object, opts.versioned || opts.version_suspended))
let mut obj_info = ObjectInfo::from_file_info(&dfi, bucket, object, opts.versioned || opts.version_suspended);
obj_info.size = goi.size;
Ok(obj_info)
}

#[tracing::instrument(skip(self))]
@@ -5324,7 +5326,7 @@ impl StorageAPI for SetDisks {
&upload_id_path,
fi.data_dir.map(|v| v.to_string()).unwrap_or_default().as_str(),
]),
SLASH_SEPARATOR
SLASH_SEPARATOR_STR
);

let mut part_numbers = match Self::list_parts(&online_disks, &part_path, read_quorum).await {
@@ -5462,7 +5464,7 @@ impl StorageAPI for SetDisks {
let mut populated_upload_ids = HashSet::new();

for upload_id in upload_ids.iter() {
let upload_id = upload_id.trim_end_matches(SLASH_SEPARATOR).to_string();
let upload_id = upload_id.trim_end_matches(SLASH_SEPARATOR_STR).to_string();
if populated_upload_ids.contains(&upload_id) {
continue;
}
@@ -6222,7 +6224,7 @@ impl StorageAPI for SetDisks {
None
};

if has_suffix(object, SLASH_SEPARATOR) {
if has_suffix(object, SLASH_SEPARATOR_STR) {
let (result, err) = self.heal_object_dir_locked(bucket, object, opts.dry_run, opts.remove).await?;
return Ok((result, err.map(|e| e.into())));
}

@@ -13,8 +13,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::{collections::HashMap, sync::Arc};

use crate::disk::error_reduce::count_errs;
use crate::error::{Error, Result};
use crate::store_api::{ListPartsInfo, ObjectInfoOrErr, WalkOptions};
@@ -44,17 +42,16 @@ use rustfs_common::{
heal_channel::{DriveState, HealItemType},
};
use rustfs_filemeta::FileInfo;

use rustfs_madmin::heal_commands::{HealDriveInfo, HealResultItem};
use rustfs_utils::{crc_hash, path::path_join_buf, sip_hash};
use std::{collections::HashMap, sync::Arc};
use tokio::sync::RwLock;
use tokio_util::sync::CancellationToken;
use uuid::Uuid;

use tokio::sync::broadcast::{Receiver, Sender};
use tokio::time::Duration;
use tokio_util::sync::CancellationToken;
use tracing::warn;
use tracing::{error, info};
use uuid::Uuid;

#[derive(Debug, Clone)]
pub struct Sets {

@@ -1825,16 +1825,16 @@ impl StorageAPI for ECStore {
if self.is_suspended(pool.pool_idx).await {
continue;
}
match pool
return match pool
.list_object_parts(bucket, object, upload_id, part_number_marker, max_parts, opts)
.await
{
Ok(res) => return Ok(res),
Ok(res) => Ok(res),
Err(err) => {
if is_err_invalid_upload_id(&err) {
continue;
}
return Err(err);
Err(err)
}
};
}
@@ -2204,7 +2204,7 @@ impl StorageAPI for ECStore {
async fn delete_object_version(&self, bucket: &str, object: &str, fi: &FileInfo, force_del_marker: bool) -> Result<()> {
check_del_obj_args(bucket, object)?;

let object = rustfs_utils::path::encode_dir_object(object);
let object = encode_dir_object(object);

if self.single_pool() {
return self.pools[0]
@@ -2324,17 +2324,15 @@ impl StorageAPI for ECStore {

// No pool returned a nil error, return the first non 'not found' error
for (index, err) in errs.iter().enumerate() {
match err {
return match err {
Some(err) => {
if is_err_object_not_found(err) || is_err_version_not_found(err) {
continue;
}
return Ok((ress.remove(index), Some(err.clone())));
Ok((ress.remove(index), Some(err.clone())))
}
None => {
return Ok((ress.remove(index), None));
}
}
None => Ok((ress.remove(index), None)),
};
}

// At this stage, all errors are 'not found'

@@ -27,7 +27,6 @@ use crate::{
};
use futures::future::join_all;
use std::collections::{HashMap, hash_map::Entry};

use tracing::{info, warn};
use uuid::Uuid;


@@ -34,7 +34,7 @@ use rustfs_filemeta::{
MetaCacheEntries, MetaCacheEntriesSorted, MetaCacheEntriesSortedResult, MetaCacheEntry, MetadataResolutionParams,
merge_file_meta_versions,
};
use rustfs_utils::path::{self, SLASH_SEPARATOR, base_dir_from_prefix};
use rustfs_utils::path::{self, SLASH_SEPARATOR_STR, base_dir_from_prefix};
use std::collections::HashMap;
use std::sync::Arc;
use tokio::sync::broadcast::{self};
@@ -132,7 +132,7 @@ impl ListPathOptions {
return;
}

let s = SLASH_SEPARATOR.chars().next().unwrap_or_default();
let s = SLASH_SEPARATOR_STR.chars().next().unwrap_or_default();
self.filter_prefix = {
let fp = self.prefix.trim_start_matches(&self.base_dir).trim_matches(s);

@@ -346,7 +346,7 @@ impl ECStore {
if let Some(delimiter) = &delimiter {
if obj.is_dir && obj.mod_time.is_none() {
let mut found = false;
if delimiter != SLASH_SEPARATOR {
if delimiter != SLASH_SEPARATOR_STR {
for p in prefixes.iter() {
if found {
break;
@@ -410,13 +410,13 @@ impl ECStore {
..Default::default()
};

let mut list_result = match self.list_path(&opts).await {
Ok(res) => res,
Err(err) => MetaCacheEntriesSortedResult {
let mut list_result = self
.list_path(&opts)
.await
.unwrap_or_else(|err| MetaCacheEntriesSortedResult {
err: Some(err.into()),
..Default::default()
},
};
});

if let Some(err) = list_result.err.clone()
&& err != rustfs_filemeta::Error::Unexpected
@@ -470,7 +470,7 @@ impl ECStore {
if let Some(delimiter) = &delimiter {
if obj.is_dir && obj.mod_time.is_none() {
let mut found = false;
if delimiter != SLASH_SEPARATOR {
if delimiter != SLASH_SEPARATOR_STR {
for p in prefixes.iter() {
if found {
break;
@@ -502,7 +502,7 @@ impl ECStore {
// warn!("list_path opt {:?}", &o);

check_list_objs_args(&o.bucket, &o.prefix, &o.marker)?;
// if opts.prefix.ends_with(SLASH_SEPARATOR) {
// if opts.prefix.ends_with(SLASH_SEPARATOR_STR) {
// return Err(Error::msg("eof"));
// }

@@ -520,11 +520,11 @@ impl ECStore {
return Err(Error::Unexpected);
}

if o.prefix.starts_with(SLASH_SEPARATOR) {
if o.prefix.starts_with(SLASH_SEPARATOR_STR) {
return Err(Error::Unexpected);
}

let slash_separator = Some(SLASH_SEPARATOR.to_owned());
let slash_separator = Some(SLASH_SEPARATOR_STR.to_owned());

o.include_directories = o.separator == slash_separator;

@@ -774,8 +774,8 @@ impl ECStore {
let mut filter_prefix = {
prefix
.trim_start_matches(&path)
.trim_start_matches(SLASH_SEPARATOR)
.trim_end_matches(SLASH_SEPARATOR)
.trim_start_matches(SLASH_SEPARATOR_STR)
.trim_end_matches(SLASH_SEPARATOR_STR)
.to_owned()
};

@@ -988,7 +988,7 @@ async fn gather_results(
}

if let Some(marker) = &opts.marker
&& &entry.name < marker
&& &entry.name <= marker
{
continue;
}
@@ -1130,7 +1130,7 @@ async fn merge_entry_channels(
if path::clean(&best_entry.name) == path::clean(&other_entry.name) {
let dir_matches = best_entry.is_dir() && other_entry.is_dir();
let suffix_matches =
best_entry.name.ends_with(SLASH_SEPARATOR) == other_entry.name.ends_with(SLASH_SEPARATOR);
best_entry.name.ends_with(SLASH_SEPARATOR_STR) == other_entry.name.ends_with(SLASH_SEPARATOR_STR);

if dir_matches && suffix_matches {
to_merge.push(other_idx);
@@ -1476,7 +1476,6 @@ mod test {
// use crate::error::Error;
// use crate::metacache::writer::MetacacheReader;
// use crate::set_disk::SetDisks;
// use crate::store::ECStore;
// use crate::store_list_objects::ListPathOptions;
// use crate::store_list_objects::WalkOptions;
// use crate::store_list_objects::WalkVersionsSortOrder;

@@ -51,7 +51,7 @@ use crate::{
store_api::{ObjectOptions, PutObjReader},
};
use rustfs_rio::HashReader;
use rustfs_utils::path::{SLASH_SEPARATOR, path_join};
use rustfs_utils::path::{SLASH_SEPARATOR_STR, path_join};
use s3s::S3ErrorCode;

use super::{
@@ -403,7 +403,7 @@ impl TierConfigMgr {
pub async fn save_tiering_config<S: StorageAPI>(&self, api: Arc<S>) -> std::result::Result<(), std::io::Error> {
let data = self.marshal()?;

let config_file = format!("{}{}{}", CONFIG_PREFIX, SLASH_SEPARATOR, TIER_CONFIG_FILE);
let config_file = format!("{}{}{}", CONFIG_PREFIX, SLASH_SEPARATOR_STR, TIER_CONFIG_FILE);

self.save_config(api, &config_file, data).await
}
@@ -483,7 +483,7 @@ async fn new_and_save_tiering_config<S: StorageAPI>(api: Arc<S>) -> Result<TierC

#[tracing::instrument(level = "debug", name = "load_tier_config", skip(api))]
async fn load_tier_config(api: Arc<ECStore>) -> std::result::Result<TierConfigMgr, std::io::Error> {
let config_file = format!("{}{}{}", CONFIG_PREFIX, SLASH_SEPARATOR, TIER_CONFIG_FILE);
let config_file = format!("{}{}{}", CONFIG_PREFIX, SLASH_SEPARATOR_STR, TIER_CONFIG_FILE);
let data = read_config(api.clone(), config_file.as_str()).await;
if let Err(err) = data {
if is_err_config_not_found(&err) {

@@ -30,13 +30,11 @@ use crate::client::{
transition_api::{Options, TransitionClient, TransitionCore},
transition_api::{ReadCloser, ReaderImpl},
};
use crate::error::ErrorResponse;
use crate::error::error_resp_to_object_err;
use crate::tier::{
tier_config::TierS3,
warm_backend::{WarmBackend, WarmBackendGetOpts},
};
use rustfs_utils::path::SLASH_SEPARATOR;
use rustfs_utils::path::SLASH_SEPARATOR_STR;

pub struct WarmBackendS3 {
pub client: Arc<TransitionClient>,
@@ -178,7 +176,7 @@ impl WarmBackend for WarmBackendS3 {
async fn in_use(&self) -> Result<bool, std::io::Error> {
let result = self
.core
.list_objects_v2(&self.bucket, &self.prefix, "", "", SLASH_SEPARATOR, 1)
.list_objects_v2(&self.bucket, &self.prefix, "", "", SLASH_SEPARATOR_STR, 1)
.await?;

Ok(result.common_prefixes.len() > 0 || result.contents.len() > 0)

@@ -27,19 +27,11 @@ use aws_sdk_s3::Client;
use aws_sdk_s3::config::{Credentials, Region};
use aws_sdk_s3::primitives::ByteStream;

use crate::client::{
api_get_options::GetObjectOptions,
api_put_object::PutObjectOptions,
api_remove::RemoveObjectOptions,
transition_api::{ReadCloser, ReaderImpl},
};
use crate::error::ErrorResponse;
use crate::error::error_resp_to_object_err;
use crate::client::transition_api::{ReadCloser, ReaderImpl};
use crate::tier::{
tier_config::TierS3,
warm_backend::{WarmBackend, WarmBackendGetOpts},
};
use rustfs_utils::path::SLASH_SEPARATOR;

pub struct WarmBackendS3 {
pub client: Arc<Client>,

@@ -32,7 +32,7 @@ use rustfs_ecstore::{
store_api::{ObjectInfo, ObjectOptions},
};
use rustfs_policy::{auth::UserIdentity, policy::PolicyDoc};
use rustfs_utils::path::{SLASH_SEPARATOR, path_join_buf};
use rustfs_utils::path::{SLASH_SEPARATOR_STR, path_join_buf};
use serde::{Serialize, de::DeserializeOwned};
use std::sync::LazyLock;
use std::{collections::HashMap, sync::Arc};
@@ -182,7 +182,7 @@ impl ObjectStore {
} else {
info.name
};
let name = object_name.trim_start_matches(&prefix).trim_end_matches(SLASH_SEPARATOR);
let name = object_name.trim_start_matches(&prefix).trim_end_matches(SLASH_SEPARATOR_STR);
let _ = sender
.send(StringOrErr {
item: Some(name.to_owned()),

@@ -13,6 +13,7 @@
// limitations under the License.

use crate::notification_system_subscriber::NotificationSystemSubscriberView;
use crate::notifier::TargetList;
use crate::{
Event, error::NotificationError, notifier::EventNotifier, registry::TargetRegistry, rules::BucketNotificationConfig, stream,
};
@@ -191,6 +192,22 @@ impl NotificationSystem {
self.notifier.target_list().read().await.keys()
}

/// Gets the complete Target list, including both active and inactive Targets.
///
/// # Return
/// An `Arc<RwLock<TargetList>>` containing all Targets.
pub async fn get_all_targets(&self) -> Arc<RwLock<TargetList>> {
self.notifier.target_list()
}

/// Gets all Target values, including both active and inactive Targets.
///
/// # Return
/// A Vec containing all Targets.
pub async fn get_target_values(&self) -> Vec<Arc<dyn Target<Event> + Send + Sync>> {
self.notifier.target_list().read().await.values()
}

/// Checks if there are active subscribers for the given bucket and event name.
pub async fn has_subscriber(&self, bucket: &str, event: &EventName) -> bool {
if !self.subscriber_view.has_subscriber(bucket, event) {

@@ -370,6 +370,11 @@ impl TargetList {
self.targets.keys().cloned().collect()
}

/// Returns all targets in the list
pub fn values(&self) -> Vec<Arc<dyn Target<Event> + Send + Sync>> {
self.targets.values().cloned().collect()
}

/// Returns the number of targets
pub fn len(&self) -> usize {
self.targets.len()

@@ -22,8 +22,8 @@ use strum::{EnumString, IntoStaticStr};

use super::{Error as IamError, Validator, utils::wildcard};

/// A set of policy actions that serializes as a single string when containing one item,
/// or as an array when containing multiple items (matching AWS S3 API format).
/// A set of policy actions that always serializes as an array of strings,
/// conforming to the S3 policy specification for consistency and compatibility.
#[derive(Clone, Default, Debug)]
pub struct ActionSet(pub HashSet<Action>);

@@ -34,15 +34,8 @@ impl Serialize for ActionSet {
{
use serde::ser::SerializeSeq;

if self.0.len() == 1 {
// Serialize single action as string (not array)
if let Some(action) = self.0.iter().next() {
let action_str: &str = action.into();
return serializer.serialize_str(action_str);
}
}

// Serialize multiple actions as array
// Always serialize as array, even for single action, to match S3 specification
// and ensure compatibility with AWS SDK clients that expect array format
let mut seq = serializer.serialize_seq(Some(self.0.len()))?;
for action in &self.0 {
let action_str: &str = action.into();
@@ -610,13 +603,17 @@ mod tests {

#[test]
fn test_actionset_serialize_single_element() {
// Single element should serialize as string
// Single element should serialize as array for S3 specification compliance
let mut set = HashSet::new();
set.insert(Action::S3Action(S3Action::GetObjectAction));
let actionset = ActionSet(set);

let json = serde_json::to_string(&actionset).expect("Should serialize");
assert_eq!(json, "\"s3:GetObject\"");
let parsed: serde_json::Value = serde_json::from_str(&json).expect("Should parse");
assert!(parsed.is_array(), "Should serialize as array");
let arr = parsed.as_array().expect("Should be array");
assert_eq!(arr.len(), 1);
assert_eq!(arr[0].as_str().unwrap(), "s3:GetObject");
}

#[test]
@@ -636,12 +633,16 @@ mod tests {

#[test]
fn test_actionset_wildcard_serialization() {
// Wildcard action should serialize correctly
// Wildcard action should serialize as array for S3 specification compliance
let mut set = HashSet::new();
set.insert(Action::try_from("*").expect("Should parse wildcard"));
let actionset = ActionSet(set);

let json = serde_json::to_string(&actionset).expect("Should serialize");
assert_eq!(json, "\"s3:*\"");
let parsed: serde_json::Value = serde_json::from_str(&json).expect("Should parse");
assert!(parsed.is_array(), "Should serialize as array");
let arr = parsed.as_array().expect("Should be array");
assert_eq!(arr.len(), 1);
assert_eq!(arr[0].as_str().unwrap(), "s3:*");
}
}

@@ -1119,7 +1119,7 @@ mod test {
}

#[test]
fn test_bucket_policy_serialize_single_action_as_string() {
fn test_bucket_policy_serialize_single_action_as_array() {
use crate::policy::action::{Action, ActionSet, S3Action};
use crate::policy::resource::{Resource, ResourceSet};
use crate::policy::{Effect, Principal};
@@ -1153,8 +1153,10 @@ mod test {
let parsed: serde_json::Value = serde_json::from_str(&json).expect("Should parse");
let action = &parsed["Statement"][0]["Action"];

// Single action should be serialized as string
assert!(action.is_string(), "Single action should serialize as string");
assert_eq!(action.as_str().unwrap(), "s3:ListBucket");
// Single action should be serialized as array for S3 specification compliance
assert!(action.is_array(), "Single action should serialize as array");
let arr = action.as_array().expect("Should be array");
assert_eq!(arr.len(), 1);
assert_eq!(arr[0].as_str().unwrap(), "s3:ListBucket");
}
}

33
crates/trusted-proxies/.env.example
Normal file
@@ -0,0 +1,33 @@
# Server Configuration
SERVER_HOST=0.0.0.0
SERVER_PORT=3000

# Trusted Proxy Configuration
TRUSTED_PROXY_NETWORKS=127.0.0.1,::1,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,fd00::/8
TRUSTED_PROXY_EXTRA_NETWORKS=
TRUSTED_PROXY_VALIDATION_MODE=hop_by_hop
TRUSTED_PROXY_ENABLE_RFC7239=true
TRUSTED_PROXY_MAX_HOPS=10
TRUSTED_PROXY_CHAIN_CONTINUITY_CHECK=true
TRUSTED_PROXY_LOG_FAILED_VALIDATIONS=true

# Cache Configuration
TRUSTED_PROXY_CACHE_CAPACITY=10000
TRUSTED_PROXY_CACHE_TTL_SECONDS=300
TRUSTED_PROXY_CACHE_CLEANUP_INTERVAL=60

# Monitoring Configuration
TRUSTED_PROXY_METRICS_ENABLED=true
TRUSTED_PROXY_LOG_LEVEL=info
TRUSTED_PROXY_STRUCTURED_LOGGING=false
TRUSTED_PROXY_TRACING_ENABLED=true

# Cloud Integration
TRUSTED_PROXY_CLOUD_METADATA_ENABLED=false
TRUSTED_PROXY_CLOUD_METADATA_TIMEOUT=5
TRUSTED_PROXY_CLOUDFLARE_IPS_ENABLED=false
TRUSTED_PROXY_CLOUD_PROVIDER_FORCE=

# Application
RUST_LOG=info
RUST_BACKTRACE=1
63
crates/trusted-proxies/Cargo.toml
Normal file
@@ -0,0 +1,63 @@
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

[package]
name = "rustfs-trusted-proxies"
edition.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true
homepage.workspace = true
description = " RustFS Trusted Proxies module provides secure and efficient management of trusted proxy servers within the RustFS ecosystem, enhancing network security and performance."
|
||||
keywords = ["trusted-proxies", "network-security", "rustfs", "proxy-management"]
categories = ["network-programming", "security", "web-programming"]

[dependencies]
async-trait = { workspace = true }
axum = { workspace = true }
chrono = { workspace = true }
http = { workspace = true }
tower-http = { workspace = true }
ipnetwork = { workspace = true }
metrics = { workspace = true }
moka = { workspace = true, features = ["future"] }
reqwest = { workspace = true }
rustfs-utils = { workspace = true }
serde.workspace = true
serde_json.workspace = true
thiserror = { workspace = true }
tokio = { workspace = true, features = ["macros", "rt-multi-thread", "sync", "time", "test-util"] }
tower = { workspace = true }
tracing = { workspace = true }
tracing-subscriber = { workspace = true }
uuid = { workspace = true, features = ["v4"] }
regex = { workspace = true }
lazy_static = { workspace = true }
dotenvy = "0.15.7"

[dev-dependencies]
tokio = { workspace = true, features = ["full", "test-util"] }
tower = { workspace = true, features = ["util"] }

[lints]
workspace = true

[[test]]
name = "unit_tests"
path = "tests/unit/mod.rs"

[[test]]
name = "integration_tests"
path = "tests/integration/mod.rs"
71
crates/trusted-proxies/README.md
Normal file
@@ -0,0 +1,71 @@
# RustFS Trusted Proxies

The `rustfs-trusted-proxies` module provides secure and efficient management of trusted proxy servers within the RustFS ecosystem. It is designed to handle multi-layer proxy architectures, ensuring accurate client IP identification while maintaining a zero-trust security model.

## Features

- **Multi-Layer Proxy Validation**: Supports `Strict`, `Lenient`, and `HopByHop` validation modes to accurately identify the real client IP address.
- **Zero-Trust Security**: Verifies every hop in the proxy chain against a configurable list of trusted networks.
- **Cloud Integration**: Automatic discovery of trusted IP ranges for major cloud providers including AWS, Azure, and GCP.
- **High Performance**: Utilizes the `moka` cache for fast lookup of validation results and `axum` for a high-performance web interface.
- **Observability**: Built-in support for Prometheus metrics and structured JSON logging via `tracing`.
- **RFC 7239 Support**: Full support for the modern `Forwarded` header alongside legacy `X-Forwarded-For` headers.

## Configuration

The module is configured primarily through environment variables:

| Variable | Default | Description |
|----------|---------|-------------|
| `TRUSTED_PROXY_VALIDATION_MODE` | `hop_by_hop` | Validation strategy (`strict`, `lenient`, `hop_by_hop`) |
| `TRUSTED_PROXY_NETWORKS` | `127.0.0.1,::1,...` | Comma-separated list of trusted CIDR ranges |
| `TRUSTED_PROXY_MAX_HOPS` | `10` | Maximum allowed proxy hops |
| `TRUSTED_PROXY_CACHE_CAPACITY` | `10000` | Max entries in the validation cache |
| `TRUSTED_PROXY_METRICS_ENABLED` | `true` | Enable Prometheus metrics collection |
| `TRUSTED_PROXY_CLOUD_METADATA_ENABLED` | `false` | Enable auto-discovery of cloud IP ranges |

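The crate's `.env.example` file (included alongside this README) lists the full set of supported variables, including the cache, monitoring, and cloud options not shown in the table above.
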
## Usage

### As a Middleware

Integrate the trusted proxy validation into your Axum application:

```rust
use axum::{Router, routing::get};
use rustfs_trusted_proxies::{TrustedProxyLayer, TrustedProxyConfig};

let config = TrustedProxyConfig::default();
let layer = TrustedProxyLayer::enabled(config, None);

// `handler` is any async axum handler function you define.
let app = Router::new()
    .route("/", get(handler))
    .layer(layer);
```

### Accessing Client Info

Retrieve the verified client information in your handlers:

```rust
use axum::{Extension, response::IntoResponse};
use rustfs_trusted_proxies::ClientInfo;

async fn handler(Extension(client_info): Extension<ClientInfo>) -> impl IntoResponse {
    println!("Real Client IP: {}", client_info.real_ip);
}
```

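Beyond `real_ip`, the middleware records validation metadata that the crate's own `handlers.rs` reads back out. A hedged sketch using those observed fields (the exact struct layout may differ and should be checked against the crate docs):

```rust
use axum::{Extension, response::IntoResponse};
use rustfs_trusted_proxies::ClientInfo;

// The fields below are the ones read by the bundled handlers
// (`is_from_trusted_proxy`, `proxy_hops`, `validation_mode`, `warnings`);
// treat this as illustrative rather than a stable API contract.
async fn audit_handler(Extension(info): Extension<ClientInfo>) -> impl IntoResponse {
    if info.is_from_trusted_proxy {
        tracing::info!(
            hops = info.proxy_hops,
            mode = info.validation_mode.as_str(),
            "request arrived via trusted proxy chain"
        );
    }
    // Non-fatal validation anomalies are collected rather than rejected.
    for warning in &info.warnings {
        tracing::warn!("proxy validation warning: {warning}");
    }
    format!("client: {}", info.real_ip)
}
```
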
## Development

### Pre-Commit Checklist
Before committing, ensure all checks pass:
```bash
make pre-commit
```

### Testing
Run the test suite:
```bash
cargo test --workspace --exclude e2e_test
```

## License
Licensed under the Apache License, Version 2.0.
150
crates/trusted-proxies/src/api/handlers.rs
Normal file
@@ -0,0 +1,150 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! API request handlers for the trusted proxy service.
|
||||
|
||||
use crate::AppState;
|
||||
use crate::error::AppError;
|
||||
use crate::middleware::ClientInfo;
|
||||
use axum::{
|
||||
extract::{Request, State},
|
||||
http::StatusCode,
|
||||
response::{IntoResponse, Json},
|
||||
};
|
||||
use serde_json::{Value, json};
|
||||
|
||||
/// Health check endpoint to verify service availability.
|
||||
#[allow(dead_code)]
|
||||
pub async fn health_check() -> impl IntoResponse {
|
||||
Json(json!({
|
||||
"status": "healthy",
|
||||
"timestamp": chrono::Utc::now().to_rfc3339(),
|
||||
"service": "trusted-proxy",
|
||||
"version": env!("CARGO_PKG_VERSION"),
|
||||
}))
|
||||
}
|
||||
|
||||
/// Returns the current application configuration.
|
||||
#[allow(dead_code)]
|
||||
pub async fn show_config(State(state): State<AppState>) -> Result<Json<Value>, AppError> {
|
||||
let config = &state.config;
|
||||
|
||||
let response = json!({
|
||||
"server": {
|
||||
"addr": config.server_addr.to_string(),
|
||||
},
|
||||
"proxy": {
|
||||
"trusted_networks_count": config.proxy.proxies.len(),
|
||||
"validation_mode": config.proxy.validation_mode.as_str(),
|
||||
"max_hops": config.proxy.max_hops,
|
||||
"enable_rfc7239": config.proxy.enable_rfc7239,
|
||||
},
|
||||
"cache": {
|
||||
"capacity": config.cache.capacity,
|
||||
"ttl_seconds": config.cache.ttl_seconds,
|
||||
},
|
||||
"monitoring": {
|
||||
"metrics_enabled": config.monitoring.metrics_enabled,
|
||||
"log_level": config.monitoring.log_level,
|
||||
},
|
||||
"cloud": {
|
||||
"metadata_enabled": config.cloud.metadata_enabled,
|
||||
"cloudflare_enabled": config.cloud.cloudflare_ips_enabled,
|
||||
},
|
||||
});
|
||||
|
||||
Ok(Json(response))
|
||||
}
|
||||
|
||||
/// Returns information about the client as identified by the trusted proxy middleware.
|
||||
#[allow(dead_code)]
|
||||
pub async fn client_info(State(_state): State<AppState>, req: Request) -> impl IntoResponse {
|
||||
// Retrieve the verified client information from the request extensions.
|
||||
let client_info = req.extensions().get::<ClientInfo>();
|
||||
|
||||
match client_info {
|
||||
Some(info) => {
|
||||
let response = json!({
|
||||
"client": {
|
||||
"real_ip": info.real_ip.to_string(),
|
||||
"is_from_trusted_proxy": info.is_from_trusted_proxy,
|
||||
"proxy_hops": info.proxy_hops,
|
||||
"validation_mode": info.validation_mode.as_str(),
|
||||
},
|
||||
"headers": {
|
||||
"forwarded_host": info.forwarded_host,
|
||||
"forwarded_proto": info.forwarded_proto,
|
||||
},
|
||||
"warnings": info.warnings,
|
||||
"timestamp": chrono::Utc::now().to_rfc3339(),
|
||||
});
|
||||
|
||||
Json(response).into_response()
|
||||
}
|
||||
None => {
|
||||
let response = json!({
|
||||
"error": "Client information not available",
|
||||
"message": "The trusted proxy middleware may not be enabled or configured correctly.",
|
||||
});
|
||||
|
||||
(StatusCode::INTERNAL_SERVER_ERROR, Json(response)).into_response()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Debugging endpoint that returns all proxy-related headers received in the request.
|
||||
#[allow(dead_code)]
|
||||
pub async fn proxy_test(req: Request) -> Json<Value> {
|
||||
// Collect all headers related to proxying.
|
||||
let headers: Vec<(String, String)> = req
|
||||
.headers()
|
||||
.iter()
|
||||
.filter(|(name, _)| {
|
||||
let name_str = name.as_str().to_lowercase();
|
||||
name_str.contains("forwarded") || name_str.contains("x-forwarded") || name_str.contains("x-real")
|
||||
})
|
||||
.map(|(name, value)| (name.to_string(), value.to_str().unwrap_or("[INVALID]").to_string()))
|
||||
.collect();
|
||||
|
||||
// Get the direct peer address.
|
||||
let peer_addr = req
|
||||
.extensions()
|
||||
.get::<std::net::SocketAddr>()
|
||||
.map(|addr| addr.to_string())
|
||||
.unwrap_or_else(|| "unknown".to_string());
|
||||
|
||||
Json(json!({
|
||||
"peer_addr": peer_addr,
|
||||
"method": req.method().to_string(),
|
||||
"uri": req.uri().to_string(),
|
||||
"proxy_headers": headers,
|
||||
"timestamp": chrono::Utc::now().to_rfc3339(),
|
||||
}))
|
||||
}
|
||||
|
||||
/// Endpoint for retrieving Prometheus metrics.
|
||||
#[allow(dead_code)]
|
||||
pub async fn metrics(State(state): State<AppState>) -> impl IntoResponse {
|
||||
if !state.config.monitoring.metrics_enabled {
|
||||
return (StatusCode::NOT_FOUND, "Metrics are not enabled").into_response();
|
||||
}
|
||||
|
||||
// In a production environment, this would return the actual Prometheus-formatted metrics.
|
||||
let metrics_summary = json!({
|
||||
"status": "metrics_enabled",
|
||||
"note": "Prometheus metrics are being collected. Use a compatible exporter to view them.",
|
||||
});
|
||||
|
||||
Json(metrics_summary).into_response()
|
||||
}
|
||||
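For orientation, here is a minimal sketch (not part of the commit) of how these handlers could be mounted on an axum Router. The route paths, the visibility of the handlers from this scope, and the construction of AppState are assumptions for illustration only.

use axum::{Router, routing::get};

// Hypothetical wiring; assumes the handlers are reachable from this module
// and that `AppState` implements `Clone`.
fn app_router(state: AppState) -> Router {
    Router::new()
        .route("/healthz", get(health_check))
        .route("/config", get(show_config))
        .route("/client-info", get(client_info))
        .route("/proxy-test", get(proxy_test))
        .route("/metrics", get(metrics))
        .with_state(state)
}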
15
crates/trusted-proxies/src/api/mod.rs
Normal file
@@ -0,0 +1,15 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod handlers;
253
crates/trusted-proxies/src/cloud/detector.rs
Normal file
@@ -0,0 +1,253 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Cloud provider detection and metadata fetching.

use async_trait::async_trait;
use std::time::Duration;
use tracing::{debug, info, warn};

use crate::error::AppError;

/// Supported cloud providers.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum CloudProvider {
    /// Amazon Web Services
    Aws,
    /// Microsoft Azure
    Azure,
    /// Google Cloud Platform
    Gcp,
    /// DigitalOcean
    DigitalOcean,
    /// Cloudflare
    Cloudflare,
    /// Unknown or custom provider.
    Unknown(String),
}

impl CloudProvider {
    /// Detects the cloud provider based on environment variables.
    pub fn detect_from_env() -> Option<Self> {
        // Check for AWS environment variables.
        if std::env::var("AWS_EXECUTION_ENV").is_ok()
            || std::env::var("AWS_REGION").is_ok()
            || std::env::var("EC2_INSTANCE_ID").is_ok()
        {
            return Some(Self::Aws);
        }

        // Check for Azure environment variables.
        if std::env::var("WEBSITE_SITE_NAME").is_ok()
            || std::env::var("WEBSITE_INSTANCE_ID").is_ok()
            || std::env::var("APPSETTING_WEBSITE_SITE_NAME").is_ok()
        {
            return Some(Self::Azure);
        }

        // Check for GCP environment variables.
        if std::env::var("GCP_PROJECT").is_ok()
            || std::env::var("GOOGLE_CLOUD_PROJECT").is_ok()
            || std::env::var("GAE_INSTANCE").is_ok()
        {
            return Some(Self::Gcp);
        }

        // Check for DigitalOcean environment variables.
        if std::env::var("DIGITALOCEAN_REGION").is_ok() {
            return Some(Self::DigitalOcean);
        }

        // Check for Cloudflare environment variables.
        if std::env::var("CF_PAGES").is_ok() || std::env::var("CF_WORKERS").is_ok() {
            return Some(Self::Cloudflare);
        }

        None
    }

    /// Returns the canonical name of the cloud provider.
    pub fn name(&self) -> &str {
        match self {
            Self::Aws => "aws",
            Self::Azure => "azure",
            Self::Gcp => "gcp",
            Self::DigitalOcean => "digitalocean",
            Self::Cloudflare => "cloudflare",
            Self::Unknown(name) => name,
        }
    }

    /// Parses a cloud provider from a string.
    pub fn from_str(s: &str) -> Self {
        match s.to_lowercase().as_str() {
            "aws" | "amazon" => Self::Aws,
            "azure" | "microsoft" => Self::Azure,
            "gcp" | "google" => Self::Gcp,
            "digitalocean" | "do" => Self::DigitalOcean,
            "cloudflare" | "cf" => Self::Cloudflare,
            _ => Self::Unknown(s.to_string()),
        }
    }
}

/// Trait for fetching metadata from a specific cloud provider.
#[async_trait]
pub trait CloudMetadataFetcher: Send + Sync {
    /// Returns the name of the provider.
    fn provider_name(&self) -> &str;

    /// Fetches the network CIDR ranges for the current instance.
    async fn fetch_network_cidrs(&self) -> Result<Vec<ipnetwork::IpNetwork>, AppError>;

    /// Fetches the public IP ranges for the cloud provider.
    async fn fetch_public_ip_ranges(&self) -> Result<Vec<ipnetwork::IpNetwork>, AppError>;

    /// Fetches all IP ranges that should be considered trusted proxies.
    async fn fetch_trusted_proxy_ranges(&self) -> Result<Vec<ipnetwork::IpNetwork>, AppError> {
        let mut ranges = Vec::new();

        match self.fetch_network_cidrs().await {
            Ok(cidrs) => ranges.extend(cidrs),
            Err(e) => warn!("Failed to fetch network CIDRs from {}: {}", self.provider_name(), e),
        }

        match self.fetch_public_ip_ranges().await {
            Ok(public_ranges) => ranges.extend(public_ranges),
            Err(e) => warn!("Failed to fetch public IP ranges from {}: {}", self.provider_name(), e),
        }

        Ok(ranges)
    }
}

/// Detector for identifying the current cloud environment and fetching relevant metadata.
#[derive(Debug, Clone)]
pub struct CloudDetector {
    /// Whether cloud detection is enabled.
    enabled: bool,
    /// Timeout for metadata requests.
    timeout: Duration,
    /// Optionally force a specific provider.
    forced_provider: Option<CloudProvider>,
}

impl CloudDetector {
    /// Creates a new `CloudDetector`.
    pub fn new(enabled: bool, timeout: Duration, forced_provider: Option<String>) -> Self {
        let forced_provider = forced_provider.map(|s| CloudProvider::from_str(&s));

        Self {
            enabled,
            timeout,
            forced_provider,
        }
    }

    /// Identifies the current cloud provider.
    pub fn detect_provider(&self) -> Option<CloudProvider> {
        if !self.enabled {
            return None;
        }

        if let Some(provider) = self.forced_provider.as_ref() {
            return Some(provider.clone());
        }

        CloudProvider::detect_from_env()
    }

    /// Fetches trusted IP ranges for the detected cloud provider.
    pub async fn fetch_trusted_ranges(&self) -> Result<Vec<ipnetwork::IpNetwork>, AppError> {
        if !self.enabled {
            debug!("Cloud metadata fetching is disabled");
            return Ok(Vec::new());
        }

        let provider = self.detect_provider();

        match provider {
            Some(CloudProvider::Aws) => {
                info!("Detected AWS environment, fetching metadata");
                let fetcher = crate::cloud::metadata::AwsMetadataFetcher::new(self.timeout);
                fetcher.fetch_trusted_proxy_ranges().await
            }
            Some(CloudProvider::Azure) => {
                info!("Detected Azure environment, fetching metadata");
                let fetcher = crate::cloud::metadata::AzureMetadataFetcher::new(self.timeout);
                fetcher.fetch_trusted_proxy_ranges().await
            }
            Some(CloudProvider::Gcp) => {
                info!("Detected GCP environment, fetching metadata");
                let fetcher = crate::cloud::metadata::GcpMetadataFetcher::new(self.timeout);
                fetcher.fetch_trusted_proxy_ranges().await
            }
            Some(CloudProvider::Cloudflare) => {
                info!("Detected Cloudflare environment");
                let ranges = crate::cloud::ranges::CloudflareIpRanges::fetch().await?;
                Ok(ranges)
            }
            Some(CloudProvider::DigitalOcean) => {
                info!("Detected DigitalOcean environment");
                let ranges = crate::cloud::ranges::DigitalOceanIpRanges::fetch().await?;
                Ok(ranges)
            }
            Some(CloudProvider::Unknown(name)) => {
                warn!("Unknown cloud provider detected: {}", name);
                Ok(Vec::new())
            }
            None => {
                debug!("No cloud provider detected");
                Ok(Vec::new())
            }
        }
    }

    /// Attempts to fetch metadata from all supported providers sequentially.
    pub async fn try_all_providers(&self) -> Result<Vec<ipnetwork::IpNetwork>, AppError> {
        if !self.enabled {
            return Ok(Vec::new());
        }

        let providers: Vec<Box<dyn CloudMetadataFetcher>> = vec![
            Box::new(crate::cloud::metadata::AwsMetadataFetcher::new(self.timeout)),
            Box::new(crate::cloud::metadata::AzureMetadataFetcher::new(self.timeout)),
            Box::new(crate::cloud::metadata::GcpMetadataFetcher::new(self.timeout)),
        ];

        for provider in providers {
            let provider_name = provider.provider_name();
            debug!("Trying to fetch metadata from {}", provider_name);

            match provider.fetch_trusted_proxy_ranges().await {
                Ok(ranges) => {
                    if !ranges.is_empty() {
                        info!("Fetched {} IP ranges from {}", ranges.len(), provider_name);
                        return Ok(ranges);
                    }
                }
                Err(e) => {
                    debug!("Failed to fetch metadata from {}: {}", provider_name, e);
                }
            }
        }

        Ok(Vec::new())
    }
}

/// Returns a default `CloudDetector` with detection disabled.
pub fn default_cloud_detector() -> CloudDetector {
    CloudDetector::new(false, Duration::from_secs(5), None)
}
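As a usage sketch (assuming a tokio runtime and the crate's re-exports; the call site below is hypothetical), the detector is constructed with an enable flag, a timeout, and an optional forced provider, then queried for trusted ranges:

use std::time::Duration;

// Hypothetical call site; forcing "aws" skips environment detection.
async fn load_cloud_ranges() -> Vec<ipnetwork::IpNetwork> {
    let detector = CloudDetector::new(true, Duration::from_secs(5), Some("aws".to_string()));
    detector.fetch_trusted_ranges().await.unwrap_or_default()
}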
156
crates/trusted-proxies/src/cloud/metadata/aws.rs
Normal file
@@ -0,0 +1,156 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! AWS metadata fetching implementation for identifying trusted proxy ranges.

use async_trait::async_trait;
use reqwest::Client;
use std::str::FromStr;
use std::time::Duration;
use tracing::{debug, info};

use crate::cloud::detector::CloudMetadataFetcher;
use crate::error::AppError;

/// Fetcher for AWS-specific metadata.
#[derive(Debug, Clone)]
pub struct AwsMetadataFetcher {
    client: Client,
    metadata_endpoint: String,
}

impl AwsMetadataFetcher {
    /// Creates a new `AwsMetadataFetcher`.
    ///
    /// # Arguments
    ///
    /// * `timeout` - Duration to use for HTTP request timeouts.
    ///
    /// Returns a new instance of `AwsMetadataFetcher`.
    pub fn new(timeout: Duration) -> Self {
        let client = Client::builder().timeout(timeout).build().unwrap_or_else(|_| Client::new());

        Self {
            client,
            metadata_endpoint: "http://169.254.169.254".to_string(),
        }
    }

    /// Retrieves an IMDSv2 token for secure metadata access.
    #[allow(dead_code)]
    async fn get_metadata_token(&self) -> Result<String, AppError> {
        let url = format!("{}/latest/api/token", self.metadata_endpoint);

        match self
            .client
            .put(&url)
            .header("X-aws-ec2-metadata-token-ttl-seconds", "21600")
            .send()
            .await
        {
            Ok(response) => {
                if response.status().is_success() {
                    let token = response
                        .text()
                        .await
                        .map_err(|e| AppError::cloud(format!("Failed to read IMDSv2 token: {}", e)))?;
                    Ok(token)
                } else {
                    debug!("IMDSv2 token request failed with status: {}", response.status());
                    Err(AppError::cloud("Failed to obtain IMDSv2 token"))
                }
            }
            Err(e) => {
                debug!("IMDSv2 token request failed: {}", e);
                Err(AppError::cloud(format!("IMDSv2 request failed: {}", e)))
            }
        }
    }
}

#[async_trait]
impl CloudMetadataFetcher for AwsMetadataFetcher {
    fn provider_name(&self) -> &str {
        "aws"
    }

    async fn fetch_network_cidrs(&self) -> Result<Vec<ipnetwork::IpNetwork>, AppError> {
        // Simplified implementation: returns standard AWS VPC private ranges.
        let default_ranges = vec![
            "10.0.0.0/8",     // Large VPCs
            "172.16.0.0/12",  // Medium VPCs
            "192.168.0.0/16", // Small VPCs
        ];

        let networks: Result<Vec<_>, _> = default_ranges
            .into_iter()
            .map(|s| ipnetwork::IpNetwork::from_str(s))
            .collect();

        match networks {
            Ok(networks) => {
                debug!("Using default AWS VPC network ranges");
                Ok(networks)
            }
            Err(e) => Err(AppError::cloud(format!("Failed to parse default AWS ranges: {}", e))),
        }
    }

    async fn fetch_public_ip_ranges(&self) -> Result<Vec<ipnetwork::IpNetwork>, AppError> {
        let url = "https://ip-ranges.amazonaws.com/ip-ranges.json";

        #[derive(Debug, serde::Deserialize)]
        struct AwsIpRanges {
            prefixes: Vec<AwsPrefix>,
        }

        #[derive(Debug, serde::Deserialize)]
        struct AwsPrefix {
            ip_prefix: String,
            service: String,
        }

        match self.client.get(url).timeout(Duration::from_secs(5)).send().await {
            Ok(response) => {
                if response.status().is_success() {
                    let ip_ranges: AwsIpRanges = response
                        .json()
                        .await
                        .map_err(|e| AppError::cloud(format!("Failed to parse AWS IP ranges JSON: {}", e)))?;

                    let mut networks = Vec::new();

                    for prefix in ip_ranges.prefixes {
                        // Include EC2 and CloudFront ranges as potential trusted proxies.
                        if prefix.service == "EC2" || prefix.service == "CLOUDFRONT" {
                            if let Ok(network) = ipnetwork::IpNetwork::from_str(&prefix.ip_prefix) {
                                networks.push(network);
                            }
                        }
                    }

                    info!("Successfully fetched {} AWS public IP ranges", networks.len());
                    Ok(networks)
                } else {
                    debug!("Failed to fetch AWS IP ranges: HTTP {}", response.status());
                    Ok(Vec::new())
                }
            }
            Err(e) => {
                debug!("Failed to fetch AWS IP ranges: {}", e);
                Ok(Vec::new())
            }
        }
    }
}
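Note that the IMDSv2 token above is fetched but not yet consumed (hence `#[allow(dead_code)]`). If it were wired in, subsequent metadata reads would present it via the `X-aws-ec2-metadata-token` request header. A hedged sketch of that follow-up call, assuming it lives in the same module as the fetcher (the helper name and the `latest/meta-data/` path layout are illustrative):

// Hypothetical follow-up request using the IMDSv2 token.
async fn get_metadata_with_token(fetcher: &AwsMetadataFetcher, token: &str, path: &str) -> Result<String, AppError> {
    let url = format!("{}/latest/meta-data/{}", fetcher.metadata_endpoint, path);
    let response = fetcher
        .client
        .get(&url)
        .header("X-aws-ec2-metadata-token", token)
        .send()
        .await
        .map_err(|e| AppError::cloud(format!("IMDS request failed: {}", e)))?;
    response
        .text()
        .await
        .map_err(|e| AppError::cloud(format!("Failed to read IMDS response: {}", e)))
}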
307
crates/trusted-proxies/src/cloud/metadata/azure.rs
Normal file
@@ -0,0 +1,307 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Azure Cloud metadata fetching implementation for identifying trusted proxy ranges.

use async_trait::async_trait;
use reqwest::Client;
use serde::Deserialize;
use std::str::FromStr;
use std::time::Duration;
use tracing::{debug, info, warn};

use crate::cloud::detector::CloudMetadataFetcher;
use crate::error::AppError;

/// Fetcher for Azure-specific metadata.
#[derive(Debug, Clone)]
pub struct AzureMetadataFetcher {
    client: Client,
    metadata_endpoint: String,
}

impl AzureMetadataFetcher {
    /// Creates a new `AzureMetadataFetcher`.
    pub fn new(timeout: Duration) -> Self {
        let client = Client::builder().timeout(timeout).build().unwrap_or_else(|_| Client::new());

        Self {
            client,
            metadata_endpoint: "http://169.254.169.254".to_string(),
        }
    }

    /// Retrieves metadata from the Azure Instance Metadata Service (IMDS).
    async fn get_metadata(&self, path: &str) -> Result<String, AppError> {
        let url = format!("{}/metadata/{}?api-version=2021-05-01", self.metadata_endpoint, path);

        debug!("Fetching Azure metadata from: {}", url);

        match self.client.get(&url).header("Metadata", "true").send().await {
            Ok(response) => {
                if response.status().is_success() {
                    let text = response
                        .text()
                        .await
                        .map_err(|e| AppError::cloud(format!("Failed to read Azure metadata response: {}", e)))?;
                    Ok(text)
                } else {
                    debug!("Azure metadata request failed with status: {}", response.status());
                    Err(AppError::cloud(format!("Azure metadata API returned status: {}", response.status())))
                }
            }
            Err(e) => {
                debug!("Azure metadata request failed: {}", e);
                Err(AppError::cloud(format!("Azure metadata request failed: {}", e)))
            }
        }
    }

    /// Fetches Azure public IP ranges from the official Microsoft download source.
    async fn fetch_azure_ip_ranges(&self) -> Result<Vec<ipnetwork::IpNetwork>, AppError> {
        // Official Azure Service Tags download URL (pinned to a dated snapshot).
        let url =
            "https://download.microsoft.com/download/7/1/D/71D86715-5596-4529-9B13-DA13A5DE5B63/ServiceTags_Public_20231211.json";

        #[derive(Debug, Deserialize)]
        struct AzureServiceTags {
            values: Vec<AzureServiceTag>,
        }

        #[derive(Debug, Deserialize)]
        struct AzureServiceTag {
            name: String,
            properties: AzureServiceTagProperties,
        }

        #[derive(Debug, Deserialize)]
        struct AzureServiceTagProperties {
            // The Service Tags JSON uses camelCase for this field.
            #[serde(rename = "addressPrefixes")]
            address_prefixes: Vec<String>,
        }

        debug!("Fetching Azure IP ranges from: {}", url);

        match self.client.get(url).timeout(Duration::from_secs(10)).send().await {
            Ok(response) => {
                if response.status().is_success() {
                    let service_tags: AzureServiceTags = response
                        .json()
                        .await
                        .map_err(|e| AppError::cloud(format!("Failed to parse Azure IP ranges JSON: {}", e)))?;

                    let mut networks = Vec::new();

                    for tag in service_tags.values {
                        // Include general Azure datacenter ranges, excluding specific internal services.
                        if tag.name.contains("Azure") && !tag.name.contains("ActiveDirectory") {
                            for prefix in tag.properties.address_prefixes {
                                if let Ok(network) = ipnetwork::IpNetwork::from_str(&prefix) {
                                    networks.push(network);
                                }
                            }
                        }
                    }

                    info!("Successfully fetched {} Azure public IP ranges", networks.len());
                    Ok(networks)
                } else {
                    debug!("Failed to fetch Azure IP ranges: HTTP {}", response.status());
                    Ok(Vec::new())
                }
            }
            Err(e) => {
                debug!("Failed to fetch Azure IP ranges: {}", e);
                // Fallback to hardcoded ranges if the download fails.
                Self::default_azure_ranges()
            }
        }
    }

    /// Returns a set of default Azure IP ranges as a fallback.
    fn default_azure_ranges() -> Result<Vec<ipnetwork::IpNetwork>, AppError> {
        let ranges = vec![
            "13.64.0.0/11",
            "13.96.0.0/13",
            "13.104.0.0/14",
            "20.33.0.0/16",
            "20.34.0.0/15",
            "20.36.0.0/14",
            "20.40.0.0/13",
            "20.48.0.0/12",
            "20.64.0.0/10",
            "20.128.0.0/16",
            "20.135.0.0/16",
            "20.136.0.0/13",
            "20.150.0.0/15",
            "20.157.0.0/16",
            "20.184.0.0/13",
            "20.190.0.0/16",
            "20.192.0.0/10",
            "40.64.0.0/10",
            "40.80.0.0/12",
            "40.96.0.0/13",
            "40.112.0.0/13",
            "40.120.0.0/14",
            "40.124.0.0/16",
            "40.125.0.0/17",
            "51.12.0.0/15",
            "51.104.0.0/15",
            "51.120.0.0/16",
            "51.124.0.0/16",
            "51.132.0.0/16",
            "51.136.0.0/15",
            "51.138.0.0/16",
            "51.140.0.0/14",
            "51.144.0.0/15",
            "52.96.0.0/12",
            "52.112.0.0/14",
            "52.120.0.0/14",
            "52.124.0.0/16",
            "52.125.0.0/16",
            "52.126.0.0/15",
            "52.130.0.0/15",
            "52.136.0.0/13",
            "52.144.0.0/15",
            "52.146.0.0/15",
            "52.148.0.0/14",
            "52.152.0.0/13",
            "52.160.0.0/12",
            "52.176.0.0/13",
            "52.184.0.0/14",
            "52.188.0.0/14",
            "52.224.0.0/11",
            "65.52.0.0/14",
            "104.40.0.0/13",
            "104.208.0.0/13",
            "104.215.0.0/16",
            "137.116.0.0/15",
            "137.135.0.0/16",
            "138.91.0.0/16",
            "157.56.0.0/16",
            "168.61.0.0/16",
            "168.62.0.0/15",
            "191.233.0.0/18",
            "193.149.0.0/19",
            "2603:1000::/40",
            "2603:1010::/40",
            "2603:1020::/40",
            "2603:1030::/40",
            "2603:1040::/40",
            "2603:1050::/40",
            "2603:1060::/40",
            "2603:1070::/40",
            "2603:1080::/40",
            "2603:1090::/40",
            "2603:10a0::/40",
            "2603:10b0::/40",
            "2603:10c0::/40",
            "2603:10d0::/40",
            "2603:10e0::/40",
            "2603:10f0::/40",
            "2603:1100::/40",
        ];

        let networks: Result<Vec<_>, _> = ranges.into_iter().map(|s| ipnetwork::IpNetwork::from_str(s)).collect();

        match networks {
            Ok(networks) => {
                debug!("Using default Azure public IP ranges");
                Ok(networks)
            }
            Err(e) => Err(AppError::cloud(format!("Failed to parse default Azure ranges: {}", e))),
        }
    }
}

#[async_trait]
impl CloudMetadataFetcher for AzureMetadataFetcher {
    fn provider_name(&self) -> &str {
        "azure"
    }

    async fn fetch_network_cidrs(&self) -> Result<Vec<ipnetwork::IpNetwork>, AppError> {
        // Attempt to fetch network interface information from Azure IMDS.
        match self.get_metadata("instance/network/interface").await {
            Ok(metadata) => {
                #[derive(Debug, Deserialize)]
                struct AzureNetworkInterface {
                    ipv4: AzureIpv4Info,
                }

                #[derive(Debug, Deserialize)]
                struct AzureIpv4Info {
                    subnet: Vec<AzureSubnet>,
                }

                #[derive(Debug, Deserialize)]
                struct AzureSubnet {
                    address: String,
                    prefix: String,
                }

                let interfaces: Vec<AzureNetworkInterface> = serde_json::from_str(&metadata)
                    .map_err(|e| AppError::cloud(format!("Failed to parse Azure network metadata JSON: {}", e)))?;

                let mut cidrs = Vec::new();
                for interface in interfaces {
                    for subnet in interface.ipv4.subnet {
                        let cidr = format!("{}/{}", subnet.address, subnet.prefix);
                        if let Ok(network) = ipnetwork::IpNetwork::from_str(&cidr) {
                            cidrs.push(network);
                        }
                    }
                }

                if !cidrs.is_empty() {
                    info!("Successfully fetched {} network CIDRs from Azure metadata", cidrs.len());
                    Ok(cidrs)
                } else {
                    debug!("No network CIDRs found in Azure metadata, falling back to defaults");
                    Self::default_azure_network_ranges()
                }
            }
            Err(e) => {
                warn!("Failed to fetch Azure network metadata: {}", e);
                Self::default_azure_network_ranges()
            }
        }
    }

    async fn fetch_public_ip_ranges(&self) -> Result<Vec<ipnetwork::IpNetwork>, AppError> {
        self.fetch_azure_ip_ranges().await
    }
}

impl AzureMetadataFetcher {
    /// Returns a set of default Azure VNet ranges as a fallback.
    fn default_azure_network_ranges() -> Result<Vec<ipnetwork::IpNetwork>, AppError> {
        let ranges = vec![
            "10.0.0.0/8",     // Large VNets
            "172.16.0.0/12",  // Medium VNets
            "192.168.0.0/16", // Small VNets
            "100.64.0.0/10",  // Azure reserved range
            "192.0.0.0/24",   // Azure reserved
        ];

        let networks: Result<Vec<_>, _> = ranges.into_iter().map(|s| ipnetwork::IpNetwork::from_str(s)).collect();

        match networks {
            Ok(networks) => {
                debug!("Using default Azure VNet network ranges");
                Ok(networks)
            }
            Err(e) => Err(AppError::cloud(format!("Failed to parse default Azure network ranges: {}", e))),
        }
    }
}
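The downstream proxy validation ultimately reduces to containment checks on these ranges via ipnetwork. A small illustrative sanity sketch (the sample addresses are arbitrary, not from the commit):

use std::net::IpAddr;
use std::str::FromStr;

// Illustrative check: 13.64.0.0/11 covers 13.65.0.1 but not 14.0.0.1.
fn azure_range_demo() {
    let net = ipnetwork::IpNetwork::from_str("13.64.0.0/11").expect("valid CIDR");
    assert!(net.contains(IpAddr::from_str("13.65.0.1").unwrap()));
    assert!(!net.contains(IpAddr::from_str("14.0.0.1").unwrap()));
}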
309
crates/trusted-proxies/src/cloud/metadata/gcp.rs
Normal file
@@ -0,0 +1,309 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Google Cloud Platform (GCP) metadata fetching implementation for identifying trusted proxy ranges.

use async_trait::async_trait;
use reqwest::Client;
use serde::Deserialize;
use std::str::FromStr;
use std::time::Duration;
use tracing::{debug, info, warn};

use crate::cloud::detector::CloudMetadataFetcher;
use crate::error::AppError;

/// Fetcher for GCP-specific metadata.
#[derive(Debug, Clone)]
pub struct GcpMetadataFetcher {
    client: Client,
    metadata_endpoint: String,
}

impl GcpMetadataFetcher {
    /// Creates a new `GcpMetadataFetcher`.
    pub fn new(timeout: Duration) -> Self {
        let client = Client::builder().timeout(timeout).build().unwrap_or_else(|_| Client::new());

        Self {
            client,
            metadata_endpoint: "http://metadata.google.internal".to_string(),
        }
    }

    /// Retrieves metadata from the GCP Compute Engine metadata server.
    async fn get_metadata(&self, path: &str) -> Result<String, AppError> {
        let url = format!("{}/computeMetadata/v1/{}", self.metadata_endpoint, path);

        debug!("Fetching GCP metadata from: {}", url);

        match self.client.get(&url).header("Metadata-Flavor", "Google").send().await {
            Ok(response) => {
                if response.status().is_success() {
                    let text = response
                        .text()
                        .await
                        .map_err(|e| AppError::cloud(format!("Failed to read GCP metadata response: {}", e)))?;
                    Ok(text)
                } else {
                    debug!("GCP metadata request failed with status: {}", response.status());
                    Err(AppError::cloud(format!("GCP metadata API returned status: {}", response.status())))
                }
            }
            Err(e) => {
                debug!("GCP metadata request failed: {}", e);
                Err(AppError::cloud(format!("GCP metadata request failed: {}", e)))
            }
        }
    }

    /// Converts a dotted-decimal subnet mask to a CIDR prefix length.
    fn subnet_mask_to_prefix_length(mask: &str) -> Result<u8, AppError> {
        let parts: Vec<&str> = mask.split('.').collect();
        if parts.len() != 4 {
            return Err(AppError::cloud(format!("Invalid subnet mask format: {}", mask)));
        }

        let mut prefix_length = 0;
        for part in parts {
            let octet: u8 = part
                .parse()
                .map_err(|_| AppError::cloud(format!("Invalid octet in subnet mask: {}", part)))?;

            let mut remaining = octet;
            while remaining > 0 {
                if remaining & 0x80 == 0x80 {
                    prefix_length += 1;
                    remaining <<= 1;
                } else {
                    break;
                }
            }

            if remaining != 0 {
                return Err(AppError::cloud("Non-contiguous subnet mask detected"));
            }
        }

        Ok(prefix_length)
    }
}

#[async_trait]
impl CloudMetadataFetcher for GcpMetadataFetcher {
    fn provider_name(&self) -> &str {
        "gcp"
    }

    async fn fetch_network_cidrs(&self) -> Result<Vec<ipnetwork::IpNetwork>, AppError> {
        // Attempt to list network interfaces from GCP metadata.
        match self.get_metadata("instance/network-interfaces/").await {
            Ok(interfaces_metadata) => {
                let interface_indices: Vec<usize> = interfaces_metadata
                    .lines()
                    .filter_map(|line| {
                        let line = line.trim().trim_end_matches('/');
                        if line.chars().all(|c| c.is_ascii_digit()) {
                            line.parse().ok()
                        } else {
                            None
                        }
                    })
                    .collect();

                if interface_indices.is_empty() {
                    warn!("No network interfaces found in GCP metadata");
                    return Self::default_gcp_network_ranges();
                }

                let mut cidrs = Vec::new();

                for index in interface_indices {
                    // Try to get IP and subnet mask for each interface.
                    let ip_path = format!("instance/network-interfaces/{}/ip", index);
                    let mask_path = format!("instance/network-interfaces/{}/subnetmask", index);

                    match tokio::try_join!(self.get_metadata(&ip_path), self.get_metadata(&mask_path)) {
                        Ok((ip, mask)) => {
                            let ip = ip.trim();
                            let mask = mask.trim();

                            if let (Ok(ip_addr), Ok(prefix_len)) =
                                (std::net::Ipv4Addr::from_str(ip), Self::subnet_mask_to_prefix_length(mask))
                            {
                                let cidr_str = format!("{}/{}", ip_addr, prefix_len);
                                if let Ok(network) = ipnetwork::IpNetwork::from_str(&cidr_str) {
                                    cidrs.push(network);
                                }
                            }
                        }
                        Err(e) => {
                            debug!("Failed to get IP/mask for GCP interface {}: {}", index, e);
                        }
                    }
                }

                if cidrs.is_empty() {
                    warn!("Could not determine network CIDRs from GCP metadata, falling back to defaults");
                    Self::default_gcp_network_ranges()
                } else {
                    info!("Successfully fetched {} network CIDRs from GCP metadata", cidrs.len());
                    Ok(cidrs)
                }
            }
            Err(e) => {
                warn!("Failed to fetch GCP network metadata: {}", e);
                Self::default_gcp_network_ranges()
            }
        }
    }

    async fn fetch_public_ip_ranges(&self) -> Result<Vec<ipnetwork::IpNetwork>, AppError> {
        self.fetch_gcp_ip_ranges().await
    }
}

impl GcpMetadataFetcher {
    /// Fetches GCP public IP ranges from the official Google source.
    async fn fetch_gcp_ip_ranges(&self) -> Result<Vec<ipnetwork::IpNetwork>, AppError> {
        let url = "https://www.gstatic.com/ipranges/cloud.json";

        #[derive(Debug, Deserialize)]
        struct GcpIpRanges {
            prefixes: Vec<GcpPrefix>,
        }

        #[derive(Debug, Deserialize)]
        struct GcpPrefix {
            // cloud.json uses camelCase for this field.
            #[serde(rename = "ipv4Prefix")]
            ipv4_prefix: Option<String>,
        }

        debug!("Fetching GCP IP ranges from: {}", url);

        match self.client.get(url).timeout(Duration::from_secs(10)).send().await {
            Ok(response) => {
                if response.status().is_success() {
                    let ip_ranges: GcpIpRanges = response
                        .json()
                        .await
                        .map_err(|e| AppError::cloud(format!("Failed to parse GCP IP ranges JSON: {}", e)))?;

                    let mut networks = Vec::new();

                    for prefix in ip_ranges.prefixes {
                        if let Some(ipv4_prefix) = prefix.ipv4_prefix {
                            if let Ok(network) = ipnetwork::IpNetwork::from_str(&ipv4_prefix) {
                                networks.push(network);
                            }
                        }
                    }

                    info!("Successfully fetched {} GCP public IP ranges", networks.len());
                    Ok(networks)
                } else {
                    debug!("Failed to fetch GCP IP ranges: HTTP {}", response.status());
                    Self::default_gcp_ip_ranges()
                }
            }
            Err(e) => {
                debug!("Failed to fetch GCP IP ranges: {}", e);
                Self::default_gcp_ip_ranges()
            }
        }
    }

    /// Returns a set of default GCP public IP ranges as a fallback.
    fn default_gcp_ip_ranges() -> Result<Vec<ipnetwork::IpNetwork>, AppError> {
        let ranges = vec![
            "8.34.208.0/20",
            "8.35.192.0/20",
            "8.35.208.0/20",
            "23.236.48.0/20",
            "23.251.128.0/19",
            "34.0.0.0/15",
            "34.2.0.0/16",
            "34.3.0.0/23",
            "34.4.0.0/14",
            "34.8.0.0/13",
            "34.16.0.0/12",
            "34.32.0.0/11",
            "34.64.0.0/10",
            "34.128.0.0/10",
            "35.184.0.0/13",
            "35.192.0.0/14",
            "35.196.0.0/15",
            "35.198.0.0/16",
            "35.200.0.0/13",
            "35.208.0.0/12",
            "35.224.0.0/12",
            "35.240.0.0/13",
            "104.154.0.0/15",
            "104.196.0.0/14",
            "107.167.160.0/19",
            "107.178.192.0/18",
            "108.59.80.0/20",
            "108.170.192.0/18",
            "108.177.0.0/17",
            "130.211.0.0/16",
            "136.112.0.0/12",
            "142.250.0.0/15",
            "146.148.0.0/17",
            "172.217.0.0/16",
            "172.253.0.0/16",
            "173.194.0.0/16",
            "192.178.0.0/15",
            "209.85.128.0/17",
            "216.58.192.0/19",
            "216.239.32.0/19",
            "2001:4860::/32",
            "2404:6800::/32",
            "2600:1900::/28",
            "2607:f8b0::/32",
            "2620:15c::/36",
            "2800:3f0::/32",
            "2a00:1450::/32",
            "2c0f:fb50::/32",
        ];

        let networks: Result<Vec<_>, _> = ranges.into_iter().map(|s| ipnetwork::IpNetwork::from_str(s)).collect();

        match networks {
            Ok(networks) => {
                debug!("Using default GCP public IP ranges");
                Ok(networks)
            }
            Err(e) => Err(AppError::cloud(format!("Failed to parse default GCP ranges: {}", e))),
        }
    }

    /// Returns a set of default GCP VPC ranges as a fallback.
    fn default_gcp_network_ranges() -> Result<Vec<ipnetwork::IpNetwork>, AppError> {
        let ranges = vec![
            "10.0.0.0/8",     // Large VPCs
            "172.16.0.0/12",  // Medium VPCs
            "192.168.0.0/16", // Small VPCs
            "100.64.0.0/10",  // GCP reserved range
        ];

        let networks: Result<Vec<_>, _> = ranges.into_iter().map(|s| ipnetwork::IpNetwork::from_str(s)).collect();

        match networks {
            Ok(networks) => {
                debug!("Using default GCP VPC network ranges");
                Ok(networks)
            }
            Err(e) => Err(AppError::cloud(format!("Failed to parse default GCP network ranges: {}", e))),
        }
    }
}
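The mask-to-prefix conversion above counts leading one-bits per octet and rejects non-contiguous bits within an octet (though not across octets, so a mask like 0.255.0.0 would slip through). An equivalent whole-word formulation, shown here as a standalone sketch rather than a drop-in replacement:

use std::net::Ipv4Addr;
use std::str::FromStr;

// Equivalent conversion over the full 32-bit mask: a mask is contiguous
// exactly when all of its set bits are leading bits.
fn mask_to_prefix(mask: &str) -> Option<u8> {
    let bits = u32::from(Ipv4Addr::from_str(mask).ok()?);
    if bits.count_ones() == bits.leading_ones() {
        Some(bits.count_ones() as u8)
    } else {
        None // non-contiguous, e.g. "255.0.255.0"
    }
}

// mask_to_prefix("255.255.255.0") == Some(24)
// mask_to_prefix("255.0.255.0") == None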
26
crates/trusted-proxies/src/cloud/metadata/mod.rs
Normal file
@@ -0,0 +1,26 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Cloud provider metadata fetching
//!
//! This module contains implementations for fetching metadata
//! from various cloud providers.

mod aws;
mod azure;
mod gcp;

pub use aws::*;
pub use azure::*;
pub use gcp::*;
28
crates/trusted-proxies/src/cloud/mod.rs
Normal file
@@ -0,0 +1,28 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Cloud service integration module
//!
//! This module provides integration with various cloud providers
//! for automatic IP range detection and metadata fetching.

mod detector;
pub mod metadata;
mod ranges;

pub use detector::*;
pub use ranges::*;

// Re-export metadata module types
pub use metadata::*;
216
crates/trusted-proxies/src/cloud/ranges.rs
Normal file
@@ -0,0 +1,216 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Static and dynamic IP range definitions for various cloud providers.

use std::str::FromStr;
use std::time::Duration;

use ipnetwork::IpNetwork;
use reqwest::Client;
use tracing::{debug, info};

use crate::error::AppError;

/// Utility for fetching Cloudflare IP ranges.
pub struct CloudflareIpRanges;

impl CloudflareIpRanges {
    /// Returns a static list of Cloudflare IP ranges.
    pub async fn fetch() -> Result<Vec<IpNetwork>, AppError> {
        let ranges = vec![
            // IPv4 ranges
            "103.21.244.0/22",
            "103.22.200.0/22",
            "103.31.4.0/22",
            "104.16.0.0/13",
            "104.24.0.0/14",
            "108.162.192.0/18",
            "131.0.72.0/22",
            "141.101.64.0/18",
            "162.158.0.0/15",
            "172.64.0.0/13",
            "173.245.48.0/20",
            "188.114.96.0/20",
            "190.93.240.0/20",
            "197.234.240.0/22",
            "198.41.128.0/17",
            // IPv6 ranges
            "2400:cb00::/32",
            "2606:4700::/32",
            "2803:f800::/32",
            "2405:b500::/32",
            "2405:8100::/32",
            "2a06:98c0::/29",
            "2c0f:f248::/32",
        ];

        let networks: Result<Vec<_>, _> = ranges.into_iter().map(|s| IpNetwork::from_str(s)).collect();

        match networks {
            Ok(networks) => {
                info!("Loaded {} static Cloudflare IP ranges", networks.len());
                Ok(networks)
            }
            Err(e) => Err(AppError::cloud(format!("Failed to parse static Cloudflare IP ranges: {}", e))),
        }
    }

    /// Fetches the latest Cloudflare IP ranges from their official API.
    pub async fn fetch_from_api() -> Result<Vec<IpNetwork>, AppError> {
        let client = Client::builder()
            .timeout(Duration::from_secs(10))
            .build()
            .map_err(|e| AppError::cloud(format!("Failed to create HTTP client: {}", e)))?;

        let urls = ["https://www.cloudflare.com/ips-v4", "https://www.cloudflare.com/ips-v6"];

        let mut all_ranges = Vec::new();

        for url in urls {
            match client.get(url).send().await {
                Ok(response) => {
                    if response.status().is_success() {
                        let text = response
                            .text()
                            .await
                            .map_err(|e| AppError::cloud(format!("Failed to read response from {}: {}", url, e)))?;

                        let ranges: Result<Vec<_>, _> = text
                            .lines()
                            .map(|line| line.trim())
                            .filter(|line| !line.is_empty())
                            .map(IpNetwork::from_str)
                            .collect();

                        match ranges {
                            Ok(mut networks) => {
                                debug!("Fetched {} IP ranges from {}", networks.len(), url);
                                all_ranges.append(&mut networks);
                            }
                            Err(e) => {
                                debug!("Failed to parse IP ranges from {}: {}", url, e);
                            }
                        }
                    } else {
                        debug!("Failed to fetch IP ranges from {}: HTTP {}", url, response.status());
                    }
                }
                Err(e) => {
                    debug!("Failed to fetch from {}: {}", url, e);
                }
            }
        }

        if all_ranges.is_empty() {
            // Fallback to static list if API requests fail.
            Self::fetch().await
        } else {
            info!("Successfully fetched {} Cloudflare IP ranges from API", all_ranges.len());
            Ok(all_ranges)
        }
    }
}

/// Utility for fetching DigitalOcean IP ranges.
pub struct DigitalOceanIpRanges;

impl DigitalOceanIpRanges {
    /// Returns a static list of DigitalOcean IP ranges.
    pub async fn fetch() -> Result<Vec<IpNetwork>, AppError> {
        let ranges = vec![
            // Datacenter IP ranges
            "64.227.0.0/16",
            "138.197.0.0/16",
            "139.59.0.0/16",
            "157.230.0.0/16",
            "159.65.0.0/16",
            "167.99.0.0/16",
            "178.128.0.0/16",
            "206.189.0.0/16",
            "207.154.0.0/16",
            "209.97.0.0/16",
            // Load Balancer IP ranges
            "144.126.0.0/16",
            "143.198.0.0/16",
            "161.35.0.0/16",
        ];

        let networks: Result<Vec<_>, _> = ranges.into_iter().map(|s| IpNetwork::from_str(s)).collect();

        match networks {
            Ok(networks) => {
                info!("Loaded {} static DigitalOcean IP ranges", networks.len());
                Ok(networks)
            }
            Err(e) => Err(AppError::cloud(format!("Failed to parse static DigitalOcean IP ranges: {}", e))),
        }
    }
}

/// Utility for fetching Google Cloud IP ranges.
pub struct GoogleCloudIpRanges;

impl GoogleCloudIpRanges {
    /// Fetches the latest Google Cloud IP ranges from their official source.
    pub async fn fetch() -> Result<Vec<IpNetwork>, AppError> {
        let client = Client::builder()
            .timeout(Duration::from_secs(10))
            .build()
            .map_err(|e| AppError::cloud(format!("Failed to create HTTP client: {}", e)))?;

        let url = "https://www.gstatic.com/ipranges/cloud.json";

        #[derive(Debug, serde::Deserialize)]
        struct GoogleIpRanges {
            prefixes: Vec<GooglePrefix>,
        }

        #[derive(Debug, serde::Deserialize)]
        struct GooglePrefix {
            // cloud.json uses camelCase for this field.
            #[serde(rename = "ipv4Prefix")]
            ipv4_prefix: Option<String>,
        }

        match client.get(url).send().await {
            Ok(response) => {
                if response.status().is_success() {
                    let ip_ranges: GoogleIpRanges = response
                        .json()
                        .await
                        .map_err(|e| AppError::cloud(format!("Failed to parse Google IP ranges JSON: {}", e)))?;

                    let mut networks = Vec::new();

                    for prefix in ip_ranges.prefixes {
                        if let Some(ipv4_prefix) = prefix.ipv4_prefix {
                            if let Ok(network) = IpNetwork::from_str(&ipv4_prefix) {
                                networks.push(network);
                            }
                        }
                    }

                    info!("Successfully fetched {} Google Cloud IP ranges from API", networks.len());
                    Ok(networks)
                } else {
                    debug!("Failed to fetch Google IP ranges: HTTP {}", response.status());
                    Ok(Vec::new())
                }
            }
            Err(e) => {
                debug!("Failed to fetch Google IP ranges: {}", e);
                Ok(Vec::new())
            }
        }
    }
}
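A usage sketch for these range utilities (the call site is hypothetical; the API-with-static-fallback behavior is as implemented above): fetch the Cloudflare list once and test peer addresses against it.

use std::net::IpAddr;

// Hypothetical call site; treats fetch failures as "not a Cloudflare peer".
async fn is_cloudflare_peer(peer: IpAddr) -> bool {
    match CloudflareIpRanges::fetch_from_api().await {
        Ok(ranges) => ranges.iter().any(|net| net.contains(peer)),
        Err(_) => false,
    }
}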
208
crates/trusted-proxies/src/config/env.rs
Normal file
208
crates/trusted-proxies/src/config/env.rs
Normal file
@@ -0,0 +1,208 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Environment variable configuration constants and helpers for the trusted proxy system.
|
||||
|
||||
use crate::error::ConfigError;
|
||||
use ipnetwork::IpNetwork;
|
||||
use std::str::FromStr;
|
||||
|
||||
// ==================== Base Proxy Configuration ====================
|
||||
/// Environment variable for the proxy validation mode.
|
||||
pub const ENV_PROXY_VALIDATION_MODE: &str = "TRUSTED_PROXY_VALIDATION_MODE";
|
||||
/// Default validation mode is "hop_by_hop".
|
||||
pub const DEFAULT_PROXY_VALIDATION_MODE: &str = "hop_by_hop";
|
||||
|
||||
/// Environment variable to enable RFC 7239 "Forwarded" header support.
|
||||
pub const ENV_PROXY_ENABLE_RFC7239: &str = "TRUSTED_PROXY_ENABLE_RFC7239";
|
||||
/// RFC 7239 support is enabled by default.
|
||||
pub const DEFAULT_PROXY_ENABLE_RFC7239: bool = true;
|
||||
|
||||
/// Environment variable for the maximum allowed proxy hops.
|
||||
pub const ENV_PROXY_MAX_HOPS: &str = "TRUSTED_PROXY_MAX_HOPS";
|
||||
/// Default maximum hops is 10.
|
||||
pub const DEFAULT_PROXY_MAX_HOPS: usize = 10;
|
||||
|
||||
/// Environment variable to enable proxy chain continuity checks.
|
||||
pub const ENV_PROXY_CHAIN_CONTINUITY_CHECK: &str = "TRUSTED_PROXY_CHAIN_CONTINUITY_CHECK";
|
||||
/// Continuity checks are enabled by default.
|
||||
pub const DEFAULT_PROXY_CHAIN_CONTINUITY_CHECK: bool = true;
|
||||
|
||||
/// Environment variable to enable logging of failed proxy validations.
|
||||
pub const ENV_PROXY_LOG_FAILED_VALIDATIONS: &str = "TRUSTED_PROXY_LOG_FAILED_VALIDATIONS";
|
||||
/// Logging of failed validations is enabled by default.
|
||||
pub const DEFAULT_PROXY_LOG_FAILED_VALIDATIONS: bool = true;
|
||||
|
||||
// ==================== Trusted Proxy Networks ====================
|
||||
/// Environment variable for the list of trusted proxy networks (comma-separated IP/CIDR).
|
||||
pub const ENV_TRUSTED_PROXIES: &str = "TRUSTED_PROXY_NETWORKS";
|
||||
/// Default trusted networks include localhost and common private ranges.
|
||||
pub const DEFAULT_TRUSTED_PROXIES: &str = "127.0.0.1,::1,10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,fd00::/8";
|
||||
|
||||
/// Environment variable for additional trusted proxy networks (production specific).
|
||||
pub const ENV_EXTRA_TRUSTED_PROXIES: &str = "TRUSTED_PROXY_EXTRA_NETWORKS";
|
||||
/// No extra trusted networks by default.
|
||||
pub const DEFAULT_EXTRA_TRUSTED_PROXIES: &str = "";
|
||||
|
||||
/// Environment variable for private network ranges used in internal validation.
|
||||
pub const ENV_PRIVATE_NETWORKS: &str = "TRUSTED_PROXY_PRIVATE_NETWORKS";
|
||||
/// Default private networks include common RFC 1918 and RFC 4193 ranges.
|
||||
pub const DEFAULT_PRIVATE_NETWORKS: &str = "10.0.0.0/8,172.16.0.0/12,192.168.0.0/16,fd00::/8";
|
||||
|
||||
// ==================== Cache Configuration ====================
|
||||
/// Environment variable for the proxy validation cache capacity.
|
||||
pub const ENV_CACHE_CAPACITY: &str = "TRUSTED_PROXY_CACHE_CAPACITY";
|
||||
/// Default cache capacity is 10,000 entries.
|
||||
pub const DEFAULT_CACHE_CAPACITY: usize = 10_000;
|
||||
|
||||
/// Environment variable for the cache entry time-to-live (TTL) in seconds.
|
||||
pub const ENV_CACHE_TTL_SECONDS: &str = "TRUSTED_PROXY_CACHE_TTL_SECONDS";
|
||||
/// Default cache TTL is 300 seconds (5 minutes).
|
||||
pub const DEFAULT_CACHE_TTL_SECONDS: u64 = 300;
|
||||
|
||||
/// Environment variable for the cache cleanup interval in seconds.
|
||||
pub const ENV_CACHE_CLEANUP_INTERVAL: &str = "TRUSTED_PROXY_CACHE_CLEANUP_INTERVAL";
|
||||
/// Default cleanup interval is 60 seconds.
|
||||
pub const DEFAULT_CACHE_CLEANUP_INTERVAL: u64 = 60;
|
||||
|
||||
// ==================== Monitoring Configuration ====================
|
||||
/// Environment variable to enable Prometheus metrics.
|
||||
pub const ENV_METRICS_ENABLED: &str = "TRUSTED_PROXY_METRICS_ENABLED";
|
||||
/// Metrics are enabled by default.
|
||||
pub const DEFAULT_METRICS_ENABLED: bool = true;
|
||||
|
||||
/// Environment variable for the application log level.
|
||||
pub const ENV_LOG_LEVEL: &str = "TRUSTED_PROXY_LOG_LEVEL";
|
||||
/// Default log level is "info".
|
||||
pub const DEFAULT_LOG_LEVEL: &str = "info";
|
||||
|
||||
/// Environment variable to enable structured JSON logging.
|
||||
pub const ENV_STRUCTURED_LOGGING: &str = "TRUSTED_PROXY_STRUCTURED_LOGGING";
|
||||
/// Structured logging is disabled by default.
|
||||
pub const DEFAULT_STRUCTURED_LOGGING: bool = false;
|
||||
|
||||
/// Environment variable to enable distributed tracing.
|
||||
pub const ENV_TRACING_ENABLED: &str = "TRUSTED_PROXY_TRACING_ENABLED";
|
||||
/// Tracing is enabled by default.
|
||||
pub const DEFAULT_TRACING_ENABLED: bool = true;
|
||||
|
||||
// ==================== Cloud Integration ====================
|
||||
/// Environment variable to enable automatic cloud metadata discovery.
|
||||
pub const ENV_CLOUD_METADATA_ENABLED: &str = "TRUSTED_PROXY_CLOUD_METADATA_ENABLED";
|
||||
/// Cloud metadata discovery is disabled by default.
|
||||
pub const DEFAULT_CLOUD_METADATA_ENABLED: bool = false;
|
||||
|
||||
/// Environment variable for the cloud metadata request timeout in seconds.
|
||||
pub const ENV_CLOUD_METADATA_TIMEOUT: &str = "TRUSTED_PROXY_CLOUD_METADATA_TIMEOUT";
|
||||
/// Default cloud metadata timeout is 5 seconds.
|
||||
pub const DEFAULT_CLOUD_METADATA_TIMEOUT: u64 = 5;
|
||||
|
||||
/// Environment variable to enable Cloudflare IP range integration.
|
||||
pub const ENV_CLOUDFLARE_IPS_ENABLED: &str = "TRUSTED_PROXY_CLOUDFLARE_IPS_ENABLED";
/// Cloudflare integration is disabled by default.
pub const DEFAULT_CLOUDFLARE_IPS_ENABLED: bool = false;

/// Environment variable to force a specific cloud provider (overrides auto-detection).
pub const ENV_CLOUD_PROVIDER_FORCE: &str = "TRUSTED_PROXY_CLOUD_PROVIDER_FORCE";
/// No forced provider by default.
pub const DEFAULT_CLOUD_PROVIDER_FORCE: &str = "";

// ==================== Helper Functions ====================

/// Parses a comma-separated list of IP/CIDR strings from an environment variable.
pub fn parse_ip_list_from_env(key: &str, default: &str) -> Result<Vec<IpNetwork>, ConfigError> {
    let value = std::env::var(key).unwrap_or_else(|_| default.to_string());

    if value.trim().is_empty() {
        return Ok(Vec::new());
    }

    let mut networks = Vec::new();
    for item in value.split(',') {
        let item = item.trim();
        if item.is_empty() {
            continue;
        }

        match IpNetwork::from_str(item) {
            Ok(network) => networks.push(network),
            Err(e) => {
                tracing::warn!("Failed to parse network '{}' from environment variable {}: {}", item, key, e);
            }
        }
    }

    Ok(networks)
}

/// Parses a comma-separated list of strings from an environment variable.
pub fn parse_string_list_from_env(key: &str, default: &str) -> Vec<String> {
    let value = std::env::var(key).unwrap_or_else(|_| default.to_string());

    value
        .split(',')
        .map(|s| s.trim().to_string())
        .filter(|s| !s.is_empty())
        .collect()
}

/// Retrieves a boolean value from an environment variable.
pub fn get_bool_from_env(key: &str, default: bool) -> bool {
    std::env::var(key)
        .map(|v| match v.to_lowercase().as_str() {
            "true" | "1" | "yes" | "on" => true,
            "false" | "0" | "no" | "off" => false,
            _ => default,
        })
        .unwrap_or(default)
}

/// Retrieves a `usize` value from an environment variable.
pub fn get_usize_from_env(key: &str, default: usize) -> usize {
    std::env::var(key).ok().and_then(|v| v.parse().ok()).unwrap_or(default)
}

/// Retrieves a `u64` value from an environment variable.
pub fn get_u64_from_env(key: &str, default: u64) -> u64 {
    std::env::var(key).ok().and_then(|v| v.parse().ok()).unwrap_or(default)
}

/// Retrieves a string value from an environment variable.
pub fn get_string_from_env(key: &str, default: &str) -> String {
    std::env::var(key).unwrap_or_else(|_| default.to_string())
}

/// Checks if an environment variable is set.
pub fn is_env_set(key: &str) -> bool {
    std::env::var(key).is_ok()
}

/// Returns a list of all proxy-related environment variables and their current values.
pub fn get_all_proxy_env_vars() -> Vec<(String, String)> {
    let vars = [
        ENV_PROXY_VALIDATION_MODE,
        ENV_PROXY_ENABLE_RFC7239,
        ENV_PROXY_MAX_HOPS,
        ENV_PROXY_CHAIN_CONTINUITY_CHECK,
        ENV_TRUSTED_PROXIES,
        ENV_EXTRA_TRUSTED_PROXIES,
        ENV_CLOUD_METADATA_ENABLED,
        ENV_CLOUD_METADATA_TIMEOUT,
        ENV_CLOUDFLARE_IPS_ENABLED,
    ];

    vars.iter()
        .filter_map(|&key| std::env::var(key).ok().map(|value| (key.to_string(), value)))
        .collect()
}
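These helpers keep configuration parsing forgiving: the list parsers skip invalid entries with a warning instead of failing, and the scalar getters fall back to their defaults on any parse error. A minimal usage sketch under assumptions (the `rustfs_trusted_proxies::config` import path is inferred from the crate directory, and the `*_DEMO` variable names are hypothetical):

use rustfs_trusted_proxies::config::{get_bool_from_env, parse_ip_list_from_env};

fn main() {
    // Hypothetical demo variables; any comma-separated IP/CIDR list works.
    std::env::set_var("TRUSTED_PROXIES_DEMO", "10.0.0.0/8, 192.168.1.1/32");
    std::env::set_var("FLAG_DEMO", "yes");

    // Invalid entries are logged via tracing::warn! and skipped, not fatal.
    let nets = parse_ip_list_from_env("TRUSTED_PROXIES_DEMO", "").unwrap();
    assert_eq!(nets.len(), 2);

    // "true"/"1"/"yes"/"on" parse as true; unrecognized or unset values yield the default.
    assert!(get_bool_from_env("FLAG_DEMO", false));
    assert!(!get_bool_from_env("UNSET_FLAG_DEMO", false));
}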
201
crates/trusted-proxies/src/config/loader.rs
Normal file
@@ -0,0 +1,201 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Configuration loader for environment variables and files.

use std::net::{IpAddr, SocketAddr};

use crate::config::env::*;
use crate::config::{AppConfig, CacheConfig, CloudConfig, MonitoringConfig, TrustedProxy, TrustedProxyConfig, ValidationMode};
use crate::error::ConfigError;

/// Loader for application configuration.
#[derive(Debug, Clone)]
pub struct ConfigLoader;

impl ConfigLoader {
    /// Loads the complete application configuration from environment variables.
    pub fn from_env() -> Result<AppConfig, ConfigError> {
        // Load proxy-specific configuration.
        let proxy_config = Self::load_proxy_config()?;

        // Load cache configuration.
        let cache_config = Self::load_cache_config();

        // Load monitoring and observability configuration.
        let monitoring_config = Self::load_monitoring_config();

        // Load cloud provider integration configuration.
        let cloud_config = Self::load_cloud_config();

        // Load server binding address.
        let server_addr = Self::load_server_addr();

        Ok(AppConfig::new(proxy_config, cache_config, monitoring_config, cloud_config, server_addr))
    }

    /// Loads trusted proxy configuration from environment variables.
    fn load_proxy_config() -> Result<TrustedProxyConfig, ConfigError> {
        let mut proxies = Vec::new();

        // Parse base trusted proxies from environment.
        let base_networks = parse_ip_list_from_env(ENV_TRUSTED_PROXIES, DEFAULT_TRUSTED_PROXIES)?;
        for network in base_networks {
            proxies.push(TrustedProxy::Cidr(network));
        }

        // Parse extra trusted proxies from environment.
        let extra_networks = parse_ip_list_from_env(ENV_EXTRA_TRUSTED_PROXIES, DEFAULT_EXTRA_TRUSTED_PROXIES)?;
        for network in extra_networks {
            proxies.push(TrustedProxy::Cidr(network));
        }

        // Parse individual trusted proxy IPs.
        let ip_strings = parse_string_list_from_env("TRUSTED_PROXY_IPS", "");
        for ip_str in ip_strings {
            if let Ok(ip) = ip_str.parse::<IpAddr>() {
                proxies.push(TrustedProxy::Single(ip));
            }
        }

        // Determine validation mode.
        let validation_mode_str = get_string_from_env(ENV_PROXY_VALIDATION_MODE, DEFAULT_PROXY_VALIDATION_MODE);
        let validation_mode = ValidationMode::from_str(&validation_mode_str)?;

        // Load other proxy settings.
        let enable_rfc7239 = get_bool_from_env(ENV_PROXY_ENABLE_RFC7239, DEFAULT_PROXY_ENABLE_RFC7239);
        let max_hops = get_usize_from_env(ENV_PROXY_MAX_HOPS, DEFAULT_PROXY_MAX_HOPS);
        let enable_chain_check = get_bool_from_env(ENV_PROXY_CHAIN_CONTINUITY_CHECK, DEFAULT_PROXY_CHAIN_CONTINUITY_CHECK);

        // Load private network ranges.
        let private_networks = parse_ip_list_from_env(ENV_PRIVATE_NETWORKS, DEFAULT_PRIVATE_NETWORKS)?;

        Ok(TrustedProxyConfig::new(
            proxies,
            validation_mode,
            enable_rfc7239,
            max_hops,
            enable_chain_check,
            private_networks,
        ))
    }

    /// Loads cache configuration from environment variables.
    fn load_cache_config() -> CacheConfig {
        CacheConfig {
            capacity: get_usize_from_env(ENV_CACHE_CAPACITY, DEFAULT_CACHE_CAPACITY),
            ttl_seconds: get_u64_from_env(ENV_CACHE_TTL_SECONDS, DEFAULT_CACHE_TTL_SECONDS),
            cleanup_interval_seconds: get_u64_from_env(ENV_CACHE_CLEANUP_INTERVAL, DEFAULT_CACHE_CLEANUP_INTERVAL),
        }
    }

    /// Loads monitoring configuration from environment variables.
    fn load_monitoring_config() -> MonitoringConfig {
        MonitoringConfig {
            metrics_enabled: get_bool_from_env(ENV_METRICS_ENABLED, DEFAULT_METRICS_ENABLED),
            log_level: get_string_from_env(ENV_LOG_LEVEL, DEFAULT_LOG_LEVEL),
            structured_logging: get_bool_from_env(ENV_STRUCTURED_LOGGING, DEFAULT_STRUCTURED_LOGGING),
            tracing_enabled: get_bool_from_env(ENV_TRACING_ENABLED, DEFAULT_TRACING_ENABLED),
            log_failed_validations: get_bool_from_env(ENV_PROXY_LOG_FAILED_VALIDATIONS, DEFAULT_PROXY_LOG_FAILED_VALIDATIONS),
        }
    }

    /// Loads cloud configuration from environment variables.
    fn load_cloud_config() -> CloudConfig {
        let forced_provider_str = get_string_from_env(ENV_CLOUD_PROVIDER_FORCE, DEFAULT_CLOUD_PROVIDER_FORCE);
        let forced_provider = if forced_provider_str.is_empty() {
            None
        } else {
            Some(forced_provider_str)
        };

        CloudConfig {
            metadata_enabled: get_bool_from_env(ENV_CLOUD_METADATA_ENABLED, DEFAULT_CLOUD_METADATA_ENABLED),
            metadata_timeout_seconds: get_u64_from_env(ENV_CLOUD_METADATA_TIMEOUT, DEFAULT_CLOUD_METADATA_TIMEOUT),
            cloudflare_ips_enabled: get_bool_from_env(ENV_CLOUDFLARE_IPS_ENABLED, DEFAULT_CLOUDFLARE_IPS_ENABLED),
            forced_provider,
        }
    }

    /// Loads the server binding address from environment variables.
    fn load_server_addr() -> SocketAddr {
        let host = get_string_from_env("SERVER_HOST", "0.0.0.0");
        let port = get_usize_from_env("SERVER_PORT", 3000) as u16;

        format!("{}:{}", host, port)
            .parse()
            .unwrap_or_else(|_| SocketAddr::from(([0, 0, 0, 0], 3000)))
    }

    /// Loads configuration from environment, falling back to defaults on failure.
    pub fn from_env_or_default() -> AppConfig {
        match Self::from_env() {
            Ok(config) => {
                tracing::info!("Configuration loaded successfully from environment variables");
                config
            }
            Err(e) => {
                tracing::warn!("Failed to load configuration from environment: {}. Using defaults", e);
                Self::default_config()
            }
        }
    }

    /// Returns a default configuration.
    pub fn default_config() -> AppConfig {
        let proxy_config = TrustedProxyConfig::new(
            vec![
                TrustedProxy::Single("127.0.0.1".parse().unwrap()),
                TrustedProxy::Single("::1".parse().unwrap()),
            ],
            ValidationMode::HopByHop,
            true,
            10,
            true,
            vec![
                "10.0.0.0/8".parse().unwrap(),
                "172.16.0.0/12".parse().unwrap(),
                "192.168.0.0/16".parse().unwrap(),
            ],
        );

        AppConfig::new(
            proxy_config,
            CacheConfig::default(),
            MonitoringConfig::default(),
            CloudConfig::default(),
            "0.0.0.0:3000".parse().unwrap(),
        )
    }

    /// Prints a summary of the configuration to the log.
    pub fn print_summary(config: &AppConfig) {
        tracing::info!("=== Application Configuration ===");
        tracing::info!("Server: {}", config.server_addr);
        tracing::info!("Trusted Proxies: {}", config.proxy.proxies.len());
        tracing::info!("Validation Mode: {:?}", config.proxy.validation_mode);
        tracing::info!("Cache Capacity: {}", config.cache.capacity);
        tracing::info!("Metrics Enabled: {}", config.monitoring.metrics_enabled);
        tracing::info!("Cloud Metadata: {}", config.cloud.metadata_enabled);

        if config.monitoring.log_failed_validations {
            tracing::info!("Failed validations will be logged");
        }

        if !config.proxy.proxies.is_empty() {
            tracing::debug!("Trusted networks: {:?}", config.proxy.get_network_strings());
        }
    }
}
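With the loader in place, startup code only needs the fail-safe entry point. A small sketch of the intended call pattern (the `rustfs_trusted_proxies::config` import path and the example values are assumptions; the `ENV_*` names are the constants defined in env.rs):

use rustfs_trusted_proxies::config::{ConfigLoader, ENV_PROXY_VALIDATION_MODE, ENV_TRUSTED_PROXIES};

fn main() {
    // Example values only; unset variables fall back to the documented defaults.
    std::env::set_var(ENV_TRUSTED_PROXIES, "10.0.0.0/8,203.0.113.7/32");
    std::env::set_var(ENV_PROXY_VALIDATION_MODE, "hop_by_hop");

    // from_env_or_default() never panics: a malformed environment is logged
    // with a warning and replaced by default_config() (loopback proxies, hop-by-hop mode).
    let config = ConfigLoader::from_env_or_default();
    ConfigLoader::print_summary(&config);
}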
24
crates/trusted-proxies/src/config/mod.rs
Normal file
@@ -0,0 +1,24 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod env;
mod loader;
mod types;

pub use env::*;
// Re-export commonly used types
pub use ipnetwork::IpNetwork;
pub use loader::*;
pub use std::net::{IpAddr, SocketAddr};
pub use types::*;
297
crates/trusted-proxies/src/config/types.rs
Normal file
@@ -0,0 +1,297 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Configuration type definitions for the trusted proxy system.

use ipnetwork::IpNetwork;
use serde::{Deserialize, Serialize};
use std::net::{IpAddr, SocketAddr};
use std::time::Duration;

use crate::error::ConfigError;

/// Proxy validation mode defining how the proxy chain is verified.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize, Deserialize)]
#[serde(rename_all = "snake_case")]
pub enum ValidationMode {
    /// Lenient mode: Accepts the entire chain as long as the last proxy is trusted.
    Lenient,
    /// Strict mode: Requires all proxies in the chain to be trusted.
    Strict,
    /// Hop-by-hop mode: Finds the first untrusted proxy from right to left.
    /// This is the recommended mode for most production environments.
    HopByHop,
}

impl ValidationMode {
    /// Parses the validation mode from a string.
    pub fn from_str(s: &str) -> Result<Self, ConfigError> {
        match s.to_lowercase().as_str() {
            "lenient" => Ok(Self::Lenient),
            "strict" => Ok(Self::Strict),
            "hop_by_hop" | "hopbyhop" => Ok(Self::HopByHop),
            _ => Err(ConfigError::InvalidConfig(format!(
                "Invalid validation mode: '{}'. Must be one of: lenient, strict, hop_by_hop",
                s
            ))),
        }
    }

    /// Returns the string representation of the validation mode.
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Lenient => "lenient",
            Self::Strict => "strict",
            Self::HopByHop => "hop_by_hop",
        }
    }
}

impl Default for ValidationMode {
    fn default() -> Self {
        Self::HopByHop
    }
}

/// Represents a trusted proxy entry, which can be a single IP or a CIDR range.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum TrustedProxy {
    /// A single IP address.
    Single(IpAddr),
    /// An IP network range (CIDR notation).
    Cidr(IpNetwork),
}

impl TrustedProxy {
    /// Checks if the given IP address matches this proxy configuration.
    pub fn contains(&self, ip: &IpAddr) -> bool {
        match self {
            Self::Single(proxy_ip) => ip == proxy_ip,
            Self::Cidr(network) => network.contains(*ip),
        }
    }
}

/// Formats the proxy entry as its string representation (single IP or CIDR).
impl std::fmt::Display for TrustedProxy {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        match self {
            Self::Single(ip) => write!(f, "{}", ip),
            Self::Cidr(network) => write!(f, "{}", network),
        }
    }
}

/// Configuration for trusted proxies and validation logic.
#[derive(Debug, Clone)]
pub struct TrustedProxyConfig {
    /// List of trusted proxy entries.
    pub proxies: Vec<TrustedProxy>,
    /// The validation mode to use for verifying proxy chains.
    pub validation_mode: ValidationMode,
    /// Whether to enable RFC 7239 "Forwarded" header support.
    pub enable_rfc7239: bool,
    /// Maximum allowed proxy hops in the chain.
    pub max_hops: usize,
    /// Whether to enable continuity checks for the proxy chain.
    pub enable_chain_continuity_check: bool,
    /// Private network ranges that should be treated with caution.
    pub private_networks: Vec<IpNetwork>,
}

impl TrustedProxyConfig {
    /// Creates a new trusted proxy configuration.
    pub fn new(
        proxies: Vec<TrustedProxy>,
        validation_mode: ValidationMode,
        enable_rfc7239: bool,
        max_hops: usize,
        enable_chain_continuity_check: bool,
        private_networks: Vec<IpNetwork>,
    ) -> Self {
        Self {
            proxies,
            validation_mode,
            enable_rfc7239,
            max_hops,
            enable_chain_continuity_check,
            private_networks,
        }
    }

    /// Checks if a SocketAddr originates from a trusted proxy.
    pub fn is_trusted(&self, addr: &SocketAddr) -> bool {
        let ip = addr.ip();
        self.proxies.iter().any(|proxy| proxy.contains(&ip))
    }

    /// Checks if an IP address belongs to a private network range.
    pub fn is_private_network(&self, ip: &IpAddr) -> bool {
        self.private_networks.iter().any(|network| network.contains(*ip))
    }

    /// Returns a list of all network strings for debugging purposes.
    pub fn get_network_strings(&self) -> Vec<String> {
        self.proxies.iter().map(|p| p.to_string()).collect()
    }

    /// Returns a summary of the configuration.
    pub fn summary(&self) -> String {
        format!(
            "TrustedProxyConfig {{ proxies: {}, mode: {}, max_hops: {} }}",
            self.proxies.len(),
            self.validation_mode.as_str(),
            self.max_hops
        )
    }
}

/// Configuration for the internal caching mechanism.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CacheConfig {
    /// Maximum number of entries in the cache.
    pub capacity: usize,
    /// Time-to-live for cache entries in seconds.
    pub ttl_seconds: u64,
    /// Interval for cache cleanup in seconds.
    pub cleanup_interval_seconds: u64,
}

impl Default for CacheConfig {
    fn default() -> Self {
        Self {
            capacity: 10_000,
            ttl_seconds: 300,
            cleanup_interval_seconds: 60,
        }
    }
}

impl CacheConfig {
    /// Returns the TTL as a Duration.
    pub fn ttl_duration(&self) -> Duration {
        Duration::from_secs(self.ttl_seconds)
    }

    /// Returns the cleanup interval as a Duration.
    pub fn cleanup_interval(&self) -> Duration {
        Duration::from_secs(self.cleanup_interval_seconds)
    }
}

/// Configuration for monitoring and observability.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct MonitoringConfig {
    /// Whether to enable Prometheus metrics.
    pub metrics_enabled: bool,
    /// The logging level (e.g., "info", "debug").
    pub log_level: String,
    /// Whether to use structured JSON logging.
    pub structured_logging: bool,
    /// Whether to enable distributed tracing.
    pub tracing_enabled: bool,
    /// Whether to log detailed information about failed validations.
    pub log_failed_validations: bool,
}

impl Default for MonitoringConfig {
    fn default() -> Self {
        Self {
            metrics_enabled: true,
            log_level: "info".to_string(),
            structured_logging: false,
            tracing_enabled: true,
            log_failed_validations: true,
        }
    }
}

/// Configuration for cloud provider integration.
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct CloudConfig {
    /// Whether to enable automatic cloud metadata discovery.
    pub metadata_enabled: bool,
    /// Timeout for cloud metadata requests in seconds.
    pub metadata_timeout_seconds: u64,
    /// Whether to automatically include Cloudflare IP ranges.
    pub cloudflare_ips_enabled: bool,
    /// Optionally force a specific cloud provider.
    pub forced_provider: Option<String>,
}

impl Default for CloudConfig {
    fn default() -> Self {
        Self {
            metadata_enabled: false,
            metadata_timeout_seconds: 5,
            cloudflare_ips_enabled: false,
            forced_provider: None,
        }
    }
}

impl CloudConfig {
    /// Returns the metadata timeout as a Duration.
    pub fn metadata_timeout(&self) -> Duration {
        Duration::from_secs(self.metadata_timeout_seconds)
    }
}

/// Complete application configuration.
#[derive(Debug, Clone)]
pub struct AppConfig {
    /// Trusted proxy settings.
    pub proxy: TrustedProxyConfig,
    /// Cache settings.
    pub cache: CacheConfig,
    /// Monitoring and observability settings.
    pub monitoring: MonitoringConfig,
    /// Cloud integration settings.
    pub cloud: CloudConfig,
    /// The address the server should bind to.
    pub server_addr: SocketAddr,
}

impl AppConfig {
    /// Creates a new application configuration.
    pub fn new(
        proxy: TrustedProxyConfig,
        cache: CacheConfig,
        monitoring: MonitoringConfig,
        cloud: CloudConfig,
        server_addr: SocketAddr,
    ) -> Self {
        Self {
            proxy,
            cache,
            monitoring,
            cloud,
            server_addr,
        }
    }

    /// Returns a summary of the application configuration.
    pub fn summary(&self) -> String {
        format!(
            "AppConfig {{\n\
             \x20\x20proxy: {},\n\
             \x20\x20cache_capacity: {},\n\
             \x20\x20metrics: {},\n\
             \x20\x20cloud_metadata: {}\n\
             }}",
            self.proxy.summary(),
            self.cache.capacity,
            self.monitoring.metrics_enabled,
            self.cloud.metadata_enabled
        )
    }
}
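Taken together, these types make trust checks a pure lookup against the configured entries. A minimal sketch of that behavior (the crate import path is assumed; all values are illustrative):

use std::net::SocketAddr;
use rustfs_trusted_proxies::config::{TrustedProxy, TrustedProxyConfig, ValidationMode};

fn main() {
    let config = TrustedProxyConfig::new(
        vec![
            TrustedProxy::Single("127.0.0.1".parse().unwrap()),
            TrustedProxy::Cidr("10.0.0.0/8".parse().unwrap()),
        ],
        ValidationMode::HopByHop,
        true, // enable_rfc7239
        10,   // max_hops
        true, // enable_chain_continuity_check
        vec!["192.168.0.0/16".parse().unwrap()],
    );

    // Any IP inside 10.0.0.0/8 matches the Cidr entry; others do not.
    let peer: SocketAddr = "10.1.2.3:54321".parse().unwrap();
    assert!(config.is_trusted(&peer));
    assert!(!config.is_trusted(&"203.0.113.9:80".parse().unwrap()));
    assert!(config.is_private_network(&"192.168.1.1".parse().unwrap()));
}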
82
crates/trusted-proxies/src/error/config.rs
Normal file
@@ -0,0 +1,82 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Configuration error types for the trusted proxy system.

use std::net::AddrParseError;

/// Errors related to application configuration.
#[derive(Debug, thiserror::Error)]
pub enum ConfigError {
    /// Required environment variable is missing.
    #[error("Missing environment variable: {0}")]
    MissingEnvVar(String),

    /// Environment variable exists but could not be parsed.
    #[error("Failed to parse environment variable {0}: {1}")]
    EnvParseError(String, String),

    /// A configuration value is logically invalid.
    #[error("Invalid configuration value for {0}: {1}")]
    InvalidValue(String, String),

    /// An IP address or CIDR range is malformed.
    #[error("Invalid IP address or network: {0}")]
    InvalidIp(String),

    /// Configuration failed overall validation.
    #[error("Configuration validation failed: {0}")]
    ValidationFailed(String),

    /// Two or more configuration settings are in conflict.
    #[error("Configuration conflict: {0}")]
    Conflict(String),

    /// Error reading or parsing a configuration file.
    #[error("Config file error: {0}")]
    FileError(String),

    /// General invalid configuration error.
    #[error("Invalid config: {0}")]
    InvalidConfig(String),
}

impl From<AddrParseError> for ConfigError {
    fn from(err: AddrParseError) -> Self {
        Self::InvalidIp(err.to_string())
    }
}

impl From<ipnetwork::IpNetworkError> for ConfigError {
    fn from(err: ipnetwork::IpNetworkError) -> Self {
        Self::InvalidIp(err.to_string())
    }
}

impl ConfigError {
    /// Creates a `MissingEnvVar` error.
    pub fn missing_env_var(key: &str) -> Self {
        Self::MissingEnvVar(key.to_string())
    }

    /// Creates an `EnvParseError`.
    pub fn env_parse(key: &str, value: &str) -> Self {
        Self::EnvParseError(key.to_string(), value.to_string())
    }

    /// Creates an `InvalidValue` error.
    pub fn invalid_value(field: &str, value: &str) -> Self {
        Self::InvalidValue(field.to_string(), value.to_string())
    }
}
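The `From` impls above are what make `?` ergonomic at config-parsing call sites: address and network parse failures convert into `ConfigError::InvalidIp` automatically. A brief hedged sketch (the `parse_bind_ip` helper is hypothetical):

use rustfs_trusted_proxies::error::ConfigError;

// AddrParseError converts into ConfigError::InvalidIp via the From impl above.
fn parse_bind_ip(raw: &str) -> Result<std::net::IpAddr, ConfigError> {
    let ip: std::net::IpAddr = raw.parse()?;
    Ok(ip)
}

fn main() {
    assert!(parse_bind_ip("127.0.0.1").is_ok());
    assert!(matches!(parse_bind_ip("not-an-ip"), Err(ConfigError::InvalidIp(_))));
}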
94
crates/trusted-proxies/src/error/mod.rs
Normal file
@@ -0,0 +1,94 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Error types for the trusted proxy system.

mod config;
mod proxy;

pub use config::*;
pub use proxy::*;

/// Unified error type for the application.
#[derive(Debug, thiserror::Error)]
pub enum AppError {
    /// Errors related to configuration.
    #[error("Configuration error: {0}")]
    Config(#[from] ConfigError),

    /// Errors related to proxy validation.
    #[error("Proxy validation error: {0}")]
    Proxy(#[from] ProxyError),

    /// Errors related to cloud service integration.
    #[error("Cloud service error: {0}")]
    Cloud(String),

    /// General internal errors.
    #[error("Internal error: {0}")]
    Internal(String),

    /// Standard I/O errors.
    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),

    /// Errors related to HTTP requests or responses.
    #[error("HTTP error: {0}")]
    Http(String),
}

impl AppError {
    /// Creates a new `Cloud` error.
    pub fn cloud(msg: impl Into<String>) -> Self {
        Self::Cloud(msg.into())
    }

    /// Creates a new `Internal` error.
    pub fn internal(msg: impl Into<String>) -> Self {
        Self::Internal(msg.into())
    }

    /// Creates a new `Http` error.
    pub fn http(msg: impl Into<String>) -> Self {
        Self::Http(msg.into())
    }

    /// Returns true if the error is considered recoverable.
    pub fn is_recoverable(&self) -> bool {
        match self {
            Self::Config(_) => true,
            Self::Proxy(e) => e.is_recoverable(),
            Self::Cloud(_) => true,
            Self::Internal(_) => false,
            Self::Io(_) => true,
            Self::Http(_) => true,
        }
    }
}

/// Type alias for API error responses (Status Code, Error Message).
pub type ApiError = (axum::http::StatusCode, String);

impl From<AppError> for ApiError {
    fn from(err: AppError) -> Self {
        match err {
            AppError::Config(_) => (axum::http::StatusCode::BAD_REQUEST, err.to_string()),
            AppError::Proxy(_) => (axum::http::StatusCode::BAD_REQUEST, err.to_string()),
            AppError::Cloud(_) => (axum::http::StatusCode::SERVICE_UNAVAILABLE, err.to_string()),
            AppError::Internal(_) => (axum::http::StatusCode::INTERNAL_SERVER_ERROR, err.to_string()),
            AppError::Io(_) => (axum::http::StatusCode::INTERNAL_SERVER_ERROR, err.to_string()),
            AppError::Http(_) => (axum::http::StatusCode::BAD_GATEWAY, err.to_string()),
        }
    }
}
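Because `ApiError` is `(StatusCode, String)`, which axum already knows how to turn into a response, handlers can return `Result<_, ApiError>` and convert an `AppError` with `.into()`. A sketch under assumptions (axum 0.7-style routing; the `health` route and readiness flag are hypothetical):

use axum::{routing::get, Router};
use rustfs_trusted_proxies::error::{ApiError, AppError};

async fn health() -> Result<&'static str, ApiError> {
    let ready = true; // hypothetical readiness check
    if ready {
        Ok("ok")
    } else {
        // From<AppError> maps Internal(_) to 500 INTERNAL_SERVER_ERROR.
        Err(AppError::internal("service not ready").into())
    }
}

fn app() -> Router {
    Router::new().route("/health", get(health))
}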
114
crates/trusted-proxies/src/error/proxy.rs
Normal file
@@ -0,0 +1,114 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Proxy validation error types for the trusted proxy system.

use std::net::AddrParseError;

/// Errors that can occur during proxy chain validation.
#[derive(Debug, thiserror::Error)]
pub enum ProxyError {
    /// The X-Forwarded-For header is malformed or contains invalid data.
    #[error("Invalid X-Forwarded-For header: {0}")]
    InvalidXForwardedFor(String),

    /// The RFC 7239 Forwarded header is malformed.
    #[error("Invalid Forwarded header (RFC 7239): {0}")]
    InvalidForwardedHeader(String),

    /// General failure during proxy chain validation.
    #[error("Proxy chain validation failed: {0}")]
    ChainValidationFailed(String),

    /// The number of proxy hops exceeds the configured limit.
    #[error("Proxy chain too long: {0} hops (max: {1})")]
    ChainTooLong(usize, usize),

    /// The request originated from a proxy that is not in the trusted list.
    #[error("Request from untrusted proxy: {0}")]
    UntrustedProxy(String),

    /// The proxy chain is not continuous (e.g., an untrusted IP is between trusted ones).
    #[error("Proxy chain is not continuous")]
    ChainNotContinuous,

    /// An IP address in the chain could not be parsed.
    #[error("Failed to parse IP address: {0}")]
    IpParseError(String),

    /// A header value could not be parsed as a string.
    #[error("Failed to parse header: {0}")]
    HeaderParseError(String),

    /// Validation took too long and timed out.
    #[error("Validation timeout")]
    Timeout,

    /// An unexpected internal error occurred during validation.
    #[error("Internal validation error: {0}")]
    Internal(String),
}

impl From<AddrParseError> for ProxyError {
    fn from(err: AddrParseError) -> Self {
        Self::IpParseError(err.to_string())
    }
}

impl ProxyError {
    /// Creates an `InvalidXForwardedFor` error.
    pub fn invalid_xff(msg: impl Into<String>) -> Self {
        Self::InvalidXForwardedFor(msg.into())
    }

    /// Creates an `InvalidForwardedHeader` error.
    pub fn invalid_forwarded(msg: impl Into<String>) -> Self {
        Self::InvalidForwardedHeader(msg.into())
    }

    /// Creates a `ChainValidationFailed` error.
    pub fn chain_failed(msg: impl Into<String>) -> Self {
        Self::ChainValidationFailed(msg.into())
    }

    /// Creates an `UntrustedProxy` error.
    pub fn untrusted(proxy: impl Into<String>) -> Self {
        Self::UntrustedProxy(proxy.into())
    }

    /// Creates an `Internal` validation error.
    pub fn internal(msg: impl Into<String>) -> Self {
        Self::Internal(msg.into())
    }

    /// Determines if the error is recoverable, meaning the request can still be processed
    /// (perhaps by falling back to the direct peer IP).
    pub fn is_recoverable(&self) -> bool {
        match self {
            // These errors typically mean we should use the direct peer IP as a fallback.
            Self::UntrustedProxy(_) => true,
            Self::ChainTooLong(_, _) => true,
            Self::ChainNotContinuous => true,
            Self::Timeout => true,

            // These errors suggest malformed requests or severe configuration issues.
            Self::InvalidXForwardedFor(_) => false,
            Self::InvalidForwardedHeader(_) => false,
            Self::ChainValidationFailed(_) => false,
            Self::IpParseError(_) => false,
            Self::HeaderParseError(_) => false,
            Self::Internal(_) => false,
        }
    }
}
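A hedged sketch of the fallback policy the `is_recoverable` docs describe (the `resolve_client_ip` helper is hypothetical; a real middleware would obtain `peer_ip` from the connection and the validated result from the chain validator):

use std::net::IpAddr;
use rustfs_trusted_proxies::error::ProxyError;

fn resolve_client_ip(validated: Result<IpAddr, ProxyError>, peer_ip: IpAddr) -> Result<IpAddr, ProxyError> {
    match validated {
        Ok(ip) => Ok(ip),
        // Recoverable failures (untrusted proxy, overlong chain, timeout, ...)
        // degrade to the direct peer IP instead of rejecting the request.
        Err(e) if e.is_recoverable() => {
            tracing::warn!("proxy validation failed ({}), falling back to peer IP", e);
            Ok(peer_ip)
        }
        // Malformed headers and internal errors propagate to the caller.
        Err(e) => Err(e),
    }
}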
Some files were not shown because too many files have changed in this diff.