Compare commits

...

14 Commits

Author SHA1 Message Date
houseme
705cc0c9f6 Merge branch 'main' of github.com:rustfs/rustfs into feature/metric-1205 2025-12-21 17:56:06 +08:00
0xdx2
3e2252e4bb fix(config):Update argument parsing for volumes and server_domains to support del… (#1209)
Signed-off-by: houseme <housemecn@gmail.com>
Co-authored-by: houseme <housemecn@gmail.com>
Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-12-21 17:54:23 +08:00
loverustfs
f3a1431fa5 fix: resolve TLS handshake failure in inter-node communication (#1201) (#1222)
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-21 16:11:55 +08:00
yxrxy
3bd96bcf10 fix: resolve event target deletion issue (#1219) 2025-12-21 12:43:48 +08:00
majinghe
20ea591049 add custom nodeport support (#1217) 2025-12-20 22:02:21 +08:00
GatewayJ
cc31e88c91 fix: expiration time (#1215) 2025-12-20 20:25:52 +08:00
yxrxy
b5535083de fix(iam): store previous credentials in .rustfs.sys bucket to preserv… (#1213) 2025-12-20 19:15:49 +08:00
loverustfs
1e35edf079 chore(ci): restore workflows before 8e0aeb4 (#1212) 2025-12-20 07:50:49 +08:00
Copilot
8dd3e8b534 fix: decode form-urlencoded object names in webhook/mqtt Key field (#1210)
Co-authored-by: copilot-swe-agent[bot] <198982749+Copilot@users.noreply.github.com>
Co-authored-by: houseme <4829346+houseme@users.noreply.github.com>
Co-authored-by: houseme <housemecn@gmail.com>
2025-12-20 01:31:09 +08:00
loverustfs
8e0aeb4fdc Optimize ci ubicloud (#1208) 2025-12-19 23:22:45 +08:00
majinghe
abe8a50b5a add cert manager and ingress annotations support (#1206) 2025-12-19 21:50:23 +08:00
loverustfs
61f4d307b5 Modify latest version tips to console 2025-12-19 14:57:19 +08:00
loverustfs
3eafeb0ff0 Modify to accelerate 2025-12-19 13:01:17 +08:00
houseme
6273b138f6 upgrade mio version to 1.1.1 2025-12-05 14:55:17 +08:00
34 changed files with 1094 additions and 195 deletions

View File

@@ -454,7 +454,7 @@ jobs:
OSS_ACCESS_KEY_ID: ${{ secrets.ALICLOUDOSS_KEY_ID }}
OSS_ACCESS_KEY_SECRET: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
OSS_REGION: cn-beijing
OSS_ENDPOINT: https://oss-cn-beijing.aliyuncs.com
OSS_ENDPOINT: https://oss-accelerate.aliyuncs.com
shell: bash
run: |
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"

Cargo.lock generated
View File

@@ -1032,9 +1032,9 @@ dependencies = [
[[package]]
name = "axum"
version = "0.8.7"
version = "0.8.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5b098575ebe77cb6d14fc7f32749631a6e44edbef6b796f89b020e99ba20d425"
checksum = "8b52af3cb4058c895d37317bb27508dccc8e5f2d39454016b297bf4a400597b8"
dependencies = [
"axum-core",
"bytes",
@@ -1084,9 +1084,9 @@ dependencies = [
[[package]]
name = "axum-extra"
version = "0.12.2"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dbfe9f610fe4e99cf0cfcd03ccf8c63c28c616fe714d80475ef731f3b13dd21b"
checksum = "6dfbd6109d91702d55fc56df06aae7ed85c465a7a451db6c0e54a4b9ca5983d1"
dependencies = [
"axum",
"axum-core",
@@ -1434,31 +1434,14 @@ dependencies = [
"serde_core",
]
[[package]]
name = "cargo-util-schemas"
version = "0.8.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7dc1a6f7b5651af85774ae5a34b4e8be397d9cf4bc063b7e6dbd99a841837830"
dependencies = [
"semver",
"serde",
"serde-untagged",
"serde-value",
"thiserror 2.0.17",
"toml",
"unicode-xid",
"url",
]
[[package]]
name = "cargo_metadata"
version = "0.22.0"
version = "0.23.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "0c3f56c207c76c07652489840ff98687dcf213de178ac0974660d6fefeaf5ec6"
checksum = "ef987d17b0a113becdd19d3d0022d04d7ef41f9efe4f3fb63ac44ba61df3ade9"
dependencies = [
"camino",
"cargo-platform",
"cargo-util-schemas",
"semver",
"serde",
"serde_json",
@@ -1473,9 +1456,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"
[[package]]
name = "cc"
version = "1.2.49"
version = "1.2.50"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "90583009037521a116abf44494efecd645ba48b6622457080f080b85544e2215"
checksum = "9f50d563227a1c37cc0a263f64eca3334388c01c5e4c4861a9def205c614383c"
dependencies = [
"find-msvc-tools",
"jobserver",
@@ -1576,7 +1559,7 @@ version = "0.4.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "773f3b9af64447d2ce9850330c473515014aa235e6a783b02db81ff39e4a3dad"
dependencies = [
"crypto-common 0.1.6",
"crypto-common 0.1.7",
"inout 0.1.4",
]
@@ -1798,9 +1781,9 @@ dependencies = [
[[package]]
name = "crc"
version = "3.4.0"
version = "3.3.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5eb8a2a1cd12ab0d987a5d5e825195d372001a4094a0376319d5a0ad71c1ba0d"
checksum = "9710d3b3739c2e349eb44fe848ad0b7c8cb1e42bd87ee49371df2f7acaf3e675"
dependencies = [
"crc-catalog",
]
@@ -1965,9 +1948,9 @@ dependencies = [
[[package]]
name = "crypto-common"
version = "0.1.6"
version = "0.1.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1bfb12502f3fc46cca1bb51ac28df9d618d813cdc3d2f25b9fe775a34af26bb3"
checksum = "78c8292055d1c1df0cce5d180393dc8cce0abec0a7102adb6c7b1eef6016d60a"
dependencies = [
"generic-array",
"typenum",
@@ -2997,7 +2980,7 @@ checksum = "9ed9a281f7bc9b7576e61468ba615a66a5c8cfdff42420a70aa82701a3b1e292"
dependencies = [
"block-buffer 0.10.4",
"const-oid 0.9.6",
"crypto-common 0.1.6",
"crypto-common 0.1.7",
"subtle",
]
@@ -3405,9 +3388,9 @@ checksum = "1d674e81391d1e1ab681a28d99df07927c6d4aa5b027d7da16ba32d1d21ecd99"
[[package]]
name = "flatbuffers"
version = "25.9.23"
version = "25.12.19"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "09b6620799e7340ebd9968d2e0708eb82cf1971e9a16821e2091b6d6e475eed5"
checksum = "35f6839d7b3b98adde531effaf34f0c2badc6f4735d26fe74709d8e513a96ef3"
dependencies = [
"bitflags 2.10.0",
"rustc_version",
@@ -3607,9 +3590,9 @@ dependencies = [
[[package]]
name = "generic-array"
version = "0.14.9"
version = "0.14.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4bb6743198531e02858aeaea5398fcc883e71851fcbcb5a2f773e2fb6cb1edf2"
checksum = "85649ca51fd72272d7821adaf274ad91c288277713d9c18820d8499a7ff69e9a"
dependencies = [
"typenum",
"version_check",
@@ -4641,9 +4624,9 @@ dependencies = [
[[package]]
name = "itoa"
version = "1.0.15"
version = "1.0.16"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4a5f13b858c8d314ee3e8f639011f7ccefe71f97f96e50151fb991f267928e2c"
checksum = "7ee5b5339afb4c41626dde77b7a611bd4f2c202b897852b4bcf5d03eddc61010"
[[package]]
name = "jemalloc_pprof"
@@ -4972,9 +4955,9 @@ dependencies = [
[[package]]
name = "lzma-rust2"
version = "0.13.0"
version = "0.15.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c60a23ffb90d527e23192f1246b14746e2f7f071cb84476dd879071696c18a4a"
checksum = "48172246aa7c3ea28e423295dd1ca2589a24617cc4e588bb8cfe177cb2c54d95"
dependencies = [
"crc",
"sha2 0.10.9",
@@ -5134,9 +5117,9 @@ dependencies = [
[[package]]
name = "moka"
version = "0.12.11"
version = "0.12.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8261cd88c312e0004c1d51baad2980c66528dfdb2bee62003e643a4d8f86b077"
checksum = "a3dec6bd31b08944e08b58fd99373893a6c17054d6f3ea5006cc894f4f4eee2a"
dependencies = [
"async-lock",
"crossbeam-channel",
@@ -5147,7 +5130,6 @@ dependencies = [
"futures-util",
"parking_lot",
"portable-atomic",
"rustc_version",
"smallvec",
"tagptr",
"uuid",
@@ -5281,9 +5263,9 @@ checksum = "5e0826a989adedc2a244799e823aece04662b66609d96af8dff7ac6df9a8925d"
[[package]]
name = "ntapi"
version = "0.4.1"
version = "0.4.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e8a3895c6391c39d7fe7ebc444a87eb2991b2a0bc718fdabd071eec617fc68e4"
checksum = "c70f219e21142367c70c0b30c6a9e3a14d55b4d12a204d897fbec83a0363f081"
dependencies = [
"winapi",
]
@@ -6113,9 +6095,9 @@ dependencies = [
[[package]]
name = "portable-atomic"
version = "1.11.1"
version = "1.12.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f84267b20a16ea918e43c6a88433c2d54fa145c92a811b5b047ccbe153674483"
checksum = "f59e70c4aef1e55797c2e8fd94a4f2a973fc972cfde0e0b05f683667b0cd39dd"
[[package]]
name = "potential_utf"
@@ -7099,6 +7081,7 @@ dependencies = [
"serde",
"serde_json",
"serde_urlencoded",
"serial_test",
"shadow-rs",
"socket2 0.6.1",
"subtle",
@@ -7879,9 +7862,9 @@ checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d"
[[package]]
name = "ryu"
version = "1.0.20"
version = "1.0.21"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "28d3b2b1366ec20994f1fd18c3c594f05c5dd4bc44d8bb0c1c632c8d6829481f"
checksum = "62049b2877bf12821e8f9ad256ee38fdc31db7387ec2d3b3f403024de2034aea"
[[package]]
name = "s3s"
@@ -8096,28 +8079,6 @@ dependencies = [
"serde_derive",
]
[[package]]
name = "serde-untagged"
version = "0.1.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f9faf48a4a2d2693be24c6289dbe26552776eb7737074e6722891fadbe6c5058"
dependencies = [
"erased-serde",
"serde",
"serde_core",
"typeid",
]
[[package]]
name = "serde-value"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f3a1a3341211875ef120e117ea7fd5228530ae7e7036a779fdc9117be6b3282c"
dependencies = [
"ordered-float",
"serde",
]
[[package]]
name = "serde_core"
version = "1.0.228"
@@ -8315,9 +8276,9 @@ dependencies = [
[[package]]
name = "shadow-rs"
version = "1.4.0"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "72d18183cef626bce22836103349c7050d73db799be0171386b80947d157ae32"
checksum = "ff351910f271e7065781b6b4f0f43cb515d474d812f31176a0246d9058e47d5d"
dependencies = [
"cargo_metadata",
"const_format",
@@ -10434,9 +10395,9 @@ dependencies = [
[[package]]
name = "zip"
version = "6.0.0"
version = "7.0.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "eb2a05c7c36fde6c09b08576c9f7fb4cda705990f73b58fe011abf7dfb24168b"
checksum = "bdd8a47718a4ee5fe78e07667cd36f3de80e7c2bfe727c7074245ffc7303c037"
dependencies = [
"aes 0.8.4",
"arbitrary",
@@ -10445,6 +10406,7 @@ dependencies = [
"crc32fast",
"deflate64",
"flate2",
"generic-array",
"getrandom 0.3.4",
"hmac 0.12.1",
"indexmap 2.12.1",

View File

@@ -97,8 +97,8 @@ async-channel = "2.5.0"
async-compression = { version = "0.4.19" }
async-recursion = "1.1.1"
async-trait = "0.1.89"
axum = "0.8.7"
axum-extra = "0.12.2"
axum = "0.8.8"
axum-extra = "0.12.3"
axum-server = { version = "0.8.0", features = ["tls-rustls-no-provider"], default-features = false }
futures = "0.3.31"
futures-core = "0.3.31"
@@ -126,7 +126,7 @@ tower-http = { version = "0.6.8", features = ["cors"] }
bytes = { version = "1.11.0", features = ["serde"] }
bytesize = "2.3.1"
byteorder = "1.5.0"
flatbuffers = "25.9.23"
flatbuffers = "25.12.19"
form_urlencoded = "1.2.2"
prost = "0.14.1"
quick-xml = "0.38.4"
@@ -203,7 +203,7 @@ matchit = "0.9.0"
md-5 = "0.11.0-rc.3"
md5 = "0.8.0"
mime_guess = "2.0.5"
moka = { version = "0.12.11", features = ["future"] }
moka = { version = "0.12.12", features = ["future"] }
netif = "0.1.6"
nix = { version = "0.30.1", features = ["fs"] }
nu-ansi-term = "0.50.3"
@@ -224,7 +224,7 @@ rust-embed = { version = "8.9.0" }
rustc-hash = { version = "2.1.1" }
s3s = { version = "0.12.0-rc.6", features = ["minio"], git = "https://github.com/s3s-project/s3s.git", branch = "main" }
serial_test = "3.2.0"
shadow-rs = { version = "1.4.0", default-features = false }
shadow-rs = { version = "1.5.0", default-features = false }
siphasher = "1.0.1"
smallvec = { version = "1.15.1", features = ["serde"] }
smartstring = "1.0.1"
@@ -252,7 +252,7 @@ walkdir = "2.5.0"
wildmatch = { version = "2.6.1", features = ["serde"] }
winapi = { version = "0.3.9" }
xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }
zip = "6.0.0"
zip = "7.0.0"
zstd = "0.13.3"
# Observability and Metrics

View File

@@ -103,7 +103,7 @@ The RustFS container runs as a non-root user `rustfs` (UID `10001`). If you run
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:latest
# Using specific version
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:1.0.0.alpha.68
docker run -d -p 9000:9000 -p 9001:9001 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:1.0.0-alpha.76
```
You can also use Docker Compose. Using the `docker-compose.yml` file in the root directory:

View File

@@ -24,11 +24,16 @@ pub static GLOBAL_RUSTFS_HOST: LazyLock<RwLock<String>> = LazyLock::new(|| RwLoc
pub static GLOBAL_RUSTFS_PORT: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("9000".to_string()));
pub static GLOBAL_RUSTFS_ADDR: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
pub static GLOBAL_CONN_MAP: LazyLock<RwLock<HashMap<String, Channel>>> = LazyLock::new(|| RwLock::new(HashMap::new()));
pub static GLOBAL_ROOT_CERT: LazyLock<RwLock<Option<Vec<u8>>>> = LazyLock::new(|| RwLock::new(None));
pub async fn set_global_addr(addr: &str) {
*GLOBAL_RUSTFS_ADDR.write().await = addr.to_string();
}
pub async fn set_global_root_cert(cert: Vec<u8>) {
*GLOBAL_ROOT_CERT.write().await = Some(cert);
}
/// Evict a stale/dead connection from the global connection cache.
/// This is critical for cluster recovery when a node dies unexpectedly (e.g., power-off).
/// By removing the cached connection, subsequent requests will establish a fresh connection.
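The body of `evict_connection` is outside this hunk; a plausible minimal form, assuming the cache is keyed by the peer address string, simply drops the cached channel so the next request dials a fresh one:

```rust
// Sketch only; the actual implementation may differ. Removing the cached
// entry forces create_new_channel() to establish a brand-new connection.
pub async fn evict_connection(addr: &str) {
    let mut conns = GLOBAL_CONN_MAP.write().await;
    if conns.remove(addr).is_some() {
        tracing::debug!("evicted stale gRPC connection for {}", addr);
    }
}
```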

View File

@@ -89,6 +89,30 @@ pub const RUSTFS_TLS_KEY: &str = "rustfs_key.pem";
/// This is the default cert for TLS.
pub const RUSTFS_TLS_CERT: &str = "rustfs_cert.pem";
/// Default public certificate filename for rustfs
/// This is the default public certificate filename for rustfs.
/// It is used to store the public certificate of the application.
/// Default value: public.crt
pub const RUSTFS_PUBLIC_CERT: &str = "public.crt";
/// Default CA certificate filename for rustfs
/// This is the default CA certificate filename for rustfs.
/// It is used to store the CA certificate of the application.
/// Default value: ca.crt
pub const RUSTFS_CA_CERT: &str = "ca.crt";
/// Default HTTP prefix for rustfs
/// This is the default HTTP prefix for rustfs.
/// It is used to identify HTTP URLs.
/// Default value: http://
pub const RUSTFS_HTTP_PREFIX: &str = "http://";
/// Default HTTPS prefix for rustfs
/// This is the default HTTPS prefix for rustfs.
/// It is used to identify HTTPS URLs.
/// Default value: https://
pub const RUSTFS_HTTPS_PREFIX: &str = "https://";
/// Default port for rustfs
/// This is the default port for rustfs.
/// This is used to bind the server to a specific port.

View File

@@ -12,4 +12,26 @@
// See the License for the specific language governing permissions and
// limitations under the License.
/// TLS related environment variable names and default values
/// Environment variable to enable TLS key logging
/// When set to "1", RustFS will log TLS keys to the specified file for debugging purposes.
/// By default, this is disabled.
/// To enable, set the environment variable RUSTFS_TLS_KEYLOG=1
pub const ENV_TLS_KEYLOG: &str = "RUSTFS_TLS_KEYLOG";
/// Default value for TLS key logging
/// By default, RustFS does not log TLS keys.
/// To change this behavior, set the environment variable RUSTFS_TLS_KEYLOG=1
pub const DEFAULT_TLS_KEYLOG: bool = false;
/// Environment variable to trust system CA certificates
/// When set to "1", RustFS will trust system CA certificates in addition to any
/// custom CA certificates provided in the configuration.
/// By default, this is disabled.
/// To enable, set the environment variable RUSTFS_TRUST_SYSTEM_CA=1
pub const ENV_TRUST_SYSTEM_CA: &str = "RUSTFS_TRUST_SYSTEM_CA";
/// Default value for trusting system CA certificates
/// By default, RustFS does not trust system CA certificates.
/// To change this behavior, set the environment variable RUSTFS_TRUST_SYSTEM_CA=1
pub const DEFAULT_TRUST_SYSTEM_CA: bool = false;

View File

@@ -33,7 +33,7 @@ static IAM_SYS: OnceLock<Arc<IamSys<ObjectStore>>> = OnceLock::new();
#[instrument(skip(ecstore))]
pub async fn init_iam_sys(ecstore: Arc<ECStore>) -> Result<()> {
debug!("init iam system");
let s = IamCache::new(ObjectStore::new(ecstore)).await;
let s = IamCache::new(ObjectStore::new(ecstore).await).await;
IAM_SYS.get_or_init(move || IamSys::new(s).into());
Ok(())

View File

@@ -120,18 +120,52 @@ fn split_path(s: &str, last_index: bool) -> (&str, &str) {
#[derive(Clone)]
pub struct ObjectStore {
object_api: Arc<ECStore>,
prev_cred: Option<rustfs_policy::auth::Credentials>,
}
impl ObjectStore {
const BUCKET_NAME: &'static str = ".rustfs.sys";
const PREV_CRED_FILE: &'static str = "config/iam/prev_cred.json";
pub fn new(object_api: Arc<ECStore>) -> Self {
Self { object_api }
/// Load previous credentials from persistent storage in .rustfs.sys bucket
async fn load_prev_cred(object_api: Arc<ECStore>) -> Option<rustfs_policy::auth::Credentials> {
match read_config(object_api, Self::PREV_CRED_FILE).await {
Ok(data) => serde_json::from_slice::<rustfs_policy::auth::Credentials>(&data).ok(),
Err(_) => None,
}
}
fn decrypt_data(data: &[u8]) -> Result<Vec<u8>> {
let de = rustfs_crypto::decrypt_data(get_global_action_cred().unwrap_or_default().secret_key.as_bytes(), data)?;
Ok(de)
/// Save previous credentials to persistent storage in .rustfs.sys bucket
async fn save_prev_cred(object_api: Arc<ECStore>, cred: &Option<rustfs_policy::auth::Credentials>) -> Result<()> {
match cred {
Some(c) => {
let data = serde_json::to_vec(c).map_err(|e| Error::other(format!("Failed to serialize cred: {}", e)))?;
save_config(object_api, Self::PREV_CRED_FILE, data)
.await
.map_err(|e| Error::other(format!("Failed to write cred to storage: {}", e)))
}
None => {
// If no credentials, remove the config
match delete_config(object_api, Self::PREV_CRED_FILE).await {
Ok(_) => Ok(()),
Err(e) => {
// Ignore ConfigNotFound error when trying to delete non-existent config
if matches!(e, rustfs_ecstore::error::StorageError::ConfigNotFound) {
Ok(())
} else {
Err(Error::other(format!("Failed to delete cred from storage: {}", e)))
}
}
}
}
}
}
pub async fn new(object_api: Arc<ECStore>) -> Self {
// Load previous credentials from persistent storage in .rustfs.sys bucket
let prev_cred = Self::load_prev_cred(object_api.clone()).await.or_else(get_global_action_cred);
Self { object_api, prev_cred }
}
fn encrypt_data(data: &[u8]) -> Result<Vec<u8>> {
@@ -139,10 +173,65 @@ impl ObjectStore {
Ok(en)
}
/// Decrypt data with credential fallback mechanism
/// First tries current credentials, then falls back to previous credentials if available
async fn decrypt_fallback(&self, data: &[u8], path: &str) -> Result<Vec<u8>> {
let current_cred = get_global_action_cred().unwrap_or_default();
// Try current credentials first
match rustfs_crypto::decrypt_data(current_cred.secret_key.as_bytes(), data) {
Ok(decrypted) => {
// Update persistent storage with current credentials for consistency
let _ = Self::save_prev_cred(self.object_api.clone(), &Some(current_cred)).await;
Ok(decrypted)
}
Err(_) => {
// Current credentials failed, try previous credentials
if let Some(ref prev_cred) = self.prev_cred {
match rustfs_crypto::decrypt_data(prev_cred.secret_key.as_bytes(), data) {
Ok(prev_decrypted) => {
warn!("Decryption succeeded with previous credentials, path: {}", path);
// Re-encrypt with current credentials
match rustfs_crypto::encrypt_data(current_cred.secret_key.as_bytes(), &prev_decrypted) {
Ok(re_encrypted) => {
let _ = save_config(self.object_api.clone(), path, re_encrypted).await;
}
Err(e) => {
warn!("Failed to re-encrypt with current credentials: {}, path: {}", e, path);
}
}
// Update persistent storage with current credentials
let _ = Self::save_prev_cred(self.object_api.clone(), &Some(current_cred)).await;
Ok(prev_decrypted)
}
Err(_) => {
// Both attempts failed
warn!("Decryption failed with both current and previous credentials, deleting config: {}", path);
let _ = self.delete_iam_config(path).await;
Err(Error::ConfigNotFound)
}
}
} else {
// No previous credentials available
warn!(
"Decryption failed with current credentials and no previous credentials available, deleting config: {}",
path
);
let _ = self.delete_iam_config(path).await;
Err(Error::ConfigNotFound)
}
}
}
}
async fn load_iamconfig_bytes_with_metadata(&self, path: impl AsRef<str> + Send) -> Result<(Vec<u8>, ObjectInfo)> {
let (data, obj) = read_config_with_metadata(self.object_api.clone(), path.as_ref(), &ObjectOptions::default()).await?;
Ok((Self::decrypt_data(&data)?, obj))
let decrypted_data = self.decrypt_fallback(&data, path.as_ref()).await?;
Ok((decrypted_data, obj))
}
async fn list_iam_config_items(&self, prefix: &str, ctx: CancellationToken, sender: Sender<StringOrErr>) {
@@ -386,15 +475,7 @@ impl Store for ObjectStore {
async fn load_iam_config<Item: DeserializeOwned>(&self, path: impl AsRef<str> + Send) -> Result<Item> {
let mut data = read_config(self.object_api.clone(), path.as_ref()).await?;
data = match Self::decrypt_data(&data) {
Ok(v) => v,
Err(err) => {
warn!("delete the config file when decrypt failed failed: {}, path: {}", err, path.as_ref());
// delete the config file when decrypt failed
let _ = self.delete_iam_config(path.as_ref()).await;
return Err(Error::ConfigNotFound);
}
};
data = self.decrypt_fallback(&data, path.as_ref()).await?;
Ok(serde_json::from_slice(&data)?)
}

View File

@@ -212,6 +212,11 @@ impl NotificationSystem {
return Ok(());
}
// Save the modified configuration to storage
rustfs_ecstore::config::com::save_server_config(store, &new_config)
.await
.map_err(|e| NotificationError::SaveConfig(e.to_string()))?;
info!("Configuration updated. Reloading system...");
self.reload_config(new_config).await
}
@@ -294,23 +299,35 @@ impl NotificationSystem {
/// If the target configuration does not exist, it returns Ok(()) without making any changes.
pub async fn remove_target_config(&self, target_type: &str, target_name: &str) -> Result<(), NotificationError> {
info!("Removing config for target {} of type {}", target_name, target_type);
self.update_config_and_reload(|config| {
let mut changed = false;
if let Some(targets) = config.0.get_mut(&target_type.to_lowercase()) {
if targets.remove(&target_name.to_lowercase()).is_some() {
changed = true;
let config_result = self
.update_config_and_reload(|config| {
let mut changed = false;
if let Some(targets) = config.0.get_mut(&target_type.to_lowercase()) {
if targets.remove(&target_name.to_lowercase()).is_some() {
changed = true;
}
if targets.is_empty() {
config.0.remove(target_type);
}
}
if targets.is_empty() {
config.0.remove(target_type);
if !changed {
info!("Target {} of type {} not found, no changes made.", target_name, target_type);
}
}
if !changed {
info!("Target {} of type {} not found, no changes made.", target_name, target_type);
}
debug!("Config after remove: {:?}", config);
changed
})
.await
debug!("Config after remove: {:?}", config);
changed
})
.await;
if config_result.is_ok() {
let target_id = TargetID::new(target_name.to_string(), target_type.to_string());
// Remove from target list
let target_list = self.notifier.target_list();
let mut target_list_guard = target_list.write().await;
let _ = target_list_guard.remove_target_only(&target_id).await;
}
config_result
}
/// Enhanced event stream startup function, including monitoring and concurrency control

View File

@@ -195,6 +195,10 @@ impl EventNotifier {
) -> Result<(), NotificationError> {
// Currently active, simpler logic
let mut target_list_guard = self.target_list.write().await; //Gets a write lock for the TargetList
// Clear existing targets first - rebuild from scratch to ensure consistency with new configuration
target_list_guard.clear();
for target_boxed in targets_to_init {
// Traverse the incoming Box<dyn Target >
debug!("init bucket target: {}", target_boxed.name());
@@ -240,6 +244,11 @@ impl TargetList {
Ok(())
}
/// Clears all targets from the list
pub fn clear(&mut self) {
self.targets.clear();
}
/// Removes a target by ID. Note: This does not stop its associated event stream.
/// Stream cancellation should be handled by EventNotifier.
pub async fn remove_target_only(&mut self, id: &TargetID) -> Option<Arc<dyn Target<Event> + Send + Sync>> {

View File

@@ -20,7 +20,6 @@ use serde::{Deserialize, Serialize};
use serde_json::{Value, json};
use std::collections::HashMap;
use time::OffsetDateTime;
use time::macros::offset;
use tracing::warn;
const ACCESS_KEY_MIN_LEN: usize = 3;
@@ -231,7 +230,7 @@ pub fn create_new_credentials_with_metadata(
let expiration = {
if let Some(v) = claims.get("exp") {
if let Some(expiry) = v.as_i64() {
Some(OffsetDateTime::from_unix_timestamp(expiry)?.to_offset(offset!(+8)))
Some(OffsetDateTime::from_unix_timestamp(expiry)?)
} else {
None
}

View File

@@ -15,19 +15,19 @@
#[allow(unsafe_code)]
mod generated;
use std::{error::Error, time::Duration};
pub use generated::*;
use proto_gen::node_service::node_service_client::NodeServiceClient;
use rustfs_common::globals::{GLOBAL_CONN_MAP, evict_connection};
use rustfs_common::globals::{GLOBAL_CONN_MAP, GLOBAL_ROOT_CERT, evict_connection};
use std::{error::Error, time::Duration};
use tonic::{
Request, Status,
metadata::MetadataValue,
service::interceptor::InterceptedService,
transport::{Channel, Endpoint},
transport::{Certificate, Channel, ClientTlsConfig, Endpoint},
};
use tracing::{debug, warn};
pub use generated::*;
// Default 100 MB
pub const DEFAULT_GRPC_SERVER_MESSAGE_LEN: usize = 100 * 1024 * 1024;
@@ -46,6 +46,12 @@ const HTTP2_KEEPALIVE_TIMEOUT_SECS: u64 = 3;
/// Overall RPC timeout - maximum time for any single RPC operation
const RPC_TIMEOUT_SECS: u64 = 30;
/// Default HTTPS prefix for rustfs
/// This is the default HTTPS prefix for rustfs.
/// It is used to identify HTTPS URLs.
/// Default value: https://
const RUSTFS_HTTPS_PREFIX: &str = "https://";
/// Creates a new gRPC channel with optimized keepalive settings for cluster resilience.
///
/// This function is designed to detect dead peers quickly:
@@ -56,7 +62,7 @@ const RPC_TIMEOUT_SECS: u64 = 30;
async fn create_new_channel(addr: &str) -> Result<Channel, Box<dyn Error>> {
debug!("Creating new gRPC channel to: {}", addr);
let connector = Endpoint::from_shared(addr.to_string())?
let mut connector = Endpoint::from_shared(addr.to_string())?
// Fast connection timeout for dead peer detection
.connect_timeout(Duration::from_secs(CONNECT_TIMEOUT_SECS))
// TCP-level keepalive - OS will probe connection
@@ -70,6 +76,37 @@ async fn create_new_channel(addr: &str) -> Result<Channel, Box<dyn Error>> {
// Overall timeout for any RPC - fail fast on unresponsive peers
.timeout(Duration::from_secs(RPC_TIMEOUT_SECS));
let root_cert = GLOBAL_ROOT_CERT.read().await;
if addr.starts_with(RUSTFS_HTTPS_PREFIX) {
if let Some(cert_pem) = root_cert.as_ref() {
let ca = Certificate::from_pem(cert_pem);
// Derive the hostname from the HTTPS URL for TLS hostname verification.
let domain = addr
.trim_start_matches(RUSTFS_HTTPS_PREFIX)
.split('/')
.next()
.unwrap_or("")
.split(':')
.next()
.unwrap_or("");
let tls = if !domain.is_empty() {
ClientTlsConfig::new().ca_certificate(ca).domain_name(domain)
} else {
// Fallback: configure TLS without explicit domain if parsing fails.
ClientTlsConfig::new().ca_certificate(ca)
};
connector = connector.tls_config(tls)?;
debug!("Configured TLS with custom root certificate for: {}", addr);
} else {
debug!("Using system root certificates for TLS: {}", addr);
}
} else {
// Custom root certificates are configured but will be ignored for non-HTTPS addresses.
if root_cert.is_some() {
warn!("Custom root certificates are configured but not used because the address does not use HTTPS: {addr}");
}
}
let channel = connector.connect().await?;
// Cache the new connection
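For illustration only (not part of the diff), the hostname derivation above maps an HTTPS peer address to the name used for TLS verification:

```rust
#[test]
fn derives_tls_domain_from_https_addr() {
    // Mirrors the parsing in create_new_channel() above.
    let addr = "https://node2.rustfs.local:9000";
    let domain = addr
        .trim_start_matches("https://")
        .split('/')
        .next()
        .unwrap_or("")
        .split(':')
        .next()
        .unwrap_or("");
    assert_eq!(domain, "node2.rustfs.local");
}
```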

View File

@@ -312,7 +312,7 @@ where
compress: true,
};
let data = serde_json::to_vec(&item).map_err(|e| StoreError::Serialization(e.to_string()))?;
let data = serde_json::to_vec(&*item).map_err(|e| StoreError::Serialization(e.to_string()))?;
self.write_file(&key, &data)?;
Ok(key)

View File

@@ -159,3 +159,30 @@ impl std::fmt::Display for TargetType {
}
}
}
/// Decodes a form-urlencoded object name to its original form.
///
/// This function properly handles form-urlencoded strings where spaces are
/// represented as `+` symbols. It first replaces `+` with spaces, then
/// performs standard percent-decoding.
///
/// # Arguments
/// * `encoded` - The form-urlencoded string to decode
///
/// # Returns
/// The decoded string, or an error if decoding fails
///
/// # Example
/// ```
/// use rustfs_targets::target::decode_object_name;
///
/// let encoded = "greeting+file+%282%29.csv";
/// let decoded = decode_object_name(encoded).unwrap();
/// assert_eq!(decoded, "greeting file (2).csv");
/// ```
pub fn decode_object_name(encoded: &str) -> Result<String, TargetError> {
let replaced = encoded.replace("+", " ");
urlencoding::decode(&replaced)
.map(|s| s.into_owned())
.map_err(|e| TargetError::Encoding(format!("Failed to decode object key: {e}")))
}

View File

@@ -32,7 +32,6 @@ use std::{
use tokio::sync::{Mutex, OnceCell, mpsc};
use tracing::{debug, error, info, instrument, trace, warn};
use url::Url;
use urlencoding;
const DEFAULT_CONNECTION_TIMEOUT: Duration = Duration::from_secs(15);
const EVENT_LOOP_POLL_TIMEOUT: Duration = Duration::from_secs(10); // For initial connection check in task
@@ -258,8 +257,8 @@ where
.as_ref()
.ok_or_else(|| TargetError::Configuration("MQTT client not initialized".to_string()))?;
let object_name = urlencoding::decode(&event.object_name)
.map_err(|e| TargetError::Encoding(format!("Failed to decode object key: {e}")))?;
// Decode form-urlencoded object name
let object_name = crate::target::decode_object_name(&event.object_name)?;
let key = format!("{}/{}", event.bucket_name, object_name);

View File

@@ -36,7 +36,6 @@ use std::{
use tokio::net::lookup_host;
use tokio::sync::mpsc;
use tracing::{debug, error, info, instrument};
use urlencoding;
/// Arguments for configuring a Webhook target
#[derive(Debug, Clone)]
@@ -221,8 +220,8 @@ where
async fn send(&self, event: &EntityTarget<E>) -> Result<(), TargetError> {
info!("Webhook Sending event to webhook target: {}", self.id);
let object_name = urlencoding::decode(&event.object_name)
.map_err(|e| TargetError::Encoding(format!("Failed to decode object key: {e}")))?;
// Decode form-urlencoded object name
let object_name = crate::target::decode_object_name(&event.object_name)?;
let key = format!("{}/{}", event.bucket_name, object_name);
@@ -421,3 +420,51 @@ where
self.args.enable
}
}
#[cfg(test)]
mod tests {
use crate::target::decode_object_name;
use url::form_urlencoded;
#[test]
fn test_decode_object_name_with_spaces() {
// Test case from the issue: "greeting file (2).csv"
let object_name = "greeting file (2).csv";
// Simulate what event.rs does: form-urlencoded encoding (spaces become +)
let form_encoded = form_urlencoded::byte_serialize(object_name.as_bytes()).collect::<String>();
assert_eq!(form_encoded, "greeting+file+%282%29.csv");
// Test the decode_object_name helper function
let decoded = decode_object_name(&form_encoded).unwrap();
assert_eq!(decoded, object_name);
assert!(!decoded.contains('+'), "Decoded string should not contain + symbols");
}
#[test]
fn test_decode_object_name_with_special_chars() {
// Test with various special characters
let test_cases = vec![
("folder/greeting file (2).csv", "folder%2Fgreeting+file+%282%29.csv"),
("test file.txt", "test+file.txt"),
("my file (copy).pdf", "my+file+%28copy%29.pdf"),
("file with spaces and (parentheses).doc", "file+with+spaces+and+%28parentheses%29.doc"),
];
for (original, form_encoded) in test_cases {
// Test the decode_object_name helper function
let decoded = decode_object_name(form_encoded).unwrap();
assert_eq!(decoded, original, "Failed to decode: {}", form_encoded);
}
}
#[test]
fn test_decode_object_name_without_spaces() {
// Test that files without spaces still work correctly
let object_name = "simple-file.txt";
let form_encoded = form_urlencoded::byte_serialize(object_name.as_bytes()).collect::<String>();
let decoded = decode_object_name(&form_encoded).unwrap();
assert_eq!(decoded, object_name);
}
}

View File

@@ -84,7 +84,7 @@ tls = ["dep:rustls", "dep:rustls-pemfile", "dep:rustls-pki-types"] # tls charac
net = ["ip", "dep:url", "dep:netif", "dep:futures", "dep:transform-stream", "dep:bytes", "dep:s3s", "dep:hyper", "dep:thiserror", "dep:tokio"] # network features with DNS resolver
io = ["dep:tokio"]
path = []
notify = ["dep:hyper", "dep:s3s", "dep:hashbrown", "dep:thiserror", "dep:serde", "dep:libc"] # file system notification features
notify = ["dep:hyper", "dep:s3s", "dep:hashbrown", "dep:thiserror", "dep:serde", "dep:libc", "dep:url", "dep:regex"] # file system notification features
compress = ["dep:flate2", "dep:brotli", "dep:snap", "dep:lz4", "dep:zstd"]
string = ["dep:regex", "dep:rand"]
crypto = ["dep:base64-simd", "dep:hex-simd", "dep:hmac", "dep:hyper", "dep:sha1"]

View File

@@ -21,7 +21,7 @@ use std::collections::HashMap;
use std::io::Error;
use std::path::Path;
use std::sync::Arc;
use std::{env, fs, io};
use std::{fs, io};
use tracing::{debug, warn};
/// Load public certificate from file.
@@ -243,17 +243,7 @@ pub fn create_multi_cert_resolver(
/// * A boolean indicating whether TLS key logging is enabled based on the `RUSTFS_TLS_KEYLOG` environment variable.
///
pub fn tls_key_log() -> bool {
env::var("RUSTFS_TLS_KEYLOG")
.map(|v| {
let v = v.trim();
v.eq_ignore_ascii_case("1")
|| v.eq_ignore_ascii_case("on")
|| v.eq_ignore_ascii_case("true")
|| v.eq_ignore_ascii_case("yes")
|| v.eq_ignore_ascii_case("enabled")
|| v.eq_ignore_ascii_case("t")
})
.unwrap_or(false)
crate::get_env_bool(rustfs_config::ENV_TLS_KEYLOG, rustfs_config::DEFAULT_TLS_KEYLOG)
}
#[cfg(test)]
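The `get_env_bool` helper referenced above is not shown in this diff; a plausible shape, consistent with the truthy spellings the removed inline parser accepted, would be:

```rust
// Assumed shape of the helper (not part of this diff): accept the same truthy
// spellings the old tls_key_log() parser did, otherwise return the default.
pub fn get_env_bool(key: &str, default: bool) -> bool {
    std::env::var(key)
        .map(|v| {
            let v = v.trim();
            ["1", "on", "true", "yes", "enabled", "t"]
                .iter()
                .any(|t| v.eq_ignore_ascii_case(t))
        })
        .unwrap_or(default)
}
```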

View File

@@ -52,13 +52,17 @@ RustFS helm chart supports **standalone and distributed mode**. For standalone m
| ingress.nginxAnnotations."nginx.ingress.kubernetes.io/session-cookie-hash" | string | `"sha1"` | |
| ingress.nginxAnnotations."nginx.ingress.kubernetes.io/session-cookie-max-age" | string | `"3600"` | |
| ingress.nginxAnnotations."nginx.ingress.kubernetes.io/session-cookie-name" | string | `"rustfs"` | |
| ingress.tls[0].hosts[0] | string | `"your.rustfs.com"` | |
| ingress.tls[0].secretName | string | `"rustfs-tls"` | |
| ingress.traefikAnnotations."traefik.ingress.kubernetes.io/service.sticky.cookie" | string | `"true"` | |
| ingress.traefikAnnotations."traefik.ingress.kubernetes.io/service.sticky.cookie.httponly" | string | `"true"` | |
| ingress.traefikAnnotations."traefik.ingress.kubernetes.io/service.sticky.cookie.name" | string | `"rustfs"` | |
| ingress.traefikAnnotations."traefik.ingress.kubernetes.io/service.sticky.cookie.samesite" | string | `"none"` | |
| ingress.traefikAnnotations."traefik.ingress.kubernetes.io/service.sticky.cookie.secure" | string | `"true"` | |
| ingress.tls.enabled | bool | `false` | Enable TLS and access RustFS via HTTPS. |
| ingress.tls.certManager.enabled | bool | `false` | Enable cert-manager support to generate the certificate automatically. |
| ingress.tls.certManager.issuer.name | string | `"letsencrypt-staging"` | The name of the cert-manager issuer. |
| ingress.tls.certManager.issuer.kind | string | `"Issuer"` | The kind of the cert-manager issuer, `Issuer` or `ClusterIssuer`. |
| ingress.tls.crt | string | `"tls.crt"` | The content of the certificate file. |
| ingress.tls.key | string | `"tls.key"` | The content of the key file. |
| livenessProbe.failureThreshold | int | `3` | |
| livenessProbe.httpGet.path | string | `"/health"` | |
| livenessProbe.httpGet.port | string | `"endpoint"` | |
@@ -100,9 +104,6 @@ RustFS helm chart supports **standalone and distributed mode**. For standalone m
| storageclass.dataStorageSize | string | `"256Mi"` | The storage size for data PVC. |
| storageclass.logStorageSize | string | `"256Mi"` | The storage size for logs PVC. |
| storageclass.name | string | `"local-path"` | The name for StorageClass. |
| tls.crt | string | `"tls.crt"` | |
| tls.enabled | bool | `false` | |
| tls.key | string | `"tls.key"` | |
| tolerations | list | `[]` | |
---

View File

@@ -15,7 +15,7 @@ type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 1.0.3
version: 0.0.76
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to

View File

@@ -0,0 +1,15 @@
{{- if and .Values.ingress.tls.enabled .Values.ingress.tls.certManager.enabled }}
{{- $host := index .Values.ingress.hosts 0 }}
apiVersion: cert-manager.io/v1
kind: Certificate
metadata:
name: {{ include "rustfs.fullname" . }}-tls
namespace: {{ .Release.Namespace }}
spec:
secretName: {{ .Values.ingress.tls.secretName }}
issuerRef:
name: {{ .Values.ingress.tls.certManager.issuer.name }}
kind: {{ .Values.ingress.tls.certManager.issuer.kind }}
dnsNames:
- {{ $host.host }}
{{- end }}

View File

@@ -1,4 +1,14 @@
{{- if .Values.ingress.enabled -}}
{{- $secretName := .Values.ingress.tls.secretName }}
{{- $ingressAnnotations := dict }}
{{- if eq .Values.ingress.className "nginx" }}
{{- $ingressAnnotations = merge $ingressAnnotations (.Values.ingress.nginxAnnotations | default dict) }}
{{- else if eq .Values.ingress.className "" }}
{{- $ingressAnnotations = merge $ingressAnnotations (.Values.ingress.customAnnotations | default dict) }}
{{- end }}
{{- if .Values.ingress.tls.certManager.enabled }}
{{- $ingressAnnotations = merge $ingressAnnotations (.Values.ingress.certManagerAnnotations | default dict) }}
{{- end }}
apiVersion: networking.k8s.io/v1
kind: Ingress
metadata:
@@ -8,25 +18,21 @@ metadata:
{{- with .Values.commonLabels }}
{{- toYaml . | nindent 4 }}
{{- end }}
{{- if eq .Values.ingress.className "nginx" }}
{{- with .Values.ingress.nginxAnnotations }}
{{- with $ingressAnnotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}
spec:
{{- with .Values.ingress.className }}
ingressClassName: {{ . }}
{{- end }}
{{- if .Values.tls.enabled }}
{{- if .Values.ingress.tls.enabled }}
tls:
{{- range .Values.ingress.tls }}
- hosts:
{{- range .hosts }}
- {{ . | quote }}
{{- range .Values.ingress.hosts }}
- {{ .host | quote }}
{{- end }}
secretName: {{ .secretName }}
{{- end }}
secretName: {{ $secretName }}
{{- end }}
rules:
{{- range .Values.ingress.hosts }}

View File

@@ -1,4 +1,4 @@
{{- if .Values.tls.enabled }}
{{- if and .Values.ingress.tls.enabled (not .Values.ingress.tls.certManager.enabled) }}
apiVersion: v1
kind: Secret
metadata:
@@ -7,6 +7,6 @@ metadata:
{{- toYaml .Values.commonLabels | nindent 4 }}
type: kubernetes.io/tls
data:
tls.crt : {{ .Values.tls.crt | b64enc | quote }}
tls.key : {{ .Values.tls.key | b64enc | quote }}
tls.crt : {{ .Values.ingress.tls.crt | b64enc | quote }}
tls.key : {{ .Values.ingress.tls.key | b64enc | quote }}
{{- end }}

View File

@@ -13,15 +13,16 @@ spec:
clusterIP: None
publishNotReadyAddresses: true
ports:
- port: {{ .Values.service.ep_port }}
name: endpoint
- port: {{ .Values.service.console_port }}
name: console
- name: endpoint
port: {{ .Values.service.endpoint.port }}
- name: console
port: {{ .Values.service.console.port }}
selector:
{{- include "rustfs.selectorLabels" . | nindent 4 }}
{{- end }}
---
{{- $serviceType := .Values.service.type }}
apiVersion: v1
kind: Service
metadata:
@@ -40,19 +41,27 @@ metadata:
{{- toYaml . | nindent 4 }}
{{- end }}
spec:
{{- if .Values.ingress.enabled }}
{{- if eq $serviceType "ClusterIP" }}
type: ClusterIP
{{- else }}
type: {{ .Values.service.type }}
{{- else if eq $serviceType "NodePort" }}
type: NodePort
sessionAffinity: ClientIP
sessionAffinityConfig:
clientIP:
timeoutSeconds: 10800
{{- end }}
ports:
- port: {{ .Values.service.ep_port }}
name: endpoint
- port: {{ .Values.service.console_port }}
name: console
- name: endpoint
port: {{ .Values.service.endpoint.port }}
targetPort: {{ .Values.service.endpoint.port }}
{{- if eq $serviceType "NodePort" }}
nodePort: {{ .Values.service.endpoint.nodePort }}
{{- end }}
- name: console
port: {{ .Values.service.console.port }}
targetPort: {{ .Values.service.console.port }}
{{- if eq $serviceType "NodePort" }}
nodePort: {{ .Values.service.console.nodePort }}
{{- end }}
selector:
{{- include "rustfs.selectorLabels" . | nindent 4 }}

View File

@@ -11,7 +11,7 @@ image:
# This sets the pull policy for images.
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "latest"
tag: "1.0.0-alpha.73"
# This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
@@ -90,14 +90,18 @@ containerSecurityContext:
runAsNonRoot: true
service:
type: NodePort
ep_port: 9000
console_port: 9001
type: ClusterIP
endpoint:
port: 9000
nodePort: 32000
console:
port: 9001
nodePort: 32001
# This block is for setting up the ingress for more information can be found here: https://kubernetes.io/docs/concepts/services-networking/ingress/
ingress:
enabled: true
className: "traefik" # Specify the classname, traefik or nginx. Different classname has different annotations for session sticky.
className: "nginx" # Specify the classname, traefik or nginx. Different classname has different annotations for session sticky.
traefikAnnotations:
traefik.ingress.kubernetes.io/service.sticky.cookie: "true"
traefik.ingress.kubernetes.io/service.sticky.cookie.httponly: "true"
@@ -110,20 +114,26 @@ ingress:
nginx.ingress.kubernetes.io/session-cookie-hash: sha1
nginx.ingress.kubernetes.io/session-cookie-max-age: "3600"
nginx.ingress.kubernetes.io/session-cookie-name: rustfs
certManagerAnnotations:
{} # Specify cert-manager issuer annotations, cert-manager.io/issuer or cert-manager.io/cluster-issuer.
# cert-manager.io/issuer: "letsencrypt-staging"
customAnnotations: # Specify custom annotations
{} # Customize annotations
hosts:
- host: your.rustfs.com
- host: xmg.rustfs.com
paths:
- path: /
pathType: ImplementationSpecific
tls:
- secretName: rustfs-tls
hosts:
- your.rustfs.com
tls:
enabled: false
crt: tls.crt
key: tls.key
pathType: Prefix
tls:
enabled: false # Enable tls and access rustfs via https.
certManager:
enabled: false # Enable cert-manager to generate the certificate for rustfs, default false.
issuer:
name: letsencrypt-staging # Specify cert manager issuer name
kind: Issuer # Specify cert manager issuer kind, Issuer or ClusterIssuer.
secretName: secret-tls
crt: tls.crt
key: tls.key
resources:
# We usually recommend not to specify default resources and to leave this as a conscious

View File

@@ -144,6 +144,7 @@ pprof = { workspace = true }
[dev-dependencies]
uuid = { workspace = true, features = ["v4"] }
serial_test = { workspace = true }
[build-dependencies]
http.workspace = true

View File

@@ -101,7 +101,7 @@ where
&& headers
.get(header::CONTENT_TYPE)
.and_then(|v| v.to_str().ok())
.map(|ct| ct.split(';').next().unwrap_or("").trim())
.map(|ct| ct.split(';').next().unwrap_or("").trim().to_lowercase())
.map(|ct| ct == "application/x-www-form-urlencoded")
.unwrap_or(false)
{
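An illustrative check of what the added `.to_lowercase()` buys (not part of the diff): a mixed-case Content-Type header still routes through the form-urlencoded branch, where the previous exact comparison would have rejected it.

```rust
#[test]
fn form_urlencoded_content_type_matches_case_insensitively() {
    // Mirrors the normalization above.
    let ct = "Application/X-WWW-Form-URLEncoded; charset=UTF-8";
    let normalized = ct.split(';').next().unwrap_or("").trim().to_lowercase();
    assert_eq!(normalized, "application/x-www-form-urlencoded");
}
```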

View File

@@ -13,9 +13,48 @@
// limitations under the License.
#[cfg(test)]
#[allow(unsafe_op_in_unsafe_fn)]
mod tests {
use crate::config::Opt;
use clap::Parser;
use rustfs_ecstore::disks_layout::DisksLayout;
use serial_test::serial;
use std::env;
/// Helper function to run test with environment variable set.
/// Automatically cleans up the environment variable after the test.
///
/// # Safety
/// This function uses unsafe env::set_var and env::remove_var.
/// Tests using this helper must be marked with #[serial] to avoid race conditions.
#[allow(unsafe_code)]
fn with_env_var<F>(key: &str, value: &str, test_fn: F)
where
F: FnOnce(),
{
unsafe {
env::set_var(key, value);
}
// Ensure cleanup happens even if test panics
let result = std::panic::catch_unwind(std::panic::AssertUnwindSafe(test_fn));
unsafe {
env::remove_var(key);
}
// Re-panic if the test failed
if let Err(e) = result {
std::panic::resume_unwind(e);
}
}
/// Helper to parse volumes and verify the layout.
fn verify_layout<T, F>(volumes: &[T], verify_fn: F)
where
T: AsRef<str>,
F: FnOnce(&DisksLayout),
{
let layout = DisksLayout::from_volumes(volumes).expect("Failed to parse volumes");
verify_fn(&layout);
}
#[test]
fn test_default_console_configuration() {
@@ -66,4 +105,422 @@ mod tests {
assert_eq!(endpoint_port, 9000);
assert_eq!(console_port, 9001);
}
#[test]
fn test_volumes_and_disk_layout_parsing() {
use rustfs_ecstore::disks_layout::DisksLayout;
// Test case 1: Single volume path
let args = vec!["rustfs", "/data/vol1"];
let opt = Opt::parse_from(args);
assert_eq!(opt.volumes.len(), 1);
assert_eq!(opt.volumes[0], "/data/vol1");
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Failed to parse single volume");
assert!(!layout.is_empty_layout());
assert!(layout.is_single_drive_layout());
assert_eq!(layout.get_single_drive_layout(), "/data/vol1");
// Test case 2: Multiple volume paths passed as separate CLI arguments
let args = vec!["rustfs", "/data/vol1", "/data/vol2", "/data/vol3", "/data/vol4"];
let opt = Opt::parse_from(args);
assert_eq!(opt.volumes.len(), 4);
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Failed to parse multiple volumes");
assert!(!layout.is_empty_layout());
assert!(!layout.is_single_drive_layout());
assert_eq!(layout.get_set_count(0), 1);
assert_eq!(layout.get_drives_per_set(0), 4);
// Test case 3: Ellipses pattern - simple range
let args = vec!["rustfs", "/data/vol{1...4}"];
let opt = Opt::parse_from(args);
assert_eq!(opt.volumes.len(), 1);
assert_eq!(opt.volumes[0], "/data/vol{1...4}");
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Failed to parse ellipses pattern");
assert!(!layout.is_empty_layout());
assert_eq!(layout.get_set_count(0), 1);
assert_eq!(layout.get_drives_per_set(0), 4);
// Test case 4: Ellipses pattern - larger range that creates multiple sets
let args = vec!["rustfs", "/data/vol{1...16}"];
let opt = Opt::parse_from(args);
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Failed to parse ellipses with multiple sets");
assert!(!layout.is_empty_layout());
assert_eq!(layout.get_drives_per_set(0), 16);
// Test case 5: Distributed setup pattern
let args = vec!["rustfs", "http://server{1...4}/data/vol{1...4}"];
let opt = Opt::parse_from(args);
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Failed to parse distributed pattern");
assert!(!layout.is_empty_layout());
assert_eq!(layout.get_drives_per_set(0), 16);
// Test case 6: Multiple pools (legacy: false)
let args = vec!["rustfs", "http://server1/data{1...4}", "http://server2/data{1...4}"];
let opt = Opt::parse_from(args);
assert_eq!(opt.volumes.len(), 2);
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Failed to parse multiple pools");
assert!(!layout.legacy);
assert_eq!(layout.pools.len(), 2);
// Test case 7: Minimum valid drives for erasure coding (2 drives minimum)
let args = vec!["rustfs", "/data/vol1", "/data/vol2"];
let opt = Opt::parse_from(args);
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Should succeed with 2 drives");
assert_eq!(layout.get_drives_per_set(0), 2);
// Test case 8: Invalid - single drive not enough for erasure coding
let args = vec!["rustfs", "/data/vol1"];
let opt = Opt::parse_from(args);
// Single drive is special case and should succeed for single drive layout
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Single drive should work");
assert!(layout.is_single_drive_layout());
// Test case 9: Command line with both address and volumes
let args = vec![
"rustfs",
"/data/vol{1...8}",
"--address",
":9000",
"--console-address",
":9001",
];
let opt = Opt::parse_from(args);
assert_eq!(opt.volumes.len(), 1);
assert_eq!(opt.address, ":9000");
assert_eq!(opt.console_address, ":9001");
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Failed to parse with address args");
assert!(!layout.is_empty_layout());
assert_eq!(layout.get_drives_per_set(0), 8);
// Test case 10: Multiple ellipses in single argument - nested pattern
let args = vec!["rustfs", "/data{0...3}/vol{0...4}"];
let opt = Opt::parse_from(args);
assert_eq!(opt.volumes.len(), 1);
assert_eq!(opt.volumes[0], "/data{0...3}/vol{0...4}");
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Failed to parse nested ellipses pattern");
assert!(!layout.is_empty_layout());
// 4 data dirs * 5 vols = 20 drives
let total_drives = layout.get_set_count(0) * layout.get_drives_per_set(0);
assert_eq!(total_drives, 20, "Expected 20 drives from /data{{0...3}}/vol{{0...4}}");
// Test case 11: Multiple pools with nested ellipses patterns
let args = vec!["rustfs", "/data{0...3}/vol{0...4}", "/data{4...7}/vol{0...4}"];
let opt = Opt::parse_from(args);
assert_eq!(opt.volumes.len(), 2);
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Failed to parse multiple pools with nested patterns");
assert!(!layout.legacy);
assert_eq!(layout.pools.len(), 2);
// Each pool should have 20 drives (4 * 5)
let pool0_drives = layout.get_set_count(0) * layout.get_drives_per_set(0);
let pool1_drives = layout.get_set_count(1) * layout.get_drives_per_set(1);
assert_eq!(pool0_drives, 20, "Pool 0 should have 20 drives");
assert_eq!(pool1_drives, 20, "Pool 1 should have 20 drives");
// Test case 12: Complex distributed pattern with multiple ellipses
let args = vec!["rustfs", "http://server{1...2}.local/disk{1...8}"];
let opt = Opt::parse_from(args);
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Failed to parse distributed nested pattern");
assert!(!layout.is_empty_layout());
// 2 servers * 8 disks = 16 drives
let total_drives = layout.get_set_count(0) * layout.get_drives_per_set(0);
assert_eq!(total_drives, 16, "Expected 16 drives from server{{1...2}}/disk{{1...8}}");
// Test case 13: Zero-padded patterns
let args = vec!["rustfs", "/data/vol{01...16}"];
let opt = Opt::parse_from(args);
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Failed to parse zero-padded pattern");
assert!(!layout.is_empty_layout());
assert_eq!(layout.get_drives_per_set(0), 16);
}
/// Test environment variable parsing for volumes.
/// Uses #[serial] to avoid concurrent env var modifications.
#[test]
#[serial]
#[allow(unsafe_code)]
fn test_rustfs_volumes_env_variable() {
// Test case 1: Single volume via environment variable
with_env_var("RUSTFS_VOLUMES", "/data/vol1", || {
let args = vec!["rustfs"];
let opt = Opt::parse_from(args);
assert_eq!(opt.volumes.len(), 1);
assert_eq!(opt.volumes[0], "/data/vol1");
let layout = DisksLayout::from_volumes(&opt.volumes).expect("Failed to parse single volume from env");
assert!(layout.is_single_drive_layout());
});
// Test case 2: Multiple volumes via environment variable (space-separated)
with_env_var("RUSTFS_VOLUMES", "/data/vol1 /data/vol2 /data/vol3 /data/vol4", || {
let args = vec!["rustfs"];
let opt = Opt::parse_from(args);
assert_eq!(opt.volumes.len(), 4);
assert_eq!(opt.volumes[0], "/data/vol1");
assert_eq!(opt.volumes[1], "/data/vol2");
assert_eq!(opt.volumes[2], "/data/vol3");
assert_eq!(opt.volumes[3], "/data/vol4");
verify_layout(&opt.volumes, |layout| {
assert!(!layout.is_single_drive_layout());
assert_eq!(layout.get_drives_per_set(0), 4);
});
});
// Test case 3: Ellipses pattern via environment variable
with_env_var("RUSTFS_VOLUMES", "/data/vol{1...4}", || {
let args = vec!["rustfs"];
let opt = Opt::parse_from(args);
assert_eq!(opt.volumes.len(), 1);
assert_eq!(opt.volumes[0], "/data/vol{1...4}");
verify_layout(&opt.volumes, |layout| {
assert_eq!(layout.get_drives_per_set(0), 4);
});
});
// Test case 4: Larger range with ellipses
with_env_var("RUSTFS_VOLUMES", "/data/vol{1...16}", || {
let args = vec!["rustfs"];
let opt = Opt::parse_from(args);
verify_layout(&opt.volumes, |layout| {
assert_eq!(layout.get_drives_per_set(0), 16);
});
});
// Test case 5: Distributed setup pattern
with_env_var("RUSTFS_VOLUMES", "http://server{1...4}/data/vol{1...4}", || {
let args = vec!["rustfs"];
let opt = Opt::parse_from(args);
verify_layout(&opt.volumes, |layout| {
assert_eq!(layout.get_drives_per_set(0), 16);
});
});
// Test case 6: Multiple pools via environment variable (space-separated)
with_env_var("RUSTFS_VOLUMES", "http://server1/data{1...4} http://server2/data{1...4}", || {
let args = vec!["rustfs"];
let opt = Opt::parse_from(args);
assert_eq!(opt.volumes.len(), 2);
verify_layout(&opt.volumes, |layout| {
assert!(!layout.legacy);
assert_eq!(layout.pools.len(), 2);
});
});
// Test case 7: Nested ellipses pattern
with_env_var("RUSTFS_VOLUMES", "/data{0...3}/vol{0...4}", || {
let args = vec!["rustfs"];
let opt = Opt::parse_from(args);
assert_eq!(opt.volumes.len(), 1);
assert_eq!(opt.volumes[0], "/data{0...3}/vol{0...4}");
verify_layout(&opt.volumes, |layout| {
let total_drives = layout.get_set_count(0) * layout.get_drives_per_set(0);
assert_eq!(total_drives, 20, "Expected 20 drives from /data{{0...3}}/vol{{0...4}}");
});
});
// Test case 8: Multiple pools with nested ellipses
with_env_var("RUSTFS_VOLUMES", "/data{0...3}/vol{0...4} /data{4...7}/vol{0...4}", || {
let args = vec!["rustfs"];
let opt = Opt::parse_from(args);
assert_eq!(opt.volumes.len(), 2);
verify_layout(&opt.volumes, |layout| {
assert_eq!(layout.pools.len(), 2);
let pool0_drives = layout.get_set_count(0) * layout.get_drives_per_set(0);
let pool1_drives = layout.get_set_count(1) * layout.get_drives_per_set(1);
assert_eq!(pool0_drives, 20, "Pool 0 should have 20 drives");
assert_eq!(pool1_drives, 20, "Pool 1 should have 20 drives");
});
});
// Test case 9: Complex distributed pattern with multiple ellipses
with_env_var("RUSTFS_VOLUMES", "http://server{1...2}.local/disk{1...8}", || {
let args = vec!["rustfs"];
let opt = Opt::parse_from(args);
verify_layout(&opt.volumes, |layout| {
let total_drives = layout.get_set_count(0) * layout.get_drives_per_set(0);
assert_eq!(total_drives, 16, "Expected 16 drives from server{{1...2}}/disk{{1...8}}");
});
});
// Test case 10: Zero-padded patterns
with_env_var("RUSTFS_VOLUMES", "/data/vol{01...16}", || {
let args = vec!["rustfs"];
let opt = Opt::parse_from(args);
verify_layout(&opt.volumes, |layout| {
assert_eq!(layout.get_drives_per_set(0), 16);
});
});
// Test case 11: Environment variable with additional CLI options
with_env_var("RUSTFS_VOLUMES", "/data/vol{1...8}", || {
let args = vec!["rustfs", "--address", ":9000", "--console-address", ":9001"];
let opt = Opt::parse_from(args);
assert_eq!(opt.volumes.len(), 1);
assert_eq!(opt.address, ":9000");
assert_eq!(opt.console_address, ":9001");
verify_layout(&opt.volumes, |layout| {
assert_eq!(layout.get_drives_per_set(0), 8);
});
});
// Test case 12: Command line argument overrides environment variable
with_env_var("RUSTFS_VOLUMES", "/data/vol1", || {
let args = vec!["rustfs", "/override/vol1"];
let opt = Opt::parse_from(args);
assert_eq!(opt.volumes.len(), 1);
// CLI argument should override environment variable
assert_eq!(opt.volumes[0], "/override/vol1");
});
}
/// Test boundary cases for path parsing.
/// NOTE: Current implementation uses space as delimiter,
/// which means paths with spaces are NOT supported.
#[test]
#[serial]
#[allow(unsafe_code)]
fn test_volumes_boundary_cases() {
// Test case 1: Paths with spaces are not properly supported (known limitation)
// This test documents the current behavior - space-separated paths will be split
with_env_var("RUSTFS_VOLUMES", "/data/my disk/vol1", || {
let args = vec!["rustfs"];
let opt = Opt::try_parse_from(args).expect("Failed to parse with spaces in path");
// Current behavior: space causes split into 2 volumes
assert_eq!(opt.volumes.len(), 2, "Paths with spaces are split (known limitation)");
assert_eq!(opt.volumes[0], "/data/my");
assert_eq!(opt.volumes[1], "disk/vol1");
});
// Test case 2: Empty environment variable causes parsing failure
// because volumes is required and NonEmptyStringValueParser filters empty strings
with_env_var("RUSTFS_VOLUMES", "", || {
let args = vec!["rustfs"];
let result = Opt::try_parse_from(args);
// Should fail because no volumes provided (empty string filtered out)
assert!(result.is_err(), "Empty RUSTFS_VOLUMES should fail parsing (required field)");
});
// Test case 2b: Multiple consecutive spaces create empty strings during splitting
// This causes parsing to fail because volumes is required and empty strings are invalid
with_env_var("RUSTFS_VOLUMES", "/data/vol1 /data/vol2", || {
let args = vec!["rustfs"];
let result = Opt::try_parse_from(args);
// Should fail because double space creates an empty element
assert!(result.is_err(), "Multiple consecutive spaces should cause parsing failure");
});
// Test case 3: Very long path with ellipses (stress test)
// Note: Large drive counts may be automatically split into multiple sets
let long_path = format!("/very/long/path/structure/with/many/directories/vol{{1...{}}}", 100);
with_env_var("RUSTFS_VOLUMES", &long_path, || {
let args = vec!["rustfs"];
let opt = Opt::try_parse_from(args).expect("Failed to parse with long ellipses path");
verify_layout(&opt.volumes, |layout| {
// Total drives should be 100, but may be distributed across sets
let total_drives = layout.get_set_count(0) * layout.get_drives_per_set(0);
assert_eq!(total_drives, 100, "Total drives should be 100");
});
});
}
/// Test error handling for invalid ellipses patterns.
#[test]
fn test_invalid_ellipses_patterns() {
// Test case 1: Invalid ellipses format (letters instead of numbers)
let args = vec!["rustfs", "/data/vol{a...z}"];
let opt = Opt::parse_from(args);
let result = DisksLayout::from_volumes(&opt.volumes);
assert!(result.is_err(), "Invalid ellipses pattern with letters should fail");
// Test case 2: Reversed range (larger to smaller)
let args = vec!["rustfs", "/data/vol{10...1}"];
let opt = Opt::parse_from(args);
let result = DisksLayout::from_volumes(&opt.volumes);
// Depending on the implementation, this may succeed with 0 drives or fail outright;
// the assertion below documents the actual behavior.
if let Ok(layout) = result {
assert!(
layout.is_empty_layout() || layout.get_drives_per_set(0) == 0,
"Reversed range should result in empty layout"
);
}
}
#[test]
fn test_server_domains_parsing() {
// Test case 1: server domains without ports
let args = vec![
"rustfs",
"/data/vol1",
"--server-domains",
"example.com,api.example.com,cdn.example.com",
];
let opt = Opt::parse_from(args);
assert_eq!(opt.server_domains.len(), 3);
assert_eq!(opt.server_domains[0], "example.com");
assert_eq!(opt.server_domains[1], "api.example.com");
assert_eq!(opt.server_domains[2], "cdn.example.com");
// Test case 2: server domains with ports
let args = vec![
"rustfs",
"/data/vol1",
"--server-domains",
"example.com:9000,api.example.com:8080,cdn.example.com:443",
];
let opt = Opt::parse_from(args);
assert_eq!(opt.server_domains.len(), 3);
assert_eq!(opt.server_domains[0], "example.com:9000");
assert_eq!(opt.server_domains[1], "api.example.com:8080");
assert_eq!(opt.server_domains[2], "cdn.example.com:443");
// Test case 3: mixed server domains (with and without ports)
let args = vec![
"rustfs",
"/data/vol1",
"--server-domains",
"example.com,api.example.com:9000,cdn.example.com,storage.example.com:8443",
];
let opt = Opt::parse_from(args);
assert_eq!(opt.server_domains.len(), 4);
assert_eq!(opt.server_domains[0], "example.com");
assert_eq!(opt.server_domains[1], "api.example.com:9000");
assert_eq!(opt.server_domains[2], "cdn.example.com");
assert_eq!(opt.server_domains[3], "storage.example.com:8443");
// Test case 4: single domain with port
let args = vec!["rustfs", "/data/vol1", "--server-domains", "example.com:9000"];
let opt = Opt::parse_from(args);
assert_eq!(opt.server_domains.len(), 1);
assert_eq!(opt.server_domains[0], "example.com:9000");
// Test case 5: localhost with different ports
let args = vec![
"rustfs",
"/data/vol1",
"--server-domains",
"localhost:9000,127.0.0.1:9000,localhost",
];
let opt = Opt::parse_from(args);
assert_eq!(opt.server_domains.len(), 3);
assert_eq!(opt.server_domains[0], "localhost:9000");
assert_eq!(opt.server_domains[1], "127.0.0.1:9000");
assert_eq!(opt.server_domains[2], "localhost");
}
}

View File

@@ -13,6 +13,7 @@
// limitations under the License.
use clap::Parser;
use clap::builder::NonEmptyStringValueParser;
use const_str::concat;
use std::string::ToString;
shadow_rs::shadow!(build);
@@ -50,7 +51,12 @@ const LONG_VERSION: &str = concat!(
#[command(version = SHORT_VERSION, long_version = LONG_VERSION)]
pub struct Opt {
/// DIR points to a directory on a filesystem.
#[arg(required = true, env = "RUSTFS_VOLUMES")]
#[arg(
required = true,
env = "RUSTFS_VOLUMES",
value_delimiter = ' ',
value_parser = NonEmptyStringValueParser::new()
)]
pub volumes: Vec<String>,
/// bind to a specific ADDRESS:PORT, ADDRESS can be an IP or hostname
@@ -58,7 +64,12 @@ pub struct Opt {
pub address: String,
/// Domain name used for virtual-hosted-style requests.
#[arg(long, env = "RUSTFS_SERVER_DOMAINS")]
#[arg(
long,
env = "RUSTFS_SERVER_DOMAINS",
value_delimiter = ',',
value_parser = NonEmptyStringValueParser::new()
)]
pub server_domains: Vec<String>,
/// Access key used for authentication.

View File

@@ -27,7 +27,7 @@ mod version;
// Ensure the correct path for parse_license is imported
use crate::init::{add_bucket_notification_configuration, init_buffer_profile_system, init_kms_system, init_update_check};
use crate::server::{
SHUTDOWN_TIMEOUT, ServiceState, ServiceStateManager, ShutdownSignal, init_event_notifier, shutdown_event_notifier,
SHUTDOWN_TIMEOUT, ServiceState, ServiceStateManager, ShutdownSignal, init_cert, init_event_notifier, shutdown_event_notifier,
start_audit_system, start_http_server, stop_audit_system, wait_for_shutdown,
};
use chrono::Datelike;
@@ -38,19 +38,19 @@ use rustfs_ahm::{
scanner::data_scanner::ScannerConfig, shutdown_ahm_services,
};
use rustfs_common::globals::set_global_addr;
use rustfs_ecstore::bucket::metadata_sys::init_bucket_metadata_sys;
use rustfs_ecstore::bucket::replication::{GLOBAL_REPLICATION_POOL, init_background_replication};
use rustfs_ecstore::config as ecconfig;
use rustfs_ecstore::config::GLOBAL_CONFIG_SYS;
use rustfs_ecstore::store_api::BucketOptions;
use rustfs_ecstore::{
StorageAPI,
bucket::metadata_sys::init_bucket_metadata_sys,
bucket::replication::{GLOBAL_REPLICATION_POOL, init_background_replication},
config as ecconfig,
config::GLOBAL_CONFIG_SYS,
endpoints::EndpointServerPools,
global::{set_global_rustfs_port, shutdown_background_services},
notification_sys::new_global_notification_sys,
set_global_endpoints,
store::ECStore,
store::init_local_disks,
store_api::BucketOptions,
update_erasure_type,
};
use rustfs_iam::init_iam_sys;
@@ -125,6 +125,11 @@ async fn async_main() -> Result<()> {
// Initialize performance profiling if enabled
profiling::init_from_env().await;
// Initialize TLS if a certificate path is provided
if let Some(tls_path) = &opt.tls_path {
init_cert(tls_path).await
}
// Run parameters
match run(opt).await {
Ok(_) => Ok(()),
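For context, a minimal sketch of the new startup gate. The Opt field tls_path is taken from the call site above; how it is populated (flag or environment variable) is not shown in this diff and is an assumption here. Because init_cert returns nothing and logs misses at debug level, a missing or empty TLS directory never aborts startup.
// Hypothetical helper mirroring the gate above; not part of the diff.
async fn maybe_init_cert(tls_path: Option<&str>) {
    if let Some(path) = tls_path {
        // Scans `path` for rustfs_cert.pem / public.crt / ca.crt (see cert.rs below).
        crate::server::init_cert(path).await;
    }
    // With no TLS path configured, no certificate loading is attempted at all.
}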

rustfs/src/server/cert.rs (new file, 160 lines added)
View File

@@ -0,0 +1,160 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
use rustfs_common::globals::set_global_root_cert;
use rustfs_config::{RUSTFS_CA_CERT, RUSTFS_PUBLIC_CERT, RUSTFS_TLS_CERT};
use tracing::{debug, info};
/// Initialize TLS certificates for inter-node communication.
/// This function attempts to load certificates from the specified `tls_path`.
/// It looks for `rustfs_cert.pem`, `public.crt`, and `ca.crt` files.
/// Additionally, it tries to load system root certificates from common locations
/// to ensure trust for public CAs when mixing self-signed and public certificates.
/// If any certificates are found, they are set as the global root certificates.
pub(crate) async fn init_cert(tls_path: &str) {
let mut cert_data = Vec::new();
// Try rustfs_cert.pem (custom cert name)
walk_dir(std::path::PathBuf::from(tls_path), RUSTFS_TLS_CERT, &mut cert_data).await;
// Try public.crt (common CA name)
let public_cert_path = std::path::Path::new(tls_path).join(RUSTFS_PUBLIC_CERT);
load_cert_file(public_cert_path.to_str().unwrap_or_default(), &mut cert_data, "CA certificate").await;
// Try ca.crt (common CA name)
let ca_cert_path = std::path::Path::new(tls_path).join(RUSTFS_CA_CERT);
load_cert_file(ca_cert_path.to_str().unwrap_or_default(), &mut cert_data, "CA certificate").await;
let trust_system_ca = rustfs_utils::get_env_bool(rustfs_config::ENV_TRUST_SYSTEM_CA, rustfs_config::DEFAULT_TRUST_SYSTEM_CA);
if !trust_system_ca {
// Attempt to load system root certificates to maintain trust for public CAs
// This is important when mixing self-signed internal certs with public external certs
let system_ca_paths = [
"/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Alpine
"/etc/pki/tls/certs/ca-bundle.crt", // Fedora/RHEL/CentOS
"/etc/ssl/ca-bundle.pem", // OpenSUSE
"/etc/pki/tls/cacert.pem", // OpenELEC
"/etc/ssl/cert.pem", // macOS/FreeBSD
"/usr/local/etc/openssl/cert.pem", // macOS/Homebrew OpenSSL
"/usr/local/share/certs/ca-root-nss.crt", // FreeBSD
"/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem", // RHEL
"/usr/share/pki/ca-trust-legacy/ca-bundle.legacy.crt", // RHEL legacy
];
let mut system_cert_loaded = false;
for path in system_ca_paths {
if load_cert_file(path, &mut cert_data, "system root certificates").await {
system_cert_loaded = true;
info!("Loaded system root certificates from {}", path);
break; // Stop after finding the first valid bundle
}
}
if !system_cert_loaded {
debug!("Could not find system root certificates in common locations.");
}
} else {
info!("Loading system root certificates disabled via RUSTFS_TRUST_SYSTEM_CA");
}
if !cert_data.is_empty() {
set_global_root_cert(cert_data).await;
info!("Configured custom root certificates for inter-node communication");
}
}
/// Helper function to load a certificate file and append to cert_data.
/// Returns true if the file was successfully loaded.
async fn load_cert_file(path: &str, cert_data: &mut Vec<u8>, desc: &str) -> bool {
if tokio::fs::metadata(path).await.is_ok() {
if let Ok(data) = tokio::fs::read(path).await {
cert_data.extend(data);
cert_data.push(b'\n');
info!("Loaded {} from {}", desc, path);
true
} else {
debug!("Failed to read {} from {}", desc, path);
false
}
} else {
debug!("{} file not found at {}", desc, path);
false
}
}
/// Load the certificate file if its name matches `cert_name`.
/// If it matches, the certificate data is appended to `cert_data`.
///
/// # Parameters
/// - `entry`: The directory entry to check.
/// - `cert_name`: The name of the certificate file to match.
/// - `cert_data`: A mutable vector to append loaded certificate data.
async fn load_if_matches(entry: &tokio::fs::DirEntry, cert_name: &str, cert_data: &mut Vec<u8>) {
let fname = entry.file_name().to_string_lossy().to_string();
if fname == cert_name {
let p = entry.path();
load_cert_file(&p.to_string_lossy(), cert_data, "certificate").await;
}
}
/// Search the directory at `path` and one level of subdirectories to find and load
/// certificates matching `cert_name`. Loaded certificate data is appended to
/// `cert_data`.
/// # Parameters
/// - `path`: The starting directory path to search for certificates.
/// - `cert_name`: The name of the certificate file to look for.
/// - `cert_data`: A mutable vector to append loaded certificate data.
async fn walk_dir(path: std::path::PathBuf, cert_name: &str, cert_data: &mut Vec<u8>) {
if let Ok(mut rd) = tokio::fs::read_dir(&path).await {
while let Ok(Some(entry)) = rd.next_entry().await {
if let Ok(ft) = entry.file_type().await {
if ft.is_file() {
load_if_matches(&entry, cert_name, cert_data).await;
} else if ft.is_dir() {
// Only check direct subdirectories, no deeper recursion
if let Ok(mut sub_rd) = tokio::fs::read_dir(&entry.path()).await {
while let Ok(Some(sub_entry)) = sub_rd.next_entry().await {
if let Ok(sub_ft) = sub_entry.file_type().await {
if sub_ft.is_file() {
load_if_matches(&sub_entry, cert_name, cert_data).await;
}
// Ignore subdirectories and symlinks in subdirs to limit to one level
}
}
}
} else if ft.is_symlink() {
// Follow symlink and treat target as file or directory, but limit to one level
if let Ok(meta) = tokio::fs::metadata(&entry.path()).await {
if meta.is_file() {
load_if_matches(&entry, cert_name, cert_data).await;
} else if meta.is_dir() {
// Treat as directory but only check its direct contents
if let Ok(mut sub_rd) = tokio::fs::read_dir(&entry.path()).await {
while let Ok(Some(sub_entry)) = sub_rd.next_entry().await {
if let Ok(sub_ft) = sub_entry.file_type().await {
if sub_ft.is_file() {
load_if_matches(&sub_entry, cert_name, cert_data).await;
}
// Ignore deeper levels
}
}
}
}
}
}
}
}
} else {
debug!("Certificate directory not found: {}", path.display());
}
}
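A usage sketch for the new module (not part of the diff): load_cert_file reads raw bytes and does not parse PEM at load time, so any file with a matching name under tls_path is picked up. The tempfile dev-dependency and the assumption that RUSTFS_PUBLIC_CERT resolves to "public.crt" (as the comment above suggests) are not guaranteed by this diff.
#[cfg(test)]
mod cert_usage_sketch {
    use super::init_cert;

    #[tokio::test]
    async fn loads_public_crt_from_tls_dir() {
        // Assumes `tempfile` is available as a dev-dependency.
        let dir = tempfile::tempdir().expect("create temp dir");
        let pem = b"-----BEGIN CERTIFICATE-----\n...\n-----END CERTIFICATE-----\n";
        tokio::fs::write(dir.path().join("public.crt"), pem)
            .await
            .expect("write placeholder certificate");
        // Registers whatever was found via set_global_root_cert; this diff exposes no
        // accessor to assert against, so the sketch only exercises the happy path.
        init_cert(dir.path().to_str().expect("utf-8 temp path")).await;
    }
}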

View File

@@ -13,6 +13,7 @@
// limitations under the License.
mod audit;
mod cert;
mod compress;
mod event;
mod http;
@@ -22,6 +23,7 @@ mod runtime;
mod service_state;
pub(crate) use audit::{start_audit_system, stop_audit_system};
pub(crate) use cert::init_cert;
pub(crate) use event::{init_event_notifier, shutdown_event_notifier};
pub(crate) use http::start_http_server;
pub(crate) use runtime::get_tokio_runtime_builder;

View File

@@ -183,6 +183,9 @@ export RUSTFS_ENABLE_PROFILING=false
# Heal configuration queue size
export RUSTFS_HEAL_QUEUE_SIZE=10000
# rustfs trust system CA certificates
export RUSTFS_TRUST_SYSTEM_CA=true
if [ -n "$1" ]; then
export RUSTFS_VOLUMES="$1"
fi