From 00f3275603819e80a61d851634281e29ab4e812f Mon Sep 17 00:00:00 2001 From: weisd Date: Wed, 7 Jan 2026 13:42:03 +0800 Subject: [PATCH 01/17] rm online check (#1416) --- crates/ecstore/src/set_disk.rs | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/crates/ecstore/src/set_disk.rs b/crates/ecstore/src/set_disk.rs index ccf2bcd9..92089d02 100644 --- a/crates/ecstore/src/set_disk.rs +++ b/crates/ecstore/src/set_disk.rs @@ -1485,9 +1485,7 @@ impl SetDisks { let object = object.clone(); let version_id = version_id.clone(); tokio::spawn(async move { - if let Some(disk) = disk - && disk.is_online().await - { + if let Some(disk) = disk { disk.read_version(&org_bucket, &bucket, &object, &version_id, &opts).await } else { Err(DiskError::DiskNotFound) From a95e549430851f3c34ee4cc8dd7be60aa200e8c4 Mon Sep 17 00:00:00 2001 From: houseme Date: Wed, 7 Jan 2026 18:05:52 +0800 Subject: [PATCH 02/17] Fix/fix improve for audit (#1418) --- Cargo.lock | 12 +++--- Cargo.toml | 8 ++-- Dockerfile | 4 +- Dockerfile.source | 2 +- crates/audit/src/registry.rs | 77 ++++++++++++++++++++++++++++++++++++ crates/audit/src/system.rs | 43 ++++++++++---------- crates/targets/src/arn.rs | 9 +---- rustfs/src/server/http.rs | 19 +++------ rustfs/src/storage/helper.rs | 8 ++++ 9 files changed, 127 insertions(+), 55 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 875747ac..5185f080 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8503,9 +8503,9 @@ dependencies = [ [[package]] name = "rustls" -version = "0.23.35" +version = "0.23.36" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "533f54bc6a7d4f647e46ad909549eda97bf5afc1585190ef692b4286b198bd8f" +checksum = "c665f33d38cea657d9614f766881e4d510e0eda4239891eea56b4cadcf01801b" dependencies = [ "aws-lc-rs", "log", @@ -8864,9 +8864,9 @@ dependencies = [ [[package]] name = "serde_json" -version = "1.0.148" +version = "1.0.149" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3084b546a1dd6289475996f182a22aba973866ea8e8b02c51d9f46b1336a22da" +checksum = "83fc039473c5595ace860d8c4fafa220ff474b3fc6bfdb4293327f1a37e94d86" dependencies = [ "itoa", "memchr", @@ -10466,9 +10466,9 @@ checksum = "8ecb6da28b8a351d773b68d5825ac39017e680750f980f3a1a85cd8dd28a47c1" [[package]] name = "url" -version = "2.5.7" +version = "2.5.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "08bc136a29a3d1758e07a9cca267be308aeebf5cfd5a10f3f67ab2097683ef5b" +checksum = "ff67a8a4397373c3ef660812acab3268222035010ab8680ec4215f38ba3d0eed" dependencies = [ "form_urlencoded", "idna", diff --git a/Cargo.toml b/Cargo.toml index 083860d2..b34b260c 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -50,7 +50,7 @@ resolver = "2" edition = "2024" license = "Apache-2.0" repository = "https://github.com/rustfs/rustfs" -rust-version = "1.88" +rust-version = "1.90" version = "0.0.5" homepage = "https://rustfs.com" description = "RustFS is a high-performance distributed object storage software built using Rust, one of the most popular languages worldwide. 
" @@ -136,7 +136,7 @@ rmcp = { version = "0.12.0" } rmp = { version = "0.8.15" } rmp-serde = { version = "1.3.1" } serde = { version = "1.0.228", features = ["derive"] } -serde_json = { version = "1.0.148", features = ["raw_value"] } +serde_json = { version = "1.0.149", features = ["raw_value"] } serde_urlencoded = "0.7.1" schemars = "1.2.0" @@ -150,7 +150,7 @@ hmac = { version = "0.13.0-rc.3" } jsonwebtoken = { version = "10.2.0", features = ["rust_crypto"] } pbkdf2 = "0.13.0-rc.5" rsa = { version = "0.10.0-rc.11" } -rustls = { version = "0.23.35" } +rustls = { version = "0.23.36", default-features = false, features = ["aws-lc-rs", "logging", "tls12", "prefer-post-quantum", "std"] } rustls-pemfile = "2.2.0" rustls-pki-types = "1.13.2" sha1 = "0.11.0-rc.3" @@ -245,7 +245,7 @@ tracing-error = "0.2.1" tracing-opentelemetry = "0.32.0" tracing-subscriber = { version = "0.3.22", features = ["env-filter", "time"] } transform-stream = "0.3.1" -url = "2.5.7" +url = "2.5.8" urlencoding = "2.1.3" uuid = { version = "1.19.0", features = ["v4", "fast-rng", "macro-diagnostics"] } vaultrs = { version = "0.7.4" } diff --git a/Dockerfile b/Dockerfile index 1f303ae6..8f22a10c 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.22 AS build +FROM alpine:3.23 AS build ARG TARGETARCH ARG RELEASE=latest @@ -40,7 +40,7 @@ RUN set -eux; \ rm -rf rustfs.zip /build/.tmp || true -FROM alpine:3.22 +FROM alpine:3.23 ARG RELEASE=latest ARG BUILD_DATE diff --git a/Dockerfile.source b/Dockerfile.source index 6cea2782..280d606d 100644 --- a/Dockerfile.source +++ b/Dockerfile.source @@ -16,7 +16,7 @@ ARG BUILDPLATFORM # ----------------------------- # Build stage # ----------------------------- -FROM rust:1.88-bookworm AS builder +FROM rust:1.91-trixie AS builder # Re-declare args after FROM ARG TARGETPLATFORM diff --git a/crates/audit/src/registry.rs b/crates/audit/src/registry.rs index 76edcabf..e3620b66 100644 --- a/crates/audit/src/registry.rs +++ b/crates/audit/src/registry.rs @@ -21,6 +21,7 @@ use futures::stream::FuturesUnordered; use hashbrown::{HashMap, HashSet}; use rustfs_config::{DEFAULT_DELIMITER, ENABLE_KEY, ENV_PREFIX, EnableState, audit::AUDIT_ROUTE_PREFIX}; use rustfs_ecstore::config::{Config, KVS}; +use rustfs_targets::arn::TargetID; use rustfs_targets::{Target, TargetError, target::ChannelTargetType}; use std::str::FromStr; use std::sync::Arc; @@ -392,4 +393,80 @@ impl AuditRegistry { Ok(()) } + + /// Creates a unique key for a target based on its type and ID + /// + /// # Arguments + /// * `target_type` - The type of the target (e.g., "webhook", "mqtt"). + /// * `target_id` - The identifier for the target instance. + /// + /// # Returns + /// * `String` - The unique key for the target. + pub fn create_key(&self, target_type: &str, target_id: &str) -> String { + let key = TargetID::new(target_id.to_string(), target_type.to_string()); + info!(target_type = %target_type, "Create key for {}", key); + key.to_string() + } + + /// Enables a target (placeholder, assumes target exists) + /// + /// # Arguments + /// * `target_type` - The type of the target (e.g., "webhook", "mqtt"). + /// * `target_id` - The identifier for the target instance. + /// + /// # Returns + /// * `AuditResult<()>` - Result indicating success or failure. 
+ pub fn enable_target(&self, target_type: &str, target_id: &str) -> AuditResult<()> { + let key = self.create_key(target_type, target_id); + if self.get_target(&key).is_some() { + info!("Target {}-{} enabled", target_type, target_id); + Ok(()) + } else { + Err(AuditError::Configuration( + format!("Target not found: {}-{}", target_type, target_id), + None, + )) + } + } + + /// Disables a target (placeholder, assumes target exists) + /// + /// # Arguments + /// * `target_type` - The type of the target (e.g., "webhook", "mqtt"). + /// * `target_id` - The identifier for the target instance. + /// + /// # Returns + /// * `AuditResult<()>` - Result indicating success or failure. + pub fn disable_target(&self, target_type: &str, target_id: &str) -> AuditResult<()> { + let key = self.create_key(target_type, target_id); + if self.get_target(&key).is_some() { + info!("Target {}-{} disabled", target_type, target_id); + Ok(()) + } else { + Err(AuditError::Configuration( + format!("Target not found: {}-{}", target_type, target_id), + None, + )) + } + } + + /// Upserts a target into the registry + /// + /// # Arguments + /// * `target_type` - The type of the target (e.g., "webhook", "mqtt"). + /// * `target_id` - The identifier for the target instance. + /// * `target` - The target instance to be upserted. + /// + /// # Returns + /// * `AuditResult<()>` - Result indicating success or failure. + pub fn upsert_target( + &mut self, + target_type: &str, + target_id: &str, + target: Box + Send + Sync>, + ) -> AuditResult<()> { + let key = self.create_key(target_type, target_id); + self.targets.insert(key, target); + Ok(()) + } } diff --git a/crates/audit/src/system.rs b/crates/audit/src/system.rs index 0441f280..d9116b65 100644 --- a/crates/audit/src/system.rs +++ b/crates/audit/src/system.rs @@ -274,9 +274,9 @@ impl AuditSystem { drop(state); let registry = self.registry.lock().await; - let target_ids = registry.list_targets(); + let target_keys = registry.list_targets(); - if target_ids.is_empty() { + if target_keys.is_empty() { warn!("No audit targets configured for dispatch"); return Ok(()); } @@ -284,22 +284,22 @@ impl AuditSystem { // Dispatch to all targets concurrently let mut tasks = Vec::new(); - for target_id in target_ids { - if let Some(target) = registry.get_target(&target_id) { + for target_key in target_keys { + if let Some(target) = registry.get_target(&target_key) { let entry_clone = Arc::clone(&entry); - let target_id_clone = target_id.clone(); + let target_key_clone = target_key.clone(); // Create EntityTarget for the audit log entry let entity_target = EntityTarget { object_name: entry.api.name.clone().unwrap_or_default(), bucket_name: entry.api.bucket.clone().unwrap_or_default(), - event_name: rustfs_targets::EventName::ObjectCreatedPut, // Default, should be derived from entry + event_name: entry.event, // Default, should be derived from entry data: (*entry_clone).clone(), }; let task = async move { let result = target.save(Arc::new(entity_target)).await; - (target_id_clone, result) + (target_key_clone, result) }; tasks.push(task); @@ -312,14 +312,14 @@ impl AuditSystem { let mut errors = Vec::new(); let mut success_count = 0; - for (target_id, result) in results { + for (target_key, result) in results { match result { Ok(_) => { success_count += 1; observability::record_target_success(); } Err(e) => { - error!(target_id = %target_id, error = %e, "Failed to dispatch audit log to target"); + error!(target_id = %target_key, error = %e, "Failed to dispatch audit log to target"); 
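                     // collect the failure so a combined result can be reported
                     // once every target has been attempted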
errors.push(e); observability::record_target_failure(); } @@ -360,18 +360,18 @@ impl AuditSystem { drop(state); let registry = self.registry.lock().await; - let target_ids = registry.list_targets(); + let target_keys = registry.list_targets(); - if target_ids.is_empty() { + if target_keys.is_empty() { warn!("No audit targets configured for batch dispatch"); return Ok(()); } let mut tasks = Vec::new(); - for target_id in target_ids { - if let Some(target) = registry.get_target(&target_id) { + for target_key in target_keys { + if let Some(target) = registry.get_target(&target_key) { let entries_clone: Vec<_> = entries.iter().map(Arc::clone).collect(); - let target_id_clone = target_id.clone(); + let target_key_clone = target_key.clone(); let task = async move { let mut success_count = 0; @@ -380,7 +380,7 @@ impl AuditSystem { let entity_target = EntityTarget { object_name: entry.api.name.clone().unwrap_or_default(), bucket_name: entry.api.bucket.clone().unwrap_or_default(), - event_name: rustfs_targets::EventName::ObjectCreatedPut, + event_name: entry.event, data: (*entry).clone(), }; match target.save(Arc::new(entity_target)).await { @@ -388,7 +388,7 @@ impl AuditSystem { Err(e) => errors.push(e), } } - (target_id_clone, success_count, errors) + (target_key_clone, success_count, errors) }; tasks.push(task); } @@ -418,6 +418,7 @@ impl AuditSystem { } /// Starts the audit stream processing for a target with batching and retry logic + /// /// # Arguments /// * `store` - The store from which to read audit entries /// * `target` - The target to which audit entries will be sent @@ -501,7 +502,7 @@ impl AuditSystem { /// Enables a specific target /// /// # Arguments - /// * `target_id` - The ID of the target to enable + /// * `target_id` - The ID of the target to enable, TargetID to string /// /// # Returns /// * `AuditResult<()>` - Result indicating success or failure @@ -520,7 +521,7 @@ impl AuditSystem { /// Disables a specific target /// /// # Arguments - /// * `target_id` - The ID of the target to disable + /// * `target_id` - The ID of the target to disable, TargetID to string /// /// # Returns /// * `AuditResult<()>` - Result indicating success or failure @@ -539,7 +540,7 @@ impl AuditSystem { /// Removes a target from the system /// /// # Arguments - /// * `target_id` - The ID of the target to remove + /// * `target_id` - The ID of the target to remove, TargetID to string /// /// # Returns /// * `AuditResult<()>` - Result indicating success or failure @@ -559,7 +560,7 @@ impl AuditSystem { /// Updates or inserts a target /// /// # Arguments - /// * `target_id` - The ID of the target to upsert + /// * `target_id` - The ID of the target to upsert, TargetID to string /// * `target` - The target instance to insert or update /// /// # Returns @@ -596,7 +597,7 @@ impl AuditSystem { /// Gets information about a specific target /// /// # Arguments - /// * `target_id` - The ID of the target to retrieve + /// * `target_id` - The ID of the target to retrieve, TargetID to string /// /// # Returns /// * `Option` - Target ID if found diff --git a/crates/targets/src/arn.rs b/crates/targets/src/arn.rs index a2ef3528..1853ad9b 100644 --- a/crates/targets/src/arn.rs +++ b/crates/targets/src/arn.rs @@ -37,11 +37,6 @@ impl TargetID { Self { id, name } } - /// Convert to string representation - pub fn to_id_string(&self) -> String { - format!("{}:{}", self.id, self.name) - } - /// Create an ARN pub fn to_arn(&self, region: &str) -> ARN { ARN { @@ -80,7 +75,7 @@ impl Serialize for TargetID { where S: 
Serializer, { - serializer.serialize_str(&self.to_id_string()) + serializer.serialize_str(&self.to_string()) } } @@ -130,7 +125,7 @@ impl ARN { if self.target_id.id.is_empty() && self.target_id.name.is_empty() && self.region.is_empty() { return String::new(); } - format!("{}:{}:{}", ARN_PREFIX, self.region, self.target_id.to_id_string()) + format!("{}:{}:{}", ARN_PREFIX, self.region, self.target_id) } /// Parsing ARN from string diff --git a/rustfs/src/server/http.rs b/rustfs/src/server/http.rs index 33c0bf40..95debdcc 100644 --- a/rustfs/src/server/http.rs +++ b/rustfs/src/server/http.rs @@ -378,20 +378,11 @@ pub async fn start_http_server( // Enable TCP Keepalive to detect dead clients (e.g. power loss) // Idle: 10s, Interval: 5s, Retries: 3 - let ka = { - #[cfg(not(target_os = "openbsd"))] - let ka = TcpKeepalive::new() - .with_time(Duration::from_secs(10)) - .with_interval(Duration::from_secs(5)) - .with_retries(3); - - // On OpenBSD socket2 only supports configuring the initial - // TCP keepalive timeout; intervals and retries cannot be set. - #[cfg(target_os = "openbsd")] - let ka = TcpKeepalive::new().with_time(Duration::from_secs(10)); - - ka - }; + let mut ka = TcpKeepalive::new().with_time(Duration::from_secs(10)); + #[cfg(not(target_os = "openbsd"))] + { + ka = ka.with_interval(Duration::from_secs(5)).with_retries(3); + } if let Err(err) = socket_ref.set_tcp_keepalive(&ka) { warn!(?err, "Failed to set TCP_KEEPALIVE"); diff --git a/rustfs/src/storage/helper.rs b/rustfs/src/storage/helper.rs index cf732fd1..aa4f0094 100644 --- a/rustfs/src/storage/helper.rs +++ b/rustfs/src/storage/helper.rs @@ -162,9 +162,17 @@ impl OperationHelper { .build(); let mut final_builder = builder.api(api_details.clone()); + if let Ok(res) = result { + final_builder = final_builder.resp_header(extract_resp_elements(res)); + } if let Some(err) = error_msg { final_builder = final_builder.error(err); } + + if let Some(sk) = rustfs_credentials::get_global_access_key_opt() { + final_builder = final_builder.access_key(&sk); + } + self.audit_builder = Some(final_builder); self.api_builder = ApiDetailsBuilder(api_details); // Store final details for Drop use } From 4f5653e6561126f10364f67315677b9d7b1bfbe0 Mon Sep 17 00:00:00 2001 From: majinghe <42570491+majinghe@users.noreply.github.com> Date: Thu, 8 Jan 2026 20:44:16 +0800 Subject: [PATCH 03/17] add upgrade strategy for standalone mode (#1431) --- helm/rustfs/templates/deployment.yaml | 4 ++++ helm/rustfs/values.yaml | 5 +++++ 2 files changed, 9 insertions(+) diff --git a/helm/rustfs/templates/deployment.yaml b/helm/rustfs/templates/deployment.yaml index 2ef42be9..d0d2a10a 100644 --- a/helm/rustfs/templates/deployment.yaml +++ b/helm/rustfs/templates/deployment.yaml @@ -10,6 +10,10 @@ metadata: {{- end }} spec: replicas: 1 + {{- with .Values.mode.standalone.strategy }} + strategy: + {{- toYaml . | nindent 4 }} + {{- end }} selector: matchLabels: {{- include "rustfs.selectorLabels" . 
| nindent 6 }}
diff --git a/helm/rustfs/values.yaml b/helm/rustfs/values.yaml
index c8983679..f59260b1 100644
--- a/helm/rustfs/values.yaml
+++ b/helm/rustfs/values.yaml
@@ -30,6 +30,11 @@ fullnameOverride: ""
 mode:
   standalone:
     enabled: false
+    strategy:
+      type: RollingUpdate
+      rollingUpdate:
+        maxSurge: 0
+        maxUnavailable: 1
   distributed:
     enabled: true

From 900f7724b84fd2c10bdf22dffb92a07b45f55ab7 Mon Sep 17 00:00:00 2001
From: majinghe <42570491+majinghe@users.noreply.github.com>
Date: Thu, 8 Jan 2026 20:57:55 +0800
Subject: [PATCH 04/17] add gateway api support due to ingress nginx retirement
 (#1432)

Co-authored-by: houseme

---
 helm/README.md                                | 20 ++++++++++++++++
 helm/rustfs/templates/gateway-api/gateway.yml | 23 +++++++++++++++++++
 .../templates/gateway-api/httproute.yml       | 19 +++++++++++++++
 helm/rustfs/templates/secret-tls.yaml         |  2 +-
 helm/rustfs/values.yaml                       |  6 +++++
 5 files changed, 69 insertions(+), 1 deletion(-)
 create mode 100644 helm/rustfs/templates/gateway-api/gateway.yml
 create mode 100644 helm/rustfs/templates/gateway-api/httproute.yml

diff --git a/helm/README.md b/helm/README.md
index 3a1ae7b0..2d635767 100644
--- a/helm/README.md
+++ b/helm/README.md
@@ -110,6 +110,10 @@ RustFS helm chart supports **standalone and distributed mode**. For standalone m
 | storageclass.logStorageSize | string | `"256Mi"` | The storage size for logs PVC. |
 | storageclass.name | string | `"local-path"` | The name for StorageClass. |
 | tolerations | list | `[]` | |
+| gatewayApi.enabled | bool | `false` | To enable/disable gateway api support. |
+| gatewayApi.gatewayClass | string | `traefik` | Gateway class implementation. |
+| gatewayApi.hostname | string | `example.rustfs.com` | Hostname used to access RustFS via the gateway api. |
+| gatewayApi.secretName | string | `secret-tls` | TLS secret used to serve RustFS over HTTPS. |

 ---

@@ -207,6 +211,22 @@ You should use `--set-file` parameter when running `helm install` command, for e
 helm install rustfs rustfs/rustfs -n rustfs --set tls.enabled=true,--set-file tls.crt=./tls.crt,--set-file tls.key=./tls.key
 ```

+# Gateway API support (alpha)
+
+Because of the [ingress nginx retirement](https://kubernetes.io/blog/2025/11/11/ingress-nginx-retirement/) coming in March 2026, RustFS adds support for the [gateway api](https://gateway-api.sigs.k8s.io/). Currently, RustFS only supports traefik as the gateway class; support for more gateway classes will be added once they have been tested. To enable the gateway api, set `gatewayApi.enabled` to `true` and `ingress.enabled` to `false`. After installation, you can find the `Gateway` and `HttpRoute` resources:
+
+```
+$ kubectl -n rustfs get gateway
+NAME             CLASS     ADDRESS   PROGRAMMED   AGE
+rustfs-gateway   traefik             True         169m
+
+$ kubectl -n rustfs get httproute
+NAME           HOSTNAMES                AGE
+rustfs-route   ["example.rustfs.com"]   172m
+```
+
+Then access the RustFS instance via `https://example.rustfs.com` or `http://example.rustfs.com`.
+
 # Uninstall

 Uninstalling the rustfs installation with command,

diff --git a/helm/rustfs/templates/gateway-api/gateway.yml b/helm/rustfs/templates/gateway-api/gateway.yml
new file mode 100644
index 00000000..8b50f5ae
--- /dev/null
+++ b/helm/rustfs/templates/gateway-api/gateway.yml
@@ -0,0 +1,23 @@
+{{- if .Values.gatewayApi.enabled }}
+apiVersion: gateway.networking.k8s.io/v1
+kind: Gateway
+metadata:
+  name: {{ include "rustfs.fullname" . 
}}-gateway +spec: + gatewayClassName: {{ .Values.gatewayApi.gatewayClass }} + listeners: + - name: http + port: 80 + protocol: HTTP + allowedRoutes: + namespaces: + from: Same + - name: https + port: 443 + protocol: HTTPS + tls: + mode: Terminate + certificateRefs: + - name: {{ .Values.gatewayApi.secretName }} + kind: Secret +{{- end }} diff --git a/helm/rustfs/templates/gateway-api/httproute.yml b/helm/rustfs/templates/gateway-api/httproute.yml new file mode 100644 index 00000000..9ac5d968 --- /dev/null +++ b/helm/rustfs/templates/gateway-api/httproute.yml @@ -0,0 +1,19 @@ +{{- if .Values.gatewayApi.enabled -}} +apiVersion: gateway.networking.k8s.io/v1 +kind: HTTPRoute +metadata: + name: {{ include "rustfs.fullname" . }}-route +spec: + parentRefs: + - name: {{ include "rustfs.fullname" . }}-gateway + hostnames: + - {{ .Values.gatewayApi.hostname }} + rules: + - matches: + - path: + type: PathPrefix + value: / + backendRefs: + - name: rustfs-svc + port: 9001 +{{- end }} diff --git a/helm/rustfs/templates/secret-tls.yaml b/helm/rustfs/templates/secret-tls.yaml index 28b50600..fea2cf58 100644 --- a/helm/rustfs/templates/secret-tls.yaml +++ b/helm/rustfs/templates/secret-tls.yaml @@ -1,4 +1,4 @@ -{{- if and .Values.ingress.tls.enabled (not .Values.ingress.tls.certManager.enabled) }} +{{- if and (or .Values.gatewayApi.enabled .Values.ingress.tls.enabled) (not .Values.ingress.tls.certManager.enabled) }} apiVersion: v1 kind: Secret metadata: diff --git a/helm/rustfs/values.yaml b/helm/rustfs/values.yaml index f59260b1..66b9f006 100644 --- a/helm/rustfs/values.yaml +++ b/helm/rustfs/values.yaml @@ -135,6 +135,12 @@ ingress: crt: tls.crt key: tls.key +gatewayApi: + enabled: false + gatewayClass: traefik + hostname: example.rustfs.com + secretName: secret-tls + resources: # We usually recommend not to specify default resources and to leave this as a conscious # choice for the user. This also increases chances charts run on environments with little From 9e162b6e9ebb874cc1d06a7b33bc4a05786578aa Mon Sep 17 00:00:00 2001 From: mkrueger92 <7305571+mkrueger92@users.noreply.github.com> Date: Thu, 8 Jan 2026 14:16:00 +0100 Subject: [PATCH 05/17] Default to helm chart version for docker image and not latest (#1385) Signed-off-by: mkrueger92 <7305571+mkrueger92@users.noreply.github.com> Co-authored-by: houseme --- .github/workflows/helm-package.yml | 1 - helm/rustfs/values.yaml | 2 +- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/.github/workflows/helm-package.yml b/.github/workflows/helm-package.yml index d1f69288..cb21874c 100644 --- a/.github/workflows/helm-package.yml +++ b/.github/workflows/helm-package.yml @@ -44,7 +44,6 @@ jobs: set -x old_version=$(grep "^appVersion:" helm/rustfs/Chart.yaml | awk '{print $2}') sed -i "s/$old_version/$new_version/g" helm/rustfs/Chart.yaml - sed -i "/^image:/,/^[^ ]/ s/tag:.*/tag: "$new_version"/" helm/rustfs/values.yaml - name: Set up Helm uses: azure/setup-helm@v4.3.0 diff --git a/helm/rustfs/values.yaml b/helm/rustfs/values.yaml index 66b9f006..d8017cc0 100644 --- a/helm/rustfs/values.yaml +++ b/helm/rustfs/values.yaml @@ -11,7 +11,7 @@ image: # This sets the pull policy for images. pullPolicy: IfNotPresent # Overrides the image tag whose default is the chart appVersion. 
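   # When left empty, the tag is expected to fall back to the chart's appVersion
   # (illustrative image reference: rustfs/rustfs:<appVersion>), not "latest".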
- tag: "latest" + tag: "" # This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ imagePullSecrets: [] From 04f441361e3a683d1162a2bf75fa5863568a5933 Mon Sep 17 00:00:00 2001 From: houseme Date: Sat, 10 Jan 2026 02:15:08 +0800 Subject: [PATCH 06/17] replace winapi to windows crate (#1455) --- Cargo.lock | 2 +- Cargo.toml | 2 +- crates/utils/Cargo.toml | 4 +- crates/utils/src/net.rs | 4 +- crates/utils/src/os/windows.rs | 258 ++++++++++++++++++++++----------- 5 files changed, 175 insertions(+), 95 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 5185f080..821a203d 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -8410,7 +8410,7 @@ dependencies = [ "tracing", "transform-stream", "url", - "winapi", + "windows 0.62.2", "zstd", ] diff --git a/Cargo.toml b/Cargo.toml index b34b260c..b09e7730 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -251,7 +251,7 @@ uuid = { version = "1.19.0", features = ["v4", "fast-rng", "macro-diagnostics"] vaultrs = { version = "0.7.4" } walkdir = "2.5.0" wildmatch = { version = "2.6.1", features = ["serde"] } -winapi = { version = "0.3.9" } +windows = { version = "0.62.2" } xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] } zip = "7.0.0" zstd = "0.13.3" diff --git a/crates/utils/Cargo.toml b/crates/utils/Cargo.toml index 7295c43b..9ba7c712 100644 --- a/crates/utils/Cargo.toml +++ b/crates/utils/Cargo.toml @@ -72,7 +72,7 @@ rand = { workspace = true } tokio = { workspace = true, features = ["macros", "rt-multi-thread"] } [target.'cfg(windows)'.dependencies] -winapi = { workspace = true, optional = true, features = ["std", "fileapi", "minwindef", "ntdef", "winnt"] } +windows = { workspace = true, optional = true, features = ["Win32_Storage_FileSystem", "Win32_Foundation"] } [lints] workspace = true @@ -89,7 +89,7 @@ compress = ["dep:flate2", "dep:brotli", "dep:snap", "dep:lz4", "dep:zstd"] string = ["dep:regex"] crypto = ["dep:base64-simd", "dep:hex-simd", "dep:hmac", "dep:hyper", "dep:sha1"] hash = ["dep:highway", "dep:md-5", "dep:sha2", "dep:blake3", "dep:serde", "dep:siphasher", "dep:hex-simd", "dep:crc-fast"] -os = ["dep:nix", "dep:tempfile", "winapi"] # operating system utilities +os = ["dep:nix", "dep:tempfile", "dep:windows"] # operating system utilities integration = [] # integration test features sys = ["dep:sysinfo"] # system information features http = ["dep:convert_case", "dep:http", "dep:regex"] diff --git a/crates/utils/src/net.rs b/crates/utils/src/net.rs index 57296275..65638b8b 100644 --- a/crates/utils/src/net.rs +++ b/crates/utils/src/net.rs @@ -14,8 +14,6 @@ use bytes::Bytes; use futures::{Stream, StreamExt, pin_mut}; -#[cfg(test)] -use std::sync::MutexGuard; use std::{ collections::{HashMap, HashSet}, fmt::Display, @@ -83,7 +81,7 @@ fn reset_dns_resolver_inner() { #[cfg(test)] pub struct MockResolverGuard { - _lock: MutexGuard<'static, ()>, + _lock: std::sync::MutexGuard<'static, ()>, } #[cfg(test)] diff --git a/crates/utils/src/os/windows.rs b/crates/utils/src/os/windows.rs index ab76a2cd..5bfa79d1 100644 --- a/crates/utils/src/os/windows.rs +++ b/crates/utils/src/os/windows.rs @@ -1,4 +1,3 @@ -#![allow(unsafe_code)] // TODO: audit unsafe code // Copyright 2024 RustFS Team // // Licensed under the Apache License, Version 2.0 (the "License"); @@ -13,149 +12,232 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use super::{DiskInfo, IOStats};
+#![allow(unsafe_code)] // TODO: audit unsafe code
+
+use crate::os::{DiskInfo, IOStats};
 use std::io::Error;
-use std::mem;
-use std::os::windows::ffi::OsStrExt;
 use std::path::Path;
-use winapi::shared::minwindef::{DWORD, MAX_PATH};
-use winapi::shared::ntdef::ULARGE_INTEGER;
-use winapi::um::fileapi::{GetDiskFreeSpaceExW, GetDiskFreeSpaceW, GetVolumeInformationW, GetVolumePathNameW};
-use winapi::um::winnt::{LPCWSTR, WCHAR};
+use windows::Win32::Foundation::MAX_PATH;
+use windows::Win32::Storage::FileSystem::{GetDiskFreeSpaceExW, GetDiskFreeSpaceW, GetVolumeInformationW, GetVolumePathNameW};

 /// Returns total and free bytes available in a directory, e.g. `C:\`.
 pub fn get_info(p: impl AsRef<Path>) -> std::io::Result<DiskInfo> {
-    let path_display = p.as_ref().display();
-    let path_wide: Vec<u16> = p
+    let path_wide = p
         .as_ref()
-        .to_path_buf()
-        .into_os_string()
-        .encode_wide()
-        .chain(std::iter::once(0)) // Null-terminate the string
-        .collect();
+        .to_string_lossy()
+        .encode_utf16()
+        .chain(std::iter::once(0))
+        .collect::<Vec<u16>>();

-    let mut lp_free_bytes_available: ULARGE_INTEGER = unsafe { mem::zeroed() };
-    let mut lp_total_number_of_bytes: ULARGE_INTEGER = unsafe { mem::zeroed() };
-    let mut lp_total_number_of_free_bytes: ULARGE_INTEGER = unsafe { mem::zeroed() };
+    let mut free_bytes_available = 0u64;
+    let mut total_number_of_bytes = 0u64;
+    let mut total_number_of_free_bytes = 0u64;

-    let success = unsafe {
+    unsafe {
         GetDiskFreeSpaceExW(
-            path_wide.as_ptr(),
-            &mut lp_free_bytes_available,
-            &mut lp_total_number_of_bytes,
-            &mut lp_total_number_of_free_bytes,
+            windows::core::PCWSTR::from_raw(path_wide.as_ptr()),
+            Some(&mut free_bytes_available),
+            Some(&mut total_number_of_bytes),
+            Some(&mut total_number_of_free_bytes),
         )
-    };
-    if success == 0 {
-        return Err(Error::last_os_error());
+        .map_err(|e| Error::from_raw_os_error(e.code().0 as i32))?;
     }

-    let total = unsafe { *lp_total_number_of_bytes.QuadPart() };
-    let free = unsafe { *lp_total_number_of_free_bytes.QuadPart() };
+    let total = total_number_of_bytes;
+    let free = total_number_of_free_bytes;

     if free > total {
         return Err(Error::other(format!(
-            "detected free space ({free}) > total drive space ({total}), fs corruption at ({path_display}). please run 'fsck'"
+            "detected free space ({free}) > total drive space ({total}), fs corruption at ({}). please run 'fsck'",
+            p.as_ref().display()
         )));
     }

-    let mut lp_sectors_per_cluster: DWORD = 0;
-    let mut lp_bytes_per_sector: DWORD = 0;
-    let mut lp_number_of_free_clusters: DWORD = 0;
-    let mut lp_total_number_of_clusters: DWORD = 0;
+    let mut sectors_per_cluster = 0u32;
+    let mut bytes_per_sector = 0u32;
+    let mut number_of_free_clusters = 0u32;
+    let mut total_number_of_clusters = 0u32;

-    let success = unsafe {
+    unsafe {
         GetDiskFreeSpaceW(
-            path_wide.as_ptr(),
-            &mut lp_sectors_per_cluster,
-            &mut lp_bytes_per_sector,
-            &mut lp_number_of_free_clusters,
-            &mut lp_total_number_of_clusters,
+            windows::core::PCWSTR::from_raw(path_wide.as_ptr()),
+            Some(&mut sectors_per_cluster),
+            Some(&mut bytes_per_sector),
+            Some(&mut number_of_free_clusters),
+            Some(&mut total_number_of_clusters),
         )
-    };
-    if success == 0 {
-        return Err(Error::last_os_error());
+        .map_err(|e| Error::from_raw_os_error(e.code().0 as i32))?;
     }

     Ok(DiskInfo {
         total,
         free,
         used: total - free,
-        files: lp_total_number_of_clusters as u64,
-        ffree: lp_number_of_free_clusters as u64,
-
-        // TODO This field is currently unused, and since this logic causes a
-        // NotFound error during startup on Windows systems, it has been commented out here
-        //
-        // The error occurs in GetVolumeInformationW where the path parameter
-        // is of type [WCHAR; MAX_PATH]. For a drive letter, there are excessive
-        // trailing zeros, which causes the failure here.
-        //
-        // fstype: get_fs_type(&path_wide)?,
+        files: total_number_of_clusters as u64,
+        ffree: number_of_free_clusters as u64,
+        fstype: get_fs_type(&path_wide).unwrap_or_default(),
         ..Default::default()
     })
 }

 /// Returns leading volume name.
+///
+/// # Arguments
+/// * `v` - A slice of u16 representing the path in UTF-16 encoding
+///
+/// # Returns
+/// * `Ok(Vec<u16>)` containing the volume name in UTF-16 encoding.
+/// * `Err` if an error occurs during the operation.
 #[allow(dead_code)]
-fn get_volume_name(v: &[WCHAR]) -> std::io::Result<LPCWSTR> {
-    let volume_name_size: DWORD = MAX_PATH as _;
-    let mut lp_volume_name_buffer: [WCHAR; MAX_PATH] = [0; MAX_PATH];
+fn get_volume_name(v: &[u16]) -> std::io::Result<Vec<u16>> {
+    let mut volume_name_buffer = [0u16; MAX_PATH as usize];

-    let success = unsafe { GetVolumePathNameW(v.as_ptr(), lp_volume_name_buffer.as_mut_ptr(), volume_name_size) };
-
-    if success == 0 {
-        return Err(Error::last_os_error());
+    unsafe {
+        GetVolumePathNameW(windows::core::PCWSTR::from_raw(v.as_ptr()), &mut volume_name_buffer)
+            .map_err(|e| Error::from_raw_os_error(e.code().0 as i32))?;
     }

-    Ok(lp_volume_name_buffer.as_ptr())
+    let len = volume_name_buffer
+        .iter()
+        .position(|&x| x == 0)
+        .unwrap_or(volume_name_buffer.len());
+    Ok(volume_name_buffer[..len].to_vec())
 }

 #[allow(dead_code)]
-fn utf16_to_string(v: &[WCHAR]) -> String {
+fn utf16_to_string(v: &[u16]) -> String {
     let len = v.iter().position(|&x| x == 0).unwrap_or(v.len());
     String::from_utf16_lossy(&v[..len])
 }

 /// Returns the filesystem type of the underlying mounted filesystem
+///
+/// # Arguments
+/// * `p` - A slice of u16 representing the path in UTF-16 encoding
+///
+/// # Returns
+/// * `Ok(String)` containing the filesystem type (e.g., "NTFS", "FAT32").
+/// * `Err` if an error occurs during the operation.
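+///
+/// # Example
+/// A rough sketch (illustrative; assumes a UTF-16, NUL-terminated path):
+/// ```ignore
+/// let path: Vec<u16> = "C:\\".encode_utf16().chain(std::iter::once(0)).collect();
+/// let fstype = get_fs_type(&path)?; // e.g. "NTFS"
+/// ```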
#[allow(dead_code)]
-fn get_fs_type(p: &[WCHAR]) -> std::io::Result<String> {
+fn get_fs_type(p: &[u16]) -> std::io::Result<String> {
     let path = get_volume_name(p)?;

-    let volume_name_size: DWORD = MAX_PATH as _;
-    let n_file_system_name_size: DWORD = MAX_PATH as _;
+    let mut volume_serial_number = 0u32;
+    let mut maximum_component_length = 0u32;
+    let mut file_system_flags = 0u32;
+    let mut volume_name_buffer = [0u16; MAX_PATH as usize];
+    let mut file_system_name_buffer = [0u16; MAX_PATH as usize];

-    let mut lp_volume_serial_number: DWORD = 0;
-    let mut lp_maximum_component_length: DWORD = 0;
-    let mut lp_file_system_flags: DWORD = 0;
-
-    let mut lp_volume_name_buffer: [WCHAR; MAX_PATH] = [0; MAX_PATH];
-    let mut lp_file_system_name_buffer: [WCHAR; MAX_PATH] = [0; MAX_PATH];
-
-    let success = unsafe {
+    unsafe {
         GetVolumeInformationW(
-            path,
-            lp_volume_name_buffer.as_mut_ptr(),
-            volume_name_size,
-            &mut lp_volume_serial_number,
-            &mut lp_maximum_component_length,
-            &mut lp_file_system_flags,
-            lp_file_system_name_buffer.as_mut_ptr(),
-            n_file_system_name_size,
+            windows::core::PCWSTR::from_raw(path.as_ptr()),
+            Some(&mut volume_name_buffer),
+            Some(&mut volume_serial_number),
+            Some(&mut maximum_component_length),
+            Some(&mut file_system_flags),
+            Some(&mut file_system_name_buffer),
         )
-    };
-
-    if success == 0 {
-        return Err(Error::last_os_error());
+        .map_err(|e| Error::from_raw_os_error(e.code().0 as i32))?;
     }

-    Ok(utf16_to_string(&lp_file_system_name_buffer))
+    Ok(utf16_to_string(&file_system_name_buffer))
 }

-pub fn same_disk(_disk1: &str, _disk2: &str) -> std::io::Result<bool> {
-    Ok(false)
+/// Determines if two paths are on the same disk.
+///
+/// # Arguments
+/// * `disk1` - The first disk path as a string slice.
+/// * `disk2` - The second disk path as a string slice.
+///
+/// # Returns
+/// * `Ok(true)` if both paths are on the same disk.
+/// * `Ok(false)` if both paths are on different disks.
+/// * `Err` if an error occurs during the operation.
+pub fn same_disk(disk1: &str, disk2: &str) -> std::io::Result<bool> {
+    let path1_wide: Vec<u16> = disk1.encode_utf16().chain(std::iter::once(0)).collect();
+    let path2_wide: Vec<u16> = disk2.encode_utf16().chain(std::iter::once(0)).collect();
+
+    let volume1 = get_volume_name(&path1_wide)?;
+    let volume2 = get_volume_name(&path2_wide)?;
+
+    Ok(volume1 == volume2)
 }

+/// Retrieves I/O statistics for a drive identified by its major and minor numbers.
+///
+/// # Arguments
+/// * `major` - The major number of the drive.
+/// * `minor` - The minor number of the drive.
+///
+/// # Returns
+/// * `Ok(IOStats)` containing the I/O statistics.
+/// * `Err` if an error occurs during the operation.
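+///
+/// Note: the current Windows implementation is a stub and always returns
+/// `IOStats::default()` (all counters zero); see the comment in the body below.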
pub fn get_drive_stats(_major: u32, _minor: u32) -> std::io::Result { + // Windows does not provide direct IO stats via simple API; this is a stub + // For full implementation, consider using PDH or WMI, but that adds complexity Ok(IOStats::default()) } + +#[cfg(test)] +mod tests { + use crate::os::{get_info, same_disk}; + + #[cfg(target_os = "windows")] + #[test] + fn test_get_info_valid_path() { + let temp_dir = tempfile::tempdir().unwrap(); + let info = get_info(temp_dir.path()).unwrap(); + + // Verify disk info is valid + assert!(info.total > 0); + assert!(info.free > 0); + assert!(info.used > 0); + assert!(info.files > 0); + assert!(info.ffree > 0); + assert!(!info.fstype.is_empty()); + } + #[cfg(target_os = "windows")] + #[test] + fn test_get_info_invalid_path() { + use std::path::PathBuf; + let invalid_path = PathBuf::from("Z:\\invalid\\path"); + let result = get_info(&invalid_path); + + assert!(result.is_err()); + } + #[cfg(target_os = "windows")] + #[test] + fn test_same_disk_same_path() { + let temp_dir = tempfile::tempdir().unwrap(); + let path = temp_dir.path().to_str().unwrap(); + + let result = same_disk(path, path).unwrap(); + assert!(result); + } + #[cfg(target_os = "windows")] + #[test] + fn test_same_disk_different_paths() { + let temp_dir1 = tempfile::tempdir().unwrap(); + let temp_dir2 = tempfile::tempdir().unwrap(); + + let path1 = temp_dir1.path().to_str().unwrap(); + let path2 = temp_dir2.path().to_str().unwrap(); + + let _result = same_disk(path1, path2).unwrap(); + // Since both temporary directories are created in the same file system, + // they should be on the same disk in most cases + // Test passes if the function doesn't panic - the actual result depends on test environment + } + + #[cfg(target_os = "windows")] + #[test] + fn get_info_with_root_drive() { + let info = get_info("C:\\").unwrap(); + assert!(info.total > 0); + assert!(info.free > 0); + assert!(info.used > 0); + assert!(info.files > 0); + assert!(info.ffree > 0); + assert!(!info.fstype.is_empty()); + } +} From d532c7c9725fb3e66f5240b3adcb02df7efbf738 Mon Sep 17 00:00:00 2001 From: GatewayJ <835269233@qq.com> Date: Sat, 10 Jan 2026 10:11:08 +0800 Subject: [PATCH 07/17] feat: object-list access (#1457) Signed-off-by: loverustfs Co-authored-by: loverustfs Co-authored-by: loverustfs Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com> --- rustfs/src/storage/access.rs | 11 +++++++---- 1 file changed, 7 insertions(+), 4 deletions(-) diff --git a/rustfs/src/storage/access.rs b/rustfs/src/storage/access.rs index 10b74164..8964d94d 100644 --- a/rustfs/src/storage/access.rs +++ b/rustfs/src/storage/access.rs @@ -817,11 +817,14 @@ impl S3Access for FS { authorize_request(req, Action::S3Action(S3Action::ListBucketMultipartUploadsAction)).await } - /// Checks whether the ListObjectVersions request has accesses to the resources. + /// Checks whether the `ListObjectVersions` request is authorized for the requested bucket. /// - /// This method returns `Ok(())` by default. - async fn list_object_versions(&self, _req: &mut S3Request) -> S3Result<()> { - Ok(()) + /// Returns `Ok(())` if the request is allowed, or an error if access is denied or another + /// authorization-related issue occurs. 
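+    ///
+    /// The check is enforced as `S3Action::ListBucketVersionsAction`, with the
+    /// bucket name taken from `req.input.bucket` (see the body below).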
+    async fn list_object_versions(&self, req: &mut S3Request) -> S3Result<()> {
+        let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");
+        req_info.bucket = Some(req.input.bucket.clone());
+        authorize_request(req, Action::S3Action(S3Action::ListBucketVersionsAction)).await
     }

     /// Checks whether the ListObjects request has accesses to the resources.

From 00119548d2d9a5e57d84d4e06acef3e274a54f5d Mon Sep 17 00:00:00 2001
From: loverustfs
Date: Sat, 10 Jan 2026 16:11:11 +0800
Subject: [PATCH 08/17] Ahead

---
 README.md    | 7 +++++++
 README_ZH.md | 9 +++++++++
 2 files changed, 16 insertions(+)

diff --git a/README.md b/README.md
index d106e8f6..ff657348 100644
--- a/README.md
+++ b/README.md
@@ -83,6 +83,13 @@ Unlike other storage systems, RustFS is released under the permissible Apache 2.
 | **Edge & IoT** | **Strong Edge Support**<br>Ideal for secure, innovative edge devices. | **Weak Edge Support**<br>Often too heavy for edge gateways. |
 | **Risk Profile** | **Enterprise Risk Mitigation**<br>Clear IP rights and safe for commercial use. | **Legal Risks**<br>Intellectual property ambiguity and usage restrictions. |

+
+## Staying ahead
+
+Star RustFS on GitHub and be instantly notified of new releases.
+
+https://github.com/user-attachments/assets/7ee40bb4-3e46-4eac-b0d0-5fbeb85ff8f3
+
 ## Quickstart

 To get started with RustFS, follow these steps:

diff --git a/README_ZH.md b/README_ZH.md
index 166ba1cf..fee8fc8d 100644
--- a/README_ZH.md
+++ b/README_ZH.md
@@ -86,6 +86,15 @@ RustFS 是一个基于 Rust 构建的高性能分布式对象存储系统。Rust
 | **成本** | **稳定且免费**<br>免费社区支持,稳定的商业定价。 | **高昂成本**<br>1PiB 的成本可能高达 250,000 美元。 |
 | **风险控制** | **企业级风险规避**<br>清晰的知识产权,商业使用安全无忧。 | **法律风险**<br>知识产权归属模糊及使用限制风险。 |

+
+## 保持领先
+
+在 GitHub 上为 RustFS 点赞,即可第一时间收到新版本发布通知。
+
+https://github.com/user-attachments/assets/7ee40bb4-3e46-4eac-b0d0-5fbeb85ff8f3
+
+
+
 ## 快速开始

 请按照以下步骤快速上手 RustFS:

From e614e530cf2f82f6957f67e1f47d23a1ce75dd06 Mon Sep 17 00:00:00 2001
From: loverustfs
Date: Sat, 10 Jan 2026 16:12:40 +0800
Subject: [PATCH 09/17] Modify ahead images url

---
 README.md    | 2 +-
 README_ZH.md | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index ff657348..dbc5f388 100644
--- a/README.md
+++ b/README.md
@@ -88,7 +88,7 @@ Unlike other storage systems, RustFS is released under the permissible Apache 2.

 Star RustFS on GitHub and be instantly notified of new releases.

-https://github.com/user-attachments/assets/7ee40bb4-3e46-4eac-b0d0-5fbeb85ff8f3
+

 ## Quickstart

diff --git a/README_ZH.md b/README_ZH.md
index fee8fc8d..175e7f2b 100644
--- a/README_ZH.md
+++ b/README_ZH.md
@@ -91,7 +91,7 @@ RustFS 是一个基于 Rust 构建的高性能分布式对象存储系统。Rust

 在 GitHub 上为 RustFS 点赞,即可第一时间收到新版本发布通知。

-https://github.com/user-attachments/assets/7ee40bb4-3e46-4eac-b0d0-5fbeb85ff8f3
+


From 703d961168bdba2b4d650a83729ef63dcced0942 Mon Sep 17 00:00:00 2001
From: loverustfs
Date: Sat, 10 Jan 2026 20:01:28 +0800
Subject: [PATCH 10/17] fix: honor bucket policy for authenticated users (#1460)

Co-authored-by: GatewayJ <835269233@qq.com>

---
 .../e2e_test/src/bucket_policy_check_test.rs  | 155 ++++++++++++++++++
 crates/e2e_test/src/lib.rs                    |   3 +
 rustfs/src/storage/access.rs                  |  47 +++++-
 3 files changed, 201 insertions(+), 4 deletions(-)
 create mode 100644 crates/e2e_test/src/bucket_policy_check_test.rs

diff --git a/crates/e2e_test/src/bucket_policy_check_test.rs b/crates/e2e_test/src/bucket_policy_check_test.rs
new file mode 100644
index 00000000..c0b18ec6
--- /dev/null
+++ b/crates/e2e_test/src/bucket_policy_check_test.rs
@@ -0,0 +1,155 @@
+// Copyright 2024 RustFS Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//! Regression test for Issue #1423
+//! Verifies that Bucket Policies are honored for Authenticated Users.
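+//!
+//! Scenario sketch: an admin creates a bucket and a user; the user is denied
+//! access until a bucket policy naming them as `Principal` is applied, after
+//! which List/Get/Put/Delete requests succeed.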
+ +use crate::common::{RustFSTestEnvironment, init_logging}; +use aws_sdk_s3::config::{Credentials, Region}; +use aws_sdk_s3::{Client, Config}; +use serial_test::serial; +use tracing::info; + +async fn create_user( + env: &RustFSTestEnvironment, + username: &str, + password: &str, +) -> Result<(), Box> { + let create_user_body = serde_json::json!({ + "secretKey": password, + "status": "enabled" + }) + .to_string(); + + let create_user_url = format!("{}/rustfs/admin/v3/add-user?accessKey={}", env.url, username); + crate::common::awscurl_put(&create_user_url, &create_user_body, &env.access_key, &env.secret_key).await?; + Ok(()) +} + +fn create_user_client(env: &RustFSTestEnvironment, access_key: &str, secret_key: &str) -> Client { + let credentials = Credentials::new(access_key, secret_key, None, None, "test-user"); + let config = Config::builder() + .credentials_provider(credentials) + .region(Region::new("us-east-1")) + .endpoint_url(&env.url) + .force_path_style(true) + .behavior_version_latest() + .build(); + + Client::from_conf(config) +} + +#[tokio::test] +#[serial] +async fn test_bucket_policy_authenticated_user() -> Result<(), Box> { + init_logging(); + info!("Starting test_bucket_policy_authenticated_user..."); + + let mut env = RustFSTestEnvironment::new().await?; + env.start_rustfs_server(vec![]).await?; + + let admin_client = env.create_s3_client(); + let bucket_name = "bucket-policy-auth-test"; + let object_key = "test-object.txt"; + let user_access = "testuser"; + let user_secret = "testpassword"; + + // 1. Create Bucket (Admin) + admin_client.create_bucket().bucket(bucket_name).send().await?; + + // 2. Create User (Admin API) + create_user(&env, user_access, user_secret).await?; + + // 3. Create User Client + let user_client = create_user_client(&env, user_access, user_secret); + + // 4. Verify Access Denied initially (No Policy) + let result = user_client.list_objects_v2().bucket(bucket_name).send().await; + if result.is_ok() { + return Err("Should be Access Denied initially".into()); + } + + // 5. Apply Bucket Policy Allowed User + let policy_json = serde_json::json!({ + "Version": "2012-10-17", + "Statement": [ + { + "Sid": "AllowTestUser", + "Effect": "Allow", + "Principal": { + "AWS": [user_access] + }, + "Action": [ + "s3:ListBucket", + "s3:GetObject", + "s3:PutObject", + "s3:DeleteObject" + ], + "Resource": [ + format!("arn:aws:s3:::{}", bucket_name), + format!("arn:aws:s3:::{}/*", bucket_name) + ] + } + ] + }) + .to_string(); + + admin_client + .put_bucket_policy() + .bucket(bucket_name) + .policy(&policy_json) + .send() + .await?; + + // 6. 
Verify Access Allowed (With Bucket Policy) + info!("Verifying PutObject..."); + user_client + .put_object() + .bucket(bucket_name) + .key(object_key) + .body(aws_sdk_s3::primitives::ByteStream::from_static(b"hello world")) + .send() + .await + .map_err(|e| format!("PutObject failed: {}", e))?; + + info!("Verifying ListObjects..."); + let list_res = user_client + .list_objects_v2() + .bucket(bucket_name) + .send() + .await + .map_err(|e| format!("ListObjects failed: {}", e))?; + assert_eq!(list_res.contents().len(), 1); + + info!("Verifying GetObject..."); + user_client + .get_object() + .bucket(bucket_name) + .key(object_key) + .send() + .await + .map_err(|e| format!("GetObject failed: {}", e))?; + + info!("Verifying DeleteObject..."); + user_client + .delete_object() + .bucket(bucket_name) + .key(object_key) + .send() + .await + .map_err(|e| format!("DeleteObject failed: {}", e))?; + + info!("Test Passed!"); + Ok(()) +} diff --git a/crates/e2e_test/src/lib.rs b/crates/e2e_test/src/lib.rs index cef028e5..b635afb7 100644 --- a/crates/e2e_test/src/lib.rs +++ b/crates/e2e_test/src/lib.rs @@ -29,6 +29,9 @@ mod data_usage_test; #[cfg(test)] mod kms; +#[cfg(test)] +mod bucket_policy_check_test; + // Special characters in path test modules #[cfg(test)] mod special_chars_test; diff --git a/rustfs/src/storage/access.rs b/rustfs/src/storage/access.rs index 8964d94d..e394c68f 100644 --- a/rustfs/src/storage/access.rs +++ b/rustfs/src/storage/access.rs @@ -68,6 +68,16 @@ pub async fn authorize_request(req: &mut S3Request, action: Action) -> S3R deny_only: false, }) .await + && !PolicySys::is_allowed(&BucketPolicyArgs { + bucket: req_info.bucket.as_deref().unwrap_or(""), + action: Action::S3Action(S3Action::DeleteObjectVersionAction), + is_owner: req_info.is_owner, + account: &cred.access_key, + groups: &cred.groups, + conditions: &conditions, + object: req_info.object.as_deref().unwrap_or(""), + }) + .await { return Err(s3_error!(AccessDenied, "Access Denied")); } @@ -89,8 +99,22 @@ pub async fn authorize_request(req: &mut S3Request, action: Action) -> S3R return Ok(()); } - if action == Action::S3Action(S3Action::ListBucketVersionsAction) - && iam_store + if PolicySys::is_allowed(&BucketPolicyArgs { + bucket: req_info.bucket.as_deref().unwrap_or(""), + action, + is_owner: req_info.is_owner, + account: &cred.access_key, + groups: &cred.groups, + conditions: &conditions, + object: req_info.object.as_deref().unwrap_or(""), + }) + .await + { + return Ok(()); + } + + if action == Action::S3Action(S3Action::ListBucketVersionsAction) { + if iam_store .is_allowed(&Args { account: &cred.access_key, groups: &cred.groups, @@ -103,8 +127,23 @@ pub async fn authorize_request(req: &mut S3Request, action: Action) -> S3R deny_only: false, }) .await - { - return Ok(()); + { + return Ok(()); + } + + if PolicySys::is_allowed(&BucketPolicyArgs { + bucket: req_info.bucket.as_deref().unwrap_or(""), + action: Action::S3Action(S3Action::ListBucketAction), + is_owner: req_info.is_owner, + account: &cred.access_key, + groups: &cred.groups, + conditions: &conditions, + object: req_info.object.as_deref().unwrap_or(""), + }) + .await + { + return Ok(()); + } } } else { let conditions = get_condition_values( From ddaa9e35eaf6cc429a002bd561fd0e06f359066d Mon Sep 17 00:00:00 2001 From: houseme Date: Sun, 11 Jan 2026 16:47:51 +0800 Subject: [PATCH 11/17] fix(http): Fix console bucket management functionality failure caused by RUSTFS_SERVER_DOMAINS (#1467) --- rustfs/src/server/http.rs | 3 ++- 1 file changed, 2 insertions(+), 1 
deletion(-) diff --git a/rustfs/src/server/http.rs b/rustfs/src/server/http.rs index 95debdcc..314494fe 100644 --- a/rustfs/src/server/http.rs +++ b/rustfs/src/server/http.rs @@ -250,7 +250,8 @@ pub async fn start_http_server( b.set_access(store.clone()); b.set_route(admin::make_admin_route(opt.console_enable)?); - if !opt.server_domains.is_empty() { + // console server does not need to setup virtual-hosted-style requests + if !opt.server_domains.is_empty() && !opt.console_enable { MultiDomain::new(&opt.server_domains).map_err(Error::other)?; // validate domains // add the default port number to the given server domains From 6b2eebee1d07399ef02c0863bd515b4412a5a560 Mon Sep 17 00:00:00 2001 From: houseme Date: Sun, 11 Jan 2026 17:45:16 +0800 Subject: [PATCH 12/17] fix: Remove secret and signature from the log (#1466) --- crates/ecstore/src/rpc/http_auth.rs | 11 +++-- rustfs/src/admin/mod.rs | 2 - rustfs/src/config/mod.rs | 74 ++++++++++++++++++++++++++--- 3 files changed, 75 insertions(+), 12 deletions(-) diff --git a/crates/ecstore/src/rpc/http_auth.rs b/crates/ecstore/src/rpc/http_auth.rs index e974e79b..5d69e280 100644 --- a/crates/ecstore/src/rpc/http_auth.rs +++ b/crates/ecstore/src/rpc/http_auth.rs @@ -108,14 +108,19 @@ pub fn verify_rpc_signature(url: &str, method: &Method, headers: &HeaderMap) -> } // Generate expected signature - let expected_signature = generate_signature(&secret, url, method, timestamp); // Compare signatures if signature != expected_signature { error!( - "verify_rpc_signature: Invalid signature: secret {}, url {}, method {}, timestamp {}, signature {}, expected_signature {}", - secret, url, method, timestamp, signature, expected_signature + "verify_rpc_signature: Invalid signature: url {}, method {}, timestamp {}, signature {}, expected_signature: {}***{}|{}", + url, + method, + timestamp, + signature, + expected_signature.chars().next().unwrap_or('*'), + expected_signature.chars().last().unwrap_or('*'), + expected_signature.len() ); return Err(std::io::Error::other("Invalid signature")); diff --git a/rustfs/src/admin/mod.rs b/rustfs/src/admin/mod.rs index 22f6a881..e554ace5 100644 --- a/rustfs/src/admin/mod.rs +++ b/rustfs/src/admin/mod.rs @@ -239,14 +239,12 @@ pub fn make_admin_route(console_enabled: bool) -> std::io::Result )?; // Performance profiling endpoints (available on all platforms, with platform-specific responses) - #[cfg(not(target_os = "windows"))] r.insert( Method::GET, format!("{}{}", ADMIN_PREFIX, "/debug/pprof/profile").as_str(), AdminOperation(&handlers::ProfileHandler {}), )?; - #[cfg(not(target_os = "windows"))] r.insert( Method::GET, format!("{}{}", ADMIN_PREFIX, "/debug/pprof/status").as_str(), diff --git a/rustfs/src/config/mod.rs b/rustfs/src/config/mod.rs index 00d068ba..87462eb2 100644 --- a/rustfs/src/config/mod.rs +++ b/rustfs/src/config/mod.rs @@ -47,7 +47,7 @@ const LONG_VERSION: &str = concat!( concat!("git status :\n", build::GIT_STATUS_FILE), ); -#[derive(Debug, Parser, Clone)] +#[derive(Parser, Clone)] #[command(version = SHORT_VERSION, long_version = LONG_VERSION)] pub struct Opt { /// DIR points to a directory on a filesystem. 
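     /// e.g. a single data directory such as `/data` (illustrative value).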
@@ -60,7 +60,11 @@ pub struct Opt { pub volumes: Vec, /// bind to a specific ADDRESS:PORT, ADDRESS can be an IP or hostname - #[arg(long, default_value_t = rustfs_config::DEFAULT_ADDRESS.to_string(), env = "RUSTFS_ADDRESS")] + #[arg( + long, + default_value_t = rustfs_config::DEFAULT_ADDRESS.to_string(), + env = "RUSTFS_ADDRESS" + )] pub address: String, /// Domain name used for virtual-hosted-style requests. @@ -73,23 +77,43 @@ pub struct Opt { pub server_domains: Vec, /// Access key used for authentication. - #[arg(long, default_value_t = rustfs_credentials::DEFAULT_ACCESS_KEY.to_string(), env = "RUSTFS_ACCESS_KEY")] + #[arg( + long, + default_value_t = rustfs_credentials::DEFAULT_ACCESS_KEY.to_string(), + env = "RUSTFS_ACCESS_KEY" + )] pub access_key: String, /// Secret key used for authentication. - #[arg(long, default_value_t = rustfs_credentials::DEFAULT_SECRET_KEY.to_string(), env = "RUSTFS_SECRET_KEY")] + #[arg( + long, + default_value_t = rustfs_credentials::DEFAULT_SECRET_KEY.to_string(), + env = "RUSTFS_SECRET_KEY" + )] pub secret_key: String, /// Enable console server - #[arg(long, default_value_t = rustfs_config::DEFAULT_CONSOLE_ENABLE, env = "RUSTFS_CONSOLE_ENABLE")] + #[arg( + long, + default_value_t = rustfs_config::DEFAULT_CONSOLE_ENABLE, + env = "RUSTFS_CONSOLE_ENABLE" + )] pub console_enable: bool, /// Console server bind address - #[arg(long, default_value_t = rustfs_config::DEFAULT_CONSOLE_ADDRESS.to_string(), env = "RUSTFS_CONSOLE_ADDRESS")] + #[arg( + long, + default_value_t = rustfs_config::DEFAULT_CONSOLE_ADDRESS.to_string(), + env = "RUSTFS_CONSOLE_ADDRESS" + )] pub console_address: String, /// Observability endpoint for trace, metrics and logs,only support grpc mode. - #[arg(long, default_value_t = rustfs_config::DEFAULT_OBS_ENDPOINT.to_string(), env = "RUSTFS_OBS_ENDPOINT")] + #[arg( + long, + default_value_t = rustfs_config::DEFAULT_OBS_ENDPOINT.to_string(), + env = "RUSTFS_OBS_ENDPOINT" + )] pub obs_endpoint: String, /// tls path for rustfs API and console. @@ -137,6 +161,42 @@ pub struct Opt { pub buffer_profile: String, } +impl std::fmt::Debug for Opt { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.debug_struct("Opt") + .field("volumes", &self.volumes) + .field("address", &self.address) + .field("server_domains", &self.server_domains) + .field("access_key", &self.access_key) + .field("secret_key", &Opt::mask_sensitive(Some(&self.secret_key))) // Hide sensitive values + .field("console_enable", &self.console_enable) + .field("console_address", &self.console_address) + .field("obs_endpoint", &self.obs_endpoint) + .field("tls_path", &self.tls_path) + .field("license", &Opt::mask_sensitive(self.license.as_ref())) + .field("region", &self.region) + .field("kms_enable", &self.kms_enable) + .field("kms_backend", &self.kms_backend) + .field("kms_key_dir", &self.kms_key_dir) + .field("kms_vault_address", &self.kms_vault_address) + .field("kms_vault_token", &Opt::mask_sensitive(self.kms_vault_token.as_ref())) + .field("kms_default_key_id", &self.kms_default_key_id) + .field("buffer_profile_disable", &self.buffer_profile_disable) + .field("buffer_profile", &self.buffer_profile) + .finish() + } +} + +impl Opt { + /// Mask sensitive information in Option + fn mask_sensitive(s: Option<&String>) -> String { + match s { + None => "".to_string(), + Some(s) => format!("{}***{}|{}", s.chars().next().unwrap_or('*'), s.chars().last().unwrap_or('*'), s.len()), + } + } +} + // lazy_static::lazy_static! 
{ // pub(crate) static ref OPT: OnceLock = OnceLock::new(); // } From 760cb1d734593a08b24fdfdcfda6495cd4dfd41f Mon Sep 17 00:00:00 2001 From: houseme Date: Sun, 11 Jan 2026 19:53:51 +0800 Subject: [PATCH 13/17] Fix Windows Path Separator Handling in rustfs_utils (#1464) Co-authored-by: reatang --- Cargo.lock | 341 ++++--- Cargo.toml | 9 +- crates/ecstore/Cargo.toml | 2 +- crates/ecstore/src/bucket/utils.rs | 6 +- crates/ecstore/src/config/com.rs | 6 +- crates/ecstore/src/data_usage.rs | 10 +- crates/ecstore/src/disk/local.rs | 122 ++- crates/ecstore/src/disk/os.rs | 4 +- crates/ecstore/src/pools.rs | 6 +- crates/ecstore/src/set_disk.rs | 8 +- crates/ecstore/src/store_list_objects.rs | 20 +- crates/ecstore/src/tier/tier.rs | 6 +- crates/ecstore/src/tier/warm_backend_s3.rs | 6 +- crates/ecstore/src/tier/warm_backend_s3sdk.rs | 10 +- crates/iam/src/store/object.rs | 4 +- crates/utils/Cargo.toml | 2 +- crates/utils/src/path.rs | 922 ++++++++++++++---- rustfs/src/admin/handlers/bucket_meta.rs | 15 +- 18 files changed, 999 insertions(+), 500 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 821a203d..70720aad 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -430,7 +430,7 @@ dependencies = [ "arrow-schema", "chrono", "half", - "indexmap 2.12.1", + "indexmap 2.13.0", "itoa", "lexical-core", "memchr", @@ -532,7 +532,7 @@ checksum = "3109e49b1e4909e9db6515a30c633684d68cdeaa252f215214cb4fa1a5bfee2c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", "synstructure 0.13.2", ] @@ -544,7 +544,7 @@ checksum = "7b18050c2cd6fe86c3a76584ef5e0baf286d038cda203eb6223df2cc413565f7" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -612,7 +612,7 @@ checksum = "3b43422f69d8ff38f95f1b2bb76517c91589a924d1559a0e935d7c8ce0274c11" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -623,7 +623,7 @@ checksum = "9035ad2d096bed7955a320ee7e2230574d28fd3c3a0f186cbea1ff3c7eed5dbb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -649,7 +649,7 @@ checksum = "99e1aca718ea7b89985790c94aad72d77533063fe00bc497bb79a7c2dae6a661" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -1280,15 +1280,16 @@ dependencies = [ [[package]] name = "blake3" -version = "1.8.2" +version = "1.8.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3888aaa89e4b2a40fca9848e400f6a658a5a3978de7be858e209cafa8be9a4a0" +checksum = "2468ef7d57b3fb7e16b576e8377cdbde2320c60e1491e961d11da40fc4f02a2d" dependencies = [ "arrayref", "arrayvec", "cc", "cfg-if", - "constant_time_eq", + "constant_time_eq 0.4.2", + "cpufeatures", "memmap2", "rayon-core", ] @@ -1333,9 +1334,9 @@ dependencies = [ [[package]] name = "bon" -version = "3.8.1" +version = "3.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ebeb9aaf9329dff6ceb65c689ca3db33dbf15f324909c60e4e5eef5701ce31b1" +checksum = "234655ec178edd82b891e262ea7cf71f6584bcd09eff94db786be23f1821825c" dependencies = [ "bon-macros", "rustversion", @@ -1343,17 +1344,17 @@ dependencies = [ [[package]] name = "bon-macros" -version = "3.8.1" +version = "3.8.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "77e9d642a7e3a318e37c2c9427b5a6a48aa1ad55dcd986f3034ab2239045a645" +checksum = "89ec27229c38ed0eb3c0feee3d2c1d6a4379ae44f418a29a658890e062d8f365" dependencies = [ - "darling 0.21.3", + "darling 0.23.0", "ident_case", "prettyplease", "proc-macro2", 
"quote", "rustversion", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -1528,9 +1529,9 @@ dependencies = [ [[package]] name = "cc" -version = "1.2.51" +version = "1.2.52" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7a0aeaff4ff1a90589618835a598e545176939b97874f7abc7851caa0618f203" +checksum = "cd4932aefd12402b36c60956a4fe0035421f544799057659ff86f923657aada3" dependencies = [ "find-msvc-tools", "jobserver", @@ -1690,7 +1691,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -1747,9 +1748,9 @@ checksum = "c2459377285ad874054d797f3ccebf984978aa39129f6eafde5cdc8315b612f8" [[package]] name = "const-oid" -version = "0.10.1" +version = "0.10.2" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0dabb6555f92fb9ee4140454eb5dcd14c7960e1225c6d1a6cc361f032947713e" +checksum = "a6ef517f0926dd24a1582492c791b6a4818a4d94e789a334894aa15b0d12f55c" [[package]] name = "const-random" @@ -1788,7 +1789,7 @@ checksum = "1d3e0f24ee268386bd3ab4e04fc60df9a818ad801b5ffe592f388a6acc5053fb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -1817,6 +1818,12 @@ version = "0.3.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "7c74b8349d32d297c9134b8c88677813a227df8f779daa29bfc29c183fe3dca6" +[[package]] +name = "constant_time_eq" +version = "0.4.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3d52eff69cd5e647efe296129160853a42795992097e8af39800e1060caeea9b" + [[package]] name = "convert_case" version = "0.10.0" @@ -2037,9 +2044,9 @@ dependencies = [ [[package]] name = "crypto-bigint" -version = "0.7.0-rc.14" +version = "0.7.0-rc.15" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c9c6daa2049db6a5fad90a981b8c63f023dbaf75a0fae73db4dcf234556fc957" +checksum = "1a9e36ac79ac44866b74e08a0b4925f97b984e3fff17680d2c6fbce8317ab0f6" dependencies = [ "ctutils", "num-traits", @@ -2075,7 +2082,7 @@ version = "0.7.0-pre.5" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "da0b07a7a616370e8b6efca0c6a25e5f4c6d02fde11f3d570e4af64d8ed7e2e9" dependencies = [ - "crypto-bigint 0.7.0-rc.14", + "crypto-bigint 0.7.0-rc.15", "libm", "rand_core 0.10.0-rc-3", ] @@ -2167,7 +2174,7 @@ checksum = "f46882e17999c6cc590af592290432be3bce0428cb0d5f8b6715e4dc7b383eb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -2235,7 +2242,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -2249,7 +2256,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -2262,7 +2269,7 @@ dependencies = [ "proc-macro2", "quote", "strsim 0.11.1", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -2284,7 +2291,7 @@ checksum = "fc34b93ccb385b40dc71c6fceac4b2ad23662c7eeb248cf10d529b7e055b6ead" dependencies = [ "darling_core 0.20.11", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -2295,7 +2302,7 @@ checksum = "d38308df82d1080de0afee5d069fa14b0326a88c14f15c5ccda35b4a6c414c81" dependencies = [ "darling_core 0.21.3", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -2306,7 +2313,7 @@ checksum = "ac3984ec7bd6cfa798e62b4a642426a5be0e68f9401cfc2a01e3fa9ea2fcdb8d" dependencies = [ "darling_core 0.23.0", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -2325,9 +2332,9 @@ dependencies = [ [[package]] name = 
"data-encoding" -version = "2.9.0" +version = "2.10.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2a2330da5de22e8a3cb63252ce2abb30116bf5265e89c0e01bc17015ce30a476" +checksum = "d7a1e2f27636f116493b8b860f5546edb47c8d8f8ea73e1d2a20be88e28d1fea" [[package]] name = "datafusion" @@ -2446,7 +2453,7 @@ dependencies = [ "chrono", "half", "hashbrown 0.14.5", - "indexmap 2.12.1", + "indexmap 2.13.0", "libc", "log", "object_store", @@ -2644,7 +2651,7 @@ dependencies = [ "datafusion-functions-aggregate-common", "datafusion-functions-window-common", "datafusion-physical-expr-common", - "indexmap 2.12.1", + "indexmap 2.13.0", "itertools 0.14.0", "paste", "recursive", @@ -2660,7 +2667,7 @@ checksum = "5ce2fb1b8c15c9ac45b0863c30b268c69dc9ee7a1ee13ecf5d067738338173dc" dependencies = [ "arrow", "datafusion-common", - "indexmap 2.12.1", + "indexmap 2.13.0", "itertools 0.14.0", "paste", ] @@ -2804,7 +2811,7 @@ checksum = "1063ad4c9e094b3f798acee16d9a47bd7372d9699be2de21b05c3bd3f34ab848" dependencies = [ "datafusion-doc", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -2819,7 +2826,7 @@ dependencies = [ "datafusion-expr", "datafusion-expr-common", "datafusion-physical-expr", - "indexmap 2.12.1", + "indexmap 2.13.0", "itertools 0.14.0", "log", "recursive", @@ -2842,11 +2849,11 @@ dependencies = [ "datafusion-physical-expr-common", "half", "hashbrown 0.14.5", - "indexmap 2.12.1", + "indexmap 2.13.0", "itertools 0.14.0", "parking_lot", "paste", - "petgraph 0.8.3", + "petgraph", ] [[package]] @@ -2920,7 +2927,7 @@ dependencies = [ "futures", "half", "hashbrown 0.14.5", - "indexmap 2.12.1", + "indexmap 2.13.0", "itertools 0.14.0", "log", "parking_lot", @@ -2970,7 +2977,7 @@ dependencies = [ "chrono", "datafusion-common", "datafusion-expr", - "indexmap 2.12.1", + "indexmap 2.13.0", "log", "recursive", "regex", @@ -3000,7 +3007,7 @@ checksum = "780eb241654bf097afb00fc5f054a09b687dad862e485fdcf8399bb056565370" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3030,7 +3037,7 @@ version = "0.8.0-rc.10" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "02c1d73e9668ea6b6a28172aa55f3ebec38507131ce179051c8033b5c6037653" dependencies = [ - "const-oid 0.10.1", + "const-oid 0.10.2", "pem-rfc7468 1.0.0", "zeroize", ] @@ -3067,7 +3074,7 @@ checksum = "1e567bd82dcff979e4b03460c307b3cdc9e96fde3d73bed1496d2bc75d9dd62a" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3109,7 +3116,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3129,7 +3136,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ab63b0e2bf4d5928aff72e83a7dace85d7bba5fe12dcc3c5a572d78caffd3f3c" dependencies = [ "derive_builder_core 0.20.2", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3151,7 +3158,7 @@ dependencies = [ "proc-macro2", "quote", "rustc_version", - "syn 2.0.113", + "syn 2.0.114", "unicode-xid", ] @@ -3189,7 +3196,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ebf9423bafb058e4142194330c52273c343f8a5beb7176d052f0e73b17dd35b9" dependencies = [ "block-buffer 0.11.0", - "const-oid 0.10.1", + "const-oid 0.10.2", "crypto-common 0.2.0-rc.9", "subtle", ] @@ -3223,7 +3230,7 @@ checksum = "97369cbbc041bc366949bc74d34658d6cda5621039731c6310521892a3a20ae0" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3428,7 +3435,7 @@ 
dependencies = [ "once_cell", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3449,7 +3456,7 @@ dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3488,7 +3495,7 @@ checksum = "44f23cf4b44bfce11a86ace86f8a73ffdec849c9fd00a386a53d278bd9e81fb3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3610,9 +3617,9 @@ dependencies = [ [[package]] name = "find-msvc-tools" -version = "0.1.6" +version = "0.1.7" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "645cbb3a84e60b7531617d5ae4e57f7e27308f6445f5abf653209ea76dec8dff" +checksum = "f449e6c6c08c865631d4890cfacf252b3d396c9bcc83adb6623cdb02a8336c41" [[package]] name = "findshlibs" @@ -3820,7 +3827,7 @@ checksum = "162ee34ebcb7c64a8abebc059ce0fee27c2262618d7b60ed8faf72fef13c3650" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -3930,7 +3937,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -4026,7 +4033,7 @@ dependencies = [ "opentelemetry-semantic-conventions", "percent-encoding", "pin-project", - "prost 0.14.1", + "prost 0.14.3", "prost-types", "reqwest", "rustc_version", @@ -4136,7 +4143,7 @@ dependencies = [ "mime", "percent-encoding", "pin-project", - "prost 0.14.1", + "prost 0.14.3", "prost-types", "reqwest", "serde", @@ -4204,9 +4211,9 @@ dependencies = [ [[package]] name = "h2" -version = "0.4.12" +version = "0.4.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f3c0b69cfcb4e1b9f1bf2f53f95f766e4661169728ec61cd3fe5a0166f2d1386" +checksum = "2f44da3a8150a6703ed5d34e164b875fd14c2cdab9af1252a9a1020bde2bdc54" dependencies = [ "atomic-waker", "bytes", @@ -4214,7 +4221,7 @@ dependencies = [ "futures-core", "futures-sink", "http 1.4.0", - "indexmap 2.12.1", + "indexmap 2.13.0", "slab", "tokio", "tokio-util", @@ -4304,7 +4311,7 @@ dependencies = [ "proc-macro-error2", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -4753,9 +4760,9 @@ dependencies = [ [[package]] name = "indexmap" -version = "2.12.1" +version = "2.13.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0ad4bb2b565bca0645f4d68c5c9af97fba094e9791da685bf83cb5f3ce74acf2" +checksum = "7714e70437a7dc3ac8eb7e6f8df75fd8eb422675fc7678aff7364301092b1017" dependencies = [ "equivalent", "hashbrown 0.16.1", @@ -4770,7 +4777,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "232929e1d75fe899576a3d5c7416ad0d88dbfbb3c3d6aa00873a7408a50ddb88" dependencies = [ "ahash", - "indexmap 2.12.1", + "indexmap 2.13.0", "is-terminal", "itoa", "log", @@ -4793,7 +4800,7 @@ dependencies = [ "crossbeam-utils", "dashmap", "env_logger", - "indexmap 2.12.1", + "indexmap 2.13.0", "itoa", "log", "num-format", @@ -5003,7 +5010,7 @@ dependencies = [ "p384", "pem", "rand 0.8.5", - "rsa 0.9.9", + "rsa 0.9.10", "serde", "serde_json", "sha2 0.10.9", @@ -5051,7 +5058,7 @@ dependencies = [ "proc-macro2", "quote", "regex", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -5128,9 +5135,9 @@ checksum = "2c4a545a15244c7d945065b5d392b2d2d7f21526fba56ce51467b06ed445e8f7" [[package]] name = "libc" -version = "0.2.179" +version = "0.2.180" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c5a2d376baa530d1238d133232d15e239abad80d05838b4b59354e5268af431f" +checksum = 
"bcc35a38544a891a5f7c865aca548a982ccb3b8650a5b06d0fd33a10283c56fc" [[package]] name = "libcrux-intrinsics" @@ -5402,9 +5409,9 @@ dependencies = [ [[package]] name = "lzma-rust2" -version = "0.15.4" +version = "0.15.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "48172246aa7c3ea28e423295dd1ca2589a24617cc4e588bb8cfe177cb2c54d95" +checksum = "7fa48f5024824ecd3e8282cc948bd46fbd095aed5a98939de0594601a59b4e2b" dependencies = [ "crc", "sha2 0.10.9", @@ -5626,7 +5633,7 @@ dependencies = [ "proc-macro2", "quote", "serde", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -6057,7 +6064,7 @@ dependencies = [ "opentelemetry-http", "opentelemetry-proto", "opentelemetry_sdk", - "prost 0.14.1", + "prost 0.14.3", "reqwest", "thiserror 2.0.17", "tracing", @@ -6071,7 +6078,7 @@ checksum = "a7175df06de5eaee9909d4805a3d07e28bb752c34cab57fa9cff549da596b30f" dependencies = [ "opentelemetry", "opentelemetry_sdk", - "prost 0.14.1", + "prost 0.14.3", "tonic", "tonic-prost", ] @@ -6392,16 +6399,6 @@ version = "2.3.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9b4f627cb1b25917193a259e49bdad08f671f8d9708acfd5fe0a8c1455d87220" -[[package]] -name = "petgraph" -version = "0.7.1" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3672b37090dbd86368a4145bc067582552b29c27377cad4e0a306c97f9bd7772" -dependencies = [ - "fixedbitset", - "indexmap 2.12.1", -] - [[package]] name = "petgraph" version = "0.8.3" @@ -6410,7 +6407,7 @@ checksum = "8701b58ea97060d5e5b155d383a69952a60943f0e6dfe30b04c287beb0b27455" dependencies = [ "fixedbitset", "hashbrown 0.15.5", - "indexmap 2.12.1", + "indexmap 2.13.0", "serde", ] @@ -6464,7 +6461,7 @@ dependencies = [ "phf_shared 0.11.3", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -6502,7 +6499,7 @@ checksum = "6e918e4ff8c4549eb882f14b3a4bc8c8bc93de829416eacf579f1207a8fbf861" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -6762,7 +6759,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "479ca8adacdd7ce8f1fb39ce9ecccbfe93a3f1344b3d0d97f20bc0196208f62b" dependencies = [ "proc-macro2", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -6802,14 +6799,14 @@ dependencies = [ "proc-macro-error-attr2", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] name = "proc-macro2" -version = "1.0.104" +version = "1.0.105" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9695f8df41bb4f3d222c95a67532365f569318332d03d5f3f67f37b20e6ebdf0" +checksum = "535d180e0ecab6268a3e718bb9fd44db66bbbc256257165fc699dadf70d16fe7" dependencies = [ "unicode-ident", ] @@ -6840,33 +6837,32 @@ dependencies = [ [[package]] name = "prost" -version = "0.14.1" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7231bd9b3d3d33c86b58adbac74b5ec0ad9f496b19d22801d773636feaa95f3d" +checksum = "d2ea70524a2f82d518bce41317d0fae74151505651af45faf1ffbd6fd33f0568" dependencies = [ "bytes", - "prost-derive 0.14.1", + "prost-derive 0.14.3", ] [[package]] name = "prost-build" -version = "0.14.1" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ac6c3320f9abac597dcbc668774ef006702672474aad53c6d596b62e487b40b1" +checksum = "343d3bd7056eda839b03204e68deff7d1b13aba7af2b2fd16890697274262ee7" dependencies = [ "heck", "itertools 0.14.0", "log", "multimap", - "once_cell", - "petgraph 0.7.1", + "petgraph", "prettyplease", 
- "prost 0.14.1", + "prost 0.14.3", "prost-types", "pulldown-cmark", "pulldown-cmark-to-cmark", "regex", - "syn 2.0.113", + "syn 2.0.114", "tempfile", ] @@ -6880,29 +6876,29 @@ dependencies = [ "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] name = "prost-derive" -version = "0.14.1" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "9120690fafc389a67ba3803df527d0ec9cbbc9cc45e4cc20b332996dfb672425" +checksum = "27c6023962132f4b30eb4c172c91ce92d933da334c59c23cddee82358ddafb0b" dependencies = [ "anyhow", "itertools 0.14.0", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] name = "prost-types" -version = "0.14.1" +version = "0.14.3" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b9b4db3d6da204ed77bb26ba83b6122a73aeb2e87e25fbf7ad2e84c4ccbf8f72" +checksum = "8991c4cbdb8bc5b11f0b074ffe286c30e523de90fee5ba8132f1399f23cb3dd7" dependencies = [ - "prost 0.14.1", + "prost 0.14.3", ] [[package]] @@ -6938,7 +6934,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b4aeaa1f2460f1d348eeaeed86aea999ce98c1bded6f089ff8514c9d9dbdc973" dependencies = [ "anyhow", - "indexmap 2.12.1", + "indexmap 2.13.0", "log", "protobuf", "protobuf-support", @@ -6989,9 +6985,9 @@ dependencies = [ [[package]] name = "pulldown-cmark-to-cmark" -version = "21.1.0" +version = "22.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "8246feae3db61428fd0bb94285c690b460e4517d83152377543ca802357785f1" +checksum = "50793def1b900256624a709439404384204a5dc3a6ec580281bfaac35e882e90" dependencies = [ "pulldown-cmark", ] @@ -7083,9 +7079,9 @@ dependencies = [ [[package]] name = "quote" -version = "1.0.42" +version = "1.0.43" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a338cc41d27e6cc6dce6cefc13a0729dfbb81c262b1f519331575dd80ef3067f" +checksum = "dc74d9a594b72ae6656596548f56f667211f8a97b3d4c3d467150794690dc40a" dependencies = [ "proc-macro2", ] @@ -7230,7 +7226,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "76009fbe0614077fc1a2ce255e3a1881a2e3a3527097d5dc6d8212c585e7e38b" dependencies = [ "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -7291,7 +7287,7 @@ checksum = "b7186006dcb21920990093f30e3dea63b7d6e977bf1256be20c3563a5db070da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -7458,7 +7454,7 @@ dependencies = [ "proc-macro2", "quote", "serde_json", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -7482,9 +7478,9 @@ dependencies = [ [[package]] name = "rsa" -version = "0.9.9" +version = "0.9.10" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "40a0376c50d0358279d9d643e4bf7b7be212f1f4ff1da9070a7b54d22ef75c88" +checksum = "b8573f03f5883dcaebdfcf4725caa1ecb9c15b2ef50c43a07b816e06799bb12d" dependencies = [ "const-oid 0.9.6", "digest 0.10.7", @@ -7506,8 +7502,8 @@ version = "0.10.0-rc.11" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d27d813937fdf8e9ad15e3e422a55da4021d29639000139ca19d99f3949060da" dependencies = [ - "const-oid 0.10.1", - "crypto-bigint 0.7.0-rc.14", + "const-oid 0.10.2", + "crypto-bigint 0.7.0-rc.15", "crypto-primes", "digest 0.11.0-rc.5", "pkcs1 0.8.0-rc.4", @@ -7544,7 +7540,7 @@ dependencies = [ "regex", "relative-path", "rustc_version", - "syn 2.0.113", + "syn 2.0.114", "unicode-ident", ] @@ -7693,7 +7689,7 @@ dependencies = [ "quote", 
"rust-embed-utils", "shellexpand", - "syn 2.0.113", + "syn 2.0.114", "walkdir", ] @@ -7974,6 +7970,7 @@ dependencies = [ "bytesize", "chrono", "criterion", + "dunce", "enumset", "faster-hex", "flatbuffers", @@ -8252,7 +8249,7 @@ name = "rustfs-protos" version = "0.0.5" dependencies = [ "flatbuffers", - "prost 0.14.1", + "prost 0.14.3", "rustfs-common", "tonic", "tonic-prost", @@ -8703,7 +8700,7 @@ dependencies = [ "proc-macro2", "quote", "serde_derive_internals", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -8839,7 +8836,7 @@ checksum = "d540f220d3187173da220f885ab66608367b6574e925011a9353e4badda91d79" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -8850,7 +8847,7 @@ checksum = "18d26a20a969b9e3fdf2fc2d9f21eda6c40e2de84c9408bb5d3b05d499aae711" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -8917,7 +8914,7 @@ dependencies = [ "chrono", "hex", "indexmap 1.9.3", - "indexmap 2.12.1", + "indexmap 2.13.0", "schemars 0.9.0", "schemars 1.2.0", "serde_core", @@ -8935,7 +8932,7 @@ dependencies = [ "darling 0.21.3", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -8971,7 +8968,7 @@ checksum = "6f50427f258fb77356e4cd4aa0e87e2bd2c66dbcee41dc405282cae2bfc26c83" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -9226,7 +9223,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -9312,7 +9309,7 @@ checksum = "da5fc6819faabb412da764b99d3b713bb55083c11e7e0c00144d386cd6a1939c" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -9367,7 +9364,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "6ad6a09263583e83e934fcd436b7e3bb9d69602e2feef3787adb615c1fe3a343" dependencies = [ "base64ct", - "crypto-bigint 0.7.0-rc.14", + "crypto-bigint 0.7.0-rc.15", "digest 0.11.0-rc.5", "pem-rfc7468 1.0.0", "subtle", @@ -9476,7 +9473,7 @@ dependencies = [ "heck", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -9487,9 +9484,9 @@ checksum = "13c2bddecc57b384dee18652358fb23172facb8a2c51ccc10d74c157bdea3292" [[package]] name = "suppaftp" -version = "7.0.7" +version = "7.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ba8928c89e226be233f0eb1594e9bd023f72a948dc06581c0d908387f57de1de" +checksum = "69a15b325bbe0a1f85de3dbf988a3a14e9cd321537dffcbf6641381dd6d7586f" dependencies = [ "async-trait", "chrono", @@ -9618,9 +9615,9 @@ dependencies = [ [[package]] name = "syn" -version = "2.0.113" +version = "2.0.114" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "678faa00651c9eb72dd2020cbdf275d92eccb2400d568e419efdd64838145cb4" +checksum = "d4d107df263a3013ef9b1879b0df87d706ff80f65a86ea879bd9c31f9b307c2a" dependencies = [ "proc-macro2", "quote", @@ -9665,7 +9662,7 @@ checksum = "728a70f3dbaf5bab7f0c4b1ac8d7ae5ea60a4b5549c8a5914361c99147a709d2" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -9750,7 +9747,7 @@ dependencies = [ "cfg-if", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -9761,7 +9758,7 @@ checksum = "5c89e72a01ed4c579669add59014b9a524d609c0c88c6a585ce37485879f6ffb" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", "test-case-core", ] @@ -9791,7 +9788,7 @@ checksum = "4fee6c4efc90059e10f81e6d42c60a18f76588c3d74cb83a0b242a2b6c7504c1" dependencies = [ "proc-macro2", 
"quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -9802,7 +9799,7 @@ checksum = "3ff15c8ecd7de3849db632e14d18d2571fa09dfc5ed93479bc4485c7a517c913" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -9951,7 +9948,7 @@ checksum = "2d2e76690929402faae40aebdda620a2c0e25dd6d3b9afe48867dfd95991f4bd" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -9979,7 +9976,7 @@ checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -10064,7 +10061,7 @@ version = "0.22.27" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.13.0", "serde", "serde_spanned", "toml_datetime 0.6.11", @@ -10078,7 +10075,7 @@ version = "0.23.10+spec-1.0.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" dependencies = [ - "indexmap 2.12.1", + "indexmap 2.13.0", "toml_datetime 0.7.5+spec-1.1.0", "toml_parser", "winnow", @@ -10140,7 +10137,7 @@ dependencies = [ "prettyplease", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -10150,7 +10147,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "66bd50ad6ce1252d87ef024b3d64fe4c3cf54a86fb9ef4c631fdd0ded7aeaa67" dependencies = [ "bytes", - "prost 0.14.1", + "prost 0.14.3", "tonic", ] @@ -10165,7 +10162,7 @@ dependencies = [ "prost-build", "prost-types", "quote", - "syn 2.0.113", + "syn 2.0.114", "tempfile", "tonic-build", ] @@ -10178,7 +10175,7 @@ checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9" dependencies = [ "futures-core", "futures-util", - "indexmap 2.12.1", + "indexmap 2.13.0", "pin-project-lite", "slab", "sync_wrapper", @@ -10258,7 +10255,7 @@ checksum = "7490cfa5ec963746568740651ac6781f701c9c5ea257c58e057f3ba8cf69e8da" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -10404,9 +10401,9 @@ dependencies = [ [[package]] name = "unicase" -version = "2.8.1" +version = "2.9.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "75b844d17643ee918803943289730bec8aac480150456169e647ed0b576ba539" +checksum = "dbc4bc3a9f746d862c45cb89d705aa10f187bb96c76001afab07a0d35ce60142" [[package]] name = "unicode-ident" @@ -10516,7 +10513,7 @@ checksum = "39d11901c36b3650df7acb0f9ebe624f35b5ac4e1922ecd3c57f444648429594" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -10672,7 +10669,7 @@ dependencies = [ "bumpalo", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", "wasm-bindgen-shared", ] @@ -10878,7 +10875,7 @@ checksum = "053e2e040ab57b9dc951b72c264860db7eb3b0200ba345b4e4c3b14f67855ddf" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -10889,7 +10886,7 @@ checksum = "3f316c4a2570ba26bbec722032c4099d8c8bc095efccdc15688708623367e358" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -11178,7 +11175,7 @@ dependencies = [ "darling 0.20.11", "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -11287,28 +11284,28 @@ checksum = "b659052874eb698efe5b9e8cf382204678a0086ebf46982b79d6ca3182927e5d" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 
2.0.114", "synstructure 0.13.2", ] [[package]] name = "zerocopy" -version = "0.8.31" +version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "fd74ec98b9250adb3ca554bdde269adf631549f51d8a8f8f0a10b50f1cb298c3" +checksum = "668f5168d10b9ee831de31933dc111a459c97ec93225beb307aed970d1372dfd" dependencies = [ "zerocopy-derive", ] [[package]] name = "zerocopy-derive" -version = "0.8.31" +version = "0.8.33" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d8a8d209fdf45cf5138cbb5a506f6b52522a25afccc534d1475dad8e31105c6a" +checksum = "2c7962b26b0a8685668b671ee4b54d007a67d4eaf05fda79ac0ecf41e32270f1" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -11328,7 +11325,7 @@ checksum = "d71e5d6e06ab090c67b5e44993ec16b72dcbaabc526db883a360057678b48502" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", "synstructure 0.13.2", ] @@ -11349,7 +11346,7 @@ checksum = "85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -11382,7 +11379,7 @@ checksum = "eadce39539ca5cb3985590102671f2567e659fca9666581ad3411d59207951f3" dependencies = [ "proc-macro2", "quote", - "syn 2.0.113", + "syn 2.0.114", ] [[package]] @@ -11394,14 +11391,14 @@ dependencies = [ "aes 0.8.4", "arbitrary", "bzip2 0.6.1", - "constant_time_eq", + "constant_time_eq 0.3.1", "crc32fast", "deflate64", "flate2", "generic-array 0.14.7", "getrandom 0.3.4", "hmac 0.12.1", - "indexmap 2.12.1", + "indexmap 2.13.0", "lzma-rust2", "memchr", "pbkdf2 0.12.2", @@ -11421,9 +11418,9 @@ checksum = "40990edd51aae2c2b6907af74ffb635029d5788228222c4bb811e9351c0caad3" [[package]] name = "zmij" -version = "1.0.10" +version = "1.0.12" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "30e0d8dffbae3d840f64bda38e28391faef673a7b5a6017840f2a106c8145868" +checksum = "2fc5a66a20078bf1251bde995aa2fdcc4b800c70b5d92dd2c62abc5c60f679f8" [[package]] name = "zopfli" diff --git a/Cargo.toml b/Cargo.toml index b09e7730..cc839ea8 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -130,7 +130,7 @@ bytesize = "2.3.1" byteorder = "1.5.0" flatbuffers = "25.12.19" form_urlencoded = "1.2.2" -prost = "0.14.1" +prost = "0.14.3" quick-xml = "0.38.4" rmcp = { version = "0.12.0" } rmp = { version = "0.8.15" } @@ -143,7 +143,7 @@ schemars = "1.2.0" # Cryptography and Security aes-gcm = { version = "0.11.0-rc.2", features = ["rand_core"] } argon2 = { version = "0.6.0-rc.5" } -blake3 = { version = "1.8.2", features = ["rayon", "mmap"] } +blake3 = { version = "1.8.3", features = ["rayon", "mmap"] } chacha20poly1305 = { version = "0.11.0-rc.2" } crc-fast = "1.6.0" hmac = { version = "0.13.0-rc.3" } @@ -184,6 +184,7 @@ criterion = { version = "0.8", features = ["html_reports"] } crossbeam-queue = "0.3.12" datafusion = "51.0.0" derive_builder = "0.20.2" +dunce = "1.0.5" enumset = "1.1.10" faster-hex = "0.10.0" flate2 = "1.1.5" @@ -197,7 +198,7 @@ hex-simd = "0.8.0" highway = { version = "1.3.0" } ipnetwork = { version = "0.21.1", features = ["serde"] } lazy_static = "1.5.0" -libc = "0.2.179" +libc = "0.2.180" libsystemd = "0.7.2" local-ip-address = "0.6.8" lz4 = "1.28.1" @@ -270,7 +271,7 @@ libunftp = "0.21.0" russh = { version = "0.56.0", features = ["aws-lc-rs", "rsa"], default-features = false } russh-sftp = "2.1.1" ssh-key = { version = "0.7.0-rc.4", features = ["std", "rsa", "ed25519"] } -suppaftp = { version = "7.0.7", features = ["tokio", 
"tokio-rustls", "rustls"] } +suppaftp = { version = "7.1.0", features = ["tokio", "tokio-rustls", "rustls"] } rcgen = "0.14.6" # Performance Analysis and Memory Profiling diff --git a/crates/ecstore/Cargo.toml b/crates/ecstore/Cargo.toml index d4fe7d4c..5d6e2ff1 100644 --- a/crates/ecstore/Cargo.toml +++ b/crates/ecstore/Cargo.toml @@ -48,6 +48,7 @@ async-trait.workspace = true bytes.workspace = true byteorder = { workspace = true } chrono.workspace = true +dunce.workspace = true glob = { workspace = true } thiserror.workspace = true flatbuffers.workspace = true @@ -109,7 +110,6 @@ google-cloud-auth = { workspace = true } aws-config = { workspace = true } faster-hex = { workspace = true } - [dev-dependencies] tokio = { workspace = true, features = ["rt-multi-thread", "macros"] } criterion = { workspace = true, features = ["html_reports"] } diff --git a/crates/ecstore/src/bucket/utils.rs b/crates/ecstore/src/bucket/utils.rs index a012798b..8eb60ccb 100644 --- a/crates/ecstore/src/bucket/utils.rs +++ b/crates/ecstore/src/bucket/utils.rs @@ -14,7 +14,7 @@ use crate::disk::RUSTFS_META_BUCKET; use crate::error::{Error, Result, StorageError}; -use rustfs_utils::path::SLASH_SEPARATOR; +use rustfs_utils::path::SLASH_SEPARATOR_STR; use s3s::xml; pub fn is_meta_bucketname(name: &str) -> bool { @@ -194,7 +194,7 @@ pub fn is_valid_object_name(object: &str) -> bool { return false; } - if object.ends_with(SLASH_SEPARATOR) { + if object.ends_with(SLASH_SEPARATOR_STR) { return false; } @@ -206,7 +206,7 @@ pub fn check_object_name_for_length_and_slash(bucket: &str, object: &str) -> Res return Err(StorageError::ObjectNameTooLong(bucket.to_owned(), object.to_owned())); } - if object.starts_with(SLASH_SEPARATOR) { + if object.starts_with(SLASH_SEPARATOR_STR) { return Err(StorageError::ObjectNamePrefixAsSlash(bucket.to_owned(), object.to_owned())); } diff --git a/crates/ecstore/src/config/com.rs b/crates/ecstore/src/config/com.rs index b1010bf0..3ad8256a 100644 --- a/crates/ecstore/src/config/com.rs +++ b/crates/ecstore/src/config/com.rs @@ -18,7 +18,7 @@ use crate::error::{Error, Result}; use crate::store_api::{ObjectInfo, ObjectOptions, PutObjReader, StorageAPI}; use http::HeaderMap; use rustfs_config::DEFAULT_DELIMITER; -use rustfs_utils::path::SLASH_SEPARATOR; +use rustfs_utils::path::SLASH_SEPARATOR_STR; use std::collections::HashSet; use std::sync::Arc; use std::sync::LazyLock; @@ -29,7 +29,7 @@ const CONFIG_FILE: &str = "config.json"; pub const STORAGE_CLASS_SUB_SYS: &str = "storage_class"; -static CONFIG_BUCKET: LazyLock = LazyLock::new(|| format!("{RUSTFS_META_BUCKET}{SLASH_SEPARATOR}{CONFIG_PREFIX}")); +static CONFIG_BUCKET: LazyLock = LazyLock::new(|| format!("{RUSTFS_META_BUCKET}{SLASH_SEPARATOR_STR}{CONFIG_PREFIX}")); static SUB_SYSTEMS_DYNAMIC: LazyLock> = LazyLock::new(|| { let mut h = HashSet::new(); @@ -129,7 +129,7 @@ async fn new_and_save_server_config(api: Arc) -> Result String { - format!("{CONFIG_PREFIX}{SLASH_SEPARATOR}{CONFIG_FILE}") + format!("{CONFIG_PREFIX}{SLASH_SEPARATOR_STR}{CONFIG_FILE}") } /// Handle the situation where the configuration file does not exist, create and save a new configuration diff --git a/crates/ecstore/src/data_usage.rs b/crates/ecstore/src/data_usage.rs index cc908e6a..df3ffede 100644 --- a/crates/ecstore/src/data_usage.rs +++ b/crates/ecstore/src/data_usage.rs @@ -31,14 +31,14 @@ use crate::{ use rustfs_common::data_usage::{ BucketTargetUsageInfo, BucketUsageInfo, DataUsageCache, DataUsageEntry, DataUsageInfo, DiskUsageStatus, SizeSummary, }; -use 
rustfs_utils::path::SLASH_SEPARATOR; +use rustfs_utils::path::SLASH_SEPARATOR_STR; use tokio::fs; use tracing::{error, info, warn}; use crate::error::Error; // Data usage storage constants -pub const DATA_USAGE_ROOT: &str = SLASH_SEPARATOR; +pub const DATA_USAGE_ROOT: &str = SLASH_SEPARATOR_STR; const DATA_USAGE_OBJ_NAME: &str = ".usage.json"; const DATA_USAGE_BLOOM_NAME: &str = ".bloomcycle.bin"; pub const DATA_USAGE_CACHE_NAME: &str = ".usage-cache.bin"; @@ -47,17 +47,17 @@ pub const DATA_USAGE_CACHE_NAME: &str = ".usage-cache.bin"; lazy_static::lazy_static! { pub static ref DATA_USAGE_BUCKET: String = format!("{}{}{}", crate::disk::RUSTFS_META_BUCKET, - SLASH_SEPARATOR, + SLASH_SEPARATOR_STR, crate::disk::BUCKET_META_PREFIX ); pub static ref DATA_USAGE_OBJ_NAME_PATH: String = format!("{}{}{}", crate::disk::BUCKET_META_PREFIX, - SLASH_SEPARATOR, + SLASH_SEPARATOR_STR, DATA_USAGE_OBJ_NAME ); pub static ref DATA_USAGE_BLOOM_NAME_PATH: String = format!("{}{}{}", crate::disk::BUCKET_META_PREFIX, - SLASH_SEPARATOR, + SLASH_SEPARATOR_STR, DATA_USAGE_BLOOM_NAME ); } diff --git a/crates/ecstore/src/disk/local.rs b/crates/ecstore/src/disk/local.rs index 09991fc5..7632a34d 100644 --- a/crates/ecstore/src/disk/local.rs +++ b/crates/ecstore/src/disk/local.rs @@ -12,39 +12,26 @@ // See the License for the specific language governing permissions and // limitations under the License. -use super::error::{Error, Result}; -use super::os::{is_root_disk, rename_all}; -use super::{ - BUCKET_META_PREFIX, CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskMetrics, - FileInfoVersions, RUSTFS_META_BUCKET, ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, - STORAGE_FORMAT_FILE_BACKUP, UpdateMetadataOpts, VolumeInfo, WalkDirOptions, os, -}; -use super::{endpoint::Endpoint, error::DiskError, format::FormatV3}; - use crate::config::storageclass::DEFAULT_INLINE_BLOCK; use crate::data_usage::local_snapshot::ensure_data_usage_layout; -use crate::disk::error::FileAccessDeniedWithContext; -use crate::disk::error_conv::{to_access_error, to_file_error, to_unformatted_disk_error, to_volume_error}; -use crate::disk::fs::{ - O_APPEND, O_CREATE, O_RDONLY, O_TRUNC, O_WRONLY, access, lstat, lstat_std, remove, remove_all_std, remove_std, rename, -}; -use crate::disk::os::{check_path_length, is_empty_dir}; use crate::disk::{ - CHECK_PART_FILE_CORRUPT, CHECK_PART_FILE_NOT_FOUND, CHECK_PART_SUCCESS, CHECK_PART_UNKNOWN, CHECK_PART_VOLUME_NOT_FOUND, - FileReader, RUSTFS_META_TMP_DELETED_BUCKET, conv_part_err_to_int, + BUCKET_META_PREFIX, CHECK_PART_FILE_CORRUPT, CHECK_PART_FILE_NOT_FOUND, CHECK_PART_SUCCESS, CHECK_PART_UNKNOWN, + CHECK_PART_VOLUME_NOT_FOUND, CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskMetrics, + FileInfoVersions, FileReader, FileWriter, RUSTFS_META_BUCKET, RUSTFS_META_TMP_DELETED_BUCKET, ReadMultipleReq, + ReadMultipleResp, ReadOptions, RenameDataResp, STORAGE_FORMAT_FILE, STORAGE_FORMAT_FILE_BACKUP, UpdateMetadataOpts, + VolumeInfo, WalkDirOptions, conv_part_err_to_int, + endpoint::Endpoint, + error::{DiskError, Error, FileAccessDeniedWithContext, Result}, + error_conv::{to_access_error, to_file_error, to_unformatted_disk_error, to_volume_error}, + format::FormatV3, + fs::{O_APPEND, O_CREATE, O_RDONLY, O_TRUNC, O_WRONLY, access, lstat, lstat_std, remove, remove_all_std, remove_std, rename}, + os, + os::{check_path_length, is_empty_dir, is_root_disk, rename_all}, }; -use crate::disk::{FileWriter, STORAGE_FORMAT_FILE}; -use 
crate::global::{GLOBAL_IsErasureSD, GLOBAL_RootDiskThreshold}; -use rustfs_utils::path::{ - GLOBAL_DIR_SUFFIX, GLOBAL_DIR_SUFFIX_WITH_SLASH, SLASH_SEPARATOR, clean, decode_dir_object, encode_dir_object, has_suffix, - path_join, path_join_buf, -}; -use tokio::time::interval; - use crate::erasure_coding::bitrot_verify; -use bytes::Bytes; -// use path_absolutize::Absolutize; // Replaced with direct path operations for better performance use crate::file_cache::{get_global_file_cache, prefetch_metadata_patterns, read_metadata_cached}; +use crate::global::{GLOBAL_IsErasureSD, GLOBAL_RootDiskThreshold}; +use bytes::Bytes; use parking_lot::RwLock as ParkingLotRwLock; use rustfs_filemeta::{ Cache, FileInfo, FileInfoOpts, FileMeta, MetaCacheEntry, MetacacheWriter, ObjectPartInfo, Opts, RawFileInfo, UpdateFn, @@ -52,6 +39,10 @@ use rustfs_filemeta::{ }; use rustfs_utils::HashAlgorithm; use rustfs_utils::os::get_info; +use rustfs_utils::path::{ + GLOBAL_DIR_SUFFIX, GLOBAL_DIR_SUFFIX_WITH_SLASH, SLASH_SEPARATOR_STR, clean, decode_dir_object, encode_dir_object, + has_suffix, path_join, path_join_buf, +}; use std::collections::HashMap; use std::collections::HashSet; use std::fmt::Debug; @@ -67,6 +58,7 @@ use time::OffsetDateTime; use tokio::fs::{self, File}; use tokio::io::{AsyncReadExt, AsyncSeekExt, AsyncWrite, AsyncWriteExt, ErrorKind}; use tokio::sync::RwLock; +use tokio::time::interval; use tracing::{debug, error, info, warn}; use uuid::Uuid; @@ -129,7 +121,8 @@ impl LocalDisk { pub async fn new(ep: &Endpoint, cleanup: bool) -> Result { debug!("Creating local disk"); // Use optimized path resolution instead of absolutize() for better performance - let root = match std::fs::canonicalize(ep.get_file_path()) { + // Use dunce::canonicalize instead of std::fs::canonicalize to avoid UNC paths on Windows + let root = match dunce::canonicalize(ep.get_file_path()) { Ok(path) => path, Err(e) => { if e.kind() == ErrorKind::NotFound { @@ -483,7 +476,7 @@ impl LocalDisk { // Async prefetch related files, don't block current read if let Some(parent) = file_path.parent() { - prefetch_metadata_patterns(parent, &[super::STORAGE_FORMAT_FILE, "part.1", "part.2", "part.meta"]).await; + prefetch_metadata_patterns(parent, &[STORAGE_FORMAT_FILE, "part.1", "part.2", "part.meta"]).await; } // Main read logic @@ -507,7 +500,7 @@ impl LocalDisk { async fn read_metadata_batch(&self, requests: Vec<(String, String)>) -> Result>>> { let paths: Vec = requests .iter() - .map(|(bucket, key)| self.get_object_path(bucket, &format!("{}/{}", key, super::STORAGE_FORMAT_FILE))) + .map(|(bucket, key)| self.get_object_path(bucket, &format!("{}/{}", key, STORAGE_FORMAT_FILE))) .collect::>>()?; let cache = get_global_file_cache(); @@ -544,7 +537,7 @@ impl LocalDisk { // TODO: async notifications for disk space checks and trash cleanup - let trash_path = self.get_object_path(super::RUSTFS_META_TMP_DELETED_BUCKET, Uuid::new_v4().to_string().as_str())?; + let trash_path = self.get_object_path(RUSTFS_META_TMP_DELETED_BUCKET, Uuid::new_v4().to_string().as_str())?; // if let Some(parent) = trash_path.parent() { // if !parent.exists() { // fs::create_dir_all(parent).await?; @@ -552,7 +545,7 @@ impl LocalDisk { // } let err = if recursive { - rename_all(delete_path, trash_path, self.get_bucket_path(super::RUSTFS_META_TMP_DELETED_BUCKET)?) + rename_all(delete_path, trash_path, self.get_bucket_path(RUSTFS_META_TMP_DELETED_BUCKET)?) 
.await .err() } else { @@ -562,12 +555,12 @@ impl LocalDisk { .err() }; - if immediate_purge || delete_path.to_string_lossy().ends_with(SLASH_SEPARATOR) { - let trash_path2 = self.get_object_path(super::RUSTFS_META_TMP_DELETED_BUCKET, Uuid::new_v4().to_string().as_str())?; + if immediate_purge || delete_path.to_string_lossy().ends_with(SLASH_SEPARATOR_STR) { + let trash_path2 = self.get_object_path(RUSTFS_META_TMP_DELETED_BUCKET, Uuid::new_v4().to_string().as_str())?; let _ = rename_all( encode_dir_object(delete_path.to_string_lossy().as_ref()), trash_path2, - self.get_bucket_path(super::RUSTFS_META_TMP_DELETED_BUCKET)?, + self.get_bucket_path(RUSTFS_META_TMP_DELETED_BUCKET)?, ) .await; } @@ -916,7 +909,7 @@ impl LocalDisk { } if let Some(parent) = path.as_ref().parent() { - super::os::make_dir_all(parent, skip_parent).await?; + os::make_dir_all(parent, skip_parent).await?; } let f = super::fs::open_file(path.as_ref(), mode).await.map_err(to_file_error)?; @@ -942,7 +935,7 @@ impl LocalDisk { let meta = file.metadata().await.map_err(to_file_error)?; let file_size = meta.len() as usize; - bitrot_verify(Box::new(file), file_size, part_size, algo, bytes::Bytes::copy_from_slice(sum), shard_size) + bitrot_verify(Box::new(file), file_size, part_size, algo, Bytes::copy_from_slice(sum), shard_size) .await .map_err(to_file_error)?; @@ -1038,15 +1031,16 @@ impl LocalDisk { continue; } - if entry.ends_with(SLASH_SEPARATOR) { + if entry.ends_with(SLASH_SEPARATOR_STR) { if entry.ends_with(GLOBAL_DIR_SUFFIX_WITH_SLASH) { - let entry = format!("{}{}", entry.as_str().trim_end_matches(GLOBAL_DIR_SUFFIX_WITH_SLASH), SLASH_SEPARATOR); + let entry = + format!("{}{}", entry.as_str().trim_end_matches(GLOBAL_DIR_SUFFIX_WITH_SLASH), SLASH_SEPARATOR_STR); dir_objes.insert(entry.clone()); *item = entry; continue; } - *item = entry.trim_end_matches(SLASH_SEPARATOR).to_owned(); + *item = entry.trim_end_matches(SLASH_SEPARATOR_STR).to_owned(); continue; } @@ -1058,7 +1052,7 @@ impl LocalDisk { .await?; let entry = entry.strip_suffix(STORAGE_FORMAT_FILE).unwrap_or_default().to_owned(); - let name = entry.trim_end_matches(SLASH_SEPARATOR); + let name = entry.trim_end_matches(SLASH_SEPARATOR_STR); let name = decode_dir_object(format!("{}/{}", ¤t, &name).as_str()); // if opts.limit > 0 @@ -1141,7 +1135,7 @@ impl LocalDisk { Ok(res) => { if is_dir_obj { meta.name = meta.name.trim_end_matches(GLOBAL_DIR_SUFFIX_WITH_SLASH).to_owned(); - meta.name.push_str(SLASH_SEPARATOR); + meta.name.push_str(SLASH_SEPARATOR_STR); } meta.metadata = res; @@ -1159,7 +1153,7 @@ impl LocalDisk { // NOT an object, append to stack (with slash) // If dirObject, but no metadata (which is unexpected) we skip it. if !is_dir_obj && !is_empty_dir(self.get_object_path(&opts.bucket, &meta.name)?).await { - meta.name.push_str(SLASH_SEPARATOR); + meta.name.push_str(SLASH_SEPARATOR_STR); dir_stack.push(meta.name); } } @@ -1234,7 +1228,7 @@ async fn read_file_metadata(p: impl AsRef) -> Result { fn skip_access_checks(p: impl AsRef) -> bool { let vols = [ - super::RUSTFS_META_TMP_DELETED_BUCKET, + RUSTFS_META_TMP_DELETED_BUCKET, super::RUSTFS_META_TMP_BUCKET, super::RUSTFS_META_MULTIPART_BUCKET, RUSTFS_META_BUCKET, @@ -1628,8 +1622,8 @@ impl DiskAPI for LocalDisk { super::fs::access_std(&dst_volume_dir).map_err(|e| to_access_error(e, DiskError::VolumeAccessDenied))? 
} - let src_is_dir = has_suffix(src_path, SLASH_SEPARATOR); - let dst_is_dir = has_suffix(dst_path, SLASH_SEPARATOR); + let src_is_dir = has_suffix(src_path, SLASH_SEPARATOR_STR); + let dst_is_dir = has_suffix(dst_path, SLASH_SEPARATOR_STR); if !src_is_dir && dst_is_dir || src_is_dir && !dst_is_dir { warn!( @@ -1695,8 +1689,8 @@ impl DiskAPI for LocalDisk { .map_err(|e| to_access_error(e, DiskError::VolumeAccessDenied))?; } - let src_is_dir = has_suffix(src_path, SLASH_SEPARATOR); - let dst_is_dir = has_suffix(dst_path, SLASH_SEPARATOR); + let src_is_dir = has_suffix(src_path, SLASH_SEPARATOR_STR); + let dst_is_dir = has_suffix(dst_path, SLASH_SEPARATOR_STR); if (dst_is_dir || src_is_dir) && (!dst_is_dir || !src_is_dir) { return Err(Error::from(DiskError::FileAccessDenied)); } @@ -1847,12 +1841,12 @@ impl DiskAPI for LocalDisk { } let volume_dir = self.get_bucket_path(volume)?; - let dir_path_abs = self.get_object_path(volume, dir_path.trim_start_matches(SLASH_SEPARATOR))?; + let dir_path_abs = self.get_object_path(volume, dir_path.trim_start_matches(SLASH_SEPARATOR_STR))?; let entries = match os::read_dir(&dir_path_abs, count).await { Ok(res) => res, Err(e) => { - if e.kind() == std::io::ErrorKind::NotFound + if e.kind() == ErrorKind::NotFound && !skip_access_checks(volume) && let Err(e) = access(&volume_dir).await { @@ -1883,11 +1877,11 @@ impl DiskAPI for LocalDisk { let mut objs_returned = 0; - if opts.base_dir.ends_with(SLASH_SEPARATOR) { + if opts.base_dir.ends_with(SLASH_SEPARATOR_STR) { let fpath = self.get_object_path( &opts.bucket, path_join_buf(&[ - format!("{}{}", opts.base_dir.trim_end_matches(SLASH_SEPARATOR), GLOBAL_DIR_SUFFIX).as_str(), + format!("{}{}", opts.base_dir.trim_end_matches(SLASH_SEPARATOR_STR), GLOBAL_DIR_SUFFIX).as_str(), STORAGE_FORMAT_FILE, ]) .as_str(), @@ -2119,7 +2113,7 @@ impl DiskAPI for LocalDisk { let volume_dir = self.get_bucket_path(volume)?; if let Err(e) = access(&volume_dir).await { - if e.kind() == std::io::ErrorKind::NotFound { + if e.kind() == ErrorKind::NotFound { os::make_dir_all(&volume_dir, self.root.as_path()).await?; return Ok(()); } @@ -2137,7 +2131,7 @@ impl DiskAPI for LocalDisk { let entries = os::read_dir(&self.root, -1).await.map_err(to_volume_error)?; for entry in entries { - if !has_suffix(&entry, SLASH_SEPARATOR) || !Self::is_valid_volname(clean(&entry).as_str()) { + if !has_suffix(&entry, SLASH_SEPARATOR_STR) || !Self::is_valid_volname(clean(&entry).as_str()) { continue; } @@ -2359,7 +2353,7 @@ impl DiskAPI for LocalDisk { force_del_marker: bool, opts: DeleteOptions, ) -> Result<()> { - if path.starts_with(SLASH_SEPARATOR) { + if path.starts_with(SLASH_SEPARATOR_STR) { return self .delete( volume, @@ -2420,7 +2414,7 @@ impl DiskAPI for LocalDisk { if !meta.versions.is_empty() { let buf = meta.marshal_msg()?; return self - .write_all_meta(volume, format!("{path}{SLASH_SEPARATOR}{STORAGE_FORMAT_FILE}").as_str(), &buf, true) + .write_all_meta(volume, format!("{path}{SLASH_SEPARATOR_STR}{STORAGE_FORMAT_FILE}").as_str(), &buf, true) .await; } @@ -2430,11 +2424,11 @@ impl DiskAPI for LocalDisk { { let src_path = path_join(&[ file_path.as_path(), - Path::new(format!("{old_data_dir}{SLASH_SEPARATOR}{STORAGE_FORMAT_FILE_BACKUP}").as_str()), + Path::new(format!("{old_data_dir}{SLASH_SEPARATOR_STR}{STORAGE_FORMAT_FILE_BACKUP}").as_str()), ]); let dst_path = path_join(&[ file_path.as_path(), - Path::new(format!("{path}{SLASH_SEPARATOR}{STORAGE_FORMAT_FILE}").as_str()), + 
Path::new(format!("{path}{SLASH_SEPARATOR_STR}{STORAGE_FORMAT_FILE}").as_str()), ]); return rename_all(src_path, dst_path, file_path).await; } @@ -2563,7 +2557,7 @@ async fn get_disk_info(drive_path: PathBuf) -> Result<(rustfs_utils::os::DiskInf if root_disk_threshold > 0 { disk_info.total <= root_disk_threshold } else { - is_root_disk(&drive_path, SLASH_SEPARATOR).unwrap_or_default() + is_root_disk(&drive_path, SLASH_SEPARATOR_STR).unwrap_or_default() } } else { false @@ -2581,7 +2575,7 @@ mod test { // let arr = Vec::new(); let vols = [ - super::super::RUSTFS_META_TMP_DELETED_BUCKET, + RUSTFS_META_TMP_DELETED_BUCKET, super::super::RUSTFS_META_TMP_BUCKET, super::super::RUSTFS_META_MULTIPART_BUCKET, RUSTFS_META_BUCKET, @@ -2609,9 +2603,7 @@ mod test { let disk = LocalDisk::new(&ep, false).await.unwrap(); - let tmpp = disk - .resolve_abs_path(Path::new(super::super::RUSTFS_META_TMP_DELETED_BUCKET)) - .unwrap(); + let tmpp = disk.resolve_abs_path(Path::new(RUSTFS_META_TMP_DELETED_BUCKET)).unwrap(); println!("ppp :{:?}", &tmpp); @@ -2639,9 +2631,7 @@ mod test { let disk = LocalDisk::new(&ep, false).await.unwrap(); - let tmpp = disk - .resolve_abs_path(Path::new(super::super::RUSTFS_META_TMP_DELETED_BUCKET)) - .unwrap(); + let tmpp = disk.resolve_abs_path(Path::new(RUSTFS_META_TMP_DELETED_BUCKET)).unwrap(); println!("ppp :{:?}", &tmpp); diff --git a/crates/ecstore/src/disk/os.rs b/crates/ecstore/src/disk/os.rs index 7ec5d0ff..660deec5 100644 --- a/crates/ecstore/src/disk/os.rs +++ b/crates/ecstore/src/disk/os.rs @@ -19,7 +19,7 @@ use std::{ use super::error::Result; use crate::disk::error_conv::to_file_error; -use rustfs_utils::path::SLASH_SEPARATOR; +use rustfs_utils::path::SLASH_SEPARATOR_STR; use tokio::fs; use tracing::warn; @@ -118,7 +118,7 @@ pub async fn read_dir(path: impl AsRef, count: i32) -> std::io::Result (String, S let trimmed_path = path .strip_prefix(base_path) .unwrap_or(path) - .strip_prefix(SLASH_SEPARATOR) + .strip_prefix(SLASH_SEPARATOR_STR) .unwrap_or(path); // Find the position of the first '/' - let pos = trimmed_path.find(SLASH_SEPARATOR).unwrap_or(trimmed_path.len()); + let pos = trimmed_path.find(SLASH_SEPARATOR_STR).unwrap_or(trimmed_path.len()); // Split into bucket and prefix let bucket = &trimmed_path[0..pos]; let prefix = &trimmed_path[pos + 1..]; // +1 to skip the '/' character if it exists diff --git a/crates/ecstore/src/set_disk.rs b/crates/ecstore/src/set_disk.rs index 92089d02..678d2dab 100644 --- a/crates/ecstore/src/set_disk.rs +++ b/crates/ecstore/src/set_disk.rs @@ -82,7 +82,7 @@ use rustfs_utils::http::headers::{AMZ_OBJECT_TAGGING, RESERVED_METADATA_PREFIX, use rustfs_utils::{ HashAlgorithm, crypto::hex, - path::{SLASH_SEPARATOR, encode_dir_object, has_suffix, path_join_buf}, + path::{SLASH_SEPARATOR_STR, encode_dir_object, has_suffix, path_join_buf}, }; use rustfs_workers::workers::Workers; use s3s::header::X_AMZ_RESTORE; @@ -5324,7 +5324,7 @@ impl StorageAPI for SetDisks { &upload_id_path, fi.data_dir.map(|v| v.to_string()).unwrap_or_default().as_str(), ]), - SLASH_SEPARATOR + SLASH_SEPARATOR_STR ); let mut part_numbers = match Self::list_parts(&online_disks, &part_path, read_quorum).await { @@ -5462,7 +5462,7 @@ impl StorageAPI for SetDisks { let mut populated_upload_ids = HashSet::new(); for upload_id in upload_ids.iter() { - let upload_id = upload_id.trim_end_matches(SLASH_SEPARATOR).to_string(); + let upload_id = upload_id.trim_end_matches(SLASH_SEPARATOR_STR).to_string(); if populated_upload_ids.contains(&upload_id) { continue; } @@ -6222,7 
+6222,7 @@ impl StorageAPI for SetDisks { None }; - if has_suffix(object, SLASH_SEPARATOR) { + if has_suffix(object, SLASH_SEPARATOR_STR) { let (result, err) = self.heal_object_dir_locked(bucket, object, opts.dry_run, opts.remove).await?; return Ok((result, err.map(|e| e.into()))); } diff --git a/crates/ecstore/src/store_list_objects.rs b/crates/ecstore/src/store_list_objects.rs index c28a4c42..ff10fc41 100644 --- a/crates/ecstore/src/store_list_objects.rs +++ b/crates/ecstore/src/store_list_objects.rs @@ -34,7 +34,7 @@ use rustfs_filemeta::{ MetaCacheEntries, MetaCacheEntriesSorted, MetaCacheEntriesSortedResult, MetaCacheEntry, MetadataResolutionParams, merge_file_meta_versions, }; -use rustfs_utils::path::{self, SLASH_SEPARATOR, base_dir_from_prefix}; +use rustfs_utils::path::{self, SLASH_SEPARATOR_STR, base_dir_from_prefix}; use std::collections::HashMap; use std::sync::Arc; use tokio::sync::broadcast::{self}; @@ -132,7 +132,7 @@ impl ListPathOptions { return; } - let s = SLASH_SEPARATOR.chars().next().unwrap_or_default(); + let s = SLASH_SEPARATOR_STR.chars().next().unwrap_or_default(); self.filter_prefix = { let fp = self.prefix.trim_start_matches(&self.base_dir).trim_matches(s); @@ -346,7 +346,7 @@ impl ECStore { if let Some(delimiter) = &delimiter { if obj.is_dir && obj.mod_time.is_none() { let mut found = false; - if delimiter != SLASH_SEPARATOR { + if delimiter != SLASH_SEPARATOR_STR { for p in prefixes.iter() { if found { break; @@ -470,7 +470,7 @@ impl ECStore { if let Some(delimiter) = &delimiter { if obj.is_dir && obj.mod_time.is_none() { let mut found = false; - if delimiter != SLASH_SEPARATOR { + if delimiter != SLASH_SEPARATOR_STR { for p in prefixes.iter() { if found { break; @@ -502,7 +502,7 @@ impl ECStore { // warn!("list_path opt {:?}", &o); check_list_objs_args(&o.bucket, &o.prefix, &o.marker)?; - // if opts.prefix.ends_with(SLASH_SEPARATOR) { + // if opts.prefix.ends_with(SLASH_SEPARATOR_STR) { // return Err(Error::msg("eof")); // } @@ -520,11 +520,11 @@ impl ECStore { return Err(Error::Unexpected); } - if o.prefix.starts_with(SLASH_SEPARATOR) { + if o.prefix.starts_with(SLASH_SEPARATOR_STR) { return Err(Error::Unexpected); } - let slash_separator = Some(SLASH_SEPARATOR.to_owned()); + let slash_separator = Some(SLASH_SEPARATOR_STR.to_owned()); o.include_directories = o.separator == slash_separator; @@ -774,8 +774,8 @@ impl ECStore { let mut filter_prefix = { prefix .trim_start_matches(&path) - .trim_start_matches(SLASH_SEPARATOR) - .trim_end_matches(SLASH_SEPARATOR) + .trim_start_matches(SLASH_SEPARATOR_STR) + .trim_end_matches(SLASH_SEPARATOR_STR) .to_owned() }; @@ -1130,7 +1130,7 @@ async fn merge_entry_channels( if path::clean(&best_entry.name) == path::clean(&other_entry.name) { let dir_matches = best_entry.is_dir() && other_entry.is_dir(); let suffix_matches = - best_entry.name.ends_with(SLASH_SEPARATOR) == other_entry.name.ends_with(SLASH_SEPARATOR); + best_entry.name.ends_with(SLASH_SEPARATOR_STR) == other_entry.name.ends_with(SLASH_SEPARATOR_STR); if dir_matches && suffix_matches { to_merge.push(other_idx); diff --git a/crates/ecstore/src/tier/tier.rs b/crates/ecstore/src/tier/tier.rs index 1d11ad9e..37079e27 100644 --- a/crates/ecstore/src/tier/tier.rs +++ b/crates/ecstore/src/tier/tier.rs @@ -51,7 +51,7 @@ use crate::{ store_api::{ObjectOptions, PutObjReader}, }; use rustfs_rio::HashReader; -use rustfs_utils::path::{SLASH_SEPARATOR, path_join}; +use rustfs_utils::path::{SLASH_SEPARATOR_STR, path_join}; use s3s::S3ErrorCode; use super::{ @@ -403,7 +403,7 @@ 
impl TierConfigMgr { pub async fn save_tiering_config(&self, api: Arc) -> std::result::Result<(), std::io::Error> { let data = self.marshal()?; - let config_file = format!("{}{}{}", CONFIG_PREFIX, SLASH_SEPARATOR, TIER_CONFIG_FILE); + let config_file = format!("{}{}{}", CONFIG_PREFIX, SLASH_SEPARATOR_STR, TIER_CONFIG_FILE); self.save_config(api, &config_file, data).await } @@ -483,7 +483,7 @@ async fn new_and_save_tiering_config(api: Arc) -> Result) -> std::result::Result { - let config_file = format!("{}{}{}", CONFIG_PREFIX, SLASH_SEPARATOR, TIER_CONFIG_FILE); + let config_file = format!("{}{}{}", CONFIG_PREFIX, SLASH_SEPARATOR_STR, TIER_CONFIG_FILE); let data = read_config(api.clone(), config_file.as_str()).await; if let Err(err) = data { if is_err_config_not_found(&err) { diff --git a/crates/ecstore/src/tier/warm_backend_s3.rs b/crates/ecstore/src/tier/warm_backend_s3.rs index e1b500c5..85453311 100644 --- a/crates/ecstore/src/tier/warm_backend_s3.rs +++ b/crates/ecstore/src/tier/warm_backend_s3.rs @@ -30,13 +30,11 @@ use crate::client::{ transition_api::{Options, TransitionClient, TransitionCore}, transition_api::{ReadCloser, ReaderImpl}, }; -use crate::error::ErrorResponse; -use crate::error::error_resp_to_object_err; use crate::tier::{ tier_config::TierS3, warm_backend::{WarmBackend, WarmBackendGetOpts}, }; -use rustfs_utils::path::SLASH_SEPARATOR; +use rustfs_utils::path::SLASH_SEPARATOR_STR; pub struct WarmBackendS3 { pub client: Arc, @@ -178,7 +176,7 @@ impl WarmBackend for WarmBackendS3 { async fn in_use(&self) -> Result { let result = self .core - .list_objects_v2(&self.bucket, &self.prefix, "", "", SLASH_SEPARATOR, 1) + .list_objects_v2(&self.bucket, &self.prefix, "", "", SLASH_SEPARATOR_STR, 1) .await?; Ok(result.common_prefixes.len() > 0 || result.contents.len() > 0) diff --git a/crates/ecstore/src/tier/warm_backend_s3sdk.rs b/crates/ecstore/src/tier/warm_backend_s3sdk.rs index 15ae827b..8e7d29ce 100644 --- a/crates/ecstore/src/tier/warm_backend_s3sdk.rs +++ b/crates/ecstore/src/tier/warm_backend_s3sdk.rs @@ -27,19 +27,11 @@ use aws_sdk_s3::Client; use aws_sdk_s3::config::{Credentials, Region}; use aws_sdk_s3::primitives::ByteStream; -use crate::client::{ - api_get_options::GetObjectOptions, - api_put_object::PutObjectOptions, - api_remove::RemoveObjectOptions, - transition_api::{ReadCloser, ReaderImpl}, -}; -use crate::error::ErrorResponse; -use crate::error::error_resp_to_object_err; +use crate::client::transition_api::{ReadCloser, ReaderImpl}; use crate::tier::{ tier_config::TierS3, warm_backend::{WarmBackend, WarmBackendGetOpts}, }; -use rustfs_utils::path::SLASH_SEPARATOR; pub struct WarmBackendS3 { pub client: Arc, diff --git a/crates/iam/src/store/object.rs b/crates/iam/src/store/object.rs index 930a8743..bc50d5ea 100644 --- a/crates/iam/src/store/object.rs +++ b/crates/iam/src/store/object.rs @@ -32,7 +32,7 @@ use rustfs_ecstore::{ store_api::{ObjectInfo, ObjectOptions}, }; use rustfs_policy::{auth::UserIdentity, policy::PolicyDoc}; -use rustfs_utils::path::{SLASH_SEPARATOR, path_join_buf}; +use rustfs_utils::path::{SLASH_SEPARATOR_STR, path_join_buf}; use serde::{Serialize, de::DeserializeOwned}; use std::sync::LazyLock; use std::{collections::HashMap, sync::Arc}; @@ -182,7 +182,7 @@ impl ObjectStore { } else { info.name }; - let name = object_name.trim_start_matches(&prefix).trim_end_matches(SLASH_SEPARATOR); + let name = object_name.trim_start_matches(&prefix).trim_end_matches(SLASH_SEPARATOR_STR); let _ = sender .send(StringOrErr { item: Some(name.to_owned()), 
diff --git a/crates/utils/Cargo.toml b/crates/utils/Cargo.toml index 9ba7c712..80f9aa8a 100644 --- a/crates/utils/Cargo.toml +++ b/crates/utils/Cargo.toml @@ -83,7 +83,7 @@ ip = ["dep:local-ip-address"] # ip characteristics and their dependencies tls = ["dep:rustls", "dep:rustls-pemfile", "dep:rustls-pki-types"] # tls characteristics and their dependencies net = ["ip", "dep:url", "dep:netif", "dep:futures", "dep:transform-stream", "dep:bytes", "dep:s3s", "dep:hyper", "dep:thiserror", "dep:tokio"] # network features with DNS resolver io = ["dep:tokio"] -path = [] +path = [] # path manipulation features notify = ["dep:hyper", "dep:s3s", "dep:hashbrown", "dep:thiserror", "dep:serde", "dep:libc", "dep:url", "dep:regex"] # file system notification features compress = ["dep:flate2", "dep:brotli", "dep:snap", "dep:lz4", "dep:zstd"] string = ["dep:regex"] diff --git a/crates/utils/src/path.rs b/crates/utils/src/path.rs index 0eb7c9f7..0bf75064 100644 --- a/crates/utils/src/path.rs +++ b/crates/utils/src/path.rs @@ -15,12 +15,36 @@ use std::path::Path; use std::path::PathBuf; +#[cfg(target_os = "windows")] +const SLASH_SEPARATOR: char = '\\'; +#[cfg(not(target_os = "windows"))] +const SLASH_SEPARATOR: char = '/'; + +/// GLOBAL_DIR_SUFFIX is a special suffix used to denote directory objects +/// in object storage systems that do not have a native directory concept. pub const GLOBAL_DIR_SUFFIX: &str = "__XLDIR__"; -pub const SLASH_SEPARATOR: &str = "/"; +/// SLASH_SEPARATOR_STR is the string representation of the path separator +/// used in the current operating system. +pub const SLASH_SEPARATOR_STR: &str = if cfg!(target_os = "windows") { "\\" } else { "/" }; +/// GLOBAL_DIR_SUFFIX_WITH_SLASH is the directory suffix followed by the +/// platform-specific path separator, used to denote directory objects. +#[cfg(target_os = "windows")] +pub const GLOBAL_DIR_SUFFIX_WITH_SLASH: &str = "__XLDIR__\\"; +#[cfg(not(target_os = "windows"))] pub const GLOBAL_DIR_SUFFIX_WITH_SLASH: &str = "__XLDIR__/"; +/// has_suffix checks if the string `s` ends with the specified `suffix`, +/// performing a case-insensitive comparison on Windows platforms. +/// +/// # Arguments +/// * `s` - A string slice that holds the string to be checked. +/// * `suffix` - A string slice that holds the suffix to check for. +/// +/// # Returns +/// A boolean indicating whether `s` ends with `suffix`. +/// pub fn has_suffix(s: &str, suffix: &str) -> bool { if cfg!(target_os = "windows") { s.to_lowercase().ends_with(&suffix.to_lowercase()) @@ -29,19 +53,46 @@ pub fn has_suffix(s: &str, suffix: &str) -> bool { } } +/// encode_dir_object encodes a directory object by appending +/// a special suffix if it ends with a slash. +/// +/// # Arguments +/// * `object` - A string slice that holds the object to be encoded. +/// +/// # Returns +/// A `String` representing the encoded directory object. +/// pub fn encode_dir_object(object: &str) -> String { - if has_suffix(object, SLASH_SEPARATOR) { - format!("{}{}", object.trim_end_matches(SLASH_SEPARATOR), GLOBAL_DIR_SUFFIX) + if has_suffix(object, SLASH_SEPARATOR_STR) { + format!("{}{}", object.trim_end_matches(SLASH_SEPARATOR_STR), GLOBAL_DIR_SUFFIX) } else { object.to_string() } } +/// is_dir_object checks if the given object string represents +/// a directory object by verifying if it ends with the special suffix. +/// +/// # Arguments +/// * `object` - A string slice that holds the object to be checked. +/// +/// # Returns +/// A boolean indicating whether the object is a directory object. 
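+///
+/// # Examples (illustrative; assumes the POSIX `/` separator)
+///
+/// A trailing-slash key is first encoded with the `__XLDIR__` suffix, so both
+/// the raw and the encoded forms are recognized:
+///
+/// assert!(is_dir_object("photos/"));
+/// assert!(is_dir_object("photos__XLDIR__"));
+/// assert!(!is_dir_object("photos/cat.png"));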
+///
 pub fn is_dir_object(object: &str) -> bool {
     let obj = encode_dir_object(object);
     obj.ends_with(GLOBAL_DIR_SUFFIX)
 }
 
+/// decode_dir_object decodes a directory object by removing
+/// the special suffix if it is present.
+///
+/// # Arguments
+/// * `object` - A string slice that holds the object to be decoded.
+///
+/// # Returns
+/// A `String` representing the decoded directory object.
+///
 #[allow(dead_code)]
 pub fn decode_dir_object(object: &str) -> String {
     if has_suffix(object, GLOBAL_DIR_SUFFIX) {
@@ -51,21 +102,50 @@ pub fn decode_dir_object(object: &str) -> String {
     }
 }
 
+/// retain_slash ensures that the given string `s` ends with a slash.
+/// If it does not, a slash is appended.
+///
+/// # Arguments
+/// * `s` - A string slice that holds the string to be processed.
+///
+/// # Returns
+/// A `String` that ends with a slash.
+///
 pub fn retain_slash(s: &str) -> String {
     if s.is_empty() {
         return s.to_string();
     }
-    if s.ends_with(SLASH_SEPARATOR) {
+    if s.ends_with(SLASH_SEPARATOR_STR) {
         s.to_string()
     } else {
-        format!("{s}{SLASH_SEPARATOR}")
+        format!("{s}{SLASH_SEPARATOR_STR}")
     }
 }
 
+/// strings_has_prefix_fold checks if the string `s` starts with the specified `prefix`,
+/// performing an ASCII case-insensitive comparison (this is the helper behind the
+/// Windows behavior of `has_prefix`).
+///
+/// # Arguments
+/// * `s` - A string slice that holds the string to be checked.
+/// * `prefix` - A string slice that holds the prefix to check for.
+///
+/// # Returns
+/// A boolean indicating whether `s` starts with `prefix`.
+///
 pub fn strings_has_prefix_fold(s: &str, prefix: &str) -> bool {
     s.len() >= prefix.len() && (s[..prefix.len()] == *prefix || s[..prefix.len()].eq_ignore_ascii_case(prefix))
 }
 
+/// has_prefix checks if the string `s` starts with the specified `prefix`,
+/// performing a case-insensitive comparison on Windows platforms.
+///
+/// # Arguments
+/// * `s` - A string slice that holds the string to be checked.
+/// * `prefix` - A string slice that holds the prefix to check for.
+///
+/// # Returns
+/// A boolean indicating whether `s` starts with `prefix`.
+///
 pub fn has_prefix(s: &str, prefix: &str) -> bool {
     if cfg!(target_os = "windows") {
         return strings_has_prefix_fold(s, prefix);
@@ -74,21 +154,37 @@ pub fn has_prefix(s: &str, prefix: &str) -> bool {
     s.starts_with(prefix)
 }
 
+/// path_join joins multiple path elements into a single PathBuf,
+/// ensuring that the resulting path is clean and properly formatted.
+///
+/// # Arguments
+/// * `elem` - A slice of path elements to be joined.
+///
+/// # Returns
+/// A PathBuf representing the joined path.
+///
 pub fn path_join<P: AsRef<Path>>(elem: &[P]) -> PathBuf {
-    path_join_buf(
-        elem.iter()
-            .map(|p| p.as_ref().to_string_lossy().into_owned())
-            .collect::<Vec<String>>()
-            .iter()
-            .map(|s| s.as_str())
-            .collect::<Vec<&str>>()
-            .as_slice(),
-    )
-    .into()
+    if elem.is_empty() {
+        return PathBuf::from(".");
+    }
+    // Collect components as owned Strings (lossy for non-UTF8)
+    let strs: Vec<String> = elem.iter().map(|p| p.as_ref().to_string_lossy().into_owned()).collect();
+    // Convert to slice of &str for path_join_buf
+    let refs: Vec<&str> = strs.iter().map(|s| s.as_str()).collect();
+    PathBuf::from(path_join_buf(&refs))
 }
 
+/// path_join_buf joins multiple string path elements into a single String,
+/// ensuring that the resulting path is clean and properly formatted.
+///
+/// # Arguments
+/// * `elements` - A slice of string path elements to be joined.
+///
+/// # Returns
+/// A String representing the joined path.
+/// pub fn path_join_buf(elements: &[&str]) -> String { - let trailing_slash = !elements.is_empty() && elements.last().is_some_and(|last| last.ends_with(SLASH_SEPARATOR)); + let trailing_slash = !elements.is_empty() && elements.last().is_some_and(|last| last.ends_with(SLASH_SEPARATOR_STR)); let mut dst = String::new(); let mut added = 0; @@ -96,7 +192,7 @@ pub fn path_join_buf(elements: &[&str]) -> String { for e in elements { if added > 0 || !e.is_empty() { if added > 0 { - dst.push_str(SLASH_SEPARATOR); + dst.push(SLASH_SEPARATOR); } dst.push_str(e); added += e.len(); @@ -106,113 +202,535 @@ pub fn path_join_buf(elements: &[&str]) -> String { if path_needs_clean(dst.as_bytes()) { let mut clean_path = clean(&dst); if trailing_slash { - clean_path.push_str(SLASH_SEPARATOR); + clean_path.push(SLASH_SEPARATOR); } return clean_path; } if trailing_slash { - dst.push_str(SLASH_SEPARATOR); + dst.push(SLASH_SEPARATOR); } dst } +/// Platform-aware separator check +fn is_sep(b: u8) -> bool { + #[cfg(target_os = "windows")] + { + b == b'/' || b == b'\\' + } + #[cfg(not(target_os = "windows"))] + { + b == b'/' + } +} + /// path_needs_clean returns whether path cleaning may change the path. /// Will detect all cases that will be cleaned, /// but may produce false positives on non-trivial paths. +/// +/// # Arguments +/// * `path` - A byte slice that holds the path to be checked. +/// +/// # Returns +/// A boolean indicating whether the path needs cleaning. +/// fn path_needs_clean(path: &[u8]) -> bool { if path.is_empty() { return true; } - let rooted = path[0] == b'/'; let n = path.len(); - let (mut r, mut w) = if rooted { (1, 1) } else { (0, 0) }; + // On Windows: any forward slash indicates normalization to backslash is required. + #[cfg(target_os = "windows")] + { + if path.iter().any(|&b| b == b'/') { + return true; + } + } - while r < n { - match path[r] { - b if b > 127 => { - // Non ascii. + // Initialize scan index and previous-separator flag. + let mut i = 0usize; + let mut prev_was_sep = false; + + // Platform-aware prefix handling to avoid flagging meaningful leading sequences: + // - Windows: handle drive letter "C:" and UNC leading "\\" + // - Non-Windows: detect and flag double leading '/' (e.g. "//abc") as needing clean + if n >= 1 && is_sep(path[0]) { + #[cfg(target_os = "windows")] + { + // If starts with two separators -> UNC prefix: allow exactly two without flag + if n >= 2 && is_sep(path[1]) { + // If a third leading separator exists, that's redundant (e.g. "///...") -> needs clean + if n >= 3 && is_sep(path[2]) { + return true; + } + // Skip the two UNC leading separators for scanning; do not mark prev_was_sep true + i = 2; + prev_was_sep = false; + } else { + // Single leading separator (rooted) -> mark as seen separator so immediate next sep is duplicate + i = 1; + prev_was_sep = true; + } + } + + #[cfg(not(target_os = "windows"))] + { + // POSIX: double leading '/' is redundant and should be cleaned (e.g. "//abc" -> "/abc") + if n >= 2 && is_sep(path[1]) { return true; } - b'/' => { - // multiple / elements - return true; - } - b'.' => { - if r + 1 == n || path[r + 1] == b'/' { - // . element - assume it has to be cleaned. - return true; - } - if r + 1 < n && path[r + 1] == b'.' && (r + 2 == n || path[r + 2] == b'/') { - // .. element: remove to last / - assume it has to be cleaned. - return true; - } - // Handle single dot case - if r + 1 == n { - // . element - assume it has to be cleaned. 
- return true; - } - // Copy the dot - w += 1; - r += 1; - } - _ => { - // real path element. - // add slash if needed - if (rooted && w != 1) || (!rooted && w != 0) { - w += 1; - } - // copy element - while r < n && path[r] != b'/' { - w += 1; - r += 1; - } - // allow one slash, not at end - if r < n - 1 && path[r] == b'/' { - r += 1; + i = 1; + prev_was_sep = true; + } + } else { + // If not starting with separator, check for Windows drive-letter prefix like "C:" + #[cfg(target_os = "windows")] + { + if n >= 2 && path[1] == b':' && (path[0] as char).is_ascii_alphabetic() { + // Position after "C:" + i = 2; + // If a separator immediately follows the drive (rooted like "C:\"), + // treat that first separator as seen; if more separators follow, it's redundant. + if i < n && is_sep(path[i]) { + i += 1; // consume the single allowed separator after drive + if i < n && is_sep(path[i]) { + // multiple separators after drive like "C:\\..." -> needs clean + return true; + } + prev_was_sep = true; + } else { + prev_was_sep = false; } } } } - // Turn empty string into "." - if w == 0 { - return true; + // Generic scan for repeated separators and dot / dot-dot components. + while i < n { + let b = path[i]; + if is_sep(b) { + if prev_was_sep { + // Multiple separators (except allowed UNC prefix handled above) + return true; + } + prev_was_sep = true; + i += 1; + continue; + } + + // Not a separator: parse current path element + let start = i; + while i < n && !is_sep(path[i]) { + i += 1; + } + let len = i - start; + if len == 1 && path[start] == b'.' { + // single "." element -> needs cleaning + return true; + } + if len == 2 && path[start] == b'.' && path[start + 1] == b'.' { + // ".." element -> needs cleaning + return true; + } + prev_was_sep = false; } + // Trailing separator: if last byte is a separator and path length > 1, then usually needs cleaning, + // except when the path is a platform-specific root form (e.g. "/" on POSIX, "\\" or "C:\" on Windows). + if n > 1 && is_sep(path[n - 1]) { + #[cfg(not(target_os = "windows"))] + { + // POSIX: any trailing separator except the single-root "/" needs cleaning. + return true; + } + #[cfg(target_os = "windows")] + { + // Windows special root forms that are acceptable with trailing separator: + // - UNC root: exactly two leading separators "\" "\" (i.e. "\\") -> n == 2 + if n == 2 && is_sep(path[0]) && is_sep(path[1]) { + return false; + } + // - Drive root: pattern "C:\" or "C:/" (len == 3) + if n == 3 && path[1] == b':' && (path[0] as char).is_ascii_alphabetic() && is_sep(path[2]) { + return false; + } + // Otherwise, trailing separator should be cleaned. + return true; + } + } + + // No conditions triggered: assume path is already clean. false } -pub fn path_to_bucket_object_with_base_path(bash_path: &str, path: &str) -> (String, String) { - let path = path.trim_start_matches(bash_path).trim_start_matches(SLASH_SEPARATOR); +/// path_to_bucket_object_with_base_path splits a given path into bucket and object components, +/// considering a base path to trim from the start. +/// +/// # Arguments +/// * `base_path` - A string slice that holds the base path to be trimmed. +/// * `path` - A string slice that holds the path to be split. +/// +/// # Returns +/// A tuple containing the bucket and object as `String`s. 
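Since `path_needs_clean` is only a conservative fast path in front of `clean`, the contract worth keeping in mind is that false positives are harmless but a false negative would skip a required clean. A hypothetical in-module property check (a sketch, not part of the patch) could pin that down:

```rust
#[cfg(test)]
mod needs_clean_property {
    use super::{clean, path_needs_clean};

    #[test]
    fn needs_clean_never_misses_a_dirty_path() {
        // A few POSIX-flavored samples; on Windows the separator differs.
        for p in ["a/b", "a//b", "./a", "a/../b", "a/b/", "", "/"] {
            if clean(p) != p {
                // Whenever clean() would change the path, the fast path must say so.
                assert!(path_needs_clean(p.as_bytes()), "missed dirty path: {p:?}");
            }
        }
    }
}
```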
+/// +pub fn path_to_bucket_object_with_base_path(base_path: &str, path: &str) -> (String, String) { + let path = path.trim_start_matches(base_path).trim_start_matches(SLASH_SEPARATOR); if let Some(m) = path.find(SLASH_SEPARATOR) { - return (path[..m].to_string(), path[m + SLASH_SEPARATOR.len()..].to_string()); + return (path[..m].to_string(), path[m + SLASH_SEPARATOR_STR.len()..].to_string()); } (path.to_string(), "".to_string()) } +/// path_to_bucket_object splits a given path into bucket and object components. +/// +/// # Arguments +/// * `s` - A string slice that holds the path to be split. +/// +/// # Returns +/// A tuple containing the bucket and object as `String`s. +/// pub fn path_to_bucket_object(s: &str) -> (String, String) { path_to_bucket_object_with_base_path("", s) } +/// contains_any_sep_str checks if the given string contains any path separators. +/// +/// # Arguments +/// * `s` - A string slice that holds the string to be checked. +/// +/// # Returns +/// A boolean indicating whether the string contains any path separators. +fn contains_any_sep_str(s: &str) -> bool { + #[cfg(target_os = "windows")] + { + s.contains('/') || s.contains('\\') + } + #[cfg(not(target_os = "windows"))] + { + s.contains('/') + } +} + +/// base_dir_from_prefix extracts the base directory from a given prefix. +/// +/// # Arguments +/// * `prefix` - A string slice that holds the prefix to be processed. +/// +/// # Returns +/// A `String` representing the base directory extracted from the prefix. +/// pub fn base_dir_from_prefix(prefix: &str) -> String { - let mut base_dir = dir(prefix).to_owned(); - if base_dir == "." || base_dir == "./" || base_dir == "/" { - base_dir = "".to_owned(); + if !contains_any_sep_str(prefix) { + return String::new(); } - if !prefix.contains('/') { - base_dir = "".to_owned(); + let mut base_dir = dir(prefix); + if base_dir == "." || base_dir == SLASH_SEPARATOR_STR { + base_dir.clear(); } - if !base_dir.is_empty() && !base_dir.ends_with(SLASH_SEPARATOR) { - base_dir.push_str(SLASH_SEPARATOR); + if !base_dir.is_empty() && !base_dir.ends_with(SLASH_SEPARATOR_STR) { + base_dir.push_str(SLASH_SEPARATOR_STR); } base_dir } +/// clean returns the shortest path name equivalent to path +/// by purely lexical processing. It applies the following rules +/// iteratively until no further processing can be done: +/// +/// 1. Replace multiple slashes with a single slash. +/// 2. Eliminate each . path name element (the current directory). +/// 3. Eliminate each inner .. path name element (the parent directory) +/// along with the non-.. element that precedes it. +/// 4. Eliminate .. elements that begin a rooted path, +/// that is, replace "/.." by "/" at the beginning of a path. +/// +/// If the result of this process is an empty string, clean returns the string ".". +/// +/// This function is adapted to work cross-platform by using the appropriate path separator. +/// On Windows, this function is aware of drive letters (e.g., `C:`) and UNC paths +/// (e.g., `\\server\share`) and cleans them using the appropriate separator. +/// +/// # Arguments +/// * `path` - A string slice that holds the path to be cleaned. +/// +/// # Returns +/// A `String` representing the cleaned path. 
+///
+pub fn clean(path: &str) -> String {
+    if path.is_empty() {
+        return ".".to_string();
+    }
+
+    #[cfg(target_os = "windows")]
+    {
+        use std::borrow::Cow;
+        let bytes = path.as_bytes();
+        let n = bytes.len();
+        // Windows-aware handling
+        let mut i = 0usize;
+        let mut drive: Option<char> = None;
+        let mut rooted = false;
+        let mut preserve_leading_double_sep = false;
+
+        // Drive letter detection
+        if n >= 2 && bytes[1] == b':' && (bytes[0] as char).is_ascii_alphabetic() {
+            drive = Some(bytes[0] as char);
+            i = 2;
+            // If next is separator, it's an absolute drive-root (e.g., "C:\")
+            if i < n && is_sep(bytes[i]) {
+                rooted = true;
+                // consume all leading separators after drive
+                while i < n && is_sep(bytes[i]) {
+                    i += 1;
+                }
+            }
+        } else {
+            // UNC or absolute by separators
+            if n >= 2 && is_sep(bytes[0]) && is_sep(bytes[1]) {
+                rooted = true;
+                preserve_leading_double_sep = true;
+                i = 2;
+                // consume extra leading separators
+                while i < n && is_sep(bytes[i]) {
+                    i += 1;
+                }
+            } else if is_sep(bytes[0]) {
+                rooted = true;
+                i = 1;
+                while i < n && is_sep(bytes[i]) {
+                    i += 1;
+                }
+            }
+        }
+
+        // Component stack
+        let mut comps: Vec<Cow<str>> = Vec::new();
+        let mut r = i;
+        while r < n {
+            // find next sep or end
+            let start = r;
+            while r < n && !is_sep(bytes[r]) {
+                r += 1;
+            }
+            // component bytes [start..r)
+            let comp = String::from_utf8_lossy(&bytes[start..r]);
+            if comp == "." {
+                // skip
+            } else if comp == ".." {
+                if !comps.is_empty() {
+                    // pop last component
+                    comps.pop();
+                } else if !rooted {
+                    // relative path with .. at front must be kept
+                    comps.push(Cow::Owned("..".to_string()));
+                } else {
+                    // rooted and at root => ignore
+                }
+            } else {
+                comps.push(comp);
+            }
+            // skip separators
+            while r < n && is_sep(bytes[r]) {
+                r += 1;
+            }
+        }
+
+        // Build result
+        let mut out = String::new();
+        if let Some(d) = drive {
+            out.push(d);
+            out.push(':');
+            if rooted {
+                out.push(SLASH_SEPARATOR);
+            }
+        } else if preserve_leading_double_sep {
+            out.push(SLASH_SEPARATOR);
+            out.push(SLASH_SEPARATOR);
+        } else if rooted {
+            out.push(SLASH_SEPARATOR);
+        }
+
+        // Join components: only add a separator *between* components, so that a
+        // drive-relative path like "C:a\b" is not accidentally rooted as "C:\a\b"
+        // (the root forms above already end with their separator).
+        for (idx, c) in comps.iter().enumerate() {
+            if idx > 0 {
+                out.push(SLASH_SEPARATOR);
+            }
+            out.push_str(c);
+        }
+
+        // Special cases:
+        if out.is_empty() {
+            // No drive, no components -> "."
+            return ".".to_string();
+        }
+
+        // If output is just "C:" (drive without components and not rooted), keep as "C:"
+        if drive.is_some() {
+            if out.len() == 2 && out.as_bytes()[1] == b':' {
+                return out;
+            }
+            // If drive+colon+sep and no components, return "C:\"
+            if out.len() == 3 && out.as_bytes()[1] == b':' && is_sep(out.as_bytes()[2]) {
+                return out;
+            }
+        }
+
+        // Remove trailing separator unless it's a root form (single leading sep or drive root or UNC)
+        if out.len() > 1 && out.ends_with(SLASH_SEPARATOR_STR) {
+            // Determine if it's a root form: "\", the UNC root "\\", or a drive root "C:\"
+            let is_root = if out == SLASH_SEPARATOR_STR {
+                true
+            } else if out == format!("{}{}", SLASH_SEPARATOR_STR, SLASH_SEPARATOR_STR) {
+                // only the double-separator UNC root
+                true
+            } else {
+                // drive root "C:\": exactly three bytes with pattern X:\
+                out.len() == 3 && out.as_bytes()[1] == b':' && is_sep(out.as_bytes()[2])
+            };
+            if !is_root {
+                out.pop();
+            }
+        }
+
+        out
+    }
+
+    #[cfg(not(target_os = "windows"))]
+    {
+        // POSIX-like behavior (original implementation but simplified)
+        let rooted = path.starts_with('/');
+        let n = path.len();
+        let mut out = LazyBuf::new(path.to_string());
+        let mut r = 0usize;
+        let mut dotdot = 0usize;
+
+        if rooted {
+            out.append(b'/');
+            r = 1;
+            dotdot = 1;
+        }
+
+        while r < n {
+            match path.as_bytes()[r] {
+                b'/' => {
+                    // Empty path element
+                    r += 1;
+                }
+                b'.' if r + 1 == n || path.as_bytes()[r + 1] == b'/' => {
+                    // . element
+                    r += 1;
+                }
+                b'.' if path.as_bytes()[r + 1] == b'.' && (r + 2 == n || path.as_bytes()[r + 2] == b'/') => {
+                    // .. element: remove to last /
+                    r += 2;
+
+                    if out.w > dotdot {
+                        // Can backtrack
+                        out.w -= 1;
+                        while out.w > dotdot && out.index(out.w) != b'/' {
+                            out.w -= 1;
+                        }
+                    } else if !rooted {
+                        // Cannot backtrack but not rooted, so append .. element.
+                        if out.w > 0 {
+                            out.append(b'/');
+                        }
+                        out.append(b'.');
+                        out.append(b'.');
+                        dotdot = out.w;
+                    }
+                }
+                _ => {
+                    // Real path element.
+                    // Add slash if needed
+                    if (rooted && out.w != 1) || (!rooted && out.w != 0) {
+                        out.append(b'/');
+                    }
+
+                    // Copy element
+                    while r < n && path.as_bytes()[r] != b'/' {
+                        out.append(path.as_bytes()[r]);
+                        r += 1;
+                    }
+                }
+            }
+        }
+
+        // Turn empty string into "."
+        if out.w == 0 {
+            return ".".to_string();
+        }
+
+        out.string()
+    }
+}
+
+/// split splits path immediately after the final slash,
+/// separating it into a directory and file name component.
+/// If there is no slash in path, split returns
+/// (path, "").
+///
+/// # Arguments
+/// * `path` - A string slice that holds the path to be split.
+///
+/// # Returns
+/// A tuple containing the directory and file name as string slices.
+///
+pub fn split(path: &str) -> (&str, &str) {
+    // Find the last occurrence of the separator
+    if let Some(i) = path.rfind(SLASH_SEPARATOR_STR) {
+        // Return the directory (up to and including the last separator) and the file name
+        return (&path[..i + 1], &path[i + 1..]);
+    }
+    // If no separator is found, return the whole path as the directory part and an empty file name
+    (path, "")
+}
+
+/// dir returns all but the last element of path,
+/// typically the path's directory. After dropping the final
+/// element, the path is cleaned. If the path is empty,
+/// dir returns ".".
+///
+/// # Arguments
+/// * `path` - A string slice that holds the path to be processed.
+///
+/// # Returns
+/// A `String` representing the directory part of the path.
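The `split`/`dir` pair mirrors Go's `path.Split`/`path.Dir`, except that when no separator is present the whole input stays on the directory side. A short usage sketch under POSIX separators (hypothetical, not part of the patch):

```rust
use rustfs_utils::path::{dir, split};

fn main() {
    // The directory half keeps its trailing separator; dir() then cleans it away.
    assert_eq!(split("a/b/c.txt"), ("a/b/", "c.txt"));
    assert_eq!(dir("a/b/c.txt"), "a/b");

    // No separator: everything lands on the left, so dir() returns the input itself.
    assert_eq!(split("c.txt"), ("c.txt", ""));
    assert_eq!(dir("c.txt"), "c.txt");

    // Empty input cleans to ".".
    assert_eq!(dir(""), ".");
}
```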
+/// +pub fn dir(path: &str) -> String { + let (a, _) = split(path); + clean(a) +} + +/// trim_etag removes surrounding double quotes from an ETag string. +/// +/// # Arguments +/// * `etag` - A string slice that holds the ETag to be trimmed. +/// +/// # Returns +/// A `String` representing the trimmed ETag. +/// +pub fn trim_etag(etag: &str) -> String { + etag.trim_matches('"').to_string() +} + +/// LazyBuf is a structure that efficiently builds a byte buffer +/// from a string by delaying the allocation of the buffer until +/// a modification is necessary. It allows appending bytes and +/// retrieving the current string representation. pub struct LazyBuf { s: String, buf: Option>, @@ -220,6 +738,13 @@ pub struct LazyBuf { } impl LazyBuf { + /// Creates a new LazyBuf with the given string. + /// + /// # Arguments + /// * `s` - A string to initialize the LazyBuf. + /// + /// # Returns + /// A new instance of LazyBuf. pub fn new(s: String) -> Self { LazyBuf { s, buf: None, w: 0 } } @@ -258,100 +783,84 @@ impl LazyBuf { } } -pub fn clean(path: &str) -> String { - if path.is_empty() { - return ".".to_string(); - } - - let rooted = path.starts_with('/'); - let n = path.len(); - let mut out = LazyBuf::new(path.to_string()); - let mut r = 0; - let mut dotdot = 0; - - if rooted { - out.append(b'/'); - r = 1; - dotdot = 1; - } - - while r < n { - match path.as_bytes()[r] { - b'/' => { - // Empty path element - r += 1; - } - b'.' if r + 1 == n || path.as_bytes()[r + 1] == b'/' => { - // . element - r += 1; - } - b'.' if path.as_bytes()[r + 1] == b'.' && (r + 2 == n || path.as_bytes()[r + 2] == b'/') => { - // .. element: remove to last / - r += 2; - - if out.w > dotdot { - // Can backtrack - out.w -= 1; - while out.w > dotdot && out.index(out.w) != b'/' { - out.w -= 1; - } - } else if !rooted { - // Cannot backtrack but not rooted, so append .. element. - if out.w > 0 { - out.append(b'/'); - } - out.append(b'.'); - out.append(b'.'); - dotdot = out.w; - } - } - _ => { - // Real path element. - // Add slash if needed - if (rooted && out.w != 1) || (!rooted && out.w != 0) { - out.append(b'/'); - } - - // Copy element - while r < n && path.as_bytes()[r] != b'/' { - out.append(path.as_bytes()[r]); - r += 1; - } - } - } - } - - // Turn empty string into "." - if out.w == 0 { - return ".".to_string(); - } - - out.string() -} - -pub fn split(path: &str) -> (&str, &str) { - // Find the last occurrence of the '/' character - if let Some(i) = path.rfind('/') { - // Return the directory (up to and including the last '/') and the file name - return (&path[..i + 1], &path[i + 1..]); - } - // If no '/' is found, return an empty string for the directory and the whole path as the file name - (path, "") -} - -pub fn dir(path: &str) -> String { - let (a, _) = split(path); - clean(a) -} - -pub fn trim_etag(etag: &str) -> String { - etag.trim_matches('"').to_string() -} - #[cfg(test)] mod tests { use super::*; + #[test] + fn test_path_join_buf() { + #[cfg(not(target_os = "windows"))] + { + // Basic joining + assert_eq!(path_join_buf(&["a", "b"]), "a/b"); + assert_eq!(path_join_buf(&["a/", "b"]), "a/b"); + + // Empty array input + assert_eq!(path_join_buf(&[]), "."); + + // Single element + assert_eq!(path_join_buf(&["a"]), "a"); + + // Multiple elements + assert_eq!(path_join_buf(&["a", "b", "c"]), "a/b/c"); + + // Elements with trailing separators + assert_eq!(path_join_buf(&["a/", "b/"]), "a/b/"); + + // Elements requiring cleaning (with "." 
and "..") + assert_eq!(path_join_buf(&["a", ".", "b"]), "a/b"); + assert_eq!(path_join_buf(&["a", "..", "b"]), "b"); + assert_eq!(path_join_buf(&["a", "b", ".."]), "a"); + + // Preservation of trailing slashes + assert_eq!(path_join_buf(&["a", "b/"]), "a/b/"); + assert_eq!(path_join_buf(&["a/", "b/"]), "a/b/"); + + // Empty elements + assert_eq!(path_join_buf(&["a", "", "b"]), "a/b"); + + // Double slashes (cleaning) + assert_eq!(path_join_buf(&["a//", "b"]), "a/b"); + } + #[cfg(target_os = "windows")] + { + // Basic joining + assert_eq!(path_join_buf(&["a", "b"]), "a\\b"); + assert_eq!(path_join_buf(&["a\\", "b"]), "a\\b"); + + // Empty array input + assert_eq!(path_join_buf(&[]), "."); + + // Single element + assert_eq!(path_join_buf(&["a"]), "a"); + + // Multiple elements + assert_eq!(path_join_buf(&["a", "b", "c"]), "a\\b\\c"); + + // Elements with trailing separators + assert_eq!(path_join_buf(&["a\\", "b\\"]), "a\\b\\"); + + // Elements requiring cleaning (with "." and "..") + assert_eq!(path_join_buf(&["a", ".", "b"]), "a\\b"); + assert_eq!(path_join_buf(&["a", "..", "b"]), "b"); + assert_eq!(path_join_buf(&["a", "b", ".."]), "a"); + + // Mixed separator handling + assert_eq!(path_join_buf(&["a/b", "c"]), "a\\b\\c"); + assert_eq!(path_join_buf(&["a\\", "b/c"]), "a\\b\\c"); + + // Preservation of trailing slashes + assert_eq!(path_join_buf(&["a", "b\\"]), "a\\b\\"); + assert_eq!(path_join_buf(&["a\\", "b\\"]), "a\\b\\"); + + // Empty elements + assert_eq!(path_join_buf(&["a", "", "b"]), "a\\b"); + + // Double slashes (cleaning) + assert_eq!(path_join_buf(&["a\\\\", "b"]), "a\\b"); + } + } + #[test] fn test_trim_etag() { // Test with quoted ETag @@ -383,43 +892,56 @@ mod tests { #[test] fn test_clean() { - assert_eq!(clean(""), "."); - assert_eq!(clean("abc"), "abc"); - assert_eq!(clean("abc/def"), "abc/def"); - assert_eq!(clean("a/b/c"), "a/b/c"); - assert_eq!(clean("."), "."); - assert_eq!(clean(".."), ".."); - assert_eq!(clean("../.."), "../.."); - assert_eq!(clean("../../abc"), "../../abc"); - assert_eq!(clean("/abc"), "/abc"); - assert_eq!(clean("/"), "/"); - assert_eq!(clean("abc/"), "abc"); - assert_eq!(clean("abc/def/"), "abc/def"); - assert_eq!(clean("a/b/c/"), "a/b/c"); - assert_eq!(clean("./"), "."); - assert_eq!(clean("../"), ".."); - assert_eq!(clean("../../"), "../.."); - assert_eq!(clean("/abc/"), "/abc"); - assert_eq!(clean("abc//def//ghi"), "abc/def/ghi"); - assert_eq!(clean("//abc"), "/abc"); - assert_eq!(clean("///abc"), "/abc"); - assert_eq!(clean("//abc//"), "/abc"); - assert_eq!(clean("abc//"), "abc"); - assert_eq!(clean("abc/./def"), "abc/def"); - assert_eq!(clean("/./abc/def"), "/abc/def"); - assert_eq!(clean("abc/."), "abc"); - assert_eq!(clean("abc/./../def"), "def"); - assert_eq!(clean("abc//./../def"), "def"); - assert_eq!(clean("abc/../../././../def"), "../../def"); + #[cfg(not(target_os = "windows"))] + { + assert_eq!(clean(""), "."); + assert_eq!(clean("abc"), "abc"); + assert_eq!(clean("abc/def"), "abc/def"); + assert_eq!(clean("a/b/c"), "a/b/c"); + assert_eq!(clean("."), "."); + assert_eq!(clean(".."), ".."); + assert_eq!(clean("../.."), "../.."); + assert_eq!(clean("../../abc"), "../../abc"); + assert_eq!(clean("/abc"), "/abc"); + assert_eq!(clean("/"), "/"); + assert_eq!(clean("abc/"), "abc"); + assert_eq!(clean("abc/def/"), "abc/def"); + assert_eq!(clean("a/b/c/"), "a/b/c"); + assert_eq!(clean("./"), "."); + assert_eq!(clean("../"), ".."); + assert_eq!(clean("../../"), "../.."); + assert_eq!(clean("/abc/"), "/abc"); + 
assert_eq!(clean("abc//def//ghi"), "abc/def/ghi"); + assert_eq!(clean("//abc"), "/abc"); + assert_eq!(clean("///abc"), "/abc"); + assert_eq!(clean("//abc//"), "/abc"); + assert_eq!(clean("abc//"), "abc"); + assert_eq!(clean("abc/./def"), "abc/def"); + assert_eq!(clean("/./abc/def"), "/abc/def"); + assert_eq!(clean("abc/."), "abc"); + assert_eq!(clean("abc/./../def"), "def"); + assert_eq!(clean("abc//./../def"), "def"); + assert_eq!(clean("abc/../../././../def"), "../../def"); - assert_eq!(clean("abc/def/ghi/../jkl"), "abc/def/jkl"); - assert_eq!(clean("abc/def/../ghi/../jkl"), "abc/jkl"); - assert_eq!(clean("abc/def/.."), "abc"); - assert_eq!(clean("abc/def/../.."), "."); - assert_eq!(clean("/abc/def/../.."), "/"); - assert_eq!(clean("abc/def/../../.."), ".."); - assert_eq!(clean("/abc/def/../../.."), "/"); - assert_eq!(clean("abc/def/../../../ghi/jkl/../../../mno"), "../../mno"); + assert_eq!(clean("abc/def/ghi/../jkl"), "abc/def/jkl"); + assert_eq!(clean("abc/def/../ghi/../jkl"), "abc/jkl"); + assert_eq!(clean("abc/def/.."), "abc"); + assert_eq!(clean("abc/def/../.."), "."); + assert_eq!(clean("/abc/def/../.."), "/"); + assert_eq!(clean("abc/def/../../.."), ".."); + assert_eq!(clean("/abc/def/../../.."), "/"); + assert_eq!(clean("abc/def/../../../ghi/jkl/../../../mno"), "../../mno"); + } + + #[cfg(target_os = "windows")] + { + assert_eq!(clean("a\\b\\..\\c"), "a\\c"); + assert_eq!(clean("a\\\\b"), "a\\b"); + assert_eq!(clean("C:\\"), "C:\\"); + assert_eq!(clean("C:\\a\\..\\b"), "C:\\b"); + assert_eq!(clean("C:a\\b\\..\\c"), "C:a\\c"); + assert_eq!(clean("\\\\server\\share\\a\\\\b"), "\\\\server\\share\\a\\b"); + } } #[test] diff --git a/rustfs/src/admin/handlers/bucket_meta.rs b/rustfs/src/admin/handlers/bucket_meta.rs index 013b79aa..5fc2d934 100644 --- a/rustfs/src/admin/handlers/bucket_meta.rs +++ b/rustfs/src/admin/handlers/bucket_meta.rs @@ -12,11 +12,6 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-use std::{
-    collections::HashMap,
-    io::{Cursor, Read as _, Write as _},
-};
-
 use crate::{
     admin::{auth::validate_admin_request, router::Operation},
     auth::{check_key_valid, get_session_token},
@@ -49,7 +44,7 @@ use rustfs_policy::policy::{
     BucketPolicy,
     action::{Action, AdminAction},
 };
-use rustfs_utils::path::{SLASH_SEPARATOR, path_join_buf};
+use rustfs_utils::path::{SLASH_SEPARATOR_STR, path_join_buf};
 use s3s::{
     Body, S3Request, S3Response, S3Result,
     dto::{
@@ -61,6 +56,10 @@ use s3s::{
 };
 use serde::Deserialize;
 use serde_urlencoded::from_bytes;
+use std::{
+    collections::HashMap,
+    io::{Cursor, Read as _, Write as _},
+};
 use time::OffsetDateTime;
 use tracing::warn;
 use zip::{ZipArchive, ZipWriter, write::SimpleFileOptions};
@@ -424,7 +423,7 @@ impl Operation for ImportBucketMetadata {
         // Extract bucket names
         let mut bucket_names = Vec::new();
         for (file_path, _) in &file_contents {
-            let file_path_split = file_path.split(SLASH_SEPARATOR).collect::<Vec<&str>>();
+            let file_path_split = file_path.split(SLASH_SEPARATOR_STR).collect::<Vec<&str>>();
 
             if file_path_split.len() < 2 {
                 warn!("file path is invalid: {}", file_path);
@@ -463,7 +462,7 @@
         // Second pass: process file contents
         for (file_path, content) in file_contents {
-            let file_path_split = file_path.split(SLASH_SEPARATOR).collect::<Vec<&str>>();
+            let file_path_split = file_path.split(SLASH_SEPARATOR_STR).collect::<Vec<&str>>();
 
             if file_path_split.len() < 2 {
                 warn!("file path is invalid: {}", file_path);

From 78b13f3ff2b18d7f00f9ad4157c25df087dd4a63 Mon Sep 17 00:00:00 2001
From: weisd
Date: Mon, 12 Jan 2026 11:19:09 +0800
Subject: [PATCH 14/17] fix: add delete prefix option support (#1471)

---
 rustfs/src/storage/options.rs | 6 ++++++
 1 file changed, 6 insertions(+)

diff --git a/rustfs/src/storage/options.rs b/rustfs/src/storage/options.rs
index 5aafa96d..1886ffe3 100644
--- a/rustfs/src/storage/options.rs
+++ b/rustfs/src/storage/options.rs
@@ -18,6 +18,7 @@ use rustfs_ecstore::error::Result;
 use rustfs_ecstore::error::StorageError;
 use rustfs_utils::http::AMZ_META_UNENCRYPTED_CONTENT_LENGTH;
 use rustfs_utils::http::AMZ_META_UNENCRYPTED_CONTENT_MD5;
+use rustfs_utils::http::RUSTFS_FORCE_DELETE;
 use s3s::header::X_AMZ_OBJECT_LOCK_MODE;
 use s3s::header::X_AMZ_OBJECT_LOCK_RETAIN_UNTIL_DATE;
@@ -77,6 +78,11 @@ pub async fn del_opts(
             StorageError::InvalidArgument(bucket.to_owned(), object.to_owned(), err.to_string())
         })?;
 
+    opts.delete_prefix = headers
+        .get(RUSTFS_FORCE_DELETE)
+        .map(|v| v.to_str().unwrap_or_default() == "true")
+        .unwrap_or_default();
+
     opts.version_id = {
         if is_dir_object(object) && vid.is_none() {
             Some(Uuid::nil().to_string())

From 29d86036b1c160caa689d724c7dafbf7d67e8919 Mon Sep 17 00:00:00 2001
From: yxrxy <1532529704@qq.com>
Date: Mon, 12 Jan 2026 11:42:07 +0800
Subject: [PATCH 15/17] feat: implement bucket quota system (#1461)

Signed-off-by: yxrxy <1532529704@qq.com>
Co-authored-by: loverustfs
---
 crates/common/src/metrics.rs               |  29 +-
 crates/config/src/constants/mod.rs         |   1 +
 crates/config/src/constants/quota.rs       |  26 +
 crates/config/src/lib.rs                   |   2 +
 crates/e2e_test/src/common.rs              |  15 +-
 crates/e2e_test/src/lib.rs                 |   4 +
 crates/e2e_test/src/quota_test.rs          | 798 +++++++++++++++++++++
 crates/ecstore/src/bucket/metadata.rs      |   5 +-
 crates/ecstore/src/bucket/quota/checker.rs | 195 +++++
 crates/ecstore/src/bucket/quota/mod.rs     | 134 +++-
 crates/ecstore/src/data_usage.rs           | 134 +++-
 crates/ecstore/src/set_disk.rs             |   4 +-
 rustfs/src/admin/handlers.rs               |   1 +
 rustfs/src/admin/handlers/quota.rs         | 485 +++++++++++++
rustfs/src/admin/mod.rs | 28 +- rustfs/src/error.rs | 24 + rustfs/src/storage/ecfs.rs | 121 +++- 17 files changed, 1964 insertions(+), 42 deletions(-) create mode 100644 crates/config/src/constants/quota.rs create mode 100644 crates/e2e_test/src/quota_test.rs create mode 100644 crates/ecstore/src/bucket/quota/checker.rs create mode 100644 rustfs/src/admin/handlers/quota.rs diff --git a/crates/common/src/metrics.rs b/crates/common/src/metrics.rs index f0fc031a..196e16c2 100644 --- a/crates/common/src/metrics.rs +++ b/crates/common/src/metrics.rs @@ -96,6 +96,11 @@ pub enum Metric { ApplyNonCurrent, HealAbandonedVersion, + // Quota metrics: + QuotaCheck, + QuotaViolation, + QuotaSync, + // START Trace metrics: StartTrace, ScanObject, // Scan object. All operations included. @@ -131,6 +136,9 @@ impl Metric { Self::CleanAbandoned => "clean_abandoned", Self::ApplyNonCurrent => "apply_non_current", Self::HealAbandonedVersion => "heal_abandoned_version", + Self::QuotaCheck => "quota_check", + Self::QuotaViolation => "quota_violation", + Self::QuotaSync => "quota_sync", Self::StartTrace => "start_trace", Self::ScanObject => "scan_object", Self::HealAbandonedObject => "heal_abandoned_object", @@ -163,15 +171,18 @@ impl Metric { 10 => Some(Self::CleanAbandoned), 11 => Some(Self::ApplyNonCurrent), 12 => Some(Self::HealAbandonedVersion), - 13 => Some(Self::StartTrace), - 14 => Some(Self::ScanObject), - 15 => Some(Self::HealAbandonedObject), - 16 => Some(Self::LastRealtime), - 17 => Some(Self::ScanFolder), - 18 => Some(Self::ScanCycle), - 19 => Some(Self::ScanBucketDrive), - 20 => Some(Self::CompactFolder), - 21 => Some(Self::Last), + 13 => Some(Self::QuotaCheck), + 14 => Some(Self::QuotaViolation), + 15 => Some(Self::QuotaSync), + 16 => Some(Self::StartTrace), + 17 => Some(Self::ScanObject), + 18 => Some(Self::HealAbandonedObject), + 19 => Some(Self::LastRealtime), + 20 => Some(Self::ScanFolder), + 21 => Some(Self::ScanCycle), + 22 => Some(Self::ScanBucketDrive), + 23 => Some(Self::CompactFolder), + 24 => Some(Self::Last), _ => None, } } diff --git a/crates/config/src/constants/mod.rs b/crates/config/src/constants/mod.rs index 49fa0080..a526c5ac 100644 --- a/crates/config/src/constants/mod.rs +++ b/crates/config/src/constants/mod.rs @@ -21,6 +21,7 @@ pub(crate) mod heal; pub(crate) mod object; pub(crate) mod profiler; pub(crate) mod protocols; +pub(crate) mod quota; pub(crate) mod runtime; pub(crate) mod targets; pub(crate) mod tls; diff --git a/crates/config/src/constants/quota.rs b/crates/config/src/constants/quota.rs new file mode 100644 index 00000000..90d98d60 --- /dev/null +++ b/crates/config/src/constants/quota.rs @@ -0,0 +1,26 @@ +// Copyright 2024 RustFS Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
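For the admin route constant declared just below, here is a sketch of how the `{bucket}` placeholder is expected to expand; the `replace`-based expansion is only an illustration, the real router may substitute path parameters differently:

```rust
// Mirrors the QUOTA_API_PATH constant declared below; the bucket name is an example.
const QUOTA_API_PATH: &str = "/rustfs/admin/v3/quota/{bucket}";

fn quota_path(bucket: &str) -> String {
    QUOTA_API_PATH.replace("{bucket}", bucket)
}

fn main() {
    assert_eq!(quota_path("photos"), "/rustfs/admin/v3/quota/photos");
}
```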
+ +pub const QUOTA_CONFIG_FILE: &str = "quota.json"; +pub const QUOTA_TYPE_HARD: &str = "HARD"; + +pub const QUOTA_EXCEEDED_ERROR_CODE: &str = "XRustfsQuotaExceeded"; +pub const QUOTA_INVALID_CONFIG_ERROR_CODE: &str = "InvalidArgument"; +pub const QUOTA_NOT_FOUND_ERROR_CODE: &str = "NoSuchBucket"; +pub const QUOTA_INTERNAL_ERROR_CODE: &str = "InternalError"; + +pub const QUOTA_API_PATH: &str = "/rustfs/admin/v3/quota/{bucket}"; + +pub const QUOTA_INVALID_TYPE_ERROR_MSG: &str = "Only HARD quota type is supported"; +pub const QUOTA_METADATA_SYSTEM_ERROR_MSG: &str = "Bucket metadata system not initialized"; diff --git a/crates/config/src/lib.rs b/crates/config/src/lib.rs index 4b6d42fb..5a8b6800 100644 --- a/crates/config/src/lib.rs +++ b/crates/config/src/lib.rs @@ -33,6 +33,8 @@ pub use constants::profiler::*; #[cfg(feature = "constants")] pub use constants::protocols::*; #[cfg(feature = "constants")] +pub use constants::quota::*; +#[cfg(feature = "constants")] pub use constants::runtime::*; #[cfg(feature = "constants")] pub use constants::targets::*; diff --git a/crates/e2e_test/src/common.rs b/crates/e2e_test/src/common.rs index 9b94005e..da70db83 100644 --- a/crates/e2e_test/src/common.rs +++ b/crates/e2e_test/src/common.rs @@ -176,12 +176,14 @@ impl RustFSTestEnvironment { /// Kill any existing RustFS processes pub async fn cleanup_existing_processes(&self) -> Result<(), Box> { info!("Cleaning up any existing RustFS processes"); - let output = Command::new("pkill").args(["-f", "rustfs"]).output(); + let binary_path = rustfs_binary_path(); + let binary_name = binary_path.to_string_lossy(); + let output = Command::new("pkill").args(["-f", &binary_name]).output(); if let Ok(output) = output && output.status.success() { - info!("Killed existing RustFS processes"); + info!("Killed existing RustFS processes: {}", binary_name); sleep(Duration::from_millis(1000)).await; } Ok(()) @@ -363,3 +365,12 @@ pub async fn awscurl_put( ) -> Result> { execute_awscurl(url, "PUT", Some(body), access_key, secret_key).await } + +/// Helper function for DELETE requests +pub async fn awscurl_delete( + url: &str, + access_key: &str, + secret_key: &str, +) -> Result> { + execute_awscurl(url, "DELETE", None, access_key, secret_key).await +} diff --git a/crates/e2e_test/src/lib.rs b/crates/e2e_test/src/lib.rs index b635afb7..0d5a8bbb 100644 --- a/crates/e2e_test/src/lib.rs +++ b/crates/e2e_test/src/lib.rs @@ -29,6 +29,10 @@ mod data_usage_test; #[cfg(test)] mod kms; +// Quota tests +#[cfg(test)] +mod quota_test; + #[cfg(test)] mod bucket_policy_check_test; diff --git a/crates/e2e_test/src/quota_test.rs b/crates/e2e_test/src/quota_test.rs new file mode 100644 index 00000000..f3c53622 --- /dev/null +++ b/crates/e2e_test/src/quota_test.rs @@ -0,0 +1,798 @@ +// Copyright 2024 RustFS Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
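Note the process-cleanup change in `common.rs` above: matching the full binary path instead of the bare substring "rustfs" keeps `pkill` from killing unrelated processes whose command lines merely mention "rustfs". A minimal sketch of that pattern (the path is an example; `rustfs_binary_path()` supplies the real one):

```rust
// Sketch of path-scoped process cleanup, as in cleanup_existing_processes above.
use std::process::Command;

fn kill_by_command_line(binary_path: &str) {
    // `pkill -f` matches the pattern against the full command line, so using the
    // absolute binary path avoids matching other commands that contain "rustfs".
    let _ = Command::new("pkill").args(["-f", binary_path]).output();
}

fn main() {
    kill_by_command_line("/tmp/rustfs-e2e/bin/rustfs");
}
```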
+
+use crate::common::{RustFSTestEnvironment, awscurl_delete, awscurl_get, awscurl_post, awscurl_put, init_logging};
+use aws_sdk_s3::Client;
+use serial_test::serial;
+use tracing::{debug, info};
+
+/// Test environment setup for quota tests
+pub struct QuotaTestEnv {
+    pub env: RustFSTestEnvironment,
+    pub client: Client,
+    pub bucket_name: String,
+}
+
+impl QuotaTestEnv {
+    pub async fn new() -> Result<Self, Box<dyn std::error::Error>> {
+        let bucket_name = format!("quota-test-{}", uuid::Uuid::new_v4());
+        let mut env = RustFSTestEnvironment::new().await?;
+        env.start_rustfs_server(vec![]).await?;
+        let client = env.create_s3_client();
+
+        Ok(Self {
+            env,
+            client,
+            bucket_name,
+        })
+    }
+
+    pub async fn create_bucket(&self) -> Result<(), Box<dyn std::error::Error>> {
+        self.env.create_test_bucket(&self.bucket_name).await?;
+        Ok(())
+    }
+
+    pub async fn cleanup_bucket(&self) -> Result<(), Box<dyn std::error::Error>> {
+        let objects = self.client.list_objects_v2().bucket(&self.bucket_name).send().await?;
+        for object in objects.contents() {
+            self.client
+                .delete_object()
+                .bucket(&self.bucket_name)
+                .key(object.key().unwrap_or_default())
+                .send()
+                .await?;
+        }
+        self.env.delete_test_bucket(&self.bucket_name).await?;
+        Ok(())
+    }
+
+    pub async fn set_bucket_quota(&self, quota_bytes: u64) -> Result<(), Box<dyn std::error::Error>> {
+        let url = format!("{}/rustfs/admin/v3/quota/{}", self.env.url, self.bucket_name);
+        let quota_config = serde_json::json!({
+            "quota": quota_bytes,
+            "quota_type": "HARD"
+        });
+
+        let response = awscurl_put(&url, &quota_config.to_string(), &self.env.access_key, &self.env.secret_key).await?;
+        if response.contains("error") {
+            Err(format!("Failed to set quota: {}", response).into())
+        } else {
+            Ok(())
+        }
+    }
+
+    pub async fn get_bucket_quota(&self) -> Result<Option<u64>, Box<dyn std::error::Error>> {
+        let url = format!("{}/rustfs/admin/v3/quota/{}", self.env.url, self.bucket_name);
+        let response = awscurl_get(&url, &self.env.access_key, &self.env.secret_key).await?;
+
+        if response.contains("error") {
+            Err(format!("Failed to get quota: {}", response).into())
+        } else {
+            let quota_info: serde_json::Value = serde_json::from_str(&response)?;
+            Ok(quota_info.get("quota").and_then(|v| v.as_u64()))
+        }
+    }
+
+    pub async fn clear_bucket_quota(&self) -> Result<(), Box<dyn std::error::Error>> {
+        let url = format!("{}/rustfs/admin/v3/quota/{}", self.env.url, self.bucket_name);
+        let response = awscurl_delete(&url, &self.env.access_key, &self.env.secret_key).await?;
+
+        if response.contains("error") {
+            Err(format!("Failed to clear quota: {}", response).into())
+        } else {
+            Ok(())
+        }
+    }
+
+    pub async fn get_bucket_quota_stats(&self) -> Result<serde_json::Value, Box<dyn std::error::Error>> {
+        let url = format!("{}/rustfs/admin/v3/quota-stats/{}", self.env.url, self.bucket_name);
+        let response = awscurl_get(&url, &self.env.access_key, &self.env.secret_key).await?;
+
+        if response.contains("error") {
+            Err(format!("Failed to get quota stats: {}", response).into())
+        } else {
+            Ok(serde_json::from_str(&response)?)
+        }
+    }
+
+    pub async fn check_bucket_quota(
+        &self,
+        operation_type: &str,
+        operation_size: u64,
+    ) -> Result<serde_json::Value, Box<dyn std::error::Error>> {
+        let url = format!("{}/rustfs/admin/v3/quota-check/{}", self.env.url, self.bucket_name);
+        let check_request = serde_json::json!({
+            "operation_type": operation_type,
+            "operation_size": operation_size
+        });
+
+        let response = awscurl_post(&url, &check_request.to_string(), &self.env.access_key, &self.env.secret_key).await?;
+
+        if response.contains("error") {
+            Err(format!("Failed to check quota: {}", response).into())
+        } else {
+            Ok(serde_json::from_str(&response)?)
+        }
+    }
+
+    pub async fn upload_object(&self, key: &str, size_bytes: usize) -> Result<(), Box<dyn std::error::Error>> {
+        let data = vec![0u8; size_bytes];
+        self.client
+            .put_object()
+            .bucket(&self.bucket_name)
+            .key(key)
+            .body(aws_sdk_s3::primitives::ByteStream::from(data))
+            .send()
+            .await?;
+        Ok(())
+    }
+
+    pub async fn object_exists(&self, key: &str) -> Result<bool, Box<dyn std::error::Error>> {
+        match self.client.head_object().bucket(&self.bucket_name).key(key).send().await {
+            Ok(_) => Ok(true),
+            Err(e) => {
+                // Check for any 404-related errors and return false instead of propagating
+                let error_str = e.to_string();
+                if error_str.contains("404") || error_str.contains("Not Found") || error_str.contains("NotFound") {
+                    Ok(false)
+                } else {
+                    // Also check the error code directly
+                    if let Some(service_err) = e.as_service_error()
+                        && service_err.is_not_found()
+                    {
+                        return Ok(false);
+                    }
+                    Err(e.into())
+                }
+            }
+        }
+    }
+
+    pub async fn get_bucket_usage(&self) -> Result<u64, Box<dyn std::error::Error>> {
+        let stats = self.get_bucket_quota_stats().await?;
+        Ok(stats.get("current_usage").and_then(|v| v.as_u64()).unwrap_or(0))
+    }
+
+    pub async fn set_bucket_quota_for(
+        &self,
+        bucket: &str,
+        quota_bytes: u64,
+    ) -> Result<(), Box<dyn std::error::Error>> {
+        let url = format!("{}/rustfs/admin/v3/quota/{}", self.env.url, bucket);
+        let quota_config = serde_json::json!({
+            "quota": quota_bytes,
+            "quota_type": "HARD"
+        });
+
+        let response = awscurl_put(&url, &quota_config.to_string(), &self.env.access_key, &self.env.secret_key).await?;
+        if response.contains("error") {
+            Err(format!("Failed to set quota: {}", response).into())
+        } else {
+            Ok(())
+        }
+    }
+
+    /// Get bucket quota statistics for specific bucket
+    pub async fn get_bucket_quota_stats_for(
+        &self,
+        bucket: &str,
+    ) -> Result<serde_json::Value, Box<dyn std::error::Error>> {
+        debug!("Getting quota stats for bucket: {}", bucket);
+
+        let url = format!("{}/rustfs/admin/v3/quota-stats/{}", self.env.url, bucket);
+        let response = awscurl_get(&url, &self.env.access_key, &self.env.secret_key).await?;
+
+        if response.contains("error") {
+            Err(format!("Failed to get quota stats: {}", response).into())
+        } else {
+            let stats: serde_json::Value = serde_json::from_str(&response)?;
+            Ok(stats)
+        }
+    }
+
+    /// Upload an object to specific bucket
+    pub async fn upload_object_to_bucket(
+        &self,
+        bucket: &str,
+        key: &str,
+        size_bytes: usize,
+    ) -> Result<(), Box<dyn std::error::Error>> {
+        debug!("Uploading object {} with size {} bytes to bucket {}", key, size_bytes, bucket);
+
+        let data = vec![0u8; size_bytes];
+
+        self.client
+            .put_object()
+            .bucket(bucket)
+            .key(key)
+            .body(aws_sdk_s3::primitives::ByteStream::from(data))
+            .send()
+            .await?;
+
+        info!("Successfully uploaded object: {} ({} bytes) to bucket: {}", key, size_bytes, bucket);
+        Ok(())
+    }
+}
+
+#[cfg(test)]
+mod integration_tests {
+    use super::*;
+
+    #[tokio::test]
+    #[serial]
+    async fn test_quota_basic_operations() -> Result<(), Box<dyn std::error::Error>> {
+        init_logging();
+        let env = QuotaTestEnv::new().await?;
+
+        // Create test bucket
+        env.create_bucket().await?;
+
+        // Set quota of 1MB
+        env.set_bucket_quota(1024 * 1024).await?;
+
+        // Verify quota is set
+        let quota = env.get_bucket_quota().await?;
+        assert_eq!(quota, Some(1024 * 1024));
+
+        // Upload a 512KB object (should succeed)
+        env.upload_object("test1.txt", 512 * 1024).await?;
+        assert!(env.object_exists("test1.txt").await?);
+
+        // Upload another 512KB object (should succeed, total 1MB)
+        env.upload_object("test2.txt", 512 * 1024).await?;
+        assert!(env.object_exists("test2.txt").await?);
+
+        // Try to upload 1KB more (should fail due to quota)
+        let upload_result =
env.upload_object("test3.txt", 1024).await; + assert!(upload_result.is_err()); + assert!(!env.object_exists("test3.txt").await?); + + // Clean up + env.clear_bucket_quota().await?; + env.cleanup_bucket().await?; + + Ok(()) + } + + #[tokio::test] + #[serial] + async fn test_quota_update_and_clear() -> Result<(), Box> { + init_logging(); + let env = QuotaTestEnv::new().await?; + + env.create_bucket().await?; + + // Set initial quota + env.set_bucket_quota(512 * 1024).await?; + assert_eq!(env.get_bucket_quota().await?, Some(512 * 1024)); + + // Update quota to larger size + env.set_bucket_quota(2 * 1024 * 1024).await?; + assert_eq!(env.get_bucket_quota().await?, Some(2 * 1024 * 1024)); + + // Upload 1MB object (should succeed with new quota) + env.upload_object("large_file.txt", 1024 * 1024).await?; + assert!(env.object_exists("large_file.txt").await?); + + // Clear quota + env.clear_bucket_quota().await?; + assert_eq!(env.get_bucket_quota().await?, None); + + // Upload another large object (should succeed with no quota) + env.upload_object("unlimited_file.txt", 5 * 1024 * 1024).await?; + assert!(env.object_exists("unlimited_file.txt").await?); + + env.cleanup_bucket().await?; + + Ok(()) + } + + #[tokio::test] + #[serial] + async fn test_quota_delete_operations() -> Result<(), Box> { + init_logging(); + let env = QuotaTestEnv::new().await?; + + env.create_bucket().await?; + + // Set quota of 1MB + env.set_bucket_quota(1024 * 1024).await?; + + // Fill up to quota limit + env.upload_object("file1.txt", 512 * 1024).await?; + env.upload_object("file2.txt", 512 * 1024).await?; + + // Delete one file + env.client + .delete_object() + .bucket(&env.bucket_name) + .key("file1.txt") + .send() + .await?; + + assert!(!env.object_exists("file1.txt").await?); + + // Now we should be able to upload again (quota freed up) + env.upload_object("file3.txt", 256 * 1024).await?; + assert!(env.object_exists("file3.txt").await?); + + env.cleanup_bucket().await?; + + Ok(()) + } + + #[tokio::test] + #[serial] + async fn test_quota_usage_tracking() -> Result<(), Box> { + init_logging(); + let env = QuotaTestEnv::new().await?; + + env.create_bucket().await?; + + // Set quota + env.set_bucket_quota(2 * 1024 * 1024).await?; + + // Upload some files + env.upload_object("file1.txt", 512 * 1024).await?; + env.upload_object("file2.txt", 256 * 1024).await?; + + // Check usage + let usage = env.get_bucket_usage().await?; + assert_eq!(usage, (512 + 256) * 1024); + + // Delete a file + env.client + .delete_object() + .bucket(&env.bucket_name) + .key("file1.txt") + .send() + .await?; + + // Check updated usage + let updated_usage = env.get_bucket_usage().await?; + assert_eq!(updated_usage, 256 * 1024); + + env.cleanup_bucket().await?; + + Ok(()) + } + + #[tokio::test] + #[serial] + async fn test_quota_statistics() -> Result<(), Box> { + init_logging(); + let env = QuotaTestEnv::new().await?; + + env.create_bucket().await?; + + // Set quota of 2MB + env.set_bucket_quota(2 * 1024 * 1024).await?; + + // Upload files to use 1.5MB + env.upload_object("file1.txt", 1024 * 1024).await?; + env.upload_object("file2.txt", 512 * 1024).await?; + + // Get detailed quota statistics + let stats = env.get_bucket_quota_stats().await?; + + assert_eq!(stats.get("bucket").unwrap().as_str().unwrap(), env.bucket_name); + assert_eq!(stats.get("quota_limit").unwrap().as_u64().unwrap(), 2 * 1024 * 1024); + assert_eq!(stats.get("current_usage").unwrap().as_u64().unwrap(), (1024 + 512) * 1024); + 
assert_eq!(stats.get("remaining_quota").unwrap().as_u64().unwrap(), 512 * 1024); + + let usage_percentage = stats.get("usage_percentage").unwrap().as_f64().unwrap(); + assert!((usage_percentage - 75.0).abs() < 0.1); + + env.cleanup_bucket().await?; + + Ok(()) + } + + #[tokio::test] + #[serial] + async fn test_quota_check_api() -> Result<(), Box> { + init_logging(); + let env = QuotaTestEnv::new().await?; + + env.create_bucket().await?; + + // Set quota of 1MB + env.set_bucket_quota(1024 * 1024).await?; + + // Upload 512KB file + env.upload_object("existing_file.txt", 512 * 1024).await?; + + // Check if we can upload another 512KB (should succeed, exactly fill quota) + let check_result = env.check_bucket_quota("PUT", 512 * 1024).await?; + assert!(check_result.get("allowed").unwrap().as_bool().unwrap()); + assert_eq!(check_result.get("remaining_quota").unwrap().as_u64().unwrap(), 0); + + // Note: we haven't actually uploaded the second file yet, so current_usage is still 512KB + // Check if we can upload 1KB (should succeed - we haven't used the full quota yet) + let check_result = env.check_bucket_quota("PUT", 1024).await?; + assert!(check_result.get("allowed").unwrap().as_bool().unwrap()); + assert_eq!(check_result.get("remaining_quota").unwrap().as_u64().unwrap(), 512 * 1024 - 1024); + + // Check if we can upload 600KB (should fail - would exceed quota) + let check_result = env.check_bucket_quota("PUT", 600 * 1024).await?; + assert!(!check_result.get("allowed").unwrap().as_bool().unwrap()); + + // Check delete operation (should always be allowed) + let check_result = env.check_bucket_quota("DELETE", 512 * 1024).await?; + assert!(check_result.get("allowed").unwrap().as_bool().unwrap()); + + env.cleanup_bucket().await?; + + Ok(()) + } + + #[tokio::test] + #[serial] + async fn test_quota_multiple_buckets() -> Result<(), Box> { + init_logging(); + let env = QuotaTestEnv::new().await?; + + // Create two buckets in the same environment + let bucket1 = format!("quota-test-{}-1", uuid::Uuid::new_v4()); + let bucket2 = format!("quota-test-{}-2", uuid::Uuid::new_v4()); + + env.env.create_test_bucket(&bucket1).await?; + env.env.create_test_bucket(&bucket2).await?; + + // Set different quotas for each bucket + env.set_bucket_quota_for(&bucket1, 1024 * 1024).await?; // 1MB + env.set_bucket_quota_for(&bucket2, 2 * 1024 * 1024).await?; // 2MB + + // Fill first bucket to quota + env.upload_object_to_bucket(&bucket1, "big_file.txt", 1024 * 1024).await?; + + // Should still be able to upload to second bucket + env.upload_object_to_bucket(&bucket2, "big_file.txt", 1024 * 1024).await?; + env.upload_object_to_bucket(&bucket2, "another_file.txt", 512 * 1024).await?; + + // Verify statistics are independent + let stats1 = env.get_bucket_quota_stats_for(&bucket1).await?; + let stats2 = env.get_bucket_quota_stats_for(&bucket2).await?; + + assert_eq!(stats1.get("current_usage").unwrap().as_u64().unwrap(), 1024 * 1024); + assert_eq!(stats2.get("current_usage").unwrap().as_u64().unwrap(), (1024 + 512) * 1024); + + // Clean up + env.env.delete_test_bucket(&bucket1).await?; + env.env.delete_test_bucket(&bucket2).await?; + + Ok(()) + } + + #[tokio::test] + #[serial] + async fn test_quota_error_handling() -> Result<(), Box> { + init_logging(); + let env = QuotaTestEnv::new().await?; + + env.create_bucket().await?; + + // Test invalid quota type + let url = format!("{}/rustfs/admin/v3/quota/{}", env.env.url, env.bucket_name); + + let invalid_config = serde_json::json!({ + "quota": 1024, + "quota_type": "SOFT" // Invalid 
type
+        });
+
+        let response = awscurl_put(&url, &invalid_config.to_string(), &env.env.access_key, &env.env.secret_key).await;
+        assert!(response.is_err());
+        let error_msg = response.unwrap_err().to_string();
+        assert!(error_msg.contains("InvalidArgument"));
+
+        // Test operations on non-existent bucket
+        let url = format!("{}/rustfs/admin/v3/quota/non-existent-bucket", env.env.url);
+        let response = awscurl_get(&url, &env.env.access_key, &env.env.secret_key).await;
+        assert!(response.is_err());
+        let error_msg = response.unwrap_err().to_string();
+        assert!(error_msg.contains("NoSuchBucket"));
+
+        env.cleanup_bucket().await?;
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    #[serial]
+    async fn test_quota_http_endpoints() -> Result<(), Box<dyn std::error::Error>> {
+        init_logging();
+        let env = QuotaTestEnv::new().await?;
+
+        env.create_bucket().await?;
+
+        // Test 1: GET quota for bucket without quota config
+        let url = format!("{}/rustfs/admin/v3/quota/{}", env.env.url, env.bucket_name);
+        let response = awscurl_get(&url, &env.env.access_key, &env.env.secret_key).await?;
+        assert!(response.contains("quota") && response.contains("null"));
+
+        // Test 2: PUT quota - valid config
+        let quota_config = serde_json::json!({
+            "quota": 1048576,
+            "quota_type": "HARD"
+        });
+        let response = awscurl_put(&url, &quota_config.to_string(), &env.env.access_key, &env.env.secret_key).await?;
+        assert!(response.contains("success") || !response.contains("error"));
+
+        // Test 3: GET quota after setting
+        let response = awscurl_get(&url, &env.env.access_key, &env.env.secret_key).await?;
+        assert!(response.contains("1048576"));
+
+        // Test 4: GET quota stats
+        let stats_url = format!("{}/rustfs/admin/v3/quota-stats/{}", env.env.url, env.bucket_name);
+        let response = awscurl_get(&stats_url, &env.env.access_key, &env.env.secret_key).await?;
+        assert!(response.contains("quota_limit") && response.contains("current_usage"));
+
+        // Test 5: POST quota check
+        let check_url = format!("{}/rustfs/admin/v3/quota-check/{}", env.env.url, env.bucket_name);
+        let check_request = serde_json::json!({
+            "operation_type": "PUT",
+            "operation_size": 1024
+        });
+        let response = awscurl_post(&check_url, &check_request.to_string(), &env.env.access_key, &env.env.secret_key).await?;
+        assert!(response.contains("allowed"));
+
+        // Test 6: DELETE quota
+        let response = awscurl_delete(&url, &env.env.access_key, &env.env.secret_key).await?;
+        assert!(!response.contains("error"));
+
+        // Test 7: GET quota after deletion
+        let response = awscurl_get(&url, &env.env.access_key, &env.env.secret_key).await?;
+        assert!(response.contains("quota") && response.contains("null"));
+
+        // Test 8: Invalid quota type
+        let invalid_config = serde_json::json!({
+            "quota": 1024,
+            "quota_type": "SOFT"
+        });
+        let response = awscurl_put(&url, &invalid_config.to_string(), &env.env.access_key, &env.env.secret_key).await;
+        assert!(response.is_err());
+        let error_msg = response.unwrap_err().to_string();
+        assert!(error_msg.contains("InvalidArgument"));
+
+        env.cleanup_bucket().await?;
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    #[serial]
+    async fn test_quota_copy_operations() -> Result<(), Box<dyn std::error::Error>> {
+        init_logging();
+        let env = QuotaTestEnv::new().await?;
+
+        env.create_bucket().await?;
+
+        // Set quota of 2MB
+        env.set_bucket_quota(2 * 1024 * 1024).await?;
+
+        // Upload initial file
+        env.upload_object("original.txt", 1024 * 1024).await?;
+
+        // Copy file - should succeed (1MB each, total 2MB)
+        env.client
+            .copy_object()
+            .bucket(&env.bucket_name)
+            .key("copy1.txt")
+            .copy_source(format!("{}/{}",
env.bucket_name, "original.txt"))
+            .send()
+            .await?;
+
+        assert!(env.object_exists("copy1.txt").await?);
+
+        // Try to copy again - should fail (another 1MB copy would bring the total to 3MB > 2MB quota)
+        let copy_result = env
+            .client
+            .copy_object()
+            .bucket(&env.bucket_name)
+            .key("copy2.txt")
+            .copy_source(format!("{}/{}", env.bucket_name, "original.txt"))
+            .send()
+            .await;
+
+        assert!(copy_result.is_err());
+        assert!(!env.object_exists("copy2.txt").await?);
+
+        env.cleanup_bucket().await?;
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    #[serial]
+    async fn test_quota_batch_delete() -> Result<(), Box<dyn std::error::Error>> {
+        init_logging();
+        let env = QuotaTestEnv::new().await?;
+
+        env.create_bucket().await?;
+
+        // Set quota of 2MB
+        env.set_bucket_quota(2 * 1024 * 1024).await?;
+
+        // Upload files to fill quota
+        env.upload_object("file1.txt", 1024 * 1024).await?;
+        env.upload_object("file2.txt", 1024 * 1024).await?;
+
+        // Verify quota is full
+        let upload_result = env.upload_object("file3.txt", 1024).await;
+        assert!(upload_result.is_err());
+
+        // Delete multiple objects using batch delete
+        let objects = vec![
+            aws_sdk_s3::types::ObjectIdentifier::builder()
+                .key("file1.txt")
+                .build()
+                .unwrap(),
+            aws_sdk_s3::types::ObjectIdentifier::builder()
+                .key("file2.txt")
+                .build()
+                .unwrap(),
+        ];
+
+        let delete_result = env
+            .client
+            .delete_objects()
+            .bucket(&env.bucket_name)
+            .delete(
+                aws_sdk_s3::types::Delete::builder()
+                    .set_objects(Some(objects))
+                    .quiet(true)
+                    .build()
+                    .unwrap(),
+            )
+            .send()
+            .await?;
+
+        assert_eq!(delete_result.deleted().len(), 2);
+
+        // Now should be able to upload again (quota freed up)
+        env.upload_object("file3.txt", 256 * 1024).await?;
+        assert!(env.object_exists("file3.txt").await?);
+
+        env.cleanup_bucket().await?;
+
+        Ok(())
+    }
+
+    #[tokio::test]
+    #[serial]
+    async fn test_quota_multipart_upload() -> Result<(), Box<dyn std::error::Error>> {
+        init_logging();
+        let env = QuotaTestEnv::new().await?;
+
+        env.create_bucket().await?;
+
+        // Set quota of 10MB
+        env.set_bucket_quota(10 * 1024 * 1024).await?;
+
+        let key = "multipart_test.txt";
+        let part_size = 5 * 1024 * 1024; // 5MB: the S3 minimum for every part except the last
+
+        // Test 1: Multipart upload within quota (single 5MB part)
+        let create_result = env
+            .client
+            .create_multipart_upload()
+            .bucket(&env.bucket_name)
+            .key(key)
+            .send()
+            .await?;
+
+        let upload_id = create_result.upload_id().unwrap();
+
+        // Upload a single 5MB part (the size minimum does not apply to the last part,
+        // which here is the only part)
+        let part_data = vec![1u8; part_size];
+        let part_result = env
+            .client
+            .upload_part()
+            .bucket(&env.bucket_name)
+            .key(key)
+            .upload_id(upload_id)
+            .part_number(1)
+            .body(aws_sdk_s3::primitives::ByteStream::from(part_data))
+            .send()
+            .await?;
+
+        let uploaded_parts = vec![
+            aws_sdk_s3::types::CompletedPart::builder()
+                .part_number(1)
+                .e_tag(part_result.e_tag().unwrap())
+                .build(),
+        ];
+
+        env.client
+            .complete_multipart_upload()
+            .bucket(&env.bucket_name)
+            .key(key)
+            .upload_id(upload_id)
+            .multipart_upload(
+                aws_sdk_s3::types::CompletedMultipartUpload::builder()
+                    .set_parts(Some(uploaded_parts))
+                    .build(),
+            )
+            .send()
+            .await?;
+
+        assert!(env.object_exists(key).await?);
+
+        // Test 2: Multipart upload exceeds quota (should fail)
+        // Upload 6MB filler (total now: 5MB + 6MB = 11MB > 10MB quota)
+        let upload_filler = env.upload_object("filler.txt", 6 * 1024 * 1024).await;
+        // This should fail due to quota
+        assert!(upload_filler.is_err());
+
+        // Verify filler doesn't exist
+        assert!(!env.object_exists("filler.txt").await?);
+
+ // Now try a multipart upload that exceeds quota + // Current usage: 5MB (from Test 1), quota: 10MB + // Trying to upload 6MB via multipart → should fail + + let create_result2 = env + .client + .create_multipart_upload() + .bucket(&env.bucket_name) + .key("over_quota.txt") + .send() + .await?; + + let upload_id2 = create_result2.upload_id().unwrap(); + + let mut uploaded_parts2 = vec![]; + for part_num in 1..=2 { + let part_data = vec![part_num as u8; part_size]; + let part_result = env + .client + .upload_part() + .bucket(&env.bucket_name) + .key("over_quota.txt") + .upload_id(upload_id2) + .part_number(part_num) + .body(aws_sdk_s3::primitives::ByteStream::from(part_data)) + .send() + .await?; + + uploaded_parts2.push( + aws_sdk_s3::types::CompletedPart::builder() + .part_number(part_num) + .e_tag(part_result.e_tag().unwrap()) + .build(), + ); + } + + let complete_result = env + .client + .complete_multipart_upload() + .bucket(&env.bucket_name) + .key("over_quota.txt") + .upload_id(upload_id2) + .multipart_upload( + aws_sdk_s3::types::CompletedMultipartUpload::builder() + .set_parts(Some(uploaded_parts2)) + .build(), + ) + .send() + .await; + + assert!(complete_result.is_err()); + assert!(!env.object_exists("over_quota.txt").await?); + + env.cleanup_bucket().await?; + + Ok(()) + } +} diff --git a/crates/ecstore/src/bucket/metadata.rs b/crates/ecstore/src/bucket/metadata.rs index 87884300..5c75571e 100644 --- a/crates/ecstore/src/bucket/metadata.rs +++ b/crates/ecstore/src/bucket/metadata.rs @@ -355,7 +355,7 @@ impl BucketMetadata { self.tagging_config = Some(deserialize::(&self.tagging_config_xml)?); } if !self.quota_config_json.is_empty() { - self.quota_config = Some(BucketQuota::unmarshal(&self.quota_config_json)?); + self.quota_config = Some(serde_json::from_slice(&self.quota_config_json)?); } if !self.replication_config_xml.is_empty() { self.replication_config = Some(deserialize::(&self.replication_config_xml)?); @@ -487,7 +487,8 @@ mod test { bm.tagging_config_updated_at = OffsetDateTime::now_utc(); // Add quota configuration - let quota_json = r#"{"quota":1073741824,"quotaType":"hard"}"#; // 1GB quota + let quota_json = + r#"{"quota":1073741824,"quota_type":"Hard","created_at":"2024-01-01T00:00:00Z","updated_at":"2024-01-01T00:00:00Z"}"#; // 1GB quota bm.quota_config_json = quota_json.as_bytes().to_vec(); bm.quota_config_updated_at = OffsetDateTime::now_utc(); diff --git a/crates/ecstore/src/bucket/quota/checker.rs b/crates/ecstore/src/bucket/quota/checker.rs new file mode 100644 index 00000000..2a7f2858 --- /dev/null +++ b/crates/ecstore/src/bucket/quota/checker.rs @@ -0,0 +1,195 @@ +// Copyright 2024 RustFS Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
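+
+//! Bucket quota checker.
+//!
+//! Validates object operations against the bucket's configured quota using
+//! real-time usage from the in-memory data-usage cache.
+//!
+//! A minimal usage sketch (assumes the global bucket metadata system has been
+//! initialized; the names match the items defined below):
+//!
+//! ```ignore
+//! let checker = QuotaChecker::new(metadata_sys.clone());
+//! let result = checker
+//!     .check_quota("my-bucket", QuotaOperation::PutObject, 1024)
+//!     .await?;
+//! if !result.allowed {
+//!     // reject the write before any data is stored
+//! }
+//! ```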
+
+use super::{BucketQuota, QuotaCheckResult, QuotaError, QuotaOperation};
+use crate::bucket::metadata_sys::{BucketMetadataSys, update};
+use crate::data_usage::get_bucket_usage_memory;
+use rustfs_common::metrics::Metric;
+use rustfs_config::QUOTA_CONFIG_FILE;
+use std::sync::Arc;
+use std::time::Instant;
+use tokio::sync::RwLock;
+use tracing::{debug, warn};
+
+pub struct QuotaChecker {
+    metadata_sys: Arc<RwLock<BucketMetadataSys>>,
+}
+
+impl QuotaChecker {
+    pub fn new(metadata_sys: Arc<RwLock<BucketMetadataSys>>) -> Self {
+        Self { metadata_sys }
+    }
+
+    pub async fn check_quota(
+        &self,
+        bucket: &str,
+        operation: QuotaOperation,
+        operation_size: u64,
+    ) -> Result<QuotaCheckResult, QuotaError> {
+        let start_time = Instant::now();
+        let quota_config = self.get_quota_config(bucket).await?;
+
+        // If no quota limit is set, allow operation
+        let quota_limit = match quota_config.quota {
+            None => {
+                let current_usage = self.get_real_time_usage(bucket).await?;
+                return Ok(QuotaCheckResult {
+                    allowed: true,
+                    current_usage,
+                    quota_limit: None,
+                    operation_size,
+                    remaining: None,
+                });
+            }
+            Some(q) => q,
+        };
+
+        let current_usage = self.get_real_time_usage(bucket).await?;
+
+        let expected_usage = match operation {
+            QuotaOperation::PutObject | QuotaOperation::CopyObject => current_usage + operation_size,
+            QuotaOperation::DeleteObject => current_usage.saturating_sub(operation_size),
+        };
+
+        let allowed = match operation {
+            QuotaOperation::PutObject | QuotaOperation::CopyObject => {
+                quota_config.check_operation_allowed(current_usage, operation_size)
+            }
+            QuotaOperation::DeleteObject => true,
+        };
+
+        let remaining = if quota_limit >= expected_usage {
+            Some(quota_limit - expected_usage)
+        } else {
+            Some(0)
+        };
+
+        if !allowed {
+            warn!(
+                "Quota exceeded for bucket: {}, current: {}, limit: {}, attempted: {}",
+                bucket, current_usage, quota_limit, operation_size
+            );
+        }
+
+        let result = QuotaCheckResult {
+            allowed,
+            current_usage,
+            quota_limit: Some(quota_limit),
+            operation_size,
+            remaining,
+        };
+
+        let duration = start_time.elapsed();
+        rustfs_common::metrics::Metrics::inc_time(Metric::QuotaCheck, duration).await;
+        if !allowed {
+            rustfs_common::metrics::Metrics::inc_time(Metric::QuotaViolation, duration).await;
+        }
+
+        Ok(result)
+    }
+
+    pub async fn get_quota_config(&self, bucket: &str) -> Result<BucketQuota, QuotaError> {
+        let meta = self
+            .metadata_sys
+            .read()
+            .await
+            .get(bucket)
+            .await
+            .map_err(QuotaError::StorageError)?;
+
+        if meta.quota_config_json.is_empty() {
+            debug!("No quota config found for bucket: {}, using default", bucket);
+            return Ok(BucketQuota::new(None));
+        }
+
+        let quota: BucketQuota = serde_json::from_slice(&meta.quota_config_json).map_err(|e| QuotaError::InvalidConfig {
+            reason: format!("Failed to parse quota config: {}", e),
+        })?;
+
+        Ok(quota)
+    }
+
+    pub async fn set_quota_config(&mut self, bucket: &str, quota: BucketQuota) -> Result<(), QuotaError> {
+        let json_data = serde_json::to_vec(&quota).map_err(|e| QuotaError::InvalidConfig {
+            reason: format!("Failed to serialize quota config: {}", e),
+        })?;
+        let start_time = Instant::now();
+
+        update(bucket, QUOTA_CONFIG_FILE, json_data)
+            .await
+            .map_err(QuotaError::StorageError)?;
+
+        rustfs_common::metrics::Metrics::inc_time(Metric::QuotaSync, start_time.elapsed()).await;
+        Ok(())
+    }
+
+    pub async fn get_quota_stats(&self, bucket: &str) -> Result<(BucketQuota, Option<u64>), QuotaError> {
+        // If bucket doesn't exist, return ConfigNotFound error
+        if !self.bucket_exists(bucket).await {
+            return Err(QuotaError::ConfigNotFound {
+                bucket: bucket.to_string(),
+            });
+        }
+
+        let quota =
self.get_quota_config(bucket).await?; + let current_usage = self.get_real_time_usage(bucket).await.unwrap_or(0); + + Ok((quota, Some(current_usage))) + } + + pub async fn bucket_exists(&self, bucket: &str) -> bool { + self.metadata_sys.read().await.get(bucket).await.is_ok() + } + + pub async fn get_real_time_usage(&self, bucket: &str) -> Result { + Ok(get_bucket_usage_memory(bucket).await.unwrap_or(0)) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[tokio::test] + async fn test_quota_check_no_limit() { + let result = QuotaCheckResult { + allowed: true, + current_usage: 0, + quota_limit: None, + operation_size: 1024, + remaining: None, + }; + + assert!(result.allowed); + assert_eq!(result.quota_limit, None); + } + + #[tokio::test] + async fn test_quota_check_within_limit() { + let quota = BucketQuota::new(Some(2048)); // 2KB + + // Current usage 512, trying to add 1024 + let allowed = quota.check_operation_allowed(512, 1024); + assert!(allowed); + } + + #[tokio::test] + async fn test_quota_check_exceeds_limit() { + let quota = BucketQuota::new(Some(1024)); // 1KB + + // Current usage 512, trying to add 1024 + let allowed = quota.check_operation_allowed(512, 1024); + assert!(!allowed); + } +} diff --git a/crates/ecstore/src/bucket/quota/mod.rs b/crates/ecstore/src/bucket/quota/mod.rs index b9e778fd..3bf00a05 100644 --- a/crates/ecstore/src/bucket/quota/mod.rs +++ b/crates/ecstore/src/bucket/quota/mod.rs @@ -12,36 +12,37 @@ // See the License for the specific language governing permissions and // limitations under the License. +pub mod checker; + use crate::error::Result; use rmp_serde::Serializer as rmpSerializer; +use rustfs_config::{ + QUOTA_API_PATH, QUOTA_EXCEEDED_ERROR_CODE, QUOTA_INTERNAL_ERROR_CODE, QUOTA_INVALID_CONFIG_ERROR_CODE, + QUOTA_NOT_FOUND_ERROR_CODE, +}; use serde::{Deserialize, Serialize}; +use thiserror::Error; +use time::OffsetDateTime; -// Define the QuotaType enum -#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize, Default)] pub enum QuotaType { + /// Hard quota: reject immediately when exceeded + #[default] Hard, } -// Define the BucketQuota structure -#[derive(Debug, Deserialize, Serialize, Default, Clone)] +#[derive(Debug, Deserialize, Serialize, Default, Clone, PartialEq)] pub struct BucketQuota { - quota: Option, // Use Option to represent optional fields - - size: u64, - - rate: u64, - - requests: u64, - - quota_type: Option, + pub quota: Option, + pub quota_type: QuotaType, + /// Timestamp when this quota configuration was set (for audit purposes) + pub created_at: Option, } impl BucketQuota { pub fn marshal_msg(&self) -> Result> { let mut buf = Vec::new(); - self.serialize(&mut rmpSerializer::new(&mut buf).with_struct_map())?; - Ok(buf) } @@ -49,4 +50,107 @@ impl BucketQuota { let t: BucketQuota = rmp_serde::from_slice(buf)?; Ok(t) } + + pub fn new(quota: Option) -> Self { + let now = OffsetDateTime::now_utc(); + Self { + quota, + quota_type: QuotaType::Hard, + created_at: Some(now), + } + } + + pub fn get_quota_limit(&self) -> Option { + self.quota + } + + pub fn check_operation_allowed(&self, current_usage: u64, operation_size: u64) -> bool { + if let Some(quota_limit) = self.quota { + current_usage.saturating_add(operation_size) <= quota_limit + } else { + true // No quota limit + } + } + + pub fn get_remaining_quota(&self, current_usage: u64) -> Option { + self.quota.map(|limit| limit.saturating_sub(current_usage)) + } +} + +#[derive(Debug)] +pub struct QuotaCheckResult { + 
pub allowed: bool, + pub current_usage: u64, + /// quota_limit: None means unlimited + pub quota_limit: Option, + pub operation_size: u64, + pub remaining: Option, +} + +#[derive(Debug)] +pub enum QuotaOperation { + PutObject, + CopyObject, + DeleteObject, +} + +#[derive(Debug, Error)] +pub enum QuotaError { + #[error("Bucket quota exceeded: current={current}, limit={limit}, operation={operation}")] + QuotaExceeded { current: u64, limit: u64, operation: u64 }, + #[error("Quota configuration not found for bucket: {bucket}")] + ConfigNotFound { bucket: String }, + #[error("Invalid quota configuration: {reason}")] + InvalidConfig { reason: String }, + #[error("Storage error: {0}")] + StorageError(#[from] crate::error::StorageError), +} + +#[derive(Debug, Serialize)] +pub struct QuotaErrorResponse { + #[serde(rename = "Code")] + pub code: String, + #[serde(rename = "Message")] + pub message: String, + #[serde(rename = "Resource")] + pub resource: String, + #[serde(rename = "RequestId")] + pub request_id: String, + #[serde(rename = "HostId")] + pub host_id: String, +} + +impl QuotaErrorResponse { + pub fn new(quota_error: &QuotaError, request_id: &str, host_id: &str) -> Self { + match quota_error { + QuotaError::QuotaExceeded { .. } => Self { + code: QUOTA_EXCEEDED_ERROR_CODE.to_string(), + message: quota_error.to_string(), + resource: QUOTA_API_PATH.to_string(), + request_id: request_id.to_string(), + host_id: host_id.to_string(), + }, + QuotaError::ConfigNotFound { .. } => Self { + code: QUOTA_NOT_FOUND_ERROR_CODE.to_string(), + message: quota_error.to_string(), + resource: QUOTA_API_PATH.to_string(), + request_id: request_id.to_string(), + host_id: host_id.to_string(), + }, + QuotaError::InvalidConfig { .. } => Self { + code: QUOTA_INVALID_CONFIG_ERROR_CODE.to_string(), + message: quota_error.to_string(), + resource: QUOTA_API_PATH.to_string(), + request_id: request_id.to_string(), + host_id: host_id.to_string(), + }, + QuotaError::StorageError(_) => Self { + code: QUOTA_INTERNAL_ERROR_CODE.to_string(), + message: quota_error.to_string(), + resource: QUOTA_API_PATH.to_string(), + request_id: request_id.to_string(), + host_id: host_id.to_string(), + }, + } + } } diff --git a/crates/ecstore/src/data_usage.rs b/crates/ecstore/src/data_usage.rs index df3ffede..bd434855 100644 --- a/crates/ecstore/src/data_usage.rs +++ b/crates/ecstore/src/data_usage.rs @@ -15,8 +15,10 @@ use std::{ collections::{HashMap, hash_map::Entry}, sync::Arc, - time::SystemTime, + time::{Duration, SystemTime}, }; +use tokio::sync::RwLock; +use tracing::debug; pub mod local_snapshot; pub use local_snapshot::{ @@ -32,6 +34,7 @@ use rustfs_common::data_usage::{ BucketTargetUsageInfo, BucketUsageInfo, DataUsageCache, DataUsageEntry, DataUsageInfo, DiskUsageStatus, SizeSummary, }; use rustfs_utils::path::SLASH_SEPARATOR_STR; +use std::sync::OnceLock; use tokio::fs; use tracing::{error, info, warn}; @@ -42,6 +45,21 @@ pub const DATA_USAGE_ROOT: &str = SLASH_SEPARATOR_STR; const DATA_USAGE_OBJ_NAME: &str = ".usage.json"; const DATA_USAGE_BLOOM_NAME: &str = ".bloomcycle.bin"; pub const DATA_USAGE_CACHE_NAME: &str = ".usage-cache.bin"; +const DATA_USAGE_CACHE_TTL_SECS: u64 = 30; + +type UsageMemoryCache = Arc>>; +type CacheUpdating = Arc>; + +static USAGE_MEMORY_CACHE: OnceLock = OnceLock::new(); +static USAGE_CACHE_UPDATING: OnceLock = OnceLock::new(); + +fn memory_cache() -> &'static UsageMemoryCache { + USAGE_MEMORY_CACHE.get_or_init(|| Arc::new(RwLock::new(HashMap::new()))) +} + +fn cache_updating() -> &'static CacheUpdating { 
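+    // Lazily initialize the shared "update in progress" flag; OnceLock's
+    // get_or_init makes the first initialization race-free across tasks.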
+ USAGE_CACHE_UPDATING.get_or_init(|| Arc::new(RwLock::new(false))) +} // Data usage storage paths lazy_static::lazy_static! { @@ -364,8 +382,120 @@ pub async fn compute_bucket_usage(store: Arc, bucket_name: &str) -> Res Ok(usage) } +/// Fast in-memory increment for immediate quota consistency +pub async fn increment_bucket_usage_memory(bucket: &str, size_increment: u64) { + let mut cache = memory_cache().write().await; + let current = cache.entry(bucket.to_string()).or_insert_with(|| (0, SystemTime::now())); + current.0 += size_increment; + current.1 = SystemTime::now(); +} + +/// Fast in-memory decrement for immediate quota consistency +pub async fn decrement_bucket_usage_memory(bucket: &str, size_decrement: u64) { + let mut cache = memory_cache().write().await; + if let Some(current) = cache.get_mut(bucket) { + current.0 = current.0.saturating_sub(size_decrement); + current.1 = SystemTime::now(); + } +} + +/// Get bucket usage from in-memory cache +pub async fn get_bucket_usage_memory(bucket: &str) -> Option { + update_usage_cache_if_needed().await; + + let cache = memory_cache().read().await; + cache.get(bucket).map(|(usage, _)| *usage) +} + +async fn update_usage_cache_if_needed() { + let ttl = Duration::from_secs(DATA_USAGE_CACHE_TTL_SECS); + let double_ttl = ttl * 2; + let now = SystemTime::now(); + + let cache = memory_cache().read().await; + let earliest_timestamp = cache.values().map(|(_, ts)| *ts).min(); + drop(cache); + + let age = match earliest_timestamp { + Some(ts) => now.duration_since(ts).unwrap_or_default(), + None => double_ttl, + }; + + if age < ttl { + return; + } + + let mut updating = cache_updating().write().await; + if age < double_ttl { + if *updating { + return; + } + *updating = true; + drop(updating); + + let cache_clone = (*memory_cache()).clone(); + let updating_clone = (*cache_updating()).clone(); + tokio::spawn(async move { + if let Some(store) = crate::global::GLOBAL_OBJECT_API.get() + && let Ok(data_usage_info) = load_data_usage_from_backend(store.clone()).await + { + let mut cache = cache_clone.write().await; + for (bucket_name, bucket_usage) in data_usage_info.buckets_usage.iter() { + cache.insert(bucket_name.clone(), (bucket_usage.size, SystemTime::now())); + } + } + let mut updating = updating_clone.write().await; + *updating = false; + }); + return; + } + + for retry in 0..10 { + if !*updating { + break; + } + drop(updating); + let delay = Duration::from_millis(1 << retry); + tokio::time::sleep(delay).await; + updating = cache_updating().write().await; + } + + *updating = true; + drop(updating); + + if let Some(store) = crate::global::GLOBAL_OBJECT_API.get() + && let Ok(data_usage_info) = load_data_usage_from_backend(store.clone()).await + { + let mut cache = memory_cache().write().await; + for (bucket_name, bucket_usage) in data_usage_info.buckets_usage.iter() { + cache.insert(bucket_name.clone(), (bucket_usage.size, SystemTime::now())); + } + } + + let mut updating = cache_updating().write().await; + *updating = false; +} + +/// Sync memory cache with backend data (called by scanner) +pub async fn sync_memory_cache_with_backend() -> Result<(), Error> { + if let Some(store) = crate::global::GLOBAL_OBJECT_API.get() { + match load_data_usage_from_backend(store.clone()).await { + Ok(data_usage_info) => { + let mut cache = memory_cache().write().await; + for (bucket, bucket_usage) in data_usage_info.buckets_usage.iter() { + cache.insert(bucket.clone(), (bucket_usage.size, SystemTime::now())); + } + } + Err(e) => { + debug!("Failed to sync memory cache 
with backend: {}", e); + } + } + } + Ok(()) +} + /// Build basic data usage info with real object counts -async fn build_basic_data_usage_info(store: Arc) -> Result { +pub async fn build_basic_data_usage_info(store: Arc) -> Result { let mut data_usage_info = DataUsageInfo::default(); // Get bucket list diff --git a/crates/ecstore/src/set_disk.rs b/crates/ecstore/src/set_disk.rs index 678d2dab..52d33c3f 100644 --- a/crates/ecstore/src/set_disk.rs +++ b/crates/ecstore/src/set_disk.rs @@ -4614,7 +4614,9 @@ impl StorageAPI for SetDisks { .await .map_err(|e| to_object_err(e, vec![bucket, object]))?; - Ok(ObjectInfo::from_file_info(&dfi, bucket, object, opts.versioned || opts.version_suspended)) + let mut obj_info = ObjectInfo::from_file_info(&dfi, bucket, object, opts.versioned || opts.version_suspended); + obj_info.size = goi.size; + Ok(obj_info) } #[tracing::instrument(skip(self))] diff --git a/rustfs/src/admin/handlers.rs b/rustfs/src/admin/handlers.rs index 3f1a5614..e55c55ed 100644 --- a/rustfs/src/admin/handlers.rs +++ b/rustfs/src/admin/handlers.rs @@ -83,6 +83,7 @@ pub mod kms_keys; pub mod policies; pub mod pools; pub mod profile; +pub mod quota; pub mod rebalance; pub mod service_account; pub mod sts; diff --git a/rustfs/src/admin/handlers/quota.rs b/rustfs/src/admin/handlers/quota.rs new file mode 100644 index 00000000..84944fa8 --- /dev/null +++ b/rustfs/src/admin/handlers/quota.rs @@ -0,0 +1,485 @@ +// Copyright 2024 RustFS Team +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +//! 
Quota admin handlers for HTTP API
+
+use super::Operation;
+use crate::admin::auth::validate_admin_request;
+use crate::auth::{check_key_valid, get_session_token};
+use hyper::StatusCode;
+use matchit::Params;
+use rustfs_ecstore::bucket::quota::checker::QuotaChecker;
+use rustfs_ecstore::bucket::quota::{BucketQuota, QuotaError, QuotaOperation};
+use rustfs_policy::policy::action::{Action, AdminAction};
+use s3s::{Body, S3Request, S3Response, S3Result, s3_error};
+use serde::{Deserialize, Serialize};
+use serde_json;
+use tracing::{debug, info, warn};
+
+#[derive(Debug, Deserialize)]
+pub struct SetBucketQuotaRequest {
+    pub quota: Option<u64>,
+    #[serde(default = "default_quota_type")]
+    pub quota_type: String,
+}
+
+fn default_quota_type() -> String {
+    rustfs_config::QUOTA_TYPE_HARD.to_string()
+}
+
+#[derive(Debug, Serialize)]
+pub struct BucketQuotaResponse {
+    pub bucket: String,
+    pub quota: Option<u64>,
+    /// Current usage size in bytes
+    pub size: u64,
+    pub quota_type: String,
+}
+
+#[derive(Debug, Serialize)]
+pub struct BucketQuotaStats {
+    pub bucket: String,
+    pub quota_limit: Option<u64>,
+    pub current_usage: u64,
+    pub remaining_quota: Option<u64>,
+    pub usage_percentage: Option<f64>,
+}
+
+#[derive(Debug, Deserialize)]
+pub struct CheckQuotaRequest {
+    pub operation_type: String,
+    pub operation_size: u64,
+}
+
+#[derive(Debug, Serialize)]
+pub struct CheckQuotaResponse {
+    pub bucket: String,
+    pub operation_type: String,
+    pub operation_size: u64,
+    pub allowed: bool,
+    pub current_usage: u64,
+    pub quota_limit: Option<u64>,
+    pub remaining_quota: Option<u64>,
+}
+
+/// Quota management handlers
+pub struct SetBucketQuotaHandler;
+pub struct GetBucketQuotaHandler;
+pub struct ClearBucketQuotaHandler;
+pub struct GetBucketQuotaStatsHandler;
+pub struct CheckBucketQuotaHandler;
+
+#[async_trait::async_trait]
+impl Operation for SetBucketQuotaHandler {
+    #[tracing::instrument(skip_all)]
+    async fn call(&self, mut req: S3Request<Body>, params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
+        warn!("handle SetBucketQuota");
+
+        let Some(ref cred) = req.credentials else {
+            return Err(s3_error!(InvalidRequest, "authentication required"));
+        };
+
+        let (cred, owner) =
+            check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &cred.access_key).await?;
+
+        validate_admin_request(
+            &req.headers,
+            &cred,
+            owner,
+            false,
+            vec![Action::AdminAction(AdminAction::SetBucketQuotaAdminAction)],
+            None,
+        )
+        .await?;
+
+        let bucket = params.get("bucket").unwrap_or("").to_string();
+        if bucket.is_empty() {
+            return Err(s3_error!(InvalidRequest, "bucket name is required"));
+        }
+
+        let body = req
+            .input
+            .store_all_limited(rustfs_config::MAX_ADMIN_REQUEST_BODY_SIZE)
+            .await
+            .map_err(|e| s3_error!(InvalidRequest, "failed to read request body: {}", e))?;
+
+        let request: SetBucketQuotaRequest = if body.is_empty() {
+            SetBucketQuotaRequest {
+                quota: None,
+                quota_type: default_quota_type(),
+            }
+        } else {
+            serde_json::from_slice(&body).map_err(|e| s3_error!(InvalidRequest, "invalid JSON: {}", e))?
+ }; + + if request.quota_type.to_uppercase() != rustfs_config::QUOTA_TYPE_HARD { + return Err(s3_error!(InvalidArgument, "{}", rustfs_config::QUOTA_INVALID_TYPE_ERROR_MSG)); + } + + let quota = BucketQuota::new(request.quota); + + let metadata_sys_lock = rustfs_ecstore::bucket::metadata_sys::GLOBAL_BucketMetadataSys + .get() + .ok_or_else(|| s3_error!(InternalError, "{}", rustfs_config::QUOTA_METADATA_SYSTEM_ERROR_MSG))?; + let mut quota_checker = QuotaChecker::new(metadata_sys_lock.clone()); + + quota_checker + .set_quota_config(&bucket, quota.clone()) + .await + .map_err(|e| s3_error!(InternalError, "Failed to set quota: {}", e))?; + + // Get real-time usage from data usage system + let current_usage = if let Some(store) = rustfs_ecstore::global::GLOBAL_OBJECT_API.get() { + match rustfs_ecstore::data_usage::load_data_usage_from_backend(store.clone()).await { + Ok(data_usage_info) => data_usage_info + .buckets_usage + .get(&bucket) + .map(|bucket_usage| bucket_usage.size) + .unwrap_or(0), + Err(_) => 0, + } + } else { + 0 + }; + + let response = BucketQuotaResponse { + bucket, + quota: quota.quota, + size: current_usage, + quota_type: rustfs_config::QUOTA_TYPE_HARD.to_string(), + }; + + let json = + serde_json::to_string(&response).map_err(|e| s3_error!(InternalError, "Failed to serialize response: {}", e))?; + + Ok(S3Response::new((StatusCode::OK, Body::from(json)))) + } +} + +#[async_trait::async_trait] +impl Operation for GetBucketQuotaHandler { + #[tracing::instrument(skip_all)] + async fn call(&self, req: S3Request, params: Params<'_, '_>) -> S3Result> { + warn!("handle GetBucketQuota"); + + let Some(ref cred) = req.credentials else { + return Err(s3_error!(InvalidRequest, "authentication required")); + }; + + let (cred, owner) = + check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &cred.access_key).await?; + + validate_admin_request( + &req.headers, + &cred, + owner, + false, + vec![Action::AdminAction(AdminAction::GetBucketQuotaAdminAction)], + None, + ) + .await?; + + let bucket = params.get("bucket").unwrap_or("").to_string(); + if bucket.is_empty() { + return Err(s3_error!(InvalidRequest, "bucket name is required")); + } + + let metadata_sys_lock = rustfs_ecstore::bucket::metadata_sys::GLOBAL_BucketMetadataSys + .get() + .ok_or_else(|| s3_error!(InternalError, "Bucket metadata system not initialized"))?; + + let quota_checker = QuotaChecker::new(metadata_sys_lock.clone()); + + let (quota, current_usage) = quota_checker.get_quota_stats(&bucket).await.map_err(|e| match e { + QuotaError::ConfigNotFound { .. 
} => { + s3_error!(NoSuchBucket, "Bucket not found: {}", bucket) + } + _ => s3_error!(InternalError, "Failed to get quota: {}", e), + })?; + + let response = BucketQuotaResponse { + bucket, + quota: quota.quota, + size: current_usage.unwrap_or(0), + quota_type: rustfs_config::QUOTA_TYPE_HARD.to_string(), + }; + + let json = + serde_json::to_string(&response).map_err(|e| s3_error!(InternalError, "Failed to serialize response: {}", e))?; + + Ok(S3Response::new((StatusCode::OK, Body::from(json)))) + } +} + +#[async_trait::async_trait] +impl Operation for ClearBucketQuotaHandler { + #[tracing::instrument(skip_all)] + async fn call(&self, req: S3Request, params: Params<'_, '_>) -> S3Result> { + warn!("handle ClearBucketQuota"); + + let Some(ref cred) = req.credentials else { + return Err(s3_error!(InvalidRequest, "authentication required")); + }; + + let (cred, owner) = + check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &cred.access_key).await?; + + validate_admin_request( + &req.headers, + &cred, + owner, + false, + vec![Action::AdminAction(AdminAction::SetBucketQuotaAdminAction)], + None, + ) + .await?; + + let bucket = params.get("bucket").unwrap_or("").to_string(); + if bucket.is_empty() { + return Err(s3_error!(InvalidRequest, "bucket name is required")); + } + + info!("Clearing quota for bucket: {}", bucket); + + let metadata_sys_lock = rustfs_ecstore::bucket::metadata_sys::GLOBAL_BucketMetadataSys + .get() + .ok_or_else(|| s3_error!(InternalError, "Bucket metadata system not initialized"))?; + + let mut quota_checker = QuotaChecker::new(metadata_sys_lock.clone()); + + // Clear quota (set to None) + let quota = BucketQuota::new(None); + quota_checker + .set_quota_config(&bucket, quota.clone()) + .await + .map_err(|e| s3_error!(InternalError, "Failed to clear quota: {}", e))?; + + info!("Successfully cleared quota for bucket: {}", bucket); + + // Get real-time usage from data usage system + let current_usage = if let Some(store) = rustfs_ecstore::global::GLOBAL_OBJECT_API.get() { + match rustfs_ecstore::data_usage::load_data_usage_from_backend(store.clone()).await { + Ok(data_usage_info) => data_usage_info + .buckets_usage + .get(&bucket) + .map(|bucket_usage| bucket_usage.size) + .unwrap_or(0), + Err(_) => 0, + } + } else { + 0 + }; + + let response = BucketQuotaResponse { + bucket, + quota: None, + size: current_usage, + quota_type: rustfs_config::QUOTA_TYPE_HARD.to_string(), + }; + + let json = + serde_json::to_string(&response).map_err(|e| s3_error!(InternalError, "Failed to serialize response: {}", e))?; + + Ok(S3Response::new((StatusCode::OK, Body::from(json)))) + } +} + +#[async_trait::async_trait] +impl Operation for GetBucketQuotaStatsHandler { + #[tracing::instrument(skip_all)] + async fn call(&self, req: S3Request, params: Params<'_, '_>) -> S3Result> { + warn!("handle GetBucketQuotaStats"); + + let Some(ref cred) = req.credentials else { + return Err(s3_error!(InvalidRequest, "authentication required")); + }; + + let (cred, owner) = + check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &cred.access_key).await?; + + validate_admin_request( + &req.headers, + &cred, + owner, + false, + vec![Action::AdminAction(AdminAction::GetBucketQuotaAdminAction)], + None, + ) + .await?; + + let bucket = params.get("bucket").unwrap_or("").to_string(); + if bucket.is_empty() { + return Err(s3_error!(InvalidRequest, "bucket name is required")); + } + + let metadata_sys_lock = rustfs_ecstore::bucket::metadata_sys::GLOBAL_BucketMetadataSys + 
.get() + .ok_or_else(|| s3_error!(InternalError, "Bucket metadata system not initialized"))?; + + let quota_checker = QuotaChecker::new(metadata_sys_lock.clone()); + + let (quota, current_usage_opt) = quota_checker.get_quota_stats(&bucket).await.map_err(|e| match e { + QuotaError::ConfigNotFound { .. } => { + s3_error!(NoSuchBucket, "Bucket not found: {}", bucket) + } + _ => s3_error!(InternalError, "Failed to get quota stats: {}", e), + })?; + + let current_usage = current_usage_opt.unwrap_or(0); + let usage_percentage = quota.quota.and_then(|limit| { + if limit == 0 { + None + } else { + Some((current_usage as f64 / limit as f64) * 100.0) + } + }); + + let remaining_quota = quota.get_remaining_quota(current_usage); + + let response = BucketQuotaStats { + bucket, + quota_limit: quota.quota, + current_usage, + remaining_quota, + usage_percentage, + }; + + let json = + serde_json::to_string(&response).map_err(|e| s3_error!(InternalError, "Failed to serialize response: {}", e))?; + + Ok(S3Response::new((StatusCode::OK, Body::from(json)))) + } +} + +#[async_trait::async_trait] +impl Operation for CheckBucketQuotaHandler { + #[tracing::instrument(skip_all)] + async fn call(&self, mut req: S3Request, params: Params<'_, '_>) -> S3Result> { + warn!("handle CheckBucketQuota"); + + let Some(ref cred) = req.credentials else { + return Err(s3_error!(InvalidRequest, "authentication required")); + }; + + let (cred, owner) = + check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &cred.access_key).await?; + + validate_admin_request( + &req.headers, + &cred, + owner, + false, + vec![Action::AdminAction(AdminAction::GetBucketQuotaAdminAction)], + None, + ) + .await?; + + let bucket = params.get("bucket").unwrap_or("").to_string(); + if bucket.is_empty() { + return Err(s3_error!(InvalidRequest, "bucket name is required")); + } + + let body = req + .input + .store_all_limited(rustfs_config::MAX_ADMIN_REQUEST_BODY_SIZE) + .await + .map_err(|e| s3_error!(InvalidRequest, "failed to read request body: {}", e))?; + + let request: CheckQuotaRequest = if body.is_empty() { + return Err(s3_error!(InvalidRequest, "request body cannot be empty")); + } else { + serde_json::from_slice(&body).map_err(|e| s3_error!(InvalidRequest, "invalid JSON: {}", e))? 
+ }; + + debug!( + "Checking quota for bucket: {}, operation: {}, size: {}", + bucket, request.operation_type, request.operation_size + ); + + let metadata_sys_lock = rustfs_ecstore::bucket::metadata_sys::GLOBAL_BucketMetadataSys + .get() + .ok_or_else(|| s3_error!(InternalError, "Bucket metadata system not initialized"))?; + + let quota_checker = QuotaChecker::new(metadata_sys_lock.clone()); + + let operation: QuotaOperation = match request.operation_type.to_uppercase().as_str() { + "PUT" | "PUTOBJECT" => QuotaOperation::PutObject, + "COPY" | "COPYOBJECT" => QuotaOperation::CopyObject, + "DELETE" | "DELETEOBJECT" => QuotaOperation::DeleteObject, + _ => QuotaOperation::PutObject, // Default to PUT operation + }; + + let result = quota_checker + .check_quota(&bucket, operation, request.operation_size) + .await + .map_err(|e| s3_error!(InternalError, "Failed to check quota: {}", e))?; + + let response = CheckQuotaResponse { + bucket, + operation_type: request.operation_type, + operation_size: request.operation_size, + allowed: result.allowed, + current_usage: result.current_usage, + quota_limit: result.quota_limit, + remaining_quota: result.remaining, + }; + + let json = + serde_json::to_string(&response).map_err(|e| s3_error!(InternalError, "Failed to serialize response: {}", e))?; + + Ok(S3Response::new((StatusCode::OK, Body::from(json)))) + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_default_quota_type() { + assert_eq!(default_quota_type(), "HARD"); + } + + #[test] + fn test_quota_operation_parsing() { + let parse_operation = |operation: &str| match operation.to_uppercase().as_str() { + "PUT" | "PUTOBJECT" => QuotaOperation::PutObject, + "COPY" | "COPYOBJECT" => QuotaOperation::CopyObject, + "DELETE" | "DELETEOBJECT" => QuotaOperation::DeleteObject, + _ => QuotaOperation::PutObject, + }; + + assert!(matches!(parse_operation("put"), QuotaOperation::PutObject)); + assert!(matches!(parse_operation("PUT"), QuotaOperation::PutObject)); + assert!(matches!(parse_operation("PutObject"), QuotaOperation::PutObject)); + assert!(matches!(parse_operation("copy"), QuotaOperation::CopyObject)); + assert!(matches!(parse_operation("DELETE"), QuotaOperation::DeleteObject)); + assert!(matches!(parse_operation("unknown"), QuotaOperation::PutObject)); + } + + #[tokio::test] + async fn test_quota_response_serialization() { + let response = BucketQuotaResponse { + bucket: "test-bucket".to_string(), + quota: Some(2147483648), + size: 1073741824, + quota_type: rustfs_config::QUOTA_TYPE_HARD.to_string(), + }; + + let json = serde_json::to_string(&response).unwrap(); + assert!(json.contains("test-bucket")); + assert!(json.contains("2147483648")); + assert!(json.contains("HARD")); + } +} diff --git a/rustfs/src/admin/mod.rs b/rustfs/src/admin/mod.rs index e554ace5..4f23261f 100644 --- a/rustfs/src/admin/mod.rs +++ b/rustfs/src/admin/mod.rs @@ -29,7 +29,7 @@ use handlers::{ event::{ListNotificationTargets, ListTargetsArns, NotificationTarget, RemoveNotificationTarget}, group, kms, kms_dynamic, kms_keys, policies, pools, profile::{TriggerProfileCPU, TriggerProfileMemory}, - rebalance, + quota, rebalance, service_account::{AddServiceAccount, DeleteServiceAccount, InfoServiceAccount, ListServiceAccount, UpdateServiceAccount}, sts, tier, user, }; @@ -202,6 +202,32 @@ pub fn make_admin_route(console_enabled: bool) -> std::io::Result AdminOperation(&tier::ClearTier {}), )?; + r.insert( + Method::PUT, + format!("{}{}", ADMIN_PREFIX, "/v3/quota/{bucket}").as_str(), + 
AdminOperation(&quota::SetBucketQuotaHandler {}),
+    )?;
+    r.insert(
+        Method::GET,
+        format!("{}{}", ADMIN_PREFIX, "/v3/quota/{bucket}").as_str(),
+        AdminOperation(&quota::GetBucketQuotaHandler {}),
+    )?;
+    r.insert(
+        Method::DELETE,
+        format!("{}{}", ADMIN_PREFIX, "/v3/quota/{bucket}").as_str(),
+        AdminOperation(&quota::ClearBucketQuotaHandler {}),
+    )?;
+    r.insert(
+        Method::GET,
+        format!("{}{}", ADMIN_PREFIX, "/v3/quota-stats/{bucket}").as_str(),
+        AdminOperation(&quota::GetBucketQuotaStatsHandler {}),
+    )?;
+    r.insert(
+        Method::POST,
+        format!("{}{}", ADMIN_PREFIX, "/v3/quota-check/{bucket}").as_str(),
+        AdminOperation(&quota::CheckBucketQuotaHandler {}),
+    )?;
+
     r.insert(
         Method::GET,
         format!("{}{}", ADMIN_PREFIX, "/export-bucket-metadata").as_str(),
diff --git a/rustfs/src/error.rs b/rustfs/src/error.rs
index 4ca5fd6c..dabf7d7a 100644
--- a/rustfs/src/error.rs
+++ b/rustfs/src/error.rs
@@ -12,6 +12,7 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+use rustfs_ecstore::bucket::quota::QuotaError;
 use rustfs_ecstore::error::StorageError;
 use s3s::{S3Error, S3ErrorCode};
 
@@ -284,6 +285,29 @@ impl From<StorageError> for ApiError {
     }
 }
 
+impl From<QuotaError> for ApiError {
+    fn from(err: QuotaError) -> Self {
+        let code = match &err {
+            QuotaError::QuotaExceeded { .. } => S3ErrorCode::InvalidRequest,
+            QuotaError::ConfigNotFound { .. } => S3ErrorCode::NoSuchBucket,
+            QuotaError::InvalidConfig { .. } => S3ErrorCode::InvalidArgument,
+            QuotaError::StorageError(_) => S3ErrorCode::InternalError,
+        };
+
+        let message = if code == S3ErrorCode::InternalError {
+            err.to_string()
+        } else {
+            ApiError::error_code_to_message(&code)
+        };
+
+        ApiError {
+            code,
+            message,
+            source: Some(Box::new(err)),
+        }
+    }
+}
+
 #[cfg(test)]
 mod tests {
     use super::*;
diff --git a/rustfs/src/storage/ecfs.rs b/rustfs/src/storage/ecfs.rs
index fe6c779d..1fec35ec 100644
--- a/rustfs/src/storage/ecfs.rs
+++ b/rustfs/src/storage/ecfs.rs
@@ -40,6 +40,7 @@ use datafusion::arrow::{
 use futures::StreamExt;
 use http::{HeaderMap, StatusCode};
 use metrics::counter;
+use rustfs_ecstore::bucket::quota::checker::QuotaChecker;
 use rustfs_ecstore::{
     bucket::{
         lifecycle::{
@@ -54,6 +55,7 @@ use rustfs_ecstore::{
         metadata_sys::get_replication_config,
         object_lock::objectlock_sys::BucketObjectLockSys,
         policy_sys::PolicySys,
+        quota::QuotaOperation,
         replication::{
             DeletedObjectReplicationInfo, ReplicationConfigurationExt, check_replicate_delete, get_must_replicate_options,
             must_replicate, schedule_replication, schedule_replication_delete,
@@ -1067,11 +1069,42 @@ impl S3 for FS {
             }
         }
 
+        // check quota for copy operation
+        if let Some(metadata_sys) = rustfs_ecstore::bucket::metadata_sys::GLOBAL_BucketMetadataSys.get() {
+            let quota_checker = QuotaChecker::new(metadata_sys.clone());
+
+            match quota_checker
+                .check_quota(&bucket, QuotaOperation::CopyObject, src_info.size as u64)
+                .await
+            {
+                Ok(check_result) => {
+                    if !check_result.allowed {
+                        return Err(S3Error::with_message(
+                            S3ErrorCode::InvalidRequest,
+                            format!(
+                                "Bucket quota exceeded.
Current usage: {} bytes, limit: {} bytes", + check_result.current_usage, + check_result.quota_limit.unwrap_or(0) + ), + )); + } + } + Err(e) => { + warn!("Quota check failed for bucket {}: {}, allowing operation", bucket, e); + } + } + } + let oi = store .copy_object(&src_bucket, &src_key, &bucket, &key, &mut src_info, &src_opts, &dst_opts) .await .map_err(ApiError::from)?; + // Update quota tracking after successful copy + if rustfs_ecstore::bucket::metadata_sys::GLOBAL_BucketMetadataSys.get().is_some() { + rustfs_ecstore::data_usage::increment_bucket_usage_memory(&bucket, oi.size as u64).await; + } + // Invalidate cache for the destination object to prevent stale data let manager = get_concurrency_manager(); let dest_bucket = bucket.clone(); @@ -1440,6 +1473,9 @@ impl S3 for FS { } }; + // Fast in-memory update for immediate quota consistency + rustfs_ecstore::data_usage::decrement_bucket_usage_memory(&bucket, obj_info.size as u64).await; + // Invalidate cache for the deleted object let manager = get_concurrency_manager(); let del_bucket = bucket.clone(); @@ -1534,8 +1570,6 @@ impl S3 for FS { return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string())); }; - let has_lock_enable = BucketObjectLockSys::get(&bucket).await.is_some(); - let version_cfg = BucketVersioningSys::get(&bucket).await.unwrap_or_default(); #[derive(Default, Clone)] @@ -1548,6 +1582,7 @@ impl S3 for FS { let mut object_to_delete = Vec::new(); let mut object_to_delete_index = HashMap::new(); + let mut object_sizes = HashMap::new(); for (idx, obj_id) in delete.objects.iter().enumerate() { // Per S3 API spec, "null" string means non-versioned object // Filter out "null" version_id to treat as unversioned @@ -1606,15 +1641,14 @@ impl S3 for FS { .await .map_err(ApiError::from)?; - let mut goi = ObjectInfo::default(); - let mut gerr = None; + // Get object info to collect size for quota tracking + let (goi, gerr) = match store.get_object_info(&bucket, &object.object_name, &opts).await { + Ok(res) => (res, None), + Err(e) => (ObjectInfo::default(), Some(e.to_string())), + }; - if replicate_deletes || object.version_id.is_some() && has_lock_enable { - (goi, gerr) = match store.get_object_info(&bucket, &object.object_name, &opts).await { - Ok(res) => (res, None), - Err(e) => (ObjectInfo::default(), Some(e.to_string())), - }; - } + // Store object size for quota tracking + object_sizes.insert(object.object_name.clone(), goi.size); if is_dir_object(&object.object_name) && object.version_id.is_none() { object.version_id = Some(Uuid::nil()); @@ -1716,6 +1750,10 @@ impl S3 for FS { dobjs[i].replication_state = Some(object_to_delete[i].replication_state()); } delete_results[*didx].delete_object = Some(dobjs[i].clone()); + // Update quota tracking for successfully deleted objects + if let Some(&size) = object_sizes.get(&obj.object_name) { + rustfs_ecstore::data_usage::decrement_bucket_usage_memory(&bucket, size as u64).await; + } continue; } @@ -3151,6 +3189,34 @@ impl S3 for FS { // Validate object key validate_object_key(&key, "PUT")?; + // check quota for put operation + if let Some(size) = content_length + && let Some(metadata_sys) = rustfs_ecstore::bucket::metadata_sys::GLOBAL_BucketMetadataSys.get() + { + let quota_checker = QuotaChecker::new(metadata_sys.clone()); + + match quota_checker + .check_quota(&bucket, QuotaOperation::PutObject, size as u64) + .await + { + Ok(check_result) => { + if !check_result.allowed { + return Err(S3Error::with_message( + S3ErrorCode::InvalidRequest, + format!( + "Bucket 
quota exceeded. Current usage: {} bytes, limit: {} bytes", + check_result.current_usage, + check_result.quota_limit.unwrap_or(0) + ), + )); + } + } + Err(e) => { + warn!("Quota check failed for bucket {}: {}, allowing operation", bucket, e); + } + } + } + if if_match.is_some() || if_none_match.is_some() { let Some(store) = new_object_layer_fn() else { return Err(S3Error::with_message(S3ErrorCode::InternalError, "Not init".to_string())); @@ -3429,6 +3495,9 @@ impl S3 for FS { .await .map_err(ApiError::from)?; + // Fast in-memory update for immediate quota consistency + rustfs_ecstore::data_usage::increment_bucket_usage_memory(&bucket, obj_info.size as u64).await; + // Invalidate cache for the written object to prevent stale data let manager = get_concurrency_manager(); let put_bucket = bucket.clone(); @@ -4356,6 +4425,38 @@ impl S3 for FS { .await .map_err(ApiError::from)?; + // check quota after completing multipart upload + if let Some(metadata_sys) = rustfs_ecstore::bucket::metadata_sys::GLOBAL_BucketMetadataSys.get() { + let quota_checker = QuotaChecker::new(metadata_sys.clone()); + + match quota_checker + .check_quota(&bucket, QuotaOperation::PutObject, obj_info.size as u64) + .await + { + Ok(check_result) => { + if !check_result.allowed { + // Quota exceeded, delete the completed object + let _ = store.delete_object(&bucket, &key, ObjectOptions::default()).await; + return Err(S3Error::with_message( + S3ErrorCode::InvalidRequest, + format!( + "Bucket quota exceeded. Current usage: {} bytes, limit: {} bytes", + check_result.current_usage, + check_result.quota_limit.unwrap_or(0) + ), + )); + } + // Update quota tracking after successful multipart upload + if rustfs_ecstore::bucket::metadata_sys::GLOBAL_BucketMetadataSys.get().is_some() { + rustfs_ecstore::data_usage::increment_bucket_usage_memory(&bucket, obj_info.size as u64).await; + } + } + Err(e) => { + warn!("Quota check failed for bucket {}: {}, allowing operation", bucket, e); + } + } + } + // Invalidate cache for the completed multipart object let manager = get_concurrency_manager(); let mpu_bucket = bucket.clone(); From f9d3a908f035f51261702719a9c3e0123132ebb7 Mon Sep 17 00:00:00 2001 From: houseme Date: Mon, 12 Jan 2026 12:25:02 +0800 Subject: [PATCH 16/17] =?UTF-8?q?Refactor=EF=BC=9Areplace=20`jsonwebtoken`?= =?UTF-8?q?=20feature=20from=20`rust=5Fcrypto`=20to=20`aws=5Flc=5Frs`=20(#?= =?UTF-8?q?1474)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: dependabot[bot] Co-authored-by: dependabot[bot] <49699333+dependabot[bot]@users.noreply.github.com> --- Cargo.lock | 157 ++++++++++++++++++++++------------------------------- Cargo.toml | 6 +- 2 files changed, 67 insertions(+), 96 deletions(-) diff --git a/Cargo.lock b/Cargo.lock index 70720aad..5e46daab 100644 --- a/Cargo.lock +++ b/Cargo.lock @@ -270,7 +270,7 @@ dependencies = [ "base64ct", "blake2 0.11.0-rc.3", "cpufeatures", - "password-hash 0.6.0-rc.7", + "password-hash 0.6.0-rc.8", ] [[package]] @@ -287,9 +287,9 @@ checksum = "7c02d123df017efcdfbd739ef81735b36c5ba83ec3c59c80a9d7ecc718f92e50" [[package]] name = "arrow" -version = "57.1.0" +version = "57.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cb372a7cbcac02a35d3fb7b3fc1f969ec078e871f9bb899bf00a2e1809bec8a3" +checksum = "2a2b10dcb159faf30d3f81f6d56c1211a5bea2ca424eabe477648a44b993320e" dependencies = [ "arrow-arith", "arrow-array", @@ -308,9 +308,9 @@ dependencies = [ [[package]] name = "arrow-arith" -version = "57.1.0" +version = 
"57.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "0f377dcd19e440174596d83deb49cd724886d91060c07fec4f67014ef9d54049" +checksum = "288015089e7931843c80ed4032c5274f02b37bcb720c4a42096d50b390e70372" dependencies = [ "arrow-array", "arrow-buffer", @@ -322,9 +322,9 @@ dependencies = [ [[package]] name = "arrow-array" -version = "57.1.0" +version = "57.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a23eaff85a44e9fa914660fb0d0bb00b79c4a3d888b5334adb3ea4330c84f002" +checksum = "65ca404ea6191e06bf30956394173337fa9c35f445bd447fe6c21ab944e1a23c" dependencies = [ "ahash", "arrow-buffer", @@ -341,9 +341,9 @@ dependencies = [ [[package]] name = "arrow-buffer" -version = "57.1.0" +version = "57.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "a2819d893750cb3380ab31ebdc8c68874dd4429f90fd09180f3c93538bd21626" +checksum = "36356383099be0151dacc4245309895f16ba7917d79bdb71a7148659c9206c56" dependencies = [ "bytes", "half", @@ -353,9 +353,9 @@ dependencies = [ [[package]] name = "arrow-cast" -version = "57.1.0" +version = "57.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "e3d131abb183f80c450d4591dc784f8d7750c50c6e2bc3fcaad148afc8361271" +checksum = "9c8e372ed52bd4ee88cc1e6c3859aa7ecea204158ac640b10e187936e7e87074" dependencies = [ "arrow-array", "arrow-buffer", @@ -375,9 +375,9 @@ dependencies = [ [[package]] name = "arrow-csv" -version = "57.1.0" +version = "57.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2275877a0e5e7e7c76954669366c2aa1a829e340ab1f612e647507860906fb6b" +checksum = "8e4100b729fe656f2e4fb32bc5884f14acf9118d4ad532b7b33c1132e4dce896" dependencies = [ "arrow-array", "arrow-cast", @@ -390,9 +390,9 @@ dependencies = [ [[package]] name = "arrow-data" -version = "57.1.0" +version = "57.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "05738f3d42cb922b9096f7786f606fcb8669260c2640df8490533bb2fa38c9d3" +checksum = "bf87f4ff5fc13290aa47e499a8b669a82c5977c6a1fedce22c7f542c1fd5a597" dependencies = [ "arrow-buffer", "arrow-schema", @@ -403,9 +403,9 @@ dependencies = [ [[package]] name = "arrow-ipc" -version = "57.1.0" +version = "57.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "3d09446e8076c4b3f235603d9ea7c5494e73d441b01cd61fb33d7254c11964b3" +checksum = "eb3ca63edd2073fcb42ba112f8ae165df1de935627ead6e203d07c99445f2081" dependencies = [ "arrow-array", "arrow-buffer", @@ -419,9 +419,9 @@ dependencies = [ [[package]] name = "arrow-json" -version = "57.1.0" +version = "57.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "371ffd66fa77f71d7628c63f209c9ca5341081051aa32f9c8020feb0def787c0" +checksum = "a36b2332559d3310ebe3e173f75b29989b4412df4029a26a30cc3f7da0869297" dependencies = [ "arrow-array", "arrow-buffer", @@ -443,9 +443,9 @@ dependencies = [ [[package]] name = "arrow-ord" -version = "57.1.0" +version = "57.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cbc94fc7adec5d1ba9e8cd1b1e8d6f72423b33fe978bf1f46d970fafab787521" +checksum = "13c4e0530272ca755d6814218dffd04425c5b7854b87fa741d5ff848bf50aa39" dependencies = [ "arrow-array", "arrow-buffer", @@ -456,9 +456,9 @@ dependencies = [ [[package]] name = "arrow-row" -version = "57.1.0" +version = "57.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "169676f317157dc079cc5def6354d16db63d8861d61046d2f3883268ced6f99f" 
+checksum = "b07f52788744cc71c4628567ad834cadbaeb9f09026ff1d7a4120f69edf7abd3" dependencies = [ "arrow-array", "arrow-buffer", @@ -469,9 +469,9 @@ dependencies = [ [[package]] name = "arrow-schema" -version = "57.1.0" +version = "57.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d27609cd7dd45f006abae27995c2729ef6f4b9361cde1ddd019dc31a5aa017e0" +checksum = "6bb63203e8e0e54b288d0d8043ca8fa1013820822a27692ef1b78a977d879f2c" dependencies = [ "serde_core", "serde_json", @@ -479,9 +479,9 @@ dependencies = [ [[package]] name = "arrow-select" -version = "57.1.0" +version = "57.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "ae980d021879ea119dd6e2a13912d81e64abed372d53163e804dfe84639d8010" +checksum = "c96d8a1c180b44ecf2e66c9a2f2bbcb8b1b6f14e165ce46ac8bde211a363411b" dependencies = [ "ahash", "arrow-array", @@ -493,9 +493,9 @@ dependencies = [ [[package]] name = "arrow-string" -version = "57.1.0" +version = "57.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "cf35e8ef49dcf0c5f6d175edee6b8af7b45611805333129c541a8b89a0fc0534" +checksum = "a8ad6a81add9d3ea30bf8374ee8329992c7fd246ffd8b7e2f48a3cea5aa0cc9a" dependencies = [ "arrow-array", "arrow-buffer", @@ -1767,7 +1767,7 @@ version = "0.1.16" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f9d839f2a20b0aee515dc581a6172f2321f96cab76c1a38a4c584a194955390e" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.17", "once_cell", "tiny-keccak", ] @@ -3890,9 +3890,9 @@ dependencies = [ [[package]] name = "getrandom" -version = "0.2.16" +version = "0.2.17" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "335ff9f135e4384c8150d6f27c6daed433577f86b4750418338c01a1a2528592" +checksum = "ff2abc00be7fca6ebc474524697ae276ad847ad0a6b3faa4bcb027e9a4614ad0" dependencies = [ "cfg-if", "js-sys", @@ -4873,7 +4873,7 @@ dependencies = [ "p384", "p521", "rand_core 0.6.4", - "rsa 0.10.0-rc.11", + "rsa", "sec1 0.7.3", "sha1 0.10.6", "sha1 0.11.0-rc.3", @@ -5001,19 +5001,13 @@ version = "10.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "c76e1c7d7df3e34443b3621b459b066a7b79644f059fc8b2db7070c825fd417e" dependencies = [ + "aws-lc-rs", "base64", - "ed25519-dalek 2.2.0", - "getrandom 0.2.16", - "hmac 0.12.1", + "getrandom 0.2.17", "js-sys", - "p256 0.13.2", - "p384", "pem", - "rand 0.8.5", - "rsa 0.9.10", "serde", "serde_json", - "sha2 0.10.9", "signature 2.2.0", "simple_asn1", ] @@ -5794,7 +5788,6 @@ dependencies = [ "rand 0.8.5", "serde", "smallvec", - "zeroize", ] [[package]] @@ -6247,9 +6240,9 @@ dependencies = [ [[package]] name = "parquet" -version = "57.1.0" +version = "57.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "be3e4f6d320dd92bfa7d612e265d7d08bba0a240bab86af3425e1d255a511d89" +checksum = "5f6a2926a30477c0b95fea6c28c3072712b139337a242c2cc64817bdc20a8854" dependencies = [ "ahash", "arrow-array", @@ -6295,9 +6288,9 @@ dependencies = [ [[package]] name = "password-hash" -version = "0.6.0-rc.7" +version = "0.6.0-rc.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c351143b5ab27b1f1d24712f21ea4d0458fe74f60dd5839297dabcc2ecd24d58" +checksum = "f77af9403a6489b7b51f552693bd48d8e81a710c92d3d77648b203558578762d" dependencies = [ "getrandom 0.4.0-rc.0", "phc", @@ -6357,9 +6350,9 @@ dependencies = [ [[package]] name = "pbkdf2" -version = "0.13.0-rc.5" +version = "0.13.0-rc.6" source = 
"registry+https://github.com/rust-lang/crates.io-index" -checksum = "c015873c38594dfb7724f90b2ed912a606697393bda2d39fd83c2394301f808a" +checksum = "9fb9b101849c3ddab38905781f5aa7ae14ea06e87befaf0e7b003e5d3186250d" dependencies = [ "digest 0.11.0-rc.5", "hmac 0.13.0-rc.3", @@ -6514,17 +6507,6 @@ version = "0.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "8b870d8c151b6f2fb93e84a13146138f05d02ed11c7e7c54f8826aaaf7c9f184" -[[package]] -name = "pkcs1" -version = "0.7.5" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c8ffb9f10fa047879315e6625af03c164b16962a5368d724ed16323b68ace47f" -dependencies = [ - "der 0.7.10", - "pkcs8 0.10.2", - "spki 0.7.3", -] - [[package]] name = "pkcs1" version = "0.8.0-rc.4" @@ -7016,6 +6998,15 @@ name = "quick-xml" version = "0.38.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b66c2058c55a409d601666cffe35f04333cf1013010882cec174a7467cd4e21c" +dependencies = [ + "memchr", +] + +[[package]] +name = "quick-xml" +version = "0.39.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "f2e3bf4aa9d243beeb01a7b3bc30b77cfe2c44e24ec02d751a7104a53c2c49a1" dependencies = [ "memchr", "serde", @@ -7151,7 +7142,7 @@ version = "0.6.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "ec0be4795e2f6a28069bec0b5ff3e2ac9bafc99e6a9a7dc3547996c5c816922c" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.17", ] [[package]] @@ -7253,7 +7244,7 @@ version = "0.5.2" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "a4e608c6638b9c18977b00b475ac1f28d14e84b27d8d42f70e0bf1e3dec127ac" dependencies = [ - "getrandom 0.2.16", + "getrandom 0.2.17", "libredox", "thiserror 2.0.17", ] @@ -7416,7 +7407,7 @@ checksum = "a4689e6c2294d81e88dc6261c768b63bc4fcdb852be6d1352498b114f61383b7" dependencies = [ "cc", "cfg-if", - "getrandom 0.2.16", + "getrandom 0.2.17", "libc", "untrusted 0.9.0", "windows-sys 0.52.0", @@ -7476,26 +7467,6 @@ dependencies = [ "serde", ] -[[package]] -name = "rsa" -version = "0.9.10" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b8573f03f5883dcaebdfcf4725caa1ecb9c15b2ef50c43a07b816e06799bb12d" -dependencies = [ - "const-oid 0.9.6", - "digest 0.10.7", - "num-bigint-dig", - "num-integer", - "num-traits", - "pkcs1 0.7.5", - "pkcs8 0.10.2", - "rand_core 0.6.4", - "signature 2.2.0", - "spki 0.7.3", - "subtle", - "zeroize", -] - [[package]] name = "rsa" version = "0.10.0-rc.11" @@ -7506,7 +7477,7 @@ dependencies = [ "crypto-bigint 0.7.0-rc.15", "crypto-primes", "digest 0.11.0-rc.5", - "pkcs1 0.8.0-rc.4", + "pkcs1", "pkcs8 0.11.0-rc.8", "rand_core 0.10.0-rc-3", "sha2 0.11.0-rc.3", @@ -7590,7 +7561,7 @@ dependencies = [ "enum_dispatch", "futures", "generic-array 1.3.5", - "getrandom 0.2.16", + "getrandom 0.2.17", "hex-literal", "hmac 0.12.1", "home", @@ -7605,12 +7576,12 @@ dependencies = [ "p521", "pageant", "pbkdf2 0.12.2", - "pkcs1 0.8.0-rc.4", + "pkcs1", "pkcs5", "pkcs8 0.10.2", "rand 0.8.5", "rand_core 0.6.4", - "rsa 0.10.0-rc.11", + "rsa", "russh-cryptovec", "russh-util", "sec1 0.7.3", @@ -7857,7 +7828,7 @@ version = "0.0.5" dependencies = [ "base64-simd", "rand 0.10.0-rc.6", - "rsa 0.10.0-rc.11", + "rsa", "serde", "serde_json", ] @@ -7943,7 +7914,7 @@ dependencies = [ "cfg-if", "chacha20poly1305", "jsonwebtoken", - "pbkdf2 0.13.0-rc.5", + "pbkdf2 0.13.0-rc.6", "rand 0.10.0-rc.6", "serde_json", "sha2 0.11.0-rc.3", @@ -7991,7 +7962,7 @@ dependencies = [ "parking_lot", 
"path-absolutize", "pin-project-lite", - "quick-xml 0.38.4", + "quick-xml 0.39.0", "rand 0.10.0-rc.6", "reed-solomon-simd", "regex", @@ -8169,7 +8140,7 @@ dependencies = [ "form_urlencoded", "futures", "hashbrown 0.16.1", - "quick-xml 0.38.4", + "quick-xml 0.39.0", "rayon", "rumqttc", "rustc-hash", @@ -9379,7 +9350,7 @@ checksum = "7faefb89d4a5304e31238913d1f7f164e22494276ed58cd84d5058ba7b04911f" dependencies = [ "ed25519-dalek 3.0.0-pre.2", "rand_core 0.10.0-rc-3", - "rsa 0.10.0-rc.11", + "rsa", "sec1 0.8.0-rc.11", "sha2 0.11.0-rc.3", "signature 3.0.0-rc.6", @@ -11418,9 +11389,9 @@ checksum = "40990edd51aae2c2b6907af74ffb635029d5788228222c4bb811e9351c0caad3" [[package]] name = "zmij" -version = "1.0.12" +version = "1.0.13" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2fc5a66a20078bf1251bde995aa2fdcc4b800c70b5d92dd2c62abc5c60f679f8" +checksum = "ac93432f5b761b22864c774aac244fa5c0fd877678a4c37ebf6cf42208f9c9ec" [[package]] name = "zopfli" diff --git a/Cargo.toml b/Cargo.toml index cc839ea8..c8218545 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -131,7 +131,7 @@ byteorder = "1.5.0" flatbuffers = "25.12.19" form_urlencoded = "1.2.2" prost = "0.14.3" -quick-xml = "0.38.4" +quick-xml = "0.39.0" rmcp = { version = "0.12.0" } rmp = { version = "0.8.15" } rmp-serde = { version = "1.3.1" } @@ -147,8 +147,8 @@ blake3 = { version = "1.8.3", features = ["rayon", "mmap"] } chacha20poly1305 = { version = "0.11.0-rc.2" } crc-fast = "1.6.0" hmac = { version = "0.13.0-rc.3" } -jsonwebtoken = { version = "10.2.0", features = ["rust_crypto"] } -pbkdf2 = "0.13.0-rc.5" +jsonwebtoken = { version = "10.2.0", features = ["aws_lc_rs"] } +pbkdf2 = "0.13.0-rc.6" rsa = { version = "0.10.0-rc.11" } rustls = { version = "0.23.36", default-features = false, features = ["aws-lc-rs", "logging", "tls12", "prefer-post-quantum", "std"] } rustls-pemfile = "2.2.0" From f0da8ce21665976a8d1f243a836faa77dca3a9ef Mon Sep 17 00:00:00 2001 From: weisd Date: Mon, 12 Jan 2026 13:26:01 +0800 Subject: [PATCH 17/17] fix: avoid unwrap() panic in delete_prefix parsing (#1476) Co-authored-by: houseme --- rustfs/src/storage/options.rs | 35 ++++++++++++++++++++++++++++++++++- 1 file changed, 34 insertions(+), 1 deletion(-) diff --git a/rustfs/src/storage/options.rs b/rustfs/src/storage/options.rs index 1886ffe3..e0247373 100644 --- a/rustfs/src/storage/options.rs +++ b/rustfs/src/storage/options.rs @@ -80,7 +80,7 @@ pub async fn del_opts( opts.delete_prefix = headers .get(RUSTFS_FORCE_DELETE) - .map(|v| v.to_str().unwrap() == "true") + .map(|v| v.to_str().unwrap_or_default() == "true") .unwrap_or_default(); opts.version_id = { @@ -671,6 +671,39 @@ mod tests { } } + #[tokio::test] + async fn test_del_opts_with_delete_prefix() { + let mut headers = create_test_headers(); + let metadata = create_test_metadata(); + + // Test without RUSTFS_FORCE_DELETE header - should default to false + let result = del_opts("test-bucket", "test-object", None, &headers, metadata.clone()).await; + assert!(result.is_ok()); + let opts = result.unwrap(); + assert!(!opts.delete_prefix); + + // Test with RUSTFS_FORCE_DELETE header set to "true" + headers.insert(RUSTFS_FORCE_DELETE, HeaderValue::from_static("true")); + let result = del_opts("test-bucket", "test-object", None, &headers, metadata.clone()).await; + assert!(result.is_ok()); + let opts = result.unwrap(); + assert!(opts.delete_prefix); + + // Test with RUSTFS_FORCE_DELETE header set to "false" + headers.insert(RUSTFS_FORCE_DELETE, HeaderValue::from_static("false")); + let result 
= del_opts("test-bucket", "test-object", None, &headers, metadata.clone()).await; + assert!(result.is_ok()); + let opts = result.unwrap(); + assert!(!opts.delete_prefix); + + // Test with RUSTFS_FORCE_DELETE header set to other value + headers.insert(RUSTFS_FORCE_DELETE, HeaderValue::from_static("maybe")); + let result = del_opts("test-bucket", "test-object", None, &headers, metadata).await; + assert!(result.is_ok()); + let opts = result.unwrap(); + assert!(!opts.delete_prefix); + } + #[tokio::test] async fn test_get_opts_basic() { let headers = create_test_headers();