mirror of https://github.com/rustfs/rustfs.git
synced 2026-01-17 09:40:32 +00:00

Compare commits: 1.0.0-alph...1.0.0-alph (43 commits)
| Author | SHA1 | Date |
|---|---|---|
| | a95e549430 | |
| | 00f3275603 | |
| | 359c9d2d26 | |
| | 3ce99939a3 | |
| | 02f809312b | |
| | 356dc7e0c2 | |
| | e4ad86ada6 | |
| | b95bee64b2 | |
| | 18fb920fa4 | |
| | 5f19eef945 | |
| | 40ad2a6ea9 | |
| | e7a3129be4 | |
| | b142563127 | |
| | 5660208e89 | |
| | 0b6f3302ce | |
| | 60103f0f72 | |
| | ab752458ce | |
| | 1d6c8750e7 | |
| | 9c44f71a0a | |
| | 9c432fc963 | |
| | f86761fae9 | |
| | 377ed507c5 | |
| | e063306ac3 | |
| | 8009ad5692 | |
| | fb89a16086 | |
| | 666c0a9a38 | |
| | 486a4b58e6 | |
| | f5f6ea4a5c | |
| | 38c2d74d36 | |
| | ffbcd3852f | |
| | 75b144b7d4 | |
| | d06397cf4a | |
| | f995943832 | |
| | de4a3fa766 | |
| | 4d0045ff18 | |
| | d96e04a579 | |
| | cc916926ff | |
| | 134e7e237c | |
| | cf53a9d84a | |
| | 8d7cd4cb1b | |
| | 61b3100260 | |
| | b19e8070a2 | |
| | b8aa8214e2 | |
0 .docker/observability/prometheus-data/.gitignore (vendored, Normal file → Executable file)
2 .github/workflows/audit.yml (vendored)
@@ -57,7 +57,7 @@ jobs:

       - name: Upload audit results
         if: always()
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: security-audit-results-${{ github.run_number }}
           path: audit-results.json
4 .github/workflows/build.yml (vendored)
@@ -442,7 +442,7 @@ jobs:
           echo "📊 Version: ${VERSION}"

       - name: Upload to GitHub artifacts
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: ${{ steps.package.outputs.package_name }}
           path: "rustfs-*.zip"
@@ -679,7 +679,7 @@ jobs:
         uses: actions/checkout@v6

       - name: Download all build artifacts
-        uses: actions/download-artifact@v5
+        uses: actions/download-artifact@v7
         with:
           path: ./artifacts
           pattern: rustfs-*
4 .github/workflows/ci.yml (vendored)
@@ -160,7 +160,7 @@ jobs:
         with:
           tool: s3s-e2e
           git: https://github.com/Nugine/s3s.git
-          rev: b7714bfaa17ddfa9b23ea01774a1e7bbdbfc2ca3
+          rev: 9e41304ed549b89cfb03ede98e9c0d2ac7522051

       - name: Build debug binary
         run: |
@@ -175,7 +175,7 @@ jobs:

       - name: Upload test logs
         if: failure()
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: e2e-test-logs-${{ github.run_number }}
           path: /tmp/rustfs.log
4 .github/workflows/e2e-s3tests.yml (vendored)
@@ -205,7 +205,7 @@ jobs:

       - name: Upload artifacts
         if: always() && env.ACT != 'true'
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: s3tests-single
           path: artifacts/**
@@ -416,7 +416,7 @@ jobs:

       - name: Upload artifacts
         if: always() && env.ACT != 'true'
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: s3tests-multi
           path: artifacts/**
4 .github/workflows/helm-package.yml (vendored)
@@ -56,7 +56,7 @@ jobs:
           helm package ./helm/rustfs --destination helm/rustfs/ --version "0.0.$package_version"

       - name: Upload helm package as artifact
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: helm-package
           path: helm/rustfs/*.tgz
@@ -74,7 +74,7 @@ jobs:
           token: ${{ secrets.RUSTFS_HELM_PACKAGE }}

       - name: Download helm package
-        uses: actions/download-artifact@v4
+        uses: actions/download-artifact@v7
         with:
           name: helm-package
           path: ./
4 .github/workflows/performance.yml (vendored)
@@ -107,7 +107,7 @@ jobs:

       - name: Upload profile data
         if: steps.profiling.outputs.profile_generated == 'true'
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: performance-profile-${{ github.run_number }}
           path: samply-profile.json
@@ -135,7 +135,7 @@ jobs:
           tee benchmark-results.json

       - name: Upload benchmark results
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: benchmark-results-${{ github.run_number }}
           path: benchmark-results.json
20 .github/workflows/stale.yml (vendored, new file)
@@ -0,0 +1,20 @@
+name: "Mark stale issues"
+on:
+  schedule:
+    - cron: "30 1 * * *"
+
+jobs:
+  stale:
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/stale@v9
+        with:
+          repo-token: ${{ secrets.GITHUB_TOKEN }}
+          stale-issue-message: 'This issue has been automatically marked as stale because it has not had recent activity. It will be closed if no further activity occurs.'
+          stale-issue-label: 'stale'
+          # Mark an issue as stale if there is no activity for more than 7 days
+          days-before-stale: 7
+          # If no one responds within 3 more days, the issue will be closed.
+          days-before-close: 3
+          # Issues with these labels are exempt and will not be closed automatically.
+          exempt-issue-labels: 'pinned,security'
18 AGENTS.md
@@ -1,8 +1,18 @@
 # Repository Guidelines

+## ⚠️ Pre-Commit Checklist (MANDATORY)
+**Before EVERY commit, you MUST run and pass ALL of the following:**
+```bash
+cargo fmt --all --check                                    # Code formatting
+cargo clippy --all-targets --all-features -- -D warnings   # Lints
+cargo test --workspace --exclude e2e_test                  # Unit tests
+```
+Or simply run `make pre-commit`, which covers all checks. **DO NOT commit if any check fails.**
+
 ## Communication Rules
 - Respond to the user in Chinese; use English in all other contexts.
 - Code and documentation must be written in English only. Chinese text is allowed solely as test data/fixtures when a case explicitly requires Chinese-language content for validation.
+- **Pull Request titles and descriptions must be written in English** to ensure consistency and accessibility for all contributors.

 ## Project Structure & Module Organization
 The workspace root hosts shared dependencies in `Cargo.toml`. The service binary lives under `rustfs/src/main.rs`, while reusable crates sit in `crates/` (`crypto`, `iam`, `kms`, and `e2e_test`). Local fixtures for standalone flows reside in `test_standalone/`, deployment manifests are under `deploy/`, Docker assets sit at the root, and automation lives in `scripts/`. Skim each crate’s README or module docs before contributing changes.
@@ -19,7 +29,13 @@ Co-locate unit tests with their modules and give behavior-led names such as `han
 When fixing bugs or adding features, include regression tests that capture the new behavior so future changes cannot silently break it.

 ## Commit & Pull Request Guidelines
-Work on feature branches (e.g., `feat/...`) after syncing `main`. Follow Conventional Commits under 72 characters (e.g., `feat: add kms key rotation`). Each commit must compile, format cleanly, and pass `make pre-commit`. Open PRs with a concise summary, note verification commands, link relevant issues, and wait for reviewer approval.
+Work on feature branches (e.g., `feat/...`) after syncing `main`. Follow Conventional Commits under 72 characters (e.g., `feat: add kms key rotation`). Each commit must compile, format cleanly, and pass `make pre-commit`.
+
+**Pull Request Requirements:**
+- PR titles and descriptions **MUST be written in English**
+- Open PRs with a concise summary, note verification commands, and link relevant issues
+- Follow the PR template format and fill in all required sections
+- Wait for reviewer approval before merging

 ## Security & Configuration Tips
 Do not commit secrets or cloud credentials; prefer environment variables or vault tooling. Review IAM- and KMS-related changes with a second maintainer. Confirm proxy settings before running sensitive tests to avoid leaking traffic outside localhost.
3 CLA.md
@@ -83,6 +83,3 @@ that body of laws known as conflict of laws. The parties expressly agree that th
 for the International Sale of Goods will not apply. Any legal action or proceeding arising under this Agreement will be
 brought exclusively in the courts located in Beijing, China, and the parties hereby irrevocably consent to the personal
 jurisdiction and venue therein.
-
-For your reading convenience, this Agreement is written in parallel English and Chinese sections. To the extent there is
-a conflict between the English and Chinese sections, the English sections shall govern.
@@ -186,6 +186,39 @@ cargo clippy --all-targets --all-features -- -D warnings
 cargo clippy --fix --all-targets --all-features
 ```

+## 📝 Pull Request Guidelines
+
+### Language Requirements
+
+**All Pull Request titles and descriptions MUST be written in English.**
+
+This ensures:
+- Consistency across all contributions
+- Accessibility for international contributors
+- Better integration with automated tools and CI/CD systems
+- Clear communication in a globally understood language
+
+#### PR Description Requirements
+
+When creating a Pull Request, ensure:
+
+1. **Title**: Use English and follow Conventional Commits format (e.g., `fix: improve s3-tests readiness detection`)
+2. **Description**: Write in English, following the PR template format
+3. **Code Comments**: Must be in English (as per coding standards)
+4. **Commit Messages**: Must be in English (as per commit guidelines)
+
+#### PR Template
+
+Always use the PR template (`.github/pull_request_template.md`) and fill in all sections:
+- Type of Change
+- Related Issues
+- Summary of Changes
+- Checklist
+- Impact
+- Additional Notes
+
+**Note**: While you may communicate with reviewers in Chinese during discussions, the PR itself (title, description, and all formal documentation) must be in English.
+
+---
+
 Following these guidelines ensures high code quality and smooth collaboration across the RustFS project! 🚀
1840 Cargo.lock (generated)
File diff suppressed because it is too large
47 Cargo.toml
@@ -50,7 +50,7 @@ resolver = "2"
 edition = "2024"
 license = "Apache-2.0"
 repository = "https://github.com/rustfs/rustfs"
-rust-version = "1.85"
+rust-version = "1.90"
 version = "0.0.5"
 homepage = "https://rustfs.com"
 description = "RustFS is a high-performance distributed object storage software built using Rust, one of the most popular languages worldwide. "
@@ -100,24 +100,24 @@ async-compression = { version = "0.4.19" }
 async-recursion = "1.1.1"
 async-trait = "0.1.89"
 axum = "0.8.8"
-axum-server = { version = "0.8.0", features = ["tls-rustls-no-provider"], default-features = false }
+axum-server = { version = "0.8.0", features = ["tls-rustls"], default-features = false }
 futures = "0.3.31"
 futures-core = "0.3.31"
 futures-util = "0.3.31"
 pollster = "0.4.0"
 hyper = { version = "1.8.1", features = ["http2", "http1", "server"] }
-hyper-rustls = { version = "0.27.7", default-features = false, features = ["native-tokio", "http1", "tls12", "logging", "http2", "ring", "webpki-roots"] }
+hyper-rustls = { version = "0.27.7", default-features = false, features = ["native-tokio", "http1", "tls12", "logging", "http2", "aws-lc-rs", "webpki-roots"] }
 hyper-util = { version = "0.1.19", features = ["tokio", "server-auto", "server-graceful"] }
 http = "1.4.0"
 http-body = "1.0.1"
 http-body-util = "0.1.3"
-reqwest = { version = "0.12.28", default-features = false, features = ["rustls-tls-webpki-roots", "charset", "http2", "system-proxy", "stream", "json", "blocking"] }
+reqwest = { version = "0.12.28", default-features = false, features = ["rustls-tls-no-provider", "charset", "http2", "system-proxy", "stream", "json", "blocking"] }
 socket2 = "0.6.1"
-tokio = { version = "1.48.0", features = ["fs", "rt-multi-thread"] }
-tokio-rustls = { version = "0.26.4", default-features = false, features = ["logging", "tls12", "ring"] }
-tokio-stream = { version = "0.1.17" }
-tokio-test = "0.4.4"
-tokio-util = { version = "0.7.17", features = ["io", "compat"] }
+tokio = { version = "1.49.0", features = ["fs", "rt-multi-thread"] }
+tokio-rustls = { version = "0.26.4", default-features = false, features = ["logging", "tls12", "aws-lc-rs"] }
+tokio-stream = { version = "0.1.18" }
+tokio-test = "0.4.5"
+tokio-util = { version = "0.7.18", features = ["io", "compat"] }
 tonic = { version = "0.14.2", features = ["gzip"] }
 tonic-prost = { version = "0.14.2" }
 tonic-prost-build = { version = "0.14.2" }
@@ -136,7 +136,7 @@ rmcp = { version = "0.12.0" }
 rmp = { version = "0.8.15" }
 rmp-serde = { version = "1.3.1" }
 serde = { version = "1.0.228", features = ["derive"] }
-serde_json = { version = "1.0.148", features = ["raw_value"] }
+serde_json = { version = "1.0.149", features = ["raw_value"] }
 serde_urlencoded = "0.7.1"
 schemars = "1.2.0"
@@ -149,8 +149,8 @@ crc-fast = "1.6.0"
 hmac = { version = "0.13.0-rc.3" }
 jsonwebtoken = { version = "10.2.0", features = ["rust_crypto"] }
 pbkdf2 = "0.13.0-rc.5"
-rsa = { version = "0.10.0-rc.10" }
-rustls = { version = "0.23.35", features = ["ring", "logging", "std", "tls12"], default-features = false }
+rsa = { version = "0.10.0-rc.11" }
+rustls = { version = "0.23.36", default-features = false, features = ["aws-lc-rs", "logging", "tls12", "prefer-post-quantum", "std"] }
 rustls-pemfile = "2.2.0"
 rustls-pki-types = "1.13.2"
 sha1 = "0.11.0-rc.3"
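Taken together with the hyper-rustls, tokio-rustls, and reqwest hunks above, this moves the whole TLS stack from the `ring` backend to `aws-lc-rs`. With provider-agnostic features such as `rustls-tls-no-provider`, a binary normally installs a process-wide default crypto provider once at startup. A minimal sketch of that step, assuming rustls 0.23's provider API; this is illustrative, not code from the RustFS sources:

```rust
// Illustrative only: install aws-lc-rs as the process-default rustls crypto
// provider before any TLS client or server is constructed.
fn init_tls_provider() {
    // `install_default` errors if a provider was already installed; ignoring
    // the error makes repeated initialization harmless.
    let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
}

fn main() {
    init_tls_provider();
    // ... hyper/reqwest/tonic clients built afterwards pick up the default.
}
```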
@@ -171,14 +171,14 @@ atoi = "2.0.0"
 atomic_enum = "0.3.0"
 aws-config = { version = "1.8.12" }
 aws-credential-types = { version = "1.2.11" }
-aws-sdk-s3 = { version = "1.119.0", default-features = false, features = ["sigv4a", "rustls", "rt-tokio"] }
+aws-sdk-s3 = { version = "1.119.0", default-features = false, features = ["sigv4a", "default-https-client", "rt-tokio"] }
 aws-smithy-types = { version = "1.3.5" }
 base64 = "0.22.1"
 base64-simd = "0.8.0"
 brotli = "8.0.2"
 cfg-if = "1.0.4"
-clap = { version = "4.5.53", features = ["derive", "env"] }
-const-str = { version = "0.7.1", features = ["std", "proc"] }
+clap = { version = "4.5.54", features = ["derive", "env"] }
+const-str = { version = "1.0.0", features = ["std", "proc"] }
 convert_case = "0.10.0"
 criterion = { version = "0.8", features = ["html_reports"] }
 crossbeam-queue = "0.3.12"
@@ -197,7 +197,7 @@ hex-simd = "0.8.0"
 highway = { version = "1.3.0" }
 ipnetwork = { version = "0.21.1", features = ["serde"] }
 lazy_static = "1.5.0"
-libc = "0.2.178"
+libc = "0.2.179"
 libsystemd = "0.7.2"
 local-ip-address = "0.6.8"
 lz4 = "1.28.1"
@@ -217,7 +217,7 @@ path-absolutize = "3.1.1"
 path-clean = "1.0.1"
 pin-project-lite = "0.2.16"
 pretty_assertions = "1.4.1"
-rand = { version = "0.10.0-rc.5", features = ["serde"] }
+rand = { version = "0.10.0-rc.6", features = ["serde"] }
 rayon = "1.11.0"
 reed-solomon-simd = { version = "3.1.0" }
 regex = { version = "1.12.2" }
@@ -225,7 +225,7 @@ rumqttc = { version = "0.25.1" }
 rust-embed = { version = "8.9.0" }
 rustc-hash = { version = "2.1.1" }
 s3s = { version = "0.13.0-alpha", features = ["minio"], git = "https://github.com/s3s-project/s3s.git", branch = "main" }
-serial_test = "3.2.0"
+serial_test = "3.3.1"
 shadow-rs = { version = "1.5.0", default-features = false }
 siphasher = "1.0.1"
 smallvec = { version = "1.15.1", features = ["serde"] }
@@ -234,7 +234,6 @@ snafu = "0.8.9"
 snap = "1.1.1"
 starshard = { version = "0.6.0", features = ["rayon", "async", "serde"] }
 strum = { version = "0.27.2", features = ["derive"] }
-sysctl = "0.7.1"
 sysinfo = "0.37.2"
 temp-env = "0.3.6"
 tempfile = "3.24.0"
@@ -246,7 +245,7 @@ tracing-error = "0.2.1"
 tracing-opentelemetry = "0.32.0"
 tracing-subscriber = { version = "0.3.22", features = ["env-filter", "time"] }
 transform-stream = "0.3.1"
-url = "2.5.7"
+url = "2.5.8"
 urlencoding = "2.1.3"
 uuid = { version = "1.19.0", features = ["v4", "fast-rng", "macro-diagnostics"] }
 vaultrs = { version = "0.7.4" }
@@ -266,6 +265,14 @@ opentelemetry_sdk = { version = "0.31.0" }
 opentelemetry-semantic-conventions = { version = "0.31.0", features = ["semconv_experimental"] }
 opentelemetry-stdout = { version = "0.31.0" }

+# FTP and SFTP
+libunftp = "0.21.0"
+russh = { version = "0.56.0", features = ["aws-lc-rs", "rsa"], default-features = false }
+russh-sftp = "2.1.1"
+ssh-key = { version = "0.7.0-rc.4", features = ["std", "rsa", "ed25519"] }
+suppaftp = { version = "7.0.7", features = ["tokio", "tokio-rustls", "rustls"] }
+rcgen = "0.14.6"
+
 # Performance Analysis and Memory Profiling
 mimalloc = "0.1"
 # Use tikv-jemallocator as memory allocator and enable performance analysis
@@ -1,4 +1,4 @@
-FROM alpine:3.22 AS build
+FROM alpine:3.23 AS build

 ARG TARGETARCH
 ARG RELEASE=latest
@@ -40,7 +40,7 @@ RUN set -eux; \
     rm -rf rustfs.zip /build/.tmp || true


-FROM alpine:3.22
+FROM alpine:3.23

 ARG RELEASE=latest
 ARG BUILD_DATE
@@ -16,7 +16,7 @@ ARG BUILDPLATFORM
 # -----------------------------
 # Build stage
 # -----------------------------
-FROM rust:1.88-bookworm AS builder
+FROM rust:1.91-trixie AS builder

 # Re-declare args after FROM
 ARG TARGETPLATFORM
2 Makefile
@@ -19,7 +19,7 @@ NUM_CORES := $(shell nproc 2>/dev/null || sysctl -n hw.ncpu)
 MAKEFLAGS += -j$(NUM_CORES) -l$(NUM_CORES)
 MAKEFLAGS += --silent

-SHELL:= /bin/bash
+SHELL := $(shell which bash)
 .SHELLFLAGS = -eu -o pipefail -c

 DOCKER_CLI ?= docker
33 SECURITY.md
@@ -1,19 +1,40 @@
 # Security Policy

+## Security Philosophy
+
+At RustFS, we take security seriously. We believe that **transparency leads to better security**. The more open our code is, the more eyes are on it, and the faster we can identify and resolve potential issues.
+
+We highly value the contributions of the security community and welcome anyone to audit our code. Your efforts help us make RustFS safer for everyone.
+
 ## Supported Versions

-Security updates are provided for the latest released version of this project.
+To help us focus our security efforts, please refer to the table below to see which versions of RustFS are currently supported with security updates.

 | Version | Supported          |
 | ------- | ------------------ |
-| 1.x.x   | :white_check_mark: |
+| Latest  | :white_check_mark: |
 | < 1.0   | :x:                |

 ## Reporting a Vulnerability

-Please report security vulnerabilities **privately** via GitHub Security Advisories:
-
-https://github.com/rustfs/rustfs/security/advisories/new
-
-Do **not** open a public issue for security-sensitive bugs.
-
-You can expect an initial response within a reasonable timeframe. Further updates will be provided as the report is triaged.
+If you discover a security vulnerability in RustFS, we appreciate your help in disclosing it to us responsibly.
+
+**Please do not open a public GitHub issue for security vulnerabilities.** Publicly disclosing a vulnerability can put the entire community at risk before a fix is available.
+
+### How to Report
+
+1. Open a private advisory: https://github.com/rustfs/rustfs/security/advisories/new
+2. Or email us directly at: **security@rustfs.com**
+
+In your email, please include:
+1. **Description**: A detailed description of the vulnerability.
+2. **Steps to Reproduce**: Steps or a script to reproduce the issue.
+3. **Impact**: The potential impact of the vulnerability.
+
+### Our Response Process
+
+1. **Acknowledgment**: We will acknowledge your email within 48 hours.
+2. **Assessment**: We will investigate the issue and determine its severity.
+3. **Fix & Disclosure**: We will work on a patch. Once the patch is released, we will publicly announce the vulnerability and acknowledge your contribution (unless you prefer to remain anonymous).
+
+Thank you for helping keep RustFS and its users safe!
@@ -37,6 +37,8 @@ datas = "datas"
 bre = "bre"
 abd = "abd"
 mak = "mak"
+# s3-tests original test names (cannot be changed)
+nonexisted = "nonexisted"

 [files]
 extend-exclude = []
@@ -348,7 +348,7 @@ impl ErasureSetHealer {
         }

         // save checkpoint periodically
-        if global_obj_idx % 100 == 0 {
+        if global_obj_idx.is_multiple_of(100) {
             checkpoint_manager
                 .update_position(bucket_index, *current_object_index)
                 .await?;
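The modulo test becomes `is_multiple_of`, the form clippy now suggests for unsigned integers; the raised `rust-version = "1.90"` floor in Cargo.toml comfortably covers it. A small standalone illustration (the variable name is reused only for flavor):

```rust
fn main() {
    let global_obj_idx: u64 = 300;

    // Same result as the old `% 100 == 0` check, but the intent is explicit.
    assert_eq!(global_obj_idx % 100 == 0, global_obj_idx.is_multiple_of(100));

    // Unlike `%`, the zero-divisor case is well defined instead of panicking:
    // n.is_multiple_of(0) is simply `n == 0`.
    assert!(!global_obj_idx.is_multiple_of(0));
    assert!(0u64.is_multiple_of(0));
}
```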
@@ -492,12 +492,11 @@ impl HealManager {
         for (_, disk_opt) in GLOBAL_LOCAL_DISK_MAP.read().await.iter() {
             if let Some(disk) = disk_opt {
                 // detect unformatted disk via get_disk_id()
-                if let Err(err) = disk.get_disk_id().await {
-                    if err == DiskError::UnformattedDisk {
-                        endpoints.push(disk.endpoint());
-                        continue;
-                    }
-                }
+                if let Err(err) = disk.get_disk_id().await
+                    && err == DiskError::UnformattedDisk
+                {
+                    endpoints.push(disk.endpoint());
+                    continue;
+                }
             }
         }
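This hunk sets the pattern for most of the Rust changes that follow: nested `if let` / `if` blocks are flattened into `if let ... && ...` chains, which are stable with the 2024 edition the workspace already declares. A self-contained before/after sketch using hypothetical names (`lookup` is not a RustFS function):

```rust
// Hypothetical example of the let-chain rewrite used throughout these hunks.
fn lookup(key: &str) -> Option<i32> {
    if key == "hit" { Some(42) } else { None }
}

fn main() {
    let key = "hit";

    // Before: one nesting level per condition.
    if let Some(v) = lookup(key) {
        if v > 10 {
            println!("nested: {v}");
        }
    }

    // After: a single chain with identical semantics and one indent level.
    if let Some(v) = lookup(key)
        && v > 10
    {
        println!("chained: {v}");
    }
}
```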
@@ -541,10 +541,10 @@ impl ResumeUtils {
         for entry in entries {
             if entry.ends_with(&format!("_{RESUME_STATE_FILE}")) {
                 // Extract task ID from filename: {task_id}_ahm_resume_state.json
-                if let Some(task_id) = entry.strip_suffix(&format!("_{RESUME_STATE_FILE}")) {
-                    if !task_id.is_empty() {
-                        task_ids.push(task_id.to_string());
-                    }
+                if let Some(task_id) = entry.strip_suffix(&format!("_{RESUME_STATE_FILE}"))
+                    && !task_id.is_empty()
+                {
+                    task_ids.push(task_id.to_string());
                 }
             }
         }
@@ -83,10 +83,10 @@ pub struct CheckpointManager {

 impl CheckpointManager {
     pub fn new(node_id: &str, data_dir: &Path) -> Self {
-        if !data_dir.exists() {
-            if let Err(e) = std::fs::create_dir_all(data_dir) {
-                error!("create data dir failed {:?}: {}", data_dir, e);
-            }
-        }
+        if !data_dir.exists()
+            && let Err(e) = std::fs::create_dir_all(data_dir)
+        {
+            error!("create data dir failed {:?}: {}", data_dir, e);
+        }

         let checkpoint_file = data_dir.join(format!("scanner_checkpoint_{node_id}.json"));
@@ -401,10 +401,10 @@ impl Scanner {
         let mut latest_update: Option<SystemTime> = None;

         for snapshot in &outcome.snapshots {
-            if let Some(update) = snapshot.last_update {
-                if latest_update.is_none_or(|current| update > current) {
-                    latest_update = Some(update);
-                }
+            if let Some(update) = snapshot.last_update
+                && latest_update.is_none_or(|current| update > current)
+            {
+                latest_update = Some(update);
             }

             aggregated.objects_total_count = aggregated.objects_total_count.saturating_add(snapshot.objects_total_count);
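The chained condition also leans on `Option::is_none_or`, a relatively recent standard-library helper: it returns `true` when the option is `None` or when the predicate holds, which is exactly the "first value wins, larger values replace it" test used here. A tiny illustration with made-up values:

```rust
fn main() {
    let mut latest: Option<u64> = None;

    // None: any candidate is accepted.
    if latest.is_none_or(|current| 5 > current) {
        latest = Some(5);
    }
    assert_eq!(latest, Some(5));

    // Some(5): only a strictly larger candidate replaces it.
    assert!(!latest.is_none_or(|current| 3 > current));
    assert!(latest.is_none_or(|current| 9 > current));
}
```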
@@ -527,28 +527,20 @@ impl Scanner {
         let (disks, _) = set_disks.get_online_disks_with_healing(false).await;
         if let Some(disk) = disks.first() {
             let bucket_path = disk.path().join(bucket_name);
-            if bucket_path.exists() {
-                if let Ok(entries) = std::fs::read_dir(&bucket_path) {
-                    for entry in entries.flatten() {
-                        if let Ok(file_type) = entry.file_type() {
-                            if file_type.is_dir() {
-                                if let Some(object_name) = entry.file_name().to_str() {
-                                    if !object_name.starts_with('.') {
-                                        debug!("Deep scanning object: {}/{}", bucket_name, object_name);
-                                        if let Err(e) = self.verify_object_integrity(bucket_name, object_name).await {
-                                            warn!(
-                                                "Object integrity verification failed for {}/{}: {}",
-                                                bucket_name, object_name, e
-                                            );
-                                        } else {
-                                            debug!(
-                                                "Object integrity verification passed for {}/{}",
-                                                bucket_name, object_name
-                                            );
-                                        }
-                                    }
-                                }
-                            }
-                        }
+            if bucket_path.exists()
+                && let Ok(entries) = std::fs::read_dir(&bucket_path)
+            {
+                for entry in entries.flatten() {
+                    if let Ok(file_type) = entry.file_type()
+                        && file_type.is_dir()
+                        && let Some(object_name) = entry.file_name().to_str()
+                        && !object_name.starts_with('.')
+                    {
+                        debug!("Deep scanning object: {}/{}", bucket_name, object_name);
+                        if let Err(e) = self.verify_object_integrity(bucket_name, object_name).await {
+                            warn!("Object integrity verification failed for {}/{}: {}", bucket_name, object_name, e);
+                        } else {
+                            debug!("Object integrity verification passed for {}/{}", bucket_name, object_name);
+                        }
                     }
                 }
@@ -859,10 +851,10 @@ impl Scanner {

         // Phase 2: Minimal EC verification for critical objects only
         // Note: The main scanning is now handled by NodeScanner in the background
-        if let Some(ecstore) = rustfs_ecstore::new_object_layer_fn() {
-            if let Err(e) = self.minimal_ec_verification(&ecstore).await {
-                error!("Minimal EC verification failed: {}", e);
-            }
+        if let Some(ecstore) = rustfs_ecstore::new_object_layer_fn()
+            && let Err(e) = self.minimal_ec_verification(&ecstore).await
+        {
+            error!("Minimal EC verification failed: {}", e);
         }

         // Update scan duration
@@ -950,13 +942,12 @@ impl Scanner {
         }

         // If there is still no data, try backend before persisting zeros
-        if data_usage.buckets_usage.is_empty() {
-            if let Ok(existing) = rustfs_ecstore::data_usage::load_data_usage_from_backend(ecstore.clone()).await {
-                if !existing.buckets_usage.is_empty() {
-                    info!("Using existing backend data usage during fallback backoff");
-                    data_usage = existing;
-                }
-            }
+        if data_usage.buckets_usage.is_empty()
+            && let Ok(existing) = rustfs_ecstore::data_usage::load_data_usage_from_backend(ecstore.clone()).await
+            && !existing.buckets_usage.is_empty()
+        {
+            info!("Using existing backend data usage during fallback backoff");
+            data_usage = existing;
         }

         // Avoid overwriting valid backend stats with zeros when fallback is throttled
@@ -1721,36 +1712,34 @@ impl Scanner {
         // check disk status, if offline, submit erasure set heal task
         if !metrics.is_online {
             let enable_healing = self.config.read().await.enable_healing;
-            if enable_healing {
-                if let Some(heal_manager) = &self.heal_manager {
+            if enable_healing && let Some(heal_manager) = &self.heal_manager {
                 // Get bucket list for erasure set healing
                 let buckets = match rustfs_ecstore::new_object_layer_fn() {
                     Some(ecstore) => match ecstore.list_bucket(&ecstore::store_api::BucketOptions::default()).await {
                         Ok(buckets) => buckets.iter().map(|b| b.name.clone()).collect::<Vec<String>>(),
                         Err(e) => {
                             error!("Failed to get bucket list for disk healing: {}", e);
                             return Err(Error::Storage(e));
                         }
                     },
                     None => {
                         error!("No ECStore available for getting bucket list");
                         return Err(Error::Storage(ecstore::error::StorageError::other("No ECStore available")));
                     }
                 };

                 let set_disk_id = format!("pool_{}_set_{}", disk.endpoint().pool_idx, disk.endpoint().set_idx);
                 let req = HealRequest::new(
                     crate::heal::task::HealType::ErasureSet { buckets, set_disk_id },
                     crate::heal::task::HealOptions::default(),
                     crate::heal::task::HealPriority::High,
                 );
                 match heal_manager.submit_heal_request(req).await {
                     Ok(task_id) => {
                         warn!("disk offline, submit erasure set heal task: {} {}", task_id, disk_path);
                     }
                     Err(e) => {
                         error!("disk offline, submit erasure set heal task failed: {} {}", disk_path, e);
                     }
                 }
             }
@@ -1778,36 +1767,34 @@ impl Scanner {

         // disk access failed, submit erasure set heal task
         let enable_healing = self.config.read().await.enable_healing;
-        if enable_healing {
-            if let Some(heal_manager) = &self.heal_manager {
+        if enable_healing && let Some(heal_manager) = &self.heal_manager {
             // Get bucket list for erasure set healing
             let buckets = match rustfs_ecstore::new_object_layer_fn() {
                 Some(ecstore) => match ecstore.list_bucket(&ecstore::store_api::BucketOptions::default()).await {
                     Ok(buckets) => buckets.iter().map(|b| b.name.clone()).collect::<Vec<String>>(),
                     Err(e) => {
                         error!("Failed to get bucket list for disk healing: {}", e);
                         return Err(Error::Storage(e));
                     }
                 },
                 None => {
                     error!("No ECStore available for getting bucket list");
                     return Err(Error::Storage(ecstore::error::StorageError::other("No ECStore available")));
                 }
             };

             let set_disk_id = format!("pool_{}_set_{}", disk.endpoint().pool_idx, disk.endpoint().set_idx);
             let req = HealRequest::new(
                 crate::heal::task::HealType::ErasureSet { buckets, set_disk_id },
                 crate::heal::task::HealOptions::default(),
                 crate::heal::task::HealPriority::Urgent,
             );
             match heal_manager.submit_heal_request(req).await {
                 Ok(task_id) => {
                     warn!("disk access failed, submit erasure set heal task: {} {}", task_id, disk_path);
                 }
                 Err(heal_err) => {
                     error!("disk access failed, submit erasure set heal task failed: {} {}", disk_path, heal_err);
                 }
             }
         }
@@ -1820,11 +1807,11 @@ impl Scanner {
         let mut disk_objects = HashMap::new();
         for volume in volumes {
             // check cancel token
-            if let Some(cancel_token) = get_ahm_services_cancel_token() {
-                if cancel_token.is_cancelled() {
-                    info!("Cancellation requested, stopping disk scan");
-                    break;
-                }
+            if let Some(cancel_token) = get_ahm_services_cancel_token()
+                && cancel_token.is_cancelled()
+            {
+                info!("Cancellation requested, stopping disk scan");
+                break;
             }

             match self.scan_volume(disk, &volume.name).await {
@@ -1955,104 +1942,96 @@ impl Scanner {

             // object metadata damaged, submit metadata heal task
             let enable_healing = self.config.read().await.enable_healing;
-            if enable_healing {
-                if let Some(heal_manager) = &self.heal_manager {
+            if enable_healing && let Some(heal_manager) = &self.heal_manager {
                 let req = HealRequest::metadata(bucket.to_string(), entry.name.clone());
                 match heal_manager.submit_heal_request(req).await {
                     Ok(task_id) => {
                         warn!("object metadata damaged, submit heal task: {} {} / {}", task_id, bucket, entry.name);
                     }
                     Err(e) => {
                         error!("object metadata damaged, submit heal task failed: {} / {} {}", bucket, entry.name, e);
                     }
                 }
             }
         } else {
             // Apply lifecycle actions
-            if let Some(lifecycle_config) = &lifecycle_config {
-                if disk.is_local() {
+            if let Some(lifecycle_config) = &lifecycle_config
+                && disk.is_local()
+            {
                 let vcfg = BucketVersioningSys::get(bucket).await.ok();

                 let mut scanner_item = ScannerItem {
                     bucket: bucket.to_string(),
                     object_name: entry.name.clone(),
                     lifecycle: Some(lifecycle_config.clone()),
                     versioning: versioning_config.clone(),
                 };
                 //ScannerItem::new(bucket.to_string(), Some(lifecycle_config.clone()), versioning_config.clone());
                 let fivs = match entry.clone().file_info_versions(&scanner_item.bucket) {
                     Ok(fivs) => fivs,
                     Err(_err) => {
                         stop_fn();
                         return Err(Error::other("skip this file"));
                     }
                 };
                 let mut size_s = SizeSummary::default();
                 let obj_infos = match scanner_item.apply_versions_actions(&fivs.versions).await {
                     Ok(obj_infos) => obj_infos,
                     Err(_err) => {
                         stop_fn();
                         return Err(Error::other("skip this file"));
                     }
                 };

                 let versioned = if let Some(vcfg) = vcfg.as_ref() {
                     vcfg.versioned(&scanner_item.object_name)
                 } else {
                     false
                 };

                 #[allow(unused_assignments)]
                 let mut obj_deleted = false;
                 for info in obj_infos.iter() {
                     let sz: i64;
                     (obj_deleted, sz) = scanner_item.apply_actions(info, &mut size_s).await;

                     if obj_deleted {
                         break;
                     }

                     let actual_sz = match info.get_actual_size() {
                         Ok(size) => size,
                         Err(_) => continue,
                     };

                     if info.delete_marker {
                         size_s.delete_markers += 1;
                     }

                     if info.version_id.is_some() && sz == actual_sz {
                         size_s.versions += 1;
                     }

                     size_s.total_size += sz as usize;

                     if info.delete_marker {
                         continue;
                     }
                 }

                 for free_version in fivs.free_versions.iter() {
                     let _obj_info = rustfs_ecstore::store_api::ObjectInfo::from_file_info(
                         free_version,
                         &scanner_item.bucket,
                         &scanner_item.object_name,
                         versioned,
                     );
                 }

                 // todo: global trace
                 /*if obj_deleted {
                     return Err(Error::other(ERR_IGNORE_FILE_CONTRIB).into());
                 }*/
             }

             // Store object metadata for later analysis
@@ -2064,22 +2043,17 @@ impl Scanner {

             // object metadata parse failed, submit metadata heal task
             let enable_healing = self.config.read().await.enable_healing;
-            if enable_healing {
-                if let Some(heal_manager) = &self.heal_manager {
-                    let req = HealRequest::metadata(bucket.to_string(), entry.name.clone());
-                    match heal_manager.submit_heal_request(req).await {
-                        Ok(task_id) => {
-                            warn!(
-                                "object metadata parse failed, submit heal task: {} {} / {}",
-                                task_id, bucket, entry.name
-                            );
-                        }
-                        Err(e) => {
-                            error!(
-                                "object metadata parse failed, submit heal task failed: {} / {} {}",
-                                bucket, entry.name, e
-                            );
-                        }
-                    }
-                }
-            }
+            if enable_healing && let Some(heal_manager) = &self.heal_manager {
+                let req = HealRequest::metadata(bucket.to_string(), entry.name.clone());
+                match heal_manager.submit_heal_request(req).await {
+                    Ok(task_id) => {
+                        warn!("object metadata parse failed, submit heal task: {} {} / {}", task_id, bucket, entry.name);
+                    }
+                    Err(e) => {
+                        error!(
+                            "object metadata parse failed, submit heal task failed: {} / {} {}",
+                            bucket, entry.name, e
+                        );
+                    }
+                }
+            }
@@ -2190,17 +2164,14 @@ impl Scanner {
         // the delete marker, but we keep it conservative here.
         let mut has_latest_delete_marker = false;
         for &disk_idx in locations {
-            if let Some(bucket_map) = all_disk_objects.get(disk_idx) {
-                if let Some(file_map) = bucket_map.get(bucket) {
-                    if let Some(fm) = file_map.get(object_name) {
-                        if let Some(first_ver) = fm.versions.first() {
-                            if first_ver.header.version_type == VersionType::Delete {
-                                has_latest_delete_marker = true;
-                                break;
-                            }
-                        }
-                    }
-                }
-            }
+            if let Some(bucket_map) = all_disk_objects.get(disk_idx)
+                && let Some(file_map) = bucket_map.get(bucket)
+                && let Some(fm) = file_map.get(object_name)
+                && let Some(first_ver) = fm.versions.first()
+                && first_ver.header.version_type == VersionType::Delete
+            {
+                has_latest_delete_marker = true;
+                break;
+            }
         }
         if has_latest_delete_marker {
@@ -2248,28 +2219,26 @@ impl Scanner {

             // submit heal task
             let enable_healing = self.config.read().await.enable_healing;
-            if enable_healing {
-                if let Some(heal_manager) = &self.heal_manager {
+            if enable_healing && let Some(heal_manager) = &self.heal_manager {
                 use crate::heal::{HealPriority, HealRequest};
                 let req = HealRequest::new(
                     crate::heal::HealType::Object {
                         bucket: bucket.clone(),
                         object: object_name.clone(),
                         version_id: None,
                     },
                     crate::heal::HealOptions::default(),
                     HealPriority::High,
                 );
                 match heal_manager.submit_heal_request(req).await {
                     Ok(task_id) => {
                         warn!(
                             "object missing, submit heal task: {} {} / {} (missing disks: {:?})",
                             task_id, bucket, object_name, missing_disks
                         );
                     }
                     Err(e) => {
                         error!("object missing, submit heal task failed: {} / {} {}", bucket, object_name, e);
                     }
                 }
             }
@@ -2277,11 +2246,11 @@ impl Scanner {

             // Step 3: Deep scan EC verification
             let config = self.config.read().await;
-            if config.scan_mode == ScanMode::Deep {
-                if let Err(e) = self.verify_object_integrity(bucket, object_name).await {
-                    objects_with_ec_issues += 1;
-                    warn!("Object integrity verification failed for object {}/{}: {}", bucket, object_name, e);
-                }
+            if config.scan_mode == ScanMode::Deep
+                && let Err(e) = self.verify_object_integrity(bucket, object_name).await
+            {
+                objects_with_ec_issues += 1;
+                warn!("Object integrity verification failed for object {}/{}: {}", bucket, object_name, e);
             }
         }
@@ -2293,10 +2262,10 @@ impl Scanner {

         // Step 4: Collect data usage statistics if enabled
         let config = self.config.read().await;
-        if config.enable_data_usage_stats {
-            if let Err(e) = self.collect_data_usage_statistics(all_disk_objects).await {
-                error!("Failed to collect data usage statistics: {}", e);
-            }
+        if config.enable_data_usage_stats
+            && let Err(e) = self.collect_data_usage_statistics(all_disk_objects).await
+        {
+            error!("Failed to collect data usage statistics: {}", e);
         }
         drop(config);
@@ -2526,11 +2495,11 @@ impl Scanner {
         info!("Starting legacy scan loop for backward compatibility");

         loop {
-            if let Some(token) = get_ahm_services_cancel_token() {
-                if token.is_cancelled() {
-                    info!("Cancellation requested, exiting legacy scan loop");
-                    break;
-                }
+            if let Some(token) = get_ahm_services_cancel_token()
+                && token.is_cancelled()
+            {
+                info!("Cancellation requested, exiting legacy scan loop");
+                break;
             }

             let (enable_data_usage_stats, scan_interval) = {
@@ -2538,10 +2507,8 @@ impl Scanner {
                 (config.enable_data_usage_stats, config.scan_interval)
             };

-            if enable_data_usage_stats {
-                if let Err(e) = self.collect_and_persist_data_usage().await {
-                    warn!("Background data usage collection failed: {}", e);
-                }
+            if enable_data_usage_stats && let Err(e) = self.collect_and_persist_data_usage().await {
+                warn!("Background data usage collection failed: {}", e);
             }

             // Update local stats in aggregator after latest scan
@@ -2656,10 +2623,10 @@ mod tests {
         // create temp dir as 4 disks
         let test_base_dir = test_dir.unwrap_or("/tmp/rustfs_ahm_test");
         let temp_dir = std::path::PathBuf::from(test_base_dir);
-        if temp_dir.exists() {
-            if let Err(e) = fs::remove_dir_all(&temp_dir) {
-                panic!("Failed to remove test directory: {e}");
-            }
+        if temp_dir.exists()
+            && let Err(e) = fs::remove_dir_all(&temp_dir)
+        {
+            panic!("Failed to remove test directory: {e}");
         }
         if let Err(e) = fs::create_dir_all(&temp_dir) {
             panic!("Failed to create test directory: {e}");
@@ -305,10 +305,10 @@ fn compute_object_usage(bucket: &str, object: &str, file_meta: &FileMeta) -> Res
             has_live_object = true;
             versions_count = versions_count.saturating_add(1);

-            if latest_file_info.is_none() {
-                if let Ok(info) = file_meta.into_fileinfo(bucket, object, "", false, false) {
-                    latest_file_info = Some(info);
-                }
+            if latest_file_info.is_none()
+                && let Ok(info) = file_meta.into_fileinfo(bucket, object, "", false, false, false)
+            {
+                latest_file_info = Some(info);
             }
         }
     }
@@ -112,10 +112,10 @@ impl LocalStatsManager {
     /// create new local stats manager
     pub fn new(node_id: &str, data_dir: &Path) -> Self {
         // ensure data directory exists
-        if !data_dir.exists() {
-            if let Err(e) = std::fs::create_dir_all(data_dir) {
-                error!("create stats data directory failed {:?}: {}", data_dir, e);
-            }
-        }
+        if !data_dir.exists()
+            && let Err(e) = std::fs::create_dir_all(data_dir)
+        {
+            error!("create stats data directory failed {:?}: {}", data_dir, e);
+        }

         let stats_file = data_dir.join(format!("scanner_stats_{node_id}.json"));
@@ -436,10 +436,10 @@ impl NodeScanner {
     /// create a new node scanner
     pub fn new(node_id: String, config: NodeScannerConfig) -> Self {
         // Ensure data directory exists
-        if !config.data_dir.exists() {
-            if let Err(e) = std::fs::create_dir_all(&config.data_dir) {
-                error!("create data directory failed {:?}: {}", config.data_dir, e);
-            }
-        }
+        if !config.data_dir.exists()
+            && let Err(e) = std::fs::create_dir_all(&config.data_dir)
+        {
+            error!("create data directory failed {:?}: {}", config.data_dir, e);
+        }

         let stats_manager = Arc::new(LocalStatsManager::new(&node_id, &config.data_dir));
@@ -327,16 +327,16 @@ impl DecentralizedStatsAggregator {
         );

         // Check cache validity if timestamp is not initial value (UNIX_EPOCH)
-        if cache_timestamp != SystemTime::UNIX_EPOCH {
-            if let Ok(elapsed) = now.duration_since(cache_timestamp) {
-                if elapsed < cache_ttl {
-                    if let Some(cached) = self.cached_stats.read().await.as_ref() {
-                        debug!("Returning cached aggregated stats, remaining TTL: {:?}", cache_ttl - elapsed);
-                        return Ok(cached.clone());
-                    }
-                } else {
-                    debug!("Cache expired: elapsed={:?} >= ttl={:?}", elapsed, cache_ttl);
-                }
+        if cache_timestamp != SystemTime::UNIX_EPOCH
+            && let Ok(elapsed) = now.duration_since(cache_timestamp)
+        {
+            if elapsed < cache_ttl {
+                if let Some(cached) = self.cached_stats.read().await.as_ref() {
+                    debug!("Returning cached aggregated stats, remaining TTL: {:?}", cache_ttl - elapsed);
+                    return Ok(cached.clone());
+                }
+            } else {
+                debug!("Cache expired: elapsed={:?} >= ttl={:?}", elapsed, cache_ttl);
+            }
         }
@@ -421,86 +421,86 @@ mod serial_tests {
             }
         };

-        if let Some(lmdb_env) = GLOBAL_LMDB_ENV.get() {
-            if let Some(lmdb) = GLOBAL_LMDB_DB.get() {
+        if let Some(lmdb_env) = GLOBAL_LMDB_ENV.get()
+            && let Some(lmdb) = GLOBAL_LMDB_DB.get()
+        {
             let mut wtxn = lmdb_env.write_txn().unwrap();

             /*if let Ok((lc_config, _)) = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket_name.as_str()).await {
                 if let Ok(object_info) = ecstore
                     .get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
                     .await
                 {
                     let event = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::eval_action_from_lifecycle(
                         &lc_config,
                         None,
                         None,
                         &object_info,
                     )
                     .await;

                     rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::apply_expiry_on_non_transitioned_objects(
                         ecstore.clone(),
                         &object_info,
                         &event,
                         &rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_audit::LcEventSrc::Scanner,
                     )
                     .await;

                     expired = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(2)).await;
                 }
             }*/

             for record in records {
                 if !record.usage.has_live_object {
                     continue;
                 }

                 let object_info = convert_record_to_object_info(record);
                 println!("object_info2: {object_info:?}");
                 let mod_time = object_info.mod_time.unwrap_or(OffsetDateTime::now_utc());
                 let expiry_time = rustfs_ecstore::bucket::lifecycle::lifecycle::expected_expiry_time(mod_time, 1);

                 let version_id = if let Some(version_id) = object_info.version_id {
                     version_id.to_string()
                 } else {
                     "zzzzzzzz-zzzz-zzzz-zzzz-zzzzzzzzzzzz".to_string()
                 };

                 lmdb.put(
                     &mut wtxn,
                     &expiry_time.unix_timestamp(),
                     &LifecycleContent {
                         ver_no: 0,
                         ver_id: version_id,
                         mod_time,
                         type_: LifecycleType::TransitionNoncurrent,
                         object_name: object_info.name,
                     },
                 )
                 .unwrap();
             }

             wtxn.commit().unwrap();

             let mut wtxn = lmdb_env.write_txn().unwrap();
             let iter = lmdb.iter_mut(&mut wtxn).unwrap();
             //let _ = unsafe { iter.del_current().unwrap() };
             for row in iter {
                 if let Ok(ref elm) = row {
                     let LifecycleContent {
                         ver_no,
                         ver_id,
                         mod_time,
                         type_,
                         object_name,
                     } = &elm.1;
                     println!("cache row:{ver_no} {ver_id} {mod_time} {type_:?} {object_name}");
                 }
                 println!("row:{row:?}");
             }
             //drop(iter);
             wtxn.commit().unwrap();
         }

         println!("Lifecycle cache test completed");
@@ -415,29 +415,28 @@ mod serial_tests {
             .await;
         println!("Pending expiry tasks: {pending}");

-        if let Ok((lc_config, _)) = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket_name.as_str()).await {
-            if let Ok(object_info) = ecstore
-                .get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
-                .await
-            {
+        if let Ok((lc_config, _)) = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket_name.as_str()).await
+            && let Ok(object_info) = ecstore
+                .get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
+                .await
+        {
             let event = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::eval_action_from_lifecycle(
                 &lc_config,
                 None,
                 None,
                 &object_info,
             )
             .await;

             rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::apply_expiry_on_non_transitioned_objects(
                 ecstore.clone(),
                 &object_info,
                 &event,
                 &rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_audit::LcEventSrc::Scanner,
             )
             .await;

             expired = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(2)).await;
         }

         if !expired {
@@ -550,32 +549,31 @@ mod serial_tests {
|
||||
.await;
|
||||
println!("Pending expiry tasks: {pending}");
|
||||
|
||||
if let Ok((lc_config, _)) = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket_name.as_str()).await {
|
||||
if let Ok(obj_info) = ecstore
|
||||
if let Ok((lc_config, _)) = rustfs_ecstore::bucket::metadata_sys::get_lifecycle_config(bucket_name.as_str()).await
|
||||
&& let Ok(obj_info) = ecstore
|
||||
.get_object_info(bucket_name.as_str(), object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
|
||||
.await
|
||||
{
|
||||
let event = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::eval_action_from_lifecycle(
|
||||
&lc_config, None, None, &obj_info,
|
||||
)
|
||||
.await;
|
||||
{
|
||||
let event = rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::eval_action_from_lifecycle(
|
||||
&lc_config, None, None, &obj_info,
|
||||
)
|
||||
.await;
|
||||
|
||||
rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::apply_expiry_on_non_transitioned_objects(
|
||||
ecstore.clone(),
|
||||
&obj_info,
|
||||
&event,
|
||||
&rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_audit::LcEventSrc::Scanner,
|
||||
)
|
||||
.await;
|
||||
rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::apply_expiry_on_non_transitioned_objects(
|
||||
ecstore.clone(),
|
||||
&obj_info,
|
||||
&event,
|
||||
&rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_audit::LcEventSrc::Scanner,
|
||||
)
|
||||
.await;
|
||||
|
||||
deleted = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(2)).await;
|
||||
deleted = wait_for_object_absence(&ecstore, bucket_name.as_str(), object_name, Duration::from_secs(2)).await;
|
||||
|
||||
if !deleted {
|
||||
println!(
|
||||
"Object info: name={}, size={}, mod_time={:?}",
|
||||
obj_info.name, obj_info.size, obj_info.mod_time
|
||||
);
|
||||
}
|
||||
if !deleted {
|
||||
println!(
|
||||
"Object info: name={}, size={}, mod_time={:?}",
|
||||
obj_info.name, obj_info.size, obj_info.mod_time
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -204,10 +204,10 @@ impl TargetFactory for MQTTTargetFactory {
|
||||
if !std::path::Path::new(&queue_dir).is_absolute() {
|
||||
return Err(TargetError::Configuration("MQTT queue directory must be an absolute path".to_string()));
|
||||
}
|
||||
if let Some(qos_str) = config.lookup(MQTT_QOS) {
|
||||
if qos_str == "0" {
|
||||
warn!("Using queue_dir with QoS 0 may result in event loss");
|
||||
}
|
||||
if let Some(qos_str) = config.lookup(MQTT_QOS)
|
||||
&& qos_str == "0"
|
||||
{
|
||||
warn!("Using queue_dir with QoS 0 may result in event loss");
|
||||
}
|
||||
}
|
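This hunk is the first of several in this range that collapse nested `if let`/`if` blocks into a single let-chain (usable since let-chains were stabilized for the Rust 2024 edition). The shape of the transformation, with illustrative names:

```rust
use std::collections::HashMap;

fn qos_warning(config: &HashMap<String, String>) {
    // Before: two nested conditionals.
    if let Some(qos) = config.get("qos") {
        if qos == "0" {
            eprintln!("QoS 0 may lose events");
        }
    }

    // After: one let-chain joining the pattern match and the guard with `&&`.
    if let Some(qos) = config.get("qos")
        && qos == "0"
    {
        eprintln!("QoS 0 may lose events");
    }
}
```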


@@ -21,6 +21,7 @@ use futures::stream::FuturesUnordered;
use hashbrown::{HashMap, HashSet};
use rustfs_config::{DEFAULT_DELIMITER, ENABLE_KEY, ENV_PREFIX, EnableState, audit::AUDIT_ROUTE_PREFIX};
use rustfs_ecstore::config::{Config, KVS};
use rustfs_targets::arn::TargetID;
use rustfs_targets::{Target, TargetError, target::ChannelTargetType};
use std::str::FromStr;
use std::sync::Arc;
@@ -138,12 +139,11 @@ impl AuditRegistry {
format!("{ENV_PREFIX}{AUDIT_ROUTE_PREFIX}{target_type}{DEFAULT_DELIMITER}{ENABLE_KEY}{DEFAULT_DELIMITER}")
    .to_uppercase();
for (key, value) in &all_env {
    if EnableState::from_str(value).ok().map(|s| s.is_enabled()).unwrap_or(false) {
        if let Some(id) = key.strip_prefix(&enable_prefix) {
            if !id.is_empty() {
                instance_ids_from_env.insert(id.to_lowercase());
            }
        }
    if EnableState::from_str(value).ok().map(|s| s.is_enabled()).unwrap_or(false)
        && let Some(id) = key.strip_prefix(&enable_prefix)
        && !id.is_empty()
    {
        instance_ids_from_env.insert(id.to_lowercase());
    }
}

@@ -292,10 +292,10 @@ impl AuditRegistry {
for section in sections {
    let mut section_map: std::collections::HashMap<String, KVS> = std::collections::HashMap::new();
    // Add default item
    if let Some(default_kvs) = section_defaults.get(&section) {
        if !default_kvs.is_empty() {
            section_map.insert(DEFAULT_DELIMITER.to_string(), default_kvs.clone());
        }
    if let Some(default_kvs) = section_defaults.get(&section)
        && !default_kvs.is_empty()
    {
        section_map.insert(DEFAULT_DELIMITER.to_string(), default_kvs.clone());
    }

    // Add successful instance item
@@ -393,4 +393,80 @@ impl AuditRegistry {

    Ok(())
}

/// Creates a unique key for a target based on its type and ID
///
/// # Arguments
/// * `target_type` - The type of the target (e.g., "webhook", "mqtt").
/// * `target_id` - The identifier for the target instance.
///
/// # Returns
/// * `String` - The unique key for the target.
pub fn create_key(&self, target_type: &str, target_id: &str) -> String {
    let key = TargetID::new(target_id.to_string(), target_type.to_string());
    info!(target_type = %target_type, "Create key for {}", key);
    key.to_string()
}

/// Enables a target (placeholder, assumes target exists)
///
/// # Arguments
/// * `target_type` - The type of the target (e.g., "webhook", "mqtt").
/// * `target_id` - The identifier for the target instance.
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure.
pub fn enable_target(&self, target_type: &str, target_id: &str) -> AuditResult<()> {
    let key = self.create_key(target_type, target_id);
    if self.get_target(&key).is_some() {
        info!("Target {}-{} enabled", target_type, target_id);
        Ok(())
    } else {
        Err(AuditError::Configuration(
            format!("Target not found: {}-{}", target_type, target_id),
            None,
        ))
    }
}

/// Disables a target (placeholder, assumes target exists)
///
/// # Arguments
/// * `target_type` - The type of the target (e.g., "webhook", "mqtt").
/// * `target_id` - The identifier for the target instance.
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure.
pub fn disable_target(&self, target_type: &str, target_id: &str) -> AuditResult<()> {
    let key = self.create_key(target_type, target_id);
    if self.get_target(&key).is_some() {
        info!("Target {}-{} disabled", target_type, target_id);
        Ok(())
    } else {
        Err(AuditError::Configuration(
            format!("Target not found: {}-{}", target_type, target_id),
            None,
        ))
    }
}

/// Upserts a target into the registry
///
/// # Arguments
/// * `target_type` - The type of the target (e.g., "webhook", "mqtt").
/// * `target_id` - The identifier for the target instance.
/// * `target` - The target instance to be upserted.
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure.
pub fn upsert_target(
    &mut self,
    target_type: &str,
    target_id: &str,
    target: Box<dyn Target<AuditEntry> + Send + Sync>,
) -> AuditResult<()> {
    let key = self.create_key(target_type, target_id);
    self.targets.insert(key, target);
    Ok(())
}
}
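Taken together, these new methods key every target by the string form of a `TargetID`, so enable, disable, and upsert all round-trip through `create_key`. A hedged usage sketch (imports from the audit crate and a ready-made boxed target are assumed to be in scope):

```rust
// Illustrative only: `registry` and `webhook_target` are assumed to exist.
fn register_webhook(
    mut registry: AuditRegistry,
    webhook_target: Box<dyn Target<AuditEntry> + Send + Sync>,
) -> AuditResult<()> {
    // Insert (or replace) the target under the key derived from ("webhook", "primary").
    registry.upsert_target("webhook", "primary", webhook_target)?;
    // Enabling succeeds because the same key now resolves to a registered target.
    registry.enable_target("webhook", "primary")
}
```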

@@ -274,9 +274,9 @@ impl AuditSystem {
drop(state);

let registry = self.registry.lock().await;
let target_ids = registry.list_targets();
let target_keys = registry.list_targets();

if target_ids.is_empty() {
if target_keys.is_empty() {
    warn!("No audit targets configured for dispatch");
    return Ok(());
}
@@ -284,22 +284,22 @@ impl AuditSystem {
// Dispatch to all targets concurrently
let mut tasks = Vec::new();

for target_id in target_ids {
    if let Some(target) = registry.get_target(&target_id) {
for target_key in target_keys {
    if let Some(target) = registry.get_target(&target_key) {
        let entry_clone = Arc::clone(&entry);
        let target_id_clone = target_id.clone();
        let target_key_clone = target_key.clone();

        // Create EntityTarget for the audit log entry
        let entity_target = EntityTarget {
            object_name: entry.api.name.clone().unwrap_or_default(),
            bucket_name: entry.api.bucket.clone().unwrap_or_default(),
            event_name: rustfs_targets::EventName::ObjectCreatedPut, // Default, should be derived from entry
            event_name: entry.event, // Derived from the audit entry
            data: (*entry_clone).clone(),
        };

        let task = async move {
            let result = target.save(Arc::new(entity_target)).await;
            (target_id_clone, result)
            (target_key_clone, result)
        };

        tasks.push(task);
@@ -312,14 +312,14 @@ impl AuditSystem {
let mut errors = Vec::new();
let mut success_count = 0;

for (target_id, result) in results {
for (target_key, result) in results {
    match result {
        Ok(_) => {
            success_count += 1;
            observability::record_target_success();
        }
        Err(e) => {
            error!(target_id = %target_id, error = %e, "Failed to dispatch audit log to target");
            error!(target_id = %target_key, error = %e, "Failed to dispatch audit log to target");
            errors.push(e);
            observability::record_target_failure();
        }
@@ -360,18 +360,18 @@ impl AuditSystem {
drop(state);

let registry = self.registry.lock().await;
let target_ids = registry.list_targets();
let target_keys = registry.list_targets();

if target_ids.is_empty() {
if target_keys.is_empty() {
    warn!("No audit targets configured for batch dispatch");
    return Ok(());
}

let mut tasks = Vec::new();
for target_id in target_ids {
    if let Some(target) = registry.get_target(&target_id) {
for target_key in target_keys {
    if let Some(target) = registry.get_target(&target_key) {
        let entries_clone: Vec<_> = entries.iter().map(Arc::clone).collect();
        let target_id_clone = target_id.clone();
        let target_key_clone = target_key.clone();

        let task = async move {
            let mut success_count = 0;
@@ -380,7 +380,7 @@ impl AuditSystem {
            let entity_target = EntityTarget {
                object_name: entry.api.name.clone().unwrap_or_default(),
                bucket_name: entry.api.bucket.clone().unwrap_or_default(),
                event_name: rustfs_targets::EventName::ObjectCreatedPut,
                event_name: entry.event,
                data: (*entry).clone(),
            };
            match target.save(Arc::new(entity_target)).await {
@@ -388,7 +388,7 @@ impl AuditSystem {
                Err(e) => errors.push(e),
            }
        }
        (target_id_clone, success_count, errors)
        (target_key_clone, success_count, errors)
        };
        tasks.push(task);
    }
@@ -418,6 +418,7 @@ impl AuditSystem {
}

/// Starts the audit stream processing for a target with batching and retry logic
///
/// # Arguments
/// * `store` - The store from which to read audit entries
/// * `target` - The target to which audit entries will be sent
@@ -501,7 +502,7 @@ impl AuditSystem {
/// Enables a specific target
///
/// # Arguments
/// * `target_id` - The ID of the target to enable
/// * `target_id` - The ID of the target to enable (a `TargetID` rendered as a string)
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure
@@ -520,7 +521,7 @@ impl AuditSystem {
/// Disables a specific target
///
/// # Arguments
/// * `target_id` - The ID of the target to disable
/// * `target_id` - The ID of the target to disable (a `TargetID` rendered as a string)
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure
@@ -539,7 +540,7 @@ impl AuditSystem {
/// Removes a target from the system
///
/// # Arguments
/// * `target_id` - The ID of the target to remove
/// * `target_id` - The ID of the target to remove (a `TargetID` rendered as a string)
///
/// # Returns
/// * `AuditResult<()>` - Result indicating success or failure
@@ -559,7 +560,7 @@ impl AuditSystem {
/// Updates or inserts a target
///
/// # Arguments
/// * `target_id` - The ID of the target to upsert
/// * `target_id` - The ID of the target to upsert (a `TargetID` rendered as a string)
/// * `target` - The target instance to insert or update
///
/// # Returns
@@ -573,10 +574,10 @@ impl AuditSystem {
}

// Remove existing target if present
if let Some(old_target) = registry.remove_target(&target_id) {
    if let Err(e) = old_target.close().await {
        error!(target_id = %target_id, error = %e, "Failed to close old target during upsert");
    }
if let Some(old_target) = registry.remove_target(&target_id)
    && let Err(e) = old_target.close().await
{
    error!(target_id = %target_id, error = %e, "Failed to close old target during upsert");
}

registry.add_target(target_id.clone(), target);
@@ -596,7 +597,7 @@ impl AuditSystem {
/// Gets information about a specific target
///
/// # Arguments
/// * `target_id` - The ID of the target to retrieve
/// * `target_id` - The ID of the target to retrieve (a `TargetID` rendered as a string)
///
/// # Returns
/// * `Option<String>` - Target ID if found

@@ -605,13 +605,12 @@ impl DataUsageCache {

pub fn search_parent(&self, hash: &DataUsageHash) -> Option<DataUsageHash> {
    let want = hash.key();
    if let Some(last_index) = want.rfind('/') {
        if let Some(v) = self.find(&want[0..last_index]) {
            if v.children.contains(&want) {
                let found = hash_path(&want[0..last_index]);
                return Some(found);
            }
        }
    if let Some(last_index) = want.rfind('/')
        && let Some(v) = self.find(&want[0..last_index])
        && v.children.contains(&want)
    {
        let found = hash_path(&want[0..last_index]);
        return Some(found);
    }

    for (k, v) in self.cache.iter() {
@@ -1150,10 +1149,10 @@ impl DataUsageInfo {
self.buckets_count = self.buckets_usage.len() as u64;

// Update last update time
if let Some(other_update) = other.last_update {
    if self.last_update.is_none() || other_update > self.last_update.unwrap() {
        self.last_update = Some(other_update);
    }
if let Some(other_update) = other.last_update
    && (self.last_update.is_none() || other_update > self.last_update.unwrap())
{
    self.last_update = Some(other_update);
}
}
}

@@ -14,6 +14,7 @@

#![allow(non_upper_case_globals)] // FIXME

use chrono::{DateTime, Utc};
use std::collections::HashMap;
use std::sync::LazyLock;
use tokio::sync::RwLock;
@@ -26,6 +27,30 @@ pub static GLOBAL_RUSTFS_ADDR: LazyLock<RwLock<String>> = LazyLock::new(|| RwLoc
pub static GLOBAL_CONN_MAP: LazyLock<RwLock<HashMap<String, Channel>>> = LazyLock::new(|| RwLock::new(HashMap::new()));
pub static GLOBAL_ROOT_CERT: LazyLock<RwLock<Option<Vec<u8>>>> = LazyLock::new(|| RwLock::new(None));
pub static GLOBAL_MTLS_IDENTITY: LazyLock<RwLock<Option<MtlsIdentityPem>>> = LazyLock::new(|| RwLock::new(None));
/// Global initialization time of the RustFS node.
pub static GLOBAL_INIT_TIME: LazyLock<RwLock<Option<DateTime<Utc>>>> = LazyLock::new(|| RwLock::new(None));

/// Set the global local node name.
///
/// # Arguments
/// * `name` - A string slice representing the local node name.
pub async fn set_global_local_node_name(name: &str) {
    *GLOBAL_LOCAL_NODE_NAME.write().await = name.to_string();
}

/// Set the global RustFS initialization time to the current UTC time.
pub async fn set_global_init_time_now() {
    let now = Utc::now();
    *GLOBAL_INIT_TIME.write().await = Some(now);
}

/// Get the global RustFS initialization time.
///
/// # Returns
/// * `Option<DateTime<Utc>>` - The initialization time if set.
pub async fn get_global_init_time() -> Option<DateTime<Utc>> {
    *GLOBAL_INIT_TIME.read().await
}
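The new `GLOBAL_INIT_TIME` helpers are written once at process start and read later; the scanner-metrics change further down does exactly that. A minimal sketch of the intended call order, assuming a tokio runtime and these functions in scope:

```rust
async fn boot_and_report() {
    // At startup, record when this node came up.
    set_global_init_time_now().await;

    // Later (e.g. when building scanner metrics), read it back if set.
    if let Some(started) = get_global_init_time().await {
        println!("node initialized at {started}");
    }
}
```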

/// Set the global RustFS address used for gRPC connections.
///

@@ -403,10 +403,10 @@ fn lc_get_prefix(rule: &LifecycleRule) -> String {
} else if let Some(filter) = &rule.filter {
    if let Some(p) = &filter.prefix {
        return p.to_string();
    } else if let Some(and) = &filter.and {
        if let Some(p) = &and.prefix {
            return p.to_string();
        }
    } else if let Some(and) = &filter.and
        && let Some(p) = &and.prefix
    {
        return p.to_string();
    }
}

@@ -475,21 +475,19 @@ pub fn rep_has_active_rules(config: &ReplicationConfiguration, prefix: &str, rec
{
    continue;
}
if !prefix.is_empty() {
    if let Some(filter) = &rule.filter {
        if let Some(r_prefix) = &filter.prefix {
            if !r_prefix.is_empty() {
                // incoming prefix must be in rule prefix
                if !recursive && !prefix.starts_with(r_prefix) {
                    continue;
                }
                // If recursive, we can skip this rule if it matches neither the tested prefix
                // nor a level below the prefix
                if recursive && !r_prefix.starts_with(prefix) && !prefix.starts_with(r_prefix) {
                    continue;
                }
            }
        }
if !prefix.is_empty()
    && let Some(filter) = &rule.filter
    && let Some(r_prefix) = &filter.prefix
    && !r_prefix.is_empty()
{
    // incoming prefix must be in rule prefix
    if !recursive && !prefix.starts_with(r_prefix) {
        continue;
    }
    // If recursive, we can skip this rule if it matches neither the tested prefix
    // nor a level below the prefix
    if recursive && !r_prefix.starts_with(prefix) && !prefix.starts_with(r_prefix) {
        continue;
    }
}
return true;

@@ -18,6 +18,7 @@ use rustfs_madmin::metrics::ScannerMetrics as M_ScannerMetrics;
use std::{
    collections::HashMap,
    fmt::Display,
    future::Future,
    pin::Pin,
    sync::{
        Arc, OnceLock,
@@ -115,7 +116,7 @@ pub enum Metric {

impl Metric {
    /// Convert to string representation for metrics
    pub fn as_str(self) -> &'static str {
    pub fn as_str(&self) -> &'static str {
        match self {
            Self::ReadMetadata => "read_metadata",
            Self::CheckMissing => "check_missing",
@@ -460,27 +461,32 @@ impl Metrics {
    metrics.current_started = cycle.started;
}

// Prefer the global init time as the start time when it is available
if let Some(init_time) = crate::get_global_init_time().await {
    metrics.current_started = init_time;
}

metrics.collected_at = Utc::now();
metrics.active_paths = self.get_current_paths().await;

// Lifetime operations
for i in 0..Metric::Last as usize {
    let count = self.operations[i].load(Ordering::Relaxed);
    if count > 0 {
        if let Some(metric) = Metric::from_index(i) {
            metrics.life_time_ops.insert(metric.as_str().to_string(), count);
        }
    if count > 0
        && let Some(metric) = Metric::from_index(i)
    {
        metrics.life_time_ops.insert(metric.as_str().to_string(), count);
    }
}

// Last minute statistics for realtime metrics
for i in 0..Metric::LastRealtime as usize {
    let last_min = self.latency[i].total().await;
    if last_min.n > 0 {
        if let Some(_metric) = Metric::from_index(i) {
            // Convert to madmin TimedAction format if needed
            // This would require implementing the conversion
        }
    if last_min.n > 0
        && let Some(_metric) = Metric::from_index(i)
    {
        // Convert to madmin TimedAction format if needed
        // This would require implementing the conversion
    }
}

@@ -489,8 +495,8 @@ impl Metrics {
}

// Type aliases for compatibility with existing code
pub type UpdateCurrentPathFn = Arc<dyn Fn(&str) -> Pin<Box<dyn std::future::Future<Output = ()> + Send>> + Send + Sync>;
pub type CloseDiskFn = Arc<dyn Fn() -> Pin<Box<dyn std::future::Future<Output = ()> + Send>> + Send + Sync>;
pub type UpdateCurrentPathFn = Arc<dyn Fn(&str) -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + Sync>;
pub type CloseDiskFn = Arc<dyn Fn() -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + Sync>;

/// Create a current path updater for tracking scan progress
pub fn current_path_updater(disk: &str, initial: &str) -> (UpdateCurrentPathFn, CloseDiskFn) {
@@ -506,7 +512,7 @@ pub fn current_path_updater(disk: &str, initial: &str) -> (UpdateCurrentPathFn,

let update_fn = {
    let tracker = Arc::clone(&tracker);
    Arc::new(move |path: &str| -> Pin<Box<dyn std::future::Future<Output = ()> + Send>> {
    Arc::new(move |path: &str| -> Pin<Box<dyn Future<Output = ()> + Send>> {
        let tracker = Arc::clone(&tracker);
        let path = path.to_string();
        Box::pin(async move {
@@ -517,7 +523,7 @@ pub fn current_path_updater(disk: &str, initial: &str) -> (UpdateCurrentPathFn,

let done_fn = {
    let disk_name = disk_name.clone();
    Arc::new(move || -> Pin<Box<dyn std::future::Future<Output = ()> + Send>> {
    Arc::new(move || -> Pin<Box<dyn Future<Output = ()> + Send>> {
        let disk_name = disk_name.clone();
        Box::pin(async move {
            global_metrics().current_paths.write().await.remove(&disk_name);

@@ -170,12 +170,6 @@ pub const KI_B: usize = 1024;
/// Default value: 1048576
pub const MI_B: usize = 1024 * 1024;

/// Environment variable for gRPC authentication token
/// Used to set the authentication token for gRPC communication
/// Example: RUSTFS_GRPC_AUTH_TOKEN=your_token_here
/// Default value: No default value. RUSTFS_SECRET_KEY value is recommended.
pub const ENV_GRPC_AUTH_TOKEN: &str = "RUSTFS_GRPC_AUTH_TOKEN";

#[cfg(test)]
mod tests {
    use super::*;

@@ -20,6 +20,7 @@ pub(crate) mod env;
pub(crate) mod heal;
pub(crate) mod object;
pub(crate) mod profiler;
pub(crate) mod protocols;
pub(crate) mod runtime;
pub(crate) mod targets;
pub(crate) mod tls;

40
crates/config/src/constants/protocols.rs
Normal file
@@ -0,0 +1,40 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Protocol server configuration constants

/// Default FTPS server bind address
pub const DEFAULT_FTPS_ADDRESS: &str = "0.0.0.0:8021";

/// Default SFTP server bind address
pub const DEFAULT_SFTP_ADDRESS: &str = "0.0.0.0:8022";

/// Default FTPS passive ports range (optional)
pub const DEFAULT_FTPS_PASSIVE_PORTS: Option<&str> = None;

/// Default FTPS external IP (auto-detected)
pub const DEFAULT_FTPS_EXTERNAL_IP: Option<&str> = None;

/// Environment variable names
pub const ENV_FTPS_ENABLE: &str = "RUSTFS_FTPS_ENABLE";
pub const ENV_FTPS_ADDRESS: &str = "RUSTFS_FTPS_ADDRESS";
pub const ENV_FTPS_CERTS_FILE: &str = "RUSTFS_FTPS_CERTS_FILE";
pub const ENV_FTPS_KEY_FILE: &str = "RUSTFS_FTPS_KEY_FILE";
pub const ENV_FTPS_PASSIVE_PORTS: &str = "RUSTFS_FTPS_PASSIVE_PORTS";
pub const ENV_FTPS_EXTERNAL_IP: &str = "RUSTFS_FTPS_EXTERNAL_IP";

pub const ENV_SFTP_ENABLE: &str = "RUSTFS_SFTP_ENABLE";
pub const ENV_SFTP_ADDRESS: &str = "RUSTFS_SFTP_ADDRESS";
pub const ENV_SFTP_HOST_KEY: &str = "RUSTFS_SFTP_HOST_KEY";
pub const ENV_SFTP_AUTHORIZED_KEYS: &str = "RUSTFS_SFTP_AUTHORIZED_KEYS";
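Each default above pairs with an environment override. A sketch of how a server might resolve the FTPS bind address from this pair (`resolve_ftps_address` is a hypothetical helper, not part of the commit):

```rust
use std::env;

// Hypothetical helper: prefer RUSTFS_FTPS_ADDRESS, else the compiled-in default.
fn resolve_ftps_address() -> String {
    env::var(ENV_FTPS_ADDRESS).unwrap_or_else(|_| DEFAULT_FTPS_ADDRESS.to_string())
}
```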
@@ -31,6 +31,8 @@ pub use constants::object::*;
#[cfg(feature = "constants")]
pub use constants::profiler::*;
#[cfg(feature = "constants")]
pub use constants::protocols::*;
#[cfg(feature = "constants")]
pub use constants::runtime::*;
#[cfg(feature = "constants")]
pub use constants::targets::*;

@@ -27,11 +27,11 @@ pub const DEFAULT_ACCESS_KEY: &str = "rustfsadmin";
/// Example: --secret-key rustfsadmin
pub const DEFAULT_SECRET_KEY: &str = "rustfsadmin";

/// Environment variable for gRPC authentication token
/// Used to set the authentication token for gRPC communication
/// Example: RUSTFS_GRPC_AUTH_TOKEN=your_token_here
/// Environment variable for RPC authentication token
/// Used to set the authentication token for RPC communication
/// Example: RUSTFS_RPC_SECRET=your_token_here
/// Default value: No default value. RUSTFS_SECRET_KEY value is recommended.
pub const ENV_GRPC_AUTH_TOKEN: &str = "RUSTFS_GRPC_AUTH_TOKEN";
pub const ENV_RPC_SECRET: &str = "RUSTFS_RPC_SECRET";

/// IAM Policy Types
/// Used to differentiate between embedded and inherited policies

@@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::{DEFAULT_SECRET_KEY, ENV_GRPC_AUTH_TOKEN, IAM_POLICY_CLAIM_NAME_SA, INHERITED_POLICY_TYPE};
use crate::{DEFAULT_SECRET_KEY, ENV_RPC_SECRET, IAM_POLICY_CLAIM_NAME_SA, INHERITED_POLICY_TYPE};
use rand::{Rng, RngCore};
use serde::{Deserialize, Serialize};
use serde_json::Value;
@@ -25,8 +25,8 @@ use time::OffsetDateTime;
/// Global active credentials
static GLOBAL_ACTIVE_CRED: OnceLock<Credentials> = OnceLock::new();

/// Global gRPC authentication token
static GLOBAL_GRPC_AUTH_TOKEN: OnceLock<String> = OnceLock::new();
/// Global RPC authentication token
pub static GLOBAL_RUSTFS_RPC_SECRET: OnceLock<String> = OnceLock::new();

/// Initialize the global action credentials
///
@@ -181,15 +181,15 @@ pub fn gen_secret_key(length: usize) -> std::io::Result<String> {
    Ok(key_str)
}

/// Get the gRPC authentication token from environment variable
/// Get the RPC authentication token from environment variable
///
/// # Returns
/// * `String` - The gRPC authentication token
/// * `String` - The RPC authentication token
///
pub fn get_grpc_token() -> String {
    GLOBAL_GRPC_AUTH_TOKEN
pub fn get_rpc_token() -> String {
    GLOBAL_RUSTFS_RPC_SECRET
        .get_or_init(|| {
            env::var(ENV_GRPC_AUTH_TOKEN)
            env::var(ENV_RPC_SECRET)
                .unwrap_or_else(|_| get_global_secret_key_opt().unwrap_or_else(|| DEFAULT_SECRET_KEY.to_string()))
        })
        .clone()

@@ -51,3 +51,8 @@ base64 = { workspace = true }
rand = { workspace = true }
chrono = { workspace = true }
md5 = { workspace = true }
suppaftp.workspace = true
rcgen.workspace = true
anyhow.workspace = true
rustls.workspace = true
rustls-pemfile.workspace = true

@@ -34,8 +34,8 @@ use tracing::{error, info, warn};
use uuid::Uuid;

// Common constants for all E2E tests
pub const DEFAULT_ACCESS_KEY: &str = "minioadmin";
pub const DEFAULT_SECRET_KEY: &str = "minioadmin";
pub const DEFAULT_ACCESS_KEY: &str = "rustfsadmin";
pub const DEFAULT_SECRET_KEY: &str = "rustfsadmin";
pub const TEST_BUCKET: &str = "e2e-test-bucket";
pub fn workspace_root() -> PathBuf {
    let mut path = PathBuf::from(env!("CARGO_MANIFEST_DIR"));
@@ -165,7 +165,7 @@ impl RustFSTestEnvironment {
}

/// Find an available port for the test
async fn find_available_port() -> Result<u16, Box<dyn std::error::Error + Send + Sync>> {
pub async fn find_available_port() -> Result<u16, Box<dyn std::error::Error + Send + Sync>> {
    use std::net::TcpListener;
    let listener = TcpListener::bind("127.0.0.1:0")?;
    let port = listener.local_addr()?.port();
@@ -178,11 +178,11 @@ impl RustFSTestEnvironment {
info!("Cleaning up any existing RustFS processes");
let output = Command::new("pkill").args(["-f", "rustfs"]).output();

if let Ok(output) = output {
    if output.status.success() {
        info!("Killed existing RustFS processes");
        sleep(Duration::from_millis(1000)).await;
    }
if let Ok(output) = output
    && output.status.success()
{
    info!("Killed existing RustFS processes");
    sleep(Duration::from_millis(1000)).await;
}
Ok(())
}

@@ -406,11 +406,11 @@ impl VaultTestEnvironment {
let port_check = TcpStream::connect(VAULT_ADDRESS).await.is_ok();
if port_check {
    // Additional check by making a health request
    if let Ok(response) = reqwest::get(&format!("{VAULT_URL}/v1/sys/health")).await {
        if response.status().is_success() {
            info!("Vault server is ready after {} seconds", i);
            return Ok(());
        }
    if let Ok(response) = reqwest::get(&format!("{VAULT_URL}/v1/sys/health")).await
        && response.status().is_success()
    {
        info!("Vault server is ready after {} seconds", i);
        return Ok(());
    }
}


@@ -40,3 +40,6 @@ mod content_encoding_test;
// Policy variables tests
#[cfg(test)]
mod policy;

#[cfg(test)]
mod protocols;

44
crates/e2e_test/src/protocols/README.md
Normal file
@@ -0,0 +1,44 @@
# Protocol E2E Tests

FTPS and SFTP protocol end-to-end tests for RustFS.

## Prerequisites

### Required Tools

```bash
# Ubuntu/Debian (ssh-keygen ships with openssh-client)
sudo apt-get install sshpass openssh-client

# RHEL/CentOS
sudo yum install sshpass openssh-clients

# macOS
brew install sshpass openssh
```

## Running Tests

Run all protocol tests:
```bash
cargo test --package e2e_test test_protocol_core_suite -- --test-threads=1 --nocapture
```

Run only FTPS tests:
```bash
cargo test --package e2e_test test_ftps_core_operations -- --test-threads=1 --nocapture
```

## Test Coverage

### FTPS Tests
- mkdir bucket
- cd to bucket
- put file
- ls list objects
- cd . (stay in current directory)
- cd / (return to root)
- cd nonexistent bucket (should fail)
- delete object
- cdup
- rmdir delete bucket

235
crates/e2e_test/src/protocols/ftps_core.rs
Normal file
@@ -0,0 +1,235 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Core FTPS tests

use crate::common::rustfs_binary_path;
use crate::protocols::test_env::{DEFAULT_ACCESS_KEY, DEFAULT_SECRET_KEY, ProtocolTestEnvironment};
use anyhow::Result;
use rcgen::generate_simple_self_signed;
use rustls::crypto::aws_lc_rs::default_provider;
use rustls::{ClientConfig, RootCertStore};
use std::io::{Cursor, Read};
use std::path::PathBuf;
use std::sync::Arc;
use suppaftp::RustlsConnector;
use suppaftp::RustlsFtpStream;
use tokio::process::Command;
use tracing::info;

// Fixed FTPS port for testing
const FTPS_PORT: u16 = 9021;
const FTPS_ADDRESS: &str = "127.0.0.1:9021";

/// Test FTPS: put, ls, mkdir, rmdir, delete operations
pub async fn test_ftps_core_operations() -> Result<()> {
    let env = ProtocolTestEnvironment::new().map_err(|e| anyhow::anyhow!("{}", e))?;

    // Generate and write certificate
    let cert = generate_simple_self_signed(vec!["localhost".to_string(), "127.0.0.1".to_string()])?;
    let cert_path = PathBuf::from(&env.temp_dir).join("ftps.crt");
    let key_path = PathBuf::from(&env.temp_dir).join("ftps.key");

    let cert_pem = cert.cert.pem();
    let key_pem = cert.signing_key.serialize_pem();
    tokio::fs::write(&cert_path, &cert_pem).await?;
    tokio::fs::write(&key_path, &key_pem).await?;

    // Start server manually
    info!("Starting FTPS server on {}", FTPS_ADDRESS);
    let binary_path = rustfs_binary_path();
    let mut server_process = Command::new(&binary_path)
        .env("RUSTFS_FTPS_ENABLE", "true")
        .env("RUSTFS_FTPS_ADDRESS", FTPS_ADDRESS)
        .env("RUSTFS_FTPS_CERTS_FILE", cert_path.to_str().unwrap())
        .env("RUSTFS_FTPS_KEY_FILE", key_path.to_str().unwrap())
        .arg(&env.temp_dir)
        .spawn()?;

    // Ensure server is cleaned up even on failure
    let result = async {
        // Wait for server to be ready
        ProtocolTestEnvironment::wait_for_port_ready(FTPS_PORT, 30)
            .await
            .map_err(|e| anyhow::anyhow!("{}", e))?;

        // Install the aws-lc-rs crypto provider
        default_provider()
            .install_default()
            .map_err(|e| anyhow::anyhow!("Failed to install crypto provider: {:?}", e))?;

        // Create a simple rustls config that accepts any certificate for testing
        let mut root_store = RootCertStore::empty();
        // Add the self-signed certificate to the trust store for e2e
        // Note: In a real environment, you'd use proper root certificates
        let cert_pem = cert.cert.pem();
        let cert_der = rustls_pemfile::certs(&mut Cursor::new(cert_pem))
            .collect::<Result<Vec<_>, _>>()
            .map_err(|e| anyhow::anyhow!("Failed to parse cert: {}", e))?;

        root_store.add_parsable_certificates(cert_der);

        let config = ClientConfig::builder()
            .with_root_certificates(root_store)
            .with_no_client_auth();

        // Wrap in suppaftp's RustlsConnector
        let tls_connector = RustlsConnector::from(Arc::new(config));

        // Connect to FTPS server
        let ftp_stream = RustlsFtpStream::connect(FTPS_ADDRESS).map_err(|e| anyhow::anyhow!("Failed to connect: {}", e))?;

        // Upgrade to secure connection
        let mut ftp_stream = ftp_stream
            .into_secure(tls_connector, "127.0.0.1")
            .map_err(|e| anyhow::anyhow!("Failed to upgrade to TLS: {}", e))?;
        ftp_stream.login(DEFAULT_ACCESS_KEY, DEFAULT_SECRET_KEY)?;

        info!("Testing FTPS: mkdir bucket");
        let bucket_name = "testbucket";
        ftp_stream.mkdir(bucket_name)?;
        info!("PASS: mkdir bucket '{}' successful", bucket_name);

        info!("Testing FTPS: cd to bucket");
        ftp_stream.cwd(bucket_name)?;
        info!("PASS: cd to bucket '{}' successful", bucket_name);

        info!("Testing FTPS: put file");
        let filename = "test.txt";
        let content = "Hello, FTPS!";
        ftp_stream.put_file(filename, &mut Cursor::new(content.as_bytes()))?;
        info!("PASS: put file '{}' ({} bytes) successful", filename, content.len());

        info!("Testing FTPS: download file");
        let downloaded_content = ftp_stream.retr(filename, |stream| {
            let mut buffer = Vec::new();
            stream.read_to_end(&mut buffer).map_err(suppaftp::FtpError::ConnectionError)?;
            Ok(buffer)
        })?;
        let downloaded_str = String::from_utf8(downloaded_content)?;
        assert_eq!(downloaded_str, content, "Downloaded content should match uploaded content");
        info!("PASS: download file '{}' successful, content matches", filename);

        info!("Testing FTPS: ls list objects in bucket");
        let list = ftp_stream.list(None)?;
        assert!(list.iter().any(|line| line.contains(filename)), "File should appear in list");
        info!("PASS: ls command successful, file '{}' found in bucket", filename);

        info!("Testing FTPS: ls . (list current directory)");
        let list_dot = ftp_stream.list(Some(".")).unwrap_or_else(|_| ftp_stream.list(None).unwrap());
        assert!(list_dot.iter().any(|line| line.contains(filename)), "File should appear in ls .");
        info!("PASS: ls . successful, file '{}' found", filename);

        info!("Testing FTPS: ls / (list root directory)");
        let list_root = ftp_stream.list(Some("/")).unwrap();
        assert!(list_root.iter().any(|line| line.contains(bucket_name)), "Bucket should appear in ls /");
        assert!(!list_root.iter().any(|line| line.contains(filename)), "File should not appear in ls /");
        info!(
            "PASS: ls / successful, bucket '{}' found, file '{}' not found in root",
            bucket_name, filename
        );

        info!("Testing FTPS: ls /. (list root directory with /.)");
        let list_root_dot = ftp_stream
            .list(Some("/."))
            .unwrap_or_else(|_| ftp_stream.list(Some("/")).unwrap());
        assert!(
            list_root_dot.iter().any(|line| line.contains(bucket_name)),
            "Bucket should appear in ls /."
        );
        info!("PASS: ls /. successful, bucket '{}' found", bucket_name);

        info!("Testing FTPS: ls /bucket (list bucket by absolute path)");
        let list_bucket = ftp_stream.list(Some(&format!("/{}", bucket_name))).unwrap();
        assert!(list_bucket.iter().any(|line| line.contains(filename)), "File should appear in ls /bucket");
        info!("PASS: ls /{} successful, file '{}' found", bucket_name, filename);

        info!("Testing FTPS: cd . (stay in current directory)");
        ftp_stream.cwd(".")?;
        info!("PASS: cd . successful (stays in current directory)");

        info!("Testing FTPS: ls after cd . (should still see file)");
        let list_after_dot = ftp_stream.list(None)?;
        assert!(
            list_after_dot.iter().any(|line| line.contains(filename)),
            "File should still appear in list after cd ."
        );
        info!("PASS: ls after cd . successful, file '{}' still found in bucket", filename);

        info!("Testing FTPS: cd / (go to root directory)");
        ftp_stream.cwd("/")?;
        info!("PASS: cd / successful (back to root directory)");

        info!("Testing FTPS: ls after cd / (should see bucket only)");
        let root_list_after = ftp_stream.list(None)?;
        assert!(
            !root_list_after.iter().any(|line| line.contains(filename)),
            "File should not appear in root ls"
        );
        assert!(
            root_list_after.iter().any(|line| line.contains(bucket_name)),
            "Bucket should appear in root ls"
        );
        info!("PASS: ls after cd / successful, file not in root, bucket '{}' found in root", bucket_name);

        info!("Testing FTPS: cd back to bucket");
        ftp_stream.cwd(bucket_name)?;
        info!("PASS: cd back to bucket '{}' successful", bucket_name);

        info!("Testing FTPS: delete object");
        ftp_stream.rm(filename)?;
        info!("PASS: delete object '{}' successful", filename);

        info!("Testing FTPS: ls verify object deleted");
        let list_after = ftp_stream.list(None)?;
        assert!(!list_after.iter().any(|line| line.contains(filename)), "File should be deleted");
        info!("PASS: ls after delete successful, file '{}' is not found", filename);

        info!("Testing FTPS: cd up to root directory");
        ftp_stream.cdup()?;
        info!("PASS: cd up to root directory successful");

        info!("Testing FTPS: cd to nonexistent bucket (should fail)");
        let nonexistent_bucket = "nonexistent-bucket";
        let cd_result = ftp_stream.cwd(nonexistent_bucket);
        assert!(cd_result.is_err(), "cd to nonexistent bucket should fail");
        info!("PASS: cd to nonexistent bucket '{}' failed as expected", nonexistent_bucket);

        info!("Testing FTPS: ls verify bucket exists in root");
        let root_list = ftp_stream.list(None)?;
        assert!(root_list.iter().any(|line| line.contains(bucket_name)), "Bucket should exist in root");
        info!("PASS: ls root successful, bucket '{}' found in root", bucket_name);

        info!("Testing FTPS: rmdir delete bucket");
        ftp_stream.rmdir(bucket_name)?;
        info!("PASS: rmdir bucket '{}' successful", bucket_name);

        info!("Testing FTPS: ls verify bucket deleted");
        let root_list_after = ftp_stream.list(None)?;
        assert!(!root_list_after.iter().any(|line| line.contains(bucket_name)), "Bucket should be deleted");
        info!("PASS: ls root after delete successful, bucket '{}' is not found", bucket_name);

        ftp_stream.quit()?;

        info!("FTPS core tests passed");
        Ok(())
    }
    .await;

    // Always cleanup server process
    let _ = server_process.kill().await;
    let _ = server_process.wait().await;

    result
}
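Note how the test drives every assertion inside an `async` block and only afterwards kills the spawned server, so cleanup runs whether the block succeeded or failed. The same guard shape in isolation (a sketch assuming tokio and anyhow; `sleep 60` stands in for the server binary):

```rust
use tokio::process::Command;

async fn run_with_server_cleanup() -> anyhow::Result<()> {
    let mut server = Command::new("sleep").arg("60").spawn()?;

    // All fallible work happens inside the block; `?` is deferred until after cleanup.
    let result = async {
        // ... assertions against the running server ...
        anyhow::Ok(())
    }
    .await;

    // Runs regardless of whether the block above errored.
    let _ = server.kill().await;
    let _ = server.wait().await;
    result
}
```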
19
crates/e2e_test/src/protocols/mod.rs
Normal file
@@ -0,0 +1,19 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Protocol tests for FTPS and SFTP

pub mod ftps_core;
pub mod test_env;
pub mod test_runner;

72
crates/e2e_test/src/protocols/test_env.rs
Normal file
@@ -0,0 +1,72 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Protocol test environment for FTPS and SFTP

use std::net::TcpStream;
use std::time::Duration;
use tokio::time::sleep;
use tracing::{info, warn};

/// Default credentials
pub const DEFAULT_ACCESS_KEY: &str = "rustfsadmin";
pub const DEFAULT_SECRET_KEY: &str = "rustfsadmin";

/// Custom test environment that doesn't automatically stop servers
pub struct ProtocolTestEnvironment {
    pub temp_dir: String,
}

impl ProtocolTestEnvironment {
    /// Create a new test environment
    /// This environment won't stop any server when dropped
    pub fn new() -> Result<Self, Box<dyn std::error::Error + Send + Sync>> {
        let temp_dir = format!("/tmp/rustfs_protocol_test_{}", uuid::Uuid::new_v4());
        std::fs::create_dir_all(&temp_dir)?;

        Ok(Self { temp_dir })
    }

    /// Wait for server to be ready
    pub async fn wait_for_port_ready(port: u16, max_attempts: u32) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        let address = format!("127.0.0.1:{}", port);

        info!("Waiting for server to be ready on {}", address);

        for i in 0..max_attempts {
            if TcpStream::connect(&address).is_ok() {
                info!("Server is ready after {} s", i + 1);
                return Ok(());
            }

            if i == max_attempts - 1 {
                return Err(format!("Server did not become ready within {} s", max_attempts).into());
            }

            sleep(Duration::from_secs(1)).await;
        }

        Ok(())
    }
}

// Implement Drop trait that doesn't stop servers
impl Drop for ProtocolTestEnvironment {
    fn drop(&mut self) {
        // Clean up temp directory only, don't stop any server
        if let Err(e) = std::fs::remove_dir_all(&self.temp_dir) {
            warn!("Failed to clean up temp directory {}: {}", self.temp_dir, e);
        }
    }
}

171
crates/e2e_test/src/protocols/test_runner.rs
Normal file
@@ -0,0 +1,171 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

//! Protocol test runner

use crate::common::init_logging;
use crate::protocols::ftps_core::test_ftps_core_operations;
use std::time::Instant;
use tokio::time::{Duration, sleep};
use tracing::{error, info};

/// Test result
#[derive(Debug, Clone)]
pub struct TestResult {
    pub test_name: String,
    pub success: bool,
    pub error_message: Option<String>,
}

impl TestResult {
    pub fn success(test_name: String) -> Self {
        Self {
            test_name,
            success: true,
            error_message: None,
        }
    }

    pub fn failure(test_name: String, error: String) -> Self {
        Self {
            test_name,
            success: false,
            error_message: Some(error),
        }
    }
}

/// Protocol test suite
pub struct ProtocolTestSuite {
    tests: Vec<TestDefinition>,
}

#[derive(Debug, Clone)]
struct TestDefinition {
    name: String,
}

impl ProtocolTestSuite {
    /// Create default test suite
    pub fn new() -> Self {
        let tests = vec![
            TestDefinition {
                name: "test_ftps_core_operations".to_string(),
            },
            // TestDefinition { name: "test_sftp_core_operations".to_string() },
        ];

        Self { tests }
    }

    /// Run test suite
    pub async fn run_test_suite(&self) -> Vec<TestResult> {
        init_logging();
        info!("Starting Protocol test suite");

        let start_time = Instant::now();
        let mut results = Vec::new();

        info!("Scheduled {} tests", self.tests.len());

        // Run tests
        for (i, test_def) in self.tests.iter().enumerate() {
            let test_description = match test_def.name.as_str() {
                "test_ftps_core_operations" => {
                    info!("=== Starting FTPS Module Test ===");
                    "FTPS core operations (put, ls, mkdir, rmdir, delete)"
                }
                "test_sftp_core_operations" => {
                    info!("=== Starting SFTP Module Test ===");
                    "SFTP core operations (put, ls, mkdir, rmdir, delete)"
                }
                _ => "",
            };

            info!("Test {}/{} - {}", i + 1, self.tests.len(), test_description);
            info!("Running: {}", test_def.name);

            let test_start = Instant::now();

            let result = self.run_single_test(test_def).await;
            let test_duration = test_start.elapsed();

            match result {
                Ok(_) => {
                    info!("Test passed: {} ({:.2}s)", test_def.name, test_duration.as_secs_f64());
                    results.push(TestResult::success(test_def.name.clone()));
                }
                Err(e) => {
                    error!("Test failed: {} ({:.2}s): {}", test_def.name, test_duration.as_secs_f64(), e);
                    results.push(TestResult::failure(test_def.name.clone(), e.to_string()));
                }
            }

            // Delay between tests to avoid resource conflicts
            if i < self.tests.len() - 1 {
                sleep(Duration::from_secs(2)).await;
            }
        }

        // Print summary
        self.print_summary(&results, start_time.elapsed());

        results
    }

    /// Run a single test
    async fn run_single_test(&self, test_def: &TestDefinition) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
        match test_def.name.as_str() {
            "test_ftps_core_operations" => test_ftps_core_operations().await.map_err(|e| e.into()),
            // "test_sftp_core_operations" => test_sftp_core_operations().await.map_err(|e| e.into()),
            _ => Err(format!("Test {} not implemented", test_def.name).into()),
        }
    }

    /// Print test summary
    fn print_summary(&self, results: &[TestResult], total_duration: Duration) {
        info!("=== Test Suite Summary ===");
        info!("Total duration: {:.2}s", total_duration.as_secs_f64());
        info!("Total tests: {}", results.len());

        let passed = results.iter().filter(|r| r.success).count();
        let failed = results.len() - passed;
        let success_rate = (passed as f64 / results.len() as f64) * 100.0;

        info!("Passed: {} | Failed: {}", passed, failed);
        info!("Success rate: {:.1}%", success_rate);

        if failed > 0 {
            error!("Failed tests:");
            for result in results.iter().filter(|r| !r.success) {
                error!(" - {}: {}", result.test_name, result.error_message.as_ref().unwrap());
            }
        }
    }
}

/// Test suite
#[tokio::test]
async fn test_protocol_core_suite() -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
    let suite = ProtocolTestSuite::new();
    let results = suite.run_test_suite().await;

    let failed = results.iter().filter(|r| !r.success).count();
    if failed > 0 {
        return Err(format!("Protocol tests failed: {failed} failures").into());
    }

    info!("All protocol tests passed");
    Ok(())
}
@@ -15,11 +15,12 @@

use async_trait::async_trait;
use rustfs_ecstore::disk::endpoint::Endpoint;
use rustfs_lock::client::{LockClient, local::LocalClient, remote::RemoteClient};
use rustfs_ecstore::rpc::RemoteClient;
use rustfs_lock::client::{LockClient, local::LocalClient};
use rustfs_lock::types::{LockInfo, LockResponse, LockStats};
use rustfs_lock::{LockId, LockMetadata, LockPriority, LockType};
use rustfs_lock::{LockRequest, NamespaceLock, NamespaceLockManager};
use rustfs_protos::{node_service_time_out_client, proto_gen::node_service::GenerallyLockRequest};
use rustfs_protos::proto_gen::node_service::GenerallyLockRequest;
use serial_test::serial;
use std::{collections::HashMap, error::Error, sync::Arc, time::Duration};
use tokio::time::sleep;
@@ -156,7 +157,7 @@ async fn test_lock_unlock_rpc() -> Result<(), Box<dyn Error>> {
};
let args = serde_json::to_string(&args)?;

let mut client = node_service_time_out_client(&CLUSTER_ADDR.to_string()).await?;
let mut client = RemoteClient::new(CLUSTER_ADDR.to_string()).get_client().await?;
println!("got client");
let request = Request::new(GenerallyLockRequest { args: args.clone() });

@@ -614,7 +615,7 @@ async fn test_rpc_read_lock() -> Result<(), Box<dyn Error>> {
};
let args_str = serde_json::to_string(&args)?;

let mut client = node_service_time_out_client(&CLUSTER_ADDR.to_string()).await?;
let mut client = RemoteClient::new(CLUSTER_ADDR.to_string()).get_client().await?;

// First read lock
let request = Request::new(GenerallyLockRequest { args: args_str.clone() });
@@ -669,7 +670,7 @@ async fn test_lock_refresh() -> Result<(), Box<dyn Error>> {
};
let args_str = serde_json::to_string(&args)?;

let mut client = node_service_time_out_client(&CLUSTER_ADDR.to_string()).await?;
let mut client = RemoteClient::new(CLUSTER_ADDR.to_string()).get_client().await?;

// Acquire lock
let request = Request::new(GenerallyLockRequest { args: args_str.clone() });
@@ -713,7 +714,7 @@ async fn test_force_unlock() -> Result<(), Box<dyn Error>> {
};
let args_str = serde_json::to_string(&args)?;

let mut client = node_service_time_out_client(&CLUSTER_ADDR.to_string()).await?;
let mut client = RemoteClient::new(CLUSTER_ADDR.to_string()).get_client().await?;

// Acquire lock
let request = Request::new(GenerallyLockRequest { args: args_str.clone() });

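After this change the lock tests obtain their tonic channel through the ecstore `RemoteClient` wrapper instead of calling `node_service_time_out_client` directly. The new call shape as a standalone sketch (the address value is illustrative):

```rust
use rustfs_ecstore::rpc::RemoteClient;

async fn connect(addr: &str) -> Result<(), Box<dyn std::error::Error>> {
    // get_client() replaces the old node_service_time_out_client(&addr) call here.
    let mut _client = RemoteClient::new(addr.to_string()).get_client().await?;
    Ok(())
}
```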
||||
@@ -17,11 +17,11 @@ use crate::common::workspace_root;
use futures::future::join_all;
use rmp_serde::{Deserializer, Serializer};
use rustfs_ecstore::disk::{VolumeInfo, WalkDirOptions};
use rustfs_ecstore::rpc::{TonicInterceptor, gen_tonic_signature_interceptor, node_service_time_out_client};
use rustfs_filemeta::{MetaCacheEntry, MetacacheReader, MetacacheWriter};
use rustfs_protos::proto_gen::node_service::WalkDirRequest;
use rustfs_protos::{
models::{PingBody, PingBodyBuilder},
node_service_time_out_client,
proto_gen::node_service::{
ListVolumesRequest, LocalStorageInfoRequest, MakeVolumeRequest, PingRequest, PingResponse, ReadAllRequest,
},
@@ -53,7 +53,9 @@ async fn ping() -> Result<(), Box<dyn Error>> {
assert!(decoded_payload.is_ok());

// Create client
let mut client = node_service_time_out_client(&CLUSTER_ADDR.to_string()).await?;
let mut client =
node_service_time_out_client(&CLUSTER_ADDR.to_string(), TonicInterceptor::Signature(gen_tonic_signature_interceptor()))
.await?;

// Construct PingRequest
let request = Request::new(PingRequest {
@@ -78,7 +80,9 @@ async fn ping() -> Result<(), Box<dyn Error>> {
#[tokio::test]
#[ignore = "requires running RustFS server at localhost:9000"]
async fn make_volume() -> Result<(), Box<dyn Error>> {
let mut client = node_service_time_out_client(&CLUSTER_ADDR.to_string()).await?;
let mut client =
node_service_time_out_client(&CLUSTER_ADDR.to_string(), TonicInterceptor::Signature(gen_tonic_signature_interceptor()))
.await?;
let request = Request::new(MakeVolumeRequest {
disk: "data".to_string(),
volume: "dandan".to_string(),
@@ -96,7 +100,9 @@ async fn make_volume() -> Result<(), Box<dyn Error>> {
#[tokio::test]
#[ignore = "requires running RustFS server at localhost:9000"]
async fn list_volumes() -> Result<(), Box<dyn Error>> {
let mut client = node_service_time_out_client(&CLUSTER_ADDR.to_string()).await?;
let mut client =
node_service_time_out_client(&CLUSTER_ADDR.to_string(), TonicInterceptor::Signature(gen_tonic_signature_interceptor()))
.await?;
let request = Request::new(ListVolumesRequest {
disk: "data".to_string(),
});
@@ -126,7 +132,9 @@ async fn walk_dir() -> Result<(), Box<dyn Error>> {
let (rd, mut wr) = tokio::io::duplex(1024);
let mut buf = Vec::new();
opts.serialize(&mut Serializer::new(&mut buf))?;
let mut client = node_service_time_out_client(&CLUSTER_ADDR.to_string()).await?;
let mut client =
node_service_time_out_client(&CLUSTER_ADDR.to_string(), TonicInterceptor::Signature(gen_tonic_signature_interceptor()))
.await?;
let disk_path = std::env::var_os("RUSTFS_DISK_PATH").map(PathBuf::from).unwrap_or_else(|| {
let mut path = workspace_root();
path.push("target");
@@ -179,7 +187,9 @@ async fn walk_dir() -> Result<(), Box<dyn Error>> {
#[tokio::test]
#[ignore = "requires running RustFS server at localhost:9000"]
async fn read_all() -> Result<(), Box<dyn Error>> {
let mut client = node_service_time_out_client(&CLUSTER_ADDR.to_string()).await?;
let mut client =
node_service_time_out_client(&CLUSTER_ADDR.to_string(), TonicInterceptor::Signature(gen_tonic_signature_interceptor()))
.await?;
let request = Request::new(ReadAllRequest {
disk: "data".to_string(),
volume: "ff".to_string(),
@@ -197,7 +207,9 @@ async fn read_all() -> Result<(), Box<dyn Error>> {
#[tokio::test]
#[ignore = "requires running RustFS server at localhost:9000"]
async fn storage_info() -> Result<(), Box<dyn Error>> {
let mut client = node_service_time_out_client(&CLUSTER_ADDR.to_string()).await?;
let mut client =
node_service_time_out_client(&CLUSTER_ADDR.to_string(), TonicInterceptor::Signature(gen_tonic_signature_interceptor()))
.await?;
let request = Request::new(LocalStorageInfoRequest { metrics: true });

let response = client.local_storage_info(request).await?.into_inner();
@@ -14,6 +14,7 @@

use crate::data_usage::{DATA_USAGE_CACHE_NAME, DATA_USAGE_ROOT, load_data_usage_from_backend};
use crate::error::{Error, Result};
use crate::rpc::{TonicInterceptor, gen_tonic_signature_interceptor, node_service_time_out_client};
use crate::{
disk::endpoint::Endpoint,
global::{GLOBAL_BOOT_TIME, GLOBAL_Endpoints},
@@ -29,7 +30,6 @@ use rustfs_madmin::{
};
use rustfs_protos::{
models::{PingBody, PingBodyBuilder},
node_service_time_out_client,
proto_gen::node_service::{PingRequest, PingResponse},
};
use std::{
@@ -101,9 +101,9 @@ async fn is_server_resolvable(endpoint: &Endpoint) -> Result<()> {
let decoded_payload = flatbuffers::root::<PingBody>(finished_data);
assert!(decoded_payload.is_ok());

let mut client = node_service_time_out_client(&addr)
let mut client = node_service_time_out_client(&addr, TonicInterceptor::Signature(gen_tonic_signature_interceptor()))
.await
.map_err(|err| Error::other(err.to_string()))?;
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;

let request = Request::new(PingRequest {
version: 1,
@@ -498,19 +498,19 @@ impl BucketTargetSys {
bucket: bucket.to_string(),
})?;

if arn.arn_type == BucketTargetType::ReplicationService {
if let Ok((config, _)) = get_replication_config(bucket).await {
for rule in config.filter_target_arns(&ObjectOpts {
op_type: ReplicationType::All,
..Default::default()
}) {
if rule == arn_str || config.role == arn_str {
let arn_remotes_map = self.arn_remotes_map.read().await;
if arn_remotes_map.get(arn_str).is_some() {
return Err(BucketTargetError::BucketRemoteRemoveDisallowed {
bucket: bucket.to_string(),
});
}
if arn.arn_type == BucketTargetType::ReplicationService
&& let Ok((config, _)) = get_replication_config(bucket).await
{
for rule in config.filter_target_arns(&ObjectOpts {
op_type: ReplicationType::All,
..Default::default()
}) {
if rule == arn_str || config.role == arn_str {
let arn_remotes_map = self.arn_remotes_map.read().await;
if arn_remotes_map.get(arn_str).is_some() {
return Err(BucketTargetError::BucketRemoteRemoveDisallowed {
bucket: bucket.to_string(),
});
}
}
}
@@ -691,22 +691,22 @@ impl BucketTargetSys {
}

// Add new targets
if let Some(new_targets) = targets {
if !new_targets.is_empty() {
for target in &new_targets.targets {
if let Ok(client) = self.get_remote_target_client_internal(target).await {
arn_remotes_map.insert(
target.arn.clone(),
ArnTarget {
client: Some(Arc::new(client)),
last_refresh: OffsetDateTime::now_utc(),
},
);
self.update_bandwidth_limit(bucket, &target.arn, target.bandwidth_limit);
}
if let Some(new_targets) = targets
&& !new_targets.is_empty()
{
for target in &new_targets.targets {
if let Ok(client) = self.get_remote_target_client_internal(target).await {
arn_remotes_map.insert(
target.arn.clone(),
ArnTarget {
client: Some(Arc::new(client)),
last_refresh: OffsetDateTime::now_utc(),
},
);
self.update_bandwidth_limit(bucket, &target.arn, target.bandwidth_limit);
}
targets_map.insert(bucket.to_string(), new_targets.targets.clone());
}
targets_map.insert(bucket.to_string(), new_targets.targets.clone());
}
}
@@ -31,10 +31,10 @@ impl BucketObjectLockSys {
}

pub async fn get(bucket: &str) -> Option<DefaultRetention> {
if let Ok(object_lock_config) = get_object_lock_config(bucket).await {
if let Some(object_lock_rule) = object_lock_config.0.rule {
return object_lock_rule.default_retention;
}
if let Ok(object_lock_config) = get_object_lock_config(bucket).await
&& let Some(object_lock_rule) = object_lock_config.0.rule
{
return object_lock_rule.default_retention;
}
None
}
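Note: most hunks in this range apply the same mechanical refactor — nested `if let` blocks are flattened into a single let-chain (`if let ... && let ...`), which Rust supports as of the 2024 edition. A minimal standalone sketch of the before/after shape, independent of the RustFS types:

// Before: two nesting levels.
//     if let Some(x) = a {
//         if let Some(y) = b {
//             return Some(x + y);
//         }
//     }
// After: one let-chain, same control flow, one brace level less.
fn pick(a: Option<i32>, b: Option<i32>) -> Option<i32> {
    if let Some(x) = a
        && let Some(y) = b
    {
        return Some(x + y);
    }
    None
}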
@@ -55,10 +55,10 @@ impl ReplicationConfigurationExt for ReplicationConfiguration {
if !has_arn {
has_arn = true;
}
if let Some(status) = &rule.existing_object_replication {
if status.status == ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::ENABLED) {
return (true, true);
}
if let Some(status) = &rule.existing_object_replication
&& status.status == ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::ENABLED)
{
return (true, true);
}
}
}
@@ -86,12 +86,11 @@ impl ReplicationConfigurationExt for ReplicationConfiguration {
continue;
}

if let Some(status) = &rule.existing_object_replication {
if obj.existing_object
&& status.status == ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::DISABLED)
{
continue;
}
if let Some(status) = &rule.existing_object_replication
&& obj.existing_object
&& status.status == ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::DISABLED)
{
continue;
}

if !obj.name.starts_with(rule.prefix()) {
@@ -145,12 +144,11 @@ impl ReplicationConfigurationExt for ReplicationConfiguration {
continue;
}

if let Some(status) = &rule.existing_object_replication {
if obj.existing_object
&& status.status == ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::DISABLED)
{
return false;
}
if let Some(status) = &rule.existing_object_replication
&& obj.existing_object
&& status.status == ExistingObjectReplicationStatus::from_static(ExistingObjectReplicationStatus::DISABLED)
{
return false;
}

if obj.op_type == ReplicationType::Delete {
@@ -186,20 +184,20 @@ impl ReplicationConfigurationExt for ReplicationConfiguration {
continue;
}

if let Some(filter) = &rule.filter {
if let Some(filter_prefix) = &filter.prefix {
if !prefix.is_empty() && !filter_prefix.is_empty() {
// The provided prefix must fall within the rule prefix
if !recursive && !prefix.starts_with(filter_prefix) {
continue;
}
}

// When recursive, skip this rule if it does not match the test prefix or hierarchy
if recursive && !rule.prefix().starts_with(prefix) && !prefix.starts_with(rule.prefix()) {
if let Some(filter) = &rule.filter
&& let Some(filter_prefix) = &filter.prefix
{
if !prefix.is_empty() && !filter_prefix.is_empty() {
// The provided prefix must fall within the rule prefix
if !recursive && !prefix.starts_with(filter_prefix) {
continue;
}
}

// When recursive, skip this rule if it does not match the test prefix or hierarchy
if recursive && !rule.prefix().starts_with(prefix) && !prefix.starts_with(rule.prefix()) {
continue;
}
}
return true;
}
@@ -512,20 +512,20 @@ impl<S: StorageAPI> ReplicationPool<S> {
if !lrg_workers.is_empty() {
let index = (hash as usize) % lrg_workers.len();

if let Some(worker) = lrg_workers.get(index) {
if worker.try_send(ReplicationOperation::Object(Box::new(ri.clone()))).is_err() {
// Queue to MRF if worker is busy
let _ = self.mrf_save_tx.try_send(ri.to_mrf_entry());
if let Some(worker) = lrg_workers.get(index)
&& worker.try_send(ReplicationOperation::Object(Box::new(ri.clone()))).is_err()
{
// Queue to MRF if worker is busy
let _ = self.mrf_save_tx.try_send(ri.to_mrf_entry());

// Try to add more workers if possible
let max_l_workers = *self.max_l_workers.read().await;
let existing = lrg_workers.len();
if self.active_lrg_workers() < std::cmp::min(max_l_workers, LARGE_WORKER_COUNT) as i32 {
let workers = std::cmp::min(existing + 1, max_l_workers);
// Try to add more workers if possible
let max_l_workers = *self.max_l_workers.read().await;
let existing = lrg_workers.len();
if self.active_lrg_workers() < std::cmp::min(max_l_workers, LARGE_WORKER_COUNT) as i32 {
let workers = std::cmp::min(existing + 1, max_l_workers);

drop(lrg_workers);
self.resize_lrg_workers(workers, existing).await;
}
drop(lrg_workers);
self.resize_lrg_workers(workers, existing).await;
}
}
}
@@ -539,47 +539,45 @@ impl<S: StorageAPI> ReplicationPool<S> {
_ => self.get_worker_ch(&ri.bucket, &ri.name, ri.size).await,
};

if let Some(channel) = ch {
if channel.try_send(ReplicationOperation::Object(Box::new(ri.clone()))).is_err() {
// Queue to MRF if all workers are busy
let _ = self.mrf_save_tx.try_send(ri.to_mrf_entry());
if let Some(channel) = ch
&& channel.try_send(ReplicationOperation::Object(Box::new(ri.clone()))).is_err()
{
// Queue to MRF if all workers are busy
let _ = self.mrf_save_tx.try_send(ri.to_mrf_entry());

// Try to scale up workers based on priority
let priority = self.priority.read().await.clone();
let max_workers = *self.max_workers.read().await;
// Try to scale up workers based on priority
let priority = self.priority.read().await.clone();
let max_workers = *self.max_workers.read().await;

match priority {
ReplicationPriority::Fast => {
// Log warning about unable to keep up
info!("Warning: Unable to keep up with incoming traffic");
match priority {
ReplicationPriority::Fast => {
// Log warning about unable to keep up
info!("Warning: Unable to keep up with incoming traffic");
}
ReplicationPriority::Slow => {
info!("Warning: Unable to keep up with incoming traffic - recommend increasing replication priority to auto");
}
ReplicationPriority::Auto => {
let max_w = std::cmp::min(max_workers, WORKER_MAX_LIMIT);
let active_workers = self.active_workers();

if active_workers < max_w as i32 {
let workers = self.workers.read().await;
let new_count = std::cmp::min(workers.len() + 1, max_w);
let existing = workers.len();

drop(workers);
self.resize_workers(new_count, existing).await;
}
ReplicationPriority::Slow => {
info!(
"Warning: Unable to keep up with incoming traffic - recommend increasing replication priority to auto"
);
}
ReplicationPriority::Auto => {
let max_w = std::cmp::min(max_workers, WORKER_MAX_LIMIT);
let active_workers = self.active_workers();

if active_workers < max_w as i32 {
let workers = self.workers.read().await;
let new_count = std::cmp::min(workers.len() + 1, max_w);
let existing = workers.len();
let max_mrf_workers = std::cmp::min(max_workers, MRF_WORKER_MAX_LIMIT);
let active_mrf = self.active_mrf_workers();

drop(workers);
self.resize_workers(new_count, existing).await;
}
if active_mrf < max_mrf_workers as i32 {
let current_mrf = self.mrf_worker_size.load(Ordering::SeqCst);
let new_mrf = std::cmp::min(current_mrf + 1, max_mrf_workers as i32);

let max_mrf_workers = std::cmp::min(max_workers, MRF_WORKER_MAX_LIMIT);
let active_mrf = self.active_mrf_workers();

if active_mrf < max_mrf_workers as i32 {
let current_mrf = self.mrf_worker_size.load(Ordering::SeqCst);
let new_mrf = std::cmp::min(current_mrf + 1, max_mrf_workers as i32);

self.resize_failed_workers(new_mrf).await;
}
self.resize_failed_workers(new_mrf).await;
}
}
}
@@ -593,31 +591,29 @@ impl<S: StorageAPI> ReplicationPool<S> {
_ => self.get_worker_ch(&doi.bucket, &doi.delete_object.object_name, 0).await,
};

if let Some(channel) = ch {
if channel.try_send(ReplicationOperation::Delete(Box::new(doi.clone()))).is_err() {
let _ = self.mrf_save_tx.try_send(doi.to_mrf_entry());
if let Some(channel) = ch
&& channel.try_send(ReplicationOperation::Delete(Box::new(doi.clone()))).is_err()
{
let _ = self.mrf_save_tx.try_send(doi.to_mrf_entry());

let priority = self.priority.read().await.clone();
let max_workers = *self.max_workers.read().await;
let priority = self.priority.read().await.clone();
let max_workers = *self.max_workers.read().await;

match priority {
ReplicationPriority::Fast => {
info!("Warning: Unable to keep up with incoming deletes");
}
ReplicationPriority::Slow => {
info!(
"Warning: Unable to keep up with incoming deletes - recommend increasing replication priority to auto"
);
}
ReplicationPriority::Auto => {
let max_w = std::cmp::min(max_workers, WORKER_MAX_LIMIT);
if self.active_workers() < max_w as i32 {
let workers = self.workers.read().await;
let new_count = std::cmp::min(workers.len() + 1, max_w);
let existing = workers.len();
drop(workers);
self.resize_workers(new_count, existing).await;
}
match priority {
ReplicationPriority::Fast => {
info!("Warning: Unable to keep up with incoming deletes");
}
ReplicationPriority::Slow => {
info!("Warning: Unable to keep up with incoming deletes - recommend increasing replication priority to auto");
}
ReplicationPriority::Auto => {
let max_w = std::cmp::min(max_workers, WORKER_MAX_LIMIT);
if self.active_workers() < max_w as i32 {
let workers = self.workers.read().await;
let new_count = std::cmp::min(workers.len() + 1, max_w);
let existing = workers.len();
drop(workers);
self.resize_workers(new_count, existing).await;
}
}
}
@@ -242,11 +242,10 @@ impl ReplicationResyncer {

if let Some(last_update) = status.last_update {
if last_update > *last_update_times.get(bucket).unwrap_or(&OffsetDateTime::UNIX_EPOCH) {
if let Some(last_update) = status.last_update
&& last_update > *last_update_times.get(bucket).unwrap_or(&OffsetDateTime::UNIX_EPOCH) {
update = true;
}
}

if update {
if let Err(err) = save_resync_status(bucket, status, api.clone()).await {
@@ -345,13 +344,12 @@ impl ReplicationResyncer {
return;
};

if !heal {
if let Err(e) = self
if !heal
&& let Err(e) = self
.mark_status(ResyncStatusType::ResyncStarted, opts.clone(), storage.clone())
.await
{
error!("Failed to mark resync status: {}", e);
}
{
error!("Failed to mark resync status: {}", e);
}

let (tx, mut rx) = tokio::sync::mpsc::channel(100);
@@ -1463,21 +1461,18 @@ async fn replicate_delete_to_target(dobj: &DeletedObjectReplicationInfo, tgt_cli
Some(version_id.to_string())
};

if dobj.delete_object.delete_marker_version_id.is_some() {
if let Err(e) = tgt_client
if dobj.delete_object.delete_marker_version_id.is_some()
&& let Err(e) = tgt_client
.head_object(&tgt_client.bucket, &dobj.delete_object.object_name, version_id.clone())
.await
{
if let SdkError::ServiceError(service_err) = &e {
if !service_err.err().is_not_found() {
rinfo.replication_status = ReplicationStatusType::Failed;
rinfo.error = Some(e.to_string());
&& let SdkError::ServiceError(service_err) = &e
&& !service_err.err().is_not_found()
{
rinfo.replication_status = ReplicationStatusType::Failed;
rinfo.error = Some(e.to_string());

return rinfo;
}
}
};
}
return rinfo;
};

match tgt_client
.remove_object(
@@ -49,13 +49,13 @@ impl ExponentialMovingAverage {
pub fn update_exponential_moving_average(&self, now: SystemTime) {
if let Ok(mut last_update_guard) = self.last_update.try_lock() {
let last_update = *last_update_guard;
if let Ok(duration) = now.duration_since(last_update) {
if duration.as_secs() > 0 {
let decay = (-duration.as_secs_f64() / 60.0).exp(); // 1 minute decay
let current_value = f64::from_bits(self.value.load(AtomicOrdering::Relaxed));
self.value.store((current_value * decay).to_bits(), AtomicOrdering::Relaxed);
*last_update_guard = now;
}
if let Ok(duration) = now.duration_since(last_update)
&& duration.as_secs() > 0
{
let decay = (-duration.as_secs_f64() / 60.0).exp(); // 1 minute decay
let current_value = f64::from_bits(self.value.load(AtomicOrdering::Relaxed));
self.value.store((current_value * decay).to_bits(), AtomicOrdering::Relaxed);
*last_update_guard = now;
}
}
}
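Note: the decay applied above is exponential in elapsed time — after t seconds the stored value is multiplied by exp(-t / 60), so one minute of inactivity shrinks it to about e^-1 ≈ 0.37 of its previous value. A tiny sketch of the arithmetic on its own (not part of the diff):

// value(t) = value(0) * exp(-t / 60), with t in seconds.
fn decayed(value: f64, elapsed_secs: f64) -> f64 {
    value * (-elapsed_secs / 60.0).exp()
}
// e.g. decayed(10.0, 60.0) ≈ 3.68 — one minute decays to ~37%.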
@@ -757,10 +757,10 @@ impl ReplicationStats {

/// Check if bucket replication statistics have usage
pub fn has_replication_usage(&self, bucket: &str) -> bool {
if let Ok(cache) = self.cache.try_read() {
if let Some(stats) = cache.get(bucket) {
return stats.has_replication_usage();
}
if let Ok(cache) = self.cache.try_read()
&& let Some(stats) = cache.get(bucket)
{
return stats.has_replication_usage();
}
false
}
@@ -13,7 +13,8 @@
// limitations under the License.

use crate::disk::RUSTFS_META_BUCKET;
use crate::error::{Error, Result};
use crate::error::{Error, Result, StorageError};
use rustfs_utils::path::SLASH_SEPARATOR;
use s3s::xml;

pub fn is_meta_bucketname(name: &str) -> bool {
@@ -21,6 +22,7 @@ pub fn is_meta_bucketname(name: &str) -> bool {
}

use regex::Regex;
use tracing::instrument;

lazy_static::lazy_static! {
static ref VALID_BUCKET_NAME: Regex = Regex::new(r"^[A-Za-z0-9][A-Za-z0-9\.\-\_\:]{1,61}[A-Za-z0-9]$").unwrap();
@@ -113,3 +115,420 @@ pub fn serialize<T: xml::Serialize>(val: &T) -> xml::SerResult<Vec<u8>> {
}
Ok(buf)
}
pub fn has_bad_path_component(path: &str) -> bool {
let n = path.len();
if n > 32 << 10 {
// At 32K we are beyond reasonable.
return true;
}

let bytes = path.as_bytes();
let mut i = 0;

// Skip leading slashes (for sake of Windows \ is included as well)
while i < n && (bytes[i] == b'/' || bytes[i] == b'\\') {
i += 1;
}

while i < n {
// Find the next segment
let start = i;
while i < n && bytes[i] != b'/' && bytes[i] != b'\\' {
i += 1;
}

// Trim whitespace of segment
let mut segment_start = start;
let mut segment_end = i;

while segment_start < segment_end && bytes[segment_start].is_ascii_whitespace() {
segment_start += 1;
}
while segment_end > segment_start && bytes[segment_end - 1].is_ascii_whitespace() {
segment_end -= 1;
}

// Check for ".." or "."
match segment_end - segment_start {
2 if segment_start + 1 < n && bytes[segment_start] == b'.' && bytes[segment_start + 1] == b'.' => {
return true;
}
1 if bytes[segment_start] == b'.' => {
return true;
}
_ => {}
}

if i < n {
i += 1;
}
}

false
}
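A few illustrative calls (not part of the diff) showing what the segment scan above accepts and rejects:

// Hypothetical checks for has_bad_path_component:
// assert!(!has_bad_path_component("a/b/c.txt"));  // ordinary path
// assert!(has_bad_path_component("a/../b"));      // ".." segment
// assert!(has_bad_path_component("./a"));         // "." segment
// assert!(has_bad_path_component("a/ .. /b"));    // whitespace-trimmed ".."
// assert!(!has_bad_path_component("//a//b"));     // empty segments are skipped here;
//                                                 // "//" is rejected by is_valid_object_prefix instead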
pub fn is_valid_object_prefix(object: &str) -> bool {
if has_bad_path_component(object) {
return false;
}

if !object.is_char_boundary(0) || std::str::from_utf8(object.as_bytes()).is_err() {
return false;
}

if object.contains("//") {
return false;
}

// This is valid for AWS S3 but it will never
// work with file systems, we will reject here
// to return object name invalid rather than
// a cryptic error from the file system.
!object.contains('\0')
}

pub fn is_valid_object_name(object: &str) -> bool {
// Implement object name validation
if object.is_empty() {
return false;
}

if object.ends_with(SLASH_SEPARATOR) {
return false;
}

is_valid_object_prefix(object)
}

pub fn check_object_name_for_length_and_slash(bucket: &str, object: &str) -> Result<()> {
if object.len() > 1024 {
return Err(StorageError::ObjectNameTooLong(bucket.to_owned(), object.to_owned()));
}

if object.starts_with(SLASH_SEPARATOR) {
return Err(StorageError::ObjectNamePrefixAsSlash(bucket.to_owned(), object.to_owned()));
}

#[cfg(target_os = "windows")]
{
if object.contains(':')
|| object.contains('*')
|| object.contains('?')
|| object.contains('"')
|| object.contains('|')
|| object.contains('<')
|| object.contains('>')
// || object.contains('\\')
{
return Err(StorageError::ObjectNameInvalid(bucket.to_owned(), object.to_owned()));
}
}

Ok(())
}

pub fn check_copy_obj_args(bucket: &str, object: &str) -> Result<()> {
check_bucket_and_object_names(bucket, object)
}

pub fn check_get_obj_args(bucket: &str, object: &str) -> Result<()> {
check_bucket_and_object_names(bucket, object)
}

pub fn check_del_obj_args(bucket: &str, object: &str) -> Result<()> {
check_bucket_and_object_names(bucket, object)
}

pub fn check_bucket_and_object_names(bucket: &str, object: &str) -> Result<()> {
if !is_meta_bucketname(bucket) && check_valid_bucket_name_strict(bucket).is_err() {
return Err(StorageError::BucketNameInvalid(bucket.to_string()));
}

if object.is_empty() {
return Err(StorageError::ObjectNameInvalid(bucket.to_string(), object.to_string()));
}

if !is_valid_object_prefix(object) {
return Err(StorageError::ObjectNameInvalid(bucket.to_string(), object.to_string()));
}

// if cfg!(target_os = "windows") && object.contains('\\') {
//     return Err(StorageError::ObjectNameInvalid(bucket.to_string(), object.to_string()));
// }

Ok(())
}

pub fn check_list_objs_args(bucket: &str, prefix: &str, _marker: &Option<String>) -> Result<()> {
if !is_meta_bucketname(bucket) && check_valid_bucket_name_strict(bucket).is_err() {
return Err(StorageError::BucketNameInvalid(bucket.to_string()));
}

if !is_valid_object_prefix(prefix) {
return Err(StorageError::ObjectNameInvalid(bucket.to_string(), prefix.to_string()));
}

Ok(())
}

pub fn check_list_multipart_args(
bucket: &str,
prefix: &str,
key_marker: &Option<String>,
upload_id_marker: &Option<String>,
_delimiter: &Option<String>,
) -> Result<()> {
check_list_objs_args(bucket, prefix, key_marker)?;

if let Some(upload_id_marker) = upload_id_marker {
if let Some(key_marker) = key_marker
&& key_marker.ends_with('/')
{
return Err(StorageError::InvalidUploadIDKeyCombination(
upload_id_marker.to_string(),
key_marker.to_string(),
));
}

if let Err(_e) = base64_simd::URL_SAFE_NO_PAD.decode_to_vec(upload_id_marker.as_bytes()) {
return Err(StorageError::MalformedUploadID(upload_id_marker.to_owned()));
}
}

Ok(())
}

pub fn check_object_args(bucket: &str, object: &str) -> Result<()> {
if !is_meta_bucketname(bucket) && check_valid_bucket_name_strict(bucket).is_err() {
return Err(StorageError::BucketNameInvalid(bucket.to_string()));
}

check_object_name_for_length_and_slash(bucket, object)?;

if !is_valid_object_name(object) {
return Err(StorageError::ObjectNameInvalid(bucket.to_string(), object.to_string()));
}

Ok(())
}

pub fn check_new_multipart_args(bucket: &str, object: &str) -> Result<()> {
check_object_args(bucket, object)
}

pub fn check_multipart_object_args(bucket: &str, object: &str, upload_id: &str) -> Result<()> {
if let Err(e) = base64_simd::URL_SAFE_NO_PAD.decode_to_vec(upload_id.as_bytes()) {
return Err(StorageError::MalformedUploadID(format!("{bucket}/{object}-{upload_id},err:{e}")));
};
check_object_args(bucket, object)
}

pub fn check_put_object_part_args(bucket: &str, object: &str, upload_id: &str) -> Result<()> {
check_multipart_object_args(bucket, object, upload_id)
}

pub fn check_list_parts_args(bucket: &str, object: &str, upload_id: &str) -> Result<()> {
check_multipart_object_args(bucket, object, upload_id)
}

pub fn check_complete_multipart_args(bucket: &str, object: &str, upload_id: &str) -> Result<()> {
check_multipart_object_args(bucket, object, upload_id)
}

pub fn check_abort_multipart_args(bucket: &str, object: &str, upload_id: &str) -> Result<()> {
check_multipart_object_args(bucket, object, upload_id)
}

#[instrument(level = "debug")]
pub fn check_put_object_args(bucket: &str, object: &str) -> Result<()> {
if !is_meta_bucketname(bucket) && check_valid_bucket_name_strict(bucket).is_err() {
return Err(StorageError::BucketNameInvalid(bucket.to_string()));
}

check_object_name_for_length_and_slash(bucket, object)?;

if object.is_empty() || !is_valid_object_prefix(object) {
return Err(StorageError::ObjectNameInvalid(bucket.to_string(), object.to_string()));
}

Ok(())
}

#[cfg(test)]
mod tests {
use super::*;

// Test validation functions
#[test]
fn test_is_valid_object_name() {
// Valid cases
assert!(is_valid_object_name("valid-object-name"));
assert!(is_valid_object_name("object/with/slashes"));
assert!(is_valid_object_name("object with spaces"));
assert!(is_valid_object_name("object_with_underscores"));
assert!(is_valid_object_name("object.with.dots"));
assert!(is_valid_object_name("single"));
assert!(is_valid_object_name("file.txt"));
assert!(is_valid_object_name("path/to/file.txt"));
assert!(is_valid_object_name("a/b/c/d/e/f"));
assert!(is_valid_object_name("object-123"));
assert!(is_valid_object_name("object(1)"));
assert!(is_valid_object_name("object[1]"));
assert!(is_valid_object_name("object@domain.com"));

// Invalid cases - empty string
assert!(!is_valid_object_name(""));

// Invalid cases - ends with slash (object names cannot end with slash)
assert!(!is_valid_object_name("object/"));
assert!(!is_valid_object_name("path/to/file/"));
assert!(!is_valid_object_name("ends/with/slash/"));

// Invalid cases - bad path components (inherited from is_valid_object_prefix)
assert!(!is_valid_object_name("."));
assert!(!is_valid_object_name(".."));
assert!(!is_valid_object_name("object/.."));
assert!(!is_valid_object_name("object/."));
assert!(!is_valid_object_name("../object"));
assert!(!is_valid_object_name("./object"));
assert!(!is_valid_object_name("path/../other"));
assert!(!is_valid_object_name("path/./other"));
assert!(!is_valid_object_name("a/../b/../c"));
assert!(!is_valid_object_name("a/./b/./c"));

// Invalid cases - double slashes
assert!(!is_valid_object_name("object//with//double//slashes"));
assert!(!is_valid_object_name("//leading/double/slash"));
assert!(!is_valid_object_name("trailing/double/slash//"));

// Invalid cases - null characters
assert!(!is_valid_object_name("object\x00with\x00null"));
assert!(!is_valid_object_name("object\x00"));
assert!(!is_valid_object_name("\x00object"));

// Invalid cases - overly long path (>32KB)
let long_path = "a/".repeat(16385); // 16385 * 2 = 32770 bytes, over 32KB (32768)
assert!(!is_valid_object_name(&long_path));

// Valid cases - prefixes that are valid for object names too
assert!(is_valid_object_name("prefix"));
assert!(is_valid_object_name("deep/nested/object"));
assert!(is_valid_object_name("normal_object"));
}

#[test]
fn test_is_valid_object_prefix() {
// Valid cases
assert!(is_valid_object_prefix("valid-prefix"));
assert!(is_valid_object_prefix(""));
assert!(is_valid_object_prefix("prefix/with/slashes"));
assert!(is_valid_object_prefix("prefix/"));
assert!(is_valid_object_prefix("deep/nested/prefix/"));
assert!(is_valid_object_prefix("normal-prefix"));
assert!(is_valid_object_prefix("prefix_with_underscores"));
assert!(is_valid_object_prefix("prefix.with.dots"));

// Invalid cases - bad path components
assert!(!is_valid_object_prefix("."));
assert!(!is_valid_object_prefix(".."));
assert!(!is_valid_object_prefix("prefix/.."));
assert!(!is_valid_object_prefix("prefix/."));
assert!(!is_valid_object_prefix("../prefix"));
assert!(!is_valid_object_prefix("./prefix"));
assert!(!is_valid_object_prefix("prefix/../other"));
assert!(!is_valid_object_prefix("prefix/./other"));
assert!(!is_valid_object_prefix("a/../b/../c"));
assert!(!is_valid_object_prefix("a/./b/./c"));

// Invalid cases - double slashes
assert!(!is_valid_object_prefix("prefix//with//double//slashes"));
assert!(!is_valid_object_prefix("//leading/double/slash"));
assert!(!is_valid_object_prefix("trailing/double/slash//"));

// Invalid cases - null characters
assert!(!is_valid_object_prefix("prefix\x00with\x00null"));
assert!(!is_valid_object_prefix("prefix\x00"));
assert!(!is_valid_object_prefix("\x00prefix"));

// Invalid cases - overly long path (>32KB)
let long_path = "a/".repeat(16385); // 16385 * 2 = 32770 bytes, over 32KB (32768)
assert!(!is_valid_object_prefix(&long_path));
}

#[test]
fn test_check_bucket_and_object_names() {
// Valid names
assert!(check_bucket_and_object_names("valid-bucket", "valid-object").is_ok());

// Invalid bucket names
assert!(check_bucket_and_object_names("", "valid-object").is_err());
assert!(check_bucket_and_object_names("INVALID", "valid-object").is_err());

// Invalid object names
assert!(check_bucket_and_object_names("valid-bucket", "").is_err());
}

#[test]
fn test_check_list_objs_args() {
assert!(check_list_objs_args("valid-bucket", "", &None).is_ok());
assert!(check_list_objs_args("", "", &None).is_err());
assert!(check_list_objs_args("INVALID", "", &None).is_err());
}

#[test]
fn test_check_multipart_args() {
assert!(check_new_multipart_args("valid-bucket", "valid-object").is_ok());
assert!(check_new_multipart_args("", "valid-object").is_err());
assert!(check_new_multipart_args("valid-bucket", "").is_err());

// Use valid base64 encoded upload_id
let valid_upload_id = "dXBsb2FkLWlk"; // base64 encoded "upload-id"
assert!(check_multipart_object_args("valid-bucket", "valid-object", valid_upload_id).is_ok());
assert!(check_multipart_object_args("", "valid-object", valid_upload_id).is_err());
assert!(check_multipart_object_args("valid-bucket", "", valid_upload_id).is_err());
// Empty string is valid base64 (decodes to empty vec), so this should pass bucket/object validation
// but fail on empty upload_id check in the function logic
assert!(check_multipart_object_args("valid-bucket", "valid-object", "").is_ok());
assert!(check_multipart_object_args("valid-bucket", "valid-object", "invalid-base64!").is_err());
}

#[test]
fn test_validation_functions_comprehensive() {
// Test object name validation edge cases
assert!(!is_valid_object_name(""));
assert!(is_valid_object_name("a"));
assert!(is_valid_object_name("test.txt"));
assert!(is_valid_object_name("folder/file.txt"));
assert!(is_valid_object_name("very-long-object-name-with-many-characters"));

// Test prefix validation
assert!(is_valid_object_prefix(""));
assert!(is_valid_object_prefix("prefix"));
assert!(is_valid_object_prefix("prefix/"));
assert!(is_valid_object_prefix("deep/nested/prefix/"));
}

#[test]
fn test_argument_validation_comprehensive() {
// Test bucket and object name validation
assert!(check_bucket_and_object_names("test-bucket", "test-object").is_ok());
assert!(check_bucket_and_object_names("test-bucket", "folder/test-object").is_ok());

// Test list objects arguments
assert!(check_list_objs_args("test-bucket", "prefix", &Some("marker".to_string())).is_ok());
assert!(check_list_objs_args("test-bucket", "", &None).is_ok());

// Test multipart upload arguments with valid base64 upload_id
let valid_upload_id = "dXBsb2FkLWlk"; // base64 encoded "upload-id"
assert!(check_put_object_part_args("test-bucket", "test-object", valid_upload_id).is_ok());
assert!(check_list_parts_args("test-bucket", "test-object", valid_upload_id).is_ok());
assert!(check_complete_multipart_args("test-bucket", "test-object", valid_upload_id).is_ok());
assert!(check_abort_multipart_args("test-bucket", "test-object", valid_upload_id).is_ok());

// Test put object arguments
assert!(check_put_object_args("test-bucket", "test-object").is_ok());
assert!(check_put_object_args("", "test-object").is_err());
assert!(check_put_object_args("test-bucket", "").is_err());
}
}
@@ -37,10 +37,11 @@ impl VersioningApi for VersioningConfiguration {
return true;
}

if let Some(exclude_folders) = self.exclude_folders {
if exclude_folders && prefix.ends_with('/') {
return false;
}
if let Some(exclude_folders) = self.exclude_folders
&& exclude_folders
&& prefix.ends_with('/')
{
return false;
}

if let Some(ref excluded_prefixes) = self.excluded_prefixes {
@@ -67,10 +68,11 @@ impl VersioningApi for VersioningConfiguration {
return false;
}

if let Some(exclude_folders) = self.exclude_folders {
if exclude_folders && prefix.ends_with('/') {
return true;
}
if let Some(exclude_folders) = self.exclude_folders
&& exclude_folders
&& prefix.ends_with('/')
{
return true;
}

if let Some(ref excluded_prefixes) = self.excluded_prefixes {
@@ -308,12 +308,10 @@ pub async fn list_path_raw(rx: CancellationToken, opts: ListPathRawOptions) -> d

// Break if all at EOF or error.
if at_eof + has_err == readers.len() {
if has_err > 0 {
if let Some(finished_fn) = opts.finished.as_ref() {
if has_err > 0 {
finished_fn(&errs).await;
}
}
if has_err > 0
&& let Some(finished_fn) = opts.finished.as_ref()
{
finished_fn(&errs).await;
}

// error!("list_path_raw: at_eof + has_err == readers.len() break {:?}", &errs);
@@ -41,7 +41,7 @@ use crate::{
use rustfs_utils::hash::EMPTY_STRING_SHA256_HASH;

pub struct RemoveBucketOptions {
_forced_elete: bool,
_forced_delete: bool,
}

#[derive(Debug)]
@@ -161,7 +161,7 @@ impl TransitionClient {
async fn private_new(endpoint: &str, opts: Options, tier_type: &str) -> Result<TransitionClient, std::io::Error> {
let endpoint_url = get_endpoint_url(endpoint, opts.secure)?;

let _ = rustls::crypto::ring::default_provider().install_default();
let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
let scheme = endpoint_url.scheme();
let client;
let tls = if let Some(store) = load_root_store_from_tls_path() {
@@ -211,10 +211,11 @@ async fn apply_dynamic_config_for_sub_sys<S: StorageAPI>(cfg: &mut Config, api:
for (i, count) in set_drive_counts.iter().enumerate() {
match storageclass::lookup_config(&kvs, *count) {
Ok(res) => {
if i == 0 && GLOBAL_STORAGE_CLASS.get().is_none() {
if let Err(r) = GLOBAL_STORAGE_CLASS.set(res) {
error!("GLOBAL_STORAGE_CLASS.set failed {:?}", r);
}
if i == 0
&& GLOBAL_STORAGE_CLASS.get().is_none()
&& let Err(r) = GLOBAL_STORAGE_CLASS.set(res)
{
error!("GLOBAL_STORAGE_CLASS.set failed {:?}", r);
}
}
Err(err) => {
@@ -180,10 +180,10 @@ impl Config {
let mut default = HashMap::new();
default.insert(DEFAULT_DELIMITER.to_owned(), v.clone());
self.0.insert(k.clone(), default);
} else if !self.0[k].contains_key(DEFAULT_DELIMITER) {
if let Some(m) = self.0.get_mut(k) {
m.insert(DEFAULT_DELIMITER.to_owned(), v.clone());
}
} else if !self.0[k].contains_key(DEFAULT_DELIMITER)
&& let Some(m) = self.0.get_mut(k)
{
m.insert(DEFAULT_DELIMITER.to_owned(), v.clone());
}
}
}
@@ -65,18 +65,16 @@ lazy_static::lazy_static! {
/// Store data usage info to backend storage
pub async fn store_data_usage_in_backend(data_usage_info: DataUsageInfo, store: Arc<ECStore>) -> Result<(), Error> {
// Prevent older data from overwriting newer persisted stats
if let Ok(buf) = read_config(store.clone(), &DATA_USAGE_OBJ_NAME_PATH).await {
if let Ok(existing) = serde_json::from_slice::<DataUsageInfo>(&buf) {
if let (Some(new_ts), Some(existing_ts)) = (data_usage_info.last_update, existing.last_update) {
if new_ts <= existing_ts {
info!(
"Skip persisting data usage: incoming last_update {:?} <= existing {:?}",
new_ts, existing_ts
);
return Ok(());
}
}
}
if let Ok(buf) = read_config(store.clone(), &DATA_USAGE_OBJ_NAME_PATH).await
&& let Ok(existing) = serde_json::from_slice::<DataUsageInfo>(&buf)
&& let (Some(new_ts), Some(existing_ts)) = (data_usage_info.last_update, existing.last_update)
&& new_ts <= existing_ts
{
info!(
"Skip persisting data usage: incoming last_update {:?} <= existing {:?}",
new_ts, existing_ts
);
return Ok(());
}

let data =
@@ -149,26 +147,24 @@ pub async fn load_data_usage_from_backend(store: Arc<ECStore>) -> Result<DataUsa

// Handle replication info
for (bucket, bui) in &data_usage_info.buckets_usage {
if bui.replicated_size_v1 > 0
if (bui.replicated_size_v1 > 0
|| bui.replication_failed_count_v1 > 0
|| bui.replication_failed_size_v1 > 0
|| bui.replication_pending_count_v1 > 0
|| bui.replication_pending_count_v1 > 0)
&& let Ok((cfg, _)) = get_replication_config(bucket).await
&& !cfg.role.is_empty()
{
if let Ok((cfg, _)) = get_replication_config(bucket).await {
if !cfg.role.is_empty() {
data_usage_info.replication_info.insert(
cfg.role.clone(),
BucketTargetUsageInfo {
replication_failed_size: bui.replication_failed_size_v1,
replication_failed_count: bui.replication_failed_count_v1,
replicated_size: bui.replicated_size_v1,
replication_pending_count: bui.replication_pending_count_v1,
replication_pending_size: bui.replication_pending_size_v1,
..Default::default()
},
);
}
}
data_usage_info.replication_info.insert(
cfg.role.clone(),
BucketTargetUsageInfo {
replication_failed_size: bui.replication_failed_size_v1,
replication_failed_count: bui.replication_failed_count_v1,
replicated_size: bui.replicated_size_v1,
replication_pending_count: bui.replication_pending_count_v1,
replication_pending_size: bui.replication_pending_size_v1,
..Default::default()
},
);
}
}

@@ -177,10 +173,10 @@ pub async fn load_data_usage_from_backend(store: Arc<ECStore>) -> Result<DataUsa

/// Aggregate usage information from local disk snapshots.
fn merge_snapshot(aggregated: &mut DataUsageInfo, mut snapshot: LocalUsageSnapshot, latest_update: &mut Option<SystemTime>) {
if let Some(update) = snapshot.last_update {
if latest_update.is_none_or(|current| update > current) {
*latest_update = Some(update);
}
if let Some(update) = snapshot.last_update
&& latest_update.is_none_or(|current| update > current)
{
*latest_update = Some(update);
}

snapshot.recompute_totals();
@@ -255,10 +251,10 @@ pub async fn aggregate_local_snapshots(store: Arc<ECStore>) -> Result<(Vec<DiskU
);
// Best-effort cleanup so next scan can rebuild a fresh snapshot instead of repeatedly failing
let snapshot_file = snapshot_path(root.as_path(), &disk_id);
if let Err(remove_err) = fs::remove_file(&snapshot_file).await {
if remove_err.kind() != std::io::ErrorKind::NotFound {
warn!("Failed to remove corrupted snapshot {:?}: {}", snapshot_file, remove_err);
}
if let Err(remove_err) = fs::remove_file(&snapshot_file).await
&& remove_err.kind() != std::io::ErrorKind::NotFound
{
warn!("Failed to remove corrupted snapshot {:?}: {}", snapshot_file, remove_err);
}
}
@@ -30,7 +30,7 @@ use std::{
};
use tokio::{sync::RwLock, time};
use tokio_util::sync::CancellationToken;
use tracing::{debug, info, warn};
use tracing::{info, warn};
use uuid::Uuid;

/// Disk health status constants
@@ -44,7 +44,6 @@ pub const SKIP_IF_SUCCESS_BEFORE: Duration = Duration::from_secs(5);
pub const CHECK_TIMEOUT_DURATION: Duration = Duration::from_secs(5);

lazy_static::lazy_static! {
static ref TEST_OBJ: String = format!("health-check-{}", Uuid::new_v4());
static ref TEST_DATA: Bytes = Bytes::from(vec![42u8; 2048]);
static ref TEST_BUCKET: String = ".rustfs.sys/tmp".to_string();
}
@@ -96,22 +95,22 @@ impl DiskHealthTracker {

/// Check if disk is faulty
pub fn is_faulty(&self) -> bool {
self.status.load(Ordering::Relaxed) == DISK_HEALTH_FAULTY
self.status.load(Ordering::Acquire) == DISK_HEALTH_FAULTY
}

/// Set disk as faulty
pub fn set_faulty(&self) {
self.status.store(DISK_HEALTH_FAULTY, Ordering::Relaxed);
self.status.store(DISK_HEALTH_FAULTY, Ordering::Release);
}

/// Set disk as OK
pub fn set_ok(&self) {
self.status.store(DISK_HEALTH_OK, Ordering::Relaxed);
self.status.store(DISK_HEALTH_OK, Ordering::Release);
}

pub fn swap_ok_to_faulty(&self) -> bool {
self.status
.compare_exchange(DISK_HEALTH_OK, DISK_HEALTH_FAULTY, Ordering::Relaxed, Ordering::Relaxed)
.compare_exchange(DISK_HEALTH_OK, DISK_HEALTH_FAULTY, Ordering::AcqRel, Ordering::Relaxed)
.is_ok()
}

@@ -132,7 +131,7 @@ impl DiskHealthTracker {

/// Get last success timestamp
pub fn last_success(&self) -> i64 {
self.last_success.load(Ordering::Relaxed)
self.last_success.load(Ordering::Acquire)
}
}
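Note: the Relaxed → Acquire/Release changes above tighten the memory ordering on the health flag: a thread that observes a status via an Acquire load is guaranteed to also see every write the marking thread performed before its Release store. A minimal sketch of the same pairing using only std atomics (not part of the diff):

use std::sync::atomic::{AtomicI32, Ordering};

const OK: i32 = 0;
const FAULTY: i32 = 1;

struct Health(AtomicI32);

impl Health {
    fn new() -> Self {
        Health(AtomicI32::new(OK))
    }
    // Release store: publishes prior writes together with the new status.
    fn set_faulty(&self) {
        self.0.store(FAULTY, Ordering::Release);
    }
    // Acquire load: synchronizes-with the Release store above.
    fn is_faulty(&self) -> bool {
        self.0.load(Ordering::Acquire) == FAULTY
    }
}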
@@ -256,8 +255,9 @@ impl LocalDiskWrapper {
tokio::time::sleep(Duration::from_secs(1)).await;

debug!("health check: performing health check");
if Self::perform_health_check(disk.clone(), &TEST_BUCKET, &TEST_OBJ, &TEST_DATA, true, CHECK_TIMEOUT_DURATION).await.is_err() && health.swap_ok_to_faulty() {

let test_obj = format!("health-check-{}", Uuid::new_v4());
if Self::perform_health_check(disk.clone(), &TEST_BUCKET, &test_obj, &TEST_DATA, true, CHECK_TIMEOUT_DURATION).await.is_err() && health.swap_ok_to_faulty() {
// Health check failed, disk is considered faulty

health.increment_waiting(); // Balance the increment from failed operation
@@ -326,7 +326,7 @@ impl LocalDiskWrapper {
Ok(result) => match result {
Ok(()) => Ok(()),
Err(e) => {
debug!("health check: failed: {:?}", e);
warn!("health check: failed: {:?}", e);

if e == DiskError::FaultyDisk {
return Err(e);
@@ -359,7 +359,8 @@ impl LocalDiskWrapper {
return;
}

match Self::perform_health_check(disk.clone(), &TEST_BUCKET, &TEST_OBJ, &TEST_DATA, false, CHECK_TIMEOUT_DURATION).await {
let test_obj = format!("health-check-{}", Uuid::new_v4());
match Self::perform_health_check(disk.clone(), &TEST_BUCKET, &test_obj, &TEST_DATA, false, CHECK_TIMEOUT_DURATION).await {
Ok(_) => {
info!("Disk {} is back online", disk.to_string());
health.set_ok();
@@ -383,7 +384,7 @@ impl LocalDiskWrapper {
let stored_disk_id = self.disk.get_disk_id().await?;

if stored_disk_id != want_id {
return Err(Error::other(format!("Disk ID mismatch wanted {:?}, got {:?}", want_id, stored_disk_id)));
return Err(Error::other(format!("Disk ID mismatch wanted {want_id:?}, got {stored_disk_id:?}")));
}

Ok(())
@@ -467,7 +468,7 @@ impl LocalDiskWrapper {
// Timeout occurred, mark disk as potentially faulty and decrement waiting counter
self.health.decrement_waiting();
warn!("disk operation timeout after {:?}", timeout_duration);
Err(DiskError::other(format!("disk operation timeout after {:?}", timeout_duration)))
Err(DiskError::other(format!("disk operation timeout after {timeout_duration:?}")))
}
}
}
@@ -484,11 +485,15 @@ impl DiskAPI for LocalDiskWrapper {
return false;
};

let Some(current_disk_id) = *self.disk_id.read().await else {
return false;
};
// if disk_id is not set use the current disk_id
if let Some(current_disk_id) = *self.disk_id.read().await {
return current_disk_id == disk_id;
} else {
// if disk_id is not set, update the disk_id
let _ = self.set_disk_id_internal(Some(disk_id)).await;
}

current_disk_id == disk_id
return true;
}

fn is_local(&self) -> bool {

@@ -145,6 +145,9 @@ pub enum DiskError {

#[error("timeout")]
Timeout,

#[error("invalid path")]
InvalidPath,
}

impl DiskError {
@@ -373,6 +376,7 @@ impl Clone for DiskError {
DiskError::ShortWrite => DiskError::ShortWrite,
DiskError::SourceStalled => DiskError::SourceStalled,
DiskError::Timeout => DiskError::Timeout,
DiskError::InvalidPath => DiskError::InvalidPath,
}
}
}
@@ -421,6 +425,7 @@ impl DiskError {
DiskError::ShortWrite => 0x27,
DiskError::SourceStalled => 0x28,
DiskError::Timeout => 0x29,
DiskError::InvalidPath => 0x2A,
}
}

@@ -467,6 +472,7 @@ impl DiskError {
0x27 => Some(DiskError::ShortWrite),
0x28 => Some(DiskError::SourceStalled),
0x29 => Some(DiskError::Timeout),
0x2A => Some(DiskError::InvalidPath),
_ => None,
}
}
File diff suppressed because it is too large
@@ -147,11 +147,11 @@ async fn reliable_rename(
dst_file_path: impl AsRef<Path>,
base_dir: impl AsRef<Path>,
) -> io::Result<()> {
if let Some(parent) = dst_file_path.as_ref().parent() {
if !file_exists(parent) {
// info!("reliable_rename reliable_mkdir_all parent: {:?}", parent);
reliable_mkdir_all(parent, base_dir.as_ref()).await?;
}
if let Some(parent) = dst_file_path.as_ref().parent()
&& !file_exists(parent)
{
// info!("reliable_rename reliable_mkdir_all parent: {:?}", parent);
reliable_mkdir_all(parent, base_dir.as_ref()).await?;
}

let mut i = 0;
@@ -190,12 +190,11 @@ pub async fn reliable_mkdir_all(path: impl AsRef<Path>, base_dir: impl AsRef<Pat
if e.kind() == io::ErrorKind::NotFound && i == 0 {
i += 1;

if let Some(base_parent) = base_dir.parent() {
if let Some(c) = base_parent.components().next() {
if c != Component::RootDir {
base_dir = base_parent
}
}
if let Some(base_parent) = base_dir.parent()
&& let Some(c) = base_parent.components().next()
&& c != Component::RootDir
{
base_dir = base_parent
}
continue;
}
@@ -318,7 +318,7 @@ fn get_divisible_size(total_sizes: &[usize]) -> usize {
fn possible_set_counts(set_size: usize) -> Vec<usize> {
let mut ss = Vec::new();
for s in SET_SIZES {
if set_size % s == 0 {
if set_size.is_multiple_of(s) {
ss.push(s);
}
}
@@ -340,7 +340,7 @@ fn common_set_drive_count(divisible_size: usize, set_counts: &[usize]) -> usize
let mut prev_d = divisible_size / set_counts[0];
let mut set_size = 0;
for &cnt in set_counts {
if divisible_size % cnt == 0 {
if divisible_size.is_multiple_of(cnt) {
let d = divisible_size / cnt;
if d <= prev_d {
prev_d = d;
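Note: `usize::is_multiple_of` (stabilized in recent Rust) expresses the same check as `x % n == 0` but states the intent directly and, unlike `%`, does not panic on a zero divisor. A few illustrative assertions (not part of the diff):

// Equivalent checks, except for a zero divisor:
assert!(12usize.is_multiple_of(4));  // same as 12 % 4 == 0
assert!(!13usize.is_multiple_of(4));
assert!(!5usize.is_multiple_of(0));  // `5 % 0` would panic; this is just false
assert!(0usize.is_multiple_of(0));   // 0 is a multiple of everything, including 0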
@@ -266,12 +266,11 @@ impl Erasure {

let (mut shards, errs) = reader.read().await;

if ret_err.is_none() {
if let (_, Some(err)) = reduce_errs(&errs, &[]) {
if err == Error::FileNotFound || err == Error::FileCorrupt {
ret_err = Some(err.into());
}
}
if ret_err.is_none()
&& let (_, Some(err)) = reduce_errs(&errs, &[])
&& (err == Error::FileNotFound || err == Error::FileCorrupt)
{
ret_err = Some(err.into());
}

if !reader.can_decode(&shards) {

@@ -150,10 +150,10 @@ impl Erasure {
}
Err(e) if e.kind() == std::io::ErrorKind::UnexpectedEof => {
// Check if the inner error is a checksum mismatch - if so, propagate it
if let Some(inner) = e.get_ref() {
if rustfs_rio::is_checksum_mismatch(inner) {
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string()));
}
if let Some(inner) = e.get_ref()
&& rustfs_rio::is_checksum_mismatch(inner)
{
return Err(std::io::Error::new(std::io::ErrorKind::InvalidData, e.to_string()));
}
break;
}

@@ -45,7 +45,7 @@ impl super::Erasure {

let start_block = 0;
let mut end_block = total_length / self.block_size;
if total_length % self.block_size != 0 {
if !total_length.is_multiple_of(self.block_size) {
end_block += 1;
}

@@ -112,7 +112,10 @@ pub async fn collect_local_metrics(types: MetricType, opts: &CollectMetricsOpts)

if types.contains(&MetricType::SCANNER) {
debug!("start get scanner metrics");
let metrics = global_metrics().report().await;
let mut metrics = global_metrics().report().await;
if let Some(init_time) = rustfs_common::get_global_init_time().await {
metrics.current_started = init_time;
}
real_time_metrics.aggregated.scanner = Some(metrics);
}
@@ -244,10 +244,12 @@ impl PoolMeta {
     }
     pub fn decommission(&mut self, idx: usize, pi: PoolSpaceInfo) -> Result<()> {
         if let Some(pool) = self.pools.get_mut(idx) {
-            if let Some(ref info) = pool.decommission {
-                if !info.complete && !info.failed && !info.canceled {
-                    return Err(StorageError::DecommissionAlreadyRunning);
-                }
-            }
+            if let Some(ref info) = pool.decommission
+                && !info.complete
+                && !info.failed
+                && !info.canceled
+            {
+                return Err(StorageError::DecommissionAlreadyRunning);
+            }

             let now = OffsetDateTime::now_utc();

@@ -273,12 +275,12 @@ impl PoolMeta {
     pub fn pending_buckets(&self, idx: usize) -> Vec<DecomBucketInfo> {
         let mut list = Vec::new();

-        if let Some(pool) = self.pools.get(idx) {
-            if let Some(ref info) = pool.decommission {
-                for bk in info.queued_buckets.iter() {
-                    let (name, prefix) = path2_bucket_object(bk);
-                    list.push(DecomBucketInfo { name, prefix });
-                }
-            }
+        if let Some(pool) = self.pools.get(idx)
+            && let Some(ref info) = pool.decommission
+        {
+            for bk in info.queued_buckets.iter() {
+                let (name, prefix) = path2_bucket_object(bk);
+                list.push(DecomBucketInfo { name, prefix });
+            }
         }

@@ -306,15 +308,15 @@ impl PoolMeta {
     }

     pub fn count_item(&mut self, idx: usize, size: usize, failed: bool) {
-        if let Some(pool) = self.pools.get_mut(idx) {
-            if let Some(info) = pool.decommission.as_mut() {
-                if failed {
-                    info.items_decommission_failed += 1;
-                    info.bytes_failed += size;
-                } else {
-                    info.items_decommissioned += 1;
-                    info.bytes_done += size;
-                }
-            }
+        if let Some(pool) = self.pools.get_mut(idx)
+            && let Some(info) = pool.decommission.as_mut()
+        {
+            if failed {
+                info.items_decommission_failed += 1;
+                info.bytes_failed += size;
+            } else {
+                info.items_decommissioned += 1;
+                info.bytes_done += size;
+            }
         }
     }

@@ -324,11 +326,11 @@ impl PoolMeta {
             return;
         }

-        if let Some(pool) = self.pools.get_mut(idx) {
-            if let Some(info) = pool.decommission.as_mut() {
-                info.object = object;
-                info.bucket = bucket;
-            }
-        }
+        if let Some(pool) = self.pools.get_mut(idx)
+            && let Some(info) = pool.decommission.as_mut()
+        {
+            info.object = object;
+            info.bucket = bucket;
+        }
     }

@@ -407,10 +409,10 @@ impl PoolMeta {

         if specified_pools.len() == remembered_pools.len() {
             for (k, pi) in remembered_pools.iter() {
-                if let Some(pos) = specified_pools.get(k) {
-                    if *pos != pi.position {
-                        update = true; // Pool order changed, allow the update.
-                    }
-                }
+                if let Some(pos) = specified_pools.get(k)
+                    && *pos != pi.position
+                {
+                    update = true; // Pool order changed, allow the update.
+                }
             }
         }

@@ -640,10 +642,12 @@ impl ECStore {
     pub async fn is_decommission_running(&self) -> bool {
         let pool_meta = self.pool_meta.read().await;
         for pool in pool_meta.pools.iter() {
-            if let Some(ref info) = pool.decommission {
-                if !info.complete && !info.failed && !info.canceled {
-                    return true;
-                }
-            }
+            if let Some(ref info) = pool.decommission
+                && !info.complete
+                && !info.failed
+                && !info.canceled
+            {
+                return true;
+            }
         }

@@ -850,8 +854,8 @@ impl ECStore {
                         decommissioned += 1;
                     }

-                    if decommissioned == fivs.versions.len() {
-                        if let Err(err) = set
+                    if decommissioned == fivs.versions.len()
+                        && let Err(err) = set
                         .delete_object(
                             bucket.as_str(),
                             &encode_dir_object(&entry.name),
@@ -863,9 +867,8 @@ impl ECStore {
                             },
                         )
                         .await
-                        {
-                            error!("decommission_pool: delete_object err {:?}", &err);
-                        }
-                    }
+                    {
+                        error!("decommission_pool: delete_object err {:?}", &err);
+                    }

                 {

@@ -879,10 +882,8 @@ impl ECStore {
                 .unwrap_or_default();

             drop(pool_meta);
-            if ok {
-                if let Some(notification_sys) = get_global_notification_sys() {
-                    notification_sys.reload_pool_meta().await;
-                }
-            }
+            if ok && let Some(notification_sys) = get_global_notification_sys() {
+                notification_sys.reload_pool_meta().await;
+            }
         }

@@ -1080,10 +1081,10 @@ impl ECStore {

                 {
                     let mut pool_meta = self.pool_meta.write().await;
-                    if pool_meta.bucket_done(idx, bucket.to_string()) {
-                        if let Err(err) = pool_meta.save(self.pools.clone()).await {
-                            error!("decom pool_meta.save err {:?}", err);
-                        }
-                    }
+                    if pool_meta.bucket_done(idx, bucket.to_string())
+                        && let Err(err) = pool_meta.save(self.pools.clone()).await
+                    {
+                        error!("decom pool_meta.save err {:?}", err);
+                    }
                 }
                 continue;

@@ -1100,10 +1101,10 @@ impl ECStore {

                 {
                     let mut pool_meta = self.pool_meta.write().await;
-                    if pool_meta.bucket_done(idx, bucket.to_string()) {
-                        if let Err(err) = pool_meta.save(self.pools.clone()).await {
-                            error!("decom pool_meta.save err {:?}", err);
-                        }
-                    }
+                    if pool_meta.bucket_done(idx, bucket.to_string())
+                        && let Err(err) = pool_meta.save(self.pools.clone()).await
+                    {
+                        error!("decom pool_meta.save err {:?}", err);
+                    }
                 }

                 warn!("decommission: decommission_pool bucket_done {}", &bucket.name);

@@ -1138,11 +1139,10 @@ impl ECStore {
             if let Err(err) = self
                 .make_bucket(bk.to_string_lossy().to_string().as_str(), &MakeBucketOptions::default())
                 .await
+                && !is_err_bucket_exists(&err)
             {
-                if !is_err_bucket_exists(&err) {
-                    error!("decommission: make bucket failed: {err}");
-                    return Err(err);
-                }
+                error!("decommission: make bucket failed: {err}");
+                return Err(err);
             }
         }

@@ -380,10 +380,10 @@ impl ECStore {
     #[tracing::instrument(skip(self, fi))]
     pub async fn update_pool_stats(&self, pool_index: usize, bucket: String, fi: &FileInfo) -> Result<()> {
         let mut rebalance_meta = self.rebalance_meta.write().await;
-        if let Some(meta) = rebalance_meta.as_mut() {
-            if let Some(pool_stat) = meta.pool_stats.get_mut(pool_index) {
-                pool_stat.update(bucket, fi);
-            }
-        }
+        if let Some(meta) = rebalance_meta.as_mut()
+            && let Some(pool_stat) = meta.pool_stats.get_mut(pool_index)
+        {
+            pool_stat.update(bucket, fi);
+        }

         Ok(())

@@ -394,20 +394,20 @@ impl ECStore {
         info!("next_rebal_bucket: pool_index: {}", pool_index);
         let rebalance_meta = self.rebalance_meta.read().await;
         info!("next_rebal_bucket: rebalance_meta: {:?}", rebalance_meta);
-        if let Some(meta) = rebalance_meta.as_ref() {
-            if let Some(pool_stat) = meta.pool_stats.get(pool_index) {
-                if pool_stat.info.status == RebalStatus::Completed || !pool_stat.participating {
-                    info!("next_rebal_bucket: pool_index: {} completed or not participating", pool_index);
-                    return Ok(None);
-                }
-
-                if pool_stat.buckets.is_empty() {
-                    info!("next_rebal_bucket: pool_index: {} buckets is empty", pool_index);
-                    return Ok(None);
-                }
-                info!("next_rebal_bucket: pool_index: {} bucket: {}", pool_index, pool_stat.buckets[0]);
-                return Ok(Some(pool_stat.buckets[0].clone()));
-            }
-        }
+        if let Some(meta) = rebalance_meta.as_ref()
+            && let Some(pool_stat) = meta.pool_stats.get(pool_index)
+        {
+            if pool_stat.info.status == RebalStatus::Completed || !pool_stat.participating {
+                info!("next_rebal_bucket: pool_index: {} completed or not participating", pool_index);
+                return Ok(None);
+            }
+
+            if pool_stat.buckets.is_empty() {
+                info!("next_rebal_bucket: pool_index: {} buckets is empty", pool_index);
+                return Ok(None);
+            }
+            info!("next_rebal_bucket: pool_index: {} bucket: {}", pool_index, pool_stat.buckets[0]);
+            return Ok(Some(pool_stat.buckets[0].clone()));
+        }

         info!("next_rebal_bucket: pool_index: {} None", pool_index);

@@ -417,28 +417,28 @@ impl ECStore {
     #[tracing::instrument(skip(self))]
     pub async fn bucket_rebalance_done(&self, pool_index: usize, bucket: String) -> Result<()> {
         let mut rebalance_meta = self.rebalance_meta.write().await;
-        if let Some(meta) = rebalance_meta.as_mut() {
-            if let Some(pool_stat) = meta.pool_stats.get_mut(pool_index) {
-                info!("bucket_rebalance_done: buckets {:?}", &pool_stat.buckets);
-
-                // Use retain to filter out buckets slated for removal
-                let mut found = false;
-                pool_stat.buckets.retain(|b| {
-                    if b.as_str() == bucket.as_str() {
-                        found = true;
-                        pool_stat.rebalanced_buckets.push(b.clone());
-                        false // Remove this element
-                    } else {
-                        true // Keep this element
-                    }
-                });
-
-                if found {
-                    info!("bucket_rebalance_done: bucket {} rebalanced", &bucket);
-                    return Ok(());
-                }
-            }
-        }
+        if let Some(meta) = rebalance_meta.as_mut()
+            && let Some(pool_stat) = meta.pool_stats.get_mut(pool_index)
+        {
+            info!("bucket_rebalance_done: buckets {:?}", &pool_stat.buckets);
+
+            // Use retain to filter out buckets slated for removal
+            let mut found = false;
+            pool_stat.buckets.retain(|b| {
+                if b.as_str() == bucket.as_str() {
+                    found = true;
+                    pool_stat.rebalanced_buckets.push(b.clone());
+                    false // Remove this element
+                } else {
+                    true // Keep this element
+                }
+            });
+
+            if found {
+                info!("bucket_rebalance_done: bucket {} rebalanced", &bucket);
+                return Ok(());
+            } else {
+                info!("bucket_rebalance_done: bucket {} not found", bucket);
+            }
+        }
         info!("bucket_rebalance_done: bucket {} not found", bucket);

@@ -492,10 +492,10 @@ impl ECStore {
     #[tracing::instrument(skip(self))]
     pub async fn stop_rebalance(self: &Arc<Self>) -> Result<()> {
         let rebalance_meta = self.rebalance_meta.read().await;
-        if let Some(meta) = rebalance_meta.as_ref() {
-            if let Some(cancel_tx) = meta.cancel.as_ref() {
-                cancel_tx.cancel();
-            }
-        }
+        if let Some(meta) = rebalance_meta.as_ref()
+            && let Some(cancel_tx) = meta.cancel.as_ref()
+        {
+            cancel_tx.cancel();
+        }

         Ok(())

@@ -690,24 +690,24 @@ impl ECStore {
     async fn check_if_rebalance_done(&self, pool_index: usize) -> bool {
         let mut rebalance_meta = self.rebalance_meta.write().await;

-        if let Some(meta) = rebalance_meta.as_mut() {
-            if let Some(pool_stat) = meta.pool_stats.get_mut(pool_index) {
-                // Check if the pool's rebalance status is already completed
-                if pool_stat.info.status == RebalStatus::Completed {
-                    info!("check_if_rebalance_done: pool {} is already completed", pool_index);
-                    return true;
-                }
-
-                // Calculate the percentage of free space improvement
-                let pfi = (pool_stat.init_free_space + pool_stat.bytes) as f64 / pool_stat.init_capacity as f64;
-
-                // Mark pool rebalance as done if within 5% of the PercentFreeGoal
-                if (pfi - meta.percent_free_goal).abs() <= 0.05 {
-                    pool_stat.info.status = RebalStatus::Completed;
-                    pool_stat.info.end_time = Some(OffsetDateTime::now_utc());
-                    info!("check_if_rebalance_done: pool {} is completed, pfi: {}", pool_index, pfi);
-                    return true;
-                }
-            }
-        }
+        if let Some(meta) = rebalance_meta.as_mut()
+            && let Some(pool_stat) = meta.pool_stats.get_mut(pool_index)
+        {
+            // Check if the pool's rebalance status is already completed
+            if pool_stat.info.status == RebalStatus::Completed {
+                info!("check_if_rebalance_done: pool {} is already completed", pool_index);
+                return true;
+            }
+
+            // Calculate the percentage of free space improvement
+            let pfi = (pool_stat.init_free_space + pool_stat.bytes) as f64 / pool_stat.init_capacity as f64;
+
+            // Mark pool rebalance as done if within 5% of the PercentFreeGoal
+            if (pfi - meta.percent_free_goal).abs() <= 0.05 {
+                pool_stat.info.status = RebalStatus::Completed;
+                pool_stat.info.end_time = Some(OffsetDateTime::now_utc());
+                info!("check_if_rebalance_done: pool {} is completed, pfi: {}", pool_index, pfi);
+                return true;
+            }
+        }

@@ -1102,11 +1102,11 @@ impl ECStore {
     pub async fn save_rebalance_stats(&self, pool_idx: usize, opt: RebalSaveOpt) -> Result<()> {
         // TODO: lock
         let mut meta = RebalanceMeta::new();
-        if let Err(err) = meta.load(self.pools[0].clone()).await {
-            if err != Error::ConfigNotFound {
-                info!("save_rebalance_stats: load err: {:?}", err);
-                return Err(err);
-            }
-        }
+        if let Err(err) = meta.load(self.pools[0].clone()).await
+            && err != Error::ConfigNotFound
+        {
+            info!("save_rebalance_stats: load err: {:?}", err);
+            return Err(err);
+        }

         match opt {
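To make the completion test in `check_if_rebalance_done` concrete, here is a worked example with made-up numbers, assuming `bytes` tracks data moved off the pool: a pool with 1000 units of capacity started rebalancing with 180 units free and has since shed 70 units, against a 25% free-space goal.

    fn main() {
        let init_capacity = 1000.0_f64;
        let init_free_space = 180.0_f64;
        let bytes_moved_off = 70.0_f64;
        let percent_free_goal = 0.25_f64;

        // Projected free-space fraction after accounting for moved bytes.
        let pfi = (init_free_space + bytes_moved_off) / init_capacity; // 0.25

        // The pool counts as rebalanced once pfi is within 5% of the goal.
        assert!((pfi - percent_free_goal).abs() <= 0.05);
    }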
 88 crates/ecstore/src/rpc/client.rs Normal file
@@ -0,0 +1,88 @@
+// Copyright 2024 RustFS Team
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+use std::error::Error;
+
+use http::Method;
+use rustfs_common::GLOBAL_CONN_MAP;
+use rustfs_protos::{create_new_channel, proto_gen::node_service::node_service_client::NodeServiceClient};
+use tonic::{service::interceptor::InterceptedService, transport::Channel};
+use tracing::debug;
+
+use crate::rpc::{TONIC_RPC_PREFIX, gen_signature_headers};
+
+/// 3. Subsequent calls will attempt fresh connections
+/// 4. If node is still down, connection will fail fast (3s timeout)
+pub async fn node_service_time_out_client(
+    addr: &String,
+    interceptor: TonicInterceptor,
+) -> Result<NodeServiceClient<InterceptedService<Channel, TonicInterceptor>>, Box<dyn Error>> {
+    // Try to get cached channel
+    let cached_channel = { GLOBAL_CONN_MAP.read().await.get(addr).cloned() };
+
+    let channel = match cached_channel {
+        Some(channel) => {
+            debug!("Using cached gRPC channel for: {}", addr);
+            channel
+        }
+        None => {
+            // No cached connection, create new one
+            create_new_channel(addr).await?
+        }
+    };
+
+    Ok(NodeServiceClient::with_interceptor(channel, interceptor))
+}
+
+pub async fn node_service_time_out_client_no_auth(
+    addr: &String,
+) -> Result<NodeServiceClient<InterceptedService<Channel, TonicInterceptor>>, Box<dyn Error>> {
+    node_service_time_out_client(addr, TonicInterceptor::NoOp(NoOpInterceptor)).await
+}
+
+pub struct TonicSignatureInterceptor;
+
+impl tonic::service::Interceptor for TonicSignatureInterceptor {
+    fn call(&mut self, mut req: tonic::Request<()>) -> Result<tonic::Request<()>, tonic::Status> {
+        let headers = gen_signature_headers(TONIC_RPC_PREFIX, &Method::GET);
+        req.metadata_mut().as_mut().extend(headers);
+        Ok(req)
+    }
+}
+
+pub fn gen_tonic_signature_interceptor() -> TonicSignatureInterceptor {
+    TonicSignatureInterceptor
+}
+
+pub struct NoOpInterceptor;
+
+impl tonic::service::Interceptor for NoOpInterceptor {
+    fn call(&mut self, req: tonic::Request<()>) -> Result<tonic::Request<()>, tonic::Status> {
+        Ok(req)
+    }
+}
+
+pub enum TonicInterceptor {
+    Signature(TonicSignatureInterceptor),
+    NoOp(NoOpInterceptor),
+}
+
+impl tonic::service::Interceptor for TonicInterceptor {
+    fn call(&mut self, req: tonic::Request<()>) -> Result<tonic::Request<()>, tonic::Status> {
+        match self {
+            TonicInterceptor::Signature(interceptor) => interceptor.call(req),
+            TonicInterceptor::NoOp(interceptor) => interceptor.call(req),
+        }
+    }
+}
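The enum-based interceptor keeps one concrete client type, `NodeServiceClient<InterceptedService<Channel, TonicInterceptor>>`, whether or not requests are signed; callers choose the variant at the call site. A usage sketch against the functions above (the address and error handling are illustrative only):

    async fn connect(addr: &String) -> Result<(), Box<dyn std::error::Error>> {
        // Signed client: every request passes through the HMAC signing interceptor.
        let _signed =
            node_service_time_out_client(addr, TonicInterceptor::Signature(gen_tonic_signature_interceptor())).await?;

        // Unsigned client, for peers or endpoints that do not verify signatures.
        let _open = node_service_time_out_client_no_auth(addr).await?;
        Ok(())
    }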
@@ -15,11 +15,8 @@
 use base64::Engine as _;
 use base64::engine::general_purpose;
 use hmac::{Hmac, KeyInit, Mac};
-use http::HeaderMap;
-use http::HeaderValue;
-use http::Method;
-use http::Uri;
-use rustfs_credentials::get_global_action_cred;
+use http::{HeaderMap, HeaderValue, Method, Uri};
+use rustfs_credentials::{DEFAULT_SECRET_KEY, ENV_RPC_SECRET, get_global_secret_key_opt};
 use sha2::Sha256;
 use time::OffsetDateTime;
 use tracing::error;

@@ -29,15 +26,20 @@ type HmacSha256 = Hmac<Sha256>;
 const SIGNATURE_HEADER: &str = "x-rustfs-signature";
 const TIMESTAMP_HEADER: &str = "x-rustfs-timestamp";
 const SIGNATURE_VALID_DURATION: i64 = 300; // 5 minutes
+pub const TONIC_RPC_PREFIX: &str = "/node_service.NodeService";

 /// Get the shared secret for HMAC signing
 fn get_shared_secret() -> String {
-    if let Some(cred) = get_global_action_cred() {
-        cred.secret_key
-    } else {
-        // Fallback to environment variable if global credentials are not available
-        std::env::var("RUSTFS_RPC_SECRET").unwrap_or_else(|_| "rustfs-default-secret".to_string())
-    }
+    rustfs_credentials::GLOBAL_RUSTFS_RPC_SECRET
+        .get_or_init(|| {
+            rustfs_utils::get_env_str(
+                ENV_RPC_SECRET,
+                get_global_secret_key_opt()
+                    .unwrap_or_else(|| DEFAULT_SECRET_KEY.to_string())
+                    .as_str(),
+            )
+        })
+        .clone()
 }

 /// Generate HMAC-SHA256 signature for the given data

@@ -57,13 +59,25 @@ fn generate_signature(secret: &str, url: &str, method: &Method, timestamp: i64)

 /// Build headers with authentication signature
 pub fn build_auth_headers(url: &str, method: &Method, headers: &mut HeaderMap) {
+    let auth_headers = gen_signature_headers(url, method);
+
+    headers.extend(auth_headers);
+}
+
+pub fn gen_signature_headers(url: &str, method: &Method) -> HeaderMap {
     let secret = get_shared_secret();
     let timestamp = OffsetDateTime::now_utc().unix_timestamp();

     let signature = generate_signature(&secret, url, method, timestamp);

-    headers.insert(SIGNATURE_HEADER, HeaderValue::from_str(&signature).unwrap());
-    headers.insert(TIMESTAMP_HEADER, HeaderValue::from_str(&timestamp.to_string()).unwrap());
+    let mut headers = HeaderMap::new();
+    headers.insert(SIGNATURE_HEADER, HeaderValue::from_str(&signature).expect("Invalid header value"));
+    headers.insert(
+        TIMESTAMP_HEADER,
+        HeaderValue::from_str(&timestamp.to_string()).expect("Invalid header value"),
+    );
+
+    headers
 }

 /// Verify the request signature for RPC requests
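For context, `generate_signature` itself is not shown in this diff; the crates it uses (hmac, sha2, base64) suggest the usual HMAC-SHA256 pattern. A self-contained sketch, where the `method:url:timestamp` payload layout is an assumption rather than something this diff confirms:

    use base64::Engine as _;
    use base64::engine::general_purpose;
    use hmac::{Hmac, KeyInit, Mac};
    use sha2::Sha256;

    type HmacSha256 = Hmac<Sha256>;

    // Assumed payload layout; the real generate_signature may differ.
    fn sign(secret: &str, url: &str, method: &str, timestamp: i64) -> String {
        let mut mac = HmacSha256::new_from_slice(secret.as_bytes()).expect("HMAC accepts any key length");
        mac.update(format!("{method}:{url}:{timestamp}").as_bytes());
        general_purpose::STANDARD.encode(mac.finalize().into_bytes())
    }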
@@ -12,12 +12,18 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+mod client;
 mod http_auth;
 mod peer_rest_client;
 mod peer_s3_client;
 mod remote_disk;
 mod remote_locker;

-pub use http_auth::{build_auth_headers, verify_rpc_signature};
+pub use client::{
+    TonicInterceptor, gen_tonic_signature_interceptor, node_service_time_out_client, node_service_time_out_client_no_auth,
+};
+pub use http_auth::{TONIC_RPC_PREFIX, build_auth_headers, gen_signature_headers, verify_rpc_signature};
 pub use peer_rest_client::PeerRestClient;
 pub use peer_s3_client::{LocalPeerS3Client, PeerS3Client, RemotePeerS3Client, S3PeerSys};
 pub use remote_disk::RemoteDisk;
 pub use remote_locker::RemoteClient;
@@ -13,6 +13,7 @@
 // limitations under the License.

 use crate::error::{Error, Result};
+use crate::rpc::client::{TonicInterceptor, gen_tonic_signature_interceptor, node_service_time_out_client};
 use crate::{
     endpoints::EndpointServerPools,
     global::is_dist_erasure,

@@ -25,21 +26,22 @@ use rustfs_madmin::{
     metrics::RealtimeMetrics,
     net::NetInfo,
 };
-use rustfs_protos::{
-    evict_failed_connection, node_service_time_out_client,
-    proto_gen::node_service::{
-        DeleteBucketMetadataRequest, DeletePolicyRequest, DeleteServiceAccountRequest, DeleteUserRequest, GetCpusRequest,
-        GetMemInfoRequest, GetMetricsRequest, GetNetInfoRequest, GetOsInfoRequest, GetPartitionsRequest, GetProcInfoRequest,
-        GetSeLinuxInfoRequest, GetSysConfigRequest, GetSysErrorsRequest, LoadBucketMetadataRequest, LoadGroupRequest,
-        LoadPolicyMappingRequest, LoadPolicyRequest, LoadRebalanceMetaRequest, LoadServiceAccountRequest,
-        LoadTransitionTierConfigRequest, LoadUserRequest, LocalStorageInfoRequest, Mss, ReloadPoolMetaRequest,
-        ReloadSiteReplicationConfigRequest, ServerInfoRequest, SignalServiceRequest, StartProfilingRequest, StopRebalanceRequest,
-    },
-};
+use rustfs_protos::evict_failed_connection;
+use rustfs_protos::proto_gen::node_service::node_service_client::NodeServiceClient;
+use rustfs_protos::proto_gen::node_service::{
+    DeleteBucketMetadataRequest, DeletePolicyRequest, DeleteServiceAccountRequest, DeleteUserRequest, GetCpusRequest,
+    GetMemInfoRequest, GetMetricsRequest, GetNetInfoRequest, GetOsInfoRequest, GetPartitionsRequest, GetProcInfoRequest,
+    GetSeLinuxInfoRequest, GetSysConfigRequest, GetSysErrorsRequest, LoadBucketMetadataRequest, LoadGroupRequest,
+    LoadPolicyMappingRequest, LoadPolicyRequest, LoadRebalanceMetaRequest, LoadServiceAccountRequest,
+    LoadTransitionTierConfigRequest, LoadUserRequest, LocalStorageInfoRequest, Mss, ReloadPoolMetaRequest,
+    ReloadSiteReplicationConfigRequest, ServerInfoRequest, SignalServiceRequest, StartProfilingRequest, StopRebalanceRequest,
+};
 use rustfs_utils::XHost;
 use serde::{Deserialize, Serialize as _};
 use std::{collections::HashMap, io::Cursor, time::SystemTime};
 use tonic::Request;
+use tonic::service::interceptor::InterceptedService;
+use tonic::transport::Channel;
 use tracing::warn;

 pub const PEER_RESTSIGNAL: &str = "signal";

@@ -66,13 +68,13 @@ impl PeerRestClient {
         let mut remote = Vec::with_capacity(hosts.len());
         let mut all = vec![None; hosts.len()];
         for (i, hs_host) in hosts.iter().enumerate() {
-            if let Some(host) = hs_host {
-                if let Some(grid_host) = eps.find_grid_hosts_from_peer(host) {
-                    let client = PeerRestClient::new(host.clone(), grid_host);
+            if let Some(host) = hs_host
+                && let Some(grid_host) = eps.find_grid_hosts_from_peer(host)
+            {
+                let client = PeerRestClient::new(host.clone(), grid_host);

-                    all[i] = Some(client.clone());
-                    remote.push(Some(client));
-                }
+                all[i] = Some(client.clone());
+                remote.push(Some(client));
             }
         }

@@ -83,6 +85,12 @@ impl PeerRestClient {
         (remote, all)
     }

+    pub async fn get_client(&self) -> Result<NodeServiceClient<InterceptedService<Channel, TonicInterceptor>>> {
+        node_service_time_out_client(&self.grid_host, TonicInterceptor::Signature(gen_tonic_signature_interceptor()))
+            .await
+            .map_err(|err| Error::other(format!("can not get client, err: {err}")))
+    }
+
     /// Evict the connection to this peer from the global cache.
     /// This should be called when communication with this peer fails.
     pub async fn evict_connection(&self) {

@@ -101,9 +109,7 @@ impl PeerRestClient {
     }

     async fn local_storage_info_inner(&self) -> Result<rustfs_madmin::StorageInfo> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(LocalStorageInfoRequest { metrics: true });

         let response = client.local_storage_info(request).await?.into_inner();

@@ -131,9 +137,7 @@ impl PeerRestClient {
     }

     async fn server_info_inner(&self) -> Result<ServerProperties> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(ServerInfoRequest { metrics: true });

         let response = client.server_info(request).await?.into_inner();

@@ -152,9 +156,7 @@ impl PeerRestClient {
     }

     pub async fn get_cpus(&self) -> Result<Cpus> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(GetCpusRequest {});

         let response = client.get_cpus(request).await?.into_inner();

@@ -173,9 +175,7 @@ impl PeerRestClient {
     }

     pub async fn get_net_info(&self) -> Result<NetInfo> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(GetNetInfoRequest {});

         let response = client.get_net_info(request).await?.into_inner();

@@ -194,9 +194,7 @@ impl PeerRestClient {
     }

     pub async fn get_partitions(&self) -> Result<Partitions> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(GetPartitionsRequest {});

         let response = client.get_partitions(request).await?.into_inner();

@@ -215,9 +213,7 @@ impl PeerRestClient {
     }

     pub async fn get_os_info(&self) -> Result<OsInfo> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(GetOsInfoRequest {});

         let response = client.get_os_info(request).await?.into_inner();

@@ -236,9 +232,7 @@ impl PeerRestClient {
     }

     pub async fn get_se_linux_info(&self) -> Result<SysService> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(GetSeLinuxInfoRequest {});

         let response = client.get_se_linux_info(request).await?.into_inner();

@@ -257,9 +251,7 @@ impl PeerRestClient {
     }

     pub async fn get_sys_config(&self) -> Result<SysConfig> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(GetSysConfigRequest {});

         let response = client.get_sys_config(request).await?.into_inner();

@@ -278,9 +270,7 @@ impl PeerRestClient {
     }

     pub async fn get_sys_errors(&self) -> Result<SysErrors> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(GetSysErrorsRequest {});

         let response = client.get_sys_errors(request).await?.into_inner();

@@ -299,9 +289,7 @@ impl PeerRestClient {
     }

     pub async fn get_mem_info(&self) -> Result<MemInfo> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(GetMemInfoRequest {});

         let response = client.get_mem_info(request).await?.into_inner();

@@ -320,9 +308,7 @@ impl PeerRestClient {
     }

     pub async fn get_metrics(&self, t: MetricType, opts: &CollectMetricsOpts) -> Result<RealtimeMetrics> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let mut buf_t = Vec::new();
         t.serialize(&mut Serializer::new(&mut buf_t))?;
         let mut buf_o = Vec::new();

@@ -348,9 +334,7 @@ impl PeerRestClient {
     }

     pub async fn get_proc_info(&self) -> Result<ProcInfo> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(GetProcInfoRequest {});

         let response = client.get_proc_info(request).await?.into_inner();

@@ -369,9 +353,7 @@ impl PeerRestClient {
     }

     pub async fn start_profiling(&self, profiler: &str) -> Result<()> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(StartProfilingRequest {
             profiler: profiler.to_string(),
         });

@@ -403,9 +385,7 @@ impl PeerRestClient {
     }

     pub async fn load_bucket_metadata(&self, bucket: &str) -> Result<()> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(LoadBucketMetadataRequest {
             bucket: bucket.to_string(),
         });

@@ -421,9 +401,7 @@ impl PeerRestClient {
     }

     pub async fn delete_bucket_metadata(&self, bucket: &str) -> Result<()> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(DeleteBucketMetadataRequest {
             bucket: bucket.to_string(),
         });

@@ -439,9 +417,7 @@ impl PeerRestClient {
     }

     pub async fn delete_policy(&self, policy: &str) -> Result<()> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(DeletePolicyRequest {
             policy_name: policy.to_string(),
         });

@@ -457,9 +433,7 @@ impl PeerRestClient {
     }

     pub async fn load_policy(&self, policy: &str) -> Result<()> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(LoadPolicyRequest {
             policy_name: policy.to_string(),
         });

@@ -475,9 +449,7 @@ impl PeerRestClient {
     }

     pub async fn load_policy_mapping(&self, user_or_group: &str, user_type: u64, is_group: bool) -> Result<()> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(LoadPolicyMappingRequest {
             user_or_group: user_or_group.to_string(),
             user_type,

@@ -495,9 +467,7 @@ impl PeerRestClient {
     }

     pub async fn delete_user(&self, access_key: &str) -> Result<()> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(DeleteUserRequest {
             access_key: access_key.to_string(),
         });

@@ -517,9 +487,7 @@ impl PeerRestClient {
     }

     pub async fn delete_service_account(&self, access_key: &str) -> Result<()> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(DeleteServiceAccountRequest {
             access_key: access_key.to_string(),
         });

@@ -539,9 +507,7 @@ impl PeerRestClient {
     }

     pub async fn load_user(&self, access_key: &str, temp: bool) -> Result<()> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(LoadUserRequest {
             access_key: access_key.to_string(),
             temp,

@@ -562,9 +528,7 @@ impl PeerRestClient {
     }

     pub async fn load_service_account(&self, access_key: &str) -> Result<()> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(LoadServiceAccountRequest {
             access_key: access_key.to_string(),
         });

@@ -584,9 +548,7 @@ impl PeerRestClient {
     }

     pub async fn load_group(&self, group: &str) -> Result<()> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(LoadGroupRequest {
             group: group.to_string(),
         });

@@ -606,9 +568,7 @@ impl PeerRestClient {
     }

     pub async fn reload_site_replication_config(&self) -> Result<()> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(ReloadSiteReplicationConfigRequest {});

         let response = client.reload_site_replication_config(request).await?.into_inner();

@@ -622,9 +582,7 @@ impl PeerRestClient {
     }

     pub async fn signal_service(&self, sig: u64, sub_sys: &str, dry_run: bool, _exec_at: SystemTime) -> Result<()> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let mut vars = HashMap::new();
         vars.insert(PEER_RESTSIGNAL.to_string(), sig.to_string());
         vars.insert(PEER_RESTSUB_SYS.to_string(), sub_sys.to_string());

@@ -644,23 +602,17 @@ impl PeerRestClient {
     }

     pub async fn get_metacache_listing(&self) -> Result<()> {
-        let _client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let _client = self.get_client().await?;
         todo!()
     }

     pub async fn update_metacache_listing(&self) -> Result<()> {
-        let _client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let _client = self.get_client().await?;
         todo!()
     }

     pub async fn reload_pool_meta(&self) -> Result<()> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(ReloadPoolMetaRequest {});

         let response = client.reload_pool_meta(request).await?.into_inner();

@@ -675,9 +627,7 @@ impl PeerRestClient {
     }

     pub async fn stop_rebalance(&self) -> Result<()> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(StopRebalanceRequest {});

         let response = client.stop_rebalance(request).await?.into_inner();

@@ -692,9 +642,7 @@ impl PeerRestClient {
     }

     pub async fn load_rebalance_meta(&self, start_rebalance: bool) -> Result<()> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(LoadRebalanceMetaRequest { start_rebalance });

         let response = client.load_rebalance_meta(request).await?.into_inner();

@@ -711,9 +659,7 @@ impl PeerRestClient {
     }

     pub async fn load_transition_tier_config(&self) -> Result<()> {
-        let mut client = node_service_time_out_client(&self.grid_host)
-            .await
-            .map_err(|err| Error::other(err.to_string()))?;
+        let mut client = self.get_client().await?;
         let request = Request::new(LoadTransitionTierConfigRequest {});

         let response = client.load_transition_tier_config(request).await?.into_inner();
@@ -18,6 +18,7 @@ use crate::disk::error::{Error, Result};
 use crate::disk::error_reduce::{BUCKET_OP_IGNORED_ERRS, is_all_buckets_not_found, reduce_write_quorum_errs};
 use crate::disk::{DiskAPI, DiskStore, disk_store::get_max_timeout_duration};
 use crate::global::GLOBAL_LOCAL_DISK_MAP;
+use crate::rpc::client::{TonicInterceptor, gen_tonic_signature_interceptor, node_service_time_out_client};
 use crate::store::all_local_disk;
 use crate::store_utils::is_reserved_or_invalid_bucket;
 use crate::{

@@ -32,7 +33,7 @@ use async_trait::async_trait;
 use futures::future::join_all;
 use rustfs_common::heal_channel::{DriveState, HealItemType, HealOpts, RUSTFS_RESERVED_BUCKET};
 use rustfs_madmin::heal_commands::{HealDriveInfo, HealResultItem};
-use rustfs_protos::node_service_time_out_client;
+use rustfs_protos::proto_gen::node_service::node_service_client::NodeServiceClient;
 use rustfs_protos::proto_gen::node_service::{
     DeleteBucketRequest, GetBucketInfoRequest, HealBucketRequest, ListBucketRequest, MakeBucketRequest,
 };

@@ -40,6 +41,8 @@ use std::{collections::HashMap, fmt::Debug, sync::Arc, time::Duration};
 use tokio::{net::TcpStream, sync::RwLock, time};
 use tokio_util::sync::CancellationToken;
 use tonic::Request;
+use tonic::service::interceptor::InterceptedService;
+use tonic::transport::Channel;
 use tracing::{debug, info, warn};

 type Client = Arc<Box<dyn PeerS3Client>>;

@@ -101,10 +104,10 @@ impl S3PeerSys {
         for pool_idx in 0..self.pools_count {
             let mut per_pool_errs = vec![None; self.clients.len()];
             for (i, client) in self.clients.iter().enumerate() {
-                if let Some(v) = client.get_pools() {
-                    if v.contains(&pool_idx) {
-                        per_pool_errs[i] = errs[i].clone();
-                    }
-                }
+                if let Some(v) = client.get_pools()
+                    && v.contains(&pool_idx)
+                {
+                    per_pool_errs[i] = errs[i].clone();
+                }
             }
             let qu = per_pool_errs.len() / 2;

@@ -136,10 +139,10 @@ impl S3PeerSys {
         for pool_idx in 0..self.pools_count {
             let mut per_pool_errs = vec![None; self.clients.len()];
             for (i, client) in self.clients.iter().enumerate() {
-                if let Some(v) = client.get_pools() {
-                    if v.contains(&pool_idx) {
-                        per_pool_errs[i] = errs[i].clone();
-                    }
-                }
+                if let Some(v) = client.get_pools()
+                    && v.contains(&pool_idx)
+                {
+                    per_pool_errs[i] = errs[i].clone();
+                }
             }
             let qu = per_pool_errs.len() / 2;

@@ -587,6 +590,12 @@ impl RemotePeerS3Client {
         client
     }

+    pub async fn get_client(&self) -> Result<NodeServiceClient<InterceptedService<Channel, TonicInterceptor>>> {
+        node_service_time_out_client(&self.addr, TonicInterceptor::Signature(gen_tonic_signature_interceptor()))
+            .await
+            .map_err(|err| Error::other(format!("can not get client, err: {err}")))
+    }
+
     pub fn get_addr(&self) -> String {
         self.addr.clone()
     }

@@ -664,7 +673,7 @@ impl RemotePeerS3Client {
     async fn perform_connectivity_check(addr: &str) -> Result<()> {
         use tokio::time::timeout;

-        let url = url::Url::parse(addr).map_err(|e| Error::other(format!("Invalid URL: {}", e)))?;
+        let url = url::Url::parse(addr).map_err(|e| Error::other(format!("Invalid URL: {e}")))?;

         let Some(host) = url.host_str() else {
             return Err(Error::other("No host in URL".to_string()));

@@ -675,7 +684,7 @@ impl RemotePeerS3Client {
         // Try to establish TCP connection
         match timeout(CHECK_TIMEOUT_DURATION, TcpStream::connect((host, port))).await {
             Ok(Ok(_)) => Ok(()),
-            _ => Err(Error::other(format!("Cannot connect to {}:{}", host, port))),
+            _ => Err(Error::other(format!("Cannot connect to {host}:{port}"))),
         }
     }

@@ -714,7 +723,7 @@ impl RemotePeerS3Client {
                 // Timeout occurred, mark peer as potentially faulty
                 self.health.decrement_waiting();
                 warn!("Remote peer operation timeout after {:?}", timeout_duration);
-                Err(Error::other(format!("Remote peer operation timeout after {:?}", timeout_duration)))
+                Err(Error::other(format!("Remote peer operation timeout after {timeout_duration:?}")))
             }
         }
     }

@@ -730,9 +739,7 @@ impl PeerS3Client for RemotePeerS3Client {
         self.execute_with_timeout(
             || async {
                 let options: String = serde_json::to_string(opts)?;
-                let mut client = node_service_time_out_client(&self.addr)
-                    .await
-                    .map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
+                let mut client = self.get_client().await?;
                 let request = Request::new(HealBucketRequest {
                     bucket: bucket.to_string(),
                     options,

@@ -762,9 +769,7 @@ impl PeerS3Client for RemotePeerS3Client {
         self.execute_with_timeout(
             || async {
                 let options = serde_json::to_string(opts)?;
-                let mut client = node_service_time_out_client(&self.addr)
-                    .await
-                    .map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
+                let mut client = self.get_client().await?;
                 let request = Request::new(ListBucketRequest { options });
                 let response = client.list_bucket(request).await?.into_inner();
                 if !response.success {

@@ -790,9 +795,7 @@ impl PeerS3Client for RemotePeerS3Client {
         self.execute_with_timeout(
             || async {
                 let options = serde_json::to_string(opts)?;
-                let mut client = node_service_time_out_client(&self.addr)
-                    .await
-                    .map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
+                let mut client = self.get_client().await?;
                 let request = Request::new(MakeBucketRequest {
                     name: bucket.to_string(),
                     options,

@@ -818,9 +821,7 @@ impl PeerS3Client for RemotePeerS3Client {
         self.execute_with_timeout(
             || async {
                 let options = serde_json::to_string(opts)?;
-                let mut client = node_service_time_out_client(&self.addr)
-                    .await
-                    .map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
+                let mut client = self.get_client().await?;
                 let request = Request::new(GetBucketInfoRequest {
                     bucket: bucket.to_string(),
                     options,

@@ -845,9 +846,7 @@ impl PeerS3Client for RemotePeerS3Client {
     async fn delete_bucket(&self, bucket: &str, _opts: &DeleteBucketOptions) -> Result<()> {
         self.execute_with_timeout(
             || async {
-                let mut client = node_service_time_out_client(&self.addr)
-                    .await
-                    .map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
+                let mut client = self.get_client().await?;

                 let request = Request::new(DeleteBucketRequest {
                     bucket: bucket.to_string(),
@@ -21,39 +21,44 @@ use std::{
|
||||
use bytes::Bytes;
|
||||
use futures::lock::Mutex;
|
||||
use http::{HeaderMap, HeaderValue, Method, header::CONTENT_TYPE};
|
||||
use rustfs_protos::{
|
||||
node_service_time_out_client,
|
||||
proto_gen::node_service::{
|
||||
CheckPartsRequest, DeletePathsRequest, DeleteRequest, DeleteVersionRequest, DeleteVersionsRequest, DeleteVolumeRequest,
|
||||
DiskInfoRequest, ListDirRequest, ListVolumesRequest, MakeVolumeRequest, MakeVolumesRequest, ReadAllRequest,
|
||||
ReadMultipleRequest, ReadPartsRequest, ReadVersionRequest, ReadXlRequest, RenameDataRequest, RenameFileRequest,
|
||||
StatVolumeRequest, UpdateMetadataRequest, VerifyFileRequest, WriteAllRequest, WriteMetadataRequest,
|
||||
},
|
||||
use rustfs_protos::proto_gen::node_service::{
|
||||
CheckPartsRequest, DeletePathsRequest, DeleteRequest, DeleteVersionRequest, DeleteVersionsRequest, DeleteVolumeRequest,
|
||||
DiskInfoRequest, ListDirRequest, ListVolumesRequest, MakeVolumeRequest, MakeVolumesRequest, ReadAllRequest,
|
||||
ReadMultipleRequest, ReadPartsRequest, ReadVersionRequest, ReadXlRequest, RenameDataRequest, RenameFileRequest,
|
||||
StatVolumeRequest, UpdateMetadataRequest, VerifyFileRequest, WriteAllRequest, WriteMetadataRequest,
|
||||
node_service_client::NodeServiceClient,
|
||||
};
|
||||
use rustfs_utils::string::parse_bool_with_default;
|
||||
use tokio::time;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
use crate::disk::{
|
||||
CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskOption, FileInfoVersions,
|
||||
ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, UpdateMetadataOpts, VolumeInfo, WalkDirOptions,
|
||||
disk_store::{
|
||||
CHECK_EVERY, CHECK_TIMEOUT_DURATION, ENV_RUSTFS_DRIVE_ACTIVE_MONITORING, SKIP_IF_SUCCESS_BEFORE, get_max_timeout_duration,
|
||||
},
|
||||
endpoint::Endpoint,
|
||||
};
|
||||
use crate::disk::{FileReader, FileWriter};
|
||||
use crate::disk::{disk_store::DiskHealthTracker, error::DiskError};
|
||||
use crate::{
|
||||
disk::error::{Error, Result},
|
||||
rpc::build_auth_headers,
|
||||
};
|
||||
use crate::{
|
||||
disk::{
|
||||
CheckPartsResp, DeleteOptions, DiskAPI, DiskInfo, DiskInfoOptions, DiskLocation, DiskOption, FileInfoVersions,
|
||||
ReadMultipleReq, ReadMultipleResp, ReadOptions, RenameDataResp, UpdateMetadataOpts, VolumeInfo, WalkDirOptions,
|
||||
disk_store::{
|
||||
CHECK_EVERY, CHECK_TIMEOUT_DURATION, ENV_RUSTFS_DRIVE_ACTIVE_MONITORING, SKIP_IF_SUCCESS_BEFORE,
|
||||
get_max_timeout_duration,
|
||||
},
|
||||
endpoint::Endpoint,
|
||||
},
|
||||
rpc::client::gen_tonic_signature_interceptor,
|
||||
};
|
||||
use crate::{
|
||||
disk::{FileReader, FileWriter},
|
||||
rpc::client::{TonicInterceptor, node_service_time_out_client},
|
||||
};
|
||||
use rustfs_filemeta::{FileInfo, ObjectPartInfo, RawFileInfo};
|
||||
use rustfs_protos::proto_gen::node_service::RenamePartRequest;
|
||||
use rustfs_rio::{HttpReader, HttpWriter};
|
||||
use tokio::{io::AsyncWrite, net::TcpStream, time::timeout};
|
||||
use tonic::Request;
|
||||
use tonic::{Request, service::interceptor::InterceptedService, transport::Channel};
|
||||
use uuid::Uuid;
|
||||
|
||||
#[derive(Debug)]
|
||||
@@ -201,7 +206,7 @@ impl RemoteDisk {
|
||||
|
||||
/// Perform basic connectivity check for remote disk
|
||||
async fn perform_connectivity_check(addr: &str) -> Result<()> {
|
||||
let url = url::Url::parse(addr).map_err(|e| Error::other(format!("Invalid URL: {}", e)))?;
|
||||
let url = url::Url::parse(addr).map_err(|e| Error::other(format!("Invalid URL: {e}")))?;
|
||||
|
||||
let Some(host) = url.host_str() else {
|
||||
return Err(Error::other("No host in URL".to_string()));
|
||||
@@ -215,7 +220,7 @@ impl RemoteDisk {
|
||||
drop(stream);
|
||||
Ok(())
|
||||
}
|
||||
_ => Err(Error::other(format!("Cannot connect to {}:{}", host, port))),
|
||||
_ => Err(Error::other(format!("Cannot connect to {host}:{port}"))),
|
||||
}
|
||||
}
|
||||
|
||||
@@ -255,10 +260,16 @@ impl RemoteDisk {
|
||||
// Timeout occurred, mark disk as potentially faulty
|
||||
self.health.decrement_waiting();
|
||||
warn!("Remote disk operation timeout after {:?}", timeout_duration);
|
||||
Err(Error::other(format!("Remote disk operation timeout after {:?}", timeout_duration)))
|
||||
Err(Error::other(format!("Remote disk operation timeout after {timeout_duration:?}")))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_client(&self) -> Result<NodeServiceClient<InterceptedService<Channel, TonicInterceptor>>> {
|
||||
node_service_time_out_client(&self.addr, TonicInterceptor::Signature(gen_tonic_signature_interceptor()))
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: all api need to handle errors
|
||||
@@ -343,7 +354,8 @@ impl DiskAPI for RemoteDisk {
|
||||
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(MakeVolumeRequest {
|
||||
@@ -370,7 +382,8 @@ impl DiskAPI for RemoteDisk {
|
||||
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(MakeVolumesRequest {
|
||||
@@ -397,7 +410,8 @@ impl DiskAPI for RemoteDisk {
|
||||
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(ListVolumesRequest {
|
||||
@@ -429,7 +443,8 @@ impl DiskAPI for RemoteDisk {
|
||||
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(StatVolumeRequest {
|
||||
@@ -458,7 +473,8 @@ impl DiskAPI for RemoteDisk {
|
||||
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(DeleteVolumeRequest {
|
||||
@@ -545,7 +561,8 @@ impl DiskAPI for RemoteDisk {
|
||||
let file_info = serde_json::to_string(&fi)?;
|
||||
let opts = serde_json::to_string(&opts)?;
|
||||
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(DeleteVersionRequest {
|
||||
@@ -603,7 +620,7 @@ impl DiskAPI for RemoteDisk {
|
||||
}
|
||||
});
|
||||
}
|
||||
let mut client = match node_service_time_out_client(&self.addr).await {
|
||||
let mut client = match self.get_client().await {
|
||||
Ok(client) => client,
|
||||
Err(err) => {
|
||||
let mut errors = Vec::with_capacity(versions.len());
|
||||
@@ -674,7 +691,8 @@ impl DiskAPI for RemoteDisk {
|
||||
|
||||
self.execute_with_timeout(
|
||||
|| async {
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
let mut client = self
|
||||
.get_client()
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(DeletePathsRequest {
|
||||
@@ -703,7 +721,8 @@ impl DiskAPI for RemoteDisk {
 
         self.execute_with_timeout(
             || async {
-                let mut client = node_service_time_out_client(&self.addr)
+                let mut client = self
+                    .get_client()
                     .await
                     .map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
                 let request = Request::new(WriteMetadataRequest {

@@ -734,7 +753,8 @@ impl DiskAPI for RemoteDisk {
 
         self.execute_with_timeout(
             || async {
-                let mut client = node_service_time_out_client(&self.addr)
+                let mut client = self
+                    .get_client()
                     .await
                     .map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
                 let request = Request::new(UpdateMetadataRequest {

@@ -772,7 +792,8 @@ impl DiskAPI for RemoteDisk {
 
         self.execute_with_timeout(
             || async {
-                let mut client = node_service_time_out_client(&self.addr)
+                let mut client = self
+                    .get_client()
                     .await
                     .map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
                 let request = Request::new(ReadVersionRequest {

@@ -804,7 +825,8 @@ impl DiskAPI for RemoteDisk {
 
         self.execute_with_timeout(
             || async {
-                let mut client = node_service_time_out_client(&self.addr)
+                let mut client = self
+                    .get_client()
                     .await
                     .map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
                 let request = Request::new(ReadXlRequest {

@@ -843,7 +865,8 @@ impl DiskAPI for RemoteDisk {
         self.execute_with_timeout(
             || async {
                 let file_info = serde_json::to_string(&fi)?;
-                let mut client = node_service_time_out_client(&self.addr)
+                let mut client = self
+                    .get_client()
                     .await
                     .map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
                 let request = Request::new(RenameDataRequest {

@@ -878,7 +901,8 @@ impl DiskAPI for RemoteDisk {
            return Err(DiskError::FaultyDisk);
        }
 
-        let mut client = node_service_time_out_client(&self.addr)
+        let mut client = self
+            .get_client()
            .await
            .map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
        let request = Request::new(ListDirRequest {

@@ -1039,7 +1063,8 @@ impl DiskAPI for RemoteDisk {
 
         self.execute_with_timeout(
             || async {
-                let mut client = node_service_time_out_client(&self.addr)
+                let mut client = self
+                    .get_client()
                     .await
                     .map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
                 let request = Request::new(RenameFileRequest {

@@ -1069,7 +1094,8 @@ impl DiskAPI for RemoteDisk {
 
         self.execute_with_timeout(
             || async {
-                let mut client = node_service_time_out_client(&self.addr)
+                let mut client = self
+                    .get_client()
                     .await
                     .map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
                 let request = Request::new(RenamePartRequest {

@@ -1101,7 +1127,8 @@ impl DiskAPI for RemoteDisk {
         self.execute_with_timeout(
             || async {
                 let options = serde_json::to_string(&opt)?;
-                let mut client = node_service_time_out_client(&self.addr)
+                let mut client = self
+                    .get_client()
                     .await
                     .map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
                 let request = Request::new(DeleteRequest {

@@ -1131,7 +1158,8 @@ impl DiskAPI for RemoteDisk {
         self.execute_with_timeout(
             || async {
                 let file_info = serde_json::to_string(&fi)?;
-                let mut client = node_service_time_out_client(&self.addr)
+                let mut client = self
+                    .get_client()
                     .await
                     .map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
                 let request = Request::new(VerifyFileRequest {

@@ -1160,7 +1188,8 @@ impl DiskAPI for RemoteDisk {
     async fn read_parts(&self, bucket: &str, paths: &[String]) -> Result<Vec<ObjectPartInfo>> {
         self.execute_with_timeout(
             || async {
-                let mut client = node_service_time_out_client(&self.addr)
+                let mut client = self
+                    .get_client()
                     .await
                     .map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
                 let request = Request::new(ReadPartsRequest {

@@ -1190,7 +1219,8 @@ impl DiskAPI for RemoteDisk {
         self.execute_with_timeout(
             || async {
                 let file_info = serde_json::to_string(&fi)?;
-                let mut client = node_service_time_out_client(&self.addr)
+                let mut client = self
+                    .get_client()
                     .await
                     .map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
                 let request = Request::new(CheckPartsRequest {

@@ -1222,7 +1252,8 @@ impl DiskAPI for RemoteDisk {
         self.execute_with_timeout(
             || async {
                 let read_multiple_req = serde_json::to_string(&req)?;
-                let mut client = node_service_time_out_client(&self.addr)
+                let mut client = self
+                    .get_client()
                     .await
                     .map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
                 let request = Request::new(ReadMultipleRequest {

@@ -1255,7 +1286,8 @@ impl DiskAPI for RemoteDisk {
 
         self.execute_with_timeout(
             || async {
-                let mut client = node_service_time_out_client(&self.addr)
+                let mut client = self
+                    .get_client()
                     .await
                     .map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
                 let request = Request::new(WriteAllRequest {

@@ -1284,7 +1316,8 @@ impl DiskAPI for RemoteDisk {
 
         self.execute_with_timeout(
             || async {
-                let mut client = node_service_time_out_client(&self.addr)
+                let mut client = self
+                    .get_client()
                     .await
                     .map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
                 let request = Request::new(ReadAllRequest {

@@ -1313,7 +1346,8 @@ impl DiskAPI for RemoteDisk {
        }
 
        let opts = serde_json::to_string(&opts)?;
-        let mut client = node_service_time_out_client(&self.addr)
+        let mut client = self
+            .get_client()
            .await
            .map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
        let request = Request::new(DiskInfoRequest {
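Every RemoteDisk hunk above makes the same substitution: each RPC method used to build its own client via node_service_time_out_client(&self.addr), and now all of them call a shared get_client() helper inside the existing execute_with_timeout wrapper. The sketch below condenses that shape into a self-contained program; Client, RpcError, and ping are stand-ins for the tonic-generated types and RPCs not reproduced here, so this is a minimal sketch of the pattern, not the actual implementation.

    use std::future::Future;
    use std::time::Duration;
    use tokio::time::timeout;

    // Stand-ins for the generated gRPC client and the crate's error type.
    #[derive(Clone)]
    struct Client {
        addr: String,
    }

    #[derive(Debug)]
    struct RpcError(String);

    struct RemoteDisk {
        addr: String,
        op_timeout: Duration,
    }

    impl RemoteDisk {
        // One place now owns client construction, mirroring the new `get_client()`.
        async fn get_client(&self) -> Result<Client, RpcError> {
            Ok(Client { addr: self.addr.clone() })
        }

        // Mirrors `execute_with_timeout`: wrap any RPC future in a deadline.
        async fn execute_with_timeout<F, Fut, T>(&self, f: F) -> Result<T, RpcError>
        where
            F: FnOnce() -> Fut,
            Fut: Future<Output = Result<T, RpcError>>,
        {
            timeout(self.op_timeout, f())
                .await
                .map_err(|_| RpcError("rpc timed out".into()))?
        }

        // Shape of every call site after the refactor: get client, issue RPC.
        async fn ping(&self) -> Result<String, RpcError> {
            self.execute_with_timeout(|| async {
                let client = self.get_client().await?;
                Ok(format!("pong from {}", client.addr))
            })
            .await
        }
    }

    #[tokio::main]
    async fn main() {
        let disk = RemoteDisk { addr: "node1:9000".into(), op_timeout: Duration::from_secs(1) };
        println!("{:?}", disk.ping().await);
    }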
@@ -12,24 +12,21 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
+use crate::rpc::client::{TonicInterceptor, gen_tonic_signature_interceptor, node_service_time_out_client};
 use async_trait::async_trait;
-use rustfs_protos::{
-    node_service_time_out_client,
-    proto_gen::node_service::{GenerallyLockRequest, PingRequest},
-};
+use rustfs_lock::types::{LockId, LockMetadata, LockPriority};
+use rustfs_lock::{LockClient, LockError, LockInfo, LockResponse, LockStats, LockStatus, Result};
+use rustfs_lock::{LockRequest, LockType};
+use rustfs_protos::proto_gen::node_service::node_service_client::NodeServiceClient;
+use rustfs_protos::proto_gen::node_service::{GenerallyLockRequest, PingRequest};
 use std::collections::HashMap;
 use std::sync::Arc;
 use tokio::sync::RwLock;
 use tonic::Request;
+use tonic::service::interceptor::InterceptedService;
+use tonic::transport::Channel;
 use tracing::info;
 
-use crate::{
-    error::{LockError, Result},
-    types::{LockId, LockInfo, LockRequest, LockResponse, LockStats},
-};
-
-use super::LockClient;
-
 /// Remote lock client implementation
 #[derive(Debug)]
 pub struct RemoteClient {
@@ -67,24 +64,28 @@ impl RemoteClient {
         LockRequest {
             lock_id: lock_id.clone(),
             resource: lock_id.resource.clone(),
-            lock_type: crate::types::LockType::Exclusive, // Type doesn't matter for unlock
+            lock_type: LockType::Exclusive, // Type doesn't matter for unlock
             owner: owner.to_string(),
             acquire_timeout: std::time::Duration::from_secs(30),
             ttl: std::time::Duration::from_secs(300),
-            metadata: crate::types::LockMetadata::default(),
-            priority: crate::types::LockPriority::Normal,
+            metadata: LockMetadata::default(),
+            priority: LockPriority::Normal,
             deadlock_detection: false,
         }
     }
+
+    pub async fn get_client(&self) -> Result<NodeServiceClient<InterceptedService<Channel, TonicInterceptor>>> {
+        node_service_time_out_client(&self.addr, TonicInterceptor::Signature(gen_tonic_signature_interceptor()))
+            .await
+            .map_err(|err| LockError::internal(format!("can not get client, err: {err}")))
+    }
 }
 
 #[async_trait]
 impl LockClient for RemoteClient {
     async fn acquire_exclusive(&self, request: &LockRequest) -> Result<LockResponse> {
         info!("remote acquire_exclusive for {}", request.resource);
-        let mut client = node_service_time_out_client(&self.addr)
-            .await
-            .map_err(|err| LockError::internal(format!("can not get client, err: {err}")))?;
+        let mut client = self.get_client().await?;
         let req = Request::new(GenerallyLockRequest {
             args: serde_json::to_string(&request)
                 .map_err(|e| LockError::internal(format!("Failed to serialize request: {e}")))?,
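The new get_client() returns NodeServiceClient<InterceptedService<Channel, TonicInterceptor>>, i.e. a tonic client whose every request passes through a signature interceptor. A hedged sketch of how such an interceptor attaches to a tonic-build generated client follows; the `sign` function, header name, and value are illustrative assumptions, and the client wiring is shown in comments because the generated NodeServiceClient type is not reproduced here.

    use tonic::{Request, Status};

    // Hypothetical signing interceptor, in the spirit of
    // TonicInterceptor::Signature(gen_tonic_signature_interceptor()).
    fn sign(mut req: Request<()>) -> Result<Request<()>, Status> {
        req.metadata_mut()
            .insert("x-rustfs-signature", "hmac-of-request".parse().unwrap());
        Ok(req)
    }

    // With a tonic-build generated client, the wiring is (sketch only):
    //
    //     let channel = tonic::transport::Channel::from_shared(addr)?.connect().await?;
    //     let client = NodeServiceClient::with_interceptor(channel, sign);
    //
    // yielding the NodeServiceClient<InterceptedService<Channel, _>> shape
    // that the diff's get_client() returns.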
@@ -111,7 +112,7 @@ impl LockClient for RemoteClient {
             id: request.lock_id.clone(),
             resource: request.resource.clone(),
             lock_type: request.lock_type,
-            status: crate::types::LockStatus::Acquired,
+            status: LockStatus::Acquired,
             owner: request.owner.clone(),
             acquired_at: std::time::SystemTime::now(),
             expires_at: std::time::SystemTime::now() + request.ttl,

@@ -133,9 +134,7 @@ impl LockClient for RemoteClient {
 
     async fn acquire_shared(&self, request: &LockRequest) -> Result<LockResponse> {
         info!("remote acquire_shared for {}", request.resource);
-        let mut client = node_service_time_out_client(&self.addr)
-            .await
-            .map_err(|err| LockError::internal(format!("can not get client, err: {err}")))?;
+        let mut client = self.get_client().await?;
         let req = Request::new(GenerallyLockRequest {
             args: serde_json::to_string(&request)
                 .map_err(|e| LockError::internal(format!("Failed to serialize request: {e}")))?,

@@ -162,7 +161,7 @@ impl LockClient for RemoteClient {
             id: request.lock_id.clone(),
             resource: request.resource.clone(),
             lock_type: request.lock_type,
-            status: crate::types::LockStatus::Acquired,
+            status: LockStatus::Acquired,
             owner: request.owner.clone(),
             acquired_at: std::time::SystemTime::now(),
             expires_at: std::time::SystemTime::now() + request.ttl,

@@ -195,9 +194,7 @@ impl LockClient for RemoteClient {
 
         let request_string = serde_json::to_string(&unlock_request)
             .map_err(|e| LockError::internal(format!("Failed to serialize request: {e}")))?;
-        let mut client = node_service_time_out_client(&self.addr)
-            .await
-            .map_err(|err| LockError::internal(format!("can not get client, err: {err}")))?;
+        let mut client = self.get_client().await?;
 
         // Try UnLock first (for exclusive locks)
         let req = Request::new(GenerallyLockRequest {

@@ -238,9 +235,7 @@ impl LockClient for RemoteClient {
     async fn refresh(&self, lock_id: &LockId) -> Result<bool> {
         info!("remote refresh for {}", lock_id);
         let refresh_request = self.create_unlock_request(lock_id, "remote");
-        let mut client = node_service_time_out_client(&self.addr)
-            .await
-            .map_err(|err| LockError::internal(format!("can not get client, err: {err}")))?;
+        let mut client = self.get_client().await?;
         let req = Request::new(GenerallyLockRequest {
             args: serde_json::to_string(&refresh_request)
                 .map_err(|e| LockError::internal(format!("Failed to serialize request: {e}")))?,

@@ -259,9 +254,7 @@ impl LockClient for RemoteClient {
     async fn force_release(&self, lock_id: &LockId) -> Result<bool> {
         info!("remote force_release for {}", lock_id);
         let force_request = self.create_unlock_request(lock_id, "remote");
-        let mut client = node_service_time_out_client(&self.addr)
-            .await
-            .map_err(|err| LockError::internal(format!("can not get client, err: {err}")))?;
+        let mut client = self.get_client().await?;
         let req = Request::new(GenerallyLockRequest {
             args: serde_json::to_string(&force_request)
                 .map_err(|e| LockError::internal(format!("Failed to serialize request: {e}")))?,

@@ -283,9 +276,7 @@ impl LockClient for RemoteClient {
         // Since there's no direct status query in the gRPC service,
         // we attempt a non-blocking lock acquisition to check if the resource is available
         let status_request = self.create_unlock_request(lock_id, "remote");
-        let mut client = node_service_time_out_client(&self.addr)
-            .await
-            .map_err(|err| LockError::internal(format!("can not get client, err: {err}")))?;
+        let mut client = self.get_client().await?;
 
         // Try to acquire a very short-lived lock to test availability
         let req = Request::new(GenerallyLockRequest {

@@ -316,14 +307,14 @@ impl LockClient for RemoteClient {
         Ok(Some(LockInfo {
             id: lock_id.clone(),
             resource: lock_id.as_str().to_string(),
-            lock_type: crate::types::LockType::Exclusive, // We can't know the exact type
-            status: crate::types::LockStatus::Acquired,
+            lock_type: LockType::Exclusive, // We can't know the exact type
+            status: LockStatus::Acquired,
             owner: "unknown".to_string(), // Remote client can't determine owner
             acquired_at: std::time::SystemTime::now(),
             expires_at: std::time::SystemTime::now() + std::time::Duration::from_secs(3600),
             last_refreshed: std::time::SystemTime::now(),
-            metadata: crate::types::LockMetadata::default(),
-            priority: crate::types::LockPriority::Normal,
+            metadata: LockMetadata::default(),
+            priority: LockPriority::Normal,
             wait_start_time: None,
         }))
     }

@@ -333,14 +324,14 @@ impl LockClient for RemoteClient {
         Ok(Some(LockInfo {
             id: lock_id.clone(),
             resource: lock_id.as_str().to_string(),
-            lock_type: crate::types::LockType::Exclusive,
-            status: crate::types::LockStatus::Acquired,
+            lock_type: LockType::Exclusive,
+            status: LockStatus::Acquired,
             owner: "unknown".to_string(),
             acquired_at: std::time::SystemTime::now(),
             expires_at: std::time::SystemTime::now() + std::time::Duration::from_secs(3600),
             last_refreshed: std::time::SystemTime::now(),
-            metadata: crate::types::LockMetadata::default(),
-            priority: crate::types::LockPriority::Normal,
+            metadata: LockMetadata::default(),
+            priority: LockPriority::Normal,
             wait_start_time: None,
         }))
     }

@@ -372,7 +363,7 @@ impl LockClient for RemoteClient {
 
     async fn is_online(&self) -> bool {
         // Use Ping interface to test if remote service is online
-        let mut client = match node_service_time_out_client(&self.addr).await {
+        let mut client = match self.get_client().await {
             Ok(client) => client,
             Err(_) => {
                 info!("remote client {} connection failed", self.addr);
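The is_online change keeps the same probe semantics: failing to construct a client counts as offline, exactly like a failed Ping RPC. A self-contained sketch of that shape, with Probe and get_client as stand-ins for the real gRPC pieces:

    // Connection and RPC failures both map to `false` rather than an error.
    struct Probe;

    impl Probe {
        async fn ping(&self) -> Result<(), String> {
            Ok(())
        }
    }

    async fn get_client(addr: &str) -> Result<Probe, String> {
        if addr.is_empty() { Err("bad addr".into()) } else { Ok(Probe) }
    }

    async fn is_online(addr: &str) -> bool {
        match get_client(addr).await {
            Ok(client) => client.ping().await.is_ok(),
            Err(_) => false,
        }
    }

    #[tokio::main]
    async fn main() {
        assert!(is_online("node1:9000").await);
        assert!(!is_online("").await);
    }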
@@ -266,10 +266,10 @@ impl SetDisks {
         let mut new_disk = Vec::with_capacity(disks.len());
 
         for disk in disks.iter() {
-            if let Some(d) = disk {
-                if d.is_online().await {
-                    new_disk.push(disk.clone());
-                }
+            if let Some(d) = disk
+                && d.is_online().await
+            {
+                new_disk.push(disk.clone());
             }
         }

@@ -1417,22 +1417,21 @@ impl SetDisks {
         let mut valid_obj_map = HashMap::new();
 
         for (i, op_hash) in meta_hashes.iter().enumerate() {
-            if let Some(hash) = op_hash {
-                if let Some(max_hash) = max_val {
-                    if hash == max_hash {
-                        if metas[i].is_valid() && !found {
-                            found_fi = Some(metas[i].clone());
-                            found = true;
-                        }
-
-                        let props = ObjProps {
-                            mod_time: metas[i].mod_time,
-                            num_versions: metas[i].num_versions,
-                        };
-
-                        *valid_obj_map.entry(props).or_insert(0) += 1;
-                    }
+            if let Some(hash) = op_hash
+                && let Some(max_hash) = max_val
+                && hash == max_hash
+            {
+                if metas[i].is_valid() && !found {
+                    found_fi = Some(metas[i].clone());
+                    found = true;
+                }
+
+                let props = ObjProps {
+                    mod_time: metas[i].mod_time,
+                    num_versions: metas[i].num_versions,
+                };
+
+                *valid_obj_map.entry(props).or_insert(0) += 1;
             }
         }
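Most SetDisks hunks in this commit are one mechanical transformation: collapsing nested if / if let blocks into let-chains, where `if let` patterns and boolean conditions are joined by `&&` in a single condition. Let-chains are stable as of Rust 1.88 on edition 2024. A minimal before/after, runnable on a 2024-edition toolchain:

    fn main() {
        let disk: Option<&str> = Some("disk1");
        let online = true;

        // Before: nested conditionals.
        if let Some(d) = disk {
            if online {
                println!("push {d}");
            }
        }

        // After: one flat chain, identical behavior (edition 2024 / Rust 1.88+).
        if let Some(d) = disk
            && online
        {
            println!("push {d}");
        }
    }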
@@ -1486,20 +1485,8 @@ impl SetDisks {
             let object = object.clone();
             let version_id = version_id.clone();
             tokio::spawn(async move {
-                if let Some(disk) = disk
-                    && disk.is_online().await
-                {
-                    if version_id.is_empty() {
-                        match disk.read_xl(&bucket, &object, read_data).await {
-                            Ok(info) => {
-                                let fi = file_info_from_raw(info, &bucket, &object, read_data).await?;
-                                Ok(fi)
-                            }
-                            Err(err) => Err(err),
-                        }
-                    } else {
-                        disk.read_version(&org_bucket, &bucket, &object, &version_id, &opts).await
-                    }
+                if let Some(disk) = disk {
+                    disk.read_version(&org_bucket, &bucket, &object, &version_id, &opts).await
                 } else {
                     Err(DiskError::DiskNotFound)
                 }

@@ -1627,7 +1614,7 @@ impl SetDisks {
         bucket: &str,
         object: &str,
         read_data: bool,
-        _incl_free_vers: bool,
+        incl_free_vers: bool,
     ) -> (Vec<FileInfo>, Vec<Option<DiskError>>) {
         let mut metadata_array = vec![None; fileinfos.len()];
         let mut meta_file_infos = vec![FileInfo::default(); fileinfos.len()];

@@ -1677,7 +1664,7 @@ impl SetDisks {
                     ..Default::default()
                 };
 
-                let finfo = match meta.into_fileinfo(bucket, object, "", true, true) {
+                let finfo = match meta.into_fileinfo(bucket, object, "", true, incl_free_vers, true) {
                     Ok(res) => res,
                     Err(err) => {
                         for item in errs.iter_mut() {

@@ -1704,7 +1691,7 @@ impl SetDisks {
 
         for (idx, meta_op) in metadata_array.iter().enumerate() {
             if let Some(meta) = meta_op {
-                match meta.into_fileinfo(bucket, object, vid.to_string().as_str(), read_data, true) {
+                match meta.into_fileinfo(bucket, object, vid.to_string().as_str(), read_data, incl_free_vers, true) {
                     Ok(res) => meta_file_infos[idx] = res,
                     Err(err) => errs[idx] = Some(err.into()),
                 }
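The first hunk above deletes the read_xl fast path and the is_online gate from the spawned read task, leaving disk presence as the only branch. A condensed, self-contained sketch of the resulting shape, with stand-in types:

    #[derive(Debug)]
    enum DiskError {
        DiskNotFound,
    }

    struct Disk;

    impl Disk {
        async fn read_version(&self, _bucket: &str, _object: &str) -> Result<String, DiskError> {
            Ok("file-info".into())
        }
    }

    // A missing disk maps straight to DiskNotFound; everything else goes
    // through read_version, which now owns any raw-xl handling internally.
    async fn read_task(disk: Option<Disk>, bucket: &str, object: &str) -> Result<String, DiskError> {
        if let Some(disk) = disk {
            disk.read_version(bucket, object).await
        } else {
            Err(DiskError::DiskNotFound)
        }
    }

    #[tokio::main]
    async fn main() {
        assert!(read_task(Some(Disk), "b", "o").await.is_ok());
        assert!(read_task(None, "b", "o").await.is_err());
    }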
@@ -3572,17 +3559,17 @@ impl SetDisks {
         let mut offline = 0;
         for (i, err) in errs.iter().enumerate() {
             let mut found = false;
-            if let Some(err) = err {
-                if err == &DiskError::DiskNotFound {
-                    found = true;
-                }
+            if let Some(err) = err
+                && err == &DiskError::DiskNotFound
+            {
+                found = true;
             }
             for p in data_errs_by_part {
-                if let Some(v) = p.1.get(i) {
-                    if *v == CHECK_PART_DISK_NOT_FOUND {
-                        found = true;
-                        break;
-                    }
+                if let Some(v) = p.1.get(i)
+                    && *v == CHECK_PART_DISK_NOT_FOUND
+                {
+                    found = true;
+                    break;
                 }
             }
 
@@ -3838,10 +3825,10 @@ impl ObjectIO for SetDisks {
             None
         };
 
-        if let Some(http_preconditions) = opts.http_preconditions.clone() {
-            if let Some(err) = self.check_write_precondition(bucket, object, opts).await {
-                return Err(err);
-            }
+        if let Some(http_preconditions) = opts.http_preconditions.clone()
+            && let Some(err) = self.check_write_precondition(bucket, object, opts).await
+        {
+            return Err(err);
         }
 
         let mut user_defined = opts.user_defined.clone();

@@ -4002,16 +3989,16 @@ impl ObjectIO for SetDisks {
             }
         }
 
-        if fi.checksum.is_none() {
-            if let Some(content_hash) = data.as_hash_reader().content_hash() {
-                fi.checksum = Some(content_hash.to_bytes(&[]));
-            }
+        if fi.checksum.is_none()
+            && let Some(content_hash) = data.as_hash_reader().content_hash()
+        {
+            fi.checksum = Some(content_hash.to_bytes(&[]));
         }
 
-        if let Some(sc) = user_defined.get(AMZ_STORAGE_CLASS) {
-            if sc == storageclass::STANDARD {
-                let _ = user_defined.remove(AMZ_STORAGE_CLASS);
-            }
+        if let Some(sc) = user_defined.get(AMZ_STORAGE_CLASS)
+            && sc == storageclass::STANDARD
+        {
+            let _ = user_defined.remove(AMZ_STORAGE_CLASS);
         }
 
         let mod_time = if let Some(mod_time) = opts.mod_time {

@@ -4062,11 +4049,11 @@ impl ObjectIO for SetDisks {
         self.delete_all(RUSTFS_META_TMP_BUCKET, &tmp_dir).await?;
 
         for (i, op_disk) in online_disks.iter().enumerate() {
-            if let Some(disk) = op_disk {
-                if disk.is_online().await {
-                    fi = parts_metadatas[i].clone();
-                    break;
-                }
+            if let Some(disk) = op_disk
+                && disk.is_online().await
+            {
+                fi = parts_metadatas[i].clone();
+                break;
             }
         }
 
@@ -5568,10 +5555,10 @@ impl StorageAPI for SetDisks {
             user_defined.insert("etag".to_owned(), etag.clone());
         }
 
-        if let Some(sc) = user_defined.get(AMZ_STORAGE_CLASS) {
-            if sc == storageclass::STANDARD {
-                let _ = user_defined.remove(AMZ_STORAGE_CLASS);
-            }
+        if let Some(sc) = user_defined.get(AMZ_STORAGE_CLASS)
+            && sc == storageclass::STANDARD
+        {
+            let _ = user_defined.remove(AMZ_STORAGE_CLASS);
         }
 
         let sc_parity_drives = {

@@ -5620,10 +5607,10 @@ impl StorageAPI for SetDisks {
             // TODO: get content-type
         }
 
-        if let Some(sc) = user_defined.get(AMZ_STORAGE_CLASS) {
-            if sc == storageclass::STANDARD {
-                let _ = user_defined.remove(AMZ_STORAGE_CLASS);
-            }
+        if let Some(sc) = user_defined.get(AMZ_STORAGE_CLASS)
+            && sc == storageclass::STANDARD
+        {
+            let _ = user_defined.remove(AMZ_STORAGE_CLASS);
        }
 
        if let Some(checksum) = &opts.want_checksum {

@@ -5925,14 +5912,14 @@ impl StorageAPI for SetDisks {
                 return Err(Error::InvalidPart(p.part_num, ext_part.etag.clone(), p.etag.clone().unwrap_or_default()));
             }
 
-            if checksum_type.full_object_requested() {
-                if let Err(err) = checksum.add_part(&cs, ext_part.actual_size) {
-                    error!(
-                        "complete_multipart_upload checksum add_part failed part_id={}, bucket={}, object={}",
-                        p.part_num, bucket, object
-                    );
-                    return Err(Error::InvalidPart(p.part_num, ext_part.etag.clone(), p.etag.clone().unwrap_or_default()));
-                }
+            if checksum_type.full_object_requested()
+                && let Err(err) = checksum.add_part(&cs, ext_part.actual_size)
+            {
+                error!(
+                    "complete_multipart_upload checksum add_part failed part_id={}, bucket={}, object={}",
+                    p.part_num, bucket, object
+                );
+                return Err(Error::InvalidPart(p.part_num, ext_part.etag.clone(), p.etag.clone().unwrap_or_default()));
             }
 
             checksum_combined.extend_from_slice(cs.raw.as_slice());

@@ -6112,11 +6099,11 @@ impl StorageAPI for SetDisks {
         });
 
         for (i, op_disk) in online_disks.iter().enumerate() {
-            if let Some(disk) = op_disk {
-                if disk.is_online().await {
-                    fi = parts_metadatas[i].clone();
-                    break;
-                }
+            if let Some(disk) = op_disk
+                && disk.is_online().await
+            {
+                fi = parts_metadatas[i].clone();
+                break;
             }
         }
 
@@ -6210,16 +6197,15 @@ impl StorageAPI for SetDisks {
         let _write_lock_guard = if !opts.no_lock {
             let key = rustfs_lock::fast_lock::types::ObjectKey::new(bucket, object);
             let mut skip_lock = false;
-            if let Some(lock_info) = self.fast_lock_manager.get_lock_info(&key) {
-                if lock_info.owner.as_ref() == self.locker_owner.as_str()
-                    && matches!(lock_info.mode, rustfs_lock::fast_lock::types::LockMode::Exclusive)
-                {
-                    debug!(
-                        "Reusing existing exclusive lock for heal operation on {}/{} held by {}",
-                        bucket, object, self.locker_owner
-                    );
-                    skip_lock = true;
-                }
+            if let Some(lock_info) = self.fast_lock_manager.get_lock_info(&key)
+                && lock_info.owner.as_ref() == self.locker_owner.as_str()
+                && matches!(lock_info.mode, rustfs_lock::fast_lock::types::LockMode::Exclusive)
+            {
+                debug!(
+                    "Reusing existing exclusive lock for heal operation on {}/{} held by {}",
+                    bucket, object, self.locker_owner
+                );
+                skip_lock = true;
            }
            if skip_lock {
                None
@@ -6563,14 +6549,14 @@ async fn disks_with_all_parts(
         if err.is_some() {
             let part_err = conv_part_err_to_int(err);
             for p in 0..latest_meta.parts.len() {
-                if let Some(vec) = data_errs_by_part.get_mut(&p) {
-                    if index < vec.len() {
-                        info!(
-                            "data_errs_by_part: copy meta errors to part errors: object_name={}, index: {index}, part: {p}, part_err: {part_err}",
-                            object_name
-                        );
-                        vec[index] = part_err;
-                    }
+                if let Some(vec) = data_errs_by_part.get_mut(&p)
+                    && index < vec.len()
+                {
+                    info!(
+                        "data_errs_by_part: copy meta errors to part errors: object_name={}, index: {index}, part: {p}, part_err: {part_err}",
+                        object_name
+                    );
+                    vec[index] = part_err;
                 }
             }

@@ -6609,14 +6595,14 @@ async fn disks_with_all_parts(
                 .await
                 .err();
 
-            if let Some(vec) = data_errs_by_part.get_mut(&0) {
-                if index < vec.len() {
-                    vec[index] = conv_part_err_to_int(&verify_err.map(|e| e.into()));
-                    info!(
-                        "data_errs_by_part:bitrot check result: object_name={}, index: {index}, result: {}",
-                        object_name, vec[index]
-                    );
-                }
+            if let Some(vec) = data_errs_by_part.get_mut(&0)
+                && index < vec.len()
+            {
+                vec[index] = conv_part_err_to_int(&verify_err.map(|e| e.into()));
+                info!(
+                    "data_errs_by_part:bitrot check result: object_name={}, index: {index}, result: {}",
+                    object_name, vec[index]
+                );
            }
            continue;

@@ -6654,32 +6640,32 @@ async fn disks_with_all_parts(
 
         // Update dataErrsByPart for all parts
         for p in 0..latest_meta.parts.len() {
-            if let Some(vec) = data_errs_by_part.get_mut(&p) {
-                if index < vec.len() {
-                    if verify_err.is_some() {
-                        info!(
-                            "data_errs_by_part: verify_err: object_name={}, index: {index}, part: {p}, verify_err: {verify_err:?}",
-                            object_name
-                        );
-                        vec[index] = conv_part_err_to_int(&verify_err.clone());
-                    } else {
-                        // Fix: verify_resp.results length is based on meta.parts, not latest_meta.parts
-                        // We need to check bounds to avoid panic
-                        if p < verify_resp.results.len() {
-                            info!(
-                                "data_errs_by_part: update data_errs_by_part: object_name={}, index: {}, part: {}, verify_resp.results: {:?}",
-                                object_name, index, p, verify_resp.results[p]
-                            );
-                            vec[index] = verify_resp.results[p];
-                        } else {
-                            debug!(
-                                "data_errs_by_part: verify_resp.results length mismatch: expected at least {}, got {}, object_name={}, index: {index}, part: {p}",
-                                p + 1,
-                                verify_resp.results.len(),
-                                object_name
-                            );
-                            vec[index] = CHECK_PART_SUCCESS;
-                        }
-                    }
-                }
+            if let Some(vec) = data_errs_by_part.get_mut(&p)
+                && index < vec.len()
+            {
+                if verify_err.is_some() {
+                    info!(
+                        "data_errs_by_part: verify_err: object_name={}, index: {index}, part: {p}, verify_err: {verify_err:?}",
+                        object_name
+                    );
+                    vec[index] = conv_part_err_to_int(&verify_err.clone());
+                } else {
+                    // Fix: verify_resp.results length is based on meta.parts, not latest_meta.parts
+                    // We need to check bounds to avoid panic
+                    if p < verify_resp.results.len() {
+                        info!(
+                            "data_errs_by_part: update data_errs_by_part: object_name={}, index: {}, part: {}, verify_resp.results: {:?}",
+                            object_name, index, p, verify_resp.results[p]
+                        );
+                        vec[index] = verify_resp.results[p];
+                    } else {
+                        debug!(
+                            "data_errs_by_part: verify_resp.results length mismatch: expected at least {}, got {}, object_name={}, index: {index}, part: {p}",
+                            p + 1,
+                            verify_resp.results.len(),
+                            object_name
+                        );
+                        vec[index] = CHECK_PART_SUCCESS;
+                    }
+                }
             }
         }
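The bounds comments in the hunk above carry the substantive fix: verify_resp.results is sized from the remote disk's own metadata, which may list fewer parts than latest_meta, so indexing it unguarded can panic. A runnable sketch of the guarded update, with a plain HashMap<usize, Vec<i32>> standing in for data_errs_by_part (let-chains, edition 2024):

    use std::collections::HashMap;

    fn set_part_err(
        data_errs_by_part: &mut HashMap<usize, Vec<i32>>,
        part: usize,
        index: usize,
        results: &[i32],
        success: i32,
    ) {
        if let Some(vec) = data_errs_by_part.get_mut(&part)
            && index < vec.len()
        {
            // Index `results[part]` only after the bounds check, falling back
            // to a success marker when the response covers fewer parts.
            vec[index] = if part < results.len() { results[part] } else { success };
        }
    }

    fn main() {
        let mut m = HashMap::from([(0usize, vec![0, 0, 0])]);
        set_part_err(&mut m, 0, 1, &[7], 1);
        assert_eq!(m[&0], vec![0, 7, 0]);
    }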
@@ -6689,14 +6675,14 @@ async fn disks_with_all_parts(
     // Build dataErrsByDisk from dataErrsByPart
     for (part, disks) in data_errs_by_part.iter() {
         for (disk_idx, disk_err) in disks.iter().enumerate() {
-            if let Some(vec) = data_errs_by_disk.get_mut(&disk_idx) {
-                if *part < vec.len() {
-                    vec[*part] = *disk_err;
-                    info!(
-                        "data_errs_by_disk: update data_errs_by_disk: object_name={}, part: {part}, disk_idx: {disk_idx}, disk_err: {disk_err}",
-                        object_name,
-                    );
-                }
+            if let Some(vec) = data_errs_by_disk.get_mut(&disk_idx)
+                && *part < vec.len()
+            {
+                vec[*part] = *disk_err;
+                info!(
+                    "data_errs_by_disk: update data_errs_by_disk: object_name={}, part: {part}, disk_idx: {disk_idx}, disk_err: {disk_err}",
+                    object_name,
+                );
+            }
         }
     }

@@ -6738,10 +6724,10 @@ pub fn should_heal_object_on_disk(
     meta: &FileInfo,
     latest_meta: &FileInfo,
 ) -> (bool, Option<DiskError>) {
-    if let Some(err) = err {
-        if err == &DiskError::FileNotFound || err == &DiskError::FileVersionNotFound || err == &DiskError::FileCorrupt {
-            return (true, Some(err.clone()));
-        }
+    if let Some(err) = err
+        && (err == &DiskError::FileNotFound || err == &DiskError::FileVersionNotFound || err == &DiskError::FileCorrupt)
+    {
+        return (true, Some(err.clone()));
    }
 
    if latest_meta.volume != meta.volume

@@ -6906,15 +6892,15 @@ pub fn e_tag_matches(etag: &str, condition: &str) -> bool {
 pub fn should_prevent_write(oi: &ObjectInfo, if_none_match: Option<String>, if_match: Option<String>) -> bool {
     match &oi.etag {
         Some(etag) => {
-            if let Some(if_none_match) = if_none_match {
-                if e_tag_matches(etag, &if_none_match) {
-                    return true;
-                }
+            if let Some(if_none_match) = if_none_match
+                && e_tag_matches(etag, &if_none_match)
+            {
+                return true;
             }
-            if let Some(if_match) = if_match {
-                if !e_tag_matches(etag, &if_match) {
-                    return true;
-                }
+            if let Some(if_match) = if_match
+                && !e_tag_matches(etag, &if_match)
+            {
+                return true;
             }
             false
         }
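should_prevent_write encodes S3 conditional-write semantics: If-None-Match blocks the write when the ETag does match, while If-Match blocks it when the ETag does not. A runnable sketch of the decision, where the simplified matcher is a stand-in for the real e_tag_matches (quoting rules and `*` handling here are illustrative assumptions):

    fn e_tag_matches(etag: &str, condition: &str) -> bool {
        condition == "*" || condition.trim_matches('"') == etag.trim_matches('"')
    }

    fn should_prevent_write(etag: &str, if_none_match: Option<&str>, if_match: Option<&str>) -> bool {
        if let Some(c) = if_none_match
            && e_tag_matches(etag, c)
        {
            return true; // object already exists / matches: refuse the write
        }
        if let Some(c) = if_match
            && !e_tag_matches(etag, c)
        {
            return true; // caller demanded a specific ETag that isn't there
        }
        false
    }

    fn main() {
        assert!(should_prevent_write("abc", Some("*"), None));
        assert!(!should_prevent_write("abc", None, Some("\"abc\"")));
        assert!(should_prevent_write("abc", None, Some("\"xyz\"")));
    }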
@@ -491,12 +491,12 @@ impl StorageAPI for Sets {
         let cp_src_dst_same = path_join_buf(&[src_bucket, src_object]) == path_join_buf(&[dst_bucket, dst_object]);
 
         if cp_src_dst_same {
-            if let (Some(src_vid), Some(dst_vid)) = (&src_opts.version_id, &dst_opts.version_id) {
-                if src_vid == dst_vid {
-                    return src_set
-                        .copy_object(src_bucket, src_object, dst_bucket, dst_object, src_info, src_opts, dst_opts)
-                        .await;
-                }
+            if let (Some(src_vid), Some(dst_vid)) = (&src_opts.version_id, &dst_opts.version_id)
+                && src_vid == dst_vid
+            {
+                return src_set
+                    .copy_object(src_bucket, src_object, dst_bucket, dst_object, src_info, src_opts, dst_opts)
+                    .await;
             }
 
             if !dst_opts.versioned && src_opts.version_id.is_none() {

@@ -823,10 +823,10 @@ impl StorageAPI for Sets {
                 Ok((m, n)) => (m, n),
                 Err(_) => continue,
             };
-            if let Some(set) = self.disk_set.get(m) {
-                if let Some(Some(disk)) = set.disks.read().await.get(n) {
-                    let _ = disk.close().await;
-                }
+            if let Some(set) = self.disk_set.get(m)
+                && let Some(Some(disk)) = set.disks.read().await.get(n)
+            {
+                let _ = disk.close().await;
            }
 
            if let Some(Some(disk)) = disks.get(index) {

@@ -980,25 +980,24 @@ fn new_heal_format_sets(
     let mut current_disks_info = vec![vec![DiskInfo::default(); set_drive_count]; set_count];
     for (i, set) in ref_format.erasure.sets.iter().enumerate() {
         for j in 0..set.len() {
-            if let Some(Some(err)) = errs.get(i * set_drive_count + j) {
-                if *err == DiskError::UnformattedDisk {
-                    let mut fm = FormatV3::new(set_count, set_drive_count);
-                    fm.id = ref_format.id;
-                    fm.format = ref_format.format.clone();
-                    fm.version = ref_format.version.clone();
-                    fm.erasure.this = ref_format.erasure.sets[i][j];
-                    fm.erasure.sets = ref_format.erasure.sets.clone();
-                    fm.erasure.version = ref_format.erasure.version.clone();
-                    fm.erasure.distribution_algo = ref_format.erasure.distribution_algo.clone();
-                    new_formats[i][j] = Some(fm);
-                }
+            if let Some(Some(err)) = errs.get(i * set_drive_count + j)
+                && *err == DiskError::UnformattedDisk
+            {
+                let mut fm = FormatV3::new(set_count, set_drive_count);
+                fm.id = ref_format.id;
+                fm.format = ref_format.format.clone();
+                fm.version = ref_format.version.clone();
+                fm.erasure.this = ref_format.erasure.sets[i][j];
+                fm.erasure.sets = ref_format.erasure.sets.clone();
+                fm.erasure.version = ref_format.erasure.version.clone();
+                fm.erasure.distribution_algo = ref_format.erasure.distribution_algo.clone();
+                new_formats[i][j] = Some(fm);
             }
-            if let (Some(format), None) = (&formats[i * set_drive_count + j], &errs[i * set_drive_count + j]) {
-                if let Some(info) = &format.disk_info {
-                    if !info.endpoint.is_empty() {
-                        current_disks_info[i][j] = info.clone();
-                    }
-                }
+            if let (Some(format), None) = (&formats[i * set_drive_count + j], &errs[i * set_drive_count + j])
+                && let Some(info) = &format.disk_info
+                && !info.endpoint.is_empty()
+            {
+                current_disks_info[i][j] = info.clone();
            }
        }
    }
@@ -16,6 +16,17 @@
 
 use crate::bucket::lifecycle::bucket_lifecycle_ops::init_background_expiry;
 use crate::bucket::metadata_sys::{self, set_bucket_metadata};
+use crate::bucket::utils::check_abort_multipart_args;
+use crate::bucket::utils::check_complete_multipart_args;
+use crate::bucket::utils::check_copy_obj_args;
+use crate::bucket::utils::check_del_obj_args;
+use crate::bucket::utils::check_get_obj_args;
+use crate::bucket::utils::check_list_multipart_args;
+use crate::bucket::utils::check_list_parts_args;
+use crate::bucket::utils::check_new_multipart_args;
+use crate::bucket::utils::check_object_args;
+use crate::bucket::utils::check_put_object_args;
+use crate::bucket::utils::check_put_object_part_args;
 use crate::bucket::utils::{check_valid_bucket_name, check_valid_bucket_name_strict, is_meta_bucketname};
 use crate::config::GLOBAL_STORAGE_CLASS;
 use crate::config::storageclass;

@@ -23,8 +34,8 @@ use crate::disk::endpoint::{Endpoint, EndpointType};
 use crate::disk::{DiskAPI, DiskInfo, DiskInfoOptions};
 use crate::error::{Error, Result};
 use crate::error::{
-    StorageError, is_err_bucket_exists, is_err_invalid_upload_id, is_err_object_not_found, is_err_read_quorum,
-    is_err_version_not_found, to_object_err,
+    StorageError, is_err_bucket_exists, is_err_bucket_not_found, is_err_invalid_upload_id, is_err_object_not_found,
+    is_err_read_quorum, is_err_version_not_found, to_object_err,
 };
 use crate::global::{
     DISK_ASSUME_UNKNOWN_SIZE, DISK_FILL_FRACTION, DISK_MIN_INODES, DISK_RESERVE_FRACTION, GLOBAL_BOOT_TIME,

@@ -59,7 +70,7 @@ use rustfs_common::heal_channel::{HealItemType, HealOpts};
 use rustfs_common::{GLOBAL_LOCAL_NODE_NAME, GLOBAL_RUSTFS_HOST, GLOBAL_RUSTFS_PORT};
 use rustfs_filemeta::FileInfo;
 use rustfs_madmin::heal_commands::HealResultItem;
-use rustfs_utils::path::{SLASH_SEPARATOR, decode_dir_object, encode_dir_object, path_join_buf};
+use rustfs_utils::path::{decode_dir_object, encode_dir_object, path_join_buf};
 use s3s::dto::{BucketVersioningStatus, ObjectLockConfiguration, ObjectLockEnabled, VersioningConfiguration};
 use std::cmp::Ordering;
 use std::net::SocketAddr;

@@ -75,6 +86,46 @@ use tokio_util::sync::CancellationToken;
 use tracing::{debug, error, info, instrument, warn};
 use uuid::Uuid;
 
+/// Check if a directory contains any xl.meta files (indicating actual S3 objects)
+/// This is used to determine if a bucket is empty for deletion purposes.
+async fn has_xlmeta_files(path: &std::path::Path) -> bool {
+    use crate::disk::STORAGE_FORMAT_FILE;
+    use tokio::fs;
+
+    let mut stack = vec![path.to_path_buf()];
+
+    while let Some(current_path) = stack.pop() {
+        let mut entries = match fs::read_dir(&current_path).await {
+            Ok(entries) => entries,
+            Err(_) => continue,
+        };
+
+        while let Ok(Some(entry)) = entries.next_entry().await {
+            let file_name = entry.file_name();
+            let file_name_str = file_name.to_string_lossy();
+
+            // Skip hidden files/directories (like .rustfs.sys)
+            if file_name_str.starts_with('.') {
+                continue;
+            }
+
+            // Check if this is an xl.meta file
+            if file_name_str == STORAGE_FORMAT_FILE {
+                return true;
+            }
+
+            // If it's a directory, add to stack for further exploration
+            if let Ok(file_type) = entry.file_type().await
+                && file_type.is_dir()
+            {
+                stack.push(entry.path());
+            }
+        }
+    }
+
+    false
+}
+
 const MAX_UPLOADS_LIST: usize = 10000;
 
 #[derive(Debug)]
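A hypothetical exercise of the new has_xlmeta_files scan: it treats a bucket as non-empty only if some descendant file is named xl.meta (STORAGE_FORMAT_FILE), skipping dot-prefixed entries such as .rustfs.sys, and ignores empty directories left behind after deletes. The paths below are illustrative only; the helper itself lives inside the crate and is referenced here in comments.

    use tokio::fs;

    #[tokio::main]
    async fn main() -> std::io::Result<()> {
        let root = std::env::temp_dir().join("bucket-scan-demo");
        // Hidden tree: the scan skips anything starting with '.'.
        fs::create_dir_all(root.join(".rustfs.sys/obj")).await?;
        // An object directory with its xl.meta marker.
        fs::create_dir_all(root.join("photos/cat.jpg")).await?;
        fs::write(root.join("photos/cat.jpg/xl.meta"), b"").await?;
        // has_xlmeta_files(&root).await would return true here, and false once
        // photos/ is removed, even if empty directories remain.
        Ok(())
    }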
@@ -243,10 +294,10 @@ impl ECStore {
         });
 
         // Only set it when the global deployment ID is not yet configured
-        if let Some(dep_id) = deployment_id {
-            if get_global_deployment_id().is_none() {
-                set_global_deployment_id(dep_id);
-            }
+        if let Some(dep_id) = deployment_id
+            && get_global_deployment_id().is_none()
+        {
+            set_global_deployment_id(dep_id);
         }
 
         let wait_sec = 5;

@@ -768,10 +819,10 @@ impl ECStore {
                     def_pool = pinfo.clone();
                     has_def_pool = true;
                     // https://docs.aws.amazon.com/AmazonS3/latest/userguide/conditional-deletes.html
-                    if is_err_object_not_found(err) {
-                        if let Err(err) = opts.precondition_check(&pinfo.object_info) {
-                            return Err(err.clone());
-                        }
+                    if is_err_object_not_found(err)
+                        && let Err(err) = opts.precondition_check(&pinfo.object_info)
+                    {
+                        return Err(err.clone());
                    }
 
                    if !is_err_object_not_found(err) && !is_err_version_not_found(err) {

@@ -885,13 +936,14 @@ impl ECStore {
                 return Ok((obj, res.idx));
             }
 
-            if let Some(err) = res.err {
-                if !is_err_object_not_found(&err) && !is_err_version_not_found(&err) {
-                    return Err(err);
-                }
-
-                // TODO: delete marker
-            }
+            if let Some(err) = res.err
+                && !is_err_object_not_found(&err)
+                && !is_err_version_not_found(&err)
+            {
+                return Err(err);
+            }
+
+            // TODO: delete marker
         }
 
         let object = decode_dir_object(object);

@@ -918,12 +970,12 @@ impl ECStore {
         let mut derrs = Vec::new();
 
         for pe in errs.iter() {
-            if let Some(err) = &pe.err {
-                if err == &StorageError::ErasureWriteQuorum {
-                    objs.push(None);
-                    derrs.push(Some(StorageError::ErasureWriteQuorum));
-                    continue;
-                }
+            if let Some(err) = &pe.err
+                && err == &StorageError::ErasureWriteQuorum
+            {
+                objs.push(None);
+                derrs.push(Some(StorageError::ErasureWriteQuorum));
+                continue;
             }
 
             if let Some(idx) = pe.index {

@@ -1226,14 +1278,14 @@ impl StorageAPI for ECStore {
 
     #[instrument(skip(self))]
     async fn make_bucket(&self, bucket: &str, opts: &MakeBucketOptions) -> Result<()> {
-        if !is_meta_bucketname(bucket) {
-            if let Err(err) = check_valid_bucket_name_strict(bucket) {
-                return Err(StorageError::BucketNameInvalid(err.to_string()));
-            }
-
-            // TODO: nslock
+        if !is_meta_bucketname(bucket)
+            && let Err(err) = check_valid_bucket_name_strict(bucket)
+        {
+            return Err(StorageError::BucketNameInvalid(err.to_string()));
         }
 
+        // TODO: nslock
+
         if let Err(err) = self.peer_sys.make_bucket(bucket, opts).await {
             let err = to_object_err(err.into(), vec![bucket]);
             if !is_err_bucket_exists(&err) {
@@ -1311,14 +1363,36 @@ impl StorageAPI for ECStore {
 
         // TODO: nslock
 
-        let mut opts = opts.clone();
+        // Check bucket exists before deletion (per S3 API spec)
+        // If bucket doesn't exist, return NoSuchBucket error
+        if let Err(err) = self.peer_sys.get_bucket_info(bucket, &BucketOptions::default()).await {
+            // Convert DiskError to StorageError for comparison
+            let storage_err: StorageError = err.into();
+            if is_err_bucket_not_found(&storage_err) {
+                return Err(StorageError::BucketNotFound(bucket.to_string()));
+            }
+            return Err(to_object_err(storage_err, vec![bucket]));
+        }
+
+        // Check bucket is empty before deletion (per S3 API spec)
+        // If bucket is not empty (contains actual objects with xl.meta files) and force
+        // is not set, return BucketNotEmpty error.
+        // Note: Empty directories (left after object deletion) should NOT count as objects.
         if !opts.force {
-            // FIXME: check bucket exists
-            opts.force = true
+            let local_disks = all_local_disk().await;
+            for disk in local_disks.iter() {
+                // Check if bucket directory contains any xl.meta files (actual objects)
+                // We recursively scan for xl.meta files to determine if bucket has objects
+                // Use the disk's root path to construct bucket path
+                let bucket_path = disk.path().join(bucket);
+                if has_xlmeta_files(&bucket_path).await {
+                    return Err(StorageError::BucketNotEmpty(bucket.to_string()));
+                }
+            }
         }
 
         self.peer_sys
-            .delete_bucket(bucket, &opts)
+            .delete_bucket(bucket, opts)
             .await
             .map_err(|e| to_object_err(e.into(), vec![bucket]))?;

@@ -1427,12 +1501,12 @@ impl StorageAPI for ECStore {
         let pool_idx = self.get_pool_idx_no_lock(src_bucket, &src_object, src_info.size).await?;
 
         if cp_src_dst_same {
-            if let (Some(src_vid), Some(dst_vid)) = (&src_opts.version_id, &dst_opts.version_id) {
-                if src_vid == dst_vid {
-                    return self.pools[pool_idx]
-                        .copy_object(src_bucket, &src_object, dst_bucket, &dst_object, src_info, src_opts, dst_opts)
-                        .await;
-                }
+            if let (Some(src_vid), Some(dst_vid)) = (&src_opts.version_id, &dst_opts.version_id)
+                && src_vid == dst_vid
+            {
+                return self.pools[pool_idx]
+                    .copy_object(src_bucket, &src_object, dst_bucket, &dst_object, src_info, src_opts, dst_opts)
+                    .await;
             }
 
             if !dst_opts.versioned && src_opts.version_id.is_none() {
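The rewritten delete_bucket enforces S3 ordering: existence first (NoSuchBucket), then emptiness unless force is set (BucketNotEmpty), then the actual delete. A condensed sketch of that guard sequence; bucket_exists and bucket_has_objects are hypothetical stand-ins for the peer_sys and xl.meta checks above.

    #[derive(Debug)]
    enum StorageError {
        BucketNotFound(String),
        BucketNotEmpty(String),
    }

    async fn delete_bucket(bucket: &str, force: bool) -> Result<(), StorageError> {
        if !bucket_exists(bucket).await {
            return Err(StorageError::BucketNotFound(bucket.to_string()));
        }
        if !force && bucket_has_objects(bucket).await {
            return Err(StorageError::BucketNotEmpty(bucket.to_string()));
        }
        // ... proceed with the real peer_sys.delete_bucket(bucket, opts)
        Ok(())
    }

    async fn bucket_exists(_b: &str) -> bool { true }       // stand-in
    async fn bucket_has_objects(_b: &str) -> bool { false } // stand-in

    #[tokio::main]
    async fn main() {
        assert!(delete_bucket("photos", false).await.is_ok());
    }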
@@ -2340,180 +2414,14 @@ async fn init_local_peer(endpoint_pools: &EndpointServerPools, host: &String, po
     *GLOBAL_LOCAL_NODE_NAME.write().await = peer_set[0].clone();
 }
 
-pub fn is_valid_object_prefix(_object: &str) -> bool {
-    // Implement object prefix validation
-    // !object.is_empty() // Placeholder
-    // FIXME: TODO:
-    true
-}
-
-fn is_valid_object_name(object: &str) -> bool {
-    // Implement object name validation
-    !object.is_empty() // Placeholder
-}
-
-fn check_object_name_for_length_and_slash(bucket: &str, object: &str) -> Result<()> {
-    if object.len() > 1024 {
-        return Err(StorageError::ObjectNameTooLong(bucket.to_owned(), object.to_owned()));
-    }
-
-    if object.starts_with(SLASH_SEPARATOR) {
-        return Err(StorageError::ObjectNamePrefixAsSlash(bucket.to_owned(), object.to_owned()));
-    }
-
-    #[cfg(target_os = "windows")]
-    {
-        if object.contains(':')
-            || object.contains('*')
-            || object.contains('?')
-            || object.contains('"')
-            || object.contains('|')
-            || object.contains('<')
-            || object.contains('>')
-        // || object.contains('\\')
-        {
-            return Err(StorageError::ObjectNameInvalid(bucket.to_owned(), object.to_owned()));
-        }
-    }
-
-    Ok(())
-}
-
-fn check_copy_obj_args(bucket: &str, object: &str) -> Result<()> {
-    check_bucket_and_object_names(bucket, object)
-}
-
-fn check_get_obj_args(bucket: &str, object: &str) -> Result<()> {
-    check_bucket_and_object_names(bucket, object)
-}
-
-fn check_del_obj_args(bucket: &str, object: &str) -> Result<()> {
-    check_bucket_and_object_names(bucket, object)
-}
-
-fn check_bucket_and_object_names(bucket: &str, object: &str) -> Result<()> {
-    if !is_meta_bucketname(bucket) && check_valid_bucket_name_strict(bucket).is_err() {
-        return Err(StorageError::BucketNameInvalid(bucket.to_string()));
-    }
-
-    if object.is_empty() {
-        return Err(StorageError::ObjectNameInvalid(bucket.to_string(), object.to_string()));
-    }
-
-    if !is_valid_object_prefix(object) {
-        return Err(StorageError::ObjectNameInvalid(bucket.to_string(), object.to_string()));
-    }
-
-    // if cfg!(target_os = "windows") && object.contains('\\') {
-    //     return Err(StorageError::ObjectNameInvalid(bucket.to_string(), object.to_string()));
-    // }
-
-    Ok(())
-}
-
-pub fn check_list_objs_args(bucket: &str, prefix: &str, _marker: &Option<String>) -> Result<()> {
-    if !is_meta_bucketname(bucket) && check_valid_bucket_name_strict(bucket).is_err() {
-        return Err(StorageError::BucketNameInvalid(bucket.to_string()));
-    }
-
-    if !is_valid_object_prefix(prefix) {
-        return Err(StorageError::ObjectNameInvalid(bucket.to_string(), prefix.to_string()));
-    }
-
-    Ok(())
-}
-
-fn check_list_multipart_args(
-    bucket: &str,
-    prefix: &str,
-    key_marker: &Option<String>,
-    upload_id_marker: &Option<String>,
-    _delimiter: &Option<String>,
-) -> Result<()> {
-    check_list_objs_args(bucket, prefix, key_marker)?;
-
-    if let Some(upload_id_marker) = upload_id_marker {
-        if let Some(key_marker) = key_marker {
-            if key_marker.ends_with('/') {
-                return Err(StorageError::InvalidUploadIDKeyCombination(
-                    upload_id_marker.to_string(),
-                    key_marker.to_string(),
-                ));
-            }
-        }
-
-        if let Err(_e) = base64_simd::URL_SAFE_NO_PAD.decode_to_vec(upload_id_marker.as_bytes()) {
-            return Err(StorageError::MalformedUploadID(upload_id_marker.to_owned()));
-        }
-    }
-
-    Ok(())
-}
-
-fn check_object_args(bucket: &str, object: &str) -> Result<()> {
-    if !is_meta_bucketname(bucket) && check_valid_bucket_name_strict(bucket).is_err() {
-        return Err(StorageError::BucketNameInvalid(bucket.to_string()));
-    }
-
-    check_object_name_for_length_and_slash(bucket, object)?;
-
-    if !is_valid_object_name(object) {
-        return Err(StorageError::ObjectNameInvalid(bucket.to_string(), object.to_string()));
-    }
-
-    Ok(())
-}
-
-fn check_new_multipart_args(bucket: &str, object: &str) -> Result<()> {
-    check_object_args(bucket, object)
-}
-
-fn check_multipart_object_args(bucket: &str, object: &str, upload_id: &str) -> Result<()> {
-    if let Err(e) = base64_simd::URL_SAFE_NO_PAD.decode_to_vec(upload_id.as_bytes()) {
-        return Err(StorageError::MalformedUploadID(format!("{bucket}/{object}-{upload_id},err:{e}")));
-    };
-    check_object_args(bucket, object)
-}
-
-fn check_put_object_part_args(bucket: &str, object: &str, upload_id: &str) -> Result<()> {
-    check_multipart_object_args(bucket, object, upload_id)
-}
-
-fn check_list_parts_args(bucket: &str, object: &str, upload_id: &str) -> Result<()> {
-    check_multipart_object_args(bucket, object, upload_id)
-}
-
-fn check_complete_multipart_args(bucket: &str, object: &str, upload_id: &str) -> Result<()> {
-    check_multipart_object_args(bucket, object, upload_id)
-}
-
-fn check_abort_multipart_args(bucket: &str, object: &str, upload_id: &str) -> Result<()> {
-    check_multipart_object_args(bucket, object, upload_id)
-}
-
-#[instrument(level = "debug")]
-fn check_put_object_args(bucket: &str, object: &str) -> Result<()> {
-    if !is_meta_bucketname(bucket) && check_valid_bucket_name_strict(bucket).is_err() {
-        return Err(StorageError::BucketNameInvalid(bucket.to_string()));
-    }
-
-    check_object_name_for_length_and_slash(bucket, object)?;
-
-    if object.is_empty() || !is_valid_object_prefix(object) {
-        return Err(StorageError::ObjectNameInvalid(bucket.to_string(), object.to_string()));
-    }
-
-    Ok(())
-}
-
 pub async fn get_disk_infos(disks: &[Option<DiskStore>]) -> Vec<Option<DiskInfo>> {
     let opts = &DiskInfoOptions::default();
     let mut res = vec![None; disks.len()];
     for (idx, disk_op) in disks.iter().enumerate() {
-        if let Some(disk) = disk_op {
-            if let Ok(info) = disk.disk_info(opts).await {
-                res[idx] = Some(info);
-            }
+        if let Some(disk) = disk_op
+            && let Ok(info) = disk.disk_info(opts).await
+        {
+            res[idx] = Some(info);
        }
    }
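The moved multipart validators treat an upload ID as opaque URL-safe, unpadded base64; anything that fails to decode is a MalformedUploadID, and the empty string decodes successfully (to an empty vec), which is why the removed test asserts it passes. A runnable check using base64_simd, as the helpers above do:

    fn upload_id_is_well_formed(upload_id: &str) -> bool {
        base64_simd::URL_SAFE_NO_PAD
            .decode_to_vec(upload_id.as_bytes())
            .is_ok()
    }

    fn main() {
        assert!(upload_id_is_well_formed("dXBsb2FkLWlk")); // base64 of "upload-id"
        assert!(upload_id_is_well_formed(""));             // empty decodes to empty
        assert!(!upload_id_is_well_formed("invalid-base64!"));
    }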
@@ -2621,59 +2529,6 @@ pub async fn has_space_for(dis: &[Option<DiskInfo>], size: i64) -> Result<bool>
 mod tests {
     use super::*;
 
-    // Test validation functions
-    #[test]
-    fn test_is_valid_object_name() {
-        assert!(is_valid_object_name("valid-object-name"));
-        assert!(!is_valid_object_name(""));
-        assert!(is_valid_object_name("object/with/slashes"));
-        assert!(is_valid_object_name("object with spaces"));
-    }
-
-    #[test]
-    fn test_is_valid_object_prefix() {
-        assert!(is_valid_object_prefix("valid-prefix"));
-        assert!(is_valid_object_prefix(""));
-        assert!(is_valid_object_prefix("prefix/with/slashes"));
-    }
-
-    #[test]
-    fn test_check_bucket_and_object_names() {
-        // Valid names
-        assert!(check_bucket_and_object_names("valid-bucket", "valid-object").is_ok());
-
-        // Invalid bucket names
-        assert!(check_bucket_and_object_names("", "valid-object").is_err());
-        assert!(check_bucket_and_object_names("INVALID", "valid-object").is_err());
-
-        // Invalid object names
-        assert!(check_bucket_and_object_names("valid-bucket", "").is_err());
-    }
-
-    #[test]
-    fn test_check_list_objs_args() {
-        assert!(check_list_objs_args("valid-bucket", "", &None).is_ok());
-        assert!(check_list_objs_args("", "", &None).is_err());
-        assert!(check_list_objs_args("INVALID", "", &None).is_err());
-    }
-
-    #[test]
-    fn test_check_multipart_args() {
-        assert!(check_new_multipart_args("valid-bucket", "valid-object").is_ok());
-        assert!(check_new_multipart_args("", "valid-object").is_err());
-        assert!(check_new_multipart_args("valid-bucket", "").is_err());
-
-        // Use valid base64 encoded upload_id
-        let valid_upload_id = "dXBsb2FkLWlk"; // base64 encoded "upload-id"
-        assert!(check_multipart_object_args("valid-bucket", "valid-object", valid_upload_id).is_ok());
-        assert!(check_multipart_object_args("", "valid-object", valid_upload_id).is_err());
-        assert!(check_multipart_object_args("valid-bucket", "", valid_upload_id).is_err());
-        // Empty string is valid base64 (decodes to empty vec), so this should pass bucket/object validation
-        // but fail on empty upload_id check in the function logic
-        assert!(check_multipart_object_args("valid-bucket", "valid-object", "").is_ok());
-        assert!(check_multipart_object_args("valid-bucket", "valid-object", "invalid-base64!").is_err());
-    }
-
     #[tokio::test]
     async fn test_get_disk_infos() {
         let disks = vec![None, None]; // Empty disks for testing

@@ -2767,43 +2622,4 @@ mod tests {
         }
         assert_eq!(count, 1);
     }
-
-    #[test]
-    fn test_validation_functions_comprehensive() {
-        // Test object name validation edge cases
-        assert!(!is_valid_object_name(""));
-        assert!(is_valid_object_name("a"));
-        assert!(is_valid_object_name("test.txt"));
-        assert!(is_valid_object_name("folder/file.txt"));
-        assert!(is_valid_object_name("very-long-object-name-with-many-characters"));
-
-        // Test prefix validation
-        assert!(is_valid_object_prefix(""));
-        assert!(is_valid_object_prefix("prefix"));
-        assert!(is_valid_object_prefix("prefix/"));
-        assert!(is_valid_object_prefix("deep/nested/prefix/"));
-    }
-
-    #[test]
-    fn test_argument_validation_comprehensive() {
-        // Test bucket and object name validation
-        assert!(check_bucket_and_object_names("test-bucket", "test-object").is_ok());
-        assert!(check_bucket_and_object_names("test-bucket", "folder/test-object").is_ok());
-
-        // Test list objects arguments
-        assert!(check_list_objs_args("test-bucket", "prefix", &Some("marker".to_string())).is_ok());
-        assert!(check_list_objs_args("test-bucket", "", &None).is_ok());
-
-        // Test multipart upload arguments with valid base64 upload_id
-        let valid_upload_id = "dXBsb2FkLWlk"; // base64 encoded "upload-id"
-        assert!(check_put_object_part_args("test-bucket", "test-object", valid_upload_id).is_ok());
-        assert!(check_list_parts_args("test-bucket", "test-object", valid_upload_id).is_ok());
-        assert!(check_complete_multipart_args("test-bucket", "test-object", valid_upload_id).is_ok());
-        assert!(check_abort_multipart_args("test-bucket", "test-object", valid_upload_id).is_ok());
-
-        // Test put object arguments
-        assert!(check_put_object_args("test-bucket", "test-object").is_ok());
-        assert!(check_put_object_args("", "test-object").is_err());
-        assert!(check_put_object_args("test-bucket", "").is_err());
-    }
 }
@@ -28,14 +28,15 @@ use http::{HeaderMap, HeaderValue};
 use rustfs_common::heal_channel::HealOpts;
 use rustfs_filemeta::{
     FileInfo, MetaCacheEntriesSorted, ObjectPartInfo, REPLICATION_RESET, REPLICATION_STATUS, ReplicateDecision, ReplicationState,
-    ReplicationStatusType, VersionPurgeStatusType, replication_statuses_map, version_purge_statuses_map,
+    ReplicationStatusType, RestoreStatusOps as _, VersionPurgeStatusType, parse_restore_obj_status, replication_statuses_map,
+    version_purge_statuses_map,
 };
 use rustfs_madmin::heal_commands::HealResultItem;
 use rustfs_rio::Checksum;
 use rustfs_rio::{DecompressReader, HashReader, LimitReader, WarpReader};
 use rustfs_utils::CompressionAlgorithm;
-use rustfs_utils::http::AMZ_STORAGE_CLASS;
 use rustfs_utils::http::headers::{AMZ_OBJECT_TAGGING, RESERVED_METADATA_PREFIX_LOWER};
+use rustfs_utils::http::{AMZ_BUCKET_REPLICATION_STATUS, AMZ_RESTORE, AMZ_STORAGE_CLASS};
 use rustfs_utils::path::decode_dir_object;
 use serde::{Deserialize, Serialize};
 use std::collections::HashMap;

@@ -144,10 +145,10 @@ impl GetObjectReader {
     ) -> Result<(Self, usize, i64)> {
         let mut rs = rs;
 
-        if let Some(part_number) = opts.part_number {
-            if rs.is_none() {
-                rs = HTTPRangeSpec::from_object_info(oi, part_number);
-            }
+        if let Some(part_number) = opts.part_number
+            && rs.is_none()
+        {
+            rs = HTTPRangeSpec::from_object_info(oi, part_number);
        }
 
        // TODO:Encrypted
@@ -462,32 +463,30 @@ impl ObjectOptions {
     pub fn precondition_check(&self, obj_info: &ObjectInfo) -> Result<()> {
         let has_valid_mod_time = obj_info.mod_time.is_some_and(|t| t != OffsetDateTime::UNIX_EPOCH);
 
-        if let Some(part_number) = self.part_number {
-            if part_number > 1 && !obj_info.parts.is_empty() {
-                let part_found = obj_info.parts.iter().any(|pi| pi.number == part_number);
-                if !part_found {
-                    return Err(Error::InvalidPartNumber(part_number));
-                }
+        if let Some(part_number) = self.part_number
+            && part_number > 1
+            && !obj_info.parts.is_empty()
+        {
+            let part_found = obj_info.parts.iter().any(|pi| pi.number == part_number);
+            if !part_found {
+                return Err(Error::InvalidPartNumber(part_number));
             }
         }
 
         if let Some(pre) = &self.http_preconditions {
-            if let Some(if_none_match) = &pre.if_none_match {
-                if let Some(etag) = &obj_info.etag {
-                    if is_etag_equal(etag, if_none_match) {
-                        return Err(Error::NotModified);
-                    }
-                }
+            if let Some(if_none_match) = &pre.if_none_match
+                && let Some(etag) = &obj_info.etag
+                && is_etag_equal(etag, if_none_match)
+            {
+                return Err(Error::NotModified);
             }
 
-            if has_valid_mod_time {
-                if let Some(if_modified_since) = &pre.if_modified_since {
-                    if let Some(mod_time) = &obj_info.mod_time {
-                        if !is_modified_since(mod_time, if_modified_since) {
-                            return Err(Error::NotModified);
-                        }
-                    }
-                }
+            if has_valid_mod_time
+                && let Some(if_modified_since) = &pre.if_modified_since
+                && let Some(mod_time) = &obj_info.mod_time
+                && !is_modified_since(mod_time, if_modified_since)
+            {
+                return Err(Error::NotModified);
             }
 
             if let Some(if_match) = &pre.if_match {

@@ -499,14 +498,13 @@ impl ObjectOptions {
                     return Err(Error::PreconditionFailed);
                 }
             }
-            if has_valid_mod_time && pre.if_match.is_none() {
-                if let Some(if_unmodified_since) = &pre.if_unmodified_since {
-                    if let Some(mod_time) = &obj_info.mod_time {
-                        if is_modified_since(mod_time, if_unmodified_since) {
-                            return Err(Error::PreconditionFailed);
-                        }
-                    }
-                }
+            if has_valid_mod_time
+                && pre.if_match.is_none()
+                && let Some(if_unmodified_since) = &pre.if_unmodified_since
+                && let Some(mod_time) = &obj_info.mod_time
+                && is_modified_since(mod_time, if_unmodified_since)
+            {
+                return Err(Error::PreconditionFailed);
            }
        }
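The two hunks above flatten precondition_check into let-chains without changing the RFC 7232 evaluation order: an If-None-Match hit yields NotModified (HTTP 304), a failed If-Match or If-Unmodified-Since yields PreconditionFailed (HTTP 412), and the time-based checks only apply when the object carries a real modification time. A reduced decision sketch, with plain string equality standing in for is_etag_equal and the time comparisons:

    enum Outcome {
        Proceed,
        NotModified,
        PreconditionFailed,
    }

    fn check(etag: &str, if_none_match: Option<&str>, if_match: Option<&str>) -> Outcome {
        if let Some(c) = if_none_match
            && c == etag
        {
            return Outcome::NotModified;
        }
        if let Some(c) = if_match
            && c != etag
        {
            return Outcome::PreconditionFailed;
        }
        Outcome::Proceed
    }

    fn main() {
        assert!(matches!(check("abc", Some("abc"), None), Outcome::NotModified));
        assert!(matches!(check("abc", None, Some("xyz")), Outcome::PreconditionFailed));
        assert!(matches!(check("abc", None, Some("abc")), Outcome::Proceed));
    }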
@@ -698,12 +696,12 @@ impl ObjectInfo {
         }
 
         if self.is_compressed() {
-            if let Some(size_str) = self.user_defined.get(&format!("{RESERVED_METADATA_PREFIX_LOWER}actual-size")) {
-                if !size_str.is_empty() {
-                    // Todo: deal with error
-                    let size = size_str.parse::<i64>().map_err(|e| std::io::Error::other(e.to_string()))?;
-                    return Ok(size);
-                }
+            if let Some(size_str) = self.user_defined.get(&format!("{RESERVED_METADATA_PREFIX_LOWER}actual-size"))
+                && !size_str.is_empty()
+            {
+                // Todo: deal with error
+                let size = size_str.parse::<i64>().map_err(|e| std::io::Error::other(e.to_string()))?;
+                return Ok(size);
             }
             let mut actual_size = 0;
             self.parts.iter().for_each(|part| {
@@ -744,8 +742,39 @@ impl ObjectInfo {
|
||||
|
||||
let inlined = fi.inline_data();
|
||||
|
||||
// TODO:expires
|
||||
// TODO:ReplicationState
|
||||
// Parse expires from metadata (HTTP date format RFC 7231 or ISO 8601)
|
||||
let expires = fi.metadata.get("expires").and_then(|s| {
|
||||
// Try parsing as ISO 8601 first
|
||||
time::OffsetDateTime::parse(s, &time::format_description::well_known::Iso8601::DEFAULT)
|
||||
.or_else(|_| {
|
||||
// Try RFC 2822 format
|
||||
time::OffsetDateTime::parse(s, &time::format_description::well_known::Rfc2822)
|
||||
})
|
||||
.or_else(|_| {
|
||||
// Try RFC 3339 format
|
||||
time::OffsetDateTime::parse(s, &time::format_description::well_known::Rfc3339)
|
||||
})
|
||||
.ok()
|
||||
});
|
||||
|
||||
let replication_status_internal = fi
|
||||
.replication_state_internal
|
||||
.as_ref()
|
||||
.and_then(|v| v.replication_status_internal.clone());
|
||||
let version_purge_status_internal = fi
|
||||
.replication_state_internal
|
||||
.as_ref()
|
||||
.and_then(|v| v.version_purge_status_internal.clone());
|
||||
|
||||
let mut replication_status = fi.replication_status();
|
||||
if replication_status.is_empty()
|
||||
&& let Some(status) = fi.metadata.get(AMZ_BUCKET_REPLICATION_STATUS).cloned()
|
||||
&& status == ReplicationStatusType::Replica.as_str()
|
||||
{
|
||||
replication_status = ReplicationStatusType::Replica;
|
||||
}
|
||||
|
||||
let version_purge_status = fi.version_purge_status();
|
||||
|
||||
let transitioned_object = TransitionedObject {
|
||||
name: fi.transitioned_objname.clone(),
|
||||
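Editor's note: the new expires handling above tries three well-known formats from the time crate in order, falling back on parse failure. A standalone sketch of that chain (sample date strings are illustrative; assumes the time crate with its parsing feature enabled):

    use time::OffsetDateTime;
    use time::format_description::well_known::{Iso8601, Rfc2822, Rfc3339};

    fn parse_expires(s: &str) -> Option<OffsetDateTime> {
        OffsetDateTime::parse(s, &Iso8601::DEFAULT)
            .or_else(|_| OffsetDateTime::parse(s, &Rfc2822))
            .or_else(|_| OffsetDateTime::parse(s, &Rfc3339))
            .ok()
    }

    // parse_expires("2026-01-17T09:40:32Z")            -> matched by ISO 8601
    // parse_expires("Sat, 17 Jan 2026 09:40:32 +0000") -> matched by RFC 2822

The code comment names RFC 7231 (the HTTP date format); RFC 2822 is the nearest well-known parser the time crate provides for that family.
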
@@ -766,10 +795,24 @@ impl ObjectInfo {
        };

        // Extract storage class from metadata, default to STANDARD if not found
        let storage_class = metadata
            .get(AMZ_STORAGE_CLASS)
            .cloned()
            .or_else(|| Some(storageclass::STANDARD.to_string()));
        let storage_class = if !fi.transition_tier.is_empty() {
            Some(fi.transition_tier.clone())
        } else {
            fi.metadata
                .get(AMZ_STORAGE_CLASS)
                .cloned()
                .or_else(|| Some(storageclass::STANDARD.to_string()))
        };

        let mut restore_ongoing = false;
        let mut restore_expires = None;
        if let Some(restore_status) = fi.metadata.get(AMZ_RESTORE).cloned() {
            //
            if let Ok(restore_status) = parse_restore_obj_status(&restore_status) {
                restore_ongoing = restore_status.on_going();
                restore_expires = restore_status.expiry();
            }
        }

        // Convert parts from rustfs_filemeta::ObjectPartInfo to store_api::ObjectPartInfo
        let parts = fi
@@ -787,6 +830,8 @@ impl ObjectInfo {
            })
            .collect();

        // TODO: part checksums

        ObjectInfo {
            bucket: bucket.to_string(),
            name,
@@ -802,6 +847,7 @@ impl ObjectInfo {
            user_tags,
            content_type,
            content_encoding,
            expires,
            num_versions: fi.num_versions,
            successor_mod_time: fi.successor_mod_time,
            etag,
@@ -810,6 +856,12 @@ impl ObjectInfo {
            transitioned_object,
            checksum: fi.checksum.clone(),
            storage_class,
            restore_ongoing,
            restore_expires,
            replication_status_internal,
            replication_status,
            version_purge_status_internal,
            version_purge_status,
            ..Default::default()
        }
    }

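Editor's note: storage class resolution now gives the ILM transition tier priority over object metadata. The precedence in isolation (illustrative function, not the repo's API):

    fn effective_storage_class(transition_tier: &str, meta_class: Option<&str>) -> String {
        if !transition_tier.is_empty() {
            // A tiered object reports the tier it was transitioned to.
            transition_tier.to_string()
        } else {
            // Otherwise fall back to x-amz-storage-class, defaulting to STANDARD.
            meta_class.unwrap_or("STANDARD").to_string()
        }
    }
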
@@ -881,32 +933,31 @@ impl ObjectInfo {
                continue;
            }

            if entry.is_dir() {
                if let Some(delimiter) = &delimiter {
                    if let Some(idx) = {
                        let remaining = if entry.name.starts_with(prefix) {
                            &entry.name[prefix.len()..]
                        } else {
                            entry.name.as_str()
                        };
                        remaining.find(delimiter.as_str())
                    } {
                        let idx = prefix.len() + idx + delimiter.len();
                        if let Some(curr_prefix) = entry.name.get(0..idx) {
                            if curr_prefix == prev_prefix {
                                continue;
                            }

                            prev_prefix = curr_prefix;

                            objects.push(ObjectInfo {
                                is_dir: true,
                                bucket: bucket.to_owned(),
                                name: curr_prefix.to_owned(),
                                ..Default::default()
                            });
                        }
            if entry.is_dir()
                && let Some(delimiter) = &delimiter
                && let Some(idx) = {
                    let remaining = if entry.name.starts_with(prefix) {
                        &entry.name[prefix.len()..]
                    } else {
                        entry.name.as_str()
                    };
                    remaining.find(delimiter.as_str())
                }
            {
                let idx = prefix.len() + idx + delimiter.len();
                if let Some(curr_prefix) = entry.name.get(0..idx) {
                    if curr_prefix == prev_prefix {
                        continue;
                    }

                    prev_prefix = curr_prefix;

                    objects.push(ObjectInfo {
                        is_dir: true,
                        bucket: bucket.to_owned(),
                        name: curr_prefix.to_owned(),
                        ..Default::default()
                    });
                }
            }
        }
@@ -966,32 +1017,31 @@ impl ObjectInfo {
                continue;
            }

            if entry.is_dir() {
                if let Some(delimiter) = &delimiter {
                    if let Some(idx) = {
                        let remaining = if entry.name.starts_with(prefix) {
                            &entry.name[prefix.len()..]
                        } else {
                            entry.name.as_str()
                        };
                        remaining.find(delimiter.as_str())
                    } {
                        let idx = prefix.len() + idx + delimiter.len();
                        if let Some(curr_prefix) = entry.name.get(0..idx) {
                            if curr_prefix == prev_prefix {
                                continue;
                            }

                            prev_prefix = curr_prefix;

                            objects.push(ObjectInfo {
                                is_dir: true,
                                bucket: bucket.to_owned(),
                                name: curr_prefix.to_owned(),
                                ..Default::default()
                            });
                        }
            if entry.is_dir()
                && let Some(delimiter) = &delimiter
                && let Some(idx) = {
                    let remaining = if entry.name.starts_with(prefix) {
                        &entry.name[prefix.len()..]
                    } else {
                        entry.name.as_str()
                    };
                    remaining.find(delimiter.as_str())
                }
            {
                let idx = prefix.len() + idx + delimiter.len();
                if let Some(curr_prefix) = entry.name.get(0..idx) {
                    if curr_prefix == prev_prefix {
                        continue;
                    }

                    prev_prefix = curr_prefix;

                    objects.push(ObjectInfo {
                        is_dir: true,
                        bucket: bucket.to_owned(),
                        name: curr_prefix.to_owned(),
                        ..Default::default()
                    });
                }
            }
        }
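Editor's note: the two directory branches above compute S3-style common prefixes: keep everything up to and including the first delimiter after the listing prefix, and skip a prefix that was just emitted. The core computation as a standalone sketch (illustrative names):

    // Reduce an object key to its common prefix under (prefix, delimiter).
    fn common_prefix(name: &str, prefix: &str, delimiter: &str) -> Option<String> {
        let remaining = name.strip_prefix(prefix).unwrap_or(name);
        let idx = remaining.find(delimiter)?;
        name.get(..prefix.len() + idx + delimiter.len()).map(str::to_owned)
    }

    // common_prefix("photos/2024/a.jpg", "photos/", "/") == Some("photos/2024/".to_string())

Deduplication against prev_prefix works because the listing is sorted, so equal prefixes arrive adjacently.
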
@@ -1026,10 +1076,10 @@ impl ObjectInfo {
    }

    pub fn decrypt_checksums(&self, part: usize, _headers: &HeaderMap) -> Result<(HashMap<String, String>, bool)> {
        if part > 0 {
            if let Some(checksums) = self.parts.iter().find(|p| p.number == part).and_then(|p| p.checksums.clone()) {
                return Ok((checksums, true));
            }
        if part > 0
            && let Some(checksums) = self.parts.iter().find(|p| p.number == part).and_then(|p| p.checksums.clone())
        {
            return Ok((checksums, true));
        }

        // TODO: decrypt checksums

@@ -14,6 +14,7 @@

use crate::StorageAPI;
use crate::bucket::metadata_sys::get_versioning_config;
use crate::bucket::utils::check_list_objs_args;
use crate::bucket::versioning::VersioningApi;
use crate::cache_value::metacache_set::{ListPathRawOptions, list_path_raw};
use crate::disk::error::DiskError;
@@ -22,7 +23,6 @@ use crate::error::{
    Error, Result, StorageError, is_all_not_found, is_all_volume_not_found, is_err_bucket_not_found, to_object_err,
};
use crate::set_disk::SetDisks;
use crate::store::check_list_objs_args;
use crate::store_api::{
    ListObjectVersionsInfo, ListObjectsInfo, ObjectInfo, ObjectInfoOrErr, ObjectOptions, WalkOptions, WalkVersionsSortOrder,
};
@@ -302,10 +302,10 @@ impl ECStore {
            ..Default::default()
        });

        if let Some(err) = list_result.err.clone() {
            if err != rustfs_filemeta::Error::Unexpected {
                return Err(to_object_err(err.into(), vec![bucket, prefix]));
            }
        if let Some(err) = list_result.err.clone()
            && err != rustfs_filemeta::Error::Unexpected
        {
            return Err(to_object_err(err.into(), vec![bucket, prefix]));
        }

        if let Some(result) = list_result.entries.as_mut() {
@@ -387,7 +387,12 @@ impl ECStore {
        }

        let version_marker = if let Some(marker) = version_marker {
            Some(Uuid::parse_str(&marker)?)
            // "null" is used for non-versioned objects in AWS S3 API
            if marker == "null" {
                None
            } else {
                Some(Uuid::parse_str(&marker)?)
            }
        } else {
            None
        };
@@ -413,10 +418,10 @@ impl ECStore {
            },
        };

        if let Some(err) = list_result.err.clone() {
            if err != rustfs_filemeta::Error::Unexpected {
                return Err(to_object_err(err.into(), vec![bucket, prefix]));
            }
        if let Some(err) = list_result.err.clone()
            && err != rustfs_filemeta::Error::Unexpected
        {
            return Err(to_object_err(err.into(), vec![bucket, prefix]));
        }

        if let Some(result) = list_result.entries.as_mut() {
@@ -445,7 +450,13 @@ impl ECStore {
        if is_truncated {
            get_objects
                .last()
                .map(|last| (Some(last.name.clone()), last.version_id.map(|v| v.to_string())))
                .map(|last| {
                    (
                        Some(last.name.clone()),
                        // AWS S3 API returns "null" for non-versioned objects
                        Some(last.version_id.map(|v| v.to_string()).unwrap_or_else(|| "null".to_string())),
                    )
                })
                .unwrap_or_default()
        } else {
            (None, None)
@@ -498,10 +509,11 @@ impl ECStore {
        let mut o = o.clone();
        o.marker = o.marker.filter(|v| v >= &o.prefix);

        if let Some(marker) = &o.marker {
            if !o.prefix.is_empty() && !marker.starts_with(&o.prefix) {
                return Err(Error::Unexpected);
            }
        if let Some(marker) = &o.marker
            && !o.prefix.is_empty()
            && !marker.starts_with(&o.prefix)
        {
            return Err(Error::Unexpected);
        }

        if o.limit == 0 {
@@ -806,10 +818,10 @@ impl ECStore {
            let value = tx2.clone();
            let resolver = resolver.clone();
            async move {
                if let Some(entry) = entries.resolve(resolver) {
                    if let Err(err) = value.send(entry).await {
                        error!("list_path send fail {:?}", err);
                    }
                if let Some(entry) = entries.resolve(resolver)
                    && let Err(err) = value.send(entry).await
                {
                    error!("list_path send fail {:?}", err);
                }
            }
        })
@@ -975,20 +987,21 @@ async fn gather_results(
            continue;
        }

        if let Some(marker) = &opts.marker {
            if &entry.name < marker {
                continue;
            }
        if let Some(marker) = &opts.marker
            && &entry.name < marker
        {
            continue;
        }

        if !entry.name.starts_with(&opts.prefix) {
            continue;
        }

        if let Some(separator) = &opts.separator {
            if !opts.recursive && !entry.is_in_dir(&opts.prefix, separator) {
                continue;
            }
        if let Some(separator) = &opts.separator
            && !opts.recursive
            && !entry.is_in_dir(&opts.prefix, separator)
        {
            continue;
        }

        if !opts.incl_deleted && entry.is_object() && entry.is_latest_delete_marker() && !entry.is_object_dir() {
@@ -1189,16 +1202,16 @@ async fn merge_entry_channels(
            }
        }

        if let Some(xl) = has_xl.as_mut() {
            if !versions.is_empty() {
                xl.versions = merge_file_meta_versions(read_quorum, true, 0, &versions);
        if let Some(xl) = has_xl.as_mut()
            && !versions.is_empty()
        {
            xl.versions = merge_file_meta_versions(read_quorum, true, 0, &versions);

            if let Ok(meta) = xl.marshal_msg() {
                if let Some(b) = best.as_mut() {
                    b.metadata = meta;
                    b.cached = Some(xl.clone());
                }
            }
            if let Ok(meta) = xl.marshal_msg()
                && let Some(b) = best.as_mut()
            {
                b.metadata = meta;
                b.cached = Some(xl.clone());
            }
        }
    }
@@ -1206,11 +1219,11 @@ async fn merge_entry_channels(
        to_merge.clear();
    }

    if let Some(best_entry) = &best {
        if best_entry.name > last {
            out_channel.send(best_entry.clone()).await.map_err(Error::other)?;
            last = best_entry.name.clone();
        }
    if let Some(best_entry) = &best
        && best_entry.name > last
    {
        out_channel.send(best_entry.clone()).await.map_err(Error::other)?;
        last = best_entry.name.clone();
    }

    select_from(&mut in_channels, best_idx, &mut top, &mut n_done).await?;
@@ -1296,10 +1309,10 @@ impl SetDisks {
        let value = tx2.clone();
        let resolver = resolver.clone();
        async move {
            if let Some(entry) = entries.resolve(resolver) {
                if let Err(err) = value.send(entry).await {
                    error!("list_path send fail {:?}", err);
                }
            if let Some(entry) = entries.resolve(resolver)
                && let Err(err) = value.send(entry).await
            {
                error!("list_path send fail {:?}", err);
            }
        }
    })
@@ -1374,6 +1387,78 @@ fn calc_common_counter(infos: &[DiskInfo], read_quorum: usize) -> u64 {

#[cfg(test)]
mod test {
    use uuid::Uuid;

    /// Test that "null" version marker is handled correctly
    /// AWS S3 API uses "null" string to represent non-versioned objects
    #[test]
    fn test_null_version_marker_handling() {
        // "null" should be treated as None (non-versioned)
        let version_marker = "null";
        let parsed: Option<Uuid> = if version_marker == "null" {
            None
        } else {
            Uuid::parse_str(version_marker).ok()
        };
        assert!(parsed.is_none(), "\"null\" should be parsed as None");

        // Valid UUID should be parsed correctly
        let valid_uuid = "550e8400-e29b-41d4-a716-446655440000";
        let parsed: Option<Uuid> = if valid_uuid == "null" {
            None
        } else {
            Uuid::parse_str(valid_uuid).ok()
        };
        assert!(parsed.is_some(), "Valid UUID should be parsed correctly");
        assert_eq!(parsed.unwrap().to_string(), "550e8400-e29b-41d4-a716-446655440000");
    }

    /// Test that next_version_idmarker returns "null" for non-versioned objects
    #[test]
    fn test_next_version_idmarker_null_string() {
        // When version_id is None, next_version_idmarker should be "null"
        let version_id: Option<Uuid> = None;
        let next_version_idmarker = version_id.map(|v| v.to_string()).unwrap_or_else(|| "null".to_string());
        assert_eq!(next_version_idmarker, "null");

        // When version_id is Some, next_version_idmarker should be the UUID string
        let version_id: Option<Uuid> = Some(Uuid::parse_str("550e8400-e29b-41d4-a716-446655440000").unwrap());
        let next_version_idmarker = version_id.map(|v| v.to_string()).unwrap_or_else(|| "null".to_string());
        assert_eq!(next_version_idmarker, "550e8400-e29b-41d4-a716-446655440000");
    }

    /// Test the round-trip: next_version_idmarker -> VersionIdMarker parameter -> parsing
    #[test]
    fn test_version_marker_round_trip() {
        // Scenario 1: Non-versioned object
        // Server returns "null" as NextVersionIdMarker
        // Client sends "null" as VersionIdMarker
        // Server parses "null" as None
        let server_response = "null";
        let client_request = server_response;
        let parsed: Option<Uuid> = if client_request == "null" {
            None
        } else {
            Uuid::parse_str(client_request).ok()
        };
        assert!(parsed.is_none());

        // Scenario 2: Versioned object
        // Server returns UUID as NextVersionIdMarker
        // Client sends UUID as VersionIdMarker
        // Server parses UUID correctly
        let uuid_str = "550e8400-e29b-41d4-a716-446655440000";
        let server_response = uuid_str;
        let client_request = server_response;
        let parsed: Option<Uuid> = if client_request == "null" {
            None
        } else {
            Uuid::parse_str(client_request).ok()
        };
        assert!(parsed.is_some());
        assert_eq!(parsed.unwrap().to_string(), uuid_str);
    }

    // use std::sync::Arc;

    // use crate::cache_value::metacache_set::list_path_raw;

@@ -505,6 +505,10 @@ impl FileInfo {
            ReplicationStatusType::Empty
        }
    }

    pub fn shard_file_size(&self, total_length: i64) -> i64 {
        self.erasure.shard_file_size(total_length)
    }
}

#[derive(Debug, Default, Clone, Serialize, Deserialize)]
@@ -590,7 +594,7 @@ impl RestoreStatusOps for RestoreStatus {
    }
}

fn parse_restore_obj_status(restore_hdr: &str) -> Result<RestoreStatus> {
pub fn parse_restore_obj_status(restore_hdr: &str) -> Result<RestoreStatus> {
    let tokens: Vec<&str> = restore_hdr.splitn(2, ",").collect();
    let progress_tokens: Vec<&str> = tokens[0].splitn(2, "=").collect();
    if progress_tokens.len() != 2 {
@@ -635,10 +639,10 @@ fn parse_restore_obj_status(restore_hdr: &str) -> Result<RestoreStatus> {
}

pub fn is_restored_object_on_disk(meta: &HashMap<String, String>) -> bool {
    if let Some(restore_hdr) = meta.get(X_AMZ_RESTORE.as_str()) {
        if let Ok(restore_status) = parse_restore_obj_status(restore_hdr) {
            return restore_status.on_disk();
        }
    if let Some(restore_hdr) = meta.get(X_AMZ_RESTORE.as_str())
        && let Ok(restore_status) = parse_restore_obj_status(restore_hdr)
    {
        return restore_status.on_disk();
    }
    false
}

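Editor's note: parse_restore_obj_status (now pub so the store layer can reuse it) digests the S3 x-amz-restore header. Its wire format, and a sketch of the two-way split that the function's splitn calls perform (sample header value only):

    // x-amz-restore: ongoing-request="false", expiry-date="Fri, 21 Dec 2012 00:00:00 GMT"
    fn split_restore_header(hdr: &str) -> Option<(String, Option<String>)> {
        let mut tokens = hdr.splitn(2, ',');
        let progress = tokens.next()?.trim();              // ongoing-request="..."
        let (key, value) = progress.split_once('=')?;
        if key != "ongoing-request" {
            return None;
        }
        let ongoing = value.trim_matches('"').to_string();
        let expiry = tokens.next().map(|t| t.trim().to_string()); // expiry-date="..." if present
        Some((ongoing, expiry))
    }
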
@@ -14,7 +14,8 @@

use crate::{
    ErasureAlgo, ErasureInfo, Error, FileInfo, FileInfoVersions, InlineData, ObjectPartInfo, RawFileInfo, ReplicationState,
    ReplicationStatusType, Result, VersionPurgeStatusType, replication_statuses_map, version_purge_statuses_map,
    ReplicationStatusType, Result, TIER_FV_ID, TIER_FV_MARKER, VersionPurgeStatusType, replication_statuses_map,
    version_purge_statuses_map,
};
use byteorder::ByteOrder;
use bytes::Bytes;
@@ -473,12 +474,30 @@ impl FileMeta {
        match version.header.version_type {
            VersionType::Invalid | VersionType::Legacy => (),
            VersionType::Object => {
                if version.header.version_id == fi.version_id {
                // For non-versioned buckets, treat None as Uuid::nil()
                let fi_vid = fi.version_id.or(Some(Uuid::nil()));
                let ver_vid = version.header.version_id.or(Some(Uuid::nil()));

                if ver_vid == fi_vid {
                    let mut ver = FileMetaVersion::try_from(version.meta.as_slice())?;

                    if let Some(ref mut obj) = ver.object {
                        for (k, v) in fi.metadata.iter() {
                            obj.meta_user.insert(k.clone(), v.clone());
                            // Split metadata into meta_user and meta_sys based on prefix
                            // This logic must match From<FileInfo> for MetaObject
                            if k.len() > RESERVED_METADATA_PREFIX.len()
                                && (k.starts_with(RESERVED_METADATA_PREFIX) || k.starts_with(RESERVED_METADATA_PREFIX_LOWER))
                            {
                                // Skip internal flags that shouldn't be persisted
                                if k == headers::X_RUSTFS_HEALING || k == headers::X_RUSTFS_DATA_MOV {
                                    continue;
                                }
                                // Insert into meta_sys
                                obj.meta_sys.insert(k.clone(), v.as_bytes().to_vec());
                            } else {
                                // Insert into meta_user
                                obj.meta_user.insert(k.clone(), v.clone());
                            }
                        }

                        if let Some(mod_time) = fi.mod_time {
@@ -557,13 +576,12 @@ impl FileMeta {
        let mod_time = version.get_mod_time();

        for (idx, exist) in self.versions.iter().enumerate() {
            if let Some(ref ex_mt) = exist.header.mod_time {
                if let Some(ref in_md) = mod_time {
                    if ex_mt <= in_md {
                        self.versions.insert(idx, FileMetaShallowVersion::try_from(version)?);
                        return Ok(());
                    }
                }
            if let Some(ref ex_mt) = exist.header.mod_time
                && let Some(ref in_md) = mod_time
                && ex_mt <= in_md
            {
                self.versions.insert(idx, FileMetaShallowVersion::try_from(version)?);
                return Ok(());
            }
        }
        Err(Error::other("add_version failed"))
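Editor's note: the version match in the hunk above normalizes a missing version id to the nil UUID on both sides, so writes against non-versioned buckets find their existing entry. The comparison in isolation:

    use uuid::Uuid;

    // Treat None as Uuid::nil() before comparing version ids.
    fn same_version(a: Option<Uuid>, b: Option<Uuid>) -> bool {
        a.or(Some(Uuid::nil())) == b.or(Some(Uuid::nil()))
    }

    // same_version(None, Some(Uuid::nil())) == true
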
@@ -639,58 +657,44 @@ impl FileMeta {
        }

        if fi.deleted {
            if !fi.delete_marker_replication_status().is_empty() {
                if let Some(delete_marker) = ventry.delete_marker.as_mut() {
                    if fi.delete_marker_replication_status() == ReplicationStatusType::Replica {
                        delete_marker.meta_sys.insert(
                            format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replica-status"),
                            fi.replication_state_internal
                                .as_ref()
                                .map(|v| v.replica_status.clone())
                                .unwrap_or_default()
                                .as_str()
                                .as_bytes()
                                .to_vec(),
                        );
                        delete_marker.meta_sys.insert(
                            format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replica-timestamp"),
                            fi.replication_state_internal
                                .as_ref()
                                .map(|v| v.replica_timestamp.unwrap_or(OffsetDateTime::UNIX_EPOCH).to_string())
                                .unwrap_or_default()
                                .as_bytes()
                                .to_vec(),
                        );
                    } else {
                        delete_marker.meta_sys.insert(
                            format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replication-status"),
                            fi.replication_state_internal
                                .as_ref()
                                .map(|v| v.replication_status_internal.clone().unwrap_or_default())
                                .unwrap_or_default()
                                .as_bytes()
                                .to_vec(),
                        );
                        delete_marker.meta_sys.insert(
                            format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replication-timestamp"),
                            fi.replication_state_internal
                                .as_ref()
                                .map(|v| v.replication_timestamp.unwrap_or(OffsetDateTime::UNIX_EPOCH).to_string())
                                .unwrap_or_default()
                                .as_bytes()
                                .to_vec(),
                        );
                    }
                }
            }

            if !fi.version_purge_status().is_empty() {
                if let Some(delete_marker) = ventry.delete_marker.as_mut() {
            if !fi.delete_marker_replication_status().is_empty()
                && let Some(delete_marker) = ventry.delete_marker.as_mut()
            {
                if fi.delete_marker_replication_status() == ReplicationStatusType::Replica {
                    delete_marker.meta_sys.insert(
                        VERSION_PURGE_STATUS_KEY.to_string(),
                        format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replica-status"),
                        fi.replication_state_internal
                            .as_ref()
                            .map(|v| v.version_purge_status_internal.clone().unwrap_or_default())
                            .map(|v| v.replica_status.clone())
                            .unwrap_or_default()
                            .as_str()
                            .as_bytes()
                            .to_vec(),
                    );
                    delete_marker.meta_sys.insert(
                        format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replica-timestamp"),
                        fi.replication_state_internal
                            .as_ref()
                            .map(|v| v.replica_timestamp.unwrap_or(OffsetDateTime::UNIX_EPOCH).to_string())
                            .unwrap_or_default()
                            .as_bytes()
                            .to_vec(),
                    );
                } else {
                    delete_marker.meta_sys.insert(
                        format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replication-status"),
                        fi.replication_state_internal
                            .as_ref()
                            .map(|v| v.replication_status_internal.clone().unwrap_or_default())
                            .unwrap_or_default()
                            .as_bytes()
                            .to_vec(),
                    );
                    delete_marker.meta_sys.insert(
                        format!("{}{}", RESERVED_METADATA_PREFIX_LOWER, "replication-timestamp"),
                        fi.replication_state_internal
                            .as_ref()
                            .map(|v| v.replication_timestamp.unwrap_or(OffsetDateTime::UNIX_EPOCH).to_string())
                            .unwrap_or_default()
                            .as_bytes()
                            .to_vec(),
@@ -698,6 +702,20 @@ impl FileMeta {
                }
            }

            if !fi.version_purge_status().is_empty()
                && let Some(delete_marker) = ventry.delete_marker.as_mut()
            {
                delete_marker.meta_sys.insert(
                    VERSION_PURGE_STATUS_KEY.to_string(),
                    fi.replication_state_internal
                        .as_ref()
                        .map(|v| v.version_purge_status_internal.clone().unwrap_or_default())
                        .unwrap_or_default()
                        .as_bytes()
                        .to_vec(),
                );
            }

            if let Some(delete_marker) = ventry.delete_marker.as_mut() {
                for (k, v) in fi
                    .replication_state_internal
@@ -892,6 +910,7 @@ impl FileMeta {
        path: &str,
        version_id: &str,
        read_data: bool,
        include_free_versions: bool,
        all_parts: bool,
    ) -> Result<FileInfo> {
        let vid = {
@@ -904,11 +923,35 @@ impl FileMeta {

        let mut is_latest = true;
        let mut succ_mod_time = None;
        let mut non_free_versions = self.versions.len();

        let mut found = false;
        let mut found_free_version = None;
        let mut found_fi = None;

        for ver in self.versions.iter() {
            let header = &ver.header;

            // TODO: freeVersion
            if header.free_version() {
                non_free_versions -= 1;
                if include_free_versions && found_free_version.is_none() {
                    let mut found_free_fi = FileMetaVersion::default();
                    if found_free_fi.unmarshal_msg(&ver.meta).is_ok() && found_free_fi.version_type != VersionType::Invalid {
                        let mut free_fi = found_free_fi.into_fileinfo(volume, path, all_parts);
                        free_fi.is_latest = true;
                        found_free_version = Some(free_fi);
                    }
                }

                if header.version_id != Some(vid) {
                    continue;
                }
            }

            if found {
                continue;
            }

            if !version_id.is_empty() && header.version_id != Some(vid) {
                is_latest = false;
@@ -916,6 +959,8 @@ impl FileMeta {
                continue;
            }

            found = true;

            let mut fi = ver.into_fileinfo(volume, path, all_parts)?;
            fi.is_latest = is_latest;

@@ -930,7 +975,25 @@ impl FileMeta {
                .map(bytes::Bytes::from);
            }

            fi.num_versions = self.versions.len();
            found_fi = Some(fi);
        }

        if !found {
            if version_id.is_empty() {
                if include_free_versions
                    && non_free_versions == 0
                    && let Some(free_version) = found_free_version
                {
                    return Ok(free_version);
                }
                return Err(Error::FileNotFound);
            } else {
                return Err(Error::FileVersionNotFound);
            }
        }

        if let Some(mut fi) = found_fi {
            fi.num_versions = non_free_versions;

            return Ok(fi);
        }
@@ -1750,14 +1813,27 @@ impl MetaObject {
            metadata.insert(k.to_owned(), v.to_owned());
        }

        let tier_fvidkey = format!("{RESERVED_METADATA_PREFIX_LOWER}{TIER_FV_ID}").to_lowercase();
        let tier_fvmarker_key = format!("{RESERVED_METADATA_PREFIX_LOWER}{TIER_FV_MARKER}").to_lowercase();

        for (k, v) in &self.meta_sys {
            if k == AMZ_STORAGE_CLASS && v == b"STANDARD" {
            let lower_k = k.to_lowercase();

            if lower_k == tier_fvidkey || lower_k == tier_fvmarker_key {
                continue;
            }

            if lower_k == VERSION_PURGE_STATUS_KEY.to_lowercase() {
                continue;
            }

            if lower_k == AMZ_STORAGE_CLASS.to_lowercase() && v == b"STANDARD" {
                continue;
            }

            if k.starts_with(RESERVED_METADATA_PREFIX)
                || k.starts_with(RESERVED_METADATA_PREFIX_LOWER)
                || k == VERSION_PURGE_STATUS_KEY
                || lower_k == VERSION_PURGE_STATUS_KEY.to_lowercase()
            {
                metadata.insert(k.to_owned(), String::from_utf8(v.to_owned()).unwrap_or_default());
            }
@@ -1899,42 +1975,41 @@ impl MetaObject {
        if let Some(status) = self
            .meta_sys
            .get(&format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITION_STATUS}"))
            && *status == TRANSITION_COMPLETE.as_bytes().to_vec()
        {
            if *status == TRANSITION_COMPLETE.as_bytes().to_vec() {
                let vid = Uuid::parse_str(&fi.tier_free_version_id());
                if let Err(err) = vid {
                    panic!("Invalid Tier Object delete marker versionId {} {}", fi.tier_free_version_id(), err);
                }
                let vid = vid.unwrap();
                let mut free_entry = FileMetaVersion {
                    version_type: VersionType::Delete,
                    write_version: 0,
                    ..Default::default()
                };
                free_entry.delete_marker = Some(MetaDeleteMarker {
                    version_id: Some(vid),
                    mod_time: self.mod_time,
                    meta_sys: HashMap::<String, Vec<u8>>::new(),
                });

                let delete_marker = free_entry.delete_marker.as_mut().unwrap();

                delete_marker
                    .meta_sys
                    .insert(format!("{RESERVED_METADATA_PREFIX_LOWER}{FREE_VERSION}"), vec![]);

                let tier_key = format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITION_TIER}");
                let tier_obj_key = format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITIONED_OBJECTNAME}");
                let tier_obj_vid_key = format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITIONED_VERSION_ID}");

                let aa = [tier_key, tier_obj_key, tier_obj_vid_key];
                for (k, v) in &self.meta_sys {
                    if aa.contains(k) {
                        delete_marker.meta_sys.insert(k.clone(), v.clone());
                    }
                }
                return (free_entry, true);
            let vid = Uuid::parse_str(&fi.tier_free_version_id());
            if let Err(err) = vid {
                panic!("Invalid Tier Object delete marker versionId {} {}", fi.tier_free_version_id(), err);
            }
            let vid = vid.unwrap();
            let mut free_entry = FileMetaVersion {
                version_type: VersionType::Delete,
                write_version: 0,
                ..Default::default()
            };
            free_entry.delete_marker = Some(MetaDeleteMarker {
                version_id: Some(vid),
                mod_time: self.mod_time,
                meta_sys: HashMap::<String, Vec<u8>>::new(),
            });

            let delete_marker = free_entry.delete_marker.as_mut().unwrap();

            delete_marker
                .meta_sys
                .insert(format!("{RESERVED_METADATA_PREFIX_LOWER}{FREE_VERSION}"), vec![]);

            let tier_key = format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITION_TIER}");
            let tier_obj_key = format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITIONED_OBJECTNAME}");
            let tier_obj_vid_key = format!("{RESERVED_METADATA_PREFIX_LOWER}{TRANSITIONED_VERSION_ID}");

            let aa = [tier_key, tier_obj_key, tier_obj_vid_key];
            for (k, v) in &self.meta_sys {
                if aa.contains(k) {
                    delete_marker.meta_sys.insert(k.clone(), v.clone());
                }
            }
            return (free_entry, true);
        }
        (FileMetaVersion::default(), false)
    }
@@ -2495,15 +2570,31 @@ pub fn merge_file_meta_versions(
    merged
}

pub async fn file_info_from_raw(ri: RawFileInfo, bucket: &str, object: &str, read_data: bool) -> Result<FileInfo> {
    get_file_info(&ri.buf, bucket, object, "", FileInfoOpts { data: read_data }).await
pub fn file_info_from_raw(
    ri: RawFileInfo,
    bucket: &str,
    object: &str,
    read_data: bool,
    include_free_versions: bool,
) -> Result<FileInfo> {
    get_file_info(
        &ri.buf,
        bucket,
        object,
        "",
        FileInfoOpts {
            data: read_data,
            include_free_versions,
        },
    )
}

pub struct FileInfoOpts {
    pub data: bool,
    pub include_free_versions: bool,
}

pub async fn get_file_info(buf: &[u8], volume: &str, path: &str, version_id: &str, opts: FileInfoOpts) -> Result<FileInfo> {
pub fn get_file_info(buf: &[u8], volume: &str, path: &str, version_id: &str, opts: FileInfoOpts) -> Result<FileInfo> {
    let vid = {
        if version_id.is_empty() {
            None
@@ -2525,7 +2616,7 @@ pub async fn get_file_info(buf: &[u8], volume: &str, path: &str, version_id: &st
        });
    }

    let fi = meta.into_fileinfo(volume, path, version_id, opts.data, true)?;
    let fi = meta.into_fileinfo(volume, path, version_id, opts.data, opts.include_free_versions, true)?;
    Ok(fi)
}

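Editor's note: with this change file_info_from_raw and get_file_info are synchronous and thread the new include_free_versions flag through FileInfoOpts. A hedged call-site sketch (the raw value is assumed to come from a prior disk read):

    // Assuming raw: RawFileInfo was read from disk earlier:
    let fi = file_info_from_raw(raw, "my-bucket", "path/to/object", false, false)?;
    // read_data = false: skip inline data; include_free_versions = false: hide free versions.
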
@@ -3550,15 +3641,15 @@ impl FileMeta {
    match version.header.version_type {
        VersionType::Object => {
            stats.object_versions += 1;
            if let Ok(ver) = FileMetaVersion::try_from(version.meta.as_slice()) {
                if let Some(obj) = &ver.object {
                    stats.total_size += obj.size;
                    if obj.uses_data_dir() {
                        stats.versions_with_data_dir += 1;
                    }
                    if obj.inlinedata() {
                        stats.versions_with_inline_data += 1;
                    }
            if let Ok(ver) = FileMetaVersion::try_from(version.meta.as_slice())
                && let Some(obj) = &ver.object
            {
                stats.total_size += obj.size;
                if obj.uses_data_dir() {
                    stats.versions_with_data_dir += 1;
                }
                if obj.inlinedata() {
                    stats.versions_with_inline_data += 1;
                }
            }
        }

@@ -12,7 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::{Error, FileInfo, FileInfoVersions, FileMeta, FileMetaShallowVersion, Result, VersionType, merge_file_meta_versions};
use crate::{
    Error, FileInfo, FileInfoOpts, FileInfoVersions, FileMeta, FileMetaShallowVersion, Result, VersionType, get_file_info,
    merge_file_meta_versions,
};
use rmp::Marker;
use serde::{Deserialize, Serialize};
use std::cmp::Ordering;
@@ -141,8 +144,7 @@ impl MetaCacheEntry {
        });
    }

    if self.cached.is_some() {
        let fm = self.cached.as_ref().unwrap();
    if let Some(fm) = &self.cached {
        if fm.versions.is_empty() {
            return Ok(FileInfo {
                volume: bucket.to_owned(),
@@ -154,14 +156,20 @@ impl MetaCacheEntry {
            });
        }

        let fi = fm.into_fileinfo(bucket, self.name.as_str(), "", false, false)?;
        let fi = fm.into_fileinfo(bucket, self.name.as_str(), "", false, false, true)?;
        return Ok(fi);
    }

    let mut fm = FileMeta::new();
    fm.unmarshal_msg(&self.metadata)?;
    let fi = fm.into_fileinfo(bucket, self.name.as_str(), "", false, false)?;
    Ok(fi)
    get_file_info(
        &self.metadata,
        bucket,
        self.name.as_str(),
        "",
        FileInfoOpts {
            data: false,
            include_free_versions: false,
        },
    )
}

pub fn file_info_versions(&self, bucket: &str) -> Result<FileInfoVersions> {
@@ -442,10 +450,10 @@ impl MetaCacheEntriesSorted {
    }

    pub fn forward_past(&mut self, marker: Option<String>) {
        if let Some(val) = marker {
            if let Some(idx) = self.o.0.iter().flatten().position(|v| v.name > val) {
                self.o.0 = self.o.0.split_off(idx);
            }
        if let Some(val) = marker
            && let Some(idx) = self.o.0.iter().flatten().position(|v| v.name > val)
        {
            self.o.0 = self.o.0.split_off(idx);
        }
    }
}
@@ -788,22 +796,23 @@ impl<T: Clone + Debug + Send + 'static> Cache<T> {
            .duration_since(UNIX_EPOCH)
            .expect("Time went backwards")
            .as_secs();
        if now - self.last_update_ms.load(AtomicOrdering::SeqCst) < self.ttl.as_secs() {
            if let Some(v) = v {
                return Ok(v);
            }
        if now - self.last_update_ms.load(AtomicOrdering::SeqCst) < self.ttl.as_secs()
            && let Some(v) = v
        {
            return Ok(v);
        }

        if self.opts.no_wait && now - self.last_update_ms.load(AtomicOrdering::SeqCst) < self.ttl.as_secs() * 2 {
            if let Some(value) = v {
                if self.updating.try_lock().is_ok() {
                    let this = Arc::clone(&self);
                    spawn(async move {
                        let _ = this.update().await;
                    });
                }
                return Ok(value);
        if self.opts.no_wait
            && now - self.last_update_ms.load(AtomicOrdering::SeqCst) < self.ttl.as_secs() * 2
            && let Some(value) = v
        {
            if self.updating.try_lock().is_ok() {
                let this = Arc::clone(&self);
                spawn(async move {
                    let _ = this.update().await;
                });
            }
            return Ok(value);
        }

        let _ = self.updating.lock().await;
@@ -811,10 +820,9 @@ impl<T: Clone + Debug + Send + 'static> Cache<T> {
        if let (Ok(duration), Some(value)) = (
            SystemTime::now().duration_since(UNIX_EPOCH + Duration::from_secs(self.last_update_ms.load(AtomicOrdering::SeqCst))),
            v,
        ) {
            if duration < self.ttl {
                return Ok(value);
            }
        ) && duration < self.ttl
        {
            return Ok(value);
        }

        match self.update().await {

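Editor's note: the cache get path above is a three-tier freshness ladder: serve a value younger than the TTL, serve a stale value up to twice the TTL while kicking off a background refresh when no_wait is set, otherwise block on the update lock. The decision in isolation (illustrative types):

    enum Decision { Fresh, ServeStaleAndRefresh, BlockingUpdate }

    // age: seconds since the last update; have_value: a cached value exists.
    fn decide(age: u64, ttl: u64, no_wait: bool, have_value: bool) -> Decision {
        if have_value && age < ttl {
            Decision::Fresh                    // within TTL: return the cached value
        } else if have_value && no_wait && age < ttl * 2 {
            Decision::ServeStaleAndRefresh     // stale but usable: spawn update, return stale
        } else {
            Decision::BlockingUpdate           // wait on the update lock and refresh
        }
    }
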
@@ -270,14 +270,12 @@ impl ReplicationState {
            return repl_status;
        }

        if repl_status == ReplicationStatusType::Completed {
            if let (Some(replica_timestamp), Some(replication_timestamp)) =
        if repl_status == ReplicationStatusType::Completed
            && let (Some(replica_timestamp), Some(replication_timestamp)) =
                (self.replica_timestamp, self.replication_timestamp)
        {
            if replica_timestamp > replication_timestamp {
                return self.replica_status.clone();
            }
        }
            && replica_timestamp > replication_timestamp
        {
            return self.replica_status.clone();
        }

        return repl_status;

@@ -246,12 +246,12 @@ where
        }

        let sts_user = has_sts_user.map(|sts| sts.credentials.access_key.clone());
        if let Some(ref sts) = sts_user {
            if let Some(plc) = sts_policy_map.get(sts) {
                for p in plc.to_slice().iter() {
                    if !policy_docs_map.contains_key(p) {
                        let _ = self.api.load_policy_doc(p, &mut policy_docs_map).await;
                    }
        if let Some(ref sts) = sts_user
            && let Some(plc) = sts_policy_map.get(sts)
        {
            for p in plc.to_slice().iter() {
                if !policy_docs_map.contains_key(p) {
                    let _ = self.api.load_policy_doc(p, &mut policy_docs_map).await;
                }
            }
        }
@@ -635,10 +635,10 @@ where
        }

        let users = self.cache.users.load();
        if let Some(x) = users.get(&cred.access_key) {
            if x.credentials.is_service_account() {
                return Err(Error::IAMActionNotAllowed);
            }
        if let Some(x) = users.get(&cred.access_key)
            && x.credentials.is_service_account()
        {
            return Err(Error::IAMActionNotAllowed);
        }

        let u = UserIdentity::new(cred);
@@ -789,10 +789,10 @@ where

        if !policy_present {
            let mut m = HashMap::new();
            if let Err(err) = self.api.load_mapped_policy(name, UserType::Reg, true, &mut m).await {
                if !is_err_no_such_policy(&err) {
                    return Err(err);
                }
            if let Err(err) = self.api.load_mapped_policy(name, UserType::Reg, true, &mut m).await
                && !is_err_no_such_policy(&err)
            {
                return Err(err);
            }
            if let Some(p) = m.get(name) {
                Cache::add_or_update(&self.cache.group_policies, name, p, OffsetDateTime::now_utc());
@@ -815,10 +815,10 @@ where
            Some(p) => p.clone(),
            None => {
                let mut m = HashMap::new();
                if let Err(err) = self.api.load_mapped_policy(name, UserType::Reg, false, &mut m).await {
                    if !is_err_no_such_policy(&err) {
                        return Err(err);
                    }
                if let Err(err) = self.api.load_mapped_policy(name, UserType::Reg, false, &mut m).await
                    && !is_err_no_such_policy(&err)
                {
                    return Err(err);
                }
                if let Some(p) = m.get(name) {
                    Cache::add_or_update(&self.cache.user_policies, name, p, OffsetDateTime::now_utc());
@@ -828,10 +828,10 @@ where
            Some(p) => p.clone(),
            None => {
                let mut m = HashMap::new();
                if let Err(err) = self.api.load_mapped_policy(name, UserType::Sts, false, &mut m).await {
                    if !is_err_no_such_policy(&err) {
                        return Err(err);
                    }
                if let Err(err) = self.api.load_mapped_policy(name, UserType::Sts, false, &mut m).await
                    && !is_err_no_such_policy(&err)
                {
                    return Err(err);
                }
                if let Some(p) = m.get(name) {
                    Cache::add_or_update(&self.cache.sts_policies, name, p, OffsetDateTime::now_utc());
@@ -864,10 +864,10 @@ where
            Some(p) => p.clone(),
            None => {
                let mut m = HashMap::new();
                if let Err(err) = self.api.load_mapped_policy(group, UserType::Reg, true, &mut m).await {
                    if !is_err_no_such_policy(&err) {
                        return Err(err);
                    }
                if let Err(err) = self.api.load_mapped_policy(group, UserType::Reg, true, &mut m).await
                    && !is_err_no_such_policy(&err)
                {
                    return Err(err);
                }
                if let Some(p) = m.get(group) {
                    Cache::add_or_update(&self.cache.group_policies, group, p, OffsetDateTime::now_utc());
@@ -910,10 +910,10 @@ where
            Some(p) => p.clone(),
            None => {
                let mut m = HashMap::new();
                if let Err(err) = self.api.load_mapped_policy(group, UserType::Reg, true, &mut m).await {
                    if !is_err_no_such_policy(&err) {
                        return Err(err);
                    }
                if let Err(err) = self.api.load_mapped_policy(group, UserType::Reg, true, &mut m).await
                    && !is_err_no_such_policy(&err)
                {
                    return Err(err);
                }
                if let Some(p) = m.get(group) {
                    Cache::add_or_update(&self.cache.group_policies, group, p, OffsetDateTime::now_utc());
@@ -937,10 +937,10 @@ where
        }

        if policy.is_empty() {
            if let Err(err) = self.api.delete_mapped_policy(name, user_type, is_group).await {
                if !is_err_no_such_policy(&err) {
                    return Err(err);
                }
            if let Err(err) = self.api.delete_mapped_policy(name, user_type, is_group).await
                && !is_err_no_such_policy(&err)
            {
                return Err(err);
            }

            if is_group {
@@ -1220,10 +1220,10 @@ where

        Cache::delete(&self.cache.user_policies, access_key, OffsetDateTime::now_utc());

        if let Err(err) = self.api.delete_user_identity(access_key, utype).await {
            if !is_err_no_such_user(&err) {
                return Err(err);
            }
        if let Err(err) = self.api.delete_user_identity(access_key, utype).await
            && !is_err_no_such_user(&err)
        {
            return Err(err);
        }

        if utype == UserType::Sts {
@@ -1258,6 +1258,28 @@ where
        self.update_user_with_claims(access_key, u)
    }

    /// Add SSH public key for a user (for SFTP authentication)
    pub async fn add_user_ssh_public_key(&self, access_key: &str, public_key: &str) -> Result<()> {
        if access_key.is_empty() || public_key.is_empty() {
            return Err(Error::InvalidArgument);
        }

        let users = self.cache.users.load();
        let u = match users.get(access_key) {
            Some(u) => u,
            None => return Err(Error::NoSuchUser(access_key.to_string())),
        };

        let mut user_identity = u.clone();
        user_identity.add_ssh_public_key(public_key);

        self.api
            .save_user_identity(access_key, UserType::Reg, user_identity.clone(), None)
            .await?;

        self.update_user_with_claims(access_key, user_identity)
    }

    pub async fn set_user_status(&self, access_key: &str, status: AccountStatus) -> Result<OffsetDateTime> {
        if access_key.is_empty() {
            return Err(Error::InvalidArgument);
@@ -1510,16 +1532,16 @@ where
        }

        if members.is_empty() {
            if let Err(err) = self.api.delete_mapped_policy(group, UserType::Reg, true).await {
                if !is_err_no_such_policy(&err) {
                    return Err(err);
                }
            if let Err(err) = self.api.delete_mapped_policy(group, UserType::Reg, true).await
                && !is_err_no_such_policy(&err)
            {
                return Err(err);
            }

            if let Err(err) = self.api.delete_group_info(group).await {
                if !is_err_no_such_group(&err) {
                    return Err(err);
                }
            if let Err(err) = self.api.delete_group_info(group).await
                && !is_err_no_such_group(&err)
            {
                return Err(err);
            }

            Cache::delete(&self.cache.groups, group, OffsetDateTime::now_utc());
@@ -1669,10 +1691,10 @@ where
        let member_of = self.cache.user_group_memberships.load();
        if let Some(m) = member_of.get(name) {
            for group in m.iter() {
                if let Err(err) = self.remove_members_from_group(group, vec![name.to_string()], true).await {
                    if !is_err_no_such_group(&err) {
                        return Err(err);
                    }
                if let Err(err) = self.remove_members_from_group(group, vec![name.to_string()], true).await
                    && !is_err_no_such_group(&err)
                {
                    return Err(err);
                }
            }
        }
@@ -1837,11 +1859,11 @@ fn filter_policies(cache: &Cache, policy_name: &str, bucket_name: &str) -> (Stri
            continue;
        }

        if let Some(p) = cache.policy_docs.load().get(&policy) {
            if bucket_name.is_empty() || pollster::block_on(p.policy.match_resource(bucket_name)) {
                policies.push(policy);
                to_merge.push(p.policy.clone());
            }
        if let Some(p) = cache.policy_docs.load().get(&policy)
            && (bucket_name.is_empty() || pollster::block_on(p.policy.match_resource(bucket_name)))
        {
            policies.push(policy);
            to_merge.push(p.policy.clone());
        }
    }

@@ -633,10 +633,10 @@ impl Store for ObjectStore {

        if let Some(item) = v.item {
            let name = rustfs_utils::path::dir(&item);
            if let Err(err) = self.load_group(&name, m).await {
                if !is_err_no_such_group(&err) {
                    return Err(err);
                }
            if let Err(err) = self.load_group(&name, m).await
                && !is_err_no_such_group(&err)
            {
                return Err(err);
            }
        }
    }
@@ -936,10 +936,10 @@ impl Store for ObjectStore {
            let name = item.trim_end_matches(".json");

            info!("load group policy: {}", name);
            if let Err(err) = self.load_mapped_policy(name, UserType::Reg, true, &mut items_cache).await {
                if !is_err_no_such_policy(&err) {
                    return Err(Error::other(format!("load group policy failed: {err}")));
                }
            if let Err(err) = self.load_mapped_policy(name, UserType::Reg, true, &mut items_cache).await
                && !is_err_no_such_policy(&err)
            {
                return Err(Error::other(format!("load group policy failed: {err}")));
            };
        }

@@ -955,10 +955,10 @@ impl Store for ObjectStore {
        for item in item_name_list.iter() {
            let name = rustfs_utils::path::dir(item);
            info!("load svc user: {}", name);
            if let Err(err) = self.load_user(&name, UserType::Svc, &mut items_cache).await {
                if !is_err_no_such_user(&err) {
                    return Err(Error::other(format!("load svc user failed: {err}")));
                }
            if let Err(err) = self.load_user(&name, UserType::Svc, &mut items_cache).await
                && !is_err_no_such_user(&err)
            {
                return Err(Error::other(format!("load svc user failed: {err}")));
            };
        }

@@ -969,10 +969,9 @@ impl Store for ObjectStore {
            if let Err(err) = self
                .load_mapped_policy(&parent, UserType::Sts, false, &mut sts_policies_cache)
                .await
                && !is_err_no_such_policy(&err)
            {
                if !is_err_no_such_policy(&err) {
                    return Err(Error::other(format!("load_mapped_policy failed: {err}")));
                }
                return Err(Error::other(format!("load_mapped_policy failed: {err}")));
            }
        }
    }

@@ -203,13 +203,13 @@ impl<T: Store> IamSys<T> {
    pub async fn set_policy(&self, name: &str, policy: Policy) -> Result<OffsetDateTime> {
        let updated_at = self.store.set_policy(name, policy).await?;

        if !self.has_watcher() {
            if let Some(notification_sys) = get_global_notification_sys() {
                let resp = notification_sys.load_policy(name).await;
                for r in resp {
                    if let Some(err) = r.err {
                        warn!("notify load_policy failed: {}", err);
                    }
        if !self.has_watcher()
            && let Some(notification_sys) = get_global_notification_sys()
        {
            let resp = notification_sys.load_policy(name).await;
            for r in resp {
                if let Some(err) = r.err {
                    warn!("notify load_policy failed: {}", err);
                }
            }
        }
@@ -232,13 +232,14 @@ impl<T: Store> IamSys<T> {
    pub async fn delete_user(&self, name: &str, notify: bool) -> Result<()> {
        self.store.delete_user(name, UserType::Reg).await?;

        if notify && !self.has_watcher() {
            if let Some(notification_sys) = get_global_notification_sys() {
                let resp = notification_sys.delete_user(name).await;
                for r in resp {
                    if let Some(err) = r.err {
                        warn!("notify delete_user failed: {}", err);
                    }
        if notify
            && !self.has_watcher()
            && let Some(notification_sys) = get_global_notification_sys()
        {
            let resp = notification_sys.delete_user(name).await;
            for r in resp {
                if let Some(err) = r.err {
                    warn!("notify delete_user failed: {}", err);
                }
            }
        }
@@ -476,13 +477,12 @@ impl<T: Store> IamSys<T> {

        let op_pt = claims.get(&iam_policy_claim_name_sa());
        let op_sp = claims.get(SESSION_POLICY_NAME);
        if let (Some(pt), Some(sp)) = (op_pt, op_sp) {
            if pt == EMBEDDED_POLICY_TYPE {
                let policy = serde_json::from_slice(
                    &base64_simd::URL_SAFE_NO_PAD.decode_to_vec(sp.as_str().unwrap_or_default().as_bytes())?,
                )?;
                return Ok((sa, Some(policy)));
            }
        if let (Some(pt), Some(sp)) = (op_pt, op_sp)
            && pt == EMBEDDED_POLICY_TYPE
        {
            let policy =
                serde_json::from_slice(&base64_simd::URL_SAFE_NO_PAD.decode_to_vec(sp.as_str().unwrap_or_default().as_bytes())?)?;
            return Ok((sa, Some(policy)));
        }

        Ok((sa, None))
@@ -537,13 +537,12 @@ impl<T: Store> IamSys<T> {

        let op_pt = claims.get(&iam_policy_claim_name_sa());
        let op_sp = claims.get(SESSION_POLICY_NAME);
        if let (Some(pt), Some(sp)) = (op_pt, op_sp) {
            if pt == EMBEDDED_POLICY_TYPE {
                let policy = serde_json::from_slice(
                    &base64_simd::URL_SAFE_NO_PAD.decode_to_vec(sp.as_str().unwrap_or_default().as_bytes())?,
                )?;
                return Ok((sa, Some(policy)));
            }
        if let (Some(pt), Some(sp)) = (op_pt, op_sp)
            && pt == EMBEDDED_POLICY_TYPE
        {
            let policy =
                serde_json::from_slice(&base64_simd::URL_SAFE_NO_PAD.decode_to_vec(sp.as_str().unwrap_or_default().as_bytes())?)?;
            return Ok((sa, Some(policy)));
        }

        Ok((sa, None))
@@ -572,13 +571,14 @@ impl<T: Store> IamSys<T> {

        self.store.delete_user(access_key, UserType::Svc).await?;

        if notify && !self.has_watcher() {
            if let Some(notification_sys) = get_global_notification_sys() {
                let resp = notification_sys.delete_service_account(access_key).await;
                for r in resp {
                    if let Some(err) = r.err {
                        warn!("notify delete_service_account failed: {}", err);
                    }
        if notify
            && !self.has_watcher()
            && let Some(notification_sys) = get_global_notification_sys()
        {
            let resp = notification_sys.delete_service_account(access_key).await;
            for r in resp {
                if let Some(err) = r.err {
                    warn!("notify delete_service_account failed: {}", err);
                }
            }
        }
@@ -637,11 +637,24 @@ impl<T: Store> IamSys<T> {
        self.store.update_user_secret_key(access_key, secret_key).await
    }

    /// Add SSH public key for a user (for SFTP authentication)
    pub async fn add_user_ssh_public_key(&self, access_key: &str, public_key: &str) -> Result<()> {
        if !is_access_key_valid(access_key) {
            return Err(IamError::InvalidAccessKeyLength);
        }

        if public_key.is_empty() {
            return Err(IamError::InvalidArgument);
        }

        self.store.add_user_ssh_public_key(access_key, public_key).await
    }

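Editor's note: the new SFTP hook validates the access key format and rejects empty key material before delegating to the store-layer method shown earlier. A hedged usage sketch (credentials and key string are placeholders):

    // Assuming iam: IamSys<impl Store> is already initialized:
    iam.add_user_ssh_public_key("rustfsadmin", "ssh-ed25519 AAAAC3... user@host").await?;
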
    pub async fn check_key(&self, access_key: &str) -> Result<(Option<UserIdentity>, bool)> {
        if let Some(sys_cred) = get_global_action_cred() {
            if sys_cred.access_key == access_key {
                return Ok((Some(UserIdentity::new(sys_cred)), true));
            }
        if let Some(sys_cred) = get_global_action_cred()
            && sys_cred.access_key == access_key
        {
            return Ok((Some(UserIdentity::new(sys_cred)), true));
        }

        match self.store.get_user(access_key).await {
@@ -712,13 +725,13 @@ impl<T: Store> IamSys<T> {
    pub async fn policy_db_set(&self, name: &str, user_type: UserType, is_group: bool, policy: &str) -> Result<OffsetDateTime> {
        let updated_at = self.store.policy_db_set(name, user_type, is_group, policy).await?;

        if !self.has_watcher() {
            if let Some(notification_sys) = get_global_notification_sys() {
                let resp = notification_sys.load_policy_mapping(name, user_type.to_u64(), is_group).await;
                for r in resp {
                    if let Some(err) = r.err {
                        warn!("notify load_policy failed: {}", err);
                    }
        if !self.has_watcher()
            && let Some(notification_sys) = get_global_notification_sys()
        {
            let resp = notification_sys.load_policy_mapping(name, user_type.to_u64(), is_group).await;
            for r in resp {
                if let Some(err) = r.err {
                    warn!("notify load_policy failed: {}", err);
                }
            }
        }

@@ -452,27 +452,25 @@ impl KmsClient for LocalKmsClient {
        }

        let path = entry.path();
        if path.extension().is_some_and(|ext| ext == "key") {
            if let Some(stem) = path.file_stem() {
                if let Some(key_id) = stem.to_str() {
                    if let Ok(key_info) = self.describe_key(key_id, None).await {
                        // Apply filters
                        if let Some(ref status_filter) = request.status_filter {
                            if &key_info.status != status_filter {
                                continue;
                            }
                        }
                        if let Some(ref usage_filter) = request.usage_filter {
                            if &key_info.usage != usage_filter {
                                continue;
                            }
                        }

                        keys.push(key_info);
                        count += 1;
                    }
                }
        if path.extension().is_some_and(|ext| ext == "key")
            && let Some(stem) = path.file_stem()
            && let Some(key_id) = stem.to_str()
            && let Ok(key_info) = self.describe_key(key_id, None).await
        {
            // Apply filters
            if let Some(ref status_filter) = request.status_filter
                && &key_info.status != status_filter
            {
                continue;
            }
            if let Some(ref usage_filter) = request.usage_filter
                && &key_info.usage != usage_filter
            {
                continue;
            }

            keys.push(key_info);
            count += 1;
        }
    }

@@ -279,14 +279,13 @@ impl KmsConfig {
        }

        // Validate TLS configuration if using HTTPS
        if config.address.starts_with("https://") {
            if let Some(ref tls) = config.tls {
                if !tls.skip_verify {
                    // In production, we should have proper TLS configuration
                    if tls.ca_cert_path.is_none() && tls.client_cert_path.is_none() {
                        tracing::warn!("Using HTTPS without custom TLS configuration - relying on system CA");
                    }
                }
        if config.address.starts_with("https://")
            && let Some(ref tls) = config.tls
            && !tls.skip_verify
        {
            // In production, we should have proper TLS configuration
            if tls.ca_cert_path.is_none() && tls.client_cert_path.is_none() {
                tracing::warn!("Using HTTPS without custom TLS configuration - relying on system CA");
            }
        }

Some files were not shown because too many files have changed in this diff.