Compare commits

..

14 Commits

Author SHA1 Message Date
loverustfs
bebd78fbbb Add GNU to build.yml (#275)
* fix unzip error

* fix url change error

fix url change error

* Simplify user experience and integrate console and endpoint

Simplify user experience and integrate console and endpoint

* Add gnu to build.yml
2025-07-21 16:58:29 +08:00
houseme
3f095e75cb improve code for logger and fix typo (#272) 2025-07-21 15:20:36 +08:00
houseme
f7d30da9e0 fix typo (#267)
* fix typo

* cargo fmt
2025-07-20 00:11:15 +08:00
Chrislearn Young
823d4b6f79 Add typos github actions and fix typos (#265)
* Add typo github actions and fix typos

* cargo fmt
2025-07-19 22:08:50 +08:00
安正超
051ea7786f fix: ossutil install command. (#263) 2025-07-19 18:21:31 +08:00
安正超
42b645e355 fix: robust Dockerfile version logic for v prefix handling (#262)
* fix: robust Dockerfile version logic for v prefix handling

* wip
2025-07-19 15:50:15 +08:00
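An illustrative sketch of the "v"-prefix normalization this commit targets, using shell parameter expansion (the values below are assumed examples, not the actual Dockerfile code):

RELEASE="1.0.0"          # build argument; may arrive with or without a leading "v"
VERSION="v${RELEASE#v}"  # strip any existing "v", then add exactly one -> "v1.0.0"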
安正超
f27ee96014 feat: enhance entrypoint and Dockerfiles for flexible volume and permission management (#260)
* feat: enhance entrypoint and Dockerfiles for flexible volume and permission management

- Support batch mount and permission fix in entrypoint.sh
- Add coreutils/shadow (alpine) and coreutils/passwd (ubuntu) for UID/GID/ownership
- Use ENTRYPOINT for unified startup
- Make local dev and prod Dockerfile behavior consistent
- Improve security and user experience

BREAKING CHANGE: entrypoint.sh and Dockerfile now require additional packages for permission management, and support batch volume mount via RUSTFS_VOLUMES.

* chore: update Dockerfile comments to English only

* fix(entrypoint): improve local/remote volume detection and permission logic in entrypoint.sh

* Update entrypoint.sh

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update entrypoint.sh

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* Update Dockerfile

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-07-19 11:48:46 +08:00
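A minimal sketch of the batch-mount and permission logic this commit describes, assuming a space-separated RUSTFS_VOLUMES list and the rustfs user and gosu that the Dockerfile changes below install; the real entrypoint.sh may differ:

#!/bin/bash
set -e
# Ensure every configured volume exists and is owned by the service user.
# Relies on word splitting of a space-separated list (an assumption).
for vol in ${RUSTFS_VOLUMES:-/data}; do
    mkdir -p "$vol"
    if [ "$(id -u)" = "0" ]; then
        chown -R rustfs:rustfs "$vol"   # fix ownership only when starting as root
    fi
done
# Drop privileges before launching the command passed as CMD.
if [ "$(id -u)" = "0" ]; then
    exec gosu rustfs "$@"
fi
exec "$@"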
houseme
20cd117aa6 improve code for dockerfile (#256)
* improve code for dockerfile

* Update Dockerfile

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

* improve code for file name

* improve code for dockerfile

* fix

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-07-18 15:53:00 +08:00
houseme
fc8931d69f improve code for dockerfile (#253)
* improve code for dockerfile

* Update Dockerfile

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>

---------

Co-authored-by: Copilot <175728472+Copilot@users.noreply.github.com>
2025-07-18 11:05:00 +08:00
weisd
0167b2decd fix: optimize RPC connection management and prevent race conditions (#252) 2025-07-18 10:41:00 +08:00
weisd
e67980ff3c Fix/range content length (#251)
* fix:getobject range length
2025-07-17 23:25:21 +08:00
weisd
96760bba5a fix:getobject range length (#250) 2025-07-17 23:14:19 +08:00
overtrue
2501d7d241 fix: remove branch restriction from Docker workflow_run trigger
The Docker workflow was not triggering for tag-based releases because it had a
'branches: [main]' restriction in the workflow_run configuration. When pushing
tags, the triggering workflow runs on the tag, not on the main branch.

Changes:
- Remove 'branches: [main]' from workflow_run trigger
- Simplify tag detection using github.event.workflow_run context instead of API calls
- Use official workflow_run event properties (head_branch, event) for reliable detection
- Support both 'refs/tags/VERSION' and direct 'VERSION' formats
- Add better logging for debugging workflow trigger issues

This fixes the issue where Docker images were not built for tagged releases.
2025-07-17 08:13:34 +08:00
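A sketch of the head_branch-based detection this message describes, using the workflow_run properties it names (a simplified shell step; error handling and outputs omitted):

head_branch="${{ github.event.workflow_run.head_branch }}"   # e.g. "refs/tags/v1.0.0", "1.0.0", or "main"
if [[ "$head_branch" == refs/tags/* ]]; then
    version="${head_branch#refs/tags/}"                       # refs/tags/VERSION form
elif [[ "$head_branch" =~ ^v?[0-9]+\.[0-9]+\.[0-9]+ ]]; then
    version="$head_branch"                                     # direct VERSION form
else
    echo "Skipping Docker build: '$head_branch' is not a tag"
    exit 0
fi
version="${version#v}"                                         # normalize away any leading "v"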
overtrue
55b84262b5 fix: use GitHub API for reliable tag detection in Docker workflow
- Replace git commands with GitHub API calls for tag detection
- Add proper commit checkout for workflow_run events
- Use gh CLI and curl fallback for better reliability
- Add debug output to help troubleshoot tag detection issues

This should fix the issue where Docker builds were not triggered for tagged releases
due to missing tag information in the workflow_run environment.
2025-07-17 08:01:33 +08:00
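An illustrative sketch of the API-based detection this (since-superseded) commit describes: look up tags pointing at the triggering commit via the gh CLI, with a curl fallback (pagination ignored; variable names assumed):

sha="${{ github.event.workflow_run.head_sha }}"
if command -v gh >/dev/null 2>&1; then
    tag=$(gh api "repos/${GITHUB_REPOSITORY}/tags" \
          --jq ".[] | select(.commit.sha==\"${sha}\") | .name" | head -n1)
else
    tag=$(curl -fsSL "https://api.github.com/repos/${GITHUB_REPOSITORY}/tags" \
          | jq -r ".[] | select(.commit.sha==\"${sha}\") | .name" | head -n1)
fi
echo "Detected tag: ${tag:-<none>}"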
51 changed files with 753 additions and 497 deletions

View File

@@ -172,6 +172,14 @@ jobs:
target: aarch64-unknown-linux-musl
cross: true
platform: linux
- os: ubuntu-latest
target: x86_64-unknown-linux-gnu
cross: false
platform: linux
- os: ubuntu-latest
target: aarch64-unknown-linux-gnu
cross: true
platform: linux
# macOS builds
- os: macos-latest
target: aarch64-apple-darwin
@@ -181,15 +189,15 @@ jobs:
target: x86_64-apple-darwin
cross: false
platform: macos
# # Windows builds (temporarily disabled)
# - os: windows-latest
# target: x86_64-pc-windows-msvc
# cross: false
# platform: windows
# - os: windows-latest
# target: aarch64-pc-windows-msvc
# cross: true
# platform: windows
# Windows builds (temporarily disabled)
- os: windows-latest
target: x86_64-pc-windows-msvc
cross: false
platform: windows
- os: windows-latest
target: aarch64-pc-windows-msvc
cross: true
platform: windows
steps:
- name: Checkout repository
uses: actions/checkout@v4
@@ -658,13 +666,15 @@ jobs:
update-latest-version:
name: Update Latest Version
needs: [build-check, upload-release-assets]
if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.is_prerelease == 'false'
if: startsWith(github.ref, 'refs/tags/')
runs-on: ubuntu-latest
steps:
- name: Update latest.json
env:
OSS_ACCESS_KEY_ID: ${{ secrets.ALICLOUDOSS_KEY_ID }}
OSS_ACCESS_KEY_SECRET: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
OSS_REGION: cn-beijing
OSS_ENDPOINT: https://oss-cn-beijing.aliyuncs.com
run: |
if [[ -z "$OSS_ACCESS_KEY_ID" ]]; then
echo "⚠️ OSS credentials not available, skipping latest.json update"
@@ -681,7 +691,9 @@ jobs:
curl -o "$OSSUTIL_ZIP" "https://gosspublic.alicdn.com/ossutil/v2/${OSSUTIL_VERSION}/${OSSUTIL_ZIP}"
unzip "$OSSUTIL_ZIP"
chmod +x "${OSSUTIL_DIR}/ossutil"
mv "${OSSUTIL_DIR}/ossutil" /usr/local/bin/
rm -rf "$OSSUTIL_DIR" "$OSSUTIL_ZIP"
chmod +x /usr/local/bin/ossutil
# Create latest.json
cat > latest.json << EOF
@@ -695,7 +707,7 @@ jobs:
EOF
# Upload to OSS
./${OSSUTIL_DIR}/ossutil cp latest.json oss://rustfs-version/latest.json --force
ossutil cp latest.json oss://rustfs-version/latest.json --force
echo "✅ Updated latest.json for stable release $VERSION"

View File

@@ -83,6 +83,16 @@ jobs:
# Never skip release events and tag pushes
do_not_skip: '["workflow_dispatch", "schedule", "merge_group", "release", "push"]'
typos:
name: Typos
runs-on: ubuntu-latest
steps:
- uses: actions/checkout@v4
- uses: dtolnay/rust-toolchain@stable
- name: Typos check with custom config file
uses: crate-ci/typos@master
test-and-lint:
name: Test and Lint
needs: skip-check

View File

@@ -38,7 +38,6 @@ on:
workflow_run:
workflows: ["Build and Release"]
types: [completed]
branches: [main]
# Manual trigger with same parameters for consistency
workflow_dispatch:
inputs:
@@ -83,6 +82,8 @@ jobs:
uses: actions/checkout@v4
with:
fetch-depth: 0
# For workflow_run events, checkout the specific commit that triggered the workflow
ref: ${{ github.event.workflow_run.head_sha || github.sha }}
- name: Check build conditions
id: check
@@ -114,51 +115,62 @@ jobs:
# Use Git to generate consistent short SHA (ensures uniqueness like build.yml)
short_sha=$(git rev-parse --short "${{ github.event.workflow_run.head_sha }}")
# Determine build type based on event type and git refs
# Check if this is a tag push (release build)
if [[ "${{ github.event.workflow_run.event }}" == "push" ]]; then
# Get git refs to determine if this is a tag or branch push
git_ref="${{ github.event.workflow_run.head_branch }}"
# Determine build type based on triggering workflow event and ref
triggering_event="${{ github.event.workflow_run.event }}"
head_branch="${{ github.event.workflow_run.head_branch }}"
# Check if this is a tag push by looking at the git ref
if git show-ref --tags | grep -q "${{ github.event.workflow_run.head_sha }}"; then
# This commit has tags, extract the tag name
tag_name=$(git tag --points-at "${{ github.event.workflow_run.head_sha }}" | head -n1)
if [[ -n "$tag_name" ]]; then
version="$tag_name"
# Remove 'v' prefix if present for consistent version format
if [[ "$version" == v* ]]; then
version="${version#v}"
fi
echo "🔍 Analyzing triggering workflow:"
echo " 📋 Event: $triggering_event"
echo " 🌿 Head branch: $head_branch"
echo " 📎 Head SHA: ${{ github.event.workflow_run.head_sha }}"
if [[ "$version" == *"alpha"* ]] || [[ "$version" == *"beta"* ]] || [[ "$version" == *"rc"* ]]; then
build_type="prerelease"
is_prerelease=true
echo "🧪 Building Docker image for prerelease: $version"
else
build_type="release"
create_latest=true
echo "🚀 Building Docker image for release: $version"
fi
else
# Regular branch push
build_type="development"
version="dev-${short_sha}"
should_build=false
echo "⏭️ Skipping Docker build for development version (branch push)"
fi
else
# Regular branch push
# Check if this was triggered by a tag push
if [[ "$triggering_event" == "push" ]]; then
# For tag pushes, head_branch will be like "refs/tags/v1.0.0" or just "v1.0.0"
if [[ "$head_branch" == refs/tags/* ]]; then
# Extract tag name from refs/tags/TAG_NAME
tag_name="${head_branch#refs/tags/}"
version="$tag_name"
elif [[ "$head_branch" =~ ^v?[0-9]+\.[0-9]+\.[0-9]+ ]]; then
# Direct tag name like "v1.0.0" or "1.0.0-alpha.1"
version="$head_branch"
elif [[ "$head_branch" == "main" ]]; then
# Regular branch push to main
build_type="development"
version="dev-${short_sha}"
should_build=false
echo "⏭️ Skipping Docker build for development version (branch push)"
echo "⏭️ Skipping Docker build for development version (main branch push)"
else
# Other branch push
build_type="development"
version="dev-${short_sha}"
should_build=false
echo "⏭️ Skipping Docker build for development version (branch: $head_branch)"
fi
# If we extracted a version (tag), determine release type
if [[ -n "$version" ]] && [[ "$version" != "dev-${short_sha}" ]]; then
# Remove 'v' prefix if present for consistent version format
if [[ "$version" == v* ]]; then
version="${version#v}"
fi
if [[ "$version" == *"alpha"* ]] || [[ "$version" == *"beta"* ]] || [[ "$version" == *"rc"* ]]; then
build_type="prerelease"
is_prerelease=true
echo "🧪 Building Docker image for prerelease: $version"
else
build_type="release"
create_latest=true
echo "🚀 Building Docker image for release: $version"
fi
fi
else
# Non-push events
build_type="development"
version="dev-${short_sha}"
should_build=false
echo "⏭️ Skipping Docker build for development version (non-push event)"
echo "⏭️ Skipping Docker build for development version (event: $triggering_event)"
fi
echo "🔄 Build triggered by workflow_run:"

247
Cargo.lock generated
View File

@@ -455,7 +455,10 @@ dependencies = [
"serde_repr",
"tokio",
"url",
"zbus 5.8.0",
"wayland-backend",
"wayland-client",
"wayland-protocols",
"zbus 5.9.0",
]
[[package]]
@@ -512,7 +515,7 @@ dependencies = [
"futures-lite",
"parking",
"polling",
"rustix 1.0.7",
"rustix 1.0.8",
"slab",
"tracing",
"windows-sys 0.59.0",
@@ -544,7 +547,7 @@ dependencies = [
"cfg-if",
"event-listener",
"futures-lite",
"rustix 1.0.7",
"rustix 1.0.8",
"tracing",
]
@@ -571,7 +574,7 @@ dependencies = [
"cfg-if",
"futures-core",
"futures-io",
"rustix 1.0.7",
"rustix 1.0.8",
"signal-hook-registry",
"slab",
"windows-sys 0.59.0",
@@ -673,9 +676,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"
[[package]]
name = "aws-credential-types"
version = "1.2.3"
version = "1.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "687bc16bc431a8533fe0097c7f0182874767f920989d7260950172ae8e3c4465"
checksum = "b68c2194a190e1efc999612792e25b1ab3abfefe4306494efaaabc25933c0cbe"
dependencies = [
"aws-smithy-async",
"aws-smithy-runtime-api",
@@ -708,9 +711,9 @@ dependencies = [
[[package]]
name = "aws-runtime"
version = "1.5.8"
version = "1.5.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4f6c68419d8ba16d9a7463671593c54f81ba58cab466e9b759418da606dcc2e2"
checksum = "b2090e664216c78e766b6bac10fe74d2f451c02441d43484cd76ac9a295075f7"
dependencies = [
"aws-credential-types",
"aws-sigv4",
@@ -733,9 +736,9 @@ dependencies = [
[[package]]
name = "aws-sdk-s3"
version = "1.96.0"
version = "1.98.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e25d24de44b34dcdd5182ac4e4c6f07bcec2661c505acef94c0d293b65505fe"
checksum = "029e89cae7e628553643aecb3a3f054a0a0912ff0fd1f5d6a0b4fda421dce64b"
dependencies = [
"aws-credential-types",
"aws-runtime",
@@ -806,9 +809,9 @@ dependencies = [
[[package]]
name = "aws-smithy-checksums"
version = "0.63.4"
version = "0.63.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "244f00666380d35c1c76b90f7b88a11935d11b84076ac22a4c014ea0939627af"
checksum = "5ab9472f7a8ec259ddb5681d2ef1cb1cf16c0411890063e67cdc7b62562cc496"
dependencies = [
"aws-smithy-http",
"aws-smithy-types",
@@ -826,9 +829,9 @@ dependencies = [
[[package]]
name = "aws-smithy-eventstream"
version = "0.60.9"
version = "0.60.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "338a3642c399c0a5d157648426110e199ca7fd1c689cc395676b81aa563700c4"
checksum = "604c7aec361252b8f1c871a7641d5e0ba3a7f5a586e51b66bc9510a5519594d9"
dependencies = [
"aws-smithy-types",
"bytes",
@@ -837,9 +840,9 @@ dependencies = [
[[package]]
name = "aws-smithy-http"
version = "0.62.1"
version = "0.62.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "99335bec6cdc50a346fda1437f9fefe33abf8c99060739a546a16457f2862ca9"
checksum = "43c82ba4cab184ea61f6edaafc1072aad3c2a17dcf4c0fce19ac5694b90d8b5f"
dependencies = [
"aws-smithy-eventstream",
"aws-smithy-runtime-api",
@@ -865,7 +868,7 @@ dependencies = [
"aws-smithy-async",
"aws-smithy-runtime-api",
"aws-smithy-types",
"h2 0.3.26",
"h2 0.3.27",
"h2 0.4.11",
"http 0.2.12",
"http 1.3.1",
@@ -1574,23 +1577,12 @@ dependencies = [
[[package]]
name = "chrono-tz"
version = "0.10.3"
version = "0.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "efdce149c370f133a071ca8ef6ea340b7b88748ab0810097a9e2976eaa34b4f3"
checksum = "a6139a8597ed92cf816dfb33f5dd6cf0bb93a6adc938f11039f371bc5bcd26c3"
dependencies = [
"chrono",
"chrono-tz-build",
"phf 0.11.3",
]
[[package]]
name = "chrono-tz-build"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f10f8c9340e31fc120ff885fcdb54a0b48e474bbd77cab557f0c30a3e569402"
dependencies = [
"parse-zoneinfo",
"phf_codegen 0.11.3",
"phf 0.12.1",
]
[[package]]
@@ -1850,18 +1842,18 @@ dependencies = [
[[package]]
name = "const-str"
version = "0.6.2"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e991226a70654b49d34de5ed064885f0bef0348a8e70018b8ff1ac80aa984a2"
checksum = "041fbfcf8e7054df725fb9985297e92422cdc80fcf313665f5ca3d761bb63f4c"
dependencies = [
"const-str-proc-macro",
]
[[package]]
name = "const-str-proc-macro"
version = "0.6.2"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0d1c4c3cb85e5856b34e829af0035d7154f8c2889b15bbf43c8a6c6786dcab5"
checksum = "f801882b7ecd4188f4bca0317f34e022d623590d85893d7024b18d14f2a3b40b"
dependencies = [
"proc-macro2",
"quote",
@@ -2031,9 +2023,9 @@ dependencies = [
[[package]]
name = "crc32fast"
version = "1.4.2"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3"
checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511"
dependencies = [
"cfg-if",
]
@@ -3384,18 +3376,6 @@ version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd0c93bb4b0c6d9b77f4435b0ae98c24d17f1c45b2ff844c6151a07256ca923b"
[[package]]
name = "dispatch2"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a0d569e003ff27784e0e14e4a594048698e0c0f0b66cabcb51511be55a7caa0"
dependencies = [
"bitflags 2.9.1",
"block2 0.6.1",
"libc",
"objc2 0.6.1",
]
[[package]]
name = "dispatch2"
version = "0.3.0"
@@ -3403,6 +3383,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "89a09f22a6c6069a18470eb92d2298acf25463f14256d24778e1230d789a2aec"
dependencies = [
"bitflags 2.9.1",
"block2 0.6.1",
"libc",
"objc2 0.6.1",
]
@@ -3417,6 +3399,15 @@ dependencies = [
"syn 2.0.104",
]
[[package]]
name = "dlib"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "330c60081dcc4c72131f8eb70510f1ac07223e5d4163db481a04a0befcffa412"
dependencies = [
"libloading 0.8.8",
]
[[package]]
name = "dlopen2"
version = "0.7.0"
@@ -3440,6 +3431,12 @@ dependencies = [
"syn 2.0.104",
]
[[package]]
name = "downcast-rs"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2"
[[package]]
name = "dpi"
version = "0.1.2"
@@ -4375,9 +4372,9 @@ dependencies = [
[[package]]
name = "h2"
version = "0.3.26"
version = "0.3.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8"
checksum = "0beca50380b1fc32983fc1cb4587bfa4bb9e78fc259aad4a0032d2080309222d"
dependencies = [
"bytes",
"fnv",
@@ -4623,7 +4620,7 @@ dependencies = [
"futures-channel",
"futures-core",
"futures-util",
"h2 0.3.26",
"h2 0.3.27",
"http 0.2.12",
"http-body 0.4.6",
"httparse",
@@ -6212,7 +6209,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1c10c2894a6fed806ade6027bcd50662746363a9589d3ec9d9bef30a4e4bc166"
dependencies = [
"bitflags 2.9.1",
"dispatch2 0.3.0",
"dispatch2",
"objc2 0.6.1",
]
@@ -6611,15 +6608,6 @@ dependencies = [
"zstd",
]
[[package]]
name = "parse-zoneinfo"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f2a05b18d44e2957b88f96ba460715e295bc1d7510468a2f3d3b44535d26c24"
dependencies = [
"regex",
]
[[package]]
name = "password-hash"
version = "0.5.0"
@@ -6728,11 +6716,11 @@ dependencies = [
[[package]]
name = "phf"
version = "0.11.3"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078"
checksum = "913273894cec178f401a31ec4b656318d95473527be05c0752cc41cdc32be8b7"
dependencies = [
"phf_shared 0.11.3",
"phf_shared 0.12.1",
]
[[package]]
@@ -6755,16 +6743,6 @@ dependencies = [
"phf_shared 0.10.0",
]
[[package]]
name = "phf_codegen"
version = "0.11.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aef8048c789fa5e851558d709946d6d79a8ff88c0440c587967f8e94bfb1216a"
dependencies = [
"phf_generator 0.11.3",
"phf_shared 0.11.3",
]
[[package]]
name = "phf_generator"
version = "0.8.0"
@@ -6836,6 +6814,15 @@ dependencies = [
"siphasher 1.0.1",
]
[[package]]
name = "phf_shared"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06005508882fb681fd97892ecff4b7fd0fee13ef1aa569f8695dae7ab9099981"
dependencies = [
"siphasher 1.0.1",
]
[[package]]
name = "pin-project"
version = "1.1.10"
@@ -6967,7 +6954,7 @@ dependencies = [
"concurrent-queue",
"hermit-abi",
"pin-project-lite",
"rustix 1.0.7",
"rustix 1.0.8",
"tracing",
"windows-sys 0.59.0",
]
@@ -7647,13 +7634,13 @@ dependencies = [
[[package]]
name = "rfd"
version = "0.15.3"
version = "0.15.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "80c844748fdc82aae252ee4594a89b6e7ebef1063de7951545564cbc4e57075d"
checksum = "ef2bee61e6cffa4635c72d7d81a84294e28f0930db0ddcb0f66d10244674ebed"
dependencies = [
"ashpd 0.11.0",
"block2 0.6.1",
"dispatch2 0.2.0",
"dispatch2",
"js-sys",
"log",
"objc2 0.6.1",
@@ -8107,7 +8094,7 @@ dependencies = [
"dirs",
"hex",
"keyring",
"rfd 0.15.3",
"rfd 0.15.4",
"rust-embed",
"rust-i18n",
"serde",
@@ -8443,15 +8430,15 @@ dependencies = [
[[package]]
name = "rustix"
version = "1.0.7"
version = "1.0.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266"
checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8"
dependencies = [
"bitflags 2.9.1",
"errno",
"libc",
"linux-raw-sys 0.9.4",
"windows-sys 0.59.0",
"windows-sys 0.60.2",
]
[[package]]
@@ -8675,6 +8662,12 @@ dependencies = [
"windows-sys 0.59.0",
]
[[package]]
name = "scoped-tls"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294"
[[package]]
name = "scopeguard"
version = "1.2.0"
@@ -9698,7 +9691,7 @@ dependencies = [
"fastrand",
"getrandom 0.3.3",
"once_cell",
"rustix 1.0.7",
"rustix 1.0.8",
"windows-sys 0.59.0",
]
@@ -10079,7 +10072,7 @@ dependencies = [
"serde_spanned",
"toml_datetime",
"toml_write",
"winnow 0.7.11",
"winnow 0.7.12",
]
[[package]]
@@ -10769,6 +10762,66 @@ dependencies = [
"web-sys",
]
[[package]]
name = "wayland-backend"
version = "0.3.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fe770181423e5fc79d3e2a7f4410b7799d5aab1de4372853de3c6aa13ca24121"
dependencies = [
"cc",
"downcast-rs",
"rustix 0.38.44",
"scoped-tls",
"smallvec",
"wayland-sys",
]
[[package]]
name = "wayland-client"
version = "0.31.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "978fa7c67b0847dbd6a9f350ca2569174974cd4082737054dbb7fbb79d7d9a61"
dependencies = [
"bitflags 2.9.1",
"rustix 0.38.44",
"wayland-backend",
"wayland-scanner",
]
[[package]]
name = "wayland-protocols"
version = "0.32.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "779075454e1e9a521794fed15886323ea0feda3f8b0fc1390f5398141310422a"
dependencies = [
"bitflags 2.9.1",
"wayland-backend",
"wayland-client",
"wayland-scanner",
]
[[package]]
name = "wayland-scanner"
version = "0.31.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "896fdafd5d28145fce7958917d69f2fd44469b1d4e861cb5961bcbeebc6d1484"
dependencies = [
"proc-macro2",
"quick-xml 0.37.5",
"quote",
]
[[package]]
name = "wayland-sys"
version = "0.31.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dbcebb399c77d5aa9fa5db874806ee7b4eba4e73650948e8f93963f128896615"
dependencies = [
"dlib",
"log",
"pkg-config",
]
[[package]]
name = "web-sys"
version = "0.3.77"
@@ -11440,9 +11493,9 @@ dependencies = [
[[package]]
name = "winnow"
version = "0.7.11"
version = "0.7.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74c7b26e3480b707944fc872477815d29a8e429d2f93a1ce000f5fa84a15cbcd"
checksum = "f3edebf492c8125044983378ecb5766203ad3b4c2f7a922bd7dd207f6d443e95"
dependencies = [
"memchr",
]
@@ -11542,7 +11595,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af3a19837351dc82ba89f8a125e22a3c475f05aba604acc023d62b2739ae2909"
dependencies = [
"libc",
"rustix 1.0.7",
"rustix 1.0.8",
]
[[package]]
@@ -11641,9 +11694,9 @@ dependencies = [
[[package]]
name = "zbus"
version = "5.8.0"
version = "5.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "597f45e98bc7e6f0988276012797855613cd8269e23b5be62cc4e5d28b7e515d"
checksum = "4bb4f9a464286d42851d18a605f7193b8febaf5b0919d71c6399b7b26e5b0aad"
dependencies = [
"async-broadcast",
"async-recursion",
@@ -11661,8 +11714,8 @@ dependencies = [
"tracing",
"uds_windows",
"windows-sys 0.59.0",
"winnow 0.7.11",
"zbus_macros 5.8.0",
"winnow 0.7.12",
"zbus_macros 5.9.0",
"zbus_names 4.2.0",
"zvariant 5.6.0",
]
@@ -11682,9 +11735,9 @@ dependencies = [
[[package]]
name = "zbus_macros"
version = "5.8.0"
version = "5.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5c8e4e14dcdd9d97a98b189cd1220f30e8394ad271e8c987da84f73693862c2"
checksum = "ef9859f68ee0c4ee2e8cde84737c78e3f4c54f946f2a38645d0d4c7a95327659"
dependencies = [
"proc-macro-crate 3.3.0",
"proc-macro2",
@@ -11714,7 +11767,7 @@ checksum = "7be68e64bf6ce8db94f63e72f0c7eb9a60d733f7e0499e628dfab0f84d6bcb97"
dependencies = [
"serde",
"static_assertions",
"winnow 0.7.11",
"winnow 0.7.12",
"zvariant 5.6.0",
]
@@ -11906,7 +11959,7 @@ dependencies = [
"enumflags2",
"serde",
"url",
"winnow 0.7.11",
"winnow 0.7.12",
"zvariant_derive 5.6.0",
"zvariant_utils 3.2.0",
]
@@ -11959,5 +12012,5 @@ dependencies = [
"serde",
"static_assertions",
"syn 2.0.104",
"winnow 0.7.11",
"winnow 0.7.12",
]

View File

@@ -108,8 +108,8 @@ cfg-if = "1.0.1"
chacha20poly1305 = { version = "0.10.1" }
chrono = { version = "0.4.41", features = ["serde"] }
clap = { version = "4.5.41", features = ["derive", "env"] }
const-str = { version = "0.6.2", features = ["std", "proc"] }
crc32fast = "1.4.2"
const-str = { version = "0.6.3", features = ["std", "proc"] }
crc32fast = "1.5.0"
criterion = { version = "0.5", features = ["html_reports"] }
dashmap = "6.1.0"
datafusion = "46.0.1"
@@ -196,7 +196,7 @@ reqwest = { version = "0.12.22", default-features = false, features = [
"json",
"blocking",
] }
rfd = { version = "0.15.3", default-features = false, features = [
rfd = { version = "0.15.4", default-features = false, features = [
"xdg-portal",
"tokio",
] }

View File

@@ -1,121 +1,95 @@
# Multi-stage build for RustFS production image
FROM alpine:latest AS build
# Build arguments - use TARGETPLATFORM for consistency with Dockerfile.source
ARG TARGETPLATFORM
ARG BUILDPLATFORM
# Build stage: Download and extract RustFS binary
FROM alpine:3.22 AS build
# Build arguments for platform and release
ARG TARGETARCH
ARG RELEASE=latest
# Install dependencies for downloading and verifying binaries
RUN apk add --no-cache \
ca-certificates \
curl \
bash \
wget \
unzip \
jq
# Install minimal dependencies for downloading and extracting
RUN apk add --no-cache ca-certificates curl unzip
# Create build directory
WORKDIR /build
# Map TARGETPLATFORM to architecture format used in builds
RUN case "${TARGETPLATFORM}" in \
"linux/amd64") ARCH="x86_64" ;; \
"linux/arm64") ARCH="aarch64" ;; \
*) echo "Unsupported platform: ${TARGETPLATFORM}" && exit 1 ;; \
# Detect architecture and download corresponding binary
RUN case "${TARGETARCH}" in \
amd64) ARCH="x86_64" ;; \
arm64) ARCH="aarch64" ;; \
*) echo "Unsupported architecture: ${TARGETARCH}" >&2 && exit 1 ;; \
esac && \
echo "ARCH=${ARCH}" > /build/arch.env
# Download rustfs binary from dl.rustfs.com (release channel only)
RUN . /build/arch.env && \
BASE_URL="https://dl.rustfs.com/artifacts/rustfs/release" && \
PLATFORM="linux" && \
if [ "${RELEASE}" = "latest" ]; then \
# Download latest release version \
PACKAGE_NAME="rustfs-${PLATFORM}-${ARCH}-latest.zip"; \
DOWNLOAD_URL="${BASE_URL}/${PACKAGE_NAME}"; \
echo "📥 Downloading latest release build: ${PACKAGE_NAME}"; \
VERSION="latest"; \
else \
# Download specific release version \
PACKAGE_NAME="rustfs-${PLATFORM}-${ARCH}-v${RELEASE}.zip"; \
DOWNLOAD_URL="${BASE_URL}/${PACKAGE_NAME}"; \
echo "📥 Downloading specific release version: ${PACKAGE_NAME}"; \
VERSION="v${RELEASE#v}"; \
fi && \
echo "🔗 Download URL: ${DOWNLOAD_URL}" && \
curl -f -L "${DOWNLOAD_URL}" -o /build/rustfs.zip && \
if [ ! -f /build/rustfs.zip ] || [ ! -s /build/rustfs.zip ]; then \
echo "❌ Failed to download binary package"; \
echo "💡 Make sure the package ${PACKAGE_NAME} exists"; \
echo "🔗 Check: ${DOWNLOAD_URL}"; \
exit 1; \
fi && \
unzip /build/rustfs.zip -d /build && \
BASE_URL="https://dl.rustfs.com/artifacts/rustfs/release" && \
PACKAGE_NAME="rustfs-linux-${ARCH}-${VERSION}.zip" && \
DOWNLOAD_URL="${BASE_URL}/${PACKAGE_NAME}" && \
echo "Downloading ${PACKAGE_NAME} from ${DOWNLOAD_URL}" >&2 && \
curl -f -L "${DOWNLOAD_URL}" -o rustfs.zip && \
unzip rustfs.zip -d /build && \
chmod +x /build/rustfs && \
rm /build/rustfs.zip && \
echo "✅ Successfully downloaded and extracted rustfs binary"
rm rustfs.zip || { echo "Failed to download or extract ${PACKAGE_NAME}" >&2; exit 1; }
# Runtime stage
FROM alpine:latest
# Runtime stage: Configure runtime environment
FROM alpine:3.22.1
# Set build arguments and labels
# Build arguments and labels
ARG RELEASE=latest
ARG BUILD_DATE
ARG VCS_REF
LABEL name="RustFS" \
vendor="RustFS Team" \
maintainer="RustFS Team <dev@rustfs.com>" \
version="${RELEASE}" \
release="${RELEASE}" \
build-date="${BUILD_DATE}" \
vcs-ref="${VCS_REF}" \
summary="RustFS is a high-performance distributed object storage system written in Rust, compatible with S3 API." \
description="RustFS is a high-performance distributed object storage software built using Rust. It supports erasure coding storage, multi-tenant management, observability, and other enterprise-level features." \
url="https://rustfs.com" \
license="Apache-2.0"
vendor="RustFS Team" \
maintainer="RustFS Team <dev@rustfs.com>" \
version="${RELEASE}" \
release="${RELEASE}" \
build-date="${BUILD_DATE}" \
vcs-ref="${VCS_REF}" \
summary="High-performance distributed object storage system compatible with S3 API" \
description="RustFS is a distributed object storage system written in Rust, supporting erasure coding, multi-tenant management, and observability." \
url="https://rustfs.com" \
license="Apache-2.0"
# Install runtime dependencies
RUN apk add --no-cache \
ca-certificates \
curl \
tzdata \
bash \
&& addgroup -g 1000 rustfs \
&& adduser -u 1000 -G rustfs -s /bin/sh -D rustfs
RUN echo "https://dl-cdn.alpinelinux.org/alpine/v3.20/community" >> /etc/apk/repositories && \
apk update && \
apk add --no-cache ca-certificates bash gosu coreutils shadow && \
addgroup -g 1000 rustfs && \
adduser -u 1000 -G rustfs -s /bin/bash -D rustfs
# Environment variables
ENV RUSTFS_ACCESS_KEY=rustfsadmin \
# Copy CA certificates and RustFS binary from build stage
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=build /build/rustfs /usr/bin/rustfs
# Copy entry point script
COPY entrypoint.sh /entrypoint.sh
# Set permissions
RUN chmod +x /usr/bin/rustfs /entrypoint.sh && \
mkdir -p /data /logs && \
chown rustfs:rustfs /data /logs && \
chmod 700 /data /logs
# Environment variables (credentials should be set via environment or secrets)
ENV RUSTFS_ADDRESS=:9000 \
RUSTFS_ACCESS_KEY=rustfsadmin \
RUSTFS_SECRET_KEY=rustfsadmin \
RUSTFS_ADDRESS=":9000" \
RUSTFS_CONSOLE_ENABLE=true \
RUSTFS_VOLUMES=/data \
RUST_LOG=warn
# Set permissions for /usr/bin (similar to MinIO's approach)
RUN chmod -R 755 /usr/bin
# Copy CA certificates and binaries from build stage
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=build /build/rustfs /usr/bin/
# Set executable permissions
RUN chmod +x /usr/bin/rustfs
# Create data directory
RUN mkdir -p /data /config && chown -R rustfs:rustfs /data /config
# Switch to non-root user
USER rustfs
# Set working directory
WORKDIR /data
RUST_LOG=warn \
RUSTFS_OBS_LOG_DIRECTORY=/logs \
RUSTFS_SINKS_FILE_PATH=/logs
# Expose port
EXPOSE 9000
# Volumes for data and logs
VOLUME ["/data", "/logs"]
# Volume for data
VOLUME ["/data"]
# Set entry point
ENTRYPOINT ["/entrypoint.sh"]
CMD ["/usr/bin/rustfs"]
# Set entrypoint
ENTRYPOINT ["/usr/bin/rustfs"]

View File

@@ -112,6 +112,8 @@ RUN apt-get update && apt-get install -y \
ca-certificates \
tzdata \
wget \
coreutils \
passwd \
&& rm -rf /var/lib/apt/lists/*
# Create rustfs user and group
@@ -128,6 +130,10 @@ RUN mkdir -p /data/rustfs{0,1,2,3} && \
COPY --from=builder /usr/local/bin/rustfs /app/rustfs
RUN chmod +x /app/rustfs && chown rustfs:rustfs /app/rustfs
# Copy entrypoint script
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh
# Switch to non-root user
USER rustfs
@@ -142,9 +148,9 @@ ENV RUSTFS_ACCESS_KEY=rustfsadmin \
RUSTFS_VOLUMES=/data \
RUST_LOG=warn
# Volume for data
VOLUME ["/data"]
# Set default command
# Set entrypoint and default command
ENTRYPOINT ["/entrypoint.sh"]
CMD ["/app/rustfs"]

41
_typos.toml Normal file
View File

@@ -0,0 +1,41 @@
[default]
# # Ignore specific spell checking patterns
# extend-ignore-identifiers-re = [
# # Ignore common patterns in base64 encoding and hash values
# "[A-Za-z0-9+/]{8,}={0,2}", # base64 encoding
# "[A-Fa-f0-9]{8,}", # hexadecimal hash
# "[A-Za-z0-9_-]{20,}", # long random strings
# ]
# # Ignore specific regex patterns in content
# extend-ignore-re = [
# # Ignore hash values and encoded strings (base64 patterns)
# "(?i)[A-Za-z0-9+/]{8,}={0,2}",
# # Ignore long strings in quotes (usually hash or base64)
# '"[A-Za-z0-9+/=_-]{8,}"',
# # Ignore IV values and similar cryptographic strings
# '"[A-Za-z0-9+/=]{12,}"',
# # Ignore cryptographic signatures and keys (including partial strings)
# "[A-Za-z0-9+/]{6,}[A-Za-z0-9+/=]*",
# # Ignore base64-like strings in comments (common in examples)
# "//.*[A-Za-z0-9+/]{8,}[A-Za-z0-9+/=]*",
# ]
extend-ignore-re = [
# Ignore long strings in quotes (usually hash or base64)
'"[A-Za-z0-9+/=_-]{32,}"',
# Ignore IV values and similar cryptographic strings
'"[A-Za-z0-9+/=]{12,}"',
# Ignore cryptographic signatures and keys (including partial strings)
"[A-Za-z0-9+/]{16,}[A-Za-z0-9+/=]*",
]
[default.extend-words]
bui = "bui"
typ = "typ"
clen = "clen"
datas = "datas"
bre = "bre"
abd = "abd"
[files]
extend-exclude = []
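The check that the new Typos CI job runs can also be reproduced locally with the typos CLI; this is a suggested workflow, not part of the change itself:

cargo install typos-cli   # provides the typos binary used by the crate-ci/typos action
typos                     # picks up _typos.toml from the repository root automatically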

View File

@@ -108,14 +108,26 @@ pub const DEFAULT_CONSOLE_ADDRESS: &str = concat!(":", DEFAULT_CONSOLE_PORT);
/// It is used to store the logs of the application.
/// Default value: rustfs.log
/// Environment variable: RUSTFS_OBSERVABILITY_LOG_FILENAME
pub const DEFAULT_LOG_FILENAME: &str = "rustfs.log";
pub const DEFAULT_LOG_FILENAME: &str = "rustfs";
/// Default OBS log filename for rustfs
/// This is the default log filename for OBS.
/// It is used to store the logs of the application.
/// Default value: rustfs.log
pub const DEFAULT_OBS_LOG_FILENAME: &str = concat!(DEFAULT_LOG_FILENAME, ".log");
/// Default sink file log file for rustfs
/// This is the default sink file log file for rustfs.
/// It is used to store the logs of the application.
/// Default value: rustfs-sink.log
pub const DEFAULT_SINK_FILE_LOG_FILE: &str = concat!(DEFAULT_LOG_FILENAME, "-sink.log");
/// Default log directory for rustfs
/// This is the default log directory for rustfs.
/// It is used to store the logs of the application.
/// Default value: logs
/// Environment variable: RUSTFS_OBSERVABILITY_LOG_DIRECTORY
pub const DEFAULT_LOG_DIR: &str = "deploy/logs";
pub const DEFAULT_LOG_DIR: &str = "/logs";
/// Default log rotation size mb for rustfs
/// This is the default log rotation size for rustfs.

View File

@@ -33,11 +33,11 @@ pub fn decrypt_data(password: &[u8], data: &[u8]) -> Result<Vec<u8>, crate::Erro
match id {
ID::Argon2idChaCHa20Poly1305 => {
let key = id.get_key(password, salt)?;
decryp(ChaCha20Poly1305::new_from_slice(&key)?, nonce, data)
decrypt(ChaCha20Poly1305::new_from_slice(&key)?, nonce, data)
}
_ => {
let key = id.get_key(password, salt)?;
decryp(Aes256Gcm::new_from_slice(&key)?, nonce, data)
decrypt(Aes256Gcm::new_from_slice(&key)?, nonce, data)
}
}
}
@@ -135,7 +135,7 @@ pub fn decrypt_data(password: &[u8], data: &[u8]) -> Result<Vec<u8>, crate::Erro
#[cfg(any(test, feature = "crypto"))]
#[inline]
fn decryp<T: aes_gcm::aead::Aead>(stream: T, nonce: &[u8], data: &[u8]) -> Result<Vec<u8>, crate::Error> {
fn decrypt<T: aes_gcm::aead::Aead>(stream: T, nonce: &[u8], data: &[u8]) -> Result<Vec<u8>, crate::Error> {
use crate::error::Error;
stream
.decrypt(aes_gcm::Nonce::from_slice(nonce), data)

View File

@@ -253,7 +253,7 @@ pub async fn get_server_info(get_pools: bool) -> InfoMessage {
warn!("load_data_usage_from_backend end {:?}", after3 - after2);
let backen_info = store.clone().backend_info().await;
let backend_info = store.clone().backend_info().await;
let after4 = OffsetDateTime::now_utc();
@@ -272,10 +272,10 @@ pub async fn get_server_info(get_pools: bool) -> InfoMessage {
backend_type: rustfs_madmin::BackendType::ErasureType,
online_disks: online_disks.sum(),
offline_disks: offline_disks.sum(),
standard_sc_parity: backen_info.standard_sc_parity,
rr_sc_parity: backen_info.rr_sc_parity,
total_sets: backen_info.total_sets,
drives_per_set: backen_info.drives_per_set,
standard_sc_parity: backend_info.standard_sc_parity,
rr_sc_parity: backend_info.rr_sc_parity,
total_sets: backend_info.total_sets,
drives_per_set: backend_info.drives_per_set,
};
if get_pools {
pools = get_pools_info(&all_disks).await.unwrap_or_default();

View File

@@ -31,7 +31,7 @@ pub struct ListPathRawOptions {
pub fallback_disks: Vec<Option<DiskStore>>,
pub bucket: String,
pub path: String,
pub recursice: bool,
pub recursive: bool,
pub filter_prefix: Option<String>,
pub forward_to: Option<String>,
pub min_disks: usize,
@@ -52,7 +52,7 @@ impl Clone for ListPathRawOptions {
fallback_disks: self.fallback_disks.clone(),
bucket: self.bucket.clone(),
path: self.path.clone(),
recursice: self.recursice,
recursive: self.recursive,
filter_prefix: self.filter_prefix.clone(),
forward_to: self.forward_to.clone(),
min_disks: self.min_disks,
@@ -85,7 +85,7 @@ pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -
let wakl_opts = WalkDirOptions {
bucket: opts_clone.bucket.clone(),
base_dir: opts_clone.path.clone(),
recursive: opts_clone.recursice,
recursive: opts_clone.recursive,
report_notfound: opts_clone.report_not_found,
filter_prefix: opts_clone.filter_prefix.clone(),
forward_to: opts_clone.forward_to.clone(),
@@ -133,7 +133,7 @@ pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -
WalkDirOptions {
bucket: opts_clone.bucket.clone(),
base_dir: opts_clone.path.clone(),
recursive: opts_clone.recursice,
recursive: opts_clone.recursive,
report_notfound: opts_clone.report_not_found,
filter_prefix: opts_clone.filter_prefix.clone(),
forward_to: opts_clone.forward_to.clone(),

View File

@@ -41,7 +41,7 @@
// pin_mut!(body);
// // Leftover data from the previous read
// let mut prev_bytes = Bytes::new();
// let mut readed_size = 0;
// let mut read_size = 0;
// loop {
// let data: Vec<Bytes> = {
@@ -51,9 +51,9 @@
// Some(Err(e)) => return Err(e),
// Some(Ok((data, remaining_bytes))) => {
// // debug!(
// // "content_length:{},readed_size:{}, read_data data:{}, remaining_bytes: {} ",
// // "content_length:{},read_size:{}, read_data data:{}, remaining_bytes: {} ",
// // content_length,
// // readed_size,
// // read_size,
// // data.len(),
// // remaining_bytes.len()
// // );
@@ -65,15 +65,15 @@
// };
// for bytes in data {
// readed_size += bytes.len();
// // debug!("readed_size {}, content_length {}", readed_size, content_length,);
// read_size += bytes.len();
// // debug!("read_size {}, content_length {}", read_size, content_length,);
// y.yield_ok(bytes).await;
// }
// if readed_size + prev_bytes.len() >= content_length {
// if read_size + prev_bytes.len() >= content_length {
// // debug!(
// // "读完了 readed_size:{} + prev_bytes.len({}) == content_length {}",
// // readed_size,
// // "读完了 read_size:{} + prev_bytes.len({}) == content_length {}",
// // read_size,
// // prev_bytes.len(),
// // content_length,
// // );

View File

@@ -135,7 +135,7 @@ impl Default for PutObjectOptions {
#[allow(dead_code)]
impl PutObjectOptions {
fn set_matche_tag(&mut self, etag: &str) {
fn set_match_tag(&mut self, etag: &str) {
if etag == "*" {
self.custom_header
.insert("If-Match", HeaderValue::from_str("*").expect("err"));
@@ -145,7 +145,7 @@ impl PutObjectOptions {
}
}
fn set_matche_tag_except(&mut self, etag: &str) {
fn set_match_tag_except(&mut self, etag: &str) {
if etag == "*" {
self.custom_header
.insert("If-None-Match", HeaderValue::from_str("*").expect("err"));
@@ -181,7 +181,7 @@ impl PutObjectOptions {
header.insert(
"Expires",
HeaderValue::from_str(&self.expires.format(ISO8601_DATEFORMAT).unwrap()).expect("err"),
); //rustfs invalid heade
); //rustfs invalid header
}
if self.mode.as_str() != "" {

View File

@@ -2422,7 +2422,7 @@ impl ReplicateObjectInfo {
// let mut arns = Vec::new();
// let mut tgts_map = std::collections::HashSet::new();
// for rule in cfg.rules {
// if rule.status.as_str() == "Disabe" {
// if rule.status.as_str() == "Disable" {
// continue;
// }

View File

@@ -95,7 +95,7 @@ impl ArnTarget {
Self {
client: TargetClient {
bucket,
storage_class: "STANDRD".to_string(),
storage_class: "STANDARD".to_string(),
disable_proxy: false,
health_check_duration: Duration::from_secs(100),
endpoint,
@@ -361,7 +361,7 @@ impl BucketTargetSys {
// // Mocked implementation for obtaining a remote client
// let tcli = TargetClient {
// bucket: _tgt.target_bucket.clone(),
// storage_class: "STANDRD".to_string(),
// storage_class: "STANDARD".to_string(),
// disable_proxy: false,
// health_check_duration: Duration::from_secs(100),
// endpoint: _tgt.endpoint.clone(),
@@ -379,7 +379,7 @@ impl BucketTargetSys {
// // Mocked implementation for obtaining a remote client
// let tcli = TargetClient {
// bucket: _tgt.target_bucket.clone(),
// storage_class: "STANDRD".to_string(),
// storage_class: "STANDARD".to_string(),
// disable_proxy: false,
// health_check_duration: Duration::from_secs(100),
// endpoint: _tgt.endpoint.clone(),
@@ -403,7 +403,7 @@ impl BucketTargetSys {
match store.get_bucket_info(_bucket, &store_api::BucketOptions::default()).await {
Ok(info) => {
println!("Bucket Info: {info:?}");
info.versionning
info.versioning
}
Err(err) => {
eprintln!("Error: {err:?}");
@@ -431,7 +431,7 @@ impl BucketTargetSys {
// {
// Ok(info) => {
// println!("Bucket Info: {:?}", info);
// info.versionning
// info.versioning
// }
// Err(err) => {
// eprintln!("Error: {:?}", err);
@@ -475,8 +475,7 @@ impl BucketTargetSys {
{
Ok(info) => {
println!("Bucket Info: {info:?}");
if !info.versionning {
println!("2222222222 {}", info.versionning);
if !info.versioning {
return Err(SetTargetError::TargetNotVersioned(tgt.target_bucket.to_string()));
}
}

View File

@@ -563,7 +563,7 @@ impl LocalDisk {
}
async fn read_metadata(&self, file_path: impl AsRef<Path>) -> Result<Vec<u8>> {
// TODO: suport timeout
// TODO: support timeout
let (data, _) = self.read_metadata_with_dmtime(file_path.as_ref()).await?;
Ok(data)
}
@@ -595,7 +595,7 @@ impl LocalDisk {
}
async fn read_all_data(&self, volume: &str, volume_dir: impl AsRef<Path>, file_path: impl AsRef<Path>) -> Result<Vec<u8>> {
// TODO: timeout suport
// TODO: timeout support
let (data, _) = self.read_all_data_with_dmtime(volume, volume_dir, file_path).await?;
Ok(data)
}
@@ -750,7 +750,7 @@ impl LocalDisk {
let mut f = {
if sync {
// TODO: suport sync
// TODO: support sync
self.open_file(file_path, flags, skip_parent).await?
} else {
self.open_file(file_path, flags, skip_parent).await?
@@ -2336,7 +2336,7 @@ impl DiskAPI for LocalDisk {
};
done_sz(buf.len() as u64);
res.insert("metasize".to_string(), buf.len().to_string());
item.transform_meda_dir();
item.transform_meta_dir();
let meta_cache = MetaCacheEntry {
name: item.object_path().to_string_lossy().to_string(),
metadata: buf,

View File

@@ -308,7 +308,7 @@ impl Erasure {
// ec encode, the result is written into data_buffer
let data_slices: SmallVec<[&mut [u8]; 16]> = data_buffer.chunks_exact_mut(shard_size).collect();
// only run ec when the partiy count is > 0
// only run ec when the parity count is > 0
if self.parity_shards > 0 {
self.encoder.as_ref().unwrap().encode(data_slices).map_err(Error::other)?;
}

View File

@@ -563,12 +563,12 @@ impl CurrentScannerCycle {
}
}
// SystemTime 转换为时间戳
// Convert `SystemTime` to timestamp
fn system_time_to_timestamp(time: &DateTime<Utc>) -> i64 {
time.timestamp_micros()
}
// 将时间戳转换为 SystemTime
// Convert timestamp to `SystemTime`
fn timestamp_to_system_time(timestamp: i64) -> DateTime<Utc> {
DateTime::from_timestamp_micros(timestamp).unwrap_or_default()
}
@@ -593,7 +593,7 @@ pub struct ScannerItem {
}
impl ScannerItem {
pub fn transform_meda_dir(&mut self) {
pub fn transform_meta_dir(&mut self) {
let split = self.prefix.split(SLASH_SEPARATOR).map(PathBuf::from).collect::<Vec<_>>();
if split.len() > 1 {
self.prefix = path_join(&split[0..split.len() - 1]).to_string_lossy().to_string();
@@ -1101,7 +1101,7 @@ impl FolderScanner {
// successfully read means we have a valid object.
found_objects = true;
// Remove filename i.e is the meta file to construct object name
item.transform_meda_dir();
item.transform_meta_dir();
// Object already accounted for, remove from heal map,
// simply because getSize() function already heals the
// object.
@@ -1262,7 +1262,7 @@ impl FolderScanner {
disks: self.disks.clone(),
bucket: bucket.clone(),
path: prefix.clone(),
recursice: true,
recursive: true,
report_not_found: true,
min_disks: self.disks_quorum,
agreed: Some(Box::new(move |entry: MetaCacheEntry| {

View File

@@ -1355,7 +1355,7 @@ impl SetDisks {
disks: disks.iter().cloned().map(Some).collect(),
bucket: bucket_info.name.clone(),
path: bucket_info.prefix.clone(),
recursice: true,
recursive: true,
min_disks: listing_quorum,
agreed: Some(Box::new(move |entry: MetaCacheEntry| Box::pin(cb1(entry)))),
partial: Some(Box::new(move |entries: MetaCacheEntries, _: &[Option<DiskError>]| {

View File

@@ -1172,7 +1172,7 @@ impl SetDisks {
ListPathRawOptions {
disks: disks.iter().cloned().map(Some).collect(),
bucket: bucket.clone(),
recursice: true,
recursive: true,
min_disks: listing_quorum,
agreed: Some(Box::new(move |entry: MetaCacheEntry| {
info!("list_objects_to_rebalance: agreed: {:?}", &entry.name);

View File

@@ -449,7 +449,7 @@ impl PeerS3Client for LocalPeerS3Client {
op.as_ref().map(|v| BucketInfo {
name: v.name.clone(),
created: v.created,
versionning: versioned,
versioning: versioned,
..Default::default()
})
})

View File

@@ -1233,7 +1233,7 @@ impl SetDisks {
return Err(DiskError::ErasureReadQuorum);
}
let mut meta_hashs = vec![None; metas.len()];
let mut meta_hashes = vec![None; metas.len()];
let mut hasher = Sha256::new();
for (i, meta) in metas.iter().enumerate() {
@@ -1265,7 +1265,7 @@ impl SetDisks {
hasher.flush()?;
meta_hashs[i] = Some(hex(hasher.clone().finalize().as_slice()));
meta_hashes[i] = Some(hex(hasher.clone().finalize().as_slice()));
hasher.reset();
}
@@ -1273,7 +1273,7 @@ impl SetDisks {
let mut count_map = HashMap::new();
for hash in meta_hashs.iter().flatten() {
for hash in meta_hashes.iter().flatten() {
*count_map.entry(hash).or_insert(0) += 1;
}
@@ -1297,7 +1297,7 @@ impl SetDisks {
let mut valid_obj_map = HashMap::new();
for (i, op_hash) in meta_hashs.iter().enumerate() {
for (i, op_hash) in meta_hashes.iter().enumerate() {
if let Some(hash) = op_hash {
if let Some(max_hash) = max_val {
if hash == max_hash {
@@ -1749,8 +1749,8 @@ impl SetDisks {
// for res in results {
// match res {
// Ok(entrys) => {
// ress.push(Some(entrys));
// Ok(entries) => {
// ress.push(Some(entries));
// errs.push(None);
// }
// Err(e) => {
@@ -2108,17 +2108,17 @@ impl SetDisks {
let erasure = erasure_coding::Erasure::new(fi.erasure.data_blocks, fi.erasure.parity_blocks, fi.erasure.block_size);
let mut total_readed = 0;
let mut total_read = 0;
for i in part_index..=last_part_index {
if total_readed == length {
if total_read == length {
break;
}
let part_number = fi.parts[i].number;
let part_size = fi.parts[i].size;
let mut part_length = part_size - part_offset;
if part_length > (length - total_readed) {
part_length = length - total_readed
if part_length > (length - total_read) {
part_length = length - total_read
}
let till_offset = erasure.shard_file_offset(part_offset, part_length, part_size);
@@ -2203,7 +2203,7 @@ impl SetDisks {
// debug!("ec decode {} written size {}", part_number, n);
total_readed += part_length;
total_read += part_length;
part_offset = 0;
}
@@ -2306,7 +2306,7 @@ impl SetDisks {
Some(filter_prefix.to_string())
}
},
recursice: true,
recursive: true,
forward_to: None,
min_disks: 1,
report_not_found: false,
@@ -2481,12 +2481,12 @@ impl SetDisks {
};
match Self::pick_valid_fileinfo(&parts_metadata, mod_time, etag, read_quorum as usize) {
Ok(lastest_meta) => {
Ok(latest_meta) => {
let (available_disks, data_errs_by_disk, data_errs_by_part) = disks_with_all_parts(
&online_disks,
&mut parts_metadata,
&errs,
&lastest_meta,
&latest_meta,
bucket,
object,
opts.scan_mode,
@@ -2494,22 +2494,22 @@ impl SetDisks {
.await?;
// info!(
// "disks_with_all_parts: got available_disks: {:?}, data_errs_by_disk: {:?}, data_errs_by_part: {:?}, lastest_meta: {:?}",
// available_disks, data_errs_by_disk, data_errs_by_part, lastest_meta
// "disks_with_all_parts: got available_disks: {:?}, data_errs_by_disk: {:?}, data_errs_by_part: {:?}, latest_meta: {:?}",
// available_disks, data_errs_by_disk, data_errs_by_part, latest_meta
// );
let erasure = if !lastest_meta.deleted && !lastest_meta.is_remote() {
let erasure = if !latest_meta.deleted && !latest_meta.is_remote() {
// Initialize erasure coding
erasure_coding::Erasure::new(
lastest_meta.erasure.data_blocks,
lastest_meta.erasure.parity_blocks,
lastest_meta.erasure.block_size,
latest_meta.erasure.data_blocks,
latest_meta.erasure.parity_blocks,
latest_meta.erasure.block_size,
)
} else {
erasure_coding::Erasure::default()
};
result.object_size =
ObjectInfo::from_file_info(&lastest_meta, bucket, object, true).get_actual_size()? as usize;
ObjectInfo::from_file_info(&latest_meta, bucket, object, true).get_actual_size()? as usize;
// Loop to find number of disks with valid data, per-drive
// data state and a list of outdated disks on which data needs
// to be healed.
@@ -2517,15 +2517,15 @@ impl SetDisks {
let mut disks_to_heal_count = 0;
// info!(
// "errs: {:?}, data_errs_by_disk: {:?}, lastest_meta: {:?}",
// errs, data_errs_by_disk, lastest_meta
// "errs: {:?}, data_errs_by_disk: {:?}, latest_meta: {:?}",
// errs, data_errs_by_disk, latest_meta
// );
for index in 0..available_disks.len() {
let (yes, reason) = should_heal_object_on_disk(
&errs[index],
&data_errs_by_disk[&index],
&parts_metadata[index],
&lastest_meta,
&latest_meta,
);
if yes {
outdate_disks[index] = disks[index].clone();
@@ -2583,10 +2583,10 @@ impl SetDisks {
return Ok((result, None));
}
if !lastest_meta.deleted && disks_to_heal_count > lastest_meta.erasure.parity_blocks {
if !latest_meta.deleted && disks_to_heal_count > latest_meta.erasure.parity_blocks {
error!(
"file({} : {}) part corrupt too much, can not to fix, disks_to_heal_count: {}, parity_blocks: {}",
bucket, object, disks_to_heal_count, lastest_meta.erasure.parity_blocks
bucket, object, disks_to_heal_count, latest_meta.erasure.parity_blocks
);
// Allow for dangling deletes, on versions that have DataDir missing etc.
@@ -2633,39 +2633,37 @@ impl SetDisks {
};
}
if !lastest_meta.deleted && lastest_meta.erasure.distribution.len() != available_disks.len() {
if !latest_meta.deleted && latest_meta.erasure.distribution.len() != available_disks.len() {
let err_str = format!(
"unexpected file distribution ({:?}) from available disks ({:?}), looks like backend disks have been manually modified refusing to heal {}/{}({})",
lastest_meta.erasure.distribution, available_disks, bucket, object, version_id
latest_meta.erasure.distribution, available_disks, bucket, object, version_id
);
warn!(err_str);
let err = DiskError::other(err_str);
return Ok((
self.default_heal_result(lastest_meta, &errs, bucket, object, version_id)
.await,
self.default_heal_result(latest_meta, &errs, bucket, object, version_id).await,
Some(err),
));
}
let latest_disks = Self::shuffle_disks(&available_disks, &lastest_meta.erasure.distribution);
if !lastest_meta.deleted && lastest_meta.erasure.distribution.len() != outdate_disks.len() {
let latest_disks = Self::shuffle_disks(&available_disks, &latest_meta.erasure.distribution);
if !latest_meta.deleted && latest_meta.erasure.distribution.len() != outdate_disks.len() {
let err_str = format!(
"unexpected file distribution ({:?}) from outdated disks ({:?}), looks like backend disks have been manually modified refusing to heal {}/{}({})",
lastest_meta.erasure.distribution, outdate_disks, bucket, object, version_id
latest_meta.erasure.distribution, outdate_disks, bucket, object, version_id
);
warn!(err_str);
let err = DiskError::other(err_str);
return Ok((
self.default_heal_result(lastest_meta, &errs, bucket, object, version_id)
.await,
self.default_heal_result(latest_meta, &errs, bucket, object, version_id).await,
Some(err),
));
}
if !lastest_meta.deleted && lastest_meta.erasure.distribution.len() != parts_metadata.len() {
if !latest_meta.deleted && latest_meta.erasure.distribution.len() != parts_metadata.len() {
let err_str = format!(
"unexpected file distribution ({:?}) from metadata entries ({:?}), looks like backend disks have been manually modified refusing to heal {}/{}({})",
lastest_meta.erasure.distribution,
latest_meta.erasure.distribution,
parts_metadata.len(),
bucket,
object,
@@ -2674,15 +2672,13 @@ impl SetDisks {
warn!(err_str);
let err = DiskError::other(err_str);
return Ok((
self.default_heal_result(lastest_meta, &errs, bucket, object, version_id)
.await,
self.default_heal_result(latest_meta, &errs, bucket, object, version_id).await,
Some(err),
));
}
let out_dated_disks = Self::shuffle_disks(&outdate_disks, &lastest_meta.erasure.distribution);
let mut parts_metadata =
Self::shuffle_parts_metadata(&parts_metadata, &lastest_meta.erasure.distribution);
let out_dated_disks = Self::shuffle_disks(&outdate_disks, &latest_meta.erasure.distribution);
let mut parts_metadata = Self::shuffle_parts_metadata(&parts_metadata, &latest_meta.erasure.distribution);
let mut copy_parts_metadata = vec![None; parts_metadata.len()];
for (index, disk) in latest_disks.iter().enumerate() {
if disk.is_some() {
@@ -2703,18 +2699,18 @@ impl SetDisks {
if disk.is_some() {
// Make sure to write the FileInfo information
// that is expected to be in quorum.
parts_metadata[index] = clean_file_info(&lastest_meta);
parts_metadata[index] = clean_file_info(&latest_meta);
}
}
// We write at temporary location and then rename to final location.
let tmp_id = Uuid::new_v4().to_string();
let src_data_dir = lastest_meta.data_dir.unwrap().to_string();
let dst_data_dir = lastest_meta.data_dir.unwrap();
let src_data_dir = latest_meta.data_dir.unwrap().to_string();
let dst_data_dir = latest_meta.data_dir.unwrap();
if !lastest_meta.deleted && !lastest_meta.is_remote() {
let erasure_info = lastest_meta.erasure;
for part in lastest_meta.parts.iter() {
if !latest_meta.deleted && !latest_meta.is_remote() {
let erasure_info = latest_meta.erasure;
for part in latest_meta.parts.iter() {
let till_offset = erasure.shard_file_offset(0, part.size, part.size);
let checksum_algo = erasure_info.get_checksum_info(part.number).algorithm;
let mut readers = Vec::with_capacity(latest_disks.len());
@@ -2759,7 +2755,7 @@ impl SetDisks {
let is_inline_buffer = {
if let Some(sc) = GLOBAL_StorageClass.get() {
sc.should_inline(erasure.shard_file_size(lastest_meta.size), false)
sc.should_inline(erasure.shard_file_size(latest_meta.size), false)
} else {
false
}
@@ -3840,7 +3836,7 @@ impl SetDisks {
disks,
fallback_disks,
bucket: bucket.clone(),
recursice: true,
recursive: true,
forward_to,
min_disks: 1,
report_not_found: false,
@@ -4317,7 +4313,7 @@ impl ObjectIO for SetDisks {
fi.is_latest = true;
// TODO: version suport
// TODO: version support
Ok(ObjectInfo::from_file_info(&fi, bucket, object, opts.versioned || opts.version_suspended))
}
}
@@ -6142,7 +6138,7 @@ async fn disks_with_all_parts(
online_disks: &[Option<DiskStore>],
parts_metadata: &mut [FileInfo],
errs: &[Option<DiskError>],
lastest_meta: &FileInfo,
latest_meta: &FileInfo,
bucket: &str,
object: &str,
scan_mode: HealScanMode,
@@ -6150,10 +6146,10 @@ async fn disks_with_all_parts(
let mut available_disks = vec![None; online_disks.len()];
let mut data_errs_by_disk: HashMap<usize, Vec<usize>> = HashMap::new();
for i in 0..online_disks.len() {
data_errs_by_disk.insert(i, vec![1; lastest_meta.parts.len()]);
data_errs_by_disk.insert(i, vec![1; latest_meta.parts.len()]);
}
let mut data_errs_by_part: HashMap<usize, Vec<usize>> = HashMap::new();
for i in 0..lastest_meta.parts.len() {
for i in 0..latest_meta.parts.len() {
data_errs_by_part.insert(i, vec![1; online_disks.len()]);
}
@@ -6191,7 +6187,7 @@ async fn disks_with_all_parts(
continue;
}
let meta = &parts_metadata[index];
if !meta.mod_time.eq(&lastest_meta.mod_time) || !meta.data_dir.eq(&lastest_meta.data_dir) {
if !meta.mod_time.eq(&latest_meta.mod_time) || !meta.data_dir.eq(&latest_meta.data_dir) {
warn!("mod_time is not Eq, file corrupt, index: {index}");
meta_errs[index] = Some(DiskError::FileCorrupt);
parts_metadata[index] = FileInfo::default();
@@ -6217,7 +6213,7 @@ async fn disks_with_all_parts(
meta_errs.iter().enumerate().for_each(|(index, err)| {
if err.is_some() {
let part_err = conv_part_err_to_int(err);
for p in 0..lastest_meta.parts.len() {
for p in 0..latest_meta.parts.len() {
data_errs_by_part.entry(p).or_insert(vec![0; meta_errs.len()])[index] = part_err;
}
}
@@ -6269,7 +6265,7 @@ async fn disks_with_all_parts(
let mut verify_resp = CheckPartsResp::default();
let mut verify_err = None;
meta.data_dir = lastest_meta.data_dir;
meta.data_dir = latest_meta.data_dir;
if scan_mode == HEAL_DEEP_SCAN {
// disk has a valid xl.meta but may not have all the
// parts. This is considered an outdated disk, since
@@ -6293,7 +6289,7 @@ async fn disks_with_all_parts(
}
}
for p in 0..lastest_meta.parts.len() {
for p in 0..latest_meta.parts.len() {
if let Some(vec) = data_errs_by_part.get_mut(&p) {
if index < vec.len() {
if verify_err.is_some() {
@@ -6331,7 +6327,7 @@ pub fn should_heal_object_on_disk(
err: &Option<DiskError>,
parts_errs: &[usize],
meta: &FileInfo,
lastest_meta: &FileInfo,
latest_meta: &FileInfo,
) -> (bool, Option<DiskError>) {
if let Some(err) = err {
if err == &DiskError::FileNotFound || err == &DiskError::FileVersionNotFound || err == &DiskError::FileCorrupt {
@@ -6339,12 +6335,12 @@ pub fn should_heal_object_on_disk(
}
}
if lastest_meta.volume != meta.volume
|| lastest_meta.name != meta.name
|| lastest_meta.version_id != meta.version_id
|| lastest_meta.deleted != meta.deleted
if latest_meta.volume != meta.volume
|| latest_meta.name != meta.name
|| latest_meta.version_id != meta.version_id
|| latest_meta.deleted != meta.deleted
{
info!("lastest_meta not Eq meta, lastest_meta: {:?}, meta: {:?}", lastest_meta, meta);
info!("latest_meta not Eq meta, latest_meta: {:?}, meta: {:?}", latest_meta, meta);
return (true, Some(DiskError::OutdatedXLMeta));
}
if !meta.deleted && !meta.is_remote() {


@@ -65,7 +65,7 @@ pub struct Sets {
pub pool_idx: usize,
pub endpoints: PoolEndpoints,
pub format: FormatV3,
pub partiy_count: usize,
pub parity_count: usize,
pub set_count: usize,
pub set_drive_count: usize,
pub default_parity_count: usize,
@@ -82,13 +82,13 @@ impl Drop for Sets {
}
impl Sets {
#[tracing::instrument(level = "debug", skip(disks, endpoints, fm, pool_idx, partiy_count))]
#[tracing::instrument(level = "debug", skip(disks, endpoints, fm, pool_idx, parity_count))]
pub async fn new(
disks: Vec<Option<DiskStore>>,
endpoints: &PoolEndpoints,
fm: &FormatV3,
pool_idx: usize,
partiy_count: usize,
parity_count: usize,
) -> Result<Arc<Self>> {
let set_count = fm.erasure.sets.len();
let set_drive_count = fm.erasure.sets[0].len();
@@ -173,7 +173,7 @@ impl Sets {
Arc::new(RwLock::new(NsLockMap::new(is_dist_erasure().await))),
Arc::new(RwLock::new(set_drive)),
set_drive_count,
partiy_count,
parity_count,
i,
pool_idx,
set_endpoints,
@@ -194,10 +194,10 @@ impl Sets {
pool_idx,
endpoints: endpoints.clone(),
format: fm.clone(),
partiy_count,
parity_count,
set_count,
set_drive_count,
default_parity_count: partiy_count,
default_parity_count: parity_count,
distribution_algo: fm.erasure.distribution_algo.clone(),
exit_signal: Some(tx),
});


@@ -152,7 +152,7 @@ impl ECStore {
common_parity_drives = parity_drives;
}
// validate_parity(partiy_count, pool_eps.drives_per_set)?;
// validate_parity(parity_count, pool_eps.drives_per_set)?;
let (disks, errs) = store_init::init_disks(
&pool_eps.endpoints,
@@ -302,13 +302,13 @@ impl ECStore {
}
let pools = meta.return_resumable_pools();
let mut pool_indeces = Vec::with_capacity(pools.len());
let mut pool_indices = Vec::with_capacity(pools.len());
let endpoints = get_global_endpoints();
for p in pools.iter() {
if let Some(idx) = endpoints.get_pool_idx(&p.cmd_line) {
pool_indeces.push(idx);
pool_indices.push(idx);
} else {
return Err(Error::other(format!(
"unexpected state present for decommission status pool({}) not found",
@@ -317,8 +317,8 @@ impl ECStore {
}
}
if !pool_indeces.is_empty() {
let idx = pool_indeces[0];
if !pool_indices.is_empty() {
let idx = pool_indices[0];
if endpoints.as_ref()[idx].endpoints.as_ref()[0].is_local {
let (_tx, rx) = broadcast::channel(1);
@@ -328,9 +328,9 @@ impl ECStore {
// wait 3 minutes for cluster init
tokio::time::sleep(Duration::from_secs(60 * 3)).await;
if let Err(err) = store.decommission(rx.resubscribe(), pool_indeces.clone()).await {
if let Err(err) = store.decommission(rx.resubscribe(), pool_indices.clone()).await {
if err == StorageError::DecommissionAlreadyRunning {
for i in pool_indeces.iter() {
for i in pool_indices.iter() {
store.do_decommission_in_routine(rx.resubscribe(), *i).await;
}
return;
@@ -417,9 +417,9 @@ impl ECStore {
// // TODO handle errs
// continue;
// }
// let entrys = disks_res.as_ref().unwrap();
// let entries = disks_res.as_ref().unwrap();
// for entry in entrys {
// for entry in entries {
// // warn!("lst_merged entry---- {}", &entry.name);
// if !opts.prefix.is_empty() && !entry.name.starts_with(&opts.prefix) {
@@ -1415,7 +1415,7 @@ impl StorageAPI for ECStore {
if let Ok(sys) = metadata_sys::get(bucket).await {
info.created = Some(sys.created);
info.versionning = sys.versioning();
info.versioning = sys.versioning();
info.object_locking = sys.object_locking();
}


@@ -276,7 +276,10 @@ impl HTTPRangeSpec {
return Ok(range_length);
}
Err(Error::other("range value invaild"))
Err(Error::other(format!(
"range value invalid: start={}, end={}, expected start <= end and end >= -1",
self.start, self.end
)))
}
}
@@ -336,7 +339,7 @@ pub struct BucketInfo {
pub name: String,
pub created: Option<OffsetDateTime>,
pub deleted: Option<OffsetDateTime>,
pub versionning: bool,
pub versioning: bool,
pub object_locking: bool,
}


@@ -222,7 +222,7 @@ fn check_format_erasure_value(format: &FormatV3) -> Result<()> {
Ok(())
}
// load_format_erasure_all reads all foramt.json
// load_format_erasure_all reads all format.json
pub async fn load_format_erasure_all(disks: &[Option<DiskStore>], heal: bool) -> (Vec<Option<FormatV3>>, Vec<Option<DiskError>>) {
let mut futures = Vec::with_capacity(disks.len());
let mut datas = Vec::with_capacity(disks.len());


@@ -776,7 +776,7 @@ impl ECStore {
fallback_disks: fallback_disks.iter().cloned().map(Some).collect(),
bucket: bucket.to_owned(),
path,
recursice: true,
recursive: true,
filter_prefix: Some(filter_prefix),
forward_to: opts.marker.clone(),
min_disks: listing_quorum,
@@ -851,8 +851,8 @@ impl ECStore {
}
};
if let Some(fiter) = opts.filter {
if fiter(&fi) {
if let Some(filter) = opts.filter {
if filter(&fi) {
let item = ObjectInfoOrErr {
item: Some(ObjectInfo::from_file_info(&fi, &bucket, &fi.name, {
if let Some(v) = &vcf { v.versioned(&fi.name) } else { false }
@@ -899,8 +899,8 @@ impl ECStore {
}
for fi in fvs.versions.iter() {
if let Some(fiter) = opts.filter {
if fiter(fi) {
if let Some(filter) = opts.filter {
if filter(fi) {
let item = ObjectInfoOrErr {
item: Some(ObjectInfo::from_file_info(fi, &bucket, &fi.name, {
if let Some(v) = &vcf { v.versioned(&fi.name) } else { false }
@@ -972,7 +972,7 @@ async fn gather_results(
let mut sender = Some(results_tx);
let mut recv = recv;
let mut entrys = Vec::new();
let mut entries = Vec::new();
while let Some(mut entry) = recv.recv().await {
if returned {
continue;
@@ -1009,11 +1009,11 @@ async fn gather_results(
// TODO: Lifecycle
if opts.limit > 0 && entrys.len() >= opts.limit as usize {
if opts.limit > 0 && entries.len() >= opts.limit as usize {
if let Some(tx) = sender {
tx.send(MetaCacheEntriesSortedResult {
entries: Some(MetaCacheEntriesSorted {
o: MetaCacheEntries(entrys.clone()),
o: MetaCacheEntries(entries.clone()),
..Default::default()
}),
err: None,
@@ -1027,15 +1027,15 @@ async fn gather_results(
continue;
}
entrys.push(Some(entry));
// entrys.push(entry);
entries.push(Some(entry));
// entries.push(entry);
}
// finish not full, return eof
if let Some(tx) = sender {
tx.send(MetaCacheEntriesSortedResult {
entries: Some(MetaCacheEntriesSorted {
o: MetaCacheEntries(entrys.clone()),
o: MetaCacheEntries(entries.clone()),
..Default::default()
}),
err: Some(Error::Unexpected.into()),
@@ -1125,10 +1125,10 @@ async fn merge_entry_channels(
if path::clean(&best_entry.name) == path::clean(&other_entry.name) {
let dir_matches = best_entry.is_dir() && other_entry.is_dir();
let suffix_matche =
let suffix_matches =
best_entry.name.ends_with(SLASH_SEPARATOR) == other_entry.name.ends_with(SLASH_SEPARATOR);
if dir_matches && suffix_matche {
if dir_matches && suffix_matches {
to_merge.push(other_idx);
continue;
}
@@ -1286,7 +1286,7 @@ impl SetDisks {
fallback_disks: fallback_disks.iter().cloned().map(Some).collect(),
bucket: opts.bucket,
path: opts.base_dir,
recursice: opts.recursive,
recursive: opts.recursive,
filter_prefix: opts.filter_prefix,
forward_to: opts.marker,
min_disks: listing_quorum,


@@ -215,7 +215,7 @@ pub struct FileInfo {
impl FileInfo {
pub fn new(object: &str, data_blocks: usize, parity_blocks: usize) -> Self {
let indexs = {
let indices = {
let cardinality = data_blocks + parity_blocks;
let mut nums = vec![0; cardinality];
let key_crc = crc32fast::hash(object.as_bytes());
@@ -233,7 +233,7 @@ impl FileInfo {
data_blocks,
parity_blocks,
block_size: BLOCK_SIZE_V2,
distribution: indexs,
distribution: indices,
..Default::default()
},
..Default::default()
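The FileInfo::new change above only renames indexs to indices; the distribution itself is seeded from a crc32fast hash of the object name. As a rough illustration of how such a hash can produce a stable, spread-out shard order, here is a hedged sketch — the rotation below is an assumption for illustration, not necessarily rustfs's exact formula:

// Sketch only: derive a deterministic 1-based shard distribution from an
// object name. Assumes the crc32fast crate; the real rustfs logic may differ.
fn hashed_distribution(object: &str, data_blocks: usize, parity_blocks: usize) -> Vec<usize> {
    let cardinality = data_blocks + parity_blocks;
    let key_crc = crc32fast::hash(object.as_bytes()) as usize;
    let start = key_crc % cardinality;
    // Rotate 1..=cardinality so every object gets a stable but shifted order.
    (0..cardinality).map(|i| 1 + (start + i) % cardinality).collect()
}

fn main() {
    let dist = hashed_distribution("bucket/object.txt", 4, 2);
    assert_eq!(dist.len(), 6);
    println!("{dist:?}"); // some rotation of 1..=6, e.g. [3, 4, 5, 6, 1, 2], depending on the CRC
}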


@@ -702,7 +702,7 @@ impl FileMeta {
})
}
pub fn lastest_mod_time(&self) -> Option<OffsetDateTime> {
pub fn latest_mod_time(&self) -> Option<OffsetDateTime> {
if self.versions.is_empty() {
return None;
}
@@ -1762,7 +1762,7 @@ impl MetaDeleteMarker {
// self.meta_sys = Some(map);
// }
// name => return Err(Error::other(format!("not suport field name {name}"))),
// name => return Err(Error::other(format!("not support field name {name}"))),
// }
// }
@@ -1962,32 +1962,32 @@ pub fn merge_file_meta_versions(
n_versions += 1;
}
} else {
let mut lastest_count = 0;
let mut latest_count = 0;
for (i, ver) in tops.iter().enumerate() {
if ver.header == latest.header {
lastest_count += 1;
latest_count += 1;
continue;
}
if i == 0 || ver.header.sorts_before(&latest.header) {
if i == 0 || lastest_count == 0 {
lastest_count = 1;
if i == 0 || latest_count == 0 {
latest_count = 1;
} else if !strict && ver.header.matches_not_strict(&latest.header) {
lastest_count += 1;
latest_count += 1;
} else {
lastest_count = 1;
latest_count = 1;
}
latest = ver.clone();
continue;
}
// Mismatch, but older.
if lastest_count > 0 && !strict && ver.header.matches_not_strict(&latest.header) {
lastest_count += 1;
if latest_count > 0 && !strict && ver.header.matches_not_strict(&latest.header) {
latest_count += 1;
continue;
}
if lastest_count > 0 && ver.header.version_id == latest.header.version_id {
if latest_count > 0 && ver.header.version_id == latest.header.version_id {
let mut x: HashMap<FileMetaVersionHeader, usize> = HashMap::new();
for a in tops.iter() {
if a.header.version_id != ver.header.version_id {
@@ -1999,12 +1999,12 @@ pub fn merge_file_meta_versions(
}
*x.entry(a_clone.header).or_insert(1) += 1;
}
lastest_count = 0;
latest_count = 0;
for (k, v) in x.iter() {
if *v < lastest_count {
if *v < latest_count {
continue;
}
if *v == lastest_count && latest.header.sorts_before(k) {
if *v == latest_count && latest.header.sorts_before(k) {
continue;
}
tops.iter().for_each(|a| {
@@ -2017,12 +2017,12 @@ pub fn merge_file_meta_versions(
}
});
lastest_count = *v;
latest_count = *v;
}
break;
}
}
if lastest_count >= quorum {
if latest_count >= quorum {
if !latest.header.free_version() {
n_versions += 1;
}
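The renamed latest_count above tracks how many disks agree on the latest version header, and the version is only accepted once that count reaches quorum. A stripped-down sketch of that idea, ignoring mod-time ordering, non-strict matching and free versions:

use std::collections::HashMap;

// Sketch only: pick the value that at least `quorum` sources agree on.
// The real merge also weighs sort order and "not strict" header matches.
fn pick_with_quorum<T: Eq + std::hash::Hash + Clone>(tops: &[T], quorum: usize) -> Option<T> {
    let mut counts: HashMap<&T, usize> = HashMap::new();
    for t in tops {
        *counts.entry(t).or_insert(0) += 1;
    }
    counts
        .into_iter()
        .find(|(_, n)| *n >= quorum)
        .map(|(t, _)| t.clone())
}

fn main() {
    let headers = ["v2", "v2", "v1", "v2"];
    assert_eq!(pick_with_quorum(&headers, 3), Some("v2"));
    assert_eq!(pick_with_quorum(&headers, 4), None);
}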


@@ -221,7 +221,7 @@ impl MetaCacheEntry {
};
if self_vers.versions.len() != other_vers.versions.len() {
match self_vers.lastest_mod_time().cmp(&other_vers.lastest_mod_time()) {
match self_vers.latest_mod_time().cmp(&other_vers.latest_mod_time()) {
Ordering::Greater => return (Some(self.clone()), false),
Ordering::Less => return (Some(other.clone()), false),
_ => {}


@@ -90,7 +90,7 @@ where
T: Store,
{
pub(crate) async fn new(api: T) -> Arc<Self> {
let (sender, reciver) = mpsc::channel::<i64>(100);
let (sender, receiver) = mpsc::channel::<i64>(100);
let sys = Arc::new(Self {
api,
@@ -101,11 +101,11 @@ where
last_timestamp: AtomicI64::new(0),
});
sys.clone().init(reciver).await.unwrap();
sys.clone().init(receiver).await.unwrap();
sys
}
async fn init(self: Arc<Self>, reciver: Receiver<i64>) -> Result<()> {
async fn init(self: Arc<Self>, receiver: Receiver<i64>) -> Result<()> {
self.clone().save_iam_formatter().await?;
self.clone().load().await?;
@@ -118,7 +118,7 @@ where
let s = Arc::clone(&self);
async move {
let ticker = tokio::time::interval(Duration::from_secs(120));
tokio::pin!(ticker, reciver);
tokio::pin!(ticker, receiver);
loop {
select! {
_ = ticker.tick() => {
@@ -127,13 +127,13 @@ where
error!("iam load err {:?}", err);
}
},
i = reciver.recv() => {
info!("iam load reciver");
i = receiver.recv() => {
info!("iam load receiver");
match i {
Some(t) => {
let last = s.last_timestamp.load(Ordering::Relaxed);
if last <= t {
info!("iam load reciver load");
info!("iam load receiver load");
if let Err(err) =s.clone().load().await{
error!("iam load err {:?}", err);
}
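The loop above mixes a 120-second interval with an mpsc receiver so IAM data is reloaded both periodically and on demand. A self-contained sketch of the same tokio pattern, assuming tokio with the rt, time, sync and macros features and with the real load() replaced by a println!:

use std::time::Duration;
use tokio::{select, sync::mpsc, time};

#[tokio::main]
async fn main() {
    let (reload_tx, mut reload_rx) = mpsc::channel::<i64>(100);

    tokio::spawn(async move {
        let mut ticker = time::interval(Duration::from_secs(120));
        loop {
            select! {
                _ = ticker.tick() => {
                    // Periodic refresh path (stands in for `load()` in the real code).
                    println!("periodic reload");
                }
                msg = reload_rx.recv() => {
                    match msg {
                        Some(ts) => println!("on-demand reload requested at {ts}"),
                        None => break, // all senders dropped, stop the task
                    }
                }
            }
        }
    });

    // Trigger one on-demand reload, then let the task wind down.
    reload_tx.send(1_690_000_000).await.unwrap();
    drop(reload_tx);
    time::sleep(Duration::from_millis(50)).await;
}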
@@ -814,7 +814,7 @@ where
let mp = MappedPolicy::new(policy);
let (_, combined_policy_stmt) = filter_policies(&self.cache, &mp.policies, "temp");
if combined_policy_stmt.is_empty() {
return Err(Error::other(format!("need poliy not found {}", IamError::NoSuchPolicy)));
return Err(Error::other(format!("Required policy not found: {}", IamError::NoSuchPolicy)));
}
self.api
@@ -987,7 +987,7 @@ where
_ => auth::ACCOUNT_OFF,
}
};
let user_entiry = UserIdentity::from(Credentials {
let user_entry = UserIdentity::from(Credentials {
access_key: access_key.to_string(),
secret_key: args.secret_key.to_string(),
status: status.to_owned(),
@@ -995,10 +995,10 @@ where
});
self.api
.save_user_identity(access_key, UserType::Reg, user_entiry.clone(), None)
.save_user_identity(access_key, UserType::Reg, user_entry.clone(), None)
.await?;
self.update_user_with_claims(access_key, user_entiry)?;
self.update_user_with_claims(access_key, user_entry)?;
Ok(OffsetDateTime::now_utc())
}
@@ -1104,7 +1104,7 @@ where
}
};
let user_entiry = UserIdentity::from(Credentials {
let user_entry = UserIdentity::from(Credentials {
access_key: access_key.to_string(),
secret_key: u.credentials.secret_key.clone(),
status: status.to_owned(),
@@ -1112,10 +1112,10 @@ where
});
self.api
.save_user_identity(access_key, UserType::Reg, user_entiry.clone(), None)
.save_user_identity(access_key, UserType::Reg, user_entry.clone(), None)
.await?;
self.update_user_with_claims(access_key, user_entiry)?;
self.update_user_with_claims(access_key, user_entry)?;
Ok(OffsetDateTime::now_utc())
}


@@ -62,8 +62,12 @@ pub trait Store: Clone + Send + Sync + 'static {
is_group: bool,
m: &mut HashMap<String, MappedPolicy>,
) -> Result<()>;
async fn load_mapped_policys(&self, user_type: UserType, is_group: bool, m: &mut HashMap<String, MappedPolicy>)
-> Result<()>;
async fn load_mapped_policies(
&self,
user_type: UserType,
is_group: bool,
m: &mut HashMap<String, MappedPolicy>,
) -> Result<()>;
async fn load_all(&self, cache: &Cache) -> Result<()>;
}


@@ -656,7 +656,7 @@ impl Store for ObjectStore {
Ok(())
}
async fn load_mapped_policys(
async fn load_mapped_policies(
&self,
user_type: UserType,
is_group: bool,


@@ -124,13 +124,13 @@ impl<T: Store> IamSys<T> {
})
}
pub async fn load_mapped_policys(
pub async fn load_mapped_policies(
&self,
user_type: UserType,
is_group: bool,
m: &mut HashMap<String, MappedPolicy>,
) -> Result<()> {
self.store.api.load_mapped_policys(user_type, is_group, m).await
self.store.api.load_mapped_policies(user_type, is_group, m).await
}
pub async fn list_polices(&self, bucket_name: &str) -> Result<HashMap<String, Policy>> {


@@ -22,7 +22,7 @@ pub struct LRWMutex {
id: RwLock<String>,
source: RwLock<String>,
is_write: RwLock<bool>,
refrence: RwLock<usize>,
reference: RwLock<usize>,
}
impl LRWMutex {
@@ -66,13 +66,13 @@ impl LRWMutex {
let mut locked = false;
if is_write {
if *self.refrence.read().await == 0 && !*self.is_write.read().await {
*self.refrence.write().await = 1;
if *self.reference.read().await == 0 && !*self.is_write.read().await {
*self.reference.write().await = 1;
*self.is_write.write().await = true;
locked = true;
}
} else if !*self.is_write.read().await {
*self.refrence.write().await += 1;
*self.reference.write().await += 1;
locked = true;
}
@@ -115,13 +115,13 @@ impl LRWMutex {
async fn unlock(&self, is_write: bool) -> bool {
let mut unlocked = false;
if is_write {
if *self.is_write.read().await && *self.refrence.read().await == 1 {
*self.refrence.write().await = 0;
if *self.is_write.read().await && *self.reference.read().await == 1 {
*self.reference.write().await = 0;
*self.is_write.write().await = false;
unlocked = true;
}
} else if !*self.is_write.read().await && *self.refrence.read().await > 0 {
*self.refrence.write().await -= 1;
} else if !*self.is_write.read().await && *self.reference.read().await > 0 {
*self.reference.write().await -= 1;
unlocked = true;
}
@@ -129,7 +129,7 @@ impl LRWMutex {
}
pub async fn force_un_lock(&self) {
*self.refrence.write().await = 0;
*self.reference.write().await = 0;
*self.is_write.write().await = false;
}
}
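LRWMutex above grants a write lock only when there is no writer and no readers, and lets readers bump a reference counter; the diff itself only fixes the refrence spelling. A compact sketch of that state machine, assuming tokio, folding both fields behind a single Mutex so the check and the update happen under one guard:

use tokio::sync::Mutex;

#[derive(Default)]
struct LockState {
    readers: usize,
    is_write: bool,
}

// Sketch only: try-lock semantics similar to LRWMutex::lock/unlock.
#[derive(Default)]
struct SimpleLRW {
    state: Mutex<LockState>,
}

impl SimpleLRW {
    async fn try_lock(&self, is_write: bool) -> bool {
        let mut s = self.state.lock().await;
        if is_write {
            if s.readers == 0 && !s.is_write {
                s.is_write = true;
                return true;
            }
            false
        } else if !s.is_write {
            s.readers += 1;
            true
        } else {
            false
        }
    }

    async fn unlock(&self, is_write: bool) {
        let mut s = self.state.lock().await;
        if is_write && s.is_write {
            s.is_write = false;
        } else if !is_write && s.readers > 0 {
            s.readers -= 1;
        }
    }
}

#[tokio::main]
async fn main() {
    let l = SimpleLRW::default();
    assert!(l.try_lock(false).await);  // first reader
    assert!(l.try_lock(false).await);  // second reader
    assert!(!l.try_lock(true).await);  // writer blocked while readers hold it
    l.unlock(false).await;
    l.unlock(false).await;
    assert!(l.try_lock(true).await);   // now the writer gets it
}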


@@ -14,7 +14,8 @@
use rustfs_config::{
APP_NAME, DEFAULT_LOG_DIR, DEFAULT_LOG_FILENAME, DEFAULT_LOG_KEEP_FILES, DEFAULT_LOG_LEVEL, DEFAULT_LOG_ROTATION_SIZE_MB,
DEFAULT_LOG_ROTATION_TIME, ENVIRONMENT, METER_INTERVAL, SAMPLE_RATIO, SERVICE_VERSION, USE_STDOUT,
DEFAULT_LOG_ROTATION_TIME, DEFAULT_OBS_LOG_FILENAME, DEFAULT_SINK_FILE_LOG_FILE, ENVIRONMENT, METER_INTERVAL, SAMPLE_RATIO,
SERVICE_VERSION, USE_STDOUT,
};
use serde::{Deserialize, Serialize};
use std::env;
@@ -104,7 +105,7 @@ impl OtelConfig {
log_filename: env::var("RUSTFS_OBS_LOG_FILENAME")
.ok()
.and_then(|v| v.parse().ok())
.or(Some(DEFAULT_LOG_FILENAME.to_string())),
.or(Some(DEFAULT_OBS_LOG_FILENAME.to_string())),
log_rotation_size_mb: env::var("RUSTFS_OBS_LOG_ROTATION_SIZE_MB")
.ok()
.and_then(|v| v.parse().ok())
@@ -210,16 +211,15 @@ pub struct FileSinkConfig {
impl FileSinkConfig {
pub fn get_default_log_path() -> String {
let temp_dir = env::temp_dir().join("rustfs");
let temp_dir = env::temp_dir().join(DEFAULT_LOG_FILENAME);
if let Err(e) = std::fs::create_dir_all(&temp_dir) {
eprintln!("Failed to create log directory: {e}");
return "rustfs/rustfs.log".to_string();
return DEFAULT_LOG_DIR.to_string();
}
temp_dir
.join("rustfs.log")
.join(DEFAULT_SINK_FILE_LOG_FILE)
.to_str()
.unwrap_or("rustfs/rustfs.log")
.unwrap_or(DEFAULT_LOG_DIR)
.to_string()
}
pub fn new() -> Self {


@@ -77,8 +77,7 @@ impl Logger {
}
/// Asynchronous logging of unified log entries
#[tracing::instrument(skip(self), fields(log_source = "logger"))]
#[tracing::instrument(level = "error", skip_all)]
#[tracing::instrument(skip_all, fields(log_source = "logger"))]
pub async fn log_entry(&self, entry: UnifiedLogEntry) -> Result<(), GlobalError> {
// Extract information for tracing based on entry type
match &entry {


@@ -14,6 +14,7 @@
use crate::{AppConfig, SinkConfig, UnifiedLogEntry};
use async_trait::async_trait;
use rustfs_config::DEFAULT_SINK_FILE_LOG_FILE;
use std::sync::Arc;
#[cfg(feature = "file")]
@@ -71,7 +72,7 @@ pub async fn create_sinks(config: &AppConfig) -> Vec<Arc<dyn Sink>> {
SinkConfig::File(file_config) => {
tracing::debug!("FileSink: Using path: {}", file_config.path);
match file::FileSink::new(
file_config.path.clone(),
format!("{}/{}", file_config.path.clone(), DEFAULT_SINK_FILE_LOG_FILE),
file_config.buffer_size.unwrap_or(8192),
file_config.flush_interval_ms.unwrap_or(1000),
file_config.flush_threshold.unwrap_or(100),


@@ -36,6 +36,7 @@ use rustfs_config::{
use rustfs_utils::get_local_ip_with_default;
use smallvec::SmallVec;
use std::borrow::Cow;
use std::fs;
use std::io::IsTerminal;
use tracing::info;
use tracing_error::ErrorLayer;
@@ -295,6 +296,21 @@ pub(crate) fn init_telemetry(config: &OtelConfig) -> OtelGuard {
let log_directory = config.log_directory.as_deref().unwrap_or(DEFAULT_LOG_DIR);
let log_filename = config.log_filename.as_deref().unwrap_or(service_name);
if let Err(e) = fs::create_dir_all(log_directory) {
eprintln!("Failed to create log directory {log_directory}: {e}");
}
#[cfg(unix)]
{
// Linux/macOS Setting Permissions
// Set the log directory permissions to 755 (rwxr-xr-x)
use std::fs::Permissions;
use std::os::unix::fs::PermissionsExt;
match fs::set_permissions(log_directory, Permissions::from_mode(0o755)) {
Ok(_) => eprintln!("Log directory permissions set to 755: {log_directory}"),
Err(e) => eprintln!("Failed to set log directory permissions {log_directory}: {e}"),
}
}
// Build log cutting conditions
let rotation_criterion = match (config.log_rotation_time.as_deref(), config.log_rotation_size_mb) {
// Cut by time and size at the same time
@@ -354,13 +370,15 @@ pub(crate) fn init_telemetry(config: &OtelConfig) -> OtelGuard {
FileSpec::default()
.directory(log_directory)
.basename(log_filename)
.suffix("log"),
.suppress_timestamp(),
)
.rotate(rotation_criterion, Naming::TimestampsDirect, Cleanup::KeepLogFiles(keep_files.into()))
.format_for_files(format_for_file) // Add a custom formatting function for file output
.duplicate_to_stdout(level_filter) // Use dynamic levels
.format_for_stdout(format_with_color) // Add a custom formatting function for terminal output
.write_mode(WriteMode::Async)
.write_mode(WriteMode::BufferAndFlush)
.append() // Avoid clearing existing logs at startup
.print_message() // Startup information output to console
.start();
if let Ok(logger) = flexi_logger_result {
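The telemetry hunk above swaps .suffix("log") for .suppress_timestamp() and moves from WriteMode::Async to WriteMode::BufferAndFlush with .append(). A minimal sketch of wiring up the same kind of flexi_logger builder, assuming a recent flexi_logger plus the log crate; the duplicate-to-stdout level and the custom format closures used in the real code are left out:

use flexi_logger::{Age, Cleanup, Criterion, FileSpec, Logger, Naming, WriteMode};

fn main() -> Result<(), Box<dyn std::error::Error>> {
    // Sketch only: rotate by age or size, keep 7 rotated files, append on restart.
    let _logger = Logger::try_with_str("info")?
        .log_to_file(
            FileSpec::default()
                .directory("logs")
                .basename("rustfs")
                .suppress_timestamp(),
        )
        .rotate(
            Criterion::AgeOrSize(Age::Day, 100 * 1024 * 1024),
            Naming::TimestampsDirect,
            Cleanup::KeepLogFiles(7),
        )
        .write_mode(WriteMode::BufferAndFlush)
        .append()
        .print_message()
        .start()?;

    log::info!("logger initialised");
    Ok(())
}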
@@ -420,7 +438,7 @@ fn format_with_color(w: &mut dyn std::io::Write, now: &mut DeferredNow, record:
writeln!(
w,
"[{}] {} [{}] [{}:{}] [{}:{}] {}",
now.now().format("%Y-%m-%d %H:%M:%S%.6f"),
now.now().format(flexi_logger::TS_DASHES_BLANK_COLONS_DOT_BLANK),
level_style.paint(level.to_string()),
Color::Magenta.paint(record.target()),
Color::Blue.paint(record.file().unwrap_or("unknown")),
@@ -444,7 +462,7 @@ fn format_for_file(w: &mut dyn std::io::Write, now: &mut DeferredNow, record: &R
writeln!(
w,
"[{}] {} [{}] [{}:{}] [{}:{}] {}",
now.now().format("%Y-%m-%d %H:%M:%S%.6f"),
now.now().format(flexi_logger::TS_DASHES_BLANK_COLONS_DOT_BLANK),
level,
record.target(),
record.file().unwrap_or("unknown"),


@@ -107,7 +107,7 @@ mod tests {
#[test_case("jwt:dwebsite/aaa")]
#[test_case("sfvc:DuratdionSeconds")]
#[test_case("svc:DursationSeconds/aaa")]
fn test_deserialize_falied(key: &str) {
fn test_deserialize_failed(key: &str) {
let val = serde_json::from_str::<Key>(key);
assert!(val.is_err());
}


@@ -30,9 +30,9 @@ use super::{
pub struct ResourceSet(pub HashSet<Resource>);
impl ResourceSet {
pub fn is_match(&self, resource: &str, conditons: &HashMap<String, Vec<String>>) -> bool {
pub fn is_match(&self, resource: &str, conditions: &HashMap<String, Vec<String>>) -> bool {
for re in self.0.iter() {
if re.is_match(resource, conditons) {
if re.is_match(resource, conditions) {
return true;
}
}
@@ -85,14 +85,14 @@ pub enum Resource {
impl Resource {
pub const S3_PREFIX: &'static str = "arn:aws:s3:::";
pub fn is_match(&self, resource: &str, conditons: &HashMap<String, Vec<String>>) -> bool {
pub fn is_match(&self, resource: &str, conditions: &HashMap<String, Vec<String>>) -> bool {
let mut pattern = match self {
Resource::S3(s) => s.to_owned(),
Resource::Kms(s) => s.to_owned(),
};
if !conditons.is_empty() {
if !conditions.is_empty() {
for key in KeyName::COMMON_KEYS {
if let Some(rvalue) = conditons.get(key.name()) {
if let Some(rvalue) = conditions.get(key.name()) {
if matches!(rvalue.first().map(|c| !c.is_empty()), Some(true)) {
pattern = pattern.replace(&key.var_name(), &rvalue[0]);
}
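is_match above first substitutes condition values into the resource pattern and only then compares it against the requested resource. A small sketch of that substitute-then-match flow, with a plain single-`*` wildcard standing in for the real matcher and a hypothetical ${aws:username} key:

use std::collections::HashMap;

// Sketch only: replace known condition variables, then do a single-`*` wildcard match.
fn is_match(pattern: &str, resource: &str, conditions: &HashMap<String, Vec<String>>) -> bool {
    let mut pattern = pattern.to_owned();
    for (key, values) in conditions {
        if let Some(first) = values.first() {
            if !first.is_empty() {
                pattern = pattern.replace(&format!("${{{key}}}"), first);
            }
        }
    }
    match pattern.split_once('*') {
        Some((prefix, suffix)) => {
            resource.starts_with(prefix)
                && resource.ends_with(suffix)
                && resource.len() >= prefix.len() + suffix.len()
        }
        None => pattern == resource,
    }
}

fn main() {
    let mut conditions = HashMap::new();
    conditions.insert("aws:username".to_string(), vec!["alice".to_string()]);
    assert!(is_match("mybucket/${aws:username}/*", "mybucket/alice/reports/1.csv", &conditions));
    assert!(!is_match("mybucket/${aws:username}/*", "mybucket/bob/reports/1.csv", &conditions));
}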


@@ -39,14 +39,21 @@ pub async fn node_service_time_out_client(
Box<dyn Error>,
> {
let token: MetadataValue<_> = "rustfs rpc".parse()?;
let channel = match GLOBAL_Conn_Map.read().await.get(addr) {
Some(channel) => channel.clone(),
let channel = { GLOBAL_Conn_Map.read().await.get(addr).cloned() };
let channel = match channel {
Some(channel) => channel,
None => {
let connector = Endpoint::from_shared(addr.to_string())?.connect_timeout(Duration::from_secs(60));
connector.connect().await?
let channel = connector.connect().await?;
{
GLOBAL_Conn_Map.write().await.insert(addr.to_string(), channel.clone());
}
channel
}
};
GLOBAL_Conn_Map.write().await.insert(addr.to_string(), channel.clone());
// let timeout_channel = Timeout::new(channel, Duration::from_secs(60));
Ok(NodeServiceClient::with_interceptor(
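The fix above clones any cached channel inside a short-lived read guard, connects outside the locks on a miss, and publishes the new channel under the write lock, instead of re-inserting on every call. A self-contained sketch of that pattern, assuming tokio and once_cell, with a plain String plus a hypothetical connect() helper standing in for the tonic Channel:

use once_cell::sync::Lazy;
use std::collections::HashMap;
use tokio::sync::RwLock;

static CONN_MAP: Lazy<RwLock<HashMap<String, String>>> = Lazy::new(|| RwLock::new(HashMap::new()));

// Hypothetical stand-in for Endpoint::connect().
async fn connect(addr: &str) -> String {
    format!("channel-to-{addr}")
}

async fn get_channel(addr: &str) -> String {
    // Hold the read guard only long enough to clone a cached entry.
    let cached = { CONN_MAP.read().await.get(addr).cloned() };
    match cached {
        Some(channel) => channel,
        None => {
            // Connect outside of any lock, then publish the result.
            let channel = connect(addr).await;
            CONN_MAP.write().await.insert(addr.to_string(), channel.clone());
            channel
        }
    }
}

#[tokio::main]
async fn main() {
    let first = get_channel("http://node1:9000").await;
    let second = get_channel("http://node1:9000").await; // served from the cache
    assert_eq!(first, second);
}

Two concurrent misses can still both connect; the later insert simply wins, which matches the behaviour of the patched code while avoiding holding any guard across the connect.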

entrypoint.sh (new file, 104 lines)

@@ -0,0 +1,104 @@
#!/bin/bash
set -e
APP_USER=rustfs
APP_GROUP=rustfs
APP_UID=${PUID:-1000}
APP_GID=${PGID:-1000}
# Parse RUSTFS_VOLUMES into array (support space, comma, tab as separator)
VOLUME_RAW="${RUSTFS_VOLUMES:-/data}"
# Replace comma and tab with space, then split
VOLUME_RAW=$(echo "$VOLUME_RAW" | tr ',\t' ' ')
read -ra ALL_VOLUMES <<< "$VOLUME_RAW"
# Only keep local volumes (start with /, not http/https)
LOCAL_VOLUMES=()
for vol in "${ALL_VOLUMES[@]}"; do
if [[ "$vol" =~ ^/ ]] && [[ ! "$vol" =~ ^https?:// ]]; then
# Not a URL (http/https), just a local path
LOCAL_VOLUMES+=("$vol")
fi
# If it's a URL (http/https), skip
# If it's an empty string, skip
# If it's a local path, keep
# (We don't support other protocols here)
done
# Always ensure /logs is included for permission fix
include_logs=1
for vol in "${LOCAL_VOLUMES[@]}"; do
if [ "$vol" = "/logs" ]; then
include_logs=0
break
fi
done
if [ $include_logs -eq 1 ]; then
LOCAL_VOLUMES+=("/logs")
fi
# Try to update rustfs UID/GID if needed (requires root and shadow tools)
update_user_group_ids() {
local uid="$1"
local gid="$2"
local user="$3"
local group="$4"
local updated=0
if [ "$(id -u "$user")" != "$uid" ]; then
if command -v usermod >/dev/null 2>&1; then
echo "🔧 Updating UID of $user to $uid"
usermod -u "$uid" "$user"
updated=1
fi
fi
if [ "$(id -g "$group")" != "$gid" ]; then
if command -v groupmod >/dev/null 2>&1; then
echo "🔧 Updating GID of $group to $gid"
groupmod -g "$gid" "$group"
updated=1
fi
fi
return $updated
}
echo "📦 Initializing mount directories: ${LOCAL_VOLUMES[*]}"
for vol in "${LOCAL_VOLUMES[@]}"; do
if [ ! -d "$vol" ]; then
echo "📁 Creating directory: $vol"
mkdir -p "$vol"
fi
# Alpine busybox stat does not support -c, coreutils is required
dir_uid=$(stat -c '%u' "$vol")
dir_gid=$(stat -c '%g' "$vol")
if [ "$dir_uid" != "$APP_UID" ] || [ "$dir_gid" != "$APP_GID" ]; then
if [[ "$SKIP_CHOWN" != "true" ]]; then
# Prefer to update rustfs user/group UID/GID
update_user_group_ids "$dir_uid" "$dir_gid" "$APP_USER" "$APP_GROUP" || \
{
echo "🔧 Fixing ownership for: $vol$APP_USER:$APP_GROUP"
if [[ -n "$CHOWN_RECURSION_DEPTH" ]]; then
echo "🔧 Applying ownership fix with recursion depth: $CHOWN_RECURSION_DEPTH"
find "$vol" -mindepth 0 -maxdepth "$CHOWN_RECURSION_DEPTH" -exec chown "$APP_USER:$APP_GROUP" {} \;
else
echo "🔧 Applying ownership fix recursively (full depth)"
chown -R "$APP_USER:$APP_GROUP" "$vol"
fi
}
else
echo "⚠️ SKIP_CHOWN is enabled. Skipping ownership fix for: $vol"
fi
fi
chmod 700 "$vol"
done
# Warn if default credentials are used
if [[ "$RUSTFS_ACCESS_KEY" == "rustfsadmin" || "$RUSTFS_SECRET_KEY" == "rustfsadmin" ]]; then
echo "⚠️ WARNING: Using default RUSTFS_ACCESS_KEY or RUSTFS_SECRET_KEY"
echo "⚠️ It is strongly recommended to override these values in production!"
fi
echo "🚀 Starting application: $*"
exec gosu "$APP_USER" "$@"


@@ -74,7 +74,7 @@ use tracing::{error, info, warn};
pub mod bucket_meta;
pub mod event;
pub mod group;
pub mod policys;
pub mod policies;
pub mod pools;
pub mod rebalance;
pub mod service_account;
@@ -798,9 +798,9 @@ pub struct GetReplicationMetricsHandler {}
impl Operation for GetReplicationMetricsHandler {
async fn call(&self, _req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
error!("GetReplicationMetricsHandler");
let querys = extract_query_params(&_req.uri);
if let Some(bucket) = querys.get("bucket") {
error!("get bucket:{} metris", bucket);
let queries = extract_query_params(&_req.uri);
if let Some(bucket) = queries.get("bucket") {
error!("get bucket:{} metrics", bucket);
}
//return Err(s3_error!(InvalidArgument, "Invalid bucket name"));
//Ok(S3Response::with_headers((StatusCode::OK, Body::from()), header))
@@ -815,7 +815,7 @@ impl Operation for SetRemoteTargetHandler {
//return Ok(S3Response::new((StatusCode::OK, Body::from("OK".to_string()))));
// println!("handle MetricsHandler, params: {:?}", _req.input);
info!("SetRemoteTargetHandler params: {:?}", _req.credentials);
let querys = extract_query_params(&_req.uri);
let queries = extract_query_params(&_req.uri);
let Some(_cred) = _req.credentials else {
error!("credentials null");
return Err(s3_error!(InvalidRequest, "get cred failed"));
@@ -825,7 +825,7 @@ impl Operation for SetRemoteTargetHandler {
//println!("body: {}", std::str::from_utf8(&body.clone()).unwrap());
//println!("bucket is:{}", bucket.clone());
if let Some(bucket) = querys.get("bucket") {
if let Some(bucket) = queries.get("bucket") {
if bucket.is_empty() {
info!("have bucket: {}", bucket);
return Ok(S3Response::new((StatusCode::OK, Body::from("fuck".to_string()))));
@@ -842,7 +842,7 @@ impl Operation for SetRemoteTargetHandler {
{
Ok(info) => {
info!("Bucket Info: {:?}", info);
if !info.versionning {
if !info.versioning {
return Ok(S3Response::new((StatusCode::FORBIDDEN, Body::from("bucket need versioned".to_string()))));
}
}
@@ -932,13 +932,13 @@ impl Operation for ListRemoteTargetHandler {
async fn call(&self, _req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
warn!("list GetRemoteTargetHandler, params: {:?}", _req.credentials);
let querys = extract_query_params(&_req.uri);
let queries = extract_query_params(&_req.uri);
let Some(_cred) = _req.credentials else {
error!("credentials null");
return Err(s3_error!(InvalidRequest, "get cred failed"));
};
if let Some(bucket) = querys.get("bucket") {
if let Some(bucket) = queries.get("bucket") {
if bucket.is_empty() {
error!("bucket parameter is empty");
return Ok(S3Response::new((
@@ -957,7 +957,7 @@ impl Operation for ListRemoteTargetHandler {
{
Ok(info) => {
info!("Bucket Info: {:?}", info);
if !info.versionning {
if !info.versioning {
return Ok(S3Response::new((
StatusCode::FORBIDDEN,
Body::from("Bucket needs versioning".to_string()),
@@ -1009,8 +1009,8 @@ pub struct RemoveRemoteTargetHandler {}
impl Operation for RemoveRemoteTargetHandler {
async fn call(&self, _req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
debug!("remove remote target called");
let querys = extract_query_params(&_req.uri);
let Some(bucket) = querys.get("bucket") else {
let queries = extract_query_params(&_req.uri);
let Some(bucket) = queries.get("bucket") else {
return Ok(S3Response::new((
StatusCode::BAD_REQUEST,
Body::from("Bucket parameter is required".to_string()),
@@ -1019,7 +1019,7 @@ impl Operation for RemoveRemoteTargetHandler {
let mut need_delete = true;
if let Some(arnstr) = querys.get("arn") {
if let Some(arnstr) = queries.get("arn") {
let _arn = bucket_targets::ARN::parse(arnstr);
match get_replication_config(bucket).await {


@@ -135,7 +135,7 @@ impl Operation for AddServiceAccount {
let is_svc_acc = target_user == req_user || target_user == req_parent_user;
let mut taget_groups = None;
let mut target_groups = None;
let mut opts = NewServiceAccountOpts {
access_key: create_req.access_key,
secret_key: create_req.secret_key,
@@ -154,7 +154,7 @@ impl Operation for AddServiceAccount {
target_user = req_parent_user;
}
taget_groups = req_groups;
target_groups = req_groups;
if let Some(claims) = cred.claims {
if opts.claims.is_none() {
@@ -172,7 +172,7 @@ impl Operation for AddServiceAccount {
}
let (new_cred, _) = iam_store
.new_service_account(&target_user, taget_groups, opts)
.new_service_account(&target_user, target_groups, opts)
.await
.map_err(|e| {
debug!("create service account failed, e: {:?}", e);


@@ -545,7 +545,7 @@ impl Operation for ExportIam {
USER_POLICY_MAPPINGS_FILE => {
let mut user_policy_mappings: HashMap<String, MappedPolicy> = HashMap::new();
iam_store
.load_mapped_policys(UserType::Reg, false, &mut user_policy_mappings)
.load_mapped_policies(UserType::Reg, false, &mut user_policy_mappings)
.await
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
@@ -561,7 +561,7 @@ impl Operation for ExportIam {
GROUP_POLICY_MAPPINGS_FILE => {
let mut group_policy_mappings = HashMap::new();
iam_store
.load_mapped_policys(UserType::Reg, true, &mut group_policy_mappings)
.load_mapped_policies(UserType::Reg, true, &mut group_policy_mappings)
.await
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
@@ -577,7 +577,7 @@ impl Operation for ExportIam {
STS_USER_POLICY_MAPPINGS_FILE => {
let mut sts_user_policy_mappings: HashMap<String, MappedPolicy> = HashMap::new();
iam_store
.load_mapped_policys(UserType::Sts, false, &mut sts_user_policy_mappings)
.load_mapped_policies(UserType::Sts, false, &mut sts_user_policy_mappings)
.await
.map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
let json_str = serde_json::to_vec(&sts_user_policy_mappings)


@@ -20,7 +20,7 @@ pub mod utils;
// use ecstore::global::{is_dist_erasure, is_erasure};
use handlers::{
bucket_meta, group, policys, pools, rebalance,
bucket_meta, group, policies, pools, rebalance,
service_account::{AddServiceAccount, DeleteServiceAccount, InfoServiceAccount, ListServiceAccount, UpdateServiceAccount},
sts, tier, user,
};
@@ -333,35 +333,35 @@ fn register_user_route(r: &mut S3Router<AdminOperation>) -> std::io::Result<()>
r.insert(
Method::GET,
format!("{}{}", ADMIN_PREFIX, "/v3/list-canned-policies").as_str(),
AdminOperation(&policys::ListCannedPolicies {}),
AdminOperation(&policies::ListCannedPolicies {}),
)?;
// info-canned-policy?name=xxx
r.insert(
Method::GET,
format!("{}{}", ADMIN_PREFIX, "/v3/info-canned-policy").as_str(),
AdminOperation(&policys::InfoCannedPolicy {}),
AdminOperation(&policies::InfoCannedPolicy {}),
)?;
// add-canned-policy?name=xxx
r.insert(
Method::PUT,
format!("{}{}", ADMIN_PREFIX, "/v3/add-canned-policy").as_str(),
AdminOperation(&policys::AddCannedPolicy {}),
AdminOperation(&policies::AddCannedPolicy {}),
)?;
// remove-canned-policy?name=xxx
r.insert(
Method::DELETE,
format!("{}{}", ADMIN_PREFIX, "/v3/remove-canned-policy").as_str(),
AdminOperation(&policys::RemoveCannedPolicy {}),
AdminOperation(&policies::RemoveCannedPolicy {}),
)?;
// set-user-or-group-policy?policyName=xxx&userOrGroup=xxx&isGroup=xxx
r.insert(
Method::PUT,
format!("{}{}", ADMIN_PREFIX, "/v3/set-user-or-group-policy").as_str(),
AdminOperation(&policys::SetPolicyForUserOrGroup {}),
AdminOperation(&policies::SetPolicyForUserOrGroup {}),
)?;
Ok(())


@@ -740,7 +740,7 @@ impl S3 for FS {
if let Some(part_num) = part_number {
if part_num == 0 {
return Err(s3_error!(InvalidArgument, "part_numer invalid"));
return Err(s3_error!(InvalidArgument, "Invalid part number: part number must be greater than 0"));
}
}
@@ -792,11 +792,6 @@ impl S3 for FS {
};
let last_modified = info.mod_time.map(Timestamp::from);
let body = Some(StreamingBlob::wrap(bytes_stream(
ReaderStream::with_capacity(reader.stream, DEFAULT_READ_BUFFER_SIZE),
info.size as usize,
)));
let mut rs = rs;
if let Some(part_number) = part_number {
@@ -805,17 +800,25 @@ impl S3 for FS {
}
}
let mut content_length = info.size as i64;
let content_range = if let Some(rs) = rs {
let total_size = info.get_actual_size().map_err(ApiError::from)?;
let (start, length) = rs.get_offset_length(total_size as i64).map_err(ApiError::from)?;
content_length = length;
Some(format!("bytes {}-{}/{}", start, start as i64 + length - 1, total_size))
} else {
None
};
let body = Some(StreamingBlob::wrap(bytes_stream(
ReaderStream::with_capacity(reader.stream, DEFAULT_READ_BUFFER_SIZE),
content_length as usize,
)));
let output = GetObjectOutput {
body,
content_length: Some(info.size as i64),
content_length: Some(content_length),
last_modified,
content_type,
accept_ranges: Some("bytes".to_string()),
@@ -879,7 +882,7 @@ impl S3 for FS {
if let Some(part_num) = part_number {
if part_num == 0 {
return Err(s3_error!(InvalidArgument, "part_numer invalid"));
return Err(s3_error!(InvalidArgument, "part_number invalid"));
}
}
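With the range change above, a ranged GetObject reports the length of the selected range rather than the full object size, and emits Content-Range as bytes start-end/total. The arithmetic, shown with a hypothetical (start, length) pair like the one get_offset_length returns in the hunk above:

// Sketch only: build the Content-Range header value from an offset/length pair.
fn content_range(start: i64, length: i64, total_size: i64) -> String {
    // HTTP byte ranges are inclusive, so the last byte is start + length - 1.
    format!("bytes {}-{}/{}", start, start + length - 1, total_size)
}

fn main() {
    // A 1 MiB object read from byte 100 for 500 bytes.
    assert_eq!(content_range(100, 500, 1_048_576), "bytes 100-599/1048576");
    // The response content_length for this request would be 500, not 1_048_576.
}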
@@ -1938,7 +1941,7 @@ impl S3 for FS {
let conditions = get_condition_values(&req.headers, &auth::Credentials::default());
let read_olny = PolicySys::is_allowed(&BucketPolicyArgs {
let read_only = PolicySys::is_allowed(&BucketPolicyArgs {
bucket: &bucket,
action: Action::S3Action(S3Action::ListBucketAction),
is_owner: false,
@@ -1949,7 +1952,7 @@ impl S3 for FS {
})
.await;
let write_olny = PolicySys::is_allowed(&BucketPolicyArgs {
let write_only = PolicySys::is_allowed(&BucketPolicyArgs {
bucket: &bucket,
action: Action::S3Action(S3Action::PutObjectAction),
is_owner: false,
@@ -1960,7 +1963,7 @@ impl S3 for FS {
})
.await;
let is_public = read_olny && write_olny;
let is_public = read_only && write_only;
let output = GetBucketPolicyStatusOutput {
policy_status: Some(PolicyStatus {
@@ -1993,9 +1996,9 @@ impl S3 for FS {
}
};
let policys = try_!(serde_json::to_string(&cfg));
let policies = try_!(serde_json::to_string(&cfg));
Ok(S3Response::new(GetBucketPolicyOutput { policy: Some(policys) }))
Ok(S3Response::new(GetBucketPolicyOutput { policy: Some(policies) }))
}
async fn put_bucket_policy(&self, req: S3Request<PutBucketPolicyInput>) -> S3Result<S3Response<PutBucketPolicyOutput>> {
@@ -2689,7 +2692,7 @@ impl S3 for FS {
for batch in results {
csv_writer
.write(&batch)
.map_err(|e| s3_error!(InternalError, "cann't encode output to csv. e: {}", e.to_string()))?;
.map_err(|e| s3_error!(InternalError, "can't encode output to csv. e: {}", e.to_string()))?;
}
} else if input.request.output_serialization.json.is_some() {
let mut json_writer = JsonWriterBuilder::new()
@@ -2698,13 +2701,16 @@ impl S3 for FS {
for batch in results {
json_writer
.write(&batch)
.map_err(|e| s3_error!(InternalError, "cann't encode output to json. e: {}", e.to_string()))?;
.map_err(|e| s3_error!(InternalError, "can't encode output to json. e: {}", e.to_string()))?;
}
json_writer
.finish()
.map_err(|e| s3_error!(InternalError, "writer output into json error, e: {}", e.to_string()))?;
} else {
return Err(s3_error!(InvalidArgument, "unknow output format"));
return Err(s3_error!(
InvalidArgument,
"Unsupported output format. Supported formats are CSV and JSON"
));
}
let (tx, rx) = mpsc::channel::<S3Result<SelectObjectContentEvent>>(2);


@@ -51,7 +51,7 @@ export RUSTFS_CONSOLE_ADDRESS=":9001"
# export RUSTFS_TLS_PATH="./deploy/certs"
# Observability related configuration
export RUSTFS_OBS_ENDPOINT=http://localhost:4317 # Address of the OpenTelemetry Collector
#export RUSTFS_OBS_ENDPOINT=http://localhost:4317 # Address of the OpenTelemetry Collector
#export RUSTFS_OBS_USE_STDOUT=false # Whether to also log to stdout
#export RUSTFS_OBS_SAMPLE_RATIO=2.0 # Sampling ratio between 0.0 and 1.0; 0.0 means no sampling, 1.0 samples everything
#export RUSTFS_OBS_METER_INTERVAL=1 # Sampling interval in seconds
@@ -64,8 +64,7 @@ export RUSTFS_OBS_LOG_DIRECTORY="$current_dir/deploy/logs" # Log directory
export RUSTFS_OBS_LOG_ROTATION_TIME="minute" # Log rotation time unit, can be "second", "minute", "hour", "day"
export RUSTFS_OBS_LOG_ROTATION_SIZE_MB=1 # Log rotation size in MB
#
export RUSTFS_SINKS_FILE_PATH="$current_dir/deploy/logs/rustfs.log"
export RUSTFS_SINKS_FILE_PATH="$current_dir/deploy/logs"
export RUSTFS_SINKS_FILE_BUFFER_SIZE=12
export RUSTFS_SINKS_FILE_FLUSH_INTERVAL_MS=1000
export RUSTFS_SINKS_FILE_FLUSH_THRESHOLD=100