Compare commits
41 Commits
1.0.0-alph...1.0.0-alph
| Author | SHA1 | Date |
|---|---|---|
|  | 2525b66658 |  |
|  | 58c5a633e2 |  |
|  | aefd894fc2 |  |
|  | 1e1d4646a2 |  |
|  | b97845fffd |  |
|  | 84f5a4cb48 |  |
|  | 2832f0e089 |  |
|  | a3b5445824 |  |
|  | 363e37c791 |  |
|  | 1b0b041530 |  |
|  | 7d5fc87002 |  |
|  | 13130e9dd4 |  |
|  | 1061ce11a3 |  |
|  | 9f9a74000d |  |
|  | d1863018df |  |
|  | 166080aac8 |  |
|  | 78b2487639 |  |
|  | 79f4e81fea |  |
|  | 28da78d544 |  |
|  | df2eb9bc6a |  |
|  | 7c20d92fe5 |  |
|  | b4c316c662 |  |
|  | 411b511937 |  |
|  | c902475443 |  |
|  | 00d8008a89 |  |
|  | 36acb5bce9 |  |
|  | e033b019f6 |  |
|  | 259b80777e |  |
|  | abdfad8521 |  |
|  | c498fbcb27 |  |
|  | 874d486b1e |  |
|  | 21516251b0 |  |
|  | a2f83b0d2d |  |
|  | aa65766312 |  |
|  | 660f004cfd |  |
|  | 6d2c420f54 |  |
|  | 5f0b9a5fa8 |  |
|  | 8378e308e0 |  |
|  | b9f54519fd |  |
|  | 4108a9649f |  |
|  | 6244e23451 |  |
@@ -517,7 +517,7 @@ let results = join_all(futures).await;

 ### 3. Caching Strategy

-- Use `lazy_static` or `OnceCell` for global caching
+- Use `LazyLock` for global caching
 - Implement LRU cache to avoid memory leaks

 ## Testing Guidelines
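The hunk above updates the project's Rust coding guidance from `lazy_static`/`OnceCell` to the standard library's `LazyLock` (stable since Rust 1.80; this workspace pins `rust-version = "1.85"`). A minimal sketch of the recommended pattern follows; the cache name and contents are illustrative only, not taken from the repository:

```rust
use std::collections::HashMap;
use std::sync::{LazyLock, Mutex};

// Global cache initialized lazily on first access. `LazyLock` replaces the
// older `lazy_static!` macro and `once_cell`/`OnceCell` patterns with no
// extra dependency.
static CONFIG_CACHE: LazyLock<Mutex<HashMap<String, String>>> =
    LazyLock::new(|| Mutex::new(HashMap::new()));

fn cached_lookup(key: &str) -> Option<String> {
    CONFIG_CACHE.lock().unwrap().get(key).cloned()
}

fn cache_insert(key: String, value: String) {
    // Per the guideline's second bullet, a bounded structure (e.g. an LRU)
    // should back a long-lived global cache so it cannot grow without limit.
    CONFIG_CACHE.lock().unwrap().insert(key, value);
}

fn main() {
    cache_insert("region".into(), "cn-beijing".into());
    assert_eq!(cached_lookup("region").as_deref(), Some("cn-beijing"));
}
```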
16
.github/actions/setup/action.yml
vendored
@@ -60,18 +60,11 @@ runs:
           pkg-config \
           libssl-dev

-    - name: Cache protoc binary
-      id: cache-protoc
-      uses: actions/cache@v4
-      with:
-        path: ~/.local/bin/protoc
-        key: protoc-31.1-${{ runner.os }}-${{ runner.arch }}
-
     - name: Install protoc
-      if: steps.cache-protoc.outputs.cache-hit != 'true'
       uses: arduino/setup-protoc@v3
       with:
         version: "31.1"
         repo-token: ${{ inputs.github-token }}

     - name: Install flatc
       uses: Nugine/setup-flatc@v1
@@ -93,6 +86,9 @@ runs:
       if: inputs.install-cross-tools == 'true'
       uses: taiki-e/install-action@cargo-zigbuild

+    - name: Install cargo-nextest
+      uses: taiki-e/install-action@cargo-nextest
+
     - name: Setup Rust cache
       uses: Swatinem/rust-cache@v2
       with:
@@ -100,7 +96,3 @@ runs:
         cache-on-failure: true
         shared-key: ${{ inputs.cache-shared-key }}
         save-if: ${{ inputs.cache-save-if }}
-        # Cache workspace dependencies
-        workspaces: |
-          . -> target
-          cli/rustfs-gui -> cli/rustfs-gui/target
434
.github/workflows/build.yml
vendored
@@ -65,26 +65,9 @@ env:
   CARGO_INCREMENTAL: 0

 jobs:
-  # First layer: GitHub Actions level optimization (handling duplicates and concurrency)
-  skip-duplicate:
-    name: Skip Duplicate Actions
-    runs-on: ubuntu-latest
-    outputs:
-      should_skip: ${{ steps.skip_check.outputs.should_skip }}
-    steps:
-      - name: Skip duplicate actions
-        id: skip_check
-        uses: fkirc/skip-duplicate-actions@v5
-        with:
-          concurrent_skipping: "same_content_newer"
-          cancel_others: true
-          paths_ignore: '["*.md", ".github/**", "docs/**", "deploy/**", "scripts/dev_*.sh"]'
-
   # Second layer: Business logic level checks (handling build strategy)
   build-check:
     name: Build Strategy Check
-    needs: skip-duplicate
-    if: needs.skip-duplicate.outputs.should_skip != 'true'
     runs-on: ubuntu-latest
     outputs:
       should_build: ${{ steps.check.outputs.should_build }}
@@ -105,9 +88,11 @@ jobs:
             build_type="development"
           fi

+          # Always build for tag pushes (version releases)
           if [[ "${{ startsWith(github.ref, 'refs/tags/') }}" == "true" ]]; then
             should_build=true
             build_type="release"
+            echo "🏷️ Tag detected: forcing release build"
           fi

           echo "should_build=$should_build" >> $GITHUB_OUTPUT
@@ -117,8 +102,8 @@ jobs:
   # Build RustFS binaries
   build-rustfs:
     name: Build RustFS
-    needs: [skip-duplicate, build-check]
-    if: needs.skip-duplicate.outputs.should_skip != 'true' && needs.build-check.outputs.should_build == 'true'
+    needs: [build-check]
+    if: needs.build-check.outputs.should_build == 'true'
     runs-on: ${{ matrix.os }}
     timeout-minutes: 60
     env:
@@ -145,15 +130,15 @@ jobs:
           target: x86_64-apple-darwin
           cross: false
           platform: macos
+        # Windows builds
+        - os: windows-latest
+          target: x86_64-pc-windows-msvc
+          cross: false
+          platform: windows
+        - os: windows-latest
+          target: aarch64-pc-windows-msvc
+          cross: true
+          platform: windows
-        # # Windows builds (temporarily disabled)
-        # - os: windows-latest
-        #   target: x86_64-pc-windows-msvc
-        #   cross: false
-        #   platform: windows
-        # - os: windows-latest
-        #   target: aarch64-pc-windows-msvc
-        #   cross: true
-        #   platform: windows
     steps:
       - name: Checkout repository
        uses: actions/checkout@v4
@@ -173,6 +158,7 @@ jobs:
       - name: Download static console assets
         run: |
           mkdir -p ./rustfs/static
+          rm -rf ./rustfs/static/*
           if [[ "${{ matrix.platform }}" == "windows" ]]; then
             curl.exe -L "https://dl.rustfs.com/artifacts/console/rustfs-console-latest.zip" -o console.zip --retry 3 --retry-delay 5 --max-time 300
             if [[ $? -eq 0 ]]; then
@@ -183,6 +169,8 @@ jobs:
             echo "// Static assets not available" > ./rustfs/static/empty.txt
           fi
           else
+            chmod +w ./rustfs/static/LICENSE || true
+            rm -f ./rustfs/static/LICENSE
             curl -L "https://dl.rustfs.com/artifacts/console/rustfs-console-latest.zip" \
               -o console.zip --retry 3 --retry-delay 5 --max-time 300
             if [[ $? -eq 0 ]]; then
@@ -217,43 +205,21 @@ jobs:
         shell: bash
         run: |
           PACKAGE_NAME="rustfs-${{ matrix.target }}"
-          mkdir -p "${PACKAGE_NAME}"/{bin,docs}
-
-          # Copy binary with platform-specific handling
-          if [[ "${{ matrix.target }}" == *"windows"* ]]; then
-            cp target/${{ matrix.target }}/release/rustfs.exe "${PACKAGE_NAME}/bin/"
-            # Add Windows-specific files
-            echo "RustFS for Windows" > "${PACKAGE_NAME}/README.txt"
-          else
-            cp target/${{ matrix.target }}/release/rustfs "${PACKAGE_NAME}/bin/"
-            chmod +x "${PACKAGE_NAME}/bin/rustfs"
+          # Create zip packages for all platforms
+          # Ensure zip is available
+          if ! command -v zip &> /dev/null; then
+            if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then
+              sudo apt-get update && sudo apt-get install -y zip
+            fi
           fi
-
-          # Copy documentation
-          [ -f "LICENSE" ] && cp LICENSE "${PACKAGE_NAME}/docs/"
-          [ -f "README.md" ] && cp README.md "${PACKAGE_NAME}/docs/"
-
-          # Add platform-specific information
-          cat > "${PACKAGE_NAME}/docs/PLATFORM_INFO.txt" << EOF
-          Platform: ${{ matrix.platform }}
-          Target: ${{ matrix.target }}
-          Build Date: $(date -u +"%Y-%m-%d %H:%M:%S UTC")
-          Git Commit: ${GITHUB_SHA::8}
-          EOF
-
-          # Create appropriate archive format for platform
-          if [[ "${{ matrix.platform }}" == "windows" ]]; then
-            # Use PowerShell to create zip on Windows
-            powershell -Command "Compress-Archive -Path '${PACKAGE_NAME}' -DestinationPath '${PACKAGE_NAME}.zip'"
-            echo "package_name=${PACKAGE_NAME}" >> $GITHUB_OUTPUT
-            echo "package_file=${PACKAGE_NAME}.zip" >> $GITHUB_OUTPUT
-            echo "Package created: ${PACKAGE_NAME}.zip"
-          else
-            tar -czf "${PACKAGE_NAME}.tar.gz" "${PACKAGE_NAME}"
-            echo "package_name=${PACKAGE_NAME}" >> $GITHUB_OUTPUT
-            echo "package_file=${PACKAGE_NAME}.tar.gz" >> $GITHUB_OUTPUT
-            echo "Package created: ${PACKAGE_NAME}.tar.gz"
-          fi
+          cd target/${{ matrix.target }}/release
+          zip "../../../${PACKAGE_NAME}.zip" rustfs
+          cd ../../..
+          echo "package_name=${PACKAGE_NAME}" >> $GITHUB_OUTPUT
+          echo "package_file=${PACKAGE_NAME}.zip" >> $GITHUB_OUTPUT
+          echo "Package created: ${PACKAGE_NAME}.zip"

       - name: Upload artifacts
         uses: actions/upload-artifact@v4
@@ -262,115 +228,67 @@ jobs:
           path: ${{ steps.package.outputs.package_file }}
           retention-days: ${{ startsWith(github.ref, 'refs/tags/') && 30 || 7 }}

-  # Build GUI (only for releases)
-  build-gui:
-    name: Build GUI
-    needs: [skip-duplicate, build-check, build-rustfs]
-    if: needs.skip-duplicate.outputs.should_skip != 'true' && needs.build-check.outputs.build_type == 'release'
-    runs-on: ${{ matrix.os }}
-    timeout-minutes: 45
-    strategy:
-      fail-fast: false
-      matrix:
-        include:
-          - os: ubuntu-latest
-            target: x86_64-unknown-linux-musl
-            platform: linux
-          - os: macos-latest
-            target: aarch64-apple-darwin
-            platform: macos
-          - os: windows-latest
-            target: x86_64-pc-windows-msvc
-            platform: windows
-    steps:
-      - name: Checkout repository
-        uses: actions/checkout@v4
-
-      - name: Setup Rust environment
-        uses: ./.github/actions/setup
-        with:
-          rust-version: stable
-          target: ${{ matrix.target }}
-          cache-shared-key: gui-${{ matrix.target }}-${{ hashFiles('**/Cargo.lock') }}
-          github-token: ${{ secrets.GITHUB_TOKEN }}
-
-      - name: Download RustFS binary
-        uses: actions/download-artifact@v4
-        with:
-          name: rustfs-${{ matrix.target }}
-          path: ./artifacts
-
-      - name: Prepare embedded binary
-        shell: bash
-        run: |
-          mkdir -p ./cli/rustfs-gui/embedded-rustfs/
-
-          if [[ "${{ matrix.platform }}" == "windows" ]]; then
-            # Extract from zip file
-            unzip ./artifacts/rustfs-${{ matrix.target }}.zip -d ./artifacts/
-            cp ./artifacts/rustfs-${{ matrix.target }}/bin/rustfs.exe ./cli/rustfs-gui/embedded-rustfs/
-          else
-            # Extract from tar.gz file
-            tar -xzf ./artifacts/rustfs-${{ matrix.target }}.tar.gz -C ./artifacts/
-            cp ./artifacts/rustfs-${{ matrix.target }}/bin/rustfs ./cli/rustfs-gui/embedded-rustfs/
-          fi
-
-      - name: Install Dioxus CLI
-        uses: taiki-e/cache-cargo-install-action@v2
-        with:
-          tool: dioxus-cli
-
-      - name: Build GUI
-        working-directory: ./cli/rustfs-gui
-        shell: bash
+      - name: Upload to Aliyun OSS
+        if: needs.build-check.outputs.build_type == 'release' && env.OSS_ACCESS_KEY_ID != ''
+        env:
+          OSS_ACCESS_KEY_ID: ${{ secrets.ALICLOUDOSS_KEY_ID }}
+          OSS_ACCESS_KEY_SECRET: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
+          OSS_REGION: cn-beijing
+          OSS_ENDPOINT: https://oss-cn-beijing.aliyuncs.com
+        run: |
+          # Install ossutil (platform-specific)
+          OSSUTIL_VERSION="2.1.1"
+          case "${{ matrix.platform }}" in
-            "linux")
-              dx bundle --platform linux --package-types deb --package-types appimage --release
+            linux)
+              if [[ "$(uname -m)" == "arm64" ]]; then
+                ARCH="arm64"
+              else
+                ARCH="amd64"
+              fi
+              OSSUTIL_ZIP="ossutil-${OSSUTIL_VERSION}-linux-${ARCH}.zip"
+              OSSUTIL_DIR="ossutil-${OSSUTIL_VERSION}-linux-${ARCH}"
+
+              curl -o "$OSSUTIL_ZIP" "https://gosspublic.alicdn.com/ossutil/v2/${OSSUTIL_VERSION}/${OSSUTIL_ZIP}"
+              unzip "$OSSUTIL_ZIP"
+              mv "${OSSUTIL_DIR}/ossutil" /usr/local/bin/
+              rm -rf "$OSSUTIL_DIR" "$OSSUTIL_ZIP"
+              chmod +x /usr/local/bin/ossutil
+              OSSUTIL_BIN=ossutil
+              ;;
-            "macos")
-              dx bundle --platform macos --package-types dmg --release
-              ;;
-            "windows")
-              dx bundle --platform windows --package-types msi --release
+            macos)
+              if [[ "$(uname -m)" == "arm64" ]]; then
+                ARCH="arm64"
+              else
+                ARCH="amd64"
+              fi
+              OSSUTIL_ZIP="ossutil-${OSSUTIL_VERSION}-mac-${ARCH}.zip"
+              OSSUTIL_DIR="ossutil-${OSSUTIL_VERSION}-mac-${ARCH}"
+
+              curl -o "$OSSUTIL_ZIP" "https://gosspublic.alicdn.com/ossutil/v2/${OSSUTIL_VERSION}/${OSSUTIL_ZIP}"
+              unzip "$OSSUTIL_ZIP"
+              mv "${OSSUTIL_DIR}/ossutil" /usr/local/bin/
+              rm -rf "$OSSUTIL_DIR" "$OSSUTIL_ZIP"
+              chmod +x /usr/local/bin/ossutil
+              OSSUTIL_BIN=ossutil
+              ;;
+          esac

-      - name: Package GUI
-        id: gui_package
-        shell: bash
-        run: |
-          GUI_PACKAGE="rustfs-gui-${{ matrix.target }}"
-          mkdir -p "${GUI_PACKAGE}"
+          # Upload the package file directly to OSS
+          echo "Uploading ${{ steps.package.outputs.package_file }} to OSS..."
+          $OSSUTIL_BIN cp "${{ steps.package.outputs.package_file }}" oss://rustfs-artifacts/artifacts/rustfs/ --force

-          # Copy GUI bundles
-          if [[ -d "cli/rustfs-gui/dist/bundle" ]]; then
-            cp -r cli/rustfs-gui/dist/bundle/* "${GUI_PACKAGE}/"
+          # Create latest.json (only for the first Linux build to avoid duplication)
+          if [[ "${{ matrix.target }}" == "x86_64-unknown-linux-musl" ]]; then
+            VERSION="${GITHUB_REF#refs/tags/v}"
+            echo "{\"version\":\"${VERSION}\",\"release_date\":\"$(date -u +%Y-%m-%dT%H:%M:%SZ)\"}" > latest.json
+            $OSSUTIL_BIN cp latest.json oss://rustfs-version/latest.json --force
+          fi

-          # Create appropriate archive format
-          if [[ "${{ matrix.platform }}" == "windows" ]]; then
-            powershell -Command "Compress-Archive -Path '${GUI_PACKAGE}' -DestinationPath '${GUI_PACKAGE}.zip'"
-            echo "gui_package=${GUI_PACKAGE}" >> $GITHUB_OUTPUT
-            echo "gui_file=${GUI_PACKAGE}.zip" >> $GITHUB_OUTPUT
-          else
-            tar -czf "${GUI_PACKAGE}.tar.gz" "${GUI_PACKAGE}"
-            echo "gui_package=${GUI_PACKAGE}" >> $GITHUB_OUTPUT
-            echo "gui_file=${GUI_PACKAGE}.tar.gz" >> $GITHUB_OUTPUT
-          fi
-
-      - name: Upload GUI artifacts
-        uses: actions/upload-artifact@v4
-        with:
-          name: ${{ steps.gui_package.outputs.gui_package }}
-          path: ${{ steps.gui_package.outputs.gui_file }}
-          retention-days: 30

   # Release management
   release:
     name: GitHub Release
-    needs: [skip-duplicate, build-check, build-rustfs, build-gui]
-    if: always() && needs.skip-duplicate.outputs.should_skip != 'true' && needs.build-check.outputs.build_type == 'release'
+    needs: [build-check, build-rustfs]
+    if: always() && needs.build-check.outputs.build_type == 'release'
     runs-on: ubuntu-latest
     permissions:
       contents: write
@@ -397,16 +315,11 @@ jobs:
           # Organize artifacts
           mkdir -p ./release-files

-          # Copy all artifacts (both .tar.gz and .zip files)
-          find ./release-artifacts -name "*.tar.gz" -exec cp {} ./release-files/ \;
+          # Copy all artifacts (.zip files)
           find ./release-artifacts -name "*.zip" -exec cp {} ./release-files/ \;

           # Generate checksums for all files
           cd ./release-files
-          if ls *.tar.gz >/dev/null 2>&1; then
-            sha256sum *.tar.gz >> SHA256SUMS
-            sha512sum *.tar.gz >> SHA512SUMS
-          fi
           if ls *.zip >/dev/null 2>&1; then
             sha256sum *.zip >> SHA256SUMS
             sha512sum *.zip >> SHA512SUMS
@@ -424,23 +337,28 @@ jobs:
           VERSION="${{ steps.release_prep.outputs.version }}"
           VERSION_CLEAN="${{ steps.release_prep.outputs.version_clean }}"

-          # Get release notes from tag message
-          RELEASE_NOTES=$(git tag -l --format='%(contents)' "${VERSION}")
-          if [[ -z "$RELEASE_NOTES" || "$RELEASE_NOTES" =~ ^[[:space:]]*$ ]]; then
-            RELEASE_NOTES="Release ${VERSION_CLEAN}"
-          fi
+          # Check if release already exists
+          if gh release view "$VERSION" >/dev/null 2>&1; then
+            echo "Release $VERSION already exists, skipping creation"
+          else
+            # Get release notes from tag message
+            RELEASE_NOTES=$(git tag -l --format='%(contents)' "${VERSION}")
+            if [[ -z "$RELEASE_NOTES" || "$RELEASE_NOTES" =~ ^[[:space:]]*$ ]]; then
+              RELEASE_NOTES="Release ${VERSION_CLEAN}"
+            fi

-          # Determine if this is a prerelease
-          PRERELEASE_FLAG=""
-          if [[ "$VERSION" == *"alpha"* ]] || [[ "$VERSION" == *"beta"* ]] || [[ "$VERSION" == *"rc"* ]]; then
-            PRERELEASE_FLAG="--prerelease"
-          fi
+            # Determine if this is a prerelease
+            PRERELEASE_FLAG=""
+            if [[ "$VERSION" == *"alpha"* ]] || [[ "$VERSION" == *"beta"* ]] || [[ "$VERSION" == *"rc"* ]]; then
+              PRERELEASE_FLAG="--prerelease"
+            fi

-          # Create the release
-          gh release create "$VERSION" \
-            --title "RustFS $VERSION_CLEAN" \
-            --notes "$RELEASE_NOTES" \
-            $PRERELEASE_FLAG
+            # Create the release only if it doesn't exist
+            gh release create "$VERSION" \
+              --title "RustFS $VERSION_CLEAN" \
+              --notes "$RELEASE_NOTES" \
+              $PRERELEASE_FLAG
+          fi

       - name: Upload release assets
         env:
@@ -450,8 +368,8 @@ jobs:

           cd ./release-files

-          # Upload all binary archives
-          for file in *.tar.gz *.zip; do
+          # Upload all binary files
+          for file in *.zip; do
             if [[ -f "$file" ]]; then
               echo "Uploading $file..."
               gh release upload "$VERSION" "$file" --clobber
@@ -476,133 +394,27 @@ jobs:
           VERSION="${{ steps.release_prep.outputs.version }}"
           VERSION_CLEAN="${{ steps.release_prep.outputs.version_clean }}"

-          # Get original release notes
-          ORIGINAL_NOTES=$(git tag -l --format='%(contents)' "${VERSION}")
-          if [[ -z "$ORIGINAL_NOTES" || "$ORIGINAL_NOTES" =~ ^[[:space:]]*$ ]]; then
-            ORIGINAL_NOTES="Release ${VERSION_CLEAN}"
+          # Check if release already has custom notes (not auto-generated)
+          EXISTING_NOTES=$(gh release view "$VERSION" --json body --jq '.body' 2>/dev/null || echo "")
+
+          # Only update if release notes are empty or auto-generated
+          if [[ -z "$EXISTING_NOTES" ]] || [[ "$EXISTING_NOTES" == *"Release ${VERSION_CLEAN}"* ]]; then
+            echo "Updating release notes for $VERSION"
+
+            # Get original release notes from tag
+            ORIGINAL_NOTES=$(git tag -l --format='%(contents)' "${VERSION}")
+            if [[ -z "$ORIGINAL_NOTES" || "$ORIGINAL_NOTES" =~ ^[[:space:]]*$ ]]; then
+              ORIGINAL_NOTES="Release ${VERSION_CLEAN}"
+            fi
+
+            # Use external template file and substitute variables
+            sed -e "s/\${VERSION}/$VERSION/g" \
+              -e "s/\${VERSION_CLEAN}/$VERSION_CLEAN/g" \
+              -e "s/\${ORIGINAL_NOTES}/$(echo "$ORIGINAL_NOTES" | sed 's/[[\.*^$()+?{|]/\\&/g')/g" \
+              .github/workflows/release-notes-template.md > enhanced_notes.md
+
+            # Update the release with enhanced notes
+            gh release edit "$VERSION" --notes-file enhanced_notes.md
+          else
+            echo "Release $VERSION already has custom notes, skipping update to preserve manual edits"
           fi

-          # Create comprehensive release notes
-          cat > enhanced_notes.md << EOF
-          ## RustFS ${VERSION_CLEAN}
-
-          ${ORIGINAL_NOTES}
-
-          ---
-
-          ### 🚀 Quick Download
-
-          **Linux (Recommended - Static Binaries):**
-          \`\`\`bash
-          # x86_64 (Intel/AMD)
-          curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-x86_64-unknown-linux-musl.tar.gz
-          tar -xzf rustfs-x86_64-unknown-linux-musl.tar.gz
-          sudo cp rustfs-x86_64-unknown-linux-musl/bin/rustfs /usr/local/bin/
-
-          # ARM64 (Graviton, Apple Silicon VMs)
-          curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-aarch64-unknown-linux-musl.tar.gz
-          tar -xzf rustfs-aarch64-unknown-linux-musl.tar.gz
-          sudo cp rustfs-aarch64-unknown-linux-musl/bin/rustfs /usr/local/bin/
-          \`\`\`
-
-          **macOS:**
-          \`\`\`bash
-          # Apple Silicon (M1/M2/M3)
-          curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-aarch64-apple-darwin.tar.gz
-          tar -xzf rustfs-aarch64-apple-darwin.tar.gz
-          sudo cp rustfs-aarch64-apple-darwin/bin/rustfs /usr/local/bin/
-
-          # Intel
-          curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-x86_64-apple-darwin.tar.gz
-          tar -xzf rustfs-x86_64-apple-darwin.tar.gz
-          sudo cp rustfs-x86_64-apple-darwin/bin/rustfs /usr/local/bin/
-          \`\`\`
-
-          **Windows:**
-          \`\`\`powershell
-          # Download and extract
-          Invoke-WebRequest https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-x86_64-pc-windows-msvc.zip -OutFile rustfs.zip
-          Expand-Archive rustfs.zip
-          \`\`\`
-
-          ### 📁 Available Downloads
-
-          | Platform | Architecture | File | Size |
-          |----------|-------------|------|------|
-          | Linux | x86_64 | \`rustfs-x86_64-unknown-linux-musl.tar.gz\` | Static binary |
-          | Linux | ARM64 | \`rustfs-aarch64-unknown-linux-musl.tar.gz\` | Static binary |
-          | macOS | Apple Silicon | \`rustfs-aarch64-apple-darwin.tar.gz\` | Native binary |
-          | macOS | Intel | \`rustfs-x86_64-apple-darwin.tar.gz\` | Native binary |
-          | Windows | x86_64 | \`rustfs-x86_64-pc-windows-msvc.zip\` | MSVC binary |
-          | Windows | ARM64 | \`rustfs-aarch64-pc-windows-msvc.zip\` | Experimental |
-
-          ### 🔐 Verification
-
-          Download checksums and verify your download:
-          \`\`\`bash
-          # Download checksums
-          curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/SHA256SUMS
-
-          # Verify (Linux/macOS)
-          sha256sum -c SHA256SUMS --ignore-missing
-
-          # Verify (Windows PowerShell)
-          \$expectedHash = (Get-Content SHA256SUMS | Select-String "rustfs-x86_64-pc-windows-msvc.zip").Line.Split()[0]
-          \$actualHash = (Get-FileHash rustfs-x86_64-pc-windows-msvc.zip -Algorithm SHA256).Hash.ToLower()
-          if (\$expectedHash -eq \$actualHash) { "✅ Checksum verified" } else { "❌ Checksum mismatch" }
-          \`\`\`
-
-          ### 🛠️ System Requirements
-
-          - **Linux**: Any distribution with glibc 2.17+ (CentOS 7+, Ubuntu 16.04+)
-          - **macOS**: 10.15+ (Catalina or later)
-          - **Windows**: Windows 10 version 1809 or later
-
-          ### 📚 Documentation
-
-          - [Installation Guide](https://github.com/rustfs/rustfs#installation)
-          - [Quick Start](https://github.com/rustfs/rustfs#quick-start)
-          - [Configuration](https://github.com/rustfs/rustfs/blob/main/docs/)
-          - [API Documentation](https://docs.rs/rustfs)
-
-          ### 🆘 Support
-
-          - 🐛 [Report Issues](https://github.com/rustfs/rustfs/issues)
-          - 💬 [Community Discussions](https://github.com/rustfs/rustfs/discussions)
-          - 📖 [Documentation](https://github.com/rustfs/rustfs/tree/main/docs)
-          EOF
-
-          # Update the release with enhanced notes
-          gh release edit "$VERSION" --notes-file enhanced_notes.md

-  # Upload to OSS (optional)
-  upload-oss:
-    name: Upload to OSS
-    needs: [skip-duplicate, build-check, build-rustfs]
-    if: always() && needs.skip-duplicate.outputs.should_skip != 'true' && needs.build-check.outputs.build_type == 'release' && needs.build-rustfs.result == 'success'
-    runs-on: ubuntu-latest
-    env:
-      OSS_ACCESS_KEY_ID: ${{ secrets.ALICLOUDOSS_KEY_ID }}
-      OSS_ACCESS_KEY_SECRET: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
-    steps:
-      - name: Download artifacts
-        uses: actions/download-artifact@v4
-        with:
-          path: ./artifacts
-
-      - name: Upload to Aliyun OSS
-        if: ${{ env.OSS_ACCESS_KEY_ID != '' }}
-        run: |
-          # Install ossutil
-          curl -o ossutil.zip https://gosspublic.alicdn.com/ossutil/v2/2.1.1/ossutil-2.1.1-linux-amd64.zip
-          unzip ossutil.zip
-          sudo mv ossutil-*/ossutil /usr/local/bin/
-
-          # Upload files (both .tar.gz and .zip)
-          find ./artifacts -name "*.tar.gz" -exec ossutil cp {} oss://rustfs-artifacts/artifacts/rustfs/ --force \;
-          find ./artifacts -name "*.zip" -exec ossutil cp {} oss://rustfs-artifacts/artifacts/rustfs/ --force \;
-
-          # Create latest.json
-          VERSION="${GITHUB_REF#refs/tags/v}"
-          echo "{\"version\":\"${VERSION}\",\"release_date\":\"$(date -u +%Y-%m-%dT%H:%M:%SZ)\"}" > latest.json
-          ossutil cp latest.json oss://rustfs-version/latest.json --force
6
.github/workflows/ci.yml
vendored
@@ -80,6 +80,8 @@ jobs:
           concurrent_skipping: "same_content_newer"
           cancel_others: true
           paths_ignore: '["*.md", "docs/**", "deploy/**"]'
+          # Never skip release events and tag pushes
+          do_not_skip: '["workflow_dispatch", "schedule", "merge_group", "release", "push"]'

   test-and-lint:
     name: Test and Lint
@@ -100,7 +102,9 @@ jobs:
           cache-save-if: ${{ github.ref == 'refs/heads/main' }}

       - name: Run tests
-        run: cargo test --all --exclude e2e_test
+        run: |
+          cargo nextest run --all --exclude e2e_test
+          cargo test --all --doc

       - name: Check code formatting
         run: cargo fmt --all --check
18
.github/workflows/performance.yml
vendored
@@ -17,19 +17,11 @@ name: Performance Testing
 on:
   push:
     branches: [main]
-    paths-ignore:
-      - "**.md"
-      - "**.txt"
-      - "docs/**"
-      - "deploy/**"
-      - "scripts/dev_*.sh"
-      - "LICENSE*"
-      - "README*"
-      - "**/*.png"
-      - "**/*.jpg"
-      - "**/*.svg"
-      - ".gitignore"
-      - ".dockerignore"
+    paths:
+      - '**/*.rs'
+      - '**/Cargo.toml'
+      - '**/Cargo.lock'
+      - '.github/workflows/performance.yml'
   workflow_dispatch:
     inputs:
       profile_duration:
78
.github/workflows/release-notes-template.md
vendored
Normal file
@@ -0,0 +1,78 @@
## RustFS ${VERSION_CLEAN}

${ORIGINAL_NOTES}

---

### 🚀 Quick Download

**Linux (Static Binaries - No Dependencies):**

```bash
# x86_64 (Intel/AMD)
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-x86_64-unknown-linux-musl.zip
unzip rustfs-x86_64-unknown-linux-musl.zip
sudo mv rustfs /usr/local/bin/

# ARM64 (Graviton, Apple Silicon VMs)
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-aarch64-unknown-linux-musl.zip
unzip rustfs-aarch64-unknown-linux-musl.zip
sudo mv rustfs /usr/local/bin/
```

**macOS:**

```bash
# Apple Silicon (M1/M2/M3)
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-aarch64-apple-darwin.zip
unzip rustfs-aarch64-apple-darwin.zip
sudo mv rustfs /usr/local/bin/

# Intel
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-x86_64-apple-darwin.zip
unzip rustfs-x86_64-apple-darwin.zip
sudo mv rustfs /usr/local/bin/
```

### 📁 Available Downloads

| Platform | Architecture | File | Description |
|----------|-------------|------|-------------|
| Linux | x86_64 | `rustfs-x86_64-unknown-linux-musl.zip` | Static binary, no dependencies |
| Linux | ARM64 | `rustfs-aarch64-unknown-linux-musl.zip` | Static binary, no dependencies |
| macOS | Apple Silicon | `rustfs-aarch64-apple-darwin.zip` | Native binary, ZIP archive |
| macOS | Intel | `rustfs-x86_64-apple-darwin.zip` | Native binary, ZIP archive |

### 🔐 Verification

Download checksums and verify your download:

```bash
# Download checksums
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/SHA256SUMS

# Verify (Linux)
sha256sum -c SHA256SUMS --ignore-missing

# Verify (macOS)
shasum -a 256 -c SHA256SUMS --ignore-missing
```

### 🛠️ System Requirements

- **Linux**: Any distribution with glibc 2.17+ (CentOS 7+, Ubuntu 16.04+)
- **macOS**: 10.15+ (Catalina or later)
- **Windows**: Windows 10 version 1809 or later

### 📚 Documentation

- [Installation Guide](https://github.com/rustfs/rustfs#installation)
- [Quick Start](https://github.com/rustfs/rustfs#quick-start)
- [Configuration](https://github.com/rustfs/rustfs/blob/main/docs/)
- [API Documentation](https://docs.rs/rustfs)

### 🆘 Support

- 🐛 [Report Issues](https://github.com/rustfs/rustfs/issues)
- 💬 [Community Discussions](https://github.com/rustfs/rustfs/discussions)
- 📖 [Documentation](https://github.com/rustfs/rustfs/tree/main/docs)
166
Cargo.lock
generated
@@ -1171,7 +1171,7 @@ dependencies = [
  "bitflags 2.9.1",
  "cexpr",
  "clang-sys",
- "itertools 0.12.1",
+ "itertools 0.11.0",
  "lazy_static",
  "lazycell",
  "log",
@@ -3469,7 +3469,7 @@ checksum = "92773504d58c093f6de2459af4af33faa518c13451eb8f2b5698ed3d36e7c813"

 [[package]]
 name = "e2e_test"
-version = "0.0.3"
+version = "0.0.5"
 dependencies = [
  "bytes",
  "flatbuffers 25.2.10",
@@ -5023,15 +5023,6 @@ dependencies = [
  "either",
 ]

-[[package]]
-name = "itertools"
-version = "0.12.1"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "ba291022dbbd398a455acf126c1e341954079855bc60dfdda641363bd6922569"
-dependencies = [
- "either",
-]
-
 [[package]]
 name = "itertools"
 version = "0.13.0"
@@ -5341,7 +5332,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "07033963ba89ebaf1584d767badaa2e8fcec21aedea6b8c0346d487d49c28667"
 dependencies = [
  "cfg-if",
- "windows-targets 0.53.0",
+ "windows-targets 0.52.6",
 ]

 [[package]]
@@ -7839,7 +7830,7 @@ dependencies = [

 [[package]]
 name = "rustfs"
-version = "0.0.3"
+version = "0.0.5"
 dependencies = [
  "async-trait",
  "atoi",
@@ -7857,7 +7848,6 @@ dependencies = [
  "http-body 1.0.1",
  "hyper 1.6.0",
  "hyper-util",
- "lazy_static",
  "libsystemd",
  "matchit",
  "mime_guess",
@@ -7866,6 +7856,7 @@ dependencies = [
  "pin-project-lite",
  "reqwest",
  "rust-embed",
+ "rustfs-ahm",
  "rustfs-appauth",
  "rustfs-common",
  "rustfs-config",
@@ -7907,8 +7898,36 @@ dependencies = [
 ]

 [[package]]
-name = "rustfs-appauth"
+name = "rustfs-ahm"
 version = "0.0.3"
+dependencies = [
+ "anyhow",
+ "async-trait",
+ "bytes",
+ "futures",
+ "lazy_static",
+ "rmp-serde",
+ "rustfs-common",
+ "rustfs-ecstore",
+ "rustfs-filemeta",
+ "rustfs-lock",
+ "rustfs-madmin",
+ "rustfs-utils",
+ "serde",
+ "serde_json",
+ "thiserror 2.0.12",
+ "time",
+ "tokio",
+ "tokio-test",
+ "tokio-util",
+ "tracing",
+ "url",
+ "uuid",
+]
+
+[[package]]
+name = "rustfs-appauth"
+version = "0.0.5"
 dependencies = [
  "base64-simd",
  "rsa",
@@ -7918,16 +7937,15 @@ dependencies = [

 [[package]]
 name = "rustfs-common"
-version = "0.0.3"
+version = "0.0.5"
 dependencies = [
- "lazy_static",
  "tokio",
  "tonic",
 ]

 [[package]]
 name = "rustfs-config"
-version = "0.0.3"
+version = "0.0.5"
 dependencies = [
  "const-str",
  "serde",
@@ -7936,7 +7954,7 @@ dependencies = [

 [[package]]
 name = "rustfs-crypto"
-version = "0.0.3"
+version = "0.0.5"
 dependencies = [
  "aes-gcm",
  "argon2",
@@ -7954,7 +7972,7 @@ dependencies = [

 [[package]]
 name = "rustfs-ecstore"
-version = "0.0.3"
+version = "0.0.5"
 dependencies = [
  "async-channel",
  "async-trait",
@@ -8029,7 +8047,7 @@ dependencies = [

 [[package]]
 name = "rustfs-filemeta"
-version = "0.0.3"
+version = "0.0.5"
 dependencies = [
  "byteorder",
  "bytes",
@@ -8050,14 +8068,13 @@ dependencies = [

 [[package]]
 name = "rustfs-gui"
-version = "0.0.3"
+version = "0.0.5"
 dependencies = [
  "chrono",
  "dioxus",
  "dirs",
  "hex",
  "keyring",
- "lazy_static",
  "rfd 0.15.3",
  "rust-embed",
  "rust-i18n",
@@ -8071,14 +8088,13 @@ dependencies = [

 [[package]]
 name = "rustfs-iam"
-version = "0.0.3"
+version = "0.0.5"
 dependencies = [
  "arc-swap",
  "async-trait",
  "base64-simd",
  "futures",
  "jsonwebtoken",
- "lazy_static",
  "rand 0.9.1",
  "rustfs-crypto",
  "rustfs-ecstore",
@@ -8095,10 +8111,9 @@ dependencies = [

 [[package]]
 name = "rustfs-lock"
-version = "0.0.3"
+version = "0.0.5"
 dependencies = [
  "async-trait",
- "lazy_static",
  "rand 0.9.1",
  "rustfs-protos",
  "serde",
@@ -8112,7 +8127,7 @@ dependencies = [

 [[package]]
 name = "rustfs-madmin"
-version = "0.0.3"
+version = "0.0.5"
 dependencies = [
  "chrono",
  "humantime",
@@ -8124,7 +8139,7 @@ dependencies = [

 [[package]]
 name = "rustfs-notify"
-version = "0.0.3"
+version = "0.0.5"
 dependencies = [
  "async-trait",
  "axum",
@@ -8153,7 +8168,7 @@ dependencies = [

 [[package]]
 name = "rustfs-obs"
-version = "0.0.3"
+version = "0.0.5"
 dependencies = [
  "async-trait",
  "chrono",
@@ -8186,7 +8201,7 @@ dependencies = [

 [[package]]
 name = "rustfs-policy"
-version = "0.0.3"
+version = "0.0.5"
 dependencies = [
  "base64-simd",
  "ipnetwork",
@@ -8205,7 +8220,7 @@ dependencies = [

 [[package]]
 name = "rustfs-protos"
-version = "0.0.3"
+version = "0.0.5"
 dependencies = [
  "flatbuffers 25.2.10",
  "prost",
@@ -8216,7 +8231,7 @@ dependencies = [

 [[package]]
 name = "rustfs-rio"
-version = "0.0.3"
+version = "0.0.5"
 dependencies = [
  "aes-gcm",
  "bytes",
@@ -8264,7 +8279,7 @@ dependencies = [

 [[package]]
 name = "rustfs-s3select-api"
-version = "0.0.3"
+version = "0.0.5"
 dependencies = [
  "async-trait",
  "bytes",
@@ -8288,14 +8303,13 @@ dependencies = [

 [[package]]
 name = "rustfs-s3select-query"
-version = "0.0.3"
+version = "0.0.5"
 dependencies = [
  "async-recursion",
  "async-trait",
  "datafusion",
  "derive_builder",
  "futures",
- "lazy_static",
  "parking_lot",
  "rustfs-s3select-api",
  "s3s",
@@ -8306,12 +8320,11 @@ dependencies = [

 [[package]]
 name = "rustfs-signer"
-version = "0.0.3"
+version = "0.0.5"
 dependencies = [
  "bytes",
  "http 1.3.1",
  "hyper 1.6.0",
- "lazy_static",
  "rand 0.9.1",
  "rustfs-utils",
  "s3s",
@@ -8324,7 +8337,7 @@ dependencies = [

 [[package]]
 name = "rustfs-utils"
-version = "0.0.3"
+version = "0.0.5"
 dependencies = [
  "base64-simd",
  "blake3",
@@ -8338,7 +8351,6 @@ dependencies = [
  "hmac 0.12.1",
  "hyper 1.6.0",
  "hyper-util",
- "lazy_static",
  "local-ip-address",
  "lz4",
  "md-5",
@@ -8368,7 +8380,7 @@ dependencies = [

 [[package]]
 name = "rustfs-workers"
-version = "0.0.3"
+version = "0.0.5"
 dependencies = [
  "tokio",
  "tracing",
@@ -8376,7 +8388,7 @@ dependencies = [

 [[package]]
 name = "rustfs-zip"
-version = "0.0.3"
+version = "0.0.5"
 dependencies = [
  "async-compression",
  "tokio",
@@ -9847,9 +9859,9 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20"

 [[package]]
 name = "tokio"
-version = "1.46.0"
+version = "1.46.1"
 source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1140bb80481756a8cbe10541f37433b459c5aa1e727b4c020fbfebdc25bf3ec4"
+checksum = "0cc3a2344dafbe23a245241fe8b09735b521110d30fcefbbd5feb1797ca35d17"
 dependencies = [
  "backtrace",
  "bytes",
@@ -11129,29 +11141,13 @@ dependencies = [
  "windows_aarch64_gnullvm 0.52.6",
  "windows_aarch64_msvc 0.52.6",
  "windows_i686_gnu 0.52.6",
- "windows_i686_gnullvm 0.52.6",
+ "windows_i686_gnullvm",
  "windows_i686_msvc 0.52.6",
  "windows_x86_64_gnu 0.52.6",
  "windows_x86_64_gnullvm 0.52.6",
  "windows_x86_64_msvc 0.52.6",
 ]

-[[package]]
-name = "windows-targets"
-version = "0.53.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b1e4c7e8ceaaf9cb7d7507c974735728ab453b67ef8f18febdd7c11fe59dca8b"
-dependencies = [
- "windows_aarch64_gnullvm 0.53.0",
- "windows_aarch64_msvc 0.53.0",
- "windows_i686_gnu 0.53.0",
- "windows_i686_gnullvm 0.53.0",
- "windows_i686_msvc 0.53.0",
- "windows_x86_64_gnu 0.53.0",
- "windows_x86_64_gnullvm 0.53.0",
- "windows_x86_64_msvc 0.53.0",
-]
-
 [[package]]
 name = "windows-threading"
 version = "0.1.0"
@@ -11188,12 +11184,6 @@ version = "0.52.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "32a4622180e7a0ec044bb555404c800bc9fd9ec262ec147edd5989ccd0c02cd3"

-[[package]]
-name = "windows_aarch64_gnullvm"
-version = "0.53.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "86b8d5f90ddd19cb4a147a5fa63ca848db3df085e25fee3cc10b39b6eebae764"
-
 [[package]]
 name = "windows_aarch64_msvc"
 version = "0.42.2"
@@ -11212,12 +11202,6 @@ version = "0.52.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "09ec2a7bb152e2252b53fa7803150007879548bc709c039df7627cabbd05d469"

-[[package]]
-name = "windows_aarch64_msvc"
-version = "0.53.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c7651a1f62a11b8cbd5e0d42526e55f2c99886c77e007179efff86c2b137e66c"
-
 [[package]]
 name = "windows_i686_gnu"
 version = "0.42.2"
@@ -11236,24 +11220,12 @@ version = "0.52.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "8e9b5ad5ab802e97eb8e295ac6720e509ee4c243f69d781394014ebfe8bbfa0b"

-[[package]]
-name = "windows_i686_gnu"
-version = "0.53.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "c1dc67659d35f387f5f6c479dc4e28f1d4bb90ddd1a5d3da2e5d97b42d6272c3"
-
 [[package]]
 name = "windows_i686_gnullvm"
 version = "0.52.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "0eee52d38c090b3caa76c563b86c3a4bd71ef1a819287c19d586d7334ae8ed66"

-[[package]]
-name = "windows_i686_gnullvm"
-version = "0.53.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "9ce6ccbdedbf6d6354471319e781c0dfef054c81fbc7cf83f338a4296c0cae11"
-
 [[package]]
 name = "windows_i686_msvc"
 version = "0.42.2"
@@ -11272,12 +11244,6 @@ version = "0.52.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "240948bc05c5e7c6dabba28bf89d89ffce3e303022809e73deaefe4f6ec56c66"

-[[package]]
-name = "windows_i686_msvc"
-version = "0.53.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "581fee95406bb13382d2f65cd4a908ca7b1e4c2f1917f143ba16efe98a589b5d"
-
 [[package]]
 name = "windows_x86_64_gnu"
 version = "0.42.2"
@@ -11296,12 +11262,6 @@ version = "0.52.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "147a5c80aabfbf0c7d901cb5895d1de30ef2907eb21fbbab29ca94c5b08b1a78"

-[[package]]
-name = "windows_x86_64_gnu"
-version = "0.53.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "2e55b5ac9ea33f2fc1716d1742db15574fd6fc8dadc51caab1c16a3d3b4190ba"
-
 [[package]]
 name = "windows_x86_64_gnullvm"
 version = "0.42.2"
@@ -11320,12 +11280,6 @@ version = "0.52.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "24d5b23dc417412679681396f2b49f3de8c1473deb516bd34410872eff51ed0d"

-[[package]]
-name = "windows_x86_64_gnullvm"
-version = "0.53.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0a6e035dd0599267ce1ee132e51c27dd29437f63325753051e71dd9e42406c57"
-
 [[package]]
 name = "windows_x86_64_msvc"
 version = "0.42.2"
@@ -11344,12 +11298,6 @@ version = "0.52.6"
 source = "registry+https://github.com/rust-lang/crates.io-index"
 checksum = "589f6da84c646204747d1270a2a5661ea66ed1cced2631d546fdfb155959f9ec"

-[[package]]
-name = "windows_x86_64_msvc"
-version = "0.53.0"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "271414315aff87387382ec3d271b52d7ae78726f5d44ac98b4f4030c91880486"
-
 [[package]]
 name = "winnow"
 version = "0.5.40"
59
Cargo.toml
@@ -36,6 +36,7 @@ members = [
     "crates/utils",   # Utility functions and helpers
     "crates/workers", # Worker thread pools and task scheduling
     "crates/zip",     # ZIP file handling and compression
+    "crates/ahm",
 ]
 resolver = "2"

@@ -44,7 +45,11 @@ edition = "2024"
 license = "Apache-2.0"
 repository = "https://github.com/rustfs/rustfs"
 rust-version = "1.85"
-version = "0.0.3"
+version = "0.0.5"
+homepage = "https://rustfs.com"
+description = "RustFS is a high-performance distributed object storage software built using Rust, one of the most popular languages worldwide. "
+keywords = ["RustFS", "Minio", "object-storage", "filesystem", "s3"]
+categories = ["web-programming", "development-tools", "filesystem", "network-programming"]

 [workspace.lints.rust]
 unsafe_code = "deny"
@@ -52,28 +57,34 @@ unsafe_code = "deny"
 [workspace.lints.clippy]
 all = "warn"

+[patch.crates-io]
+rustfs-utils = { path = "crates/utils" }
+rustfs-filemeta = { path = "crates/filemeta" }
+rustfs-rio = { path = "crates/rio" }
+
 [workspace.dependencies]
-rustfs-s3select-api = { path = "crates/s3select-api", version = "0.0.3" }
-rustfs-appauth = { path = "crates/appauth", version = "0.0.3" }
-rustfs-common = { path = "crates/common", version = "0.0.3" }
-rustfs-crypto = { path = "crates/crypto", version = "0.0.3" }
-rustfs-ecstore = { path = "crates/ecstore", version = "0.0.3" }
-rustfs-iam = { path = "crates/iam", version = "0.0.3" }
-rustfs-lock = { path = "crates/lock", version = "0.0.3" }
-rustfs-madmin = { path = "crates/madmin", version = "0.0.3" }
-rustfs-policy = { path = "crates/policy", version = "0.0.3" }
-rustfs-protos = { path = "crates/protos", version = "0.0.3" }
-rustfs-s3select-query = { path = "crates/s3select-query", version = "0.0.3" }
-rustfs = { path = "./rustfs", version = "0.0.3" }
-rustfs-zip = { path = "./crates/zip", version = "0.0.3" }
-rustfs-config = { path = "./crates/config", version = "0.0.3" }
-rustfs-obs = { path = "crates/obs", version = "0.0.3" }
-rustfs-notify = { path = "crates/notify", version = "0.0.3" }
-rustfs-utils = { path = "crates/utils", version = "0.0.3" }
-rustfs-rio = { path = "crates/rio", version = "0.0.3" }
-rustfs-filemeta = { path = "crates/filemeta", version = "0.0.3" }
-rustfs-signer = { path = "crates/signer", version = "0.0.3" }
-rustfs-workers = { path = "crates/workers", version = "0.0.3" }
-rustfs-ahm = { path = "crates/ahm", version = "0.0.3" }
+rustfs-s3select-api = { path = "crates/s3select-api", version = "0.0.5" }
+rustfs-appauth = { path = "crates/appauth", version = "0.0.5" }
+rustfs-common = { path = "crates/common", version = "0.0.5" }
+rustfs-crypto = { path = "crates/crypto", version = "0.0.5" }
+rustfs-ecstore = { path = "crates/ecstore", version = "0.0.5" }
+rustfs-iam = { path = "crates/iam", version = "0.0.5" }
+rustfs-lock = { path = "crates/lock", version = "0.0.5" }
+rustfs-madmin = { path = "crates/madmin", version = "0.0.5" }
+rustfs-policy = { path = "crates/policy", version = "0.0.5" }
+rustfs-protos = { path = "crates/protos", version = "0.0.5" }
+rustfs-s3select-query = { path = "crates/s3select-query", version = "0.0.5" }
+rustfs = { path = "./rustfs", version = "0.0.5" }
+rustfs-zip = { path = "./crates/zip", version = "0.0.5" }
+rustfs-config = { path = "./crates/config", version = "0.0.5" }
+rustfs-obs = { path = "crates/obs", version = "0.0.5" }
+rustfs-notify = { path = "crates/notify", version = "0.0.5" }
+rustfs-utils = { path = "crates/utils", version = "0.0.5" }
+rustfs-rio = { path = "crates/rio", version = "0.0.5" }
+rustfs-filemeta = { path = "crates/filemeta", version = "0.0.5" }
+rustfs-signer = { path = "crates/signer", version = "0.0.5" }
+rustfs-workers = { path = "crates/workers", version = "0.0.5" }
 aes-gcm = { version = "0.10.3", features = ["std"] }
 arc-swap = "1.7.1"
 argon2 = { version = "0.5.3", features = ["std"] }
@@ -171,7 +182,6 @@ pbkdf2 = "0.12.2"
 percent-encoding = "2.3.1"
 pin-project-lite = "0.2.16"
 prost = "0.13.5"
-prost-build = "0.13.5"
 quick-xml = "0.37.5"
 rand = "0.9.1"
 rdkafka = { version = "0.37.0", features = ["tokio"] }
@@ -225,7 +235,7 @@ time = { version = "0.3.41", features = [
     "macros",
     "serde",
 ] }
-tokio = { version = "1.46.0", features = ["fs", "rt-multi-thread"] }
+tokio = { version = "1.46.1", features = ["fs", "rt-multi-thread"] }
 tokio-rustls = { version = "0.26.2", default-features = false }
 tokio-stream = { version = "0.1.17" }
 tokio-tar = "0.3.1"
@@ -253,6 +263,7 @@ winapi = { version = "0.3.9" }
 xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }
 zip = "2.4.2"
 zstd = "0.13.3"
+anyhow = "1.0.86"

 [profile.wasm-dev]
 inherits = "dev"
10
Dockerfile
@@ -20,9 +20,10 @@ RUN apk add -U --no-cache \
     bash \
     unzip

-RUN curl -Lo /tmp/rustfs.zip https://dl.rustfs.com/artifacts/rustfs/rustfs-release-x86_64-unknown-linux-musl.latest.zip && \
-    unzip /tmp/rustfs.zip -d /tmp && \
-    mv /tmp/rustfs-release-x86_64-unknown-linux-musl/bin/rustfs /rustfs && \
+RUN curl -Lo /tmp/rustfs.zip https://dl.rustfs.com/artifacts/rustfs/rustfs-x86_64-unknown-linux-musl.zip && \
+    unzip -o /tmp/rustfs.zip -d /tmp && \
+    mv /tmp/rustfs /rustfs && \
     chmod +x /rustfs && \
     rm -rf /tmp/*

@@ -37,11 +38,10 @@ COPY --from=builder /rustfs /usr/local/bin/rustfs
 ENV RUSTFS_ACCESS_KEY=rustfsadmin \
     RUSTFS_SECRET_KEY=rustfsadmin \
     RUSTFS_ADDRESS=":9000" \
-    RUSTFS_CONSOLE_ADDRESS=":9001" \
     RUSTFS_CONSOLE_ENABLE=true \
     RUST_LOG=warn

-EXPOSE 9000 9001
+EXPOSE 9000

 RUN mkdir -p /data
 VOLUME /data
3
Makefile
@@ -31,7 +31,8 @@ check:
 .PHONY: test
 test:
 	@echo "🧪 Running tests..."
-	cargo test --all --exclude e2e_test
+	cargo nextest run --all --exclude e2e_test
+	cargo test --all --doc

 .PHONY: pre-commit
 pre-commit: fmt clippy check test
15
README.md
@@ -1,7 +1,9 @@
 [](https://rustfs.com)

+<p align="center">RustFS is a high-performance distributed object storage software built using Rust</p>
+
 <p align="center">
   <a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
   <a href="https://github.com/rustfs/rustfs/actions/workflows/docker.yml"><img alt="Build and Push Docker Images" src="https://github.com/rustfs/rustfs/actions/workflows/docker.yml/badge.svg" /></a>
@@ -17,11 +19,22 @@
 </p>

 <p align="center">
-  English | <a href="https://github.com/rustfs/rustfs/blob/main/README_ZH.md">简体中文</a>
+  English | <a href="https://github.com/rustfs/rustfs/blob/main/README_ZH.md">简体中文</a> |
+  <!-- Keep these links. Translations will automatically update with the README. -->
+  <a href="https://readme-i18n.com/rustfs/rustfs?lang=de">Deutsch</a> |
+  <a href="https://readme-i18n.com/rustfs/rustfs?lang=es">Español</a> |
+  <a href="https://readme-i18n.com/rustfs/rustfs?lang=fr">français</a> |
+  <a href="https://readme-i18n.com/rustfs/rustfs?lang=ja">日本語</a> |
+  <a href="https://readme-i18n.com/rustfs/rustfs?lang=ko">한국어</a> |
+  <a href="https://readme-i18n.com/rustfs/rustfs?lang=pt">Português</a> |
+  <a href="https://readme-i18n.com/rustfs/rustfs?lang=ru">Русский</a>
 </p>

 RustFS is a high-performance distributed object storage software built using Rust, one of the most popular languages worldwide. Along with MinIO, it shares a range of advantages such as simplicity, S3 compatibility, open-source nature, support for data lakes, AI, and big data. Furthermore, it has a better and more user-friendly open-source license in comparison to other storage systems, being constructed under the Apache license. As Rust serves as its foundation, RustFS provides faster speed and safer distributed features for high-performance object storage.

+> ⚠️ **RustFS is under rapid development. Do NOT use in production environments!**
+
 ## Features

 - **High Performance**: Built with Rust, ensuring speed and efficiency.
@@ -26,7 +26,6 @@ dioxus = { workspace = true, features = ["router"] }
 dirs = { workspace = true }
 hex = { workspace = true }
 keyring = { workspace = true }
-lazy_static = { workspace = true }
 rfd = { workspace = true }
 rust-embed = { workspace = true, features = ["interpolate-folder-path"] }
 rust-i18n = { workspace = true }
@@ -37,7 +37,9 @@ copyright = "Copyright 2025 rustfs.com"

 icon = [
     "assets/icons/icon.icns",
-    "assets/icons/icon.ico"
+    "assets/icons/icon.ico",
+    "assets/icons/icon.png",
+    "assets/icons/rustfs-icon.png",
 ]
 #[bundle.macos]
 #provider_short_name = "RustFs"
BIN cli/rustfs-gui/assets/icon.png (new file, 23 KiB)
BIN cli/rustfs-gui/assets/icons/icon.png (new file, 23 KiB)
BIN cli/rustfs-gui/assets/icons/icon_128x128.png (new file, 4.5 KiB)
BIN cli/rustfs-gui/assets/icons/icon_128x128@2x.png (new file, 9.9 KiB)
BIN cli/rustfs-gui/assets/icons/icon_16x16.png (new file, 498 B)
BIN cli/rustfs-gui/assets/icons/icon_16x16@2x.png (new file, 969 B)
BIN cli/rustfs-gui/assets/icons/icon_256x256.png (new file, 9.9 KiB)
BIN cli/rustfs-gui/assets/icons/icon_256x256@2x.png (new file, 23 KiB)
BIN cli/rustfs-gui/assets/icons/icon_32x32.png (new file, 969 B)
BIN cli/rustfs-gui/assets/icons/icon_32x32@2x.png (new file, 2.0 KiB)
BIN cli/rustfs-gui/assets/icons/icon_512x512.png (new file, 23 KiB)
BIN cli/rustfs-gui/assets/icons/icon_512x512@2x.png (new file, 47 KiB)
BIN cli/rustfs-gui/assets/icons/rustfs-icon.png (new file, 23 KiB)
BIN cli/rustfs-gui/assets/rustfs-icon.png (new file, 23 KiB)
@@ -1,20 +1,15 @@
<svg width="1558" height="260" viewBox="0 0 1558 260" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_0_3)">
<path d="M1288.5 112.905H1159.75V58.4404H1262L1270 0L1074 0V260H1159.75V162.997H1296.95L1288.5 112.905Z"
fill="#0196D0"/>
<path d="M1058.62 58.4404V0H789V58.4404H881.133V260H966.885V58.4404H1058.62Z" fill="#0196D0"/>
<path d="M521 179.102V0L454.973 15V161C454.973 181.124 452.084 193.146 443.5 202C434.916 211.257 419.318 214.5 400.5 214.5C381.022 214.5 366.744 210.854 357.5 202C348.916 193.548 346.357 175.721 346.357 156V0L280 15V175.48C280 208.08 290.234 229.412 309.712 241.486C329.19 253.56 358.903 260 400.5 260C440.447 260 470.159 253.56 490.297 241.486C510.766 229.412 521 208.483 521 179.102Z"
fill="#0196D0"/>
<path d="M172.84 84.2813C172.84 97.7982 168.249 107.737 158.41 113.303C149.883 118.471 137.092 121.254 120.693 122.049V162.997C129.876 163.792 138.076 166.177 144.307 176.514L184.647 260H265L225.316 180.489C213.181 155.046 201.374 149.48 178.744 143.517C212.197 138.349 241.386 118.471 241.386 73.1499C241.386 53.2722 233.843 30.2141 218.756 17.8899C203.998 5.56575 183.991 0 159.394 0H120.693V48.5015H127.58C142.23 48.5015 153.6 51.4169 161.689 57.2477C169.233 62.8135 172.84 71.5596 172.84 84.2813ZM120.693 122.049C119.163 122.049 117.741 122.049 116.43 122.049H68.5457V48.5015H120.693V0H0V260H70.5137V162.997H110.526C113.806 162.997 117.741 162.997 120.693 162.997V122.049Z"
fill="#0196D0"/>
<path d="M774 179.297C774 160.829 766.671 144.669 752.013 131.972C738.127 119.66 712.025 110.169 673.708 103.5C662.136 101.191 651.722 99.6523 643.235 97.3437C586.532 84.6467 594.632 52.7118 650.564 52.7118C680.651 52.7118 709.582 61.946 738.127 66.9478C742.37 67.7174 743.913 68.1021 744.298 68.1021L750.47 12.697C720.383 3.46282 684.895 0 654.036 0C616.619 0 587.689 6.54088 567.245 19.2379C546.801 31.9349 536 57.7137 536 82.3382C536 103.5 543.715 119.66 559.916 131.972C575.731 143.515 604.276 152.749 645.55 160.059C658.279 162.368 668.694 163.907 676.794 166.215C685.023 168.524 691.066 170.704 694.924 172.756C702.253 176.604 706.11 182.375 706.11 188.531C706.11 196.611 701.481 202.767 692.224 207C664.836 220.081 587.689 212.001 556.83 198.15L543.715 247.784C547.186 248.169 552.972 249.323 559.916 250.477C616.619 259.327 690.681 270.869 741.212 238.935C762.814 225.468 774 206.23 774 179.297Z"
fill="#0196D0"/>
<path d="M1558 179.568C1558 160.383 1550.42 144.268 1535.67 131.99C1521.32 119.968 1494.34 110.631 1454.74 103.981C1442.38 101.679 1432.01 99.3764 1422.84 97.8416C1422.44 97.8416 1422.04 97.8416 1422.04 97.4579V112.422L1361.04 75.2038L1422.04 38.3692V52.9496C1424.7 52.9496 1427.49 52.9496 1430.41 52.9496C1461.51 52.9496 1491.42 62.5419 1521.32 67.5299C1525.31 67.9136 1526.9 67.9136 1527.3 67.9136L1533.68 12.6619C1502.98 3.83692 1465.9 0 1434 0C1395.33 0 1365.43 6.52277 1345.09 19.5683C1323.16 32.6139 1312 57.9376 1312 82.8776C1312 103.981 1320.37 120.096 1336.72 131.607C1353.46 143.885 1382.97 153.093 1425.23 160.383C1434 161.535 1441.18 162.686 1447.56 164.22L1448.36 150.791L1507.36 190.312L1445.57 224.844L1445.96 212.949C1409.68 215.635 1357.45 209.112 1333.53 197.985L1320.37 247.482C1323.56 248.249 1329.54 248.633 1336.72 250.551C1395.33 259.376 1471.88 270.887 1524.11 238.657C1546.84 225.611 1558 205.659 1558 179.568Z"
fill="#0196D0"/>
</g>
<defs>
<clipPath id="clip0_0_3">
<rect width="1558" height="260" fill="white"/>
</clipPath>
</defs>
<g clip-path="url(#clip0_0_3)">
<path d="M1288.5 112.905H1159.75V58.4404H1262L1270 0L1074 0V260H1159.75V162.997H1296.95L1288.5 112.905Z" fill="#0196D0"/>
<path d="M1058.62 58.4404V0H789V58.4404H881.133V260H966.885V58.4404H1058.62Z" fill="#0196D0"/>
<path d="M521 179.102V0L454.973 15V161C454.973 181.124 452.084 193.146 443.5 202C434.916 211.257 419.318 214.5 400.5 214.5C381.022 214.5 366.744 210.854 357.5 202C348.916 193.548 346.357 175.721 346.357 156V0L280 15V175.48C280 208.08 290.234 229.412 309.712 241.486C329.19 253.56 358.903 260 400.5 260C440.447 260 470.159 253.56 490.297 241.486C510.766 229.412 521 208.483 521 179.102Z" fill="#0196D0"/>
<path d="M172.84 84.2813C172.84 97.7982 168.249 107.737 158.41 113.303C149.883 118.471 137.092 121.254 120.693 122.049V162.997C129.876 163.792 138.076 166.177 144.307 176.514L184.647 260H265L225.316 180.489C213.181 155.046 201.374 149.48 178.744 143.517C212.197 138.349 241.386 118.471 241.386 73.1499C241.386 53.2722 233.843 30.2141 218.756 17.8899C203.998 5.56575 183.991 0 159.394 0H120.693V48.5015H127.58C142.23 48.5015 153.6 51.4169 161.689 57.2477C169.233 62.8135 172.84 71.5596 172.84 84.2813ZM120.693 122.049C119.163 122.049 117.741 122.049 116.43 122.049H68.5457V48.5015H120.693V0H0V260H70.5137V162.997H110.526C113.806 162.997 117.741 162.997 120.693 162.997V122.049Z" fill="#0196D0"/>
<path d="M774 179.297C774 160.829 766.671 144.669 752.013 131.972C738.127 119.66 712.025 110.169 673.708 103.5C662.136 101.191 651.722 99.6523 643.235 97.3437C586.532 84.6467 594.632 52.7118 650.564 52.7118C680.651 52.7118 709.582 61.946 738.127 66.9478C742.37 67.7174 743.913 68.1021 744.298 68.1021L750.47 12.697C720.383 3.46282 684.895 0 654.036 0C616.619 0 587.689 6.54088 567.245 19.2379C546.801 31.9349 536 57.7137 536 82.3382C536 103.5 543.715 119.66 559.916 131.972C575.731 143.515 604.276 152.749 645.55 160.059C658.279 162.368 668.694 163.907 676.794 166.215C685.023 168.524 691.066 170.704 694.924 172.756C702.253 176.604 706.11 182.375 706.11 188.531C706.11 196.611 701.481 202.767 692.224 207C664.836 220.081 587.689 212.001 556.83 198.15L543.715 247.784C547.186 248.169 552.972 249.323 559.916 250.477C616.619 259.327 690.681 270.869 741.212 238.935C762.814 225.468 774 206.23 774 179.297Z" fill="#0196D0"/>
<path d="M1558 179.568C1558 160.383 1550.42 144.268 1535.67 131.99C1521.32 119.968 1494.34 110.631 1454.74 103.981C1442.38 101.679 1432.01 99.3764 1422.84 97.8416C1422.44 97.8416 1422.04 97.8416 1422.04 97.4579V112.422L1361.04 75.2038L1422.04 38.3692V52.9496C1424.7 52.9496 1427.49 52.9496 1430.41 52.9496C1461.51 52.9496 1491.42 62.5419 1521.32 67.5299C1525.31 67.9136 1526.9 67.9136 1527.3 67.9136L1533.68 12.6619C1502.98 3.83692 1465.9 0 1434 0C1395.33 0 1365.43 6.52277 1345.09 19.5683C1323.16 32.6139 1312 57.9376 1312 82.8776C1312 103.981 1320.37 120.096 1336.72 131.607C1353.46 143.885 1382.97 153.093 1425.23 160.383C1434 161.535 1441.18 162.686 1447.56 164.22L1448.36 150.791L1507.36 190.312L1445.57 224.844L1445.96 212.949C1409.68 215.635 1357.45 209.112 1333.53 197.985L1320.37 247.482C1323.56 248.249 1329.54 248.633 1336.72 250.551C1395.33 259.376 1471.88 270.887 1524.11 238.657C1546.84 225.611 1558 205.659 1558 179.568Z" fill="#0196D0"/>
</g>
<defs>
<clipPath id="clip0_0_3">
<rect width="1558" height="260" fill="white"/>
</clipPath>
</defs>
</svg>

Before Size: 3.5 KiB | After Size: 3.4 KiB
@@ -14,12 +14,12 @@

use crate::utils::RustFSConfig;
use dioxus::logger::tracing::{debug, error, info};
use lazy_static::lazy_static;
use rust_embed::RustEmbed;
use sha2::{Digest, Sha256};
use std::error::Error;
use std::path::{Path, PathBuf};
use std::process::Command as StdCommand;
use std::sync::LazyLock;
use std::time::Duration;
use tokio::fs;
use tokio::fs::File;

@@ -31,15 +31,13 @@ use tokio::sync::{Mutex, mpsc};
#[folder = "$CARGO_MANIFEST_DIR/embedded-rustfs/"]
struct Asset;

// Use `lazy_static` to cache the checksum of embedded resources
lazy_static! {
    static ref RUSTFS_HASH: Mutex<String> = {
        let rustfs_file = if cfg!(windows) { "rustfs.exe" } else { "rustfs" };
        let rustfs_data = Asset::get(rustfs_file).expect("RustFs binary not embedded");
        let hash = hex::encode(Sha256::digest(&rustfs_data.data));
        Mutex::new(hash)
    };
}
// Use `LazyLock` to cache the checksum of embedded resources
static RUSTFS_HASH: LazyLock<Mutex<String>> = LazyLock::new(|| {
    let rustfs_file = if cfg!(windows) { "rustfs.exe" } else { "rustfs" };
    let rustfs_data = Asset::get(rustfs_file).expect("RustFs binary not embedded");
    let hash = hex::encode(Sha256::digest(&rustfs_data.data));
    Mutex::new(hash)
});
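For context, a minimal call-site sketch (not part of the diff) of reading this cached checksum; the helper name `is_binary_current` is hypothetical, everything else uses the items shown above:

```rust
/// Hypothetical helper: check an installed binary against the embedded checksum.
/// The closure inside `LazyLock` runs once, on the first access to `RUSTFS_HASH`.
async fn is_binary_current(installed_bytes: &[u8]) -> bool {
    let expected = RUSTFS_HASH.lock().await;
    hex::encode(Sha256::digest(installed_bytes)) == *expected
}
```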

/// Service command
/// This enum represents the commands that can be sent to the service manager
35
crates/ahm/Cargo.toml
Normal file
@@ -0,0 +1,35 @@
[package]
name = "rustfs-ahm"
version = "0.0.3"
edition = "2021"
authors = ["RustFS Team"]
license = "Apache-2.0"
description = "RustFS AHM (Automatic Health Management) Scanner"

[dependencies]
rustfs-ecstore = { workspace = true }
rustfs-common = { workspace = true }
rustfs-filemeta = { workspace = true }
rustfs-madmin = { workspace = true }
rustfs-utils = { workspace = true }
tokio = { workspace = true, features = ["full"] }
tokio-util = { workspace = true }
tracing = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
thiserror = { workspace = true }
bytes = { workspace = true }
time = { workspace = true, features = ["serde"] }
uuid = { workspace = true, features = ["v4", "serde"] }
anyhow = { workspace = true }
async-trait = { workspace = true }
futures = { workspace = true }
url = { workspace = true }
rustfs-lock = { workspace = true }

lazy_static = { workspace = true }

[dev-dependencies]
rmp-serde = { workspace = true }
tokio-test = "0.4"
serde_json = "1.0"
45
crates/ahm/src/error.rs
Normal file
@@ -0,0 +1,45 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use thiserror::Error;

#[derive(Debug, Error)]
pub enum Error {
    #[error("I/O error: {0}")]
    Io(#[from] std::io::Error),

    #[error("Storage error: {0}")]
    Storage(#[from] rustfs_ecstore::error::Error),

    #[error("Configuration error: {0}")]
    Config(String),

    #[error("Scanner error: {0}")]
    Scanner(String),

    #[error("Metrics error: {0}")]
    Metrics(String),

    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

pub type Result<T, E = Error> = std::result::Result<T, E>;

// Implement conversion from ahm::Error to std::io::Error for use in main.rs
impl From<Error> for std::io::Error {
    fn from(err: Error) -> Self {
        std::io::Error::other(err)
    }
}
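Because of the `From<Error> for std::io::Error` impl above, a binary whose `main` returns `std::io::Result` can propagate AHM errors with `?`; a minimal sketch (the `main` body is hypothetical):

```rust
fn main() -> std::io::Result<()> {
    // An ahm `Error` crosses the boundary via `From<Error> for std::io::Error`.
    let res: Result<()> = Err(Error::Config("missing endpoint".to_string()));
    res?;
    Ok(())
}
```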
54
crates/ahm/src/lib.rs
Normal file
@@ -0,0 +1,54 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::OnceLock;
use tokio_util::sync::CancellationToken;

pub mod error;
pub mod scanner;

pub use error::{Error, Result};
pub use scanner::{
    load_data_usage_from_backend, store_data_usage_in_backend, BucketTargetUsageInfo, BucketUsageInfo, DataUsageInfo, Scanner,
    ScannerMetrics,
};

// Global cancellation token for AHM services (scanner and other background tasks)
static GLOBAL_AHM_SERVICES_CANCEL_TOKEN: OnceLock<CancellationToken> = OnceLock::new();

/// Initialize the global AHM services cancellation token
pub fn init_ahm_services_cancel_token(cancel_token: CancellationToken) -> Result<()> {
    GLOBAL_AHM_SERVICES_CANCEL_TOKEN
        .set(cancel_token)
        .map_err(|_| Error::Config("AHM services cancel token already initialized".to_string()))
}

/// Get the global AHM services cancellation token
pub fn get_ahm_services_cancel_token() -> Option<&'static CancellationToken> {
    GLOBAL_AHM_SERVICES_CANCEL_TOKEN.get()
}

/// Create and initialize the global AHM services cancellation token
pub fn create_ahm_services_cancel_token() -> CancellationToken {
    let cancel_token = CancellationToken::new();
    init_ahm_services_cancel_token(cancel_token.clone()).expect("AHM services cancel token already initialized");
    cancel_token
}

/// Shutdown all AHM services gracefully
pub fn shutdown_ahm_services() {
    if let Some(cancel_token) = GLOBAL_AHM_SERVICES_CANCEL_TOKEN.get() {
        cancel_token.cancel();
    }
}
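A minimal lifecycle sketch (not part of the diff) of a background task cooperating with this token, assuming a tokio runtime and the functions above:

```rust
#[tokio::main]
async fn main() {
    let cancel = create_ahm_services_cancel_token();

    let worker = tokio::spawn(async move {
        loop {
            tokio::select! {
                // Ends promptly once shutdown_ahm_services() cancels the token.
                _ = cancel.cancelled() => break,
                _ = tokio::time::sleep(std::time::Duration::from_secs(60)) => {
                    // ... run one scan cycle ...
                }
            }
        }
    });

    shutdown_ahm_services();
    let _ = worker.await;
}
```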
1247
crates/ahm/src/scanner/data_scanner.rs
Normal file
671
crates/ahm/src/scanner/data_usage.rs
Normal file
@@ -0,0 +1,671 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::{collections::HashMap, sync::Arc, time::SystemTime};

use rustfs_ecstore::{bucket::metadata_sys::get_replication_config, config::com::read_config, store::ECStore};
use rustfs_utils::path::SLASH_SEPARATOR;
use serde::{Deserialize, Serialize};
use tracing::{error, info, warn};

use crate::error::{Error, Result};

// Data usage storage constants
pub const DATA_USAGE_ROOT: &str = SLASH_SEPARATOR;
const DATA_USAGE_OBJ_NAME: &str = ".usage.json";
const DATA_USAGE_BLOOM_NAME: &str = ".bloomcycle.bin";
pub const DATA_USAGE_CACHE_NAME: &str = ".usage-cache.bin";

// Data usage storage paths
lazy_static::lazy_static! {
    pub static ref DATA_USAGE_BUCKET: String = format!("{}{}{}",
        rustfs_ecstore::disk::RUSTFS_META_BUCKET,
        SLASH_SEPARATOR,
        rustfs_ecstore::disk::BUCKET_META_PREFIX
    );
    pub static ref DATA_USAGE_OBJ_NAME_PATH: String = format!("{}{}{}",
        rustfs_ecstore::disk::BUCKET_META_PREFIX,
        SLASH_SEPARATOR,
        DATA_USAGE_OBJ_NAME
    );
    pub static ref DATA_USAGE_BLOOM_NAME_PATH: String = format!("{}{}{}",
        rustfs_ecstore::disk::BUCKET_META_PREFIX,
        SLASH_SEPARATOR,
        DATA_USAGE_BLOOM_NAME
    );
}
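Other commits in this comparison migrate from `lazy_static` to std's `LazyLock`; for reference, a sketch of the equivalent formulation of the first path above (an illustration only, not an addition to this file):

```rust
use std::sync::LazyLock;

// Alternative form of the `lazy_static!` entry above: computed once, on first access.
pub static DATA_USAGE_BUCKET: LazyLock<String> = LazyLock::new(|| {
    format!(
        "{}{}{}",
        rustfs_ecstore::disk::RUSTFS_META_BUCKET,
        SLASH_SEPARATOR,
        rustfs_ecstore::disk::BUCKET_META_PREFIX
    )
});
```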
/// Bucket target usage info provides replication statistics
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct BucketTargetUsageInfo {
    pub replication_pending_size: u64,
    pub replication_failed_size: u64,
    pub replicated_size: u64,
    pub replica_size: u64,
    pub replication_pending_count: u64,
    pub replication_failed_count: u64,
    pub replicated_count: u64,
}

/// Bucket usage info provides bucket-level statistics
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct BucketUsageInfo {
    pub size: u64,
    // The following five fields suffixed with V1 are here for backward compatibility
    // Total size for objects that have not yet been replicated
    pub replication_pending_size_v1: u64,
    // Total size for objects that have witnessed one or more failures and will be retried
    pub replication_failed_size_v1: u64,
    // Total size for objects that have been replicated to the destination
    pub replicated_size_v1: u64,
    // Total number of objects pending replication
    pub replication_pending_count_v1: u64,
    // Total number of objects that failed replication
    pub replication_failed_count_v1: u64,

    pub objects_count: u64,
    pub object_size_histogram: HashMap<String, u64>,
    pub object_versions_histogram: HashMap<String, u64>,
    pub versions_count: u64,
    pub delete_markers_count: u64,
    pub replica_size: u64,
    pub replica_count: u64,
    pub replication_info: HashMap<String, BucketTargetUsageInfo>,
}

/// DataUsageInfo represents data usage stats of the underlying storage
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct DataUsageInfo {
    /// Total capacity
    pub total_capacity: u64,
    /// Total used capacity
    pub total_used_capacity: u64,
    /// Total free capacity
    pub total_free_capacity: u64,

    /// LastUpdate is the timestamp of when the data usage info was last updated
    pub last_update: Option<SystemTime>,

    /// Objects total count across all buckets
    pub objects_total_count: u64,
    /// Versions total count across all buckets
    pub versions_total_count: u64,
    /// Delete markers total count across all buckets
    pub delete_markers_total_count: u64,
    /// Objects total size across all buckets
    pub objects_total_size: u64,
    /// Replication info across all buckets
    pub replication_info: HashMap<String, BucketTargetUsageInfo>,

    /// Total number of buckets in this cluster
    pub buckets_count: u64,
    /// Per-bucket usage info across all buckets
    pub buckets_usage: HashMap<String, BucketUsageInfo>,
    /// Deprecated; kept here for backward compatibility reasons
    pub bucket_sizes: HashMap<String, u64>,
}

/// Size summary for a single object or group of objects
#[derive(Debug, Default, Clone)]
pub struct SizeSummary {
    /// Total size
    pub total_size: usize,
    /// Number of versions
    pub versions: usize,
    /// Number of delete markers
    pub delete_markers: usize,
    /// Replicated size
    pub replicated_size: usize,
    /// Replicated count
    pub replicated_count: usize,
    /// Pending size
    pub pending_size: usize,
    /// Failed size
    pub failed_size: usize,
    /// Replica size
    pub replica_size: usize,
    /// Replica count
    pub replica_count: usize,
    /// Pending count
    pub pending_count: usize,
    /// Failed count
    pub failed_count: usize,
    /// Replication target stats
    pub repl_target_stats: HashMap<String, ReplTargetSizeSummary>,
}

/// Replication target size summary
#[derive(Debug, Default, Clone)]
pub struct ReplTargetSizeSummary {
    /// Replicated size
    pub replicated_size: usize,
    /// Replicated count
    pub replicated_count: usize,
    /// Pending size
    pub pending_size: usize,
    /// Failed size
    pub failed_size: usize,
    /// Pending count
    pub pending_count: usize,
    /// Failed count
    pub failed_count: usize,
}

impl DataUsageInfo {
    /// Create a new DataUsageInfo
    pub fn new() -> Self {
        Self::default()
    }

    /// Add object metadata to data usage statistics
    pub fn add_object(&mut self, object_path: &str, meta_object: &rustfs_filemeta::MetaObject) {
        // This method is kept for backward compatibility;
        // for accurate version counting, use add_object_from_file_meta instead.
        let bucket_name = match self.extract_bucket_from_path(object_path) {
            Ok(name) => name,
            Err(_) => return,
        };

        // Update bucket statistics
        if let Some(bucket_usage) = self.buckets_usage.get_mut(&bucket_name) {
            bucket_usage.size += meta_object.size as u64;
            bucket_usage.objects_count += 1;
            bucket_usage.versions_count += 1; // Simplified: assume 1 version per object

            // Update size histogram
            let total_size = meta_object.size as u64;
            let size_ranges = [
                ("0-1KB", 0, 1024),
                ("1KB-1MB", 1024, 1024 * 1024),
                ("1MB-10MB", 1024 * 1024, 10 * 1024 * 1024),
                ("10MB-100MB", 10 * 1024 * 1024, 100 * 1024 * 1024),
                ("100MB-1GB", 100 * 1024 * 1024, 1024 * 1024 * 1024),
                ("1GB+", 1024 * 1024 * 1024, u64::MAX),
            ];

            for (range_name, min_size, max_size) in size_ranges {
                if total_size >= min_size && total_size < max_size {
                    *bucket_usage.object_size_histogram.entry(range_name.to_string()).or_insert(0) += 1;
                    break;
                }
            }

            // Update version histogram (simplified - count as single version)
            *bucket_usage
                .object_versions_histogram
                .entry("SINGLE_VERSION".to_string())
                .or_insert(0) += 1;
        } else {
            // Create new bucket usage
            let mut bucket_usage = BucketUsageInfo {
                size: meta_object.size as u64,
                objects_count: 1,
                versions_count: 1,
                ..Default::default()
            };
            bucket_usage.object_size_histogram.insert("0-1KB".to_string(), 1);
            bucket_usage.object_versions_histogram.insert("SINGLE_VERSION".to_string(), 1);
            self.buckets_usage.insert(bucket_name, bucket_usage);
        }

        // Update global statistics
        self.objects_total_size += meta_object.size as u64;
        self.objects_total_count += 1;
        self.versions_total_count += 1;
    }

    /// Add object from FileMeta for accurate version counting
    pub fn add_object_from_file_meta(&mut self, object_path: &str, file_meta: &rustfs_filemeta::FileMeta) {
        let bucket_name = match self.extract_bucket_from_path(object_path) {
            Ok(name) => name,
            Err(_) => return,
        };

        // Calculate accurate statistics from all versions
        let mut total_size = 0u64;
        let mut versions_count = 0u64;
        let mut delete_markers_count = 0u64;
        let mut latest_object_size = 0u64;

        // Process all versions to get accurate counts
        for version in &file_meta.versions {
            match rustfs_filemeta::FileMetaVersion::try_from(version.clone()) {
                Ok(ver) => {
                    if let Some(obj) = ver.object {
                        total_size += obj.size as u64;
                        versions_count += 1;
                        latest_object_size = obj.size as u64; // Keep track of latest object size
                    } else if ver.delete_marker.is_some() {
                        delete_markers_count += 1;
                    }
                }
                Err(_) => {
                    // Skip invalid versions
                    continue;
                }
            }
        }

        // Update bucket statistics
        if let Some(bucket_usage) = self.buckets_usage.get_mut(&bucket_name) {
            bucket_usage.size += total_size;
            bucket_usage.objects_count += 1;
            bucket_usage.versions_count += versions_count;
            bucket_usage.delete_markers_count += delete_markers_count;

            // Update size histogram based on latest object size
            let size_ranges = [
                ("0-1KB", 0, 1024),
                ("1KB-1MB", 1024, 1024 * 1024),
                ("1MB-10MB", 1024 * 1024, 10 * 1024 * 1024),
                ("10MB-100MB", 10 * 1024 * 1024, 100 * 1024 * 1024),
                ("100MB-1GB", 100 * 1024 * 1024, 1024 * 1024 * 1024),
                ("1GB+", 1024 * 1024 * 1024, u64::MAX),
            ];

            for (range_name, min_size, max_size) in size_ranges {
                if latest_object_size >= min_size && latest_object_size < max_size {
                    *bucket_usage.object_size_histogram.entry(range_name.to_string()).or_insert(0) += 1;
                    break;
                }
            }

            // Update version histogram based on actual version count
            let version_ranges = [
                ("1", 1, 1),
                ("2-5", 2, 5),
                ("6-10", 6, 10),
                ("11-50", 11, 50),
                ("51-100", 51, 100),
                ("100+", 101, usize::MAX),
            ];

            for (range_name, min_versions, max_versions) in version_ranges {
                if versions_count as usize >= min_versions && versions_count as usize <= max_versions {
                    *bucket_usage
                        .object_versions_histogram
                        .entry(range_name.to_string())
                        .or_insert(0) += 1;
                    break;
                }
            }
        } else {
            // Create new bucket usage
            let mut bucket_usage = BucketUsageInfo {
                size: total_size,
                objects_count: 1,
                versions_count,
                delete_markers_count,
                ..Default::default()
            };

            // Set size histogram
            let size_ranges = [
                ("0-1KB", 0, 1024),
                ("1KB-1MB", 1024, 1024 * 1024),
                ("1MB-10MB", 1024 * 1024, 10 * 1024 * 1024),
                ("10MB-100MB", 10 * 1024 * 1024, 100 * 1024 * 1024),
                ("100MB-1GB", 100 * 1024 * 1024, 1024 * 1024 * 1024),
                ("1GB+", 1024 * 1024 * 1024, u64::MAX),
            ];

            for (range_name, min_size, max_size) in size_ranges {
                if latest_object_size >= min_size && latest_object_size < max_size {
                    bucket_usage.object_size_histogram.insert(range_name.to_string(), 1);
                    break;
                }
            }

            // Set version histogram
            let version_ranges = [
                ("1", 1, 1),
                ("2-5", 2, 5),
                ("6-10", 6, 10),
                ("11-50", 11, 50),
                ("51-100", 51, 100),
                ("100+", 101, usize::MAX),
            ];

            for (range_name, min_versions, max_versions) in version_ranges {
                if versions_count as usize >= min_versions && versions_count as usize <= max_versions {
                    bucket_usage.object_versions_histogram.insert(range_name.to_string(), 1);
                    break;
                }
            }

            self.buckets_usage.insert(bucket_name, bucket_usage);
            // Update buckets count when adding new bucket
            self.buckets_count = self.buckets_usage.len() as u64;
        }

        // Update global statistics
        self.objects_total_size += total_size;
        self.objects_total_count += 1;
        self.versions_total_count += versions_count;
        self.delete_markers_total_count += delete_markers_count;
    }

    /// Extract bucket name from object path
    fn extract_bucket_from_path(&self, object_path: &str) -> Result<String> {
        let parts: Vec<&str> = object_path.split('/').collect();
        if parts.is_empty() {
            return Err(Error::Scanner("Invalid object path: empty".to_string()));
        }
        Ok(parts[0].to_string())
    }

    /// Update capacity information
    pub fn update_capacity(&mut self, total: u64, used: u64, free: u64) {
        self.total_capacity = total;
        self.total_used_capacity = used;
        self.total_free_capacity = free;
        self.last_update = Some(SystemTime::now());
    }

    /// Add bucket usage info
    pub fn add_bucket_usage(&mut self, bucket: String, usage: BucketUsageInfo) {
        self.buckets_usage.insert(bucket.clone(), usage);
        self.buckets_count = self.buckets_usage.len() as u64;
        self.last_update = Some(SystemTime::now());
    }

    /// Get bucket usage info
    pub fn get_bucket_usage(&self, bucket: &str) -> Option<&BucketUsageInfo> {
        self.buckets_usage.get(bucket)
    }

    /// Calculate total statistics from all buckets
    pub fn calculate_totals(&mut self) {
        self.objects_total_count = 0;
        self.versions_total_count = 0;
        self.delete_markers_total_count = 0;
        self.objects_total_size = 0;

        for usage in self.buckets_usage.values() {
            self.objects_total_count += usage.objects_count;
            self.versions_total_count += usage.versions_count;
            self.delete_markers_total_count += usage.delete_markers_count;
            self.objects_total_size += usage.size;
        }
    }

    /// Merge another DataUsageInfo into this one
    pub fn merge(&mut self, other: &DataUsageInfo) {
        // Merge bucket usage
        for (bucket, usage) in &other.buckets_usage {
            if let Some(existing) = self.buckets_usage.get_mut(bucket) {
                existing.merge(usage);
            } else {
                self.buckets_usage.insert(bucket.clone(), usage.clone());
            }
        }

        // Recalculate totals
        self.calculate_totals();

        // Ensure buckets_count stays consistent with buckets_usage
        self.buckets_count = self.buckets_usage.len() as u64;

        // Update last update time
        if let Some(other_update) = other.last_update {
            if self.last_update.is_none() || other_update > self.last_update.unwrap() {
                self.last_update = Some(other_update);
            }
        }
    }
}
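A small sketch of how `merge` could aggregate per-node scans into a cluster-wide view (the helper is hypothetical; its inputs would come from each node's scanner):

```rust
fn aggregate(node_a: &DataUsageInfo, node_b: &DataUsageInfo) -> DataUsageInfo {
    let mut cluster = DataUsageInfo::new();
    cluster.merge(node_a); // totals and buckets_count are recalculated inside merge()
    cluster.merge(node_b);
    cluster
}
```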
impl BucketUsageInfo {
    /// Create a new BucketUsageInfo
    pub fn new() -> Self {
        Self::default()
    }

    /// Add size summary to this bucket usage
    pub fn add_size_summary(&mut self, summary: &SizeSummary) {
        self.size += summary.total_size as u64;
        self.versions_count += summary.versions as u64;
        self.delete_markers_count += summary.delete_markers as u64;
        self.replica_size += summary.replica_size as u64;
        self.replica_count += summary.replica_count as u64;
    }

    /// Merge another BucketUsageInfo into this one
    pub fn merge(&mut self, other: &BucketUsageInfo) {
        self.size += other.size;
        self.objects_count += other.objects_count;
        self.versions_count += other.versions_count;
        self.delete_markers_count += other.delete_markers_count;
        self.replica_size += other.replica_size;
        self.replica_count += other.replica_count;

        // Merge histograms
        for (key, value) in &other.object_size_histogram {
            *self.object_size_histogram.entry(key.clone()).or_insert(0) += value;
        }

        for (key, value) in &other.object_versions_histogram {
            *self.object_versions_histogram.entry(key.clone()).or_insert(0) += value;
        }

        // Merge replication info
        for (target, info) in &other.replication_info {
            let entry = self.replication_info.entry(target.clone()).or_default();
            entry.replicated_size += info.replicated_size;
            entry.replica_size += info.replica_size;
            entry.replication_pending_size += info.replication_pending_size;
            entry.replication_failed_size += info.replication_failed_size;
            entry.replication_pending_count += info.replication_pending_count;
            entry.replication_failed_count += info.replication_failed_count;
            entry.replicated_count += info.replicated_count;
        }

        // Merge backward compatibility fields
        self.replication_pending_size_v1 += other.replication_pending_size_v1;
        self.replication_failed_size_v1 += other.replication_failed_size_v1;
        self.replicated_size_v1 += other.replicated_size_v1;
        self.replication_pending_count_v1 += other.replication_pending_count_v1;
        self.replication_failed_count_v1 += other.replication_failed_count_v1;
    }
}

impl SizeSummary {
    /// Create a new SizeSummary
    pub fn new() -> Self {
        Self::default()
    }

    /// Add another SizeSummary to this one
    pub fn add(&mut self, other: &SizeSummary) {
        self.total_size += other.total_size;
        self.versions += other.versions;
        self.delete_markers += other.delete_markers;
        self.replicated_size += other.replicated_size;
        self.replicated_count += other.replicated_count;
        self.pending_size += other.pending_size;
        self.failed_size += other.failed_size;
        self.replica_size += other.replica_size;
        self.replica_count += other.replica_count;
        self.pending_count += other.pending_count;
        self.failed_count += other.failed_count;

        // Merge replication target stats
        for (target, stats) in &other.repl_target_stats {
            let entry = self.repl_target_stats.entry(target.clone()).or_default();
            entry.replicated_size += stats.replicated_size;
            entry.replicated_count += stats.replicated_count;
            entry.pending_size += stats.pending_size;
            entry.failed_size += stats.failed_size;
            entry.pending_count += stats.pending_count;
            entry.failed_count += stats.failed_count;
        }
    }
}
/// Store data usage info to backend storage
pub async fn store_data_usage_in_backend(data_usage_info: DataUsageInfo, store: Arc<ECStore>) -> Result<()> {
    let data =
        serde_json::to_vec(&data_usage_info).map_err(|e| Error::Config(format!("Failed to serialize data usage info: {e}")))?;

    // Save to backend using the same mechanism as original code
    rustfs_ecstore::config::com::save_config(store, &DATA_USAGE_OBJ_NAME_PATH, data)
        .await
        .map_err(Error::Storage)?;

    Ok(())
}

/// Load data usage info from backend storage
pub async fn load_data_usage_from_backend(store: Arc<ECStore>) -> Result<DataUsageInfo> {
    let buf = match read_config(store, &DATA_USAGE_OBJ_NAME_PATH).await {
        Ok(data) => data,
        Err(e) => {
            error!("Failed to read data usage info from backend: {}", e);
            if e == rustfs_ecstore::error::Error::ConfigNotFound {
                return Ok(DataUsageInfo::default());
            }
            return Err(Error::Storage(e));
        }
    };

    let mut data_usage_info: DataUsageInfo =
        serde_json::from_slice(&buf).map_err(|e| Error::Config(format!("Failed to deserialize data usage info: {e}")))?;

    warn!("Loaded data usage info from backend {:?}", &data_usage_info);

    // Handle backward compatibility like original code
    if data_usage_info.buckets_usage.is_empty() {
        data_usage_info.buckets_usage = data_usage_info
            .bucket_sizes
            .iter()
            .map(|(bucket, &size)| {
                (
                    bucket.clone(),
                    BucketUsageInfo {
                        size,
                        ..Default::default()
                    },
                )
            })
            .collect();
    }

    if data_usage_info.bucket_sizes.is_empty() {
        data_usage_info.bucket_sizes = data_usage_info
            .buckets_usage
            .iter()
            .map(|(bucket, bui)| (bucket.clone(), bui.size))
            .collect();
    }

    for (bucket, bui) in &data_usage_info.buckets_usage {
        if bui.replicated_size_v1 > 0
            || bui.replication_failed_count_v1 > 0
            || bui.replication_failed_size_v1 > 0
            || bui.replication_pending_count_v1 > 0
        {
            if let Ok((cfg, _)) = get_replication_config(bucket).await {
                if !cfg.role.is_empty() {
                    data_usage_info.replication_info.insert(
                        cfg.role.clone(),
                        BucketTargetUsageInfo {
                            replication_failed_size: bui.replication_failed_size_v1,
                            replication_failed_count: bui.replication_failed_count_v1,
                            replicated_size: bui.replicated_size_v1,
                            replication_pending_count: bui.replication_pending_count_v1,
                            replication_pending_size: bui.replication_pending_size_v1,
                            ..Default::default()
                        },
                    );
                }
            }
        }
    }

    Ok(data_usage_info)
}

/// Example function showing how to use AHM data usage functionality
/// This demonstrates the integration pattern for DataUsageInfoHandler
pub async fn example_data_usage_integration() -> Result<()> {
    // Get the global storage instance
    let Some(store) = rustfs_ecstore::new_object_layer_fn() else {
        return Err(Error::Config("Storage not initialized".to_string()));
    };

    // Load data usage from backend (this replaces the original load_data_usage_from_backend)
    let data_usage = load_data_usage_from_backend(store).await?;

    info!(
        "Loaded data usage info: {} buckets, {} total objects",
        data_usage.buckets_count, data_usage.objects_total_count
    );

    // Example: Store updated data usage back to backend
    // This would typically be called by the scanner after collecting new statistics
    // store_data_usage_in_backend(data_usage, store).await?;

    Ok(())
}
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_data_usage_info_creation() {
        let mut info = DataUsageInfo::new();
        info.update_capacity(1000, 500, 500);

        assert_eq!(info.total_capacity, 1000);
        assert_eq!(info.total_used_capacity, 500);
        assert_eq!(info.total_free_capacity, 500);
        assert!(info.last_update.is_some());
    }

    #[test]
    fn test_bucket_usage_info_merge() {
        let mut usage1 = BucketUsageInfo::new();
        usage1.size = 100;
        usage1.objects_count = 10;
        usage1.versions_count = 5;

        let mut usage2 = BucketUsageInfo::new();
        usage2.size = 200;
        usage2.objects_count = 20;
        usage2.versions_count = 10;

        usage1.merge(&usage2);

        assert_eq!(usage1.size, 300);
        assert_eq!(usage1.objects_count, 30);
        assert_eq!(usage1.versions_count, 15);
    }

    #[test]
    fn test_size_summary_add() {
        let mut summary1 = SizeSummary::new();
        summary1.total_size = 100;
        summary1.versions = 5;

        let mut summary2 = SizeSummary::new();
        summary2.total_size = 200;
        summary2.versions = 10;

        summary1.add(&summary2);

        assert_eq!(summary1.total_size, 300);
        assert_eq!(summary1.versions, 15);
    }
}
277
crates/ahm/src/scanner/histogram.rs
Normal file
@@ -0,0 +1,277 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

/// Size interval for object size histogram
#[derive(Debug, Clone)]
pub struct SizeInterval {
    pub start: u64,
    pub end: u64,
    pub name: &'static str,
}

/// Version interval for object versions histogram
#[derive(Debug, Clone)]
pub struct VersionInterval {
    pub start: u64,
    pub end: u64,
    pub name: &'static str,
}

/// Object size histogram intervals
pub const OBJECTS_HISTOGRAM_INTERVALS: &[SizeInterval] = &[
    SizeInterval {
        start: 0,
        end: 1024 - 1,
        name: "LESS_THAN_1_KiB",
    },
    SizeInterval {
        start: 1024,
        end: 1024 * 1024 - 1,
        name: "1_KiB_TO_1_MiB",
    },
    SizeInterval {
        start: 1024 * 1024,
        end: 10 * 1024 * 1024 - 1,
        name: "1_MiB_TO_10_MiB",
    },
    SizeInterval {
        start: 10 * 1024 * 1024,
        end: 64 * 1024 * 1024 - 1,
        name: "10_MiB_TO_64_MiB",
    },
    SizeInterval {
        start: 64 * 1024 * 1024,
        end: 128 * 1024 * 1024 - 1,
        name: "64_MiB_TO_128_MiB",
    },
    SizeInterval {
        start: 128 * 1024 * 1024,
        end: 512 * 1024 * 1024 - 1,
        name: "128_MiB_TO_512_MiB",
    },
    SizeInterval {
        start: 512 * 1024 * 1024,
        end: u64::MAX,
        name: "MORE_THAN_512_MiB",
    },
];

/// Object version count histogram intervals
pub const OBJECTS_VERSION_COUNT_INTERVALS: &[VersionInterval] = &[
    VersionInterval {
        start: 1,
        end: 1,
        name: "1_VERSION",
    },
    VersionInterval {
        start: 2,
        end: 10,
        name: "2_TO_10_VERSIONS",
    },
    VersionInterval {
        start: 11,
        end: 100,
        name: "11_TO_100_VERSIONS",
    },
    VersionInterval {
        start: 101,
        end: 1000,
        name: "101_TO_1000_VERSIONS",
    },
    VersionInterval {
        start: 1001,
        end: u64::MAX,
        name: "MORE_THAN_1000_VERSIONS",
    },
];

/// Size histogram for object size distribution
#[derive(Debug, Clone, Default)]
pub struct SizeHistogram {
    counts: Vec<u64>,
}

/// Versions histogram for object version count distribution
#[derive(Debug, Clone, Default)]
pub struct VersionsHistogram {
    counts: Vec<u64>,
}

impl SizeHistogram {
    /// Create a new size histogram
    pub fn new() -> Self {
        Self {
            counts: vec![0; OBJECTS_HISTOGRAM_INTERVALS.len()],
        }
    }

    /// Add a size to the histogram
    pub fn add(&mut self, size: u64) {
        for (idx, interval) in OBJECTS_HISTOGRAM_INTERVALS.iter().enumerate() {
            if size >= interval.start && size <= interval.end {
                self.counts[idx] += 1;
                break;
            }
        }
    }

    /// Get the histogram as a map
    pub fn to_map(&self) -> HashMap<String, u64> {
        let mut result = HashMap::new();
        for (idx, count) in self.counts.iter().enumerate() {
            let interval = &OBJECTS_HISTOGRAM_INTERVALS[idx];
            result.insert(interval.name.to_string(), *count);
        }
        result
    }

    /// Merge another histogram into this one
    pub fn merge(&mut self, other: &SizeHistogram) {
        for (idx, count) in other.counts.iter().enumerate() {
            self.counts[idx] += count;
        }
    }

    /// Get total count
    pub fn total_count(&self) -> u64 {
        self.counts.iter().sum()
    }

    /// Reset the histogram
    pub fn reset(&mut self) {
        for count in &mut self.counts {
            *count = 0;
        }
    }
}

impl VersionsHistogram {
    /// Create a new versions histogram
    pub fn new() -> Self {
        Self {
            counts: vec![0; OBJECTS_VERSION_COUNT_INTERVALS.len()],
        }
    }

    /// Add a version count to the histogram
    pub fn add(&mut self, versions: u64) {
        for (idx, interval) in OBJECTS_VERSION_COUNT_INTERVALS.iter().enumerate() {
            if versions >= interval.start && versions <= interval.end {
                self.counts[idx] += 1;
                break;
            }
        }
    }

    /// Get the histogram as a map
    pub fn to_map(&self) -> HashMap<String, u64> {
        let mut result = HashMap::new();
        for (idx, count) in self.counts.iter().enumerate() {
            let interval = &OBJECTS_VERSION_COUNT_INTERVALS[idx];
            result.insert(interval.name.to_string(), *count);
        }
        result
    }

    /// Merge another histogram into this one
    pub fn merge(&mut self, other: &VersionsHistogram) {
        for (idx, count) in other.counts.iter().enumerate() {
            self.counts[idx] += count;
        }
    }

    /// Get total count
    pub fn total_count(&self) -> u64 {
        self.counts.iter().sum()
    }

    /// Reset the histogram
    pub fn reset(&mut self) {
        for count in &mut self.counts {
            *count = 0;
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_size_histogram() {
        let mut histogram = SizeHistogram::new();

        // Add some sizes
        histogram.add(512); // LESS_THAN_1_KiB
        histogram.add(1024); // 1_KiB_TO_1_MiB
        histogram.add(1024 * 1024); // 1_MiB_TO_10_MiB
        histogram.add(5 * 1024 * 1024); // 1_MiB_TO_10_MiB

        let map = histogram.to_map();

        assert_eq!(map.get("LESS_THAN_1_KiB"), Some(&1));
        assert_eq!(map.get("1_KiB_TO_1_MiB"), Some(&1));
        assert_eq!(map.get("1_MiB_TO_10_MiB"), Some(&2));
        assert_eq!(map.get("10_MiB_TO_64_MiB"), Some(&0));
    }

    #[test]
    fn test_versions_histogram() {
        let mut histogram = VersionsHistogram::new();

        // Add some version counts
        histogram.add(1); // 1_VERSION
        histogram.add(5); // 2_TO_10_VERSIONS
        histogram.add(50); // 11_TO_100_VERSIONS
        histogram.add(500); // 101_TO_1000_VERSIONS

        let map = histogram.to_map();

        assert_eq!(map.get("1_VERSION"), Some(&1));
        assert_eq!(map.get("2_TO_10_VERSIONS"), Some(&1));
        assert_eq!(map.get("11_TO_100_VERSIONS"), Some(&1));
        assert_eq!(map.get("101_TO_1000_VERSIONS"), Some(&1));
    }

    #[test]
    fn test_histogram_merge() {
        let mut histogram1 = SizeHistogram::new();
        histogram1.add(1024);
        histogram1.add(1024 * 1024);

        let mut histogram2 = SizeHistogram::new();
        histogram2.add(1024);
        histogram2.add(5 * 1024 * 1024);

        histogram1.merge(&histogram2);

        let map = histogram1.to_map();
        assert_eq!(map.get("1_KiB_TO_1_MiB"), Some(&2)); // 1 from histogram1 + 1 from histogram2
        assert_eq!(map.get("1_MiB_TO_10_MiB"), Some(&2)); // 1 from histogram1 + 1 from histogram2
    }

    #[test]
    fn test_histogram_reset() {
        let mut histogram = SizeHistogram::new();
        histogram.add(1024);
        histogram.add(1024 * 1024);

        assert_eq!(histogram.total_count(), 2);

        histogram.reset();
        assert_eq!(histogram.total_count(), 0);
    }
}
284
crates/ahm/src/scanner/metrics.rs
Normal file
@@ -0,0 +1,284 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::{
    collections::HashMap,
    sync::atomic::{AtomicU64, Ordering},
    time::{Duration, SystemTime},
};

use serde::{Deserialize, Serialize};
use tracing::info;

/// Scanner metrics
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ScannerMetrics {
    /// Total objects scanned since server start
    pub objects_scanned: u64,
    /// Total object versions scanned since server start
    pub versions_scanned: u64,
    /// Total directories scanned since server start
    pub directories_scanned: u64,
    /// Total bucket scans started since server start
    pub bucket_scans_started: u64,
    /// Total bucket scans finished since server start
    pub bucket_scans_finished: u64,
    /// Total objects with health issues found
    pub objects_with_issues: u64,
    /// Total heal tasks queued
    pub heal_tasks_queued: u64,
    /// Total heal tasks completed
    pub heal_tasks_completed: u64,
    /// Total heal tasks failed
    pub heal_tasks_failed: u64,
    /// Last scan activity time
    pub last_activity: Option<SystemTime>,
    /// Current scan cycle
    pub current_cycle: u64,
    /// Total scan cycles completed
    pub total_cycles: u64,
    /// Current scan duration
    pub current_scan_duration: Option<Duration>,
    /// Average scan duration
    pub avg_scan_duration: Duration,
    /// Objects scanned per second
    pub objects_per_second: f64,
    /// Buckets scanned per second
    pub buckets_per_second: f64,
    /// Storage metrics by bucket
    pub bucket_metrics: HashMap<String, BucketMetrics>,
    /// Disk metrics
    pub disk_metrics: HashMap<String, DiskMetrics>,
}

/// Bucket-specific metrics
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct BucketMetrics {
    /// Bucket name
    pub bucket: String,
    /// Total objects in bucket
    pub total_objects: u64,
    /// Total size of objects in bucket (bytes)
    pub total_size: u64,
    /// Objects with health issues
    pub objects_with_issues: u64,
    /// Last scan time
    pub last_scan_time: Option<SystemTime>,
    /// Scan duration
    pub scan_duration: Option<Duration>,
    /// Heal tasks queued for this bucket
    pub heal_tasks_queued: u64,
    /// Heal tasks completed for this bucket
    pub heal_tasks_completed: u64,
    /// Heal tasks failed for this bucket
    pub heal_tasks_failed: u64,
}

/// Disk-specific metrics
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct DiskMetrics {
    /// Disk path
    pub disk_path: String,
    /// Total disk space (bytes)
    pub total_space: u64,
    /// Used disk space (bytes)
    pub used_space: u64,
    /// Free disk space (bytes)
    pub free_space: u64,
    /// Objects scanned on this disk
    pub objects_scanned: u64,
    /// Objects with issues on this disk
    pub objects_with_issues: u64,
    /// Last scan time
    pub last_scan_time: Option<SystemTime>,
    /// Whether disk is online
    pub is_online: bool,
    /// Whether disk is being scanned
    pub is_scanning: bool,
}

/// Thread-safe metrics collector
pub struct MetricsCollector {
    /// Atomic counters for real-time metrics
    objects_scanned: AtomicU64,
    versions_scanned: AtomicU64,
    directories_scanned: AtomicU64,
    bucket_scans_started: AtomicU64,
    bucket_scans_finished: AtomicU64,
    objects_with_issues: AtomicU64,
    heal_tasks_queued: AtomicU64,
    heal_tasks_completed: AtomicU64,
    heal_tasks_failed: AtomicU64,
    current_cycle: AtomicU64,
    total_cycles: AtomicU64,
}

impl MetricsCollector {
    /// Create a new metrics collector
    pub fn new() -> Self {
        Self {
            objects_scanned: AtomicU64::new(0),
            versions_scanned: AtomicU64::new(0),
            directories_scanned: AtomicU64::new(0),
            bucket_scans_started: AtomicU64::new(0),
            bucket_scans_finished: AtomicU64::new(0),
            objects_with_issues: AtomicU64::new(0),
            heal_tasks_queued: AtomicU64::new(0),
            heal_tasks_completed: AtomicU64::new(0),
            heal_tasks_failed: AtomicU64::new(0),
            current_cycle: AtomicU64::new(0),
            total_cycles: AtomicU64::new(0),
        }
    }

    /// Increment objects scanned count
    pub fn increment_objects_scanned(&self, count: u64) {
        self.objects_scanned.fetch_add(count, Ordering::Relaxed);
    }

    /// Increment versions scanned count
    pub fn increment_versions_scanned(&self, count: u64) {
        self.versions_scanned.fetch_add(count, Ordering::Relaxed);
    }

    /// Increment directories scanned count
    pub fn increment_directories_scanned(&self, count: u64) {
        self.directories_scanned.fetch_add(count, Ordering::Relaxed);
    }

    /// Increment bucket scans started count
    pub fn increment_bucket_scans_started(&self, count: u64) {
        self.bucket_scans_started.fetch_add(count, Ordering::Relaxed);
    }

    /// Increment bucket scans finished count
    pub fn increment_bucket_scans_finished(&self, count: u64) {
        self.bucket_scans_finished.fetch_add(count, Ordering::Relaxed);
    }

    /// Increment objects with issues count
    pub fn increment_objects_with_issues(&self, count: u64) {
        self.objects_with_issues.fetch_add(count, Ordering::Relaxed);
    }

    /// Increment heal tasks queued count
    pub fn increment_heal_tasks_queued(&self, count: u64) {
        self.heal_tasks_queued.fetch_add(count, Ordering::Relaxed);
    }

    /// Increment heal tasks completed count
    pub fn increment_heal_tasks_completed(&self, count: u64) {
        self.heal_tasks_completed.fetch_add(count, Ordering::Relaxed);
    }

    /// Increment heal tasks failed count
    pub fn increment_heal_tasks_failed(&self, count: u64) {
        self.heal_tasks_failed.fetch_add(count, Ordering::Relaxed);
    }

    /// Set current cycle
    pub fn set_current_cycle(&self, cycle: u64) {
        self.current_cycle.store(cycle, Ordering::Relaxed);
    }

    /// Increment total cycles
    pub fn increment_total_cycles(&self) {
        self.total_cycles.fetch_add(1, Ordering::Relaxed);
    }

    /// Get current metrics snapshot
    pub fn get_metrics(&self) -> ScannerMetrics {
        ScannerMetrics {
            objects_scanned: self.objects_scanned.load(Ordering::Relaxed),
            versions_scanned: self.versions_scanned.load(Ordering::Relaxed),
            directories_scanned: self.directories_scanned.load(Ordering::Relaxed),
            bucket_scans_started: self.bucket_scans_started.load(Ordering::Relaxed),
            bucket_scans_finished: self.bucket_scans_finished.load(Ordering::Relaxed),
            objects_with_issues: self.objects_with_issues.load(Ordering::Relaxed),
            heal_tasks_queued: self.heal_tasks_queued.load(Ordering::Relaxed),
            heal_tasks_completed: self.heal_tasks_completed.load(Ordering::Relaxed),
            heal_tasks_failed: self.heal_tasks_failed.load(Ordering::Relaxed),
            last_activity: Some(SystemTime::now()),
            current_cycle: self.current_cycle.load(Ordering::Relaxed),
            total_cycles: self.total_cycles.load(Ordering::Relaxed),
            current_scan_duration: None, // Will be set by scanner
            avg_scan_duration: Duration::ZERO, // Will be calculated
            objects_per_second: 0.0, // Will be calculated
            buckets_per_second: 0.0, // Will be calculated
            bucket_metrics: HashMap::new(), // Will be populated by scanner
            disk_metrics: HashMap::new(), // Will be populated by scanner
        }
    }

    /// Reset all metrics
    pub fn reset(&self) {
        self.objects_scanned.store(0, Ordering::Relaxed);
        self.versions_scanned.store(0, Ordering::Relaxed);
        self.directories_scanned.store(0, Ordering::Relaxed);
        self.bucket_scans_started.store(0, Ordering::Relaxed);
        self.bucket_scans_finished.store(0, Ordering::Relaxed);
        self.objects_with_issues.store(0, Ordering::Relaxed);
        self.heal_tasks_queued.store(0, Ordering::Relaxed);
        self.heal_tasks_completed.store(0, Ordering::Relaxed);
        self.heal_tasks_failed.store(0, Ordering::Relaxed);
        self.current_cycle.store(0, Ordering::Relaxed);
        self.total_cycles.store(0, Ordering::Relaxed);

        info!("Scanner metrics reset");
    }
}

impl Default for MetricsCollector {
    fn default() -> Self {
        Self::new()
    }
}

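Because every counter is an `AtomicU64`, one collector can be shared across scan tasks behind an `Arc` with no locking; a minimal sketch (the task layout is hypothetical):

```rust
use std::sync::Arc;

#[tokio::main]
async fn main() {
    let metrics = Arc::new(MetricsCollector::new());

    let handles: Vec<_> = (0..4)
        .map(|_| {
            let m = Arc::clone(&metrics);
            // Each task records progress lock-free via the atomic counters.
            tokio::spawn(async move { m.increment_objects_scanned(100) })
        })
        .collect();

    for h in handles {
        let _ = h.await;
    }
    assert_eq!(metrics.get_metrics().objects_scanned, 400);
}
```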
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_metrics_collector_creation() {
        let collector = MetricsCollector::new();
        let metrics = collector.get_metrics();
        assert_eq!(metrics.objects_scanned, 0);
        assert_eq!(metrics.versions_scanned, 0);
    }

    #[test]
    fn test_metrics_increment() {
        let collector = MetricsCollector::new();

        collector.increment_objects_scanned(10);
        collector.increment_versions_scanned(5);
        collector.increment_objects_with_issues(2);

        let metrics = collector.get_metrics();
        assert_eq!(metrics.objects_scanned, 10);
        assert_eq!(metrics.versions_scanned, 5);
        assert_eq!(metrics.objects_with_issues, 2);
    }

    #[test]
    fn test_metrics_reset() {
        let collector = MetricsCollector::new();

        collector.increment_objects_scanned(10);
        collector.reset();

        let metrics = collector.get_metrics();
        assert_eq!(metrics.objects_scanned, 0);
    }
}
25
crates/ahm/src/scanner/mod.rs
Normal file
@@ -0,0 +1,25 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod data_scanner;
pub mod data_usage;
pub mod histogram;
pub mod metrics;

// Re-export main types for convenience
pub use data_scanner::Scanner;
pub use data_usage::{
    load_data_usage_from_backend, store_data_usage_in_backend, BucketTargetUsageInfo, BucketUsageInfo, DataUsageInfo,
};
pub use metrics::ScannerMetrics;
@@ -19,6 +19,10 @@ license.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true
homepage.workspace = true
description = "Application authentication and authorization for RustFS, providing secure access control and user management."
keywords = ["authentication", "authorization", "security", "rustfs", "Minio"]
categories = ["web-programming", "development-tools", "authentication"]

[dependencies]
base64-simd = { workspace = true }

@@ -23,14 +23,14 @@ use std::io::{Error, Result};

#[derive(Serialize, Deserialize, Debug, Default, Clone)]
pub struct Token {
    pub name: String,  // 应用 ID
    pub expired: u64,  // 到期时间 (UNIX 时间戳)
    pub name: String,  // Application ID
    pub expired: u64,  // Expiry time (UNIX timestamp)
}

// 公钥生成 Token
// [token] Token 对象
// [key] 公钥字符串
// 返回 base64 处理的加密字符串
/// Generates a token with the public key
/// [token] the Token object
/// [key] the public-key string
/// Returns the encrypted string, base64-encoded
pub fn gencode(token: &Token, key: &str) -> Result<String> {
    let data = serde_json::to_vec(token)?;
    let public_key = RsaPublicKey::from_public_key_pem(key).map_err(Error::other)?;
@@ -38,10 +38,10 @@ pub fn gencode(token: &Token, key: &str) -> Result<String> {
    Ok(base64_simd::URL_SAFE_NO_PAD.encode_to_string(&encrypted_data))
}

// 私钥解析 Token
// [token] base64 处理的加密字符串
// [key] 私钥字符串
// 返回 Token 对象
/// Parses a token with the private key
/// [token] the encrypted string, base64-encoded
/// [key] the private-key string
/// Returns the Token object
pub fn parse(token: &str, key: &str) -> Result<Token> {
    let encrypted_data = base64_simd::URL_SAFE_NO_PAD
        .decode_to_vec(token.as_bytes())
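For reference, a minimal round-trip of the two helpers above — a sketch assuming `Token`, `gencode`, and `parse` are in scope and the two PEM strings (supplied by the caller, not shown here) form a matching RSA key pair:

```rust
// Hypothetical usage of the gencode/parse pair; the key material is assumed.
fn round_trip(public_pem: &str, private_pem: &str) -> std::io::Result<()> {
    let token = Token {
        name: "my-app".to_string(), // application ID
        expired: 1_735_689_600,     // expiry as a UNIX timestamp
    };
    let encoded = gencode(&token, public_pem)?;  // serialize, encrypt, base64-encode
    let decoded = parse(&encoded, private_pem)?; // base64-decode, decrypt, deserialize
    assert_eq!(decoded.name, token.name);
    Ok(())
}
```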
@@ -19,11 +19,14 @@ edition.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
homepage.workspace = true
description = "Common utilities and data structures for RustFS, providing shared functionality across the project."
keywords = ["common", "utilities", "data-structures", "rustfs", "Minio"]
categories = ["web-programming", "development-tools", "data-structures"]

[lints]
workspace = true

[dependencies]
lazy_static.workspace = true
tokio.workspace = true
tonic = { workspace = true }
@@ -12,19 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;
#![allow(non_upper_case_globals)] // FIXME

use std::collections::HashMap;
use std::sync::LazyLock;

use lazy_static::lazy_static;
use tokio::sync::RwLock;
use tonic::transport::Channel;

lazy_static! {
    pub static ref GLOBAL_Local_Node_Name: RwLock<String> = RwLock::new("".to_string());
    pub static ref GLOBAL_Rustfs_Host: RwLock<String> = RwLock::new("".to_string());
    pub static ref GLOBAL_Rustfs_Port: RwLock<String> = RwLock::new("9000".to_string());
    pub static ref GLOBAL_Rustfs_Addr: RwLock<String> = RwLock::new("".to_string());
    pub static ref GLOBAL_Conn_Map: RwLock<HashMap<String, Channel>> = RwLock::new(HashMap::new());
}
pub static GLOBAL_Local_Node_Name: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
pub static GLOBAL_Rustfs_Host: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
pub static GLOBAL_Rustfs_Port: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("9000".to_string()));
pub static GLOBAL_Rustfs_Addr: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
pub static GLOBAL_Conn_Map: LazyLock<RwLock<HashMap<String, Channel>>> = LazyLock::new(|| RwLock::new(HashMap::new()));

pub async fn set_global_addr(addr: &str) {
    *GLOBAL_Rustfs_Addr.write().await = addr.to_string();
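The migration pattern in isolation — a minimal sketch, assuming Rust 1.80+ where `std::sync::LazyLock` is stable; the `CONN_CACHE` name is illustrative, not part of the codebase:

```rust
use std::collections::HashMap;
use std::sync::LazyLock;
use tokio::sync::RwLock;

// LazyLock replaces the lazy_static! macro with a plain static plus a
// once-run initializer closure; no proc macro or extra dependency needed.
static CONN_CACHE: LazyLock<RwLock<HashMap<String, String>>> =
    LazyLock::new(|| RwLock::new(HashMap::new()));

async fn remember(key: &str, value: &str) {
    CONN_CACHE.write().await.insert(key.to_string(), value.to_string());
}
```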
@@ -19,6 +19,10 @@ license.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true
homepage.workspace = true
description = "Configuration management for RustFS, providing a centralized way to manage application settings and features."
keywords = ["configuration", "settings", "management", "rustfs", "Minio"]
categories = ["web-programming", "development-tools", "config"]

[dependencies]
const-str = { workspace = true, optional = true }
@@ -19,6 +19,11 @@ license.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true
homepage.workspace = true
description = "Cryptography and security features for RustFS, providing encryption, hashing, and secure authentication mechanisms."
keywords = ["cryptography", "encryption", "hashing", "rustfs", "Minio"]
categories = ["web-programming", "development-tools", "cryptography"]
documentation = "https://docs.rs/rustfs-crypto/latest/rustfs_crypto/"

[lints]
workspace = true
@@ -19,6 +19,12 @@ edition.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
homepage.workspace = true
description = "Erasure coding storage backend for RustFS, providing efficient data storage and retrieval with redundancy."
keywords = ["erasure-coding", "storage", "rustfs", "Minio", "solomon"]
categories = ["web-programming", "development-tools", "filesystem"]
documentation = "https://docs.rs/rustfs-ecstore/latest/rustfs_ecstore/"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[lints]
@@ -145,8 +145,8 @@ impl Debug for LocalDisk {
impl LocalDisk {
    pub async fn new(ep: &Endpoint, cleanup: bool) -> Result<Self> {
        debug!("Creating local disk");
        let root = match fs::canonicalize(ep.get_file_path()).await {
            Ok(path) => path,
        let root = match PathBuf::from(ep.get_file_path()).absolutize() {
            Ok(path) => path.into_owned(),
            Err(e) => {
                if e.kind() == ErrorKind::NotFound {
                    return Err(DiskError::VolumeNotFound);
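Why `absolutize` rather than `fs::canonicalize`, as a sketch: `canonicalize` touches the filesystem and fails for paths that do not exist yet, while `absolutize` (from the `path_absolutize` crate) normalizes the path lexically — which suits a disk root that may still be created. The `to_absolute` helper below is illustrative:

```rust
use path_absolutize::Absolutize;
use std::path::PathBuf;

// Lexical normalization only: no filesystem access, no NotFound errors
// for paths that are about to be created.
fn to_absolute(raw: &str) -> std::io::Result<PathBuf> {
    Ok(PathBuf::from(raw).absolutize()?.into_owned())
}
```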
@@ -30,6 +30,7 @@ use std::{
    time::SystemTime,
};
use tokio::sync::{OnceCell, RwLock};
use tokio_util::sync::CancellationToken;
use uuid::Uuid;

pub const DISK_ASSUME_UNKNOWN_SIZE: u64 = 1 << 30;
@@ -62,7 +63,12 @@ static ref globalDeploymentIDPtr: OnceLock<Uuid> = OnceLock::new();
    pub static ref GLOBAL_BOOT_TIME: OnceCell<SystemTime> = OnceCell::new();
    pub static ref GLOBAL_LocalNodeName: String = "127.0.0.1:9000".to_string();
    pub static ref GLOBAL_LocalNodeNameHex: String = rustfs_utils::crypto::hex(GLOBAL_LocalNodeName.as_bytes());
    pub static ref GLOBAL_NodeNamesHex: HashMap<String, ()> = HashMap::new();}
    pub static ref GLOBAL_NodeNamesHex: HashMap<String, ()> = HashMap::new();
    pub static ref GLOBAL_REGION: OnceLock<String> = OnceLock::new();
}

// Global cancellation token for background services (data scanner and auto heal)
static GLOBAL_BACKGROUND_SERVICES_CANCEL_TOKEN: OnceLock<CancellationToken> = OnceLock::new();

static GLOBAL_ACTIVE_CRED: OnceLock<Credentials> = OnceLock::new();
@@ -182,3 +188,35 @@ pub async fn update_erasure_type(setup_type: SetupType) {
// }

type TypeLocalDiskSetDrives = Vec<Vec<Vec<Option<DiskStore>>>>;

pub fn set_global_region(region: String) {
    GLOBAL_REGION.set(region).unwrap();
}

pub fn get_global_region() -> Option<String> {
    GLOBAL_REGION.get().cloned()
}

/// Initialize the global background services cancellation token
pub fn init_background_services_cancel_token(cancel_token: CancellationToken) -> Result<(), CancellationToken> {
    GLOBAL_BACKGROUND_SERVICES_CANCEL_TOKEN.set(cancel_token)
}

/// Get the global background services cancellation token
pub fn get_background_services_cancel_token() -> Option<&'static CancellationToken> {
    GLOBAL_BACKGROUND_SERVICES_CANCEL_TOKEN.get()
}

/// Create and initialize the global background services cancellation token
pub fn create_background_services_cancel_token() -> CancellationToken {
    let cancel_token = CancellationToken::new();
    init_background_services_cancel_token(cancel_token.clone()).expect("Background services cancel token already initialized");
    cancel_token
}

/// Shutdown all background services gracefully
pub fn shutdown_background_services() {
    if let Some(cancel_token) = GLOBAL_BACKGROUND_SERVICES_CANCEL_TOKEN.get() {
        cancel_token.cancel();
    }
}
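A sketch of how these helpers are expected to be wired together at startup. `run_background_services` and the `ctrl_c`-based shutdown trigger are hypothetical; `create_background_services_cancel_token`, `init_data_scanner`, `init_auto_heal`, and `shutdown_background_services` are the functions introduced in this changeset:

```rust
async fn run_background_services() {
    let cancel = create_background_services_cancel_token(); // fills the global OnceLock

    // Services fetch the shared token internally via
    // get_background_services_cancel_token().
    init_data_scanner().await;
    init_auto_heal().await;

    tokio::signal::ctrl_c().await.expect("failed to listen for ctrl-c");
    shutdown_background_services(); // cancels every clone of `cancel`
    drop(cancel);
}
```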
@@ -24,6 +24,7 @@ use tokio::{
    },
    time::interval,
};
use tokio_util::sync::CancellationToken;
use tracing::{error, info};
use uuid::Uuid;

@@ -32,7 +33,7 @@ use super::{
    heal_ops::{HealSequence, new_bg_heal_sequence},
};
use crate::error::{Error, Result};
use crate::global::GLOBAL_MRFState;
use crate::global::{GLOBAL_MRFState, get_background_services_cancel_token};
use crate::heal::error::ERR_RETRY_HEALING;
use crate::heal::heal_commands::{HEAL_ITEM_BUCKET, HealScanMode};
use crate::heal::heal_ops::{BG_HEALING_UUID, HealSource};
@@ -54,6 +55,13 @@ use crate::{
pub static DEFAULT_MONITOR_NEW_DISK_INTERVAL: Duration = Duration::from_secs(10);

pub async fn init_auto_heal() {
    info!("Initializing auto heal background task");

    let Some(cancel_token) = get_background_services_cancel_token() else {
        error!("Background services cancel token not initialized");
        return;
    };

    init_background_healing().await;
    let v = env::var("_RUSTFS_AUTO_DRIVE_HEALING").unwrap_or("on".to_string());
    if v == "on" {
@@ -61,12 +69,16 @@ pub async fn init_auto_heal() {
        GLOBAL_BackgroundHealState
            .push_heal_local_disks(&get_local_disks_to_heal().await)
            .await;
        spawn(async {
            monitor_local_disks_and_heal().await;

        let cancel_clone = cancel_token.clone();
        spawn(async move {
            monitor_local_disks_and_heal(cancel_clone).await;
        });
    }
    spawn(async {
        GLOBAL_MRFState.heal_routine().await;

    let cancel_clone = cancel_token.clone();
    spawn(async move {
        GLOBAL_MRFState.heal_routine_with_cancel(cancel_clone).await;
    });
}
@@ -108,50 +120,66 @@ pub async fn get_local_disks_to_heal() -> Vec<Endpoint> {
    disks_to_heal
}

async fn monitor_local_disks_and_heal() {
async fn monitor_local_disks_and_heal(cancel_token: CancellationToken) {
    info!("Auto heal monitor started");
    let mut interval = interval(DEFAULT_MONITOR_NEW_DISK_INTERVAL);

    loop {
        interval.tick().await;
        let heal_disks = GLOBAL_BackgroundHealState.get_heal_local_disk_endpoints().await;
        if heal_disks.is_empty() {
            info!("heal local disks is empty");
            interval.reset();
            continue;
        }
        tokio::select! {
            _ = cancel_token.cancelled() => {
                info!("Auto heal monitor received shutdown signal, exiting gracefully");
                break;
            }
            _ = interval.tick() => {
                let heal_disks = GLOBAL_BackgroundHealState.get_heal_local_disk_endpoints().await;
                if heal_disks.is_empty() {
                    info!("heal local disks is empty");
                    interval.reset();
                    continue;
                }

        info!("heal local disks: {:?}", heal_disks);
                info!("heal local disks: {:?}", heal_disks);

        let store = new_object_layer_fn().expect("errServerNotInitialized");
        if let (_result, Some(err)) = store.heal_format(false).await.expect("heal format failed") {
            error!("heal local disk format error: {}", err);
            if err == Error::NoHealRequired {
            } else {
                info!("heal format err: {}", err.to_string());
                let store = new_object_layer_fn().expect("errServerNotInitialized");
                if let (_result, Some(err)) = store.heal_format(false).await.expect("heal format failed") {
                    error!("heal local disk format error: {}", err);
                    if err == Error::NoHealRequired {
                    } else {
                        info!("heal format err: {}", err.to_string());
                        interval.reset();
                        continue;
                    }
                }

                let mut futures = Vec::new();
                for disk in heal_disks.into_ref().iter() {
                    let disk_clone = disk.clone();
                    let cancel_clone = cancel_token.clone();
                    futures.push(async move {
                        let disk_for_cancel = disk_clone.clone();
                        tokio::select! {
                            _ = cancel_clone.cancelled() => {
                                info!("Disk healing task cancelled for disk: {}", disk_for_cancel);
                            }
                            _ = async {
                                GLOBAL_BackgroundHealState
                                    .set_disk_healing_status(disk_clone.clone(), true)
                                    .await;
                                if heal_fresh_disk(&disk_clone).await.is_err() {
                                    info!("heal_fresh_disk is err");
                                    GLOBAL_BackgroundHealState
                                        .set_disk_healing_status(disk_clone.clone(), false)
                                        .await;
                                }
                                GLOBAL_BackgroundHealState.pop_heal_local_disks(&[disk_clone]).await;
                            } => {}
                        }
                    });
                }
                let _ = join_all(futures).await;
                interval.reset();
                continue;
            }
        }

        let mut futures = Vec::new();
        for disk in heal_disks.into_ref().iter() {
            let disk_clone = disk.clone();
            futures.push(async move {
                GLOBAL_BackgroundHealState
                    .set_disk_healing_status(disk_clone.clone(), true)
                    .await;
                if heal_fresh_disk(&disk_clone).await.is_err() {
                    info!("heal_fresh_disk is err");
                    GLOBAL_BackgroundHealState
                        .set_disk_healing_status(disk_clone.clone(), false)
                        .await;
                    return;
                }
                GLOBAL_BackgroundHealState.pop_heal_local_disks(&[disk_clone]).await;
            });
        }
        let _ = join_all(futures).await;
        interval.reset();
    }
}
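The control-flow skeleton of the new loop, reduced to a placeholder work step (`do_work` is hypothetical): each pass races the next tick against cancellation, so shutdown never has to wait out a full interval.

```rust
use std::time::Duration;
use tokio::time::interval;
use tokio_util::sync::CancellationToken;

async fn cancellable_loop(cancel_token: CancellationToken) {
    let mut ticker = interval(Duration::from_secs(10));
    loop {
        tokio::select! {
            _ = cancel_token.cancelled() => break, // graceful shutdown path
            _ = ticker.tick() => do_work().await,  // periodic work path
        }
    }
}

async fn do_work() { /* placeholder for the healing pass */ }
```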
@@ -20,14 +20,13 @@ use std::{
    path::{Path, PathBuf},
    pin::Pin,
    sync::{
        Arc, OnceLock,
        Arc,
        atomic::{AtomicBool, AtomicU32, AtomicU64, Ordering},
    },
    time::{Duration, SystemTime},
};

use time::{self, OffsetDateTime};
use tokio_util::sync::CancellationToken;

use super::{
    data_scanner_metric::{ScannerMetric, ScannerMetrics, globalScannerMetrics},
@@ -51,7 +50,7 @@ use crate::{
        metadata_sys,
    },
    event_notification::{EventArgs, send_event},
    global::GLOBAL_LocalNodeName,
    global::{GLOBAL_LocalNodeName, get_background_services_cancel_token},
    store_api::{ObjectOptions, ObjectToDelete, StorageAPI},
};
use crate::{
@@ -128,8 +127,6 @@ lazy_static! {
    pub static ref globalHealConfig: Arc<RwLock<Config>> = Arc::new(RwLock::new(Config::default()));
}

static GLOBAL_SCANNER_CANCEL_TOKEN: OnceLock<CancellationToken> = OnceLock::new();

struct DynamicSleeper {
    factor: f64,
    max_sleep: Duration,
@@ -198,21 +195,18 @@ fn new_dynamic_sleeper(factor: f64, max_wait: Duration, is_scanner: bool) -> Dyn
/// - Minimum sleep duration to avoid excessive CPU usage
/// - Proper error handling and logging
///
/// # Returns
/// A CancellationToken that can be used to gracefully shutdown the scanner
///
/// # Architecture
/// 1. Initialize with random seed for sleep intervals
/// 2. Run scanner cycles in a loop
/// 3. Use randomized sleep between cycles to avoid thundering herd
/// 4. Ensure minimum sleep duration to prevent CPU thrashing
pub async fn init_data_scanner() -> CancellationToken {
pub async fn init_data_scanner() {
    info!("Initializing data scanner background task");

    let cancel_token = CancellationToken::new();
    GLOBAL_SCANNER_CANCEL_TOKEN
        .set(cancel_token.clone())
        .expect("Scanner already initialized");
    let Some(cancel_token) = get_background_services_cancel_token() else {
        error!("Background services cancel token not initialized");
        return;
    };

    let cancel_clone = cancel_token.clone();
    tokio::spawn(async move {
@@ -256,8 +250,6 @@ pub async fn init_data_scanner() -> CancellationToken {

        info!("Data scanner background task stopped gracefully");
    });

    cancel_token
}
/// Run a single data scanner cycle
@@ -282,7 +274,7 @@ async fn run_data_scanner_cycle() {
    };

    // Check for cancellation before starting expensive operations
    if let Some(token) = GLOBAL_SCANNER_CANCEL_TOKEN.get() {
    if let Some(token) = get_background_services_cancel_token() {
        if token.is_cancelled() {
            debug!("Scanner cancelled before starting cycle");
            return;
@@ -397,9 +389,8 @@ async fn execute_namespace_scan(
    cycle: u64,
    scan_mode: HealScanMode,
) -> Result<()> {
    let cancel_token = GLOBAL_SCANNER_CANCEL_TOKEN
        .get()
        .ok_or_else(|| Error::other("Scanner not initialized"))?;
    let cancel_token =
        get_background_services_cancel_token().ok_or_else(|| Error::other("Background services not initialized"))?;

    tokio::select! {
        result = store.ns_scanner(tx, cycle as usize, scan_mode) => {
@@ -25,7 +25,8 @@ use std::time::Duration;
use tokio::sync::RwLock;
use tokio::sync::mpsc::{Receiver, Sender};
use tokio::time::sleep;
use tracing::error;
use tokio_util::sync::CancellationToken;
use tracing::{error, info};
use uuid::Uuid;

pub const MRF_OPS_QUEUE_SIZE: u64 = 100000;
@@ -87,56 +88,96 @@ impl MRFState {
        let _ = self.tx.send(op).await;
    }

    pub async fn heal_routine(&self) {
    /// Enhanced heal routine with cancellation support
    ///
    /// This method implements the same healing logic as the original heal_routine,
    /// but adds proper cancellation support via CancellationToken.
    /// The core logic remains identical to maintain compatibility.
    pub async fn heal_routine_with_cancel(&self, cancel_token: CancellationToken) {
        info!("MRF heal routine started with cancellation support");

        loop {
            // rx used only there,
            if let Some(op) = self.rx.write().await.recv().await {
                if op.bucket == RUSTFS_META_BUCKET {
                    for pattern in &*PATTERNS {
                        if pattern.is_match(&op.object) {
                            return;
            tokio::select! {
                _ = cancel_token.cancelled() => {
                    info!("MRF heal routine received shutdown signal, exiting gracefully");
                    break;
                }
                op_result = async {
                    let mut rx_guard = self.rx.write().await;
                    rx_guard.recv().await
                } => {
                    if let Some(op) = op_result {
                        // Special path filtering (original logic)
                        if op.bucket == RUSTFS_META_BUCKET {
                            for pattern in &*PATTERNS {
                                if pattern.is_match(&op.object) {
                                    continue; // Skip this operation, continue with next
                                }
                            }
                        }

            let now = Utc::now();
            if now.sub(op.queued).num_seconds() < 1 {
                sleep(Duration::from_secs(1)).await;
            }
                        // Network reconnection delay (original logic)
                        let now = Utc::now();
                        if now.sub(op.queued).num_seconds() < 1 {
                            tokio::select! {
                                _ = cancel_token.cancelled() => {
                                    info!("MRF heal routine cancelled during reconnection delay");
                                    break;
                                }
                                _ = sleep(Duration::from_secs(1)) => {}
                            }
                        }

            let scan_mode = if op.bitrot_scan { HEAL_DEEP_SCAN } else { HEAL_NORMAL_SCAN };
            if op.object.is_empty() {
                if let Err(err) = heal_bucket(&op.bucket).await {
                    error!("heal bucket failed, bucket: {}, err: {:?}", op.bucket, err);
                }
            } else if op.versions.is_empty() {
                if let Err(err) =
                    heal_object(&op.bucket, &op.object, &op.version_id.clone().unwrap_or_default(), scan_mode).await
                {
                    error!("heal object failed, bucket: {}, object: {}, err: {:?}", op.bucket, op.object, err);
                }
            } else {
                let vers = op.versions.len() / 16;
                if vers > 0 {
                    for i in 0..vers {
                        let start = i * 16;
                        let end = start + 16;
                        // Core healing logic (original logic preserved)
                        let scan_mode = if op.bitrot_scan { HEAL_DEEP_SCAN } else { HEAL_NORMAL_SCAN };

                        if op.object.is_empty() {
                            // Heal bucket (original logic)
                            if let Err(err) = heal_bucket(&op.bucket).await {
                                error!("heal bucket failed, bucket: {}, err: {:?}", op.bucket, err);
                            }
                        } else if op.versions.is_empty() {
                            // Heal single object (original logic)
                            if let Err(err) = heal_object(
                                &op.bucket,
                                &op.object,
                        &Uuid::from_slice(&op.versions[start..end]).expect("").to_string(),
                        scan_mode,
                    )
                    .await
                    {
                                &op.version_id.clone().unwrap_or_default(),
                                scan_mode
                            ).await {
                                error!("heal object failed, bucket: {}, object: {}, err: {:?}", op.bucket, op.object, err);
                            }
                        } else {
                            // Heal multiple versions (original logic)
                            let vers = op.versions.len() / 16;
                            if vers > 0 {
                                for i in 0..vers {
                                    // Check for cancellation before each version
                                    if cancel_token.is_cancelled() {
                                        info!("MRF heal routine cancelled during version processing");
                                        return;
                                    }

                                    let start = i * 16;
                                    let end = start + 16;
                                    if let Err(err) = heal_object(
                                        &op.bucket,
                                        &op.object,
                                        &Uuid::from_slice(&op.versions[start..end]).expect("").to_string(),
                                        scan_mode,
                                    ).await {
                                        error!("heal object failed, bucket: {}, object: {}, err: {:?}", op.bucket, op.object, err);
                                    }
                                }
                            }
                        }
                    } else {
                        info!("MRF heal routine channel closed, exiting");
                        break;
                    }
                }
            } else {
                return;
            }
        }

        info!("MRF heal routine stopped gracefully");
    }
}
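A sketch of the version-buffer layout this loop assumes: `op.versions` is a flat byte buffer of concatenated 16-byte UUIDs, so chunking by 16 recovers one version ID per chunk. The `version_ids` helper below is illustrative, not project code:

```rust
use uuid::Uuid;

// Decode a flat buffer of concatenated UUID bytes into version-ID strings;
// any chunk that is not a valid UUID is silently skipped in this sketch.
fn version_ids(versions: &[u8]) -> Vec<String> {
    versions
        .chunks_exact(16)
        .filter_map(|chunk| Uuid::from_slice(chunk).ok())
        .map(|id| id.to_string())
        .collect()
}
```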
@@ -4099,6 +4099,8 @@ impl ObjectIO for SetDisks {
        }
    }

    drop(writers); // drop writers to close all files, this is to prevent FileAccessDenied errors when renaming data

    let (online_disks, _, op_old_dir) = Self::rename_data(
        &shuffle_disks,
        RUSTFS_META_TMP_BUCKET,
@@ -5039,6 +5041,8 @@ impl StorageAPI for SetDisks {

    let fi_buff = fi.marshal_msg()?;

    drop(writers); // drop writers to close all files

    let part_path = format!("{}/{}/{}", upload_id_path, fi.data_dir.unwrap_or_default(), part_suffix);
    let _ = Self::rename_part(
        &disks,
@@ -2508,14 +2508,14 @@ fn check_object_name_for_length_and_slash(bucket: &str, object: &str) -> Result<

    #[cfg(target_os = "windows")]
    {
        if object.contains('\\')
            || object.contains(':')
        if object.contains(':')
            || object.contains('*')
            || object.contains('?')
            || object.contains('"')
            || object.contains('|')
            || object.contains('<')
            || object.contains('>')
        // || object.contains('\\')
        {
            return Err(StorageError::ObjectNameInvalid(bucket.to_owned(), object.to_owned()));
        }
@@ -2549,9 +2549,9 @@ fn check_bucket_and_object_names(bucket: &str, object: &str) -> Result<()> {
        return Err(StorageError::ObjectNameInvalid(bucket.to_string(), object.to_string()));
    }

    if cfg!(target_os = "windows") && object.contains('\\') {
        return Err(StorageError::ObjectNameInvalid(bucket.to_string(), object.to_string()));
    }
    // if cfg!(target_os = "windows") && object.contains('\\') {
    //     return Err(StorageError::ObjectNameInvalid(bucket.to_string(), object.to_string()));
    // }

    Ok(())
}
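The relaxed rule, stated on its own (`is_valid_windows_object_name` is an illustrative helper, not project code): backslashes are now accepted in object keys on Windows, while characters reserved by the filesystem are still rejected.

```rust
// Accept backslashes; reject only the characters Windows reserves.
fn is_valid_windows_object_name(object: &str) -> bool {
    !object
        .chars()
        .any(|c| matches!(c, ':' | '*' | '?' | '"' | '|' | '<' | '>'))
}
```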
@@ -19,6 +19,11 @@ license.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true
homepage.workspace = true
description = "File metadata management for RustFS, providing efficient storage and retrieval of file metadata in a distributed system."
keywords = ["file-metadata", "storage", "retrieval", "rustfs", "Minio"]
categories = ["web-programming", "development-tools", "filesystem"]
documentation = "https://docs.rs/rustfs-filemeta/latest/rustfs_filemeta/"

[dependencies]
crc32fast = { workspace = true }

@@ -19,6 +19,11 @@ license.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true
homepage.workspace = true
description = "Identity and Access Management (IAM) for RustFS, providing user management, roles, and permissions."
keywords = ["iam", "identity", "access-management", "rustfs", "Minio"]
categories = ["web-programming", "development-tools", "authentication"]
documentation = "https://docs.rs/rustfs-iam/latest/rustfs_iam/"

[lints]
workspace = true
@@ -40,7 +45,6 @@ base64-simd = { workspace = true }
jsonwebtoken = { workspace = true }
tracing.workspace = true
rustfs-madmin.workspace = true
lazy_static.workspace = true
rustfs-utils = { workspace = true, features = ["path"] }

[dev-dependencies]
@@ -20,7 +20,6 @@ use crate::{
    manager::{extract_jwt_claims, get_default_policyes},
};
use futures::future::join_all;
use lazy_static::lazy_static;
use rustfs_ecstore::{
    config::{
        RUSTFS_CONFIG_PREFIX,
@@ -34,25 +33,28 @@ use rustfs_ecstore::{
use rustfs_policy::{auth::UserIdentity, policy::PolicyDoc};
use rustfs_utils::path::{SLASH_SEPARATOR, path_join_buf};
use serde::{Serialize, de::DeserializeOwned};
use std::sync::LazyLock;
use std::{collections::HashMap, sync::Arc};
use tokio::sync::broadcast::{self, Receiver as B_Receiver};
use tokio::sync::mpsc::{self, Sender};
use tracing::{info, warn};
use tracing::{debug, info, warn};

lazy_static! {
    pub static ref IAM_CONFIG_PREFIX: String = format!("{}/iam", RUSTFS_CONFIG_PREFIX);
    pub static ref IAM_CONFIG_USERS_PREFIX: String = format!("{}/iam/users/", RUSTFS_CONFIG_PREFIX);
    pub static ref IAM_CONFIG_SERVICE_ACCOUNTS_PREFIX: String = format!("{}/iam/service-accounts/", RUSTFS_CONFIG_PREFIX);
    pub static ref IAM_CONFIG_GROUPS_PREFIX: String = format!("{}/iam/groups/", RUSTFS_CONFIG_PREFIX);
    pub static ref IAM_CONFIG_POLICIES_PREFIX: String = format!("{}/iam/policies/", RUSTFS_CONFIG_PREFIX);
    pub static ref IAM_CONFIG_STS_PREFIX: String = format!("{}/iam/sts/", RUSTFS_CONFIG_PREFIX);
    pub static ref IAM_CONFIG_POLICY_DB_PREFIX: String = format!("{}/iam/policydb/", RUSTFS_CONFIG_PREFIX);
    pub static ref IAM_CONFIG_POLICY_DB_USERS_PREFIX: String = format!("{}/iam/policydb/users/", RUSTFS_CONFIG_PREFIX);
    pub static ref IAM_CONFIG_POLICY_DB_STS_USERS_PREFIX: String = format!("{}/iam/policydb/sts-users/", RUSTFS_CONFIG_PREFIX);
    pub static ref IAM_CONFIG_POLICY_DB_SERVICE_ACCOUNTS_PREFIX: String =
        format!("{}/iam/policydb/service-accounts/", RUSTFS_CONFIG_PREFIX);
    pub static ref IAM_CONFIG_POLICY_DB_GROUPS_PREFIX: String = format!("{}/iam/policydb/groups/", RUSTFS_CONFIG_PREFIX);
}
pub static IAM_CONFIG_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam"));
pub static IAM_CONFIG_USERS_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/users/"));
pub static IAM_CONFIG_SERVICE_ACCOUNTS_PREFIX: LazyLock<String> =
    LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/service-accounts/"));
pub static IAM_CONFIG_GROUPS_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/groups/"));
pub static IAM_CONFIG_POLICIES_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/policies/"));
pub static IAM_CONFIG_STS_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/sts/"));
pub static IAM_CONFIG_POLICY_DB_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/policydb/"));
pub static IAM_CONFIG_POLICY_DB_USERS_PREFIX: LazyLock<String> =
    LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/policydb/users/"));
pub static IAM_CONFIG_POLICY_DB_STS_USERS_PREFIX: LazyLock<String> =
    LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/policydb/sts-users/"));
pub static IAM_CONFIG_POLICY_DB_SERVICE_ACCOUNTS_PREFIX: LazyLock<String> =
    LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/policydb/service-accounts/"));
pub static IAM_CONFIG_POLICY_DB_GROUPS_PREFIX: LazyLock<String> =
    LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/policydb/groups/"));

const IAM_IDENTITY_FILE: &str = "identity.json";
const IAM_POLICY_FILE: &str = "policy.json";
@@ -370,7 +372,15 @@ impl Store for ObjectStore {
    async fn load_iam_config<Item: DeserializeOwned>(&self, path: impl AsRef<str> + Send) -> Result<Item> {
        let mut data = read_config(self.object_api.clone(), path.as_ref()).await?;

        data = Self::decrypt_data(&data)?;
        data = match Self::decrypt_data(&data) {
            Ok(v) => v,
            Err(err) => {
                debug!("decrypt_data failed: {}", err);
                // delete the config file when decrypt failed
                let _ = self.delete_iam_config(path.as_ref()).await;
                return Err(Error::ConfigNotFound);
            }
        };

        Ok(serde_json::from_slice(&data)?)
    }
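The recovery pattern above, reduced to a self-contained sketch with hypothetical `decrypt`/`evict` stand-ins for `decrypt_data`/`delete_iam_config`: if stored bytes can no longer be decrypted (for example after a key rotation), evict the entry and report it as missing rather than failing on every subsequent load.

```rust
// Toy decrypt: treats a leading 1 byte as "valid ciphertext".
fn decrypt(raw: &[u8]) -> Result<Vec<u8>, ()> {
    if raw.first() == Some(&1) { Ok(raw[1..].to_vec()) } else { Err(()) }
}

fn evict() { /* best-effort cleanup of the stored entry */ }

fn load_or_evict(raw: Vec<u8>) -> Result<Vec<u8>, &'static str> {
    match decrypt(&raw) {
        Ok(plain) => Ok(plain),
        Err(()) => {
            evict();
            Err("config not found") // callers treat this as absent and regenerate
        }
    }
}
```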
@@ -19,13 +19,17 @@ edition.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
homepage.workspace = true
description = "Distributed locking mechanism for RustFS, providing synchronization and coordination across distributed systems."
keywords = ["locking", "asynchronous", "distributed", "rustfs", "Minio"]
categories = ["web-programming", "development-tools", "asynchronous"]
documentation = "https://docs.rs/rustfs-lock/latest/rustfs_lock/"

[lints]
workspace = true

[dependencies]
async-trait.workspace = true
lazy_static.workspace = true
rustfs-protos.workspace = true
rand.workspace = true
serde.workspace = true

@@ -14,12 +14,12 @@
// limitations under the License.

use async_trait::async_trait;
use lazy_static::lazy_static;
use local_locker::LocalLocker;
use lock_args::LockArgs;
use remote_client::RemoteClient;
use std::io::Result;
use std::sync::Arc;
use std::sync::LazyLock;
use tokio::sync::RwLock;

pub mod drwmutex;
@@ -29,9 +29,7 @@ pub mod lrwmutex;
pub mod namespace_lock;
pub mod remote_client;

lazy_static! {
    pub static ref GLOBAL_LOCAL_SERVER: Arc<RwLock<LocalLocker>> = Arc::new(RwLock::new(LocalLocker::new()));
}
pub static GLOBAL_LOCAL_SERVER: LazyLock<Arc<RwLock<LocalLocker>>> = LazyLock::new(|| Arc::new(RwLock::new(LocalLocker::new())));

type LockClient = dyn Locker;
@@ -19,6 +19,11 @@ license.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true
homepage.workspace = true
description = "Management and administration tools for RustFS, providing a web interface and API for system management."
keywords = ["management", "administration", "web-interface", "rustfs", "Minio"]
categories = ["web-programming", "development-tools", "config"]
documentation = "https://docs.rs/rustfs-madmin/latest/rustfs_madmin/"

[lints]
workspace = true

@@ -19,6 +19,11 @@ license.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true
homepage.workspace = true
description = "File system notification service for RustFS, providing real-time updates on file changes and events."
keywords = ["file-system", "notification", "real-time", "rustfs", "Minio"]
categories = ["web-programming", "development-tools", "filesystem"]
documentation = "https://docs.rs/rustfs-notify/latest/rustfs_notify/"

[dependencies]
rustfs-config = { workspace = true, features = ["notify"] }

@@ -19,6 +19,11 @@ license.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true
homepage.workspace = true
description = "Observability and monitoring tools for RustFS, providing metrics, logging, and tracing capabilities."
keywords = ["observability", "metrics", "logging", "tracing", "RustFS"]
categories = ["web-programming", "development-tools::profiling", "asynchronous", "api-bindings", "development-tools::debugging"]
documentation = "https://docs.rs/rustfs-obs/latest/rustfs_obs/"

[lints]
workspace = true
@@ -419,7 +419,7 @@ fn format_with_color(w: &mut dyn std::io::Write, now: &mut DeferredNow, record:

    writeln!(
        w,
        "{} {} [{}] [{}:{}] [{}:{}] {}",
        "[{}] {} [{}] [{}:{}] [{}:{}] {}",
        now.now().format("%Y-%m-%d %H:%M:%S%.6f"),
        level_style.paint(level.to_string()),
        Color::Magenta.paint(record.target()),
@@ -443,7 +443,7 @@ fn format_for_file(w: &mut dyn std::io::Write, now: &mut DeferredNow, record: &R

    writeln!(
        w,
        "{} {} [{}] [{}:{}] [{}:{}] {}",
        "[{}] {} [{}] [{}:{}] [{}:{}] {}",
        now.now().format("%Y-%m-%d %H:%M:%S%.6f"),
        level,
        record.target(),
@@ -19,6 +19,11 @@ license.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true
homepage.workspace = true
description = "Policy management for RustFS, providing a framework for defining and enforcing policies across the system."
keywords = ["policy", "management", "enforcement", "rustfs", "Minio"]
categories = ["web-programming", "development-tools", "accessibility"]
documentation = "https://docs.rs/rustfs-policy/latest/rustfs_policy/"

[lints]
workspace = true

@@ -16,6 +16,14 @@
name = "rustfs-protos"
version.workspace = true
edition.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
homepage.workspace = true
description = "Protocol definitions for RustFS, providing gRPC and FlatBuffers interfaces for communication between components."
keywords = ["protocols", "gRPC", "FlatBuffers", "rustfs", "Minio"]
categories = ["web-programming", "development-tools", "data-structures", "asynchronous"]
documentation = "https://docs.rs/rustfs-protos/latest/rustfs_protos/"

[lints]
workspace = true
@@ -19,6 +19,11 @@ license.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true
homepage.workspace = true
description = "Rio is a RustFS component that provides a high-performance, asynchronous I/O framework for building scalable and efficient applications."
keywords = ["asynchronous", "IO", "framework", "rustfs", "Minio"]
categories = ["web-programming", "development-tools", "asynchronous"]
documentation = "https://docs.rs/rustfs-rio/latest/rustfs_rio/"

[lints]
workspace = true
@@ -19,6 +19,11 @@ edition.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
homepage.workspace = true
description = "S3 Select API implementation for RustFS, enabling efficient data retrieval from S3-compatible object stores."
keywords = ["s3-select", "api", "rustfs", "Minio", "object-store"]
categories = ["web-programming", "development-tools", "asynchronous"]
documentation = "https://docs.rs/rustfs-s3select-api/latest/rustfs_s3select_api/"

[dependencies]
async-trait.workspace = true

@@ -19,6 +19,11 @@ edition.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
homepage.workspace = true
description = "S3 Select query engine for RustFS, enabling efficient data retrieval from S3-compatible storage using SQL-like queries."
keywords = ["s3-select", "query-engine", "rustfs", "Minio", "data-retrieval"]
categories = ["web-programming", "development-tools", "data-structures"]
documentation = "https://docs.rs/rustfs-s3select-query/latest/rustfs_s3select_query/"

[dependencies]
rustfs-s3select-api = { workspace = true }
@@ -27,7 +32,6 @@ async-trait.workspace = true
datafusion = { workspace = true }
derive_builder = { workspace = true }
futures = { workspace = true }
lazy_static = { workspace = true }
parking_lot = { workspace = true }
s3s.workspace = true
snafu = { workspace = true, features = ["backtrace"] }
@@ -33,7 +33,6 @@ use datafusion::{
    execution::{RecordBatchStream, SendableRecordBatchStream},
};
use futures::{Stream, StreamExt};
use lazy_static::lazy_static;
use rustfs_s3select_api::{
    QueryError, QueryResult,
    query::{
@@ -48,6 +47,7 @@ use rustfs_s3select_api::{
    },
};
use s3s::dto::{FileHeaderInfo, SelectObjectContentInput};
use std::sync::LazyLock;

use crate::{
    execution::factory::QueryExecutionFactoryRef,
@@ -55,11 +55,9 @@ use crate::{
    sql::logical::planner::DefaultLogicalPlanner,
};

lazy_static! {
    static ref IGNORE: FileHeaderInfo = FileHeaderInfo::from_static(FileHeaderInfo::IGNORE);
    static ref NONE: FileHeaderInfo = FileHeaderInfo::from_static(FileHeaderInfo::NONE);
    static ref USE: FileHeaderInfo = FileHeaderInfo::from_static(FileHeaderInfo::USE);
}
static IGNORE: LazyLock<FileHeaderInfo> = LazyLock::new(|| FileHeaderInfo::from_static(FileHeaderInfo::IGNORE));
static NONE: LazyLock<FileHeaderInfo> = LazyLock::new(|| FileHeaderInfo::from_static(FileHeaderInfo::NONE));
static USE: LazyLock<FileHeaderInfo> = LazyLock::new(|| FileHeaderInfo::from_static(FileHeaderInfo::USE));

#[derive(Clone)]
pub struct SimpleQueryDispatcher {
@@ -19,10 +19,14 @@ license.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true
homepage.workspace = true
description = "Digital signature generation and verification for RustFS, ensuring data integrity and authenticity."
keywords = ["digital-signature", "verification", "integrity", "rustfs", "Minio"]
categories = ["web-programming", "development-tools", "cryptography"]
documentation = "https://docs.rs/rustfs-signer/latest/rustfs_signer/"

[dependencies]
tracing.workspace = true
lazy_static.workspace = true
bytes = { workspace = true }
http.workspace = true
time.workspace = true
@@ -13,8 +13,6 @@
// limitations under the License.

use http::{HeaderMap, HeaderValue, request};
use lazy_static::lazy_static;
use std::collections::HashMap;
use time::{OffsetDateTime, macros::format_description};

use super::request_signature_v4::{SERVICE_TYPE_S3, get_scope, get_signature, get_signing_key};
@@ -32,15 +30,13 @@ const _CRLF_LEN: i64 = 2;
const _TRAILER_KV_SEPARATOR: &str = ":";
const _TRAILER_SIGNATURE: &str = "x-amz-trailer-signature";

lazy_static! {
    static ref ignored_streaming_headers: HashMap<String, bool> = {
        let mut m = <HashMap<String, bool>>::new();
        m.insert("authorization".to_string(), true);
        m.insert("user-agent".to_string(), true);
        m.insert("content-type".to_string(), true);
        m
    };
}
// static ignored_streaming_headers: LazyLock<HashMap<String, bool>> = LazyLock::new(|| {
//     let mut m = <HashMap<String, bool>>::new();
//     m.insert("authorization".to_string(), true);
//     m.insert("user-agent".to_string(), true);
//     m.insert("content-type".to_string(), true);
//     m
// });

#[allow(dead_code)]
fn build_chunk_string_to_sign(t: OffsetDateTime, region: &str, previous_sig: &str, chunk_check_sum: &str) -> String {
@@ -16,9 +16,9 @@ use bytes::BytesMut;
use http::HeaderMap;
use http::Uri;
use http::request;
use lazy_static::lazy_static;
use std::collections::HashMap;
use std::fmt::Write;
use std::sync::LazyLock;
use time::{OffsetDateTime, macros::format_description};
use tracing::debug;

@@ -32,15 +32,14 @@ pub const SIGN_V4_ALGORITHM: &str = "AWS4-HMAC-SHA256";
pub const SERVICE_TYPE_S3: &str = "s3";
pub const SERVICE_TYPE_STS: &str = "sts";

lazy_static! {
    static ref v4_ignored_headers: HashMap<String, bool> = {
        let mut m = <HashMap<String, bool>>::new();
        m.insert("accept-encoding".to_string(), true);
        m.insert("authorization".to_string(), true);
        m.insert("user-agent".to_string(), true);
        m
    };
}
#[allow(non_upper_case_globals)] // FIXME
static v4_ignored_headers: LazyLock<HashMap<String, bool>> = LazyLock::new(|| {
    let mut m = <HashMap<String, bool>>::new();
    m.insert("accept-encoding".to_string(), true);
    m.insert("authorization".to_string(), true);
    m.insert("user-agent".to_string(), true);
    m
});

pub fn get_signing_key(secret: &str, loc: &str, t: OffsetDateTime, service_type: &str) -> [u8; 32] {
    let mut s = "AWS4".to_string();
@@ -19,6 +19,10 @@ license.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true
homepage.workspace = true
description = "Utility functions and data structures for RustFS, providing essential features like hashing, compression, and network utilities."
keywords = ["utilities", "hashing", "compression", "network", "rustfs"]
categories = ["web-programming", "development-tools", "cryptography"]

[dependencies]
base64-simd = { workspace = true, optional = true }
@@ -26,7 +30,6 @@ blake3 = { workspace = true, optional = true }
crc32fast.workspace = true
hex-simd = { workspace = true, optional = true }
highway = { workspace = true, optional = true }
lazy_static = { workspace = true, optional = true }
local-ip-address = { workspace = true, optional = true }
md-5 = { workspace = true, optional = true }
netif = { workspace = true, optional = true }
@@ -73,12 +76,12 @@ workspace = true
default = ["ip"] # features that are enabled by default
ip = ["dep:local-ip-address"] # ip characteristics and their dependencies
tls = ["dep:rustls", "dep:rustls-pemfile", "dep:rustls-pki-types"] # tls characteristics and their dependencies
net = ["ip", "dep:url", "dep:netif", "dep:lazy_static", "dep:futures", "dep:transform-stream", "dep:bytes", "dep:s3s", "dep:hyper", "dep:hyper-util"] # empty network features
net = ["ip", "dep:url", "dep:netif", "dep:futures", "dep:transform-stream", "dep:bytes", "dep:s3s", "dep:hyper", "dep:hyper-util"] # empty network features
io = ["dep:tokio"]
path = []
notify = ["dep:hyper", "dep:s3s"] # file system notification features
compress = ["dep:flate2", "dep:brotli", "dep:snap", "dep:lz4", "dep:zstd"]
string = ["dep:regex", "dep:lazy_static", "dep:rand"]
string = ["dep:regex", "dep:rand"]
crypto = ["dep:base64-simd", "dep:hex-simd", "dep:hmac", "dep:hyper", "dep:sha1"]
hash = ["dep:highway", "dep:md-5", "dep:sha2", "dep:blake3", "dep:serde", "dep:siphasher", "dep:hex-simd", "dep:base64-simd"]
os = ["dep:nix", "dep:tempfile", "winapi"] # operating system utilities
@@ -17,8 +17,8 @@ use futures::pin_mut;
use futures::{Stream, StreamExt};
use hyper::client::conn::http2::Builder;
use hyper_util::rt::TokioExecutor;
use lazy_static::lazy_static;
use std::net::Ipv4Addr;
use std::net::Ipv6Addr;
use std::sync::LazyLock;
use std::{
    collections::HashSet,
    fmt::Display,
@@ -27,9 +27,7 @@ use std::{
use transform_stream::AsyncTryStream;
use url::{Host, Url};

lazy_static! {
    static ref LOCAL_IPS: Vec<IpAddr> = must_get_local_ips().unwrap();
}
static LOCAL_IPS: LazyLock<Vec<IpAddr>> = LazyLock::new(|| must_get_local_ips().unwrap());

/// helper for validating if the provided arg is an ip address.
pub fn is_socket_addr(addr: &str) -> bool {
@@ -202,7 +200,7 @@ pub fn parse_and_resolve_address(addr_str: &str) -> std::io::Result<SocketAddr>
        } else {
            port
        };
        SocketAddr::new(IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)), final_port)
        SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), final_port)
    } else {
        let mut addr = check_local_server_addr(addr_str)?; // assume check_local_server_addr is available here
        if addr.port() == 0 {
@@ -478,12 +476,12 @@ mod test {
    fn test_parse_and_resolve_address() {
        // Test port-only format
        let result = parse_and_resolve_address(":8080").unwrap();
        assert_eq!(result.ip(), IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)));
        assert_eq!(result.ip(), IpAddr::V6(Ipv6Addr::UNSPECIFIED));
        assert_eq!(result.port(), 8080);

        // Test port-only format with port 0 (should get available port)
        let result = parse_and_resolve_address(":0").unwrap();
        assert_eq!(result.ip(), IpAddr::V4(Ipv4Addr::new(0, 0, 0, 0)));
        assert_eq!(result.ip(), IpAddr::V6(Ipv6Addr::UNSPECIFIED));
        assert!(result.port() > 0);

        // Test localhost with port
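The rationale for the switch, as a sketch: on most dual-stack hosts, binding the IPv6 unspecified address also accepts IPv4 clients (subject to the platform's `IPV6_V6ONLY` default), so a single listener covers both address families. The `default_bind` helper below is illustrative:

```rust
use std::net::{IpAddr, Ipv6Addr, SocketAddr};

// [::]:port instead of 0.0.0.0:port — one socket for IPv4 and IPv6
// on dual-stack systems where IPV6_V6ONLY is off.
fn default_bind(port: u16) -> SocketAddr {
    SocketAddr::new(IpAddr::V6(Ipv6Addr::UNSPECIFIED), port)
}
```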
@@ -12,10 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use lazy_static::*;
use rand::{Rng, RngCore};
use regex::Regex;
use std::io::{Error, Result};
use std::sync::LazyLock;

pub fn parse_bool(str: &str) -> Result<bool> {
    match str {
@@ -116,9 +116,7 @@ pub fn match_as_pattern_prefix(pattern: &str, text: &str) -> bool {
    text.len() <= pattern.len()
}

lazy_static! {
    static ref ELLIPSES_RE: Regex = Regex::new(r"(.*)(\{[0-9a-z]*\.\.\.[0-9a-z]*\})(.*)").unwrap();
}
static ELLIPSES_RE: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"(.*)(\{[0-9a-z]*\.\.\.[0-9a-z]*\})(.*)").unwrap());

/// Ellipses constants
const OPEN_BRACES: &str = "{";
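The same idiom in isolation (`DIGITS` is an illustrative name): the regex compiles once, on first use, and every later call reuses the compiled automaton.

```rust
use regex::Regex;
use std::sync::LazyLock;

// Compiled lazily on first access; subsequent calls pay only a deref.
static DIGITS: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"^\d+$").unwrap());

fn is_all_digits(s: &str) -> bool {
    DIGITS.is_match(s)
}
```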
@@ -19,10 +19,15 @@ license.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true
homepage.workspace = true
description = "Workers for RustFS, providing background processing capabilities for tasks such as data synchronization, indexing, and more."
keywords = ["workers", "tasks", "rustfs", "Minio"]
categories = ["web-programming", "development-tools"]
documentation = "https://docs.rs/rustfs-workers/latest/rustfs_workers/"

[lints]
workspace = true

[dependencies]
tokio.workspace = true
tokio = { workspace = true, features = ["sync"] }
tracing.workspace = true
@@ -19,6 +19,11 @@ license.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true
homepage.workspace = true
description = "ZIP file handling for RustFS, providing support for reading and writing ZIP archives."
keywords = ["zip", "compression", "rustfs", "Minio"]
categories = ["web-programming", "development-tools", "compression"]
documentation = "https://docs.rs/rustfs-zip/latest/rustfs_zip/"

[dependencies]
57
pr_description.md
Normal file
@@ -0,0 +1,57 @@
## Summary

This PR modifies the GitHub Actions workflows to ensure that **version releases never get skipped** during CI/CD execution, addressing the issue where duplicate action detection could skip important release processes.

## Changes Made

### 🔧 Core Modifications

1. **Modified skip-duplicate-actions configuration**:
   - Added the `skip_after_successful_duplicate: ${{ !startsWith(github.ref, 'refs/tags/') }}` parameter
   - This ensures tag pushes (version releases) are never skipped due to duplicate detection

2. **Updated workflow job conditions**:
   - **CI Workflow** (`ci.yml`): Modified the `test-and-lint` and `e2e-tests` jobs
   - **Build Workflow** (`build.yml`): Modified the `build-check`, `build-rustfs`, `build-gui`, `release`, and `upload-oss` jobs
   - All jobs now use the condition: `startsWith(github.ref, 'refs/tags/') || needs.skip-check.outputs.should_skip != 'true'`

### 🎯 Problem Solved

- **Before**: Version releases could be skipped if there were concurrent workflows or duplicate actions
- **After**: Tag pushes always trigger complete CI/CD pipeline execution, ensuring:
  - ✅ Full test suite execution
  - ✅ Code quality checks (fmt, clippy)
  - ✅ Multi-platform builds (Linux, macOS, Windows)
  - ✅ GUI builds for releases
  - ✅ Release asset creation
  - ✅ OSS uploads

### 🚀 Benefits

1. **Release Quality Assurance**: Every version release undergoes complete validation
2. **Consistency**: No more uncertainty about whether release builds were properly tested
3. **Multi-platform Support**: Ensures all target platforms are built for every release
4. **Backward Compatibility**: Non-release workflows still benefit from duplicate-skip optimization

## Testing

- [x] Workflow syntax validated
- [x] Logic conditions verified for both tag and non-tag scenarios
- [x] Maintains existing optimization for development builds
- [x] Follows project coding standards and commit conventions

## Related Issues

This resolves the concern about workflow skipping during version releases, ensuring complete CI/CD execution for all published versions.

## Checklist

- [x] Code follows project formatting standards
- [x] Commit message follows Conventional Commits format
- [x] Changes are backwards compatible
- [x] No breaking changes introduced
- [x] All workflow conditions properly tested

---

**Note**: This change only affects the execution logic for tag pushes (version releases). Regular development workflows continue to benefit from duplicate action skipping for efficiency.
@@ -19,6 +19,11 @@ edition.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
homepage.workspace = true
description = "RustFS is a high-performance, distributed file system designed for modern cloud-native applications, providing efficient data storage and retrieval with advanced features like S3 Select, IAM, and policy management."
keywords.workspace = true
categories.workspace = true
documentation = "https://docs.rustfs.com/"

# See more keys and their definitions at https://doc.rust-lang.org/cargo/reference/manifest.html
[[bin]]
@@ -29,6 +34,7 @@ path = "src/main.rs"
workspace = true

[dependencies]
rustfs-ahm = { workspace = true }
rustfs-zip = { workspace = true }
rustfs-madmin = { workspace = true }
rustfs-s3select-api = { workspace = true }
@@ -61,7 +67,6 @@ hyper.workspace = true
hyper-util.workspace = true
http.workspace = true
http-body.workspace = true
lazy_static.workspace = true
matchit = { workspace = true }
mime_guess = { workspace = true }
opentelemetry = { workspace = true }
147
rustfs/README.md
@@ -1,36 +1,129 @@
|
||||
# RustFS
|
||||
[](https://rustfs.com)
|
||||
|
||||
RustFS is a simple file system written in Rust. It is designed to be a learning project for those who want to understand
|
||||
how file systems work and how to implement them in Rust.
|
||||
<p align="center">RustFS is a high-performance distributed object storage software built using Rust</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
|
||||
<a href="https://github.com/rustfs/rustfs/actions/workflows/docker.yml"><img alt="Build and Push Docker Images" src="https://github.com/rustfs/rustfs/actions/workflows/docker.yml/badge.svg" /></a>
|
||||
<img alt="GitHub commit activity" src="https://img.shields.io/github/commit-activity/m/rustfs/rustfs"/>
|
||||
<img alt="Github Last Commit" src="https://img.shields.io/github/last-commit/rustfs/rustfs"/>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://docs.rustfs.com/en/introduction.html">Getting Started</a>
|
||||
· <a href="https://docs.rustfs.com/en/">Docs</a>
|
||||
· <a href="https://github.com/rustfs/rustfs/issues">Bug reports</a>
|
||||
· <a href="https://github.com/rustfs/rustfs/discussions">Discussions</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
English | <a href="https://github.com/rustfs/rustfs/blob/main/README_ZH.md">简体中文</a>
|
||||
</p>
|
||||
|
||||
RustFS is a high-performance distributed object storage software built using Rust, one of the most popular languages
|
||||
worldwide. Along with MinIO, it shares a range of advantages such as simplicity, S3 compatibility, open-source nature,
|
||||
support for data lakes, AI, and big data. Furthermore, it has a better and more user-friendly open-source license in
|
||||
comparison to other storage systems, being constructed under the Apache license. As Rust serves as its foundation,
|
||||
RustFS provides faster speed and safer distributed features for high-performance object storage.
|
||||
|
||||
## Features
|
||||
|
||||
- Simple file system structure
|
||||
- Basic file operations (create, read, write, delete)
|
||||
- Directory support
|
||||
- File metadata (size, creation time, etc.)
|
||||
- Basic error handling
|
||||
- Unit tests for core functionality
|
||||
- Documentation for public API
|
||||
- Example usage
|
||||
- License information
|
||||
- Contributing guidelines
|
||||
- Changelog
|
||||
- Code of conduct
|
||||
- Acknowledgements
|
||||
- Contact information
|
||||
- Links to additional resources
|
||||
- **High Performance**: Built with Rust, ensuring speed and efficiency.
|
||||
- **Distributed Architecture**: Scalable and fault-tolerant design for large-scale deployments.
|
||||
- **S3 Compatibility**: Seamless integration with existing S3-compatible applications.
|
||||
- **Data Lake Support**: Optimized for big data and AI workloads.
|
||||
- **Open Source**: Licensed under Apache 2.0, encouraging community contributions and transparency.
|
||||
- **User-Friendly**: Designed with simplicity in mind, making it easy to deploy and manage.
|
||||
|
||||
## Getting Started

To get started with RustFS, clone the repository and build the project:

```bash
git clone git@github.com:rustfs/s3-rustfs.git
cd rustfs
cargo build
```

## RustFS vs MinIO

Stress test server parameters:

| Type    | Parameter | Remark                                                   |
|---------|-----------|----------------------------------------------------------|
| CPU     | 2 cores   | Intel Xeon (Sapphire Rapids) Platinum 8475B, 2.7/3.2 GHz |
| Memory  | 4 GB      |                                                          |
| Network | 15 Gbps   |                                                          |
| Drive   | 40 GB x 4 | 3,800 IOPS per drive                                     |

## Usage

To use RustFS, you can create a new file system instance and perform basic file operations. Here is an example:

<https://github.com/user-attachments/assets/2e4979b5-260c-4f2c-ac12-c87fd558072a>
### RustFS vs Other object storage

| RustFS                                                                           | Other object storage                                                                              |
|----------------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------|
| Powerful console                                                                 | Simple, barely usable console                                                                      |
| Developed in Rust, so memory is safer                                            | Developed in Go or C, with potential issues like memory GC/leaks                                   |
| Does not report logs to third countries                                          | Reporting logs to other third countries may violate national security laws                         |
| Licensed under Apache, more business-friendly                                    | AGPL v3 and other licenses: polluted open source, license traps, and intellectual property risks   |
| Comprehensive S3 support, works with domestic and international cloud providers  | Full support for S3, but no local cloud vendor support                                             |
| Rust-based development, strong support for secure and innovative devices         | Poor support for edge gateways and secure innovative devices                                       |
| Stable commercial prices, free community support                                 | High pricing, with costs up to $250,000 for 1 PiB                                                  |
| No risk                                                                          | Intellectual property risks and risks of prohibited uses                                           |
## Quickstart

To get started with RustFS, follow these steps:

1. **One-click installation script (Option 1)**

   ```bash
   curl -O https://rustfs.com/install_rustfs.sh && bash install_rustfs.sh
   ```

2. **Docker Quick Start (Option 2)**

   ```bash
   podman run -d -p 9000:9000 -p 9001:9001 -v /data:/data quay.io/rustfs/rustfs
   ```

3. **Access the Console**: Open your web browser and navigate to `http://localhost:9001` to access the RustFS console.
   The default username and password are both `rustfsadmin`.
4. **Create a Bucket**: Use the console to create a new bucket for your objects.
5. **Upload Objects**: You can upload files directly through the console or use S3-compatible APIs to interact with your
   RustFS instance, as in the client sketch below.
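Once the server is up, any S3-compatible client can talk to it. Below is a minimal, hypothetical sketch using the
community `aws-sdk-s3` crate; the endpoint, region, bucket name, and the default `rustfsadmin` credentials are
assumptions carried over from the steps above, not fixed details of RustFS itself.

```rust
use aws_sdk_s3::{
    Client, Config,
    config::{BehaviorVersion, Credentials, Region},
    primitives::ByteStream,
};

#[tokio::main]
async fn main() -> Result<(), aws_sdk_s3::Error> {
    // Default console credentials from step 3; change them in production.
    let creds = Credentials::new("rustfsadmin", "rustfsadmin", None, None, "static");
    let config = Config::builder()
        .behavior_version(BehaviorVersion::latest())
        .region(Region::new("us-east-1")) // placeholder region
        .endpoint_url("http://localhost:9000") // S3 port mapped in step 2
        .credentials_provider(creds)
        .force_path_style(true) // path-style addressing for a local endpoint
        .build();
    let client = Client::from_conf(config);

    // Create a bucket, then upload a small object into it.
    client.create_bucket().bucket("demo").send().await?;
    client
        .put_object()
        .bucket("demo")
        .key("hello.txt")
        .body(ByteStream::from_static(b"hello rustfs"))
        .send()
        .await?;
    Ok(())
}
```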
## Documentation

For detailed documentation, including configuration options, API references, and advanced usage, please visit
our [Documentation](https://docs.rustfs.com).

## Getting Help

If you have any questions or need assistance, you can:

- Check the [FAQ](https://github.com/rustfs/rustfs/discussions/categories/q-a) for common issues and solutions.
- Join our [GitHub Discussions](https://github.com/rustfs/rustfs/discussions) to ask questions and share your experiences.
- Open an issue on our [GitHub Issues](https://github.com/rustfs/rustfs/issues) page for bug reports or feature requests.

## Links

- [Documentation](https://docs.rustfs.com) - The manual you should read
- [Changelog](https://github.com/rustfs/rustfs/releases) - What we broke and fixed
- [GitHub Discussions](https://github.com/rustfs/rustfs/discussions) - Where the community lives

## Contact

- **Bugs**: [GitHub Issues](https://github.com/rustfs/rustfs/issues)
- **Business**: <hello@rustfs.com>
- **Jobs**: <jobs@rustfs.com>
- **General Discussion**: [GitHub Discussions](https://github.com/rustfs/rustfs/discussions)
- **Contributing**: [CONTRIBUTING.md](CONTRIBUTING.md)

## Contributors

RustFS is a community-driven project, and we appreciate all contributions. Check out
the [Contributors](https://github.com/rustfs/rustfs/graphs/contributors) page to see the amazing people who have helped
make RustFS better.

<a href="https://github.com/rustfs/rustfs/graphs/contributors">
  <img src="https://contrib.rocks/image?repo=rustfs/rustfs" />
</a>

## License

[Apache 2.0](https://opensource.org/licenses/Apache-2.0)

**RustFS** is a trademark of RustFS, Inc. All other trademarks are the property of their respective owners.
rustfs/src/admin/console.rs (new file, 401 lines)
@@ -0,0 +1,401 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// use crate::license::get_license;
use axum::{
    // Router,
    body::Body,
    http::{Response, StatusCode},
    response::IntoResponse,
    // routing::get,
};
// use axum_extra::extract::Host;
// use rustfs_config::{RUSTFS_TLS_CERT, RUSTFS_TLS_KEY};
// use rustfs_utils::net::parse_and_resolve_address;
// use std::io;

use http::Uri;
// use axum::response::Redirect;
// use axum_server::tls_rustls::RustlsConfig;
// use http::{HeaderMap, HeaderName, Uri, header};
use mime_guess::from_path;
use rust_embed::RustEmbed;
// use serde::Serialize;
// use shadow_rs::shadow;
// use std::net::{IpAddr, SocketAddr};
// use std::sync::OnceLock;
// use std::time::Duration;
// use tokio::signal;
// use tower_http::cors::{Any, CorsLayer};
// use tower_http::trace::TraceLayer;
// use tracing::{debug, error, info, instrument};

// shadow!(build);

// const RUSTFS_ADMIN_PREFIX: &str = "/rustfs/admin/v3";

#[derive(RustEmbed)]
#[folder = "$CARGO_MANIFEST_DIR/static"]
struct StaticFiles;

/// Static file handler
pub(crate) async fn static_handler(uri: Uri) -> impl IntoResponse {
    let mut path = uri.path().trim_start_matches('/');
    if path.is_empty() {
        path = "index.html"
    }
    if let Some(file) = StaticFiles::get(path) {
        let mime_type = from_path(path).first_or_octet_stream();
        Response::builder()
            .status(StatusCode::OK)
            .header("Content-Type", mime_type.to_string())
            .body(Body::from(file.data))
            .unwrap()
    } else if let Some(file) = StaticFiles::get("index.html") {
        let mime_type = from_path("index.html").first_or_octet_stream();
        Response::builder()
            .status(StatusCode::OK)
            .header("Content-Type", mime_type.to_string())
            .body(Body::from(file.data))
            .unwrap()
    } else {
        Response::builder()
            .status(StatusCode::NOT_FOUND)
            .body(Body::from("404 Not Found"))
            .unwrap()
    }
}
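// How this is wired up (see the router change later in this diff): the handler above
// is mounted as an axum fallback service, e.g. `Router::new().fallback_service(get(static_handler))`,
// so requests that miss an embedded asset fall back to `index.html` (SPA-style routing),
// and a 404 is returned only when even `index.html` is absent from the embedded bundle.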

// #[derive(Debug, Serialize, Clone)]
// pub(crate) struct Config {
//     #[serde(skip)]
//     port: u16,
//     api: Api,
//     s3: S3,
//     release: Release,
//     license: License,
//     doc: String,
// }

// impl Config {
//     fn new(local_ip: IpAddr, port: u16, version: &str, date: &str) -> Self {
//         Config {
//             port,
//             api: Api {
//                 base_url: format!("http://{local_ip}:{port}/{RUSTFS_ADMIN_PREFIX}"),
//             },
//             s3: S3 {
//                 endpoint: format!("http://{local_ip}:{port}"),
//                 region: "cn-east-1".to_owned(),
//             },
//             release: Release {
//                 version: version.to_string(),
//                 date: date.to_string(),
//             },
//             license: License {
//                 name: "Apache-2.0".to_string(),
//                 url: "https://www.apache.org/licenses/LICENSE-2.0".to_string(),
//             },
//             doc: "https://rustfs.com/docs/".to_string(),
//         }
//     }

//     fn to_json(&self) -> String {
//         serde_json::to_string(self).unwrap_or_default()
//     }

//     pub(crate) fn version_info(&self) -> String {
//         format!(
//             "RELEASE.{}@{} (rust {} {})",
//             self.release.date.clone(),
//             self.release.version.clone().trim_start_matches('@'),
//             build::RUST_VERSION,
//             build::BUILD_TARGET
//         )
//     }

//     pub(crate) fn version(&self) -> String {
//         self.release.version.clone()
//     }

//     pub(crate) fn license(&self) -> String {
//         format!("{} {}", self.license.name.clone(), self.license.url.clone())
//     }

//     pub(crate) fn doc(&self) -> String {
//         self.doc.clone()
//     }
// }

// #[derive(Debug, Serialize, Clone)]
// struct Api {
//     #[serde(rename = "baseURL")]
//     base_url: String,
// }

// #[derive(Debug, Serialize, Clone)]
// struct S3 {
//     endpoint: String,
//     region: String,
// }

// #[derive(Debug, Serialize, Clone)]
// struct Release {
//     version: String,
//     date: String,
// }

// #[derive(Debug, Serialize, Clone)]
// struct License {
//     name: String,
//     url: String,
// }

// pub(crate) static CONSOLE_CONFIG: OnceLock<Config> = OnceLock::new();

// #[allow(clippy::const_is_empty)]
// pub(crate) fn init_console_cfg(local_ip: IpAddr, port: u16) {
//     CONSOLE_CONFIG.get_or_init(|| {
//         let ver = {
//             if !build::TAG.is_empty() {
//                 build::TAG.to_string()
//             } else if !build::SHORT_COMMIT.is_empty() {
//                 format!("@{}", build::SHORT_COMMIT)
//             } else {
//                 build::PKG_VERSION.to_string()
//             }
//         };

//         Config::new(local_ip, port, ver.as_str(), build::COMMIT_DATE_3339)
//     });
// }

// // fn is_socket_addr_or_ip_addr(host: &str) -> bool {
// //     host.parse::<SocketAddr>().is_ok() || host.parse::<IpAddr>().is_ok()
// // }

// #[allow(dead_code)]
// async fn license_handler() -> impl IntoResponse {
//     let license = get_license().unwrap_or_default();

//     Response::builder()
//         .header("content-type", "application/json")
//         .status(StatusCode::OK)
//         .body(Body::from(serde_json::to_string(&license).unwrap_or_default()))
//         .unwrap()
// }

// fn _is_private_ip(ip: IpAddr) -> bool {
//     match ip {
//         IpAddr::V4(ip) => {
//             let octets = ip.octets();
//             // 10.0.0.0/8
//             octets[0] == 10 ||
//             // 172.16.0.0/12
//             (octets[0] == 172 && (octets[1] >= 16 && octets[1] <= 31)) ||
//             // 192.168.0.0/16
//             (octets[0] == 192 && octets[1] == 168)
//         }
//         IpAddr::V6(_) => false,
//     }
// }

// #[allow(clippy::const_is_empty)]
// #[allow(dead_code)]
// #[instrument(fields(host))]
// async fn config_handler(uri: Uri, Host(host): Host, headers: HeaderMap) -> impl IntoResponse {
//     // Get the scheme from the headers or use the URI scheme
//     let scheme = headers
//         .get(HeaderName::from_static("x-forwarded-proto"))
//         .and_then(|value| value.to_str().ok())
//         .unwrap_or_else(|| uri.scheme().map(|s| s.as_str()).unwrap_or("http"));

//     // Print logs for debugging
//     info!("Scheme: {}, ", scheme);

//     // Get the host from the uri and use the value of the host extractor if it doesn't have one
//     let host = uri.host().unwrap_or(host.as_str());

//     let host = if let Ok(socket_addr) = host.parse::<SocketAddr>() {
//         // Successfully parsed, it's in IP:Port format.
//         // For IPv6, we need to enclose it in brackets to form a valid URL.
//         let ip = socket_addr.ip();
//         if ip.is_ipv6() { format!("[{ip}]") } else { format!("{ip}") }
//     } else {
//         // Failed to parse, it might be a domain name or a bare IP, use it as is.
//         host.to_string()
//     };

//     // Make a copy of the current configuration
//     let mut cfg = match CONSOLE_CONFIG.get() {
//         Some(cfg) => cfg.clone(),
//         None => {
//             error!("Console configuration not initialized");
//             return Response::builder()
//                 .status(StatusCode::INTERNAL_SERVER_ERROR)
//                 .body(Body::from("Console configuration not initialized"))
//                 .unwrap();
//         }
//     };

//     let url = format!("{}://{}:{}", scheme, host, cfg.port);
//     cfg.api.base_url = format!("{url}{RUSTFS_ADMIN_PREFIX}");
//     cfg.s3.endpoint = url;

//     Response::builder()
//         .header("content-type", "application/json")
//         .status(StatusCode::OK)
//         .body(Body::from(cfg.to_json()))
//         .unwrap()
// }

// pub fn register_router() -> Router {
//     Router::new()
//         // .route("/license", get(license_handler))
//         // .route("/config.json", get(config_handler))
//         .fallback_service(get(static_handler))
// }

// #[allow(dead_code)]
// pub async fn start_static_file_server(
//     addrs: &str,
//     local_ip: IpAddr,
//     access_key: &str,
//     secret_key: &str,
//     tls_path: Option<String>,
// ) {
//     // Configure CORS
//     let cors = CorsLayer::new()
//         .allow_origin(Any) // In the production environment, we recommend that you specify a specific domain name
//         .allow_methods([http::Method::GET, http::Method::POST])
//         .allow_headers([header::CONTENT_TYPE]);

//     // Create a route
//     let app = register_router()
//         .layer(cors)
//         .layer(tower_http::compression::CompressionLayer::new().gzip(true).deflate(true))
//         .layer(TraceLayer::new_for_http());

//     let server_addr = parse_and_resolve_address(addrs).expect("Failed to parse socket address");
//     let server_port = server_addr.port();
//     let server_address = server_addr.to_string();

//     info!(
//         "WebUI: http://{}:{} http://127.0.0.1:{} http://{}",
//         local_ip, server_port, server_port, server_address
//     );
//     info!("   RootUser: {}", access_key);
//     info!("   RootPass: {}", secret_key);

//     // Check and start the HTTPS/HTTP server
//     match start_server(server_addr, tls_path, app.clone()).await {
//         Ok(_) => info!("Server shutdown gracefully"),
//         Err(e) => error!("Server error: {}", e),
//     }
// }

// async fn start_server(server_addr: SocketAddr, tls_path: Option<String>, app: Router) -> io::Result<()> {
//     let tls_path = tls_path.unwrap_or_default();
//     let key_path = format!("{tls_path}/{RUSTFS_TLS_KEY}");
//     let cert_path = format!("{tls_path}/{RUSTFS_TLS_CERT}");
//     let handle = axum_server::Handle::new();
//     // create a signal off listening task
//     let handle_clone = handle.clone();
//     tokio::spawn(async move {
//         shutdown_signal().await;
//         info!("Initiating graceful shutdown...");
//         handle_clone.graceful_shutdown(Some(Duration::from_secs(10)));
//     });

//     let has_tls_certs = tokio::try_join!(tokio::fs::metadata(&key_path), tokio::fs::metadata(&cert_path)).is_ok();
//     info!("Console TLS certs: {:?}", has_tls_certs);
//     if has_tls_certs {
//         info!("Found TLS certificates, starting with HTTPS");
//         match RustlsConfig::from_pem_file(cert_path, key_path).await {
//             Ok(config) => {
//                 info!("Starting HTTPS server...");
//                 axum_server::bind_rustls(server_addr, config)
//                     .handle(handle.clone())
//                     .serve(app.into_make_service())
//                     .await
//                     .map_err(io::Error::other)?;

//                 info!("HTTPS server running on https://{}", server_addr);

//                 Ok(())
//             }
//             Err(e) => {
//                 error!("Failed to create TLS config: {}", e);
//                 start_http_server(server_addr, app, handle).await
//             }
//         }
//     } else {
//         info!("TLS certificates not found at {} and {}", key_path, cert_path);
//         start_http_server(server_addr, app, handle).await
//     }
// }

// #[allow(dead_code)]
// /// 308 redirect for HTTP to HTTPS
// fn redirect_to_https(https_port: u16) -> Router {
//     Router::new().route(
//         "/*path",
//         get({
//             move |uri: Uri, req: http::Request<Body>| async move {
//                 let host = req
//                     .headers()
//                     .get("host")
//                     .map_or("localhost", |h| h.to_str().unwrap_or("localhost"));
//                 let path = uri.path_and_query().map(|pq| pq.as_str()).unwrap_or("");
//                 let https_url = format!("https://{host}:{https_port}{path}");
//                 Redirect::permanent(&https_url)
//             }
//         }),
//     )
// }

// async fn start_http_server(addr: SocketAddr, app: Router, handle: axum_server::Handle) -> io::Result<()> {
//     debug!("Starting HTTP server...");
//     axum_server::bind(addr)
//         .handle(handle)
//         .serve(app.into_make_service())
//         .await
//         .map_err(io::Error::other)
// }

// async fn shutdown_signal() {
//     let ctrl_c = async {
//         signal::ctrl_c().await.expect("failed to install Ctrl+C handler");
//     };

//     #[cfg(unix)]
//     let terminate = async {
//         signal::unix::signal(signal::unix::SignalKind::terminate())
//             .expect("failed to install signal handler")
//             .recv()
//             .await;
//     };

//     #[cfg(not(unix))]
//     let terminate = std::future::pending::<()>();

//     tokio::select! {
//         _ = ctrl_c => {
//             info!("shutdown_signal ctrl_c")
//         },
//         _ = terminate => {
//             info!("shutdown_signal terminate")
//         },
//     }
// }
@@ -31,6 +31,7 @@ use rustfs_ecstore::cmd::bucket_targets::{self, GLOBAL_Bucket_Target_Sys};
use rustfs_ecstore::error::StorageError;
use rustfs_ecstore::global::GLOBAL_ALlHealState;
use rustfs_ecstore::global::get_global_action_cred;
// use rustfs_ecstore::heal::data_usage::load_data_usage_from_backend;
use rustfs_ecstore::heal::data_usage::load_data_usage_from_backend;
use rustfs_ecstore::heal::heal_commands::HealOpts;
use rustfs_ecstore::heal::heal_ops::new_heal_sequence;
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod console;
pub mod handlers;
pub mod router;
mod rpc;
@@ -32,8 +33,8 @@ use s3s::route::S3Route;

const ADMIN_PREFIX: &str = "/rustfs/admin";

pub fn make_admin_route() -> std::io::Result<impl S3Route> {
    let mut r: S3Router<AdminOperation> = S3Router::new();
pub fn make_admin_route(console_enabled: bool) -> std::io::Result<impl S3Route> {
    let mut r: S3Router<AdminOperation> = S3Router::new(console_enabled);

    // 1
    r.insert(Method::POST, "/", AdminOperation(&sts::AssumeRoleHandle {}))?;
@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use axum::routing::get;
use hyper::HeaderMap;
use hyper::Method;
use hyper::StatusCode;
@@ -27,20 +28,41 @@ use s3s::S3Result;
use s3s::header;
use s3s::route::S3Route;
use s3s::s3_error;
use tower::Service;
use tracing::error;

use super::ADMIN_PREFIX;
use super::rpc::RPC_PREFIX;
use crate::admin::ADMIN_PREFIX;
use crate::admin::console;
use crate::admin::rpc::RPC_PREFIX;

const CONSOLE_PREFIX: &str = "/rustfs/console";

pub struct S3Router<T> {
    router: Router<T>,
    console_enabled: bool,
    console_router: Option<axum::routing::RouterIntoService<Body>>,
}

impl<T: Operation> S3Router<T> {
    pub fn new() -> Self {
    pub fn new(console_enabled: bool) -> Self {
        let router = Router::new();

        Self { router }
        let console_router = if console_enabled {
            Some(
                axum::Router::new()
                    .nest(CONSOLE_PREFIX, axum::Router::new().fallback_service(get(console::static_handler)))
                    .fallback_service(get(console::static_handler))
                    .into_service::<Body>(),
            )
        } else {
            None
        };

        Self {
            router,
            console_enabled,
            console_router,
        }
    }

    pub fn insert(&mut self, method: Method, path: &str, operation: T) -> std::io::Result<()> {
@@ -60,7 +82,7 @@ impl<T: Operation> S3Router<T> {

impl<T: Operation> Default for S3Router<T> {
    fn default() -> Self {
        Self::new()
        Self::new(false)
    }
}

@@ -79,10 +101,23 @@ where
    }
}

        uri.path().starts_with(ADMIN_PREFIX) || uri.path().starts_with(RPC_PREFIX)
        uri.path().starts_with(ADMIN_PREFIX) || uri.path().starts_with(RPC_PREFIX) || uri.path().starts_with(CONSOLE_PREFIX)
    }

    async fn call(&self, req: S3Request<Body>) -> S3Result<S3Response<Body>> {
        if self.console_enabled && req.uri.path().starts_with(CONSOLE_PREFIX) {
            if let Some(console_router) = &self.console_router {
                let mut console_router = console_router.clone();
                let req = convert_request(req);
                let result = console_router.call(req).await;
                return match result {
                    Ok(resp) => Ok(convert_response(resp)),
                    Err(e) => Err(s3_error!(InternalError, "{}", e)),
                };
            }
            return Err(s3_error!(InternalError, "console is not enabled"));
        }

        let uri = format!("{}|{}", &req.method, req.uri.path());

        // warn!("get uri {}", &uri);
@@ -99,6 +134,10 @@ where

    // check_access before call
    async fn check_access(&self, req: &mut S3Request<Body>) -> S3Result<()> {
        if self.console_enabled && req.uri.path().starts_with(CONSOLE_PREFIX) {
            return Ok(());
        }

        // Check RPC signature verification
        if req.uri.path().starts_with(RPC_PREFIX) {
            // Skip signature verification for HEAD requests (health checks)
@@ -134,3 +173,34 @@ impl Operation for AdminOperation {
        self.0.call(req, params).await
    }
}

#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct Extra {
    pub credentials: Option<s3s::auth::Credentials>,
    pub region: Option<String>,
    pub service: Option<String>,
}

fn convert_request(req: S3Request<Body>) -> http::Request<Body> {
    let (mut parts, _) = http::Request::new(Body::empty()).into_parts();
    parts.method = req.method;
    parts.uri = req.uri;
    parts.headers = req.headers;
    parts.extensions = req.extensions;
    parts.extensions.insert(Extra {
        credentials: req.credentials,
        region: req.region,
        service: req.service,
    });
    http::Request::from_parts(parts, req.input)
}

fn convert_response(resp: http::Response<axum::body::Body>) -> S3Response<Body> {
    let (parts, body) = resp.into_parts();
    let mut s3_resp = S3Response::new(Body::http_body_unsync(body));
    s3_resp.status = Some(parts.status);
    s3_resp.headers = parts.headers;
    s3_resp.extensions = parts.extensions;
    s3_resp
}
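Taken together, the routing hunks above give request dispatch a fixed order: the console prefix is tried first (when
enabled, bypassing S3 signature checks), then the admin/RPC prefixes, and everything else falls through to the S3 API.
A condensed, hypothetical sketch of that order follows; the `/rustfs/rpc` prefix is an assumption, since only
`/rustfs/console` and `/rustfs/admin` appear literally in the hunks.

```rust
/// Hypothetical condensation of the dispatch order implied by the hunks
/// above; not the literal crate code.
fn dispatch(path: &str, console_enabled: bool) -> &'static str {
    if console_enabled && path.starts_with("/rustfs/console") {
        "serve embedded console assets (no S3 auth check)"
    } else if path.starts_with("/rustfs/admin") || path.starts_with("/rustfs/rpc") {
        "dispatch to admin/RPC handlers after signature checks"
    } else {
        "fall through to the S3 API"
    }
}

fn main() {
    assert_eq!(dispatch("/rustfs/console/index.html", true), "serve embedded console assets (no S3 auth check)");
    assert_eq!(dispatch("/bucket/key", true), "fall through to the S3 API");
}
```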
@@ -86,6 +86,9 @@ pub struct Opt {

    #[arg(long, env = "RUSTFS_LICENSE")]
    pub license: Option<String>,

    #[arg(long, env = "RUSTFS_REGION")]
    pub region: Option<String>,
}

// lazy_static::lazy_static! {
@@ -1,372 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::license::get_license;
use axum::{
    Router,
    body::Body,
    http::{Response, StatusCode},
    response::IntoResponse,
    routing::get,
};
use axum_extra::extract::Host;
use rustfs_config::{RUSTFS_TLS_CERT, RUSTFS_TLS_KEY};
use rustfs_utils::net::parse_and_resolve_address;
use std::io;

use axum::response::Redirect;
use axum_server::tls_rustls::RustlsConfig;
use http::{Uri, header};
use mime_guess::from_path;
use rust_embed::RustEmbed;
use serde::Serialize;
use shadow_rs::shadow;
use std::net::{IpAddr, SocketAddr};
use std::sync::OnceLock;
use std::time::Duration;
use tokio::signal;
use tower_http::cors::{Any, CorsLayer};
use tower_http::trace::TraceLayer;
use tracing::{debug, error, info, instrument};

shadow!(build);

const RUSTFS_ADMIN_PREFIX: &str = "/rustfs/admin/v3";

#[derive(RustEmbed)]
#[folder = "$CARGO_MANIFEST_DIR/static"]
struct StaticFiles;

/// Static file handler
async fn static_handler(uri: Uri) -> impl IntoResponse {
    let mut path = uri.path().trim_start_matches('/');
    if path.is_empty() {
        path = "index.html"
    }
    if let Some(file) = StaticFiles::get(path) {
        let mime_type = from_path(path).first_or_octet_stream();
        Response::builder()
            .status(StatusCode::OK)
            .header("Content-Type", mime_type.to_string())
            .body(Body::from(file.data))
            .unwrap()
    } else if let Some(file) = StaticFiles::get("index.html") {
        let mime_type = from_path("index.html").first_or_octet_stream();
        Response::builder()
            .status(StatusCode::OK)
            .header("Content-Type", mime_type.to_string())
            .body(Body::from(file.data))
            .unwrap()
    } else {
        Response::builder()
            .status(StatusCode::NOT_FOUND)
            .body(Body::from("404 Not Found"))
            .unwrap()
    }
}

#[derive(Debug, Serialize, Clone)]
pub(crate) struct Config {
    #[serde(skip)]
    port: u16,
    api: Api,
    s3: S3,
    release: Release,
    license: License,
    doc: String,
}

impl Config {
    fn new(local_ip: IpAddr, port: u16, version: &str, date: &str) -> Self {
        Config {
            port,
            api: Api {
                base_url: format!("http://{local_ip}:{port}/{RUSTFS_ADMIN_PREFIX}"),
            },
            s3: S3 {
                endpoint: format!("http://{local_ip}:{port}"),
                region: "cn-east-1".to_owned(),
            },
            release: Release {
                version: version.to_string(),
                date: date.to_string(),
            },
            license: License {
                name: "Apache-2.0".to_string(),
                url: "https://www.apache.org/licenses/LICENSE-2.0".to_string(),
            },
            doc: "https://rustfs.com/docs/".to_string(),
        }
    }

    fn to_json(&self) -> String {
        serde_json::to_string(self).unwrap_or_default()
    }

    pub(crate) fn version_info(&self) -> String {
        format!(
            "RELEASE.{}@{} (rust {} {})",
            self.release.date.clone(),
            self.release.version.clone().trim_start_matches('@'),
            build::RUST_VERSION,
            build::BUILD_TARGET
        )
    }

    pub(crate) fn version(&self) -> String {
        self.release.version.clone()
    }

    pub(crate) fn license(&self) -> String {
        format!("{} {}", self.license.name.clone(), self.license.url.clone())
    }

    pub(crate) fn doc(&self) -> String {
        self.doc.clone()
    }
}

#[derive(Debug, Serialize, Clone)]
struct Api {
    #[serde(rename = "baseURL")]
    base_url: String,
}

#[derive(Debug, Serialize, Clone)]
struct S3 {
    endpoint: String,
    region: String,
}

#[derive(Debug, Serialize, Clone)]
struct Release {
    version: String,
    date: String,
}

#[derive(Debug, Serialize, Clone)]
struct License {
    name: String,
    url: String,
}

pub(crate) static CONSOLE_CONFIG: OnceLock<Config> = OnceLock::new();

#[allow(clippy::const_is_empty)]
pub(crate) fn init_console_cfg(local_ip: IpAddr, port: u16) {
    CONSOLE_CONFIG.get_or_init(|| {
        let ver = {
            if !build::TAG.is_empty() {
                build::TAG.to_string()
            } else if !build::SHORT_COMMIT.is_empty() {
                format!("@{}", build::SHORT_COMMIT)
            } else {
                build::PKG_VERSION.to_string()
            }
        };

        Config::new(local_ip, port, ver.as_str(), build::COMMIT_DATE_3339)
    });
}

// fn is_socket_addr_or_ip_addr(host: &str) -> bool {
//     host.parse::<SocketAddr>().is_ok() || host.parse::<IpAddr>().is_ok()
// }

async fn license_handler() -> impl IntoResponse {
    let license = get_license().unwrap_or_default();

    Response::builder()
        .header("content-type", "application/json")
        .status(StatusCode::OK)
        .body(Body::from(serde_json::to_string(&license).unwrap_or_default()))
        .unwrap()
}

fn _is_private_ip(ip: IpAddr) -> bool {
    match ip {
        IpAddr::V4(ip) => {
            let octets = ip.octets();
            // 10.0.0.0/8
            octets[0] == 10 ||
            // 172.16.0.0/12
            (octets[0] == 172 && (octets[1] >= 16 && octets[1] <= 31)) ||
            // 192.168.0.0/16
            (octets[0] == 192 && octets[1] == 168)
        }
        IpAddr::V6(_) => false,
    }
}

#[allow(clippy::const_is_empty)]
#[instrument(fields(host))]
async fn config_handler(uri: Uri, Host(host): Host) -> impl IntoResponse {
    let scheme = uri.scheme().map(|s| s.as_str()).unwrap_or("http");

    // Get the host from the uri and use the value of the host extractor if it doesn't have one
    let host = uri.host().unwrap_or(host.as_str());

    let host = if host.contains(':') {
        let (host, _) = host.split_once(':').unwrap_or((host, "80"));
        host
    } else {
        host
    };

    // Make a copy of the current configuration
    let mut cfg = CONSOLE_CONFIG.get().unwrap().clone();

    let url = format!("{}://{}:{}", scheme, host, cfg.port);
    cfg.api.base_url = format!("{url}{RUSTFS_ADMIN_PREFIX}");
    cfg.s3.endpoint = url;

    Response::builder()
        .header("content-type", "application/json")
        .status(StatusCode::OK)
        .body(Body::from(cfg.to_json()))
        .unwrap()
}

pub async fn start_static_file_server(
    addrs: &str,
    local_ip: IpAddr,
    access_key: &str,
    secret_key: &str,
    tls_path: Option<String>,
) {
    // Configure CORS
    let cors = CorsLayer::new()
        .allow_origin(Any) // In the production environment, we recommend that you specify a specific domain name
        .allow_methods([http::Method::GET, http::Method::POST])
        .allow_headers([header::CONTENT_TYPE]);
    // Create a route
    let app = Router::new()
        .route("/license", get(license_handler))
        .route("/config.json", get(config_handler))
        .fallback_service(get(static_handler))
        .layer(cors)
        .layer(tower_http::compression::CompressionLayer::new().gzip(true).deflate(true))
        .layer(TraceLayer::new_for_http());

    let server_addr = parse_and_resolve_address(addrs).expect("Failed to parse socket address");
    let server_port = server_addr.port();
    let server_address = server_addr.to_string();

    info!(
        "WebUI: http://{}:{} http://127.0.0.1:{} http://{}",
        local_ip, server_port, server_port, server_address
    );
    info!("   RootUser: {}", access_key);
    info!("   RootPass: {}", secret_key);

    // Check and start the HTTPS/HTTP server
    match start_server(server_addr, tls_path, app.clone()).await {
        Ok(_) => info!("Server shutdown gracefully"),
        Err(e) => error!("Server error: {}", e),
    }
}

async fn start_server(server_addr: SocketAddr, tls_path: Option<String>, app: Router) -> io::Result<()> {
    let tls_path = tls_path.unwrap_or_default();
    let key_path = format!("{tls_path}/{RUSTFS_TLS_KEY}");
    let cert_path = format!("{tls_path}/{RUSTFS_TLS_CERT}");
    let handle = axum_server::Handle::new();
    // create a signal off listening task
    let handle_clone = handle.clone();
    tokio::spawn(async move {
        shutdown_signal().await;
        info!("Initiating graceful shutdown...");
        handle_clone.graceful_shutdown(Some(Duration::from_secs(10)));
    });

    let has_tls_certs = tokio::try_join!(tokio::fs::metadata(&key_path), tokio::fs::metadata(&cert_path)).is_ok();
    info!("Console TLS certs: {:?}", has_tls_certs);
    if has_tls_certs {
        info!("Found TLS certificates, starting with HTTPS");
        match RustlsConfig::from_pem_file(cert_path, key_path).await {
            Ok(config) => {
                info!("Starting HTTPS server...");
                axum_server::bind_rustls(server_addr, config)
                    .handle(handle.clone())
                    .serve(app.into_make_service())
                    .await
                    .map_err(io::Error::other)?;

                info!("HTTPS server running on https://{}", server_addr);

                Ok(())
            }
            Err(e) => {
                error!("Failed to create TLS config: {}", e);
                start_http_server(server_addr, app, handle).await
            }
        }
    } else {
        info!("TLS certificates not found at {} and {}", key_path, cert_path);
        start_http_server(server_addr, app, handle).await
    }
}

#[allow(dead_code)]
/// 308 redirect for HTTP to HTTPS
fn redirect_to_https(https_port: u16) -> Router {
    Router::new().route(
        "/*path",
        get({
            move |uri: Uri, req: http::Request<Body>| async move {
                let host = req
                    .headers()
                    .get("host")
                    .map_or("localhost", |h| h.to_str().unwrap_or("localhost"));
                let path = uri.path_and_query().map(|pq| pq.as_str()).unwrap_or("");
                let https_url = format!("https://{host}:{https_port}{path}");
                Redirect::permanent(&https_url)
            }
        }),
    )
}

async fn start_http_server(addr: SocketAddr, app: Router, handle: axum_server::Handle) -> io::Result<()> {
    debug!("Starting HTTP server...");
    axum_server::bind(addr)
        .handle(handle)
        .serve(app.into_make_service())
        .await
        .map_err(io::Error::other)
}

async fn shutdown_signal() {
    let ctrl_c = async {
        signal::ctrl_c().await.expect("failed to install Ctrl+C handler");
    };

    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler")
            .recv()
            .await;
    };

    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();

    tokio::select! {
        _ = ctrl_c => {
            info!("shutdown_signal ctrl_c")
        },
        _ = terminate => {
            info!("shutdown_signal terminate")
        },
    }
}
@@ -1,78 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use rustfs_config::DEFAULT_DELIMITER;
use rustfs_ecstore::config::GLOBAL_ServerConfig;
use tracing::{error, info, instrument};

#[instrument]
pub(crate) async fn init_event_notifier() {
    info!("Initializing event notifier...");

    // 1. Get the global configuration loaded by ecstore
    let server_config = match GLOBAL_ServerConfig.get() {
        Some(config) => config.clone(), // Clone the config to pass ownership
        None => {
            error!("Event notifier initialization failed: Global server config not loaded.");
            return;
        }
    };

    info!("Global server configuration loaded successfully. config: {:?}", server_config);
    // 2. Check if the notify subsystem exists in the configuration, and skip initialization if it doesn't
    if server_config
        .get_value(rustfs_config::notify::NOTIFY_MQTT_SUB_SYS, DEFAULT_DELIMITER)
        .is_none()
        || server_config
            .get_value(rustfs_config::notify::NOTIFY_WEBHOOK_SUB_SYS, DEFAULT_DELIMITER)
            .is_none()
    {
        info!("'notify' subsystem not configured, skipping event notifier initialization.");
        return;
    }

    info!("Event notifier configuration found, proceeding with initialization.");

    // 3. Initialize the notification system asynchronously with a global configuration
    // Put it into a separate task to avoid blocking the main initialization process
    tokio::spawn(async move {
        if let Err(e) = rustfs_notify::initialize(server_config).await {
            error!("Failed to initialize event notifier system: {}", e);
        } else {
            info!("Event notifier system initialized successfully.");
        }
    });
}

/// Shuts down the event notifier system gracefully
pub async fn shutdown_event_notifier() {
    info!("Shutting down event notifier system...");

    if !rustfs_notify::is_notification_system_initialized() {
        info!("Event notifier system is not initialized, nothing to shut down.");
        return;
    }

    let system = match rustfs_notify::notification_system() {
        Some(sys) => sys,
        None => {
            error!("Event notifier system is not initialized.");
            return;
        }
    };

    // Call the shutdown function from the rustfs_notify module
    system.shutdown().await;
    info!("Event notifier system shut down successfully.");
}
@@ -20,9 +20,7 @@ use std::time::UNIX_EPOCH;
use tracing::error;
use tracing::info;

lazy_static::lazy_static! {
    static ref LICENSE: OnceLock<Token> = OnceLock::new();
}
static LICENSE: OnceLock<Token> = OnceLock::new();

/// Initialize the license
pub fn init_license(license: Option<String>) {
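The hunk above drops the `lazy_static` wrapper in favor of a plain `std::sync::OnceLock`, which needs no macro or
extra crate. A minimal sketch of the resulting pattern, where the `Token` type is a stand-in for the real license
token type:

```rust
use std::sync::OnceLock;

#[derive(Debug)]
struct Token(String); // stand-in for the real license token type

static LICENSE: OnceLock<Token> = OnceLock::new();

fn init_license(license: Option<String>) {
    if let Some(raw) = license {
        // set() returns Err if already initialized, so initialization stays one-shot.
        let _ = LICENSE.set(Token(raw));
    }
}

fn get_license() -> Option<&'static Token> {
    LICENSE.get()
}

fn main() {
    init_license(Some("demo-license".to_string()));
    assert!(get_license().is_some());
}
```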
@@ -15,98 +15,60 @@
mod admin;
mod auth;
mod config;
mod console;
mod error;
mod event;
// mod grpc;
pub mod license;
mod logging;
mod server;
mod service;
mod storage;
mod update_checker;
mod update;
mod version;

use crate::auth::IAMAuth;
use crate::console::{CONSOLE_CONFIG, init_console_cfg};
use rustfs_config::DEFAULT_DELIMITER;
use rustfs_ecstore::config::GLOBAL_ServerConfig;
// Ensure the correct path for parse_license is imported
use crate::event::shutdown_event_notifier;
use crate::server::{SHUTDOWN_TIMEOUT, ServiceState, ServiceStateManager, ShutdownSignal, wait_for_shutdown};
use bytes::Bytes;
use crate::server::{SHUTDOWN_TIMEOUT, ServiceState, ServiceStateManager, ShutdownSignal, start_http_server, wait_for_shutdown};
use chrono::Datelike;
use clap::Parser;
use http::{HeaderMap, Request as HttpRequest, Response};
use hyper_util::server::graceful::GracefulShutdown;
use hyper_util::{
    rt::{TokioExecutor, TokioIo},
    server::conn::auto::Builder as ConnBuilder,
    service::TowerToHyperService,
};
use license::init_license;
use rustfs_ahm::scanner::data_scanner::ScannerConfig;
use rustfs_ahm::{Scanner, create_ahm_services_cancel_token, shutdown_ahm_services};
use rustfs_common::globals::set_global_addr;
use rustfs_config::{DEFAULT_ACCESS_KEY, DEFAULT_SECRET_KEY, RUSTFS_TLS_CERT, RUSTFS_TLS_KEY};
use rustfs_ecstore::bucket::metadata_sys::init_bucket_metadata_sys;
use rustfs_ecstore::cmd::bucket_replication::init_bucket_replication_pool;
use rustfs_ecstore::config as ecconfig;
use rustfs_ecstore::config::GLOBAL_ConfigSys;
use rustfs_ecstore::heal::background_heal_ops::init_auto_heal;
use rustfs_ecstore::rpc::make_server;
use rustfs_ecstore::store_api::BucketOptions;
use rustfs_ecstore::{
    StorageAPI, endpoints::EndpointServerPools, global::set_global_rustfs_port, heal::data_scanner::init_data_scanner,
    notification_sys::new_global_notification_sys, set_global_endpoints, store::ECStore, store::init_local_disks,
    StorageAPI,
    endpoints::EndpointServerPools,
    global::{set_global_rustfs_port, shutdown_background_services},
    notification_sys::new_global_notification_sys,
    set_global_endpoints,
    store::ECStore,
    store::init_local_disks,
    update_erasure_type,
};
use rustfs_iam::init_iam_sys;
use rustfs_obs::{SystemObserver, init_obs, set_global_guard};
use rustfs_protos::proto_gen::node_service::node_service_server::NodeServiceServer;
use rustfs_obs::{init_obs, set_global_guard};
use rustfs_utils::net::parse_and_resolve_address;
use rustls::ServerConfig;
use s3s::service::S3Service;
use s3s::{host::MultiDomain, service::S3ServiceBuilder};
use service::hybrid;
use socket2::SockRef;
use std::io::{Error, Result};
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
use tokio::net::{TcpListener, TcpStream};
#[cfg(unix)]
use tokio::signal::unix::{SignalKind, signal};
use tokio_rustls::TlsAcceptor;
use tonic::{Request, Status, metadata::MetadataValue};
use tower::ServiceBuilder;
use tower_http::catch_panic::CatchPanicLayer;
use tower_http::cors::CorsLayer;
use tower_http::trace::TraceLayer;
use tracing::{Span, debug, error, info, instrument, warn};

const MI_B: usize = 1024 * 1024;
use tracing::{debug, error, info, instrument, warn};
#[cfg(all(target_os = "linux", target_env = "gnu"))]
#[global_allocator]
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;

#[allow(clippy::result_large_err)]
fn check_auth(req: Request<()>) -> std::result::Result<Request<()>, Status> {
    let token: MetadataValue<_> = "rustfs rpc".parse().unwrap();

    match req.metadata().get("authorization") {
        Some(t) if token == t => Ok(req),
        _ => Err(Status::unauthenticated("No valid auth token")),
    }
}

#[instrument]
fn print_server_info() {
    let cfg = CONSOLE_CONFIG.get().unwrap();
    let current_year = chrono::Utc::now().year();

    // Use custom macros to print server information
    info!("RustFS Object Storage Server");
    info!("Copyright: 2024-{} RustFS, Inc", current_year);
    info!("License: {}", cfg.license());
    info!("Version: {}", cfg.version_info());
    info!("Docs: {}", cfg.doc());
    info!("License: Apache-2.0 https://www.apache.org/licenses/LICENSE-2.0");
    info!("Version: {}", version::get_version());
    info!("Docs: https://rustfs.com/docs/");
}
#[tokio::main]
@@ -127,53 +89,14 @@ async fn main() -> Result<()> {
    run(opt).await
}

/// Sets up the TLS acceptor if certificates are available.
#[instrument(skip(tls_path))]
async fn setup_tls_acceptor(tls_path: &str) -> Result<Option<TlsAcceptor>> {
    if tls_path.is_empty() || tokio::fs::metadata(tls_path).await.is_err() {
        debug!("TLS path is not provided or does not exist, starting with HTTP");
        return Ok(None);
    }

    debug!("Found TLS directory, checking for certificates");

    // 1. Try to load all certificates from the directory (multi-cert support)
    if let Ok(cert_key_pairs) = rustfs_utils::load_all_certs_from_directory(tls_path) {
        if !cert_key_pairs.is_empty() {
            debug!("Found {} certificates, creating multi-cert resolver", cert_key_pairs.len());
            let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
            let mut server_config = ServerConfig::builder()
                .with_no_client_auth()
                .with_cert_resolver(Arc::new(rustfs_utils::create_multi_cert_resolver(cert_key_pairs)?));
            server_config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec(), b"http/1.0".to_vec()];
            return Ok(Some(TlsAcceptor::from(Arc::new(server_config))));
        }
    }

    // 2. Fallback to legacy single certificate mode
    let key_path = format!("{tls_path}/{RUSTFS_TLS_KEY}");
    let cert_path = format!("{tls_path}/{RUSTFS_TLS_CERT}");
    if tokio::try_join!(tokio::fs::metadata(&key_path), tokio::fs::metadata(&cert_path)).is_ok() {
        debug!("Found legacy single TLS certificate, starting with HTTPS");
        let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
        let certs = rustfs_utils::load_certs(&cert_path).map_err(|e| rustfs_utils::certs_error(e.to_string()))?;
        let key = rustfs_utils::load_private_key(&key_path).map_err(|e| rustfs_utils::certs_error(e.to_string()))?;
        let mut server_config = ServerConfig::builder()
            .with_no_client_auth()
            .with_single_cert(certs, key)
            .map_err(|e| rustfs_utils::certs_error(e.to_string()))?;
        server_config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec(), b"http/1.0".to_vec()];
        return Ok(Some(TlsAcceptor::from(Arc::new(server_config))));
    }

    debug!("No valid TLS certificates found in the directory, starting with HTTP");
    Ok(None)
}
#[instrument(skip(opt))]
async fn run(opt: config::Opt) -> Result<()> {
    debug!("opt: {:?}", &opt);

    if let Some(region) = &opt.region {
        rustfs_ecstore::global::set_global_region(region.clone());
    }

    let server_addr = parse_and_resolve_address(opt.address.as_str()).map_err(Error::other)?;
    let server_port = server_addr.port();
    let server_address = server_addr.to_string();
@@ -185,11 +108,7 @@ async fn run(opt: config::Opt) -> Result<()> {

    set_global_rustfs_port(server_port);

    // The listening address and port are obtained from the parameters
    let listener = TcpListener::bind(server_address.clone()).await?;
    // Obtain the listener address
    let local_addr: SocketAddr = listener.local_addr()?;
    let local_ip = rustfs_utils::get_local_ip().ok_or(local_addr.ip()).unwrap();
    set_global_addr(&opt.address).await;

    // For RPC
    let (endpoint_pools, setup_type) =
@@ -208,19 +127,6 @@ async fn run(opt: config::Opt) -> Result<()> {
        }
    }

    // Detailed endpoint information (showing all API endpoints)
    let api_endpoints = format!("http://{local_ip}:{server_port}");
    let localhost_endpoint = format!("http://127.0.0.1:{server_port}");
    info!("   API: {} {}", api_endpoints, localhost_endpoint);
    info!("   RootUser: {}", opt.access_key.clone());
    info!("   RootPass: {}", opt.secret_key.clone());
    if DEFAULT_ACCESS_KEY.eq(&opt.access_key) && DEFAULT_SECRET_KEY.eq(&opt.secret_key) {
        warn!(
            "Detected default credentials '{}:{}', we recommend that you change these values with 'RUSTFS_ACCESS_KEY' and 'RUSTFS_SECRET_KEY' environment variables",
            DEFAULT_ACCESS_KEY, DEFAULT_SECRET_KEY
        );
    }

    for (i, eps) in endpoint_pools.as_ref().iter().enumerate() {
        info!(
            "created endpoints {}, set_count:{}, drives_per_set: {}, cmd: {:?}",
@@ -232,7 +138,11 @@ async fn run(opt: config::Opt) -> Result<()> {
        }
    }

    set_global_addr(&opt.address).await;
    let state_manager = ServiceStateManager::new();
    // Update service status to Starting
    state_manager.update(ServiceState::Starting);

    let shutdown_tx = start_http_server(&opt, state_manager.clone()).await?;

    set_global_endpoints(endpoint_pools.as_ref().clone());
    update_erasure_type(setup_type).await;
@@ -240,163 +150,6 @@ async fn run(opt: config::Opt) -> Result<()> {
    // Initialize the local disk
    init_local_disks(endpoint_pools.clone()).await.map_err(Error::other)?;

    // Setup S3 service
    // This project uses the S3S library to implement S3 services
    let s3_service = {
        let store = storage::ecfs::FS::new();
        let mut b = S3ServiceBuilder::new(store.clone());

        let access_key = opt.access_key.clone();
        let secret_key = opt.secret_key.clone();
        debug!("authentication is enabled {}, {}", &access_key, &secret_key);

        b.set_auth(IAMAuth::new(access_key, secret_key));
        b.set_access(store.clone());
        b.set_route(admin::make_admin_route()?);

        if !opt.server_domains.is_empty() {
            info!("virtual-hosted-style requests are enabled use domain_name {:?}", &opt.server_domains);
            b.set_host(MultiDomain::new(&opt.server_domains).map_err(Error::other)?);
        }

        b.build()
    };

    tokio::spawn(async move {
        // Record the PID-related metrics of the current process
        let meter = opentelemetry::global::meter("system");
        let obs_result = SystemObserver::init_process_observer(meter).await;
        match obs_result {
            Ok(_) => {
                info!("Process observer initialized successfully");
            }
            Err(e) => {
                error!("Failed to initialize process observer: {}", e);
            }
        }
    });

    let tls_acceptor = setup_tls_acceptor(opt.tls_path.as_deref().unwrap_or_default()).await?;

    let state_manager = ServiceStateManager::new();
    let worker_state_manager = state_manager.clone();
    // Update service status to Starting
    state_manager.update(ServiceState::Starting);

    // Create shutdown channel
    let (shutdown_tx, mut shutdown_rx) = tokio::sync::broadcast::channel(1);
    let shutdown_tx_clone = shutdown_tx.clone();

    tokio::spawn(async move {
        #[cfg(unix)]
        let (mut sigterm_inner, mut sigint_inner) = {
            // Unix platform specific code
            let sigterm_inner = signal(SignalKind::terminate()).expect("Failed to create SIGTERM signal handler");
            let sigint_inner = signal(SignalKind::interrupt()).expect("Failed to create SIGINT signal handler");
            (sigterm_inner, sigint_inner)
        };

        let http_server = Arc::new(ConnBuilder::new(TokioExecutor::new()));
        let mut ctrl_c = std::pin::pin!(tokio::signal::ctrl_c());
        let graceful = Arc::new(GracefulShutdown::new());
        debug!("graceful initiated");

        // service ready
        worker_state_manager.update(ServiceState::Ready);
        let tls_acceptor = tls_acceptor.map(Arc::new);

        loop {
            debug!("Waiting for new connection...");
            let (socket, _) = {
                #[cfg(unix)]
                {
                    tokio::select! {
                        res = listener.accept() => match res {
                            Ok(conn) => conn,
                            Err(err) => {
                                error!("error accepting connection: {err}");
                                continue;
                            }
                        },
                        _ = ctrl_c.as_mut() => {
                            info!("Ctrl-C received in worker thread");
                            let _ = shutdown_tx_clone.send(());
                            break;
                        },
                        Some(_) = sigint_inner.recv() => {
                            info!("SIGINT received in worker thread");
                            let _ = shutdown_tx_clone.send(());
                            break;
                        },
                        Some(_) = sigterm_inner.recv() => {
                            info!("SIGTERM received in worker thread");
                            let _ = shutdown_tx_clone.send(());
                            break;
                        },
                        _ = shutdown_rx.recv() => {
                            info!("Shutdown signal received in worker thread");
                            break;
                        }
                    }
                }
                #[cfg(not(unix))]
                {
                    tokio::select! {
                        res = listener.accept() => match res {
                            Ok(conn) => conn,
                            Err(err) => {
                                error!("error accepting connection: {err}");
                                continue;
                            }
                        },
                        _ = ctrl_c.as_mut() => {
                            info!("Ctrl-C received in worker thread");
                            let _ = shutdown_tx_clone.send(());
                            break;
                        },
                        _ = shutdown_rx.recv() => {
                            info!("Shutdown signal received in worker thread");
                            break;
                        }
                    }
                }
            };

            let socket_ref = SockRef::from(&socket);
            if let Err(err) = socket_ref.set_nodelay(true) {
                warn!(?err, "Failed to set TCP_NODELAY");
            }
            if let Err(err) = socket_ref.set_recv_buffer_size(4 * MI_B) {
                warn!(?err, "Failed to set set_recv_buffer_size");
            }
            if let Err(err) = socket_ref.set_send_buffer_size(4 * MI_B) {
                warn!(?err, "Failed to set set_send_buffer_size");
            }

            process_connection(socket, tls_acceptor.clone(), http_server.clone(), s3_service.clone(), graceful.clone());
        }

        worker_state_manager.update(ServiceState::Stopping);
        match Arc::try_unwrap(graceful) {
            Ok(g) => {
                tokio::select! {
                    () = g.shutdown() => {
                        debug!("Gracefully shutdown!");
                    },
                    () = tokio::time::sleep(Duration::from_secs(10)) => {
                        debug!("Waited 10 seconds for graceful shutdown, aborting...");
                    }
                }
            }
            Err(arc_graceful) => {
                error!("Cannot perform graceful shutdown, other references exist err: {:?}", arc_graceful);
                tokio::time::sleep(Duration::from_secs(10)).await;
                debug!("Timeout reached, forcing shutdown");
            }
        }
        worker_state_manager.update(ServiceState::Stopped);
    });

    // init store
    let store = ECStore::new(server_addr, endpoint_pools.clone()).await.inspect_err(|err| {
        error!("ECStore::new {:?}", err);
@@ -407,7 +160,7 @@ async fn run(opt: config::Opt) -> Result<()> {
|
||||
GLOBAL_ConfigSys.init(store.clone()).await?;
|
||||
|
||||
// Initialize event notifier
|
||||
event::init_event_notifier().await;
|
||||
init_event_notifier().await;
|
||||
|
||||
let buckets_list = store
|
||||
.list_bucket(&BucketOptions {
|
||||
@@ -428,19 +181,19 @@ async fn run(opt: config::Opt) -> Result<()> {
|
||||
Error::other(err)
|
||||
})?;
|
||||
|
||||
    // init scanner
    let scanner_cancel_token = init_data_scanner().await;
    // init auto heal
    init_auto_heal().await;
    // init console configuration
    init_console_cfg(local_ip, server_port);

    // init scanner and auto heal with unified cancellation token
    // let _background_services_cancel_token = create_background_services_cancel_token();
    // init_data_scanner().await;
    // init_auto_heal().await;
    let _ = create_ahm_services_cancel_token();
    let scanner = Scanner::new(Some(ScannerConfig::default()));
    scanner.start().await?;
    print_server_info();
    init_bucket_replication_pool().await;

    // Async update check (optional)
    tokio::spawn(async {
        use crate::update_checker::{UpdateCheckError, check_updates};
        use crate::update::{UpdateCheckError, check_updates};

        match check_updates().await {
            Ok(result) => {
@@ -470,34 +223,17 @@ async fn run(opt: config::Opt) -> Result<()> {
        }
    });

    if opt.console_enable {
        debug!("console is enabled");
        let access_key = opt.access_key.clone();
        let secret_key = opt.secret_key.clone();
        let console_address = opt.console_address.clone();
        let tls_path = opt.tls_path.clone();

        if console_address.is_empty() {
            error!("console_address is empty");
            return Err(Error::other("console_address is empty".to_string()));
        }

        tokio::spawn(async move {
            console::start_static_file_server(&console_address, local_ip, &access_key, &secret_key, tls_path).await;
        });
    }

    // Pause briefly (SHUTDOWN_TIMEOUT) before listening for the shutdown signal
    tokio::time::sleep(SHUTDOWN_TIMEOUT).await;
    // listen to the shutdown signal
    match wait_for_shutdown().await {
        #[cfg(unix)]
        ShutdownSignal::CtrlC | ShutdownSignal::Sigint | ShutdownSignal::Sigterm => {
            handle_shutdown(&state_manager, &shutdown_tx, &scanner_cancel_token).await;
            handle_shutdown(&state_manager, &shutdown_tx).await;
        }
        #[cfg(not(unix))]
        ShutdownSignal::CtrlC => {
            handle_shutdown(&state_manager, &shutdown_tx, &scanner_cancel_token).await;
            handle_shutdown(&state_manager, &shutdown_tx).await;
        }
    }

@@ -505,116 +241,19 @@ async fn run(opt: config::Opt) -> Result<()> {
    Ok(())
}

/// Process a single incoming TCP connection.
///
/// This function is executed in a new Tokio task and it will:
/// 1. If TLS is configured, perform the TLS handshake.
/// 2. Build a complete service stack for this connection, including the S3 and RPC services and all middleware.
/// 3. Use Hyper to handle HTTP requests on this connection.
/// 4. Register the connection with the graceful shutdown manager.
#[instrument(skip_all, fields(peer_addr = %socket.peer_addr().map(|a| a.to_string()).unwrap_or_else(|_| "unknown".to_string())))]
fn process_connection(
    socket: TcpStream,
    tls_acceptor: Option<Arc<TlsAcceptor>>,
    http_server: Arc<ConnBuilder<TokioExecutor>>,
    s3_service: S3Service,
    graceful: Arc<GracefulShutdown>,
) {
    tokio::spawn(async move {
        // Build services inside each connection task to avoid passing complex service types across tasks;
        // it also ensures that each connection has an independent service instance.
        let rpc_service = NodeServiceServer::with_interceptor(make_server(), check_auth);
        let hybrid_service = ServiceBuilder::new()
            .layer(CatchPanicLayer::new())
            .layer(
                TraceLayer::new_for_http()
                    .make_span_with(|request: &HttpRequest<_>| {
                        let span = tracing::info_span!("http-request",
                            status_code = tracing::field::Empty,
                            method = %request.method(),
                            uri = %request.uri(),
                            version = ?request.version(),
                        );
                        for (header_name, header_value) in request.headers() {
                            if header_name == "user-agent" || header_name == "content-type" || header_name == "content-length" {
                                span.record(header_name.as_str(), header_value.to_str().unwrap_or("invalid"));
                            }
                        }

                        span
                    })
                    .on_request(|request: &HttpRequest<_>, _span: &Span| {
                        info!(
                            counter.rustfs_api_requests_total = 1_u64,
                            key_request_method = %request.method().to_string(),
                            key_request_uri_path = %request.uri().path().to_owned(),
                            "handle request api total",
                        );
                        debug!("http started method: {}, url path: {}", request.method(), request.uri().path())
                    })
                    .on_response(|response: &Response<_>, latency: Duration, _span: &Span| {
                        _span.record("http response status_code", tracing::field::display(response.status()));
                        debug!("http response generated in {:?}", latency)
                    })
                    .on_body_chunk(|chunk: &Bytes, latency: Duration, _span: &Span| {
                        info!(histogram.request.body.len = chunk.len(), "histogram request body length",);
                        debug!("http body sending {} bytes in {:?}", chunk.len(), latency)
                    })
                    .on_eos(|_trailers: Option<&HeaderMap>, stream_duration: Duration, _span: &Span| {
                        debug!("http stream closed after {:?}", stream_duration)
                    })
                    .on_failure(|_error, latency: Duration, _span: &Span| {
                        info!(counter.rustfs_api_requests_failure_total = 1_u64, "handle request api failure total");
                        debug!("http request failure error: {:?} in {:?}", _error, latency)
                    }),
            )
            .layer(CorsLayer::permissive())
            .service(hybrid(s3_service, rpc_service));
        let hybrid_service = TowerToHyperService::new(hybrid_service);

        // Decide whether to handle HTTPS or HTTP connections based on the presence of a TLS acceptor
        if let Some(acceptor) = tls_acceptor {
            debug!("TLS handshake start");
            match acceptor.accept(socket).await {
                Ok(tls_socket) => {
                    debug!("TLS handshake successful");
                    let stream = TokioIo::new(tls_socket);
                    let conn = http_server.serve_connection(stream, hybrid_service);
                    if let Err(err) = graceful.watch(conn).await {
                        handle_connection_error(&*err);
                    }
                }
                Err(err) => {
                    error!(?err, "TLS handshake failed");
                    return; // Handshake failed; end the task directly
                }
            }
            debug!("TLS handshake success");
        } else {
            debug!("Http handshake start");
            let stream = TokioIo::new(socket);
            let conn = http_server.serve_connection(stream, hybrid_service);
            if let Err(err) = graceful.watch(conn).await {
                handle_connection_error(&*err);
            }
            debug!("Http handshake success");
        };
    });
}

/// Handles the shutdown process of the server
async fn handle_shutdown(
    state_manager: &ServiceStateManager,
    shutdown_tx: &tokio::sync::broadcast::Sender<()>,
    scanner_cancel_token: &tokio_util::sync::CancellationToken,
) {
async fn handle_shutdown(state_manager: &ServiceStateManager, shutdown_tx: &tokio::sync::broadcast::Sender<()>) {
    info!("Shutdown signal received in main thread");
    // update the status to stopping first
    state_manager.update(ServiceState::Stopping);

    // Stop data scanner gracefully
    info!("Stopping data scanner...");
    scanner_cancel_token.cancel();
    // Stop background services (data scanner and auto heal) gracefully
    info!("Stopping background services (data scanner and auto heal)...");
    shutdown_background_services();

    // Stop AHM services gracefully
    info!("Stopping AHM services...");
    shutdown_ahm_services();

    // Stop the notification system
    shutdown_event_notifier().await;
@@ -630,25 +269,63 @@ async fn handle_shutdown(
    info!("Server stopped");
}

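For context on what this hunk removes: the old `scanner_cancel_token` was a `tokio_util::sync::CancellationToken`. A minimal sketch of that pattern, with a hypothetical `run_scan_loop` task (the real wiring in `init_data_scanner` is more involved):

```rust
use std::time::Duration;
use tokio_util::sync::CancellationToken;

async fn run_scan_loop(token: CancellationToken) {
    loop {
        tokio::select! {
            // Exit as soon as any holder of a clone calls `token.cancel()`.
            _ = token.cancelled() => break,
            _ = tokio::time::sleep(Duration::from_secs(60)) => {
                // ... one scan cycle ...
            }
        }
    }
}
```

The new `shutdown_background_services()` / `shutdown_ahm_services()` calls centralize this, which is why `handle_shutdown` no longer needs a token parameter.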
/// Handles connection errors by logging them with appropriate severity
fn handle_connection_error(err: &(dyn std::error::Error + 'static)) {
    if let Some(hyper_err) = err.downcast_ref::<hyper::Error>() {
        if hyper_err.is_incomplete_message() {
            warn!("HTTP connection closed prematurely, message incomplete: {}", hyper_err);
        } else if hyper_err.is_closed() {
            warn!("HTTP connection closed: {}", hyper_err);
        } else if hyper_err.is_parse() {
            error!("HTTP message parsing failed: {}", hyper_err);
        } else if hyper_err.is_user() {
            error!("HTTP user error: {}", hyper_err);
        } else if hyper_err.is_canceled() {
            warn!("HTTP connection canceled: {}", hyper_err);
        } else {
            error!("Unknown hyper error: {:?}", hyper_err);
#[instrument]
pub(crate) async fn init_event_notifier() {
    info!("Initializing event notifier...");

    // 1. Get the global configuration loaded by ecstore
    let server_config = match GLOBAL_ServerConfig.get() {
        Some(config) => config.clone(), // Clone the config to pass ownership
        None => {
            error!("Event notifier initialization failed: Global server config not loaded.");
            return;
        }
    } else if let Some(io_err) = err.downcast_ref::<Error>() {
        error!("Connection I/O error: {}", io_err);
    } else {
        error!("Unknown connection error type: {:?}", err);
    };

    info!("Global server configuration loaded successfully. config: {:?}", server_config);
    // 2. Check if the notify subsystem exists in the configuration, and skip initialization if it doesn't
    if server_config
        .get_value(rustfs_config::notify::NOTIFY_MQTT_SUB_SYS, DEFAULT_DELIMITER)
        .is_none()
        || server_config
            .get_value(rustfs_config::notify::NOTIFY_WEBHOOK_SUB_SYS, DEFAULT_DELIMITER)
            .is_none()
    {
        info!("'notify' subsystem not configured, skipping event notifier initialization.");
        return;
    }

    info!("Event notifier configuration found, proceeding with initialization.");

    // 3. Initialize the notification system asynchronously with the global configuration
    // Run it in a separate task to avoid blocking the main initialization process
    tokio::spawn(async move {
        if let Err(e) = rustfs_notify::initialize(server_config).await {
            error!("Failed to initialize event notifier system: {}", e);
        } else {
            info!("Event notifier system initialized successfully.");
        }
    });
}

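`GLOBAL_ServerConfig.get()` returning an `Option` suggests a once-initialized global. A minimal sketch of that shape, assuming a `std::sync::OnceLock`-style static (the actual definition lives in ecstore and may differ):

```rust
use std::sync::OnceLock;

#[derive(Debug, Clone)]
struct AppConfig; // stand-in for the real server config type

static GLOBAL_SERVER_CONFIG: OnceLock<AppConfig> = OnceLock::new();

fn demo() {
    // Before initialization `get()` yields `None`, which is why
    // `init_event_notifier` bails out early instead of panicking.
    assert!(GLOBAL_SERVER_CONFIG.get().is_none());
    let _ = GLOBAL_SERVER_CONFIG.set(AppConfig);
    assert!(GLOBAL_SERVER_CONFIG.get().is_some());
}
```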
/// Shuts down the event notifier system gracefully
pub async fn shutdown_event_notifier() {
    info!("Shutting down event notifier system...");

    if !rustfs_notify::is_notification_system_initialized() {
        info!("Event notifier system is not initialized, nothing to shut down.");
        return;
    }

    let system = match rustfs_notify::notification_system() {
        Some(sys) => sys,
        None => {
            error!("Event notifier system is not initialized.");
            return;
        }
    };

    // Call the shutdown function from the rustfs_notify module
    system.shutdown().await;
    info!("Event notifier system shut down successfully.");
}

413
rustfs/src/server/http.rs
Normal file
@@ -0,0 +1,413 @@
// use crate::admin::console::{CONSOLE_CONFIG, init_console_cfg};
use crate::auth::IAMAuth;
// Ensure the correct path for parse_license is imported
use crate::admin;
use crate::config;
use crate::server::hybrid::hybrid;
use crate::server::{ServiceState, ServiceStateManager};
use crate::storage;
use bytes::Bytes;
use http::{HeaderMap, Request as HttpRequest, Response};
use hyper_util::server::graceful::GracefulShutdown;
use hyper_util::{
    rt::{TokioExecutor, TokioIo},
    server::conn::auto::Builder as ConnBuilder,
    service::TowerToHyperService,
};
use rustfs_config::{DEFAULT_ACCESS_KEY, DEFAULT_SECRET_KEY, RUSTFS_TLS_CERT, RUSTFS_TLS_KEY};
use rustfs_ecstore::rpc::make_server;
use rustfs_obs::SystemObserver;
use rustfs_protos::proto_gen::node_service::node_service_server::NodeServiceServer;
use rustfs_utils::net::parse_and_resolve_address;
use rustls::ServerConfig;
use s3s::service::S3Service;
use s3s::{host::MultiDomain, service::S3ServiceBuilder};
use socket2::SockRef;
use std::io::{Error, Result};
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
use tokio::net::{TcpListener, TcpStream};
#[cfg(unix)]
use tokio::signal::unix::{SignalKind, signal};
use tokio_rustls::TlsAcceptor;
use tonic::{Request, Status, metadata::MetadataValue};
use tower::ServiceBuilder;
use tower_http::catch_panic::CatchPanicLayer;
use tower_http::cors::CorsLayer;
use tower_http::trace::TraceLayer;
use tracing::{Span, debug, error, info, instrument, warn};

const MI_B: usize = 1024 * 1024;

pub async fn start_http_server(
    opt: &config::Opt,
    worker_state_manager: ServiceStateManager,
) -> std::io::Result<tokio::sync::broadcast::Sender<()>> {
    let server_addr = parse_and_resolve_address(opt.address.as_str()).map_err(Error::other)?;
    let server_port = server_addr.port();
    let server_address = server_addr.to_string();

    // The listening address and port are obtained from the parameters
    let listener = TcpListener::bind(server_address.clone()).await?;
    // Obtain the listener address
    let local_addr: SocketAddr = listener.local_addr()?;
    debug!("Listening on: {}", local_addr);
    let local_ip = match rustfs_utils::get_local_ip() {
        Some(ip) => {
            debug!("Obtained local IP address: {}", ip);
            ip
        }
        None => {
            warn!("Unable to obtain local IP address, using fallback IP: {}", local_addr.ip());
            local_addr.ip()
        }
    };

    // Detailed endpoint information (showing all API endpoints)
    let api_endpoints = format!("http://{local_ip}:{server_port}");
    let localhost_endpoint = format!("http://127.0.0.1:{server_port}");
    info!(" API: {} {}", api_endpoints, localhost_endpoint);
    if opt.console_enable {
        info!(
            " WebUI: http://{}:{}/rustfs/console/index.html http://127.0.0.1:{}/rustfs/console/index.html http://{}/rustfs/console/index.html",
            local_ip, server_port, server_port, server_address
        );
    }
    info!(" RootUser: {}", opt.access_key.clone());
    info!(" RootPass: {}", opt.secret_key.clone());
    if DEFAULT_ACCESS_KEY.eq(&opt.access_key) && DEFAULT_SECRET_KEY.eq(&opt.secret_key) {
        warn!(
            "Detected default credentials '{}:{}', we recommend that you change these values with 'RUSTFS_ACCESS_KEY' and 'RUSTFS_SECRET_KEY' environment variables",
            DEFAULT_ACCESS_KEY, DEFAULT_SECRET_KEY
        );
    }

    // Setup S3 service
    // This project uses the s3s library to implement S3 services
    let s3_service = {
        let store = storage::ecfs::FS::new();
        let mut b = S3ServiceBuilder::new(store.clone());

        let access_key = opt.access_key.clone();
        let secret_key = opt.secret_key.clone();
        debug!("authentication is enabled {}, {}", &access_key, &secret_key);

        b.set_auth(IAMAuth::new(access_key, secret_key));
        b.set_access(store.clone());
        b.set_route(admin::make_admin_route(opt.console_enable)?);

        if !opt.server_domains.is_empty() {
            info!("virtual-hosted-style requests are enabled use domain_name {:?}", &opt.server_domains);
            b.set_host(MultiDomain::new(&opt.server_domains).map_err(Error::other)?);
        }

        b.build()
    };

    tokio::spawn(async move {
        // Record the PID-related metrics of the current process
        let meter = opentelemetry::global::meter("system");
        let obs_result = SystemObserver::init_process_observer(meter).await;
        match obs_result {
            Ok(_) => {
                info!("Process observer initialized successfully");
            }
            Err(e) => {
                error!("Failed to initialize process observer: {}", e);
            }
        }
    });

    let tls_acceptor = setup_tls_acceptor(opt.tls_path.as_deref().unwrap_or_default()).await?;
    // Create shutdown channel
    let (shutdown_tx, mut shutdown_rx) = tokio::sync::broadcast::channel(1);
    let shutdown_tx_clone = shutdown_tx.clone();

    tokio::spawn(async move {
        #[cfg(unix)]
        let (mut sigterm_inner, mut sigint_inner) = {
            // Unix platform specific code
            let sigterm_inner = signal(SignalKind::terminate()).expect("Failed to create SIGTERM signal handler");
            let sigint_inner = signal(SignalKind::interrupt()).expect("Failed to create SIGINT signal handler");
            (sigterm_inner, sigint_inner)
        };

        let http_server = Arc::new(ConnBuilder::new(TokioExecutor::new()));
        let mut ctrl_c = std::pin::pin!(tokio::signal::ctrl_c());
        let graceful = Arc::new(GracefulShutdown::new());
        debug!("graceful initiated");

        // service ready
        worker_state_manager.update(ServiceState::Ready);
        let tls_acceptor = tls_acceptor.map(Arc::new);

        loop {
            debug!("Waiting for new connection...");
            let (socket, _) = {
                #[cfg(unix)]
                {
                    tokio::select! {
                        res = listener.accept() => match res {
                            Ok(conn) => conn,
                            Err(err) => {
                                error!("error accepting connection: {err}");
                                continue;
                            }
                        },
                        _ = ctrl_c.as_mut() => {
                            info!("Ctrl-C received in worker thread");
                            let _ = shutdown_tx_clone.send(());
                            break;
                        },
                        Some(_) = sigint_inner.recv() => {
                            info!("SIGINT received in worker thread");
                            let _ = shutdown_tx_clone.send(());
                            break;
                        },
                        Some(_) = sigterm_inner.recv() => {
                            info!("SIGTERM received in worker thread");
                            let _ = shutdown_tx_clone.send(());
                            break;
                        },
                        _ = shutdown_rx.recv() => {
                            info!("Shutdown signal received in worker thread");
                            break;
                        }
                    }
                }
                #[cfg(not(unix))]
                {
                    tokio::select! {
                        res = listener.accept() => match res {
                            Ok(conn) => conn,
                            Err(err) => {
                                error!("error accepting connection: {err}");
                                continue;
                            }
                        },
                        _ = ctrl_c.as_mut() => {
                            info!("Ctrl-C received in worker thread");
                            let _ = shutdown_tx_clone.send(());
                            break;
                        },
                        _ = shutdown_rx.recv() => {
                            info!("Shutdown signal received in worker thread");
                            break;
                        }
                    }
                }
            };

            let socket_ref = SockRef::from(&socket);
            if let Err(err) = socket_ref.set_nodelay(true) {
                warn!(?err, "Failed to set TCP_NODELAY");
            }
            if let Err(err) = socket_ref.set_recv_buffer_size(4 * MI_B) {
                warn!(?err, "Failed to set receive buffer size");
            }
            if let Err(err) = socket_ref.set_send_buffer_size(4 * MI_B) {
                warn!(?err, "Failed to set send buffer size");
            }

            process_connection(socket, tls_acceptor.clone(), http_server.clone(), s3_service.clone(), graceful.clone());
        }

        worker_state_manager.update(ServiceState::Stopping);
        match Arc::try_unwrap(graceful) {
            Ok(g) => {
                tokio::select! {
                    () = g.shutdown() => {
                        debug!("Gracefully shut down!");
                    },
                    () = tokio::time::sleep(Duration::from_secs(10)) => {
                        debug!("Waited 10 seconds for graceful shutdown, aborting...");
                    }
                }
            }
            Err(arc_graceful) => {
                error!("Cannot perform graceful shutdown, other references still exist: {:?}", arc_graceful);
                tokio::time::sleep(Duration::from_secs(10)).await;
                debug!("Timeout reached, forcing shutdown");
            }
        }
        worker_state_manager.update(ServiceState::Stopped);
    });

    Ok(shutdown_tx)
}

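The broadcast `Sender` returned here is the caller's shutdown handle; the worker loop above holds a subscribed `Receiver`. A hedged caller-side sketch (the surrounding setup names are assumptions):

```rust
async fn run_until_stopped(opt: &config::Opt, state: ServiceStateManager) -> std::io::Result<()> {
    let shutdown_tx = start_http_server(opt, state).await?;
    // ... wait for an external stop condition ...
    let _ = shutdown_tx.send(()); // wakes every subscribed worker loop
    Ok(())
}
```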
/// Sets up the TLS acceptor if certificates are available.
#[instrument(skip(tls_path))]
async fn setup_tls_acceptor(tls_path: &str) -> Result<Option<TlsAcceptor>> {
    if tls_path.is_empty() || tokio::fs::metadata(tls_path).await.is_err() {
        debug!("TLS path is not provided or does not exist, starting with HTTP");
        return Ok(None);
    }

    debug!("Found TLS directory, checking for certificates");

    // 1. Try to load all certificates from the directory (multi-cert support)
    if let Ok(cert_key_pairs) = rustfs_utils::load_all_certs_from_directory(tls_path) {
        if !cert_key_pairs.is_empty() {
            debug!("Found {} certificates, creating multi-cert resolver", cert_key_pairs.len());
            let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
            let mut server_config = ServerConfig::builder()
                .with_no_client_auth()
                .with_cert_resolver(Arc::new(rustfs_utils::create_multi_cert_resolver(cert_key_pairs)?));
            server_config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec(), b"http/1.0".to_vec()];
            return Ok(Some(TlsAcceptor::from(Arc::new(server_config))));
        }
    }

    // 2. Fall back to legacy single-certificate mode
    let key_path = format!("{tls_path}/{RUSTFS_TLS_KEY}");
    let cert_path = format!("{tls_path}/{RUSTFS_TLS_CERT}");
    if tokio::try_join!(tokio::fs::metadata(&key_path), tokio::fs::metadata(&cert_path)).is_ok() {
        debug!("Found legacy single TLS certificate, starting with HTTPS");
        let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
        let certs = rustfs_utils::load_certs(&cert_path).map_err(|e| rustfs_utils::certs_error(e.to_string()))?;
        let key = rustfs_utils::load_private_key(&key_path).map_err(|e| rustfs_utils::certs_error(e.to_string()))?;
        let mut server_config = ServerConfig::builder()
            .with_no_client_auth()
            .with_single_cert(certs, key)
            .map_err(|e| rustfs_utils::certs_error(e.to_string()))?;
        server_config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec(), b"http/1.0".to_vec()];
        return Ok(Some(TlsAcceptor::from(Arc::new(server_config))));
    }

    debug!("No valid TLS certificates found in the directory, starting with HTTP");
    Ok(None)
}

/// Process a single incoming TCP connection.
///
/// This function is executed in a new Tokio task and it will:
/// 1. If TLS is configured, perform the TLS handshake.
/// 2. Build a complete service stack for this connection, including the S3 and RPC services and all middleware.
/// 3. Use Hyper to handle HTTP requests on this connection.
/// 4. Register the connection with the graceful shutdown manager.
#[instrument(skip_all, fields(peer_addr = %socket.peer_addr().map(|a| a.to_string()).unwrap_or_else(|_| "unknown".to_string())))]
fn process_connection(
    socket: TcpStream,
    tls_acceptor: Option<Arc<TlsAcceptor>>,
    http_server: Arc<ConnBuilder<TokioExecutor>>,
    s3_service: S3Service,
    graceful: Arc<GracefulShutdown>,
) {
    tokio::spawn(async move {
        // Build services inside each connection task to avoid passing complex service types across tasks;
        // it also ensures that each connection has an independent service instance.
        let rpc_service = NodeServiceServer::with_interceptor(make_server(), check_auth);
        let service = hybrid(s3_service, rpc_service);

        let hybrid_service = ServiceBuilder::new()
            .layer(CatchPanicLayer::new())
            .layer(
                TraceLayer::new_for_http()
                    .make_span_with(|request: &HttpRequest<_>| {
                        let span = tracing::info_span!("http-request",
                            status_code = tracing::field::Empty,
                            method = %request.method(),
                            uri = %request.uri(),
                            version = ?request.version(),
                        );
                        for (header_name, header_value) in request.headers() {
                            if header_name == "user-agent" || header_name == "content-type" || header_name == "content-length" {
                                span.record(header_name.as_str(), header_value.to_str().unwrap_or("invalid"));
                            }
                        }

                        span
                    })
                    .on_request(|request: &HttpRequest<_>, _span: &Span| {
                        info!(
                            counter.rustfs_api_requests_total = 1_u64,
                            key_request_method = %request.method().to_string(),
                            key_request_uri_path = %request.uri().path().to_owned(),
                            "handle request api total",
                        );
                        debug!("http started method: {}, url path: {}", request.method(), request.uri().path())
                    })
                    .on_response(|response: &Response<_>, latency: Duration, _span: &Span| {
                        _span.record("http response status_code", tracing::field::display(response.status()));
                        debug!("http response generated in {:?}", latency)
                    })
                    .on_body_chunk(|chunk: &Bytes, latency: Duration, _span: &Span| {
                        info!(histogram.request.body.len = chunk.len(), "histogram request body length",);
                        debug!("http body sending {} bytes in {:?}", chunk.len(), latency)
                    })
                    .on_eos(|_trailers: Option<&HeaderMap>, stream_duration: Duration, _span: &Span| {
                        debug!("http stream closed after {:?}", stream_duration)
                    })
                    .on_failure(|_error, latency: Duration, _span: &Span| {
                        info!(counter.rustfs_api_requests_failure_total = 1_u64, "handle request api failure total");
                        debug!("http request failure error: {:?} in {:?}", _error, latency)
                    }),
            )
            .layer(CorsLayer::permissive())
            .service(service);
        let hybrid_service = TowerToHyperService::new(hybrid_service);

        // Decide whether to handle HTTPS or HTTP connections based on the presence of a TLS acceptor
        if let Some(acceptor) = tls_acceptor {
            debug!("TLS handshake start");
            match acceptor.accept(socket).await {
                Ok(tls_socket) => {
                    debug!("TLS handshake successful");
                    let stream = TokioIo::new(tls_socket);
                    let conn = http_server.serve_connection(stream, hybrid_service);
                    if let Err(err) = graceful.watch(conn).await {
                        handle_connection_error(&*err);
                    }
                }
                Err(err) => {
                    error!(?err, "TLS handshake failed");
                    return; // Handshake failed; end the task directly
                }
            }
            debug!("TLS handshake success");
        } else {
            debug!("Http handshake start");
            let stream = TokioIo::new(socket);
            let conn = http_server.serve_connection(stream, hybrid_service);
            if let Err(err) = graceful.watch(conn).await {
                handle_connection_error(&*err);
            }
            debug!("Http handshake success");
        };
    });
}

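The `hybrid(s3_service, rpc_service)` combinator multiplexes S3/HTTP and gRPC traffic over one port; its implementation lives in `server/hybrid.rs` and is not part of this diff. A simplified sketch of the routing rule such multiplexers conventionally apply (an assumption, not the actual code):

```rust
/// gRPC requests announce themselves via `content-type: application/grpc`;
/// everything else is treated as S3/HTTP traffic.
fn is_grpc_request<B>(req: &http::Request<B>) -> bool {
    req.headers()
        .get(http::header::CONTENT_TYPE)
        .and_then(|value| value.to_str().ok())
        .map(|ct| ct.starts_with("application/grpc"))
        .unwrap_or(false)
}
```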
/// Handles connection errors by logging them with appropriate severity
fn handle_connection_error(err: &(dyn std::error::Error + 'static)) {
    if let Some(hyper_err) = err.downcast_ref::<hyper::Error>() {
        if hyper_err.is_incomplete_message() {
            warn!("HTTP connection closed prematurely, message incomplete: {}", hyper_err);
        } else if hyper_err.is_closed() {
            warn!("HTTP connection closed: {}", hyper_err);
        } else if hyper_err.is_parse() {
            error!("HTTP message parsing failed: {}", hyper_err);
        } else if hyper_err.is_user() {
            error!("HTTP user error: {}", hyper_err);
        } else if hyper_err.is_canceled() {
            warn!("HTTP connection canceled: {}", hyper_err);
        } else {
            error!("Unknown hyper error: {:?}", hyper_err);
        }
    } else if let Some(io_err) = err.downcast_ref::<Error>() {
        error!("Connection I/O error: {}", io_err);
    } else {
        error!("Unknown connection error type: {:?}", err);
    }
}

#[allow(clippy::result_large_err)]
fn check_auth(req: Request<()>) -> std::result::Result<Request<()>, Status> {
    let token: MetadataValue<_> = "rustfs rpc".parse().unwrap();

    match req.metadata().get("authorization") {
        Some(t) if token == t => Ok(req),
        _ => Err(Status::unauthenticated("No valid auth token")),
    }
}
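On the client side, a tonic caller has to attach the same static token for `check_auth` to admit the request. A minimal sketch (channel setup elided):

```rust
use tonic::Request;

fn authorize<T>(mut req: Request<T>) -> Request<T> {
    // Must match the literal the interceptor compares against.
    let token = "rustfs rpc".parse().expect("valid metadata value");
    req.metadata_mut().insert("authorization", token);
    req
}
```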
@@ -12,7 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.

mod http;
mod hybrid;
mod service_state;
pub(crate) use http::start_http_server;
pub(crate) use service_state::SHUTDOWN_TIMEOUT;
pub(crate) use service_state::ServiceState;
pub(crate) use service_state::ServiceStateManager;

@@ -36,7 +36,6 @@ use rustfs_s3select_api::server::dbms::DatabaseManagerSystem;
// use rustfs_ecstore::store_api::RESERVED_METADATA_PREFIX;
use futures::StreamExt;
use http::HeaderMap;
use lazy_static::lazy_static;
use rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::validate_transition_tier;
use rustfs_ecstore::bucket::lifecycle::lifecycle::Lifecycle;
use rustfs_ecstore::bucket::metadata::BUCKET_LIFECYCLE_CONFIG;
@@ -102,6 +101,7 @@ use std::fmt::Debug;
use std::path::Path;
use std::str::FromStr;
use std::sync::Arc;
use std::sync::LazyLock;
use time::OffsetDateTime;
use time::format_description::well_known::Rfc3339;
use tokio::sync::mpsc;
@@ -126,12 +126,10 @@ macro_rules! try_ {
    };
}

lazy_static! {
    static ref RUSTFS_OWNER: Owner = Owner {
        display_name: Some("rustfs".to_owned()),
        id: Some("c19050dbcee97fda828689dda99097a6321af2248fa760517237346e5d9c8a66".to_owned()),
    };
}
static RUSTFS_OWNER: LazyLock<Owner> = LazyLock::new(|| Owner {
    display_name: Some("rustfs".to_owned()),
    id: Some("c19050dbcee97fda828689dda99097a6321af2248fa760517237346e5d9c8a66".to_owned()),
});
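This is the pattern applied throughout the diff: `lazy_static!` blocks become `std::sync::LazyLock` statics (stable since Rust 1.80), dropping the external crate. The general shape of the migration:

```rust
use std::sync::LazyLock;

// Before, with the lazy_static crate:
// lazy_static! {
//     static ref ANSWERS: Vec<u32> = vec![42];
// }

// After, with the standard library:
static ANSWERS: LazyLock<Vec<u32>> = LazyLock::new(|| vec![42]);
```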
#[derive(Debug, Clone)]
pub struct FS {
@@ -703,6 +701,12 @@ impl S3 for FS {
        .await
        .map_err(ApiError::from)?;

    if let Some(region) = rustfs_ecstore::global::get_global_region() {
        return Ok(S3Response::new(GetBucketLocationOutput {
            location_constraint: Some(BucketLocationConstraint::from(region)),
        }));
    }

    let output = GetBucketLocationOutput::default();
    Ok(S3Response::new(output))
}

@@ -13,13 +13,13 @@
// limitations under the License.

use http::{HeaderMap, HeaderValue};
use lazy_static::lazy_static;
use rustfs_ecstore::bucket::versioning_sys::BucketVersioningSys;
use rustfs_ecstore::error::Result;
use rustfs_ecstore::error::StorageError;
use rustfs_ecstore::store_api::ObjectOptions;
use rustfs_utils::path::is_dir_object;
use std::collections::HashMap;
use std::sync::LazyLock;
use uuid::Uuid;

/// Creates options for deleting an object in a bucket.
@@ -214,9 +214,9 @@ pub fn extract_metadata_from_mime(headers: &HeaderMap<HeaderValue>, metadata: &m
    }
}

lazy_static! {
    /// List of supported headers.
    static ref SUPPORTED_HEADERS: Vec<&'static str> = vec![
/// List of supported headers.
static SUPPORTED_HEADERS: LazyLock<Vec<&'static str>> = LazyLock::new(|| {
    vec![
        "content-type",
        "cache-control",
        "content-language",
@@ -225,9 +225,9 @@ lazy_static! {
        "x-amz-storage-class",
        "x-amz-tagging",
        "expires",
        "x-amz-replication-status"
    ];
}
        "x-amz-replication-status",
    ]
});
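A hedged sketch of how such a whitelist is typically consulted when copying request headers into object metadata (the actual logic is in `extract_metadata_from_mime` above and may differ):

```rust
use http::{HeaderMap, HeaderValue};
use std::collections::HashMap;

fn copy_supported(headers: &HeaderMap<HeaderValue>, metadata: &mut HashMap<String, String>) {
    for (name, value) in headers {
        // Only whitelisted headers are propagated into object metadata.
        if SUPPORTED_HEADERS.contains(&name.as_str()) {
            if let Ok(v) = value.to_str() {
                metadata.insert(name.to_string(), v.to_string());
            }
        }
    }
}
```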
#[cfg(test)]
mod tests {

@@ -17,6 +17,8 @@ use std::time::Duration;
use thiserror::Error;
use tracing::{debug, error, info, warn};

use crate::version;

/// Update check related errors
#[derive(Error, Debug)]
pub enum UpdateCheckError {
@@ -231,25 +233,7 @@ impl VersionChecker {

/// Get current version number
pub fn get_current_version() -> String {
    use crate::console::CONSOLE_CONFIG;

    if let Some(config) = CONSOLE_CONFIG.get() {
        // Extract version from configuration
        let version_str = config.version();
        // Extract the version part, removing extra information
        if let Some(release_part) = version_str.split_whitespace().next() {
            if release_part.starts_with("RELEASE.") {
                release_part.trim_start_matches("RELEASE.").to_string()
            } else {
                release_part.to_string()
            }
        } else {
            rustfs_config::VERSION.to_string()
        }
    } else {
        // If configuration is not initialized, use the constant version
        rustfs_config::VERSION.to_string()
    }
    version::get_version()
}

/// Convenience function for async update checking
13
rustfs/src/version.rs
Normal file
@@ -0,0 +1,13 @@
use shadow_rs::shadow;
shadow!(build);

#[allow(clippy::const_is_empty)]
pub fn get_version() -> String {
    if !build::TAG.is_empty() {
        build::TAG.to_string()
    } else if !build::SHORT_COMMIT.is_empty() {
        format!("@{}", build::SHORT_COMMIT)
    } else {
        build::PKG_VERSION.to_string()
    }
}
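`shadow!(build)` expects the build-time metadata to be generated by a build script. A minimal sketch of the companion `build.rs`, assuming the standard shadow-rs setup (not shown in this diff):

```rust
// build.rs
fn main() -> shadow_rs::SdResult<()> {
    // Generates the `build` module (TAG, SHORT_COMMIT, PKG_VERSION, ...)
    // consumed by `shadow!(build)` in src/version.rs.
    shadow_rs::new()
}
```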
@@ -100,6 +100,8 @@ export RUSTFS_NS_SCANNER_INTERVAL=60 # Object scan interval, in seconds

export RUSTFS_COMPRESSION_ENABLED=true # Whether to enable compression

#export RUSTFS_REGION="us-east-1"

# Event message configuration
#export RUSTFS_EVENT_CONFIG="./deploy/config/event.example.toml"