Mirror of https://github.com/rustfs/rustfs.git (synced 2026-01-17 09:40:32 +00:00)

Compare commits: 1.0.0-alph ... 1.0.0-alph (23 commits)
| SHA1 |
|---|
| bebd78fbbb |
| 3f095e75cb |
| f7d30da9e0 |
| 823d4b6f79 |
| 051ea7786f |
| 42b645e355 |
| f27ee96014 |
| 20cd117aa6 |
| fc8931d69f |
| 0167b2decd |
| e67980ff3c |
| 96760bba5a |
| 2501d7d241 |
| 55b84262b5 |
| ce4252eb1a |
| db708917b4 |
| 8ddb45627d |
| 550c225b79 |
| 0d46b550a8 |
| 0693cca1a4 |
| 0d9f9e381a |
| 6c7aa5a7ae |
| a27d935925 |
@@ -6,10 +6,10 @@ This directory contains Docker configuration files and supporting infrastructure

```
rustfs/
├── Dockerfile           # Production image (Alpine + GitHub Releases)
├── Dockerfile.source    # Source build (Debian + cross-compilation)
├── cargo.config.toml    # Rust cargo configuration
├── Dockerfile           # Production image (Alpine + pre-built binaries)
├── Dockerfile.source    # Development image (Debian + source build)
├── docker-buildx.sh     # Multi-architecture build script
├── Makefile             # Build automation with simplified commands
└── .docker/             # Supporting infrastructure
    ├── observability/   # Monitoring and observability configs
    ├── compose/         # Docker Compose configurations
@@ -64,7 +64,11 @@ docker run rustfs/rustfs:main-latest # Main branch latest
### Development Environment

```bash
# Start development container
# Quick setup using Makefile (recommended)
make docker-dev-local # Build development image locally
make dev-env-start # Start development container

# Manual Docker commands
docker run -it -v $(pwd):/workspace -p 9000:9000 rustfs/rustfs:latest-dev

# Build from source locally
@@ -76,9 +80,33 @@ docker-compose up rustfs-dev

## 🏗️ Build Arguments and Scripts

### Using docker-buildx.sh (Recommended)
### Using Makefile Commands (Recommended)

For multi-architecture builds, use the provided script:
The easiest way to build images is with the simplified Makefile commands:

```bash
# Development images (build from source)
make docker-dev-local # Build for local use (single arch)
make docker-dev # Build multi-arch (for CI/CD)
make docker-dev-push REGISTRY=xxx # Build and push to registry

# Production images (using pre-built binaries)
make docker-buildx # Build multi-arch production images
make docker-buildx-push # Build and push production images
make docker-buildx-version VERSION=v1.0.0 # Build specific version

# Development environment
make dev-env-start # Start development container
make dev-env-stop # Stop development container
make dev-env-restart # Restart development container

# Help
make help-docker # Show all Docker-related commands
```

### Using docker-buildx.sh (Advanced)

For direct script usage and advanced scenarios:

```bash
# Build latest version for all architectures
@@ -147,17 +175,51 @@ Architecture is automatically detected during build using Docker's `TARGETARCH`

## 🛠️ Development Workflow

For local development and testing:
### Quick Start with Makefile (Recommended)

```bash
# Quick development setup
docker-compose up rustfs-dev
# 1. Start development environment
make dev-env-start

# Custom source build
docker build -f Dockerfile.source -t rustfs:custom .
# 2. Your development container is now running with:
#    - Port 9000 exposed for RustFS
#    - Port 9010 exposed for admin console
#    - Current directory mounted as /workspace

# 3. Stop when done
make dev-env-stop
```
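
Once the container is up, a quick way to exercise the S3-compatible endpoint on port 9000 is the AWS CLI. This is a minimal sketch; the bucket name is arbitrary and the credentials are placeholders — substitute whatever your development container is actually configured with:

```bash
# Point the AWS CLI at the dev container (placeholder credentials).
export AWS_ACCESS_KEY_ID=<your-access-key>
export AWS_SECRET_ACCESS_KEY=<your-secret-key>

aws --endpoint-url http://localhost:9000 s3 mb s3://smoke-test    # create a bucket
echo "hello rustfs" > hello.txt
aws --endpoint-url http://localhost:9000 s3 cp hello.txt s3://smoke-test/
aws --endpoint-url http://localhost:9000 s3 ls s3://smoke-test/   # should list hello.txt
```
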
### Manual Development Setup

```bash
# Build development image from source
make docker-dev-local

# Or use traditional Docker commands
docker build -f Dockerfile.source -t rustfs:dev .

# Run with development tools
docker run -it -v $(pwd):/workspace rustfs:custom bash
docker run -it -v $(pwd):/workspace -p 9000:9000 rustfs:dev bash

# Or use docker-compose for complex setups
docker-compose up rustfs-dev
```

### Common Development Tasks

```bash
# Build and test locally
make build # Build binary natively
make docker-dev-local # Build development Docker image
make test # Run tests
make fmt # Format code
make clippy # Run linter

# Get help
make help # General help
make help-docker # Docker-specific help
make help-build # Build-specific help
```

## 🚀 CI/CD Integration
@@ -1,40 +0,0 @@
FROM alpine:3.18

ENV LANG C.UTF-8

# Install base dependencies
RUN apk add --no-cache \
    wget \
    git \
    curl \
    unzip \
    gcc \
    musl-dev \
    pkgconfig \
    openssl-dev \
    dbus-dev \
    wayland-dev \
    webkit2gtk-4.1-dev \
    build-base \
    linux-headers

# install protoc
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v30.2/protoc-30.2-linux-x86_64.zip \
    && unzip protoc-30.2-linux-x86_64.zip -d protoc3 \
    && mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
    && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-30.2-linux-x86_64.zip protoc3

# install flatc
RUN wget https://github.com/google/flatbuffers/releases/download/v24.3.25/Linux.flatc.binary.g++-13.zip \
    && unzip Linux.flatc.binary.g++-13.zip \
    && mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip

# install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

# Set PATH for rust
ENV PATH="/root/.cargo/bin:${PATH}"

COPY .docker/cargo.config.toml /root/.cargo/config.toml

WORKDIR /root/rustfs
295 .github/workflows/build.yml (vendored)
@@ -172,6 +172,14 @@ jobs:
            target: aarch64-unknown-linux-musl
            cross: true
            platform: linux
          - os: ubuntu-latest
            target: x86_64-unknown-linux-gnu
            cross: false
            platform: linux
          - os: ubuntu-latest
            target: aarch64-unknown-linux-gnu
            cross: true
            platform: linux
          # macOS builds
          - os: macos-latest
            target: aarch64-apple-darwin
@@ -181,15 +189,15 @@ jobs:
            target: x86_64-apple-darwin
            cross: false
            platform: macos
          # # Windows builds (temporarily disabled)
          # - os: windows-latest
          #   target: x86_64-pc-windows-msvc
          #   cross: false
          #   platform: windows
          # - os: windows-latest
          #   target: aarch64-pc-windows-msvc
          #   cross: true
          #   platform: windows
          # Windows builds (temporarily disabled)
          - os: windows-latest
            target: x86_64-pc-windows-msvc
            cross: false
            platform: windows
          - os: windows-latest
            target: aarch64-pc-windows-msvc
            cross: true
            platform: windows
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
@@ -471,12 +479,12 @@ jobs:
            "release")
              echo "🚀 Release build artifacts have been uploaded to OSS release directory"
              echo "✅ This build is ready for production use"
              echo "🏷️ GitHub Release will be created automatically by the release workflow"
              echo "🏷️ GitHub Release will be created in this workflow"
              ;;
            "prerelease")
              echo "🧪 Prerelease build artifacts have been uploaded to OSS release directory"
              echo "⚠️ This is a prerelease build - use with caution"
              echo "🏷️ GitHub Release will be created automatically by the release workflow"
              echo "🏷️ GitHub Release will be created in this workflow"
              ;;
          esac

@@ -489,3 +497,268 @@ jobs:
          else
            echo "❌ Docker image build will be skipped due to build failure"
          fi

  # Create GitHub Release (only for tag pushes)
  create-release:
    name: Create GitHub Release
    needs: [build-check, build-rustfs]
    if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
    runs-on: ubuntu-latest
    permissions:
      contents: write
    outputs:
      release_id: ${{ steps.create.outputs.release_id }}
      release_url: ${{ steps.create.outputs.release_url }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Create GitHub Release
        id: create
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          TAG="${{ needs.build-check.outputs.version }}"
          VERSION="${{ needs.build-check.outputs.version }}"
          IS_PRERELEASE="${{ needs.build-check.outputs.is_prerelease }}"
          BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"

          # Determine release type for title
          if [[ "$BUILD_TYPE" == "prerelease" ]]; then
            if [[ "$TAG" == *"alpha"* ]]; then
              RELEASE_TYPE="alpha"
            elif [[ "$TAG" == *"beta"* ]]; then
              RELEASE_TYPE="beta"
            elif [[ "$TAG" == *"rc"* ]]; then
              RELEASE_TYPE="rc"
            else
              RELEASE_TYPE="prerelease"
            fi
          else
            RELEASE_TYPE="release"
          fi

          # Check if release already exists
          if gh release view "$TAG" >/dev/null 2>&1; then
            echo "Release $TAG already exists"
            RELEASE_ID=$(gh release view "$TAG" --json databaseId --jq '.databaseId')
            RELEASE_URL=$(gh release view "$TAG" --json url --jq '.url')
          else
            # Get release notes from tag message
            RELEASE_NOTES=$(git tag -l --format='%(contents)' "${TAG}")
            if [[ -z "$RELEASE_NOTES" || "$RELEASE_NOTES" =~ ^[[:space:]]*$ ]]; then
              if [[ "$IS_PRERELEASE" == "true" ]]; then
                RELEASE_NOTES="Pre-release ${VERSION} (${RELEASE_TYPE})"
              else
                RELEASE_NOTES="Release ${VERSION}"
              fi
            fi

            # Create release title
            if [[ "$IS_PRERELEASE" == "true" ]]; then
              TITLE="RustFS $VERSION (${RELEASE_TYPE})"
            else
              TITLE="RustFS $VERSION"
            fi

            # Create the release
            PRERELEASE_FLAG=""
            if [[ "$IS_PRERELEASE" == "true" ]]; then
              PRERELEASE_FLAG="--prerelease"
            fi

            gh release create "$TAG" \
              --title "$TITLE" \
              --notes "$RELEASE_NOTES" \
              $PRERELEASE_FLAG \
              --draft

            RELEASE_ID=$(gh release view "$TAG" --json databaseId --jq '.databaseId')
            RELEASE_URL=$(gh release view "$TAG" --json url --jq '.url')
          fi

          echo "release_id=$RELEASE_ID" >> $GITHUB_OUTPUT
          echo "release_url=$RELEASE_URL" >> $GITHUB_OUTPUT
          echo "Created release: $RELEASE_URL"

  # Prepare and upload release assets
  upload-release-assets:
    name: Upload Release Assets
    needs: [build-check, build-rustfs, create-release]
    if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
    runs-on: ubuntu-latest
    permissions:
      contents: write
      actions: read
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Download all build artifacts
        uses: actions/download-artifact@v4
        with:
          path: ./artifacts
          pattern: rustfs-*
          merge-multiple: true

      - name: Prepare release assets
        id: prepare
        run: |
          VERSION="${{ needs.build-check.outputs.version }}"
          TAG="${{ needs.build-check.outputs.version }}"

          mkdir -p ./release-assets

          # Copy and verify artifacts
          ASSETS_COUNT=0
          for file in ./artifacts/*.zip; do
            if [[ -f "$file" ]]; then
              cp "$file" ./release-assets/
              ASSETS_COUNT=$((ASSETS_COUNT + 1))
            fi
          done

          if [[ $ASSETS_COUNT -eq 0 ]]; then
            echo "❌ No artifacts found!"
            exit 1
          fi

          cd ./release-assets

          # Generate checksums
          if ls *.zip >/dev/null 2>&1; then
            sha256sum *.zip > SHA256SUMS
            sha512sum *.zip > SHA512SUMS
          fi

          # Create signature placeholder files
          for file in *.zip; do
            echo "# Signature for $file" > "${file}.asc"
            echo "# GPG signature will be added in future versions" >> "${file}.asc"
          done

          echo "📦 Prepared assets:"
          ls -la

          echo "🔢 Asset count: $ASSETS_COUNT"

      - name: Upload to GitHub Release
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          TAG="${{ needs.build-check.outputs.version }}"

          cd ./release-assets

          # Upload all files
          for file in *; do
            if [[ -f "$file" ]]; then
              echo "📤 Uploading $file..."
              gh release upload "$TAG" "$file" --clobber
            fi
          done

          echo "✅ All assets uploaded successfully"

  # Update latest.json for stable releases only
  update-latest-version:
    name: Update Latest Version
    needs: [build-check, upload-release-assets]
    if: startsWith(github.ref, 'refs/tags/')
    runs-on: ubuntu-latest
    steps:
      - name: Update latest.json
        env:
          OSS_ACCESS_KEY_ID: ${{ secrets.ALICLOUDOSS_KEY_ID }}
          OSS_ACCESS_KEY_SECRET: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
          OSS_REGION: cn-beijing
          OSS_ENDPOINT: https://oss-cn-beijing.aliyuncs.com
        run: |
          if [[ -z "$OSS_ACCESS_KEY_ID" ]]; then
            echo "⚠️ OSS credentials not available, skipping latest.json update"
            exit 0
          fi

          VERSION="${{ needs.build-check.outputs.version }}"
          TAG="${{ needs.build-check.outputs.version }}"

          # Install ossutil
          OSSUTIL_VERSION="2.1.1"
          OSSUTIL_ZIP="ossutil-${OSSUTIL_VERSION}-linux-amd64.zip"
          OSSUTIL_DIR="ossutil-${OSSUTIL_VERSION}-linux-amd64"

          curl -o "$OSSUTIL_ZIP" "https://gosspublic.alicdn.com/ossutil/v2/${OSSUTIL_VERSION}/${OSSUTIL_ZIP}"
          unzip "$OSSUTIL_ZIP"
          mv "${OSSUTIL_DIR}/ossutil" /usr/local/bin/
          rm -rf "$OSSUTIL_DIR" "$OSSUTIL_ZIP"
          chmod +x /usr/local/bin/ossutil

          # Create latest.json
          cat > latest.json << EOF
          {
            "version": "${VERSION}",
            "tag": "${TAG}",
            "release_date": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
            "release_type": "stable",
            "download_url": "https://github.com/${{ github.repository }}/releases/tag/${TAG}"
          }
          EOF

          # Upload to OSS
          ossutil cp latest.json oss://rustfs-version/latest.json --force

          echo "✅ Updated latest.json for stable release $VERSION"

  # Publish release (remove draft status)
  publish-release:
    name: Publish Release
    needs: [build-check, create-release, upload-release-assets]
    if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
    runs-on: ubuntu-latest
    permissions:
      contents: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Update release notes and publish
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          TAG="${{ needs.build-check.outputs.version }}"
          VERSION="${{ needs.build-check.outputs.version }}"
          IS_PRERELEASE="${{ needs.build-check.outputs.is_prerelease }}"
          BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"

          # Determine release type
          if [[ "$BUILD_TYPE" == "prerelease" ]]; then
            if [[ "$TAG" == *"alpha"* ]]; then
              RELEASE_TYPE="alpha"
            elif [[ "$TAG" == *"beta"* ]]; then
              RELEASE_TYPE="beta"
            elif [[ "$TAG" == *"rc"* ]]; then
              RELEASE_TYPE="rc"
            else
              RELEASE_TYPE="prerelease"
            fi
          else
            RELEASE_TYPE="release"
          fi

          # Get original release notes from tag
          ORIGINAL_NOTES=$(git tag -l --format='%(contents)' "${TAG}")
          if [[ -z "$ORIGINAL_NOTES" || "$ORIGINAL_NOTES" =~ ^[[:space:]]*$ ]]; then
            if [[ "$IS_PRERELEASE" == "true" ]]; then
              ORIGINAL_NOTES="Pre-release ${VERSION} (${RELEASE_TYPE})"
            else
              ORIGINAL_NOTES="Release ${VERSION}"
            fi
          fi

          # Publish the release (remove draft status)
          gh release edit "$TAG" --draft=false

          echo "🎉 Released $TAG successfully!"
          echo "📄 Release URL: ${{ needs.create-release.outputs.release_url }}"
10 .github/workflows/ci.yml (vendored)
@@ -83,6 +83,16 @@ jobs:
          # Never skip release events and tag pushes
          do_not_skip: '["workflow_dispatch", "schedule", "merge_group", "release", "push"]'

  typos:
    name: Typos
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - uses: dtolnay/rust-toolchain@stable
      - name: Typos check with custom config file
        uses: crate-ci/typos@master

  test-and-lint:
    name: Test and Lint
    needs: skip-check
89 .github/workflows/docker.yml (vendored)
@@ -38,7 +38,6 @@ on:
  workflow_run:
    workflows: ["Build and Release"]
    types: [completed]
    branches: [main]
  # Manual trigger with same parameters for consistency
  workflow_dispatch:
    inputs:
@@ -83,6 +82,8 @@ jobs:
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          # For workflow_run events, checkout the specific commit that triggered the workflow
          ref: ${{ github.event.workflow_run.head_sha || github.sha }}

      - name: Check build conditions
        id: check
@@ -114,32 +115,62 @@ jobs:
            # Use Git to generate consistent short SHA (ensures uniqueness like build.yml)
            short_sha=$(git rev-parse --short "${{ github.event.workflow_run.head_sha }}")

            # Determine build type based on branch and commit
            if [[ "${{ github.event.workflow_run.head_branch }}" == "main" ]]; then
              build_type="development"
              version="dev-${short_sha}"
              # Skip Docker build for development builds
              should_build=false
              echo "⏭️ Skipping Docker build for development version (main branch)"
            elif [[ "${{ github.event.workflow_run.event }}" == "push" ]] && [[ "${{ github.event.workflow_run.head_branch }}" =~ ^refs/tags/ ]]; then
              # Tag push - only build for releases and prereleases
              tag_name="${{ github.event.workflow_run.head_branch }}"
              version="${tag_name#refs/tags/}"
              if [[ "$version" == *"alpha"* ]] || [[ "$version" == *"beta"* ]] || [[ "$version" == *"rc"* ]]; then
                build_type="prerelease"
                is_prerelease=true
                echo "🧪 Building Docker image for prerelease: $version"
            # Determine build type based on triggering workflow event and ref
            triggering_event="${{ github.event.workflow_run.event }}"
            head_branch="${{ github.event.workflow_run.head_branch }}"

            echo "🔍 Analyzing triggering workflow:"
            echo "  📋 Event: $triggering_event"
            echo "  🌿 Head branch: $head_branch"
            echo "  📎 Head SHA: ${{ github.event.workflow_run.head_sha }}"

            # Check if this was triggered by a tag push
            if [[ "$triggering_event" == "push" ]]; then
              # For tag pushes, head_branch will be like "refs/tags/v1.0.0" or just "v1.0.0"
              if [[ "$head_branch" == refs/tags/* ]]; then
                # Extract tag name from refs/tags/TAG_NAME
                tag_name="${head_branch#refs/tags/}"
                version="$tag_name"
              elif [[ "$head_branch" =~ ^v?[0-9]+\.[0-9]+\.[0-9]+ ]]; then
                # Direct tag name like "v1.0.0" or "1.0.0-alpha.1"
                version="$head_branch"
              elif [[ "$head_branch" == "main" ]]; then
                # Regular branch push to main
                build_type="development"
                version="dev-${short_sha}"
                should_build=false
                echo "⏭️ Skipping Docker build for development version (main branch push)"
              else
                build_type="release"
                create_latest=true
                echo "🚀 Building Docker image for release: $version"
                # Other branch push
                build_type="development"
                version="dev-${short_sha}"
                should_build=false
                echo "⏭️ Skipping Docker build for development version (branch: $head_branch)"
              fi

              # If we extracted a version (tag), determine release type
              if [[ -n "$version" ]] && [[ "$version" != "dev-${short_sha}" ]]; then
                # Remove 'v' prefix if present for consistent version format
                if [[ "$version" == v* ]]; then
                  version="${version#v}"
                fi

                if [[ "$version" == *"alpha"* ]] || [[ "$version" == *"beta"* ]] || [[ "$version" == *"rc"* ]]; then
                  build_type="prerelease"
                  is_prerelease=true
                  echo "🧪 Building Docker image for prerelease: $version"
                else
                  build_type="release"
                  create_latest=true
                  echo "🚀 Building Docker image for release: $version"
                fi
              fi
            else
              # Non-push events
              build_type="development"
              version="dev-${short_sha}"
              # Skip Docker build for development builds
              should_build=false
              echo "⏭️ Skipping Docker build for development version"
              echo "⏭️ Skipping Docker build for development version (event: $triggering_event)"
            fi

            echo "🔄 Build triggered by workflow_run:"
@@ -169,21 +200,23 @@ jobs:
              create_latest=true
              echo "🚀 Building with latest stable release version"
              ;;
            v[0-9]*)
              build_type="release"
              create_latest=true
              echo "📦 Building with specific release version: $input_version"
              ;;
            v*alpha*|v*beta*|v*rc*)
            # Prerelease versions (must match first, more specific)
            v*alpha*|v*beta*|v*rc*|*alpha*|*beta*|*rc*)
              build_type="prerelease"
              is_prerelease=true
              echo "🧪 Building with prerelease version: $input_version"
              ;;
            # Release versions (match after prereleases, more general)
            v[0-9]*|[0-9]*.*.*)
              build_type="release"
              create_latest=true
              echo "📦 Building with specific release version: $input_version"
              ;;
            *)
              # Invalid version for Docker build
              should_build=false
              echo "❌ Invalid version for Docker build: $input_version"
              echo "⚠️ Only release versions (latest, v1.0.0) and prereleases (v1.0.0-alpha1) are supported"
              echo "⚠️ Only release versions (latest, v1.0.0, 1.0.0) and prereleases (v1.0.0-alpha1, 1.0.0-beta2) are supported"
              ;;
          esac
        fi
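
The corrected pattern order in this hunk matters: the prerelease globs must be tried before the broader release globs, or `v1.0.0-alpha1` would match `v[0-9]*` first. A standalone sketch (not part of the workflow) for sanity-checking the classification:

```bash
# Classify sample inputs with the same glob order the workflow step uses.
for v in latest v1.2.3 1.2.3 v1.0.0-alpha1 1.0.0-beta2 v2.0.0-rc1 nightly; do
  case "$v" in
    latest)                                     echo "$v -> release (latest)" ;;
    v*alpha*|v*beta*|v*rc*|*alpha*|*beta*|*rc*) echo "$v -> prerelease" ;;
    v[0-9]*|[0-9]*.*.*)                         echo "$v -> release" ;;
    *)                                          echo "$v -> invalid" ;;
  esac
done
```
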
78 .github/workflows/release-notes-template.md (vendored)
@@ -1,78 +0,0 @@
## RustFS ${VERSION_CLEAN}

${ORIGINAL_NOTES}

---

### 🚀 Quick Download

**Linux (Static Binaries - No Dependencies):**

```bash
# x86_64 (Intel/AMD)
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-x86_64-unknown-linux-musl.zip
unzip rustfs-x86_64-unknown-linux-musl.zip
sudo mv rustfs /usr/local/bin/

# ARM64 (Graviton, Apple Silicon VMs)
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-aarch64-unknown-linux-musl.zip
unzip rustfs-aarch64-unknown-linux-musl.zip
sudo mv rustfs /usr/local/bin/
```

**macOS:**

```bash
# Apple Silicon (M1/M2/M3)
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-aarch64-apple-darwin.zip
unzip rustfs-aarch64-apple-darwin.zip
sudo mv rustfs /usr/local/bin/

# Intel
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-x86_64-apple-darwin.zip
unzip rustfs-x86_64-apple-darwin.zip
sudo mv rustfs /usr/local/bin/
```

### 📁 Available Downloads

| Platform | Architecture | File | Description |
|----------|-------------|------|-------------|
| Linux | x86_64 | `rustfs-x86_64-unknown-linux-musl.zip` | Static binary, no dependencies |
| Linux | ARM64 | `rustfs-aarch64-unknown-linux-musl.zip` | Static binary, no dependencies |
| macOS | Apple Silicon | `rustfs-aarch64-apple-darwin.zip` | Native binary, ZIP archive |
| macOS | Intel | `rustfs-x86_64-apple-darwin.zip` | Native binary, ZIP archive |

### 🔐 Verification

Download checksums and verify your download:

```bash
# Download checksums
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/SHA256SUMS

# Verify (Linux)
sha256sum -c SHA256SUMS --ignore-missing

# Verify (macOS)
shasum -a 256 -c SHA256SUMS --ignore-missing
```

### 🛠️ System Requirements

- **Linux**: Any distribution with glibc 2.17+ (CentOS 7+, Ubuntu 16.04+)
- **macOS**: 10.15+ (Catalina or later)
- **Windows**: Windows 10 version 1809 or later

### 📚 Documentation

- [Installation Guide](https://github.com/rustfs/rustfs#installation)
- [Quick Start](https://github.com/rustfs/rustfs#quick-start)
- [Configuration](https://github.com/rustfs/rustfs/blob/main/docs/)
- [API Documentation](https://docs.rs/rustfs)

### 🆘 Support

- 🐛 [Report Issues](https://github.com/rustfs/rustfs/issues)
- 💬 [Community Discussions](https://github.com/rustfs/rustfs/discussions)
- 📖 [Documentation](https://github.com/rustfs/rustfs/tree/main/docs)
353 .github/workflows/release.yml (vendored)
@@ -1,353 +0,0 @@
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: Release

on:
  push:
    tags: ["*.*.*"]
  workflow_dispatch:
    inputs:
      tag:
        description: "Tag to create release for"
        required: true
        type: string

env:
  CARGO_TERM_COLOR: always

jobs:
  # Determine release type
  release-check:
    name: Release Type Check
    runs-on: ubuntu-latest
    outputs:
      tag: ${{ steps.check.outputs.tag }}
      version: ${{ steps.check.outputs.version }}
      is_prerelease: ${{ steps.check.outputs.is_prerelease }}
      release_type: ${{ steps.check.outputs.release_type }}
    steps:
      - name: Determine release type
        id: check
        run: |
          if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
            TAG="${{ github.event.inputs.tag }}"
          else
            TAG="${GITHUB_REF#refs/tags/}"
          fi

          VERSION="${TAG}"

          # Check if this is a prerelease
          IS_PRERELEASE=false
          RELEASE_TYPE="release"

          if [[ "$TAG" == *"alpha"* ]] || [[ "$TAG" == *"beta"* ]] || [[ "$TAG" == *"rc"* ]]; then
            IS_PRERELEASE=true
            if [[ "$TAG" == *"alpha"* ]]; then
              RELEASE_TYPE="alpha"
            elif [[ "$TAG" == *"beta"* ]]; then
              RELEASE_TYPE="beta"
            elif [[ "$TAG" == *"rc"* ]]; then
              RELEASE_TYPE="rc"
            fi
          fi

          echo "tag=$TAG" >> $GITHUB_OUTPUT
          echo "version=$VERSION" >> $GITHUB_OUTPUT
          echo "is_prerelease=$IS_PRERELEASE" >> $GITHUB_OUTPUT
          echo "release_type=$RELEASE_TYPE" >> $GITHUB_OUTPUT

          echo "📦 Release Type: $RELEASE_TYPE"
          echo "🏷️ Tag: $TAG"
          echo "🔢 Version: $VERSION"
          echo "🚀 Is Prerelease: $IS_PRERELEASE"

  # Create GitHub Release
  create-release:
    name: Create GitHub Release
    needs: release-check
    runs-on: ubuntu-latest
    permissions:
      contents: write
    outputs:
      release_id: ${{ steps.create.outputs.release_id }}
      release_url: ${{ steps.create.outputs.release_url }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Create GitHub Release
        id: create
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          TAG="${{ needs.release-check.outputs.tag }}"
          VERSION="${{ needs.release-check.outputs.version }}"
          IS_PRERELEASE="${{ needs.release-check.outputs.is_prerelease }}"
          RELEASE_TYPE="${{ needs.release-check.outputs.release_type }}"

          # Check if release already exists
          if gh release view "$TAG" >/dev/null 2>&1; then
            echo "Release $TAG already exists"
            RELEASE_ID=$(gh release view "$TAG" --json databaseId --jq '.databaseId')
            RELEASE_URL=$(gh release view "$TAG" --json url --jq '.url')
          else
            # Get release notes from tag message
            RELEASE_NOTES=$(git tag -l --format='%(contents)' "${TAG}")
            if [[ -z "$RELEASE_NOTES" || "$RELEASE_NOTES" =~ ^[[:space:]]*$ ]]; then
              if [[ "$IS_PRERELEASE" == "true" ]]; then
                RELEASE_NOTES="Pre-release ${VERSION} (${RELEASE_TYPE})"
              else
                RELEASE_NOTES="Release ${VERSION}"
              fi
            fi

            # Create release title
            if [[ "$IS_PRERELEASE" == "true" ]]; then
              TITLE="RustFS $VERSION (${RELEASE_TYPE})"
            else
              TITLE="RustFS $VERSION"
            fi

            # Create the release
            PRERELEASE_FLAG=""
            if [[ "$IS_PRERELEASE" == "true" ]]; then
              PRERELEASE_FLAG="--prerelease"
            fi

            gh release create "$TAG" \
              --title "$TITLE" \
              --notes "$RELEASE_NOTES" \
              $PRERELEASE_FLAG \
              --draft

            RELEASE_ID=$(gh release view "$TAG" --json databaseId --jq '.databaseId')
            RELEASE_URL=$(gh release view "$TAG" --json url --jq '.url')
          fi

          echo "release_id=$RELEASE_ID" >> $GITHUB_OUTPUT
          echo "release_url=$RELEASE_URL" >> $GITHUB_OUTPUT
          echo "Created release: $RELEASE_URL"

  # Wait for build artifacts from build.yml
  wait-for-artifacts:
    name: Wait for Build Artifacts
    needs: release-check
    runs-on: ubuntu-latest
    steps:
      - name: Wait for build workflow
        uses: lewagon/wait-on-check-action@v1.3.1
        with:
          ref: ${{ needs.release-check.outputs.tag }}
          check-name: "Build RustFS"
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          wait-interval: 30
          allowed-conclusions: success

  # Download and prepare release assets
  prepare-assets:
    name: Prepare Release Assets
    needs: [release-check, wait-for-artifacts]
    runs-on: ubuntu-latest
    outputs:
      assets_prepared: ${{ steps.prepare.outputs.assets_prepared }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Download artifacts from build workflow
        uses: actions/download-artifact@v4
        with:
          path: ./artifacts
          pattern: rustfs-*
          merge-multiple: true

      - name: Prepare release assets
        id: prepare
        run: |
          VERSION="${{ needs.release-check.outputs.version }}"
          TAG="${{ needs.release-check.outputs.tag }}"

          mkdir -p ./release-assets

          # Copy and verify artifacts
          ASSETS_COUNT=0
          for file in ./artifacts/rustfs-*.zip; do
            if [[ -f "$file" ]]; then
              cp "$file" ./release-assets/
              ASSETS_COUNT=$((ASSETS_COUNT + 1))
            fi
          done

          if [[ $ASSETS_COUNT -eq 0 ]]; then
            echo "❌ No artifacts found!"
            exit 1
          fi

          cd ./release-assets

          # Generate checksums
          if ls *.zip >/dev/null 2>&1; then
            sha256sum *.zip > SHA256SUMS
            sha512sum *.zip > SHA512SUMS
          fi

          # TODO: Add GPG signing for signatures
          # For now, create placeholder signature files
          for file in *.zip; do
            echo "# Signature for $file" > "${file}.asc"
            echo "# GPG signature will be added in future versions" >> "${file}.asc"
          done

          echo "assets_prepared=true" >> $GITHUB_OUTPUT

          echo "📦 Prepared assets:"
          ls -la

          echo "🔢 Asset count: $ASSETS_COUNT"

      - name: Upload prepared assets
        uses: actions/upload-artifact@v4
        with:
          name: release-assets-${{ needs.release-check.outputs.tag }}
          path: ./release-assets/
          retention-days: 30

  # Upload assets to GitHub Release
  upload-assets:
    name: Upload Release Assets
    needs: [release-check, create-release, prepare-assets]
    runs-on: ubuntu-latest
    permissions:
      contents: write
    steps:
      - name: Download prepared assets
        uses: actions/download-artifact@v4
        with:
          name: release-assets-${{ needs.release-check.outputs.tag }}
          path: ./release-assets

      - name: Upload to GitHub Release
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          TAG="${{ needs.release-check.outputs.tag }}"

          cd ./release-assets

          # Upload all files
          for file in *; do
            if [[ -f "$file" ]]; then
              echo "📤 Uploading $file..."
              gh release upload "$TAG" "$file" --clobber
            fi
          done

          echo "✅ All assets uploaded successfully"

  # Update latest.json for stable releases only
  update-latest:
    name: Update Latest Version
    needs: [release-check, upload-assets]
    if: needs.release-check.outputs.is_prerelease == 'false'
    runs-on: ubuntu-latest
    steps:
      - name: Update latest.json
        env:
          OSS_ACCESS_KEY_ID: ${{ secrets.ALICLOUDOSS_KEY_ID }}
          OSS_ACCESS_KEY_SECRET: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
        run: |
          if [[ -z "$OSS_ACCESS_KEY_ID" ]]; then
            echo "⚠️ OSS credentials not available, skipping latest.json update"
            exit 0
          fi

          VERSION="${{ needs.release-check.outputs.version }}"
          TAG="${{ needs.release-check.outputs.tag }}"

          # Install ossutil
          OSSUTIL_VERSION="2.1.1"
          OSSUTIL_ZIP="ossutil-${OSSUTIL_VERSION}-linux-amd64.zip"
          OSSUTIL_DIR="ossutil-${OSSUTIL_VERSION}-linux-amd64"

          curl -o "$OSSUTIL_ZIP" "https://gosspublic.alicdn.com/ossutil/v2/${OSSUTIL_VERSION}/${OSSUTIL_ZIP}"
          unzip "$OSSUTIL_ZIP"
          chmod +x "${OSSUTIL_DIR}/ossutil"

          # Create latest.json
          cat > latest.json << EOF
          {
            "version": "${VERSION}",
            "tag": "${TAG}",
            "release_date": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
            "release_type": "stable",
            "download_url": "https://github.com/${{ github.repository }}/releases/tag/${TAG}"
          }
          EOF

          # Upload to OSS
          ./${OSSUTIL_DIR}/ossutil cp latest.json oss://rustfs-version/latest.json --force

          echo "✅ Updated latest.json for stable release $VERSION"

  # Publish release (remove draft status)
  publish-release:
    name: Publish Release
    needs: [release-check, create-release, upload-assets]
    runs-on: ubuntu-latest
    permissions:
      contents: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Update release notes and publish
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          TAG="${{ needs.release-check.outputs.tag }}"
          VERSION="${{ needs.release-check.outputs.version }}"
          IS_PRERELEASE="${{ needs.release-check.outputs.is_prerelease }}"
          RELEASE_TYPE="${{ needs.release-check.outputs.release_type }}"

          # Get original release notes from tag
          ORIGINAL_NOTES=$(git tag -l --format='%(contents)' "${TAG}")
          if [[ -z "$ORIGINAL_NOTES" || "$ORIGINAL_NOTES" =~ ^[[:space:]]*$ ]]; then
            if [[ "$IS_PRERELEASE" == "true" ]]; then
              ORIGINAL_NOTES="Pre-release ${VERSION} (${RELEASE_TYPE})"
            else
              ORIGINAL_NOTES="Release ${VERSION}"
            fi
          fi

          # Use release notes template if available
          if [[ -f ".github/workflows/release-notes-template.md" ]]; then
            # Substitute variables in template
            sed -e "s/\${VERSION}/$TAG/g" \
                -e "s/\${VERSION_CLEAN}/$VERSION/g" \
                -e "s/\${ORIGINAL_NOTES}/$(echo "$ORIGINAL_NOTES" | sed 's/[[\.*^$()+?{|]/\\&/g')/g" \
                .github/workflows/release-notes-template.md > enhanced_notes.md

            # Update release notes
            gh release edit "$TAG" --notes-file enhanced_notes.md
          fi

          # Publish the release (remove draft status)
          gh release edit "$TAG" --draft=false

          echo "🎉 Released $TAG successfully!"
          echo "📄 Release URL: ${{ needs.create-release.outputs.release_url }}"
247 Cargo.lock (generated)
@@ -455,7 +455,10 @@ dependencies = [
 "serde_repr",
 "tokio",
 "url",
 "zbus 5.8.0",
 "wayland-backend",
 "wayland-client",
 "wayland-protocols",
 "zbus 5.9.0",
]

[[package]]
@@ -512,7 +515,7 @@ dependencies = [
 "futures-lite",
 "parking",
 "polling",
 "rustix 1.0.7",
 "rustix 1.0.8",
 "slab",
 "tracing",
 "windows-sys 0.59.0",
@@ -544,7 +547,7 @@ dependencies = [
 "cfg-if",
 "event-listener",
 "futures-lite",
 "rustix 1.0.7",
 "rustix 1.0.8",
 "tracing",
]

@@ -571,7 +574,7 @@ dependencies = [
 "cfg-if",
 "futures-core",
 "futures-io",
 "rustix 1.0.7",
 "rustix 1.0.8",
 "signal-hook-registry",
 "slab",
 "windows-sys 0.59.0",
@@ -673,9 +676,9 @@ checksum = "c08606f8c3cbf4ce6ec8e28fb0014a2c086708fe954eaa885384a6165172e7e8"

[[package]]
name = "aws-credential-types"
version = "1.2.3"
version = "1.2.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "687bc16bc431a8533fe0097c7f0182874767f920989d7260950172ae8e3c4465"
checksum = "b68c2194a190e1efc999612792e25b1ab3abfefe4306494efaaabc25933c0cbe"
dependencies = [
 "aws-smithy-async",
 "aws-smithy-runtime-api",
@@ -708,9 +711,9 @@ dependencies = [

[[package]]
name = "aws-runtime"
version = "1.5.8"
version = "1.5.9"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4f6c68419d8ba16d9a7463671593c54f81ba58cab466e9b759418da606dcc2e2"
checksum = "b2090e664216c78e766b6bac10fe74d2f451c02441d43484cd76ac9a295075f7"
dependencies = [
 "aws-credential-types",
 "aws-sigv4",
@@ -733,9 +736,9 @@ dependencies = [

[[package]]
name = "aws-sdk-s3"
version = "1.96.0"
version = "1.98.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6e25d24de44b34dcdd5182ac4e4c6f07bcec2661c505acef94c0d293b65505fe"
checksum = "029e89cae7e628553643aecb3a3f054a0a0912ff0fd1f5d6a0b4fda421dce64b"
dependencies = [
 "aws-credential-types",
 "aws-runtime",
@@ -806,9 +809,9 @@ dependencies = [

[[package]]
name = "aws-smithy-checksums"
version = "0.63.4"
version = "0.63.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "244f00666380d35c1c76b90f7b88a11935d11b84076ac22a4c014ea0939627af"
checksum = "5ab9472f7a8ec259ddb5681d2ef1cb1cf16c0411890063e67cdc7b62562cc496"
dependencies = [
 "aws-smithy-http",
 "aws-smithy-types",
@@ -826,9 +829,9 @@ dependencies = [

[[package]]
name = "aws-smithy-eventstream"
version = "0.60.9"
version = "0.60.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "338a3642c399c0a5d157648426110e199ca7fd1c689cc395676b81aa563700c4"
checksum = "604c7aec361252b8f1c871a7641d5e0ba3a7f5a586e51b66bc9510a5519594d9"
dependencies = [
 "aws-smithy-types",
 "bytes",
@@ -837,9 +840,9 @@ dependencies = [

[[package]]
name = "aws-smithy-http"
version = "0.62.1"
version = "0.62.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "99335bec6cdc50a346fda1437f9fefe33abf8c99060739a546a16457f2862ca9"
checksum = "43c82ba4cab184ea61f6edaafc1072aad3c2a17dcf4c0fce19ac5694b90d8b5f"
dependencies = [
 "aws-smithy-eventstream",
 "aws-smithy-runtime-api",
@@ -865,7 +868,7 @@ dependencies = [
 "aws-smithy-async",
 "aws-smithy-runtime-api",
 "aws-smithy-types",
 "h2 0.3.26",
 "h2 0.3.27",
 "h2 0.4.11",
 "http 0.2.12",
 "http 1.3.1",
@@ -1574,23 +1577,12 @@ dependencies = [

[[package]]
name = "chrono-tz"
version = "0.10.3"
version = "0.10.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "efdce149c370f133a071ca8ef6ea340b7b88748ab0810097a9e2976eaa34b4f3"
checksum = "a6139a8597ed92cf816dfb33f5dd6cf0bb93a6adc938f11039f371bc5bcd26c3"
dependencies = [
 "chrono",
 "chrono-tz-build",
 "phf 0.11.3",
]

[[package]]
name = "chrono-tz-build"
version = "0.4.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "8f10f8c9340e31fc120ff885fcdb54a0b48e474bbd77cab557f0c30a3e569402"
dependencies = [
 "parse-zoneinfo",
 "phf_codegen 0.11.3",
 "phf 0.12.1",
]

[[package]]
@@ -1850,18 +1842,18 @@ dependencies = [

[[package]]
name = "const-str"
version = "0.6.2"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e991226a70654b49d34de5ed064885f0bef0348a8e70018b8ff1ac80aa984a2"
checksum = "041fbfcf8e7054df725fb9985297e92422cdc80fcf313665f5ca3d761bb63f4c"
dependencies = [
 "const-str-proc-macro",
]

[[package]]
name = "const-str-proc-macro"
version = "0.6.2"
version = "0.6.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f0d1c4c3cb85e5856b34e829af0035d7154f8c2889b15bbf43c8a6c6786dcab5"
checksum = "f801882b7ecd4188f4bca0317f34e022d623590d85893d7024b18d14f2a3b40b"
dependencies = [
 "proc-macro2",
 "quote",
@@ -2031,9 +2023,9 @@ dependencies = [

[[package]]
name = "crc32fast"
version = "1.4.2"
version = "1.5.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "a97769d94ddab943e4510d138150169a2758b5ef3eb191a9ee688de3e23ef7b3"
checksum = "9481c1c90cbf2ac953f07c8d4a58aa3945c425b7185c9154d67a65e4230da511"
dependencies = [
 "cfg-if",
]
@@ -3384,18 +3376,6 @@ version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "bd0c93bb4b0c6d9b77f4435b0ae98c24d17f1c45b2ff844c6151a07256ca923b"

[[package]]
name = "dispatch2"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1a0d569e003ff27784e0e14e4a594048698e0c0f0b66cabcb51511be55a7caa0"
dependencies = [
 "bitflags 2.9.1",
 "block2 0.6.1",
 "libc",
 "objc2 0.6.1",
]

[[package]]
name = "dispatch2"
version = "0.3.0"
@@ -3403,6 +3383,8 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "89a09f22a6c6069a18470eb92d2298acf25463f14256d24778e1230d789a2aec"
dependencies = [
 "bitflags 2.9.1",
 "block2 0.6.1",
 "libc",
 "objc2 0.6.1",
]

@@ -3417,6 +3399,15 @@ dependencies = [
 "syn 2.0.104",
]

[[package]]
name = "dlib"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "330c60081dcc4c72131f8eb70510f1ac07223e5d4163db481a04a0befcffa412"
dependencies = [
 "libloading 0.8.8",
]

[[package]]
name = "dlopen2"
version = "0.7.0"
@@ -3440,6 +3431,12 @@ dependencies = [
 "syn 2.0.104",
]

[[package]]
name = "downcast-rs"
version = "1.2.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "75b325c5dbd37f80359721ad39aca5a29fb04c89279657cffdda8736d0c0b9d2"

[[package]]
name = "dpi"
version = "0.1.2"
@@ -4375,9 +4372,9 @@ dependencies = [

[[package]]
name = "h2"
version = "0.3.26"
version = "0.3.27"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "81fe527a889e1532da5c525686d96d4c2e74cdd345badf8dfef9f6b39dd5f5e8"
checksum = "0beca50380b1fc32983fc1cb4587bfa4bb9e78fc259aad4a0032d2080309222d"
dependencies = [
 "bytes",
 "fnv",
@@ -4623,7 +4620,7 @@ dependencies = [
 "futures-channel",
 "futures-core",
 "futures-util",
 "h2 0.3.26",
 "h2 0.3.27",
 "http 0.2.12",
 "http-body 0.4.6",
 "httparse",
@@ -6212,7 +6209,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1c10c2894a6fed806ade6027bcd50662746363a9589d3ec9d9bef30a4e4bc166"
dependencies = [
 "bitflags 2.9.1",
 "dispatch2 0.3.0",
 "dispatch2",
 "objc2 0.6.1",
]

@@ -6611,15 +6608,6 @@ dependencies = [
 "zstd",
]

[[package]]
name = "parse-zoneinfo"
version = "0.3.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1f2a05b18d44e2957b88f96ba460715e295bc1d7510468a2f3d3b44535d26c24"
dependencies = [
 "regex",
]

[[package]]
name = "password-hash"
version = "0.5.0"
@@ -6728,11 +6716,11 @@ dependencies = [

[[package]]
name = "phf"
version = "0.11.3"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "1fd6780a80ae0c52cc120a26a1a42c1ae51b247a253e4e06113d23d2c2edd078"
checksum = "913273894cec178f401a31ec4b656318d95473527be05c0752cc41cdc32be8b7"
dependencies = [
 "phf_shared 0.11.3",
 "phf_shared 0.12.1",
]

[[package]]
@@ -6755,16 +6743,6 @@ dependencies = [
 "phf_shared 0.10.0",
]

[[package]]
name = "phf_codegen"
version = "0.11.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "aef8048c789fa5e851558d709946d6d79a8ff88c0440c587967f8e94bfb1216a"
dependencies = [
 "phf_generator 0.11.3",
 "phf_shared 0.11.3",
]

[[package]]
name = "phf_generator"
version = "0.8.0"
@@ -6836,6 +6814,15 @@ dependencies = [
 "siphasher 1.0.1",
]

[[package]]
name = "phf_shared"
version = "0.12.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "06005508882fb681fd97892ecff4b7fd0fee13ef1aa569f8695dae7ab9099981"
dependencies = [
 "siphasher 1.0.1",
]

[[package]]
name = "pin-project"
version = "1.1.10"
@@ -6967,7 +6954,7 @@ dependencies = [
 "concurrent-queue",
 "hermit-abi",
 "pin-project-lite",
 "rustix 1.0.7",
 "rustix 1.0.8",
 "tracing",
 "windows-sys 0.59.0",
]
@@ -7647,13 +7634,13 @@ dependencies = [

[[package]]
name = "rfd"
version = "0.15.3"
version = "0.15.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "80c844748fdc82aae252ee4594a89b6e7ebef1063de7951545564cbc4e57075d"
checksum = "ef2bee61e6cffa4635c72d7d81a84294e28f0930db0ddcb0f66d10244674ebed"
dependencies = [
 "ashpd 0.11.0",
 "block2 0.6.1",
 "dispatch2 0.2.0",
 "dispatch2",
 "js-sys",
 "log",
 "objc2 0.6.1",
@@ -8107,7 +8094,7 @@ dependencies = [
 "dirs",
 "hex",
 "keyring",
 "rfd 0.15.3",
 "rfd 0.15.4",
 "rust-embed",
 "rust-i18n",
 "serde",
@@ -8443,15 +8430,15 @@ dependencies = [

[[package]]
name = "rustix"
version = "1.0.7"
version = "1.0.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c71e83d6afe7ff64890ec6b71d6a69bb8a610ab78ce364b3352876bb4c801266"
checksum = "11181fbabf243db407ef8df94a6ce0b2f9a733bd8be4ad02b4eda9602296cac8"
dependencies = [
 "bitflags 2.9.1",
 "errno",
 "libc",
 "linux-raw-sys 0.9.4",
 "windows-sys 0.59.0",
 "windows-sys 0.60.2",
]

[[package]]
@@ -8675,6 +8662,12 @@ dependencies = [
 "windows-sys 0.59.0",
]

[[package]]
name = "scoped-tls"
version = "1.0.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e1cf6437eb19a8f4a6cc0f7dca544973b0b78843adbfeb3683d1a94a0024a294"

[[package]]
name = "scopeguard"
version = "1.2.0"
@@ -9698,7 +9691,7 @@ dependencies = [
 "fastrand",
 "getrandom 0.3.3",
 "once_cell",
 "rustix 1.0.7",
 "rustix 1.0.8",
 "windows-sys 0.59.0",
]

@@ -10079,7 +10072,7 @@ dependencies = [
 "serde_spanned",
 "toml_datetime",
 "toml_write",
 "winnow 0.7.11",
 "winnow 0.7.12",
]

[[package]]
@@ -10769,6 +10762,66 @@ dependencies = [
 "web-sys",
]

[[package]]
name = "wayland-backend"
version = "0.3.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "fe770181423e5fc79d3e2a7f4410b7799d5aab1de4372853de3c6aa13ca24121"
dependencies = [
 "cc",
 "downcast-rs",
 "rustix 0.38.44",
 "scoped-tls",
 "smallvec",
 "wayland-sys",
]

[[package]]
name = "wayland-client"
version = "0.31.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "978fa7c67b0847dbd6a9f350ca2569174974cd4082737054dbb7fbb79d7d9a61"
dependencies = [
 "bitflags 2.9.1",
 "rustix 0.38.44",
 "wayland-backend",
 "wayland-scanner",
]

[[package]]
name = "wayland-protocols"
version = "0.32.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "779075454e1e9a521794fed15886323ea0feda3f8b0fc1390f5398141310422a"
dependencies = [
 "bitflags 2.9.1",
 "wayland-backend",
 "wayland-client",
 "wayland-scanner",
]

[[package]]
name = "wayland-scanner"
version = "0.31.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "896fdafd5d28145fce7958917d69f2fd44469b1d4e861cb5961bcbeebc6d1484"
dependencies = [
 "proc-macro2",
 "quick-xml 0.37.5",
 "quote",
]

[[package]]
name = "wayland-sys"
version = "0.31.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "dbcebb399c77d5aa9fa5db874806ee7b4eba4e73650948e8f93963f128896615"
dependencies = [
 "dlib",
 "log",
 "pkg-config",
]

[[package]]
name = "web-sys"
version = "0.3.77"
@@ -11440,9 +11493,9 @@ dependencies = [

[[package]]
name = "winnow"
version = "0.7.11"
version = "0.7.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "74c7b26e3480b707944fc872477815d29a8e429d2f93a1ce000f5fa84a15cbcd"
checksum = "f3edebf492c8125044983378ecb5766203ad3b4c2f7a922bd7dd207f6d443e95"
dependencies = [
 "memchr",
]
@@ -11542,7 +11595,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "af3a19837351dc82ba89f8a125e22a3c475f05aba604acc023d62b2739ae2909"
dependencies = [
 "libc",
 "rustix 1.0.7",
 "rustix 1.0.8",
]

[[package]]
@@ -11641,9 +11694,9 @@ dependencies = [

[[package]]
name = "zbus"
version = "5.8.0"
version = "5.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "597f45e98bc7e6f0988276012797855613cd8269e23b5be62cc4e5d28b7e515d"
checksum = "4bb4f9a464286d42851d18a605f7193b8febaf5b0919d71c6399b7b26e5b0aad"
dependencies = [
 "async-broadcast",
 "async-recursion",
@@ -11661,8 +11714,8 @@ dependencies = [
 "tracing",
 "uds_windows",
 "windows-sys 0.59.0",
 "winnow 0.7.11",
 "zbus_macros 5.8.0",
 "winnow 0.7.12",
 "zbus_macros 5.9.0",
 "zbus_names 4.2.0",
 "zvariant 5.6.0",
]
@@ -11682,9 +11735,9 @@ dependencies = [

[[package]]
name = "zbus_macros"
version = "5.8.0"
version = "5.9.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "e5c8e4e14dcdd9d97a98b189cd1220f30e8394ad271e8c987da84f73693862c2"
checksum = "ef9859f68ee0c4ee2e8cde84737c78e3f4c54f946f2a38645d0d4c7a95327659"
dependencies = [
 "proc-macro-crate 3.3.0",
 "proc-macro2",
@@ -11714,7 +11767,7 @@ checksum = "7be68e64bf6ce8db94f63e72f0c7eb9a60d733f7e0499e628dfab0f84d6bcb97"
dependencies = [
 "serde",
 "static_assertions",
 "winnow 0.7.11",
 "winnow 0.7.12",
 "zvariant 5.6.0",
]

@@ -11906,7 +11959,7 @@ dependencies = [
 "enumflags2",
 "serde",
 "url",
 "winnow 0.7.11",
 "winnow 0.7.12",
 "zvariant_derive 5.6.0",
 "zvariant_utils 3.2.0",
]
@@ -11959,5 +12012,5 @@ dependencies = [
 "serde",
 "static_assertions",
 "syn 2.0.104",
 "winnow 0.7.11",
 "winnow 0.7.12",
]
@@ -108,8 +108,8 @@ cfg-if = "1.0.1"
chacha20poly1305 = { version = "0.10.1" }
chrono = { version = "0.4.41", features = ["serde"] }
clap = { version = "4.5.41", features = ["derive", "env"] }
const-str = { version = "0.6.2", features = ["std", "proc"] }
crc32fast = "1.4.2"
const-str = { version = "0.6.3", features = ["std", "proc"] }
crc32fast = "1.5.0"
criterion = { version = "0.5", features = ["html_reports"] }
dashmap = "6.1.0"
datafusion = "46.0.1"
@@ -196,7 +196,7 @@ reqwest = { version = "0.12.22", default-features = false, features = [
    "json",
    "blocking",
] }
rfd = { version = "0.15.3", default-features = false, features = [
rfd = { version = "0.15.4", default-features = false, features = [
    "xdg-portal",
    "tokio",
] }
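
These manifest bumps pair with the Cargo.lock changes above. A sketch of how such bumps are typically produced — the Cargo.toml version fields are edited to match, and the lockfile refreshed with Cargo itself (crate names taken from this diff):

```bash
# Refresh individual locked dependencies to exact versions.
cargo update -p const-str --precise 0.6.3
cargo update -p crc32fast --precise 1.5.0

# Verify the workspace still builds and the lockfile is consistent.
cargo check --workspace
```
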
152 Dockerfile
@@ -1,121 +1,95 @@
# Multi-stage build for RustFS production image
FROM alpine:latest AS build

# Build arguments - use TARGETPLATFORM for consistency with Dockerfile.source
ARG TARGETPLATFORM
ARG BUILDPLATFORM
# Build stage: Download and extract RustFS binary
FROM alpine:3.22 AS build

# Build arguments for platform and release
ARG TARGETARCH
ARG RELEASE=latest

# Install dependencies for downloading and verifying binaries
RUN apk add --no-cache \
    ca-certificates \
    curl \
    bash \
    wget \
    unzip \
    jq
# Install minimal dependencies for downloading and extracting
RUN apk add --no-cache ca-certificates curl unzip

# Create build directory
WORKDIR /build

# Map TARGETPLATFORM to architecture format used in builds
RUN case "${TARGETPLATFORM}" in \
    "linux/amd64") ARCH="x86_64" ;; \
    "linux/arm64") ARCH="aarch64" ;; \
    *) echo "Unsupported platform: ${TARGETPLATFORM}" && exit 1 ;; \
# Detect architecture and download corresponding binary
RUN case "${TARGETARCH}" in \
    amd64) ARCH="x86_64" ;; \
    arm64) ARCH="aarch64" ;; \
    *) echo "Unsupported architecture: ${TARGETARCH}" >&2 && exit 1 ;; \
    esac && \
    echo "ARCH=${ARCH}" > /build/arch.env

# Download rustfs binary from dl.rustfs.com (release channel only)
RUN . /build/arch.env && \
    BASE_URL="https://dl.rustfs.com/artifacts/rustfs/release" && \
    PLATFORM="linux" && \
    if [ "${RELEASE}" = "latest" ]; then \
    # Download latest release version \
    PACKAGE_NAME="rustfs-${PLATFORM}-${ARCH}-latest.zip"; \
    DOWNLOAD_URL="${BASE_URL}/${PACKAGE_NAME}"; \
    echo "📥 Downloading latest release build: ${PACKAGE_NAME}"; \
    VERSION="latest"; \
    else \
    # Download specific release version \
    PACKAGE_NAME="rustfs-${PLATFORM}-${ARCH}-v${RELEASE}.zip"; \
    DOWNLOAD_URL="${BASE_URL}/${PACKAGE_NAME}"; \
    echo "📥 Downloading specific release version: ${PACKAGE_NAME}"; \
    VERSION="v${RELEASE#v}"; \
    fi && \
    echo "🔗 Download URL: ${DOWNLOAD_URL}" && \
    curl -f -L "${DOWNLOAD_URL}" -o /build/rustfs.zip && \
    if [ ! -f /build/rustfs.zip ] || [ ! -s /build/rustfs.zip ]; then \
    echo "❌ Failed to download binary package"; \
    echo "💡 Make sure the package ${PACKAGE_NAME} exists"; \
    echo "🔗 Check: ${DOWNLOAD_URL}"; \
    exit 1; \
    fi && \
    unzip /build/rustfs.zip -d /build && \
    BASE_URL="https://dl.rustfs.com/artifacts/rustfs/release" && \
    PACKAGE_NAME="rustfs-linux-${ARCH}-${VERSION}.zip" && \
    DOWNLOAD_URL="${BASE_URL}/${PACKAGE_NAME}" && \
    echo "Downloading ${PACKAGE_NAME} from ${DOWNLOAD_URL}" >&2 && \
    curl -f -L "${DOWNLOAD_URL}" -o rustfs.zip && \
    unzip rustfs.zip -d /build && \
    chmod +x /build/rustfs && \
    rm /build/rustfs.zip && \
    echo "✅ Successfully downloaded and extracted rustfs binary"
    rm rustfs.zip || { echo "Failed to download or extract ${PACKAGE_NAME}" >&2; exit 1; }
|
||||
|
||||
# Runtime stage
|
||||
FROM alpine:latest
|
||||
# Runtime stage: Configure runtime environment
|
||||
FROM alpine:3.22.1
|
||||
|
||||
# Set build arguments and labels
|
||||
# Build arguments and labels
|
||||
ARG RELEASE=latest
|
||||
ARG BUILD_DATE
|
||||
ARG VCS_REF
|
||||
|
||||
LABEL name="RustFS" \
|
||||
vendor="RustFS Team" \
|
||||
maintainer="RustFS Team <dev@rustfs.com>" \
|
||||
version="${RELEASE}" \
|
||||
release="${RELEASE}" \
|
||||
build-date="${BUILD_DATE}" \
|
||||
vcs-ref="${VCS_REF}" \
|
||||
summary="RustFS is a high-performance distributed object storage system written in Rust, compatible with S3 API." \
|
||||
description="RustFS is a high-performance distributed object storage software built using Rust. It supports erasure coding storage, multi-tenant management, observability, and other enterprise-level features." \
|
||||
url="https://rustfs.com" \
|
||||
license="Apache-2.0"
|
||||
vendor="RustFS Team" \
|
||||
maintainer="RustFS Team <dev@rustfs.com>" \
|
||||
version="${RELEASE}" \
|
||||
release="${RELEASE}" \
|
||||
build-date="${BUILD_DATE}" \
|
||||
vcs-ref="${VCS_REF}" \
|
||||
summary="High-performance distributed object storage system compatible with S3 API" \
|
||||
description="RustFS is a distributed object storage system written in Rust, supporting erasure coding, multi-tenant management, and observability." \
|
||||
url="https://rustfs.com" \
|
||||
license="Apache-2.0"
|
||||
|
||||
# Install runtime dependencies
|
||||
RUN apk add --no-cache \
|
||||
ca-certificates \
|
||||
curl \
|
||||
tzdata \
|
||||
bash \
|
||||
&& addgroup -g 1000 rustfs \
|
||||
&& adduser -u 1000 -G rustfs -s /bin/sh -D rustfs
|
||||
RUN echo "https://dl-cdn.alpinelinux.org/alpine/v3.20/community" >> /etc/apk/repositories && \
|
||||
apk update && \
|
||||
apk add --no-cache ca-certificates bash gosu coreutils shadow && \
|
||||
addgroup -g 1000 rustfs && \
|
||||
adduser -u 1000 -G rustfs -s /bin/bash -D rustfs
|
||||
|
||||
# Environment variables
|
||||
ENV RUSTFS_ACCESS_KEY=rustfsadmin \
|
||||
# Copy CA certificates and RustFS binary from build stage
|
||||
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
|
||||
COPY --from=build /build/rustfs /usr/bin/rustfs
|
||||
|
||||
# Copy entry point script
|
||||
COPY entrypoint.sh /entrypoint.sh
|
||||
|
||||
# Set permissions
|
||||
RUN chmod +x /usr/bin/rustfs /entrypoint.sh && \
|
||||
mkdir -p /data /logs && \
|
||||
chown rustfs:rustfs /data /logs && \
|
||||
chmod 700 /data /logs
|
||||
|
||||
# Environment variables (credentials should be set via environment or secrets)
|
||||
ENV RUSTFS_ADDRESS=:9000 \
|
||||
RUSTFS_ACCESS_KEY=rustfsadmin \
|
||||
RUSTFS_SECRET_KEY=rustfsadmin \
|
||||
RUSTFS_ADDRESS=":9000" \
|
||||
RUSTFS_CONSOLE_ENABLE=true \
|
||||
RUSTFS_VOLUMES=/data \
|
||||
RUST_LOG=warn
|
||||
|
||||
# Set permissions for /usr/bin (similar to MinIO's approach)
|
||||
RUN chmod -R 755 /usr/bin
|
||||
|
||||
# Copy CA certificates and binaries from build stage
|
||||
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
|
||||
COPY --from=build /build/rustfs /usr/bin/
|
||||
|
||||
# Set executable permissions
|
||||
RUN chmod +x /usr/bin/rustfs
|
||||
|
||||
# Create data directory
|
||||
RUN mkdir -p /data /config && chown -R rustfs:rustfs /data /config
|
||||
|
||||
# Switch to non-root user
|
||||
USER rustfs
|
||||
|
||||
# Set working directory
|
||||
WORKDIR /data
|
||||
RUST_LOG=warn \
|
||||
RUSTFS_OBS_LOG_DIRECTORY=/logs \
|
||||
RUSTFS_SINKS_FILE_PATH=/logs
|
||||
|
||||
# Expose port
|
||||
EXPOSE 9000
|
||||
|
||||
# Volumes for data and logs
|
||||
VOLUME ["/data", "/logs"]
|
||||
|
||||
# Volume for data
|
||||
VOLUME ["/data"]
|
||||
# Set entry point
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
||||
CMD ["/usr/bin/rustfs"]
|
||||
|
||||
# Set entrypoint
|
||||
ENTRYPOINT ["/usr/bin/rustfs"]
|
||||
|
||||
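A quick sanity check of the reworked production image might look like the following; it exercises the `RELEASE` build argument and the new runtime defaults (`RUSTFS_ADDRESS=:9000`, `/data` and `/logs` volumes). The tag, credential values, and volume names are placeholders, not part of the commit:

```bash
# Build against the release channel (a pinned version also works via RELEASE=x.y.z)
docker build --build-arg RELEASE=latest -t rustfs:local .

# Run with overridden credentials, matching the advice in the new
# "credentials should be set via environment or secrets" comment
docker run -d -p 9000:9000 \
  -e RUSTFS_ACCESS_KEY=changeme \
  -e RUSTFS_SECRET_KEY=changeme \
  -v rustfs-data:/data -v rustfs-logs:/logs \
  rustfs:local
```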
Dockerfile.source

@@ -112,6 +112,8 @@ RUN apt-get update && apt-get install -y \
     ca-certificates \
     tzdata \
     wget \
+    coreutils \
+    passwd \
     && rm -rf /var/lib/apt/lists/*

 # Create rustfs user and group
@@ -128,6 +130,10 @@ RUN mkdir -p /data/rustfs{0,1,2,3} && \
 COPY --from=builder /usr/local/bin/rustfs /app/rustfs
 RUN chmod +x /app/rustfs && chown rustfs:rustfs /app/rustfs

+# Copy entrypoint script
+COPY entrypoint.sh /entrypoint.sh
+RUN chmod +x /entrypoint.sh
+
 # Switch to non-root user
 USER rustfs

@@ -142,9 +148,9 @@ ENV RUSTFS_ACCESS_KEY=rustfsadmin \
     RUSTFS_VOLUMES=/data \
     RUST_LOG=warn

-
 # Volume for data
 VOLUME ["/data"]

-# Set default command
+# Set entrypoint and default command
+ENTRYPOINT ["/entrypoint.sh"]
 CMD ["/app/rustfs"]
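With `ENTRYPOINT ["/entrypoint.sh"]` plus `CMD ["/app/rustfs"]`, Docker passes the CMD as arguments to the entrypoint, so the script can perform setup and then hand off to the binary. The actual `entrypoint.sh` is not shown in this diff; a minimal sketch of the conventional pattern it presumably follows:

```bash
#!/bin/bash
# Hypothetical sketch of an entrypoint compatible with the ENTRYPOINT/CMD
# pair above; the real entrypoint.sh is not part of this diff.
set -e

# Optional setup work would go here (fix volume ownership, render config, ...)

# Hand off to CMD ("/app/rustfs" by default) so the server runs as PID 1
exec "$@"
```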
192 Makefile
@@ -46,21 +46,6 @@ setup-hooks:
 	chmod +x .git/hooks/pre-commit
 	@echo "✅ Git hooks setup complete!"

-.PHONY: init-devenv
-init-devenv:
-	$(DOCKER_CLI) build -t $(IMAGE_NAME) -f Dockerfile.source .
-	$(DOCKER_CLI) stop $(CONTAINER_NAME)
-	$(DOCKER_CLI) rm $(CONTAINER_NAME)
-	$(DOCKER_CLI) run -d --name $(CONTAINER_NAME) -p 9010:9010 -p 9000:9000 -v $(shell pwd):/root/s3-rustfs -it $(IMAGE_NAME)
-
-.PHONY: start
-start:
-	$(DOCKER_CLI) start $(CONTAINER_NAME)
-
-.PHONY: stop
-stop:
-	$(DOCKER_CLI) stop $(CONTAINER_NAME)
-
 .PHONY: e2e-server
 e2e-server:
 	sh $(shell pwd)/scripts/run.sh
@@ -80,8 +65,6 @@ build-dev:
 	@echo "🔨 Building RustFS in development mode..."
 	./build-rustfs.sh --dev

-
-
 # Docker-based build (alternative approach)
 # Usage: make BUILD_OS=ubuntu22.04 build-docker
 # Output: target/ubuntu22.04/release/rustfs
@@ -98,13 +81,13 @@ build-docker:
 .PHONY: build-musl
 build-musl:
 	@echo "🔨 Building rustfs for x86_64-unknown-linux-musl..."
-	@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-buildx' instead"
+	@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
 	./build-rustfs.sh --platform x86_64-unknown-linux-musl

 .PHONY: build-gnu
 build-gnu:
 	@echo "🔨 Building rustfs for x86_64-unknown-linux-gnu..."
-	@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-buildx' instead"
+	@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
 	./build-rustfs.sh --platform x86_64-unknown-linux-gnu

 .PHONY: deploy-dev
@@ -112,15 +95,19 @@ deploy-dev: build-musl
 	@echo "🚀 Deploying to dev server: $${IP}"
 	./scripts/dev_deploy.sh $${IP}

-# Multi-architecture Docker build targets (NEW: using docker-buildx.sh)
+# ========================================================================================
+# Docker Multi-Architecture Builds (Primary Methods)
+# ========================================================================================

+# Production builds using docker-buildx.sh (for CI/CD and production)
 .PHONY: docker-buildx
 docker-buildx:
-	@echo "🏗️ Building multi-architecture Docker images with buildx..."
+	@echo "🏗️ Building multi-architecture production Docker images with buildx..."
 	./docker-buildx.sh

 .PHONY: docker-buildx-push
 docker-buildx-push:
-	@echo "🚀 Building and pushing multi-architecture Docker images with buildx..."
+	@echo "🚀 Building and pushing multi-architecture production Docker images with buildx..."
 	./docker-buildx.sh --push

 .PHONY: docker-buildx-version
@@ -129,7 +116,7 @@ docker-buildx-version:
 		echo "❌ 错误: 请指定版本, 例如: make docker-buildx-version VERSION=v1.0.0"; \
 		exit 1; \
 	fi
-	@echo "🏗️ Building multi-architecture Docker images (version: $(VERSION))..."
+	@echo "🏗️ Building multi-architecture production Docker images (version: $(VERSION))..."
 	./docker-buildx.sh --release $(VERSION)

 .PHONY: docker-buildx-push-version
@@ -138,21 +125,114 @@ docker-buildx-push-version:
 		echo "❌ 错误: 请指定版本, 例如: make docker-buildx-push-version VERSION=v1.0.0"; \
 		exit 1; \
 	fi
-	@echo "🚀 Building and pushing multi-architecture Docker images (version: $(VERSION))..."
+	@echo "🚀 Building and pushing multi-architecture production Docker images (version: $(VERSION))..."
 	./docker-buildx.sh --release $(VERSION) --push

+# Development/Source builds using direct buildx commands
+.PHONY: docker-dev
+docker-dev:
+	@echo "🏗️ Building multi-architecture development Docker images with buildx..."
+	@echo "💡 This builds from source code and is intended for local development and testing"
+	@echo "⚠️ Multi-arch images cannot be loaded locally, use docker-dev-push to push to registry"
+	$(DOCKER_CLI) buildx build \
+		--platform linux/amd64,linux/arm64 \
+		--file $(DOCKERFILE_SOURCE) \
+		--tag rustfs:source-latest \
+		--tag rustfs:dev-latest \
+		.
+
+.PHONY: docker-dev-local
+docker-dev-local:
+	@echo "🏗️ Building single-architecture development Docker image for local use..."
+	@echo "💡 This builds from source code for the current platform and loads locally"
+	$(DOCKER_CLI) buildx build \
+		--file $(DOCKERFILE_SOURCE) \
+		--tag rustfs:source-latest \
+		--tag rustfs:dev-latest \
+		--load \
+		.
+
+.PHONY: docker-dev-push
+docker-dev-push:
+	@if [ -z "$(REGISTRY)" ]; then \
+		echo "❌ 错误: 请指定镜像仓库, 例如: make docker-dev-push REGISTRY=ghcr.io/username"; \
+		exit 1; \
+	fi
+	@echo "🚀 Building and pushing multi-architecture development Docker images..."
+	@echo "💡 推送到仓库: $(REGISTRY)"
+	$(DOCKER_CLI) buildx build \
+		--platform linux/amd64,linux/arm64 \
+		--file $(DOCKERFILE_SOURCE) \
+		--tag $(REGISTRY)/rustfs:source-latest \
+		--tag $(REGISTRY)/rustfs:dev-latest \
+		--push \
+		.
+
+# Local production builds using direct buildx (alternative to docker-buildx.sh)
+.PHONY: docker-buildx-production-local
+docker-buildx-production-local:
+	@echo "🏗️ Building single-architecture production Docker image locally..."
+	@echo "💡 Alternative to docker-buildx.sh for local testing"
+	$(DOCKER_CLI) buildx build \
+		--file $(DOCKERFILE_PRODUCTION) \
+		--tag rustfs:production-latest \
+		--tag rustfs:latest \
+		--load \
+		--build-arg RELEASE=latest \
+		.
+
+# ========================================================================================
+# Single Architecture Docker Builds (Traditional)
+# ========================================================================================
+
 .PHONY: docker-build-production
 docker-build-production:
-	@echo "🏗️ Building production Docker image..."
+	@echo "🏗️ Building single-architecture production Docker image..."
+	@echo "💡 Consider using 'make docker-buildx-production-local' for multi-arch support"
 	$(DOCKER_CLI) build -f $(DOCKERFILE_PRODUCTION) -t rustfs:latest .

 .PHONY: docker-build-source
 docker-build-source:
-	@echo "🏗️ Building source Docker image..."
+	@echo "🏗️ Building single-architecture source Docker image..."
+	@echo "💡 Consider using 'make docker-dev-local' for multi-arch support"
 	$(DOCKER_CLI) build -f $(DOCKERFILE_SOURCE) -t rustfs:source .

+# ========================================================================================
+# Development Environment
+# ========================================================================================
+
+.PHONY: dev-env-start
+dev-env-start:
+	@echo "🚀 Starting development environment..."
+	$(DOCKER_CLI) buildx build \
+		--file $(DOCKERFILE_SOURCE) \
+		--tag rustfs:dev \
+		--load \
+		.
+	$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
+	$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true
+	$(DOCKER_CLI) run -d --name $(CONTAINER_NAME) \
+		-p 9010:9010 -p 9000:9000 \
+		-v $(shell pwd):/workspace \
+		-it rustfs:dev
+
+.PHONY: dev-env-stop
+dev-env-stop:
+	@echo "🛑 Stopping development environment..."
+	$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
+	$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true
+
+.PHONY: dev-env-restart
+dev-env-restart: dev-env-stop dev-env-start
+
+# ========================================================================================
+# Build Utilities
+# ========================================================================================
+
 .PHONY: docker-inspect-multiarch
 docker-inspect-multiarch:
 	@if [ -z "$(IMAGE)" ]; then \
@@ -165,7 +245,7 @@ docker-inspect-multiarch:
 .PHONY: build-cross-all
 build-cross-all:
 	@echo "🔧 Building all target architectures..."
-	@echo "💡 On macOS/Windows, use 'make docker-buildx' for reliable multi-arch builds"
+	@echo "💡 On macOS/Windows, use 'make docker-dev' for reliable multi-arch builds"
 	@echo "🔨 Generating protobuf code..."
 	cargo run --bin gproto || true
 	@echo "🔨 Building x86_64-unknown-linux-musl..."
@@ -174,6 +254,10 @@ build-cross-all:
 	./build-rustfs.sh --platform aarch64-unknown-linux-gnu
 	@echo "✅ All architectures built successfully!"

+# ========================================================================================
+# Help and Documentation
+# ========================================================================================
+
 .PHONY: help-build
 help-build:
 	@echo "🔨 RustFS 构建帮助:"
@@ -206,23 +290,61 @@ help-build:
 help-docker:
 	@echo "🐳 Docker 多架构构建帮助:"
 	@echo ""
-	@echo "🚀 推荐使用 (新的 docker-buildx 方式):"
-	@echo "   make docker-buildx                    # 构建多架构镜像(不推送)"
-	@echo "   make docker-buildx-push               # 构建并推送多架构镜像"
+	@echo "🚀 生产镜像构建 (推荐使用 docker-buildx.sh):"
+	@echo "   make docker-buildx                    # 构建生产多架构镜像(不推送)"
+	@echo "   make docker-buildx-push               # 构建并推送生产多架构镜像"
 	@echo "   make docker-buildx-version VERSION=v1.0.0      # 构建指定版本"
 	@echo "   make docker-buildx-push-version VERSION=v1.0.0 # 构建并推送指定版本"
 	@echo ""
-	@echo "🏗️ 单架构构建:"
-	@echo "   make docker-build-production          # 构建生产环境镜像"
-	@echo "   make docker-build-source              # 构建源码构建镜像"
+	@echo "🔧 开发/源码镜像构建 (本地开发测试):"
+	@echo "   make docker-dev                       # 构建开发多架构镜像(无法本地加载)"
+	@echo "   make docker-dev-local                 # 构建开发单架构镜像(本地加载)"
+	@echo "   make docker-dev-push REGISTRY=xxx     # 构建并推送开发镜像"
+	@echo ""
+	@echo "🏗️ 本地生产镜像构建 (替代方案):"
+	@echo "   make docker-buildx-production-local   # 本地构建生产单架构镜像"
+	@echo ""
+	@echo "📦 单架构构建 (传统方式):"
+	@echo "   make docker-build-production          # 构建单架构生产镜像"
+	@echo "   make docker-build-source              # 构建单架构源码镜像"
+	@echo ""
+	@echo "🚀 开发环境管理:"
+	@echo "   make dev-env-start                    # 启动开发容器环境"
+	@echo "   make dev-env-stop                     # 停止开发容器环境"
+	@echo "   make dev-env-restart                  # 重启开发容器环境"
 	@echo ""
 	@echo "🔧 辅助工具:"
 	@echo "   make build-cross-all                  # 构建所有架构的二进制文件"
 	@echo "   make docker-inspect-multiarch IMAGE=xxx # 检查镜像的架构支持"
 	@echo ""
-	@echo "📋 环境变量 (在推送时需要设置):"
+	@echo "📋 环境变量:"
+	@echo "   REGISTRY              镜像仓库地址 (推送时需要)"
 	@echo "   DOCKERHUB_USERNAME    Docker Hub 用户名"
 	@echo "   DOCKERHUB_TOKEN       Docker Hub 访问令牌"
 	@echo "   GITHUB_TOKEN          GitHub 访问令牌"
 	@echo ""
-	@echo "💡 更多详情请参考项目根目录的 docker-buildx.sh 脚本"
+	@echo "💡 建议:"
+	@echo "   - 生产用途: 使用 docker-buildx* 命令 (基于预编译二进制)"
+	@echo "   - 本地开发: 使用 docker-dev* 命令 (从源码构建)"
+	@echo "   - 开发环境: 使用 dev-env-* 命令管理开发容器"
+
+.PHONY: help
+help:
+	@echo "🦀 RustFS Makefile 帮助:"
+	@echo ""
+	@echo "📋 主要命令分类:"
+	@echo "   make help-build                # 显示构建相关帮助"
+	@echo "   make help-docker               # 显示 Docker 相关帮助"
+	@echo ""
+	@echo "🔧 代码质量:"
+	@echo "   make fmt                       # 格式化代码"
+	@echo "   make clippy                    # 运行 clippy 检查"
+	@echo "   make test                      # 运行测试"
+	@echo "   make pre-commit                # 运行所有预提交检查"
+	@echo ""
+	@echo "🚀 快速开始:"
+	@echo "   make build                     # 构建 RustFS 二进制"
+	@echo "   make docker-dev-local          # 构建开发 Docker 镜像(本地)"
+	@echo "   make dev-env-start             # 启动开发环境"
+	@echo ""
+	@echo "💡 更多帮助请使用 'make help-build' 或 'make help-docker'"
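Taken together, a typical workflow with the new targets looks like this (every target below is defined in the Makefile hunk above; the VERSION and REGISTRY values are the examples the Makefile's own messages suggest):

```bash
# Local development loop: build a single-arch dev image and boot the container
make docker-dev-local
make dev-env-start

# Release workflow: multi-arch production images via docker-buildx.sh
make docker-buildx-version VERSION=v1.0.0

# Dev images are multi-arch only when pushed to a registry
make docker-dev-push REGISTRY=ghcr.io/username

# Tear down the development container
make dev-env-stop
```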
41 _typos.toml (new file)
@@ -0,0 +1,41 @@
+[default]
+# # Ignore specific spell checking patterns
+# extend-ignore-identifiers-re = [
+#     # Ignore common patterns in base64 encoding and hash values
+#     "[A-Za-z0-9+/]{8,}={0,2}",     # base64 encoding
+#     "[A-Fa-f0-9]{8,}",             # hexadecimal hash
+#     "[A-Za-z0-9_-]{20,}",          # long random strings
+# ]
+
+# # Ignore specific regex patterns in content
+# extend-ignore-re = [
+#     # Ignore hash values and encoded strings (base64 patterns)
+#     "(?i)[A-Za-z0-9+/]{8,}={0,2}",
+#     # Ignore long strings in quotes (usually hash or base64)
+#     '"[A-Za-z0-9+/=_-]{8,}"',
+#     # Ignore IV values and similar cryptographic strings
+#     '"[A-Za-z0-9+/=]{12,}"',
+#     # Ignore cryptographic signatures and keys (including partial strings)
+#     "[A-Za-z0-9+/]{6,}[A-Za-z0-9+/=]*",
+#     # Ignore base64-like strings in comments (common in examples)
+#     "//.*[A-Za-z0-9+/]{8,}[A-Za-z0-9+/=]*",
+# ]
+extend-ignore-re = [
+    # Ignore long strings in quotes (usually hash or base64)
+    '"[A-Za-z0-9+/=_-]{32,}"',
+    # Ignore IV values and similar cryptographic strings
+    '"[A-Za-z0-9+/=]{12,}"',
+    # Ignore cryptographic signatures and keys (including partial strings)
+    "[A-Za-z0-9+/]{16,}[A-Za-z0-9+/=]*",
+]
+
+[default.extend-words]
+bui = "bui"
+typ = "typ"
+clen = "clen"
+datas = "datas"
+bre = "bre"
+abd = "abd"
+
+[files]
+extend-exclude = []
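This config drives the `typos` spell checker (typos-cli), which picks up `_typos.toml` from the repository root automatically; presumably it is what backs the batch of spelling fixes in the source hunks below. Typical invocations, assuming typos-cli is installed:

```bash
# Report spelling issues across the repo (reads _typos.toml automatically)
typos

# Apply the suggested fixes in place
typos --write-changes
```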
@@ -108,14 +108,26 @@ pub const DEFAULT_CONSOLE_ADDRESS: &str = concat!(":", DEFAULT_CONSOLE_PORT);
 /// It is used to store the logs of the application.
 /// Default value: rustfs.log
 /// Environment variable: RUSTFS_OBSERVABILITY_LOG_FILENAME
-pub const DEFAULT_LOG_FILENAME: &str = "rustfs.log";
+pub const DEFAULT_LOG_FILENAME: &str = "rustfs";
+
+/// Default OBS log filename for rustfs
+/// This is the default log filename for OBS.
+/// It is used to store the logs of the application.
+/// Default value: rustfs.log
+pub const DEFAULT_OBS_LOG_FILENAME: &str = concat!(DEFAULT_LOG_FILENAME, ".log");
+
+/// Default sink file log file for rustfs
+/// This is the default sink file log file for rustfs.
+/// It is used to store the logs of the application.
+/// Default value: rustfs-sink.log
+pub const DEFAULT_SINK_FILE_LOG_FILE: &str = concat!(DEFAULT_LOG_FILENAME, "-sink.log");

 /// Default log directory for rustfs
 /// This is the default log directory for rustfs.
 /// It is used to store the logs of the application.
 /// Default value: logs
 /// Environment variable: RUSTFS_OBSERVABILITY_LOG_DIRECTORY
-pub const DEFAULT_LOG_DIR: &str = "deploy/logs";
+pub const DEFAULT_LOG_DIR: &str = "/logs";

 /// Default log rotation size mb for rustfs
 /// This is the default log rotation size for rustfs.
@@ -33,11 +33,11 @@ pub fn decrypt_data(password: &[u8], data: &[u8]) -> Result<Vec<u8>, crate::Error> {
     match id {
         ID::Argon2idChaCHa20Poly1305 => {
             let key = id.get_key(password, salt)?;
-            decryp(ChaCha20Poly1305::new_from_slice(&key)?, nonce, data)
+            decrypt(ChaCha20Poly1305::new_from_slice(&key)?, nonce, data)
         }
         _ => {
             let key = id.get_key(password, salt)?;
-            decryp(Aes256Gcm::new_from_slice(&key)?, nonce, data)
+            decrypt(Aes256Gcm::new_from_slice(&key)?, nonce, data)
         }
     }
 }
@@ -135,7 +135,7 @@ pub fn decrypt_data(password: &[u8], data: &[u8]) -> Result<Vec<u8>, crate::Error> {

 #[cfg(any(test, feature = "crypto"))]
 #[inline]
-fn decryp<T: aes_gcm::aead::Aead>(stream: T, nonce: &[u8], data: &[u8]) -> Result<Vec<u8>, crate::Error> {
+fn decrypt<T: aes_gcm::aead::Aead>(stream: T, nonce: &[u8], data: &[u8]) -> Result<Vec<u8>, crate::Error> {
     use crate::error::Error;
     stream
         .decrypt(aes_gcm::Nonce::from_slice(nonce), data)
@@ -253,7 +253,7 @@ pub async fn get_server_info(get_pools: bool) -> InfoMessage {

     warn!("load_data_usage_from_backend end {:?}", after3 - after2);

-    let backen_info = store.clone().backend_info().await;
+    let backend_info = store.clone().backend_info().await;

     let after4 = OffsetDateTime::now_utc();

@@ -272,10 +272,10 @@ pub async fn get_server_info(get_pools: bool) -> InfoMessage {
         backend_type: rustfs_madmin::BackendType::ErasureType,
         online_disks: online_disks.sum(),
         offline_disks: offline_disks.sum(),
-        standard_sc_parity: backen_info.standard_sc_parity,
-        rr_sc_parity: backen_info.rr_sc_parity,
-        total_sets: backen_info.total_sets,
-        drives_per_set: backen_info.drives_per_set,
+        standard_sc_parity: backend_info.standard_sc_parity,
+        rr_sc_parity: backend_info.rr_sc_parity,
+        total_sets: backend_info.total_sets,
+        drives_per_set: backend_info.drives_per_set,
     };
     if get_pools {
         pools = get_pools_info(&all_disks).await.unwrap_or_default();
@@ -31,7 +31,7 @@ pub struct ListPathRawOptions {
     pub fallback_disks: Vec<Option<DiskStore>>,
     pub bucket: String,
     pub path: String,
-    pub recursice: bool,
+    pub recursive: bool,
     pub filter_prefix: Option<String>,
     pub forward_to: Option<String>,
     pub min_disks: usize,
@@ -52,7 +52,7 @@ impl Clone for ListPathRawOptions {
             fallback_disks: self.fallback_disks.clone(),
             bucket: self.bucket.clone(),
             path: self.path.clone(),
-            recursice: self.recursice,
+            recursive: self.recursive,
             filter_prefix: self.filter_prefix.clone(),
             forward_to: self.forward_to.clone(),
             min_disks: self.min_disks,
@@ -85,7 +85,7 @@ pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -
         let wakl_opts = WalkDirOptions {
             bucket: opts_clone.bucket.clone(),
             base_dir: opts_clone.path.clone(),
-            recursive: opts_clone.recursice,
+            recursive: opts_clone.recursive,
             report_notfound: opts_clone.report_not_found,
             filter_prefix: opts_clone.filter_prefix.clone(),
             forward_to: opts_clone.forward_to.clone(),
@@ -133,7 +133,7 @@ pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -
                 WalkDirOptions {
                     bucket: opts_clone.bucket.clone(),
                     base_dir: opts_clone.path.clone(),
-                    recursive: opts_clone.recursice,
+                    recursive: opts_clone.recursive,
                     report_notfound: opts_clone.report_not_found,
                     filter_prefix: opts_clone.filter_prefix.clone(),
                     forward_to: opts_clone.forward_to.clone(),
@@ -41,7 +41,7 @@
 //     pin_mut!(body);
 //     // 上一次没用完的数据
 //     let mut prev_bytes = Bytes::new();
-//     let mut readed_size = 0;
+//     let mut read_size = 0;

 //     loop {
 //         let data: Vec<Bytes> = {
@@ -51,9 +51,9 @@
 //                 Some(Err(e)) => return Err(e),
 //                 Some(Ok((data, remaining_bytes))) => {
 //                     // debug!(
-//                     //     "content_length:{},readed_size:{}, read_data data:{}, remaining_bytes: {} ",
+//                     //     "content_length:{},read_size:{}, read_data data:{}, remaining_bytes: {} ",
 //                     //     content_length,
-//                     //     readed_size,
+//                     //     read_size,
 //                     //     data.len(),
 //                     //     remaining_bytes.len()
 //                     // );
@@ -65,15 +65,15 @@
 //         };

 //         for bytes in data {
-//             readed_size += bytes.len();
-//             // debug!("readed_size {}, content_length {}", readed_size, content_length,);
+//             read_size += bytes.len();
+//             // debug!("read_size {}, content_length {}", read_size, content_length,);
 //             y.yield_ok(bytes).await;
 //         }

-//         if readed_size + prev_bytes.len() >= content_length {
+//         if read_size + prev_bytes.len() >= content_length {
 //             // debug!(
-//             //     "读完了 readed_size:{} + prev_bytes.len({}) == content_length {}",
-//             //     readed_size,
+//             //     "读完了 read_size:{} + prev_bytes.len({}) == content_length {}",
+//             //     read_size,
 //             //     prev_bytes.len(),
 //             //     content_length,
 //             //     );
@@ -135,7 +135,7 @@ impl Default for PutObjectOptions {

 #[allow(dead_code)]
 impl PutObjectOptions {
-    fn set_matche_tag(&mut self, etag: &str) {
+    fn set_match_tag(&mut self, etag: &str) {
         if etag == "*" {
             self.custom_header
                 .insert("If-Match", HeaderValue::from_str("*").expect("err"));
@@ -145,7 +145,7 @@ impl PutObjectOptions {
         }
     }

-    fn set_matche_tag_except(&mut self, etag: &str) {
+    fn set_match_tag_except(&mut self, etag: &str) {
         if etag == "*" {
             self.custom_header
                 .insert("If-None-Match", HeaderValue::from_str("*").expect("err"));
@@ -181,7 +181,7 @@ impl PutObjectOptions {
             header.insert(
                 "Expires",
                 HeaderValue::from_str(&self.expires.format(ISO8601_DATEFORMAT).unwrap()).expect("err"),
-            ); //rustfs invalid heade
+            ); //rustfs invalid header
         }

         if self.mode.as_str() != "" {
@@ -2422,7 +2422,7 @@ impl ReplicateObjectInfo {
     // let mut arns = Vec::new();
     // let mut tgts_map = std::collections::HashSet::new();
     // for rule in cfg.rules {
-    //     if rule.status.as_str() == "Disabe" {
+    //     if rule.status.as_str() == "Disable" {
     //         continue;
     //     }
@@ -95,7 +95,7 @@ impl ArnTarget {
         Self {
             client: TargetClient {
                 bucket,
-                storage_class: "STANDRD".to_string(),
+                storage_class: "STANDARD".to_string(),
                 disable_proxy: false,
                 health_check_duration: Duration::from_secs(100),
                 endpoint,
@@ -361,7 +361,7 @@ impl BucketTargetSys {
     //     // Mocked implementation for obtaining a remote client
     //     let tcli = TargetClient {
     //         bucket: _tgt.target_bucket.clone(),
-    //         storage_class: "STANDRD".to_string(),
+    //         storage_class: "STANDARD".to_string(),
     //         disable_proxy: false,
     //         health_check_duration: Duration::from_secs(100),
     //         endpoint: _tgt.endpoint.clone(),
@@ -379,7 +379,7 @@ impl BucketTargetSys {
     //     // Mocked implementation for obtaining a remote client
     //     let tcli = TargetClient {
     //         bucket: _tgt.target_bucket.clone(),
-    //         storage_class: "STANDRD".to_string(),
+    //         storage_class: "STANDARD".to_string(),
     //         disable_proxy: false,
     //         health_check_duration: Duration::from_secs(100),
     //         endpoint: _tgt.endpoint.clone(),
@@ -403,7 +403,7 @@ impl BucketTargetSys {
         match store.get_bucket_info(_bucket, &store_api::BucketOptions::default()).await {
             Ok(info) => {
                 println!("Bucket Info: {info:?}");
-                info.versionning
+                info.versioning
             }
             Err(err) => {
                 eprintln!("Error: {err:?}");
@@ -431,7 +431,7 @@ impl BucketTargetSys {
     //     {
     //         Ok(info) => {
    //             println!("Bucket Info: {:?}", info);
-    //             info.versionning
+    //             info.versioning
     //         }
     //         Err(err) => {
     //             eprintln!("Error: {:?}", err);
@@ -475,8 +475,7 @@ impl BucketTargetSys {
         {
             Ok(info) => {
                 println!("Bucket Info: {info:?}");
-                if !info.versionning {
-                    println!("2222222222 {}", info.versionning);
+                if !info.versioning {
                     return Err(SetTargetError::TargetNotVersioned(tgt.target_bucket.to_string()));
                 }
             }
@@ -563,7 +563,7 @@ impl LocalDisk {
     }

     async fn read_metadata(&self, file_path: impl AsRef<Path>) -> Result<Vec<u8>> {
-        // TODO: suport timeout
+        // TODO: support timeout
         let (data, _) = self.read_metadata_with_dmtime(file_path.as_ref()).await?;
         Ok(data)
     }
@@ -595,7 +595,7 @@ impl LocalDisk {
     }

     async fn read_all_data(&self, volume: &str, volume_dir: impl AsRef<Path>, file_path: impl AsRef<Path>) -> Result<Vec<u8>> {
-        // TODO: timeout suport
+        // TODO: timeout support
         let (data, _) = self.read_all_data_with_dmtime(volume, volume_dir, file_path).await?;
         Ok(data)
     }
@@ -750,7 +750,7 @@ impl LocalDisk {

         let mut f = {
             if sync {
-                // TODO: suport sync
+                // TODO: support sync
                 self.open_file(file_path, flags, skip_parent).await?
             } else {
                 self.open_file(file_path, flags, skip_parent).await?
@@ -2336,7 +2336,7 @@ impl DiskAPI for LocalDisk {
             };
             done_sz(buf.len() as u64);
             res.insert("metasize".to_string(), buf.len().to_string());
-            item.transform_meda_dir();
+            item.transform_meta_dir();
             let meta_cache = MetaCacheEntry {
                 name: item.object_path().to_string_lossy().to_string(),
                 metadata: buf,
@@ -308,7 +308,7 @@ impl Erasure {
         // ec encode, 结果会写进 data_buffer
         let data_slices: SmallVec<[&mut [u8]; 16]> = data_buffer.chunks_exact_mut(shard_size).collect();

-        // partiy 数量大于 0 才 ec
+        // parity 数量大于 0 才 ec
         if self.parity_shards > 0 {
             self.encoder.as_ref().unwrap().encode(data_slices).map_err(Error::other)?;
         }
@@ -563,12 +563,12 @@ impl CurrentScannerCycle {
     }
 }

-// 将 SystemTime 转换为时间戳
+// Convert `SystemTime` to timestamp
 fn system_time_to_timestamp(time: &DateTime<Utc>) -> i64 {
     time.timestamp_micros()
 }

-// 将时间戳转换为 SystemTime
+// Convert timestamp to `SystemTime`
 fn timestamp_to_system_time(timestamp: i64) -> DateTime<Utc> {
     DateTime::from_timestamp_micros(timestamp).unwrap_or_default()
 }
@@ -593,7 +593,7 @@ pub struct ScannerItem {
 }

 impl ScannerItem {
-    pub fn transform_meda_dir(&mut self) {
+    pub fn transform_meta_dir(&mut self) {
         let split = self.prefix.split(SLASH_SEPARATOR).map(PathBuf::from).collect::<Vec<_>>();
         if split.len() > 1 {
             self.prefix = path_join(&split[0..split.len() - 1]).to_string_lossy().to_string();
@@ -1101,7 +1101,7 @@ impl FolderScanner {
                     // successfully read means we have a valid object.
                     found_objects = true;
                     // Remove filename i.e is the meta file to construct object name
-                    item.transform_meda_dir();
+                    item.transform_meta_dir();
                     // Object already accounted for, remove from heal map,
                     // simply because getSize() function already heals the
                     // object.
@@ -1262,7 +1262,7 @@ impl FolderScanner {
             disks: self.disks.clone(),
             bucket: bucket.clone(),
             path: prefix.clone(),
-            recursice: true,
+            recursive: true,
             report_not_found: true,
             min_disks: self.disks_quorum,
             agreed: Some(Box::new(move |entry: MetaCacheEntry| {
@@ -1355,7 +1355,7 @@ impl SetDisks {
             disks: disks.iter().cloned().map(Some).collect(),
             bucket: bucket_info.name.clone(),
             path: bucket_info.prefix.clone(),
-            recursice: true,
+            recursive: true,
             min_disks: listing_quorum,
             agreed: Some(Box::new(move |entry: MetaCacheEntry| Box::pin(cb1(entry)))),
             partial: Some(Box::new(move |entries: MetaCacheEntries, _: &[Option<DiskError>]| {
@@ -1172,7 +1172,7 @@ impl SetDisks {
             ListPathRawOptions {
                 disks: disks.iter().cloned().map(Some).collect(),
                 bucket: bucket.clone(),
-                recursice: true,
+                recursive: true,
                 min_disks: listing_quorum,
                 agreed: Some(Box::new(move |entry: MetaCacheEntry| {
                     info!("list_objects_to_rebalance: agreed: {:?}", &entry.name);
@@ -449,7 +449,7 @@ impl PeerS3Client for LocalPeerS3Client {
             op.as_ref().map(|v| BucketInfo {
                 name: v.name.clone(),
                 created: v.created,
-                versionning: versioned,
+                versioning: versioned,
                 ..Default::default()
             })
         })
@@ -1233,7 +1233,7 @@ impl SetDisks {
             return Err(DiskError::ErasureReadQuorum);
         }

-        let mut meta_hashs = vec![None; metas.len()];
+        let mut meta_hashes = vec![None; metas.len()];
         let mut hasher = Sha256::new();

         for (i, meta) in metas.iter().enumerate() {
@@ -1265,7 +1265,7 @@ impl SetDisks {

             hasher.flush()?;

-            meta_hashs[i] = Some(hex(hasher.clone().finalize().as_slice()));
+            meta_hashes[i] = Some(hex(hasher.clone().finalize().as_slice()));

             hasher.reset();
         }
@@ -1273,7 +1273,7 @@ impl SetDisks {

         let mut count_map = HashMap::new();

-        for hash in meta_hashs.iter().flatten() {
+        for hash in meta_hashes.iter().flatten() {
             *count_map.entry(hash).or_insert(0) += 1;
         }

@@ -1297,7 +1297,7 @@ impl SetDisks {

         let mut valid_obj_map = HashMap::new();

-        for (i, op_hash) in meta_hashs.iter().enumerate() {
+        for (i, op_hash) in meta_hashes.iter().enumerate() {
             if let Some(hash) = op_hash {
                 if let Some(max_hash) = max_val {
                     if hash == max_hash {
@@ -1749,8 +1749,8 @@ impl SetDisks {

         // for res in results {
         //     match res {
-        //         Ok(entrys) => {
-        //             ress.push(Some(entrys));
+        //         Ok(entries) => {
+        //             ress.push(Some(entries));
         //             errs.push(None);
         //         }
         //         Err(e) => {
@@ -2108,17 +2108,17 @@ impl SetDisks {

         let erasure = erasure_coding::Erasure::new(fi.erasure.data_blocks, fi.erasure.parity_blocks, fi.erasure.block_size);

-        let mut total_readed = 0;
+        let mut total_read = 0;
         for i in part_index..=last_part_index {
-            if total_readed == length {
+            if total_read == length {
                 break;
             }

             let part_number = fi.parts[i].number;
             let part_size = fi.parts[i].size;
             let mut part_length = part_size - part_offset;
-            if part_length > (length - total_readed) {
-                part_length = length - total_readed
+            if part_length > (length - total_read) {
+                part_length = length - total_read
             }

             let till_offset = erasure.shard_file_offset(part_offset, part_length, part_size);
@@ -2203,7 +2203,7 @@ impl SetDisks {

             // debug!("ec decode {} written size {}", part_number, n);

-            total_readed += part_length;
+            total_read += part_length;
             part_offset = 0;
         }

@@ -2306,7 +2306,7 @@ impl SetDisks {
                     Some(filter_prefix.to_string())
                 }
             },
-            recursice: true,
+            recursive: true,
            forward_to: None,
             min_disks: 1,
             report_not_found: false,
@@ -2481,12 +2481,12 @@ impl SetDisks {
         };

         match Self::pick_valid_fileinfo(&parts_metadata, mod_time, etag, read_quorum as usize) {
-            Ok(lastest_meta) => {
+            Ok(latest_meta) => {
                 let (available_disks, data_errs_by_disk, data_errs_by_part) = disks_with_all_parts(
                     &online_disks,
                     &mut parts_metadata,
                     &errs,
-                    &lastest_meta,
+                    &latest_meta,
                     bucket,
                     object,
                     opts.scan_mode,
@@ -2494,22 +2494,22 @@ impl SetDisks {
                 .await?;

                 // info!(
-                //     "disks_with_all_parts: got available_disks: {:?}, data_errs_by_disk: {:?}, data_errs_by_part: {:?}, lastest_meta: {:?}",
-                //     available_disks, data_errs_by_disk, data_errs_by_part, lastest_meta
+                //     "disks_with_all_parts: got available_disks: {:?}, data_errs_by_disk: {:?}, data_errs_by_part: {:?}, latest_meta: {:?}",
+                //     available_disks, data_errs_by_disk, data_errs_by_part, latest_meta
                 // );
-                let erasure = if !lastest_meta.deleted && !lastest_meta.is_remote() {
+                let erasure = if !latest_meta.deleted && !latest_meta.is_remote() {
                     // Initialize erasure coding
                     erasure_coding::Erasure::new(
-                        lastest_meta.erasure.data_blocks,
-                        lastest_meta.erasure.parity_blocks,
-                        lastest_meta.erasure.block_size,
+                        latest_meta.erasure.data_blocks,
+                        latest_meta.erasure.parity_blocks,
+                        latest_meta.erasure.block_size,
                     )
                 } else {
                     erasure_coding::Erasure::default()
                 };

                 result.object_size =
-                    ObjectInfo::from_file_info(&lastest_meta, bucket, object, true).get_actual_size()? as usize;
+                    ObjectInfo::from_file_info(&latest_meta, bucket, object, true).get_actual_size()? as usize;
                 // Loop to find number of disks with valid data, per-drive
                 // data state and a list of outdated disks on which data needs
                 // to be healed.
@@ -2517,15 +2517,15 @@ impl SetDisks {
                 let mut disks_to_heal_count = 0;

                 // info!(
-                //     "errs: {:?}, data_errs_by_disk: {:?}, lastest_meta: {:?}",
-                //     errs, data_errs_by_disk, lastest_meta
+                //     "errs: {:?}, data_errs_by_disk: {:?}, latest_meta: {:?}",
+                //     errs, data_errs_by_disk, latest_meta
                 // );
                 for index in 0..available_disks.len() {
                     let (yes, reason) = should_heal_object_on_disk(
                         &errs[index],
                         &data_errs_by_disk[&index],
                         &parts_metadata[index],
-                        &lastest_meta,
+                        &latest_meta,
                     );
                     if yes {
                         outdate_disks[index] = disks[index].clone();
@@ -2583,10 +2583,10 @@ impl SetDisks {
                     return Ok((result, None));
                 }

-                if !lastest_meta.deleted && disks_to_heal_count > lastest_meta.erasure.parity_blocks {
+                if !latest_meta.deleted && disks_to_heal_count > latest_meta.erasure.parity_blocks {
                     error!(
                         "file({} : {}) part corrupt too much, can not to fix, disks_to_heal_count: {}, parity_blocks: {}",
-                        bucket, object, disks_to_heal_count, lastest_meta.erasure.parity_blocks
+                        bucket, object, disks_to_heal_count, latest_meta.erasure.parity_blocks
                     );

                     // Allow for dangling deletes, on versions that have DataDir missing etc.
@@ -2633,39 +2633,37 @@ impl SetDisks {
                     };
                 }

-                if !lastest_meta.deleted && lastest_meta.erasure.distribution.len() != available_disks.len() {
+                if !latest_meta.deleted && latest_meta.erasure.distribution.len() != available_disks.len() {
                     let err_str = format!(
                         "unexpected file distribution ({:?}) from available disks ({:?}), looks like backend disks have been manually modified refusing to heal {}/{}({})",
-                        lastest_meta.erasure.distribution, available_disks, bucket, object, version_id
+                        latest_meta.erasure.distribution, available_disks, bucket, object, version_id
                     );
                     warn!(err_str);
                     let err = DiskError::other(err_str);
                     return Ok((
-                        self.default_heal_result(lastest_meta, &errs, bucket, object, version_id)
-                            .await,
+                        self.default_heal_result(latest_meta, &errs, bucket, object, version_id).await,
                         Some(err),
                     ));
                 }

-                let latest_disks = Self::shuffle_disks(&available_disks, &lastest_meta.erasure.distribution);
-                if !lastest_meta.deleted && lastest_meta.erasure.distribution.len() != outdate_disks.len() {
+                let latest_disks = Self::shuffle_disks(&available_disks, &latest_meta.erasure.distribution);
+                if !latest_meta.deleted && latest_meta.erasure.distribution.len() != outdate_disks.len() {
                     let err_str = format!(
                         "unexpected file distribution ({:?}) from outdated disks ({:?}), looks like backend disks have been manually modified refusing to heal {}/{}({})",
-                        lastest_meta.erasure.distribution, outdate_disks, bucket, object, version_id
+                        latest_meta.erasure.distribution, outdate_disks, bucket, object, version_id
                     );
                     warn!(err_str);
                     let err = DiskError::other(err_str);
                     return Ok((
-                        self.default_heal_result(lastest_meta, &errs, bucket, object, version_id)
-                            .await,
+                        self.default_heal_result(latest_meta, &errs, bucket, object, version_id).await,
                         Some(err),
                     ));
                 }

-                if !lastest_meta.deleted && lastest_meta.erasure.distribution.len() != parts_metadata.len() {
+                if !latest_meta.deleted && latest_meta.erasure.distribution.len() != parts_metadata.len() {
                     let err_str = format!(
                         "unexpected file distribution ({:?}) from metadata entries ({:?}), looks like backend disks have been manually modified refusing to heal {}/{}({})",
-                        lastest_meta.erasure.distribution,
+                        latest_meta.erasure.distribution,
                         parts_metadata.len(),
                         bucket,
                         object,
@@ -2674,15 +2672,13 @@ impl SetDisks {
                     warn!(err_str);
                     let err = DiskError::other(err_str);
                     return Ok((
-                        self.default_heal_result(lastest_meta, &errs, bucket, object, version_id)
-                            .await,
+                        self.default_heal_result(latest_meta, &errs, bucket, object, version_id).await,
                         Some(err),
                     ));
                 }

-                let out_dated_disks = Self::shuffle_disks(&outdate_disks, &lastest_meta.erasure.distribution);
-                let mut parts_metadata =
-                    Self::shuffle_parts_metadata(&parts_metadata, &lastest_meta.erasure.distribution);
+                let out_dated_disks = Self::shuffle_disks(&outdate_disks, &latest_meta.erasure.distribution);
+                let mut parts_metadata = Self::shuffle_parts_metadata(&parts_metadata, &latest_meta.erasure.distribution);
                 let mut copy_parts_metadata = vec![None; parts_metadata.len()];
                 for (index, disk) in latest_disks.iter().enumerate() {
                     if disk.is_some() {
@@ -2703,18 +2699,18 @@ impl SetDisks {
                     if disk.is_some() {
                         // Make sure to write the FileInfo information
                         // that is expected to be in quorum.
-                        parts_metadata[index] = clean_file_info(&lastest_meta);
+                        parts_metadata[index] = clean_file_info(&latest_meta);
                     }
                 }

                 // We write at temporary location and then rename to final location.
                 let tmp_id = Uuid::new_v4().to_string();
-                let src_data_dir = lastest_meta.data_dir.unwrap().to_string();
-                let dst_data_dir = lastest_meta.data_dir.unwrap();
+                let src_data_dir = latest_meta.data_dir.unwrap().to_string();
+                let dst_data_dir = latest_meta.data_dir.unwrap();

-                if !lastest_meta.deleted && !lastest_meta.is_remote() {
-                    let erasure_info = lastest_meta.erasure;
-                    for part in lastest_meta.parts.iter() {
+                if !latest_meta.deleted && !latest_meta.is_remote() {
+                    let erasure_info = latest_meta.erasure;
+                    for part in latest_meta.parts.iter() {
                         let till_offset = erasure.shard_file_offset(0, part.size, part.size);
                         let checksum_algo = erasure_info.get_checksum_info(part.number).algorithm;
                         let mut readers = Vec::with_capacity(latest_disks.len());
@@ -2759,7 +2755,7 @@ impl SetDisks {

                         let is_inline_buffer = {
                             if let Some(sc) = GLOBAL_StorageClass.get() {
-                                sc.should_inline(erasure.shard_file_size(lastest_meta.size), false)
+                                sc.should_inline(erasure.shard_file_size(latest_meta.size), false)
                             } else {
                                 false
                             }
@@ -3840,7 +3836,7 @@ impl SetDisks {
             disks,
             fallback_disks,
             bucket: bucket.clone(),
-            recursice: true,
+            recursive: true,
             forward_to,
             min_disks: 1,
             report_not_found: false,
@@ -4317,7 +4313,7 @@ impl ObjectIO for SetDisks {

         fi.is_latest = true;

-        // TODO: version suport
+        // TODO: version support
         Ok(ObjectInfo::from_file_info(&fi, bucket, object, opts.versioned || opts.version_suspended))
     }
 }
@@ -6142,7 +6138,7 @@ async fn disks_with_all_parts(
     online_disks: &[Option<DiskStore>],
     parts_metadata: &mut [FileInfo],
     errs: &[Option<DiskError>],
-    lastest_meta: &FileInfo,
+    latest_meta: &FileInfo,
     bucket: &str,
     object: &str,
     scan_mode: HealScanMode,
@@ -6150,10 +6146,10 @@ async fn disks_with_all_parts(
     let mut available_disks = vec![None; online_disks.len()];
     let mut data_errs_by_disk: HashMap<usize, Vec<usize>> = HashMap::new();
     for i in 0..online_disks.len() {
-        data_errs_by_disk.insert(i, vec![1; lastest_meta.parts.len()]);
+        data_errs_by_disk.insert(i, vec![1; latest_meta.parts.len()]);
     }
     let mut data_errs_by_part: HashMap<usize, Vec<usize>> = HashMap::new();
-    for i in 0..lastest_meta.parts.len() {
+    for i in 0..latest_meta.parts.len() {
         data_errs_by_part.insert(i, vec![1; online_disks.len()]);
     }

@@ -6191,7 +6187,7 @@ async fn disks_with_all_parts(
             continue;
         }
         let meta = &parts_metadata[index];
-        if !meta.mod_time.eq(&lastest_meta.mod_time) || !meta.data_dir.eq(&lastest_meta.data_dir) {
+        if !meta.mod_time.eq(&latest_meta.mod_time) || !meta.data_dir.eq(&latest_meta.data_dir) {
             warn!("mod_time is not Eq, file corrupt, index: {index}");
             meta_errs[index] = Some(DiskError::FileCorrupt);
             parts_metadata[index] = FileInfo::default();
@@ -6217,7 +6213,7 @@ async fn disks_with_all_parts(
     meta_errs.iter().enumerate().for_each(|(index, err)| {
         if err.is_some() {
             let part_err = conv_part_err_to_int(err);
-            for p in 0..lastest_meta.parts.len() {
+            for p in 0..latest_meta.parts.len() {
                 data_errs_by_part.entry(p).or_insert(vec![0; meta_errs.len()])[index] = part_err;
             }
         }
@@ -6269,7 +6265,7 @@ async fn disks_with_all_parts(

         let mut verify_resp = CheckPartsResp::default();
         let mut verify_err = None;
-        meta.data_dir = lastest_meta.data_dir;
+        meta.data_dir = latest_meta.data_dir;
         if scan_mode == HEAL_DEEP_SCAN {
             // disk has a valid xl.meta but may not have all the
             // parts. This is considered an outdated disk, since
@@ -6293,7 +6289,7 @@ async fn disks_with_all_parts(
             }
         }

-        for p in 0..lastest_meta.parts.len() {
+        for p in 0..latest_meta.parts.len() {
             if let Some(vec) = data_errs_by_part.get_mut(&p) {
                 if index < vec.len() {
                     if verify_err.is_some() {
@@ -6331,7 +6327,7 @@ pub fn should_heal_object_on_disk(
     err: &Option<DiskError>,
     parts_errs: &[usize],
     meta: &FileInfo,
-    lastest_meta: &FileInfo,
+    latest_meta: &FileInfo,
 ) -> (bool, Option<DiskError>) {
     if let Some(err) = err {
         if err == &DiskError::FileNotFound || err == &DiskError::FileVersionNotFound || err == &DiskError::FileCorrupt {
@@ -6339,12 +6335,12 @@ pub fn should_heal_object_on_disk(
         }
     }

-    if lastest_meta.volume != meta.volume
-        || lastest_meta.name != meta.name
-        || lastest_meta.version_id != meta.version_id
-        || lastest_meta.deleted != meta.deleted
+    if latest_meta.volume != meta.volume
+        || latest_meta.name != meta.name
+        || latest_meta.version_id != meta.version_id
+        || latest_meta.deleted != meta.deleted
     {
-        info!("lastest_meta not Eq meta, lastest_meta: {:?}, meta: {:?}", lastest_meta, meta);
+        info!("latest_meta not Eq meta, latest_meta: {:?}, meta: {:?}", latest_meta, meta);
         return (true, Some(DiskError::OutdatedXLMeta));
     }
     if !meta.deleted && !meta.is_remote() {
@@ -65,7 +65,7 @@ pub struct Sets {
     pub pool_idx: usize,
     pub endpoints: PoolEndpoints,
     pub format: FormatV3,
-    pub partiy_count: usize,
+    pub parity_count: usize,
     pub set_count: usize,
     pub set_drive_count: usize,
     pub default_parity_count: usize,
@@ -82,13 +82,13 @@ impl Drop for Sets {
 }

 impl Sets {
-    #[tracing::instrument(level = "debug", skip(disks, endpoints, fm, pool_idx, partiy_count))]
+    #[tracing::instrument(level = "debug", skip(disks, endpoints, fm, pool_idx, parity_count))]
     pub async fn new(
         disks: Vec<Option<DiskStore>>,
         endpoints: &PoolEndpoints,
         fm: &FormatV3,
         pool_idx: usize,
-        partiy_count: usize,
+        parity_count: usize,
     ) -> Result<Arc<Self>> {
         let set_count = fm.erasure.sets.len();
         let set_drive_count = fm.erasure.sets[0].len();
@@ -173,7 +173,7 @@ impl Sets {
                 Arc::new(RwLock::new(NsLockMap::new(is_dist_erasure().await))),
                 Arc::new(RwLock::new(set_drive)),
                 set_drive_count,
-                partiy_count,
+                parity_count,
                 i,
                 pool_idx,
                 set_endpoints,
@@ -194,10 +194,10 @@ impl Sets {
             pool_idx,
             endpoints: endpoints.clone(),
             format: fm.clone(),
-            partiy_count,
+            parity_count,
             set_count,
             set_drive_count,
-            default_parity_count: partiy_count,
+            default_parity_count: parity_count,
             distribution_algo: fm.erasure.distribution_algo.clone(),
             exit_signal: Some(tx),
         });
@@ -152,7 +152,7 @@ impl ECStore {
             common_parity_drives = parity_drives;
         }

-        // validate_parity(partiy_count, pool_eps.drives_per_set)?;
+        // validate_parity(parity_count, pool_eps.drives_per_set)?;

         let (disks, errs) = store_init::init_disks(
             &pool_eps.endpoints,
@@ -302,13 +302,13 @@ impl ECStore {
         }

         let pools = meta.return_resumable_pools();
-        let mut pool_indeces = Vec::with_capacity(pools.len());
+        let mut pool_indices = Vec::with_capacity(pools.len());

         let endpoints = get_global_endpoints();

         for p in pools.iter() {
             if let Some(idx) = endpoints.get_pool_idx(&p.cmd_line) {
-                pool_indeces.push(idx);
+                pool_indices.push(idx);
             } else {
                 return Err(Error::other(format!(
                     "unexpected state present for decommission status pool({}) not found",
@@ -317,8 +317,8 @@ impl ECStore {
             }
         }

-        if !pool_indeces.is_empty() {
-            let idx = pool_indeces[0];
+        if !pool_indices.is_empty() {
+            let idx = pool_indices[0];
             if endpoints.as_ref()[idx].endpoints.as_ref()[0].is_local {
                 let (_tx, rx) = broadcast::channel(1);

@@ -328,9 +328,9 @@ impl ECStore {
                     // wait 3 minutes for cluster init
                     tokio::time::sleep(Duration::from_secs(60 * 3)).await;

-                    if let Err(err) = store.decommission(rx.resubscribe(), pool_indeces.clone()).await {
+                    if let Err(err) = store.decommission(rx.resubscribe(), pool_indices.clone()).await {
                         if err == StorageError::DecommissionAlreadyRunning {
-                            for i in pool_indeces.iter() {
+                            for i in pool_indices.iter() {
                                 store.do_decommission_in_routine(rx.resubscribe(), *i).await;
                             }
                             return;
@@ -417,9 +417,9 @@ impl ECStore {
         //         // TODO handle errs
         //         continue;
         //     }
-        //     let entrys = disks_res.as_ref().unwrap();
+        //     let entries = disks_res.as_ref().unwrap();

-        //     for entry in entrys {
+        //     for entry in entries {
         //         // warn!("lst_merged entry---- {}", &entry.name);

         //         if !opts.prefix.is_empty() && !entry.name.starts_with(&opts.prefix) {
@@ -1415,7 +1415,7 @@ impl StorageAPI for ECStore {

         if let Ok(sys) = metadata_sys::get(bucket).await {
             info.created = Some(sys.created);
-            info.versionning = sys.versioning();
+            info.versioning = sys.versioning();
             info.object_locking = sys.object_locking();
         }

@@ -276,7 +276,10 @@ impl HTTPRangeSpec {
             return Ok(range_length);
         }

-        Err(Error::other("range value invaild"))
+        Err(Error::other(format!(
+            "range value invalid: start={}, end={}, expected start <= end and end >= -1",
+            self.start, self.end
+        )))
     }
 }

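The new message surfaces the offending offsets instead of the bare (and misspelled) `range value invaild`. As a rough sketch of how one might provoke it against a running instance (bucket and object names are placeholders, and the exact HTTP response depends on how the handler maps this error):

```bash
# An inverted byte range (start > end) should now yield the detailed message
curl -v -H "Range: bytes=500-100" \
  http://localhost:9000/test-bucket/test-object
```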
@@ -336,7 +339,7 @@ pub struct BucketInfo {
|
||||
pub name: String,
|
||||
pub created: Option<OffsetDateTime>,
|
||||
pub deleted: Option<OffsetDateTime>,
|
||||
pub versionning: bool,
|
||||
pub versioning: bool,
|
||||
pub object_locking: bool,
|
||||
}
|
||||
|
||||
|
||||
@@ -222,7 +222,7 @@ fn check_format_erasure_value(format: &FormatV3) -> Result<()> {
 Ok(())
 }

-// load_format_erasure_all reads every foramt.json
+// load_format_erasure_all reads every format.json
 pub async fn load_format_erasure_all(disks: &[Option<DiskStore>], heal: bool) -> (Vec<Option<FormatV3>>, Vec<Option<DiskError>>) {
 let mut futures = Vec::with_capacity(disks.len());
 let mut datas = Vec::with_capacity(disks.len());

@@ -776,7 +776,7 @@ impl ECStore {
 fallback_disks: fallback_disks.iter().cloned().map(Some).collect(),
 bucket: bucket.to_owned(),
 path,
-recursice: true,
+recursive: true,
 filter_prefix: Some(filter_prefix),
 forward_to: opts.marker.clone(),
 min_disks: listing_quorum,
@@ -851,8 +851,8 @@ impl ECStore {
 }
 };

-if let Some(fiter) = opts.filter {
-if fiter(&fi) {
+if let Some(filter) = opts.filter {
+if filter(&fi) {
 let item = ObjectInfoOrErr {
 item: Some(ObjectInfo::from_file_info(&fi, &bucket, &fi.name, {
 if let Some(v) = &vcf { v.versioned(&fi.name) } else { false }
@@ -899,8 +899,8 @@ impl ECStore {
 }

 for fi in fvs.versions.iter() {
-if let Some(fiter) = opts.filter {
-if fiter(fi) {
+if let Some(filter) = opts.filter {
+if filter(fi) {
 let item = ObjectInfoOrErr {
 item: Some(ObjectInfo::from_file_info(fi, &bucket, &fi.name, {
 if let Some(v) = &vcf { v.versioned(&fi.name) } else { false }
@@ -972,7 +972,7 @@ async fn gather_results(
 let mut sender = Some(results_tx);

 let mut recv = recv;
-let mut entrys = Vec::new();
+let mut entries = Vec::new();
 while let Some(mut entry) = recv.recv().await {
 if returned {
 continue;
@@ -1009,11 +1009,11 @@ async fn gather_results(

 // TODO: Lifecycle

-if opts.limit > 0 && entrys.len() >= opts.limit as usize {
+if opts.limit > 0 && entries.len() >= opts.limit as usize {
 if let Some(tx) = sender {
 tx.send(MetaCacheEntriesSortedResult {
 entries: Some(MetaCacheEntriesSorted {
-o: MetaCacheEntries(entrys.clone()),
+o: MetaCacheEntries(entries.clone()),
 ..Default::default()
 }),
 err: None,
@@ -1027,15 +1027,15 @@ async fn gather_results(
 continue;
 }

-entrys.push(Some(entry));
-// entrys.push(entry);
+entries.push(Some(entry));
+// entries.push(entry);
 }

 // finish not full, return eof
 if let Some(tx) = sender {
 tx.send(MetaCacheEntriesSortedResult {
 entries: Some(MetaCacheEntriesSorted {
-o: MetaCacheEntries(entrys.clone()),
+o: MetaCacheEntries(entries.clone()),
 ..Default::default()
 }),
 err: Some(Error::Unexpected.into()),
@@ -1125,10 +1125,10 @@ async fn merge_entry_channels(

 if path::clean(&best_entry.name) == path::clean(&other_entry.name) {
 let dir_matches = best_entry.is_dir() && other_entry.is_dir();
-let suffix_matche =
+let suffix_matches =
 best_entry.name.ends_with(SLASH_SEPARATOR) == other_entry.name.ends_with(SLASH_SEPARATOR);

-if dir_matches && suffix_matche {
+if dir_matches && suffix_matches {
 to_merge.push(other_idx);
 continue;
 }
@@ -1286,7 +1286,7 @@ impl SetDisks {
 fallback_disks: fallback_disks.iter().cloned().map(Some).collect(),
 bucket: opts.bucket,
 path: opts.base_dir,
-recursice: opts.recursive,
+recursive: opts.recursive,
 filter_prefix: opts.filter_prefix,
 forward_to: opts.marker,
 min_disks: listing_quorum,

@@ -215,7 +215,7 @@ pub struct FileInfo {

 impl FileInfo {
 pub fn new(object: &str, data_blocks: usize, parity_blocks: usize) -> Self {
-let indexs = {
+let indices = {
 let cardinality = data_blocks + parity_blocks;
 let mut nums = vec![0; cardinality];
 let key_crc = crc32fast::hash(object.as_bytes());
@@ -233,7 +233,7 @@ impl FileInfo {
 data_blocks,
 parity_blocks,
 block_size: BLOCK_SIZE_V2,
-distribution: indexs,
+distribution: indices,
 ..Default::default()
 },
 ..Default::default()

@@ -702,7 +702,7 @@ impl FileMeta {
 })
 }

-pub fn lastest_mod_time(&self) -> Option<OffsetDateTime> {
+pub fn latest_mod_time(&self) -> Option<OffsetDateTime> {
 if self.versions.is_empty() {
 return None;
 }
@@ -1762,7 +1762,7 @@ impl MetaDeleteMarker {

 // self.meta_sys = Some(map);
 // }
-// name => return Err(Error::other(format!("not suport field name {name}"))),
+// name => return Err(Error::other(format!("not support field name {name}"))),
 // }
 // }

@@ -1962,32 +1962,32 @@ pub fn merge_file_meta_versions(
 n_versions += 1;
 }
 } else {
-let mut lastest_count = 0;
+let mut latest_count = 0;
 for (i, ver) in tops.iter().enumerate() {
 if ver.header == latest.header {
-lastest_count += 1;
+latest_count += 1;
 continue;
 }

 if i == 0 || ver.header.sorts_before(&latest.header) {
-if i == 0 || lastest_count == 0 {
-lastest_count = 1;
+if i == 0 || latest_count == 0 {
+latest_count = 1;
 } else if !strict && ver.header.matches_not_strict(&latest.header) {
-lastest_count += 1;
+latest_count += 1;
 } else {
-lastest_count = 1;
+latest_count = 1;
 }
 latest = ver.clone();
 continue;
 }

 // Mismatch, but older.
-if lastest_count > 0 && !strict && ver.header.matches_not_strict(&latest.header) {
-lastest_count += 1;
+if latest_count > 0 && !strict && ver.header.matches_not_strict(&latest.header) {
+latest_count += 1;
 continue;
 }

-if lastest_count > 0 && ver.header.version_id == latest.header.version_id {
+if latest_count > 0 && ver.header.version_id == latest.header.version_id {
 let mut x: HashMap<FileMetaVersionHeader, usize> = HashMap::new();
 for a in tops.iter() {
 if a.header.version_id != ver.header.version_id {
@@ -1999,12 +1999,12 @@ pub fn merge_file_meta_versions(
 }
 *x.entry(a_clone.header).or_insert(1) += 1;
 }
-lastest_count = 0;
+latest_count = 0;
 for (k, v) in x.iter() {
-if *v < lastest_count {
+if *v < latest_count {
 continue;
 }
-if *v == lastest_count && latest.header.sorts_before(k) {
+if *v == latest_count && latest.header.sorts_before(k) {
 continue;
 }
 tops.iter().for_each(|a| {
@@ -2017,12 +2017,12 @@ pub fn merge_file_meta_versions(
 }
 });

-lastest_count = *v;
+latest_count = *v;
 }
 break;
 }
 }
-if lastest_count >= quorum {
+if latest_count >= quorum {
 if !latest.header.free_version() {
 n_versions += 1;
 }

@@ -221,7 +221,7 @@ impl MetaCacheEntry {
 };

 if self_vers.versions.len() != other_vers.versions.len() {
-match self_vers.lastest_mod_time().cmp(&other_vers.lastest_mod_time()) {
+match self_vers.latest_mod_time().cmp(&other_vers.latest_mod_time()) {
 Ordering::Greater => return (Some(self.clone()), false),
 Ordering::Less => return (Some(other.clone()), false),
 _ => {}

@@ -90,7 +90,7 @@ where
 T: Store,
 {
 pub(crate) async fn new(api: T) -> Arc<Self> {
-let (sender, reciver) = mpsc::channel::<i64>(100);
+let (sender, receiver) = mpsc::channel::<i64>(100);

 let sys = Arc::new(Self {
 api,
@@ -101,11 +101,11 @@ where
 last_timestamp: AtomicI64::new(0),
 });

-sys.clone().init(reciver).await.unwrap();
+sys.clone().init(receiver).await.unwrap();
 sys
 }

-async fn init(self: Arc<Self>, reciver: Receiver<i64>) -> Result<()> {
+async fn init(self: Arc<Self>, receiver: Receiver<i64>) -> Result<()> {
 self.clone().save_iam_formatter().await?;
 self.clone().load().await?;

@@ -118,7 +118,7 @@ where
 let s = Arc::clone(&self);
 async move {
 let ticker = tokio::time::interval(Duration::from_secs(120));
-tokio::pin!(ticker, reciver);
+tokio::pin!(ticker, receiver);
 loop {
 select! {
 _ = ticker.tick() => {
@@ -127,13 +127,13 @@ where
 error!("iam load err {:?}", err);
 }
 },
-i = reciver.recv() => {
-info!("iam load reciver");
+i = receiver.recv() => {
+info!("iam load receiver");
 match i {
 Some(t) => {
 let last = s.last_timestamp.load(Ordering::Relaxed);
 if last <= t {
-info!("iam load reciver load");
+info!("iam load receiver load");
 if let Err(err) =s.clone().load().await{
 error!("iam load err {:?}", err);
 }
@@ -814,7 +814,7 @@ where
 let mp = MappedPolicy::new(policy);
 let (_, combined_policy_stmt) = filter_policies(&self.cache, &mp.policies, "temp");
 if combined_policy_stmt.is_empty() {
-return Err(Error::other(format!("need poliy not found {}", IamError::NoSuchPolicy)));
+return Err(Error::other(format!("Required policy not found: {}", IamError::NoSuchPolicy)));
 }

 self.api
@@ -987,7 +987,7 @@ where
 _ => auth::ACCOUNT_OFF,
 }
 };
-let user_entiry = UserIdentity::from(Credentials {
+let user_entry = UserIdentity::from(Credentials {
 access_key: access_key.to_string(),
 secret_key: args.secret_key.to_string(),
 status: status.to_owned(),
@@ -995,10 +995,10 @@ where
 });

 self.api
-.save_user_identity(access_key, UserType::Reg, user_entiry.clone(), None)
+.save_user_identity(access_key, UserType::Reg, user_entry.clone(), None)
 .await?;

-self.update_user_with_claims(access_key, user_entiry)?;
+self.update_user_with_claims(access_key, user_entry)?;

 Ok(OffsetDateTime::now_utc())
 }
@@ -1104,7 +1104,7 @@ where
 }
 };

-let user_entiry = UserIdentity::from(Credentials {
+let user_entry = UserIdentity::from(Credentials {
 access_key: access_key.to_string(),
 secret_key: u.credentials.secret_key.clone(),
 status: status.to_owned(),
@@ -1112,10 +1112,10 @@ where
 });

 self.api
-.save_user_identity(access_key, UserType::Reg, user_entiry.clone(), None)
+.save_user_identity(access_key, UserType::Reg, user_entry.clone(), None)
 .await?;

-self.update_user_with_claims(access_key, user_entiry)?;
+self.update_user_with_claims(access_key, user_entry)?;

 Ok(OffsetDateTime::now_utc())
 }

@@ -62,8 +62,12 @@ pub trait Store: Clone + Send + Sync + 'static {
 is_group: bool,
 m: &mut HashMap<String, MappedPolicy>,
 ) -> Result<()>;
-async fn load_mapped_policys(&self, user_type: UserType, is_group: bool, m: &mut HashMap<String, MappedPolicy>)
-    -> Result<()>;
+async fn load_mapped_policies(
+&self,
+user_type: UserType,
+is_group: bool,
+m: &mut HashMap<String, MappedPolicy>,
+) -> Result<()>;

 async fn load_all(&self, cache: &Cache) -> Result<()>;
 }

@@ -656,7 +656,7 @@ impl Store for ObjectStore {

 Ok(())
 }
-async fn load_mapped_policys(
+async fn load_mapped_policies(
 &self,
 user_type: UserType,
 is_group: bool,

@@ -124,13 +124,13 @@ impl<T: Store> IamSys<T> {
 })
 }

-pub async fn load_mapped_policys(
+pub async fn load_mapped_policies(
 &self,
 user_type: UserType,
 is_group: bool,
 m: &mut HashMap<String, MappedPolicy>,
 ) -> Result<()> {
-self.store.api.load_mapped_policys(user_type, is_group, m).await
+self.store.api.load_mapped_policies(user_type, is_group, m).await
 }

 pub async fn list_polices(&self, bucket_name: &str) -> Result<HashMap<String, Policy>> {

@@ -22,7 +22,7 @@ pub struct LRWMutex {
 id: RwLock<String>,
 source: RwLock<String>,
 is_write: RwLock<bool>,
-refrence: RwLock<usize>,
+reference: RwLock<usize>,
 }

 impl LRWMutex {
@@ -66,13 +66,13 @@ impl LRWMutex {

 let mut locked = false;
 if is_write {
-if *self.refrence.read().await == 0 && !*self.is_write.read().await {
-*self.refrence.write().await = 1;
+if *self.reference.read().await == 0 && !*self.is_write.read().await {
+*self.reference.write().await = 1;
 *self.is_write.write().await = true;
 locked = true;
 }
 } else if !*self.is_write.read().await {
-*self.refrence.write().await += 1;
+*self.reference.write().await += 1;
 locked = true;
 }

@@ -115,13 +115,13 @@ impl LRWMutex {
 async fn unlock(&self, is_write: bool) -> bool {
 let mut unlocked = false;
 if is_write {
-if *self.is_write.read().await && *self.refrence.read().await == 1 {
-*self.refrence.write().await = 0;
+if *self.is_write.read().await && *self.reference.read().await == 1 {
+*self.reference.write().await = 0;
 *self.is_write.write().await = false;
 unlocked = true;
 }
-} else if !*self.is_write.read().await && *self.refrence.read().await > 0 {
-*self.refrence.write().await -= 1;
+} else if !*self.is_write.read().await && *self.reference.read().await > 0 {
+*self.reference.write().await -= 1;
 unlocked = true;
 }

@@ -129,7 +129,7 @@ impl LRWMutex {
 }

 pub async fn force_un_lock(&self) {
-*self.refrence.write().await = 0;
+*self.reference.write().await = 0;
 *self.is_write.write().await = false;
 }
 }

@@ -14,7 +14,8 @@

 use rustfs_config::{
 APP_NAME, DEFAULT_LOG_DIR, DEFAULT_LOG_FILENAME, DEFAULT_LOG_KEEP_FILES, DEFAULT_LOG_LEVEL, DEFAULT_LOG_ROTATION_SIZE_MB,
-DEFAULT_LOG_ROTATION_TIME, ENVIRONMENT, METER_INTERVAL, SAMPLE_RATIO, SERVICE_VERSION, USE_STDOUT,
+DEFAULT_LOG_ROTATION_TIME, DEFAULT_OBS_LOG_FILENAME, DEFAULT_SINK_FILE_LOG_FILE, ENVIRONMENT, METER_INTERVAL, SAMPLE_RATIO,
+SERVICE_VERSION, USE_STDOUT,
 };
 use serde::{Deserialize, Serialize};
 use std::env;
@@ -104,7 +105,7 @@ impl OtelConfig {
 log_filename: env::var("RUSTFS_OBS_LOG_FILENAME")
 .ok()
 .and_then(|v| v.parse().ok())
-.or(Some(DEFAULT_LOG_FILENAME.to_string())),
+.or(Some(DEFAULT_OBS_LOG_FILENAME.to_string())),
 log_rotation_size_mb: env::var("RUSTFS_OBS_LOG_ROTATION_SIZE_MB")
 .ok()
 .and_then(|v| v.parse().ok())
@@ -210,16 +211,15 @@ pub struct FileSinkConfig {

 impl FileSinkConfig {
 pub fn get_default_log_path() -> String {
-let temp_dir = env::temp_dir().join("rustfs");
-
+let temp_dir = env::temp_dir().join(DEFAULT_LOG_FILENAME);
 if let Err(e) = std::fs::create_dir_all(&temp_dir) {
 eprintln!("Failed to create log directory: {e}");
-return "rustfs/rustfs.log".to_string();
+return DEFAULT_LOG_DIR.to_string();
 }
 temp_dir
-.join("rustfs.log")
+.join(DEFAULT_SINK_FILE_LOG_FILE)
 .to_str()
-.unwrap_or("rustfs/rustfs.log")
+.unwrap_or(DEFAULT_LOG_DIR)
 .to_string()
 }
 pub fn new() -> Self {

@@ -77,8 +77,7 @@ impl Logger {
 }

 /// Asynchronous logging of unified log entries
-#[tracing::instrument(skip(self), fields(log_source = "logger"))]
-#[tracing::instrument(level = "error", skip_all)]
+#[tracing::instrument(skip_all, fields(log_source = "logger"))]
 pub async fn log_entry(&self, entry: UnifiedLogEntry) -> Result<(), GlobalError> {
 // Extract information for tracing based on entry type
 match &entry {

@@ -14,6 +14,7 @@

 use crate::{AppConfig, SinkConfig, UnifiedLogEntry};
 use async_trait::async_trait;
+use rustfs_config::DEFAULT_SINK_FILE_LOG_FILE;
 use std::sync::Arc;

 #[cfg(feature = "file")]
@@ -71,7 +72,7 @@ pub async fn create_sinks(config: &AppConfig) -> Vec<Arc<dyn Sink>> {
 SinkConfig::File(file_config) => {
 tracing::debug!("FileSink: Using path: {}", file_config.path);
 match file::FileSink::new(
-file_config.path.clone(),
+format!("{}/{}", file_config.path.clone(), DEFAULT_SINK_FILE_LOG_FILE),
 file_config.buffer_size.unwrap_or(8192),
 file_config.flush_interval_ms.unwrap_or(1000),
 file_config.flush_threshold.unwrap_or(100),

@@ -36,6 +36,7 @@ use rustfs_config::{
 use rustfs_utils::get_local_ip_with_default;
 use smallvec::SmallVec;
 use std::borrow::Cow;
+use std::fs;
 use std::io::IsTerminal;
 use tracing::info;
 use tracing_error::ErrorLayer;
@@ -295,6 +296,21 @@ pub(crate) fn init_telemetry(config: &OtelConfig) -> OtelGuard {
 let log_directory = config.log_directory.as_deref().unwrap_or(DEFAULT_LOG_DIR);
 let log_filename = config.log_filename.as_deref().unwrap_or(service_name);

+if let Err(e) = fs::create_dir_all(log_directory) {
+eprintln!("Failed to create log directory {log_directory}: {e}");
+}
+#[cfg(unix)]
+{
+// Linux/macOS Setting Permissions
+// Set the log directory permissions to 755 (rwxr-xr-x)
+use std::fs::Permissions;
+use std::os::unix::fs::PermissionsExt;
+match fs::set_permissions(log_directory, Permissions::from_mode(0o755)) {
+Ok(_) => eprintln!("Log directory permissions set to 755: {log_directory}"),
+Err(e) => eprintln!("Failed to set log directory permissions {log_directory}: {e}"),
+}
+}
+
 // Build log cutting conditions
 let rotation_criterion = match (config.log_rotation_time.as_deref(), config.log_rotation_size_mb) {
 // Cut by time and size at the same time
@@ -354,13 +370,15 @@ pub(crate) fn init_telemetry(config: &OtelConfig) -> OtelGuard {
 FileSpec::default()
 .directory(log_directory)
 .basename(log_filename)
-.suffix("log"),
+.suffix("log")
+.suppress_timestamp(),
 )
 .rotate(rotation_criterion, Naming::TimestampsDirect, Cleanup::KeepLogFiles(keep_files.into()))
 .format_for_files(format_for_file) // Add a custom formatting function for file output
 .duplicate_to_stdout(level_filter) // Use dynamic levels
 .format_for_stdout(format_with_color) // Add a custom formatting function for terminal output
-.write_mode(WriteMode::Async)
+.write_mode(WriteMode::BufferAndFlush)
+.append() // Avoid clearing existing logs at startup
 .print_message() // Startup information output to console
 .start();

 if let Ok(logger) = flexi_logger_result {
@@ -420,7 +438,7 @@ fn format_with_color(w: &mut dyn std::io::Write, now: &mut DeferredNow, record:
 writeln!(
 w,
 "[{}] {} [{}] [{}:{}] [{}:{}] {}",
-now.now().format("%Y-%m-%d %H:%M:%S%.6f"),
+now.now().format(flexi_logger::TS_DASHES_BLANK_COLONS_DOT_BLANK),
 level_style.paint(level.to_string()),
 Color::Magenta.paint(record.target()),
 Color::Blue.paint(record.file().unwrap_or("unknown")),
@@ -444,7 +462,7 @@ fn format_for_file(w: &mut dyn std::io::Write, now: &mut DeferredNow, record: &R
 writeln!(
 w,
 "[{}] {} [{}] [{}:{}] [{}:{}] {}",
-now.now().format("%Y-%m-%d %H:%M:%S%.6f"),
+now.now().format(flexi_logger::TS_DASHES_BLANK_COLONS_DOT_BLANK),
 level,
 record.target(),
 record.file().unwrap_or("unknown"),

@@ -107,7 +107,7 @@ mod tests {
 #[test_case("jwt:dwebsite/aaa")]
 #[test_case("sfvc:DuratdionSeconds")]
 #[test_case("svc:DursationSeconds/aaa")]
-fn test_deserialize_falied(key: &str) {
+fn test_deserialize_failed(key: &str) {
 let val = serde_json::from_str::<Key>(key);
 assert!(val.is_err());
 }

@@ -30,9 +30,9 @@ use super::{
 pub struct ResourceSet(pub HashSet<Resource>);

 impl ResourceSet {
-pub fn is_match(&self, resource: &str, conditons: &HashMap<String, Vec<String>>) -> bool {
+pub fn is_match(&self, resource: &str, conditions: &HashMap<String, Vec<String>>) -> bool {
 for re in self.0.iter() {
-if re.is_match(resource, conditons) {
+if re.is_match(resource, conditions) {
 return true;
 }
 }
@@ -85,14 +85,14 @@ pub enum Resource {
 impl Resource {
 pub const S3_PREFIX: &'static str = "arn:aws:s3:::";

-pub fn is_match(&self, resource: &str, conditons: &HashMap<String, Vec<String>>) -> bool {
+pub fn is_match(&self, resource: &str, conditions: &HashMap<String, Vec<String>>) -> bool {
 let mut pattern = match self {
 Resource::S3(s) => s.to_owned(),
 Resource::Kms(s) => s.to_owned(),
 };
-if !conditons.is_empty() {
+if !conditions.is_empty() {
 for key in KeyName::COMMON_KEYS {
-if let Some(rvalue) = conditons.get(key.name()) {
+if let Some(rvalue) = conditions.get(key.name()) {
 if matches!(rvalue.first().map(|c| !c.is_empty()), Some(true)) {
 pattern = pattern.replace(&key.var_name(), &rvalue[0]);
 }

@@ -39,14 +39,21 @@ pub async fn node_service_time_out_client(
 Box<dyn Error>,
 > {
 let token: MetadataValue<_> = "rustfs rpc".parse()?;
-let channel = match GLOBAL_Conn_Map.read().await.get(addr) {
-Some(channel) => channel.clone(),
+
+let channel = { GLOBAL_Conn_Map.read().await.get(addr).cloned() };
+
+let channel = match channel {
+Some(channel) => channel,
 None => {
 let connector = Endpoint::from_shared(addr.to_string())?.connect_timeout(Duration::from_secs(60));
-connector.connect().await?
+let channel = connector.connect().await?;
+
+{
+GLOBAL_Conn_Map.write().await.insert(addr.to_string(), channel.clone());
+}
+channel
 }
 };
-GLOBAL_Conn_Map.write().await.insert(addr.to_string(), channel.clone());

 // let timeout_channel = Timeout::new(channel, Duration::from_secs(60));
 Ok(NodeServiceClient::with_interceptor(

entrypoint.sh (new file, 104 lines)
@@ -0,0 +1,104 @@
+#!/bin/bash
+set -e
+
+APP_USER=rustfs
+APP_GROUP=rustfs
+APP_UID=${PUID:-1000}
+APP_GID=${PGID:-1000}
+
+# Parse RUSTFS_VOLUMES into array (support space, comma, tab as separator)
+VOLUME_RAW="${RUSTFS_VOLUMES:-/data}"
+# Replace comma and tab with space, then split
+VOLUME_RAW=$(echo "$VOLUME_RAW" | tr ',\t' ' ')
+read -ra ALL_VOLUMES <<< "$VOLUME_RAW"
+
+# Only keep local volumes (start with /, not http/https)
+LOCAL_VOLUMES=()
+for vol in "${ALL_VOLUMES[@]}"; do
+    if [[ "$vol" =~ ^/ ]] && [[ ! "$vol" =~ ^https?:// ]]; then
+        # Not a URL (http/https), just a local path
+        LOCAL_VOLUMES+=("$vol")
+    fi
+    # If it's a URL (http/https), skip
+    # If it's an empty string, skip
+    # If it's a local path, keep
+    # (We don't support other protocols here)
+done
+
+# Always ensure /logs is included for permission fix
+include_logs=1
+for vol in "${LOCAL_VOLUMES[@]}"; do
+    if [ "$vol" = "/logs" ]; then
+        include_logs=0
+        break
+    fi
+done
+if [ $include_logs -eq 1 ]; then
+    LOCAL_VOLUMES+=("/logs")
+fi
+
+# Try to update rustfs UID/GID if needed (requires root and shadow tools)
+update_user_group_ids() {
+    local uid="$1"
+    local gid="$2"
+    local user="$3"
+    local group="$4"
+    local updated=0
+    if [ "$(id -u "$user")" != "$uid" ]; then
+        if command -v usermod >/dev/null 2>&1; then
+            echo "🔧 Updating UID of $user to $uid"
+            usermod -u "$uid" "$user"
+            updated=1
+        fi
+    fi
+    if [ "$(id -g "$group")" != "$gid" ]; then
+        if command -v groupmod >/dev/null 2>&1; then
+            echo "🔧 Updating GID of $group to $gid"
+            groupmod -g "$gid" "$group"
+            updated=1
+        fi
+    fi
+    return $updated
+}
+
+echo "📦 Initializing mount directories: ${LOCAL_VOLUMES[*]}"
+
+for vol in "${LOCAL_VOLUMES[@]}"; do
+    if [ ! -d "$vol" ]; then
+        echo "📁 Creating directory: $vol"
+        mkdir -p "$vol"
+    fi
+
+    # Alpine busybox stat does not support -c, coreutils is required
+    dir_uid=$(stat -c '%u' "$vol")
+    dir_gid=$(stat -c '%g' "$vol")
+
+    if [ "$dir_uid" != "$APP_UID" ] || [ "$dir_gid" != "$APP_GID" ]; then
+        if [[ "$SKIP_CHOWN" != "true" ]]; then
+            # Prefer to update rustfs user/group UID/GID
+            update_user_group_ids "$dir_uid" "$dir_gid" "$APP_USER" "$APP_GROUP" || \
+            {
+                echo "🔧 Fixing ownership for: $vol → $APP_USER:$APP_GROUP"
+                if [[ -n "$CHOWN_RECURSION_DEPTH" ]]; then
+                    echo "🔧 Applying ownership fix with recursion depth: $CHOWN_RECURSION_DEPTH"
+                    find "$vol" -mindepth 0 -maxdepth "$CHOWN_RECURSION_DEPTH" -exec chown "$APP_USER:$APP_GROUP" {} \;
+                else
+                    echo "🔧 Applying ownership fix recursively (full depth)"
+                    chown -R "$APP_USER:$APP_GROUP" "$vol"
+                fi
+            }
+        else
+            echo "⚠️ SKIP_CHOWN is enabled. Skipping ownership fix for: $vol"
+        fi
+    fi
+    chmod 700 "$vol"
+done
+
+# Warn if default credentials are used
+if [[ "$RUSTFS_ACCESS_KEY" == "rustfsadmin" || "$RUSTFS_SECRET_KEY" == "rustfsadmin" ]]; then
+    echo "⚠️ WARNING: Using default RUSTFS_ACCESS_KEY or RUSTFS_SECRET_KEY"
+    echo "⚠️ It is strongly recommended to override these values in production!"
+fi
+
+echo "🚀 Starting application: $*"
+exec gosu "$APP_USER" "$@"
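The entrypoint above is configured entirely through environment variables. A hypothetical container invocation exercising those knobs might look like the sketch below; the image tag, host paths, and credentials are illustrative placeholders, not project defaults:

```bash
# Hypothetical `docker run` for the entrypoint above (all values are placeholders).
# - RUSTFS_VOLUMES: comma/space/tab separated; http(s) URLs are skipped by the entrypoint
# - PUID/PGID: desired UID/GID for the rustfs user that owns the volumes (default 1000)
# - SKIP_CHOWN=true skips ownership fixes; CHOWN_RECURSION_DEPTH bounds the chown depth
docker run -d \
  -e RUSTFS_VOLUMES="/data0,/data1" \
  -e PUID=1000 -e PGID=1000 \
  -e SKIP_CHOWN=false \
  -e CHOWN_RECURSION_DEPTH=2 \
  -e RUSTFS_ACCESS_KEY=changeme \
  -e RUSTFS_SECRET_KEY=changeme \
  -v /srv/rustfs/data0:/data0 \
  -v /srv/rustfs/data1:/data1 \
  rustfs/rustfs:latest
```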
@@ -74,7 +74,7 @@ use tracing::{error, info, warn};
 pub mod bucket_meta;
 pub mod event;
 pub mod group;
-pub mod policys;
+pub mod policies;
 pub mod pools;
 pub mod rebalance;
 pub mod service_account;
@@ -798,9 +798,9 @@ pub struct GetReplicationMetricsHandler {}
 impl Operation for GetReplicationMetricsHandler {
 async fn call(&self, _req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
 error!("GetReplicationMetricsHandler");
-let querys = extract_query_params(&_req.uri);
-if let Some(bucket) = querys.get("bucket") {
-error!("get bucket:{} metris", bucket);
+let queries = extract_query_params(&_req.uri);
+if let Some(bucket) = queries.get("bucket") {
+error!("get bucket:{} metrics", bucket);
 }
 //return Err(s3_error!(InvalidArgument, "Invalid bucket name"));
 //Ok(S3Response::with_headers((StatusCode::OK, Body::from()), header))
@@ -815,7 +815,7 @@ impl Operation for SetRemoteTargetHandler {
 //return Ok(S3Response::new((StatusCode::OK, Body::from("OK".to_string()))));
 // println!("handle MetricsHandler, params: {:?}", _req.input);
 info!("SetRemoteTargetHandler params: {:?}", _req.credentials);
-let querys = extract_query_params(&_req.uri);
+let queries = extract_query_params(&_req.uri);
 let Some(_cred) = _req.credentials else {
 error!("credentials null");
 return Err(s3_error!(InvalidRequest, "get cred failed"));
@@ -825,7 +825,7 @@ impl Operation for SetRemoteTargetHandler {
 //println!("body: {}", std::str::from_utf8(&body.clone()).unwrap());

 //println!("bucket is:{}", bucket.clone());
-if let Some(bucket) = querys.get("bucket") {
+if let Some(bucket) = queries.get("bucket") {
 if bucket.is_empty() {
 info!("have bucket: {}", bucket);
 return Ok(S3Response::new((StatusCode::OK, Body::from("fuck".to_string()))));
@@ -842,7 +842,7 @@ impl Operation for SetRemoteTargetHandler {
 {
 Ok(info) => {
 info!("Bucket Info: {:?}", info);
-if !info.versionning {
+if !info.versioning {
 return Ok(S3Response::new((StatusCode::FORBIDDEN, Body::from("bucket need versioned".to_string()))));
 }
 }
@@ -932,13 +932,13 @@ impl Operation for ListRemoteTargetHandler {
 async fn call(&self, _req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
 warn!("list GetRemoteTargetHandler, params: {:?}", _req.credentials);

-let querys = extract_query_params(&_req.uri);
+let queries = extract_query_params(&_req.uri);
 let Some(_cred) = _req.credentials else {
 error!("credentials null");
 return Err(s3_error!(InvalidRequest, "get cred failed"));
 };

-if let Some(bucket) = querys.get("bucket") {
+if let Some(bucket) = queries.get("bucket") {
 if bucket.is_empty() {
 error!("bucket parameter is empty");
 return Ok(S3Response::new((
@@ -957,7 +957,7 @@ impl Operation for ListRemoteTargetHandler {
 {
 Ok(info) => {
 info!("Bucket Info: {:?}", info);
-if !info.versionning {
+if !info.versioning {
 return Ok(S3Response::new((
 StatusCode::FORBIDDEN,
 Body::from("Bucket needs versioning".to_string()),
@@ -1009,8 +1009,8 @@ pub struct RemoveRemoteTargetHandler {}
 impl Operation for RemoveRemoteTargetHandler {
 async fn call(&self, _req: S3Request<Body>, _params: Params<'_, '_>) -> S3Result<S3Response<(StatusCode, Body)>> {
 debug!("remove remote target called");
-let querys = extract_query_params(&_req.uri);
-let Some(bucket) = querys.get("bucket") else {
+let queries = extract_query_params(&_req.uri);
+let Some(bucket) = queries.get("bucket") else {
 return Ok(S3Response::new((
 StatusCode::BAD_REQUEST,
 Body::from("Bucket parameter is required".to_string()),
@@ -1019,7 +1019,7 @@ impl Operation for RemoveRemoteTargetHandler {

 let mut need_delete = true;

-if let Some(arnstr) = querys.get("arn") {
+if let Some(arnstr) = queries.get("arn") {
 let _arn = bucket_targets::ARN::parse(arnstr);

 match get_replication_config(bucket).await {

@@ -135,7 +135,7 @@ impl Operation for AddServiceAccount {

 let is_svc_acc = target_user == req_user || target_user == req_parent_user;

-let mut taget_groups = None;
+let mut target_groups = None;
 let mut opts = NewServiceAccountOpts {
 access_key: create_req.access_key,
 secret_key: create_req.secret_key,
@@ -154,7 +154,7 @@ impl Operation for AddServiceAccount {
 target_user = req_parent_user;
 }

-taget_groups = req_groups;
+target_groups = req_groups;

 if let Some(claims) = cred.claims {
 if opts.claims.is_none() {
@@ -172,7 +172,7 @@ impl Operation for AddServiceAccount {
 }

 let (new_cred, _) = iam_store
-.new_service_account(&target_user, taget_groups, opts)
+.new_service_account(&target_user, target_groups, opts)
 .await
 .map_err(|e| {
 debug!("create service account failed, e: {:?}", e);

@@ -545,7 +545,7 @@ impl Operation for ExportIam {
 USER_POLICY_MAPPINGS_FILE => {
 let mut user_policy_mappings: HashMap<String, MappedPolicy> = HashMap::new();
 iam_store
-.load_mapped_policys(UserType::Reg, false, &mut user_policy_mappings)
+.load_mapped_policies(UserType::Reg, false, &mut user_policy_mappings)
 .await
 .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;

@@ -561,7 +561,7 @@ impl Operation for ExportIam {
 GROUP_POLICY_MAPPINGS_FILE => {
 let mut group_policy_mappings = HashMap::new();
 iam_store
-.load_mapped_policys(UserType::Reg, true, &mut group_policy_mappings)
+.load_mapped_policies(UserType::Reg, true, &mut group_policy_mappings)
 .await
 .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;

@@ -577,7 +577,7 @@ impl Operation for ExportIam {
 STS_USER_POLICY_MAPPINGS_FILE => {
 let mut sts_user_policy_mappings: HashMap<String, MappedPolicy> = HashMap::new();
 iam_store
-.load_mapped_policys(UserType::Sts, false, &mut sts_user_policy_mappings)
+.load_mapped_policies(UserType::Sts, false, &mut sts_user_policy_mappings)
 .await
 .map_err(|e| S3Error::with_message(S3ErrorCode::InternalError, e.to_string()))?;
 let json_str = serde_json::to_vec(&sts_user_policy_mappings)

@@ -20,7 +20,7 @@ pub mod utils;

 // use ecstore::global::{is_dist_erasure, is_erasure};
 use handlers::{
-bucket_meta, group, policys, pools, rebalance,
+bucket_meta, group, policies, pools, rebalance,
 service_account::{AddServiceAccount, DeleteServiceAccount, InfoServiceAccount, ListServiceAccount, UpdateServiceAccount},
 sts, tier, user,
 };
@@ -333,35 +333,35 @@ fn register_user_route(r: &mut S3Router<AdminOperation>) -> std::io::Result<()>
 r.insert(
 Method::GET,
 format!("{}{}", ADMIN_PREFIX, "/v3/list-canned-policies").as_str(),
-AdminOperation(&policys::ListCannedPolicies {}),
+AdminOperation(&policies::ListCannedPolicies {}),
 )?;

 // info-canned-policy?name=xxx
 r.insert(
 Method::GET,
 format!("{}{}", ADMIN_PREFIX, "/v3/info-canned-policy").as_str(),
-AdminOperation(&policys::InfoCannedPolicy {}),
+AdminOperation(&policies::InfoCannedPolicy {}),
 )?;

 // add-canned-policy?name=xxx
 r.insert(
 Method::PUT,
 format!("{}{}", ADMIN_PREFIX, "/v3/add-canned-policy").as_str(),
-AdminOperation(&policys::AddCannedPolicy {}),
+AdminOperation(&policies::AddCannedPolicy {}),
 )?;

 // remove-canned-policy?name=xxx
 r.insert(
 Method::DELETE,
 format!("{}{}", ADMIN_PREFIX, "/v3/remove-canned-policy").as_str(),
-AdminOperation(&policys::RemoveCannedPolicy {}),
+AdminOperation(&policies::RemoveCannedPolicy {}),
 )?;

 // set-user-or-group-policy?policyName=xxx&userOrGroup=xxx&isGroup=xxx
 r.insert(
 Method::PUT,
 format!("{}{}", ADMIN_PREFIX, "/v3/set-user-or-group-policy").as_str(),
-AdminOperation(&policys::SetPolicyForUserOrGroup {}),
+AdminOperation(&policies::SetPolicyForUserOrGroup {}),
 )?;

 Ok(())

@@ -740,7 +740,7 @@ impl S3 for FS {

 if let Some(part_num) = part_number {
 if part_num == 0 {
-return Err(s3_error!(InvalidArgument, "part_numer invalid"));
+return Err(s3_error!(InvalidArgument, "Invalid part number: part number must be greater than 0"));
 }
 }

@@ -792,11 +792,6 @@ impl S3 for FS {
 };
 let last_modified = info.mod_time.map(Timestamp::from);

-let body = Some(StreamingBlob::wrap(bytes_stream(
-ReaderStream::with_capacity(reader.stream, DEFAULT_READ_BUFFER_SIZE),
-info.size as usize,
-)));
-
 let mut rs = rs;

 if let Some(part_number) = part_number {
@@ -805,17 +800,25 @@ impl S3 for FS {
 }
 }

+let mut content_length = info.size as i64;
+
+let content_range = if let Some(rs) = rs {
+let total_size = info.get_actual_size().map_err(ApiError::from)?;
+let (start, length) = rs.get_offset_length(total_size as i64).map_err(ApiError::from)?;
+content_length = length;
+Some(format!("bytes {}-{}/{}", start, start as i64 + length - 1, total_size))
+} else {
+None
+};
+
+let body = Some(StreamingBlob::wrap(bytes_stream(
+ReaderStream::with_capacity(reader.stream, DEFAULT_READ_BUFFER_SIZE),
+content_length as usize,
+)));
+
 let output = GetObjectOutput {
 body,
-content_length: Some(info.size as i64),
+content_length: Some(content_length),
 last_modified,
 content_type,
 accept_ranges: Some("bytes".to_string()),
@@ -879,7 +882,7 @@ impl S3 for FS {

 if let Some(part_num) = part_number {
 if part_num == 0 {
-return Err(s3_error!(InvalidArgument, "part_numer invalid"));
+return Err(s3_error!(InvalidArgument, "part_number invalid"));
 }
 }

@@ -1938,7 +1941,7 @@ impl S3 for FS {

 let conditions = get_condition_values(&req.headers, &auth::Credentials::default());

-let read_olny = PolicySys::is_allowed(&BucketPolicyArgs {
+let read_only = PolicySys::is_allowed(&BucketPolicyArgs {
 bucket: &bucket,
 action: Action::S3Action(S3Action::ListBucketAction),
 is_owner: false,
@@ -1949,7 +1952,7 @@ impl S3 for FS {
 })
 .await;

-let write_olny = PolicySys::is_allowed(&BucketPolicyArgs {
+let write_only = PolicySys::is_allowed(&BucketPolicyArgs {
 bucket: &bucket,
 action: Action::S3Action(S3Action::PutObjectAction),
 is_owner: false,
@@ -1960,7 +1963,7 @@ impl S3 for FS {
 })
 .await;

-let is_public = read_olny && write_olny;
+let is_public = read_only && write_only;

 let output = GetBucketPolicyStatusOutput {
 policy_status: Some(PolicyStatus {
@@ -1993,9 +1996,9 @@ impl S3 for FS {
 }
 };

-let policys = try_!(serde_json::to_string(&cfg));
+let policies = try_!(serde_json::to_string(&cfg));

-Ok(S3Response::new(GetBucketPolicyOutput { policy: Some(policys) }))
+Ok(S3Response::new(GetBucketPolicyOutput { policy: Some(policies) }))
 }

 async fn put_bucket_policy(&self, req: S3Request<PutBucketPolicyInput>) -> S3Result<S3Response<PutBucketPolicyOutput>> {
@@ -2689,7 +2692,7 @@ impl S3 for FS {
 for batch in results {
 csv_writer
 .write(&batch)
-.map_err(|e| s3_error!(InternalError, "cann't encode output to csv. e: {}", e.to_string()))?;
+.map_err(|e| s3_error!(InternalError, "can't encode output to csv. e: {}", e.to_string()))?;
 }
 } else if input.request.output_serialization.json.is_some() {
 let mut json_writer = JsonWriterBuilder::new()
@@ -2698,13 +2701,16 @@ impl S3 for FS {
 for batch in results {
 json_writer
 .write(&batch)
-.map_err(|e| s3_error!(InternalError, "cann't encode output to json. e: {}", e.to_string()))?;
+.map_err(|e| s3_error!(InternalError, "can't encode output to json. e: {}", e.to_string()))?;
 }
 json_writer
 .finish()
 .map_err(|e| s3_error!(InternalError, "writer output into json error, e: {}", e.to_string()))?;
 } else {
-return Err(s3_error!(InvalidArgument, "unknow output format"));
+return Err(s3_error!(
+InvalidArgument,
+"Unsupported output format. Supported formats are CSV and JSON"
+));
 }

 let (tx, rx) = mpsc::channel::<S3Result<SelectObjectContentEvent>>(2);

@@ -51,7 +51,7 @@ export RUSTFS_CONSOLE_ADDRESS=":9001"
 # export RUSTFS_TLS_PATH="./deploy/certs"

 # Observability configuration
-export RUSTFS_OBS_ENDPOINT=http://localhost:4317 # Address of the OpenTelemetry Collector
+#export RUSTFS_OBS_ENDPOINT=http://localhost:4317 # Address of the OpenTelemetry Collector
 #export RUSTFS_OBS_USE_STDOUT=false # Whether to log to stdout
 #export RUSTFS_OBS_SAMPLE_RATIO=2.0 # Sampling ratio in [0.0, 1.0]; 0.0 samples nothing, 1.0 samples everything
 #export RUSTFS_OBS_METER_INTERVAL=1 # Sampling interval in seconds
@@ -64,8 +64,7 @@ export RUSTFS_OBS_LOG_DIRECTORY="$current_dir/deploy/logs" # Log directory
 export RUSTFS_OBS_LOG_ROTATION_TIME="minute" # Log rotation time unit, can be "second", "minute", "hour", "day"
 export RUSTFS_OBS_LOG_ROTATION_SIZE_MB=1 # Log rotation size in MB

-#
-export RUSTFS_SINKS_FILE_PATH="$current_dir/deploy/logs/rustfs.log"
+export RUSTFS_SINKS_FILE_PATH="$current_dir/deploy/logs"
 export RUSTFS_SINKS_FILE_BUFFER_SIZE=12
 export RUSTFS_SINKS_FILE_FLUSH_INTERVAL_MS=1000
 export RUSTFS_SINKS_FILE_FLUSH_THRESHOLD=100
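This pairs with the create_sinks change above: FileSink::new now joins DEFAULT_SINK_FILE_LOG_FILE onto file_config.path, so RUSTFS_SINKS_FILE_PATH is interpreted as a log directory rather than a full file path, and the trailing rustfs.log is dropped from the example configuration.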