Compare commits

Comparing `1.0.0-alph` … `1.0.0-alph` (173 commits)
| Author | SHA1 | Date |
|---|---|---|
| | 04bf4b0f98 | |
| | 7462be983a | |
| | 5264503e47 | |
| | 3b8cb0df41 | |
| | 9aebef31ff | |
| | c2d782bed1 | |
| | e00f5be746 | |
| | e23297f695 | |
| | d6840a6e04 | |
| | 3557a52dc4 | |
| | fd2aab2bd9 | |
| | f1c50fcb74 | |
| | bdcba3460e | |
| | 8857f31b07 | |
| | 5b85bf7a00 | |
| | 46bd75c0f8 | |
| | 5fc5dd0fd9 | |
| | adc07e5209 | |
| | 357cced49c | |
| | a104c33974 | |
| | 516e00f15f | |
| | a64c3c28b8 | |
| | e9c9a2d1f2 | |
| | 3ebab98d2d | |
| | 10c949af62 | |
| | 4a3325276d | |
| | c5f6c66f72 | |
| | c7c149975b | |
| | d552210b59 | |
| | 581607da6a | |
| | e95107f7d6 | |
| | a693cb52f3 | |
| | 2c7366038e | |
| | 1cc6dfde87 | |
| | 387f4faf78 | |
| | 0f7093c5f9 | |
| | 6a5c0055e7 | |
| | 76288f2501 | |
| | 3497ccfada | |
| | 24e3d3a2ce | |
| | ebad748cdc | |
| | b7e56ed92c | |
| | 4811632751 | |
| | 374a702f04 | |
| | e369e9f481 | |
| | fe2e4a2274 | |
| | b391272e94 | |
| | c55c7a6373 | |
| | 67f1c371a9 | |
| | d987686c14 | |
| | 48a9707110 | |
| | b89450f54d | |
| | e0c99bced4 | |
| | 130f85a575 | |
| | c42fbed3d2 | |
| | fd539f0f0a | |
| | 9aba89a12c | |
| | 7b27b29e3a | |
| | 7ef014a433 | |
| | 1b88714d27 | |
| | b119894425 | |
| | a37aa664f5 | |
| | 9b8abbb009 | |
| | 3e5a48af65 | |
| | d5aef963f9 | |
| | 6c37e1cb2a | |
| | e9d7e211b9 | |
| | 45bbd1e5c4 | |
| | 57d196771a | |
| | 6202f50e15 | |
| | c5df1f92c2 | |
| | 4f1770d3fe | |
| | d56cee26db | |
| | 56fd8132e9 | |
| | 35daa74430 | |
| | dc156fb4cd | |
| | de905a878c | |
| | f3252f989b | |
| | 01a2afca9a | |
| | a4fe68ad21 | |
| | c03f86b23c | |
| | 5667f324ae | |
| | bcd806796f | |
| | 612404c47f | |
| | 85388262b3 | |
| | 25a4503285 | |
| | 526c4d5a61 | |
| | addc964d56 | |
| | 371119f733 | |
| | 021abc0398 | |
| | 0672b6dd3e | |
| | 1372dc2857 | |
| | 77bc9af109 | |
| | 91b1c84430 | |
| | b667927216 | |
| | 29795fac51 | |
| | 2ce7e01f55 | |
| | 4fefd63a5b | |
| | 2a8c46874d | |
| | b8b5511b68 | |
| | bdaee228db | |
| | d562620e99 | |
| | 69b0c828c9 | |
| | 2bfd1efb9b | |
| | 0854e6b921 | |
| | b907f4e61b | |
| | 6ec568459c | |
| | ea210d52dc | |
| | 3d3c6e4e06 | |
| | e7d0a8d4b9 | |
| | 7d3b2b774c | |
| | aed8f52423 | |
| | c49414f6ac | |
| | 8e766b90cd | |
| | 3409cd8dff | |
| | f4973a681c | |
| | 4fb3d187d0 | |
| | 0aff736efd | |
| | 2aa7a631ef | |
| | b40ef147a9 | |
| | 1f11a3167b | |
| | 18b0134ddf | |
| | b48a5fdc94 | |
| | 168a07a670 | |
| | cad005bc21 | |
| | dc44cde081 | |
| | 4ccdeb9d2a | |
| | 1b48934f47 | |
| | 25fa645184 | |
| | 3a3bb880f2 | |
| | affe27298c | |
| | 629db6218e | |
| | aa1a3ce4e8 | |
| | 693db59fcc | |
| | 0a7df4ef26 | |
| | 9dcdc44718 | |
| | 2a0c618f8b | |
| | bebd78fbbb | |
| | 3f095e75cb | |
| | f7d30da9e0 | |
| | 823d4b6f79 | |
| | 051ea7786f | |
| | 42b645e355 | |
| | f27ee96014 | |
| | 20cd117aa6 | |
| | fc8931d69f | |
| | 0167b2decd | |
| | e67980ff3c | |
| | 96760bba5a | |
| | 2501d7d241 | |
| | 55b84262b5 | |
| | ce4252eb1a | |
| | db708917b4 | |
| | 8ddb45627d | |
| | 550c225b79 | |
| | 0d46b550a8 | |
| | 0693cca1a4 | |
| | 0d9f9e381a | |
| | 6c7aa5a7ae | |
| | a27d935925 | |
| | b4f87a4fee | |
| | ee5f94a2e2 | |
| | 9c3cf554d3 | |
| | addbfa5487 | |
| | 5eb461d7b7 | |
| | 1ea45afcd7 | |
| | dbd86f6aee | |
| | af693f7b3f | |
| | 3be5ee6445 | |
| | 0acc8fe26a | |
| | ecf40eb86c | |
| | 48ce7055f8 | |
| | 749f55d688 | |
`.copilot-rules.md` (new file, 58 lines)

````markdown
# GitHub Copilot Rules for RustFS Project

## Core Rules Reference

This project follows the comprehensive AI coding rules defined in `.rules.md`. Please refer to that file for the complete set of development guidelines, coding standards, and best practices.

## Copilot-Specific Configuration

When using GitHub Copilot for this project, ensure you:

1. **Review the unified rules**: Always check `.rules.md` for the latest project guidelines
2. **Follow branch protection**: Never attempt to commit directly to main/master branch
3. **Use English**: All code comments, documentation, and variable names must be in English
4. **Clean code practices**: Only make modifications you're confident about
5. **Test thoroughly**: Ensure all changes pass formatting, linting, and testing requirements

## Quick Reference

### Critical Rules

- 🚫 **NEVER commit directly to main/master branch**
- ✅ **ALWAYS work on feature branches**
- 📝 **ALWAYS use English for code and documentation**
- 🧹 **ALWAYS clean up temporary files after use**
- 🎯 **ONLY make confident, necessary modifications**

### Pre-commit Checklist

```bash
# Before committing, always run:
cargo fmt --all
cargo clippy --all-targets --all-features -- -D warnings
cargo check --all-targets
cargo test
```

### Branch Workflow

```bash
git checkout main
git pull origin main
git checkout -b feat/your-feature-name
# Make your changes
git add .
git commit -m "feat: your feature description"
git push origin feat/your-feature-name
gh pr create
```

## Important Notes

- This file serves as an entry point for GitHub Copilot
- All detailed rules and guidelines are maintained in `.rules.md`
- Updates to coding standards should be made in `.rules.md` to ensure consistency across all AI tools
- When in doubt, always refer to `.rules.md` for authoritative guidance

## See Also

- [.rules.md](./.rules.md) - Complete AI coding rules and guidelines
- [CONTRIBUTING.md](./CONTRIBUTING.md) - Contribution guidelines
- [README.md](./README.md) - Project overview and setup instructions
````
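The pre-commit checklist above maps directly onto a local Git hook, so the same checks can run before every commit. A minimal sketch, assuming a POSIX shell; the hook path and the use of `--check` mode for `cargo fmt` are illustrative, not part of the repository:

```bash
#!/bin/sh
# Illustrative .git/hooks/pre-commit: abort the commit if any check fails.
set -e

cargo fmt --all -- --check                                 # formatting must be clean
cargo clippy --all-targets --all-features -- -D warnings   # lints treated as errors
cargo check --all-targets                                  # type-check all targets
cargo test                                                 # run the test suite
```

Saving this as `.git/hooks/pre-commit` and marking it executable with `chmod +x` makes the checklist self-enforcing.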
````diff
@@ -6,10 +6,10 @@ This directory contains Docker configuration files and supporting infrastructure
 ```
 rustfs/
-├── Dockerfile           # Production image (Alpine + GitHub Releases)
-├── Dockerfile.source    # Source build (Debian + cross-compilation)
-├── cargo.config.toml    # Rust cargo configuration
+├── Dockerfile           # Production image (Alpine + pre-built binaries)
+├── Dockerfile.source    # Development image (Debian + source build)
 ├── docker-buildx.sh     # Multi-architecture build script
+├── Makefile             # Build automation with simplified commands
 └── .docker/             # Supporting infrastructure
     ├── observability/   # Monitoring and observability configs
     ├── compose/         # Docker Compose configurations
@@ -64,7 +64,11 @@ docker run rustfs/rustfs:main-latest # Main branch latest
 ### Development Environment
 
 ```bash
-# Start development container
+# Quick setup using Makefile (recommended)
+make docker-dev-local   # Build development image locally
+make dev-env-start      # Start development container
+
+# Manual Docker commands
 docker run -it -v $(pwd):/workspace -p 9000:9000 rustfs/rustfs:latest-dev
 
 # Build from source locally
@@ -76,9 +80,33 @@ docker-compose up rustfs-dev
 
 ## 🏗️ Build Arguments and Scripts
 
-### Using docker-buildx.sh (Recommended)
+### Using Makefile Commands (Recommended)
 
-For multi-architecture builds, use the provided script:
+The easiest way to build images using simplified commands:
+
+```bash
+# Development images (build from source)
+make docker-dev-local                       # Build for local use (single arch)
+make docker-dev                             # Build multi-arch (for CI/CD)
+make docker-dev-push REGISTRY=xxx           # Build and push to registry
+
+# Production images (using pre-built binaries)
+make docker-buildx                          # Build multi-arch production images
+make docker-buildx-push                     # Build and push production images
+make docker-buildx-version VERSION=v1.0.0   # Build specific version
+
+# Development environment
+make dev-env-start                          # Start development container
+make dev-env-stop                           # Stop development container
+make dev-env-restart                        # Restart development container
+
+# Help
+make help-docker                            # Show all Docker-related commands
+```
+
+### Using docker-buildx.sh (Advanced)
+
+For direct script usage and advanced scenarios:
 
 ```bash
 # Build latest version for all architectures
@@ -147,17 +175,51 @@ Architecture is automatically detected during build using Docker's `TARGETARCH`
 
 ## 🛠️ Development Workflow
 
-For local development and testing:
+### Quick Start with Makefile (Recommended)
 
 ```bash
-# Quick development setup
-docker-compose up rustfs-dev
+# 1. Start development environment
+make dev-env-start
 
-# Custom source build
-docker build -f Dockerfile.source -t rustfs:custom .
+# 2. Your development container is now running with:
+#    - Port 9000 exposed for RustFS
+#    - Port 9010 exposed for admin console
+#    - Current directory mounted as /workspace
+
+# 3. Stop when done
+make dev-env-stop
+```
+
+### Manual Development Setup
+
+```bash
+# Build development image from source
+make docker-dev-local
+
+# Or use traditional Docker commands
+docker build -f Dockerfile.source -t rustfs:dev .
 
 # Run with development tools
-docker run -it -v $(pwd):/workspace rustfs:custom bash
+docker run -it -v $(pwd):/workspace -p 9000:9000 rustfs:dev bash
+
+# Or use docker-compose for complex setups
+docker-compose up rustfs-dev
+```
+
+### Common Development Tasks
+
+```bash
+# Build and test locally
+make build              # Build binary natively
+make docker-dev-local   # Build development Docker image
+make test               # Run tests
+make fmt                # Format code
+make clippy             # Run linter
+
+# Get help
+make help               # General help
+make help-docker        # Docker-specific help
+```
 
 ## 🚀 CI/CD Integration
````
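The README above notes that the image architecture is detected through Docker's `TARGETARCH` build argument. A sketch of what that looks like from the build side, assuming the production Dockerfile declares `ARG TARGETARCH`; the tag name here is illustrative:

```bash
# buildx runs the Dockerfile once per platform and sets TARGETARCH to
# "amd64" or "arm64" automatically on each pass.
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  -t rustfs/rustfs:local-test \
  .
# Without --push or --load the results stay in the buildx cache; add
# --push to publish the multi-arch manifest to a registry.
```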
Deleted file (40 lines):

```diff
@@ -1,40 +0,0 @@
-FROM alpine:3.18
-
-ENV LANG C.UTF-8
-
-# Install base dependencies
-RUN apk add --no-cache \
-    wget \
-    git \
-    curl \
-    unzip \
-    gcc \
-    musl-dev \
-    pkgconfig \
-    openssl-dev \
-    dbus-dev \
-    wayland-dev \
-    webkit2gtk-4.1-dev \
-    build-base \
-    linux-headers
-
-# install protoc
-RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v30.2/protoc-30.2-linux-x86_64.zip \
-    && unzip protoc-30.2-linux-x86_64.zip -d protoc3 \
-    && mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
-    && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-30.2-linux-x86_64.zip protoc3
-
-# install flatc
-RUN wget https://github.com/google/flatbuffers/releases/download/v24.3.25/Linux.flatc.binary.g++-13.zip \
-    && unzip Linux.flatc.binary.g++-13.zip \
-    && mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip
-
-# install rust
-RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
-
-# Set PATH for rust
-ENV PATH="/root/.cargo/bin:${PATH}"
-
-COPY .docker/cargo.config.toml /root/.cargo/config.toml
-
-WORKDIR /root/rustfs
```
```diff
@@ -27,7 +27,7 @@ services:
     ports:
       - "9000:9000" # Map port 9001 of the host to port 9000 of the container
     volumes:
-      - ../../target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
+      - ../../target/x86_64-unknown-linux-gnu/release/rustfs:/app/rustfs
     command: "/app/rustfs"
 
   node1:
@@ -44,7 +44,7 @@ services:
     ports:
       - "9001:9000" # Map port 9002 of the host to port 9000 of the container
     volumes:
-      - ../../target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
+      - ../../target/x86_64-unknown-linux-gnu/release/rustfs:/app/rustfs
     command: "/app/rustfs"
 
   node2:
@@ -61,7 +61,7 @@ services:
     ports:
       - "9002:9000" # Map port 9003 of the host to port 9000 of the container
     volumes:
-      - ../../target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
+      - ../../target/x86_64-unknown-linux-gnu/release/rustfs:/app/rustfs
     command: "/app/rustfs"
 
   node3:
@@ -78,5 +78,5 @@ services:
     ports:
       - "9003:9000" # Map port 9004 of the host to port 9000 of the container
     volumes:
-      - ../../target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
+      - ../../target/x86_64-unknown-linux-gnu/release/rustfs:/app/rustfs
     command: "/app/rustfs"
```
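The compose services above now mount the `x86_64-unknown-linux-gnu` binary rather than the musl one, so a local build must target that triple before the containers can start. A minimal sketch:

```bash
# Produce the binary at the path the compose volumes expect:
#   target/x86_64-unknown-linux-gnu/release/rustfs
rustup target add x86_64-unknown-linux-gnu   # no-op if already installed
cargo build --release --target x86_64-unknown-linux-gnu -p rustfs
```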
`.dockerignore` (new file, 1 line)

```diff
@@ -0,0 +1 @@
+target
```
`.github/pull_request_template.md` (4 changes)

```diff
@@ -19,9 +19,7 @@ Pull Request Template for RustFS
 
 ## Checklist
 - [ ] I have read and followed the [CONTRIBUTING.md](CONTRIBUTING.md) guidelines
-- [ ] Code is formatted with `cargo fmt --all`
-- [ ] Passed `cargo clippy --all-targets --all-features -- -D warnings`
-- [ ] Passed `cargo check --all-targets`
+- [ ] Passed `make pre-commit`
 - [ ] Added/updated necessary tests
 - [ ] Documentation updated (if needed)
 - [ ] CI/CD passed (if applicable)
```
`.github/workflows/audit.yml` (11 changes)

```diff
@@ -16,13 +16,13 @@ name: Security Audit
 
 on:
   push:
-    branches: [main]
+    branches: [ main ]
     paths:
       - '**/Cargo.toml'
       - '**/Cargo.lock'
       - '.github/workflows/audit.yml'
   pull_request:
-    branches: [main]
+    branches: [ main ]
     paths:
       - '**/Cargo.toml'
       - '**/Cargo.lock'
@@ -31,6 +31,9 @@ on:
     - cron: '0 0 * * 0' # Weekly on Sunday at midnight UTC
   workflow_dispatch:
 
+permissions:
+  contents: read
+
 env:
   CARGO_TERM_COLOR: always
 
@@ -41,7 +44,7 @@ jobs:
     timeout-minutes: 15
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
 
       - name: Install cargo-audit
        uses: taiki-e/install-action@v2
@@ -69,7 +72,7 @@ jobs:
       pull-requests: write
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
 
       - name: Dependency Review
         uses: actions/dependency-review-action@v4
```
`.github/workflows/build.yml` (583 changes)

```diff
@@ -12,12 +12,24 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+# Build and Release Workflow
+#
+# This workflow builds RustFS binaries and automatically triggers Docker image builds.
+#
+# Flow:
+# 1. Build binaries for multiple platforms
+# 2. Upload binaries to OSS storage
+# 3. Trigger docker.yml to build and push images using the uploaded binaries
+#
+# Manual Parameters:
+# - build_docker: Build and push Docker images (default: true)
+
 name: Build and Release
 
 on:
   push:
-    tags: ["*.*.*"]
-    branches: [main]
+    tags: [ "*.*.*" ]
+    branches: [ main ]
     paths-ignore:
       - "**.md"
       - "**.txt"
@@ -33,7 +45,7 @@ on:
       - ".gitignore"
       - ".dockerignore"
   pull_request:
-    branches: [main]
+    branches: [ main ]
     paths-ignore:
       - "**.md"
       - "**.txt"
@@ -52,12 +64,15 @@ on:
     - cron: "0 0 * * 0" # Weekly on Sunday at midnight UTC
   workflow_dispatch:
     inputs:
-      force_build:
-        description: "Force build even without changes"
+      build_docker:
+        description: "Build and push Docker images after binary build"
         required: false
-        default: false
+        default: true
         type: boolean
 
+permissions:
+  contents: read
+
 env:
   CARGO_TERM_COLOR: always
   RUST_BACKTRACE: 1
@@ -77,7 +92,7 @@ jobs:
       is_prerelease: ${{ steps.check.outputs.is_prerelease }}
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           fetch-depth: 0
 
@@ -117,7 +132,6 @@ jobs:
             echo "🛠️ Development build detected"
           elif [[ "${{ github.event_name }}" == "schedule" ]] || \
                [[ "${{ github.event_name }}" == "workflow_dispatch" ]] || \
-               [[ "${{ github.event.inputs.force_build }}" == "true" ]] || \
                [[ "${{ contains(github.event.head_commit.message, '--build') }}" == "true" ]]; then
             # Scheduled or manual build
             should_build=true
@@ -142,7 +156,7 @@ jobs:
   # Build RustFS binaries
   build-rustfs:
     name: Build RustFS
-    needs: [build-check]
+    needs: [ build-check ]
     if: needs.build-check.outputs.should_build == 'true'
     runs-on: ${{ matrix.os }}
     timeout-minutes: 60
@@ -161,6 +175,14 @@ jobs:
           target: aarch64-unknown-linux-musl
           cross: true
           platform: linux
+        - os: ubuntu-latest
+          target: x86_64-unknown-linux-gnu
+          cross: false
+          platform: linux
+        - os: ubuntu-latest
+          target: aarch64-unknown-linux-gnu
+          cross: true
+          platform: linux
         # macOS builds
         - os: macos-latest
           target: aarch64-apple-darwin
@@ -170,18 +192,18 @@ jobs:
           target: x86_64-apple-darwin
           cross: false
           platform: macos
-        # # Windows builds (temporarily disabled)
-        # - os: windows-latest
-        #   target: x86_64-pc-windows-msvc
-        #   cross: false
-        #   platform: windows
-        # - os: windows-latest
-        #   target: aarch64-pc-windows-msvc
-        #   cross: true
-        #   platform: windows
+        # Windows builds (temporarily disabled)
+        - os: windows-latest
+          target: x86_64-pc-windows-msvc
+          cross: false
+          platform: windows
+        #- os: windows-latest
+        #  target: aarch64-pc-windows-msvc
+        #  cross: true
+        #  platform: windows
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           fetch-depth: 0
 
@@ -196,6 +218,7 @@ jobs:
           install-cross-tools: ${{ matrix.cross }}
 
       - name: Download static console assets
+        shell: bash
         run: |
           mkdir -p ./rustfs/static
           if [[ "${{ matrix.platform }}" == "windows" ]]; then
@@ -221,6 +244,7 @@ jobs:
           fi
 
       - name: Build RustFS
+        shell: bash
         run: |
           # Force rebuild by touching build.rs
           touch rustfs/build.rs
@@ -231,7 +255,7 @@ jobs:
             cargo install cross --git https://github.com/cross-rs/cross
             cross build --release --target ${{ matrix.target }} -p rustfs --bins
           else
-            # Use zigbuild for Linux ARM64
+            # Use zigbuild for other cross-compilation
             cargo zigbuild --release --target ${{ matrix.target }} -p rustfs --bins
           fi
         else
@@ -249,30 +273,55 @@ jobs:
           # Extract platform and arch from target
           TARGET="${{ matrix.target }}"
           PLATFORM="${{ matrix.platform }}"
 
-          # Map target to architecture
+          # Map target to architecture and variant
           case "$TARGET" in
+            *x86_64*musl*)
+              ARCH="x86_64"
+              VARIANT="musl"
+              ;;
+            *x86_64*gnu*)
+              ARCH="x86_64"
+              VARIANT="gnu"
+              ;;
             *x86_64*)
               ARCH="x86_64"
+              VARIANT=""
               ;;
+            *aarch64*musl*|*arm64*musl*)
+              ARCH="aarch64"
+              VARIANT="musl"
+              ;;
+            *aarch64*gnu*|*arm64*gnu*)
+              ARCH="aarch64"
+              VARIANT="gnu"
+              ;;
             *aarch64*|*arm64*)
               ARCH="aarch64"
+              VARIANT=""
               ;;
             *armv7*)
               ARCH="armv7"
+              VARIANT=""
               ;;
             *)
               ARCH="unknown"
+              VARIANT=""
               ;;
           esac
 
-          # Generate package name based on build type
-          if [[ "$BUILD_TYPE" == "development" ]]; then
-            # Development build: rustfs-${platform}-${arch}-dev-${short_sha}.zip
-            PACKAGE_NAME="rustfs-${PLATFORM}-${ARCH}-dev-${SHORT_SHA}"
+          if [[ -n "$VARIANT" ]]; then
+            ARCH_WITH_VARIANT="${ARCH}-${VARIANT}"
           else
-            # Release/Prerelease build: rustfs-${platform}-${arch}-v${version}.zip
-            PACKAGE_NAME="rustfs-${PLATFORM}-${ARCH}-v${VERSION}"
+            ARCH_WITH_VARIANT="${ARCH}"
           fi
 
+          if [[ "$BUILD_TYPE" == "development" ]]; then
+            # Development build: rustfs-${platform}-${arch}-${variant}-dev-${short_sha}.zip
+            PACKAGE_NAME="rustfs-${PLATFORM}-${ARCH_WITH_VARIANT}-dev-${SHORT_SHA}"
+          else
+            # Release/Prerelease build: rustfs-${platform}-${arch}-${variant}-v${version}.zip
+            PACKAGE_NAME="rustfs-${PLATFORM}-${ARCH_WITH_VARIANT}-v${VERSION}"
+          fi
 
           # Create zip packages for all platforms
@@ -284,23 +333,119 @@ jobs:
           fi
 
           cd target/${{ matrix.target }}/release
-          zip "../../../${PACKAGE_NAME}.zip" rustfs
+
+          # Determine the binary name based on platform
+          if [[ "${{ matrix.platform }}" == "windows" ]]; then
+            BINARY_NAME="rustfs.exe"
+          else
+            BINARY_NAME="rustfs"
+          fi
+
+          # Verify the binary exists before packaging
+          if [[ ! -f "$BINARY_NAME" ]]; then
+            echo "❌ Binary $BINARY_NAME not found in $(pwd)"
+            if [[ "${{ matrix.platform }}" == "windows" ]]; then
+              dir
+            else
+              ls -la
+            fi
+            exit 1
+          fi
+
+          # Universal packaging function
+          package_zip() {
+            local src=$1
+            local dst=$2
+            if [[ "${{ matrix.platform }}" == "windows" ]]; then
+              # Windows uses PowerShell Compress-Archive
+              powershell -Command "Compress-Archive -Path '$src' -DestinationPath '$dst' -Force"
+            elif command -v zip &> /dev/null; then
+              # Unix systems use zip command
+              zip "$dst" "$src"
+            else
+              echo "❌ No zip utility available"
+              exit 1
+            fi
+          }
+
+          # Create the zip package
+          echo "Start packaging: $BINARY_NAME -> ../../../${PACKAGE_NAME}.zip"
+          package_zip "$BINARY_NAME" "../../../${PACKAGE_NAME}.zip"
 
           cd ../../..
 
+          # Verify the package was created
+          if [[ -f "${PACKAGE_NAME}.zip" ]]; then
+            echo "✅ Package created successfully: ${PACKAGE_NAME}.zip"
+            if [[ "${{ matrix.platform }}" == "windows" ]]; then
+              dir
+            else
+              ls -lh ${PACKAGE_NAME}.zip
+            fi
+          else
+            echo "❌ Failed to create package: ${PACKAGE_NAME}.zip"
+            exit 1
+          fi
+
+          # Create latest version files right after the main package
+          LATEST_FILES=""
+          if [[ "$BUILD_TYPE" == "release" ]] || [[ "$BUILD_TYPE" == "prerelease" ]]; then
+            # Create latest version filename
+            # Convert from rustfs-linux-x86_64-musl-v1.0.0 to rustfs-linux-x86_64-musl-latest
+            LATEST_FILE="${PACKAGE_NAME%-v*}-latest.zip"
+
+            echo "🔄 Creating latest version: ${PACKAGE_NAME}.zip -> $LATEST_FILE"
+            cp "${PACKAGE_NAME}.zip" "$LATEST_FILE"
+
+            if [[ -f "$LATEST_FILE" ]]; then
+              echo "✅ Latest version created: $LATEST_FILE"
+              LATEST_FILES="$LATEST_FILE"
+            fi
+          elif [[ "$BUILD_TYPE" == "development" ]]; then
+            # Development builds (only main branch triggers development builds)
+            # Create main-latest version filename
+            # Convert from rustfs-linux-x86_64-dev-abc123 to rustfs-linux-x86_64-main-latest
+            MAIN_LATEST_FILE="${PACKAGE_NAME%-dev-*}-main-latest.zip"
+
+            echo "🔄 Creating main-latest version: ${PACKAGE_NAME}.zip -> $MAIN_LATEST_FILE"
+            cp "${PACKAGE_NAME}.zip" "$MAIN_LATEST_FILE"
+
+            if [[ -f "$MAIN_LATEST_FILE" ]]; then
+              echo "✅ Main-latest version created: $MAIN_LATEST_FILE"
+              LATEST_FILES="$MAIN_LATEST_FILE"
+
+              # Also create a generic main-latest for Docker builds (Linux only)
+              if [[ "${{ matrix.platform }}" == "linux" ]]; then
+                DOCKER_MAIN_LATEST_FILE="rustfs-linux-${ARCH_WITH_VARIANT}-main-latest.zip"
+
+                echo "🔄 Creating Docker main-latest version: ${PACKAGE_NAME}.zip -> $DOCKER_MAIN_LATEST_FILE"
+                cp "${PACKAGE_NAME}.zip" "$DOCKER_MAIN_LATEST_FILE"
+
+                if [[ -f "$DOCKER_MAIN_LATEST_FILE" ]]; then
+                  echo "✅ Docker main-latest version created: $DOCKER_MAIN_LATEST_FILE"
+                  LATEST_FILES="$LATEST_FILES $DOCKER_MAIN_LATEST_FILE"
+                fi
+              fi
+            fi
+          fi
+
           echo "package_name=${PACKAGE_NAME}" >> $GITHUB_OUTPUT
           echo "package_file=${PACKAGE_NAME}.zip" >> $GITHUB_OUTPUT
+          echo "latest_files=${LATEST_FILES}" >> $GITHUB_OUTPUT
           echo "build_type=${BUILD_TYPE}" >> $GITHUB_OUTPUT
           echo "version=${VERSION}" >> $GITHUB_OUTPUT
 
           echo "📦 Package created: ${PACKAGE_NAME}.zip"
+          if [[ -n "$LATEST_FILES" ]]; then
+            echo "📦 Latest files created: $LATEST_FILES"
+          fi
           echo "🔧 Build type: ${BUILD_TYPE}"
           echo "📊 Version: ${VERSION}"
 
-      - name: Upload artifacts
+      - name: Upload to GitHub artifacts
         uses: actions/upload-artifact@v4
         with:
           name: ${{ steps.package.outputs.package_name }}
-          path: ${{ steps.package.outputs.package_file }}
+          path: "rustfs-*.zip"
           retention-days: ${{ startsWith(github.ref, 'refs/tags/') && 30 || 7 }}
 
       - name: Upload to Aliyun OSS
```
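The `latest` file naming above relies on Bash suffix-stripping, where `${var%pattern}` removes the shortest suffix matching the pattern. A worked example of the two expansions used:

```bash
PACKAGE_NAME="rustfs-linux-x86_64-musl-v1.0.0"
echo "${PACKAGE_NAME%-v*}-latest.zip"          # rustfs-linux-x86_64-musl-latest.zip

PACKAGE_NAME="rustfs-linux-x86_64-dev-abc123"
echo "${PACKAGE_NAME%-dev-*}-main-latest.zip"  # rustfs-linux-x86_64-main-latest.zip
```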
```diff
@@ -310,6 +455,7 @@ jobs:
           OSS_ACCESS_KEY_SECRET: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
           OSS_REGION: cn-beijing
           OSS_ENDPOINT: https://oss-cn-beijing.aliyuncs.com
+        shell: bash
         run: |
           BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
 
@@ -348,6 +494,16 @@ jobs:
               chmod +x /usr/local/bin/ossutil
               OSSUTIL_BIN=ossutil
               ;;
+            windows)
+              OSSUTIL_ZIP="ossutil-${OSSUTIL_VERSION}-windows-amd64.zip"
+              OSSUTIL_DIR="ossutil-${OSSUTIL_VERSION}-windows-amd64"
+
+              curl -o "$OSSUTIL_ZIP" "https://gosspublic.alicdn.com/ossutil/v2/${OSSUTIL_VERSION}/${OSSUTIL_ZIP}"
+              unzip "$OSSUTIL_ZIP"
+              mv "${OSSUTIL_DIR}/ossutil.exe" ./ossutil.exe
+              rm -rf "$OSSUTIL_DIR" "$OSSUTIL_ZIP"
+              OSSUTIL_BIN=./ossutil.exe
+              ;;
           esac
 
           # Determine upload path based on build type
@@ -359,83 +515,27 @@ jobs:
             echo "📤 Uploading release build to OSS release directory"
           fi
 
-          # Upload the package file to OSS
-          echo "Uploading ${{ steps.package.outputs.package_file }} to $OSS_PATH..."
-          $OSSUTIL_BIN cp "${{ steps.package.outputs.package_file }}" "$OSS_PATH" --force
-
-          # For release and prerelease builds, also create a latest version
-          if [[ "$BUILD_TYPE" == "release" ]] || [[ "$BUILD_TYPE" == "prerelease" ]]; then
-            # Extract platform and arch from package name
-            PACKAGE_NAME="${{ steps.package.outputs.package_name }}"
-
-            # Create latest version filename
-            # Convert from rustfs-linux-x86_64-v1.0.0 to rustfs-linux-x86_64-latest
-            LATEST_FILE="${PACKAGE_NAME%-v*}-latest.zip"
-
-            # Copy the original file to latest version
-            cp "${{ steps.package.outputs.package_file }}" "$LATEST_FILE"
-
-            # Upload the latest version
-            echo "Uploading latest version: $LATEST_FILE to $OSS_PATH..."
-            $OSSUTIL_BIN cp "$LATEST_FILE" "$OSS_PATH" --force
-
-            echo "✅ Latest version uploaded: $LATEST_FILE"
-          fi
-
-          # For development builds, create dev-latest version
-          if [[ "$BUILD_TYPE" == "development" ]]; then
-            # Extract platform and arch from package name
-            PACKAGE_NAME="${{ steps.package.outputs.package_name }}"
-
-            # Create dev-latest version filename
-            # Convert from rustfs-linux-x86_64-dev-abc123 to rustfs-linux-x86_64-dev-latest
-            DEV_LATEST_FILE="${PACKAGE_NAME%-*}-latest.zip"
-
-            # Copy the original file to dev-latest version
-            cp "${{ steps.package.outputs.package_file }}" "$DEV_LATEST_FILE"
-
-            # Upload the dev-latest version
-            echo "Uploading dev-latest version: $DEV_LATEST_FILE to $OSS_PATH..."
-            $OSSUTIL_BIN cp "$DEV_LATEST_FILE" "$OSS_PATH" --force
-
-            echo "✅ Dev-latest version uploaded: $DEV_LATEST_FILE"
-
-            # For main branch builds, also create a main-latest version
-            if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
-              # Create main-latest version filename
-              # Convert from rustfs-linux-x86_64-dev-abc123 to rustfs-linux-x86_64-main-latest
-              MAIN_LATEST_FILE="${PACKAGE_NAME%-dev-*}-main-latest.zip"
-
-              # Copy the original file to main-latest version
-              cp "${{ steps.package.outputs.package_file }}" "$MAIN_LATEST_FILE"
-
-              # Upload the main-latest version
-              echo "Uploading main-latest version: $MAIN_LATEST_FILE to $OSS_PATH..."
-              $OSSUTIL_BIN cp "$MAIN_LATEST_FILE" "$OSS_PATH" --force
-
-              echo "✅ Main-latest version uploaded: $MAIN_LATEST_FILE"
-
-              # Also create a generic main-latest for Docker builds
-              if [[ "${{ matrix.platform }}" == "linux" ]]; then
-                DOCKER_MAIN_LATEST_FILE="rustfs-linux-${{ matrix.target == 'x86_64-unknown-linux-musl' && 'x86_64' || 'aarch64' }}-main-latest.zip"
-
-                cp "${{ steps.package.outputs.package_file }}" "$DOCKER_MAIN_LATEST_FILE"
-                $OSSUTIL_BIN cp "$DOCKER_MAIN_LATEST_FILE" "$OSS_PATH" --force
-                echo "✅ Docker main-latest version uploaded: $DOCKER_MAIN_LATEST_FILE"
-              fi
-            fi
-          fi
+          # Upload all rustfs zip files to OSS using glob pattern
+          echo "📤 Uploading all rustfs-*.zip files to $OSS_PATH..."
+          for zip_file in rustfs-*.zip; do
+            if [[ -f "$zip_file" ]]; then
+              echo "Uploading: $zip_file to $OSS_PATH..."
+              $OSSUTIL_BIN cp "$zip_file" "$OSS_PATH" --force
+              echo "✅ Uploaded: $zip_file"
+            fi
+          done
 
           echo "✅ Upload completed successfully"
 
   # Build summary
   build-summary:
     name: Build Summary
-    needs: [build-check, build-rustfs]
+    needs: [ build-check, build-rustfs ]
     if: always() && needs.build-check.outputs.should_build == 'true'
     runs-on: ubuntu-latest
     steps:
       - name: Build completion summary
+        shell: bash
         run: |
           BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
           VERSION="${{ needs.build-check.outputs.version }}"
@@ -445,6 +545,13 @@ jobs:
           echo "🔢 Version: $VERSION"
           echo ""
 
+          # Check build status
+          BUILD_STATUS="${{ needs.build-rustfs.result }}"
+
+          echo "📊 Build Results:"
+          echo "  📦 All platforms: $BUILD_STATUS"
+          echo ""
+
           case "$BUILD_TYPE" in
             "development")
               echo "🛠️ Development build artifacts have been uploaded to OSS dev directory"
@@ -453,11 +560,291 @@ jobs:
             "release")
               echo "🚀 Release build artifacts have been uploaded to OSS release directory"
               echo "✅ This build is ready for production use"
-              echo "🏷️ GitHub Release will be created automatically by the release workflow"
+              echo "🏷️ GitHub Release will be created in this workflow"
               ;;
             "prerelease")
               echo "🧪 Prerelease build artifacts have been uploaded to OSS release directory"
               echo "⚠️ This is a prerelease build - use with caution"
-              echo "🏷️ GitHub Release will be created automatically by the release workflow"
+              echo "🏷️ GitHub Release will be created in this workflow"
               ;;
           esac
 
+          echo ""
+          echo "🐳 Docker Images:"
+          if [[ "${{ github.event.inputs.build_docker }}" == "false" ]]; then
+            echo "⏭️ Docker image build was skipped (binary only build)"
+          elif [[ "$BUILD_STATUS" == "success" ]]; then
+            echo "🔄 Docker images will be built and pushed automatically via workflow_run event"
+          else
+            echo "❌ Docker image build will be skipped due to build failure"
+          fi
+
+  # Create GitHub Release (only for tag pushes)
+  create-release:
+    name: Create GitHub Release
+    needs: [ build-check, build-rustfs ]
+    if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+    outputs:
+      release_id: ${{ steps.create.outputs.release_id }}
+      release_url: ${{ steps.create.outputs.release_url }}
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v5
+        with:
+          fetch-depth: 0
+
+      - name: Create GitHub Release
+        id: create
+        env:
+          GH_TOKEN: ${{ github.token }}
+        shell: bash
+        run: |
+          TAG="${{ needs.build-check.outputs.version }}"
+          VERSION="${{ needs.build-check.outputs.version }}"
+          IS_PRERELEASE="${{ needs.build-check.outputs.is_prerelease }}"
+          BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
+
+          # Determine release type for title
+          if [[ "$BUILD_TYPE" == "prerelease" ]]; then
+            if [[ "$TAG" == *"alpha"* ]]; then
+              RELEASE_TYPE="alpha"
+            elif [[ "$TAG" == *"beta"* ]]; then
+              RELEASE_TYPE="beta"
+            elif [[ "$TAG" == *"rc"* ]]; then
+              RELEASE_TYPE="rc"
+            else
+              RELEASE_TYPE="prerelease"
+            fi
+          else
+            RELEASE_TYPE="release"
+          fi
+
+          # Check if release already exists
+          if gh release view "$TAG" >/dev/null 2>&1; then
+            echo "Release $TAG already exists"
+            RELEASE_ID=$(gh release view "$TAG" --json databaseId --jq '.databaseId')
+            RELEASE_URL=$(gh release view "$TAG" --json url --jq '.url')
+          else
+            # Get release notes from tag message
+            RELEASE_NOTES=$(git tag -l --format='%(contents)' "${TAG}")
+            if [[ -z "$RELEASE_NOTES" || "$RELEASE_NOTES" =~ ^[[:space:]]*$ ]]; then
+              if [[ "$IS_PRERELEASE" == "true" ]]; then
+                RELEASE_NOTES="Pre-release ${VERSION} (${RELEASE_TYPE})"
+              else
+                RELEASE_NOTES="Release ${VERSION}"
+              fi
+            fi
+
+            # Create release title
+            if [[ "$IS_PRERELEASE" == "true" ]]; then
+              TITLE="RustFS $VERSION (${RELEASE_TYPE})"
+            else
+              TITLE="RustFS $VERSION"
+            fi
+
+            # Create the release
+            PRERELEASE_FLAG=""
+            if [[ "$IS_PRERELEASE" == "true" ]]; then
+              PRERELEASE_FLAG="--prerelease"
+            fi
+
+            gh release create "$TAG" \
+              --title "$TITLE" \
+              --notes "$RELEASE_NOTES" \
+              $PRERELEASE_FLAG \
+              --draft
+
+            RELEASE_ID=$(gh release view "$TAG" --json databaseId --jq '.databaseId')
+            RELEASE_URL=$(gh release view "$TAG" --json url --jq '.url')
+          fi
+
+          echo "release_id=$RELEASE_ID" >> $GITHUB_OUTPUT
+          echo "release_url=$RELEASE_URL" >> $GITHUB_OUTPUT
+          echo "Created release: $RELEASE_URL"
+
+  # Prepare and upload release assets
+  upload-release-assets:
+    name: Upload Release Assets
+    needs: [ build-check, build-rustfs, create-release ]
+    if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+      actions: read
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v5
+
+      - name: Download all build artifacts
+        uses: actions/download-artifact@v5
+        with:
+          path: ./artifacts
+          pattern: rustfs-*
+          merge-multiple: true
+
+      - name: Prepare release assets
+        id: prepare
+        shell: bash
+        run: |
+          VERSION="${{ needs.build-check.outputs.version }}"
+          TAG="${{ needs.build-check.outputs.version }}"
+
+          mkdir -p ./release-assets
+
+          # Copy and verify artifacts (including latest files created during build)
+          ASSETS_COUNT=0
+          for file in ./artifacts/*.zip; do
+            if [[ -f "$file" ]]; then
+              cp "$file" ./release-assets/
+              ASSETS_COUNT=$((ASSETS_COUNT + 1))
+            fi
+          done
+
+          if [[ $ASSETS_COUNT -eq 0 ]]; then
+            echo "❌ No artifacts found!"
+            exit 1
+          fi
+
+          cd ./release-assets
+
+          # Generate checksums for all files (including latest versions)
+          if ls *.zip >/dev/null 2>&1; then
+            sha256sum *.zip > SHA256SUMS
+            sha512sum *.zip > SHA512SUMS
+          fi
+
+          # Create signature placeholder files
+          for file in *.zip; do
+            echo "# Signature for $file" > "${file}.asc"
+            echo "# GPG signature will be added in future versions" >> "${file}.asc"
+          done
+
+          echo "📦 Prepared assets:"
+          ls -la
+
+          echo "🔢 Total asset count: $ASSETS_COUNT"
+
+      - name: Upload to GitHub Release
+        env:
+          GH_TOKEN: ${{ github.token }}
+        shell: bash
+        run: |
+          TAG="${{ needs.build-check.outputs.version }}"
+
+          cd ./release-assets
+
+          # Upload all files
+          for file in *; do
+            if [[ -f "$file" ]]; then
+              echo "📤 Uploading $file..."
+              gh release upload "$TAG" "$file" --clobber
+            fi
+          done
+
+          echo "✅ All assets uploaded successfully"
+
+  # Update latest.json for stable releases only
+  update-latest-version:
+    name: Update Latest Version
+    needs: [ build-check, upload-release-assets ]
+    if: startsWith(github.ref, 'refs/tags/')
+    runs-on: ubuntu-latest
+    steps:
+      - name: Update latest.json
+        env:
+          OSS_ACCESS_KEY_ID: ${{ secrets.ALICLOUDOSS_KEY_ID }}
+          OSS_ACCESS_KEY_SECRET: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
+          OSS_REGION: cn-beijing
+          OSS_ENDPOINT: https://oss-cn-beijing.aliyuncs.com
+        shell: bash
+        run: |
+          if [[ -z "$OSS_ACCESS_KEY_ID" ]]; then
+            echo "⚠️ OSS credentials not available, skipping latest.json update"
+            exit 0
+          fi
+
+          VERSION="${{ needs.build-check.outputs.version }}"
+          TAG="${{ needs.build-check.outputs.version }}"
+
+          # Install ossutil
+          OSSUTIL_VERSION="2.1.1"
+          OSSUTIL_ZIP="ossutil-${OSSUTIL_VERSION}-linux-amd64.zip"
+          OSSUTIL_DIR="ossutil-${OSSUTIL_VERSION}-linux-amd64"
+
+          curl -o "$OSSUTIL_ZIP" "https://gosspublic.alicdn.com/ossutil/v2/${OSSUTIL_VERSION}/${OSSUTIL_ZIP}"
+          unzip "$OSSUTIL_ZIP"
+          mv "${OSSUTIL_DIR}/ossutil" /usr/local/bin/
+          rm -rf "$OSSUTIL_DIR" "$OSSUTIL_ZIP"
+          chmod +x /usr/local/bin/ossutil
+
+          # Create latest.json
+          cat > latest.json << EOF
+          {
+            "version": "${VERSION}",
+            "tag": "${TAG}",
+            "release_date": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
+            "release_type": "stable",
+            "download_url": "https://github.com/${{ github.repository }}/releases/tag/${TAG}"
+          }
+          EOF
+
+          # Upload to OSS
+          ossutil cp latest.json oss://rustfs-version/latest.json --force
+
+          echo "✅ Updated latest.json for stable release $VERSION"
+
+  # Publish release (remove draft status)
+  publish-release:
+    name: Publish Release
+    needs: [ build-check, create-release, upload-release-assets ]
+    if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
+    runs-on: ubuntu-latest
+    permissions:
+      contents: write
+    steps:
+      - name: Checkout repository
+        uses: actions/checkout@v5
+
+      - name: Update release notes and publish
+        env:
+          GH_TOKEN: ${{ github.token }}
+        shell: bash
+        run: |
+          TAG="${{ needs.build-check.outputs.version }}"
+          VERSION="${{ needs.build-check.outputs.version }}"
+          IS_PRERELEASE="${{ needs.build-check.outputs.is_prerelease }}"
+          BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
+
+          # Determine release type
+          if [[ "$BUILD_TYPE" == "prerelease" ]]; then
+            if [[ "$TAG" == *"alpha"* ]]; then
+              RELEASE_TYPE="alpha"
+            elif [[ "$TAG" == *"beta"* ]]; then
+              RELEASE_TYPE="beta"
+            elif [[ "$TAG" == *"rc"* ]]; then
+              RELEASE_TYPE="rc"
+            else
+              RELEASE_TYPE="prerelease"
+            fi
+          else
+            RELEASE_TYPE="release"
+          fi
+
+          # Get original release notes from tag
+          ORIGINAL_NOTES=$(git tag -l --format='%(contents)' "${TAG}")
+          if [[ -z "$ORIGINAL_NOTES" || "$ORIGINAL_NOTES" =~ ^[[:space:]]*$ ]]; then
+            if [[ "$IS_PRERELEASE" == "true" ]]; then
+              ORIGINAL_NOTES="Pre-release ${VERSION} (${RELEASE_TYPE})"
+            else
+              ORIGINAL_NOTES="Release ${VERSION}"
+            fi
+          fi
+
+          # Publish the release (remove draft status)
+          gh release edit "$TAG" --draft=false
+
+          echo "🎉 Released $TAG successfully!"
+          echo "📄 Release URL: ${{ needs.create-release.outputs.release_url }}"
```
`.github/workflows/ci.yml` (21 changes)

```diff
@@ -16,7 +16,7 @@ name: Continuous Integration
 
 on:
   push:
-    branches: [main]
+    branches: [ main ]
     paths-ignore:
       - "**.md"
       - "**.txt"
@@ -36,7 +36,7 @@ on:
       - ".github/workflows/audit.yml"
       - ".github/workflows/performance.yml"
   pull_request:
-    branches: [main]
+    branches: [ main ]
     paths-ignore:
       - "**.md"
       - "**.txt"
@@ -59,6 +59,9 @@ on:
     - cron: "0 0 * * 0" # Weekly on Sunday at midnight UTC
   workflow_dispatch:
 
+permissions:
+  contents: read
+
 env:
   CARGO_TERM_COLOR: always
   RUST_BACKTRACE: 1
@@ -83,6 +86,16 @@ jobs:
           # Never skip release events and tag pushes
           do_not_skip: '["workflow_dispatch", "schedule", "merge_group", "release", "push"]'
 
+  typos:
+    name: Typos
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v5
+      - uses: dtolnay/rust-toolchain@stable
+      - name: Typos check with custom config file
+        uses: crate-ci/typos@master
+
   test-and-lint:
     name: Test and Lint
     needs: skip-check
```
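The new `typos` job runs crate-ci/typos over the repository. The same check can be reproduced locally; a sketch, assuming the published `typos-cli` crate is acceptable to install:

```bash
cargo install typos-cli   # installs the `typos` binary
typos                     # scan the working tree; exits non-zero on findings
typos --write-changes     # optionally apply the suggested fixes
```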
```diff
@@ -91,7 +104,7 @@ jobs:
     timeout-minutes: 60
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
 
       - name: Setup Rust environment
         uses: ./.github/actions/setup
@@ -120,7 +133,7 @@ jobs:
     timeout-minutes: 30
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
 
       - name: Setup Rust environment
         uses: ./.github/actions/setup
```
`.github/workflows/docker.yml` (418 changes)

```diff
@@ -12,42 +12,33 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+# Docker Images Workflow
+#
+# This workflow builds Docker images using pre-built binaries from the build workflow.
+#
+# Trigger Types:
+# 1. workflow_run: Automatically triggered when "Build and Release" workflow completes
+# 2. workflow_dispatch: Manual trigger for standalone Docker builds
+#
+# Key Features:
+# - Only triggers when Linux builds (x86_64 + aarch64) are successful
+# - Independent of macOS/Windows build status
+# - Uses workflow_run event for precise control
+# - Only builds Docker images for releases and prereleases (development builds are skipped)
+
 name: Docker Images
 
+# Permissions needed for workflow_run event and Docker registry access
+permissions:
+  contents: read
+  packages: write
+
 on:
-  push:
-    tags: ["*.*.*"]
-    branches: [main]
-    paths-ignore:
-      - "**.md"
-      - "**.txt"
-      - ".github/**"
-      - "docs/**"
-      - "deploy/**"
-      - "scripts/dev_*.sh"
-      - "LICENSE*"
-      - "README*"
-      - "**/*.png"
-      - "**/*.jpg"
-      - "**/*.svg"
-      - ".gitignore"
-      - ".dockerignore"
-  pull_request:
-    branches: [main]
-    paths-ignore:
-      - "**.md"
-      - "**.txt"
-      - ".github/**"
-      - "docs/**"
-      - "deploy/**"
-      - "scripts/dev_*.sh"
-      - "LICENSE*"
-      - "README*"
-      - "**/*.png"
-      - "**/*.jpg"
-      - "**/*.svg"
-      - ".gitignore"
-      - ".dockerignore"
+  # Automatically triggered when build workflow completes
+  workflow_run:
+    workflows: [ "Build and Release" ]
+    types: [ completed ]
+  # Manual trigger with same parameters for consistency
   workflow_dispatch:
     inputs:
       push_images:
@@ -56,9 +47,9 @@ on:
         default: true
         type: boolean
       version:
-        description: "Version to build (latest, main-latest, dev-latest, or specific version like v1.0.0 or dev-abc123)"
+        description: "Version to build (latest for stable release, or specific version like v1.0.0, v1.0.0-alpha1)"
         required: false
-        default: "main-latest"
+        default: "latest"
         type: string
       force_rebuild:
         description: "Force rebuild even if binary exists (useful for testing)"
@@ -67,12 +58,18 @@ on:
         type: boolean
 
 env:
+  CONCLUSION: ${{ github.event.workflow_run.conclusion }}
+  HEAD_BRANCH: ${{ github.event.workflow_run.head_branch }}
+  HEAD_SHA: ${{ github.event.workflow_run.head_sha }}
+  TRIGGERING_EVENT: ${{ github.event.workflow_run.event }}
+  DOCKERHUB_USERNAME: rustfs
   CARGO_TERM_COLOR: always
   REGISTRY_DOCKERHUB: rustfs/rustfs
   REGISTRY_GHCR: ghcr.io/${{ github.repository }}
   DOCKER_PLATFORMS: linux/amd64,linux/arm64
 
 jobs:
-  # Docker build strategy check
+  # Check if we should build Docker images
   build-check:
     name: Docker Build Check
     runs-on: ubuntu-latest
```
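Besides the automatic workflow_run chaining, the workflow can be dispatched by hand with the inputs defined above. A sketch using the GitHub CLI; the repository slug is illustrative:

```bash
# Manually dispatch the Docker Images workflow with explicit inputs.
gh workflow run docker.yml \
  --repo rustfs/rustfs \
  -f version=latest \
  -f push_images=true \
  -f force_rebuild=false
```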
@@ -86,9 +83,11 @@ jobs:
|
||||
create_latest: ${{ steps.check.outputs.create_latest }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v5
|
||||
with:
|
||||
fetch-depth: 0
|
||||
# For workflow_run events, checkout the specific commit that triggered the workflow
|
||||
ref: ${{ github.event.workflow_run.head_sha || github.sha }}
|
||||
|
||||
- name: Check build conditions
|
||||
id: check
|
||||
@@ -101,26 +100,103 @@ jobs:
|
||||
is_prerelease=false
|
||||
create_latest=false
|
||||
|
||||
# Get short SHA for all builds
|
||||
short_sha=$(git rev-parse --short HEAD)
|
||||
if [[ "${{ github.event_name }}" == "workflow_run" ]]; then
|
||||
# Triggered by build workflow completion
|
||||
echo "🔗 Triggered by build workflow completion"
|
||||
|
||||
# Always build on workflow_dispatch or when changes detected
|
||||
if [[ "${{ github.event_name }}" == "workflow_dispatch" ]] || \
|
||||
[[ "${{ github.event_name }}" == "push" ]] || \
|
||||
[[ "${{ github.event_name }}" == "pull_request" ]]; then
|
||||
should_build=true
|
||||
fi
|
||||
# Check if the triggering workflow was successful
|
||||
# If the workflow succeeded, it means ALL builds (including Linux x86_64 and aarch64) succeeded
|
||||
if [[ "$CONCLUSION" == "success" ]]; then
|
||||
echo "✅ Build workflow succeeded, all builds including Linux are successful"
|
||||
should_build=true
|
||||
should_push=true
|
||||
else
|
||||
echo "❌ Build workflow failed (conclusion: $CONCLUSION), skipping Docker build"
|
||||
should_build=false
|
||||
fi
|
||||
|
||||
# Determine build type and version
|
||||
if [[ "${{ github.event_name }}" == "workflow_dispatch" ]] && [[ -n "${{ github.event.inputs.version }}" ]]; then
|
||||
# Manual trigger with version input
|
||||
# Extract version info from commit message or use commit SHA
|
||||
# Use Git to generate consistent short SHA (ensures uniqueness like build.yml)
|
||||
short_sha=$(git rev-parse --short "$HEAD_SHA")
|
||||
|
||||
# Determine build type based on triggering workflow event and ref
|
||||
triggering_event="$TRIGGERING_EVENT"
|
||||
head_branch="$HEAD_BRANCH"
|
||||
|
||||
echo "🔍 Analyzing triggering workflow:"
|
||||
echo " 📋 Event: $triggering_event"
|
||||
echo " 🌿 Head branch: $head_branch"
|
||||
echo " 📎 Head SHA: $HEAD_SHA"
|
||||
|
||||
# Check if this was triggered by a tag push
|
||||
if [[ "$triggering_event" == "push" ]]; then
|
||||
# For tag pushes, head_branch will be like "refs/tags/v1.0.0" or just "v1.0.0"
|
||||
if [[ "$head_branch" == refs/tags/* ]]; then
|
||||
# Extract tag name from refs/tags/TAG_NAME
|
||||
tag_name="${head_branch#refs/tags/}"
|
||||
version="$tag_name"
|
||||
elif [[ "$head_branch" =~ ^v?[0-9]+\.[0-9]+\.[0-9]+ ]]; then
|
||||
# Direct tag name like "v1.0.0" or "1.0.0-alpha.1"
|
||||
version="$head_branch"
|
||||
elif [[ "$head_branch" == "main" ]]; then
|
||||
# Regular branch push to main
|
||||
build_type="development"
|
||||
version="dev-${short_sha}"
|
||||
should_build=false
|
||||
echo "⏭️ Skipping Docker build for development version (main branch push)"
|
||||
else
|
||||
# Other branch push
|
||||
build_type="development"
|
||||
version="dev-${short_sha}"
|
||||
should_build=false
|
||||
echo "⏭️ Skipping Docker build for development version (branch: $head_branch)"
|
||||
fi
|
||||
|
||||
# If we extracted a version (tag), determine release type
|
||||
if [[ -n "$version" ]] && [[ "$version" != "dev-${short_sha}" ]]; then
|
||||
# Remove 'v' prefix if present for consistent version format
|
||||
if [[ "$version" == v* ]]; then
|
||||
version="${version#v}"
|
||||
fi
|
||||
|
||||
if [[ "$version" == *"alpha"* ]] || [[ "$version" == *"beta"* ]] || [[ "$version" == *"rc"* ]]; then
|
||||
build_type="prerelease"
|
||||
is_prerelease=true
|
||||
echo "🧪 Building Docker image for prerelease: $version"
|
||||
else
|
||||
build_type="release"
|
||||
create_latest=true
|
||||
echo "🚀 Building Docker image for release: $version"
|
||||
fi
|
||||
fi
|
||||
else
|
||||
# Non-push events
|
||||
build_type="development"
|
||||
version="dev-${short_sha}"
|
||||
should_build=false
|
||||
echo "⏭️ Skipping Docker build for development version (event: $triggering_event)"
|
||||
fi
|
||||
|
||||
echo "🔄 Build triggered by workflow_run:"
|
||||
echo " 📋 Conclusion: $CONCLUSION"
|
||||
echo " 🌿 Branch: $HEAD_BRANCH"
|
||||
echo " 📎 SHA: $HEAD_SHA"
|
||||
echo " 🎯 Event: $TRIGGERING_EVENT"
|
||||
|
||||
elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
|
||||
# Manual trigger
|
||||
input_version="${{ github.event.inputs.version }}"
|
||||
version="${input_version}"
|
||||
force_rebuild="${{ github.event.inputs.force_rebuild }}"
|
||||
should_push="${{ github.event.inputs.push_images }}"
|
||||
should_build=true
|
||||
|
||||
# Get short SHA
|
||||
short_sha=$(git rev-parse --short HEAD)
|
||||
|
||||
echo "🎯 Manual Docker build triggered:"
|
||||
echo " 📋 Requested version: $input_version"
|
||||
echo " 🔧 Force rebuild: $force_rebuild"
|
||||
echo " 🔧 Force rebuild: ${{ github.event.inputs.force_rebuild }}"
|
||||
echo " 🚀 Push images: $should_push"
|
||||
|
||||
case "$input_version" in
|
||||
"latest")
|
||||
@@ -128,68 +204,25 @@ jobs:
|
||||
create_latest=true
|
||||
echo "🚀 Building with latest stable release version"
|
||||
;;
|
||||
"main-latest")
|
||||
build_type="development"
|
||||
version="main-latest"
|
||||
echo "🛠️ Building with main branch latest development version"
|
||||
;;
|
||||
"dev-latest")
|
||||
build_type="development"
|
||||
version="dev-latest"
|
||||
echo "🛠️ Building with development latest version"
|
||||
;;
|
||||
v[0-9]*)
|
||||
build_type="release"
|
||||
create_latest=true
|
||||
echo "📦 Building with specific release version: $input_version"
|
||||
;;
|
||||
v*alpha*|v*beta*|v*rc*)
|
||||
# Prerelease versions (must match first, more specific)
|
||||
v*alpha*|v*beta*|v*rc*|*alpha*|*beta*|*rc*)
|
||||
build_type="prerelease"
|
||||
is_prerelease=true
|
||||
echo "🧪 Building with prerelease version: $input_version"
|
||||
;;
|
||||
dev-[a-f0-9]*)
|
||||
build_type="development"
|
||||
echo "🔧 Building with specific development version: $input_version"
|
||||
# Release versions (match after prereleases, more general)
|
||||
v[0-9]*|[0-9]*.*.*)
|
||||
build_type="release"
|
||||
create_latest=true
|
||||
echo "📦 Building with specific release version: $input_version"
|
||||
;;
|
||||
*)
|
||||
build_type="development"
|
||||
echo "🔧 Building with custom version: $input_version"
|
||||
echo "⚠️ Warning: Custom version format may not follow standard patterns"
|
||||
# Invalid version for Docker build
|
||||
should_build=false
|
||||
echo "❌ Invalid version for Docker build: $input_version"
|
||||
echo "⚠️ Only release versions (latest, v1.0.0, 1.0.0) and prereleases (v1.0.0-alpha1, 1.0.0-beta2) are supported"
|
||||
;;
|
||||
esac
|
||||
elif [[ "${{ startsWith(github.ref, 'refs/tags/') }}" == "true" ]]; then
|
||||
# Tag push - release or prerelease
|
||||
tag_name="${GITHUB_REF#refs/tags/}"
|
||||
version="${tag_name}"
|
||||
|
||||
# Check if this is a prerelease
|
||||
if [[ "$tag_name" == *"alpha"* ]] || [[ "$tag_name" == *"beta"* ]] || [[ "$tag_name" == *"rc"* ]]; then
|
||||
build_type="prerelease"
|
||||
is_prerelease=true
|
||||
echo "🚀 Docker prerelease build detected: $tag_name"
|
||||
else
|
||||
build_type="release"
|
||||
create_latest=true
|
||||
echo "📦 Docker release build detected: $tag_name"
|
||||
fi
|
||||
elif [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
|
||||
# Main branch push - development build
|
||||
build_type="development"
|
||||
version="dev-${short_sha}"
|
||||
echo "🛠️ Docker development build detected"
|
||||
else
|
||||
# Other branches - development build
|
||||
build_type="development"
|
||||
version="dev-${short_sha}"
|
||||
echo "🔧 Docker development build detected"
|
||||
fi
|
||||
|
||||
# Push only on main branch, tags, or manual trigger
|
||||
if [[ "${{ github.ref }}" == "refs/heads/main" ]] || \
|
||||
[[ "${{ startsWith(github.ref, 'refs/tags/') }}" == "true" ]] || \
|
||||
[[ "${{ github.event.inputs.push_images }}" == "true" ]]; then
|
||||
should_push=true
|
||||
fi
|
||||
|
||||
echo "should_build=$should_build" >> $GITHUB_OUTPUT
|
||||
@@ -210,35 +243,24 @@ jobs:
|
||||
echo " - Create latest: $create_latest"
|
||||
|
||||
# Build multi-arch Docker images
|
||||
# Strategy: Build images using pre-built binaries from dl.rustfs.com
|
||||
# Supports both release and dev channel binaries based on build context
|
||||
# Only runs when should_build is true (which includes workflow success check)
|
||||
build-docker:
|
||||
name: Build Docker Images
|
||||
needs: build-check
|
||||
if: needs.build-check.outputs.should_build == 'true'
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
variant:
|
||||
- name: production
|
||||
dockerfile: Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
#- name: source
|
||||
# dockerfile: Dockerfile.source
|
||||
# platforms: linux/amd64,linux/arm64
|
||||
#- name: dev
|
||||
# dockerfile: Dockerfile.source
|
||||
# platforms: linux/amd64,linux/arm64
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
username: ${{ env.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
scopes: repository:rustfs/rustfs:pull,push
|
||||
|
||||
# - name: Login to GitHub Container Registry
|
||||
# uses: docker/login-action@v3
|
||||
@@ -260,57 +282,54 @@ jobs:
VERSION="${{ needs.build-check.outputs.version }}"
SHORT_SHA="${{ needs.build-check.outputs.short_sha }}"
CREATE_LATEST="${{ needs.build-check.outputs.create_latest }}"
VARIANT="${{ matrix.variant.name }}"

# Convert version format for Dockerfile compatibility
case "$VERSION" in
"latest")
# For stable latest, use RELEASE=latest + release CHANNEL
DOCKER_RELEASE="latest"
DOCKER_CHANNEL="release"
;;
v*)
# For versioned releases (v1.0.0), remove 'v' prefix for Dockerfile
DOCKER_RELEASE="${VERSION#v}"
DOCKER_CHANNEL="release"
;;
*)
# For other versions, pass as-is
DOCKER_RELEASE="${VERSION}"
DOCKER_CHANNEL="release"
;;
esac

echo "docker_release=$DOCKER_RELEASE" >> $GITHUB_OUTPUT
echo "docker_channel=$DOCKER_CHANNEL" >> $GITHUB_OUTPUT

echo "🐳 Docker build parameters:"
echo " - Original version: $VERSION"
echo " - Docker RELEASE: $DOCKER_RELEASE"
echo " - Docker CHANNEL: $DOCKER_CHANNEL"

# Generate tags based on build type
TAGS=""
# Only support release and prerelease builds (no development builds)
TAGS="${{ env.REGISTRY_DOCKERHUB }}:${VERSION}"

if [[ "$BUILD_TYPE" == "development" ]]; then
# Development build: dev-${short_sha}-${variant} and dev-${variant}
TAGS="${{ env.REGISTRY_DOCKERHUB }}:dev-${SHORT_SHA}-${VARIANT}"

# Add rolling dev tag for each variant
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:dev-${VARIANT}"

# Special handling for production variant
if [[ "$VARIANT" == "production" ]]; then
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:dev-${SHORT_SHA}"
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:dev"
fi
else
# Release/Prerelease build: ${version}-${variant}
TAGS="${{ env.REGISTRY_DOCKERHUB }}:${VERSION}-${VARIANT}"

# Special handling for production variant - create main version tag
if [[ "$VARIANT" == "production" ]]; then
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:${VERSION}"
# Add channel tags for prereleases and latest for stable
if [[ "$CREATE_LATEST" == "true" ]]; then
# Stable release
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:latest"
elif [[ "$BUILD_TYPE" == "prerelease" ]]; then
# Prerelease channel tags (alpha, beta, rc)
if [[ "$VERSION" == *"alpha"* ]]; then
CHANNEL="alpha"
elif [[ "$VERSION" == *"beta"* ]]; then
CHANNEL="beta"
elif [[ "$VERSION" == *"rc"* ]]; then
CHANNEL="rc"
fi

# Add channel tags for prereleases and latest for stable
if [[ "$CREATE_LATEST" == "true" ]]; then
# Stable release
if [[ "$VARIANT" == "production" ]]; then
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:latest"
else
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:latest-${VARIANT}"
fi
elif [[ "$BUILD_TYPE" == "prerelease" ]]; then
# Prerelease channel tags (alpha, beta, rc)
if [[ "$VERSION" == *"alpha"* ]]; then
CHANNEL="alpha"
elif [[ "$VERSION" == *"beta"* ]]; then
CHANNEL="beta"
elif [[ "$VERSION" == *"rc"* ]]; then
CHANNEL="rc"
fi

if [[ -n "$CHANNEL" ]]; then
if [[ "$VARIANT" == "production" ]]; then
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:${CHANNEL}"
else
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:${CHANNEL}-${VARIANT}"
fi
fi
if [[ -n "$CHANNEL" ]]; then
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:${CHANNEL}"
fi
fi
@@ -324,7 +343,6 @@ jobs:
LABELS="$LABELS,org.opencontainers.image.revision=${{ github.sha }}"
LABELS="$LABELS,org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}"
LABELS="$LABELS,org.opencontainers.image.created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')"
LABELS="$LABELS,org.opencontainers.image.variant=$VARIANT"
LABELS="$LABELS,org.opencontainers.image.build-type=$BUILD_TYPE"

echo "labels=$LABELS" >> $GITHUB_OUTPUT
@@ -338,20 +356,22 @@ jobs:
uses: docker/build-push-action@v6
with:
context: .
file: ${{ matrix.variant.dockerfile }}
platforms: ${{ matrix.variant.platforms }}
file: Dockerfile
platforms: ${{ env.DOCKER_PLATFORMS }}
push: ${{ needs.build-check.outputs.should_push == 'true' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: |
type=gha,scope=docker-${{ matrix.variant.name }}
type=gha,scope=docker-binary
cache-to: |
type=gha,mode=max,scope=docker-${{ matrix.variant.name }}
type=gha,mode=max,scope=docker-binary
build-args: |
BUILDTIME=$(date -u +'%Y-%m-%dT%H:%M:%SZ')
VERSION=${{ needs.build-check.outputs.version }}
BUILD_TYPE=${{ needs.build-check.outputs.build_type }}
REVISION=${{ github.sha }}
RELEASE=${{ steps.meta.outputs.docker_release }}
CHANNEL=${{ steps.meta.outputs.docker_channel }}
BUILDKIT_INLINE_CACHE=1
# Enable advanced BuildKit features for better performance
provenance: false
@@ -360,43 +380,13 @@ jobs:
no-cache: false
pull: true

# Create manifest for main production image (only for stable releases)
create-manifest:
name: Create Manifest
needs: [build-check, build-docker]
if: needs.build-check.outputs.should_push == 'true' && needs.build-check.outputs.create_latest == 'true' && needs.build-check.outputs.build_type == 'release'
runs-on: ubuntu-latest
steps:
- name: Login to Docker Hub
uses: docker/login-action@v3
with:
username: ${{ secrets.DOCKERHUB_USERNAME }}
password: ${{ secrets.DOCKERHUB_TOKEN }}

# - name: Login to GitHub Container Registry
# uses: docker/login-action@v3
# with:
# registry: ghcr.io
# username: ${{ github.actor }}
# password: ${{ secrets.GITHUB_TOKEN }}

- name: Create and push manifest
run: |
VERSION="${{ needs.build-check.outputs.version }}"

echo "🐳 Creating manifest for stable release: $VERSION"

# Create main image tag (without variant suffix) for stable releases only
# Note: The "production" variant already creates the main tags without suffix
echo "Manifest creation is handled by the production variant build step"
echo "Main tags ${VERSION} and latest are created directly by the production variant"

echo "✅ Manifest created successfully for stable release"
# Note: Manifest creation is no longer needed as we only build one variant
# Multi-arch manifests are automatically created by docker/build-push-action

# Docker build summary
docker-summary:
name: Docker Build Summary
needs: [build-check, build-docker]
needs: [ build-check, build-docker ]
if: always() && needs.build-check.outputs.should_build == 'true'
runs-on: ubuntu-latest
steps:
@@ -409,23 +399,23 @@ jobs:
echo "🐳 Docker build completed successfully!"
echo "📦 Build type: $BUILD_TYPE"
echo "🔢 Version: $VERSION"
echo "🚀 Strategy: Images using pre-built binaries (release channel only)"
echo ""

case "$BUILD_TYPE" in
"development")
echo "🛠️ Development Docker images have been built with dev-${VERSION} tags"
echo "⚠️ These are development images - not suitable for production use"
;;
"release")
echo "🚀 Release Docker images have been built with v${VERSION} tags"
echo "✅ These images are ready for production use"
echo "🚀 Release Docker image has been built with ${VERSION} tags"
echo "✅ This image is ready for production use"
if [[ "$CREATE_LATEST" == "true" ]]; then
echo "🏷️ Latest tags have been created for stable release"
echo "🏷️ Latest tag has been created for stable release"
fi
;;
"prerelease")
echo "🧪 Prerelease Docker images have been built with v${VERSION} tags"
echo "⚠️ These are prerelease images - use with caution"
echo "🚫 Latest tags NOT created for prerelease"
echo "🧪 Prerelease Docker image has been built with ${VERSION} tags"
echo "⚠️ This is a prerelease image - use with caution"
echo "🚫 Latest tag NOT created for prerelease"
;;
*)
echo "❌ Unexpected build type: $BUILD_TYPE"
;;
esac
.github/workflows/issue-translator.yml (8 changed lines)
@@ -15,9 +15,13 @@
name: "issue-translator"
on:
issue_comment:
types: [created]
types: [ created ]
issues:
types: [opened]
types: [ opened ]

permissions:
contents: read
issues: write

jobs:
build:
.github/workflows/performance.yml (9 changed lines)
@@ -16,7 +16,7 @@ name: Performance Testing
on:
push:
branches: [main]
branches: [ main ]
paths:
- "**/*.rs"
- "**/Cargo.toml"
@@ -30,6 +30,9 @@ on:
default: "120"
type: string

permissions:
contents: read

env:
CARGO_TERM_COLOR: always
RUST_BACKTRACE: 1
@@ -41,7 +44,7 @@ jobs:
timeout-minutes: 30
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v5

- name: Setup Rust environment
uses: ./.github/actions/setup
@@ -116,7 +119,7 @@ jobs:
timeout-minutes: 45
steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v5

- name: Setup Rust environment
uses: ./.github/actions/setup
.github/workflows/release-notes-template.md (deleted, 78 lines)
@@ -1,78 +0,0 @@
## RustFS ${VERSION_CLEAN}

${ORIGINAL_NOTES}

---

### 🚀 Quick Download

**Linux (Static Binaries - No Dependencies):**

```bash
# x86_64 (Intel/AMD)
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-x86_64-unknown-linux-musl.zip
unzip rustfs-x86_64-unknown-linux-musl.zip
sudo mv rustfs /usr/local/bin/

# ARM64 (Graviton, Apple Silicon VMs)
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-aarch64-unknown-linux-musl.zip
unzip rustfs-aarch64-unknown-linux-musl.zip
sudo mv rustfs /usr/local/bin/
```

**macOS:**

```bash
# Apple Silicon (M1/M2/M3)
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-aarch64-apple-darwin.zip
unzip rustfs-aarch64-apple-darwin.zip
sudo mv rustfs /usr/local/bin/

# Intel
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-x86_64-apple-darwin.zip
unzip rustfs-x86_64-apple-darwin.zip
sudo mv rustfs /usr/local/bin/
```

### 📁 Available Downloads

| Platform | Architecture | File | Description |
|----------|-------------|------|-------------|
| Linux | x86_64 | `rustfs-x86_64-unknown-linux-musl.zip` | Static binary, no dependencies |
| Linux | ARM64 | `rustfs-aarch64-unknown-linux-musl.zip` | Static binary, no dependencies |
| macOS | Apple Silicon | `rustfs-aarch64-apple-darwin.zip` | Native binary, ZIP archive |
| macOS | Intel | `rustfs-x86_64-apple-darwin.zip` | Native binary, ZIP archive |

### 🔐 Verification

Download checksums and verify your download:

```bash
# Download checksums
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/SHA256SUMS

# Verify (Linux)
sha256sum -c SHA256SUMS --ignore-missing

# Verify (macOS)
shasum -a 256 -c SHA256SUMS --ignore-missing
```

### 🛠️ System Requirements

- **Linux**: Any distribution with glibc 2.17+ (CentOS 7+, Ubuntu 16.04+)
- **macOS**: 10.15+ (Catalina or later)
- **Windows**: Windows 10 version 1809 or later

### 📚 Documentation

- [Installation Guide](https://github.com/rustfs/rustfs#installation)
- [Quick Start](https://github.com/rustfs/rustfs#quick-start)
- [Configuration](https://github.com/rustfs/rustfs/blob/main/docs/)
- [API Documentation](https://docs.rs/rustfs)

### 🆘 Support

- 🐛 [Report Issues](https://github.com/rustfs/rustfs/issues)
- 💬 [Community Discussions](https://github.com/rustfs/rustfs/discussions)
- 📖 [Documentation](https://github.com/rustfs/rustfs/tree/main/docs)
.github/workflows/release.yml (deleted, 353 lines)
@@ -1,353 +0,0 @@
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: Release

on:
  push:
    tags: ["*.*.*"]
  workflow_dispatch:
    inputs:
      tag:
        description: "Tag to create release for"
        required: true
        type: string

env:
  CARGO_TERM_COLOR: always

jobs:
  # Determine release type
  release-check:
    name: Release Type Check
    runs-on: ubuntu-latest
    outputs:
      tag: ${{ steps.check.outputs.tag }}
      version: ${{ steps.check.outputs.version }}
      is_prerelease: ${{ steps.check.outputs.is_prerelease }}
      release_type: ${{ steps.check.outputs.release_type }}
    steps:
      - name: Determine release type
        id: check
        run: |
          if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
            TAG="${{ github.event.inputs.tag }}"
          else
            TAG="${GITHUB_REF#refs/tags/}"
          fi

          VERSION="${TAG}"

          # Check if this is a prerelease
          IS_PRERELEASE=false
          RELEASE_TYPE="release"

          if [[ "$TAG" == *"alpha"* ]] || [[ "$TAG" == *"beta"* ]] || [[ "$TAG" == *"rc"* ]]; then
            IS_PRERELEASE=true
            if [[ "$TAG" == *"alpha"* ]]; then
              RELEASE_TYPE="alpha"
            elif [[ "$TAG" == *"beta"* ]]; then
              RELEASE_TYPE="beta"
            elif [[ "$TAG" == *"rc"* ]]; then
              RELEASE_TYPE="rc"
            fi
          fi

          echo "tag=$TAG" >> $GITHUB_OUTPUT
          echo "version=$VERSION" >> $GITHUB_OUTPUT
          echo "is_prerelease=$IS_PRERELEASE" >> $GITHUB_OUTPUT
          echo "release_type=$RELEASE_TYPE" >> $GITHUB_OUTPUT

          echo "📦 Release Type: $RELEASE_TYPE"
          echo "🏷️ Tag: $TAG"
          echo "🔢 Version: $VERSION"
          echo "🚀 Is Prerelease: $IS_PRERELEASE"

  # Create GitHub Release
  create-release:
    name: Create GitHub Release
    needs: release-check
    runs-on: ubuntu-latest
    permissions:
      contents: write
    outputs:
      release_id: ${{ steps.create.outputs.release_id }}
      release_url: ${{ steps.create.outputs.release_url }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        with:
          fetch-depth: 0

      - name: Create GitHub Release
        id: create
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          TAG="${{ needs.release-check.outputs.tag }}"
          VERSION="${{ needs.release-check.outputs.version }}"
          IS_PRERELEASE="${{ needs.release-check.outputs.is_prerelease }}"
          RELEASE_TYPE="${{ needs.release-check.outputs.release_type }}"

          # Check if release already exists
          if gh release view "$TAG" >/dev/null 2>&1; then
            echo "Release $TAG already exists"
            RELEASE_ID=$(gh release view "$TAG" --json databaseId --jq '.databaseId')
            RELEASE_URL=$(gh release view "$TAG" --json url --jq '.url')
          else
            # Get release notes from tag message
            RELEASE_NOTES=$(git tag -l --format='%(contents)' "${TAG}")
            if [[ -z "$RELEASE_NOTES" || "$RELEASE_NOTES" =~ ^[[:space:]]*$ ]]; then
              if [[ "$IS_PRERELEASE" == "true" ]]; then
                RELEASE_NOTES="Pre-release ${VERSION} (${RELEASE_TYPE})"
              else
                RELEASE_NOTES="Release ${VERSION}"
              fi
            fi

            # Create release title
            if [[ "$IS_PRERELEASE" == "true" ]]; then
              TITLE="RustFS $VERSION (${RELEASE_TYPE})"
            else
              TITLE="RustFS $VERSION"
            fi

            # Create the release
            PRERELEASE_FLAG=""
            if [[ "$IS_PRERELEASE" == "true" ]]; then
              PRERELEASE_FLAG="--prerelease"
            fi

            gh release create "$TAG" \
              --title "$TITLE" \
              --notes "$RELEASE_NOTES" \
              $PRERELEASE_FLAG \
              --draft

            RELEASE_ID=$(gh release view "$TAG" --json databaseId --jq '.databaseId')
            RELEASE_URL=$(gh release view "$TAG" --json url --jq '.url')
          fi

          echo "release_id=$RELEASE_ID" >> $GITHUB_OUTPUT
          echo "release_url=$RELEASE_URL" >> $GITHUB_OUTPUT
          echo "Created release: $RELEASE_URL"

  # Wait for build artifacts from build.yml
  wait-for-artifacts:
    name: Wait for Build Artifacts
    needs: release-check
    runs-on: ubuntu-latest
    steps:
      - name: Wait for build workflow
        uses: lewagon/wait-on-check-action@v1.3.1
        with:
          ref: ${{ needs.release-check.outputs.tag }}
          check-name: "Build RustFS"
          repo-token: ${{ secrets.GITHUB_TOKEN }}
          wait-interval: 30
          allowed-conclusions: success

  # Download and prepare release assets
  prepare-assets:
    name: Prepare Release Assets
    needs: [release-check, wait-for-artifacts]
    runs-on: ubuntu-latest
    outputs:
      assets_prepared: ${{ steps.prepare.outputs.assets_prepared }}
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Download artifacts from build workflow
        uses: actions/download-artifact@v4
        with:
          path: ./artifacts
          pattern: rustfs-*
          merge-multiple: true

      - name: Prepare release assets
        id: prepare
        run: |
          VERSION="${{ needs.release-check.outputs.version }}"
          TAG="${{ needs.release-check.outputs.tag }}"

          mkdir -p ./release-assets

          # Copy and verify artifacts
          ASSETS_COUNT=0
          for file in ./artifacts/rustfs-*.zip; do
            if [[ -f "$file" ]]; then
              cp "$file" ./release-assets/
              ASSETS_COUNT=$((ASSETS_COUNT + 1))
            fi
          done

          if [[ $ASSETS_COUNT -eq 0 ]]; then
            echo "❌ No artifacts found!"
            exit 1
          fi

          cd ./release-assets

          # Generate checksums
          if ls *.zip >/dev/null 2>&1; then
            sha256sum *.zip > SHA256SUMS
            sha512sum *.zip > SHA512SUMS
          fi

          # TODO: Add GPG signing for signatures
          # For now, create placeholder signature files
          for file in *.zip; do
            echo "# Signature for $file" > "${file}.asc"
            echo "# GPG signature will be added in future versions" >> "${file}.asc"
          done

          echo "assets_prepared=true" >> $GITHUB_OUTPUT

          echo "📦 Prepared assets:"
          ls -la

          echo "🔢 Asset count: $ASSETS_COUNT"

      - name: Upload prepared assets
        uses: actions/upload-artifact@v4
        with:
          name: release-assets-${{ needs.release-check.outputs.tag }}
          path: ./release-assets/
          retention-days: 30

  # Upload assets to GitHub Release
  upload-assets:
    name: Upload Release Assets
    needs: [release-check, create-release, prepare-assets]
    runs-on: ubuntu-latest
    permissions:
      contents: write
    steps:
      - name: Download prepared assets
        uses: actions/download-artifact@v4
        with:
          name: release-assets-${{ needs.release-check.outputs.tag }}
          path: ./release-assets

      - name: Upload to GitHub Release
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          TAG="${{ needs.release-check.outputs.tag }}"

          cd ./release-assets

          # Upload all files
          for file in *; do
            if [[ -f "$file" ]]; then
              echo "📤 Uploading $file..."
              gh release upload "$TAG" "$file" --clobber
            fi
          done

          echo "✅ All assets uploaded successfully"

  # Update latest.json for stable releases only
  update-latest:
    name: Update Latest Version
    needs: [release-check, upload-assets]
    if: needs.release-check.outputs.is_prerelease == 'false'
    runs-on: ubuntu-latest
    steps:
      - name: Update latest.json
        env:
          OSS_ACCESS_KEY_ID: ${{ secrets.ALICLOUDOSS_KEY_ID }}
          OSS_ACCESS_KEY_SECRET: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
        run: |
          if [[ -z "$OSS_ACCESS_KEY_ID" ]]; then
            echo "⚠️ OSS credentials not available, skipping latest.json update"
            exit 0
          fi

          VERSION="${{ needs.release-check.outputs.version }}"
          TAG="${{ needs.release-check.outputs.tag }}"

          # Install ossutil
          OSSUTIL_VERSION="2.1.1"
          OSSUTIL_ZIP="ossutil-${OSSUTIL_VERSION}-linux-amd64.zip"
          OSSUTIL_DIR="ossutil-${OSSUTIL_VERSION}-linux-amd64"

          curl -o "$OSSUTIL_ZIP" "https://gosspublic.alicdn.com/ossutil/v2/${OSSUTIL_VERSION}/${OSSUTIL_ZIP}"
          unzip "$OSSUTIL_ZIP"
          chmod +x "${OSSUTIL_DIR}/ossutil"

          # Create latest.json
          cat > latest.json << EOF
          {
            "version": "${VERSION}",
            "tag": "${TAG}",
            "release_date": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
            "release_type": "stable",
            "download_url": "https://github.com/${{ github.repository }}/releases/tag/${TAG}"
          }
          EOF

          # Upload to OSS
          ./${OSSUTIL_DIR}/ossutil cp latest.json oss://rustfs-version/latest.json --force

          echo "✅ Updated latest.json for stable release $VERSION"

  # Publish release (remove draft status)
  publish-release:
    name: Publish Release
    needs: [release-check, create-release, upload-assets]
    runs-on: ubuntu-latest
    permissions:
      contents: write
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4

      - name: Update release notes and publish
        env:
          GH_TOKEN: ${{ github.token }}
        run: |
          TAG="${{ needs.release-check.outputs.tag }}"
          VERSION="${{ needs.release-check.outputs.version }}"
          IS_PRERELEASE="${{ needs.release-check.outputs.is_prerelease }}"
          RELEASE_TYPE="${{ needs.release-check.outputs.release_type }}"

          # Get original release notes from tag
          ORIGINAL_NOTES=$(git tag -l --format='%(contents)' "${TAG}")
          if [[ -z "$ORIGINAL_NOTES" || "$ORIGINAL_NOTES" =~ ^[[:space:]]*$ ]]; then
            if [[ "$IS_PRERELEASE" == "true" ]]; then
              ORIGINAL_NOTES="Pre-release ${VERSION} (${RELEASE_TYPE})"
            else
              ORIGINAL_NOTES="Release ${VERSION}"
            fi
          fi

          # Use release notes template if available
          if [[ -f ".github/workflows/release-notes-template.md" ]]; then
            # Substitute variables in template
            sed -e "s/\${VERSION}/$TAG/g" \
                -e "s/\${VERSION_CLEAN}/$VERSION/g" \
                -e "s/\${ORIGINAL_NOTES}/$(echo "$ORIGINAL_NOTES" | sed 's/[[\.*^$()+?{|]/\\&/g')/g" \
                .github/workflows/release-notes-template.md > enhanced_notes.md

            # Update release notes
            gh release edit "$TAG" --notes-file enhanced_notes.md
          fi

          # Publish the release (remove draft status)
          gh release edit "$TAG" --draft=false

          echo "🎉 Released $TAG successfully!"
          echo "📄 Release URL: ${{ needs.create-release.outputs.release_url }}"
.gitignore (1 changed line)
@@ -20,3 +20,4 @@ profile.json
.docker/openobserve-otel/data
*.zst
.secrets
*.go
.rules.md (new file, 702 lines)
@@ -0,0 +1,702 @@
# RustFS Project AI Coding Rules

## 🚨🚨🚨 CRITICAL DEVELOPMENT RULES - ZERO TOLERANCE 🚨🚨🚨

### ⛔️ ABSOLUTE PROHIBITION: NEVER COMMIT DIRECTLY TO MASTER/MAIN BRANCH ⛔️

**🔥 THIS IS THE MOST CRITICAL RULE - VIOLATION WILL RESULT IN IMMEDIATE REVERSAL 🔥**

- **🚫 ZERO DIRECT COMMITS TO MAIN/MASTER BRANCH - ABSOLUTELY FORBIDDEN**
- **🚫 ANY DIRECT COMMIT TO MAIN BRANCH MUST BE IMMEDIATELY REVERTED**
- **🚫 NO EXCEPTIONS FOR HOTFIXES, EMERGENCIES, OR URGENT CHANGES**
- **🚫 NO EXCEPTIONS FOR SMALL CHANGES, TYPOS, OR DOCUMENTATION UPDATES**
- **🚫 NO EXCEPTIONS FOR ANYONE - MAINTAINERS, CONTRIBUTORS, OR ADMINS**

### 📋 MANDATORY WORKFLOW - STRICTLY ENFORCED

**EVERY SINGLE CHANGE MUST FOLLOW THIS WORKFLOW:**

1. **Check current branch**: `git branch` (MUST NOT be on main/master)
2. **Switch to main**: `git checkout main`
3. **Pull latest**: `git pull origin main`
4. **Create feature branch**: `git checkout -b feat/your-feature-name`
5. **Make changes ONLY on feature branch**
6. **Test thoroughly before committing**
7. **Commit and push to feature branch**: `git push origin feat/your-feature-name`
8. **Create Pull Request**: Use `gh pr create` (MANDATORY)
9. **Wait for PR approval**: NO self-merging allowed
10. **Merge through GitHub interface**: ONLY after approval

### 🔒 ENFORCEMENT MECHANISMS

- **Branch protection rules**: Main branch is protected
- **Pre-commit hooks**: Will block direct commits to main
- **CI/CD checks**: All PRs must pass before merging
- **Code review requirement**: At least one approval needed
- **Automated reversal**: Direct commits to main will be automatically reverted

## 🎯 Core AI Development Principles

### Five Execution Steps

#### 1. Task Analysis and Planning
- **Clear Objectives**: Deeply understand task requirements and expected results before starting coding
- **Plan Development**: List the specific files, components, and functions that need modification, explaining the reasons for each change
- **Risk Assessment**: Evaluate the impact of changes on existing functionality and develop rollback plans

#### 2. Precise Code Location
- **File Identification**: Determine the specific files and line numbers that need modification
- **Impact Analysis**: Avoid modifying irrelevant files; clearly state the reason for each file modification
- **Minimization Principle**: Unless explicitly required by the task, do not create new abstraction layers or refactor existing code

#### 3. Minimal Code Changes
- **Focus on Core**: Only write code directly required by the task
- **Avoid Redundancy**: Do not add unnecessary logs, comments, tests, or error handling
- **Isolation**: Ensure new code does not interfere with existing functionality; maintain code independence

#### 4. Strict Code Review
- **Correctness Check**: Verify the correctness and completeness of code logic
- **Style Consistency**: Ensure code conforms to the established project coding style
- **Side Effect Assessment**: Evaluate the impact of changes on downstream systems

#### 5. Clear Delivery Documentation
- **Change Summary**: Explain all modifications and the reasons for them in detail
- **File List**: List all modified files and their specific changes
- **Risk Statement**: Mark any assumptions or potential risk points

### Core Principles
- **🎯 Precise Execution**: Strictly follow task requirements; no arbitrary innovation
- **⚡ Efficient Development**: Avoid over-design; only do necessary work
- **🛡️ Safe and Reliable**: Always follow development processes to ensure code quality and system stability
- **🔒 Cautious Modification**: Only modify when you clearly know what needs to change and are confident in the change

### Additional AI Behavior Rules

1. **Use English for all code comments and documentation** - All comments, variable names, function names, documentation, and user-facing text in code should be in English
2. **Clean up temporary scripts after use** - Any temporary scripts, test files, or helper files created during AI work should be removed after task completion
3. **Only make confident modifications** - Do not make speculative changes or "convenient" modifications outside the task scope. If uncertain about a change, ask for clarification rather than guessing

## Project Overview

RustFS is a high-performance distributed object storage system written in Rust, compatible with the S3 API. The project adopts a modular architecture, supporting erasure coding storage, multi-tenant management, observability, and other enterprise-level features.

## Core Architecture Principles

### 1. Modular Design

- The project uses a Cargo workspace structure containing multiple independent crates
- Core modules: `rustfs` (main service), `ecstore` (erasure coding storage), `common` (shared components)
- Functional modules: `iam` (identity management), `madmin` (management interface), `crypto` (encryption), etc.
- Tool modules: `cli` (command line tool), `crates/*` (utility libraries)

### 2. Asynchronous Programming Pattern

- Comprehensive use of the `tokio` async runtime
- Prioritize `async/await` syntax
- Use `async-trait` for async methods in traits
- Avoid blocking operations; use `spawn_blocking` when necessary

### 3. Error Handling Strategy

- **Use modular, type-safe error handling with `thiserror`**
- Each module should define its own error type using the `thiserror::Error` derive macro
- Support error chains and context information through the `#[from]` and `#[source]` attributes
- Use `Result<T>` type aliases for consistency within each module
- Error conversion between modules should use explicit `From` implementations
- Follow the pattern: `pub type Result<T> = core::result::Result<T, Error>`
- Use `#[error("description")]` attributes for clear error messages
- Support error downcasting when needed through `other()` helper methods
- Implement `Clone` for errors when required by the domain logic

## Code Style Guidelines

### 1. Formatting Configuration

```toml
max_width = 130
fn_call_width = 90
single_line_let_else_max_width = 100
```

### 2. **🔧 MANDATORY Code Formatting Rules**

**CRITICAL**: All code must be properly formatted before committing. This project enforces strict formatting standards to maintain code consistency and readability.

#### Pre-commit Requirements (MANDATORY)

Before every commit, you **MUST**:

1. **Format your code**:

   ```bash
   cargo fmt --all
   ```

2. **Verify formatting**:

   ```bash
   cargo fmt --all --check
   ```

3. **Pass clippy checks**:

   ```bash
   cargo clippy --all-targets --all-features -- -D warnings
   ```

4. **Ensure compilation**:

   ```bash
   cargo check --all-targets
   ```

#### Quick Commands

Use these convenient Makefile targets for common tasks:

```bash
# Format all code
make fmt

# Check if code is properly formatted
make fmt-check

# Run clippy checks
make clippy

# Run compilation check
make check

# Run tests
make test

# Run all pre-commit checks (format + clippy + check + test)
make pre-commit

# Setup git hooks (one-time setup)
make setup-hooks
```

### 3. Naming Conventions

- Use `snake_case` for functions, variables, modules
- Use `PascalCase` for types, traits, enums
- Constants use `SCREAMING_SNAKE_CASE`
- Global variables take the prefix `GLOBAL_`, e.g., `GLOBAL_Endpoints`
- Use meaningful and descriptive names for variables, functions, and methods
- Avoid meaningless names like `temp`, `data`, `foo`, `bar`, `test123`
- Choose names that clearly express the purpose and intent
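A minimal illustration of these conventions (all names here are invented for the example):

```rust
use std::sync::OnceLock;

// SCREAMING_SNAKE_CASE for constants
const MAX_UPLOAD_RETRIES: u32 = 3;

// GLOBAL_ prefix for process-wide globals
static GLOBAL_ENDPOINTS: OnceLock<Vec<String>> = OnceLock::new();

// PascalCase for types; snake_case for functions and variables
struct ObjectUploader;

impl ObjectUploader {
    fn should_retry(&self, attempted_retries: u32) -> bool {
        attempted_retries < MAX_UPLOAD_RETRIES
    }
}
```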
### 4. Type Declaration Guidelines

- **Prefer type inference over explicit type declarations** when the type is obvious from context
- Let the Rust compiler infer types whenever possible to reduce verbosity and improve maintainability
- Only specify types explicitly when:
  - The type cannot be inferred by the compiler
  - Explicit typing improves code clarity and readability
  - Required for API boundaries (function signatures, public struct fields)
  - Needed to resolve ambiguity between multiple possible types
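A short sketch of when to rely on inference and when to annotate:

```rust
use std::collections::HashMap;

fn collect_bucket_names() {
    // Inferred: the compiler deduces HashMap<String, u64> from usage
    let mut object_counts = HashMap::new();
    object_counts.insert("reports".to_string(), 42_u64);

    // Explicit: collect() is ambiguous without a target type
    let bucket_names: Vec<String> = object_counts.keys().cloned().collect();
    println!("{bucket_names:?}");
}
```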
### 5. Documentation Comments

- Public APIs must have documentation comments
- Use `///` for documentation comments
- Complex functions add `# Examples` and `# Parameters` descriptions
- Error cases use `# Errors` descriptions
- Always use English for all comments and documentation
- Avoid meaningless comments like "debug 111" or placeholder text
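For example, a documented public API might look like this (the function and the module-level `Result` alias are illustrative):

```rust
/// Returns the size in bytes of the given object.
///
/// # Parameters
/// - `bucket`: name of the source bucket
/// - `object`: object key within the bucket
///
/// # Errors
/// Returns an error if the object does not exist or the backend
/// cannot be reached.
pub async fn object_size(bucket: &str, object: &str) -> Result<u64> {
    // implementation elided in this sketch
    unimplemented!()
}
```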
### 6. Import Guidelines

- Standard library imports first
- Third-party crate imports in the middle
- Project internal imports last
- Group `use` statements with blank lines between groups
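Grouped imports following this order (the project-internal path is illustrative):

```rust
// Standard library imports first
use std::sync::Arc;
use std::time::Duration;

// Third-party crate imports in the middle
use tokio::sync::RwLock;
use tracing::info;

// Project internal imports last
use rustfs_common::metrics::Metrics;
```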
## Asynchronous Programming Guidelines

### 1. Trait Definition

```rust
#[async_trait::async_trait]
pub trait StorageAPI: Send + Sync {
    async fn get_object(&self, bucket: &str, object: &str) -> Result<ObjectInfo>;
}
```

### 2. Error Handling

```rust
// Use ? operator to propagate errors
async fn example_function() -> Result<()> {
    let data = read_file("path").await?;
    process_data(data).await?;
    Ok(())
}
```

### 3. Concurrency Control

- Use `Arc` and `Mutex`/`RwLock` for shared state management
- Prioritize async locks from `tokio::sync`
- Avoid holding locks for long periods
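A sketch of scoping a `tokio::sync::RwLock` guard so it is not held across unrelated await points:

```rust
use std::sync::Arc;
use tokio::sync::RwLock;

async fn record_endpoint(endpoints: Arc<RwLock<Vec<String>>>, addr: String) {
    {
        // Hold the write guard only for the mutation itself
        let mut guard = endpoints.write().await;
        guard.push(addr);
    } // guard dropped here, before any further awaits

    let count = endpoints.read().await.len();
    tracing::info!("now tracking {count} endpoints");
}
```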
## Logging and Tracing Guidelines

### 1. Tracing Usage

```rust
#[tracing::instrument(skip(self, data))]
async fn process_data(&self, data: &[u8]) -> Result<()> {
    info!("Processing {} bytes", data.len());
    // Implementation logic
}
```

### 2. Log Levels

- `error!`: System errors requiring immediate attention
- `warn!`: Warning information that may affect functionality
- `info!`: Important business information
- `debug!`: Debug information for development use
- `trace!`: Detailed execution paths

### 3. Structured Logging

```rust
info!(
    counter.rustfs_api_requests_total = 1_u64,
    key_request_method = %request.method(),
    key_request_uri_path = %request.uri().path(),
    "API request processed"
);
```

## Error Handling Guidelines

### 1. Error Type Definition

```rust
// Use thiserror for module-specific error types
#[derive(thiserror::Error, Debug)]
pub enum MyError {
    #[error("IO error: {0}")]
    Io(#[from] std::io::Error),

    #[error("Storage error: {0}")]
    Storage(#[from] ecstore::error::StorageError),

    #[error("Custom error: {message}")]
    Custom { message: String },

    #[error("File not found: {path}")]
    FileNotFound { path: String },

    #[error("Invalid configuration: {0}")]
    InvalidConfig(String),
}

// Provide Result type alias for the module
pub type Result<T> = core::result::Result<T, MyError>;
```

### 2. Error Helper Methods

```rust
impl MyError {
    /// Create error from any compatible error type
    pub fn other<E>(error: E) -> Self
    where
        E: Into<Box<dyn std::error::Error + Send + Sync>>,
    {
        MyError::Io(std::io::Error::other(error))
    }
}
```

### 3. Error Context and Propagation

```rust
// Use ? operator for clean error propagation
async fn example_function() -> Result<()> {
    let data = read_file("path").await?;
    process_data(data).await?;
    Ok(())
}

// Add context to errors
fn process_with_context(path: &str) -> Result<()> {
    std::fs::read(path)
        .map_err(|e| MyError::Custom {
            message: format!("Failed to read {}: {}", path, e)
        })?;
    Ok(())
}
```

## Performance Optimization Guidelines

### 1. Memory Management

- Use `Bytes` instead of `Vec<u8>` for zero-copy operations
- Avoid unnecessary cloning; use reference passing
- Use `Arc` for sharing large objects
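For instance, slicing a `Bytes` value is zero-copy, so both halves below share one allocation (a minimal sketch):

```rust
use bytes::Bytes;

fn split_header(payload: Bytes) -> (Bytes, Bytes) {
    // slice() clones a reference-counted handle instead of copying data
    let header = payload.slice(0..16);
    let body = payload.slice(16..);
    (header, body)
}
```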
### 2. Concurrency Optimization

```rust
// Use join_all for concurrent operations
let futures = disks.iter().map(|disk| disk.operation());
let results = join_all(futures).await;
```

### 3. Caching Strategy

- Use `LazyLock` for global caching
- Implement LRU cache to avoid memory leaks
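A minimal global-cache sketch using `LazyLock` (the cache contents are illustrative; a bounded LRU would replace the plain map in practice):

```rust
use std::collections::HashMap;
use std::sync::{LazyLock, RwLock};

// Initialized lazily on first access, shared process-wide
static BUCKET_SIZE_CACHE: LazyLock<RwLock<HashMap<String, u64>>> =
    LazyLock::new(|| RwLock::new(HashMap::new()));

fn cache_bucket_size(bucket: &str, size: u64) {
    BUCKET_SIZE_CACHE.write().unwrap().insert(bucket.to_string(), size);
}
```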
## Testing Guidelines

### 1. Unit Tests

```rust
#[cfg(test)]
mod tests {
    use super::*;
    use test_case::test_case;

    #[tokio::test]
    async fn test_async_function() {
        let result = async_function().await;
        assert!(result.is_ok());
    }

    #[test_case("input1", "expected1")]
    #[test_case("input2", "expected2")]
    fn test_with_cases(input: &str, expected: &str) {
        assert_eq!(function(input), expected);
    }
}
```

### 2. Integration Tests

- Use the `e2e_test` module for end-to-end testing
- Simulate real storage environments

### 3. Test Quality Standards

- Write meaningful test cases that verify actual functionality
- Avoid placeholder or debug content like "debug 111", "test test", etc.
- Use descriptive test names that clearly indicate what is being tested
- Each test should have a clear purpose and verify specific behavior
- Test data should be realistic and representative of actual use cases

## Cross-Platform Compatibility Guidelines

### 1. CPU Architecture Compatibility

- **Always consider multi-platform and different CPU architecture compatibility** when writing code
- Support major architectures: x86_64, aarch64 (ARM64), and other target platforms
- Use conditional compilation for architecture-specific code:

```rust
#[cfg(target_arch = "x86_64")]
fn optimized_x86_64_function() { /* x86_64 specific implementation */ }

#[cfg(target_arch = "aarch64")]
fn optimized_aarch64_function() { /* ARM64 specific implementation */ }

#[cfg(not(any(target_arch = "x86_64", target_arch = "aarch64")))]
fn generic_function() { /* Generic fallback implementation */ }
```

### 2. Platform-Specific Dependencies

- Use feature flags for platform-specific dependencies
- Provide fallback implementations for unsupported platforms
- Test on multiple architectures in the CI/CD pipeline
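As a sketch, an optional dependency can be gated behind a feature with a fallback for builds that disable it (the feature name mirrors the `gpu` flag shown later in this document; the function is invented):

```rust
// Compiled only when the optional `gpu` feature is enabled
#[cfg(feature = "gpu")]
fn accelerated_checksum(data: &[u8]) -> u32 {
    // a GPU-backed implementation would be dispatched here
    data.iter().fold(0u32, |acc, &b| acc.wrapping_add(b as u32))
}

// Portable fallback for all other builds
#[cfg(not(feature = "gpu"))]
fn accelerated_checksum(data: &[u8]) -> u32 {
    data.iter().fold(0u32, |acc, &b| acc.wrapping_add(b as u32))
}
```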
### 3. Endianness Considerations

- Use explicit byte order conversion when dealing with binary data
- Prefer `to_le_bytes()`, `from_le_bytes()` for a consistent little-endian format
- Use the `byteorder` crate for complex binary format handling
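For example, lengths can be serialized in an explicit byte order so the on-disk format is identical on every host:

```rust
fn encode_length(len: u64) -> [u8; 8] {
    // Always little-endian on the wire, regardless of host byte order
    len.to_le_bytes()
}

fn decode_length(buf: [u8; 8]) -> u64 {
    u64::from_le_bytes(buf)
}
```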
### 4. SIMD and Performance Optimizations

- Use portable SIMD libraries like `wide` or `packed_simd`
- Provide fallback implementations for non-SIMD architectures
- Use runtime feature detection when appropriate
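A runtime-detection sketch using the standard library's `is_x86_feature_detected!` macro, with a portable fallback (the SIMD path itself is elided):

```rust
fn sum_bytes(data: &[u8]) -> u32 {
    #[cfg(target_arch = "x86_64")]
    {
        if is_x86_feature_detected!("avx2") {
            // an AVX2-optimized implementation would be dispatched here
        }
    }
    // Portable fallback for every architecture
    data.iter().fold(0u32, |acc, &b| acc.wrapping_add(b as u32))
}
```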
## Security Guidelines

### 1. Memory Safety

- Disable `unsafe` code (workspace.lints.rust.unsafe_code = "deny")
- Use `rustls` instead of `openssl`

### 2. Authentication and Authorization

```rust
// Use IAM system for permission checks
let identity = iam.authenticate(&access_key, &secret_key).await?;
iam.authorize(&identity, &action, &resource).await?;
```

## Configuration Management Guidelines

### 1. Environment Variables

- Use the `RUSTFS_` prefix
- Support both configuration files and environment variables
- Provide reasonable default values
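A sketch of reading a prefixed variable with a sane default (the variable name is illustrative):

```rust
fn console_enabled() -> bool {
    // RUSTFS_ prefix, defaulting to false when unset or unparsable
    std::env::var("RUSTFS_CONSOLE_ENABLE")
        .map(|v| v == "true" || v == "1")
        .unwrap_or(false)
}
```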
### 2. Configuration Structure

```rust
#[derive(Debug, Deserialize, Clone)]
pub struct Config {
    pub address: String,
    pub volumes: String,
    #[serde(default)]
    pub console_enable: bool,
}
```

## Dependency Management Guidelines

### 1. Workspace Dependencies

- Manage versions uniformly at the workspace level
- Use `workspace = true` to inherit configuration

### 2. Feature Flags

```toml
[features]
default = ["file"]
gpu = ["dep:nvml-wrapper"]
kafka = ["dep:rdkafka"]
```

## Deployment and Operations Guidelines

### 1. Containerization

- Provide Dockerfile and docker-compose configuration
- Support multi-stage builds to optimize image size

### 2. Observability

- Integrate OpenTelemetry for distributed tracing
- Support Prometheus metrics collection
- Provide Grafana dashboards

### 3. Health Checks

```rust
// Implement health check endpoint
async fn health_check() -> Result<HealthStatus> {
    // Check component status
}
```

## Code Review Checklist

### 1. **Code Formatting and Quality (MANDATORY)**

- [ ] **Code is properly formatted** (`cargo fmt --all --check` passes)
- [ ] **All clippy warnings are resolved** (`cargo clippy --all-targets --all-features -- -D warnings` passes)
- [ ] **Code compiles successfully** (`cargo check --all-targets` passes)
- [ ] **Pre-commit hooks are working** and all checks pass
- [ ] **No formatting-related changes** mixed with functional changes (separate commits)

### 2. Functionality

- [ ] Are all error cases properly handled?
- [ ] Is there appropriate logging?
- [ ] Is there necessary test coverage?

### 3. Performance

- [ ] Are unnecessary memory allocations avoided?
- [ ] Are async operations used correctly?
- [ ] Are there potential deadlock risks?

### 4. Security

- [ ] Are input parameters properly validated?
- [ ] Are there appropriate permission checks?
- [ ] Is information leakage avoided?

### 5. Cross-Platform Compatibility

- [ ] Does the code work on different CPU architectures (x86_64, aarch64)?
- [ ] Are platform-specific features properly gated with conditional compilation?
- [ ] Is byte order handling correct for binary data?
- [ ] Are there appropriate fallback implementations for unsupported platforms?

### 6. Code Commits and Documentation

- [ ] Does it comply with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)?
- [ ] Are commit messages concise, with the title line under 72 characters?
- [ ] Are commit titles concise and in English (not Chinese)?
- [ ] Is the PR description provided in copyable markdown format for easy copying?

## Common Patterns and Best Practices

### 1. Resource Management

```rust
// Use RAII pattern for resource management
pub struct ResourceGuard {
    resource: Resource,
}

impl Drop for ResourceGuard {
    fn drop(&mut self) {
        // Clean up resources
    }
}
```

### 2. Dependency Injection

```rust
// Use dependency injection pattern
pub struct Service {
    config: Arc<Config>,
    storage: Arc<dyn StorageAPI>,
}
```

### 3. Graceful Shutdown

```rust
// Implement graceful shutdown
async fn shutdown_gracefully(shutdown_rx: &mut Receiver<()>) {
    tokio::select! {
        _ = shutdown_rx.recv() => {
            info!("Received shutdown signal");
            // Perform cleanup operations
        }
        _ = tokio::time::sleep(SHUTDOWN_TIMEOUT) => {
            warn!("Shutdown timeout reached");
        }
    }
}
```

## Domain-Specific Guidelines

### 1. Storage Operations

- All storage operations must support erasure coding
- Implement read/write quorum mechanisms (see the sketch below)
- Support data integrity verification
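A minimal sketch of a write-quorum check (the majority threshold here is illustrative, not RustFS's exact policy):

```rust
/// Returns true when enough shards are online for a durable write.
fn has_write_quorum(total_shards: usize, online_shards: usize) -> bool {
    // A write must reach a majority of shards to be considered durable
    online_shards >= total_shards / 2 + 1
}

fn main() {
    assert!(has_write_quorum(12, 7));
    assert!(!has_write_quorum(12, 6));
}
```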
### 2. Network Communication

- Use gRPC for internal service communication
- HTTP/HTTPS support for the S3-compatible API
- Implement connection pooling and retry mechanisms

### 3. Metadata Management

- Use FlatBuffers for serialization
- Support version control and migration
- Implement metadata caching

## Branch Management and Development Workflow

### Branch Management

- **🚨 CRITICAL: NEVER modify code directly on main or master branch - THIS IS ABSOLUTELY FORBIDDEN 🚨**
- **⚠️ ANY DIRECT COMMITS TO MASTER/MAIN WILL BE REJECTED AND MUST BE REVERTED IMMEDIATELY ⚠️**
- **🔒 ALL CHANGES MUST GO THROUGH PULL REQUESTS - NO DIRECT COMMITS TO MAIN UNDER ANY CIRCUMSTANCES 🔒**
- **Always work on feature branches - NO EXCEPTIONS**
- Always check the .rules.md file before starting to ensure you understand the project guidelines
- **MANDATORY workflow for ALL changes:**
  1. `git checkout main` (switch to main branch)
  2. `git pull` (get latest changes)
  3. `git checkout -b feat/your-feature-name` (create and switch to feature branch)
  4. Make your changes ONLY on the feature branch
  5. Test thoroughly before committing
  6. Commit and push to the feature branch
  7. **Create a pull request for code review - THIS IS THE ONLY WAY TO MERGE TO MAIN**
  8. **Wait for PR approval before merging - NEVER merge your own PRs without review**
- Use descriptive branch names following the pattern: `feat/feature-name`, `fix/issue-name`, `refactor/component-name`, etc.
- **Double-check the current branch before ANY commit: run `git branch` to ensure you're NOT on main/master**
- **Pull Request Requirements:**
  - All changes must be submitted via PR regardless of size or urgency
  - PRs must include a comprehensive description and testing information
  - PRs must pass all CI/CD checks before merging
  - PRs require at least one approval from code reviewers
  - Even hotfixes and emergency changes must go through the PR process
- **Enforcement:**
  - The main branch should be protected with branch protection rules
  - Direct pushes to main should be blocked by repository settings
  - Any accidental direct commits to main must be immediately reverted via PR

### Development Workflow

## 🎯 **Core Development Principles**

- **🔴 Every change must be precise - don't modify unless you're confident**
  - Carefully analyze code logic and ensure complete understanding before making changes
  - When uncertain, prefer asking users or consulting documentation over blind modifications
  - Use small iterative steps; modify only the necessary parts at a time
  - Evaluate the impact scope before changes to ensure no new issues are introduced

- **🚀 GitHub PR creation prioritizes `gh` command usage**
  - Prefer the `gh pr create` command to create Pull Requests
  - Avoid having users manually create PRs through the web interface
  - Provide clear and professional PR titles and descriptions
  - Using `gh` commands ensures better integration and automation

## 📝 **Code Quality Requirements**

- Use English for all code comments, documentation, and variable names
- Write meaningful and descriptive names for variables, functions, and methods
- Avoid meaningless test content like "debug 111" or placeholder values
- Before each change, carefully read the existing code to ensure you understand its structure and implementation; do not break existing logic or introduce new issues
- Ensure each change provides sufficient test cases to guarantee code correctness
- Do not arbitrarily modify numbers and constants in test cases; carefully analyze their meaning to ensure test case correctness
- When writing or modifying tests, check existing test cases to ensure they have scientific naming and rigorous logic; if they do not, revise them so the tests are scientific and rigorous
- **Before committing any changes, run `cargo clippy --all-targets --all-features -- -D warnings` to ensure all code passes Clippy checks**
- After completing development, run `git add .` followed by `git commit -m "feat: feature description"` or `"fix: issue description"`, ensuring compliance with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)
- **Keep commit messages concise and under 72 characters** for the title line; use the body for detailed explanations if needed
- After completing development, push the branch to the remote repository with `git push`
- After completing each change, summarize it with a brief description instead of creating summary files, ensuring compliance with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)
- Provide the change description needed for the PR in the conversation, ensuring compliance with [Conventional Commits](https://www.conventionalcommits.org/en/v1.0.0/)
- **Always provide PR descriptions in English** after completing any changes, including:
  - A clear and concise title following the Conventional Commits format
  - A detailed description of what was changed and why
  - A list of key changes and improvements
  - Any breaking changes or migration notes, if applicable
  - Testing information and verification steps
- **Provide PR descriptions in copyable markdown format** enclosed in code blocks for easy one-click copying

## 🚫 AI Documentation Generation Restrictions

### Forbidden Summary Documents

- **Strictly forbidden to create any form of AI-generated summary documents**
- **Do not create documents containing large amounts of emoji, detailed formatting tables, and typical AI style**
- **Do not generate the following types of documents in the project:**
  - Benchmark summary documents (BENCHMARK*.md)
  - Implementation comparison analysis documents (IMPLEMENTATION_COMPARISON*.md)
  - Performance analysis report documents
  - Architecture summary documents
  - Feature comparison documents
  - Any documents with large amounts of emoji and formatted content
- **If documentation is needed, only create it when explicitly requested by the user, and maintain a concise and practical style**
- **Documentation should focus on actually needed information, avoiding excessive formatting and decorative content**
- **Any discovered AI-generated summary documents should be immediately deleted**

### Allowed Documentation Types

- README.md (project introduction, keep concise)
- Technical documentation (only create when explicitly needed)
- User manual (only create when explicitly needed)
- API documentation (generated from code)
- Changelog (CHANGELOG.md)

These rules should serve as guiding principles when developing the RustFS project, ensuring code quality, performance, and maintainability.
.vscode/launch.json (13 changed lines)
@@ -85,6 +85,19 @@
      "sourceLanguages": [
        "rust"
      ],
    },
    {
      "name": "Debug executable target/debug/test",
      "type": "lldb",
      "request": "launch",
      "program": "${workspaceFolder}/target/debug/deps/lifecycle_integration_test-5eb7590b8f3bea55",
      "args": [],
      "cwd": "${workspaceFolder}",
      //"stopAtEntry": false,
      //"preLaunchTask": "cargo build",
      "sourceLanguages": [
        "rust"
      ],
    }
  ]
}
CLAUDE.md (new file, 68 lines)
@@ -0,0 +1,68 @@
|
||||
# Claude AI Rules for RustFS Project
|
||||
|
||||
## Core Rules Reference
|
||||
|
||||
This project follows the comprehensive AI coding rules defined in `.rules.md`. Please refer to that file for the complete set of development guidelines, coding standards, and best practices.
|
||||
|
||||
## Claude-Specific Configuration
|
||||
|
||||
When using Claude for this project, ensure you:
|
||||
|
||||
1. **Review the unified rules**: Always check `.rules.md` for the latest project guidelines
|
||||
2. **Follow branch protection**: Never attempt to commit directly to main/master branch
|
||||
3. **Use English**: All code comments, documentation, and variable names must be in English
|
||||
4. **Clean code practices**: Only make modifications you're confident about
|
||||
5. **Test thoroughly**: Ensure all changes pass formatting, linting, and testing requirements
|
||||
6. **Clean up after yourself**: Remove any temporary scripts or test files created during the session
|
||||
|
||||
## Quick Reference
|
||||
|
||||
### Critical Rules
|
||||
- 🚫 **NEVER commit directly to main/master branch**
|
||||
- ✅ **ALWAYS work on feature branches**
|
||||
- 📝 **ALWAYS use English for code and documentation**
|
||||
- 🧹 **ALWAYS clean up temporary files after use**
|
||||
- 🎯 **ONLY make confident, necessary modifications**
|
||||
|
||||
### Pre-commit Checklist
|
||||
```bash
|
||||
# Before committing, always run:
|
||||
cargo fmt --all
|
||||
cargo clippy --all-targets --all-features -- -D warnings
|
||||
cargo check --all-targets
|
||||
cargo test
|
||||
```
|
||||
|
||||
### Branch Workflow
|
||||
```bash
|
||||
git checkout main
|
||||
git pull origin main
|
||||
git checkout -b feat/your-feature-name
|
||||
# Make your changes
|
||||
git add .
|
||||
git commit -m "feat: your feature description"
|
||||
git push origin feat/your-feature-name
|
||||
gh pr create
|
||||
```
|
||||
|
||||
## Claude-Specific Best Practices
|
||||
|
||||
1. **Task Analysis**: Always thoroughly analyze the task before starting implementation
|
||||
2. **Minimal Changes**: Make only the necessary changes to accomplish the task
|
||||
3. **Clear Communication**: Provide clear explanations of changes and their rationale
|
||||
4. **Error Prevention**: Verify code correctness before suggesting changes
|
||||
5. **Documentation**: Ensure all code changes are properly documented in English
|
||||
|
||||
## Important Notes
|
||||
|
||||
- This file serves as an entry point for Claude AI
|
||||
- All detailed rules and guidelines are maintained in `.rules.md`
|
||||
- Updates to coding standards should be made in `.rules.md` to ensure consistency across all AI tools
|
||||
- When in doubt, always refer to `.rules.md` for authoritative guidance
|
||||
- Claude should prioritize code quality, safety, and maintainability over speed
|
||||
|
||||
## See Also
|
||||
|
||||
- [.rules.md](./.rules.md) - Complete AI coding rules and guidelines
|
||||
- [CONTRIBUTING.md](./CONTRIBUTING.md) - Contribution guidelines
|
||||
- [README.md](./README.md) - Project overview and setup instructions
5199  Cargo.lock  generated

119  Cargo.toml
@@ -15,8 +15,8 @@
[workspace]
members = [
    "rustfs",                # Core file system implementation
    "cli/rustfs-gui",        # Graphical user interface client
    "crates/appauth",        # Application authentication and authorization
    "crates/audit-logger",   # Audit logging system for file operations
    "crates/common",         # Shared utilities and data structures
    "crates/config",         # Configuration management
    "crates/crypto",         # Cryptography and security features
@@ -30,13 +30,16 @@ members = [
    "crates/obs",            # Observability utilities
    "crates/protos",         # Protocol buffer definitions
    "crates/rio",            # Rust I/O utilities and abstractions
    "crates/targets",        # Target-specific configurations and utilities
    "crates/s3select-api",   # S3 Select API interface
    "crates/s3select-query", # S3 Select query engine
    "crates/signer",         # client signer
    "crates/checksums",      # client checksums
    "crates/utils",          # Utility functions and helpers
    "crates/workers",        # Worker thread pools and task scheduling
    "crates/zip",            # ZIP file handling and compression
    "crates/ahm",
    "crates/ahm",            # Asynchronous Hash Map for concurrent data structures
    "crates/mcp",            # MCP server for S3 operations
]
resolver = "2"

@@ -57,15 +60,11 @@ unsafe_code = "deny"
[workspace.lints.clippy]
all = "warn"

[patch.crates-io]
rustfs-utils = { path = "crates/utils" }
rustfs-filemeta = { path = "crates/filemeta" }
rustfs-rio = { path = "crates/rio" }

[workspace.dependencies]
rustfs-ahm = { path = "crates/ahm", version = "0.0.5" }
rustfs-s3select-api = { path = "crates/s3select-api", version = "0.0.5" }
rustfs-appauth = { path = "crates/appauth", version = "0.0.5" }
rustfs-audit-logger = { path = "crates/audit-logger", version = "0.0.5" }
rustfs-common = { path = "crates/common", version = "0.0.5" }
rustfs-crypto = { path = "crates/crypto", version = "0.0.5" }
rustfs-ecstore = { path = "crates/ecstore", version = "0.0.5" }
@@ -84,53 +83,55 @@ rustfs-utils = { path = "crates/utils", version = "0.0.5" }
rustfs-rio = { path = "crates/rio", version = "0.0.5" }
rustfs-filemeta = { path = "crates/filemeta", version = "0.0.5" }
rustfs-signer = { path = "crates/signer", version = "0.0.5" }
rustfs-checksums = { path = "crates/checksums", version = "0.0.5" }
rustfs-workers = { path = "crates/workers", version = "0.0.5" }
rustfs-mcp = { path = "crates/mcp", version = "0.0.5" }
rustfs-targets = { path = "crates/targets", version = "0.0.5" }
aes-gcm = { version = "0.10.3", features = ["std"] }
anyhow = "1.0.99"
arc-swap = "1.7.1"
argon2 = { version = "0.5.3", features = ["std"] }
atoi = "2.0.0"
async-channel = "2.5.0"
async-recursion = "1.1.1"
async-trait = "0.1.88"
async-compression = { version = "0.4.0" }
async-trait = "0.1.89"
async-compression = { version = "0.4.19" }
atomic_enum = "0.3.0"
aws-sdk-s3 = "1.96.0"
aws-config = { version = "1.8.6" }
aws-sdk-s3 = "1.101.0"
axum = "0.8.4"
axum-extra = "0.10.1"
axum-server = { version = "0.7.2", features = ["tls-rustls"] }
base64-simd = "0.8.0"
base64 = "0.22.1"
brotli = "8.0.1"
brotli = "8.0.2"
bytes = { version = "1.10.1", features = ["serde"] }
bytesize = "2.0.1"
byteorder = "1.5.0"
cfg-if = "1.0.1"
cfg-if = "1.0.3"
crc-fast = "1.4.0"
chacha20poly1305 = { version = "0.10.1" }
chrono = { version = "0.4.41", features = ["serde"] }
clap = { version = "4.5.41", features = ["derive", "env"] }
const-str = { version = "0.6.2", features = ["std", "proc"] }
crc32fast = "1.4.2"
criterion = { version = "0.5", features = ["html_reports"] }
clap = { version = "4.5.46", features = ["derive", "env"] }
const-str = { version = "0.6.4", features = ["std", "proc"] }
crc32fast = "1.5.0"
criterion = { version = "0.7", features = ["html_reports"] }
dashmap = "6.1.0"
datafusion = "46.0.1"
derive_builder = "0.20.2"
dioxus = { version = "0.6.3", features = ["router"] }
dirs = "6.0.0"
enumset = "1.1.7"
enumset = "1.1.10"
flatbuffers = "25.2.10"
flate2 = "1.1.2"
flexi_logger = { version = "0.31.2", features = ["trc", "dont_minimize_extra_stacks"] }
form_urlencoded = "1.2.1"
form_urlencoded = "1.2.2"
futures = "0.3.31"
futures-core = "0.3.31"
futures-util = "0.3.31"
glob = "0.3.2"
glob = "0.3.3"
hex = "0.4.3"
hex-simd = "0.8.0"
highway = { version = "1.3.0" }
hmac = "0.12.1"
hyper = "1.6.0"
hyper-util = { version = "0.1.15", features = [
hyper = "1.7.0"
hyper-util = { version = "0.1.16", features = [
    "tokio",
    "server-auto",
    "server-graceful",
@@ -141,11 +142,6 @@ http-body = "1.0.1"
humantime = "2.2.0"
ipnetwork = { version = "0.21.1", features = ["serde"] }
jsonwebtoken = "9.3.1"
keyring = { version = "3.6.2", features = [
    "apple-native",
    "windows-native",
    "sync-secret-service",
] }
lazy_static = "1.5.0"
libsystemd = { version = "0.7.2" }
local-ip-address = "0.6.5"
@@ -179,15 +175,16 @@ path-absolutize = "3.1.1"
path-clean = "1.0.1"
blake3 = { version = "1.8.2" }
pbkdf2 = "0.12.2"
percent-encoding = "2.3.1"
percent-encoding = "2.3.2"
pin-project-lite = "0.2.16"
prost = "0.13.5"
quick-xml = "0.38.0"
rand = "0.9.1"
prost = "0.14.1"
pretty_assertions = "1.4.1"
quick-xml = "0.38.3"
rand = "0.9.2"
rdkafka = { version = "0.38.0", features = ["tokio"] }
reed-solomon-simd = { version = "3.0.1" }
regex = { version = "1.11.1" }
reqwest = { version = "0.12.22", default-features = false, features = [
regex = { version = "1.11.2" }
reqwest = { version = "0.12.23", default-features = false, features = [
    "rustls-tls",
    "charset",
    "http2",
@@ -196,67 +193,64 @@ reqwest = { version = "0.12.22", default-features = false, features = [
    "json",
    "blocking",
] }
rfd = { version = "0.15.3", default-features = false, features = [
    "xdg-portal",
    "tokio",
] }
rmcp = { version = "0.6.1" }
rmp = "0.8.14"
rmp-serde = "1.3.0"
rsa = "0.9.8"
rumqttc = { version = "0.24" }
rust-embed = { version = "8.7.2" }
rust-i18n = { version = "3.1.5" }
rustfs-rsc = "2025.506.1"
rustls = { version = "0.23.29" }
rustls = { version = "0.23.31" }
rustls-pki-types = "1.12.0"
rustls-pemfile = "2.2.0"
s3s = { version = "0.12.0-minio-preview.2" }
shadow-rs = { version = "1.2.0", default-features = false }
s3s = { version = "0.12.0-minio-preview.3" }
schemars = "1.0.4"
serde = { version = "1.0.219", features = ["derive"] }
serde_json = { version = "1.0.140", features = ["raw_value"] }
serde-xml-rs = "0.8.1"
serde_json = { version = "1.0.143", features = ["raw_value"] }
serde_urlencoded = "0.7.1"
serial_test = "3.2.0"
sha1 = "0.10.6"
sha2 = "0.10.9"
shadow-rs = { version = "1.3.0", default-features = false }
siphasher = "1.0.1"
smallvec = { version = "1.15.1", features = ["serde"] }
snafu = "0.8.6"
snafu = "0.8.8"
snap = "1.1.1"
socket2 = "0.6.0"
strum = { version = "0.27.1", features = ["derive"] }
sysinfo = "0.36.0"
strum = { version = "0.27.2", features = ["derive"] }
sysinfo = "0.37.0"
sysctl = "0.6.0"
tempfile = "3.20.0"
tempfile = "3.21.0"
temp-env = "0.3.6"
test-case = "3.3.1"
thiserror = "2.0.12"
time = { version = "0.3.41", features = [
thiserror = "2.0.16"
time = { version = "0.3.42", features = [
    "std",
    "parsing",
    "formatting",
    "macros",
    "serde",
] }
tokio = { version = "1.46.1", features = ["fs", "rt-multi-thread"] }
tokio = { version = "1.47.1", features = ["fs", "rt-multi-thread"] }
tokio-rustls = { version = "0.26.2", default-features = false }
tokio-stream = { version = "0.1.17" }
tokio-tar = "0.3.1"
tokio-test = "0.4.4"
tokio-util = { version = "0.7.15", features = ["io", "compat"] }
tonic = { version = "0.13.1", features = ["gzip"] }
tonic-build = { version = "0.13.1" }
tokio-util = { version = "0.7.16", features = ["io", "compat"] }
tonic = { version = "0.14.1", features = ["gzip"] }
tonic-prost = { version = "0.14.1" }
tonic-prost-build = { version = "0.14.1" }
tower = { version = "0.5.2", features = ["timeout"] }
tower-http = { version = "0.6.6", features = ["cors"] }
tracing = "0.1.41"
tracing-core = "0.1.34"
tracing-error = "0.2.1"
tracing-subscriber = { version = "0.3.19", features = ["env-filter", "time"] }
tracing-appender = "0.2.3"
tracing-opentelemetry = "0.31.0"
tracing-subscriber = { version = "0.3.20", features = ["env-filter", "time"] }
transform-stream = "0.3.1"
url = "2.5.4"
url = "2.5.7"
urlencoding = "2.1.3"
uuid = { version = "1.17.0", features = [
uuid = { version = "1.18.0", features = [
    "v4",
    "fast-rng",
    "macro-diagnostics",
@@ -266,7 +260,10 @@ winapi = { version = "0.3.9" }
xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }
zip = "2.4.2"
zstd = "0.13.3"
anyhow = "1.0.98"

[workspace.metadata.cargo-shear]
ignored = ["rustfs", "rust-i18n", "rustfs-mcp", "rustfs-audit-logger", "tokio-test"]

[profile.wasm-dev]
inherits = "dev"
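The long run of version bumps above is the kind of change usually produced mechanically rather than edited by hand. A hedged sketch of one way to reproduce such bumps, assuming the third-party `cargo-edit` plugin (not referenced anywhere in this diff):

```bash
# Sketch only: bump workspace dependency requirements, then refresh the lockfile.
cargo install cargo-edit     # provides the `cargo upgrade` subcommand
cargo upgrade --incompatible # rewrite version requirements in Cargo.toml
cargo update                 # regenerate Cargo.lock (the 5199-line change above)
cargo check --all-targets    # confirm the workspace still compiles
```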
168  Dockerfile
@@ -1,129 +1,85 @@
# Multi-stage build for RustFS production image
FROM alpine:latest AS build
FROM alpine:3.22 AS build

# Build arguments
ARG TARGETARCH
ARG RELEASE=latest
ARG CHANNEL=release

# Install dependencies for downloading and verifying binaries
RUN apk add --no-cache \
    ca-certificates \
    curl \
    bash \
    wget \
    unzip \
    jq

# Create build directory
RUN apk add --no-cache ca-certificates curl unzip
WORKDIR /build

# Map TARGETARCH to architecture format used in builds
RUN case "${TARGETARCH}" in \
        "amd64") ARCH="x86_64" ;; \
        "arm64") ARCH="aarch64" ;; \
        *) echo "Unsupported architecture: ${TARGETARCH}" && exit 1 ;; \
    esac && \
    echo "ARCH=${ARCH}" > /build/arch.env

# Download rustfs binary from dl.rustfs.com
RUN . /build/arch.env && \
    BASE_URL="https://dl.rustfs.com/artifacts/rustfs" && \
    PLATFORM="linux" && \
    if [ "${RELEASE}" = "latest" ]; then \
        # Download latest version from specified channel \
        if [ "${CHANNEL}" = "dev" ]; then \
            PACKAGE_NAME="rustfs-${PLATFORM}-${ARCH}-dev-latest.zip"; \
            DOWNLOAD_URL="${BASE_URL}/dev/${PACKAGE_NAME}"; \
            echo "📥 Downloading latest dev build: ${PACKAGE_NAME}"; \
        else \
            PACKAGE_NAME="rustfs-${PLATFORM}-${ARCH}-latest.zip"; \
            DOWNLOAD_URL="${BASE_URL}/release/${PACKAGE_NAME}"; \
            echo "📥 Downloading latest release build: ${PACKAGE_NAME}"; \
        fi; \
RUN set -eux; \
    case "$TARGETARCH" in \
        amd64) ARCH_SUBSTR="x86_64-musl" ;; \
        arm64) ARCH_SUBSTR="aarch64-musl" ;; \
        *) echo "Unsupported TARGETARCH=$TARGETARCH" >&2; exit 1 ;; \
    esac; \
    if [ "$RELEASE" = "latest" ]; then \
        TAG="$(curl -fsSL https://api.github.com/repos/rustfs/rustfs/releases \
            | grep -o '"tag_name": "[^"]*"' | cut -d'"' -f4 | head -n 1)"; \
    else \
        # Download specific version (always from release channel) \
        PACKAGE_NAME="rustfs-${PLATFORM}-${ARCH}-v${RELEASE}.zip"; \
        DOWNLOAD_URL="${BASE_URL}/release/${PACKAGE_NAME}"; \
        echo "📥 Downloading specific version: ${PACKAGE_NAME}"; \
    fi && \
    echo "🔗 Download URL: ${DOWNLOAD_URL}" && \
    curl -f -L "${DOWNLOAD_URL}" -o /build/rustfs.zip && \
    if [ ! -f /build/rustfs.zip ] || [ ! -s /build/rustfs.zip ]; then \
        echo "❌ Failed to download binary package"; \
        echo "💡 Make sure the package ${PACKAGE_NAME} exists"; \
        echo "🔗 Check: ${DOWNLOAD_URL}"; \
        exit 1; \
    fi && \
    unzip /build/rustfs.zip -d /build && \
    chmod +x /build/rustfs && \
    rm /build/rustfs.zip && \
    echo "✅ Successfully downloaded and extracted rustfs binary"
        TAG="$RELEASE"; \
    fi; \
    echo "Using tag: $TAG (arch pattern: $ARCH_SUBSTR)"; \
    # Find the download URL in the assets list for this tag that contains the arch substring and ends with .zip
    URL="$(curl -fsSL "https://api.github.com/repos/rustfs/rustfs/releases/tags/$TAG" \
        | grep -o "\"browser_download_url\": \"[^\"]*${ARCH_SUBSTR}[^\"]*\\.zip\"" \
        | cut -d'"' -f4 | head -n 1)"; \
    if [ -z "$URL" ]; then echo "Failed to locate release asset for $ARCH_SUBSTR at tag $TAG" >&2; exit 1; fi; \
    echo "Downloading: $URL"; \
    curl -fL "$URL" -o rustfs.zip; \
    unzip -q rustfs.zip -d /build; \
    # If the binary is not in the zip root, locate it inside the archive and move it to /build/rustfs
    if [ ! -x /build/rustfs ]; then \
        BIN_PATH="$(unzip -Z -1 rustfs.zip | grep -E '(^|/)rustfs$' | head -n 1 || true)"; \
        if [ -n "$BIN_PATH" ]; then \
            mkdir -p /build/.tmp && unzip -q rustfs.zip "$BIN_PATH" -d /build/.tmp && \
            mv "/build/.tmp/$BIN_PATH" /build/rustfs; \
        fi; \
    fi; \
    [ -x /build/rustfs ] || { echo "rustfs binary not found in asset" >&2; exit 1; }; \
    chmod +x /build/rustfs; \
    rm -rf rustfs.zip /build/.tmp || true

# Runtime stage
FROM alpine:latest

# Set build arguments and labels
FROM alpine:3.22

ARG RELEASE=latest
ARG CHANNEL=release
ARG BUILD_DATE
ARG VCS_REF

LABEL name="RustFS" \
    vendor="RustFS Team" \
    maintainer="RustFS Team <dev@rustfs.com>" \
    version="${RELEASE}" \
    release="${RELEASE}" \
    channel="${CHANNEL}" \
    build-date="${BUILD_DATE}" \
    vcs-ref="${VCS_REF}" \
    summary="RustFS is a high-performance distributed object storage system written in Rust, compatible with S3 API." \
    description="RustFS is a high-performance distributed object storage software built using Rust. It supports erasure coding storage, multi-tenant management, observability, and other enterprise-level features." \
    url="https://rustfs.com" \
    license="Apache-2.0"
    vendor="RustFS Team" \
    maintainer="RustFS Team <dev@rustfs.com>" \
    version="v${RELEASE#v}" \
    release="${RELEASE}" \
    build-date="${BUILD_DATE}" \
    vcs-ref="${VCS_REF}" \
    summary="High-performance distributed object storage system compatible with S3 API" \
    description="RustFS is a distributed object storage system written in Rust, supporting erasure coding, multi-tenant management, and observability." \
    url="https://rustfs.com" \
    license="Apache-2.0"

# Install runtime dependencies
RUN apk add --no-cache \
    ca-certificates \
    curl \
    tzdata \
    bash \
    && addgroup -g 1000 rustfs \
    && adduser -u 1000 -G rustfs -s /bin/sh -D rustfs
RUN apk add --no-cache ca-certificates coreutils

# Environment variables
ENV RUSTFS_ACCESS_KEY=rustfsadmin \
    RUSTFS_SECRET_KEY=rustfsadmin \
    RUSTFS_ADDRESS=":9000" \
    RUSTFS_CONSOLE_ENABLE=true \
    RUSTFS_VOLUMES=/data \
    RUST_LOG=warn

# Set permissions for /usr/bin (similar to MinIO's approach)
RUN chmod -R 755 /usr/bin

# Copy CA certificates and binaries from build stage
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=build /build/rustfs /usr/bin/
COPY --from=build /build/rustfs /usr/bin/rustfs
COPY entrypoint.sh /entrypoint.sh

# Set executable permissions
RUN chmod +x /usr/bin/rustfs
RUN chmod +x /usr/bin/rustfs /entrypoint.sh && \
    mkdir -p /data /logs && \
    chmod 0750 /data /logs

# Create data directory
RUN mkdir -p /data /config && chown -R rustfs:rustfs /data /config
ENV RUSTFS_ADDRESS=":9000" \
    RUSTFS_ACCESS_KEY="rustfsadmin" \
    RUSTFS_SECRET_KEY="rustfsadmin" \
    RUSTFS_CONSOLE_ENABLE="true" \
    RUSTFS_VOLUMES="/data" \
    RUST_LOG="warn" \
    RUSTFS_OBS_LOG_DIRECTORY="/logs" \
    RUSTFS_SINKS_FILE_PATH="/logs"

# Switch to non-root user
USER rustfs

# Set working directory
WORKDIR /data

# Expose port
EXPOSE 9000
VOLUME ["/data", "/logs"]

ENTRYPOINT ["/entrypoint.sh"]

# Volume for data
VOLUME ["/data"]

# Set entrypoint
ENTRYPOINT ["/usr/bin/rustfs"]
CMD ["rustfs"]
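For reference, a couple of hypothetical invocations of the production Dockerfile above. The image tags and registry are placeholders; `RELEASE`, `BUILD_DATE`, and `VCS_REF` are the build args the file actually declares, and `TARGETARCH` is supplied automatically by BuildKit:

```bash
# Multi-arch build, resolving the newest GitHub release tag at build time
docker buildx build --platform linux/amd64,linux/arm64 \
  --build-arg RELEASE=latest \
  --build-arg BUILD_DATE="$(date -u +%Y-%m-%dT%H:%M:%SZ)" \
  --build-arg VCS_REF="$(git rev-parse --short HEAD)" \
  -t example.registry/rustfs:latest --push .

# Single-arch build pinned to a specific release tag (the tag name is illustrative)
docker build --build-arg RELEASE=1.0.0-alpha.1 -t rustfs:1.0.0-alpha.1 .
```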
Dockerfile.source
@@ -1,80 +1,88 @@
# Multi-stage Dockerfile for RustFS
# Supports cross-compilation for amd64 and arm64 architectures
# syntax=docker/dockerfile:1.6
# Multi-stage Dockerfile for RustFS - LOCAL DEVELOPMENT ONLY
#
# IMPORTANT: This Dockerfile builds RustFS from source for local development and testing.
# CI/CD uses the production Dockerfile with prebuilt binaries instead.
#
# Example:
#   docker build -f Dockerfile.source -t rustfs:dev-local .
#   docker run --rm -p 9000:9000 rustfs:dev-local
#
# Supports cross-compilation for amd64 and arm64 via TARGETPLATFORM.

ARG TARGETPLATFORM
ARG BUILDPLATFORM

# -----------------------------
# Build stage
FROM --platform=$BUILDPLATFORM rust:1.88-bookworm AS builder
# -----------------------------
FROM rust:1.88-bookworm AS builder

# Install required build dependencies
RUN apt-get update && apt-get install -y \
    wget \
    git \
# Re-declare args after FROM
ARG TARGETPLATFORM
ARG BUILDPLATFORM

# Debug: print platforms
RUN echo "Build info -> BUILDPLATFORM=${BUILDPLATFORM}, TARGETPLATFORM=${TARGETPLATFORM}"

# Install build toolchain and headers
# Use distro packages for protoc/flatc to avoid host-arch mismatch
RUN set -eux; \
    export DEBIAN_FRONTEND=noninteractive; \
    apt-get update; \
    apt-get install -y --no-install-recommends \
        build-essential \
        ca-certificates \
        curl \
        unzip \
        gcc \
        git \
        pkg-config \
        libssl-dev \
        lld \
        && rm -rf /var/lib/apt/lists/*
        protobuf-compiler \
        flatbuffers-compiler; \
    rm -rf /var/lib/apt/lists/*

# Install sccache for Rust compilation caching
RUN wget https://github.com/mozilla/sccache/releases/download/v0.10.0/sccache-dist-v0.10.0-x86_64-unknown-linux-musl.tar.gz \
    && tar -xzf sccache-dist-v0.10.0-x86_64-unknown-linux-musl.tar.gz \
    && mv sccache-dist-v0.10.0-x86_64-unknown-linux-musl/sccache-dist /usr/local/bin/sccache \
    && chmod +x /usr/local/bin/sccache \
    && rm -rf sccache-dist-v0.10.0-x86_64-unknown-linux-musl.tar.gz sccache-dist-v0.10.0-x86_64-unknown-linux-musl

# Set up sccache environment
ENV RUSTC_WRAPPER=sccache \
    SCCACHE_DIR=/tmp/sccache \
    SCCACHE_CACHE_SIZE=2G

# Install cross-compilation tools for ARM64
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
    apt-get update && \
    apt-get install -y gcc-aarch64-linux-gnu && \
    rm -rf /var/lib/apt/lists/*; \
# Optional: cross toolchain for aarch64 (only when targeting linux/arm64)
RUN set -eux; \
    if [ "${TARGETPLATFORM:-linux/amd64}" = "linux/arm64" ]; then \
        export DEBIAN_FRONTEND=noninteractive; \
        apt-get update; \
        apt-get install -y --no-install-recommends gcc-aarch64-linux-gnu; \
        rm -rf /var/lib/apt/lists/*; \
    fi

# Install protoc
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
    && unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
    && mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
    && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3

# Install flatc
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
    && unzip Linux.flatc.binary.g++-13.zip \
    && mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip

# Set up Rust targets based on platform
RUN case "$TARGETPLATFORM" in \
        "linux/amd64") rustup target add x86_64-unknown-linux-gnu ;; \
        "linux/arm64") rustup target add aarch64-unknown-linux-gnu ;; \
        *) echo "Unsupported platform: $TARGETPLATFORM" && exit 1 ;; \
# Add Rust targets based on TARGETPLATFORM
RUN set -eux; \
    case "${TARGETPLATFORM:-linux/amd64}" in \
        linux/amd64) rustup target add x86_64-unknown-linux-gnu ;; \
        linux/arm64) rustup target add aarch64-unknown-linux-gnu ;; \
        *) echo "Unsupported TARGETPLATFORM=${TARGETPLATFORM}" >&2; exit 1 ;; \
    esac

# Set up environment for cross-compilation
# Cross-compilation environment (used only when targeting aarch64)
ENV CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc
ENV CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc
ENV CXX_aarch64_unknown_linux_gnu=aarch64-linux-gnu-g++

WORKDIR /usr/src/rustfs

# Copy cargo configuration for optimized builds
COPY Cargo.toml ./.cargo/config.toml

# Copy Cargo files for dependency caching
# Layered copy to maximize caching:
# 1) top-level manifests
COPY Cargo.toml Cargo.lock ./
COPY */Cargo.toml ./*/
# 2) workspace member manifests (adjust if workspace layout changes)
COPY rustfs/Cargo.toml rustfs/Cargo.toml
COPY crates/*/Cargo.toml crates/
COPY cli/rustfs-gui/Cargo.toml cli/rustfs-gui/Cargo.toml

# Create dummy main.rs files for dependency compilation
RUN find . -name "Cargo.toml" -not -path "./Cargo.toml" | \
    xargs -I {} dirname {} | \
    xargs -I {} sh -c 'mkdir -p {}/src && echo "fn main() {}" > {}/src/main.rs'
# Pre-fetch dependencies for better caching
RUN --mount=type=cache,target=/usr/local/cargo/registry \
    --mount=type=cache,target=/usr/local/cargo/git \
    cargo fetch --locked || true

# Configure cargo for optimized builds
# 3) copy full sources (this is the main cache invalidation point)
COPY . .

# Cargo build configuration for lean release artifacts
ENV CARGO_NET_GIT_FETCH_WITH_CLI=true \
    CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse \
    CARGO_INCREMENTAL=0 \
@@ -82,74 +90,92 @@ ENV CARGO_NET_GIT_FETCH_WITH_CLI=true \
    CARGO_PROFILE_RELEASE_SPLIT_DEBUGINFO=off \
    CARGO_PROFILE_RELEASE_STRIP=symbols

# Build dependencies only (cache layer) with optimizations
RUN sccache --start-server 2>/dev/null || true && \
    case "$TARGETPLATFORM" in \
        "linux/amd64") cargo build --release --target x86_64-unknown-linux-gnu -j $(nproc) ;; \
        "linux/arm64") cargo build --release --target aarch64-unknown-linux-gnu -j $(nproc) ;; \
# Generate protobuf/flatbuffers code (uses protoc/flatc from distro)
RUN --mount=type=cache,target=/usr/local/cargo/registry \
    --mount=type=cache,target=/usr/local/cargo/git \
    --mount=type=cache,target=/usr/src/rustfs/target \
    cargo run --bin gproto

# Build RustFS (target depends on TARGETPLATFORM)
RUN --mount=type=cache,target=/usr/local/cargo/registry \
    --mount=type=cache,target=/usr/local/cargo/git \
    --mount=type=cache,target=/usr/src/rustfs/target \
    set -eux; \
    case "${TARGETPLATFORM:-linux/amd64}" in \
        linux/amd64) \
            echo "Building for x86_64-unknown-linux-gnu"; \
            cargo build --release --locked --target x86_64-unknown-linux-gnu --bin rustfs -j "$(nproc)"; \
            install -m 0755 target/x86_64-unknown-linux-gnu/release/rustfs /usr/local/bin/rustfs \
            ;; \
        linux/arm64) \
            echo "Building for aarch64-unknown-linux-gnu"; \
            cargo build --release --locked --target aarch64-unknown-linux-gnu --bin rustfs -j "$(nproc)"; \
            install -m 0755 target/aarch64-unknown-linux-gnu/release/rustfs /usr/local/bin/rustfs \
            ;; \
        *) \
            echo "Unsupported TARGETPLATFORM=${TARGETPLATFORM}" >&2; exit 1 \
            ;; \
    esac

# Copy source code
COPY . .

# Generate protobuf code
RUN cargo run --bin gproto

# Build the actual application with optimizations
RUN sccache --start-server 2>/dev/null || true && \
    case "$TARGETPLATFORM" in \
        "linux/amd64") \
            cargo build --release --target x86_64-unknown-linux-gnu --bin rustfs -j $(nproc) && \
            cp target/x86_64-unknown-linux-gnu/release/rustfs /usr/local/bin/rustfs \
            ;; \
        "linux/arm64") \
            cargo build --release --target aarch64-unknown-linux-gnu --bin rustfs -j $(nproc) && \
            cp target/aarch64-unknown-linux-gnu/release/rustfs /usr/local/bin/rustfs \
            ;; \
    esac && \
    sccache --show-stats || true

# Runtime stage - Ubuntu minimal for better compatibility
# -----------------------------
# Runtime stage (Ubuntu minimal)
# -----------------------------
FROM ubuntu:22.04

# Install runtime dependencies
RUN apt-get update && apt-get install -y \
ARG BUILD_DATE
ARG VCS_REF

LABEL name="RustFS (dev-local)" \
    maintainer="RustFS Team" \
    build-date="${BUILD_DATE}" \
    vcs-ref="${VCS_REF}" \
    description="RustFS - local development image built from source (NOT for production)."

# Minimal runtime deps: certificates + tzdata + coreutils (for chroot --userspec)
RUN set -eux; \
    export DEBIAN_FRONTEND=noninteractive; \
    apt-get update; \
    apt-get install -y --no-install-recommends \
        ca-certificates \
        tzdata \
        wget \
        && rm -rf /var/lib/apt/lists/*
        coreutils; \
    rm -rf /var/lib/apt/lists/*

# Create rustfs user and group
RUN groupadd -g 1000 rustfs && \
    useradd -d /app -g rustfs -u 1000 -s /bin/bash rustfs
# Create a conventional runtime user/group (final switch happens in entrypoint via chroot --userspec)
RUN set -eux; \
    groupadd -g 1000 rustfs; \
    useradd -u 1000 -g rustfs -M -s /usr/sbin/nologin rustfs

WORKDIR /app

# Create data directories
RUN mkdir -p /data/rustfs{0,1,2,3} && \
    chown -R rustfs:rustfs /data /app
# Prepare data/log directories with sane defaults
RUN set -eux; \
    mkdir -p /data /logs; \
    chown -R rustfs:rustfs /data /logs /app; \
    chmod 0750 /data /logs

# Copy binary from builder stage
COPY --from=builder /usr/local/bin/rustfs /app/rustfs
RUN chmod +x /app/rustfs && chown rustfs:rustfs /app/rustfs
# Copy the freshly built binary and the entrypoint
COPY --from=builder /usr/local/bin/rustfs /usr/bin/rustfs
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /usr/bin/rustfs /entrypoint.sh

# Switch to non-root user
USER rustfs
# Default environment (override in docker run/compose as needed)
ENV RUSTFS_ADDRESS=":9000" \
    RUSTFS_ACCESS_KEY="rustfsadmin" \
    RUSTFS_SECRET_KEY="rustfsadmin" \
    RUSTFS_CONSOLE_ENABLE="true" \
    RUSTFS_VOLUMES="/data" \
    RUST_LOG="warn" \
    RUSTFS_OBS_LOG_DIRECTORY="/logs" \
    RUSTFS_SINKS_FILE_PATH="/logs" \
    RUSTFS_USERNAME="rustfs" \
    RUSTFS_GROUPNAME="rustfs" \
    RUSTFS_UID="1000" \
    RUSTFS_GID="1000"

# Expose ports
EXPOSE 9000
VOLUME ["/data", "/logs"]

# Environment variables
ENV RUSTFS_ACCESS_KEY=rustfsadmin \
    RUSTFS_SECRET_KEY=rustfsadmin \
    RUSTFS_ADDRESS=":9000" \
    RUSTFS_CONSOLE_ENABLE=true \
    RUSTFS_VOLUMES=/data \
    RUST_LOG=warn

# Volume for data
VOLUME ["/data"]

# Set default command
CMD ["/app/rustfs"]
# Keep root here; entrypoint will drop privileges using chroot --userspec
ENTRYPOINT ["/entrypoint.sh"]
CMD ["/usr/bin/rustfs"]
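Both Dockerfiles copy an `entrypoint.sh` that is not itself part of this diff. A minimal sketch of the privilege-drop pattern the comments describe, assuming the `RUSTFS_UID`/`RUSTFS_GID` defaults set above (the real script may differ):

```bash
#!/bin/sh
# Sketch: fix volume ownership while still root, then drop privileges and exec
# the server. `chroot --userspec` comes from coreutils, installed in both images.
set -eu
chown -R "${RUSTFS_UID:-1000}:${RUSTFS_GID:-1000}" /data /logs 2>/dev/null || true
exec chroot --userspec="${RUSTFS_UID:-1000}:${RUSTFS_GID:-1000}" / "$@"
```

Here `"$@"` is the image's CMD (`/usr/bin/rustfs`), so the container still ends up running the server as UID 1000 even though the entrypoint starts as root.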
258  Justfile  Normal file
@@ -0,0 +1,258 @@
DOCKER_CLI := env("DOCKER_CLI", "docker")
IMAGE_NAME := env("IMAGE_NAME", "rustfs:v1.0.0")
DOCKERFILE_SOURCE := env("DOCKERFILE_SOURCE", "Dockerfile.source")
DOCKERFILE_PRODUCTION := env("DOCKERFILE_PRODUCTION", "Dockerfile")
CONTAINER_NAME := env("CONTAINER_NAME", "rustfs-dev")

[group("📒 Help")]
[private]
default:
    @just --list --list-heading $'🦀 RustFS justfile manual page:\n'

[doc("show help")]
[group("📒 Help")]
help: default

[doc("run `cargo fmt` to format code")]
[group("👆 Code Quality")]
fmt:
    @echo "🔧 Formatting code..."
    cargo fmt --all

[doc("run `cargo fmt` in check mode")]
[group("👆 Code Quality")]
fmt-check:
    @echo "📝 Checking code formatting..."
    cargo fmt --all --check

[doc("run `cargo clippy`")]
[group("👆 Code Quality")]
clippy:
    @echo "🔍 Running clippy checks..."
    cargo clippy --all-targets --all-features --fix --allow-dirty -- -D warnings

[doc("run `cargo check`")]
[group("👆 Code Quality")]
check:
    @echo "🔨 Running compilation check..."
    cargo check --all-targets

[doc("run `cargo test`")]
[group("👆 Code Quality")]
test:
    @echo "🧪 Running tests..."
    cargo nextest run --all --exclude e2e_test
    cargo test --all --doc

[doc("run `fmt` `clippy` `check` `test` at once")]
[group("👆 Code Quality")]
pre-commit: fmt clippy check test
    @echo "✅ All pre-commit checks passed!"

[group("🤔 Git")]
setup-hooks:
    @echo "🔧 Setting up git hooks..."
    chmod +x .git/hooks/pre-commit
    @echo "✅ Git hooks setup complete!"

[doc("use `release` mode for building")]
[group("🔨 Build")]
build:
    @echo "🔨 Building RustFS using build-rustfs.sh script..."
    ./build-rustfs.sh

[doc("use `debug` mode for building")]
[group("🔨 Build")]
build-dev:
    @echo "🔨 Building RustFS in development mode..."
    ./build-rustfs.sh --dev

[group("🔨 Build")]
[private]
build-target target:
    @echo "🔨 Building rustfs for {{ target }}..."
    @echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
    ./build-rustfs.sh --platform {{ target }}

[doc("use `x86_64-unknown-linux-musl` target for building")]
[group("🔨 Build")]
build-musl: (build-target "x86_64-unknown-linux-musl")

[doc("use `x86_64-unknown-linux-gnu` target for building")]
[group("🔨 Build")]
build-gnu: (build-target "x86_64-unknown-linux-gnu")

[doc("use `aarch64-unknown-linux-musl` target for building")]
[group("🔨 Build")]
build-musl-arm64: (build-target "aarch64-unknown-linux-musl")

[doc("use `aarch64-unknown-linux-gnu` target for building")]
[group("🔨 Build")]
build-gnu-arm64: (build-target "aarch64-unknown-linux-gnu")

[doc("build and deploy to server")]
[group("🔨 Build")]
deploy-dev ip: build-musl
    @echo "🚀 Deploying to dev server: {{ ip }}"
    ./scripts/dev_deploy.sh {{ ip }}

[group("🔨 Build")]
[private]
build-cross-all-pre:
    @echo "🔧 Building all target architectures..."
    @echo "💡 On macOS/Windows, use 'make docker-dev' for reliable multi-arch builds"
    @echo "🔨 Generating protobuf code..."
    -cargo run --bin gproto

[doc("build all targets at once")]
[group("🔨 Build")]
build-cross-all: build-cross-all-pre && build-gnu build-gnu-arm64 build-musl build-musl-arm64

# ========================================================================================
# Docker Multi-Architecture Builds (Primary Methods)
# ========================================================================================

[doc("build an image and run it")]
[group("🐳 Build Image")]
build-docker os="rockylinux9.3" cli=(DOCKER_CLI) dockerfile=(DOCKERFILE_SOURCE):
    #!/usr/bin/env bash
    SOURCE_BUILD_IMAGE_NAME="rustfs/rustfs-{{ os }}:v1"
    SOURCE_BUILD_CONTAINER_NAME="rustfs-{{ os }}-build"
    BUILD_CMD="/root/.cargo/bin/cargo build --release --bin rustfs --target-dir /root/s3-rustfs/target/{{ os }}"
    echo "🐳 Building RustFS using Docker ({{ os }})..."
    {{ cli }} buildx build -t $SOURCE_BUILD_IMAGE_NAME -f {{ dockerfile }} .
    {{ cli }} run --rm --name $SOURCE_BUILD_CONTAINER_NAME -v $(pwd):/root/s3-rustfs -it $SOURCE_BUILD_IMAGE_NAME $BUILD_CMD

[doc("build an image")]
[group("🐳 Build Image")]
docker-buildx:
    @echo "🏗️ Building multi-architecture production Docker images with buildx..."
    ./docker-buildx.sh

[doc("build an image and push it")]
[group("🐳 Build Image")]
docker-buildx-push:
    @echo "🚀 Building and pushing multi-architecture production Docker images with buildx..."
    ./docker-buildx.sh --push

[doc("build an image with a version")]
[group("🐳 Build Image")]
docker-buildx-version version:
    @echo "🏗️ Building multi-architecture production Docker images (version: {{ version }})..."
    ./docker-buildx.sh --release {{ version }}

[doc("build an image with a version and push it")]
[group("🐳 Build Image")]
docker-buildx-push-version version:
    @echo "🚀 Building and pushing multi-architecture production Docker images (version: {{ version }})..."
    ./docker-buildx.sh --release {{ version }} --push

[doc("build a dev image and push it to a registry")]
[group("🐳 Build Image")]
docker-dev-push registry cli=(DOCKER_CLI) source=(DOCKERFILE_SOURCE):
    @echo "🚀 Building and pushing multi-architecture development Docker images..."
    @echo "💡 push to registry: {{ registry }}"
    {{ cli }} buildx build \
        --platform linux/amd64,linux/arm64 \
        --file {{ source }} \
        --tag {{ registry }}/rustfs:source-latest \
        --tag {{ registry }}/rustfs:dev-latest \
        --push \
        .

# Local production builds using direct buildx (alternative to docker-buildx.sh)

[group("🐳 Build Image")]
docker-buildx-production-local cli=(DOCKER_CLI) source=(DOCKERFILE_PRODUCTION):
    @echo "🏗️ Building single-architecture production Docker image locally..."
    @echo "💡 Alternative to docker-buildx.sh for local testing"
    {{ cli }} buildx build \
        --file {{ source }} \
        --tag rustfs:production-latest \
        --tag rustfs:latest \
        --load \
        --build-arg RELEASE=latest \
        .

# Development/Source builds using direct buildx commands

[group("🐳 Build Image")]
docker-dev cli=(DOCKER_CLI) source=(DOCKERFILE_SOURCE):
    @echo "🏗️ Building multi-architecture development Docker images with buildx..."
    @echo "💡 This builds from source code and is intended for local development and testing"
    @echo "⚠️ Multi-arch images cannot be loaded locally, use docker-dev-push to push to registry"
    {{ cli }} buildx build \
        --platform linux/amd64,linux/arm64 \
        --file {{ source }} \
        --tag rustfs:source-latest \
        --tag rustfs:dev-latest \
        .

[group("🐳 Build Image")]
docker-dev-local cli=(DOCKER_CLI) source=(DOCKERFILE_SOURCE):
    @echo "🏗️ Building single-architecture development Docker image for local use..."
    @echo "💡 This builds from source code for the current platform and loads locally"
    {{ cli }} buildx build \
        --file {{ source }} \
        --tag rustfs:source-latest \
        --tag rustfs:dev-latest \
        --load \
        .

# ========================================================================================
# Single Architecture Docker Builds (Traditional)
# ========================================================================================

[group("🐳 Build Image")]
docker-build-production cli=(DOCKER_CLI) source=(DOCKERFILE_PRODUCTION):
    @echo "🏗️ Building single-architecture production Docker image..."
    @echo "💡 Consider using 'make docker-buildx-production-local' for multi-arch support"
    {{ cli }} build -f {{ source }} -t rustfs:latest .

[group("🐳 Build Image")]
docker-build-source cli=(DOCKER_CLI) source=(DOCKERFILE_SOURCE):
    @echo "🏗️ Building single-architecture source Docker image..."
    @echo "💡 Consider using 'make docker-dev-local' for multi-arch support"
    {{ cli }} build -f {{ source }} -t rustfs:source .

# ========================================================================================
# Development Environment
# ========================================================================================

[group("🏃 Running")]
dev-env-start cli=(DOCKER_CLI) source=(DOCKERFILE_SOURCE) container=(CONTAINER_NAME):
    @echo "🚀 Starting development environment..."
    {{ cli }} buildx build \
        --file {{ source }} \
        --tag rustfs:dev \
        --load \
        .
    -{{ cli }} stop {{ container }} 2>/dev/null
    -{{ cli }} rm {{ container }} 2>/dev/null
    {{ cli }} run -d --name {{ container }} \
        -p 9010:9010 -p 9000:9000 \
        -v {{ invocation_directory() }}:/workspace \
        -it rustfs:dev

[group("🏃 Running")]
dev-env-stop cli=(DOCKER_CLI) container=(CONTAINER_NAME):
    @echo "🛑 Stopping development environment..."
    -{{ cli }} stop {{ container }} 2>/dev/null
    -{{ cli }} rm {{ container }} 2>/dev/null

[group("🏃 Running")]
dev-env-restart: dev-env-stop dev-env-start

[group("👍 E2E")]
e2e-server:
    sh scripts/run.sh

[group("👍 E2E")]
probe-e2e:
    sh scripts/probe.sh

[doc("inspect one image")]
[group("🚚 Other")]
docker-inspect-multiarch image cli=(DOCKER_CLI):
    @echo "🔍 Inspecting multi-architecture image: {{ image }}"
    {{ cli }} buildx imagetools inspect {{ image }}
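A few example invocations of the Justfile above, assuming the `just` command runner is installed; the recipe names are taken verbatim from the file, and the registry is a placeholder:

```bash
just                                     # list all recipes (runs the private `default` target)
just pre-commit                          # fmt + clippy + check + test in one shot
just build-musl                          # x86_64-unknown-linux-musl release binary
just docker-dev-push ghcr.io/your-user   # build and push the dev image
```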
294  Makefile
@@ -1,5 +1,5 @@
###########
# 远程开发,需要 VSCode 安装 Dev Containers, Remote SSH, Remote Explorer
# Remote development requires VSCode with Dev Containers, Remote SSH, Remote Explorer
# https://code.visualstudio.com/docs/remote/containers
###########
DOCKER_CLI ?= docker
@@ -23,6 +23,7 @@ fmt-check:
.PHONY: clippy
clippy:
	@echo "🔍 Running clippy checks..."
	cargo clippy --fix --allow-dirty
	cargo clippy --all-targets --all-features -- -D warnings

.PHONY: check
@@ -33,7 +34,12 @@ check:
.PHONY: test
test:
	@echo "🧪 Running tests..."
	cargo nextest run --all --exclude e2e_test
	@if command -v cargo-nextest >/dev/null 2>&1; then \
		cargo nextest run --all --exclude e2e_test; \
	else \
		echo "ℹ️ cargo-nextest not found; falling back to 'cargo test'"; \
		cargo test --workspace --exclude e2e_test -- --nocapture; \
	fi
	cargo test --all --doc

.PHONY: pre-commit
@@ -46,21 +52,6 @@ setup-hooks:
	chmod +x .git/hooks/pre-commit
	@echo "✅ Git hooks setup complete!"

.PHONY: init-devenv
init-devenv:
	$(DOCKER_CLI) build -t $(IMAGE_NAME) -f Dockerfile.source .
	$(DOCKER_CLI) stop $(CONTAINER_NAME)
	$(DOCKER_CLI) rm $(CONTAINER_NAME)
	$(DOCKER_CLI) run -d --name $(CONTAINER_NAME) -p 9010:9010 -p 9000:9000 -v $(shell pwd):/root/s3-rustfs -it $(IMAGE_NAME)

.PHONY: start
start:
	$(DOCKER_CLI) start $(CONTAINER_NAME)

.PHONY: stop
stop:
	$(DOCKER_CLI) stop $(CONTAINER_NAME)

.PHONY: e2e-server
e2e-server:
	sh $(shell pwd)/scripts/run.sh
@@ -80,8 +71,6 @@ build-dev:
	@echo "🔨 Building RustFS in development mode..."
	./build-rustfs.sh --dev


# Docker-based build (alternative approach)
# Usage: make BUILD_OS=ubuntu22.04 build-docker
# Output: target/ubuntu22.04/release/rustfs
@@ -92,71 +81,182 @@ build-docker: SOURCE_BUILD_CONTAINER_NAME = rustfs-$(BUILD_OS)-build
build-docker: BUILD_CMD = /root/.cargo/bin/cargo build --release --bin rustfs --target-dir /root/s3-rustfs/target/$(BUILD_OS)
build-docker:
	@echo "🐳 Building RustFS using Docker ($(BUILD_OS))..."
	$(DOCKER_CLI) build -t $(SOURCE_BUILD_IMAGE_NAME) -f $(DOCKERFILE_SOURCE) .
	$(DOCKER_CLI) buildx build -t $(SOURCE_BUILD_IMAGE_NAME) -f $(DOCKERFILE_SOURCE) .
	$(DOCKER_CLI) run --rm --name $(SOURCE_BUILD_CONTAINER_NAME) -v $(shell pwd):/root/s3-rustfs -it $(SOURCE_BUILD_IMAGE_NAME) $(BUILD_CMD)

.PHONY: build-musl
build-musl:
	@echo "🔨 Building rustfs for x86_64-unknown-linux-musl..."
	@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-buildx' instead"
	@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
	./build-rustfs.sh --platform x86_64-unknown-linux-musl

.PHONY: build-gnu
build-gnu:
	@echo "🔨 Building rustfs for x86_64-unknown-linux-gnu..."
	@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-buildx' instead"
	@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
	./build-rustfs.sh --platform x86_64-unknown-linux-gnu

.PHONY: build-musl-arm64
build-musl-arm64:
	@echo "🔨 Building rustfs for aarch64-unknown-linux-musl..."
	@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
	./build-rustfs.sh --platform aarch64-unknown-linux-musl

.PHONY: build-gnu-arm64
build-gnu-arm64:
	@echo "🔨 Building rustfs for aarch64-unknown-linux-gnu..."
	@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
	./build-rustfs.sh --platform aarch64-unknown-linux-gnu

.PHONY: deploy-dev
deploy-dev: build-musl
	@echo "🚀 Deploying to dev server: $${IP}"
	./scripts/dev_deploy.sh $${IP}

# Multi-architecture Docker build targets (NEW: using docker-buildx.sh)
# ========================================================================================
# Docker Multi-Architecture Builds (Primary Methods)
# ========================================================================================

# Production builds using docker-buildx.sh (for CI/CD and production)
.PHONY: docker-buildx
docker-buildx:
	@echo "🏗️ Building multi-architecture Docker images with buildx..."
	@echo "🏗️ Building multi-architecture production Docker images with buildx..."
	./docker-buildx.sh

.PHONY: docker-buildx-push
docker-buildx-push:
	@echo "🚀 Building and pushing multi-architecture Docker images with buildx..."
	@echo "🚀 Building and pushing multi-architecture production Docker images with buildx..."
	./docker-buildx.sh --push

.PHONY: docker-buildx-version
docker-buildx-version:
	@if [ -z "$(VERSION)" ]; then \
		echo "❌ 错误: 请指定版本, 例如: make docker-buildx-version VERSION=v1.0.0"; \
		echo "❌ Error: Please specify version, example: make docker-buildx-version VERSION=v1.0.0"; \
		exit 1; \
	fi
	@echo "🏗️ Building multi-architecture Docker images (version: $(VERSION))..."
	@echo "🏗️ Building multi-architecture production Docker images (version: $(VERSION))..."
	./docker-buildx.sh --release $(VERSION)

.PHONY: docker-buildx-push-version
docker-buildx-push-version:
	@if [ -z "$(VERSION)" ]; then \
		echo "❌ 错误: 请指定版本, 例如: make docker-buildx-push-version VERSION=v1.0.0"; \
		echo "❌ Error: Please specify version, example: make docker-buildx-push-version VERSION=v1.0.0"; \
		exit 1; \
	fi
	@echo "🚀 Building and pushing multi-architecture Docker images (version: $(VERSION))..."
	@echo "🚀 Building and pushing multi-architecture production Docker images (version: $(VERSION))..."
	./docker-buildx.sh --release $(VERSION) --push

# Development/Source builds using direct buildx commands
.PHONY: docker-dev
docker-dev:
	@echo "🏗️ Building multi-architecture development Docker images with buildx..."
	@echo "💡 This builds from source code and is intended for local development and testing"
	@echo "⚠️ Multi-arch images cannot be loaded locally, use docker-dev-push to push to registry"
	$(DOCKER_CLI) buildx build \
		--platform linux/amd64,linux/arm64 \
		--file $(DOCKERFILE_SOURCE) \
		--tag rustfs:source-latest \
		--tag rustfs:dev-latest \
		.

.PHONY: docker-dev-local
docker-dev-local:
	@echo "🏗️ Building single-architecture development Docker image for local use..."
	@echo "💡 This builds from source code for the current platform and loads locally"
	$(DOCKER_CLI) buildx build \
		--file $(DOCKERFILE_SOURCE) \
		--tag rustfs:source-latest \
		--tag rustfs:dev-latest \
		--load \
		.

.PHONY: docker-dev-push
docker-dev-push:
	@if [ -z "$(REGISTRY)" ]; then \
		echo "❌ Error: Please specify registry, example: make docker-dev-push REGISTRY=ghcr.io/username"; \
		exit 1; \
	fi
	@echo "🚀 Building and pushing multi-architecture development Docker images..."
	@echo "💡 Pushing to registry: $(REGISTRY)"
	$(DOCKER_CLI) buildx build \
		--platform linux/amd64,linux/arm64 \
		--file $(DOCKERFILE_SOURCE) \
		--tag $(REGISTRY)/rustfs:source-latest \
		--tag $(REGISTRY)/rustfs:dev-latest \
		--push \
		.

# Local production builds using direct buildx (alternative to docker-buildx.sh)
.PHONY: docker-buildx-production-local
docker-buildx-production-local:
	@echo "🏗️ Building single-architecture production Docker image locally..."
	@echo "💡 Alternative to docker-buildx.sh for local testing"
	$(DOCKER_CLI) buildx build \
		--file $(DOCKERFILE_PRODUCTION) \
		--tag rustfs:production-latest \
		--tag rustfs:latest \
		--load \
		--build-arg RELEASE=latest \
		.

# ========================================================================================
# Single Architecture Docker Builds (Traditional)
# ========================================================================================

.PHONY: docker-build-production
docker-build-production:
	@echo "🏗️ Building production Docker image..."
	@echo "🏗️ Building single-architecture production Docker image..."
	@echo "💡 Consider using 'make docker-buildx-production-local' for multi-arch support"
	$(DOCKER_CLI) build -f $(DOCKERFILE_PRODUCTION) -t rustfs:latest .

.PHONY: docker-build-source
docker-build-source:
	@echo "🏗️ Building source Docker image..."
	$(DOCKER_CLI) build -f $(DOCKERFILE_SOURCE) -t rustfs:source .
	@echo "🏗️ Building single-architecture source Docker image..."
	@echo "💡 Consider using 'make docker-dev-local' for multi-arch support"
	DOCKER_BUILDKIT=1 $(DOCKER_CLI) build \
		--build-arg BUILDKIT_INLINE_CACHE=1 \
		-f $(DOCKERFILE_SOURCE) -t rustfs:source .

# ========================================================================================
# Development Environment
# ========================================================================================

.PHONY: dev-env-start
dev-env-start:
	@echo "🚀 Starting development environment..."
	$(DOCKER_CLI) buildx build \
		--file $(DOCKERFILE_SOURCE) \
		--tag rustfs:dev \
		--load \
		.
	$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
	$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true
	$(DOCKER_CLI) run -d --name $(CONTAINER_NAME) \
		-p 9010:9010 -p 9000:9000 \
		-v $(shell pwd):/workspace \
		-it rustfs:dev

.PHONY: dev-env-stop
dev-env-stop:
	@echo "🛑 Stopping development environment..."
	$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
	$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true

.PHONY: dev-env-restart
dev-env-restart: dev-env-stop dev-env-start

# ========================================================================================
# Build Utilities
# ========================================================================================

.PHONY: docker-inspect-multiarch
docker-inspect-multiarch:
	@if [ -z "$(IMAGE)" ]; then \
		echo "❌ 错误: 请指定镜像, 例如: make docker-inspect-multiarch IMAGE=rustfs/rustfs:latest"; \
		echo "❌ Error: Please specify image, example: make docker-inspect-multiarch IMAGE=rustfs/rustfs:latest"; \
		exit 1; \
	fi
	@echo "🔍 Inspecting multi-architecture image: $(IMAGE)"
@@ -165,64 +265,112 @@ docker-inspect-multiarch:
.PHONY: build-cross-all
build-cross-all:
	@echo "🔧 Building all target architectures..."
	@echo "💡 On macOS/Windows, use 'make docker-buildx' for reliable multi-arch builds"
	@echo "💡 On macOS/Windows, use 'make docker-dev' for reliable multi-arch builds"
	@echo "🔨 Generating protobuf code..."
	cargo run --bin gproto || true
	@echo "🔨 Building x86_64-unknown-linux-musl..."
	./build-rustfs.sh --platform x86_64-unknown-linux-musl
	@echo "🔨 Building x86_64-unknown-linux-gnu..."
	./build-rustfs.sh --platform x86_64-unknown-linux-gnu
	@echo "🔨 Building aarch64-unknown-linux-gnu..."
	./build-rustfs.sh --platform aarch64-unknown-linux-gnu
	@echo "🔨 Building x86_64-unknown-linux-musl..."
	./build-rustfs.sh --platform x86_64-unknown-linux-musl
	@echo "🔨 Building aarch64-unknown-linux-musl..."
	./build-rustfs.sh --platform aarch64-unknown-linux-musl
	@echo "✅ All architectures built successfully!"

# ========================================================================================
# Help and Documentation
# ========================================================================================

.PHONY: help-build
help-build:
	@echo "🔨 RustFS 构建帮助:"
	@echo "🔨 RustFS Build Help:"
	@echo ""
	@echo "🚀 本地构建 (推荐使用):"
	@echo "  make build                    # 构建 RustFS 二进制文件 (默认包含 console)"
	@echo "  make build-dev                # 开发模式构建"
	@echo "  make build-musl               # 构建 musl 版本"
	@echo "  make build-gnu                # 构建 GNU 版本"
	@echo "🚀 Local Build (Recommended):"
	@echo "  make build                    # Build RustFS binary (includes console by default)"
	@echo "  make build-dev                # Development mode build"
	@echo "  make build-musl               # Build x86_64 musl version"
	@echo "  make build-gnu                # Build x86_64 GNU version"
	@echo "  make build-musl-arm64         # Build aarch64 musl version"
	@echo "  make build-gnu-arm64          # Build aarch64 GNU version"
	@echo ""
	@echo "🐳 Docker 构建:"
	@echo "  make build-docker                        # 使用 Docker 容器构建"
	@echo "  make build-docker BUILD_OS=ubuntu22.04   # 指定构建系统"
	@echo "🐳 Docker Build:"
	@echo "  make build-docker                        # Build using Docker container"
	@echo "  make build-docker BUILD_OS=ubuntu22.04   # Specify build system"
	@echo ""
	@echo "🏗️ 跨架构构建:"
	@echo "  make build-cross-all          # 构建所有架构的二进制文件"
	@echo "🏗️ Cross-architecture Build:"
	@echo "  make build-cross-all          # Build binaries for all architectures"
	@echo ""
	@echo "🔧 直接使用 build-rustfs.sh 脚本:"
	@echo "  ./build-rustfs.sh --help                 # 查看脚本帮助"
	@echo "  ./build-rustfs.sh --no-console           # 构建时跳过 console 资源"
	@echo "  ./build-rustfs.sh --force-console-update # 强制更新 console 资源"
	@echo "  ./build-rustfs.sh --dev                  # 开发模式构建"
	@echo "  ./build-rustfs.sh --sign                 # 签名二进制文件"
	@echo "  ./build-rustfs.sh --platform x86_64-unknown-linux-musl # 指定目标平台"
	@echo "  ./build-rustfs.sh --skip-verification    # 跳过二进制验证"
	@echo "🔧 Direct usage of build-rustfs.sh script:"
	@echo "  ./build-rustfs.sh --help                 # View script help"
	@echo "  ./build-rustfs.sh --no-console           # Build without console resources"
	@echo "  ./build-rustfs.sh --force-console-update # Force update console resources"
	@echo "  ./build-rustfs.sh --dev                  # Development mode build"
	@echo "  ./build-rustfs.sh --sign                 # Sign binary files"
	@echo "  ./build-rustfs.sh --platform x86_64-unknown-linux-gnu # Specify target platform"
	@echo "  ./build-rustfs.sh --skip-verification    # Skip binary verification"
	@echo ""
	@echo "💡 build-rustfs.sh 脚本提供了更多选项、智能检测和二进制验证功能"
	@echo "💡 build-rustfs.sh script provides more options, smart detection and binary verification"

.PHONY: help-docker
help-docker:
	@echo "🐳 Docker 多架构构建帮助:"
	@echo "🐳 Docker Multi-architecture Build Help:"
	@echo ""
	@echo "🚀 推荐使用 (新的 docker-buildx 方式):"
	@echo "  make docker-buildx                       # 构建多架构镜像(不推送)"
	@echo "  make docker-buildx-push                  # 构建并推送多架构镜像"
	@echo "  make docker-buildx-version VERSION=v1.0.0        # 构建指定版本"
	@echo "  make docker-buildx-push-version VERSION=v1.0.0   # 构建并推送指定版本"
	@echo "🚀 Production Image Build (Recommended to use docker-buildx.sh):"
	@echo "  make docker-buildx                       # Build production multi-arch image (no push)"
	@echo "  make docker-buildx-push                  # Build and push production multi-arch image"
	@echo "  make docker-buildx-version VERSION=v1.0.0        # Build specific version"
	@echo "  make docker-buildx-push-version VERSION=v1.0.0   # Build and push specific version"
	@echo ""
	@echo "🏗️ 单架构构建:"
	@echo "  make docker-build-production  # 构建生产环境镜像"
	@echo "  make docker-build-source      # 构建源码构建镜像"
	@echo "🔧 Development/Source Image Build (Local development testing):"
	@echo "  make docker-dev               # Build dev multi-arch image (cannot load locally)"
	@echo "  make docker-dev-local         # Build dev single-arch image (local load)"
	@echo "  make docker-dev-push REGISTRY=xxx        # Build and push dev image"
	@echo ""
	@echo "🔧 辅助工具:"
	@echo "  make build-cross-all          # 构建所有架构的二进制文件"
	@echo "  make docker-inspect-multiarch IMAGE=xxx  # 检查镜像的架构支持"
	@echo "🏗️ Local Production Image Build (Alternative):"
	@echo "  make docker-buildx-production-local      # Build production single-arch image locally"
	@echo ""
	@echo "📋 环境变量 (在推送时需要设置):"
	@echo "  DOCKERHUB_USERNAME            Docker Hub 用户名"
	@echo "  DOCKERHUB_TOKEN               Docker Hub 访问令牌"
	@echo "  GITHUB_TOKEN                  GitHub 访问令牌"
	@echo "📦 Single-architecture Build (Traditional way):"
	@echo "  make docker-build-production  # Build single-arch production image"
	@echo "  make docker-build-source      # Build single-arch source image"
	@echo ""
	@echo "💡 更多详情请参考项目根目录的 docker-buildx.sh 脚本"
	@echo "🚀 Development Environment Management:"
	@echo "  make dev-env-start            # Start development container environment"
	@echo "  make dev-env-stop             # Stop development container environment"
	@echo "  make dev-env-restart          # Restart development container environment"
	@echo ""
	@echo "🔧 Auxiliary Tools:"
	@echo "  make build-cross-all          # Build binaries for all architectures"
	@echo "  make docker-inspect-multiarch IMAGE=xxx  # Check image architecture support"
	@echo ""
	@echo "📋 Environment Variables:"
	@echo "  REGISTRY                      Image registry address (required for push)"
	@echo "  DOCKERHUB_USERNAME            Docker Hub username"
	@echo "  DOCKERHUB_TOKEN               Docker Hub access token"
	@echo "  GITHUB_TOKEN                  GitHub access token"
	@echo ""
	@echo "💡 Suggestions:"
	@echo "  - Production use: Use docker-buildx* commands (based on precompiled binaries)"
	@echo "  - Local development: Use docker-dev* commands (build from source)"
	@echo "  - Development environment: Use dev-env-* commands to manage dev containers"

.PHONY: help
help:
	@echo "🦀 RustFS Makefile Help:"
	@echo ""
	@echo "📋 Main Command Categories:"
	@echo "  make help-build               # Show build-related help"
	@echo "  make help-docker              # Show Docker-related help"
	@echo ""
	@echo "🔧 Code Quality:"
	@echo "  make fmt                      # Format code"
	@echo "  make clippy                   # Run clippy checks"
	@echo "  make test                     # Run tests"
	@echo "  make pre-commit               # Run all pre-commit checks"
	@echo ""
	@echo "🚀 Quick Start:"
	@echo "  make build                    # Build RustFS binary"
	@echo "  make docker-dev-local         # Build development Docker image (local)"
	@echo "  make dev-env-start            # Start development environment"
	@echo ""
	@echo "💡 For more help use 'make help-build' or 'make help-docker'"
|
||||
|
||||
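In practice, the targets above compose into short command lines; a quick sketch of common invocations (the REGISTRY value is illustrative, not a project default):

```bash
# Native release build with the bundled console
make build

# Static musl binary for x86_64 via the dedicated target
make build-musl

# Multi-arch production image, built and pushed (requires registry credentials)
make docker-buildx-push REGISTRY=docker.io/your-org
```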
README.md (14 lines changed)

@@ -11,8 +11,8 @@
 </p>

 <p align="center">
-  <a href="https://docs.rustfs.com/en/introduction.html">Getting Started</a>
-  · <a href="https://docs.rustfs.com/en/">Docs</a>
+  <a href="https://docs.rustfs.com/introduction.html">Getting Started</a>
+  · <a href="https://docs.rustfs.com/">Docs</a>
   · <a href="https://github.com/rustfs/rustfs/issues">Bug reports</a>
   · <a href="https://github.com/rustfs/rustfs/discussions">Discussions</a>
 </p>

@@ -81,14 +81,14 @@ To get started with RustFS, follow these steps:
2. **Docker Quick Start (Option 2)**

   ```bash
   # Latest stable release
   docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs:latest

   # Create the data and logs directories
   mkdir -p data logs

   # Development version (main branch)
   docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs:main-latest

   # Using the latest alpha version
   docker run -d -p 9000:9000 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:alpha

   # Specific version
   docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs:v1.0.0
   docker run -d -p 9000:9000 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:1.0.0.alpha.45
   ```

3. **Build from Source (Option 3) - Advanced Users**
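After any of the containers above starts, a quick liveness probe is to hit the S3 port; since RustFS speaks the S3 protocol on port 9000, an unauthenticated request should come back as an S3-style XML error rather than a refused connection (a sketch):

```bash
# Expect an HTTP response with an S3 XML error body, not a connection failure
curl -i http://localhost:9000/
```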
_typos.toml (new file, 41 lines added)

@@ -0,0 +1,41 @@
[default]
# # Ignore specific spell checking patterns
# extend-ignore-identifiers-re = [
#     # Ignore common patterns in base64 encoding and hash values
#     "[A-Za-z0-9+/]{8,}={0,2}",   # base64 encoding
#     "[A-Fa-f0-9]{8,}",           # hexadecimal hash
#     "[A-Za-z0-9_-]{20,}",        # long random strings
# ]

# # Ignore specific regex patterns in content
# extend-ignore-re = [
#     # Ignore hash values and encoded strings (base64 patterns)
#     "(?i)[A-Za-z0-9+/]{8,}={0,2}",
#     # Ignore long strings in quotes (usually hash or base64)
#     '"[A-Za-z0-9+/=_-]{8,}"',
#     # Ignore IV values and similar cryptographic strings
#     '"[A-Za-z0-9+/=]{12,}"',
#     # Ignore cryptographic signatures and keys (including partial strings)
#     "[A-Za-z0-9+/]{6,}[A-Za-z0-9+/=]*",
#     # Ignore base64-like strings in comments (common in examples)
#     "//.*[A-Za-z0-9+/]{8,}[A-Za-z0-9+/=]*",
# ]
extend-ignore-re = [
    # Ignore long strings in quotes (usually hash or base64)
    '"[A-Za-z0-9+/=_-]{32,}"',
    # Ignore IV values and similar cryptographic strings
    '"[A-Za-z0-9+/=]{12,}"',
    # Ignore cryptographic signatures and keys (including partial strings)
    "[A-Za-z0-9+/]{16,}[A-Za-z0-9+/=]*",
]

[default.extend-words]
bui = "bui"
typ = "typ"
clen = "clen"
datas = "datas"
bre = "bre"
abd = "abd"

[files]
extend-exclude = []
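This configuration drives the typos spell checker; a minimal local run might look like this (a sketch, assuming the typos-cli crate is installed; a `_typos.toml` in the repo root is picked up automatically):

```bash
# One-time install of the checker (provides the `typos` binary)
cargo install typos-cli

# Check the tree using the repo-root config
typos

# Or point at the config explicitly
typos --config _typos.toml
```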
@@ -21,13 +21,17 @@ detect_platform() {
         "linux")
             case "$arch" in
                 "x86_64")
-                    echo "x86_64-unknown-linux-musl"
+                    # Default to GNU for better compatibility
+                    echo "x86_64-unknown-linux-gnu"
                     ;;
                 "aarch64"|"arm64")
-                    echo "aarch64-unknown-linux-musl"
+                    echo "aarch64-unknown-linux-gnu"
                     ;;
                 "armv7l")
-                    echo "armv7-unknown-linux-musleabihf"
+                    echo "armv7-unknown-linux-gnueabihf"
                     ;;
+                "loongarch64")
+                    echo "loongarch64-unknown-linux-musl"
+                    ;;
                 *)
                     echo "unknown-platform"

@@ -119,6 +123,17 @@ usage() {
     echo "  -o, --output-dir DIR     Output directory (default: target/release)"
     echo "  -b, --binary-name NAME   Binary name (default: rustfs)"
     echo "  -p, --platform TARGET    Target platform (default: auto-detect)"
+    echo "                           Supported platforms:"
+    echo "                             x86_64-unknown-linux-gnu"
+    echo "                             aarch64-unknown-linux-gnu"
+    echo "                             armv7-unknown-linux-gnueabihf"
+    echo "                             x86_64-unknown-linux-musl"
+    echo "                             aarch64-unknown-linux-musl"
+    echo "                             armv7-unknown-linux-musleabihf"
+    echo "                             x86_64-apple-darwin"
+    echo "                             aarch64-apple-darwin"
+    echo "                             x86_64-pc-windows-msvc"
+    echo "                             aarch64-pc-windows-msvc"
     echo "  --dev                    Build in dev mode"
     echo "  --sign                   Sign binaries after build"
     echo "  --with-console           Download console static assets (default)"
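Taken together, the usage text above corresponds to invocations like these (a sketch; every flag shown is taken from the help output itself):

```bash
# Auto-detect the host platform and build
./build-rustfs.sh

# Build for an explicit target, sign the result, and skip verification
./build-rustfs.sh --platform aarch64-unknown-linux-musl --sign --skip-verification

# Dev-mode build into a custom output directory with a custom binary name
./build-rustfs.sh --dev --output-dir target/dev --binary-name rustfs-dev
```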
@@ -385,7 +400,7 @@ build_binary() {
         fi
     else
         # Native compilation
-        build_cmd="cargo build"
+        build_cmd="RUSTFLAGS=-Clink-arg=-lm cargo build"
     fi

     if [ "$BUILD_TYPE" = "release" ]; then
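The change above threads a linker flag to cargo through the environment; embedding `RUSTFLAGS=...` in the command string only works if the string is later executed through the shell (for example via `eval`). The equivalent direct invocation would look roughly like this (a sketch):

```bash
# Link against libm explicitly during a native build
RUSTFLAGS="-C link-arg=-lm" cargo build --release
```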
@@ -1,52 +0,0 @@
[application]

# App (Project) Name
name = "rustfs-gui"

# The static resource path
asset_dir = "public"

[web.app]

# HTML title tag content
title = "rustfs-gui"

# include `assets` in web platform
[web.resource]

# Additional CSS style files
style = []

# Additional JavaScript files
script = []

[web.resource.dev]

# JavaScript code file
# serve: [dev-server] only
script = []

[bundle]
identifier = "com.rustfs.cli.gui"

publisher = "RustFsGUI"

category = "Utility"

copyright = "Copyright 2025 rustfs.com"

icon = [
    "assets/icons/icon.icns",
    "assets/icons/icon.ico",
    "assets/icons/icon.png",
    "assets/icons/rustfs-icon.png",
]

#[bundle.macos]
#provider_short_name = "RustFs"

[bundle.windows]
tsp = true
icon_path = "assets/icons/icon.ico"
allow_downgrades = true

[bundle.windows.webview_install_mode]
[bundle.windows.webview_install_mode.EmbedBootstrapper]
silent = true
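For reference, this file configured the Dioxus CLI's bundler for the GUI being removed here; a typical packaging invocation would have looked roughly like this (a sketch, assuming dioxus-cli is installed; flags can differ across CLI versions):

```bash
# Package the desktop app using the [bundle] settings above
dx bundle --platform desktop --release
```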
@@ -1,34 +0,0 @@
## Rustfs GUI

### Tailwind

1. Install npm: https://docs.npmjs.com/downloading-and-installing-node-js-and-npm
2. Install the Tailwind CSS CLI: https://tailwindcss.com/docs/installation
3. Run the following command in the root of the project to start the Tailwind CSS compiler:

```bash
npx tailwindcss -i ./input.css -o ./assets/tailwind.css --watch
```

### Dioxus CLI

#### Install the stable version (recommended)

```shell
cargo install dioxus-cli
```

### Serving Your App

Run the following command in the root of your project to start developing with the default platform:

```bash
dx serve
```

To run for a different platform, use the `--platform platform` flag. E.g.

```bash
dx serve --platform desktop
```
(16 deleted binary image assets: GUI icons and logos ranging from 498 B to 80 KiB)
@@ -1,48 +0,0 @@
/**
 * Copyright 2024 RustFS Team
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

window.switchTab = function (tabId) {
    // Hide every tab pane
    document.querySelectorAll('.tab-content').forEach(content => {
        content.classList.add('hidden');
    });

    // Reset all tab-button styles
    document.querySelectorAll('.tab-btn').forEach(btn => {
        btn.classList.remove('border-b-2', 'border-black');
        btn.classList.add('text-gray-500');
    });

    // Show the selected pane
    const activeContent = document.getElementById(tabId);
    if (activeContent) {
        activeContent.classList.remove('hidden');
    }

    // Highlight the selected tab button
    const activeBtn = document.querySelector(`[data-tab="${tabId}"]`);
    if (activeBtn) {
        activeBtn.classList.add('border-b-2', 'border-black');
        activeBtn.classList.remove('text-gray-500');
    }
};

window.togglePassword = function (button) {
    const input = button.parentElement.querySelector('input[type="password"], input[type="text"]');
    if (input) {
        input.type = input.type === 'password' ? 'text' : 'password';
    }
};
(2 deleted binary image assets, 23 KiB and 34 KiB)
@@ -1,15 +0,0 @@
<svg width="1558" height="260" viewBox="0 0 1558 260" fill="none" xmlns="http://www.w3.org/2000/svg">
  <g clip-path="url(#clip0_0_3)">
    <path d="M1288.5 112.905H1159.75V58.4404H1262L1270 0L1074 0V260H1159.75V162.997H1296.95L1288.5 112.905Z" fill="#0196D0"/>
    <path d="M1058.62 58.4404V0H789V58.4404H881.133V260H966.885V58.4404H1058.62Z" fill="#0196D0"/>
    <path d="M521 179.102V0L454.973 15V161C454.973 181.124 452.084 193.146 443.5 202C434.916 211.257 419.318 214.5 400.5 214.5C381.022 214.5 366.744 210.854 357.5 202C348.916 193.548 346.357 175.721 346.357 156V0L280 15V175.48C280 208.08 290.234 229.412 309.712 241.486C329.19 253.56 358.903 260 400.5 260C440.447 260 470.159 253.56 490.297 241.486C510.766 229.412 521 208.483 521 179.102Z" fill="#0196D0"/>
    <path d="M172.84 84.2813C172.84 97.7982 168.249 107.737 158.41 113.303C149.883 118.471 137.092 121.254 120.693 122.049V162.997C129.876 163.792 138.076 166.177 144.307 176.514L184.647 260H265L225.316 180.489C213.181 155.046 201.374 149.48 178.744 143.517C212.197 138.349 241.386 118.471 241.386 73.1499C241.386 53.2722 233.843 30.2141 218.756 17.8899C203.998 5.56575 183.991 0 159.394 0H120.693V48.5015H127.58C142.23 48.5015 153.6 51.4169 161.689 57.2477C169.233 62.8135 172.84 71.5596 172.84 84.2813ZM120.693 122.049C119.163 122.049 117.741 122.049 116.43 122.049H68.5457V48.5015H120.693V0H0V260H70.5137V162.997H110.526C113.806 162.997 117.741 162.997 120.693 162.997V122.049Z" fill="#0196D0"/>
    <path d="M774 179.297C774 160.829 766.671 144.669 752.013 131.972C738.127 119.66 712.025 110.169 673.708 103.5C662.136 101.191 651.722 99.6523 643.235 97.3437C586.532 84.6467 594.632 52.7118 650.564 52.7118C680.651 52.7118 709.582 61.946 738.127 66.9478C742.37 67.7174 743.913 68.1021 744.298 68.1021L750.47 12.697C720.383 3.46282 684.895 0 654.036 0C616.619 0 587.689 6.54088 567.245 19.2379C546.801 31.9349 536 57.7137 536 82.3382C536 103.5 543.715 119.66 559.916 131.972C575.731 143.515 604.276 152.749 645.55 160.059C658.279 162.368 668.694 163.907 676.794 166.215C685.023 168.524 691.066 170.704 694.924 172.756C702.253 176.604 706.11 182.375 706.11 188.531C706.11 196.611 701.481 202.767 692.224 207C664.836 220.081 587.689 212.001 556.83 198.15L543.715 247.784C547.186 248.169 552.972 249.323 559.916 250.477C616.619 259.327 690.681 270.869 741.212 238.935C762.814 225.468 774 206.23 774 179.297Z" fill="#0196D0"/>
    <path d="M1558 179.568C1558 160.383 1550.42 144.268 1535.67 131.99C1521.32 119.968 1494.34 110.631 1454.74 103.981C1442.38 101.679 1432.01 99.3764 1422.84 97.8416C1422.44 97.8416 1422.04 97.8416 1422.04 97.4579V112.422L1361.04 75.2038L1422.04 38.3692V52.9496C1424.7 52.9496 1427.49 52.9496 1430.41 52.9496C1461.51 52.9496 1491.42 62.5419 1521.32 67.5299C1525.31 67.9136 1526.9 67.9136 1527.3 67.9136L1533.68 12.6619C1502.98 3.83692 1465.9 0 1434 0C1395.33 0 1365.43 6.52277 1345.09 19.5683C1323.16 32.6139 1312 57.9376 1312 82.8776C1312 103.981 1320.37 120.096 1336.72 131.607C1353.46 143.885 1382.97 153.093 1425.23 160.383C1434 161.535 1441.18 162.686 1447.56 164.22L1448.36 150.791L1507.36 190.312L1445.57 224.844L1445.96 212.949C1409.68 215.635 1357.45 209.112 1333.53 197.985L1320.37 247.482C1323.56 248.249 1329.54 248.633 1336.72 250.551C1395.33 259.376 1471.88 270.887 1524.11 238.657C1546.84 225.611 1558 205.659 1558 179.568Z" fill="#0196D0"/>
  </g>
  <defs>
    <clipPath id="clip0_0_3">
      <rect width="1558" height="260" fill="white"/>
    </clipPath>
  </defs>
</svg>

(1 deleted binary image asset, 3.4 KiB)
@@ -1,33 +0,0 @@
/**
 * Copyright 2024 RustFS Team
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#navbar {
    display: flex;
    flex-direction: row;
}

#navbar a {
    color: #ffffff;
    margin-right: 20px;
    text-decoration: none;
    transition: color 0.2s ease;
}

#navbar a:hover {
    cursor: pointer;
    color: #ffffff;
    /* #91a4d2 */
}
@@ -1,972 +0,0 @@
(deleted: compiled Tailwind CSS v3.4.17 output, 972 lines: the Apache-2.0 license header, Tailwind's custom-property defaults for `*`, `::before`, `::after`, and `::backdrop`, the Preflight base reset, and the generated utility classes referenced by the GUI markup, e.g. `.flex`, `.hidden`, `.min-h-screen`, `.rounded-md`, `.bg-gray-900`, `.text-gray-500`, `.animate-spin`, `.hover\:bg-gray-100:hover`, `.focus\:ring-2:focus`)
@@ -1 +0,0 @@
rustfs bin path, do not delete
@@ -1,19 +0,0 @@
/**
 * Copyright 2024 RustFS Team
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

@tailwind base;
@tailwind components;
@tailwind utilities;
@@ -1,330 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::components::navbar::LoadingSpinner;
use crate::route::Route;
use crate::utils::{RustFSConfig, ServiceManager};
use chrono::Datelike;
use dioxus::logger::tracing::debug;
use dioxus::prelude::*;
use std::time::Duration;

const HEADER_LOGO: Asset = asset!("/assets/rustfs-logo.svg");
const TAILWIND_CSS: Asset = asset!("/assets/tailwind.css");

/// Define the state of the service
#[derive(PartialEq, Debug, Clone)]
enum ServiceState {
    Start,
    Stop,
}

/// The Home component is the main view of the application.
/// It starts and stops the RustFS service, polls and displays the service
/// status, and provides a button to toggle the service. It also renders the
/// theme and settings buttons and the footer, which links to the official
/// site, documentation, GitHub, and license, and shows the application version.
#[component]
pub fn Home() -> Element {
    #[allow(clippy::redundant_closure)]
    let service = use_signal(|| ServiceManager::new());
    let conf = RustFSConfig::load().unwrap_or_else(|e| {
        ServiceManager::show_error(&format!("load config failed: {e}"));
        RustFSConfig::default()
    });

    debug!("loaded configuration: {:?}", conf);
    let config = use_signal(|| conf.clone());

    use dioxus_router::prelude::Link;
    use document::{Meta, Stylesheet, Title};
    let mut service_state = use_signal(|| ServiceState::Start);
    // Periodically poll the service status in an effect
    use_effect(move || {
        spawn(async move {
            loop {
                if let Some(pid) = ServiceManager::check_service_status().await {
                    debug!("service running, pid: {:?}", pid);
                    service_state.set(ServiceState::Stop);
                } else {
                    debug!("service not running");
                    service_state.set(ServiceState::Start);
                }
                tokio::time::sleep(Duration::from_secs(2)).await;
            }
        });
    });
    debug!("initial service_state: {:?}", service_state.read());
    // Use `use_signal` to manage the loading status
    let mut loading = use_signal(|| false);
    let mut start_service = move |_| {
        let service = service;
        let config = config.read().clone();
        let mut service_state = service_state;
        // set the loading status
        loading.set(true);
        debug!("start loading_state: {:?}", loading.read());
        spawn(async move {
            match service.read().start(config).await {
                Ok(result) => {
                    if result.success {
                        let duration = result.end_time - result.start_time;
                        debug!("service started successfully in {} ms", duration.num_milliseconds());
                        service_state.set(ServiceState::Stop);
                    } else {
                        ServiceManager::show_error(&result.message);
                        service_state.set(ServiceState::Start);
                    }
                }
                Err(e) => {
                    ServiceManager::show_error(&format!("start service failed: {e}"));
                }
            }
            // Only clear the loading flag once the request has finished
            loading.set(false);
            debug!("start loading_state: {:?}", loading.read());
        });
    };

    let mut stop_service = move |_| {
        let service = service;
        let mut service_state = service_state;
        // set the loading status
        loading.set(true);
        spawn(async move {
            match service.read().stop().await {
                Ok(result) => {
                    if result.success {
                        let duration = result.end_time - result.start_time;
                        debug!("service stopped successfully in {} ms", duration.num_milliseconds());
                        service_state.set(ServiceState::Start);
                    } else {
                        ServiceManager::show_error(&result.message);
                    }
                }
                Err(e) => {
                    ServiceManager::show_error(&format!("stop service failed: {e}"));
                }
            }
            debug!("service_state: {:?}", service_state.read());
            // Only clear the loading flag once the request has finished
            loading.set(false);
            debug!("stop loading_state: {:?}", loading.read());
        });
    };

    // Toggle the state when the button is clicked
    let toggle_service = {
        let mut service_state = service_state;
        debug!("toggle_service service_state: {:?}", service_state.read());
        move |_| {
            if service_state.read().eq(&ServiceState::Stop) {
                // The service is running, so run a command to stop it
                stop_service(());
                service_state.set(ServiceState::Start);
            } else {
                start_service(());
                service_state.set(ServiceState::Stop);
            }
        }
    };

    // Define dynamic styles based on state
    let button_class = if service_state.read().eq(&ServiceState::Start) {
        "bg-[#111827] hover:bg-[#1f2937] text-white px-4 py-2 rounded-md flex items-center space-x-2"
    } else {
        "bg-red-500 hover:bg-red-600 text-white px-4 py-2 rounded-md flex items-center space-x-2"
    };

    rsx! {
        // The Stylesheet component inserts a style link into the head of the document
        Stylesheet { href: TAILWIND_CSS }
        Title { "RustFS APP" }
        Meta {
            name: "description",
            content: "RustFS is built in the popular, memory-safe Rust language and is compatible with the S3 protocol. It suits AI/ML and massive-data storage, big data, internet, industrial, and confidential storage scenarios, at near-zero cost. Licensed under Apache 2.0, with support for domestic secure devices and systems.",
        }
        div { class: "min-h-screen flex flex-col items-center bg-white",
            div { class: "absolute top-4 right-6 flex space-x-2",
                // change theme
                button { class: "p-2 hover:bg-gray-100 rounded-lg", ChangeThemeButton {} }
                // settings button
                Link {
                    class: "p-2 hover:bg-gray-100 rounded-lg",
                    to: Route::SettingViews {},
                    SettingButton {}
                }
            }
            main { class: "flex-1 flex flex-col items-center justify-center space-y-6 p-4",
                div { class: "w-24 h-24 bg-gray-900 rounded-full flex items-center justify-center",
                    img { alt: "Logo", class: "w-16 h-16", src: HEADER_LOGO }
                }
                div { class: "text-gray-600",
                    "Service is running on "
                    span { class: "text-blue-600", " 127.0.0.1:9000 " }
                }
                LoadingSpinner {
                    loading: loading.read().to_owned(),
                    text: "processing...",
                }
                button { class: button_class, onclick: toggle_service,
                    svg {
                        class: "h-4 w-4",
                        fill: "none",
                        stroke: "currentColor",
                        view_box: "0 0 24 24",
                        xmlns: "http://www.w3.org/2000/svg",
                        if service_state.read().eq(&ServiceState::Start) {
                            path {
                                d: "M14.752 11.168l-3.197-2.132A1 1 0 0010 9.87v4.263a1 1 0 001.555.832l3.197-2.132a1 1 0 000-1.664z",
                                stroke_linecap: "round",
                                stroke_linejoin: "round",
                                stroke_width: "2",
                            }
                            path {
                                d: "M21 12a9 9 0 11-18 0 9 9 0 0118 0z",
                                stroke_linecap: "round",
                                stroke_linejoin: "round",
                                stroke_width: "2",
                            }
                        } else {
                            path {
                                stroke_linecap: "round",
                                stroke_linejoin: "round",
                                stroke_width: "2",
                                d: "M21 12a9 9 0 11-18 0 9 9 0 0118 0z",
                            }
                            path {
                                stroke_linecap: "round",
                                stroke_linejoin: "round",
                                stroke_width: "2",
                                d: "M9 10h6v4H9z",
                            }
                        }
                    }
                    span { id: "serviceStatus",
                        if service_state.read().eq(&ServiceState::Start) {
                            "Start service"
                        } else {
                            "Stop service"
                        }
                    }
                }
            }
            Footer { version: "v1.0.0".to_string() }
        }
    }
}

#[component]
pub fn Footer(version: String) -> Element {
    let now = chrono::Local::now();
    let year = now.naive_local().year();
    rsx! {
        footer { class: "w-full py-6 flex flex-col items-center space-y-4 mb-6",
            nav { class: "flex space-x-4 text-gray-600",
                a { class: "hover:text-gray-900", href: "https://rustfs.com", "Official Site" }
                a {
                    class: "hover:text-gray-900",
                    href: "https://rustfs.com/docs",
                    "Documentation"
                }
                a {
                    class: "hover:text-gray-900",
                    href: "https://github.com/rustfs/rustfs",
                    "GitHub"
                }
                a {
                    class: "hover:text-gray-900",
                    href: "https://rustfs.com/docs/license/",
                    "License"
                }
                a { class: "hover:text-gray-900", href: "#", "Sponsors" }
            }
            div { class: "text-gray-500 text-sm", " © rustfs.com {year}, All rights reserved." }
            div { class: "text-gray-400 text-sm mb-8", " version {version} " }
        }
    }
}

#[component]
pub fn GoBackButtons() -> Element {
    rsx! {
        button {
            class: "p-2 hover:bg-gray-100 rounded-lg",
            "onclick": "window.history.back()",
            "Back to the Past"
        }
    }
}

#[component]
pub fn GoForwardButtons() -> Element {
    rsx! {
        button {
            class: "p-2 hover:bg-gray-100 rounded-lg",
            "onclick": "window.history.forward()",
            "Back to the Future"
        }
    }
}

#[component]
pub fn ChangeThemeButton() -> Element {
    rsx! {
        svg {
            class: "h-6 w-6 text-gray-600",
            fill: "none",
            stroke: "currentColor",
            view_box: "0 0 24 24",
            xmlns: "http://www.w3.org/2000/svg",
            path {
                d: "M9 3v2m6-2v2M9 19v2m6-2v2M5 9H3m2 6H3m18-6h-2m2 6h-2M7 19h10a2 2 0 002-2V7a2 2 0 00-2-2H7a2 2 0 00-2 2v10a2 2 0 002 2zM9 9h6v6H9V9z",
                stroke_linecap: "round",
                stroke_linejoin: "round",
                stroke_width: "2",
            }
        }
    }
}

#[component]
pub fn SettingButton() -> Element {
    rsx! {
        svg {
            class: "h-6 w-6 text-gray-600",
            fill: "none",
            stroke: "currentColor",
            view_box: "0 0 24 24",
            xmlns: "http://www.w3.org/2000/svg",
            path {
                d: "M10.325 4.317c.426-1.756 2.924-1.756 3.35 0a1.724 1.724 0 002.573 1.066c1.543-.94 3.31.826 2.37 2.37a1.724 1.724 0 001.065 2.572c1.756.426 1.756 2.924 0 3.35a1.724 1.724 0 00-1.066 2.573c.94 1.543-.826 3.31-2.37 2.37a1.724 1.724 0 00-2.572 1.065c-.426 1.756-2.924 1.756-3.35 0a1.724 1.724 0 00-2.573-1.066c-1.543.94-3.31-.826-2.37-2.37a1.724 1.724 0 00-1.065-2.572c-1.756-.426-1.756-2.924 0-3.35a1.724 1.724 0 001.066-2.573c-.94-1.543.826-3.31 2.37-2.37.996.608 2.296.07 2.572-1.065z",
                stroke_linecap: "round",
                stroke_linejoin: "round",
                stroke_width: "2",
            }
            path {
                d: "M15 12a3 3 0 11-6 0 3 3 0 016 0z",
                stroke_linecap: "round",
                stroke_linejoin: "round",
                stroke_width: "2",
            }
        }
    }
}
@@ -1,74 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::route::Route;
use dioxus::logger::tracing::debug;
use dioxus::prelude::*;

const NAVBAR_CSS: Asset = asset!("/assets/styling/navbar.css");

#[component]
pub fn Navbar() -> Element {
    rsx! {
        document::Link { rel: "stylesheet", href: NAVBAR_CSS }

        div { id: "navbar", class: "hidden", style: "display: none;",
            Link { to: Route::HomeViews {}, "Home" }
            Link { to: Route::SettingViews {}, "Setting" }
        }

        Outlet::<Route> {}
    }
}

#[derive(Props, PartialEq, Debug, Clone)]
pub struct LoadingSpinnerProps {
    #[props(default = true)]
    loading: bool,
    #[props(default = "Processing...")]
    text: &'static str,
}

#[component]
pub fn LoadingSpinner(props: LoadingSpinnerProps) -> Element {
    debug!("loading: {}", props.loading);
    if !props.loading {
        debug!("LoadingSpinner hidden, loading: {}", props.loading);
        return rsx! {};
    }
    rsx! {
        div { class: "flex items-center justify-center z-10",
            svg {
                class: "animate-spin h-5 w-5 text-blue-500",
                xmlns: "http://www.w3.org/2000/svg",
                fill: "none",
                view_box: "0 0 24 24",
                circle {
                    class: "opacity-25",
                    cx: "12",
                    cy: "12",
                    r: "10",
                    stroke: "currentColor",
                    stroke_width: "4",
                }
                path {
                    class: "opacity-75",
                    fill: "currentColor",
                    d: "M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z",
                }
            }
            span { class: "ml-2 text-gray-600", "{props.text}" }
        }
    }
}
@@ -1,216 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::components::navbar::LoadingSpinner;
use dioxus::logger::tracing::{debug, error};
use dioxus::prelude::*;

const SETTINGS_JS: Asset = asset!("/assets/js/sts.js");
const TAILWIND_CSS: Asset = asset!("/assets/tailwind.css");
#[component]
pub fn Setting() -> Element {
    use crate::utils::{RustFSConfig, ServiceManager};
    use document::{Meta, Script, Stylesheet, Title};

    #[allow(clippy::redundant_closure)]
    let service = use_signal(|| ServiceManager::new());
    let conf = RustFSConfig::load().unwrap_or_else(|e| {
        error!("load config error: {}", e);
        RustFSConfig::default_config()
    });
    debug!("conf address: {:?}", conf.address);

    let config = use_signal(|| conf.clone());
    let address_state = use_signal(|| conf.address.to_string());
    let mut host_state = use_signal(|| conf.host.to_string());
    let mut port_state = use_signal(|| conf.port.to_string());
    let mut access_key_state = use_signal(|| conf.access_key.to_string());
    let mut secret_key_state = use_signal(|| conf.secret_key.to_string());
    let mut volume_name_state = use_signal(|| conf.volume_name.to_string());
    let loading = use_signal(|| false);

    let save_and_restart = {
        let host_state = host_state;
        let port_state = port_state;
        let access_key_state = access_key_state;
        let secret_key_state = secret_key_state;
        let volume_name_state = volume_name_state;
        let mut loading = loading;
        debug!("save_and_restart access_key:{}", access_key_state.read());
        move |_| {
            // set the loading status
            loading.set(true);
            let mut config = config;
            config.write().address = format!("{}:{}", host_state.read(), port_state.read());
            config.write().host = host_state.read().to_string();
            config.write().port = port_state.read().to_string();
            config.write().access_key = access_key_state.read().to_string();
            config.write().secret_key = secret_key_state.read().to_string();
            config.write().volume_name = volume_name_state.read().to_string();
            // restart the service
            let service = service;
            let config = config.read().clone();
            spawn(async move {
                if let Err(e) = service.read().restart(config).await {
                    ServiceManager::show_error(&format!("发送重启命令失败:{e}")); // "Failed to send the restart command: {e}"
                }
                // reset the status when done
                loading.set(false);
            });
        }
    };

    rsx! {
        Title { "Settings - RustFS App" }
        Meta { name: "description", content: "Settings - RustFS App." }
        // The Stylesheet component inserts a style link into the head of the document
        Stylesheet { href: TAILWIND_CSS }
        Script { src: SETTINGS_JS }
        div { class: "bg-white p-8",
            h1 { class: "text-2xl font-semibold mb-6", "Settings" }
            div { class: "border-b border-gray-200 mb-6",
                nav { class: "flex space-x-8",
                    button {
                        class: "tab-btn px-1 py-4 text-sm font-medium border-b-2 border-black",
                        "data-tab": "service",
                        "onclick": "switchTab('service')",
                        "Service "
                    }
                    button {
                        class: "tab-btn px-1 py-4 text-sm font-medium text-gray-500 hover:text-gray-700",
                        "data-tab": "user",
                        "onclick": "switchTab('user')",
                        "User "
                    }
                    button {
                        class: "tab-btn px-1 py-4 text-sm font-medium text-gray-500 hover:text-gray-700 hidden",
                        "data-tab": "logs",
                        "onclick": "switchTab('logs')",
                        "Logs "
                    }
                }
            }
            div { id: "tabContent",
                div { class: "tab-content", id: "service",
                    div { class: "mb-8",
                        h2 { class: "text-base font-medium mb-2", "Service address" }
                        p { class: "text-gray-600 mb-4",
                            "The service address is the IP address and port number of the service. The default address is "
                            code { class: "bg-gray-100 px-1 py-0.5 rounded", {address_state} }
                            ". "
                        }
                        div { class: "flex space-x-2",
                            input {
                                class: "border rounded px-3 py-2 w-48 focus:outline-none focus:ring-2 focus:ring-blue-500",
                                r#type: "text",
                                value: host_state,
                                oninput: move |evt| host_state.set(evt.value().clone()),
                            }
                            span { class: "flex items-center", ":" }
                            input {
                                class: "border rounded px-3 py-2 w-20 focus:outline-none focus:ring-2 focus:ring-blue-500",
                                r#type: "text",
                                value: port_state,
                                oninput: move |evt| port_state.set(evt.value().clone()),
                            }
                        }
                    }
                    div { class: "mb-8",
                        h2 { class: "text-base font-medium mb-2", "Storage path" }
                        p { class: "text-gray-600 mb-4",
                            "Update the storage path of the service. The default path is {volume_name_state}."
                        }
                        input {
                            class: "border rounded px-3 py-2 w-full focus:outline-none focus:ring-2 focus:ring-blue-500",
                            r#type: "text",
                            value: volume_name_state,
                            oninput: move |evt| volume_name_state.set(evt.value().clone()),
                        }
                    }
                }
                div { class: "tab-content hidden", id: "user",
                    div { class: "mb-8",
                        h2 { class: "text-base font-medium mb-2", "User" }
                        p { class: "text-gray-600 mb-4",
                            "The user is the owner of the service. The default user is "
                            code { class: "bg-gray-100 px-1 py-0.5 rounded", {access_key_state} }
                        }
                        input {
                            class: "border rounded px-3 py-2 w-full focus:outline-none focus:ring-2 focus:ring-blue-500",
                            r#type: "text",
                            value: access_key_state,
                            oninput: move |evt| access_key_state.set(evt.value().clone()),
                        }
                    }
                    div { class: "mb-8",
                        h2 { class: "text-base font-medium mb-2", "Password" }
                        p { class: "text-gray-600 mb-4",
                            "The password authenticates the user. The default password is "
                            code { class: "bg-gray-100 px-1 py-0.5 rounded", {secret_key_state} }
                        }
                        div { class: "relative",
                            input {
                                class: "border rounded px-3 py-2 w-full pr-10 focus:outline-none focus:ring-2 focus:ring-blue-500",
                                r#type: "password",
                                value: secret_key_state,
                                oninput: move |evt| secret_key_state.set(evt.value().clone()),
                            }
                            button {
                                class: "absolute right-2 top-1/2 transform -translate-y-1/2 text-gray-500 hover:text-gray-700",
                                "onclick": "togglePassword(this)",
                                svg {
                                    class: "h-5 w-5",
                                    fill: "currentColor",
                                    view_box: "0 0 20 20",
                                    xmlns: "http://www.w3.org/2000/svg",
                                    path { d: "M10 12a2 2 0 100-4 2 2 0 000 4z" }
                                    path {
                                        clip_rule: "evenodd",
                                        d: "M.458 10C1.732 5.943 5.522 3 10 3s8.268 2.943 9.542 7c-1.274 4.057-5.064 7-9.542 7S1.732 14.057.458 10zM14 10a4 4 0 11-8 0 4 4 0 018 0z",
                                        fill_rule: "evenodd",
                                    }
                                }
                            }
                        }
                    }
                }
                div { class: "tab-content hidden", id: "logs",
                    div { class: "mb-8",
                        h2 { class: "text-base font-medium mb-2", "Logs storage path" }
                        p { class: "text-gray-600 mb-4",
                            "The logs storage path is where the logs are stored. The default path is /var/log/rustfs. "
                        }
                        input {
                            class: "border rounded px-3 py-2 w-full focus:outline-none focus:ring-2 focus:ring-blue-500",
                            r#type: "text",
                            value: "/var/logs/rustfs",
                        }
                    }
                }
            }
            div { class: "flex space-x-4",
                button {
                    class: "bg-[#111827] text-white px-4 py-2 rounded hover:bg-[#1f2937]",
                    onclick: save_and_restart,
                    " Save and restart "
                }
                GoBackButton { "Back" }
            }
            LoadingSpinner {
                loading: loading.read().to_owned(),
                text: "服务处理中...", // "Service processing..."
            }
        }
    }
}
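// Hedged aside (added for illustration; not part of the original diff): the
// restart path in `ServiceManager` later in this diff panics on a non-numeric
// port ("无效的端口号" / "Invalid port number"), so a view could validate the
// field before sending the command. `validate_port` is a hypothetical helper.
#[allow(dead_code)]
fn validate_port(raw: &str) -> Result<u16, String> {
    // Parsing into u16 already enforces the 0..=65535 range.
    raw.parse::<u16>().map_err(|_| format!("invalid port: {raw}"))
}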
@@ -1,23 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod components;
mod route;
mod utils;
mod views;

fn main() {
    let _worker_guard = utils::init_logger();
    dioxus::launch(views::App);
}
@@ -1,28 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::components::Navbar;
use crate::views::{HomeViews, SettingViews};
use dioxus::prelude::*;

/// The router for the application
#[derive(Debug, Clone, Routable, PartialEq)]
#[rustfmt::skip]
pub enum Route {
    #[layout(Navbar)]
    #[route("/")]
    HomeViews {},
    #[route("/settings")]
    SettingViews {},
}
@@ -1,564 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use keyring::Entry;
use serde::{Deserialize, Serialize};
use std::error::Error;

/// Configuration for the RustFS service
///
/// # Fields
/// * `address` - The address of the RustFS service
/// * `host` - The host of the RustFS service
/// * `port` - The port of the RustFS service
/// * `access_key` - The access key of the RustFS service
/// * `secret_key` - The secret key of the RustFS service
/// * `domain_name` - The domain name of the RustFS service
/// * `volume_name` - The volume name of the RustFS service
/// * `console_address` - The console address of the RustFS service
///
/// # Example
/// ```
/// let config = RustFSConfig {
///     address: "127.0.0.1:9000".to_string(),
///     host: "127.0.0.1".to_string(),
///     port: "9000".to_string(),
///     access_key: "rustfsadmin".to_string(),
///     secret_key: "rustfsadmin".to_string(),
///     domain_name: "demo.rustfs.com".to_string(),
///     volume_name: "data".to_string(),
///     console_address: "127.0.0.1:9001".to_string(),
/// };
/// println!("{:?}", config);
/// assert_eq!(config.address, "127.0.0.1:9000");
/// ```
#[derive(Debug, Clone, Default, Deserialize, Serialize, Ord, PartialOrd, Eq, PartialEq)]
pub struct RustFSConfig {
    pub address: String,
    pub host: String,
    pub port: String,
    pub access_key: String,
    pub secret_key: String,
    pub domain_name: String,
    pub volume_name: String,
    pub console_address: String,
}

impl RustFSConfig {
    /// The keyring service name
    const SERVICE_NAME: &'static str = "rustfs-service";
    /// The keyring entry key
    const SERVICE_KEY: &'static str = "rustfs_key";
    /// Default domain name
    const DEFAULT_DOMAIN_NAME_VALUE: &'static str = "demo.rustfs.com";
    /// Default address value
    const DEFAULT_ADDRESS_VALUE: &'static str = "127.0.0.1:9000";
    /// Default port value
    const DEFAULT_PORT_VALUE: &'static str = "9000";
    /// Default host value
    const DEFAULT_HOST_VALUE: &'static str = "127.0.0.1";
    /// Default access key value
    const DEFAULT_ACCESS_KEY_VALUE: &'static str = "rustfsadmin";
    /// Default secret key value
    const DEFAULT_SECRET_KEY_VALUE: &'static str = "rustfsadmin";
    /// Default console address value
    const DEFAULT_CONSOLE_ADDRESS_VALUE: &'static str = "127.0.0.1:9001";

    /// Get the default volume name
    ///
    /// # Returns
    /// * The default volume name
    ///
    /// # Example
    /// ```
    /// let volume_name = RustFSConfig::default_volume_name();
    /// ```
    pub fn default_volume_name() -> String {
        dirs::home_dir()
            .map(|home| home.join("rustfs").join("data"))
            .and_then(|path| path.to_str().map(String::from))
            .unwrap_or_else(|| "data".to_string())
    }

    /// Create a default configuration
    ///
    /// # Returns
    /// * The default configuration
    ///
    /// # Example
    /// ```
    /// let config = RustFSConfig::default_config();
    /// println!("{:?}", config);
    /// assert_eq!(config.address, "127.0.0.1:9000");
    /// ```
    pub fn default_config() -> Self {
        Self {
            address: Self::DEFAULT_ADDRESS_VALUE.to_string(),
            host: Self::DEFAULT_HOST_VALUE.to_string(),
            port: Self::DEFAULT_PORT_VALUE.to_string(),
            access_key: Self::DEFAULT_ACCESS_KEY_VALUE.to_string(),
            secret_key: Self::DEFAULT_SECRET_KEY_VALUE.to_string(),
            domain_name: Self::DEFAULT_DOMAIN_NAME_VALUE.to_string(),
            volume_name: Self::default_volume_name(),
            console_address: Self::DEFAULT_CONSOLE_ADDRESS_VALUE.to_string(),
        }
    }

    /// Load the configuration from the keyring
    ///
    /// # Errors
    /// * If the configuration cannot be loaded from the keyring
    /// * If the configuration cannot be deserialized
    /// * If the address cannot be extracted from the configuration
    ///
    /// # Example
    /// ```
    /// let config = RustFSConfig::load().unwrap();
    /// println!("{:?}", config);
    /// assert_eq!(config.address, "127.0.0.1:9000");
    /// ```
    pub fn load() -> Result<Self, Box<dyn Error>> {
        let mut config = Self::default_config();

        // Try to read the stored configuration from the keyring
        let entry = Entry::new(Self::SERVICE_NAME, Self::SERVICE_KEY)?;
        if let Ok(stored_json) = entry.get_password() {
            if let Ok(stored_config) = serde_json::from_str::<RustFSConfig>(&stored_json) {
                // Update fields that are non-empty and non-default
                if !stored_config.address.is_empty() && stored_config.address != Self::DEFAULT_ADDRESS_VALUE {
                    config.address = stored_config.address;
                    let (host, port) = Self::extract_host_port(config.address.as_str())
                        .ok_or_else(|| format!("无法从地址 '{}' 中提取主机和端口", config.address))?; // "Cannot extract host and port from address '{}'"
                    config.host = host.to_string();
                    config.port = port.to_string();
                }
                if !stored_config.access_key.is_empty() && stored_config.access_key != Self::DEFAULT_ACCESS_KEY_VALUE {
                    config.access_key = stored_config.access_key;
                }
                if !stored_config.secret_key.is_empty() && stored_config.secret_key != Self::DEFAULT_SECRET_KEY_VALUE {
                    config.secret_key = stored_config.secret_key;
                }
                if !stored_config.domain_name.is_empty() && stored_config.domain_name != Self::DEFAULT_DOMAIN_NAME_VALUE {
                    config.domain_name = stored_config.domain_name;
                }
                // The stored volume_name is used only if it is non-empty and different from the default
                if !stored_config.volume_name.is_empty() && stored_config.volume_name != Self::default_volume_name() {
                    config.volume_name = stored_config.volume_name;
                }
                if !stored_config.console_address.is_empty()
                    && stored_config.console_address != Self::DEFAULT_CONSOLE_ADDRESS_VALUE
                {
                    config.console_address = stored_config.console_address;
                }
            }
        }

        Ok(config)
    }

    /// Helper method: extract the host and port from an address string
    /// # Arguments
    /// * `address` - The address string
    ///
    /// # Returns
    /// * `Some((host, port))` - The host and port
    ///
    /// # Errors
    /// * If the address is not in the form 'host:port'
    /// * If the port is not a valid u16
    ///
    /// # Example
    /// ```
    /// let (host, port) = RustFSConfig::extract_host_port("127.0.0.1:9000").unwrap();
    /// assert_eq!(host, "127.0.0.1");
    /// assert_eq!(port, 9000);
    /// ```
    pub fn extract_host_port(address: &str) -> Option<(&str, u16)> {
        let parts: Vec<&str> = address.split(':').collect();
        if parts.len() == 2 {
            if let Ok(port) = parts[1].parse::<u16>() {
                return Some((parts[0], port));
            }
        }
        None
    }

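    /// Hedged sketch (added for illustration; not in the original file): the
    /// splitter above requires exactly one ':', so IPv6 literals such as
    /// "[::1]:9000" come back as `None`. Delegating to the standard library's
    /// `SocketAddr` parser would accept both IPv4 and bracketed IPv6 forms,
    /// though unlike the splitter it rejects host names like "localhost".
    #[allow(dead_code)]
    fn extract_host_port_v6(address: &str) -> Option<(String, u16)> {
        use std::net::SocketAddr;
        // "127.0.0.1:9000" and "[::1]:9000" both parse; "localhost:8080" does not.
        address.parse::<SocketAddr>().ok().map(|sa| (sa.ip().to_string(), sa.port()))
    }
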
    /// Save the configuration to the keyring
    ///
    /// # Errors
    /// * If the configuration cannot be serialized
    /// * If the configuration cannot be saved to the keyring
    ///
    /// # Example
    /// ```
    /// let config = RustFSConfig::default_config();
    /// config.save().unwrap();
    /// ```
    pub fn save(&self) -> Result<(), Box<dyn Error>> {
        let entry = Entry::new(Self::SERVICE_NAME, Self::SERVICE_KEY)?;
        let json = serde_json::to_string(self)?;
        entry.set_password(&json)?;
        Ok(())
    }

    /// Clear the stored configuration from the system keyring
    ///
    /// # Returns
    /// `Ok(())` if the configuration was successfully cleared, or an error if the operation failed.
    ///
    /// # Example
    /// ```
    /// RustFSConfig::clear().unwrap();
    /// ```
    #[allow(dead_code)]
    pub fn clear() -> Result<(), Box<dyn Error>> {
        let entry = Entry::new(Self::SERVICE_NAME, Self::SERVICE_KEY)?;
        entry.delete_credential()?;
        Ok(())
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_rustfs_config_default() {
        let config = RustFSConfig::default();
        assert!(config.address.is_empty());
        assert!(config.host.is_empty());
        assert!(config.port.is_empty());
        assert!(config.access_key.is_empty());
        assert!(config.secret_key.is_empty());
        assert!(config.domain_name.is_empty());
        assert!(config.volume_name.is_empty());
        assert!(config.console_address.is_empty());
    }

    #[test]
    fn test_rustfs_config_creation() {
        let config = RustFSConfig {
            address: "192.168.1.100:9000".to_string(),
            host: "192.168.1.100".to_string(),
            port: "9000".to_string(),
            access_key: "testuser".to_string(),
            secret_key: "testpass".to_string(),
            domain_name: "test.rustfs.com".to_string(),
            volume_name: "/data/rustfs".to_string(),
            console_address: "192.168.1.100:9001".to_string(),
        };

        assert_eq!(config.address, "192.168.1.100:9000");
        assert_eq!(config.host, "192.168.1.100");
        assert_eq!(config.port, "9000");
        assert_eq!(config.access_key, "testuser");
        assert_eq!(config.secret_key, "testpass");
        assert_eq!(config.domain_name, "test.rustfs.com");
        assert_eq!(config.volume_name, "/data/rustfs");
        assert_eq!(config.console_address, "192.168.1.100:9001");
    }

    #[test]
    fn test_default_volume_name() {
        let volume_name = RustFSConfig::default_volume_name();
        assert!(!volume_name.is_empty());
        // Should either be the home directory path or fall back to "data"
        assert!(volume_name.contains("rustfs") || volume_name == "data");
    }

    #[test]
    fn test_default_config() {
        let config = RustFSConfig::default_config();
        assert_eq!(config.address, RustFSConfig::DEFAULT_ADDRESS_VALUE);
        assert_eq!(config.host, RustFSConfig::DEFAULT_HOST_VALUE);
        assert_eq!(config.port, RustFSConfig::DEFAULT_PORT_VALUE);
        assert_eq!(config.access_key, RustFSConfig::DEFAULT_ACCESS_KEY_VALUE);
        assert_eq!(config.secret_key, RustFSConfig::DEFAULT_SECRET_KEY_VALUE);
        assert_eq!(config.domain_name, RustFSConfig::DEFAULT_DOMAIN_NAME_VALUE);
        assert_eq!(config.console_address, RustFSConfig::DEFAULT_CONSOLE_ADDRESS_VALUE);
        assert!(!config.volume_name.is_empty());
    }

    #[test]
    fn test_extract_host_port_valid() {
        let test_cases = vec![
            ("127.0.0.1:9000", Some(("127.0.0.1", 9000))),
            ("localhost:8080", Some(("localhost", 8080))),
            ("192.168.1.100:3000", Some(("192.168.1.100", 3000))),
            ("0.0.0.0:80", Some(("0.0.0.0", 80))),
            ("example.com:443", Some(("example.com", 443))),
        ];

        for (input, expected) in test_cases {
            let result = RustFSConfig::extract_host_port(input);
            assert_eq!(result, expected, "Failed for input: {input}");
        }
    }

    #[test]
    fn test_extract_host_port_invalid() {
        let invalid_cases = vec![
            "127.0.0.1",            // Missing port
            "127.0.0.1:",           // Empty port
            "127.0.0.1:abc",        // Invalid port
            "127.0.0.1:99999",      // Port out of range
            "",                     // Empty string
            "127.0.0.1:9000:extra", // Too many parts
            "invalid",              // No colon
        ];

        for input in invalid_cases {
            let result = RustFSConfig::extract_host_port(input);
            assert_eq!(result, None, "Should be None for input: {input}");
        }

        // Special case: empty host but valid port should still work
        let result = RustFSConfig::extract_host_port(":9000");
        assert_eq!(result, Some(("", 9000)));
    }

    #[test]
    fn test_extract_host_port_edge_cases() {
        // Test edge cases for port numbers
        assert_eq!(RustFSConfig::extract_host_port("host:0"), Some(("host", 0)));
        assert_eq!(RustFSConfig::extract_host_port("host:65535"), Some(("host", 65535)));
        assert_eq!(RustFSConfig::extract_host_port("host:65536"), None); // Out of range
    }

    #[test]
    fn test_serialization() {
        let config = RustFSConfig {
            address: "127.0.0.1:9000".to_string(),
            host: "127.0.0.1".to_string(),
            port: "9000".to_string(),
            access_key: "admin".to_string(),
            secret_key: "password".to_string(),
            domain_name: "test.com".to_string(),
            volume_name: "/data".to_string(),
            console_address: "127.0.0.1:9001".to_string(),
        };

        let json = serde_json::to_string(&config).unwrap();
        assert!(json.contains("127.0.0.1:9000"));
        assert!(json.contains("admin"));
        assert!(json.contains("test.com"));
    }

    #[test]
    fn test_deserialization() {
        let json = r#"{
            "address": "192.168.1.100:9000",
            "host": "192.168.1.100",
            "port": "9000",
            "access_key": "testuser",
            "secret_key": "testpass",
            "domain_name": "example.com",
            "volume_name": "/opt/data",
            "console_address": "192.168.1.100:9001"
        }"#;

        let config: RustFSConfig = serde_json::from_str(json).unwrap();
        assert_eq!(config.address, "192.168.1.100:9000");
        assert_eq!(config.host, "192.168.1.100");
        assert_eq!(config.port, "9000");
        assert_eq!(config.access_key, "testuser");
        assert_eq!(config.secret_key, "testpass");
        assert_eq!(config.domain_name, "example.com");
        assert_eq!(config.volume_name, "/opt/data");
        assert_eq!(config.console_address, "192.168.1.100:9001");
    }

    #[test]
    fn test_serialization_deserialization_roundtrip() {
        let original_config = RustFSConfig {
            address: "10.0.0.1:8080".to_string(),
            host: "10.0.0.1".to_string(),
            port: "8080".to_string(),
            access_key: "roundtrip_user".to_string(),
            secret_key: "roundtrip_pass".to_string(),
            domain_name: "roundtrip.test".to_string(),
            volume_name: "/tmp/roundtrip".to_string(),
            console_address: "10.0.0.1:8081".to_string(),
        };

        let json = serde_json::to_string(&original_config).unwrap();
        let deserialized_config: RustFSConfig = serde_json::from_str(&json).unwrap();

        assert_eq!(original_config, deserialized_config);
    }

    #[test]
    fn test_config_ordering() {
        let config1 = RustFSConfig {
            address: "127.0.0.1:9000".to_string(),
            host: "127.0.0.1".to_string(),
            port: "9000".to_string(),
            access_key: "admin".to_string(),
            secret_key: "password".to_string(),
            domain_name: "test.com".to_string(),
            volume_name: "/data".to_string(),
            console_address: "127.0.0.1:9001".to_string(),
        };

        let config2 = RustFSConfig {
            address: "127.0.0.1:9000".to_string(),
            host: "127.0.0.1".to_string(),
            port: "9000".to_string(),
            access_key: "admin".to_string(),
            secret_key: "password".to_string(),
            domain_name: "test.com".to_string(),
            volume_name: "/data".to_string(),
            console_address: "127.0.0.1:9001".to_string(),
        };

        let config3 = RustFSConfig {
            address: "127.0.0.1:9001".to_string(), // Different port
            host: "127.0.0.1".to_string(),
            port: "9001".to_string(),
            access_key: "admin".to_string(),
            secret_key: "password".to_string(),
            domain_name: "test.com".to_string(),
            volume_name: "/data".to_string(),
            console_address: "127.0.0.1:9002".to_string(),
        };

        assert_eq!(config1, config2);
        assert_ne!(config1, config3);
        assert!(config1 < config3); // Lexicographic ordering
    }

    #[test]
    fn test_clone() {
        let original = RustFSConfig::default_config();
        let cloned = original.clone();

        assert_eq!(original, cloned);
        assert_eq!(original.address, cloned.address);
        assert_eq!(original.access_key, cloned.access_key);
    }

    #[test]
    fn test_debug_format() {
        let config = RustFSConfig::default_config();
        let debug_str = format!("{config:?}");

        assert!(debug_str.contains("RustFSConfig"));
        assert!(debug_str.contains("address"));
        assert!(debug_str.contains("127.0.0.1:9000"));
    }

    #[test]
    fn test_constants() {
        assert_eq!(RustFSConfig::SERVICE_NAME, "rustfs-service");
        assert_eq!(RustFSConfig::SERVICE_KEY, "rustfs_key");
        assert_eq!(RustFSConfig::DEFAULT_DOMAIN_NAME_VALUE, "demo.rustfs.com");
        assert_eq!(RustFSConfig::DEFAULT_ADDRESS_VALUE, "127.0.0.1:9000");
        assert_eq!(RustFSConfig::DEFAULT_PORT_VALUE, "9000");
        assert_eq!(RustFSConfig::DEFAULT_HOST_VALUE, "127.0.0.1");
        assert_eq!(RustFSConfig::DEFAULT_ACCESS_KEY_VALUE, "rustfsadmin");
        assert_eq!(RustFSConfig::DEFAULT_SECRET_KEY_VALUE, "rustfsadmin");
        assert_eq!(RustFSConfig::DEFAULT_CONSOLE_ADDRESS_VALUE, "127.0.0.1:9001");
    }

    #[test]
    fn test_empty_strings() {
        let config = RustFSConfig {
            address: "".to_string(),
            host: "".to_string(),
            port: "".to_string(),
            access_key: "".to_string(),
            secret_key: "".to_string(),
            domain_name: "".to_string(),
            volume_name: "".to_string(),
            console_address: "".to_string(),
        };

        assert!(config.address.is_empty());
        assert!(config.host.is_empty());
        assert!(config.port.is_empty());
        assert!(config.access_key.is_empty());
        assert!(config.secret_key.is_empty());
        assert!(config.domain_name.is_empty());
        assert!(config.volume_name.is_empty());
        assert!(config.console_address.is_empty());
    }

    #[test]
    fn test_very_long_strings() {
        let long_string = "a".repeat(1000);
        let config = RustFSConfig {
            address: format!("{long_string}:9000"),
            host: long_string.clone(),
            port: "9000".to_string(),
            access_key: long_string.clone(),
            secret_key: long_string.clone(),
            domain_name: format!("{long_string}.com"),
            volume_name: format!("/data/{long_string}"),
            console_address: format!("{long_string}:9001"),
        };

        assert_eq!(config.host.len(), 1000);
        assert_eq!(config.access_key.len(), 1000);
        assert_eq!(config.secret_key.len(), 1000);
    }

    #[test]
    fn test_special_characters() {
        let config = RustFSConfig {
            address: "127.0.0.1:9000".to_string(),
            host: "127.0.0.1".to_string(),
            port: "9000".to_string(),
            access_key: "user@domain.com".to_string(),
            secret_key: "p@ssw0rd!#$%".to_string(),
            domain_name: "test-domain.example.com".to_string(),
            volume_name: "/data/rust-fs/storage".to_string(),
            console_address: "127.0.0.1:9001".to_string(),
        };

        assert!(config.access_key.contains("@"));
        assert!(config.secret_key.contains("!#$%"));
        assert!(config.domain_name.contains("-"));
        assert!(config.volume_name.contains("/"));
    }

    #[test]
    fn test_unicode_strings() {
        let config = RustFSConfig {
            address: "127.0.0.1:9000".to_string(),
            host: "127.0.0.1".to_string(),
            port: "9000".to_string(),
            access_key: "用户名".to_string(),      // "username"
            secret_key: "密码 123".to_string(),    // "password 123"
            domain_name: "测试.com".to_string(),   // "test.com"
            volume_name: "/数据/存储".to_string(), // "/data/storage"
            console_address: "127.0.0.1:9001".to_string(),
        };

        assert_eq!(config.access_key, "用户名");
        assert_eq!(config.secret_key, "密码 123");
        assert_eq!(config.domain_name, "测试.com");
        assert_eq!(config.volume_name, "/数据/存储");
    }

    #[test]
    fn test_memory_efficiency() {
        // Test that the structure doesn't use excessive memory
        assert!(std::mem::size_of::<RustFSConfig>() < 1000);
    }

    // Note: Keyring-related tests (load, save, clear) are not included here
    // because they require actual keyring access and would be integration tests
    // rather than unit tests. They should be tested separately in an integration
    // test environment where keyring access can be properly mocked or controlled.
}
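// Hedged integration-test sketch for the keyring round trip mentioned in the
// note above (added for illustration; not in the original file). It is
// `#[ignore]`d so it only runs when a real OS keyring is available, and it uses
// a throwaway service name so it cannot clobber the app's stored configuration.
// Only `Entry` calls already used above appear here.
#[cfg(test)]
mod keyring_integration_sketch {
    use keyring::Entry;

    #[test]
    #[ignore = "requires OS keyring access"]
    fn save_load_clear_roundtrip() {
        let entry = Entry::new("rustfs-service-test", "rustfs_key_test").unwrap();
        entry.set_password("{\"probe\":true}").unwrap();
        assert_eq!(entry.get_password().unwrap(), "{\"probe\":true}");
        entry.delete_credential().unwrap();
        // After deletion the credential should no longer be readable.
        assert!(entry.get_password().is_err());
    }
}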
@@ -1,899 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::utils::RustFSConfig;
use dioxus::logger::tracing::{debug, error, info};
use rust_embed::RustEmbed;
use sha2::{Digest, Sha256};
use std::error::Error;
use std::path::{Path, PathBuf};
use std::process::Command as StdCommand;
use std::sync::LazyLock;
use std::time::Duration;
use tokio::fs;
use tokio::fs::File;
use tokio::io::AsyncWriteExt;
use tokio::net::TcpStream;
use tokio::sync::{Mutex, mpsc};

#[derive(RustEmbed)]
#[folder = "$CARGO_MANIFEST_DIR/embedded-rustfs/"]
struct Asset;

// Use `LazyLock` to cache the checksum of embedded resources
static RUSTFS_HASH: LazyLock<Mutex<String>> = LazyLock::new(|| {
    let rustfs_file = if cfg!(windows) { "rustfs.exe" } else { "rustfs" };
    let rustfs_data = Asset::get(rustfs_file).expect("RustFS binary not embedded");
    let hash = hex::encode(Sha256::digest(&rustfs_data.data));
    Mutex::new(hash)
});

/// Service command
/// This enum represents the commands that can be sent to the service manager
/// to start, stop, or restart the service.
/// The `Start` and `Restart` variants carry the configuration for the service.
///
/// # Example
/// ```
/// let config = RustFSConfig {
///     address: "127.0.0.1:9000".to_string(),
///     host: "127.0.0.1".to_string(),
///     port: "9000".to_string(),
///     access_key: "rustfsadmin".to_string(),
///     secret_key: "rustfsadmin".to_string(),
///     domain_name: "demo.rustfs.com".to_string(),
///     volume_name: "data".to_string(),
///     console_address: "127.0.0.1:9001".to_string(),
/// };
///
/// let command = ServiceCommand::Start(config);
/// assert!(matches!(command, ServiceCommand::Start(_)));
/// ```
pub enum ServiceCommand {
    Start(RustFSConfig),
    Stop,
    Restart(RustFSConfig),
}

/// Service operation result
/// This struct represents the result of a service operation.
/// It records whether the operation succeeded, when it started and ended, and a message.
///
/// # Example
/// ```
/// use chrono::Local;
///
/// let result = ServiceOperationResult {
///     success: true,
///     start_time: chrono::Local::now(),
///     end_time: chrono::Local::now(),
///     message: "服务启动成功".to_string(), // "Service started successfully"
/// };
///
/// println!("{:?}", result);
/// assert_eq!(result.success, true);
/// ```
#[derive(Debug)]
pub struct ServiceOperationResult {
    pub success: bool,
    pub start_time: chrono::DateTime<chrono::Local>,
    pub end_time: chrono::DateTime<chrono::Local>,
    pub message: String,
}

/// Service manager
/// This struct represents a service manager that can be used to start, stop, or restart a service.
/// It contains a command sender that is used to send commands to the control loop.
///
/// # Example
/// ```
/// let service_manager = ServiceManager::new();
/// println!("{:?}", service_manager);
/// ```
#[derive(Debug, Clone)]
pub struct ServiceManager {
    command_tx: mpsc::Sender<ServiceCommand>,
    // process: Arc<Mutex<Option<Child>>>,
    // pid: Arc<Mutex<Option<u32>>>, // Add PID storage
    // current_config: Arc<Mutex<Option<RustFSConfig>>>, // Add configuration storage
}

impl ServiceManager {
    /// Check if the service is running and return its PID.
    /// This function is platform dependent:
    /// on Unix systems it uses the `ps` command to look for the service,
    /// on Windows systems it uses the `wmic` command.
    ///
    /// # Example
    /// ```
    /// let pid = check_service_status().await;
    /// println!("{:?}", pid);
    /// ```
    pub async fn check_service_status() -> Option<u32> {
        #[cfg(unix)]
        {
            // use the ps command on a unix system
            if let Ok(output) = StdCommand::new("ps").arg("-ef").output() {
                let output_str = String::from_utf8_lossy(&output.stdout);
                for line in output_str.lines() {
                    // match lines containing `rustfs/bin/rustfs`
                    if line.contains("rustfs/bin/rustfs") && !line.contains("grep") {
                        if let Some(pid_str) = line.split_whitespace().nth(1) {
                            if let Ok(pid) = pid_str.parse::<u32>() {
                                return Some(pid);
                            }
                        }
                    }
                }
            }
        }

        #[cfg(windows)]
        {
            if let Ok(output) = StdCommand::new("wmic")
                .arg("process")
                .arg("where")
                .arg("caption='rustfs.exe'")
                .arg("get")
                .arg("processid")
                .output()
            {
                let output_str = String::from_utf8_lossy(&output.stdout);
                for line in output_str.lines() {
                    if let Ok(pid) = line.trim().parse::<u32>() {
                        return Some(pid);
                    }
                }
            }
        }

        None
    }

    /// Prepare the service
    /// This function extracts the embedded service executable if it is missing
    /// or its cached checksum is stale, and creates the directories the service needs.
    ///
    /// # Example
    /// ```
    /// let executable_path = prepare_service().await;
    /// println!("{:?}", executable_path);
    /// ```
    async fn prepare_service() -> Result<PathBuf, Box<dyn Error>> {
        // get the user home directory
        let home_dir = dirs::home_dir().ok_or("无法获取用户目录")?; // "Cannot get the user home directory"
        let rustfs_dir = home_dir.join("rustfs");
        let bin_dir = rustfs_dir.join("bin");
        let data_dir = rustfs_dir.join("data");
        let logs_dir = rustfs_dir.join("logs");

        // create the necessary directories
        for dir in [&bin_dir, &data_dir, &logs_dir] {
            if !dir.exists() {
                tokio::fs::create_dir_all(dir).await?;
            }
        }

        let rustfs_file = if cfg!(windows) { "rustfs.exe" } else { "rustfs" };
        let executable_path = bin_dir.join(rustfs_file);
        let hash_path = bin_dir.join("embedded_rustfs.sha256");

        if executable_path.exists() && hash_path.exists() {
            let cached_hash = fs::read_to_string(&hash_path).await?;
            let expected_hash = RUSTFS_HASH.lock().await;
            if cached_hash == *expected_hash {
                println!("Use cached rustfs: {executable_path:?}");
                return Ok(executable_path);
            }
        }

        // Extract and write the embedded binary
        let rustfs_data = Asset::get(rustfs_file).expect("RustFS binary not embedded");
        let mut file = File::create(&executable_path).await?;
        file.write_all(&rustfs_data.data).await?;
        let expected_hash = hex::encode(Sha256::digest(&rustfs_data.data));
        fs::write(&hash_path, expected_hash).await?;

        // set execute permissions on unix systems
        #[cfg(unix)]
        {
            use std::os::unix::fs::PermissionsExt;
            let mut perms = std::fs::metadata(&executable_path)?.permissions();
            perms.set_mode(0o755);
            std::fs::set_permissions(&executable_path, perms)?;
        }

        Ok(executable_path)
    }

    /// Helper function: extracts the port from an address string
    ///
    /// # Example
    /// ```
    /// let address = "127.0.0.1:9000";
    /// let port = extract_port(address);
    /// println!("{:?}", port);
    /// ```
    fn extract_port(address: &str) -> Option<u16> {
        address.split(':').nth(1)?.parse().ok()
    }

    /// Create a new instance of the service manager
    ///
    /// # Example
    /// ```
    /// let service_manager = ServiceManager::new();
    /// println!("{:?}", service_manager);
    /// ```
    pub(crate) fn new() -> Self {
        let (command_tx, mut command_rx) = mpsc::channel(10);
        // Start the control loop
        tokio::spawn(async move {
            while let Some(cmd) = command_rx.recv().await {
                match cmd {
                    ServiceCommand::Start(config) => {
                        if let Err(e) = Self::start_service(&config).await {
                            Self::show_error(&format!("启动服务失败:{e}")); // "Failed to start the service: {e}"
                        }
                    }
                    ServiceCommand::Stop => {
                        if let Err(e) = Self::stop_service().await {
                            Self::show_error(&format!("停止服务失败:{e}")); // "Failed to stop the service: {e}"
                        }
                    }
                    ServiceCommand::Restart(config) => {
                        if Self::check_service_status().await.is_some() {
                            if let Err(e) = Self::stop_service().await {
                                Self::show_error(&format!("重启服务失败:{e}")); // "Failed to restart the service: {e}"
                                continue;
                            }
                        }
                        if let Err(e) = Self::start_service(&config).await {
                            Self::show_error(&format!("重启服务失败:{e}")); // "Failed to restart the service: {e}"
                        }
                    }
                }
            }
        });

        ServiceManager { command_tx }
    }

    /// Start the service
    /// This function starts the service with the given configuration
    ///
    /// # Example
    /// ```
    /// let config = RustFSConfig {
    ///     address: "127.0.0.1:9000".to_string(),
    ///     host: "127.0.0.1".to_string(),
    ///     port: "9000".to_string(),
    ///     access_key: "rustfsadmin".to_string(),
    ///     secret_key: "rustfsadmin".to_string(),
    ///     domain_name: "demo.rustfs.com".to_string(),
    ///     volume_name: "data".to_string(),
    ///     console_address: "127.0.0.1:9001".to_string(),
    /// };
    ///
    /// let result = start_service(&config).await;
    /// println!("{:?}", result);
    /// ```
    async fn start_service(config: &RustFSConfig) -> Result<(), Box<dyn Error>> {
        // Check if the service is already running
        if let Some(existing_pid) = Self::check_service_status().await {
            return Err(format!("服务已经在运行,PID: {existing_pid}").into()); // "The service is already running, PID: {existing_pid}"
        }

        // Prepare the service program
        let executable_path = Self::prepare_service().await?;
        // Check the data directory
        let volume_name_path = Path::new(&config.volume_name);
        if !volume_name_path.exists() {
            tokio::fs::create_dir_all(&config.volume_name).await?;
        }

        // Extract the ports from the configuration
        let main_port = Self::extract_port(&config.address).ok_or("无法解析主服务端口")?; // "Cannot parse the main service port"
        let console_port = Self::extract_port(&config.console_address).ok_or("无法解析控制台端口")?; // "Cannot parse the console port"

        let host = config.address.split(':').next().ok_or("无法解析主机地址")?; // "Cannot parse the host address"

        // Check the ports
        let ports = vec![main_port, console_port];
        for port in ports {
            if Self::is_port_in_use(host, port).await {
                return Err(format!("端口 {port} 已被占用").into()); // "Port {port} is already in use"
            }
        }

        // Start the service
        let mut child = tokio::process::Command::new(executable_path)
            .arg("--address")
            .arg(&config.address)
            .arg("--access-key")
            .arg(&config.access_key)
            .arg("--secret-key")
            .arg(&config.secret_key)
            .arg("--console-address")
            .arg(&config.console_address)
            .arg(config.volume_name.clone())
            .spawn()?;

        let process_pid = child.id().unwrap();
        // Wait for the service to start
        tokio::time::sleep(Duration::from_secs(2)).await;

        // Check if the service started successfully
        if Self::is_port_in_use(host, main_port).await {
            Self::show_info(&format!("服务启动成功!进程 ID: {process_pid}")); // "Service started successfully! Process ID: {process_pid}"

            Ok(())
        } else {
            child.kill().await?;
            Err("服务启动失败".into()) // "The service failed to start"
        }
    }

    /// Stop the service
    /// This function stops the service
    ///
    /// # Example
    /// ```
    /// let result = stop_service().await;
    /// println!("{:?}", result);
    /// ```
    async fn stop_service() -> Result<(), Box<dyn Error>> {
        let existing_pid = Self::check_service_status().await;
        debug!("existing_pid: {:?}", existing_pid);
        if let Some(service_pid) = existing_pid {
            // Attempt to terminate the process
            #[cfg(unix)]
            {
                StdCommand::new("kill").arg("-9").arg(service_pid.to_string()).output()?;
            }

            #[cfg(windows)]
            {
                StdCommand::new("taskkill")
                    .arg("/F")
                    .arg("/PID")
                    .arg(service_pid.to_string())
                    .output()?;
            }

            // Verify that the service is indeed stopped
            tokio::time::sleep(Duration::from_secs(1)).await;
            if Self::check_service_status().await.is_some() {
                return Err("服务停止失败".into()); // "The service failed to stop"
            }
            Self::show_info("服务已成功停止"); // "The service stopped successfully"

            Ok(())
        } else {
            Err("服务未运行".into()) // "The service is not running"
        }
    }

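    /// Hedged aside (added for illustration; not in the original file): the
    /// `kill -9` above gives RustFS no chance to flush state. A gentler variant
    /// could send a plain SIGTERM first and only force-kill afterwards.
    /// `terminate_gracefully` is a hypothetical helper.
    #[cfg(unix)]
    #[allow(dead_code)]
    fn terminate_gracefully(pid: u32) -> std::io::Result<()> {
        // Ask politely first (the default signal for `kill` is SIGTERM)...
        StdCommand::new("kill").arg(pid.to_string()).output()?;
        std::thread::sleep(Duration::from_secs(2));
        // ...then force-kill; this is harmless if the process already exited.
        StdCommand::new("kill").arg("-9").arg(pid.to_string()).output()?;
        Ok(())
    }
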
    /// Check if the port is in use
    /// This function checks if the given port is in use on the given host
    ///
    /// # Example
    /// ```
    /// let host = "127.0.0.1";
    /// let port = 9000;
    /// let result = is_port_in_use(host, port).await;
    /// println!("{:?}", result);
    /// ```
    async fn is_port_in_use(host: &str, port: u16) -> bool {
        TcpStream::connect(format!("{host}:{port}")).await.is_ok()
    }

    /// Show an error message
    /// This function shows an error message dialog
    ///
    /// # Example
    /// ```
    /// show_error("This is an error message");
    /// ```
    pub(crate) fn show_error(message: &str) {
        rfd::MessageDialog::new()
            .set_title("错误") // "Error"
            .set_description(message)
            .set_level(rfd::MessageLevel::Error)
            .show();
    }

    /// Show an information message
    /// This function shows an information message dialog
    ///
    /// # Example
    /// ```
    /// show_info("This is an information message");
    /// ```
    pub(crate) fn show_info(message: &str) {
        rfd::MessageDialog::new()
            .set_title("成功") // "Success"
            .set_description(message)
            .set_level(rfd::MessageLevel::Info)
            .show();
    }

    /// Start the service
    /// This function sends a `Start` command to the service manager
    ///
    /// # Example
    /// ```
    /// let config = RustFSConfig {
    ///     address: "127.0.0.1:9000".to_string(),
    ///     host: "127.0.0.1".to_string(),
    ///     port: "9000".to_string(),
    ///     access_key: "rustfsadmin".to_string(),
    ///     secret_key: "rustfsadmin".to_string(),
    ///     domain_name: "demo.rustfs.com".to_string(),
    ///     volume_name: "data".to_string(),
    ///     console_address: "127.0.0.1:9001".to_string(),
    /// };
    ///
    /// let service_manager = ServiceManager::new();
    /// let result = service_manager.start(config).await;
    /// println!("{:?}", result);
    /// ```
    ///
    /// # Errors
    /// This function returns an error if the service fails to start
    ///
    /// # Panics
    /// This function panics if the port number is invalid
    ///
    /// # Safety
    /// This function is not marked as unsafe
    ///
    /// # Performance
    /// This function is not optimized for performance
    ///
    /// # Design
    /// This function is designed to be simple and easy to use
    ///
    /// # Security
    /// This function does not have any security implications
    pub async fn start(&self, config: RustFSConfig) -> Result<ServiceOperationResult, Box<dyn Error>> {
        let start_time = chrono::Local::now();
        self.command_tx.send(ServiceCommand::Start(config.clone())).await?;

        let host = &config.host;
        let port = config.port.parse::<u16>().expect("无效的端口号"); // "Invalid port number"
        // wait for the service to actually start
        let mut retries = 0;
        while retries < 30 {
            // wait up to 30 seconds
            if Self::check_service_status().await.is_some() && Self::is_port_in_use(host, port).await {
                let end_time = chrono::Local::now();
                return Ok(ServiceOperationResult {
                    success: true,
                    start_time,
                    end_time,
                    message: "服务启动成功".to_string(), // "Service started successfully"
                });
            }
            tokio::time::sleep(Duration::from_secs(1)).await;
            retries += 1;
        }

        Err("服务启动超时".into()) // "Service start timed out"
    }

    /// Stop the service
    /// This function sends a `Stop` command to the service manager
    ///
    /// # Example
    /// ```
    /// let service_manager = ServiceManager::new();
    /// let result = service_manager.stop().await;
    /// println!("{:?}", result);
    /// ```
    ///
    /// # Errors
    /// This function returns an error if the service fails to stop
    ///
    /// # Panics
    /// This function does not panic; it parses no port number
    ///
    /// # Safety
    /// This function is not marked as unsafe
    ///
    /// # Performance
    /// This function is not optimized for performance
    ///
    /// # Design
    /// This function is designed to be simple and easy to use
    ///
    /// # Security
    /// This function does not have any security implications
    pub async fn stop(&self) -> Result<ServiceOperationResult, Box<dyn Error>> {
        let start_time = chrono::Local::now();
        self.command_tx.send(ServiceCommand::Stop).await?;

        // Wait for the service to actually stop
        let mut retries = 0;
        while retries < 15 {
            // Wait up to 15 seconds
            if Self::check_service_status().await.is_none() {
                let end_time = chrono::Local::now();
                return Ok(ServiceOperationResult {
                    success: true,
                    start_time,
                    end_time,
                    message: "服务停止成功".to_string(), // "Service stopped successfully"
                });
            }
            tokio::time::sleep(Duration::from_secs(1)).await;
            retries += 1;
        }

        Err("服务停止超时".into()) // "Service stop timed out"
    }

    /// Restart the service
    /// This function sends a `Restart` command to the service manager
    ///
    /// # Example
    /// ```
    /// let config = RustFSConfig {
    ///     address: "127.0.0.1:9000".to_string(),
    ///     host: "127.0.0.1".to_string(),
    ///     port: "9000".to_string(),
    ///     access_key: "rustfsadmin".to_string(),
    ///     secret_key: "rustfsadmin".to_string(),
    ///     domain_name: "demo.rustfs.com".to_string(),
    ///     volume_name: "data".to_string(),
    ///     console_address: "127.0.0.1:9001".to_string(),
    /// };
    ///
    /// let service_manager = ServiceManager::new();
    /// let result = service_manager.restart(config).await;
    /// println!("{:?}", result);
    /// ```
    ///
    /// # Errors
    /// This function returns an error if the service fails to restart
    ///
    /// # Panics
    /// This function panics if the port number is invalid
    ///
    /// # Safety
    /// This function is not marked as unsafe
    ///
    /// # Performance
    /// This function is not optimized for performance
    ///
    /// # Design
    /// This function is designed to be simple and easy to use
    ///
    /// # Security
    /// This function does not have any security implications
    pub async fn restart(&self, config: RustFSConfig) -> Result<ServiceOperationResult, Box<dyn Error>> {
        let start_time = chrono::Local::now();
        self.command_tx.send(ServiceCommand::Restart(config.clone())).await?;

        let host = &config.host;
        let port = config.port.parse::<u16>().expect("无效的端口号"); // "Invalid port number"

        // wait for the service to restart
        let mut retries = 0;
        while retries < 45 {
            // A longer wait is allowed because both the stop and start steps are involved
            if Self::check_service_status().await.is_some() && Self::is_port_in_use(host, port).await {
                match config.save() {
                    Ok(_) => info!("save config success"),
                    Err(e) => {
                        error!("save config error: {}", e);
                        self.command_tx.send(ServiceCommand::Stop).await?;
                        Self::show_error("保存配置失败"); // "Failed to save the configuration"
                        return Err("保存配置失败".into());
                    }
                }
                let end_time = chrono::Local::now();
                return Ok(ServiceOperationResult {
                    success: true,
                    start_time,
                    end_time,
                    message: "服务重启成功".to_string(), // "Service restarted successfully"
                });
            }
            tokio::time::sleep(Duration::from_secs(1)).await;
            retries += 1;
        }
        Err("服务重启超时".into()) // "Service restart timed out"
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use std::time::Duration;

    #[test]
    fn test_service_command_creation() {
        let config = RustFSConfig::default_config();

        let start_cmd = ServiceCommand::Start(config.clone());
        let stop_cmd = ServiceCommand::Stop;
        let restart_cmd = ServiceCommand::Restart(config);

        // Test that commands can be created
        match start_cmd {
            ServiceCommand::Start(_) => {}
            _ => panic!("Expected Start command"),
        }

        match stop_cmd {
            ServiceCommand::Stop => {}
            _ => panic!("Expected Stop command"),
        }

        match restart_cmd {
            ServiceCommand::Restart(_) => {}
            _ => panic!("Expected Restart command"),
        }
    }

    #[test]
    fn test_service_operation_result_creation() {
        let start_time = chrono::Local::now();
        let end_time = chrono::Local::now();

        let success_result = ServiceOperationResult {
            success: true,
            start_time,
            end_time,
            message: "Operation successful".to_string(),
        };

        let failure_result = ServiceOperationResult {
            success: false,
            start_time,
            end_time,
            message: "Operation failed".to_string(),
        };

        assert!(success_result.success);
        assert_eq!(success_result.message, "Operation successful");

        assert!(!failure_result.success);
        assert_eq!(failure_result.message, "Operation failed");
    }

    #[test]
    fn test_service_operation_result_debug() {
        let result = ServiceOperationResult {
            success: true,
            start_time: chrono::Local::now(),
            end_time: chrono::Local::now(),
            message: "Test message".to_string(),
        };

        let debug_str = format!("{result:?}");
        assert!(debug_str.contains("ServiceOperationResult"));
        assert!(debug_str.contains("success: true"));
        assert!(debug_str.contains("Test message"));
    }

    #[test]
    fn test_service_manager_creation() {
        // Test ServiceManager creation in a tokio runtime
        let rt = tokio::runtime::Runtime::new().unwrap();
        rt.block_on(async {
            let service_manager = ServiceManager::new();

            // Test that ServiceManager can be created and cloned
            let cloned_manager = service_manager.clone();

            // Both should be valid (not much more can be checked without the real service)
            assert!(format!("{service_manager:?}").contains("ServiceManager"));
            assert!(format!("{cloned_manager:?}").contains("ServiceManager"));
        });
    }

    #[test]
    fn test_extract_port_valid() {
        let test_cases = vec![
            ("127.0.0.1:9000", Some(9000)),
            ("localhost:8080", Some(8080)),
            ("192.168.1.100:3000", Some(3000)),
            ("0.0.0.0:80", Some(80)),
            ("example.com:443", Some(443)),
            ("host:65535", Some(65535)),
            ("host:1", Some(1)),
        ];

        for (input, expected) in test_cases {
            let result = ServiceManager::extract_port(input);
            assert_eq!(result, expected, "Failed for input: {input}");
        }
    }

    #[test]
    fn test_extract_port_invalid() {
        let invalid_cases = vec![
            "127.0.0.1",       // Missing port
            "127.0.0.1:",      // Empty port
            "127.0.0.1:abc",   // Invalid port
            "127.0.0.1:99999", // Port out of range
            "",                // Empty string
            "invalid",         // No colon
            "host:-1",         // Negative port
            "host:0.5",        // Decimal port
        ];

        for input in invalid_cases {
            let result = ServiceManager::extract_port(input);
            assert_eq!(result, None, "Should be None for input: {input}");
        }

        // Special case: empty host but valid port should still work
        assert_eq!(ServiceManager::extract_port(":9000"), Some(9000));

        // Special case: multiple colons - extract_port takes the second part.
        // For "127.0.0.1:9000:extra", it takes "9000", which is valid.
        assert_eq!(ServiceManager::extract_port("127.0.0.1:9000:extra"), Some(9000));
    }

    #[test]
    fn test_extract_port_edge_cases() {
        // Test edge cases for port numbers
        assert_eq!(ServiceManager::extract_port("host:0"), Some(0));
        assert_eq!(ServiceManager::extract_port("host:65535"), Some(65535));
        assert_eq!(ServiceManager::extract_port("host:65536"), None); // Out of range
        // IPv6-like address - extract_port takes the second part after split(':').
        // For "::1:8080", split(':') gives ["", "", "1", "8080"]; nth(1) gives ""
        assert_eq!(ServiceManager::extract_port("::1:8080"), None); // Second part is empty
        // For "[::1]:8080", split(':') gives ["[", "", "1]", "8080"]; nth(1) gives ""
        assert_eq!(ServiceManager::extract_port("[::1]:8080"), None); // Second part is empty
    }

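    // Hedged sketch (added for illustration; not an original test): binding an
    // ephemeral local port ourselves lets `is_port_in_use` be exercised without
    // the real service. Assumes tokio's `macros` and runtime features are enabled.
    #[tokio::test]
    async fn test_is_port_in_use_on_bound_listener() {
        // Port 0 asks the OS for any free port; the probe should then see it as busy.
        let listener = tokio::net::TcpListener::bind("127.0.0.1:0").await.unwrap();
        let port = listener.local_addr().unwrap().port();
        assert!(ServiceManager::is_port_in_use("127.0.0.1", port).await);
    }
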
    #[test]
    fn test_show_error() {
        // Test that the show_error function exists and can be called.
        // We can't actually open the dialog in a test environment,
        // so we just verify the function signature.
    }

    #[test]
    fn test_show_info() {
        // Test that the show_info function exists and can be called.
        // We can't actually open the dialog in a test environment,
        // so we just verify the function signature.
    }

    #[test]
    fn test_service_operation_result_timing() {
        let start_time = chrono::Local::now();
        std::thread::sleep(Duration::from_millis(10)); // Small delay
        let end_time = chrono::Local::now();

        let result = ServiceOperationResult {
            success: true,
            start_time,
            end_time,
            message: "Timing test".to_string(),
        };

        // End time should be after start time
        assert!(result.end_time >= result.start_time);
    }

    #[test]
    fn test_service_operation_result_with_unicode() {
        let result = ServiceOperationResult {
            success: true,
            start_time: chrono::Local::now(),
            end_time: chrono::Local::now(),
            message: "操作成功 🎉".to_string(), // "Operation succeeded 🎉"
        };

        assert_eq!(result.message, "操作成功 🎉");
        assert!(result.success);
    }

    #[test]
    fn test_service_operation_result_with_long_message() {
        let long_message = "A".repeat(10000);
        let result = ServiceOperationResult {
            success: false,
            start_time: chrono::Local::now(),
            end_time: chrono::Local::now(),
            message: long_message.clone(),
        };

        assert_eq!(result.message.len(), 10000);
        assert_eq!(result.message, long_message);
        assert!(!result.success);
    }

    #[test]
    fn test_service_command_with_different_configs() {
        let config1 = RustFSConfig {
            address: "127.0.0.1:9000".to_string(),
            host: "127.0.0.1".to_string(),
            port: "9000".to_string(),
            access_key: "admin1".to_string(),
            secret_key: "pass1".to_string(),
            domain_name: "test1.com".to_string(),
            volume_name: "/data1".to_string(),
            console_address: "127.0.0.1:9001".to_string(),
        };

        let config2 = RustFSConfig {
            address: "192.168.1.100:8080".to_string(),
            host: "192.168.1.100".to_string(),
            port: "8080".to_string(),
            access_key: "admin2".to_string(),
            secret_key: "pass2".to_string(),
            domain_name: "test2.com".to_string(),
            volume_name: "/data2".to_string(),
            console_address: "192.168.1.100:8081".to_string(),
        };

        let start_cmd1 = ServiceCommand::Start(config1);
        let restart_cmd2 = ServiceCommand::Restart(config2);

        // Test that different configs can be used
        match start_cmd1 {
            ServiceCommand::Start(config) => {
                assert_eq!(config.address, "127.0.0.1:9000");
                assert_eq!(config.access_key, "admin1");
            }
            _ => panic!("Expected Start command"),
        }

        match restart_cmd2 {
            ServiceCommand::Restart(config) => {
                assert_eq!(config.address, "192.168.1.100:8080");
                assert_eq!(config.access_key, "admin2");
            }
            _ => panic!("Expected Restart command"),
        }
    }

    #[test]
    fn test_memory_efficiency() {
        // Test that structures don't use excessive memory
        assert!(std::mem::size_of::<ServiceCommand>() < 2000);
        assert!(std::mem::size_of::<ServiceOperationResult>() < 1000);
        assert!(std::mem::size_of::<ServiceManager>() < 1000);
    }

    // Note: The following methods are not tested here because they require:
|
||||
// - Async runtime (tokio)
|
||||
// - File system access
|
||||
// - Network access
|
||||
// - Process management
|
||||
// - External dependencies (embedded assets)
|
||||
//
|
||||
// These should be tested in integration tests:
|
||||
// - check_service_status()
|
||||
// - prepare_service()
|
||||
// - start_service()
|
||||
// - stop_service()
|
||||
// - is_port_in_use()
|
||||
// - ServiceManager::start()
|
||||
// - ServiceManager::stop()
|
||||
// - ServiceManager::restart()
|
||||
//
|
||||
// The RUSTFS_HASH lazy_static is also not tested here as it depends
|
||||
// on embedded assets that may not be available in unit test environment.
|
||||
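
    // A hedged sketch of what such an integration test might look like
    // (method names taken from the note above; the config construction is
    // assumed and may differ from the real API):
    //
    // #[tokio::test]
    // async fn service_lifecycle_roundtrip() {
    //     let manager = ServiceManager::new();
    //     let config = RustFSConfig { /* fields as in the unit tests above */ };
    //     manager.start(config).await.expect("service should start");
    //     manager.stop().await.expect("service should stop");
    // }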
}
@@ -1,300 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use dioxus::logger::tracing::debug;
use tracing_appender::non_blocking::WorkerGuard;
use tracing_appender::rolling::{RollingFileAppender, Rotation};
use tracing_subscriber::fmt;
use tracing_subscriber::layer::SubscriberExt;
use tracing_subscriber::util::SubscriberInitExt;

/// Initialize the logger with a rolling file appender
/// that rotates log files daily
pub fn init_logger() -> WorkerGuard {
    // Configure rolling logs, rotated daily
    let home_dir = dirs::home_dir().expect("failed to get the user's home directory");
    let rustfs_dir = home_dir.join("rustfs");
    let logs_dir = rustfs_dir.join("logs");
    let file_appender = RollingFileAppender::builder()
        .rotation(Rotation::DAILY) // rotate log files once per day
        .filename_prefix("rustfs-cli") // log file names will be prefixed with `rustfs-cli.`
        .filename_suffix("log") // log file names will be suffixed with `.log`
        .build(logs_dir) // build an appender that stores log files in `~/rustfs/logs`
        .expect("initializing rolling file appender failed");
    // non-blocking writer for improved performance
    let (non_blocking_file, worker_guard) = tracing_appender::non_blocking(file_appender);

    // console output layer (ANSI colors enabled)
    let console_layer = fmt::layer()
        .with_writer(std::io::stdout)
        .with_ansi(true)
        .with_line_number(true);

    // file output layer (ANSI colors disabled)
    let file_layer = fmt::layer()
        .with_writer(non_blocking_file)
        .with_ansi(false)
        .with_thread_names(true)
        .with_target(true)
        .with_thread_ids(true)
        .with_level(true)
        .with_line_number(true);

    // Combine all layers and initialize the global subscriber
    tracing_subscriber::registry()
        .with(console_layer)
        .with(file_layer)
        .with(tracing_subscriber::EnvFilter::new("info")) // fixed default filter level (`info`)
        .init();
    debug!("Logger initialized");
    worker_guard
}
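
// Usage sketch (assumed entry point): the returned WorkerGuard must be kept
// alive for the lifetime of the program; if it is dropped early, the
// non-blocking writer may discard buffered log lines on exit.
//
// fn main() {
//     let _guard = init_logger();
//     tracing::info!("application started");
// }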

#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::Once;

    static INIT: Once = Once::new();

    // Helper function to ensure logger is only initialized once in tests
    fn ensure_logger_init() {
        INIT.call_once(|| {
            // Initialize a simple test logger to avoid conflicts
            let _ = tracing_subscriber::fmt().with_test_writer().try_init();
        });
    }

    #[test]
    fn test_logger_initialization_components() {
        ensure_logger_init();

        // Test that we can create the components used in init_logger
        // without actually initializing the global logger again

        // Test home directory access
        let home_dir_result = dirs::home_dir();
        assert!(home_dir_result.is_some(), "Should be able to get home directory");

        let home_dir = home_dir_result.unwrap();
        let rustfs_dir = home_dir.join("rustfs");
        let logs_dir = rustfs_dir.join("logs");

        // Test path construction
        assert!(rustfs_dir.to_string_lossy().contains("rustfs"));
        assert!(logs_dir.to_string_lossy().contains("logs"));
    }

    #[test]
    fn test_rolling_file_appender_builder() {
        ensure_logger_init();

        // Test that we can create a RollingFileAppender builder
        let builder = RollingFileAppender::builder()
            .rotation(Rotation::DAILY)
            .filename_prefix("test-rustfs-cli")
            .filename_suffix("log");

        // We can't actually build it without creating directories,
        // but we can verify the builder pattern works
        let debug_str = format!("{builder:?}");
        // The actual debug format might be different, so just check it's not empty
        assert!(!debug_str.is_empty());
        // Check that it contains some expected parts
        assert!(debug_str.contains("Builder") || debug_str.contains("builder") || debug_str.contains("RollingFileAppender"));
    }

    #[test]
    fn test_rotation_types() {
        ensure_logger_init();

        // Test different rotation types
        let daily = Rotation::DAILY;
        let hourly = Rotation::HOURLY;
        let minutely = Rotation::MINUTELY;
        let never = Rotation::NEVER;

        // Test that rotation types can be created and formatted
        assert!(!format!("{daily:?}").is_empty());
        assert!(!format!("{hourly:?}").is_empty());
        assert!(!format!("{minutely:?}").is_empty());
        assert!(!format!("{never:?}").is_empty());
    }

    #[test]
    fn test_fmt_layer_configuration() {
        ensure_logger_init();

        // Test that we can create fmt layers with different configurations
        // We can't actually test the layers directly due to type complexity,
        // but we can test that the configuration values are correct

        // Test console layer settings
        let console_ansi = true;
        let console_line_number = true;
        assert!(console_ansi);
        assert!(console_line_number);

        // Test file layer settings
        let file_ansi = false;
        let file_thread_names = true;
        let file_target = true;
        let file_thread_ids = true;
        let file_level = true;
        let file_line_number = true;

        assert!(!file_ansi);
        assert!(file_thread_names);
        assert!(file_target);
        assert!(file_thread_ids);
        assert!(file_level);
        assert!(file_line_number);
    }

    #[test]
    fn test_env_filter_creation() {
        ensure_logger_init();

        // Test that EnvFilter can be created with different levels
        let info_filter = tracing_subscriber::EnvFilter::new("info");
        let debug_filter = tracing_subscriber::EnvFilter::new("debug");
        let warn_filter = tracing_subscriber::EnvFilter::new("warn");
        let error_filter = tracing_subscriber::EnvFilter::new("error");

        // Test that filters can be created
        assert!(!format!("{info_filter:?}").is_empty());
        assert!(!format!("{debug_filter:?}").is_empty());
        assert!(!format!("{warn_filter:?}").is_empty());
        assert!(!format!("{error_filter:?}").is_empty());
    }

    #[test]
    fn test_path_construction() {
        ensure_logger_init();

        // Test path construction logic used in init_logger
        if let Some(home_dir) = dirs::home_dir() {
            let rustfs_dir = home_dir.join("rustfs");
            let logs_dir = rustfs_dir.join("logs");

            // Test that paths are constructed correctly
            assert!(rustfs_dir.ends_with("rustfs"));
            assert!(logs_dir.ends_with("logs"));
            assert!(logs_dir.parent().unwrap().ends_with("rustfs"));

            // Test path string representation
            let rustfs_str = rustfs_dir.to_string_lossy();
            let logs_str = logs_dir.to_string_lossy();

            assert!(rustfs_str.contains("rustfs"));
            assert!(logs_str.contains("rustfs"));
            assert!(logs_str.contains("logs"));
        }
    }

    #[test]
    fn test_filename_patterns() {
        ensure_logger_init();

        // Test the filename patterns used in the logger
        let prefix = "rustfs-cli";
        let suffix = "log";

        assert_eq!(prefix, "rustfs-cli");
        assert_eq!(suffix, "log");

        // Test that these would create valid filenames
        let sample_filename = format!("{prefix}.2024-01-01.{suffix}");
        assert_eq!(sample_filename, "rustfs-cli.2024-01-01.log");
    }

    #[test]
    fn test_worker_guard_type() {
        ensure_logger_init();

        // Test that WorkerGuard type exists and can be referenced
        // We can't actually create one without the full setup, but we can test the type
        let guard_size = std::mem::size_of::<WorkerGuard>();
        assert!(guard_size > 0, "WorkerGuard should have non-zero size");
    }

    #[test]
    fn test_logger_configuration_constants() {
        ensure_logger_init();

        // Test the configuration values used in the logger
        let default_log_level = "info";
        let filename_prefix = "rustfs-cli";
        let filename_suffix = "log";
        let rotation = Rotation::DAILY;

        assert_eq!(default_log_level, "info");
        assert_eq!(filename_prefix, "rustfs-cli");
        assert_eq!(filename_suffix, "log");
        assert!(matches!(rotation, Rotation::DAILY));
    }

    #[test]
    fn test_directory_names() {
        ensure_logger_init();

        // Test the directory names used in the logger setup
        let rustfs_dir_name = "rustfs";
        let logs_dir_name = "logs";

        assert_eq!(rustfs_dir_name, "rustfs");
        assert_eq!(logs_dir_name, "logs");

        // Test path joining
        let combined = format!("{rustfs_dir_name}/{logs_dir_name}");
        assert_eq!(combined, "rustfs/logs");
    }

    #[test]
    fn test_layer_settings() {
        ensure_logger_init();

        // Test the boolean settings used in layer configuration
        let console_ansi = true;
        let console_line_number = true;
        let file_ansi = false;
        let file_thread_names = true;
        let file_target = true;
        let file_thread_ids = true;
        let file_level = true;
        let file_line_number = true;

        // Verify the settings
        assert!(console_ansi);
        assert!(console_line_number);
        assert!(!file_ansi);
        assert!(file_thread_names);
        assert!(file_target);
        assert!(file_thread_ids);
        assert!(file_level);
        assert!(file_line_number);
    }

    // Note: The actual init_logger() function is not tested here because:
    // 1. It initializes a global tracing subscriber which can only be done once
    // 2. It requires file system access to create directories
    // 3. It has side effects that would interfere with other tests
    // 4. It returns a WorkerGuard that needs to be kept alive
    //
    // This function should be tested in integration tests where:
    // - File system access can be properly controlled
    // - The global state can be managed
    // - The actual logging behavior can be verified
    // - The WorkerGuard lifecycle can be properly managed
}
@@ -1,21 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod config;
mod helper;
mod logger;

pub use config::RustFSConfig;
pub use helper::ServiceManager;
pub use logger::init_logger;
@@ -1,38 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::route::Route;
use dioxus::logger::tracing::info;
use dioxus::prelude::*;

const FAVICON: Asset = asset!("/assets/favicon.ico");
const TAILWIND_CSS: Asset = asset!("/assets/tailwind.css");

/// The main application component
/// This is the root component of the application
/// It contains the global resources and the router
/// for the application
#[component]
pub fn App() -> Element {
    // Build cool things ✌️
    use document::{Link, Title};
    info!("App rendered");
    rsx! {
        // Global app resources
        Link { rel: "icon", href: FAVICON }
        Link { rel: "stylesheet", href: TAILWIND_CSS }
        Title { "RustFS" }
        Router::<Route> {}
    }
}
@@ -1,23 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::components::Home;
use dioxus::prelude::*;

#[component]
pub fn HomeViews() -> Element {
    rsx! {
        Home {}
    }
}
@@ -1,24 +0,0 @@
/**
 * Copyright 2024 RustFS Team
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

module.exports = {
  mode: "all",
  content: ["./src/**/*.{rs,html,css}", "./dist/**/*.html"],
  theme: {
    extend: {},
  },
  plugins: [],
};
@@ -22,20 +22,22 @@ tokio = { workspace = true, features = ["full"] }
tokio-util = { workspace = true }
tracing = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
thiserror = { workspace = true }
bytes = { workspace = true }
time = { workspace = true, features = ["serde"] }
uuid = { workspace = true, features = ["v4", "serde"] }
anyhow = { workspace = true }
async-trait = { workspace = true }
futures = { workspace = true }
url = { workspace = true }
rustfs-lock = { workspace = true }

s3s = { workspace = true }
lazy_static = { workspace = true }
chrono = { workspace = true }

[dev-dependencies]
rmp-serde = { workspace = true }
tokio-test = { workspace = true }
serde_json = { workspace = true }
serial_test = "3.2.0"
tracing-subscriber = { workspace = true }
walkdir = "2.5.0"
tempfile = { workspace = true }

@@ -14,30 +14,79 @@

use thiserror::Error;

/// Unified error type for RustFS AHM/Heal/Scanner
#[derive(Debug, Error)]
pub enum Error {
    // General
    #[error("I/O error: {0}")]
    Io(#[from] std::io::Error),

    #[error("Storage error: {0}")]
    Storage(#[from] rustfs_ecstore::error::Error),

    #[error("Disk error: {0}")]
    Disk(#[from] rustfs_ecstore::disk::error::DiskError),

    #[error("Configuration error: {0}")]
    Config(String),

    #[error("Heal configuration error: {message}")]
    ConfigurationError { message: String },

    #[error("Other error: {0}")]
    Other(String),

    #[error(transparent)]
    Anyhow(#[from] anyhow::Error),

    // Scanner-related
    #[error("Scanner error: {0}")]
    Scanner(String),

    #[error("Metrics error: {0}")]
    Metrics(String),

    // Heal-related
    #[error("Heal task not found: {task_id}")]
    TaskNotFound { task_id: String },

    #[error("Heal task already exists: {task_id}")]
    TaskAlreadyExists { task_id: String },

    #[error("Heal manager is not running")]
    ManagerNotRunning,

    #[error("Heal task execution failed: {message}")]
    TaskExecutionFailed { message: String },

    #[error("Invalid heal type: {heal_type}")]
    InvalidHealType { heal_type: String },

    #[error("Heal task cancelled")]
    TaskCancelled,

    #[error("Heal task timeout")]
    TaskTimeout,

    #[error("Heal event processing failed: {message}")]
    EventProcessingFailed { message: String },

    #[error("Heal progress tracking failed: {message}")]
    ProgressTrackingFailed { message: String },
}

pub type Result<T, E = Error> = std::result::Result<T, E>;

// Implement conversion from ahm::Error to std::io::Error for use in main.rs
impl Error {
    pub fn other<E>(error: E) -> Self
    where
        E: Into<Box<dyn std::error::Error + Send + Sync>>,
    {
        Error::Other(error.into().to_string())
    }
}

// Optional: conversion from Error into std::io::Error
impl From<Error> for std::io::Error {
    fn from(err: Error) -> Self {
        std::io::Error::other(err)
    }
}
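
// Usage sketch (illustrative only): wrapping an arbitrary error with the
// `other` helper and bubbling it up through the io::Error conversion above.
//
// fn flush_to_disk() -> std::io::Result<()> {
//     let err = Error::other("disk offline");
//     Err(err.into()) // uses From<Error> for std::io::Error
// }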
233
crates/ahm/src/heal/channel.rs
Normal file
@@ -0,0 +1,233 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::error::Result;
use crate::heal::{
    manager::HealManager,
    task::{HealOptions, HealPriority, HealRequest, HealType},
};

use rustfs_common::heal_channel::{
    HealChannelCommand, HealChannelPriority, HealChannelReceiver, HealChannelRequest, HealChannelResponse, HealScanMode,
};
use std::sync::Arc;
use tokio::sync::mpsc;
use tracing::{error, info};

/// Heal channel processor
pub struct HealChannelProcessor {
    /// Heal manager
    heal_manager: Arc<HealManager>,
    /// Response sender
    response_sender: mpsc::UnboundedSender<HealChannelResponse>,
    /// Response receiver
    response_receiver: mpsc::UnboundedReceiver<HealChannelResponse>,
}

impl HealChannelProcessor {
    /// Create a new HealChannelProcessor
    pub fn new(heal_manager: Arc<HealManager>) -> Self {
        let (response_tx, response_rx) = mpsc::unbounded_channel();
        Self {
            heal_manager,
            response_sender: response_tx,
            response_receiver: response_rx,
        }
    }

    /// Start processing heal channel requests
    pub async fn start(&mut self, mut receiver: HealChannelReceiver) -> Result<()> {
        info!("Starting heal channel processor");

        loop {
            tokio::select! {
                command = receiver.recv() => {
                    match command {
                        Some(command) => {
                            if let Err(e) = self.process_command(command).await {
                                error!("Failed to process heal command: {}", e);
                            }
                        }
                        None => {
                            info!("Heal channel receiver closed, stopping processor");
                            break;
                        }
                    }
                }
                response = self.response_receiver.recv() => {
                    if let Some(response) = response {
                        // Handle response if needed
                        info!("Received heal response for request: {}", response.request_id);
                    }
                }
            }
        }

        info!("Heal channel processor stopped");
        Ok(())
    }

    /// Process heal command
    async fn process_command(&self, command: HealChannelCommand) -> Result<()> {
        match command {
            HealChannelCommand::Start(request) => self.process_start_request(request).await,
            HealChannelCommand::Query { heal_path, client_token } => self.process_query_request(heal_path, client_token).await,
            HealChannelCommand::Cancel { heal_path } => self.process_cancel_request(heal_path).await,
        }
    }

    /// Process start request
    async fn process_start_request(&self, request: HealChannelRequest) -> Result<()> {
        info!("Processing heal start request: {} for bucket: {}", request.id, request.bucket);

        // Convert channel request to heal request
        let heal_request = self.convert_to_heal_request(request.clone())?;

        // Submit to heal manager
        match self.heal_manager.submit_heal_request(heal_request).await {
            Ok(task_id) => {
                info!("Successfully submitted heal request: {} as task: {}", request.id, task_id);

                // Send success response
                let response = HealChannelResponse {
                    request_id: request.id,
                    success: true,
                    data: Some(format!("Task ID: {task_id}").into_bytes()),
                    error: None,
                };

                if let Err(e) = self.response_sender.send(response) {
                    error!("Failed to send heal response: {}", e);
                }
            }
            Err(e) => {
                error!("Failed to submit heal request: {} - {}", request.id, e);

                // Send error response
                let response = HealChannelResponse {
                    request_id: request.id,
                    success: false,
                    data: None,
                    error: Some(e.to_string()),
                };

                if let Err(e) = self.response_sender.send(response) {
                    error!("Failed to send heal error response: {}", e);
                }
            }
        }

        Ok(())
    }

    /// Process query request
    async fn process_query_request(&self, heal_path: String, client_token: String) -> Result<()> {
        info!("Processing heal query request for path: {}", heal_path);

        // TODO: Implement query logic based on heal_path and client_token
        // For now, return a placeholder response
        let response = HealChannelResponse {
            request_id: client_token,
            success: true,
            data: Some(format!("Query result for path: {heal_path}").into_bytes()),
            error: None,
        };

        if let Err(e) = self.response_sender.send(response) {
            error!("Failed to send query response: {}", e);
        }

        Ok(())
    }

    /// Process cancel request
    async fn process_cancel_request(&self, heal_path: String) -> Result<()> {
        info!("Processing heal cancel request for path: {}", heal_path);

        // TODO: Implement cancel logic based on heal_path
        // For now, return a placeholder response
        let response = HealChannelResponse {
            request_id: heal_path.clone(),
            success: true,
            data: Some(format!("Cancel request for path: {heal_path}").into_bytes()),
            error: None,
        };

        if let Err(e) = self.response_sender.send(response) {
            error!("Failed to send cancel response: {}", e);
        }

        Ok(())
    }

    /// Convert channel request to heal request
    fn convert_to_heal_request(&self, request: HealChannelRequest) -> Result<HealRequest> {
        let heal_type = if let Some(disk_id) = &request.disk {
            HealType::ErasureSet {
                buckets: vec![],
                set_disk_id: disk_id.clone(),
            }
        } else if let Some(prefix) = &request.object_prefix {
            if !prefix.is_empty() {
                HealType::Object {
                    bucket: request.bucket.clone(),
                    object: prefix.clone(),
                    version_id: None,
                }
            } else {
                HealType::Bucket {
                    bucket: request.bucket.clone(),
                }
            }
        } else {
            HealType::Bucket {
                bucket: request.bucket.clone(),
            }
        };

        let priority = match request.priority {
            HealChannelPriority::Low => HealPriority::Low,
            HealChannelPriority::Normal => HealPriority::Normal,
            HealChannelPriority::High => HealPriority::High,
            HealChannelPriority::Critical => HealPriority::Urgent,
        };

        // Build HealOptions with all available fields
        let mut options = HealOptions {
            scan_mode: request.scan_mode.unwrap_or(HealScanMode::Normal),
            remove_corrupted: request.remove_corrupted.unwrap_or(false),
            recreate_missing: request.recreate_missing.unwrap_or(true),
            update_parity: request.update_parity.unwrap_or(true),
            recursive: request.recursive.unwrap_or(false),
            dry_run: request.dry_run.unwrap_or(false),
            timeout: request.timeout_seconds.map(std::time::Duration::from_secs),
            pool_index: request.pool_index,
            set_index: request.set_index,
        };

        // Apply force_start overrides
        if request.force_start {
            options.remove_corrupted = true;
            options.recreate_missing = true;
            options.update_parity = true;
        }

        Ok(HealRequest::new(heal_type, options, priority))
    }

    /// Get response sender for external use
    pub fn get_response_sender(&self) -> mpsc::UnboundedSender<HealChannelResponse> {
        self.response_sender.clone()
    }
}
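
// Hedged wiring sketch: how a caller might drive the processor. Construction
// of the HealChannelReceiver is assumed to happen elsewhere (e.g. in
// rustfs_common::heal_channel); only the processor side is shown.
//
// async fn run_heal_channel(heal_manager: Arc<HealManager>, receiver: HealChannelReceiver) {
//     let mut processor = HealChannelProcessor::new(heal_manager);
//     if let Err(e) = processor.start(receiver).await {
//         tracing::error!("heal channel processor exited with error: {e}");
//     }
// }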
456
crates/ahm/src/heal/erasure_healer.rs
Normal file
@@ -0,0 +1,456 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::error::{Error, Result};
use crate::heal::{
    progress::HealProgress,
    resume::{CheckpointManager, ResumeManager, ResumeUtils},
    storage::HealStorageAPI,
};
use futures::future::join_all;
use rustfs_common::heal_channel::{HealOpts, HealScanMode};
use rustfs_ecstore::disk::DiskStore;
use std::sync::Arc;
use tokio::sync::RwLock;
use tracing::{error, info, warn};

/// Erasure set healer
pub struct ErasureSetHealer {
    storage: Arc<dyn HealStorageAPI>,
    progress: Arc<RwLock<HealProgress>>,
    cancel_token: tokio_util::sync::CancellationToken,
    disk: DiskStore,
}

impl ErasureSetHealer {
    pub fn new(
        storage: Arc<dyn HealStorageAPI>,
        progress: Arc<RwLock<HealProgress>>,
        cancel_token: tokio_util::sync::CancellationToken,
        disk: DiskStore,
    ) -> Self {
        Self {
            storage,
            progress,
            cancel_token,
            disk,
        }
    }

    /// Execute an erasure set heal with resume support
    pub async fn heal_erasure_set(&self, buckets: &[String], set_disk_id: &str) -> Result<()> {
        info!("Starting erasure set heal for {} buckets on set disk {}", buckets.len(), set_disk_id);

        // 1. generate or reuse a task id
        let task_id = self.get_or_create_task_id(set_disk_id).await?;

        // 2. initialize or load the resume state
        let (resume_manager, checkpoint_manager) = self.initialize_resume_state(&task_id, buckets).await?;

        // 3. execute heal with resume
        let result = self
            .execute_heal_with_resume(buckets, &resume_manager, &checkpoint_manager)
            .await;

        // 4. clean up the resume state on success
        if result.is_ok() {
            if let Err(e) = resume_manager.cleanup().await {
                warn!("Failed to cleanup resume state: {}", e);
            }
            if let Err(e) = checkpoint_manager.cleanup().await {
                warn!("Failed to cleanup checkpoint: {}", e);
            }
        }

        result
    }

    /// Get or create a task id
    async fn get_or_create_task_id(&self, _set_disk_id: &str) -> Result<String> {
        // check if there are resumable tasks
        let resumable_tasks = ResumeUtils::get_resumable_tasks(&self.disk).await?;

        for task_id in resumable_tasks {
            if ResumeUtils::can_resume_task(&self.disk, &task_id).await {
                info!("Found resumable task: {}", task_id);
                return Ok(task_id);
            }
        }

        // create a new task id
        let task_id = ResumeUtils::generate_task_id();
        info!("Created new heal task: {}", task_id);
        Ok(task_id)
    }

    /// Initialize a new resume state or load an existing one
    async fn initialize_resume_state(&self, task_id: &str, buckets: &[String]) -> Result<(ResumeManager, CheckpointManager)> {
        // check if resume state exists
        if ResumeManager::has_resume_state(&self.disk, task_id).await {
            info!("Loading existing resume state for task: {}", task_id);

            let resume_manager = ResumeManager::load_from_disk(self.disk.clone(), task_id).await?;
            let checkpoint_manager = if CheckpointManager::has_checkpoint(&self.disk, task_id).await {
                CheckpointManager::load_from_disk(self.disk.clone(), task_id).await?
            } else {
                CheckpointManager::new(self.disk.clone(), task_id.to_string()).await?
            };

            Ok((resume_manager, checkpoint_manager))
        } else {
            info!("Creating new resume state for task: {}", task_id);

            let resume_manager =
                ResumeManager::new(self.disk.clone(), task_id.to_string(), "erasure_set".to_string(), buckets.to_vec()).await?;

            let checkpoint_manager = CheckpointManager::new(self.disk.clone(), task_id.to_string()).await?;

            Ok((resume_manager, checkpoint_manager))
        }
    }

    /// Execute heal with resume
    async fn execute_heal_with_resume(
        &self,
        buckets: &[String],
        resume_manager: &ResumeManager,
        checkpoint_manager: &CheckpointManager,
    ) -> Result<()> {
        // 1. get current state
        let state = resume_manager.get_state().await;
        let checkpoint = checkpoint_manager.get_checkpoint().await;

        info!(
            "Resuming from bucket {} object {}",
            checkpoint.current_bucket_index, checkpoint.current_object_index
        );

        // 2. initialize progress
        self.initialize_progress(buckets, &state).await;

        // 3. continue from checkpoint
        let current_bucket_index = checkpoint.current_bucket_index;
        let mut current_object_index = checkpoint.current_object_index;

        let mut processed_objects = state.processed_objects;
        let mut successful_objects = state.successful_objects;
        let mut failed_objects = state.failed_objects;
        let mut skipped_objects = state.skipped_objects;

        // 4. process remaining buckets
        for (bucket_idx, bucket) in buckets.iter().enumerate().skip(current_bucket_index) {
            // skip buckets that are already completed
            if state.completed_buckets.contains(bucket) {
                continue;
            }

            // update current bucket
            resume_manager.set_current_item(Some(bucket.clone()), None).await?;

            // process objects in the bucket
            let bucket_result = self
                .heal_bucket_with_resume(
                    bucket,
                    &mut current_object_index,
                    &mut processed_objects,
                    &mut successful_objects,
                    &mut failed_objects,
                    &mut skipped_objects,
                    resume_manager,
                    checkpoint_manager,
                )
                .await;

            // update checkpoint position
            checkpoint_manager.update_position(bucket_idx, current_object_index).await?;

            // update progress
            resume_manager
                .update_progress(processed_objects, successful_objects, failed_objects, skipped_objects)
                .await?;

            // check cancel status
            if self.cancel_token.is_cancelled() {
                info!("Heal task cancelled");
                return Err(Error::TaskCancelled);
            }

            // process bucket result
            match bucket_result {
                Ok(_) => {
                    resume_manager.complete_bucket(bucket).await?;
                    info!("Completed heal for bucket: {}", bucket);
                }
                Err(e) => {
                    error!("Failed to heal bucket {}: {}", bucket, e);
                    // continue to the next bucket, do not interrupt the whole process
                }
            }

            // reset the object index for the next bucket
            current_object_index = 0;
        }

        // 5. mark task completed
        resume_manager.mark_completed().await?;

        info!("Erasure set heal completed successfully");
        Ok(())
    }

    /// Heal a single bucket with resume
    #[allow(clippy::too_many_arguments)]
    async fn heal_bucket_with_resume(
        &self,
        bucket: &str,
        current_object_index: &mut usize,
        processed_objects: &mut u64,
        successful_objects: &mut u64,
        failed_objects: &mut u64,
        _skipped_objects: &mut u64,
        resume_manager: &ResumeManager,
        checkpoint_manager: &CheckpointManager,
    ) -> Result<()> {
        info!("Starting heal for bucket: {} from object index {}", bucket, current_object_index);

        // 1. get bucket info
        let _bucket_info = match self.storage.get_bucket_info(bucket).await? {
            Some(info) => info,
            None => {
                warn!("Bucket {} not found, skipping", bucket);
                return Ok(());
            }
        };

        // 2. get objects to heal
        let objects = self.storage.list_objects_for_heal(bucket, "").await?;

        // 3. continue from checkpoint
        for (obj_idx, object) in objects.iter().enumerate().skip(*current_object_index) {
            // skip objects that were already processed
            if checkpoint_manager.get_checkpoint().await.processed_objects.contains(object) {
                continue;
            }

            // update current object
            resume_manager
                .set_current_item(Some(bucket.to_string()), Some(object.clone()))
                .await?;

            // heal the object
            let heal_opts = HealOpts {
                scan_mode: HealScanMode::Normal,
                remove: true,
                recreate: true,
                ..Default::default()
            };

            match self.storage.heal_object(bucket, object, None, &heal_opts).await {
                Ok((_result, None)) => {
                    *successful_objects += 1;
                    checkpoint_manager.add_processed_object(object.clone()).await?;
                    info!("Successfully healed object {}/{}", bucket, object);
                }
                Ok((_, Some(err))) => {
                    *failed_objects += 1;
                    checkpoint_manager.add_failed_object(object.clone()).await?;
                    warn!("Failed to heal object {}/{}: {}", bucket, object, err);
                }
                Err(err) => {
                    *failed_objects += 1;
                    checkpoint_manager.add_failed_object(object.clone()).await?;
                    warn!("Error healing object {}/{}: {}", bucket, object, err);
                }
            }

            *processed_objects += 1;
            *current_object_index = obj_idx + 1;

            // check cancel status
            if self.cancel_token.is_cancelled() {
                info!("Heal task cancelled during object processing");
                return Err(Error::TaskCancelled);
            }

            // save a checkpoint periodically
            if obj_idx % 100 == 0 {
                checkpoint_manager.update_position(0, *current_object_index).await?;
            }
        }

        Ok(())
    }

    /// Initialize progress tracking
    async fn initialize_progress(&self, _buckets: &[String], state: &crate::heal::resume::ResumeState) {
        let mut progress = self.progress.write().await;
        progress.objects_scanned = state.total_objects;
        progress.objects_healed = state.successful_objects;
        progress.objects_failed = state.failed_objects;
        progress.bytes_processed = 0; // set to 0 for now, can be extended later
        progress.set_current_object(state.current_object.clone());
    }

    /// Heal all buckets concurrently
    #[allow(dead_code)]
    async fn heal_buckets_concurrently(&self, buckets: &[String]) -> Vec<Result<()>> {
        // use a semaphore to bound concurrency and avoid too many concurrent healings
        let semaphore = Arc::new(tokio::sync::Semaphore::new(4)); // max 4 concurrent healings

        let heal_futures = buckets.iter().map(|bucket| {
            let bucket = bucket.clone();
            let storage = self.storage.clone();
            let progress = self.progress.clone();
            let semaphore = semaphore.clone();
            let cancel_token = self.cancel_token.clone();

            async move {
                let _permit = semaphore.acquire().await.unwrap();

                if cancel_token.is_cancelled() {
                    return Err(Error::TaskCancelled);
                }

                Self::heal_single_bucket(&storage, &bucket, &progress).await
            }
        });

        // use join_all to process concurrently
        join_all(heal_futures).await
    }

    /// Heal a single bucket
    #[allow(dead_code)]
    async fn heal_single_bucket(
        storage: &Arc<dyn HealStorageAPI>,
        bucket: &str,
        progress: &Arc<RwLock<HealProgress>>,
    ) -> Result<()> {
        info!("Starting heal for bucket: {}", bucket);

        // 1. get bucket info
        let _bucket_info = match storage.get_bucket_info(bucket).await? {
            Some(info) => info,
            None => {
                warn!("Bucket {} not found, skipping", bucket);
                return Ok(());
            }
        };

        // 2. get objects to heal
        let objects = storage.list_objects_for_heal(bucket, "").await?;

        // 3. update progress
        {
            let mut p = progress.write().await;
            p.objects_scanned += objects.len() as u64;
        }

        // 4. heal objects concurrently
        let heal_opts = HealOpts {
            scan_mode: HealScanMode::Normal,
            remove: true,   // remove corrupted data
            recreate: true, // recreate missing data
            ..Default::default()
        };

        let object_results = Self::heal_objects_concurrently(storage, bucket, &objects, &heal_opts, progress).await;

        // 5. count results
        let (success_count, failure_count) = object_results
            .into_iter()
            .fold((0, 0), |(success, failure), result| match result {
                Ok(_) => (success + 1, failure),
                Err(_) => (success, failure + 1),
            });

        // 6. update progress
        {
            let mut p = progress.write().await;
            p.objects_healed += success_count;
            p.objects_failed += failure_count;
            p.set_current_object(Some(format!("completed bucket: {bucket}")));
        }

        info!(
            "Completed heal for bucket {}: {} success, {} failures",
            bucket, success_count, failure_count
        );

        Ok(())
    }

    /// Heal objects concurrently
    #[allow(dead_code)]
    async fn heal_objects_concurrently(
        storage: &Arc<dyn HealStorageAPI>,
        bucket: &str,
        objects: &[String],
        heal_opts: &HealOpts,
        _progress: &Arc<RwLock<HealProgress>>,
    ) -> Vec<Result<()>> {
        // use a semaphore to bound object healing concurrency
        let semaphore = Arc::new(tokio::sync::Semaphore::new(8)); // max 8 concurrent object healings

        let heal_futures = objects.iter().map(|object| {
            let object = object.clone();
            let bucket = bucket.to_string();
            let storage = storage.clone();
            let heal_opts = *heal_opts;
            let semaphore = semaphore.clone();

            async move {
                let _permit = semaphore.acquire().await.unwrap();

                match storage.heal_object(&bucket, &object, None, &heal_opts).await {
                    Ok((_result, None)) => {
                        info!("Successfully healed object {}/{}", bucket, object);
                        Ok(())
                    }
                    Ok((_, Some(err))) => {
                        warn!("Failed to heal object {}/{}: {}", bucket, object, err);
                        Err(Error::other(err))
                    }
                    Err(err) => {
                        warn!("Error healing object {}/{}: {}", bucket, object, err);
                        Err(err)
                    }
                }
            }
        });

        join_all(heal_futures).await
    }

    /// Process results
    #[allow(dead_code)]
    async fn process_results(&self, results: Vec<Result<()>>) -> Result<()> {
        let (success_count, failure_count): (usize, usize) =
            results.into_iter().fold((0, 0), |(success, failure), result| match result {
                Ok(_) => (success + 1, failure),
                Err(_) => (success, failure + 1),
            });

        let total = success_count + failure_count;

        info!("Erasure set heal completed: {}/{} buckets successful", success_count, total);

        if failure_count > 0 {
            warn!("{} buckets failed to heal", failure_count);
            return Err(Error::other(format!("{failure_count} buckets failed to heal")));
        }

        Ok(())
    }
}
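
// Hedged usage sketch (construction of the storage backend and DiskStore is
// assumed, as is a Default impl for HealProgress):
//
// async fn heal_one_set(storage: Arc<dyn HealStorageAPI>, disk: DiskStore) -> Result<()> {
//     let progress = Arc::new(RwLock::new(HealProgress::default()));
//     let cancel = tokio_util::sync::CancellationToken::new();
//     let healer = ErasureSetHealer::new(storage, progress, cancel, disk);
//     healer.heal_erasure_set(&["my-bucket".to_string()], "0_1").await
// }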
359
crates/ahm/src/heal/event.rs
Normal file
@@ -0,0 +1,359 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::heal::task::{HealOptions, HealPriority, HealRequest, HealType};
|
||||
use rustfs_ecstore::disk::endpoint::Endpoint;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::time::SystemTime;
|
||||
|
||||
/// Corruption type
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub enum CorruptionType {
|
||||
/// Data corruption
|
||||
DataCorruption,
|
||||
/// Metadata corruption
|
||||
MetadataCorruption,
|
||||
/// Partial corruption
|
||||
PartialCorruption,
|
||||
/// Complete corruption
|
||||
CompleteCorruption,
|
||||
}
|
||||
|
||||
/// Severity level
|
||||
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
||||
pub enum Severity {
|
||||
/// Low severity
|
||||
Low = 0,
|
||||
/// Medium severity
|
||||
Medium = 1,
|
||||
/// High severity
|
||||
High = 2,
|
||||
/// Critical severity
|
||||
Critical = 3,
|
||||
}
|
||||
|
||||
/// Heal event
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum HealEvent {
|
||||
/// Object corruption event
|
||||
ObjectCorruption {
|
||||
bucket: String,
|
||||
object: String,
|
||||
version_id: Option<String>,
|
||||
corruption_type: CorruptionType,
|
||||
severity: Severity,
|
||||
},
|
||||
/// Object missing event
|
||||
ObjectMissing {
|
||||
bucket: String,
|
||||
object: String,
|
||||
version_id: Option<String>,
|
||||
expected_locations: Vec<usize>,
|
||||
available_locations: Vec<usize>,
|
||||
},
|
||||
/// Metadata corruption event
|
||||
MetadataCorruption {
|
||||
bucket: String,
|
||||
object: String,
|
||||
corruption_type: CorruptionType,
|
||||
},
|
||||
/// Disk status change event
|
||||
DiskStatusChange {
|
||||
endpoint: Endpoint,
|
||||
old_status: String,
|
||||
new_status: String,
|
||||
},
|
||||
/// EC decode failure event
|
||||
ECDecodeFailure {
|
||||
bucket: String,
|
||||
object: String,
|
||||
version_id: Option<String>,
|
||||
missing_shards: Vec<usize>,
|
||||
available_shards: Vec<usize>,
|
||||
},
|
||||
/// Checksum mismatch event
|
||||
ChecksumMismatch {
|
||||
bucket: String,
|
||||
object: String,
|
||||
version_id: Option<String>,
|
||||
expected_checksum: String,
|
||||
actual_checksum: String,
|
||||
},
|
||||
/// Bucket metadata corruption event
|
||||
BucketMetadataCorruption {
|
||||
bucket: String,
|
||||
corruption_type: CorruptionType,
|
||||
},
|
||||
/// MRF metadata corruption event
|
||||
MRFMetadataCorruption {
|
||||
meta_path: String,
|
||||
corruption_type: CorruptionType,
|
||||
},
|
||||
}
|
||||
|
||||
impl HealEvent {
|
||||
/// Convert HealEvent to HealRequest
|
||||
pub fn to_heal_request(&self) -> HealRequest {
|
||||
match self {
|
||||
HealEvent::ObjectCorruption {
|
||||
bucket,
|
||||
object,
|
||||
version_id,
|
||||
severity,
|
||||
..
|
||||
} => HealRequest::new(
|
||||
HealType::Object {
|
||||
bucket: bucket.clone(),
|
||||
object: object.clone(),
|
||||
version_id: version_id.clone(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
Self::severity_to_priority(severity),
|
||||
),
|
||||
HealEvent::ObjectMissing {
|
||||
bucket,
|
||||
object,
|
||||
version_id,
|
||||
..
|
||||
} => HealRequest::new(
|
||||
HealType::Object {
|
||||
bucket: bucket.clone(),
|
||||
object: object.clone(),
|
||||
version_id: version_id.clone(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::High,
|
||||
),
|
||||
HealEvent::MetadataCorruption { bucket, object, .. } => HealRequest::new(
|
||||
HealType::Metadata {
|
||||
bucket: bucket.clone(),
|
||||
object: object.clone(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::High,
|
||||
),
|
||||
HealEvent::DiskStatusChange { endpoint, .. } => {
|
||||
// Convert disk status change to erasure set heal
|
||||
// Note: This requires access to storage to get bucket list, which is not available here
|
||||
// The actual bucket list will need to be provided by the caller or retrieved differently
|
||||
HealRequest::new(
|
||||
HealType::ErasureSet {
|
||||
buckets: vec![], // Empty bucket list - caller should populate this
|
||||
set_disk_id: format!("{}_{}", endpoint.pool_idx, endpoint.set_idx),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::High,
|
||||
)
|
||||
}
|
||||
HealEvent::ECDecodeFailure {
|
||||
bucket,
|
||||
object,
|
||||
version_id,
|
||||
..
|
||||
} => HealRequest::new(
|
||||
HealType::ECDecode {
|
||||
bucket: bucket.clone(),
|
||||
object: object.clone(),
|
||||
version_id: version_id.clone(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::Urgent,
|
||||
),
|
||||
HealEvent::ChecksumMismatch {
|
||||
bucket,
|
||||
object,
|
||||
version_id,
|
||||
..
|
||||
} => HealRequest::new(
|
||||
HealType::Object {
|
||||
bucket: bucket.clone(),
|
||||
object: object.clone(),
|
||||
version_id: version_id.clone(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::High,
|
||||
),
|
||||
HealEvent::BucketMetadataCorruption { bucket, .. } => {
|
||||
HealRequest::new(HealType::Bucket { bucket: bucket.clone() }, HealOptions::default(), HealPriority::High)
|
||||
}
|
||||
HealEvent::MRFMetadataCorruption { meta_path, .. } => HealRequest::new(
|
||||
HealType::MRF {
|
||||
meta_path: meta_path.clone(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::High,
|
||||
),
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert severity to priority
|
||||
fn severity_to_priority(severity: &Severity) -> HealPriority {
|
||||
match severity {
|
||||
Severity::Low => HealPriority::Low,
|
||||
Severity::Medium => HealPriority::Normal,
|
||||
Severity::High => HealPriority::High,
|
||||
Severity::Critical => HealPriority::Urgent,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get event description
|
||||
pub fn description(&self) -> String {
|
||||
match self {
|
||||
HealEvent::ObjectCorruption {
|
||||
bucket,
|
||||
object,
|
||||
corruption_type,
|
||||
..
|
||||
} => {
|
||||
format!("Object corruption detected: {bucket}/{object} - {corruption_type:?}")
|
||||
}
|
||||
HealEvent::ObjectMissing { bucket, object, .. } => {
|
||||
format!("Object missing: {bucket}/{object}")
|
||||
}
|
||||
HealEvent::MetadataCorruption {
|
||||
bucket,
|
||||
object,
|
||||
corruption_type,
|
||||
..
|
||||
} => {
|
||||
format!("Metadata corruption: {bucket}/{object} - {corruption_type:?}")
|
||||
}
|
||||
HealEvent::DiskStatusChange {
|
||||
endpoint,
|
||||
old_status,
|
||||
new_status,
|
||||
..
|
||||
} => {
|
||||
format!("Disk status changed: {endpoint:?} {old_status} -> {new_status}")
|
||||
}
|
||||
HealEvent::ECDecodeFailure {
|
||||
bucket,
|
||||
object,
|
||||
missing_shards,
|
||||
..
|
||||
} => {
|
||||
format!("EC decode failure: {bucket}/{object} - missing shards: {missing_shards:?}")
|
||||
}
|
||||
HealEvent::ChecksumMismatch {
|
||||
bucket,
|
||||
object,
|
||||
expected_checksum,
|
||||
actual_checksum,
|
||||
..
|
||||
} => {
|
||||
format!("Checksum mismatch: {bucket}/{object} - expected: {expected_checksum}, actual: {actual_checksum}")
|
||||
}
|
||||
HealEvent::BucketMetadataCorruption {
|
||||
bucket, corruption_type, ..
|
||||
} => {
|
||||
format!("Bucket metadata corruption: {bucket} - {corruption_type:?}")
|
||||
}
|
||||
HealEvent::MRFMetadataCorruption {
|
||||
meta_path,
|
||||
corruption_type,
|
||||
..
|
||||
} => {
|
||||
format!("MRF metadata corruption: {meta_path} - {corruption_type:?}")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Get event severity
|
||||
pub fn severity(&self) -> Severity {
|
||||
match self {
|
||||
HealEvent::ObjectCorruption { severity, .. } => severity.clone(),
|
||||
HealEvent::ObjectMissing { .. } => Severity::High,
|
||||
HealEvent::MetadataCorruption { .. } => Severity::High,
|
||||
HealEvent::DiskStatusChange { .. } => Severity::High,
|
||||
HealEvent::ECDecodeFailure { .. } => Severity::Critical,
|
||||
HealEvent::ChecksumMismatch { .. } => Severity::High,
|
||||
HealEvent::BucketMetadataCorruption { .. } => Severity::High,
|
||||
HealEvent::MRFMetadataCorruption { .. } => Severity::High,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get event timestamp
|
||||
pub fn timestamp(&self) -> SystemTime {
|
||||
SystemTime::now()
|
||||
}
|
||||
}

/// Heal event handler
pub struct HealEventHandler {
    /// Event queue
    events: Vec<HealEvent>,
    /// Maximum number of events
    max_events: usize,
}

impl HealEventHandler {
    pub fn new(max_events: usize) -> Self {
        Self {
            events: Vec::new(),
            max_events,
        }
    }

    /// Add event
    pub fn add_event(&mut self, event: HealEvent) {
        if self.events.len() >= self.max_events {
            // Remove oldest event
            self.events.remove(0);
        }
        self.events.push(event);
    }

    /// Get all events
    pub fn get_events(&self) -> &[HealEvent] {
        &self.events
    }

    /// Clear events
    pub fn clear_events(&mut self) {
        self.events.clear();
    }

    /// Get event count
    pub fn event_count(&self) -> usize {
        self.events.len()
    }

    /// Filter events by severity
    pub fn filter_by_severity(&self, min_severity: Severity) -> Vec<&HealEvent> {
        self.events.iter().filter(|event| event.severity() >= min_severity).collect()
    }

    /// Filter events by type
    pub fn filter_by_type(&self, event_type: &str) -> Vec<&HealEvent> {
        self.events
            .iter()
            .filter(|event| match event {
                HealEvent::ObjectCorruption { .. } => event_type == "ObjectCorruption",
                HealEvent::ObjectMissing { .. } => event_type == "ObjectMissing",
                HealEvent::MetadataCorruption { .. } => event_type == "MetadataCorruption",
                HealEvent::DiskStatusChange { .. } => event_type == "DiskStatusChange",
                HealEvent::ECDecodeFailure { .. } => event_type == "ECDecodeFailure",
                HealEvent::ChecksumMismatch { .. } => event_type == "ChecksumMismatch",
                HealEvent::BucketMetadataCorruption { .. } => event_type == "BucketMetadataCorruption",
                HealEvent::MRFMetadataCorruption { .. } => event_type == "MRFMetadataCorruption",
            })
            .collect()
    }
}

impl Default for HealEventHandler {
    fn default() -> Self {
        Self::new(1000)
    }
}
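
// Design note (illustrative sketch, not part of this diff): `add_event` above
// evicts with `Vec::remove(0)`, which is O(n) per eviction. If the queue ever
// becomes hot, a VecDeque-backed variant gives O(1) eviction. Generic over T so
// the sketch does not need to construct a HealEvent:
use std::collections::VecDeque;

struct BoundedQueue<T> {
    items: VecDeque<T>,
    cap: usize,
}

impl<T> BoundedQueue<T> {
    fn new(cap: usize) -> Self {
        Self {
            items: VecDeque::with_capacity(cap),
            cap,
        }
    }

    fn push(&mut self, item: T) {
        if self.items.len() >= self.cap {
            self.items.pop_front(); // O(1) eviction of the oldest entry
        }
        self.items.push_back(item);
    }
}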

crates/ahm/src/heal/manager.rs (new file, 422 lines)
@@ -0,0 +1,422 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::error::{Error, Result};
use crate::heal::{
    progress::{HealProgress, HealStatistics},
    storage::HealStorageAPI,
    task::{HealOptions, HealPriority, HealRequest, HealTask, HealTaskStatus, HealType},
};
use rustfs_ecstore::disk::DiskAPI;
use rustfs_ecstore::disk::error::DiskError;
use rustfs_ecstore::global::GLOBAL_LOCAL_DISK_MAP;
use std::{
    collections::{HashMap, VecDeque},
    sync::Arc,
    time::{Duration, SystemTime},
};
use tokio::{
    sync::{Mutex, RwLock},
    time::interval,
};
use tokio_util::sync::CancellationToken;
use tracing::{error, info, warn};

/// Heal config
#[derive(Debug, Clone)]
pub struct HealConfig {
    /// Whether to enable auto heal
    pub enable_auto_heal: bool,
    /// Heal interval
    pub heal_interval: Duration,
    /// Maximum concurrent heal tasks
    pub max_concurrent_heals: usize,
    /// Task timeout
    pub task_timeout: Duration,
    /// Queue size
    pub queue_size: usize,
}

impl Default for HealConfig {
    fn default() -> Self {
        Self {
            enable_auto_heal: true,
            heal_interval: Duration::from_secs(10), // 10 seconds
            max_concurrent_heals: 4,
            task_timeout: Duration::from_secs(300), // 5 minutes
            queue_size: 1000,
        }
    }
}
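
// Illustrative sketch (not part of this diff): overriding selected HealConfig
// fields while keeping the remaining defaults via struct-update syntax.
fn example_heal_config() -> HealConfig {
    HealConfig {
        max_concurrent_heals: 8,                // more parallel heal tasks
        heal_interval: Duration::from_secs(30), // scan less often
        ..HealConfig::default()
    }
}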

/// Heal state
#[derive(Debug, Default)]
pub struct HealState {
    /// Whether running
    pub is_running: bool,
    /// Current heal cycle
    pub current_cycle: u64,
    /// Last heal time
    pub last_heal_time: Option<SystemTime>,
    /// Total healed objects
    pub total_healed_objects: u64,
    /// Total heal failures
    pub total_heal_failures: u64,
    /// Current active heal tasks
    pub active_heal_count: usize,
}

/// Heal manager
pub struct HealManager {
    /// Heal config
    config: Arc<RwLock<HealConfig>>,
    /// Heal state
    state: Arc<RwLock<HealState>>,
    /// Active heal tasks
    active_heals: Arc<Mutex<HashMap<String, Arc<HealTask>>>>,
    /// Heal queue
    heal_queue: Arc<Mutex<VecDeque<HealRequest>>>,
    /// Storage layer interface
    storage: Arc<dyn HealStorageAPI>,
    /// Cancel token
    cancel_token: CancellationToken,
    /// Statistics
    statistics: Arc<RwLock<HealStatistics>>,
}

impl HealManager {
    /// Create new HealManager
    pub fn new(storage: Arc<dyn HealStorageAPI>, config: Option<HealConfig>) -> Self {
        let config = config.unwrap_or_default();
        Self {
            config: Arc::new(RwLock::new(config)),
            state: Arc::new(RwLock::new(HealState::default())),
            active_heals: Arc::new(Mutex::new(HashMap::new())),
            heal_queue: Arc::new(Mutex::new(VecDeque::new())),
            storage,
            cancel_token: CancellationToken::new(),
            statistics: Arc::new(RwLock::new(HealStatistics::new())),
        }
    }

    /// Start HealManager
    pub async fn start(&self) -> Result<()> {
        let mut state = self.state.write().await;
        if state.is_running {
            warn!("HealManager is already running");
            return Ok(());
        }
        state.is_running = true;
        drop(state);

        info!("Starting HealManager");

        // start scheduler
        self.start_scheduler().await?;

        // start auto disk scanner
        self.start_auto_disk_scanner().await?;

        info!("HealManager started successfully");
        Ok(())
    }

    /// Stop HealManager
    pub async fn stop(&self) -> Result<()> {
        info!("Stopping HealManager");

        // cancel all tasks
        self.cancel_token.cancel();

        // wait for all tasks to complete
        let mut active_heals = self.active_heals.lock().await;
        for task in active_heals.values() {
            if let Err(e) = task.cancel().await {
                warn!("Failed to cancel task {}: {}", task.id, e);
            }
        }
        active_heals.clear();

        // update state
        let mut state = self.state.write().await;
        state.is_running = false;

        info!("HealManager stopped successfully");
        Ok(())
    }

    /// Submit heal request
    pub async fn submit_heal_request(&self, request: HealRequest) -> Result<String> {
        let config = self.config.read().await;
        let mut queue = self.heal_queue.lock().await;

        if queue.len() >= config.queue_size {
            return Err(Error::ConfigurationError {
                message: "Heal queue is full".to_string(),
            });
        }

        let request_id = request.id.clone();
        queue.push_back(request);
        drop(queue);

        info!("Submitted heal request: {}", request_id);
        Ok(request_id)
    }

    /// Get task status
    pub async fn get_task_status(&self, task_id: &str) -> Result<HealTaskStatus> {
        let active_heals = self.active_heals.lock().await;
        if let Some(task) = active_heals.get(task_id) {
            Ok(task.get_status().await)
        } else {
            Err(Error::TaskNotFound {
                task_id: task_id.to_string(),
            })
        }
    }

    /// Get active tasks count
    pub async fn get_active_tasks_count(&self) -> usize {
        self.active_heals.lock().await.len()
    }

    /// Get task progress
    pub async fn get_task_progress(&self, task_id: &str) -> Result<HealProgress> {
        let active_heals = self.active_heals.lock().await;
        if let Some(task) = active_heals.get(task_id) {
            Ok(task.get_progress().await)
        } else {
            Err(Error::TaskNotFound {
                task_id: task_id.to_string(),
            })
        }
    }

    /// Cancel task
    pub async fn cancel_task(&self, task_id: &str) -> Result<()> {
        let mut active_heals = self.active_heals.lock().await;
        if let Some(task) = active_heals.get(task_id) {
            task.cancel().await?;
            active_heals.remove(task_id);
            info!("Cancelled heal task: {}", task_id);
            Ok(())
        } else {
            Err(Error::TaskNotFound {
                task_id: task_id.to_string(),
            })
        }
    }

    /// Get statistics
    pub async fn get_statistics(&self) -> HealStatistics {
        self.statistics.read().await.clone()
    }

    /// Get active task count
    pub async fn get_active_task_count(&self) -> usize {
        let active_heals = self.active_heals.lock().await;
        active_heals.len()
    }

    /// Get queue length
    pub async fn get_queue_length(&self) -> usize {
        let queue = self.heal_queue.lock().await;
        queue.len()
    }

    /// Start scheduler
    async fn start_scheduler(&self) -> Result<()> {
        let config = self.config.clone();
        let heal_queue = self.heal_queue.clone();
        let active_heals = self.active_heals.clone();
        let cancel_token = self.cancel_token.clone();
        let statistics = self.statistics.clone();
        let storage = self.storage.clone();

        tokio::spawn(async move {
            let mut interval = interval(config.read().await.heal_interval);

            loop {
                tokio::select! {
                    _ = cancel_token.cancelled() => {
                        info!("Heal scheduler received shutdown signal");
                        break;
                    }
                    _ = interval.tick() => {
                        Self::process_heal_queue(&heal_queue, &active_heals, &config, &statistics, &storage).await;
                    }
                }
            }
        });

        Ok(())
    }

    /// Start background task to auto scan local disks and enqueue erasure set heal requests
    async fn start_auto_disk_scanner(&self) -> Result<()> {
        let config = self.config.clone();
        let heal_queue = self.heal_queue.clone();
        let active_heals = self.active_heals.clone();
        let cancel_token = self.cancel_token.clone();
        let storage = self.storage.clone();

        tokio::spawn(async move {
            let mut interval = interval(config.read().await.heal_interval);

            loop {
                tokio::select! {
                    _ = cancel_token.cancelled() => {
                        info!("Auto disk scanner received shutdown signal");
                        break;
                    }
                    _ = interval.tick() => {
                        // Build list of endpoints that need healing
                        let mut endpoints = Vec::new();
                        for (_, disk_opt) in GLOBAL_LOCAL_DISK_MAP.read().await.iter() {
                            if let Some(disk) = disk_opt {
                                // detect unformatted disk via get_disk_id()
                                if let Err(err) = disk.get_disk_id().await {
                                    if err == DiskError::UnformattedDisk {
                                        endpoints.push(disk.endpoint());
                                        continue;
                                    }
                                }
                            }
                        }

                        if endpoints.is_empty() {
                            continue;
                        }

                        // Get bucket list for erasure set healing
                        let buckets = match storage.list_buckets().await {
                            Ok(buckets) => buckets.iter().map(|b| b.name.clone()).collect::<Vec<String>>(),
                            Err(e) => {
                                error!("Failed to get bucket list for auto healing: {}", e);
                                continue;
                            }
                        };

                        // Create erasure set heal requests for each endpoint
                        for ep in endpoints {
                            // The id used for de-duplication must match the id that is
                            // enqueued below; the original checks compared against
                            // "{pool}_{set}" while "pool_{pool}_set_{set}" was enqueued,
                            // so duplicates were never detected.
                            let set_disk_id = format!("pool_{}_set_{}", ep.pool_idx, ep.set_idx);

                            // skip if already queued or healing
                            let mut skip = false;
                            {
                                let queue = heal_queue.lock().await;
                                if queue.iter().any(|req| {
                                    matches!(&req.heal_type, crate::heal::task::HealType::ErasureSet { set_disk_id: id, .. } if id == &set_disk_id)
                                }) {
                                    skip = true;
                                }
                            }
                            if !skip {
                                let active = active_heals.lock().await;
                                if active.values().any(|task| {
                                    matches!(&task.heal_type, crate::heal::task::HealType::ErasureSet { set_disk_id: id, .. } if id == &set_disk_id)
                                }) {
                                    skip = true;
                                }
                            }

                            if skip {
                                continue;
                            }

                            // enqueue erasure set heal request for this disk
                            let req = HealRequest::new(
                                HealType::ErasureSet {
                                    buckets: buckets.clone(),
                                    set_disk_id: set_disk_id.clone(),
                                },
                                HealOptions::default(),
                                HealPriority::Normal,
                            );
                            let mut queue = heal_queue.lock().await;
                            queue.push_back(req);
                            info!("Enqueued auto erasure set heal for endpoint: {} (set_disk_id: {})", ep, set_disk_id);
                        }
                    }
                }
            }
        });
        Ok(())
    }

    /// Process heal queue
    async fn process_heal_queue(
        heal_queue: &Arc<Mutex<VecDeque<HealRequest>>>,
        active_heals: &Arc<Mutex<HashMap<String, Arc<HealTask>>>>,
        config: &Arc<RwLock<HealConfig>>,
        statistics: &Arc<RwLock<HealStatistics>>,
        storage: &Arc<dyn HealStorageAPI>,
    ) {
        let config = config.read().await;
        let mut active_heals_guard = active_heals.lock().await;

        // check if new heal tasks can be started
        if active_heals_guard.len() >= config.max_concurrent_heals {
            return;
        }

        let mut queue = heal_queue.lock().await;
        if let Some(request) = queue.pop_front() {
            let task = Arc::new(HealTask::from_request(request, storage.clone()));
            let task_id = task.id.clone();
            active_heals_guard.insert(task_id.clone(), task.clone());
            drop(active_heals_guard);
            let active_heals_clone = active_heals.clone();
            let statistics_clone = statistics.clone();

            // start heal task
            tokio::spawn(async move {
                info!("Starting heal task: {}", task_id);
                let result = task.execute().await;
                match result {
                    Ok(_) => {
                        info!("Heal task completed successfully: {}", task_id);
                    }
                    Err(e) => {
                        error!("Heal task failed: {} - {}", task_id, e);
                    }
                }
                let mut active_heals_guard = active_heals_clone.lock().await;
                if let Some(completed_task) = active_heals_guard.remove(&task_id) {
                    // update statistics
                    let mut stats = statistics_clone.write().await;
                    match completed_task.get_status().await {
                        HealTaskStatus::Completed => {
                            stats.update_task_completion(true);
                        }
                        _ => {
                            stats.update_task_completion(false);
                        }
                    }
                    stats.update_running_tasks(active_heals_guard.len() as u64);
                }
            });

            // update statistics
            let mut stats = statistics.write().await;
            stats.total_tasks += 1;
        }
    }
}

impl std::fmt::Debug for HealManager {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("HealManager")
            .field("config", &"<config>")
            .field("state", &"<state>")
            .field("active_heals_count", &"<active_heals>")
            .field("queue_length", &"<queue>")
            .finish()
    }
}
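
// Illustrative usage sketch (not part of this diff): the expected HealManager
// lifecycle. `storage` is assumed to be any HealStorageAPI implementation
// (e.g. ECStoreHealStorage from heal/storage.rs); the bucket name and set id
// below are hypothetical.
async fn heal_manager_example(storage: Arc<dyn HealStorageAPI>) -> Result<()> {
    let manager = HealManager::new(storage, Some(HealConfig::default()));
    manager.start().await?;

    // Enqueue a heal request; the scheduler picks it up on its next tick,
    // bounded by max_concurrent_heals.
    let request = HealRequest::new(
        HealType::ErasureSet {
            buckets: vec!["my-bucket".to_string()],
            set_disk_id: "pool_0_set_0".to_string(),
        },
        HealOptions::default(),
        HealPriority::Normal,
    );
    let task_id = manager.submit_heal_request(request).await?;

    // Status queries return TaskNotFound until the request is scheduled.
    let _ = manager.get_task_status(&task_id).await;

    manager.stop().await
}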

@@ -12,10 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.

mod app;
mod home;
mod setting;
pub mod channel;
pub mod erasure_healer;
pub mod event;
pub mod manager;
pub mod progress;
pub mod resume;
pub mod storage;
pub mod task;

pub use app::App;
pub use home::HomeViews;
pub use setting::SettingViews;
pub use erasure_healer::ErasureSetHealer;
pub use manager::HealManager;
pub use resume::{CheckpointManager, ResumeCheckpoint, ResumeManager, ResumeState, ResumeUtils};
pub use task::{HealOptions, HealPriority, HealRequest, HealTask, HealType};

crates/ahm/src/heal/progress.rs (new file, 148 lines)
@@ -0,0 +1,148 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use serde::{Deserialize, Serialize};
use std::time::SystemTime;

#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct HealProgress {
    /// Objects scanned
    pub objects_scanned: u64,
    /// Objects healed
    pub objects_healed: u64,
    /// Objects failed
    pub objects_failed: u64,
    /// Bytes processed
    pub bytes_processed: u64,
    /// Current object
    pub current_object: Option<String>,
    /// Progress percentage
    pub progress_percentage: f64,
    /// Start time
    pub start_time: Option<SystemTime>,
    /// Last update time
    pub last_update_time: Option<SystemTime>,
    /// Estimated completion time
    pub estimated_completion_time: Option<SystemTime>,
}

impl HealProgress {
    pub fn new() -> Self {
        Self {
            start_time: Some(SystemTime::now()),
            last_update_time: Some(SystemTime::now()),
            ..Default::default()
        }
    }

    pub fn update_progress(&mut self, scanned: u64, healed: u64, failed: u64, bytes: u64) {
        self.objects_scanned = scanned;
        self.objects_healed = healed;
        self.objects_failed = failed;
        self.bytes_processed = bytes;
        self.last_update_time = Some(SystemTime::now());

        // calculate progress percentage
        let total = scanned + healed + failed;
        if total > 0 {
            self.progress_percentage = (healed as f64 / total as f64) * 100.0;
        }
    }

    pub fn set_current_object(&mut self, object: Option<String>) {
        self.current_object = object;
        self.last_update_time = Some(SystemTime::now());
    }

    pub fn is_completed(&self) -> bool {
        self.progress_percentage >= 100.0
            || self.objects_scanned > 0 && self.objects_healed + self.objects_failed >= self.objects_scanned
    }

    pub fn get_success_rate(&self) -> f64 {
        let total = self.objects_healed + self.objects_failed;
        if total > 0 {
            (self.objects_healed as f64 / total as f64) * 100.0
        } else {
            0.0
        }
    }
}
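
// Illustrative sketch (not part of this diff): driving HealProgress from a
// hypothetical heal loop and reading back the derived success rate.
fn heal_progress_example() {
    let mut progress = HealProgress::new();
    progress.set_current_object(Some("bucket/object-1".to_string()));
    progress.update_progress(100, 90, 10, 4096); // scanned, healed, failed, bytes
    assert!((progress.get_success_rate() - 90.0).abs() < 1e-9); // 90 healed of 100 attempted
}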

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HealStatistics {
    /// Total heal tasks
    pub total_tasks: u64,
    /// Successful tasks
    pub successful_tasks: u64,
    /// Failed tasks
    pub failed_tasks: u64,
    /// Running tasks
    pub running_tasks: u64,
    /// Total healed objects
    pub total_objects_healed: u64,
    /// Total healed bytes
    pub total_bytes_healed: u64,
    /// Last update time
    pub last_update_time: SystemTime,
}

impl Default for HealStatistics {
    fn default() -> Self {
        Self::new()
    }
}

impl HealStatistics {
    pub fn new() -> Self {
        Self {
            total_tasks: 0,
            successful_tasks: 0,
            failed_tasks: 0,
            running_tasks: 0,
            total_objects_healed: 0,
            total_bytes_healed: 0,
            last_update_time: SystemTime::now(),
        }
    }

    pub fn update_task_completion(&mut self, success: bool) {
        if success {
            self.successful_tasks += 1;
        } else {
            self.failed_tasks += 1;
        }
        self.last_update_time = SystemTime::now();
    }

    pub fn update_running_tasks(&mut self, count: u64) {
        self.running_tasks = count;
        self.last_update_time = SystemTime::now();
    }

    pub fn add_healed_objects(&mut self, count: u64, bytes: u64) {
        self.total_objects_healed += count;
        self.total_bytes_healed += bytes;
        self.last_update_time = SystemTime::now();
    }

    pub fn get_success_rate(&self) -> f64 {
        let total = self.successful_tasks + self.failed_tasks;
        if total > 0 {
            (self.successful_tasks as f64 / total as f64) * 100.0
        } else {
            0.0
        }
    }
}
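
// Illustrative sketch (not part of this diff): the success rate counts only
// finished tasks; running tasks are tracked separately and do not affect it.
fn heal_statistics_example() {
    let mut stats = HealStatistics::new();
    stats.update_task_completion(true);
    stats.update_task_completion(true);
    stats.update_task_completion(false);
    stats.update_running_tasks(2);
    assert!((stats.get_success_rate() - 66.67).abs() < 0.01); // 2 of 3 succeeded
}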

crates/ahm/src/heal/resume.rs (new file, 696 lines)
@@ -0,0 +1,696 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::error::{Error, Result};
use rustfs_ecstore::disk::{BUCKET_META_PREFIX, DiskAPI, DiskStore, RUSTFS_META_BUCKET};
use serde::{Deserialize, Serialize};
use std::path::Path;
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
use tokio::sync::RwLock;
use tracing::{debug, info, warn};
use uuid::Uuid;

/// resume state file constants
const RESUME_STATE_FILE: &str = "ahm_resume_state.json";
const RESUME_PROGRESS_FILE: &str = "ahm_progress.json";
const RESUME_CHECKPOINT_FILE: &str = "ahm_checkpoint.json";

/// resume state
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResumeState {
    /// task id
    pub task_id: String,
    /// task type
    pub task_type: String,
    /// start time
    pub start_time: u64,
    /// last update time
    pub last_update: u64,
    /// completed
    pub completed: bool,
    /// total objects
    pub total_objects: u64,
    /// processed objects
    pub processed_objects: u64,
    /// successful objects
    pub successful_objects: u64,
    /// failed objects
    pub failed_objects: u64,
    /// skipped objects
    pub skipped_objects: u64,
    /// current bucket
    pub current_bucket: Option<String>,
    /// current object
    pub current_object: Option<String>,
    /// completed buckets
    pub completed_buckets: Vec<String>,
    /// pending buckets
    pub pending_buckets: Vec<String>,
    /// error message
    pub error_message: Option<String>,
    /// retry count
    pub retry_count: u32,
    /// max retries
    pub max_retries: u32,
}

impl ResumeState {
    pub fn new(task_id: String, task_type: String, buckets: Vec<String>) -> Self {
        Self {
            task_id,
            task_type,
            start_time: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(),
            last_update: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(),
            completed: false,
            total_objects: 0,
            processed_objects: 0,
            successful_objects: 0,
            failed_objects: 0,
            skipped_objects: 0,
            current_bucket: None,
            current_object: None,
            completed_buckets: Vec::new(),
            pending_buckets: buckets,
            error_message: None,
            retry_count: 0,
            max_retries: 3,
        }
    }

    pub fn update_progress(&mut self, processed: u64, successful: u64, failed: u64, skipped: u64) {
        self.processed_objects = processed;
        self.successful_objects = successful;
        self.failed_objects = failed;
        self.skipped_objects = skipped;
        self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
    }

    pub fn set_current_item(&mut self, bucket: Option<String>, object: Option<String>) {
        self.current_bucket = bucket;
        self.current_object = object;
        self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
    }

    pub fn complete_bucket(&mut self, bucket: &str) {
        if !self.completed_buckets.contains(&bucket.to_string()) {
            self.completed_buckets.push(bucket.to_string());
        }
        if let Some(pos) = self.pending_buckets.iter().position(|b| b == bucket) {
            self.pending_buckets.remove(pos);
        }
        self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
    }

    pub fn mark_completed(&mut self) {
        self.completed = true;
        self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
    }

    pub fn set_error(&mut self, error: String) {
        self.error_message = Some(error);
        self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
    }

    pub fn increment_retry(&mut self) {
        self.retry_count += 1;
        self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
    }

    pub fn can_retry(&self) -> bool {
        self.retry_count < self.max_retries
    }

    pub fn get_progress_percentage(&self) -> f64 {
        if self.total_objects == 0 {
            return 0.0;
        }
        (self.processed_objects as f64 / self.total_objects as f64) * 100.0
    }

    pub fn get_success_rate(&self) -> f64 {
        let total = self.successful_objects + self.failed_objects;
        if total == 0 {
            return 0.0;
        }
        (self.successful_objects as f64 / total as f64) * 100.0
    }
}
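
// Illustrative sketch (not part of this diff): a ResumeState walking through
// one bucket. The task id is hypothetical; total_objects must be set by the
// caller before the percentage is meaningful.
fn resume_state_example() {
    let mut state = ResumeState::new(
        "task-123".to_string(), // hypothetical task id
        "erasure_set".to_string(),
        vec!["bucket1".to_string(), "bucket2".to_string()],
    );
    state.total_objects = 200;
    state.set_current_item(Some("bucket1".to_string()), Some("obj-42".to_string()));
    state.update_progress(50, 48, 2, 0); // processed, successful, failed, skipped
    assert_eq!(state.get_progress_percentage(), 25.0);

    state.complete_bucket("bucket1");
    assert_eq!(state.pending_buckets, vec!["bucket2".to_string()]);
}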

/// resume manager
pub struct ResumeManager {
    disk: DiskStore,
    state: Arc<RwLock<ResumeState>>,
}

impl ResumeManager {
    /// create new resume manager
    pub async fn new(disk: DiskStore, task_id: String, task_type: String, buckets: Vec<String>) -> Result<Self> {
        let state = ResumeState::new(task_id, task_type, buckets);
        let manager = Self {
            disk,
            state: Arc::new(RwLock::new(state)),
        };

        // save initial state
        manager.save_state().await?;
        Ok(manager)
    }

    /// load resume state from disk
    pub async fn load_from_disk(disk: DiskStore, task_id: &str) -> Result<Self> {
        let state_data = Self::read_state_file(&disk, task_id).await?;
        let state: ResumeState = serde_json::from_slice(&state_data).map_err(|e| Error::TaskExecutionFailed {
            message: format!("Failed to deserialize resume state: {e}"),
        })?;

        Ok(Self {
            disk,
            state: Arc::new(RwLock::new(state)),
        })
    }

    /// check if resume state exists
    pub async fn has_resume_state(disk: &DiskStore, task_id: &str) -> bool {
        let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_STATE_FILE}"));
        match disk.read_all(RUSTFS_META_BUCKET, file_path.to_str().unwrap()).await {
            Ok(data) => !data.is_empty(),
            Err(_) => false,
        }
    }

    /// get current state
    pub async fn get_state(&self) -> ResumeState {
        self.state.read().await.clone()
    }

    /// update progress
    pub async fn update_progress(&self, processed: u64, successful: u64, failed: u64, skipped: u64) -> Result<()> {
        let mut state = self.state.write().await;
        state.update_progress(processed, successful, failed, skipped);
        drop(state);
        self.save_state().await
    }

    /// set current item
    pub async fn set_current_item(&self, bucket: Option<String>, object: Option<String>) -> Result<()> {
        let mut state = self.state.write().await;
        state.set_current_item(bucket, object);
        drop(state);
        self.save_state().await
    }

    /// complete bucket
    pub async fn complete_bucket(&self, bucket: &str) -> Result<()> {
        let mut state = self.state.write().await;
        state.complete_bucket(bucket);
        drop(state);
        self.save_state().await
    }

    /// mark task completed
    pub async fn mark_completed(&self) -> Result<()> {
        let mut state = self.state.write().await;
        state.mark_completed();
        drop(state);
        self.save_state().await
    }

    /// set error message
    pub async fn set_error(&self, error: String) -> Result<()> {
        let mut state = self.state.write().await;
        state.set_error(error);
        drop(state);
        self.save_state().await
    }

    /// increment retry count
    pub async fn increment_retry(&self) -> Result<()> {
        let mut state = self.state.write().await;
        state.increment_retry();
        drop(state);
        self.save_state().await
    }

    /// cleanup resume state
    pub async fn cleanup(&self) -> Result<()> {
        let state = self.state.read().await;
        let task_id = &state.task_id;

        // delete state files
        let state_file = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_STATE_FILE}"));
        let progress_file = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_PROGRESS_FILE}"));
        let checkpoint_file = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_CHECKPOINT_FILE}"));

        // ignore delete errors, files may not exist
        let _ = self
            .disk
            .delete(RUSTFS_META_BUCKET, state_file.to_str().unwrap(), Default::default())
            .await;
        let _ = self
            .disk
            .delete(RUSTFS_META_BUCKET, progress_file.to_str().unwrap(), Default::default())
            .await;
        let _ = self
            .disk
            .delete(RUSTFS_META_BUCKET, checkpoint_file.to_str().unwrap(), Default::default())
            .await;

        info!("Cleaned up resume state for task: {}", task_id);
        Ok(())
    }

    /// save state to disk
    async fn save_state(&self) -> Result<()> {
        let state = self.state.read().await;
        let state_data = serde_json::to_vec(&*state).map_err(|e| Error::TaskExecutionFailed {
            message: format!("Failed to serialize resume state: {e}"),
        })?;

        let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{}_{}", state.task_id, RESUME_STATE_FILE));

        self.disk
            .write_all(RUSTFS_META_BUCKET, file_path.to_str().unwrap(), state_data.into())
            .await
            .map_err(|e| Error::TaskExecutionFailed {
                message: format!("Failed to save resume state: {e}"),
            })?;

        debug!("Saved resume state for task: {}", state.task_id);
        Ok(())
    }

    /// read state file from disk
    async fn read_state_file(disk: &DiskStore, task_id: &str) -> Result<Vec<u8>> {
        let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_STATE_FILE}"));

        disk.read_all(RUSTFS_META_BUCKET, file_path.to_str().unwrap())
            .await
            .map(|bytes| bytes.to_vec())
            .map_err(|e| Error::TaskExecutionFailed {
                message: format!("Failed to read resume state file: {e}"),
            })
    }
}
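
// Illustrative sketch (not part of this diff): resume-or-start flow for a heal
// task, assuming `disk` is an available DiskStore and `task_id` may have been
// persisted by an earlier run. The bucket list is hypothetical.
async fn resume_or_start(disk: DiskStore, task_id: &str) -> Result<ResumeManager> {
    if ResumeManager::has_resume_state(&disk, task_id).await {
        // Pick up where the previous run left off.
        ResumeManager::load_from_disk(disk, task_id).await
    } else {
        // No prior state: create a fresh one (this persists the initial state).
        ResumeManager::new(
            disk,
            task_id.to_string(),
            "erasure_set".to_string(),
            vec!["bucket1".to_string()],
        )
        .await
    }
}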

/// resume checkpoint
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResumeCheckpoint {
    /// task id
    pub task_id: String,
    /// checkpoint time
    pub checkpoint_time: u64,
    /// current bucket index
    pub current_bucket_index: usize,
    /// current object index
    pub current_object_index: usize,
    /// processed objects
    pub processed_objects: Vec<String>,
    /// failed objects
    pub failed_objects: Vec<String>,
    /// skipped objects
    pub skipped_objects: Vec<String>,
}

impl ResumeCheckpoint {
    pub fn new(task_id: String) -> Self {
        Self {
            task_id,
            checkpoint_time: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(),
            current_bucket_index: 0,
            current_object_index: 0,
            processed_objects: Vec::new(),
            failed_objects: Vec::new(),
            skipped_objects: Vec::new(),
        }
    }

    pub fn update_position(&mut self, bucket_index: usize, object_index: usize) {
        self.current_bucket_index = bucket_index;
        self.current_object_index = object_index;
        self.checkpoint_time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
    }

    pub fn add_processed_object(&mut self, object: String) {
        if !self.processed_objects.contains(&object) {
            self.processed_objects.push(object);
        }
    }

    pub fn add_failed_object(&mut self, object: String) {
        if !self.failed_objects.contains(&object) {
            self.failed_objects.push(object);
        }
    }

    pub fn add_skipped_object(&mut self, object: String) {
        if !self.skipped_objects.contains(&object) {
            self.skipped_objects.push(object);
        }
    }
}

/// resume checkpoint manager
pub struct CheckpointManager {
    disk: DiskStore,
    checkpoint: Arc<RwLock<ResumeCheckpoint>>,
}

impl CheckpointManager {
    /// create new checkpoint manager
    pub async fn new(disk: DiskStore, task_id: String) -> Result<Self> {
        let checkpoint = ResumeCheckpoint::new(task_id);
        let manager = Self {
            disk,
            checkpoint: Arc::new(RwLock::new(checkpoint)),
        };

        // save initial checkpoint
        manager.save_checkpoint().await?;
        Ok(manager)
    }

    /// load checkpoint from disk
    pub async fn load_from_disk(disk: DiskStore, task_id: &str) -> Result<Self> {
        let checkpoint_data = Self::read_checkpoint_file(&disk, task_id).await?;
        let checkpoint: ResumeCheckpoint = serde_json::from_slice(&checkpoint_data).map_err(|e| Error::TaskExecutionFailed {
            message: format!("Failed to deserialize checkpoint: {e}"),
        })?;

        Ok(Self {
            disk,
            checkpoint: Arc::new(RwLock::new(checkpoint)),
        })
    }

    /// check if checkpoint exists
    pub async fn has_checkpoint(disk: &DiskStore, task_id: &str) -> bool {
        let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_CHECKPOINT_FILE}"));
        match disk.read_all(RUSTFS_META_BUCKET, file_path.to_str().unwrap()).await {
            Ok(data) => !data.is_empty(),
            Err(_) => false,
        }
    }

    /// get current checkpoint
    pub async fn get_checkpoint(&self) -> ResumeCheckpoint {
        self.checkpoint.read().await.clone()
    }

    /// update position
    pub async fn update_position(&self, bucket_index: usize, object_index: usize) -> Result<()> {
        let mut checkpoint = self.checkpoint.write().await;
        checkpoint.update_position(bucket_index, object_index);
        drop(checkpoint);
        self.save_checkpoint().await
    }

    /// add processed object
    pub async fn add_processed_object(&self, object: String) -> Result<()> {
        let mut checkpoint = self.checkpoint.write().await;
        checkpoint.add_processed_object(object);
        drop(checkpoint);
        self.save_checkpoint().await
    }

    /// add failed object
    pub async fn add_failed_object(&self, object: String) -> Result<()> {
        let mut checkpoint = self.checkpoint.write().await;
        checkpoint.add_failed_object(object);
        drop(checkpoint);
        self.save_checkpoint().await
    }

    /// add skipped object
    pub async fn add_skipped_object(&self, object: String) -> Result<()> {
        let mut checkpoint = self.checkpoint.write().await;
        checkpoint.add_skipped_object(object);
        drop(checkpoint);
        self.save_checkpoint().await
    }

    /// cleanup checkpoint
    pub async fn cleanup(&self) -> Result<()> {
        let checkpoint = self.checkpoint.read().await;
        let task_id = &checkpoint.task_id;

        let checkpoint_file = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_CHECKPOINT_FILE}"));
        let _ = self
            .disk
            .delete(RUSTFS_META_BUCKET, checkpoint_file.to_str().unwrap(), Default::default())
            .await;

        info!("Cleaned up checkpoint for task: {}", task_id);
        Ok(())
    }

    /// save checkpoint to disk
    async fn save_checkpoint(&self) -> Result<()> {
        let checkpoint = self.checkpoint.read().await;
        let checkpoint_data = serde_json::to_vec(&*checkpoint).map_err(|e| Error::TaskExecutionFailed {
            message: format!("Failed to serialize checkpoint: {e}"),
        })?;

        let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{}_{}", checkpoint.task_id, RESUME_CHECKPOINT_FILE));

        self.disk
            .write_all(RUSTFS_META_BUCKET, file_path.to_str().unwrap(), checkpoint_data.into())
            .await
            .map_err(|e| Error::TaskExecutionFailed {
                message: format!("Failed to save checkpoint: {e}"),
            })?;

        debug!("Saved checkpoint for task: {}", checkpoint.task_id);
        Ok(())
    }

    /// read checkpoint file from disk
    async fn read_checkpoint_file(disk: &DiskStore, task_id: &str) -> Result<Vec<u8>> {
        let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_CHECKPOINT_FILE}"));

        disk.read_all(RUSTFS_META_BUCKET, file_path.to_str().unwrap())
            .await
            .map(|bytes| bytes.to_vec())
            .map_err(|e| Error::TaskExecutionFailed {
                message: format!("Failed to read checkpoint file: {e}"),
            })
    }
}
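
// Illustrative sketch (not part of this diff): recording per-object outcomes
// against a checkpoint. Every mutation persists the checkpoint, trading write
// amplification for durability across restarts. Object keys are hypothetical.
async fn checkpoint_example(manager: &CheckpointManager) -> Result<()> {
    manager.update_position(0, 10).await?; // bucket index 0, object index 10
    manager.add_processed_object("bucket1/obj-10".to_string()).await?;
    manager.add_failed_object("bucket1/obj-11".to_string()).await?;
    Ok(())
}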

/// resume utils
pub struct ResumeUtils;

impl ResumeUtils {
    /// generate unique task id
    pub fn generate_task_id() -> String {
        Uuid::new_v4().to_string()
    }

    /// check if task can be resumed
    pub async fn can_resume_task(disk: &DiskStore, task_id: &str) -> bool {
        ResumeManager::has_resume_state(disk, task_id).await
    }

    /// get all resumable task ids
    pub async fn get_resumable_tasks(disk: &DiskStore) -> Result<Vec<String>> {
        // List all files in the buckets metadata directory
        let entries = match disk.list_dir("", RUSTFS_META_BUCKET, BUCKET_META_PREFIX, -1).await {
            Ok(entries) => entries,
            Err(e) => {
                debug!("Failed to list resume state files: {}", e);
                return Ok(Vec::new());
            }
        };

        let mut task_ids = Vec::new();

        // Filter files that end with ahm_resume_state.json and extract task IDs
        for entry in entries {
            if entry.ends_with(&format!("_{RESUME_STATE_FILE}")) {
                // Extract task ID from filename: {task_id}_ahm_resume_state.json
                if let Some(task_id) = entry.strip_suffix(&format!("_{RESUME_STATE_FILE}")) {
                    if !task_id.is_empty() {
                        task_ids.push(task_id.to_string());
                    }
                }
            }
        }

        debug!("Found {} resumable tasks: {:?}", task_ids.len(), task_ids);
        Ok(task_ids)
    }

    /// cleanup expired resume states
    pub async fn cleanup_expired_states(disk: &DiskStore, max_age_hours: u64) -> Result<()> {
        let task_ids = Self::get_resumable_tasks(disk).await?;
        let current_time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();

        for task_id in task_ids {
            if let Ok(resume_manager) = ResumeManager::load_from_disk(disk.clone(), &task_id).await {
                let state = resume_manager.get_state().await;
                let age_hours = (current_time - state.last_update) / 3600;

                if age_hours > max_age_hours {
                    info!("Cleaning up expired resume state for task: {} (age: {} hours)", task_id, age_hours);
                    if let Err(e) = resume_manager.cleanup().await {
                        warn!("Failed to cleanup expired resume state for task {}: {}", task_id, e);
                    }
                }
            }
        }

        Ok(())
    }
}
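
// Illustrative sketch (not part of this diff): a periodic janitor that lists
// resumable tasks and drops any state not updated within the last 24 hours.
// Assumes `disk` is a DiskStore handle to the metadata disk.
async fn resume_janitor(disk: &DiskStore) -> Result<()> {
    for task_id in ResumeUtils::get_resumable_tasks(disk).await? {
        debug!("resumable task found: {}", task_id);
    }
    ResumeUtils::cleanup_expired_states(disk, 24).await
}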

#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_resume_state_creation() {
        let task_id = ResumeUtils::generate_task_id();
        let buckets = vec!["bucket1".to_string(), "bucket2".to_string()];
        let state = ResumeState::new(task_id.clone(), "erasure_set".to_string(), buckets);

        assert_eq!(state.task_id, task_id);
        assert_eq!(state.task_type, "erasure_set");
        assert!(!state.completed);
        assert_eq!(state.processed_objects, 0);
        assert_eq!(state.pending_buckets.len(), 2);
    }

    #[tokio::test]
    async fn test_resume_state_progress() {
        let task_id = ResumeUtils::generate_task_id();
        let buckets = vec!["bucket1".to_string()];
        let mut state = ResumeState::new(task_id, "erasure_set".to_string(), buckets);

        state.update_progress(10, 8, 1, 1);
        assert_eq!(state.processed_objects, 10);
        assert_eq!(state.successful_objects, 8);
        assert_eq!(state.failed_objects, 1);
        assert_eq!(state.skipped_objects, 1);

        let progress = state.get_progress_percentage();
        assert_eq!(progress, 0.0); // total_objects is 0

        state.total_objects = 100;
        let progress = state.get_progress_percentage();
        assert_eq!(progress, 10.0);
    }

    #[tokio::test]
    async fn test_resume_state_bucket_completion() {
        let task_id = ResumeUtils::generate_task_id();
        let buckets = vec!["bucket1".to_string(), "bucket2".to_string()];
        let mut state = ResumeState::new(task_id, "erasure_set".to_string(), buckets);

        assert_eq!(state.pending_buckets.len(), 2);
        assert_eq!(state.completed_buckets.len(), 0);

        state.complete_bucket("bucket1");
        assert_eq!(state.pending_buckets.len(), 1);
        assert_eq!(state.completed_buckets.len(), 1);
        assert!(state.completed_buckets.contains(&"bucket1".to_string()));
    }

    #[tokio::test]
    async fn test_resume_utils() {
        let task_id1 = ResumeUtils::generate_task_id();
        let task_id2 = ResumeUtils::generate_task_id();

        assert_ne!(task_id1, task_id2);
        assert_eq!(task_id1.len(), 36); // UUID length
        assert_eq!(task_id2.len(), 36);
    }

    #[tokio::test]
    async fn test_get_resumable_tasks_integration() {
        use rustfs_ecstore::disk::{DiskOption, endpoint::Endpoint, new_disk};
        use tempfile::TempDir;

        // Create a temporary directory for testing
        let temp_dir = TempDir::new().unwrap();
        let disk_path = temp_dir.path().join("test_disk");
        std::fs::create_dir_all(&disk_path).unwrap();

        // Create a local disk for testing
        let endpoint = Endpoint::try_from(disk_path.to_string_lossy().as_ref()).unwrap();
        let disk_option = DiskOption {
            cleanup: false,
            health_check: false,
        };
        let disk = new_disk(&endpoint, &disk_option).await.unwrap();

        // Create necessary directories first (ignore if already exist)
        let _ = disk.make_volume(RUSTFS_META_BUCKET).await;
        let _ = disk.make_volume(&format!("{RUSTFS_META_BUCKET}/{BUCKET_META_PREFIX}")).await;

        // Create some test resume state files
        let task_ids = vec![
            "test-task-1".to_string(),
            "test-task-2".to_string(),
            "test-task-3".to_string(),
        ];

        // Save resume state files for each task
        for task_id in &task_ids {
            let state = ResumeState::new(
                task_id.clone(),
                "erasure_set".to_string(),
                vec!["bucket1".to_string(), "bucket2".to_string()],
            );

            let state_data = serde_json::to_vec(&state).unwrap();
            let file_path = format!("{BUCKET_META_PREFIX}/{task_id}_{RESUME_STATE_FILE}");

            disk.write_all(RUSTFS_META_BUCKET, &file_path, state_data.into())
                .await
                .unwrap();
        }

        // Also create some non-resume state files to test filtering
        let non_resume_files = vec![
            "other_file.txt",
            "task4_ahm_checkpoint.json",
            "task5_ahm_progress.json",
            "_ahm_resume_state.json", // Invalid: empty task ID
        ];

        for file_name in non_resume_files {
            let file_path = format!("{BUCKET_META_PREFIX}/{file_name}");
            disk.write_all(RUSTFS_META_BUCKET, &file_path, b"test data".to_vec().into())
                .await
                .unwrap();
        }

        // Now call get_resumable_tasks to see if it finds the correct files
        let found_task_ids = ResumeUtils::get_resumable_tasks(&disk).await.unwrap();

        // Verify that only the valid resume state files are found
        assert_eq!(found_task_ids.len(), 3);
        for task_id in &task_ids {
            assert!(found_task_ids.contains(task_id), "Task ID {task_id} not found");
        }

        // Verify that invalid files are not included
        assert!(!found_task_ids.contains(&"".to_string()));
        assert!(!found_task_ids.contains(&"task4".to_string()));
        assert!(!found_task_ids.contains(&"task5".to_string()));

        // Clean up
        temp_dir.close().unwrap();
    }
}

crates/ahm/src/heal/storage.rs (new file, 544 lines)
@@ -0,0 +1,544 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::error::{Error, Result};
use async_trait::async_trait;
use rustfs_common::heal_channel::{HealOpts, HealScanMode};
use rustfs_ecstore::{
    disk::{DiskStore, endpoint::Endpoint},
    store::ECStore,
    store_api::{BucketInfo, ObjectIO, StorageAPI},
};
use rustfs_madmin::heal_commands::HealResultItem;
use std::sync::Arc;
use tracing::{debug, error, info, warn};

/// Disk status for heal operations
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum DiskStatus {
    /// Ok
    Ok,
    /// Offline
    Offline,
    /// Corrupt
    Corrupt,
    /// Missing
    Missing,
    /// Permission denied
    PermissionDenied,
    /// Faulty
    Faulty,
    /// Root mount
    RootMount,
    /// Unknown
    Unknown,
    /// Unformatted
    Unformatted,
}

/// Heal storage layer interface
#[async_trait]
pub trait HealStorageAPI: Send + Sync {
    /// Get object meta
    async fn get_object_meta(&self, bucket: &str, object: &str) -> Result<Option<rustfs_ecstore::store_api::ObjectInfo>>;

    /// Get object data
    async fn get_object_data(&self, bucket: &str, object: &str) -> Result<Option<Vec<u8>>>;

    /// Put object data
    async fn put_object_data(&self, bucket: &str, object: &str, data: &[u8]) -> Result<()>;

    /// Delete object
    async fn delete_object(&self, bucket: &str, object: &str) -> Result<()>;

    /// Check object integrity
    async fn verify_object_integrity(&self, bucket: &str, object: &str) -> Result<bool>;

    /// EC decode rebuild
    async fn ec_decode_rebuild(&self, bucket: &str, object: &str) -> Result<Vec<u8>>;

    /// Get disk status
    async fn get_disk_status(&self, endpoint: &Endpoint) -> Result<DiskStatus>;

    /// Format disk
    async fn format_disk(&self, endpoint: &Endpoint) -> Result<()>;

    /// Get bucket info
    async fn get_bucket_info(&self, bucket: &str) -> Result<Option<BucketInfo>>;

    /// Fix bucket metadata
    async fn heal_bucket_metadata(&self, bucket: &str) -> Result<()>;

    /// Get all buckets
    async fn list_buckets(&self) -> Result<Vec<BucketInfo>>;

    /// Check object exists
    async fn object_exists(&self, bucket: &str, object: &str) -> Result<bool>;

    /// Get object size
    async fn get_object_size(&self, bucket: &str, object: &str) -> Result<Option<u64>>;

    /// Get object checksum
    async fn get_object_checksum(&self, bucket: &str, object: &str) -> Result<Option<String>>;

    /// Heal object using ecstore
    async fn heal_object(
        &self,
        bucket: &str,
        object: &str,
        version_id: Option<&str>,
        opts: &HealOpts,
    ) -> Result<(HealResultItem, Option<Error>)>;

    /// Heal bucket using ecstore
    async fn heal_bucket(&self, bucket: &str, opts: &HealOpts) -> Result<HealResultItem>;

    /// Heal format using ecstore
    async fn heal_format(&self, dry_run: bool) -> Result<(HealResultItem, Option<Error>)>;

    /// List objects for healing
    async fn list_objects_for_heal(&self, bucket: &str, prefix: &str) -> Result<Vec<String>>;

    /// Get disk for resume functionality
    async fn get_disk_for_resume(&self, set_disk_id: &str) -> Result<DiskStore>;
}

/// ECStore Heal storage layer implementation
pub struct ECStoreHealStorage {
    ecstore: Arc<ECStore>,
}

impl ECStoreHealStorage {
    pub fn new(ecstore: Arc<ECStore>) -> Self {
        Self { ecstore }
    }
}
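
// Illustrative sketch (not part of this diff): adapting a concrete ECStore into
// the trait object that HealManager consumes. Assumes `ecstore` was initialized
// elsewhere during server startup.
fn into_heal_storage(ecstore: Arc<ECStore>) -> Arc<dyn HealStorageAPI> {
    Arc::new(ECStoreHealStorage::new(ecstore))
}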

#[async_trait]
impl HealStorageAPI for ECStoreHealStorage {
    async fn get_object_meta(&self, bucket: &str, object: &str) -> Result<Option<rustfs_ecstore::store_api::ObjectInfo>> {
        debug!("Getting object meta: {}/{}", bucket, object);

        match self.ecstore.get_object_info(bucket, object, &Default::default()).await {
            Ok(info) => Ok(Some(info)),
            Err(e) => {
                // Map ObjectNotFound to None to align with Option return type
                if matches!(e, rustfs_ecstore::error::StorageError::ObjectNotFound(_, _)) {
                    debug!("Object meta not found: {}/{}", bucket, object);
                    Ok(None)
                } else {
                    error!("Failed to get object meta: {}/{} - {}", bucket, object, e);
                    Err(Error::other(e))
                }
            }
        }
    }

    async fn get_object_data(&self, bucket: &str, object: &str) -> Result<Option<Vec<u8>>> {
        debug!("Getting object data: {}/{}", bucket, object);

        let reader = match (*self.ecstore)
            .get_object_reader(bucket, object, None, Default::default(), &Default::default())
            .await
        {
            Ok(reader) => reader,
            Err(e) => {
                error!("Failed to get object: {}/{} - {}", bucket, object, e);
                return Err(Error::other(e));
            }
        };

        // WARNING: Returning Vec<u8> for large objects is dangerous. To avoid OOM, cap the read size.
        // If needed, refactor callers to stream instead of buffering entire object.
        const MAX_READ_BYTES: usize = 16 * 1024 * 1024; // 16 MiB cap
        let mut buf = Vec::with_capacity(1024 * 1024);
        use tokio::io::AsyncReadExt as _;
        let mut n_read: usize = 0;
        let mut stream = reader.stream;
        loop {
            // Read in chunks
            let mut chunk = vec![0u8; 1024 * 1024];
            match stream.read(&mut chunk).await {
                Ok(0) => break,
                Ok(n) => {
                    buf.extend_from_slice(&chunk[..n]);
                    n_read += n;
                    if n_read > MAX_READ_BYTES {
                        warn!(
                            "Object data exceeds cap ({} bytes), aborting full read to prevent OOM: {}/{}",
                            MAX_READ_BYTES, bucket, object
                        );
                        return Ok(None);
                    }
                }
                Err(e) => {
                    error!("Failed to read object data: {}/{} - {}", bucket, object, e);
                    return Err(Error::other(e));
                }
            }
        }
        Ok(Some(buf))
    }

    async fn put_object_data(&self, bucket: &str, object: &str, data: &[u8]) -> Result<()> {
        debug!("Putting object data: {}/{} ({} bytes)", bucket, object, data.len());

        let mut reader = rustfs_ecstore::store_api::PutObjReader::from_vec(data.to_vec());
        match (*self.ecstore)
            .put_object(bucket, object, &mut reader, &Default::default())
            .await
        {
            Ok(_) => {
                info!("Successfully put object: {}/{}", bucket, object);
                Ok(())
            }
            Err(e) => {
                error!("Failed to put object: {}/{} - {}", bucket, object, e);
                Err(Error::other(e))
            }
        }
    }

    async fn delete_object(&self, bucket: &str, object: &str) -> Result<()> {
        debug!("Deleting object: {}/{}", bucket, object);

        match self.ecstore.delete_object(bucket, object, Default::default()).await {
            Ok(_) => {
                info!("Successfully deleted object: {}/{}", bucket, object);
                Ok(())
            }
            Err(e) => {
                error!("Failed to delete object: {}/{} - {}", bucket, object, e);
                Err(Error::other(e))
            }
        }
    }

    async fn verify_object_integrity(&self, bucket: &str, object: &str) -> Result<bool> {
        debug!("Verifying object integrity: {}/{}", bucket, object);

        // Check object metadata first
        match self.get_object_meta(bucket, object).await? {
            Some(obj_info) => {
                if obj_info.size < 0 {
                    warn!("Object has invalid size: {}/{}", bucket, object);
                    return Ok(false);
                }

                // Stream-read the object to a sink to avoid loading into memory
                match (*self.ecstore)
                    .get_object_reader(bucket, object, None, Default::default(), &Default::default())
                    .await
                {
                    Ok(reader) => {
                        let mut stream = reader.stream;
                        match tokio::io::copy(&mut stream, &mut tokio::io::sink()).await {
                            Ok(_) => {
                                info!("Object integrity check passed: {}/{}", bucket, object);
                                Ok(true)
                            }
                            Err(e) => {
                                warn!("Object stream read failed: {}/{} - {}", bucket, object, e);
                                Ok(false)
                            }
                        }
                    }
                    Err(e) => {
                        warn!("Failed to get object reader: {}/{} - {}", bucket, object, e);
                        Ok(false)
                    }
                }
            }
            None => {
                warn!("Object metadata not found: {}/{}", bucket, object);
                Ok(false)
            }
        }
    }

    async fn ec_decode_rebuild(&self, bucket: &str, object: &str) -> Result<Vec<u8>> {
        debug!("EC decode rebuild: {}/{}", bucket, object);

        // Use ecstore's heal_object to rebuild the object
        let heal_opts = HealOpts {
            recursive: false,
            dry_run: false,
            remove: false,
            recreate: true,
            scan_mode: HealScanMode::Deep,
            update_parity: true,
            no_lock: false,
            pool: None,
            set: None,
        };

        match self.heal_object(bucket, object, None, &heal_opts).await {
            Ok((_result, error)) => {
                if error.is_some() {
                    return Err(Error::TaskExecutionFailed {
                        message: format!("Heal failed: {error:?}"),
                    });
                }

                // After healing, try to read the object data
                match self.get_object_data(bucket, object).await? {
                    Some(data) => {
                        info!("EC decode rebuild successful: {}/{} ({} bytes)", bucket, object, data.len());
                        Ok(data)
                    }
                    None => {
                        error!("Object not found after heal: {}/{}", bucket, object);
                        Err(Error::TaskExecutionFailed {
                            message: format!("Object not found after heal: {bucket}/{object}"),
                        })
                    }
                }
            }
            Err(e) => {
                error!("Heal operation failed: {}/{} - {}", bucket, object, e);
                Err(e)
            }
        }
    }

    async fn get_disk_status(&self, endpoint: &Endpoint) -> Result<DiskStatus> {
        debug!("Getting disk status: {:?}", endpoint);

        // TODO: implement disk status check using ecstore
        // For now, return Ok status
        info!("Disk status check: {:?} - OK", endpoint);
        Ok(DiskStatus::Ok)
    }

    async fn format_disk(&self, endpoint: &Endpoint) -> Result<()> {
        debug!("Formatting disk: {:?}", endpoint);

        // Use ecstore's heal_format
        match self.heal_format(false).await {
            Ok((_, error)) => {
                if error.is_some() {
                    return Err(Error::other(format!("Format failed: {error:?}")));
                }
                info!("Successfully formatted disk: {:?}", endpoint);
                Ok(())
            }
            Err(e) => {
                error!("Failed to format disk: {:?} - {}", endpoint, e);
                Err(e)
            }
        }
    }

    async fn get_bucket_info(&self, bucket: &str) -> Result<Option<BucketInfo>> {
        debug!("Getting bucket info: {}", bucket);

        match self.ecstore.get_bucket_info(bucket, &Default::default()).await {
            Ok(info) => Ok(Some(info)),
            Err(e) => {
                error!("Failed to get bucket info: {} - {}", bucket, e);
                Err(Error::other(e))
            }
        }
    }

    async fn heal_bucket_metadata(&self, bucket: &str) -> Result<()> {
        debug!("Healing bucket metadata: {}", bucket);

        let heal_opts = HealOpts {
            recursive: true,
            dry_run: false,
            remove: false,
            recreate: false,
            scan_mode: HealScanMode::Normal,
            update_parity: false,
            no_lock: false,
            pool: None,
            set: None,
        };

        match self.heal_bucket(bucket, &heal_opts).await {
            Ok(_) => {
                info!("Successfully healed bucket metadata: {}", bucket);
                Ok(())
            }
            Err(e) => {
                error!("Failed to heal bucket metadata: {} - {}", bucket, e);
                Err(e)
            }
        }
    }

    async fn list_buckets(&self) -> Result<Vec<BucketInfo>> {
        debug!("Listing buckets");

        match self.ecstore.list_bucket(&Default::default()).await {
            Ok(buckets) => Ok(buckets),
            Err(e) => {
                error!("Failed to list buckets: {}", e);
                Err(Error::other(e))
            }
        }
    }

    async fn object_exists(&self, bucket: &str, object: &str) -> Result<bool> {
        debug!("Checking object exists: {}/{}", bucket, object);

        match self.get_object_meta(bucket, object).await {
            Ok(Some(_)) => Ok(true),
            Ok(None) => Ok(false),
            Err(_) => Ok(false),
        }
    }

    async fn get_object_size(&self, bucket: &str, object: &str) -> Result<Option<u64>> {
        debug!("Getting object size: {}/{}", bucket, object);

        match self.get_object_meta(bucket, object).await {
            Ok(Some(obj_info)) => Ok(Some(obj_info.size as u64)),
            Ok(None) => Ok(None),
            Err(e) => Err(e),
        }
    }

    async fn get_object_checksum(&self, bucket: &str, object: &str) -> Result<Option<String>> {
        debug!("Getting object checksum: {}/{}", bucket, object);

        match self.get_object_meta(bucket, object).await {
            Ok(Some(obj_info)) => {
                // Convert checksum bytes to hex string
                let checksum = obj_info.checksum.iter().map(|b| format!("{b:02x}")).collect::<String>();
                Ok(Some(checksum))
            }
            Ok(None) => Ok(None),
            Err(e) => Err(e),
        }
    }

    async fn heal_object(
        &self,
        bucket: &str,
        object: &str,
        version_id: Option<&str>,
        opts: &HealOpts,
    ) -> Result<(HealResultItem, Option<Error>)> {
        debug!("Healing object: {}/{}", bucket, object);

        let version_id_str = version_id.unwrap_or("");

        match self.ecstore.heal_object(bucket, object, version_id_str, opts).await {
            Ok((result, ecstore_error)) => {
|
||||
let error = ecstore_error.map(Error::other);
|
||||
info!("Heal object completed: {}/{} - result: {:?}, error: {:?}", bucket, object, result, error);
|
||||
Ok((result, error))
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Heal object failed: {}/{} - {}", bucket, object, e);
|
||||
Err(Error::other(e))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn heal_bucket(&self, bucket: &str, opts: &HealOpts) -> Result<HealResultItem> {
|
||||
debug!("Healing bucket: {}", bucket);
|
||||
|
||||
match self.ecstore.heal_bucket(bucket, opts).await {
|
||||
Ok(result) => {
|
||||
info!("Heal bucket completed: {} - result: {:?}", bucket, result);
|
||||
Ok(result)
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Heal bucket failed: {} - {}", bucket, e);
|
||||
Err(Error::other(e))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn heal_format(&self, dry_run: bool) -> Result<(HealResultItem, Option<Error>)> {
|
||||
debug!("Healing format (dry_run: {})", dry_run);
|
||||
|
||||
match self.ecstore.heal_format(dry_run).await {
|
||||
Ok((result, ecstore_error)) => {
|
||||
let error = ecstore_error.map(Error::other);
|
||||
info!("Heal format completed - result: {:?}, error: {:?}", result, error);
|
||||
Ok((result, error))
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Heal format failed: {}", e);
|
||||
Err(Error::other(e))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn list_objects_for_heal(&self, bucket: &str, prefix: &str) -> Result<Vec<String>> {
|
||||
debug!("Listing objects for heal: {}/{}", bucket, prefix);
|
||||
|
||||
// Use list_objects_v2 to get objects
|
||||
match self
|
||||
.ecstore
|
||||
.clone()
|
||||
.list_objects_v2(bucket, prefix, None, None, 1000, false, None)
|
||||
.await
|
||||
{
|
||||
Ok(list_info) => {
|
||||
let objects: Vec<String> = list_info.objects.into_iter().map(|obj| obj.name).collect();
|
||||
info!("Found {} objects for heal in {}/{}", objects.len(), bucket, prefix);
|
||||
Ok(objects)
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to list objects for heal: {}/{} - {}", bucket, prefix, e);
|
||||
Err(Error::other(e))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_disk_for_resume(&self, set_disk_id: &str) -> Result<DiskStore> {
|
||||
debug!("Getting disk for resume: {}", set_disk_id);
|
||||
|
||||
// Parse set_disk_id to extract pool and set indices
|
||||
// Format: "pool_{pool_idx}_set_{set_idx}"
|
||||
let parts: Vec<&str> = set_disk_id.split('_').collect();
|
||||
if parts.len() != 4 || parts[0] != "pool" || parts[2] != "set" {
|
||||
return Err(Error::TaskExecutionFailed {
|
||||
message: format!("Invalid set_disk_id format: {set_disk_id}"),
|
||||
});
|
||||
}
|
||||
|
||||
let pool_idx: usize = parts[1].parse().map_err(|_| Error::TaskExecutionFailed {
|
||||
message: format!("Invalid pool index in set_disk_id: {set_disk_id}"),
|
||||
})?;
|
||||
|
||||
let set_idx: usize = parts[3].parse().map_err(|_| Error::TaskExecutionFailed {
|
||||
message: format!("Invalid set index in set_disk_id: {set_disk_id}"),
|
||||
})?;
|
||||
|
||||
// Get the first available disk from the set
|
||||
let disks = self
|
||||
.ecstore
|
||||
.get_disks(pool_idx, set_idx)
|
||||
.await
|
||||
.map_err(|e| Error::TaskExecutionFailed {
|
||||
message: format!("Failed to get disks for pool {pool_idx} set {set_idx}: {e}"),
|
||||
})?;
|
||||
|
||||
// Find the first available disk
|
||||
if let Some(disk_store) = disks.into_iter().flatten().next() {
|
||||
info!("Found disk for resume: {:?}", disk_store);
|
||||
return Ok(disk_store);
|
||||
}
|
||||
|
||||
Err(Error::TaskExecutionFailed {
|
||||
message: format!("No available disk found for set_disk_id: {set_disk_id}"),
|
||||
})
|
||||
}
|
||||
}
|
||||
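// Example (illustrative, not part of the diff): `get_disk_for_resume` expects
// ids of the form "pool_{pool_idx}_set_{set_idx}". A hypothetical helper that
// builds ids in this shape keeps producers and the parser above in sync.
fn make_set_disk_id(pool_idx: usize, set_idx: usize) -> String {
    format!("pool_{pool_idx}_set_{set_idx}")
}
// make_set_disk_id(0, 2) yields "pool_0_set_2", which splits into exactly four
// '_'-separated parts and parses back to pool 0, set 2.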
crates/ahm/src/heal/task.rs (new file, 855 lines)
@@ -0,0 +1,855 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::error::{Error, Result};
use crate::heal::ErasureSetHealer;
use crate::heal::{progress::HealProgress, storage::HealStorageAPI};
use rustfs_common::heal_channel::{HealOpts, HealScanMode};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use std::time::{Duration, SystemTime};
use tokio::sync::RwLock;
use tracing::{error, info, warn};
use uuid::Uuid;

/// Heal type
#[derive(Debug, Clone)]
pub enum HealType {
    /// Object heal
    Object {
        bucket: String,
        object: String,
        version_id: Option<String>,
    },
    /// Bucket heal
    Bucket { bucket: String },
    /// Erasure set heal (includes disk format repair)
    ErasureSet { buckets: Vec<String>, set_disk_id: String },
    /// Metadata heal
    Metadata { bucket: String, object: String },
    /// MRF heal
    MRF { meta_path: String },
    /// EC decode heal
    ECDecode {
        bucket: String,
        object: String,
        version_id: Option<String>,
    },
}

/// Heal priority
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub enum HealPriority {
    /// Low priority
    Low = 0,
    /// Normal priority
    Normal = 1,
    /// High priority
    High = 2,
    /// Urgent priority
    Urgent = 3,
}

impl Default for HealPriority {
    fn default() -> Self {
        Self::Normal
    }
}

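// Example (illustrative): the derived `Ord` follows the declared discriminants,
// so a scheduler can compare priorities directly or sort pending requests.
#[cfg(test)]
mod heal_priority_tests {
    use super::HealPriority;

    #[test]
    fn priorities_order_by_urgency() {
        assert!(HealPriority::Urgent > HealPriority::High);
        assert!(HealPriority::High > HealPriority::Normal);
        assert!(HealPriority::Normal > HealPriority::Low);
        assert_eq!(HealPriority::default(), HealPriority::Normal);
    }
}
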
/// Heal options
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HealOptions {
    /// Scan mode
    pub scan_mode: HealScanMode,
    /// Whether to remove corrupted data
    pub remove_corrupted: bool,
    /// Whether to recreate missing data
    pub recreate_missing: bool,
    /// Whether to update parity
    pub update_parity: bool,
    /// Whether to process recursively
    pub recursive: bool,
    /// Whether to dry run
    pub dry_run: bool,
    /// Timeout
    pub timeout: Option<Duration>,
    /// Pool index
    pub pool_index: Option<usize>,
    /// Set index
    pub set_index: Option<usize>,
}

impl Default for HealOptions {
    fn default() -> Self {
        Self {
            scan_mode: HealScanMode::Normal,
            remove_corrupted: false,
            recreate_missing: true,
            update_parity: true,
            recursive: false,
            dry_run: false,
            timeout: Some(Duration::from_secs(300)), // 5 minutes default timeout
            pool_index: None,
            set_index: None,
        }
    }
}

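// Example (illustrative): a caller that wants a deep, non-destructive check can
// start from the defaults and override just two fields. The function name is
// hypothetical.
fn deep_dry_run_options() -> HealOptions {
    HealOptions {
        scan_mode: HealScanMode::Deep,
        dry_run: true,
        ..HealOptions::default()
    }
}
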
/// Heal task status
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum HealTaskStatus {
    /// Pending
    Pending,
    /// Running
    Running,
    /// Completed
    Completed,
    /// Failed
    Failed { error: String },
    /// Cancelled
    Cancelled,
    /// Timeout
    Timeout,
}

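// Example (illustrative): a hypothetical helper showing how callers can decide
// whether a task has reached a terminal state.
fn is_terminal(status: &HealTaskStatus) -> bool {
    matches!(
        status,
        HealTaskStatus::Completed | HealTaskStatus::Failed { .. } | HealTaskStatus::Cancelled | HealTaskStatus::Timeout
    )
}
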
/// Heal request
#[derive(Debug, Clone)]
pub struct HealRequest {
    /// Request ID
    pub id: String,
    /// Heal type
    pub heal_type: HealType,
    /// Heal options
    pub options: HealOptions,
    /// Priority
    pub priority: HealPriority,
    /// Created time
    pub created_at: SystemTime,
}

impl HealRequest {
    pub fn new(heal_type: HealType, options: HealOptions, priority: HealPriority) -> Self {
        Self {
            id: Uuid::new_v4().to_string(),
            heal_type,
            options,
            priority,
            created_at: SystemTime::now(),
        }
    }

    pub fn object(bucket: String, object: String, version_id: Option<String>) -> Self {
        Self::new(
            HealType::Object {
                bucket,
                object,
                version_id,
            },
            HealOptions::default(),
            HealPriority::Normal,
        )
    }

    pub fn bucket(bucket: String) -> Self {
        Self::new(HealType::Bucket { bucket }, HealOptions::default(), HealPriority::Normal)
    }

    pub fn metadata(bucket: String, object: String) -> Self {
        Self::new(HealType::Metadata { bucket, object }, HealOptions::default(), HealPriority::High)
    }

    pub fn ec_decode(bucket: String, object: String, version_id: Option<String>) -> Self {
        Self::new(
            HealType::ECDecode {
                bucket,
                object,
                version_id,
            },
            HealOptions::default(),
            HealPriority::Urgent,
        )
    }
}

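// Example (illustrative): the convenience constructors pick a priority per
// scenario: urgent for EC decode, high for metadata, normal otherwise.
fn example_requests() -> Vec<HealRequest> {
    vec![
        HealRequest::object("photos".to_string(), "2024/cat.png".to_string(), None),
        HealRequest::bucket("photos".to_string()),
        HealRequest::metadata("photos".to_string(), "2024/cat.png".to_string()),
        HealRequest::ec_decode("photos".to_string(), "2024/cat.png".to_string(), None),
    ]
}
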
/// Heal task
pub struct HealTask {
    /// Task ID
    pub id: String,
    /// Heal type
    pub heal_type: HealType,
    /// Heal options
    pub options: HealOptions,
    /// Task status
    pub status: Arc<RwLock<HealTaskStatus>>,
    /// Progress tracking
    pub progress: Arc<RwLock<HealProgress>>,
    /// Created time
    pub created_at: SystemTime,
    /// Started time
    pub started_at: Arc<RwLock<Option<SystemTime>>>,
    /// Completed time
    pub completed_at: Arc<RwLock<Option<SystemTime>>>,
    /// Cancel token
    pub cancel_token: tokio_util::sync::CancellationToken,
    /// Storage layer interface
    pub storage: Arc<dyn HealStorageAPI>,
}

impl HealTask {
    pub fn from_request(request: HealRequest, storage: Arc<dyn HealStorageAPI>) -> Self {
        Self {
            id: request.id,
            heal_type: request.heal_type,
            options: request.options,
            status: Arc::new(RwLock::new(HealTaskStatus::Pending)),
            progress: Arc::new(RwLock::new(HealProgress::new())),
            created_at: request.created_at,
            started_at: Arc::new(RwLock::new(None)),
            completed_at: Arc::new(RwLock::new(None)),
            cancel_token: tokio_util::sync::CancellationToken::new(),
            storage,
        }
    }

    pub async fn execute(&self) -> Result<()> {
        // Update status to running
        {
            let mut status = self.status.write().await;
            *status = HealTaskStatus::Running;
        }
        {
            let mut started_at = self.started_at.write().await;
            *started_at = Some(SystemTime::now());
        }

        info!("Starting heal task: {} with type: {:?}", self.id, self.heal_type);

        let result = match &self.heal_type {
            HealType::Object {
                bucket,
                object,
                version_id,
            } => self.heal_object(bucket, object, version_id.as_deref()).await,
            HealType::Bucket { bucket } => self.heal_bucket(bucket).await,
            HealType::Metadata { bucket, object } => self.heal_metadata(bucket, object).await,
            HealType::MRF { meta_path } => self.heal_mrf(meta_path).await,
            HealType::ECDecode {
                bucket,
                object,
                version_id,
            } => self.heal_ec_decode(bucket, object, version_id.as_deref()).await,
            HealType::ErasureSet { buckets, set_disk_id } => self.heal_erasure_set(buckets.clone(), set_disk_id.clone()).await,
        };

        // Update completed time and status
        {
            let mut completed_at = self.completed_at.write().await;
            *completed_at = Some(SystemTime::now());
        }

        match &result {
            Ok(_) => {
                let mut status = self.status.write().await;
                *status = HealTaskStatus::Completed;
                info!("Heal task completed successfully: {}", self.id);
            }
            Err(e) => {
                let mut status = self.status.write().await;
                *status = HealTaskStatus::Failed { error: e.to_string() };
                error!("Heal task failed: {} with error: {}", self.id, e);
            }
        }

        result
    }

    pub async fn cancel(&self) -> Result<()> {
        self.cancel_token.cancel();
        let mut status = self.status.write().await;
        *status = HealTaskStatus::Cancelled;
        info!("Heal task cancelled: {}", self.id);
        Ok(())
    }

    pub async fn get_status(&self) -> HealTaskStatus {
        self.status.read().await.clone()
    }

    pub async fn get_progress(&self) -> HealProgress {
        self.progress.read().await.clone()
    }

    // Specific heal implementation methods
    async fn heal_object(&self, bucket: &str, object: &str, version_id: Option<&str>) -> Result<()> {
        info!("Healing object: {}/{}", bucket, object);

        // Update progress
        {
            let mut progress = self.progress.write().await;
            progress.set_current_object(Some(format!("{bucket}/{object}")));
            progress.update_progress(0, 3, 0, 0); // starting heal: three steps in total
        }

        // Step 1: Check if object exists and get metadata
        info!("Step 1: Checking object existence and metadata");
        let object_exists = self.storage.object_exists(bucket, object).await?;
        if !object_exists {
            warn!("Object does not exist: {}/{}", bucket, object);
            if self.options.recreate_missing {
                info!("Attempting to recreate missing object: {}/{}", bucket, object);
                return self.recreate_missing_object(bucket, object, version_id).await;
            } else {
                return Err(Error::TaskExecutionFailed {
                    message: format!("Object not found: {bucket}/{object}"),
                });
            }
        }

        {
            let mut progress = self.progress.write().await;
            progress.update_progress(1, 3, 0, 0);
        }

        // Step 2: Directly call ecstore to perform the heal
        info!("Step 2: Performing heal using ecstore");
        let heal_opts = HealOpts {
            recursive: self.options.recursive,
            dry_run: self.options.dry_run,
            remove: self.options.remove_corrupted,
            recreate: self.options.recreate_missing,
            scan_mode: self.options.scan_mode,
            update_parity: self.options.update_parity,
            no_lock: false,
            pool: self.options.pool_index,
            set: self.options.set_index,
        };

        match self.storage.heal_object(bucket, object, version_id, &heal_opts).await {
            Ok((result, error)) => {
                if let Some(e) = error {
                    error!("Heal operation failed: {}/{} - {}", bucket, object, e);

                    // If the heal failed and remove_corrupted is enabled, delete the corrupted object
                    if self.options.remove_corrupted {
                        warn!("Removing corrupted object: {}/{}", bucket, object);
                        if !self.options.dry_run {
                            self.storage.delete_object(bucket, object).await?;
                            info!("Successfully deleted corrupted object: {}/{}", bucket, object);
                        } else {
                            info!("Dry run mode - would delete corrupted object: {}/{}", bucket, object);
                        }
                    }

                    {
                        let mut progress = self.progress.write().await;
                        progress.update_progress(3, 3, 0, 0);
                    }

                    return Err(Error::TaskExecutionFailed {
                        message: format!("Failed to heal object {bucket}/{object}: {e}"),
                    });
                }

                // Step 3: Verify heal result
                info!("Step 3: Verifying heal result");
                let object_size = result.object_size as u64;
                info!(
                    "Heal completed successfully: {}/{} ({} bytes, {} drives healed)",
                    bucket,
                    object,
                    object_size,
                    result.after.drives.len()
                );

                {
                    let mut progress = self.progress.write().await;
                    progress.update_progress(3, 3, object_size, object_size);
                }
                Ok(())
            }
            Err(e) => {
                error!("Heal operation failed: {}/{} - {}", bucket, object, e);

                // If the heal failed and remove_corrupted is enabled, delete the corrupted object
                if self.options.remove_corrupted {
                    warn!("Removing corrupted object: {}/{}", bucket, object);
                    if !self.options.dry_run {
                        self.storage.delete_object(bucket, object).await?;
                        info!("Successfully deleted corrupted object: {}/{}", bucket, object);
                    } else {
                        info!("Dry run mode - would delete corrupted object: {}/{}", bucket, object);
                    }
                }

                {
                    let mut progress = self.progress.write().await;
                    progress.update_progress(3, 3, 0, 0);
                }

                Err(Error::TaskExecutionFailed {
                    message: format!("Failed to heal object {bucket}/{object}: {e}"),
                })
            }
        }
    }

    /// Recreate missing object (for EC decode scenarios)
    async fn recreate_missing_object(&self, bucket: &str, object: &str, version_id: Option<&str>) -> Result<()> {
        info!("Attempting to recreate missing object: {}/{}", bucket, object);

        // Use ecstore's heal_object with the recreate option
        let heal_opts = HealOpts {
            recursive: false,
            dry_run: self.options.dry_run,
            remove: false,
            recreate: true,
            scan_mode: HealScanMode::Deep,
            update_parity: true,
            no_lock: false,
            pool: None,
            set: None,
        };

        match self.storage.heal_object(bucket, object, version_id, &heal_opts).await {
            Ok((result, error)) => {
                if let Some(e) = error {
                    error!("Failed to recreate missing object: {}/{} - {}", bucket, object, e);
                    return Err(Error::TaskExecutionFailed {
                        message: format!("Failed to recreate missing object {bucket}/{object}: {e}"),
                    });
                }

                let object_size = result.object_size as u64;
                info!("Successfully recreated missing object: {}/{} ({} bytes)", bucket, object, object_size);

                {
                    let mut progress = self.progress.write().await;
                    progress.update_progress(4, 4, object_size, object_size);
                }
                Ok(())
            }
            Err(e) => {
                error!("Failed to recreate missing object: {}/{} - {}", bucket, object, e);
                Err(Error::TaskExecutionFailed {
                    message: format!("Failed to recreate missing object {bucket}/{object}: {e}"),
                })
            }
        }
    }

    async fn heal_bucket(&self, bucket: &str) -> Result<()> {
        info!("Healing bucket: {}", bucket);

        // Update progress
        {
            let mut progress = self.progress.write().await;
            progress.set_current_object(Some(format!("bucket: {bucket}")));
            progress.update_progress(0, 3, 0, 0);
        }

        // Step 1: Check if bucket exists
        info!("Step 1: Checking bucket existence");
        let bucket_exists = self.storage.get_bucket_info(bucket).await?.is_some();
        if !bucket_exists {
            warn!("Bucket does not exist: {}", bucket);
            return Err(Error::TaskExecutionFailed {
                message: format!("Bucket not found: {bucket}"),
            });
        }

        {
            let mut progress = self.progress.write().await;
            progress.update_progress(1, 3, 0, 0);
        }

        // Step 2: Perform bucket heal using ecstore
        info!("Step 2: Performing bucket heal using ecstore");
        let heal_opts = HealOpts {
            recursive: self.options.recursive,
            dry_run: self.options.dry_run,
            remove: self.options.remove_corrupted,
            recreate: self.options.recreate_missing,
            scan_mode: self.options.scan_mode,
            update_parity: self.options.update_parity,
            no_lock: false,
            pool: self.options.pool_index,
            set: self.options.set_index,
        };

        match self.storage.heal_bucket(bucket, &heal_opts).await {
            Ok(result) => {
                info!("Bucket heal completed successfully: {} ({} drives)", bucket, result.after.drives.len());

                {
                    let mut progress = self.progress.write().await;
                    progress.update_progress(3, 3, 0, 0);
                }
                Ok(())
            }
            Err(e) => {
                error!("Bucket heal failed: {} - {}", bucket, e);
                {
                    let mut progress = self.progress.write().await;
                    progress.update_progress(3, 3, 0, 0);
                }
                Err(Error::TaskExecutionFailed {
                    message: format!("Failed to heal bucket {bucket}: {e}"),
                })
            }
        }
    }

    async fn heal_metadata(&self, bucket: &str, object: &str) -> Result<()> {
        info!("Healing metadata: {}/{}", bucket, object);

        // Update progress
        {
            let mut progress = self.progress.write().await;
            progress.set_current_object(Some(format!("metadata: {bucket}/{object}")));
            progress.update_progress(0, 3, 0, 0);
        }

        // Step 1: Check if object exists
        info!("Step 1: Checking object existence");
        let object_exists = self.storage.object_exists(bucket, object).await?;
        if !object_exists {
            warn!("Object does not exist: {}/{}", bucket, object);
            return Err(Error::TaskExecutionFailed {
                message: format!("Object not found: {bucket}/{object}"),
            });
        }

        {
            let mut progress = self.progress.write().await;
            progress.update_progress(1, 3, 0, 0);
        }

        // Step 2: Perform metadata heal using ecstore
        info!("Step 2: Performing metadata heal using ecstore");
        let heal_opts = HealOpts {
            recursive: false,
            dry_run: self.options.dry_run,
            remove: false,
            recreate: false,
            scan_mode: HealScanMode::Deep,
            update_parity: false,
            no_lock: false,
            pool: self.options.pool_index,
            set: self.options.set_index,
        };

        match self.storage.heal_object(bucket, object, None, &heal_opts).await {
            Ok((result, error)) => {
                if let Some(e) = error {
                    error!("Metadata heal failed: {}/{} - {}", bucket, object, e);
                    {
                        let mut progress = self.progress.write().await;
                        progress.update_progress(3, 3, 0, 0);
                    }
                    return Err(Error::TaskExecutionFailed {
                        message: format!("Failed to heal metadata {bucket}/{object}: {e}"),
                    });
                }

                info!(
                    "Metadata heal completed successfully: {}/{} ({} drives)",
                    bucket,
                    object,
                    result.after.drives.len()
                );

                {
                    let mut progress = self.progress.write().await;
                    progress.update_progress(3, 3, 0, 0);
                }
                Ok(())
            }
            Err(e) => {
                error!("Metadata heal failed: {}/{} - {}", bucket, object, e);
                {
                    let mut progress = self.progress.write().await;
                    progress.update_progress(3, 3, 0, 0);
                }
                Err(Error::TaskExecutionFailed {
                    message: format!("Failed to heal metadata {bucket}/{object}: {e}"),
                })
            }
        }
    }

    async fn heal_mrf(&self, meta_path: &str) -> Result<()> {
        info!("Healing MRF: {}", meta_path);

        // Update progress
        {
            let mut progress = self.progress.write().await;
            progress.set_current_object(Some(format!("mrf: {meta_path}")));
            progress.update_progress(0, 2, 0, 0);
        }

        // Parse meta_path to extract bucket and object
        let parts: Vec<&str> = meta_path.split('/').collect();
        if parts.len() < 2 {
            return Err(Error::TaskExecutionFailed {
                message: format!("Invalid meta path format: {meta_path}"),
            });
        }

        let bucket = parts[0];
        let object = parts[1..].join("/");

        // Step 1: Perform MRF heal using ecstore
        info!("Step 1: Performing MRF heal using ecstore");
        let heal_opts = HealOpts {
            recursive: true,
            dry_run: self.options.dry_run,
            remove: self.options.remove_corrupted,
            recreate: self.options.recreate_missing,
            scan_mode: HealScanMode::Deep,
            update_parity: true,
            no_lock: false,
            pool: None,
            set: None,
        };

        match self.storage.heal_object(bucket, &object, None, &heal_opts).await {
            Ok((result, error)) => {
                if let Some(e) = error {
                    error!("MRF heal failed: {} - {}", meta_path, e);
                    {
                        let mut progress = self.progress.write().await;
                        progress.update_progress(2, 2, 0, 0);
                    }
                    return Err(Error::TaskExecutionFailed {
                        message: format!("Failed to heal MRF {meta_path}: {e}"),
                    });
                }

                info!("MRF heal completed successfully: {} ({} drives)", meta_path, result.after.drives.len());

                {
                    let mut progress = self.progress.write().await;
                    progress.update_progress(2, 2, 0, 0);
                }
                Ok(())
            }
            Err(e) => {
                error!("MRF heal failed: {} - {}", meta_path, e);
                {
                    let mut progress = self.progress.write().await;
                    progress.update_progress(2, 2, 0, 0);
                }
                Err(Error::TaskExecutionFailed {
                    message: format!("Failed to heal MRF {meta_path}: {e}"),
                })
            }
        }
    }

    async fn heal_ec_decode(&self, bucket: &str, object: &str, version_id: Option<&str>) -> Result<()> {
        info!("Healing EC decode: {}/{}", bucket, object);

        // Update progress
        {
            let mut progress = self.progress.write().await;
            progress.set_current_object(Some(format!("ec_decode: {bucket}/{object}")));
            progress.update_progress(0, 3, 0, 0);
        }

        // Step 1: Check if object exists
        info!("Step 1: Checking object existence");
        let object_exists = self.storage.object_exists(bucket, object).await?;
        if !object_exists {
            warn!("Object does not exist: {}/{}", bucket, object);
            return Err(Error::TaskExecutionFailed {
                message: format!("Object not found: {bucket}/{object}"),
            });
        }

        {
            let mut progress = self.progress.write().await;
            progress.update_progress(1, 3, 0, 0);
        }

        // Step 2: Perform EC decode heal using ecstore
        info!("Step 2: Performing EC decode heal using ecstore");
        let heal_opts = HealOpts {
            recursive: false,
            dry_run: self.options.dry_run,
            remove: false,
            recreate: true,
            scan_mode: HealScanMode::Deep,
            update_parity: true,
            no_lock: false,
            pool: None,
            set: None,
        };

        match self.storage.heal_object(bucket, object, version_id, &heal_opts).await {
            Ok((result, error)) => {
                if let Some(e) = error {
                    error!("EC decode heal failed: {}/{} - {}", bucket, object, e);
                    {
                        let mut progress = self.progress.write().await;
                        progress.update_progress(3, 3, 0, 0);
                    }
                    return Err(Error::TaskExecutionFailed {
                        message: format!("Failed to heal EC decode {bucket}/{object}: {e}"),
                    });
                }

                let object_size = result.object_size as u64;
                info!(
                    "EC decode heal completed successfully: {}/{} ({} bytes, {} drives)",
                    bucket,
                    object,
                    object_size,
                    result.after.drives.len()
                );

                {
                    let mut progress = self.progress.write().await;
                    progress.update_progress(3, 3, object_size, object_size);
                }
                Ok(())
            }
            Err(e) => {
                error!("EC decode heal failed: {}/{} - {}", bucket, object, e);
                {
                    let mut progress = self.progress.write().await;
                    progress.update_progress(3, 3, 0, 0);
                }
                Err(Error::TaskExecutionFailed {
                    message: format!("Failed to heal EC decode {bucket}/{object}: {e}"),
                })
            }
        }
    }

    async fn heal_erasure_set(&self, buckets: Vec<String>, set_disk_id: String) -> Result<()> {
        info!("Healing erasure set: {} ({} buckets)", set_disk_id, buckets.len());

        // Update progress
        {
            let mut progress = self.progress.write().await;
            progress.set_current_object(Some(format!("erasure_set: {} ({} buckets)", set_disk_id, buckets.len())));
            progress.update_progress(0, 4, 0, 0);
        }

        let buckets = if buckets.is_empty() {
            info!("No buckets specified, listing all buckets");
            let bucket_infos = self.storage.list_buckets().await?;
            bucket_infos.into_iter().map(|info| info.name).collect()
        } else {
            buckets
        };

        // Step 1: Perform disk format heal using ecstore
        info!("Step 1: Performing disk format heal using ecstore");
        match self.storage.heal_format(self.options.dry_run).await {
            Ok((result, error)) => {
                if let Some(e) = error {
                    error!("Disk format heal failed: {} - {}", set_disk_id, e);
                    {
                        let mut progress = self.progress.write().await;
                        progress.update_progress(4, 4, 0, 0);
                    }
                    return Err(Error::TaskExecutionFailed {
                        message: format!("Failed to heal disk format for {set_disk_id}: {e}"),
                    });
                }

                info!(
                    "Disk format heal completed successfully: {} ({} drives)",
                    set_disk_id,
                    result.after.drives.len()
                );
            }
            Err(e) => {
                error!("Disk format heal failed: {} - {}", set_disk_id, e);
                {
                    let mut progress = self.progress.write().await;
                    progress.update_progress(4, 4, 0, 0);
                }
                return Err(Error::TaskExecutionFailed {
                    message: format!("Failed to heal disk format for {set_disk_id}: {e}"),
                });
            }
        }

        {
            let mut progress = self.progress.write().await;
            progress.update_progress(1, 4, 0, 0);
        }

        // Step 2: Get disk for resume functionality
        info!("Step 2: Getting disk for resume functionality");
        let disk = self.storage.get_disk_for_resume(&set_disk_id).await?;

        {
            let mut progress = self.progress.write().await;
            progress.update_progress(2, 4, 0, 0);
        }

        // Heal the bucket structure of each bucket before the erasure set heal
        for bucket in buckets.iter() {
            if let Err(err) = self.heal_bucket(bucket).await {
                warn!("Failed to heal bucket {}: {}", bucket, err);
            }
        }

        // Step 3: Create erasure set healer with resume support
        info!("Step 3: Creating erasure set healer with resume support");
        let erasure_healer = ErasureSetHealer::new(self.storage.clone(), self.progress.clone(), self.cancel_token.clone(), disk);

        {
            let mut progress = self.progress.write().await;
            progress.update_progress(3, 4, 0, 0);
        }

        // Step 4: Execute erasure set heal with resume
        info!("Step 4: Executing erasure set heal with resume");
        let result = erasure_healer.heal_erasure_set(&buckets, &set_disk_id).await;

        {
            let mut progress = self.progress.write().await;
            progress.update_progress(4, 4, 0, 0);
        }

        match result {
            Ok(_) => {
                info!("Erasure set heal completed successfully: {} ({} buckets)", set_disk_id, buckets.len());
                Ok(())
            }
            Err(e) => {
                error!("Erasure set heal failed: {} - {}", set_disk_id, e);
                Err(Error::TaskExecutionFailed {
                    message: format!("Failed to heal erasure set {set_disk_id}: {e}"),
                })
            }
        }
    }
}

impl std::fmt::Debug for HealTask {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("HealTask")
            .field("id", &self.id)
            .field("heal_type", &self.heal_type)
            .field("options", &self.options)
            .field("created_at", &self.created_at)
            .finish()
    }
}

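// Example (illustrative): building a task from a request and driving it to
// completion. `storage` is any `Arc<dyn HealStorageAPI>` implementation.
async fn run_one_heal(storage: Arc<dyn HealStorageAPI>) -> Result<()> {
    let request = HealRequest::bucket("photos".to_string());
    let task = HealTask::from_request(request, storage);
    task.execute().await?;
    assert_eq!(task.get_status().await, HealTaskStatus::Completed);
    Ok(())
}
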
@@ -12,17 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::OnceLock;
use std::sync::{Arc, OnceLock};
use tokio_util::sync::CancellationToken;
use tracing::{error, info};

pub mod error;
pub mod heal;
pub mod scanner;

pub use error::{Error, Result};
pub use scanner::{
    BucketTargetUsageInfo, BucketUsageInfo, DataUsageInfo, Scanner, ScannerMetrics, load_data_usage_from_backend,
    store_data_usage_in_backend,
};
pub use heal::{HealManager, HealOptions, HealPriority, HealRequest, HealType, channel::HealChannelProcessor};
pub use scanner::Scanner;

// Global cancellation token for AHM services (scanner and other background tasks)
static GLOBAL_AHM_SERVICES_CANCEL_TOKEN: OnceLock<CancellationToken> = OnceLock::new();
@@ -52,3 +52,61 @@ pub fn shutdown_ahm_services() {
        cancel_token.cancel();
    }
}

/// Global heal manager instance
static GLOBAL_HEAL_MANAGER: OnceLock<Arc<HealManager>> = OnceLock::new();

/// Global heal channel processor instance
static GLOBAL_HEAL_CHANNEL_PROCESSOR: OnceLock<Arc<tokio::sync::Mutex<HealChannelProcessor>>> = OnceLock::new();

/// Initialize and start the heal manager with its channel processor
pub async fn init_heal_manager(
    storage: Arc<dyn heal::storage::HealStorageAPI>,
    config: Option<heal::manager::HealConfig>,
) -> Result<Arc<HealManager>> {
    // Create heal manager
    let heal_manager = Arc::new(HealManager::new(storage, config));

    // Start heal manager
    heal_manager.start().await?;

    // Store global instance
    GLOBAL_HEAL_MANAGER
        .set(heal_manager.clone())
        .map_err(|_| Error::Config("Heal manager already initialized".to_string()))?;

    // Initialize heal channel
    let channel_receiver = rustfs_common::heal_channel::init_heal_channel();

    // Create channel processor
    let channel_processor = HealChannelProcessor::new(heal_manager.clone());

    // Store channel processor instance first
    GLOBAL_HEAL_CHANNEL_PROCESSOR
        .set(Arc::new(tokio::sync::Mutex::new(channel_processor)))
        .map_err(|_| Error::Config("Heal channel processor already initialized".to_string()))?;

    // Start channel processor in the background
    let receiver = channel_receiver;
    tokio::spawn(async move {
        if let Some(processor_guard) = GLOBAL_HEAL_CHANNEL_PROCESSOR.get() {
            let mut processor = processor_guard.lock().await;
            if let Err(e) = processor.start(receiver).await {
                error!("Heal channel processor failed: {}", e);
            }
        }
    });

    info!("Heal manager with channel processor initialized successfully");
    Ok(heal_manager)
}

/// Get global heal manager instance
pub fn get_heal_manager() -> Option<&'static Arc<HealManager>> {
    GLOBAL_HEAL_MANAGER.get()
}

/// Get global heal channel processor instance
pub fn get_heal_channel_processor() -> Option<&'static Arc<tokio::sync::Mutex<HealChannelProcessor>>> {
    GLOBAL_HEAL_CHANNEL_PROCESSOR.get()
}

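// Example (illustrative): wiring the heal manager into process startup. The
// `storage` argument is whatever `HealStorageAPI` implementation the server
// constructs; passing `None` keeps the default `HealConfig`.
async fn start_heal_subsystem(storage: Arc<dyn heal::storage::HealStorageAPI>) -> Result<()> {
    let _manager = init_heal_manager(storage, None).await?;
    debug_assert!(get_heal_manager().is_some());
    Ok(())
}
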
@@ -1,671 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::{collections::HashMap, sync::Arc, time::SystemTime};

use rustfs_ecstore::{bucket::metadata_sys::get_replication_config, config::com::read_config, store::ECStore};
use rustfs_utils::path::SLASH_SEPARATOR;
use serde::{Deserialize, Serialize};
use tracing::{error, info, warn};

use crate::error::{Error, Result};

// Data usage storage constants
pub const DATA_USAGE_ROOT: &str = SLASH_SEPARATOR;
const DATA_USAGE_OBJ_NAME: &str = ".usage.json";
const DATA_USAGE_BLOOM_NAME: &str = ".bloomcycle.bin";
pub const DATA_USAGE_CACHE_NAME: &str = ".usage-cache.bin";

// Data usage storage paths
lazy_static::lazy_static! {
    pub static ref DATA_USAGE_BUCKET: String = format!("{}{}{}",
        rustfs_ecstore::disk::RUSTFS_META_BUCKET,
        SLASH_SEPARATOR,
        rustfs_ecstore::disk::BUCKET_META_PREFIX
    );
    pub static ref DATA_USAGE_OBJ_NAME_PATH: String = format!("{}{}{}",
        rustfs_ecstore::disk::BUCKET_META_PREFIX,
        SLASH_SEPARATOR,
        DATA_USAGE_OBJ_NAME
    );
    pub static ref DATA_USAGE_BLOOM_NAME_PATH: String = format!("{}{}{}",
        rustfs_ecstore::disk::BUCKET_META_PREFIX,
        SLASH_SEPARATOR,
        DATA_USAGE_BLOOM_NAME
    );
}

/// Bucket target usage info provides replication statistics
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct BucketTargetUsageInfo {
    pub replication_pending_size: u64,
    pub replication_failed_size: u64,
    pub replicated_size: u64,
    pub replica_size: u64,
    pub replication_pending_count: u64,
    pub replication_failed_count: u64,
    pub replicated_count: u64,
}

/// Bucket usage info provides bucket-level statistics
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct BucketUsageInfo {
    pub size: u64,
    // The following five fields suffixed with V1 are here for backward compatibility
    // Total size for objects that have not yet been replicated
    pub replication_pending_size_v1: u64,
    // Total size for objects that have witnessed one or more failures and will be retried
    pub replication_failed_size_v1: u64,
    // Total size for objects that have been replicated to the destination
    pub replicated_size_v1: u64,
    // Total number of objects pending replication
    pub replication_pending_count_v1: u64,
    // Total number of objects that failed replication
    pub replication_failed_count_v1: u64,

    pub objects_count: u64,
    pub object_size_histogram: HashMap<String, u64>,
    pub object_versions_histogram: HashMap<String, u64>,
    pub versions_count: u64,
    pub delete_markers_count: u64,
    pub replica_size: u64,
    pub replica_count: u64,
    pub replication_info: HashMap<String, BucketTargetUsageInfo>,
}

/// DataUsageInfo represents data usage stats of the underlying storage
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct DataUsageInfo {
    /// Total capacity
    pub total_capacity: u64,
    /// Total used capacity
    pub total_used_capacity: u64,
    /// Total free capacity
    pub total_free_capacity: u64,

    /// LastUpdate is the timestamp of when the data usage info was last updated
    pub last_update: Option<SystemTime>,

    /// Objects total count across all buckets
    pub objects_total_count: u64,
    /// Versions total count across all buckets
    pub versions_total_count: u64,
    /// Delete markers total count across all buckets
    pub delete_markers_total_count: u64,
    /// Objects total size across all buckets
    pub objects_total_size: u64,
    /// Replication info across all buckets
    pub replication_info: HashMap<String, BucketTargetUsageInfo>,

    /// Total number of buckets in this cluster
    pub buckets_count: u64,
    /// Per-bucket usage info across all buckets
    pub buckets_usage: HashMap<String, BucketUsageInfo>,
    /// Deprecated, kept here for backward compatibility reasons
    pub bucket_sizes: HashMap<String, u64>,
}

/// Size summary for a single object or group of objects
#[derive(Debug, Default, Clone)]
pub struct SizeSummary {
    /// Total size
    pub total_size: usize,
    /// Number of versions
    pub versions: usize,
    /// Number of delete markers
    pub delete_markers: usize,
    /// Replicated size
    pub replicated_size: usize,
    /// Replicated count
    pub replicated_count: usize,
    /// Pending size
    pub pending_size: usize,
    /// Failed size
    pub failed_size: usize,
    /// Replica size
    pub replica_size: usize,
    /// Replica count
    pub replica_count: usize,
    /// Pending count
    pub pending_count: usize,
    /// Failed count
    pub failed_count: usize,
    /// Replication target stats
    pub repl_target_stats: HashMap<String, ReplTargetSizeSummary>,
}

/// Replication target size summary
#[derive(Debug, Default, Clone)]
pub struct ReplTargetSizeSummary {
    /// Replicated size
    pub replicated_size: usize,
    /// Replicated count
    pub replicated_count: usize,
    /// Pending size
    pub pending_size: usize,
    /// Failed size
    pub failed_size: usize,
    /// Pending count
    pub pending_count: usize,
    /// Failed count
    pub failed_count: usize,
}

impl DataUsageInfo {
    /// Create a new DataUsageInfo
    pub fn new() -> Self {
        Self::default()
    }

    /// Add object metadata to data usage statistics
    pub fn add_object(&mut self, object_path: &str, meta_object: &rustfs_filemeta::MetaObject) {
        // This method is kept for backward compatibility.
        // For accurate version counting, use add_object_from_file_meta instead.
        let bucket_name = match self.extract_bucket_from_path(object_path) {
            Ok(name) => name,
            Err(_) => return,
        };

        // Update bucket statistics
        if let Some(bucket_usage) = self.buckets_usage.get_mut(&bucket_name) {
            bucket_usage.size += meta_object.size as u64;
            bucket_usage.objects_count += 1;
            bucket_usage.versions_count += 1; // Simplified: assume 1 version per object

            // Update size histogram
            let total_size = meta_object.size as u64;
            let size_ranges = [
                ("0-1KB", 0, 1024),
                ("1KB-1MB", 1024, 1024 * 1024),
                ("1MB-10MB", 1024 * 1024, 10 * 1024 * 1024),
                ("10MB-100MB", 10 * 1024 * 1024, 100 * 1024 * 1024),
                ("100MB-1GB", 100 * 1024 * 1024, 1024 * 1024 * 1024),
                ("1GB+", 1024 * 1024 * 1024, u64::MAX),
            ];

            for (range_name, min_size, max_size) in size_ranges {
                if total_size >= min_size && total_size < max_size {
                    *bucket_usage.object_size_histogram.entry(range_name.to_string()).or_insert(0) += 1;
                    break;
                }
            }

            // Update version histogram (simplified - count as a single version)
            *bucket_usage
                .object_versions_histogram
                .entry("SINGLE_VERSION".to_string())
                .or_insert(0) += 1;
        } else {
            // Create new bucket usage
            let mut bucket_usage = BucketUsageInfo {
                size: meta_object.size as u64,
                objects_count: 1,
                versions_count: 1,
                ..Default::default()
            };
            bucket_usage.object_size_histogram.insert("0-1KB".to_string(), 1);
            bucket_usage.object_versions_histogram.insert("SINGLE_VERSION".to_string(), 1);
            self.buckets_usage.insert(bucket_name, bucket_usage);
        }

        // Update global statistics
        self.objects_total_size += meta_object.size as u64;
        self.objects_total_count += 1;
        self.versions_total_count += 1;
    }

    /// Add object from FileMeta for accurate version counting
    pub fn add_object_from_file_meta(&mut self, object_path: &str, file_meta: &rustfs_filemeta::FileMeta) {
        let bucket_name = match self.extract_bucket_from_path(object_path) {
            Ok(name) => name,
            Err(_) => return,
        };

        // Calculate accurate statistics from all versions
        let mut total_size = 0u64;
        let mut versions_count = 0u64;
        let mut delete_markers_count = 0u64;
        let mut latest_object_size = 0u64;

        // Process all versions to get accurate counts
        for version in &file_meta.versions {
            match rustfs_filemeta::FileMetaVersion::try_from(version.clone()) {
                Ok(ver) => {
                    if let Some(obj) = ver.object {
                        total_size += obj.size as u64;
                        versions_count += 1;
                        latest_object_size = obj.size as u64; // Keep track of the latest object size
                    } else if ver.delete_marker.is_some() {
                        delete_markers_count += 1;
                    }
                }
                Err(_) => {
                    // Skip invalid versions
                    continue;
                }
            }
        }

        // Update bucket statistics
        if let Some(bucket_usage) = self.buckets_usage.get_mut(&bucket_name) {
            bucket_usage.size += total_size;
            bucket_usage.objects_count += 1;
            bucket_usage.versions_count += versions_count;
            bucket_usage.delete_markers_count += delete_markers_count;

            // Update size histogram based on the latest object size
            let size_ranges = [
                ("0-1KB", 0, 1024),
                ("1KB-1MB", 1024, 1024 * 1024),
                ("1MB-10MB", 1024 * 1024, 10 * 1024 * 1024),
                ("10MB-100MB", 10 * 1024 * 1024, 100 * 1024 * 1024),
                ("100MB-1GB", 100 * 1024 * 1024, 1024 * 1024 * 1024),
                ("1GB+", 1024 * 1024 * 1024, u64::MAX),
            ];

            for (range_name, min_size, max_size) in size_ranges {
                if latest_object_size >= min_size && latest_object_size < max_size {
                    *bucket_usage.object_size_histogram.entry(range_name.to_string()).or_insert(0) += 1;
                    break;
                }
            }

            // Update version histogram based on the actual version count
            let version_ranges = [
                ("1", 1, 1),
                ("2-5", 2, 5),
                ("6-10", 6, 10),
                ("11-50", 11, 50),
                ("51-100", 51, 100),
                ("100+", 101, usize::MAX),
            ];

            for (range_name, min_versions, max_versions) in version_ranges {
                if versions_count as usize >= min_versions && versions_count as usize <= max_versions {
                    *bucket_usage
                        .object_versions_histogram
                        .entry(range_name.to_string())
                        .or_insert(0) += 1;
                    break;
                }
            }
        } else {
            // Create new bucket usage
            let mut bucket_usage = BucketUsageInfo {
                size: total_size,
                objects_count: 1,
                versions_count,
                delete_markers_count,
                ..Default::default()
            };

            // Set size histogram
            let size_ranges = [
                ("0-1KB", 0, 1024),
                ("1KB-1MB", 1024, 1024 * 1024),
                ("1MB-10MB", 1024 * 1024, 10 * 1024 * 1024),
                ("10MB-100MB", 10 * 1024 * 1024, 100 * 1024 * 1024),
                ("100MB-1GB", 100 * 1024 * 1024, 1024 * 1024 * 1024),
                ("1GB+", 1024 * 1024 * 1024, u64::MAX),
            ];

            for (range_name, min_size, max_size) in size_ranges {
                if latest_object_size >= min_size && latest_object_size < max_size {
                    bucket_usage.object_size_histogram.insert(range_name.to_string(), 1);
                    break;
                }
            }

            // Set version histogram
            let version_ranges = [
                ("1", 1, 1),
                ("2-5", 2, 5),
                ("6-10", 6, 10),
                ("11-50", 11, 50),
                ("51-100", 51, 100),
                ("100+", 101, usize::MAX),
            ];

            for (range_name, min_versions, max_versions) in version_ranges {
                if versions_count as usize >= min_versions && versions_count as usize <= max_versions {
                    bucket_usage.object_versions_histogram.insert(range_name.to_string(), 1);
                    break;
                }
            }

            self.buckets_usage.insert(bucket_name, bucket_usage);
            // Update buckets count when adding a new bucket
            self.buckets_count = self.buckets_usage.len() as u64;
        }

        // Update global statistics
        self.objects_total_size += total_size;
        self.objects_total_count += 1;
        self.versions_total_count += versions_count;
        self.delete_markers_total_count += delete_markers_count;
    }

    /// Extract bucket name from object path
    fn extract_bucket_from_path(&self, object_path: &str) -> Result<String> {
        let parts: Vec<&str> = object_path.split('/').collect();
        if parts.is_empty() {
            return Err(Error::Scanner("Invalid object path: empty".to_string()));
        }
        Ok(parts[0].to_string())
    }

    /// Update capacity information
    pub fn update_capacity(&mut self, total: u64, used: u64, free: u64) {
        self.total_capacity = total;
        self.total_used_capacity = used;
        self.total_free_capacity = free;
        self.last_update = Some(SystemTime::now());
    }

    /// Add bucket usage info
    pub fn add_bucket_usage(&mut self, bucket: String, usage: BucketUsageInfo) {
        self.buckets_usage.insert(bucket.clone(), usage);
        self.buckets_count = self.buckets_usage.len() as u64;
        self.last_update = Some(SystemTime::now());
    }

    /// Get bucket usage info
    pub fn get_bucket_usage(&self, bucket: &str) -> Option<&BucketUsageInfo> {
        self.buckets_usage.get(bucket)
    }

    /// Calculate total statistics from all buckets
    pub fn calculate_totals(&mut self) {
        self.objects_total_count = 0;
        self.versions_total_count = 0;
        self.delete_markers_total_count = 0;
        self.objects_total_size = 0;

        for usage in self.buckets_usage.values() {
            self.objects_total_count += usage.objects_count;
            self.versions_total_count += usage.versions_count;
            self.delete_markers_total_count += usage.delete_markers_count;
            self.objects_total_size += usage.size;
        }
    }

    /// Merge another DataUsageInfo into this one
    pub fn merge(&mut self, other: &DataUsageInfo) {
        // Merge bucket usage
        for (bucket, usage) in &other.buckets_usage {
            if let Some(existing) = self.buckets_usage.get_mut(bucket) {
                existing.merge(usage);
            } else {
                self.buckets_usage.insert(bucket.clone(), usage.clone());
            }
        }

        // Recalculate totals
        self.calculate_totals();

        // Ensure buckets_count stays consistent with buckets_usage
        self.buckets_count = self.buckets_usage.len() as u64;

        // Update last update time
        if let Some(other_update) = other.last_update {
            if self.last_update.is_none() || other_update > self.last_update.unwrap() {
                self.last_update = Some(other_update);
            }
        }
    }
}

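// Example (illustrative): the size-histogram ranges above are duplicated in
// three places; a small helper like this (hypothetical, not in the original
// file) would classify a size into its histogram bucket once.
fn size_histogram_bucket(size: u64) -> &'static str {
    match size {
        0..=1023 => "0-1KB",
        1024..=1048575 => "1KB-1MB",
        1048576..=10485759 => "1MB-10MB",
        10485760..=104857599 => "10MB-100MB",
        104857600..=1073741823 => "100MB-1GB",
        _ => "1GB+",
    }
}
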
impl BucketUsageInfo {
|
||||
/// Create a new BucketUsageInfo
|
||||
pub fn new() -> Self {
|
||||
Self::default()
|
||||
}
|
||||
|
||||
/// Add size summary to this bucket usage
|
||||
pub fn add_size_summary(&mut self, summary: &SizeSummary) {
|
||||
self.size += summary.total_size as u64;
|
||||
self.versions_count += summary.versions as u64;
|
||||
self.delete_markers_count += summary.delete_markers as u64;
|
||||
self.replica_size += summary.replica_size as u64;
|
||||
self.replica_count += summary.replica_count as u64;
|
||||
}
|
||||
|
||||
/// Merge another BucketUsageInfo into this one
|
||||
pub fn merge(&mut self, other: &BucketUsageInfo) {
|
||||
self.size += other.size;
|
||||
self.objects_count += other.objects_count;
|
||||
self.versions_count += other.versions_count;
|
||||
self.delete_markers_count += other.delete_markers_count;
|
||||
self.replica_size += other.replica_size;
|
||||
self.replica_count += other.replica_count;
|
||||
|
||||
// Merge histograms
|
||||
for (key, value) in &other.object_size_histogram {
|
||||
*self.object_size_histogram.entry(key.clone()).or_insert(0) += value;
|
||||
}
|
||||
|
||||
for (key, value) in &other.object_versions_histogram {
|
||||
*self.object_versions_histogram.entry(key.clone()).or_insert(0) += value;
|
||||
}
|
||||
|
||||
// Merge replication info
|
||||
for (target, info) in &other.replication_info {
|
||||
let entry = self.replication_info.entry(target.clone()).or_default();
|
||||
entry.replicated_size += info.replicated_size;
|
||||
entry.replica_size += info.replica_size;
|
||||
entry.replication_pending_size += info.replication_pending_size;
|
||||
entry.replication_failed_size += info.replication_failed_size;
|
||||
entry.replication_pending_count += info.replication_pending_count;
|
||||
entry.replication_failed_count += info.replication_failed_count;
|
||||
entry.replicated_count += info.replicated_count;
|
||||
}
|
||||
|
||||
// Merge backward compatibility fields
|
||||
self.replication_pending_size_v1 += other.replication_pending_size_v1;
|
||||
self.replication_failed_size_v1 += other.replication_failed_size_v1;
|
||||
self.replicated_size_v1 += other.replicated_size_v1;
|
||||
self.replication_pending_count_v1 += other.replication_pending_count_v1;
|
||||
self.replication_failed_count_v1 += other.replication_failed_count_v1;
|
||||
}
|
||||
}
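
// Sketch of the intended flow: the scanner produces one `SizeSummary` per
// object and folds it into the owning bucket's usage entry. Only APIs defined
// in this file are used.
#[cfg(test)]
mod add_size_summary_example {
    use super::*;

    #[test]
    fn summary_folds_into_bucket_usage() {
        let mut summary = SizeSummary::new();
        summary.total_size = 2048;
        summary.versions = 2;
        summary.delete_markers = 1;

        let mut usage = BucketUsageInfo::new();
        usage.add_size_summary(&summary);

        assert_eq!(usage.size, 2048);
        assert_eq!(usage.versions_count, 2);
        assert_eq!(usage.delete_markers_count, 1);
    }
}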

impl SizeSummary {
    /// Create a new SizeSummary
    pub fn new() -> Self {
        Self::default()
    }

    /// Add another SizeSummary to this one
    pub fn add(&mut self, other: &SizeSummary) {
        self.total_size += other.total_size;
        self.versions += other.versions;
        self.delete_markers += other.delete_markers;
        self.replicated_size += other.replicated_size;
        self.replicated_count += other.replicated_count;
        self.pending_size += other.pending_size;
        self.failed_size += other.failed_size;
        self.replica_size += other.replica_size;
        self.replica_count += other.replica_count;
        self.pending_count += other.pending_count;
        self.failed_count += other.failed_count;

        // Merge replication target stats
        for (target, stats) in &other.repl_target_stats {
            let entry = self.repl_target_stats.entry(target.clone()).or_default();
            entry.replicated_size += stats.replicated_size;
            entry.replicated_count += stats.replicated_count;
            entry.pending_size += stats.pending_size;
            entry.failed_size += stats.failed_size;
            entry.pending_count += stats.pending_count;
            entry.failed_count += stats.failed_count;
        }
    }
}
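
// `add` lets partial results (per version, or per scan pass) be accumulated
// into a single summary before it is attached to a bucket. A minimal sketch
// using only the fields exercised by the tests below:
#[cfg(test)]
mod size_summary_accumulate_example {
    use super::*;

    #[test]
    fn partials_accumulate() {
        let mut object_total = SizeSummary::new();
        for part_size in [100, 200, 300] {
            let mut part = SizeSummary::new();
            part.total_size = part_size;
            object_total.add(&part);
        }
        assert_eq!(object_total.total_size, 600);
    }
}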

/// Store data usage info to backend storage
pub async fn store_data_usage_in_backend(data_usage_info: DataUsageInfo, store: Arc<ECStore>) -> Result<()> {
    let data =
        serde_json::to_vec(&data_usage_info).map_err(|e| Error::Config(format!("Failed to serialize data usage info: {e}")))?;

    // Save to backend using the same mechanism as original code
    rustfs_ecstore::config::com::save_config(store, &DATA_USAGE_OBJ_NAME_PATH, data)
        .await
        .map_err(Error::Storage)?;

    Ok(())
}

/// Load data usage info from backend storage
pub async fn load_data_usage_from_backend(store: Arc<ECStore>) -> Result<DataUsageInfo> {
    let buf = match read_config(store, &DATA_USAGE_OBJ_NAME_PATH).await {
        Ok(data) => data,
        Err(e) => {
            // A missing config simply means no snapshot has been stored yet,
            // so only unexpected errors are logged as errors.
            if e == rustfs_ecstore::error::Error::ConfigNotFound {
                return Ok(DataUsageInfo::default());
            }
            error!("Failed to read data usage info from backend: {}", e);
            return Err(Error::Storage(e));
        }
    };

    let mut data_usage_info: DataUsageInfo =
        serde_json::from_slice(&buf).map_err(|e| Error::Config(format!("Failed to deserialize data usage info: {e}")))?;

    info!("Loaded data usage info from backend {:?}", &data_usage_info);

    // Handle backward compatibility like original code
    if data_usage_info.buckets_usage.is_empty() {
        data_usage_info.buckets_usage = data_usage_info
            .bucket_sizes
            .iter()
            .map(|(bucket, &size)| {
                (
                    bucket.clone(),
                    BucketUsageInfo {
                        size,
                        ..Default::default()
                    },
                )
            })
            .collect();
    }

    if data_usage_info.bucket_sizes.is_empty() {
        data_usage_info.bucket_sizes = data_usage_info
            .buckets_usage
            .iter()
            .map(|(bucket, bui)| (bucket.clone(), bui.size))
            .collect();
    }

    for (bucket, bui) in &data_usage_info.buckets_usage {
        if bui.replicated_size_v1 > 0
            || bui.replication_failed_count_v1 > 0
            || bui.replication_failed_size_v1 > 0
            || bui.replication_pending_count_v1 > 0
        {
            if let Ok((cfg, _)) = get_replication_config(bucket).await {
                if !cfg.role.is_empty() {
                    data_usage_info.replication_info.insert(
                        cfg.role.clone(),
                        BucketTargetUsageInfo {
                            replication_failed_size: bui.replication_failed_size_v1,
                            replication_failed_count: bui.replication_failed_count_v1,
                            replicated_size: bui.replicated_size_v1,
                            replication_pending_count: bui.replication_pending_count_v1,
                            replication_pending_size: bui.replication_pending_size_v1,
                            ..Default::default()
                        },
                    );
                }
            }
        }
    }

    Ok(data_usage_info)
}
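
// A hedged round-trip sketch: persist a freshly computed snapshot and read it
// back. It requires an initialized `ECStore` (as in
// `example_data_usage_integration` below), so it is illustrative rather than a
// unit test.
#[allow(dead_code)]
async fn data_usage_round_trip(store: Arc<ECStore>, snapshot: DataUsageInfo) -> Result<DataUsageInfo> {
    store_data_usage_in_backend(snapshot, store.clone()).await?;
    load_data_usage_from_backend(store).await
}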

/// Example function showing how to use AHM data usage functionality
/// This demonstrates the integration pattern for DataUsageInfoHandler
pub async fn example_data_usage_integration() -> Result<()> {
    // Get the global storage instance
    let Some(store) = rustfs_ecstore::new_object_layer_fn() else {
        return Err(Error::Config("Storage not initialized".to_string()));
    };

    // Load data usage from backend (this replaces the original load_data_usage_from_backend)
    let data_usage = load_data_usage_from_backend(store).await?;

    info!(
        "Loaded data usage info: {} buckets, {} total objects",
        data_usage.buckets_count, data_usage.objects_total_count
    );

    // Example: Store updated data usage back to backend
    // This would typically be called by the scanner after collecting new statistics
    // store_data_usage_in_backend(data_usage, store).await?;

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_data_usage_info_creation() {
        let mut info = DataUsageInfo::new();
        info.update_capacity(1000, 500, 500);

        assert_eq!(info.total_capacity, 1000);
        assert_eq!(info.total_used_capacity, 500);
        assert_eq!(info.total_free_capacity, 500);
        assert!(info.last_update.is_some());
    }

    #[test]
    fn test_bucket_usage_info_merge() {
        let mut usage1 = BucketUsageInfo::new();
        usage1.size = 100;
        usage1.objects_count = 10;
        usage1.versions_count = 5;

        let mut usage2 = BucketUsageInfo::new();
        usage2.size = 200;
        usage2.objects_count = 20;
        usage2.versions_count = 10;

        usage1.merge(&usage2);

        assert_eq!(usage1.size, 300);
        assert_eq!(usage1.objects_count, 30);
        assert_eq!(usage1.versions_count, 15);
    }

    #[test]
    fn test_size_summary_add() {
        let mut summary1 = SizeSummary::new();
        summary1.total_size = 100;
        summary1.versions = 5;

        let mut summary2 = SizeSummary::new();
        summary2.total_size = 200;
        summary2.versions = 10;

        summary1.add(&summary2);

        assert_eq!(summary1.total_size, 300);
        assert_eq!(summary1.versions, 15);
    }
}
@@ -12,197 +12,258 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.
 
-use std::collections::HashMap;
+use std::{
+    collections::HashMap,
+    sync::atomic::{AtomicU64, Ordering},
+    time::{Duration, SystemTime},
+};
+use serde::{Deserialize, Serialize};
+use tracing::info;
 
-/// Size interval for object size histogram
-#[derive(Debug, Clone)]
-pub struct SizeInterval {
-    pub start: u64,
-    pub end: u64,
-    pub name: &'static str,
-}
+/// Scanner metrics
+#[derive(Debug, Clone, Default, Serialize, Deserialize)]
+pub struct ScannerMetrics {
+    /// Total objects scanned since server start
+    pub objects_scanned: u64,
+    /// Total object versions scanned since server start
+    pub versions_scanned: u64,
+    /// Total directories scanned since server start
+    pub directories_scanned: u64,
+    /// Total bucket scans started since server start
+    pub bucket_scans_started: u64,
+    /// Total bucket scans finished since server start
+    pub bucket_scans_finished: u64,
+    /// Total objects with health issues found
+    pub objects_with_issues: u64,
+    /// Total heal tasks queued
+    pub heal_tasks_queued: u64,
+    /// Total heal tasks completed
+    pub heal_tasks_completed: u64,
+    /// Total heal tasks failed
+    pub heal_tasks_failed: u64,
+    /// Total healthy objects found
+    pub healthy_objects: u64,
+    /// Total corrupted objects found
+    pub corrupted_objects: u64,
+    /// Last scan activity time
+    pub last_activity: Option<SystemTime>,
+    /// Current scan cycle
+    pub current_cycle: u64,
+    /// Total scan cycles completed
+    pub total_cycles: u64,
+    /// Current scan duration
+    pub current_scan_duration: Option<Duration>,
+    /// Average scan duration
+    pub avg_scan_duration: Duration,
+    /// Objects scanned per second
+    pub objects_per_second: f64,
+    /// Buckets scanned per second
+    pub buckets_per_second: f64,
+    /// Storage metrics by bucket
+    pub bucket_metrics: HashMap<String, BucketMetrics>,
+    /// Disk metrics
+    pub disk_metrics: HashMap<String, DiskMetrics>,
+}
 
-/// Version interval for object versions histogram
-#[derive(Debug, Clone)]
-pub struct VersionInterval {
-    pub start: u64,
-    pub end: u64,
-    pub name: &'static str,
-}
+/// Bucket-specific metrics
+#[derive(Debug, Clone, Default, Serialize, Deserialize)]
+pub struct BucketMetrics {
+    /// Bucket name
+    pub bucket: String,
+    /// Total objects in bucket
+    pub total_objects: u64,
+    /// Total size of objects in bucket (bytes)
+    pub total_size: u64,
+    /// Objects with health issues
+    pub objects_with_issues: u64,
+    /// Last scan time
+    pub last_scan_time: Option<SystemTime>,
+    /// Scan duration
+    pub scan_duration: Option<Duration>,
+    /// Heal tasks queued for this bucket
+    pub heal_tasks_queued: u64,
+    /// Heal tasks completed for this bucket
+    pub heal_tasks_completed: u64,
+    /// Heal tasks failed for this bucket
+    pub heal_tasks_failed: u64,
+}
 
-/// Object size histogram intervals
-pub const OBJECTS_HISTOGRAM_INTERVALS: &[SizeInterval] = &[
-    SizeInterval { start: 0, end: 1024 - 1, name: "LESS_THAN_1_KiB" },
-    SizeInterval { start: 1024, end: 1024 * 1024 - 1, name: "1_KiB_TO_1_MiB" },
-    SizeInterval { start: 1024 * 1024, end: 10 * 1024 * 1024 - 1, name: "1_MiB_TO_10_MiB" },
-    SizeInterval { start: 10 * 1024 * 1024, end: 64 * 1024 * 1024 - 1, name: "10_MiB_TO_64_MiB" },
-    SizeInterval { start: 64 * 1024 * 1024, end: 128 * 1024 * 1024 - 1, name: "64_MiB_TO_128_MiB" },
-    SizeInterval { start: 128 * 1024 * 1024, end: 512 * 1024 * 1024 - 1, name: "128_MiB_TO_512_MiB" },
-    SizeInterval { start: 512 * 1024 * 1024, end: u64::MAX, name: "MORE_THAN_512_MiB" },
-];
-
-/// Object version count histogram intervals
-pub const OBJECTS_VERSION_COUNT_INTERVALS: &[VersionInterval] = &[
-    VersionInterval { start: 1, end: 1, name: "1_VERSION" },
-    VersionInterval { start: 2, end: 10, name: "2_TO_10_VERSIONS" },
-    VersionInterval { start: 11, end: 100, name: "11_TO_100_VERSIONS" },
-    VersionInterval { start: 101, end: 1000, name: "101_TO_1000_VERSIONS" },
-    VersionInterval { start: 1001, end: u64::MAX, name: "MORE_THAN_1000_VERSIONS" },
-];
-
-/// Size histogram for object size distribution
-#[derive(Debug, Clone, Default)]
-pub struct SizeHistogram {
-    counts: Vec<u64>,
-}
+/// Disk-specific metrics
+#[derive(Debug, Clone, Default, Serialize, Deserialize)]
+pub struct DiskMetrics {
+    /// Disk path
+    pub disk_path: String,
+    /// Total disk space (bytes)
+    pub total_space: u64,
+    /// Used disk space (bytes)
+    pub used_space: u64,
+    /// Free disk space (bytes)
+    pub free_space: u64,
+    /// Objects scanned on this disk
+    pub objects_scanned: u64,
+    /// Objects with issues on this disk
+    pub objects_with_issues: u64,
+    /// Last scan time
+    pub last_scan_time: Option<SystemTime>,
+    /// Whether disk is online
+    pub is_online: bool,
+    /// Whether disk is being scanned
+    pub is_scanning: bool,
+}
 
-/// Versions histogram for object version count distribution
-#[derive(Debug, Clone, Default)]
-pub struct VersionsHistogram {
-    counts: Vec<u64>,
-}
+/// Thread-safe metrics collector
+pub struct MetricsCollector {
+    /// Atomic counters for real-time metrics
+    objects_scanned: AtomicU64,
+    versions_scanned: AtomicU64,
+    directories_scanned: AtomicU64,
+    bucket_scans_started: AtomicU64,
+    bucket_scans_finished: AtomicU64,
+    objects_with_issues: AtomicU64,
+    heal_tasks_queued: AtomicU64,
+    heal_tasks_completed: AtomicU64,
+    heal_tasks_failed: AtomicU64,
+    current_cycle: AtomicU64,
+    total_cycles: AtomicU64,
+    healthy_objects: AtomicU64,
+    corrupted_objects: AtomicU64,
+}
 
-impl SizeHistogram {
-    /// Create a new size histogram
-    pub fn new() -> Self {
-        Self {
-            counts: vec![0; OBJECTS_HISTOGRAM_INTERVALS.len()],
-        }
-    }
-
-    /// Add a size to the histogram
-    pub fn add(&mut self, size: u64) {
-        for (idx, interval) in OBJECTS_HISTOGRAM_INTERVALS.iter().enumerate() {
-            if size >= interval.start && size <= interval.end {
-                self.counts[idx] += 1;
-                break;
-            }
-        }
-    }
-
-    /// Get the histogram as a map
-    pub fn to_map(&self) -> HashMap<String, u64> {
-        let mut result = HashMap::new();
-        for (idx, count) in self.counts.iter().enumerate() {
-            let interval = &OBJECTS_HISTOGRAM_INTERVALS[idx];
-            result.insert(interval.name.to_string(), *count);
-        }
-        result
-    }
-
-    /// Merge another histogram into this one
-    pub fn merge(&mut self, other: &SizeHistogram) {
-        for (idx, count) in other.counts.iter().enumerate() {
-            self.counts[idx] += count;
-        }
-    }
-
-    /// Get total count
-    pub fn total_count(&self) -> u64 {
-        self.counts.iter().sum()
-    }
-
-    /// Reset the histogram
-    pub fn reset(&mut self) {
-        for count in &mut self.counts {
-            *count = 0;
-        }
-    }
-}
-
-impl VersionsHistogram {
-    /// Create a new versions histogram
-    pub fn new() -> Self {
-        Self {
-            counts: vec![0; OBJECTS_VERSION_COUNT_INTERVALS.len()],
-        }
-    }
-
-    /// Add a version count to the histogram
-    pub fn add(&mut self, versions: u64) {
-        for (idx, interval) in OBJECTS_VERSION_COUNT_INTERVALS.iter().enumerate() {
-            if versions >= interval.start && versions <= interval.end {
-                self.counts[idx] += 1;
-                break;
-            }
-        }
-    }
-
-    /// Get the histogram as a map
-    pub fn to_map(&self) -> HashMap<String, u64> {
-        let mut result = HashMap::new();
-        for (idx, count) in self.counts.iter().enumerate() {
-            let interval = &OBJECTS_VERSION_COUNT_INTERVALS[idx];
-            result.insert(interval.name.to_string(), *count);
-        }
-        result
-    }
-
-    /// Merge another histogram into this one
-    pub fn merge(&mut self, other: &VersionsHistogram) {
-        for (idx, count) in other.counts.iter().enumerate() {
-            self.counts[idx] += count;
-        }
-    }
-
-    /// Get total count
-    pub fn total_count(&self) -> u64 {
-        self.counts.iter().sum()
-    }
-
-    /// Reset the histogram
-    pub fn reset(&mut self) {
-        for count in &mut self.counts {
-            *count = 0;
-        }
-    }
-}
+impl MetricsCollector {
+    /// Create a new metrics collector
+    pub fn new() -> Self {
+        Self {
+            objects_scanned: AtomicU64::new(0),
+            versions_scanned: AtomicU64::new(0),
+            directories_scanned: AtomicU64::new(0),
+            bucket_scans_started: AtomicU64::new(0),
+            bucket_scans_finished: AtomicU64::new(0),
+            objects_with_issues: AtomicU64::new(0),
+            heal_tasks_queued: AtomicU64::new(0),
+            heal_tasks_completed: AtomicU64::new(0),
+            heal_tasks_failed: AtomicU64::new(0),
+            current_cycle: AtomicU64::new(0),
+            total_cycles: AtomicU64::new(0),
+            healthy_objects: AtomicU64::new(0),
+            corrupted_objects: AtomicU64::new(0),
+        }
+    }
+
+    /// Increment objects scanned count
+    pub fn increment_objects_scanned(&self, count: u64) {
+        self.objects_scanned.fetch_add(count, Ordering::Relaxed);
+    }
+
+    /// Increment versions scanned count
+    pub fn increment_versions_scanned(&self, count: u64) {
+        self.versions_scanned.fetch_add(count, Ordering::Relaxed);
+    }
+
+    /// Increment directories scanned count
+    pub fn increment_directories_scanned(&self, count: u64) {
+        self.directories_scanned.fetch_add(count, Ordering::Relaxed);
+    }
+
+    /// Increment bucket scans started count
+    pub fn increment_bucket_scans_started(&self, count: u64) {
+        self.bucket_scans_started.fetch_add(count, Ordering::Relaxed);
+    }
+
+    /// Increment bucket scans finished count
+    pub fn increment_bucket_scans_finished(&self, count: u64) {
+        self.bucket_scans_finished.fetch_add(count, Ordering::Relaxed);
+    }
+
+    /// Increment objects with issues count
+    pub fn increment_objects_with_issues(&self, count: u64) {
+        self.objects_with_issues.fetch_add(count, Ordering::Relaxed);
+    }
+
+    /// Increment heal tasks queued count
+    pub fn increment_heal_tasks_queued(&self, count: u64) {
+        self.heal_tasks_queued.fetch_add(count, Ordering::Relaxed);
+    }
+
+    /// Increment heal tasks completed count
+    pub fn increment_heal_tasks_completed(&self, count: u64) {
+        self.heal_tasks_completed.fetch_add(count, Ordering::Relaxed);
+    }
+
+    /// Increment heal tasks failed count
+    pub fn increment_heal_tasks_failed(&self, count: u64) {
+        self.heal_tasks_failed.fetch_add(count, Ordering::Relaxed);
+    }
+
+    /// Set current cycle
+    pub fn set_current_cycle(&self, cycle: u64) {
+        self.current_cycle.store(cycle, Ordering::Relaxed);
+    }
+
+    /// Increment total cycles
+    pub fn increment_total_cycles(&self) {
+        self.total_cycles.fetch_add(1, Ordering::Relaxed);
+    }
+
+    /// Increment healthy objects count
+    pub fn increment_healthy_objects(&self) {
+        self.healthy_objects.fetch_add(1, Ordering::Relaxed);
+    }
+
+    /// Increment corrupted objects count
+    pub fn increment_corrupted_objects(&self) {
+        self.corrupted_objects.fetch_add(1, Ordering::Relaxed);
+    }
+
+    /// Get current metrics snapshot
+    pub fn get_metrics(&self) -> ScannerMetrics {
+        ScannerMetrics {
+            objects_scanned: self.objects_scanned.load(Ordering::Relaxed),
+            versions_scanned: self.versions_scanned.load(Ordering::Relaxed),
+            directories_scanned: self.directories_scanned.load(Ordering::Relaxed),
+            bucket_scans_started: self.bucket_scans_started.load(Ordering::Relaxed),
+            bucket_scans_finished: self.bucket_scans_finished.load(Ordering::Relaxed),
+            objects_with_issues: self.objects_with_issues.load(Ordering::Relaxed),
+            heal_tasks_queued: self.heal_tasks_queued.load(Ordering::Relaxed),
+            heal_tasks_completed: self.heal_tasks_completed.load(Ordering::Relaxed),
+            heal_tasks_failed: self.heal_tasks_failed.load(Ordering::Relaxed),
+            healthy_objects: self.healthy_objects.load(Ordering::Relaxed),
+            corrupted_objects: self.corrupted_objects.load(Ordering::Relaxed),
+            last_activity: Some(SystemTime::now()),
+            current_cycle: self.current_cycle.load(Ordering::Relaxed),
+            total_cycles: self.total_cycles.load(Ordering::Relaxed),
+            current_scan_duration: None, // Will be set by scanner
+            avg_scan_duration: Duration::ZERO, // Will be calculated
+            objects_per_second: 0.0, // Will be calculated
+            buckets_per_second: 0.0, // Will be calculated
+            bucket_metrics: HashMap::new(), // Will be populated by scanner
+            disk_metrics: HashMap::new(), // Will be populated by scanner
+        }
+    }
+
+    /// Reset all metrics
+    pub fn reset(&self) {
+        self.objects_scanned.store(0, Ordering::Relaxed);
+        self.versions_scanned.store(0, Ordering::Relaxed);
+        self.directories_scanned.store(0, Ordering::Relaxed);
+        self.bucket_scans_started.store(0, Ordering::Relaxed);
+        self.bucket_scans_finished.store(0, Ordering::Relaxed);
+        self.objects_with_issues.store(0, Ordering::Relaxed);
+        self.heal_tasks_queued.store(0, Ordering::Relaxed);
+        self.heal_tasks_completed.store(0, Ordering::Relaxed);
+        self.heal_tasks_failed.store(0, Ordering::Relaxed);
+        self.current_cycle.store(0, Ordering::Relaxed);
+        self.total_cycles.store(0, Ordering::Relaxed);
+        self.healthy_objects.store(0, Ordering::Relaxed);
+        self.corrupted_objects.store(0, Ordering::Relaxed);
+
+        info!("Scanner metrics reset");
+    }
+}
+
+impl Default for MetricsCollector {
+    fn default() -> Self {
+        Self::new()
+    }
+}
 
@@ -211,67 +272,35 @@ mod tests {
     use super::*;
 
     #[test]
-    fn test_size_histogram() {
-        let mut histogram = SizeHistogram::new();
-
-        // Add some sizes
-        histogram.add(512); // LESS_THAN_1_KiB
-        histogram.add(1024); // 1_KiB_TO_1_MiB
-        histogram.add(1024 * 1024); // 1_MiB_TO_10_MiB
-        histogram.add(5 * 1024 * 1024); // 1_MiB_TO_10_MiB
-
-        let map = histogram.to_map();
-
-        assert_eq!(map.get("LESS_THAN_1_KiB"), Some(&1));
-        assert_eq!(map.get("1_KiB_TO_1_MiB"), Some(&1));
-        assert_eq!(map.get("1_MiB_TO_10_MiB"), Some(&2));
-        assert_eq!(map.get("10_MiB_TO_64_MiB"), Some(&0));
+    fn test_metrics_collector_creation() {
+        let collector = MetricsCollector::new();
+        let metrics = collector.get_metrics();
+        assert_eq!(metrics.objects_scanned, 0);
+        assert_eq!(metrics.versions_scanned, 0);
     }
 
     #[test]
-    fn test_versions_histogram() {
-        let mut histogram = VersionsHistogram::new();
-
-        // Add some version counts
-        histogram.add(1); // 1_VERSION
-        histogram.add(5); // 2_TO_10_VERSIONS
-        histogram.add(50); // 11_TO_100_VERSIONS
-        histogram.add(500); // 101_TO_1000_VERSIONS
-
-        let map = histogram.to_map();
-
-        assert_eq!(map.get("1_VERSION"), Some(&1));
-        assert_eq!(map.get("2_TO_10_VERSIONS"), Some(&1));
-        assert_eq!(map.get("11_TO_100_VERSIONS"), Some(&1));
-        assert_eq!(map.get("101_TO_1000_VERSIONS"), Some(&1));
+    fn test_metrics_increment() {
+        let collector = MetricsCollector::new();
+
+        collector.increment_objects_scanned(10);
+        collector.increment_versions_scanned(5);
+        collector.increment_objects_with_issues(2);
+
+        let metrics = collector.get_metrics();
+        assert_eq!(metrics.objects_scanned, 10);
+        assert_eq!(metrics.versions_scanned, 5);
+        assert_eq!(metrics.objects_with_issues, 2);
    }
 
     #[test]
-    fn test_histogram_merge() {
-        let mut histogram1 = SizeHistogram::new();
-        histogram1.add(1024);
-        histogram1.add(1024 * 1024);
-
-        let mut histogram2 = SizeHistogram::new();
-        histogram2.add(1024);
-        histogram2.add(5 * 1024 * 1024);
-
-        histogram1.merge(&histogram2);
-
-        let map = histogram1.to_map();
-        assert_eq!(map.get("1_KiB_TO_1_MiB"), Some(&2)); // 1 from histogram1 + 1 from histogram2
-        assert_eq!(map.get("1_MiB_TO_10_MiB"), Some(&2)); // 1 from histogram1 + 1 from histogram2
-    }
-
-    #[test]
-    fn test_histogram_reset() {
-        let mut histogram = SizeHistogram::new();
-        histogram.add(1024);
-        histogram.add(1024 * 1024);
-
-        assert_eq!(histogram.total_count(), 2);
-
-        histogram.reset();
-        assert_eq!(histogram.total_count(), 0);
+    fn test_metrics_reset() {
+        let collector = MetricsCollector::new();
+
+        collector.increment_objects_scanned(10);
+        collector.reset();
+
+        let metrics = collector.get_metrics();
+        assert_eq!(metrics.objects_scanned, 0);
     }
 }
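
// Because every counter is an `AtomicU64` updated through `&self`, one
// collector can be shared across scanner threads without locks. A minimal
// sketch (in `metrics.rs` terms; it relies only on the `MetricsCollector`
// API shown in the diff above):
#[cfg(test)]
mod collector_sharing_example {
    use super::*;

    #[test]
    fn concurrent_increments() {
        let collector = MetricsCollector::new();
        std::thread::scope(|s| {
            for _ in 0..4 {
                s.spawn(|| collector.increment_objects_scanned(100));
            }
        });
        assert_eq!(collector.get_metrics().objects_scanned, 400);
    }
}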

234 crates/ahm/src/scanner/lifecycle.rs Normal file
@@ -0,0 +1,234 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;
use std::sync::atomic::{AtomicU64, Ordering};
use time::OffsetDateTime;

use crate::error::Result;
use rustfs_common::data_usage::SizeSummary;
use rustfs_common::metrics::IlmAction;
use rustfs_ecstore::bucket::lifecycle::{
    bucket_lifecycle_audit::LcEventSrc,
    bucket_lifecycle_ops::{GLOBAL_ExpiryState, apply_lifecycle_action, eval_action_from_lifecycle},
    lifecycle,
    lifecycle::Lifecycle,
};
use rustfs_ecstore::bucket::metadata_sys::get_object_lock_config;
use rustfs_ecstore::bucket::object_lock::objectlock_sys::{BucketObjectLockSys, enforce_retention_for_deletion};
use rustfs_ecstore::bucket::versioning::VersioningApi;
use rustfs_ecstore::bucket::versioning_sys::BucketVersioningSys;
use rustfs_ecstore::cmd::bucket_targets::VersioningConfig;
use rustfs_ecstore::store_api::{ObjectInfo, ObjectToDelete};
use rustfs_filemeta::FileInfo;
use s3s::dto::BucketLifecycleConfiguration as LifecycleConfig;
use tracing::info;

static SCANNER_EXCESS_OBJECT_VERSIONS: AtomicU64 = AtomicU64::new(100);
static SCANNER_EXCESS_OBJECT_VERSIONS_TOTAL_SIZE: AtomicU64 = AtomicU64::new(1024 * 1024 * 1024 * 1024); // 1 TB

#[derive(Clone)]
pub struct ScannerItem {
    pub bucket: String,
    pub object_name: String,
    pub lifecycle: Option<Arc<LifecycleConfig>>,
    pub versioning: Option<Arc<VersioningConfig>>,
}

impl ScannerItem {
    pub fn new(bucket: String, lifecycle: Option<Arc<LifecycleConfig>>, versioning: Option<Arc<VersioningConfig>>) -> Self {
        Self {
            bucket,
            object_name: "".to_string(),
            lifecycle,
            versioning,
        }
    }

    pub async fn apply_versions_actions(&self, fivs: &[FileInfo]) -> Result<Vec<ObjectInfo>> {
        let obj_infos = self.apply_newer_noncurrent_version_limit(fivs).await?;
        if obj_infos.len() >= SCANNER_EXCESS_OBJECT_VERSIONS.load(Ordering::SeqCst) as usize {
            // todo
        }

        let mut cumulative_size = 0;
        for obj_info in obj_infos.iter() {
            cumulative_size += obj_info.size;
        }

        if cumulative_size >= SCANNER_EXCESS_OBJECT_VERSIONS_TOTAL_SIZE.load(Ordering::SeqCst) as i64 {
            //todo
        }

        Ok(obj_infos)
    }

    pub async fn apply_newer_noncurrent_version_limit(&self, fivs: &[FileInfo]) -> Result<Vec<ObjectInfo>> {
        let lock_enabled = if let Some(rcfg) = BucketObjectLockSys::get(&self.bucket).await {
            rcfg.mode.is_some()
        } else {
            false
        };

        // Propagate a hard error if the versioning config cannot be loaded at
        // all, then fall back to "unversioned" for the per-object check.
        let _vcfg = BucketVersioningSys::get(&self.bucket).await?;
        let versioned = match BucketVersioningSys::get(&self.bucket).await {
            Ok(vcfg) => vcfg.versioned(&self.object_name),
            Err(_) => false,
        };
        let mut object_infos = Vec::with_capacity(fivs.len());

        if self.lifecycle.is_none() {
            for info in fivs.iter() {
                object_infos.push(ObjectInfo::from_file_info(info, &self.bucket, &self.object_name, versioned));
            }
            return Ok(object_infos);
        }

        let event = self
            .lifecycle
            .as_ref()
            .expect("lifecycle config was checked above")
            .clone()
            .noncurrent_versions_expiration_limit(&lifecycle::ObjectOpts {
                name: self.object_name.clone(),
                ..Default::default()
            })
            .await;
        let lim = event.newer_noncurrent_versions;
        if lim == 0 || fivs.len() <= lim + 1 {
            for fi in fivs.iter() {
                object_infos.push(ObjectInfo::from_file_info(fi, &self.bucket, &self.object_name, versioned));
            }
            return Ok(object_infos);
        }

        let overflow_versions = &fivs[lim + 1..];
        for fi in fivs[..lim + 1].iter() {
            object_infos.push(ObjectInfo::from_file_info(fi, &self.bucket, &self.object_name, versioned));
        }

        let mut to_del = Vec::<ObjectToDelete>::with_capacity(overflow_versions.len());
        for fi in overflow_versions.iter() {
            let obj = ObjectInfo::from_file_info(fi, &self.bucket, &self.object_name, versioned);
            if lock_enabled && enforce_retention_for_deletion(&obj) {
                /*if self.debug {
                    if obj.version_id.is_some() {
                        info!("lifecycle: {} v({}) is locked, not deleting\n", obj.name, obj.version_id.expect("err"));
                    } else {
                        info!("lifecycle: {} is locked, not deleting\n", obj.name);
                    }
                }*/
                object_infos.push(obj);
                continue;
            }

            if OffsetDateTime::now_utc().unix_timestamp()
                < lifecycle::expected_expiry_time(obj.successor_mod_time.expect("overflow versions have a successor"), event.noncurrent_days as i32)
                    .unix_timestamp()
            {
                object_infos.push(obj);
                continue;
            }

            to_del.push(ObjectToDelete {
                object_name: obj.name,
                version_id: obj.version_id,
            });
        }

        if !to_del.is_empty() {
            let mut expiry_state = GLOBAL_ExpiryState.write().await;
            expiry_state.enqueue_by_newer_noncurrent(&self.bucket, to_del, event).await;
        }

        Ok(object_infos)
    }
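
    // Worked example of the split above: with `newer_noncurrent_versions = 3`
    // and ten versions sorted newest first, `fivs[..4]` (the current version
    // plus the three allowed noncurrent ones) is always kept, while the six
    // entries in `fivs[4..]` are checked against `noncurrent_days` and, once
    // past their expected expiry time, queued for deletion.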

    pub async fn apply_actions(&mut self, oi: &ObjectInfo, _size_s: &mut SizeSummary) -> (bool, i64) {
        let (action, _size) = self.apply_lifecycle(oi).await;

        info!(
            "apply_actions {} {} {:?} {:?}",
            oi.bucket.clone(),
            oi.name.clone(),
            oi.version_id.clone(),
            oi.user_defined.clone()
        );

        // Create a mutable clone if you need to modify fields
        /*let mut oi = oi.clone();
        oi.replication_status = ReplicationStatusType::from(
            oi.user_defined
                .get("x-amz-bucket-replication-status")
                .unwrap_or(&"PENDING".to_string()),
        );
        info!("apply status is: {:?}", oi.replication_status);
        self.heal_replication(&oi, _size_s).await;*/

        if action.delete_all() {
            return (true, 0);
        }

        (false, oi.size)
    }

    async fn apply_lifecycle(&mut self, oi: &ObjectInfo) -> (IlmAction, i64) {
        let size = oi.size;
        if self.lifecycle.is_none() {
            return (IlmAction::NoneAction, size);
        }

        let (olcfg, rcfg) = if self.bucket != ".minio.sys" {
            (
                get_object_lock_config(&self.bucket).await.ok(),
                None, // FIXME: replication config
            )
        } else {
            (None, None)
        };

        let lc_evt = eval_action_from_lifecycle(
            self.lifecycle.as_ref().unwrap(),
            olcfg
                .as_ref()
                .and_then(|(c, _)| c.rule.as_ref().and_then(|r| r.default_retention.clone())),
            rcfg.clone(),
            oi,
        )
        .await;

        info!("lifecycle: {} Initial scan: {}", oi.name, lc_evt.action);

        // An expiry decision changes the size we account for: all-version
        // deletes drop the object entirely, and a plain delete on an
        // unversioned bucket removes the only copy.
        let mut new_size = size;
        match lc_evt.action {
            IlmAction::DeleteVersionAction | IlmAction::DeleteAllVersionsAction | IlmAction::DelMarkerDeleteAllVersionsAction => {
                new_size = 0;
            }
            IlmAction::DeleteAction => {
                if let Some(vcfg) = &self.versioning {
                    if !vcfg.is_enabled() {
                        new_size = 0;
                    }
                } else {
                    new_size = 0;
                }
            }
            _ => (),
        }

        apply_lifecycle_action(&lc_evt, &LcEventSrc::Scanner, oi).await;
        (lc_evt.action, new_size)
    }
}
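
// A hedged sketch (hypothetical wiring, not part of this file) of how a scan
// loop could drive `ScannerItem` for one object: apply the version limit
// first, then the per-version lifecycle actions, folding results into a
// `SizeSummary`. Assumes `SizeSummary` implements `Default`.
#[allow(dead_code)]
async fn scan_one_object_sketch(mut item: ScannerItem, object: &str, versions: &[FileInfo]) -> Result<SizeSummary> {
    item.object_name = object.to_string();
    let infos = item.apply_versions_actions(versions).await?;
    let mut summary = SizeSummary::default();
    for oi in &infos {
        // `true` in the first slot means the lifecycle action removed every
        // version of the object.
        let (_deleted_all, _remaining_size) = item.apply_actions(oi, &mut summary).await;
    }
    Ok(summary)
}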

@@ -42,6 +42,10 @@ pub struct ScannerMetrics {
     pub heal_tasks_completed: u64,
     /// Total heal tasks failed
     pub heal_tasks_failed: u64,
+    /// Total healthy objects found
+    pub healthy_objects: u64,
+    /// Total corrupted objects found
+    pub corrupted_objects: u64,
     /// Last scan activity time
     pub last_activity: Option<SystemTime>,
     /// Current scan cycle
@@ -122,6 +126,8 @@ pub struct MetricsCollector {
     heal_tasks_failed: AtomicU64,
     current_cycle: AtomicU64,
     total_cycles: AtomicU64,
+    healthy_objects: AtomicU64,
+    corrupted_objects: AtomicU64,
 }
 
 impl MetricsCollector {
@@ -139,6 +145,8 @@ impl MetricsCollector {
             heal_tasks_failed: AtomicU64::new(0),
             current_cycle: AtomicU64::new(0),
             total_cycles: AtomicU64::new(0),
+            healthy_objects: AtomicU64::new(0),
+            corrupted_objects: AtomicU64::new(0),
         }
     }
 
@@ -197,6 +205,16 @@ impl MetricsCollector {
         self.total_cycles.fetch_add(1, Ordering::Relaxed);
     }
 
+    /// Increment healthy objects count
+    pub fn increment_healthy_objects(&self) {
+        self.healthy_objects.fetch_add(1, Ordering::Relaxed);
+    }
+
+    /// Increment corrupted objects count
+    pub fn increment_corrupted_objects(&self) {
+        self.corrupted_objects.fetch_add(1, Ordering::Relaxed);
+    }
+
     /// Get current metrics snapshot
     pub fn get_metrics(&self) -> ScannerMetrics {
         ScannerMetrics {
@@ -209,6 +227,8 @@ impl MetricsCollector {
             heal_tasks_queued: self.heal_tasks_queued.load(Ordering::Relaxed),
             heal_tasks_completed: self.heal_tasks_completed.load(Ordering::Relaxed),
             heal_tasks_failed: self.heal_tasks_failed.load(Ordering::Relaxed),
+            healthy_objects: self.healthy_objects.load(Ordering::Relaxed),
+            corrupted_objects: self.corrupted_objects.load(Ordering::Relaxed),
             last_activity: Some(SystemTime::now()),
             current_cycle: self.current_cycle.load(Ordering::Relaxed),
             total_cycles: self.total_cycles.load(Ordering::Relaxed),
@@ -234,6 +254,8 @@ impl MetricsCollector {
         self.heal_tasks_failed.store(0, Ordering::Relaxed);
         self.current_cycle.store(0, Ordering::Relaxed);
         self.total_cycles.store(0, Ordering::Relaxed);
+        self.healthy_objects.store(0, Ordering::Relaxed);
+        self.corrupted_objects.store(0, Ordering::Relaxed);
 
         info!("Scanner metrics reset");
     }

@@ -13,13 +13,9 @@
// limitations under the License.

pub mod data_scanner;
pub mod data_usage;
pub mod histogram;
pub mod lifecycle;
pub mod metrics;

// Re-export main types for convenience
pub use data_scanner::Scanner;
pub use data_usage::{
    BucketTargetUsageInfo, BucketUsageInfo, DataUsageInfo, load_data_usage_from_backend, store_data_usage_in_backend,
};
pub use metrics::ScannerMetrics;
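
// With these re-exports in place, downstream code can use the short paths,
// e.g. `use rustfs_ahm::scanner::{Scanner, ScannerMetrics, DataUsageInfo};`
// (assuming the crate name `rustfs_ahm`, as in the integration tests below),
// instead of spelling out each submodule.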

424 crates/ahm/tests/heal_integration_test.rs Normal file
@@ -0,0 +1,424 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use rustfs_ahm::heal::{
    manager::{HealConfig, HealManager},
    storage::{ECStoreHealStorage, HealStorageAPI},
    task::{HealOptions, HealPriority, HealRequest, HealTaskStatus, HealType},
};
use rustfs_common::heal_channel::{HealOpts, HealScanMode};
use rustfs_ecstore::{
    disk::endpoint::Endpoint,
    endpoints::{EndpointServerPools, Endpoints, PoolEndpoints},
    store::ECStore,
    store_api::{ObjectIO, ObjectOptions, PutObjReader, StorageAPI},
};
use serial_test::serial;
use std::sync::Once;
use std::sync::OnceLock;
use std::{path::PathBuf, sync::Arc, time::Duration};
use tokio::fs;
use tracing::info;
use walkdir::WalkDir;

static GLOBAL_ENV: OnceLock<(Vec<PathBuf>, Arc<ECStore>, Arc<ECStoreHealStorage>)> = OnceLock::new();
static INIT: Once = Once::new();

fn init_tracing() {
    INIT.call_once(|| {
        let _ = tracing_subscriber::fmt::try_init();
    });
}

/// Test helper: Create test environment with ECStore
async fn setup_test_env() -> (Vec<PathBuf>, Arc<ECStore>, Arc<ECStoreHealStorage>) {
    init_tracing();

    // Fast path: already initialized, just clone and return
    if let Some((paths, ecstore, heal_storage)) = GLOBAL_ENV.get() {
        return (paths.clone(), ecstore.clone(), heal_storage.clone());
    }

    // create temp dir as 4 disks with unique base dir
    let test_base_dir = format!("/tmp/rustfs_ahm_heal_test_{}", uuid::Uuid::new_v4());
    let temp_dir = std::path::PathBuf::from(&test_base_dir);
    if temp_dir.exists() {
        fs::remove_dir_all(&temp_dir).await.ok();
    }
    fs::create_dir_all(&temp_dir).await.unwrap();

    // create 4 disk dirs
    let disk_paths = vec![
        temp_dir.join("disk1"),
        temp_dir.join("disk2"),
        temp_dir.join("disk3"),
        temp_dir.join("disk4"),
    ];

    for disk_path in &disk_paths {
        fs::create_dir_all(disk_path).await.unwrap();
    }

    // create EndpointServerPools
    let mut endpoints = Vec::new();
    for (i, disk_path) in disk_paths.iter().enumerate() {
        let mut endpoint = Endpoint::try_from(disk_path.to_str().unwrap()).unwrap();
        // set correct index
        endpoint.set_pool_index(0);
        endpoint.set_set_index(0);
        endpoint.set_disk_index(i);
        endpoints.push(endpoint);
    }

    let pool_endpoints = PoolEndpoints {
        legacy: false,
        set_count: 1,
        drives_per_set: 4,
        endpoints: Endpoints::from(endpoints),
        cmd_line: "test".to_string(),
        platform: format!("OS: {} | Arch: {}", std::env::consts::OS, std::env::consts::ARCH),
    };

    let endpoint_pools = EndpointServerPools(vec![pool_endpoints]);

    // format disks (only first time)
    rustfs_ecstore::store::init_local_disks(endpoint_pools.clone()).await.unwrap();

    // create ECStore on a fixed local port (9001) for simplicity
    let port = 9001;
    let server_addr: std::net::SocketAddr = format!("127.0.0.1:{port}").parse().unwrap();
    let ecstore = ECStore::new(server_addr, endpoint_pools).await.unwrap();

    // init bucket metadata system
    let buckets_list = ecstore
        .list_bucket(&rustfs_ecstore::store_api::BucketOptions {
            no_metadata: true,
            ..Default::default()
        })
        .await
        .unwrap();
    let buckets = buckets_list.into_iter().map(|v| v.name).collect();
    rustfs_ecstore::bucket::metadata_sys::init_bucket_metadata_sys(ecstore.clone(), buckets).await;

    // Create heal storage layer
    let heal_storage = Arc::new(ECStoreHealStorage::new(ecstore.clone()));

    // Store in global once lock
    let _ = GLOBAL_ENV.set((disk_paths.clone(), ecstore.clone(), heal_storage.clone()));

    (disk_paths, ecstore, heal_storage)
}

/// Test helper: Create a test bucket
async fn create_test_bucket(ecstore: &Arc<ECStore>, bucket_name: &str) {
    (**ecstore)
        .make_bucket(bucket_name, &Default::default())
        .await
        .expect("Failed to create test bucket");
    info!("Created test bucket: {}", bucket_name);
}

/// Test helper: Upload test object
async fn upload_test_object(ecstore: &Arc<ECStore>, bucket: &str, object: &str, data: &[u8]) {
    let mut reader = PutObjReader::from_vec(data.to_vec());
    let object_info = (**ecstore)
        .put_object(bucket, object, &mut reader, &ObjectOptions::default())
        .await
        .expect("Failed to upload test object");

    info!("Uploaded test object: {}/{} ({} bytes)", bucket, object, object_info.size);
}

#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
#[serial]
async fn test_heal_object_basic() {
    let (disk_paths, ecstore, heal_storage) = setup_test_env().await;

    // Create test bucket and object
    let bucket_name = "test-bucket";
    let object_name = "test-object.txt";
    let test_data = b"Hello, this is test data for healing!";

    create_test_bucket(&ecstore, bucket_name).await;
    upload_test_object(&ecstore, bucket_name, object_name, test_data).await;

    // ─── 1️⃣ delete a single data shard file ─────────────────────────────────────
    let obj_dir = disk_paths[0].join(bucket_name).join(object_name);
    // find part file at depth 2, e.g. .../<uuid>/part.1
    let target_part = WalkDir::new(&obj_dir)
        .min_depth(2)
        .max_depth(2)
        .into_iter()
        .filter_map(Result::ok)
        .find(|e| e.file_type().is_file() && e.file_name().to_str().map(|n| n.starts_with("part.")).unwrap_or(false))
        .map(|e| e.into_path())
        .expect("Failed to locate part file to delete");

    std::fs::remove_file(&target_part).expect("failed to delete part file");
    assert!(!target_part.exists());
    println!("✅ Deleted shard part file: {target_part:?}");

    // Create heal manager with a faster interval
    let cfg = HealConfig {
        heal_interval: Duration::from_millis(1),
        ..Default::default()
    };
    let heal_manager = HealManager::new(heal_storage.clone(), Some(cfg));
    heal_manager.start().await.unwrap();

    // Submit heal request for the object
    let heal_request = HealRequest::new(
        HealType::Object {
            bucket: bucket_name.to_string(),
            object: object_name.to_string(),
            version_id: None,
        },
        HealOptions {
            dry_run: false,
            recursive: false,
            remove_corrupted: false,
            recreate_missing: true,
            scan_mode: HealScanMode::Normal,
            update_parity: true,
            timeout: Some(Duration::from_secs(300)),
            pool_index: None,
            set_index: None,
        },
        HealPriority::Normal,
    );

    let task_id = heal_manager
        .submit_heal_request(heal_request)
        .await
        .expect("Failed to submit heal request");

    info!("Submitted heal request with task ID: {}", task_id);

    // Wait for task completion
    tokio::time::sleep(tokio::time::Duration::from_secs(8)).await;

    // Attempt to fetch task status (might be removed if finished)
    match heal_manager.get_task_status(&task_id).await {
        Ok(status) => info!("Task status: {:?}", status),
        Err(e) => info!("Task status not found (likely completed): {}", e),
    }

    // ─── 2️⃣ verify the deleted part file is restored ───────
    assert!(target_part.exists());

    info!("Heal object basic test passed");
}
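
// Rather than sleeping for a fixed time, a test can poll the manager until the
// task completes or a deadline passes. A hedged helper sketch: the `&str`
// task-id parameter and the treat-`Err`-as-pruned behaviour are assumptions
// about `get_task_status`, not confirmed API.
#[allow(dead_code)]
async fn wait_for_heal_task(heal_manager: &HealManager, task_id: &str, timeout: Duration) -> bool {
    let deadline = std::time::Instant::now() + timeout;
    while std::time::Instant::now() < deadline {
        match heal_manager.get_task_status(task_id).await {
            Ok(HealTaskStatus::Completed) => return true,
            // Finished tasks may already have been pruned from the manager.
            Err(_) => return true,
            Ok(_) => tokio::time::sleep(Duration::from_millis(100)).await,
        }
    }
    false
}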

#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
#[serial]
async fn test_heal_bucket_basic() {
    let (disk_paths, ecstore, heal_storage) = setup_test_env().await;

    // Create test bucket
    let bucket_name = "test-bucket-heal";
    create_test_bucket(&ecstore, bucket_name).await;

    // ─── 1️⃣ delete bucket dir on disk ──────────────
    let broken_bucket_path = disk_paths[0].join(bucket_name);
    assert!(broken_bucket_path.exists(), "bucket dir does not exist on disk");
    std::fs::remove_dir_all(&broken_bucket_path).expect("failed to delete bucket dir on disk");
    assert!(!broken_bucket_path.exists(), "bucket dir still exists after deletion");
    println!("✅ Deleted bucket directory on disk: {broken_bucket_path:?}");

    // Create heal manager with a faster interval
    let cfg = HealConfig {
        heal_interval: Duration::from_millis(1),
        ..Default::default()
    };
    let heal_manager = HealManager::new(heal_storage.clone(), Some(cfg));
    heal_manager.start().await.unwrap();

    // Submit heal request for the bucket
    let heal_request = HealRequest::new(
        HealType::Bucket {
            bucket: bucket_name.to_string(),
        },
        HealOptions {
            dry_run: false,
            recursive: true,
            remove_corrupted: false,
            recreate_missing: false,
            scan_mode: HealScanMode::Normal,
            update_parity: false,
            timeout: Some(Duration::from_secs(300)),
            pool_index: None,
            set_index: None,
        },
        HealPriority::Normal,
    );

    let task_id = heal_manager
        .submit_heal_request(heal_request)
        .await
        .expect("Failed to submit bucket heal request");

    info!("Submitted bucket heal request with task ID: {}", task_id);

    // Wait for task completion
    tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;

    // ─── 2️⃣ check task status (optional; finished tasks may already be pruned) ───────
    if let Ok(status) = heal_manager.get_task_status(&task_id).await {
        if status == HealTaskStatus::Completed {
            info!("Bucket heal task status: {:?}", status);
        } else {
            panic!("Bucket heal task status: {status:?}");
        }
    }

    // ─── 3️⃣ verify bucket directory is restored on the broken disk ───────
    assert!(broken_bucket_path.exists(), "bucket dir does not exist on disk");

    info!("Heal bucket basic test passed");
}

#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
#[serial]
async fn test_heal_format_basic() {
    let (disk_paths, _ecstore, heal_storage) = setup_test_env().await;

    // ─── 1️⃣ delete format.json on one disk ──────────────
    let format_path = disk_paths[0].join(".rustfs.sys").join("format.json");
    assert!(format_path.exists(), "format.json does not exist on disk");
    std::fs::remove_file(&format_path).expect("failed to delete format.json on disk");
    assert!(!format_path.exists(), "format.json still exists after deletion");
    println!("✅ Deleted format.json on disk: {format_path:?}");

    // Create heal manager; the background format check runs on the heal interval
    let cfg = HealConfig {
        heal_interval: Duration::from_secs(2),
        ..Default::default()
    };
    let heal_manager = HealManager::new(heal_storage.clone(), Some(cfg));
    heal_manager.start().await.unwrap();

    // Wait for the background heal to run
    tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;

    // ─── 2️⃣ verify format.json is restored ───────
    assert!(format_path.exists(), "format.json does not exist on disk after heal");

    info!("Heal format basic test passed");
}

#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
#[serial]
async fn test_heal_format_with_data() {
    let (disk_paths, ecstore, heal_storage) = setup_test_env().await;

    // Create test bucket and object
    let bucket_name = "test-bucket";
    let object_name = "test-object.txt";
    let test_data = b"Hello, this is test data for healing!";

    create_test_bucket(&ecstore, bucket_name).await;
    upload_test_object(&ecstore, bucket_name, object_name, test_data).await;

    let obj_dir = disk_paths[0].join(bucket_name).join(object_name);
    let target_part = WalkDir::new(&obj_dir)
        .min_depth(2)
        .max_depth(2)
        .into_iter()
        .filter_map(Result::ok)
        .find(|e| e.file_type().is_file() && e.file_name().to_str().map(|n| n.starts_with("part.")).unwrap_or(false))
        .map(|e| e.into_path())
        .expect("Failed to locate part file to delete");

    // ─── 1️⃣ wipe the whole disk, including format.json ──────────────
    let format_path = disk_paths[0].join(".rustfs.sys").join("format.json");
    std::fs::remove_dir_all(&disk_paths[0]).expect("failed to delete all contents under disk_paths[0]");
    std::fs::create_dir_all(&disk_paths[0]).expect("failed to recreate disk_paths[0] directory");
    println!("✅ Wiped disk: {:?}", disk_paths[0]);

    // Create heal manager; the background format check runs on the heal interval
    let cfg = HealConfig {
        heal_interval: Duration::from_secs(2),
        ..Default::default()
    };
    let heal_manager = HealManager::new(heal_storage.clone(), Some(cfg));
    heal_manager.start().await.unwrap();

    // Wait for the background heal to run
    tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;

    // ─── 2️⃣ verify format.json is restored ───────
    assert!(format_path.exists(), "format.json does not exist on disk after heal");
    // ─── 3️⃣ verify the data shard is restored ───────
    assert!(target_part.exists());

    info!("Heal format with data test passed");
}

#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
#[serial]
async fn test_heal_storage_api_direct() {
    let (_disk_paths, ecstore, heal_storage) = setup_test_env().await;

    // Test direct heal storage API calls

    // Test heal_format
    let format_result = heal_storage.heal_format(true).await; // dry run
    assert!(format_result.is_ok());
    info!("Direct heal_format test passed");

    // Test heal_bucket
    let bucket_name = "test-bucket-direct";
    create_test_bucket(&ecstore, bucket_name).await;

    let heal_opts = HealOpts {
        recursive: true,
        dry_run: true,
        remove: false,
        recreate: false,
        scan_mode: HealScanMode::Normal,
        update_parity: false,
        no_lock: false,
        pool: None,
        set: None,
    };

    let bucket_result = heal_storage.heal_bucket(bucket_name, &heal_opts).await;
    assert!(bucket_result.is_ok());
    info!("Direct heal_bucket test passed");

    // Test heal_object
    let object_name = "test-object-direct.txt";
    let test_data = b"Test data for direct heal API";
    upload_test_object(&ecstore, bucket_name, object_name, test_data).await;

    let object_heal_opts = HealOpts {
        recursive: false,
        dry_run: true,
        remove: false,
        recreate: false,
        scan_mode: HealScanMode::Normal,
        update_parity: false,
        no_lock: false,
        pool: None,
        set: None,
    };

    let object_result = heal_storage
        .heal_object(bucket_name, object_name, None, &object_heal_opts)
        .await;
    assert!(object_result.is_ok());
    info!("Direct heal_object test passed");

    info!("Direct heal storage API test passed");
}

565 crates/ahm/tests/lifecycle_integration_test.rs Normal file
@@ -0,0 +1,565 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use rustfs_ahm::scanner::{Scanner, data_scanner::ScannerConfig};
use rustfs_ecstore::{
    bucket::metadata::BUCKET_LIFECYCLE_CONFIG,
    bucket::metadata_sys,
    disk::endpoint::Endpoint,
    endpoints::{EndpointServerPools, Endpoints, PoolEndpoints},
    store::ECStore,
    store_api::{ObjectIO, ObjectOptions, PutObjReader, StorageAPI},
    tier::tier::TierConfigMgr,
    tier::tier_config::{TierConfig, TierMinIO, TierType},
};
use serial_test::serial;
use std::sync::Once;
use std::sync::OnceLock;
use std::{path::PathBuf, sync::Arc, time::Duration};
use tokio::fs;
use tokio::sync::RwLock;
use tracing::info;
use tracing::warn;

static GLOBAL_ENV: OnceLock<(Vec<PathBuf>, Arc<ECStore>)> = OnceLock::new();
static INIT: Once = Once::new();
static GLOBAL_TIER_CONFIG_MGR: OnceLock<Arc<RwLock<TierConfigMgr>>> = OnceLock::new();

fn init_tracing() {
    INIT.call_once(|| {
        let _ = tracing_subscriber::fmt::try_init();
    });
}

/// Test helper: Create test environment with ECStore
async fn setup_test_env() -> (Vec<PathBuf>, Arc<ECStore>) {
    init_tracing();

    // Fast path: already initialized, just clone and return
    if let Some((paths, ecstore)) = GLOBAL_ENV.get() {
        return (paths.clone(), ecstore.clone());
    }

    // create temp dir as 4 disks with unique base dir
    let test_base_dir = format!("/tmp/rustfs_ahm_lifecycle_test_{}", uuid::Uuid::new_v4());
    let temp_dir = std::path::PathBuf::from(&test_base_dir);
    if temp_dir.exists() {
        fs::remove_dir_all(&temp_dir).await.ok();
    }
    fs::create_dir_all(&temp_dir).await.unwrap();

    // create 4 disk dirs
    let disk_paths = vec![
        temp_dir.join("disk1"),
        temp_dir.join("disk2"),
        temp_dir.join("disk3"),
        temp_dir.join("disk4"),
    ];

    for disk_path in &disk_paths {
        fs::create_dir_all(disk_path).await.unwrap();
    }

    // create EndpointServerPools
    let mut endpoints = Vec::new();
    for (i, disk_path) in disk_paths.iter().enumerate() {
        let mut endpoint = Endpoint::try_from(disk_path.to_str().unwrap()).unwrap();
        // set correct index
        endpoint.set_pool_index(0);
        endpoint.set_set_index(0);
        endpoint.set_disk_index(i);
        endpoints.push(endpoint);
    }

    let pool_endpoints = PoolEndpoints {
        legacy: false,
        set_count: 1,
        drives_per_set: 4,
        endpoints: Endpoints::from(endpoints),
        cmd_line: "test".to_string(),
        platform: format!("OS: {} | Arch: {}", std::env::consts::OS, std::env::consts::ARCH),
    };

    let endpoint_pools = EndpointServerPools(vec![pool_endpoints]);

    // format disks (only first time)
    rustfs_ecstore::store::init_local_disks(endpoint_pools.clone()).await.unwrap();

    // create ECStore on a fixed local port (9002) for simplicity
    let port = 9002;
    let server_addr: std::net::SocketAddr = format!("127.0.0.1:{port}").parse().unwrap();
    let ecstore = ECStore::new(server_addr, endpoint_pools).await.unwrap();

    // init bucket metadata system
    let buckets_list = ecstore
        .list_bucket(&rustfs_ecstore::store_api::BucketOptions {
            no_metadata: true,
            ..Default::default()
        })
        .await
        .unwrap();
    let buckets = buckets_list.into_iter().map(|v| v.name).collect();
    rustfs_ecstore::bucket::metadata_sys::init_bucket_metadata_sys(ecstore.clone(), buckets).await;

    // Initialize background expiry workers
    rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::init_background_expiry(ecstore.clone()).await;

    // Store in global once lock
    let _ = GLOBAL_ENV.set((disk_paths.clone(), ecstore.clone()));

    let _ = GLOBAL_TIER_CONFIG_MGR.set(TierConfigMgr::new());

    (disk_paths, ecstore)
}
|
||||
|
||||
/// Test helper: Create a test bucket
async fn create_test_bucket(ecstore: &Arc<ECStore>, bucket_name: &str) {
    (**ecstore)
        .make_bucket(bucket_name, &Default::default())
        .await
        .expect("Failed to create test bucket");
    info!("Created test bucket: {}", bucket_name);
}

/// Test helper: Upload test object
async fn upload_test_object(ecstore: &Arc<ECStore>, bucket: &str, object: &str, data: &[u8]) {
    let mut reader = PutObjReader::from_vec(data.to_vec());
    let object_info = (**ecstore)
        .put_object(bucket, object, &mut reader, &ObjectOptions::default())
        .await
        .expect("Failed to upload test object");

    info!("Uploaded test object: {}/{} ({} bytes)", bucket, object, object_info.size);
}

/// Test helper: Set bucket lifecycle configuration
async fn set_bucket_lifecycle(bucket_name: &str) -> Result<(), Box<dyn std::error::Error>> {
    // Create a simple lifecycle configuration XML with 0 days expiry for immediate testing
    let lifecycle_xml = r#"<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration>
    <Rule>
        <ID>test-rule</ID>
        <Status>Enabled</Status>
        <Filter>
            <Prefix>test/</Prefix>
        </Filter>
        <Expiration>
            <Days>0</Days>
        </Expiration>
    </Rule>
</LifecycleConfiguration>"#;

    metadata_sys::update(bucket_name, BUCKET_LIFECYCLE_CONFIG, lifecycle_xml.as_bytes().to_vec()).await?;

    Ok(())
}

/// Test helper: Set bucket lifecycle configuration with ExpiredObjectDeleteMarker enabled
async fn set_bucket_lifecycle_deletemarker(bucket_name: &str) -> Result<(), Box<dyn std::error::Error>> {
    // Create a simple lifecycle configuration XML with 0 days expiry for immediate testing
    let lifecycle_xml = r#"<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration>
    <Rule>
        <ID>test-rule</ID>
        <Status>Enabled</Status>
        <Filter>
            <Prefix>test/</Prefix>
        </Filter>
        <Expiration>
            <Days>0</Days>
            <ExpiredObjectDeleteMarker>true</ExpiredObjectDeleteMarker>
        </Expiration>
    </Rule>
</LifecycleConfiguration>"#;

    metadata_sys::update(bucket_name, BUCKET_LIFECYCLE_CONFIG, lifecycle_xml.as_bytes().to_vec()).await?;

    Ok(())
}

/// Test helper: Set bucket lifecycle configuration with transition rules
#[allow(dead_code)]
async fn set_bucket_lifecycle_transition(bucket_name: &str) -> Result<(), Box<dyn std::error::Error>> {
    // Create a simple lifecycle configuration XML with 0 days transition for immediate testing
    let lifecycle_xml = r#"<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration>
    <Rule>
        <ID>test-rule</ID>
        <Status>Enabled</Status>
        <Filter>
            <Prefix>test/</Prefix>
        </Filter>
        <Transition>
            <Days>0</Days>
            <StorageClass>COLDTIER</StorageClass>
        </Transition>
    </Rule>
    <Rule>
        <ID>test-rule2</ID>
        <Status>Disabled</Status>
        <Filter>
            <Prefix>test/</Prefix>
        </Filter>
        <NoncurrentVersionTransition>
            <NoncurrentDays>0</NoncurrentDays>
            <StorageClass>COLDTIER</StorageClass>
        </NoncurrentVersionTransition>
    </Rule>
</LifecycleConfiguration>"#;

    metadata_sys::update(bucket_name, BUCKET_LIFECYCLE_CONFIG, lifecycle_xml.as_bytes().to_vec()).await?;

    Ok(())
}

/// Test helper: Create a test tier
#[allow(dead_code)]
async fn create_test_tier() {
    let args = TierConfig {
        version: "v1".to_string(),
        tier_type: TierType::MinIO,
        name: "COLDTIER".to_string(),
        s3: None,
        rustfs: None,
        minio: Some(TierMinIO {
            access_key: "minioadmin".to_string(),
            secret_key: "minioadmin".to_string(),
            bucket: "mblock2".to_string(),
            endpoint: "http://127.0.0.1:9020".to_string(),
            prefix: "mypre3/".to_string(),
            region: "".to_string(),
            ..Default::default()
        }),
    };
    let mut tier_config_mgr = GLOBAL_TIER_CONFIG_MGR.get().unwrap().write().await;
    if let Err(err) = tier_config_mgr.add(args, false).await {
        warn!("tier_config_mgr add failed: {:?}", err);
        panic!("tier add failed. {err}");
    }
    if let Err(e) = tier_config_mgr.save().await {
        warn!("tier_config_mgr save failed: {:?}", e);
        panic!("tier save failed");
    }
    info!("Created test tier: COLDTIER");
}

/// Test helper: Check if object exists
async fn object_exists(ecstore: &Arc<ECStore>, bucket: &str, object: &str) -> bool {
    (**ecstore).get_object_info(bucket, object, &ObjectOptions::default()).await.is_ok()
}

/// Test helper: Check if the latest version of an object is a delete marker
#[allow(dead_code)]
async fn object_is_delete_marker(ecstore: &Arc<ECStore>, bucket: &str, object: &str) -> bool {
    if let Ok(oi) = (**ecstore).get_object_info(bucket, object, &ObjectOptions::default()).await {
        println!("oi: {:?}", oi);
        oi.delete_marker
    } else {
        panic!("object_is_delete_marker: failed to get object info");
    }
}

/// Test helper: Check if an object has been transitioned to another tier
#[allow(dead_code)]
async fn object_is_transitioned(ecstore: &Arc<ECStore>, bucket: &str, object: &str) -> bool {
    if let Ok(oi) = (**ecstore).get_object_info(bucket, object, &ObjectOptions::default()).await {
        info!("oi: {:?}", oi);
        !oi.transitioned_object.status.is_empty()
    } else {
        panic!("object_is_transitioned: failed to get object info");
    }
}

#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
#[serial]
async fn test_lifecycle_expiry_basic() {
    let (_disk_paths, ecstore) = setup_test_env().await;

    // Create test bucket and object
    let bucket_name = "test-lifecycle-bucket";
    let object_name = "test/object.txt"; // Match the lifecycle rule prefix "test/"
    let test_data = b"Hello, this is test data for lifecycle expiry!";

    create_test_bucket(&ecstore, bucket_name).await;
    upload_test_object(&ecstore, bucket_name, object_name, test_data).await;

    // Verify object exists initially
    assert!(object_exists(&ecstore, bucket_name, object_name).await);
    println!("✅ Object exists before lifecycle processing");

    // Set lifecycle configuration with very short expiry (0 days = immediate expiry)
    set_bucket_lifecycle(bucket_name)
        .await
        .expect("Failed to set lifecycle configuration");
    println!("✅ Lifecycle configuration set for bucket: {bucket_name}");

    // Verify lifecycle configuration was set
    match rustfs_ecstore::bucket::metadata_sys::get(bucket_name).await {
        Ok(bucket_meta) => {
            assert!(bucket_meta.lifecycle_config.is_some());
            println!("✅ Bucket metadata retrieved successfully");
        }
        Err(e) => {
            println!("❌ Error retrieving bucket metadata: {e:?}");
        }
    }

    // Create scanner with very short intervals for testing
    let scanner_config = ScannerConfig {
        scan_interval: Duration::from_millis(100),
        deep_scan_interval: Duration::from_millis(500),
        max_concurrent_scans: 1,
        ..Default::default()
    };

    let scanner = Scanner::new(Some(scanner_config), None);

    // Start scanner
    scanner.start().await.expect("Failed to start scanner");
    println!("✅ Scanner started");

    // Wait for scanner to process lifecycle rules
    tokio::time::sleep(Duration::from_secs(2)).await;

    // Manually trigger a scan cycle to ensure lifecycle processing
    scanner.scan_cycle().await.expect("Failed to trigger scan cycle");
    println!("✅ Manual scan cycle completed");

    // Wait a bit more for background workers to process expiry tasks
    tokio::time::sleep(Duration::from_secs(5)).await;

    // Check if object has been expired (delete_marker)
    //let check_result = object_is_delete_marker(&ecstore, bucket_name, object_name).await;
    let check_result = object_exists(&ecstore, bucket_name, object_name).await;
    println!("Object exists after lifecycle processing: {check_result}");

    if !check_result {
        println!("❌ Object was not deleted by lifecycle processing");
        // Let's try to get object info to see its details
        match ecstore
            .get_object_info(bucket_name, object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
            .await
        {
            Ok(obj_info) => {
                println!(
                    "Object info: name={}, size={}, mod_time={:?}",
                    obj_info.name, obj_info.size, obj_info.mod_time
                );
            }
            Err(e) => {
                println!("Error getting object info: {e:?}");
            }
        }
    } else {
        println!("✅ Object was successfully deleted by lifecycle processing");
    }

    assert!(check_result);
    println!("✅ Object successfully expired");

    // Stop scanner
    let _ = scanner.stop().await;
    println!("✅ Scanner stopped");

    println!("Lifecycle expiry basic test completed");
}

#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
#[serial]
async fn test_lifecycle_expiry_deletemarker() {
    let (_disk_paths, ecstore) = setup_test_env().await;

    // Create test bucket and object
    let bucket_name = "test-lifecycle-bucket";
    let object_name = "test/object.txt"; // Match the lifecycle rule prefix "test/"
    let test_data = b"Hello, this is test data for lifecycle expiry!";

    create_test_bucket(&ecstore, bucket_name).await;
    upload_test_object(&ecstore, bucket_name, object_name, test_data).await;

    // Verify object exists initially
    assert!(object_exists(&ecstore, bucket_name, object_name).await);
    println!("✅ Object exists before lifecycle processing");

    // Set lifecycle configuration with very short expiry (0 days = immediate expiry)
    set_bucket_lifecycle_deletemarker(bucket_name)
        .await
        .expect("Failed to set lifecycle configuration");
    println!("✅ Lifecycle configuration set for bucket: {bucket_name}");

    // Verify lifecycle configuration was set
    match rustfs_ecstore::bucket::metadata_sys::get(bucket_name).await {
        Ok(bucket_meta) => {
            assert!(bucket_meta.lifecycle_config.is_some());
            println!("✅ Bucket metadata retrieved successfully");
        }
        Err(e) => {
            println!("❌ Error retrieving bucket metadata: {e:?}");
        }
    }

    // Create scanner with very short intervals for testing
    let scanner_config = ScannerConfig {
        scan_interval: Duration::from_millis(100),
        deep_scan_interval: Duration::from_millis(500),
        max_concurrent_scans: 1,
        ..Default::default()
    };

    let scanner = Scanner::new(Some(scanner_config), None);

    // Start scanner
    scanner.start().await.expect("Failed to start scanner");
    println!("✅ Scanner started");

    // Wait for scanner to process lifecycle rules
    tokio::time::sleep(Duration::from_secs(2)).await;

    // Manually trigger a scan cycle to ensure lifecycle processing
    scanner.scan_cycle().await.expect("Failed to trigger scan cycle");
    println!("✅ Manual scan cycle completed");

    // Wait a bit more for background workers to process expiry tasks
    tokio::time::sleep(Duration::from_secs(5)).await;

    // Check if object has been expired (deleted)
    let check_result = object_exists(&ecstore, bucket_name, object_name).await;
    println!("Object exists after lifecycle processing: {check_result}");

    if !check_result {
        println!("❌ Object was not deleted by lifecycle processing");
        // Let's try to get object info to see its details
        match ecstore
            .get_object_info(bucket_name, object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
            .await
        {
            Ok(obj_info) => {
                println!(
                    "Object info: name={}, size={}, mod_time={:?}",
                    obj_info.name, obj_info.size, obj_info.mod_time
                );
            }
            Err(e) => {
                println!("Error getting object info: {e:?}");
            }
        }
    } else {
        println!("✅ Object was successfully deleted by lifecycle processing");
    }

    assert!(check_result);
    println!("✅ Object successfully expired");

    // Stop scanner
    let _ = scanner.stop().await;
    println!("✅ Scanner stopped");

    println!("Lifecycle expiry delete marker test completed");
}

#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
#[serial]
async fn test_lifecycle_transition_basic() {
    let (_disk_paths, ecstore) = setup_test_env().await;

    //create_test_tier().await;

    // Create test bucket and object
    let bucket_name = "test-lifecycle-bucket";
    let object_name = "test/object.txt"; // Match the lifecycle rule prefix "test/"
    let test_data = b"Hello, this is test data for lifecycle expiry!";

    create_test_bucket(&ecstore, bucket_name).await;
    upload_test_object(&ecstore, bucket_name, object_name, test_data).await;

    // Verify object exists initially
    assert!(object_exists(&ecstore, bucket_name, object_name).await);
    println!("✅ Object exists before lifecycle processing");

    // Set lifecycle configuration with very short transition (0 days = immediate transition)
    /*set_bucket_lifecycle_transition(bucket_name)
        .await
        .expect("Failed to set lifecycle configuration");
    println!("✅ Lifecycle configuration set for bucket: {bucket_name}");

    // Verify lifecycle configuration was set
    match rustfs_ecstore::bucket::metadata_sys::get(bucket_name).await {
        Ok(bucket_meta) => {
            assert!(bucket_meta.lifecycle_config.is_some());
            println!("✅ Bucket metadata retrieved successfully");
        }
        Err(e) => {
            println!("❌ Error retrieving bucket metadata: {e:?}");
        }
    }*/

    // Create scanner with very short intervals for testing
    let scanner_config = ScannerConfig {
        scan_interval: Duration::from_millis(100),
        deep_scan_interval: Duration::from_millis(500),
        max_concurrent_scans: 1,
        ..Default::default()
    };

    let scanner = Scanner::new(Some(scanner_config), None);

    // Start scanner
    scanner.start().await.expect("Failed to start scanner");
    println!("✅ Scanner started");

    // Wait for scanner to process lifecycle rules
    tokio::time::sleep(Duration::from_secs(2)).await;

    // Manually trigger a scan cycle to ensure lifecycle processing
    scanner.scan_cycle().await.expect("Failed to trigger scan cycle");
    println!("✅ Manual scan cycle completed");

    // Wait a bit more for background workers to process expiry tasks
    tokio::time::sleep(Duration::from_secs(5)).await;

    // Check if object still exists (transition should not delete it)
    //let check_result = object_is_transitioned(&ecstore, bucket_name, object_name).await;
    let check_result = object_exists(&ecstore, bucket_name, object_name).await;
    println!("Object exists after lifecycle processing: {check_result}");

    if check_result {
        println!("✅ Object was not deleted by lifecycle processing");
        // Let's try to get object info to see its details
        match ecstore
            .get_object_info(bucket_name, object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
            .await
        {
            Ok(obj_info) => {
                println!(
                    "Object info: name={}, size={}, mod_time={:?}",
                    obj_info.name, obj_info.size, obj_info.mod_time
                );
                println!("Object info: transitioned_object={:?}", obj_info.transitioned_object);
            }
            Err(e) => {
                println!("Error getting object info: {e:?}");
            }
        }
    } else {
        println!("❌ Object was deleted by lifecycle processing");
    }

    assert!(check_result);
    println!("✅ Object successfully transitioned");

    // Stop scanner
    let _ = scanner.stop().await;
    println!("✅ Scanner stopped");

    println!("Lifecycle transition basic test completed");
}
44
crates/audit-logger/Cargo.toml
Normal file
@@ -0,0 +1,44 @@
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

[package]
name = "rustfs-audit-logger"
edition.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true
homepage.workspace = true
description = "Audit logging system for RustFS, providing detailed logging of file operations and system events."
documentation = "https://docs.rs/audit-logger/latest/audit_logger/"
keywords = ["audit", "logging", "file-operations", "system-events", "RustFS"]
categories = ["web-programming", "development-tools::profiling", "asynchronous", "api-bindings", "development-tools::debugging"]

[dependencies]
rustfs-targets = { workspace = true }
async-trait = { workspace = true }
chrono = { workspace = true }
reqwest = { workspace = true }
serde = { workspace = true }
serde_json = { workspace = true }
tracing = { workspace = true, features = ["std", "attributes"] }
tracing-core = { workspace = true }
tokio = { workspace = true, features = ["sync", "fs", "rt-multi-thread", "rt", "time", "macros"] }
url = { workspace = true }
uuid = { workspace = true }
thiserror = { workspace = true }
figment = { version = "0.10", features = ["json", "env"] }

[lints]
workspace = true
34
crates/audit-logger/examples/config.json
Normal file
@@ -0,0 +1,34 @@
{
    "console": {
        "enabled": true
    },
    "logger_webhook": {
        "default": {
            "enabled": true,
            "endpoint": "http://localhost:3000/logs",
            "auth_token": "secret-token-for-logs",
            "batch_size": 5,
            "queue_size": 1000,
            "max_retry": 3,
            "retry_interval": "2s"
        }
    },
    "audit_webhook": {
        "splunk": {
            "enabled": true,
            "endpoint": "http://localhost:3000/audit",
            "auth_token": "secret-token-for-audit",
            "batch_size": 10
        }
    },
    "audit_kafka": {
        "default": {
            "enabled": false,
            "brokers": [
                "kafka1:9092",
                "kafka2:9092"
            ],
            "topic": "minio-audit-events"
        }
    }
}
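The example config above is consumed through the crate's figment dependency (declared in Cargo.toml with the "json" and "env" features). Below is a minimal loading sketch; the `Config`/`ConsoleConfig` struct names, their field layout, and the `RUSTFS_AUDIT_` environment prefix are illustrative assumptions, not the crate's actual API:

use figment::{
    providers::{Env, Format, Json},
    Figment,
};
use serde::Deserialize;

#[derive(Debug, Deserialize)]
struct ConsoleConfig {
    enabled: bool,
}

#[derive(Debug, Deserialize)]
struct Config {
    console: ConsoleConfig,
    // logger_webhook, audit_webhook and audit_kafka sections omitted for brevity
}

fn main() -> Result<(), figment::Error> {
    // Merge the JSON file first, then let prefixed environment variables override it.
    let config: Config = Figment::new()
        .merge(Json::file("crates/audit-logger/examples/config.json"))
        .merge(Env::prefixed("RUSTFS_AUDIT_"))
        .extract()?;
    println!("console enabled: {}", config.console.enabled);
    Ok(())
}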
17
crates/audit-logger/examples/main.rs
Normal file
@@ -0,0 +1,17 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

fn main() {
    println!("Audit Logger Example");
}
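The example binary is currently a stub. A sketch of what it could exercise, using only the builder APIs introduced elsewhere in this diff (it assumes the lib.rs re-exports, including `LogRecord`, as shown in the crate's doc examples):

use rustfs_audit_logger::{ApiDetails, AuditLogEntry, LogRecord};

fn main() {
    // Build API details with the fluent setters defined in entry/audit.rs.
    let api = ApiDetails::new()
        .set_name(Some("PutObject".to_string()))
        .set_bucket(Some("my-bucket".to_string()))
        .set_status(Some("OK".to_string()))
        .set_status_code(Some(200));

    // Wrap them in an audit entry and serialize via the LogRecord trait.
    let entry = AuditLogEntry::new()
        .set_version("1".to_string())
        .set_event("s3:PutObject".to_string())
        .set_api(api);
    println!("{}", entry.to_json());
}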
90
crates/audit-logger/src/entry/args.rs
Normal file
@@ -0,0 +1,90 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![allow(dead_code)]

use crate::entry::ObjectVersion;
use serde::{Deserialize, Serialize};
use std::collections::HashMap;

/// Args defines the arguments for API operations.
///
/// # Example
/// ```
/// use rustfs_audit_logger::Args;
/// use std::collections::HashMap;
///
/// let args = Args::new()
///     .set_bucket(Some("my-bucket".to_string()))
///     .set_object(Some("my-object".to_string()))
///     .set_version_id(Some("123".to_string()))
///     .set_metadata(Some(HashMap::new()));
/// ```
#[derive(Debug, Clone, Serialize, Deserialize, Default, Eq, PartialEq)]
pub struct Args {
    #[serde(rename = "bucket", skip_serializing_if = "Option::is_none")]
    pub bucket: Option<String>,
    #[serde(rename = "object", skip_serializing_if = "Option::is_none")]
    pub object: Option<String>,
    #[serde(rename = "versionId", skip_serializing_if = "Option::is_none")]
    pub version_id: Option<String>,
    #[serde(rename = "objects", skip_serializing_if = "Option::is_none")]
    pub objects: Option<Vec<ObjectVersion>>,
    #[serde(rename = "metadata", skip_serializing_if = "Option::is_none")]
    pub metadata: Option<HashMap<String, String>>,
}

impl Args {
    /// Create a new Args object
    pub fn new() -> Self {
        Args {
            bucket: None,
            object: None,
            version_id: None,
            objects: None,
            metadata: None,
        }
    }

    /// Set the bucket
    pub fn set_bucket(mut self, bucket: Option<String>) -> Self {
        self.bucket = bucket;
        self
    }

    /// Set the object
    pub fn set_object(mut self, object: Option<String>) -> Self {
        self.object = object;
        self
    }

    /// Set the version ID
    pub fn set_version_id(mut self, version_id: Option<String>) -> Self {
        self.version_id = version_id;
        self
    }

    /// Set the objects
    pub fn set_objects(mut self, objects: Option<Vec<ObjectVersion>>) -> Self {
        self.objects = objects;
        self
    }

    /// Set the metadata
    pub fn set_metadata(mut self, metadata: Option<HashMap<String, String>>) -> Self {
        self.metadata = metadata;
        self
    }
}
469
crates/audit-logger/src/entry/audit.rs
Normal file
@@ -0,0 +1,469 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![allow(dead_code)]

use crate::{BaseLogEntry, LogRecord, ObjectVersion};
use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::HashMap;

/// API details structure
/// ApiDetails is used to define the details of an API operation
///
/// The `ApiDetails` structure contains the following fields:
/// - `name` - the name of the API operation
/// - `bucket` - the bucket name
/// - `object` - the object name
/// - `objects` - the list of objects
/// - `status` - the status of the API operation
/// - `status_code` - the status code of the API operation
/// - `input_bytes` - the input bytes
/// - `output_bytes` - the output bytes
/// - `header_bytes` - the header bytes
/// - `time_to_first_byte` - the time to first byte
/// - `time_to_first_byte_in_ns` - the time to first byte in nanoseconds
/// - `time_to_response` - the time to response
/// - `time_to_response_in_ns` - the time to response in nanoseconds
///
/// The `ApiDetails` structure contains the following methods:
/// - `new` - create a new `ApiDetails` with default values
/// - `set_name` - set the name
/// - `set_bucket` - set the bucket
/// - `set_object` - set the object
/// - `set_objects` - set the objects
/// - `set_status` - set the status
/// - `set_status_code` - set the status code
/// - `set_input_bytes` - set the input bytes
/// - `set_output_bytes` - set the output bytes
/// - `set_header_bytes` - set the header bytes
/// - `set_time_to_first_byte` - set the time to first byte
/// - `set_time_to_first_byte_in_ns` - set the time to first byte in nanoseconds
/// - `set_time_to_response` - set the time to response
/// - `set_time_to_response_in_ns` - set the time to response in nanoseconds
///
/// # Example
/// ```
/// use rustfs_audit_logger::ApiDetails;
/// use rustfs_audit_logger::ObjectVersion;
///
/// let api = ApiDetails::new()
///     .set_name(Some("GET".to_string()))
///     .set_bucket(Some("my-bucket".to_string()))
///     .set_object(Some("my-object".to_string()))
///     .set_objects(vec![ObjectVersion::new_with_object_name("my-object".to_string())])
///     .set_status(Some("OK".to_string()))
///     .set_status_code(Some(200))
///     .set_input_bytes(100)
///     .set_output_bytes(200)
///     .set_header_bytes(Some(50))
///     .set_time_to_first_byte(Some("100ms".to_string()))
///     .set_time_to_first_byte_in_ns(Some("100000000ns".to_string()))
///     .set_time_to_response(Some("200ms".to_string()))
///     .set_time_to_response_in_ns(Some("200000000ns".to_string()));
/// ```
#[derive(Debug, Serialize, Deserialize, Clone, Default, PartialEq, Eq)]
pub struct ApiDetails {
    #[serde(rename = "name", skip_serializing_if = "Option::is_none")]
    pub name: Option<String>,
    #[serde(rename = "bucket", skip_serializing_if = "Option::is_none")]
    pub bucket: Option<String>,
    #[serde(rename = "object", skip_serializing_if = "Option::is_none")]
    pub object: Option<String>,
    #[serde(rename = "objects", skip_serializing_if = "Vec::is_empty", default)]
    pub objects: Vec<ObjectVersion>,
    #[serde(rename = "status", skip_serializing_if = "Option::is_none")]
    pub status: Option<String>,
    #[serde(rename = "statusCode", skip_serializing_if = "Option::is_none")]
    pub status_code: Option<i32>,
    #[serde(rename = "rx")]
    pub input_bytes: i64,
    #[serde(rename = "tx")]
    pub output_bytes: i64,
    #[serde(rename = "txHeaders", skip_serializing_if = "Option::is_none")]
    pub header_bytes: Option<i64>,
    #[serde(rename = "timeToFirstByte", skip_serializing_if = "Option::is_none")]
    pub time_to_first_byte: Option<String>,
    #[serde(rename = "timeToFirstByteInNS", skip_serializing_if = "Option::is_none")]
    pub time_to_first_byte_in_ns: Option<String>,
    #[serde(rename = "timeToResponse", skip_serializing_if = "Option::is_none")]
    pub time_to_response: Option<String>,
    #[serde(rename = "timeToResponseInNS", skip_serializing_if = "Option::is_none")]
    pub time_to_response_in_ns: Option<String>,
}

impl ApiDetails {
    /// Create a new `ApiDetails` with default values
    pub fn new() -> Self {
        ApiDetails {
            name: None,
            bucket: None,
            object: None,
            objects: Vec::new(),
            status: None,
            status_code: None,
            input_bytes: 0,
            output_bytes: 0,
            header_bytes: None,
            time_to_first_byte: None,
            time_to_first_byte_in_ns: None,
            time_to_response: None,
            time_to_response_in_ns: None,
        }
    }

    /// Set the name
    pub fn set_name(mut self, name: Option<String>) -> Self {
        self.name = name;
        self
    }

    /// Set the bucket
    pub fn set_bucket(mut self, bucket: Option<String>) -> Self {
        self.bucket = bucket;
        self
    }

    /// Set the object
    pub fn set_object(mut self, object: Option<String>) -> Self {
        self.object = object;
        self
    }

    /// Set the objects
    pub fn set_objects(mut self, objects: Vec<ObjectVersion>) -> Self {
        self.objects = objects;
        self
    }

    /// Set the status
    pub fn set_status(mut self, status: Option<String>) -> Self {
        self.status = status;
        self
    }

    /// Set the status code
    pub fn set_status_code(mut self, status_code: Option<i32>) -> Self {
        self.status_code = status_code;
        self
    }

    /// Set the input bytes
    pub fn set_input_bytes(mut self, input_bytes: i64) -> Self {
        self.input_bytes = input_bytes;
        self
    }

    /// Set the output bytes
    pub fn set_output_bytes(mut self, output_bytes: i64) -> Self {
        self.output_bytes = output_bytes;
        self
    }

    /// Set the header bytes
    pub fn set_header_bytes(mut self, header_bytes: Option<i64>) -> Self {
        self.header_bytes = header_bytes;
        self
    }

    /// Set the time to first byte
    pub fn set_time_to_first_byte(mut self, time_to_first_byte: Option<String>) -> Self {
        self.time_to_first_byte = time_to_first_byte;
        self
    }

    /// Set the time to first byte in nanoseconds
    pub fn set_time_to_first_byte_in_ns(mut self, time_to_first_byte_in_ns: Option<String>) -> Self {
        self.time_to_first_byte_in_ns = time_to_first_byte_in_ns;
        self
    }

    /// Set the time to response
    pub fn set_time_to_response(mut self, time_to_response: Option<String>) -> Self {
        self.time_to_response = time_to_response;
        self
    }

    /// Set the time to response in nanoseconds
    pub fn set_time_to_response_in_ns(mut self, time_to_response_in_ns: Option<String>) -> Self {
        self.time_to_response_in_ns = time_to_response_in_ns;
        self
    }
}

/// Entry - audit entry logs
/// AuditLogEntry is used to define the structure of an audit log entry
///
/// The `AuditLogEntry` structure contains the following fields:
/// - `base` - the base log entry
/// - `version` - the version of the audit log entry
/// - `deployment_id` - the deployment ID
/// - `event` - the event
/// - `entry_type` - the type of audit message
/// - `api` - the API details
/// - `remote_host` - the remote host
/// - `user_agent` - the user agent
/// - `req_path` - the request path
/// - `req_host` - the request host
/// - `req_claims` - the request claims
/// - `req_query` - the request query
/// - `req_header` - the request header
/// - `resp_header` - the response header
/// - `access_key` - the access key
/// - `parent_user` - the parent user
/// - `error` - the error
///
/// The `AuditLogEntry` structure contains the following methods:
/// - `new` - create a new `AuditEntry` with default values
/// - `new_with_values` - create a new `AuditEntry` with version, time, event and api details
/// - `with_base` - set the base log entry
/// - `set_version` - set the version
/// - `set_deployment_id` - set the deployment ID
/// - `set_event` - set the event
/// - `set_entry_type` - set the entry type
/// - `set_api` - set the API details
/// - `set_remote_host` - set the remote host
/// - `set_user_agent` - set the user agent
/// - `set_req_path` - set the request path
/// - `set_req_host` - set the request host
/// - `set_req_claims` - set the request claims
/// - `set_req_query` - set the request query
/// - `set_req_header` - set the request header
/// - `set_resp_header` - set the response header
/// - `set_access_key` - set the access key
/// - `set_parent_user` - set the parent user
/// - `set_error` - set the error
///
/// # Example
/// ```
/// use rustfs_audit_logger::AuditLogEntry;
/// use rustfs_audit_logger::ApiDetails;
/// use std::collections::HashMap;
///
/// let entry = AuditLogEntry::new()
///     .set_version("1.0".to_string())
///     .set_deployment_id(Some("123".to_string()))
///     .set_event("event".to_string())
///     .set_entry_type(Some("type".to_string()))
///     .set_api(ApiDetails::new())
///     .set_remote_host(Some("remote-host".to_string()))
///     .set_user_agent(Some("user-agent".to_string()))
///     .set_req_path(Some("req-path".to_string()))
///     .set_req_host(Some("req-host".to_string()))
///     .set_req_claims(Some(HashMap::new()))
///     .set_req_query(Some(HashMap::new()))
///     .set_req_header(Some(HashMap::new()))
///     .set_resp_header(Some(HashMap::new()))
///     .set_access_key(Some("access-key".to_string()))
///     .set_parent_user(Some("parent-user".to_string()))
///     .set_error(Some("error".to_string()));
/// ```
#[derive(Debug, Serialize, Deserialize, Clone, Default)]
pub struct AuditLogEntry {
    #[serde(flatten)]
    pub base: BaseLogEntry,
    pub version: String,
    #[serde(rename = "deploymentid", skip_serializing_if = "Option::is_none")]
    pub deployment_id: Option<String>,
    pub event: String,
    // Class of audit message - S3, admin ops, bucket management
    #[serde(rename = "type", skip_serializing_if = "Option::is_none")]
    pub entry_type: Option<String>,
    pub api: ApiDetails,
    #[serde(rename = "remotehost", skip_serializing_if = "Option::is_none")]
    pub remote_host: Option<String>,
    #[serde(rename = "userAgent", skip_serializing_if = "Option::is_none")]
    pub user_agent: Option<String>,
    #[serde(rename = "requestPath", skip_serializing_if = "Option::is_none")]
    pub req_path: Option<String>,
    #[serde(rename = "requestHost", skip_serializing_if = "Option::is_none")]
    pub req_host: Option<String>,
    #[serde(rename = "requestClaims", skip_serializing_if = "Option::is_none")]
    pub req_claims: Option<HashMap<String, Value>>,
    #[serde(rename = "requestQuery", skip_serializing_if = "Option::is_none")]
    pub req_query: Option<HashMap<String, String>>,
    #[serde(rename = "requestHeader", skip_serializing_if = "Option::is_none")]
    pub req_header: Option<HashMap<String, String>>,
    #[serde(rename = "responseHeader", skip_serializing_if = "Option::is_none")]
    pub resp_header: Option<HashMap<String, String>>,
    #[serde(rename = "accessKey", skip_serializing_if = "Option::is_none")]
    pub access_key: Option<String>,
    #[serde(rename = "parentUser", skip_serializing_if = "Option::is_none")]
    pub parent_user: Option<String>,
    #[serde(rename = "error", skip_serializing_if = "Option::is_none")]
    pub error: Option<String>,
}

impl AuditLogEntry {
    /// Create a new `AuditEntry` with default values
    pub fn new() -> Self {
        AuditLogEntry {
            base: BaseLogEntry::new(),
            version: String::new(),
            deployment_id: None,
            event: String::new(),
            entry_type: None,
            api: ApiDetails::new(),
            remote_host: None,
            user_agent: None,
            req_path: None,
            req_host: None,
            req_claims: None,
            req_query: None,
            req_header: None,
            resp_header: None,
            access_key: None,
            parent_user: None,
            error: None,
        }
    }

    /// Create a new `AuditEntry` with version, time, event and api details
    pub fn new_with_values(version: String, time: DateTime<Utc>, event: String, api: ApiDetails) -> Self {
        let mut base = BaseLogEntry::new();
        base.timestamp = time;

        AuditLogEntry {
            base,
            version,
            deployment_id: None,
            event,
            entry_type: None,
            api,
            remote_host: None,
            user_agent: None,
            req_path: None,
            req_host: None,
            req_claims: None,
            req_query: None,
            req_header: None,
            resp_header: None,
            access_key: None,
            parent_user: None,
            error: None,
        }
    }

    /// Set the base log entry
    pub fn with_base(mut self, base: BaseLogEntry) -> Self {
        self.base = base;
        self
    }

    /// Set the version
    pub fn set_version(mut self, version: String) -> Self {
        self.version = version;
        self
    }

    /// Set the deployment ID
    pub fn set_deployment_id(mut self, deployment_id: Option<String>) -> Self {
        self.deployment_id = deployment_id;
        self
    }

    /// Set the event
    pub fn set_event(mut self, event: String) -> Self {
        self.event = event;
        self
    }

    /// Set the entry type
    pub fn set_entry_type(mut self, entry_type: Option<String>) -> Self {
        self.entry_type = entry_type;
        self
    }

    /// Set the API details
    pub fn set_api(mut self, api: ApiDetails) -> Self {
        self.api = api;
        self
    }

    /// Set the remote host
    pub fn set_remote_host(mut self, remote_host: Option<String>) -> Self {
        self.remote_host = remote_host;
        self
    }

    /// Set the user agent
    pub fn set_user_agent(mut self, user_agent: Option<String>) -> Self {
        self.user_agent = user_agent;
        self
    }

    /// Set the request path
    pub fn set_req_path(mut self, req_path: Option<String>) -> Self {
        self.req_path = req_path;
        self
    }

    /// Set the request host
    pub fn set_req_host(mut self, req_host: Option<String>) -> Self {
        self.req_host = req_host;
        self
    }

    /// Set the request claims
    pub fn set_req_claims(mut self, req_claims: Option<HashMap<String, Value>>) -> Self {
        self.req_claims = req_claims;
        self
    }

    /// Set the request query
    pub fn set_req_query(mut self, req_query: Option<HashMap<String, String>>) -> Self {
        self.req_query = req_query;
        self
    }

    /// Set the request header
    pub fn set_req_header(mut self, req_header: Option<HashMap<String, String>>) -> Self {
        self.req_header = req_header;
        self
    }

    /// Set the response header
    pub fn set_resp_header(mut self, resp_header: Option<HashMap<String, String>>) -> Self {
        self.resp_header = resp_header;
        self
    }

    /// Set the access key
    pub fn set_access_key(mut self, access_key: Option<String>) -> Self {
        self.access_key = access_key;
        self
    }

    /// Set the parent user
    pub fn set_parent_user(mut self, parent_user: Option<String>) -> Self {
        self.parent_user = parent_user;
        self
    }

    /// Set the error
    pub fn set_error(mut self, error: Option<String>) -> Self {
        self.error = error;
        self
    }
}

impl LogRecord for AuditLogEntry {
    fn to_json(&self) -> String {
        serde_json::to_string(self).unwrap_or_else(|_| String::from("{}"))
    }

    fn get_timestamp(&self) -> DateTime<Utc> {
        self.base.timestamp
    }
}
108
crates/audit-logger/src/entry/base.rs
Normal file
@@ -0,0 +1,108 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![allow(dead_code)]

use chrono::{DateTime, Utc};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::HashMap;

/// Base log entry structure shared by all log types.
/// It is serialized to JSON when entries are sent to the log sinks,
/// deserialized when entries are read back, and used to store and
/// query log entries in the database.
///
/// The `BaseLogEntry` structure contains the following fields:
/// - `timestamp` - the timestamp of the log entry
/// - `request_id` - the request ID of the log entry
/// - `message` - the message of the log entry
/// - `tags` - the tags of the log entry
///
/// The `BaseLogEntry` structure contains the following methods:
/// - `new` - create a new `BaseLogEntry` with default values
/// - `message` - set the message
/// - `request_id` - set the request ID
/// - `tags` - set the tags
/// - `timestamp` - set the timestamp
///
/// # Example
/// ```
/// use rustfs_audit_logger::BaseLogEntry;
/// use chrono::{DateTime, Utc};
/// use std::collections::HashMap;
///
/// let timestamp = Utc::now();
/// let request = Some("req-123".to_string());
/// let message = Some("This is a log message".to_string());
/// let tags = Some(HashMap::new());
///
/// let entry = BaseLogEntry::new()
///     .timestamp(timestamp)
///     .request_id(request)
///     .message(message)
///     .tags(tags);
/// ```
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq, Default)]
pub struct BaseLogEntry {
    #[serde(rename = "time")]
    pub timestamp: DateTime<Utc>,

    #[serde(rename = "requestID", skip_serializing_if = "Option::is_none")]
    pub request_id: Option<String>,

    #[serde(rename = "message", skip_serializing_if = "Option::is_none")]
    pub message: Option<String>,

    #[serde(rename = "tags", skip_serializing_if = "Option::is_none")]
    pub tags: Option<HashMap<String, Value>>,
}

impl BaseLogEntry {
    /// Create a new BaseLogEntry with default values
    pub fn new() -> Self {
        BaseLogEntry {
            timestamp: Utc::now(),
            request_id: None,
            message: None,
            tags: None,
        }
    }

    /// Set the message
    pub fn message(mut self, message: Option<String>) -> Self {
        self.message = message;
        self
    }

    /// Set the request ID
    pub fn request_id(mut self, request_id: Option<String>) -> Self {
        self.request_id = request_id;
        self
    }

    /// Set the tags
    pub fn tags(mut self, tags: Option<HashMap<String, Value>>) -> Self {
        self.tags = tags;
        self
    }

    /// Set the timestamp
    pub fn timestamp(mut self, timestamp: DateTime<Utc>) -> Self {
        self.timestamp = timestamp;
        self
    }
}
159
crates/audit-logger/src/entry/mod.rs
Normal file
@@ -0,0 +1,159 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![allow(dead_code)]
pub(crate) mod args;
pub(crate) mod audit;
pub(crate) mod base;
pub(crate) mod unified;

use serde::de::Error;
use serde::{Deserialize, Deserializer, Serialize, Serializer};
use tracing_core::Level;

/// ObjectVersion is used across multiple modules
#[derive(Debug, Clone, Serialize, Deserialize, Eq, PartialEq)]
pub struct ObjectVersion {
    #[serde(rename = "name")]
    pub object_name: String,
    #[serde(rename = "versionId", skip_serializing_if = "Option::is_none")]
    pub version_id: Option<String>,
}

impl ObjectVersion {
    /// Create a new ObjectVersion object
    pub fn new() -> Self {
        ObjectVersion {
            object_name: String::new(),
            version_id: None,
        }
    }

    /// Create a new ObjectVersion with object name
    pub fn new_with_object_name(object_name: String) -> Self {
        ObjectVersion {
            object_name,
            version_id: None,
        }
    }

    /// Set the object name
    pub fn set_object_name(mut self, object_name: String) -> Self {
        self.object_name = object_name;
        self
    }

    /// Set the version ID
    pub fn set_version_id(mut self, version_id: Option<String>) -> Self {
        self.version_id = version_id;
        self
    }
}

impl Default for ObjectVersion {
    fn default() -> Self {
        Self::new()
    }
}

/// Log kind/level enum
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq, Default)]
pub enum LogKind {
    #[serde(rename = "INFO")]
    #[default]
    Info,
    #[serde(rename = "WARNING")]
    Warning,
    #[serde(rename = "ERROR")]
    Error,
    #[serde(rename = "FATAL")]
    Fatal,
}

/// Trait for types that can be serialized to JSON and have a timestamp
/// This trait is used by `ServerLogEntry` to convert the log entry to JSON
/// and get the timestamp of the log entry
/// This trait is implemented by `ServerLogEntry`
///
/// # Example
/// ```
/// use rustfs_audit_logger::LogRecord;
/// use chrono::{DateTime, Utc};
/// use rustfs_audit_logger::ServerLogEntry;
/// use tracing_core::Level;
///
/// let log_entry = ServerLogEntry::new(Level::INFO, "api_handler".to_string());
/// let json = log_entry.to_json();
/// let timestamp = log_entry.get_timestamp();
/// ```
pub trait LogRecord {
    fn to_json(&self) -> String;
    fn get_timestamp(&self) -> chrono::DateTime<chrono::Utc>;
}

/// Wrapper for `tracing_core::Level` to implement `Serialize` and `Deserialize`
/// for `ServerLogEntry`
/// This is necessary because `tracing_core::Level` does not implement `Serialize`
/// and `Deserialize`
/// This is a workaround to allow `ServerLogEntry` to be serialized and deserialized
/// using `serde`
///
/// # Example
/// ```
/// use rustfs_audit_logger::SerializableLevel;
/// use tracing_core::Level;
///
/// let level = Level::INFO;
/// let serializable_level = SerializableLevel::from(level);
/// ```
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct SerializableLevel(pub Level);

impl From<Level> for SerializableLevel {
    fn from(level: Level) -> Self {
        SerializableLevel(level)
    }
}

impl From<SerializableLevel> for Level {
    fn from(serializable_level: SerializableLevel) -> Self {
        serializable_level.0
    }
}

impl Serialize for SerializableLevel {
    fn serialize<S>(&self, serializer: S) -> Result<S::Ok, S::Error>
    where
        S: Serializer,
    {
        serializer.serialize_str(self.0.as_str())
    }
}

impl<'de> Deserialize<'de> for SerializableLevel {
    fn deserialize<D>(deserializer: D) -> Result<Self, D::Error>
    where
        D: Deserializer<'de>,
    {
        let s = String::deserialize(deserializer)?;
        match s.as_str() {
            "TRACE" => Ok(SerializableLevel(Level::TRACE)),
            "DEBUG" => Ok(SerializableLevel(Level::DEBUG)),
            "INFO" => Ok(SerializableLevel(Level::INFO)),
            "WARN" => Ok(SerializableLevel(Level::WARN)),
            "ERROR" => Ok(SerializableLevel(Level::ERROR)),
            _ => Err(D::Error::custom("unknown log level")),
        }
    }
}
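A quick round-trip check for the custom Serialize/Deserialize impls above; a minimal sketch assuming the `SerializableLevel` re-export seen in the crate's doc examples and serde_json available as a dev-dependency:

use rustfs_audit_logger::SerializableLevel;
use tracing_core::Level;

fn main() {
    let level = SerializableLevel::from(Level::WARN);
    // The custom Serialize impl emits the bare level string via Level::as_str().
    let json = serde_json::to_string(&level).unwrap();
    assert_eq!(json, "\"WARN\"");
    // Deserialize maps the string back through the match in the impl above.
    let back: SerializableLevel = serde_json::from_str(&json).unwrap();
    assert_eq!(back, level);
}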
266
crates/audit-logger/src/entry/unified.rs
Normal file
@@ -0,0 +1,266 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#![allow(dead_code)]
|
||||
|
||||
use crate::{AuditLogEntry, BaseLogEntry, LogKind, LogRecord, SerializableLevel};
|
||||
use chrono::{DateTime, Utc};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use tracing_core::Level;
|
||||
|
||||
/// Server log entry with structured fields
|
||||
/// ServerLogEntry is used to log structured log entries from the server
|
||||
///
|
||||
/// The `ServerLogEntry` structure contains the following fields:
|
||||
/// - `base` - the base log entry
|
||||
/// - `level` - the log level
|
||||
/// - `source` - the source of the log entry
|
||||
/// - `user_id` - the user ID
|
||||
/// - `fields` - the structured fields of the log entry
|
||||
///
|
||||
/// The `ServerLogEntry` structure contains the following methods:
|
||||
/// - `new` - create a new `ServerLogEntry` with specified level and source
|
||||
/// - `with_base` - set the base log entry
|
||||
/// - `user_id` - set the user ID
|
||||
/// - `fields` - set the fields
|
||||
/// - `add_field` - add a field
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use rustfs_audit_logger::ServerLogEntry;
|
||||
/// use tracing_core::Level;
|
||||
///
|
||||
/// let entry = ServerLogEntry::new(Level::INFO, "test_module".to_string())
|
||||
/// .user_id(Some("user-456".to_string()))
|
||||
/// .add_field("operation".to_string(), "login".to_string());
|
||||
/// ```
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub struct ServerLogEntry {
|
||||
#[serde(flatten)]
|
||||
pub base: BaseLogEntry,
|
||||
|
||||
pub level: SerializableLevel,
|
||||
pub source: String,
|
||||
|
||||
#[serde(rename = "userId", skip_serializing_if = "Option::is_none")]
|
||||
pub user_id: Option<String>,
|
||||
|
||||
#[serde(skip_serializing_if = "Vec::is_empty", default)]
|
||||
pub fields: Vec<(String, String)>,
|
||||
}
|
||||
|
||||
impl ServerLogEntry {
|
||||
/// Create a new ServerLogEntry with specified level and source
|
||||
pub fn new(level: Level, source: String) -> Self {
|
||||
ServerLogEntry {
|
||||
base: BaseLogEntry::new(),
|
||||
level: SerializableLevel(level),
|
||||
source,
|
||||
user_id: None,
|
||||
fields: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the base log entry
|
||||
pub fn with_base(mut self, base: BaseLogEntry) -> Self {
|
||||
self.base = base;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set the user ID
|
||||
pub fn user_id(mut self, user_id: Option<String>) -> Self {
|
||||
self.user_id = user_id;
|
||||
self
|
||||
}
|
||||
|
||||
/// Set fields
|
||||
pub fn fields(mut self, fields: Vec<(String, String)>) -> Self {
|
||||
self.fields = fields;
|
||||
self
|
||||
}
|
||||
|
||||
/// Add a field
|
||||
pub fn add_field(mut self, key: String, value: String) -> Self {
|
||||
self.fields.push((key, value));
|
||||
self
|
||||
}
|
||||
}
|
||||
|
||||
impl LogRecord for ServerLogEntry {
|
||||
fn to_json(&self) -> String {
|
||||
serde_json::to_string(self).unwrap_or_else(|_| String::from("{}"))
|
||||
}
|
||||
|
||||
fn get_timestamp(&self) -> DateTime<Utc> {
|
||||
self.base.timestamp
|
||||
}
|
||||
}
|
||||
|
||||
/// Console log entry structure
|
||||
/// ConsoleLogEntry is used to log console log entries
|
||||
/// The `ConsoleLogEntry` structure contains the following fields:
|
||||
/// - `base` - the base log entry
|
||||
/// - `level` - the log level
|
||||
/// - `console_msg` - the console message
|
||||
/// - `node_name` - the node name
|
||||
/// - `err` - the error message
|
||||
///
|
||||
/// The `ConsoleLogEntry` structure contains the following methods:
|
||||
/// - `new` - create a new `ConsoleLogEntry`
|
||||
/// - `new_with_console_msg` - create a new `ConsoleLogEntry` with console message and node name
|
||||
/// - `with_base` - set the base log entry
|
||||
/// - `set_level` - set the log level
|
||||
/// - `set_node_name` - set the node name
|
||||
/// - `set_console_msg` - set the console message
|
||||
/// - `set_err` - set the error message
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use rustfs_audit_logger::ConsoleLogEntry;
|
||||
///
|
||||
/// let entry = ConsoleLogEntry::new_with_console_msg("Test message".to_string(), "node-123".to_string());
|
||||
/// ```
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ConsoleLogEntry {
|
||||
#[serde(flatten)]
|
||||
pub base: BaseLogEntry,
|
||||
|
||||
pub level: LogKind,
|
||||
pub console_msg: String,
|
||||
pub node_name: String,
|
||||
|
||||
#[serde(skip)]
|
||||
pub err: Option<String>,
|
||||
}
|
||||
|
||||
impl ConsoleLogEntry {
    /// Create a new ConsoleLogEntry
    pub fn new() -> Self {
        ConsoleLogEntry {
            base: BaseLogEntry::new(),
            level: LogKind::Info,
            console_msg: String::new(),
            node_name: String::new(),
            err: None,
        }
    }

    /// Create a new ConsoleLogEntry with a console message and node name
    pub fn new_with_console_msg(console_msg: String, node_name: String) -> Self {
        ConsoleLogEntry {
            base: BaseLogEntry::new(),
            level: LogKind::Info,
            console_msg,
            node_name,
            err: None,
        }
    }

    /// Set the base log entry
    pub fn with_base(mut self, base: BaseLogEntry) -> Self {
        self.base = base;
        self
    }

    /// Set the log level
    pub fn set_level(mut self, level: LogKind) -> Self {
        self.level = level;
        self
    }

    /// Set the node name
    pub fn set_node_name(mut self, node_name: String) -> Self {
        self.node_name = node_name;
        self
    }

    /// Set the console message
    pub fn set_console_msg(mut self, console_msg: String) -> Self {
        self.console_msg = console_msg;
        self
    }

    /// Set the error message
    pub fn set_err(mut self, err: Option<String>) -> Self {
        self.err = err;
        self
    }
}

impl Default for ConsoleLogEntry {
    fn default() -> Self {
        Self::new()
    }
}

impl LogRecord for ConsoleLogEntry {
    fn to_json(&self) -> String {
        serde_json::to_string(self).unwrap_or_else(|_| String::from("{}"))
    }

    fn get_timestamp(&self) -> DateTime<Utc> {
        self.base.timestamp
    }
}

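The `set_*` methods take and return `self`, so console entries chain the same way as server entries. A sketch (values illustrative):

```
use rustfs_audit_logger::{ConsoleLogEntry, LogKind, LogRecord};

fn main() {
    let entry = ConsoleLogEntry::new()
        .set_level(LogKind::Info)
        .set_node_name("node-1".to_string())
        .set_console_msg("server started".to_string())
        .set_err(None);

    // err is #[serde(skip)], so it never appears in the JSON output.
    println!("{}", entry.to_json());
}
```
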
/// Unified log entry type
/// `UnifiedLogEntry` wraps the different log entry types behind a single type.
///
/// The `UnifiedLogEntry` enum contains the following variants:
/// - `Server` - a server log entry
/// - `Audit` - an audit log entry
/// - `Console` - a console log entry
///
/// The `UnifiedLogEntry` enum provides the following methods:
/// - `to_json` - convert the log entry to JSON
/// - `get_timestamp` - get the timestamp of the log entry
///
/// # Example
/// ```
/// use rustfs_audit_logger::{UnifiedLogEntry, ServerLogEntry};
/// use tracing_core::Level;
///
/// let server_entry = ServerLogEntry::new(Level::INFO, "test_module".to_string());
/// let unified = UnifiedLogEntry::Server(server_entry);
/// ```
#[derive(Debug, Clone, Serialize, Deserialize)]
#[serde(tag = "type")]
pub enum UnifiedLogEntry {
    #[serde(rename = "server")]
    Server(ServerLogEntry),

    #[serde(rename = "audit")]
    Audit(Box<AuditLogEntry>),

    #[serde(rename = "console")]
    Console(ConsoleLogEntry),
}

impl LogRecord for UnifiedLogEntry {
    fn to_json(&self) -> String {
        match self {
            UnifiedLogEntry::Server(entry) => entry.to_json(),
            UnifiedLogEntry::Audit(entry) => entry.to_json(),
            UnifiedLogEntry::Console(entry) => entry.to_json(),
        }
    }

    fn get_timestamp(&self) -> DateTime<Utc> {
        match self {
            UnifiedLogEntry::Server(entry) => entry.get_timestamp(),
            UnifiedLogEntry::Audit(entry) => entry.get_timestamp(),
            UnifiedLogEntry::Console(entry) => entry.get_timestamp(),
        }
    }
}

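One subtlety worth noting: `LogRecord::to_json` delegates to the wrapped entry, so it emits the inner entry's JSON without a discriminator; the `"type"` tag from `#[serde(tag = "type")]` only appears when the enum itself is serialized. A sketch contrasting the two:

```
use rustfs_audit_logger::{LogRecord, ServerLogEntry, UnifiedLogEntry};
use tracing_core::Level;

fn main() {
    let unified = UnifiedLogEntry::Server(ServerLogEntry::new(Level::INFO, "gateway".to_string()));

    // Delegates to the inner ServerLogEntry: no "type" field.
    println!("{}", unified.to_json());

    // Serializing the enum goes through the tag: {"type":"server",...}.
    println!("{}", serde_json::to_string(&unified).unwrap_or_default());
}
```
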
crates/audit-logger/src/lib.rs (new file, +8 lines)
@@ -0,0 +1,8 @@
mod entry;
mod logger;

pub use entry::args::Args;
pub use entry::audit::{ApiDetails, AuditLogEntry};
pub use entry::base::BaseLogEntry;
pub use entry::unified::{ConsoleLogEntry, ServerLogEntry, UnifiedLogEntry};
pub use entry::{LogKind, LogRecord, ObjectVersion, SerializableLevel};

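Everything callers need is re-exported flat from the crate root, so downstream code never references the internal `entry` module tree. A sketch:

```
use rustfs_audit_logger::{BaseLogEntry, LogRecord, ServerLogEntry};
use tracing_core::Level;

fn main() {
    // Only crate-root paths are needed; the module layout stays private.
    let entry = ServerLogEntry::new(Level::DEBUG, "lib".to_string()).with_base(BaseLogEntry::new());
    println!("{}", entry.get_timestamp());
}
```
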
@@ -12,12 +12,18 @@
// See the License for the specific language governing permissions and
// limitations under the License.

#![allow(dead_code)]

use crate::components::Setting;
use dioxus::prelude::*;

#[component]
pub fn SettingViews() -> Element {
    rsx! {
        Setting {}
    }
}

// Default value functions
fn default_batch_size() -> usize {
    10
}
fn default_queue_size() -> usize {
    10000
}
fn default_max_retry() -> u32 {
    5
}
fn default_retry_interval() -> std::time::Duration {
    std::time::Duration::from_secs(3)
}

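The `default_*` functions follow the usual serde pattern of free functions referenced by `#[serde(default = "...")]`. The config struct they belong to is not visible in this hunk; the one below is purely hypothetical, to show the wiring:

```
use serde::Deserialize;

// Hypothetical struct for illustration; the struct and field names are not from this diff.
#[derive(Debug, Deserialize)]
pub struct AuditTargetConfig {
    #[serde(default = "default_batch_size")]
    pub batch_size: usize,
    #[serde(default = "default_queue_size")]
    pub queue_size: usize,
    #[serde(default = "default_max_retry")]
    pub max_retry: u32,
    #[serde(default = "default_retry_interval")]
    pub retry_interval: std::time::Duration,
}
```
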
crates/audit-logger/src/logger/dispatch.rs (new file, +13 lines)
@@ -0,0 +1,13 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

crates/audit-logger/src/logger/entry.rs (new file, +108 lines)
@@ -0,0 +1,108 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![allow(dead_code)]

use chrono::{DateTime, Utc};
use serde::Serialize;
use std::collections::HashMap;
use uuid::Uuid;

/// A trait for a log entry that can be serialized and sent
pub trait Loggable: Serialize + Send + Sync + 'static {
    fn to_json(&self) -> Result<String, serde_json::Error> {
        serde_json::to_string(self)
    }
}

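Since `to_json` has a default body, opting a type into `Loggable` is a single empty `impl`, as `LogEntry` and `AuditEntry` below do. A sketch with a hypothetical event type (assumes `Loggable` is in scope):

```
use serde::Serialize;

// Hypothetical type, for illustration only.
#[derive(Serialize, Debug)]
struct HeartbeatEvent {
    node: String,
    healthy: bool,
}

impl Loggable for HeartbeatEvent {}

fn demo() -> Result<(), serde_json::Error> {
    let event = HeartbeatEvent { node: "node-1".into(), healthy: true };
    println!("{}", event.to_json()?); // {"node":"node-1","healthy":true}
    Ok(())
}
```
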
/// Standard log entry
#[derive(Serialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct LogEntry {
    pub deployment_id: String,
    pub level: String,
    pub message: String,
    #[serde(skip_serializing_if = "Option::is_none")]
    pub trace: Option<Trace>,
    pub time: DateTime<Utc>,
    pub request_id: String,
}

impl Loggable for LogEntry {}

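`#[serde(rename_all = "camelCase")]` means the wire format uses `deploymentId`, `requestId`, and so on, and a `None` trace is omitted entirely. A construction sketch (field values illustrative):

```
use chrono::Utc;

fn demo() -> Result<(), serde_json::Error> {
    let entry = LogEntry {
        deployment_id: "dep-1".to_string(),
        level: "INFO".to_string(),
        message: "bucket created".to_string(),
        trace: None, // skipped in the JSON output
        time: Utc::now(),
        request_id: "req-42".to_string(),
    };
    println!("{}", entry.to_json()?);
    Ok(())
}
```
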
/// Audit log entry
#[derive(Serialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct AuditEntry {
    pub version: String,
    pub deployment_id: String,
    pub time: DateTime<Utc>,
    pub trigger: String,
    pub api: ApiDetails,
    pub remote_host: String,
    pub request_id: String,
    pub user_agent: String,
    pub access_key: String,
    #[serde(skip_serializing_if = "HashMap::is_empty")]
    pub tags: HashMap<String, String>,
}

impl Loggable for AuditEntry {}

#[derive(Serialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct Trace {
    pub message: String,
    pub source: Vec<String>,
    #[serde(skip_serializing_if = "HashMap::is_empty")]
    pub variables: HashMap<String, String>,
}

#[derive(Serialize, Debug)]
#[serde(rename_all = "camelCase")]
pub struct ApiDetails {
    pub name: String,
    pub bucket: String,
    pub object: String,
    pub status: String,
    pub status_code: u16,
    pub time_to_first_byte: String,
    pub time_to_response: String,
}

// Helper functions to create entries
impl AuditEntry {
    pub fn new(api_name: &str, bucket: &str, object: &str) -> Self {
        AuditEntry {
            version: "1".to_string(),
            deployment_id: "global-deployment-id".to_string(),
            time: Utc::now(),
            trigger: "incoming".to_string(),
            api: ApiDetails {
                name: api_name.to_string(),
                bucket: bucket.to_string(),
                object: object.to_string(),
                status: "OK".to_string(),
                status_code: 200,
                time_to_first_byte: "10ms".to_string(),
                time_to_response: "50ms".to_string(),
            },
            remote_host: "127.0.0.1".to_string(),
            request_id: Uuid::new_v4().to_string(),
            user_agent: "Rust-Client/1.0".to_string(),
            access_key: "minioadmin".to_string(),
            tags: HashMap::new(),
        }
    }
}

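Note that `new` fills everything except the API triple with fixed placeholders (status `OK`/200, localhost, `minioadmin`, canned timings), so callers are expected to overwrite the fields that matter. A usage sketch:

```
fn demo() -> Result<(), serde_json::Error> {
    let mut entry = AuditEntry::new("PutObject", "my-bucket", "reports/q1.csv");
    entry.tags.insert("tenant".to_string(), "acme".to_string());

    // Empty tag maps are skipped; populated ones serialize as a JSON object.
    println!("{}", entry.to_json()?);
    Ok(())
}
```
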
crates/audit-logger/src/logger/factory.rs (new file, +13 lines)
@@ -0,0 +1,13 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.