Compare commits

207 Commits

1.0.0-alph ... 1.0.0-alph
Commits in range (SHA1; the author and date columns of the table were empty in the capture):

387f4faf78, 0f7093c5f9, 6a5c0055e7, 76288f2501, 3497ccfada, 24e3d3a2ce, ebad748cdc, b7e56ed92c, 4811632751, 374a702f04,
e369e9f481, fe2e4a2274, b391272e94, c55c7a6373, 67f1c371a9, d987686c14, 48a9707110, b89450f54d, e0c99bced4, 130f85a575,
c42fbed3d2, fd539f0f0a, 9aba89a12c, 7b27b29e3a, 7ef014a433, 1b88714d27, b119894425, a37aa664f5, 9b8abbb009, 3e5a48af65,
d5aef963f9, 6c37e1cb2a, e9d7e211b9, 45bbd1e5c4, 57d196771a, 6202f50e15, c5df1f92c2, 4f1770d3fe, d56cee26db, 56fd8132e9,
35daa74430, dc156fb4cd, de905a878c, f3252f989b, 01a2afca9a, a4fe68ad21, c03f86b23c, 5667f324ae, bcd806796f, 612404c47f,
85388262b3, 25a4503285, 526c4d5a61, addc964d56, 371119f733, 021abc0398, 0672b6dd3e, 1372dc2857, 77bc9af109, 91b1c84430,
b667927216, 29795fac51, 2ce7e01f55, 4fefd63a5b, 2a8c46874d, b8b5511b68, bdaee228db, d562620e99, 69b0c828c9, 2bfd1efb9b,
0854e6b921, b907f4e61b, 6ec568459c, ea210d52dc, 3d3c6e4e06, e7d0a8d4b9, 7d3b2b774c, aed8f52423, c49414f6ac, 8e766b90cd,
3409cd8dff, f4973a681c, 4fb3d187d0, 0aff736efd, 2aa7a631ef, b40ef147a9, 1f11a3167b, 18b0134ddf, b48a5fdc94, 168a07a670,
cad005bc21, dc44cde081, 4ccdeb9d2a, 1b48934f47, 25fa645184, 3a3bb880f2, affe27298c, 629db6218e, aa1a3ce4e8, 693db59fcc,
0a7df4ef26, 9dcdc44718, 2a0c618f8b, bebd78fbbb, 3f095e75cb, f7d30da9e0, 823d4b6f79, 051ea7786f, 42b645e355, f27ee96014,
20cd117aa6, fc8931d69f, 0167b2decd, e67980ff3c, 96760bba5a, 2501d7d241, 55b84262b5, ce4252eb1a, db708917b4, 8ddb45627d,
550c225b79, 0d46b550a8, 0693cca1a4, 0d9f9e381a, 6c7aa5a7ae, a27d935925, b4f87a4fee, ee5f94a2e2, 9c3cf554d3, addbfa5487,
5eb461d7b7, 1ea45afcd7, dbd86f6aee, af693f7b3f, 3be5ee6445, 0acc8fe26a, ecf40eb86c, 48ce7055f8, 749f55d688, e5d17f5382,
982cc66c74, 74bf4909c8, 9c956b4445, 4c1fc9317e, a9d77a618f, 38cdc87e93, f5ff93b65e, 6ef6f188e5, ccad91a4a9, 63b79ae151,
9284f64e2a, b9bbae27de, 36e3efb5a5, 04d1c8724d, 4fb4b353f8, 564a02f344, 5b582a4234, 2e9792577f, 2066e0a03b, a4d49a500f,
a8fbced928, 99ca405279, 2e1d1018aa, c57b4be1c7, 238a016242, 2c0c7fafa3, ee4962fe31, 55895d0a10, 676897d389, 5205ff6695,
15cf3ce92b, c0441b2412, 6267872ddb, 618779a89d, b3ec2325ed, 49a5643e76, 657395af8a, 4de62ed77e, 505f493729, be05b704b0,
b33c2fa3cf, 98674c60d4, e39eb86967, 646070ae7a, 2525b66658, 58c5a633e2, aefd894fc2, 1e1d4646a2, b97845fffd, 84f5a4cb48,
2832f0e089, a3b5445824, 363e37c791, 1b0b041530, 7d5fc87002, 13130e9dd4, 1061ce11a3, 9f9a74000d, d1863018df, 166080aac8,
78b2487639, 79f4e81fea, 28da78d544, df2eb9bc6a, 7c20d92fe5, b4c316c662, 411b511937
.cursorrules (49 changes)

```diff
@@ -1,22 +1,39 @@
 # RustFS Project Cursor Rules

-## ⚠️ CRITICAL DEVELOPMENT RULES ⚠️
+## 🚨🚨🚨 CRITICAL DEVELOPMENT RULES - ZERO TOLERANCE 🚨🚨🚨

-### 🚨 NEVER COMMIT DIRECTLY TO MASTER/MAIN BRANCH 🚨
+### ⛔️ ABSOLUTE PROHIBITION: NEVER COMMIT DIRECTLY TO MASTER/MAIN BRANCH ⛔️

 - **This is the most important rule - NEVER modify code directly on main or master branch**
+- **ALL CHANGES MUST GO THROUGH PULL REQUESTS - NO EXCEPTIONS**
 - **Always work on feature branches and use pull requests for all changes**
 - **Any direct commits to master/main branch are strictly forbidden**
 - **Pull requests are the ONLY way to merge code to main branch**
 - Before starting any development, always:
   1. `git checkout main` (switch to main branch)
   2. `git pull` (get latest changes)
   3. `git checkout -b feat/your-feature-name` (create and switch to feature branch)
   4. Make your changes on the feature branch
   5. Commit and push to the feature branch
   6. **Create a pull request for review - THIS IS MANDATORY**
   7. **Wait for PR approval and merge through GitHub interface only**

+**🔥 THIS IS THE MOST CRITICAL RULE - VIOLATION WILL RESULT IN IMMEDIATE REVERSAL 🔥**
+
+- **🚫 ZERO DIRECT COMMITS TO MAIN/MASTER BRANCH - ABSOLUTELY FORBIDDEN**
+- **🚫 ANY DIRECT COMMIT TO MAIN BRANCH MUST BE IMMEDIATELY REVERTED**
+- **🚫 NO EXCEPTIONS FOR HOTFIXES, EMERGENCIES, OR URGENT CHANGES**
+- **🚫 NO EXCEPTIONS FOR SMALL CHANGES, TYPOS, OR DOCUMENTATION UPDATES**
+- **🚫 NO EXCEPTIONS FOR ANYONE - MAINTAINERS, CONTRIBUTORS, OR ADMINS**
+
+### 📋 MANDATORY WORKFLOW - STRICTLY ENFORCED
+
+**EVERY SINGLE CHANGE MUST FOLLOW THIS WORKFLOW:**
+
+1. **Check current branch**: `git branch` (MUST NOT be on main/master)
+2. **Switch to main**: `git checkout main`
+3. **Pull latest**: `git pull origin main`
+4. **Create feature branch**: `git checkout -b feat/your-feature-name`
+5. **Make changes ONLY on feature branch**
+6. **Test thoroughly before committing**
+7. **Commit and push to feature branch**: `git push origin feat/your-feature-name`
+8. **Create Pull Request**: Use `gh pr create` (MANDATORY)
+9. **Wait for PR approval**: NO self-merging allowed
+10. **Merge through GitHub interface**: ONLY after approval
+
+### 🔒 ENFORCEMENT MECHANISMS
+
+- **Branch protection rules**: Main branch is protected
+- **Pre-commit hooks**: Will block direct commits to main
+- **CI/CD checks**: All PRs must pass before merging
+- **Code review requirement**: At least one approval needed
+- **Automated reversal**: Direct commits to main will be automatically reverted

 ## Project Overview

@@ -517,7 +534,7 @@ let results = join_all(futures).await;

 ### 3. Caching Strategy

-- Use `lazy_static` or `OnceCell` for global caching
+- Use `LazyLock` for global caching
 - Implement LRU cache to avoid memory leaks

 ## Testing Guidelines
```
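The server-side pieces of the enforcement list above (branch protection, required reviews) live in GitHub settings, but the "pre-commit hooks" bullet can be approximated locally. A minimal sketch of such a guard; this exact hook is an illustration and is not part of the diff:

```bash
#!/usr/bin/env bash
# Hypothetical local guard mirroring the "never commit to main" rule above.
# Install as .git/hooks/pre-commit and mark it executable (chmod +x).
branch="$(git rev-parse --abbrev-ref HEAD)"

if [ "$branch" = "main" ] || [ "$branch" = "master" ]; then
  echo "Direct commits to $branch are forbidden; create a feature branch:" >&2
  echo "  git checkout -b feat/your-feature-name" >&2
  exit 1
fi
```

The same check in a `pre-push` hook catches the case where a commit was made on a detached or renamed branch and is only later pushed to main.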
(deleted file; the file name did not survive extraction)

```diff
@@ -1,27 +0,0 @@
-FROM ubuntu:22.04
-
-ENV LANG C.UTF-8
-
-RUN sed -i s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g /etc/apt/sources.list
-
-RUN apt-get clean && apt-get update && apt-get install wget git curl unzip gcc pkg-config libssl-dev lld libdbus-1-dev libwayland-dev libwebkit2gtk-4.1-dev libxdo-dev -y
-
-# install protoc
-RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
-    && unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
-    && mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
-    && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3
-
-# install flatc
-RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
-    && unzip Linux.flatc.binary.g++-13.zip \
-    && mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip
-
-# install rust
-RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
-
-COPY .docker/cargo.config.toml /root/.cargo/config.toml
-
-WORKDIR /root/s3-rustfs
-
-CMD [ "bash", "-c", "while true; do sleep 1; done" ]
```
(deleted file; the file name did not survive extraction)

```diff
@@ -1,32 +0,0 @@
-FROM rockylinux:9.3 AS builder
-
-ENV LANG C.UTF-8
-
-RUN sed -e 's|^mirrorlist=|#mirrorlist=|g' \
-    -e 's|^#baseurl=http://dl.rockylinux.org/$contentdir|baseurl=https://mirrors.ustc.edu.cn/rocky|g' \
-    -i.bak \
-    /etc/yum.repos.d/rocky-extras.repo \
-    /etc/yum.repos.d/rocky.repo
-
-RUN dnf makecache
-
-RUN yum install wget git unzip gcc openssl-devel pkgconf-pkg-config -y
-
-# install protoc
-RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
-    && unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
-    && mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
-    && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3
-
-# install flatc
-RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
-    && unzip Linux.flatc.binary.g++-13.zip \
-    && mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc \
-    && rm -rf Linux.flatc.binary.g++-13.zip
-
-# install rust
-RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
-
-COPY .docker/cargo.config.toml /root/.cargo/config.toml
-
-WORKDIR /root/s3-rustfs
```
(deleted file; the file name did not survive extraction)

```diff
@@ -1,25 +0,0 @@
-FROM ubuntu:22.04
-
-ENV LANG C.UTF-8
-
-RUN sed -i s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g /etc/apt/sources.list
-
-RUN apt-get clean && apt-get update && apt-get install wget git curl unzip gcc pkg-config libssl-dev lld libdbus-1-dev libwayland-dev libwebkit2gtk-4.1-dev libxdo-dev -y
-
-# install protoc
-RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
-    && unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
-    && mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
-    && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3
-
-# install flatc
-RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
-    && unzip Linux.flatc.binary.g++-13.zip \
-    && mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip
-
-# install rust
-RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
-
-COPY .docker/cargo.config.toml /root/.cargo/config.toml
-
-WORKDIR /root/s3-rustfs
```
.docker/README.md (261 additions, new file)

````diff
@@ -0,0 +1,261 @@
+# RustFS Docker Images
+
+This directory contains Docker configuration files and supporting infrastructure for building and running RustFS container images.
+
+## 📁 Directory Structure
+
+```
+rustfs/
+├── Dockerfile            # Production image (Alpine + pre-built binaries)
+├── Dockerfile.source     # Development image (Debian + source build)
+├── docker-buildx.sh      # Multi-architecture build script
+├── Makefile              # Build automation with simplified commands
+└── .docker/              # Supporting infrastructure
+    ├── observability/    # Monitoring and observability configs
+    ├── compose/          # Docker Compose configurations
+    ├── mqtt/             # MQTT broker configs
+    └── openobserve-otel/ # OpenObserve + OpenTelemetry configs
+```
+
+## 🎯 Image Variants
+
+### Core Images
+
+| Image | Base OS | Build Method | Size | Use Case |
+|-------|---------|--------------|------|----------|
+| `production` (default) | Alpine 3.18 | GitHub Releases | Smallest | Production deployment |
+| `source` | Debian Bookworm | Source build | Medium | Custom builds with cross-compilation |
+| `dev` | Debian Bookworm | Development tools | Large | Interactive development |
+
+## 🚀 Usage Examples
+
+### Quick Start (Production)
+
+```bash
+# Default production image (Alpine + GitHub Releases)
+docker run -p 9000:9000 rustfs/rustfs:latest
+
+# Specific version
+docker run -p 9000:9000 rustfs/rustfs:1.2.3
+```
+
+### Complete Tag Strategy Examples
+
+```bash
+# Stable Releases
+docker run rustfs/rustfs:1.2.3            # Main version (production)
+docker run rustfs/rustfs:1.2.3-production # Explicit production variant
+docker run rustfs/rustfs:1.2.3-source     # Source build variant
+docker run rustfs/rustfs:latest           # Latest stable
+
+# Prerelease Versions
+docker run rustfs/rustfs:1.3.0-alpha.2    # Specific alpha version
+docker run rustfs/rustfs:alpha            # Latest alpha
+docker run rustfs/rustfs:beta             # Latest beta
+docker run rustfs/rustfs:rc               # Latest release candidate
+
+# Development Versions
+docker run rustfs/rustfs:dev              # Latest main branch development
+docker run rustfs/rustfs:dev-13e4a0b      # Specific commit
+docker run rustfs/rustfs:dev-latest       # Latest development
+docker run rustfs/rustfs:main-latest      # Main branch latest
+```
+
+### Development Environment
+
+```bash
+# Quick setup using Makefile (recommended)
+make docker-dev-local   # Build development image locally
+make dev-env-start      # Start development container
+
+# Manual Docker commands
+docker run -it -v $(pwd):/workspace -p 9000:9000 rustfs/rustfs:latest-dev
+
+# Build from source locally
+docker build -f Dockerfile.source -t rustfs:custom .
+
+# Development with hot reload
+docker-compose up rustfs-dev
+```
+
+## 🏗️ Build Arguments and Scripts
+
+### Using Makefile Commands (Recommended)
+
+The easiest way to build images is through the simplified Makefile commands:
+
+```bash
+# Development images (build from source)
+make docker-dev-local               # Build for local use (single arch)
+make docker-dev                     # Build multi-arch (for CI/CD)
+make docker-dev-push REGISTRY=xxx   # Build and push to registry
+
+# Production images (using pre-built binaries)
+make docker-buildx                          # Build multi-arch production images
+make docker-buildx-push                     # Build and push production images
+make docker-buildx-version VERSION=v1.0.0   # Build specific version
+
+# Development environment
+make dev-env-start     # Start development container
+make dev-env-stop      # Stop development container
+make dev-env-restart   # Restart development container
+
+# Help
+make help-docker       # Show all Docker-related commands
+```
+
+### Using docker-buildx.sh (Advanced)
+
+For direct script usage and advanced scenarios:
+
+```bash
+# Build latest version for all architectures
+./docker-buildx.sh
+
+# Build and push to registry
+./docker-buildx.sh --push
+
+# Build specific version
+./docker-buildx.sh --release v1.2.3
+
+# Build and push specific version
+./docker-buildx.sh --release v1.2.3 --push
+```
+
+### Manual Docker Builds
+
+All images support dynamic version selection:
+
+```bash
+# Build production image with latest release
+docker build --build-arg RELEASE="latest" -t rustfs:latest .
+
+# Build from source with specific target
+docker build -f Dockerfile.source \
+  --build-arg TARGETPLATFORM="linux/amd64" \
+  -t rustfs:source .
+
+# Development build
+docker build -f Dockerfile.source -t rustfs:dev .
+```
+
+## 🔧 Binary Download Sources
+
+### Unified GitHub Releases
+
+The production image downloads from GitHub Releases for reliability and transparency:
+
+- ✅ **production** → GitHub Releases API with automatic latest detection
+- ✅ **Checksum verification** → SHA256SUMS validation when available
+- ✅ **Multi-architecture** → Supports amd64 and arm64
+
+### Source Build
+
+The source variant compiles from source code with advanced features:
+
+- 🔧 **Cross-compilation** → Supports multiple target platforms via `TARGETPLATFORM`
+- ⚡ **Build caching** → sccache for faster compilation
+- 🎯 **Optimized builds** → Release optimizations with LTO and symbol stripping
+
+## 📋 Architecture Support
+
+All variants support multi-architecture builds:
+
+- **linux/amd64** (x86_64)
+- **linux/arm64** (aarch64)
+
+Architecture is automatically detected during build using Docker's `TARGETARCH` build argument.
+
+## 🔐 Security Features
+
+- **Checksum Verification**: Production image verifies SHA256SUMS when available
+- **Non-root User**: All images run as user `rustfs` (UID 1000)
+- **Minimal Runtime**: Production image only includes necessary dependencies
+- **Secure Defaults**: No hardcoded credentials or keys
+
+## 🛠️ Development Workflow
+
+### Quick Start with Makefile (Recommended)
+
+```bash
+# 1. Start development environment
+make dev-env-start
+
+# 2. Your development container is now running with:
+#    - Port 9000 exposed for RustFS
+#    - Port 9010 exposed for admin console
+#    - Current directory mounted as /workspace
+
+# 3. Stop when done
+make dev-env-stop
+```
+
+### Manual Development Setup
+
+```bash
+# Build development image from source
+make docker-dev-local
+
+# Or use traditional Docker commands
+docker build -f Dockerfile.source -t rustfs:dev .
+
+# Run with development tools
+docker run -it -v $(pwd):/workspace -p 9000:9000 rustfs:dev bash
+
+# Or use docker-compose for complex setups
+docker-compose up rustfs-dev
+```
+
+### Common Development Tasks
+
+```bash
+# Build and test locally
+make build              # Build binary natively
+make docker-dev-local   # Build development Docker image
+make test               # Run tests
+make fmt                # Format code
+make clippy             # Run linter
+
+# Get help
+make help          # General help
+make help-docker   # Docker-specific help
+make help-build    # Build-specific help
+```
+
+## 🚀 CI/CD Integration
+
+The project uses GitHub Actions for automated multi-architecture Docker builds:
+
+### Automated Builds
+
+- **Tags**: Automatic builds triggered on version tags (e.g., `v1.2.3`)
+- **Main Branch**: Development builds with `dev-latest` and `main-latest` tags
+- **Pull Requests**: Test builds without registry push
+
+### Build Variants
+
+Each build creates three image variants:
+
+- `rustfs/rustfs:v1.2.3` (production - Alpine-based)
+- `rustfs/rustfs:v1.2.3-source` (source build - Debian-based)
+- `rustfs/rustfs:v1.2.3-dev` (development - Debian-based with tools)
+
+### Manual Builds
+
+Trigger custom builds via GitHub Actions:
+
+```bash
+# Use workflow_dispatch to build specific versions
+# Available options: latest, main-latest, dev-latest, v1.2.3, dev-abc123
+```
+
+## 📦 Supporting Infrastructure
+
+The `.docker/` directory contains supporting configuration files:
+
+- **observability/** - Prometheus, Grafana, OpenTelemetry configs
+- **compose/** - Multi-service Docker Compose setups
+- **mqtt/** - MQTT broker configurations
+- **openobserve-otel/** - Log aggregation and tracing setup
+
+See individual README files in each subdirectory for specific usage instructions.
````
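The "GitHub Releases + SHA256SUMS" flow that this README promises can be exercised outside Docker as well. A minimal sketch, assuming a `rustfs-linux-<arch>.tar.gz` asset name and a published `SHA256SUMS` file; the asset naming is an assumption for illustration, not taken from the actual Dockerfile:

```bash
#!/usr/bin/env bash
# Hypothetical sketch of the download-and-verify flow described above.
set -euo pipefail

VERSION="${1:-latest}"   # e.g. 1.2.3, or "latest"
ARCH="$(uname -m | sed 's/x86_64/amd64/;s/aarch64/arm64/')"
BASE="https://github.com/rustfs/rustfs/releases"

if [ "$VERSION" = "latest" ]; then
  URL="$BASE/latest/download"
else
  URL="$BASE/download/v$VERSION"
fi

# Assumed asset name: rustfs-linux-<arch>.tar.gz plus a SHA256SUMS file.
curl -fsSLO "$URL/rustfs-linux-$ARCH.tar.gz"
curl -fsSLO "$URL/SHA256SUMS" || echo "SHA256SUMS not published for this release"

# Verify the checksum when available, as the README promises.
if [ -f SHA256SUMS ]; then
  grep "rustfs-linux-$ARCH.tar.gz" SHA256SUMS | sha256sum -c -
fi

tar -xzf "rustfs-linux-$ARCH.tar.gz"
```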
(deleted file; the file name did not survive extraction, though the content matches a cargo config such as `.docker/cargo.config.toml`)

```diff
@@ -1,19 +0,0 @@
-# Copyright 2024 RustFS Team
-#
-# Licensed under the Apache License, Version 2.0 (the "License");
-# you may not use this file except in compliance with the License.
-# You may obtain a copy of the License at
-#
-#     http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS,
-# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-# See the License for the specific language governing permissions and
-# limitations under the License.
-
-[source.crates-io]
-registry = "https://github.com/rust-lang/crates.io-index"
-
-[net]
-git-fetch-with-cli = true
```
.docker/compose/README.md (80 additions, new file)

````diff
@@ -0,0 +1,80 @@
+# Docker Compose Configurations
+
+This directory contains specialized Docker Compose configurations for different use cases.
+
+## 📁 Configuration Files
+
+This directory contains specialized Docker Compose configurations and their associated Dockerfiles, keeping related files organized together.
+
+### Main Configuration (Root Directory)
+
+- **`../../docker-compose.yml`** - **Default Production Setup**
+  - Complete production-ready configuration
+  - Includes RustFS server + full observability stack
+  - Supports multiple profiles: `dev`, `observability`, `cache`, `proxy`
+  - Recommended for most users
+
+### Specialized Configurations
+
+- **`docker-compose.cluster.yaml`** - **Distributed Testing**
+  - 4-node cluster setup for testing distributed storage
+  - Uses local compiled binaries
+  - Simulates multi-node environment
+  - Ideal for development and cluster testing
+
+- **`docker-compose.observability.yaml`** - **Observability Focus**
+  - Specialized setup for testing observability features
+  - Includes OpenTelemetry, Jaeger, Prometheus, Loki, Grafana
+  - Uses `../../Dockerfile.source` for builds
+  - Perfect for observability development
+
+## 🚀 Usage Examples
+
+### Production Setup
+
+```bash
+# Start main service
+docker-compose up -d
+
+# Start with development profile
+docker-compose --profile dev up -d
+
+# Start with full observability
+docker-compose --profile observability up -d
+```
+
+### Cluster Testing
+
+```bash
+# Build and start 4-node cluster
+cd .docker/compose
+docker-compose -f docker-compose.cluster.yaml up -d
+
+# Or run directly from project root
+docker-compose -f .docker/compose/docker-compose.cluster.yaml up -d
+```
+
+### Observability Testing
+
+```bash
+# Start observability-focused environment
+cd .docker/compose
+docker-compose -f docker-compose.observability.yaml up -d
+
+# Or run directly from project root
+docker-compose -f .docker/compose/docker-compose.observability.yaml up -d
+```
+
+## 🔧 Configuration Overview
+
+| Configuration | Nodes | Storage | Observability | Use Case |
+|---------------|-------|---------|---------------|----------|
+| **Main** | 1 | Volume mounts | Full stack | Production |
+| **Cluster** | 4 | HTTP endpoints | Basic | Testing |
+| **Observability** | 4 | Local data | Advanced | Development |
+
+## 📝 Notes
+
+- Always ensure you have built the required binaries before starting cluster tests
+- The main configuration is sufficient for most use cases
+- Specialized configurations are for specific testing scenarios
````
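The "built the required binaries" note matters because, as the next diff shows, the cluster compose file bind-mounts a host-built binary into each container rather than baking it into an image. A plausible preparation step matching the `target/x86_64-unknown-linux-gnu` path that the file mounts; the exact cargo invocation is an assumption:

```bash
# Build the release binary the cluster compose file expects to find at
# ../../target/x86_64-unknown-linux-gnu/release/rustfs (run from the project root).
# On x86_64 Linux this is the default target; elsewhere a cross toolchain is needed.
rustup target add x86_64-unknown-linux-gnu
cargo build --release --target x86_64-unknown-linux-gnu

# Then start the 4-node cluster:
docker-compose -f .docker/compose/docker-compose.cluster.yaml up -d
```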
(cluster compose file; the file name did not survive extraction, though the content matches `docker-compose.cluster.yaml`)

```diff
@@ -14,70 +14,69 @@
 services:
   node0:
-    image: rustfs:v1 # 替换为你的镜像名称和标签
+    image: rustfs/rustfs:latest # Replace with your image name and label
     container_name: node0
     hostname: node0
     environment:
       - RUSTFS_VOLUMES=http://node{0...3}:9000/data/rustfs{0...3}
       - RUSTFS_ADDRESS=0.0.0.0:9000
       - RUSTFS_CONSOLE_ENABLE=true
       - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
       - RUSTFS_ACCESS_KEY=rustfsadmin
       - RUSTFS_SECRET_KEY=rustfsadmin
     platform: linux/amd64
     ports:
-      - "9000:9000" # 映射宿主机的 9001 端口到容器的 9000 端口
-      - "8000:9001" # 映射宿主机的 9001 端口到容器的 9000 端口
+      - "9000:9000" # Map port 9001 of the host to port 9000 of the container
     volumes:
-      - ./target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
       # - ./data/node0:/data # Mount the current path to /data inside the container
+      - ../../target/x86_64-unknown-linux-gnu/release/rustfs:/app/rustfs
     command: "/app/rustfs"

   node1:
-    image: rustfs:v1
+    image: rustfs/rustfs:latest
     container_name: node1
     hostname: node1
     environment:
       - RUSTFS_VOLUMES=http://node{0...3}:9000/data/rustfs{0...3}
       - RUSTFS_ADDRESS=0.0.0.0:9000
       - RUSTFS_CONSOLE_ENABLE=true
       - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
       - RUSTFS_ACCESS_KEY=rustfsadmin
       - RUSTFS_SECRET_KEY=rustfsadmin
     platform: linux/amd64
     ports:
-      - "9001:9000" # 映射宿主机的 9002 端口到容器的 9000 端口
+      - "9001:9000" # Map port 9002 of the host to port 9000 of the container
     volumes:
-      - ./target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
       # - ./data/node1:/data
+      - ../../target/x86_64-unknown-linux-gnu/release/rustfs:/app/rustfs
     command: "/app/rustfs"

   node2:
-    image: rustfs:v1
+    image: rustfs/rustfs:latest
     container_name: node2
     hostname: node2
     environment:
       - RUSTFS_VOLUMES=http://node{0...3}:9000/data/rustfs{0...3}
       - RUSTFS_ADDRESS=0.0.0.0:9000
       - RUSTFS_CONSOLE_ENABLE=true
       - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
       - RUSTFS_ACCESS_KEY=rustfsadmin
       - RUSTFS_SECRET_KEY=rustfsadmin
     platform: linux/amd64
     ports:
-      - "9002:9000" # 映射宿主机的 9003 端口到容器的 9000 端口
+      - "9002:9000" # Map port 9003 of the host to port 9000 of the container
     volumes:
-      - ./target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
       # - ./data/node2:/data
+      - ../../target/x86_64-unknown-linux-gnu/release/rustfs:/app/rustfs
     command: "/app/rustfs"

   node3:
-    image: rustfs:v1
+    image: rustfs/rustfs:latest
     container_name: node3
     hostname: node3
     environment:
       - RUSTFS_VOLUMES=http://node{0...3}:9000/data/rustfs{0...3}
       - RUSTFS_ADDRESS=0.0.0.0:9000
       - RUSTFS_CONSOLE_ENABLE=true
       - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
       - RUSTFS_ACCESS_KEY=rustfsadmin
       - RUSTFS_SECRET_KEY=rustfsadmin
     platform: linux/amd64
     ports:
-      - "9003:9000" # 映射宿主机的 9004 端口到容器的 9000 端口
+      - "9003:9000" # Map port 9004 of the host to port 9000 of the container
     volumes:
-      - ./target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
       # - ./data/node3:/data
+      - ../../target/x86_64-unknown-linux-gnu/release/rustfs:/app/rustfs
     command: "/app/rustfs"
```
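The `RUSTFS_VOLUMES=http://node{0...3}:9000/data/rustfs{0...3}` setting appears to enumerate one storage endpoint per node, so the four containers share a single namespace. Once they are up, the cluster can be smoke-tested through any node's mapped host port with a standard S3 client. A minimal sketch, assuming the AWS CLI is installed, that RustFS speaks the S3 API on port 9000, and using the `rustfsadmin` credentials from the compose file:

```bash
# Point any S3-compatible client at one of the mapped host ports (9000-9003).
export AWS_ACCESS_KEY_ID=rustfsadmin
export AWS_SECRET_ACCESS_KEY=rustfsadmin

# Create a bucket and object through node0, then list through node1 to confirm
# the four nodes really share one namespace via RUSTFS_VOLUMES.
aws --endpoint-url http://localhost:9000 s3 mb s3://smoke-test
echo "hello" > hello.txt
aws --endpoint-url http://localhost:9000 s3 cp hello.txt s3://smoke-test/
aws --endpoint-url http://localhost:9001 s3 ls s3://smoke-test/
```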
(observability compose file; the file name did not survive extraction, though the content matches `docker-compose.observability.yaml`)

```diff
@@ -14,11 +14,11 @@
 services:
   otel-collector:
-    image: ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-contrib:0.127.0
+    image: otel/opentelemetry-collector-contrib:0.129.1
     environment:
       - TZ=Asia/Shanghai
     volumes:
-      - ./.docker/observability/otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml
+      - ../../.docker/observability/otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml
     ports:
       - 1888:1888
       - 8888:8888
@@ -30,7 +30,7 @@ services:
     networks:
       - rustfs-network
   jaeger:
-    image: jaegertracing/jaeger:2.6.0
+    image: jaegertracing/jaeger:2.8.0
     environment:
       - TZ=Asia/Shanghai
     ports:
@@ -40,11 +40,11 @@ services:
     networks:
       - rustfs-network
   prometheus:
-    image: prom/prometheus:v3.4.1
+    image: prom/prometheus:v3.4.2
     environment:
       - TZ=Asia/Shanghai
     volumes:
-      - ./.docker/observability/prometheus.yml:/etc/prometheus/prometheus.yml
+      - ../../.docker/observability/prometheus.yml:/etc/prometheus/prometheus.yml
     ports:
       - "9090:9090"
     networks:
@@ -54,16 +54,16 @@ services:
     environment:
       - TZ=Asia/Shanghai
     volumes:
-      - ./.docker/observability/loki-config.yaml:/etc/loki/local-config.yaml
+      - ../../.docker/observability/loki-config.yaml:/etc/loki/local-config.yaml
     ports:
       - "3100:3100"
     command: -config.file=/etc/loki/local-config.yaml
     networks:
       - rustfs-network
   grafana:
-    image: grafana/grafana:12.0.1
+    image: grafana/grafana:12.0.2
     ports:
-      - "3000:3000" # Web UI
+      - "3000:3000"  # Web UI
     environment:
      - GF_SECURITY_ADMIN_PASSWORD=admin
      - TZ=Asia/Shanghai
@@ -72,85 +72,69 @@ services:

   node1:
     build:
-      context: .
-      dockerfile: Dockerfile.obs
+      context: ../..
+      dockerfile: Dockerfile.source
     container_name: node1
     environment:
       - RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
       - RUSTFS_ADDRESS=:9000
       - RUSTFS_CONSOLE_ENABLE=true
       - RUSTFS_CONSOLE_ADDRESS=:9002
-      - RUSTFS_OBS_CONFIG=/etc/observability/config/obs-multi.toml
       - RUSTFS_OBS_ENDPOINT=http://otel-collector:4317
       - RUSTFS_OBS_LOGGER_LEVEL=debug
     platform: linux/amd64
     ports:
-      - "9001:9000" # 映射宿主机的 9001 端口到容器的 9000 端口
+      - "9001:9000" # Map port 9001 of the host to port 9000 of the container
       - "9101:9002"
-    volumes:
-      # - ./data:/root/data # Mount the current path to /root/data inside the container
-      - ./.docker/observability/config:/etc/observability/config
     networks:
       - rustfs-network

   node2:
     build:
-      context: .
-      dockerfile: Dockerfile.obs
+      context: ../..
+      dockerfile: Dockerfile.source
     container_name: node2
     environment:
       - RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
       - RUSTFS_ADDRESS=:9000
       - RUSTFS_CONSOLE_ENABLE=true
       - RUSTFS_CONSOLE_ADDRESS=:9002
-      - RUSTFS_OBS_CONFIG=/etc/observability/config/obs-multi.toml
       - RUSTFS_OBS_ENDPOINT=http://otel-collector:4317
       - RUSTFS_OBS_LOGGER_LEVEL=debug
     platform: linux/amd64
     ports:
-      - "9002:9000" # 映射宿主机的 9002 端口到容器的 9000 端口
+      - "9002:9000" # Map port 9002 of the host to port 9000 of the container
       - "9102:9002"
-    volumes:
-      # - ./data:/root/data
-      - ./.docker/observability/config:/etc/observability/config
     networks:
       - rustfs-network

   node3:
     build:
-      context: .
-      dockerfile: Dockerfile.obs
+      context: ../..
+      dockerfile: Dockerfile.source
     container_name: node3
     environment:
       - RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
       - RUSTFS_ADDRESS=:9000
       - RUSTFS_CONSOLE_ENABLE=true
       - RUSTFS_CONSOLE_ADDRESS=:9002
-      - RUSTFS_OBS_CONFIG=/etc/observability/config/obs-multi.toml
       - RUSTFS_OBS_ENDPOINT=http://otel-collector:4317
       - RUSTFS_OBS_LOGGER_LEVEL=debug
     platform: linux/amd64
     ports:
-      - "9003:9000" # 映射宿主机的 9003 端口到容器的 9000 端口
+      - "9003:9000" # Map port 9003 of the host to port 9000 of the container
       - "9103:9002"
-    volumes:
-      # - ./data:/root/data
-      - ./.docker/observability/config:/etc/observability/config
     networks:
       - rustfs-network

   node4:
     build:
-      context: .
-      dockerfile: Dockerfile.obs
+      context: ../..
+      dockerfile: Dockerfile.source
     container_name: node4
     environment:
       - RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
       - RUSTFS_ADDRESS=:9000
       - RUSTFS_CONSOLE_ENABLE=true
       - RUSTFS_CONSOLE_ADDRESS=:9002
-      - RUSTFS_OBS_CONFIG=/etc/observability/config/obs-multi.toml
       - RUSTFS_OBS_ENDPOINT=http://otel-collector:4317
       - RUSTFS_OBS_LOGGER_LEVEL=debug
     platform: linux/amd64
     ports:
-      - "9004:9000" # 映射宿主机的 9004 端口到容器的 9000 端口
+      - "9004:9000" # Map port 9004 of the host to port 9000 of the container
       - "9104:9002"
-    volumes:
-      # - ./data:/root/data
-      - ./.docker/observability/config:/etc/observability/config
     networks:
       - rustfs-network
```
(openobserve-otel compose file; the file name did not survive extraction)

```diff
@@ -13,24 +13,40 @@
 # limitations under the License.

 services:

+  tempo:
+    image: grafana/tempo:latest
+    #user: root # The container must be started with root to execute chown in the script
+    #entrypoint: [ "/etc/tempo/entrypoint.sh" ] # Specify a custom entry point
+    command: [ "-config.file=/etc/tempo.yaml" ] # This is passed as a parameter to the entry point script
+    volumes:
+      - ./tempo-entrypoint.sh:/etc/tempo/entrypoint.sh # Mount entry point script
+      - ./tempo.yaml:/etc/tempo.yaml
+      - ./tempo-data:/var/tempo
+    ports:
+      - "3200:3200"   # tempo
+      - "24317:4317"  # otlp grpc
+    networks:
+      - otel-network
+
   otel-collector:
-    image: ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-contrib:0.127.0
+    image: otel/opentelemetry-collector-contrib:0.129.1
     environment:
       - TZ=Asia/Shanghai
     volumes:
       - ./otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml
     ports:
-      - 1888:1888
-      - 8888:8888
-      - 8889:8889
-      - 13133:13133
-      - 4317:4317
-      - 4318:4318
-      - 55679:55679
+      - "1888:1888"
+      - "8888:8888"
+      - "8889:8889"
+      - "13133:13133"
+      - "4317:4317"
+      - "4318:4318"
+      - "55679:55679"
     networks:
       - otel-network
   jaeger:
-    image: jaegertracing/jaeger:2.7.0
+    image: jaegertracing/jaeger:2.8.0
     environment:
       - TZ=Asia/Shanghai
     ports:
@@ -40,7 +56,7 @@ services:
     networks:
       - otel-network
   prometheus:
-    image: prom/prometheus:v3.4.1
+    image: prom/prometheus:v3.4.2
     environment:
       - TZ=Asia/Shanghai
     volumes:
@@ -64,6 +80,8 @@ services:
     image: grafana/grafana:12.0.2
     ports:
       - "3000:3000" # Web UI
+    volumes:
+      - ./grafana-datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml
     environment:
       - GF_SECURITY_ADMIN_PASSWORD=admin
       - TZ=Asia/Shanghai
```
.docker/observability/grafana-datasources.yaml (32 additions, new file)

```diff
@@ -0,0 +1,32 @@
+apiVersion: 1
+
+datasources:
+  - name: Prometheus
+    type: prometheus
+    uid: prometheus
+    access: proxy
+    orgId: 1
+    url: http://prometheus:9090
+    basicAuth: false
+    isDefault: false
+    version: 1
+    editable: false
+    jsonData:
+      httpMethod: GET
+  - name: Tempo
+    type: tempo
+    access: proxy
+    orgId: 1
+    url: http://tempo:3200
+    basicAuth: false
+    isDefault: true
+    version: 1
+    editable: false
+    apiVersion: 1
+    uid: tempo
+    jsonData:
+      httpMethod: GET
+      serviceMap:
+        datasourceUid: prometheus
+      streamingEnabled:
+        search: true
```
(otel-collector-config.yaml; the file name did not survive extraction. Chinese comments translated.)

```diff
@@ -33,6 +33,10 @@ exporters:
     endpoint: "jaeger:4317" # Jaeger's OTLP gRPC endpoint
     tls:
       insecure: true # TLS disabled for development; configure certificates in production
+  otlp/tempo: # OTLP exporter for trace data
+    endpoint: "tempo:4317" # Tempo's OTLP gRPC endpoint
+    tls:
+      insecure: true # TLS disabled for development; configure certificates in production
   prometheus: # Prometheus exporter for metric data
     endpoint: "0.0.0.0:8889" # Prometheus scrape endpoint
     namespace: "rustfs" # Metric prefix
@@ -53,7 +57,7 @@ service:
     traces:
       receivers: [ otlp ]
       processors: [ memory_limiter,batch ]
-      exporters: [ otlp/traces ]
+      exporters: [ otlp/traces,otlp/tempo ]
     metrics:
       receivers: [ otlp ]
       processors: [ batch ]
@@ -66,6 +70,12 @@ service:
     logs:
       level: "info" # Collector log level
     metrics:
       address: "0.0.0.0:8888" # Where the Collector exposes its own metrics
       level: "detailed" # Can be basic, normal, detailed
+      readers:
+        - periodic:
+            exporter:
+              otlp:
+                protocol: http/protobuf
+                endpoint: http://otel-collector:4318
```
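With `otlp/tempo` wired into the traces pipeline above, the collector fans each span out to both Jaeger and Tempo. A quick way to confirm the pipeline end to end is to hand the collector one synthetic span over its OTLP/HTTP receiver; a minimal sketch, where the span and resource values are arbitrary test data:

```bash
# Send one synthetic span to the collector's OTLP/HTTP receiver (port 4318,
# mapped to the host in the compose file above). It should then show up in
# both Jaeger and Tempo under the service name "manual-test".
curl -s -X POST http://localhost:4318/v1/traces \
  -H 'Content-Type: application/json' \
  -d '{
    "resourceSpans": [{
      "resource": { "attributes": [{ "key": "service.name",
        "value": { "stringValue": "manual-test" } }] },
      "scopeSpans": [{
        "spans": [{
          "traceId": "5b8efff798038103d269b633813fc60c",
          "spanId": "eee19b7ec3c1b174",
          "name": "smoke-span",
          "kind": 1,
          "startTimeUnixNano": "1700000000000000000",
          "endTimeUnixNano": "1700000001000000000"
        }]
      }]
    }]
  }'
```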
(prometheus.yml; the file name did not survive extraction. Chinese comments translated.)

```diff
@@ -18,8 +18,11 @@ global:
 scrape_configs:
   - job_name: 'otel-collector'
     static_configs:
-      - targets: ['otel-collector:8888'] # Scrape the Collector's own metrics
+      - targets: [ 'otel-collector:8888' ] # Scrape the Collector's own metrics
   - job_name: 'otel-metrics'
     static_configs:
-      - targets: ['otel-collector:8889'] # Application metrics
+      - targets: [ 'otel-collector:8889' ] # Application metrics
+  - job_name: 'tempo'
+    static_configs:
+      - targets: [ 'tempo:3200' ]
```
.docker/observability/tempo-data/.gitignore (1 addition, new file, vendored)

```diff
@@ -0,0 +1 @@
+*
```
.docker/observability/tempo-entrypoint.sh (8 additions, new executable file)

```diff
@@ -0,0 +1,8 @@
+#!/bin/sh
+# Run as root to fix directory permissions
+chown -R 10001:10001 /var/tempo
+
+# Use su-exec (a lightweight sudo/gosu alternative, commonly used in Alpine images)
+# to switch to user 10001 and execute the original command (CMD) passed to the script.
+# "$@" represents all parameters passed to this script, i.e. the command in docker-compose.
+exec su-exec 10001:10001 /tempo "$@"
```
.docker/observability/tempo.yaml (55 additions, new file)

```diff
@@ -0,0 +1,55 @@
+stream_over_http_enabled: true
+server:
+  http_listen_port: 3200
+  log_level: info
+
+query_frontend:
+  search:
+    duration_slo: 5s
+    throughput_bytes_slo: 1.073741824e+09
+    metadata_slo:
+      duration_slo: 5s
+      throughput_bytes_slo: 1.073741824e+09
+  trace_by_id:
+    duration_slo: 5s
+
+distributor:
+  receivers:
+    otlp:
+      protocols:
+        grpc:
+          endpoint: "tempo:4317"
+
+ingester:
+  max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally
+
+compactor:
+  compaction:
+    block_retention: 1h # overall Tempo trace retention. set for demo purposes
+
+metrics_generator:
+  registry:
+    external_labels:
+      source: tempo
+      cluster: docker-compose
+  storage:
+    path: /var/tempo/generator/wal
+    remote_write:
+      - url: http://prometheus:9090/api/v1/write
+        send_exemplars: true
+  traces_storage:
+    path: /var/tempo/generator/traces
+
+storage:
+  trace:
+    backend: local # backend configuration to use
+    wal:
+      path: /var/tempo/wal # where to store the wal locally
+    local:
+      path: /var/tempo/blocks
+
+overrides:
+  defaults:
+    metrics_generator:
+      processors: [ service-graphs, span-metrics, local-blocks ] # enables metrics generator
+      generate_native_histograms: both
```
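One operational detail implied by the `metrics_generator` block above: Tempo pushes the generated service-graph and span metrics to Prometheus via `remote_write`, and Prometheus only accepts such writes when its remote-write receiver is switched on. The flag itself is standard Prometheus; whether the compose file in this diff sets it is not shown:

```bash
# Prometheus must opt in to receiving remote_write data, for example:
prometheus --config.file=/etc/prometheus/prometheus.yml \
  --web.enable-remote-write-receiver
```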
.dockerignore (1 addition, new file)

```diff
@@ -0,0 +1 @@
+target
```
.github/actions/setup/action.yml (15 changes, vendored)

```diff
@@ -60,15 +60,7 @@ runs:
           pkg-config \
           libssl-dev

-    - name: Cache protoc binary
-      id: cache-protoc
-      uses: actions/cache@v4
-      with:
-        path: ~/.local/bin/protoc
-        key: protoc-31.1-${{ runner.os }}-${{ runner.arch }}
-
     - name: Install protoc
-      if: steps.cache-protoc.outputs.cache-hit != 'true'
       uses: arduino/setup-protoc@v3
       with:
         version: "31.1"
@@ -94,6 +86,9 @@ runs:
       if: inputs.install-cross-tools == 'true'
       uses: taiki-e/install-action@cargo-zigbuild

+    - name: Install cargo-nextest
+      uses: taiki-e/install-action@cargo-nextest
+
     - name: Setup Rust cache
       uses: Swatinem/rust-cache@v2
       with:
@@ -101,7 +96,3 @@
         cache-on-failure: true
         shared-key: ${{ inputs.cache-shared-key }}
         save-if: ${{ inputs.cache-save-if }}
-        # Cache workspace dependencies
-        workspaces: |
-          . -> target
-          cli/rustfs-gui -> cli/rustfs-gui/target
```
.github/pull_request_template.md (4 changes, vendored)

```diff
@@ -19,9 +19,7 @@ Pull Request Template for RustFS

 ## Checklist
 - [ ] I have read and followed the [CONTRIBUTING.md](CONTRIBUTING.md) guidelines
-- [ ] Code is formatted with `cargo fmt --all`
-- [ ] Passed `cargo clippy --all-targets --all-features -- -D warnings`
-- [ ] Passed `cargo check --all-targets`
+- [ ] Passed `make pre-commit`
 - [ ] Added/updated necessary tests
 - [ ] Documentation updated (if needed)
 - [ ] CI/CD passed (if applicable)
```
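The checklist now funnels the three former commands through a single target. Presumably `make pre-commit` wraps roughly the checks it replaced; the actual Makefile recipe is not shown in this diff, so this is only a sketch of what contributors can expect it to run:

```bash
# Rough equivalent of the consolidated `make pre-commit` checklist item,
# based on the three commands it replaced above (assumption, not the real recipe):
cargo fmt --all
cargo clippy --all-targets --all-features -- -D warnings
cargo check --all-targets
```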
.github/workflows/audit.yml (8 changes, vendored)

```diff
@@ -16,13 +16,13 @@ name: Security Audit

 on:
   push:
-    branches: [main]
+    branches: [ main ]
     paths:
       - '**/Cargo.toml'
       - '**/Cargo.lock'
       - '.github/workflows/audit.yml'
   pull_request:
-    branches: [main]
+    branches: [ main ]
     paths:
       - '**/Cargo.toml'
       - '**/Cargo.lock'
@@ -41,7 +41,7 @@ jobs:
     timeout-minutes: 15
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5

       - name: Install cargo-audit
         uses: taiki-e/install-action@v2
@@ -69,7 +69,7 @@ jobs:
       pull-requests: write
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5

       - name: Dependency Review
         uses: actions/dependency-review-action@v4
```
.github/workflows/build.yml (951 changes, vendored; diff collapsed)
.github/workflows/ci.yml (24 changes, vendored)

```diff
@@ -16,7 +16,7 @@ name: Continuous Integration

 on:
   push:
-    branches: [main]
+    branches: [ main ]
     paths-ignore:
       - "**.md"
       - "**.txt"
@@ -36,7 +36,7 @@ on:
       - ".github/workflows/audit.yml"
       - ".github/workflows/performance.yml"
   pull_request:
-    branches: [main]
+    branches: [ main ]
     paths-ignore:
       - "**.md"
       - "**.txt"
@@ -81,7 +91,17 @@ jobs:
       cancel_others: true
       paths_ignore: '["*.md", "docs/**", "deploy/**"]'
       # Never skip release events and tag pushes
-      do_not_skip: '["release", "push"]'
+      do_not_skip: '["workflow_dispatch", "schedule", "merge_group", "release", "push"]'

+  typos:
+    name: Typos
+    runs-on: ubuntu-latest
+    steps:
+      - uses: actions/checkout@v5
+      - uses: dtolnay/rust-toolchain@stable
+      - name: Typos check with custom config file
+        uses: crate-ci/typos@master

   test-and-lint:
     name: Test and Lint
@@ -91,7 +101,7 @@ jobs:
     timeout-minutes: 60
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5

       - name: Setup Rust environment
         uses: ./.github/actions/setup
@@ -102,7 +112,9 @@ jobs:
         cache-save-if: ${{ github.ref == 'refs/heads/main' }}

       - name: Run tests
-        run: cargo test --all --exclude e2e_test
+        run: |
+          cargo nextest run --all --exclude e2e_test
+          cargo test --all --doc

       - name: Check code formatting
         run: cargo fmt --all --check
@@ -118,7 +130,7 @@ jobs:
     timeout-minutes: 30
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5

       - name: Setup Rust environment
         uses: ./.github/actions/setup
```
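To reproduce the updated CI test step locally, cargo-nextest has to be installed first (taiki-e/install-action does this in CI, as the setup-action diff above shows). Doc tests still go through plain `cargo test` because nextest does not run them, which is exactly why the workflow adds the second command:

```bash
# One-time install of the test runner.
cargo install cargo-nextest --locked

# Mirror the CI "Run tests" step from the diff above.
cargo nextest run --all --exclude e2e_test
cargo test --all --doc
```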
467
.github/workflows/docker.yml
vendored
@@ -12,42 +12,33 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Docker Images Workflow
|
||||
#
|
||||
# This workflow builds Docker images using pre-built binaries from the build workflow.
|
||||
#
|
||||
# Trigger Types:
|
||||
# 1. workflow_run: Automatically triggered when "Build and Release" workflow completes
|
||||
# 2. workflow_dispatch: Manual trigger for standalone Docker builds
|
||||
#
|
||||
# Key Features:
|
||||
# - Only triggers when Linux builds (x86_64 + aarch64) are successful
|
||||
# - Independent of macOS/Windows build status
|
||||
# - Uses workflow_run event for precise control
|
||||
# - Only builds Docker images for releases and prereleases (development builds are skipped)
|
||||
|
||||
name: Docker Images
|
||||
|
||||
# Permissions needed for workflow_run event and Docker registry access
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
on:
|
||||
push:
|
||||
tags: ["*"]
|
||||
branches: [main]
|
||||
paths-ignore:
|
||||
- "**.md"
|
||||
- "**.txt"
|
||||
- ".github/**"
|
||||
- "docs/**"
|
||||
- "deploy/**"
|
||||
- "scripts/dev_*.sh"
|
||||
- "LICENSE*"
|
||||
- "README*"
|
||||
- "**/*.png"
|
||||
- "**/*.jpg"
|
||||
- "**/*.svg"
|
||||
- ".gitignore"
|
||||
- ".dockerignore"
|
||||
pull_request:
|
||||
branches: [main]
|
||||
paths-ignore:
|
||||
- "**.md"
|
||||
- "**.txt"
|
||||
- ".github/**"
|
||||
- "docs/**"
|
||||
- "deploy/**"
|
||||
- "scripts/dev_*.sh"
|
||||
- "LICENSE*"
|
||||
- "README*"
|
||||
- "**/*.png"
|
||||
- "**/*.jpg"
|
||||
- "**/*.svg"
|
||||
- ".gitignore"
|
||||
- ".dockerignore"
|
||||
# Automatically triggered when build workflow completes
|
||||
workflow_run:
|
||||
workflows: [ "Build and Release" ]
|
||||
types: [ completed ]
|
||||
# Manual trigger with same parameters for consistency
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
push_images:
|
||||
@@ -55,156 +46,372 @@ on:
|
||||
required: false
|
||||
default: true
|
||||
type: boolean
|
||||
version:
|
||||
description: "Version to build (latest for stable release, or specific version like v1.0.0, v1.0.0-alpha1)"
|
||||
required: false
|
||||
default: "latest"
|
||||
type: string
|
||||
force_rebuild:
|
||||
description: "Force rebuild even if binary exists (useful for testing)"
|
||||
required: false
|
||||
default: false
|
||||
type: boolean
|
||||
|
||||
env:
|
||||
DOCKERHUB_USERNAME: rustfs
|
||||
CARGO_TERM_COLOR: always
|
||||
REGISTRY_DOCKERHUB: rustfs/rustfs
|
||||
REGISTRY_GHCR: ghcr.io/${{ github.repository }}
|
||||
DOCKER_PLATFORMS: linux/amd64,linux/arm64
|
||||
|
||||
jobs:
|
||||
# Check if we should build
|
||||
# Check if we should build Docker images
|
||||
build-check:
|
||||
name: Build Check
|
||||
name: Docker Build Check
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
should_build: ${{ steps.check.outputs.should_build }}
|
||||
should_push: ${{ steps.check.outputs.should_push }}
|
||||
build_type: ${{ steps.check.outputs.build_type }}
|
||||
version: ${{ steps.check.outputs.version }}
|
||||
short_sha: ${{ steps.check.outputs.short_sha }}
|
||||
is_prerelease: ${{ steps.check.outputs.is_prerelease }}
|
||||
create_latest: ${{ steps.check.outputs.create_latest }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v5
|
||||
with:
|
||||
fetch-depth: 0
|
||||
# For workflow_run events, checkout the specific commit that triggered the workflow
|
||||
ref: ${{ github.event.workflow_run.head_sha || github.sha }}
|
||||
|
||||
- name: Check build conditions
|
||||
id: check
|
||||
run: |
|
||||
should_build=false
|
||||
should_push=false
|
||||
build_type="none"
|
||||
version=""
|
||||
short_sha=""
|
||||
is_prerelease=false
|
||||
create_latest=false
|
||||
|
||||
# Always build on workflow_dispatch or when changes detected
|
||||
if [[ "${{ github.event_name }}" == "workflow_dispatch" ]] || \
|
||||
[[ "${{ github.event_name }}" == "push" ]] || \
|
||||
[[ "${{ github.event_name }}" == "pull_request" ]]; then
|
||||
if [[ "${{ github.event_name }}" == "workflow_run" ]]; then
|
||||
# Triggered by build workflow completion
|
||||
echo "🔗 Triggered by build workflow completion"
|
||||
|
||||
# Check if the triggering workflow was successful
|
||||
# If the workflow succeeded, it means ALL builds (including Linux x86_64 and aarch64) succeeded
|
||||
if [[ "${{ github.event.workflow_run.conclusion }}" == "success" ]]; then
|
||||
echo "✅ Build workflow succeeded, all builds including Linux are successful"
|
||||
should_build=true
|
||||
should_push=true
|
||||
else
|
||||
echo "❌ Build workflow failed (conclusion: ${{ github.event.workflow_run.conclusion }}), skipping Docker build"
|
||||
should_build=false
|
||||
fi
|
||||
|
||||
# Extract version info from commit message or use commit SHA
|
||||
# Use Git to generate consistent short SHA (ensures uniqueness like build.yml)
|
||||
short_sha=$(git rev-parse --short "${{ github.event.workflow_run.head_sha }}")
|
||||
|
||||
# Determine build type based on triggering workflow event and ref
|
||||
triggering_event="${{ github.event.workflow_run.event }}"
|
||||
head_branch="${{ github.event.workflow_run.head_branch }}"
|
||||
|
||||
echo "🔍 Analyzing triggering workflow:"
|
||||
echo " 📋 Event: $triggering_event"
|
||||
echo " 🌿 Head branch: $head_branch"
|
||||
echo " 📎 Head SHA: ${{ github.event.workflow_run.head_sha }}"
|
||||
|
||||
# Check if this was triggered by a tag push
|
||||
if [[ "$triggering_event" == "push" ]]; then
|
||||
# For tag pushes, head_branch will be like "refs/tags/v1.0.0" or just "v1.0.0"
|
||||
if [[ "$head_branch" == refs/tags/* ]]; then
|
||||
# Extract tag name from refs/tags/TAG_NAME
|
||||
tag_name="${head_branch#refs/tags/}"
|
||||
version="$tag_name"
|
||||
elif [[ "$head_branch" =~ ^v?[0-9]+\.[0-9]+\.[0-9]+ ]]; then
|
||||
# Direct tag name like "v1.0.0" or "1.0.0-alpha.1"
|
||||
version="$head_branch"
|
||||
elif [[ "$head_branch" == "main" ]]; then
|
||||
# Regular branch push to main
|
||||
build_type="development"
|
||||
version="dev-${short_sha}"
|
||||
should_build=false
|
||||
echo "⏭️ Skipping Docker build for development version (main branch push)"
|
||||
else
|
||||
# Other branch push
|
||||
build_type="development"
|
||||
version="dev-${short_sha}"
|
||||
should_build=false
|
||||
echo "⏭️ Skipping Docker build for development version (branch: $head_branch)"
|
||||
fi
|
||||
|
||||
# If we extracted a version (tag), determine release type
|
||||
if [[ -n "$version" ]] && [[ "$version" != "dev-${short_sha}" ]]; then
|
||||
# Remove 'v' prefix if present for consistent version format
|
||||
if [[ "$version" == v* ]]; then
|
||||
version="${version#v}"
|
||||
fi
|
||||
|
||||
if [[ "$version" == *"alpha"* ]] || [[ "$version" == *"beta"* ]] || [[ "$version" == *"rc"* ]]; then
|
||||
build_type="prerelease"
|
||||
is_prerelease=true
|
||||
echo "🧪 Building Docker image for prerelease: $version"
|
||||
else
|
||||
build_type="release"
|
||||
create_latest=true
|
||||
echo "🚀 Building Docker image for release: $version"
|
||||
fi
|
||||
fi
|
||||
else
|
||||
# Non-push events
|
||||
build_type="development"
|
||||
version="dev-${short_sha}"
|
||||
should_build=false
|
||||
echo "⏭️ Skipping Docker build for development version (event: $triggering_event)"
|
||||
fi
|
||||
|
||||
echo "🔄 Build triggered by workflow_run:"
|
||||
echo " 📋 Conclusion: ${{ github.event.workflow_run.conclusion }}"
|
||||
echo " 🌿 Branch: ${{ github.event.workflow_run.head_branch }}"
|
||||
echo " 📎 SHA: ${{ github.event.workflow_run.head_sha }}"
|
||||
echo " 🎯 Event: ${{ github.event.workflow_run.event }}"
|
||||
|
||||
elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
|
||||
# Manual trigger
|
||||
input_version="${{ github.event.inputs.version }}"
|
||||
version="${input_version}"
|
||||
should_push="${{ github.event.inputs.push_images }}"
|
||||
should_build=true
|
||||
fi
|
||||
|
||||
# Push only on main branch, tags, or manual trigger
|
||||
if [[ "${{ github.ref }}" == "refs/heads/main" ]] || \
|
||||
[[ "${{ startsWith(github.ref, 'refs/tags/') }}" == "true" ]] || \
|
||||
[[ "${{ github.event.inputs.push_images }}" == "true" ]]; then
|
||||
should_push=true
|
||||
# Get short SHA
|
||||
short_sha=$(git rev-parse --short HEAD)
|
||||
|
||||
echo "🎯 Manual Docker build triggered:"
|
||||
echo " 📋 Requested version: $input_version"
|
||||
echo " 🔧 Force rebuild: ${{ github.event.inputs.force_rebuild }}"
|
||||
echo " 🚀 Push images: $should_push"
|
||||
|
||||
case "$input_version" in
|
||||
"latest")
|
||||
build_type="release"
|
||||
create_latest=true
|
||||
echo "🚀 Building with latest stable release version"
|
||||
;;
|
||||
# Prerelease versions (must match first, more specific)
|
||||
v*alpha*|v*beta*|v*rc*|*alpha*|*beta*|*rc*)
|
||||
build_type="prerelease"
|
||||
is_prerelease=true
|
||||
echo "🧪 Building with prerelease version: $input_version"
|
||||
;;
|
||||
# Release versions (match after prereleases, more general)
|
||||
v[0-9]*|[0-9]*.*.*)
|
||||
build_type="release"
|
||||
create_latest=true
|
||||
echo "📦 Building with specific release version: $input_version"
|
||||
;;
|
||||
*)
|
||||
# Invalid version for Docker build
|
||||
should_build=false
|
||||
echo "❌ Invalid version for Docker build: $input_version"
|
||||
echo "⚠️ Only release versions (latest, v1.0.0, 1.0.0) and prereleases (v1.0.0-alpha1, 1.0.0-beta2) are supported"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
echo "should_build=$should_build" >> $GITHUB_OUTPUT
|
||||
echo "should_push=$should_push" >> $GITHUB_OUTPUT
|
||||
echo "Build: $should_build, Push: $should_push"
|
||||
echo "build_type=$build_type" >> $GITHUB_OUTPUT
|
||||
echo "version=$version" >> $GITHUB_OUTPUT
|
||||
echo "short_sha=$short_sha" >> $GITHUB_OUTPUT
|
||||
echo "is_prerelease=$is_prerelease" >> $GITHUB_OUTPUT
|
||||
echo "create_latest=$create_latest" >> $GITHUB_OUTPUT
|
||||
|
||||
echo "🐳 Docker Build Summary:"
|
||||
echo " - Should build: $should_build"
|
||||
echo " - Should push: $should_push"
|
||||
echo " - Build type: $build_type"
|
||||
echo " - Version: $version"
|
||||
echo " - Short SHA: $short_sha"
|
||||
echo " - Is prerelease: $is_prerelease"
|
||||
echo " - Create latest: $create_latest"
|
||||
|
||||
# Build multi-arch Docker images
# Strategy: Build images using pre-built binaries from dl.rustfs.com
# Supports both release and dev channel binaries based on build context
# Only runs when should_build is true (which includes workflow success check)
build-docker:
  name: Build Docker Images
  needs: build-check
  if: needs.build-check.outputs.should_build == 'true'
  runs-on: ubuntu-latest
  timeout-minutes: 60
  strategy:
    fail-fast: false
    matrix:
      variant:
        - name: production
          dockerfile: Dockerfile
          platforms: linux/amd64,linux/arm64
        - name: ubuntu
          dockerfile: .docker/Dockerfile.ubuntu22.04
          platforms: linux/amd64,linux/arm64
        - name: alpine
          dockerfile: .docker/Dockerfile.alpine
          platforms: linux/amd64,linux/arm64
  steps:
    - name: Checkout repository
      uses: actions/checkout@v4
      uses: actions/checkout@v5

    - name: Set up Docker Buildx
      uses: docker/setup-buildx-action@v3
    - name: Login to Docker Hub
      uses: docker/login-action@v3
      with:
        username: ${{ env.DOCKERHUB_USERNAME }}
        password: ${{ secrets.DOCKERHUB_TOKEN }}

    # - name: Login to GitHub Container Registry
    #   uses: docker/login-action@v3
    #   with:
    #     registry: ghcr.io
    #     username: ${{ github.actor }}
    #     password: ${{ secrets.GITHUB_TOKEN }}

    - name: Set up QEMU
      uses: docker/setup-qemu-action@v3

    - name: Login to Docker Hub
      if: needs.build-check.outputs.should_push == 'true' && secrets.DOCKERHUB_USERNAME != ''
      uses: docker/login-action@v3
      with:
        username: ${{ secrets.DOCKERHUB_USERNAME }}
        password: ${{ secrets.DOCKERHUB_TOKEN }}
    - name: Set up Docker Buildx
      uses: docker/setup-buildx-action@v3

    - name: Login to GitHub Container Registry
      if: needs.build-check.outputs.should_push == 'true'
      uses: docker/login-action@v3
      with:
        registry: ghcr.io
        username: ${{ github.actor }}
        password: ${{ secrets.GITHUB_TOKEN }}

    - name: Extract metadata
    - name: Extract metadata and generate tags
      id: meta
      uses: docker/metadata-action@v5
      with:
        images: |
          ${{ env.REGISTRY_DOCKERHUB }}
          ${{ env.REGISTRY_GHCR }}
        tags: |
          type=ref,event=branch,suffix=-${{ matrix.variant.name }}
          type=ref,event=pr,suffix=-${{ matrix.variant.name }}
          type=semver,pattern={{version}},suffix=-${{ matrix.variant.name }}
          type=semver,pattern={{major}}.{{minor}},suffix=-${{ matrix.variant.name }}
          type=raw,value=latest,suffix=-${{ matrix.variant.name }},enable={{is_default_branch}}
        flavor: |
          latest=false
      run: |
        BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
        VERSION="${{ needs.build-check.outputs.version }}"
        SHORT_SHA="${{ needs.build-check.outputs.short_sha }}"
        CREATE_LATEST="${{ needs.build-check.outputs.create_latest }}"

        # Convert version format for Dockerfile compatibility
        case "$VERSION" in
          "latest")
            # For stable latest, use RELEASE=latest + release CHANNEL
            DOCKER_RELEASE="latest"
            DOCKER_CHANNEL="release"
            ;;
          v*)
            # For versioned releases (v1.0.0), remove 'v' prefix for Dockerfile
            DOCKER_RELEASE="${VERSION#v}"
            DOCKER_CHANNEL="release"
            ;;
          *)
            # For other versions, pass as-is
            DOCKER_RELEASE="${VERSION}"
            DOCKER_CHANNEL="release"
            ;;
        esac

        echo "docker_release=$DOCKER_RELEASE" >> $GITHUB_OUTPUT
        echo "docker_channel=$DOCKER_CHANNEL" >> $GITHUB_OUTPUT

        echo "🐳 Docker build parameters:"
        echo "  - Original version: $VERSION"
        echo "  - Docker RELEASE: $DOCKER_RELEASE"
        echo "  - Docker CHANNEL: $DOCKER_CHANNEL"

        # Generate tags based on build type
        # Only support release and prerelease builds (no development builds)
        TAGS="${{ env.REGISTRY_DOCKERHUB }}:${VERSION}"

        # Add channel tags for prereleases and latest for stable
        if [[ "$CREATE_LATEST" == "true" ]]; then
          # Stable release
          TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:latest"
        elif [[ "$BUILD_TYPE" == "prerelease" ]]; then
          # Prerelease channel tags (alpha, beta, rc)
          if [[ "$VERSION" == *"alpha"* ]]; then
            CHANNEL="alpha"
          elif [[ "$VERSION" == *"beta"* ]]; then
            CHANNEL="beta"
          elif [[ "$VERSION" == *"rc"* ]]; then
            CHANNEL="rc"
          fi

          if [[ -n "$CHANNEL" ]]; then
            TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:${CHANNEL}"
          fi
        fi

        # Output tags
        echo "tags=$TAGS" >> $GITHUB_OUTPUT

        # Generate labels
        LABELS="org.opencontainers.image.title=RustFS"
        LABELS="$LABELS,org.opencontainers.image.description=RustFS distributed object storage system"
        LABELS="$LABELS,org.opencontainers.image.version=$VERSION"
        LABELS="$LABELS,org.opencontainers.image.revision=${{ github.sha }}"
        LABELS="$LABELS,org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}"
        LABELS="$LABELS,org.opencontainers.image.created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')"
        LABELS="$LABELS,org.opencontainers.image.build-type=$BUILD_TYPE"

        echo "labels=$LABELS" >> $GITHUB_OUTPUT

        echo "🐳 Generated Docker tags:"
        echo "$TAGS" | tr ',' '\n' | sed 's/^/  - /'
        echo "📋 Build type: $BUILD_TYPE"
        echo "🔖 Version: $VERSION"

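For concreteness, a sketch of what the tag-generation branch above would emit; the registry name rustfs/rustfs is an assumed placeholder for REGISTRY_DOCKERHUB:

    # Hypothetical runs of the tag logic above (assuming REGISTRY_DOCKERHUB=rustfs/rustfs):
    #   VERSION=v1.2.0,        CREATE_LATEST=true    -> tags=rustfs/rustfs:v1.2.0,rustfs/rustfs:latest
    #   VERSION=1.0.0-alpha.5, BUILD_TYPE=prerelease -> tags=rustfs/rustfs:1.0.0-alpha.5,rustfs/rustfs:alpha
    #   VERSION=1.0.0-rc1,     BUILD_TYPE=prerelease -> tags=rustfs/rustfs:1.0.0-rc1,rustfs/rustfs:rc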
    - name: Build and push Docker image
      uses: docker/build-push-action@v5
      uses: docker/build-push-action@v6
      with:
        context: .
        file: ${{ matrix.variant.dockerfile }}
        platforms: ${{ matrix.variant.platforms }}
        file: Dockerfile
        platforms: ${{ env.DOCKER_PLATFORMS }}
        push: ${{ needs.build-check.outputs.should_push == 'true' }}
        tags: ${{ steps.meta.outputs.tags }}
        labels: ${{ steps.meta.outputs.labels }}
        cache-from: type=gha,scope=docker-${{ matrix.variant.name }}
        cache-to: type=gha,mode=max,scope=docker-${{ matrix.variant.name }}
        cache-from: |
          type=gha,scope=docker-binary
        cache-to: |
          type=gha,mode=max,scope=docker-binary
        build-args: |
          BUILDTIME=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.created'] }}
          VERSION=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.version'] }}
          REVISION=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }}
          BUILDTIME=$(date -u +'%Y-%m-%dT%H:%M:%SZ')
          VERSION=${{ needs.build-check.outputs.version }}
          BUILD_TYPE=${{ needs.build-check.outputs.build_type }}
          REVISION=${{ github.sha }}
          RELEASE=${{ steps.meta.outputs.docker_release }}
          CHANNEL=${{ steps.meta.outputs.docker_channel }}
          BUILDKIT_INLINE_CACHE=1
        # Enable advanced BuildKit features for better performance
        provenance: false
        sbom: false
        # Add retry mechanism by splitting the build process
        no-cache: false
        pull: true

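Outside CI, roughly the same build can be reproduced by hand; a hedged local approximation of the step above (tags, version, and build args are illustrative):

    # Sketch: local equivalent of the CI build-push step (values illustrative)
    docker buildx build \
      --platform linux/amd64,linux/arm64 \
      --file Dockerfile \
      --build-arg RELEASE=1.2.0 \
      --build-arg CHANNEL=release \
      --tag rustfs/rustfs:1.2.0 \
      --push .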
# Create manifest for main production image
create-manifest:
  name: Create Manifest
  needs: [build-check, build-docker]
  if: needs.build-check.outputs.should_push == 'true' && startsWith(github.ref, 'refs/tags/')
  # Note: Manifest creation is no longer needed as we only build one variant
  # Multi-arch manifests are automatically created by docker/build-push-action

# Docker build summary
docker-summary:
  name: Docker Build Summary
  needs: [ build-check, build-docker ]
  if: always() && needs.build-check.outputs.should_build == 'true'
  runs-on: ubuntu-latest
  steps:
    - name: Login to Docker Hub
      if: secrets.DOCKERHUB_USERNAME != ''
      uses: docker/login-action@v3
      with:
        username: ${{ secrets.DOCKERHUB_USERNAME }}
        password: ${{ secrets.DOCKERHUB_TOKEN }}

    - name: Login to GitHub Container Registry
      uses: docker/login-action@v3
      with:
        registry: ghcr.io
        username: ${{ github.actor }}
        password: ${{ secrets.GITHUB_TOKEN }}

    - name: Create and push manifest
    - name: Docker build completion summary
      run: |
        VERSION=${GITHUB_REF#refs/tags/}
        BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
        VERSION="${{ needs.build-check.outputs.version }}"
        CREATE_LATEST="${{ needs.build-check.outputs.create_latest }}"

        # Create main image tag (without variant suffix)
        if [[ -n "${{ secrets.DOCKERHUB_USERNAME }}" ]]; then
          docker buildx imagetools create \
            -t ${{ env.REGISTRY_DOCKERHUB }}:${VERSION} \
            -t ${{ env.REGISTRY_DOCKERHUB }}:latest \
            ${{ env.REGISTRY_DOCKERHUB }}:${VERSION}-production
        fi
        echo "🐳 Docker build completed successfully!"
        echo "📦 Build type: $BUILD_TYPE"
        echo "🔢 Version: $VERSION"
        echo "🚀 Strategy: Images using pre-built binaries (release channel only)"
        echo ""

        docker buildx imagetools create \
          -t ${{ env.REGISTRY_GHCR }}:${VERSION} \
          -t ${{ env.REGISTRY_GHCR }}:latest \
          ${{ env.REGISTRY_GHCR }}:${VERSION}-production
        case "$BUILD_TYPE" in
          "release")
            echo "🚀 Release Docker image has been built with ${VERSION} tags"
            echo "✅ This image is ready for production use"
            if [[ "$CREATE_LATEST" == "true" ]]; then
              echo "🏷️ Latest tag has been created for stable release"
            fi
            ;;
          "prerelease")
            echo "🧪 Prerelease Docker image has been built with ${VERSION} tags"
            echo "⚠️ This is a prerelease image - use with caution"
            echo "🚫 Latest tag NOT created for prerelease"
            ;;
          *)
            echo "❌ Unexpected build type: $BUILD_TYPE"
            ;;
        esac

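To verify the result of an imagetools create call like the ones above, the pushed manifest list can be inspected; a sketch (image name illustrative):

    # Inspect the multi-arch manifest after pushing (image name illustrative)
    docker buildx imagetools inspect rustfs/rustfs:latest
    # Expect one manifest entry per platform, e.g. linux/amd64 and linux/arm64.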
.github/workflows/issue-translator.yml
@@ -1,8 +1,22 @@
name: 'issue-translator'
on:
  issue_comment:
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: "issue-translator"
on:
  issue_comment:
    types: [created]
  issues:
  issues:
    types: [opened]

jobs:
@@ -14,5 +28,5 @@ jobs:
      IS_MODIFY_TITLE: false
      # not require, default false, . Decide whether to modify the issue title
      # if true, the robot account @Issues-translate-bot must have modification permissions, invite @Issues-translate-bot to your project or use your custom bot.
      CUSTOM_BOT_NOTE: Bot detected the issue body's language is not English, translate it automatically.
      CUSTOM_BOT_NOTE: Bot detected the issue body's language is not English, translate it automatically.
      # not require. Customize the translation robot prefix message.

.github/workflows/performance.yml
@@ -16,12 +16,12 @@ name: Performance Testing

on:
  push:
    branches: [main]
    branches: [ main ]
    paths:
      - '**/*.rs'
      - '**/Cargo.toml'
      - '**/Cargo.lock'
      - '.github/workflows/performance.yml'
      - "**/*.rs"
      - "**/Cargo.toml"
      - "**/Cargo.lock"
      - ".github/workflows/performance.yml"
  workflow_dispatch:
    inputs:
      profile_duration:
@@ -41,7 +41,7 @@ jobs:
    timeout-minutes: 30
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        uses: actions/checkout@v5

      - name: Setup Rust environment
        uses: ./.github/actions/setup
@@ -73,12 +73,11 @@ jobs:
          echo "RUSTFS_VOLUMES=./target/volume/test{0...4}" >> $GITHUB_ENV
          echo "RUST_LOG=rustfs=info,ecstore=info,s3s=info,iam=info,rustfs-obs=info" >> $GITHUB_ENV

      - name: Download static files
      - name: Verify console static assets
        run: |
          curl -L "https://dl.rustfs.com/artifacts/console/rustfs-console-latest.zip" \
            -o tempfile.zip --retry 3 --retry-delay 5
          unzip -o tempfile.zip -d ./rustfs/static
          rm tempfile.zip
          # Console static assets are already embedded in the repository
          echo "Console static assets size: $(du -sh rustfs/static/)"
          echo "Console static assets are embedded via rust-embed, no external download needed"

      - name: Build with profiling optimizations
        run: |
@@ -117,7 +116,7 @@ jobs:
    timeout-minutes: 45
    steps:
      - name: Checkout repository
        uses: actions/checkout@v4
        uses: actions/checkout@v5

      - name: Setup Rust environment
        uses: ./.github/actions/setup

.gitignore
@@ -19,3 +19,5 @@ deploy/certs/*
profile.json
.docker/openobserve-otel/data
*.zst
.secrets
*.go

Cargo.lock (generated; diff not shown)
Cargo.toml
@@ -33,9 +33,12 @@ members = [
    "crates/s3select-api",    # S3 Select API interface
    "crates/s3select-query",  # S3 Select query engine
    "crates/signer",          # client signer
    "crates/checksums",       # client checksums
    "crates/utils",           # Utility functions and helpers
    "crates/workers",         # Worker thread pools and task scheduling
    "crates/zip",             # ZIP file handling and compression
    "crates/ahm",
    "crates/mcp",             # MCP server for S3 operations
]
resolver = "2"

@@ -62,6 +65,7 @@ rustfs-filemeta = { path = "crates/filemeta" }
rustfs-rio = { path = "crates/rio" }

[workspace.dependencies]
rustfs-ahm = { path = "crates/ahm", version = "0.0.5" }
rustfs-s3select-api = { path = "crates/s3select-api", version = "0.0.5" }
rustfs-appauth = { path = "crates/appauth", version = "0.0.5" }
rustfs-common = { path = "crates/common", version = "0.0.5" }
@@ -82,20 +86,22 @@ rustfs-utils = { path = "crates/utils", version = "0.0.5" }
rustfs-rio = { path = "crates/rio", version = "0.0.5" }
rustfs-filemeta = { path = "crates/filemeta", version = "0.0.5" }
rustfs-signer = { path = "crates/signer", version = "0.0.5" }
rustfs-checksums = { path = "crates/checksums", version = "0.0.5" }
rustfs-workers = { path = "crates/workers", version = "0.0.5" }
rustfs-mcp = { path = "crates/mcp", version = "0.0.5" }
aes-gcm = { version = "0.10.3", features = ["std"] }
anyhow = "1.0.99"
arc-swap = "1.7.1"
argon2 = { version = "0.5.3", features = ["std"] }
atoi = "2.0.0"
async-channel = "2.4.0"
async-channel = "2.5.0"
async-recursion = "1.1.1"
async-trait = "0.1.88"
async-compression = { version = "0.4.0" }
async-compression = { version = "0.4.19" }
atomic_enum = "0.3.0"
aws-sdk-s3 = "1.96.0"
aws-config = { version = "1.8.4" }
aws-sdk-s3 = "1.101.0"
axum = "0.8.4"
axum-extra = "0.10.1"
axum-server = { version = "0.7.2", features = ["tls-rustls"] }
base64-simd = "0.8.0"
base64 = "0.22.1"
brotli = "8.0.1"
@@ -103,18 +109,19 @@ bytes = { version = "1.10.1", features = ["serde"] }
bytesize = "2.0.1"
byteorder = "1.5.0"
cfg-if = "1.0.1"
crc-fast = "1.4.0"
chacha20poly1305 = { version = "0.10.1" }
chrono = { version = "0.4.41", features = ["serde"] }
clap = { version = "4.5.40", features = ["derive", "env"] }
const-str = { version = "0.6.2", features = ["std", "proc"] }
crc32fast = "1.4.2"
criterion = { version = "0.5", features = ["html_reports"] }
clap = { version = "4.5.44", features = ["derive", "env"] }
const-str = { version = "0.6.4", features = ["std", "proc"] }
crc32fast = "1.5.0"
criterion = { version = "0.7", features = ["html_reports"] }
dashmap = "6.1.0"
datafusion = "46.0.1"
derive_builder = "0.20.2"
dioxus = { version = "0.6.3", features = ["router"] }
dirs = "6.0.0"
enumset = "1.1.6"
enumset = "1.1.7"
flatbuffers = "25.2.10"
flate2 = "1.1.2"
flexi_logger = { version = "0.31.2", features = ["trc", "dont_minimize_extra_stacks"] }
@@ -122,13 +129,13 @@ form_urlencoded = "1.2.1"
futures = "0.3.31"
futures-core = "0.3.31"
futures-util = "0.3.31"
glob = "0.3.2"
glob = "0.3.3"
hex = "0.4.3"
hex-simd = "0.8.0"
highway = { version = "1.3.0" }
hmac = "0.12.1"
hyper = "1.6.0"
hyper-util = { version = "0.1.14", features = [
hyper-util = { version = "0.1.16", features = [
    "tokio",
    "server-auto",
    "server-graceful",
@@ -139,7 +146,7 @@ http-body = "1.0.1"
humantime = "2.2.0"
ipnetwork = { version = "0.21.1", features = ["serde"] }
jsonwebtoken = "9.3.1"
keyring = { version = "3.6.2", features = [
keyring = { version = "3.6.3", features = [
    "apple-native",
    "windows-native",
    "sync-secret-service",
@@ -179,10 +186,11 @@ blake3 = { version = "1.8.2" }
pbkdf2 = "0.12.2"
percent-encoding = "2.3.1"
pin-project-lite = "0.2.16"
prost = "0.13.5"
quick-xml = "0.37.5"
rand = "0.9.1"
rdkafka = { version = "0.37.0", features = ["tokio"] }
prost = "0.14.1"
pretty_assertions = "1.4.1"
quick-xml = "0.38.1"
rand = "0.9.2"
rdkafka = { version = "0.38.0", features = ["tokio"] }
reed-solomon-simd = { version = "3.0.1" }
regex = { version = "1.11.1" }
reqwest = { version = "0.12.22", default-features = false, features = [
@@ -194,10 +202,11 @@ reqwest = { version = "0.12.22", default-features = false, features = [
    "json",
    "blocking",
] }
rfd = { version = "0.15.3", default-features = false, features = [
rfd = { version = "0.15.4", default-features = false, features = [
    "xdg-portal",
    "tokio",
] }
rmcp = { version = "0.5.0" }
rmp = "0.8.14"
rmp-serde = "1.3.0"
rsa = "0.9.8"
@@ -205,27 +214,30 @@ rumqttc = { version = "0.24" }
rust-embed = { version = "8.7.2" }
rust-i18n = { version = "3.1.5" }
rustfs-rsc = "2025.506.1"
rustls = { version = "0.23.28" }
rustls = { version = "0.23.31" }
rustls-pki-types = "1.12.0"
rustls-pemfile = "2.2.0"
s3s = { version = "0.12.0-minio-preview.1" }
shadow-rs = { version = "1.2.0", default-features = false }
s3s = { version = "0.12.0-minio-preview.3" }
schemars = "1.0.4"
serde = { version = "1.0.219", features = ["derive"] }
serde_json = { version = "1.0.140", features = ["raw_value"] }
serde-xml-rs = "0.8.1"
serde_json = { version = "1.0.142", features = ["raw_value"] }
serde_urlencoded = "0.7.1"
serial_test = "3.2.0"
sha1 = "0.10.6"
sha2 = "0.10.9"
shadow-rs = { version = "1.2.1", default-features = false }
siphasher = "1.0.1"
smallvec = { version = "1.15.1", features = ["serde"] }
snafu = "0.8.6"
snap = "1.1.1"
socket2 = "0.5.10"
strum = { version = "0.27.1", features = ["derive"] }
sysinfo = "0.35.2"
socket2 = "0.6.0"
strum = { version = "0.27.2", features = ["derive"] }
sysinfo = "0.37.0"
sysctl = "0.6.0"
tempfile = "3.20.0"
temp-env = "0.3.6"
test-case = "3.3.1"
thiserror = "2.0.12"
thiserror = "2.0.14"
time = { version = "0.3.41", features = [
    "std",
    "parsing",
@@ -233,25 +245,27 @@ time = { version = "0.3.41", features = [
    "macros",
    "serde",
] }
tokio = { version = "1.46.1", features = ["fs", "rt-multi-thread"] }
tokio = { version = "1.47.1", features = ["fs", "rt-multi-thread"] }
tokio-rustls = { version = "0.26.2", default-features = false }
tokio-stream = { version = "0.1.17" }
tokio-tar = "0.3.1"
tokio-util = { version = "0.7.15", features = ["io", "compat"] }
tonic = { version = "0.13.1", features = ["gzip"] }
tonic-build = { version = "0.13.1" }
tokio-test = "0.4.4"
tokio-util = { version = "0.7.16", features = ["io", "compat"] }
tonic = { version = "0.14.1", features = ["gzip"] }
tonic-prost = { version = "0.14.1" }
tonic-prost-build = { version = "0.14.1" }
tower = { version = "0.5.2", features = ["timeout"] }
tower-http = { version = "0.6.6", features = ["cors"] }
tracing = "0.1.41"
tracing-appender = "0.2.3"
tracing-core = "0.1.34"
tracing-error = "0.2.1"
tracing-subscriber = { version = "0.3.19", features = ["env-filter", "time"] }
tracing-appender = "0.2.3"
tracing-opentelemetry = "0.31.0"
tracing-subscriber = { version = "0.3.19", features = ["env-filter", "time"] }
transform-stream = "0.3.1"
url = "2.5.4"
urlencoding = "2.1.3"
uuid = { version = "1.17.0", features = [
uuid = { version = "1.18.0", features = [
    "v4",
    "fast-rng",
    "macro-diagnostics",
@@ -262,6 +276,10 @@ xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }
zip = "2.4.2"
zstd = "0.13.3"


[workspace.metadata.cargo-shear]
ignored = ["rustfs", "rust-i18n", "rustfs-mcp"]

[profile.wasm-dev]
inherits = "dev"
opt-level = 1

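With this many version bumps at once (tonic 0.13 to 0.14, prost 0.13 to 0.14, socket2 0.5 to 0.6, and so on), it can be worth confirming that the dependency graph does not end up with duplicate crate versions; a hedged check:

    # Sketch: sanity-check the bumped workspace dependencies
    cargo update --dry-run   # preview what the lockfile change would pull in
    cargo tree -d            # list any duplicate crate versions after the bump
    cargo tree -i tonic      # confirm which crates pull in the new tonic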
Dockerfile
@@ -1,51 +1,101 @@
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Multi-stage build for RustFS production image

FROM alpine:3.18 AS builder
# Build stage: Download and extract RustFS binary
FROM alpine:3.22 AS build

RUN apk add -U --no-cache \
    ca-certificates \
    curl \
    bash \
    unzip
# Build arguments for platform and release
ARG TARGETARCH
ARG RELEASE=latest

# Install minimal dependencies for downloading and extracting
RUN apk add --no-cache ca-certificates curl unzip

RUN curl -Lo /tmp/rustfs.zip https://dl.rustfs.com/artifacts/rustfs/rustfs-release-x86_64-unknown-linux-musl.latest.zip && \
    unzip -o /tmp/rustfs.zip -d /tmp && \
    tar -xzf /tmp/rustfs-x86_64-unknown-linux-musl.tar.gz -C /tmp && \
    mv /tmp/rustfs-x86_64-unknown-linux-musl/bin/rustfs /rustfs && \
    chmod +x /rustfs && \
    rm -rf /tmp/*
# Create build directory
WORKDIR /build

FROM alpine:3.18
# Set architecture-specific variables
RUN if [ "$TARGETARCH" = "amd64" ]; then \
        echo "x86_64-musl" > /tmp/arch; \
    elif [ "$TARGETARCH" = "arm64" ]; then \
        echo "aarch64-musl" > /tmp/arch; \
    else \
        echo "unsupported" > /tmp/arch; \
    fi
RUN ARCH=$(cat /tmp/arch) && \
    if [ "$ARCH" = "unsupported" ]; then \
        echo "Unsupported architecture: $TARGETARCH" && exit 1; \
    fi && \
    if [ "${RELEASE}" = "latest" ]; then \
        VERSION="latest"; \
    else \
        VERSION="v${RELEASE#v}"; \
    fi && \
    BASE_URL="https://dl.rustfs.com/artifacts/rustfs/release" && \
    PACKAGE_NAME="rustfs-linux-${ARCH}-${VERSION}.zip" && \
    DOWNLOAD_URL="${BASE_URL}/${PACKAGE_NAME}" && \
    echo "Downloading ${PACKAGE_NAME} from ${DOWNLOAD_URL}" >&2 && \
    curl -f -L "${DOWNLOAD_URL}" -o rustfs.zip && \
    unzip rustfs.zip -d /build && \
    chmod +x /build/rustfs && \
    rm rustfs.zip || { echo "Failed to download or extract ${PACKAGE_NAME}" >&2; exit 1; }

RUN apk add -U --no-cache \
    ca-certificates \
    bash
# Runtime stage: Configure runtime environment
FROM alpine:3.22.1

COPY --from=builder /rustfs /usr/local/bin/rustfs
# Build arguments and labels
ARG RELEASE=latest
ARG BUILD_DATE
ARG VCS_REF

ENV RUSTFS_ACCESS_KEY=rustfsadmin \
LABEL name="RustFS" \
    vendor="RustFS Team" \
    maintainer="RustFS Team <dev@rustfs.com>" \
    version="${RELEASE}" \
    release="${RELEASE}" \
    build-date="${BUILD_DATE}" \
    vcs-ref="${VCS_REF}" \
    summary="High-performance distributed object storage system compatible with S3 API" \
    description="RustFS is a distributed object storage system written in Rust, supporting erasure coding, multi-tenant management, and observability." \
    url="https://rustfs.com" \
    license="Apache-2.0"

# Install runtime dependencies
RUN echo "https://dl-cdn.alpinelinux.org/alpine/v3.20/community" >> /etc/apk/repositories && \
    apk update && \
    apk add --no-cache ca-certificates bash gosu coreutils shadow && \
    addgroup -g 1000 rustfs && \
    adduser -u 1000 -G rustfs -s /bin/bash -D rustfs

# Copy CA certificates and RustFS binary from build stage
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
COPY --from=build /build/rustfs /usr/bin/rustfs

# Copy entry point script
COPY entrypoint.sh /entrypoint.sh

# Set permissions
RUN chmod +x /usr/bin/rustfs /entrypoint.sh && \
    mkdir -p /data /logs && \
    chown rustfs:rustfs /data /logs && \
    chmod 700 /data /logs

# Environment variables (credentials should be set via environment or secrets)
ENV RUSTFS_ADDRESS=:9000 \
    RUSTFS_ACCESS_KEY=rustfsadmin \
    RUSTFS_SECRET_KEY=rustfsadmin \
    RUSTFS_ADDRESS=":9000" \
    RUSTFS_CONSOLE_ADDRESS=":9001" \
    RUSTFS_CONSOLE_ENABLE=true \
    RUST_LOG=warn
    RUSTFS_VOLUMES=/data \
    RUST_LOG=warn \
    RUSTFS_OBS_LOG_DIRECTORY=/logs \
    RUSTFS_SINKS_FILE_PATH=/logs

EXPOSE 9000 9001
# Expose port
EXPOSE 9000

RUN mkdir -p /data
VOLUME /data
# Volumes for data and logs
VOLUME ["/data", "/logs"]

# Set entry point
ENTRYPOINT ["/entrypoint.sh"]
CMD ["/usr/bin/rustfs"]

CMD ["rustfs", "/data"]

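Given the ports, volumes, and defaults declared above, a typical local run of the resulting image might look like this (the tag is a placeholder, and the default rustfsadmin credentials should always be overridden in a real deployment):

    # Sketch: run the production image built from this Dockerfile
    docker run -d --name rustfs \
      -p 9000:9000 -p 9001:9001 \
      -v "$(pwd)/data:/data" -v "$(pwd)/logs:/logs" \
      -e RUSTFS_ACCESS_KEY=changeme -e RUSTFS_SECRET_KEY=changeme \
      rustfs/rustfs:latest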
@@ -1,21 +0,0 @@
FROM ubuntu:latest

# RUN apk add --no-cache <package-name>
# If rustfs has dependencies, they can be added here, e.g.:
# RUN apk add --no-cache openssl
# RUN apk add --no-cache bash  # Install Bash

WORKDIR /app

# Create directories matching RUSTFS_VOLUMES
RUN mkdir -p /root/data/target/volume/test1 /root/data/target/volume/test2 /root/data/target/volume/test3 /root/data/target/volume/test4

# COPY ./target/x86_64-unknown-linux-musl/release/rustfs /app/rustfs
COPY ./target/x86_64-unknown-linux-gnu/release/rustfs /app/rustfs

RUN chmod +x /app/rustfs

EXPOSE 9000
EXPOSE 9002

CMD ["/app/rustfs"]
@@ -1,10 +1,26 @@
# Multi-stage Dockerfile for RustFS
# Multi-stage Dockerfile for RustFS - LOCAL DEVELOPMENT ONLY
#
# ⚠️ IMPORTANT: This Dockerfile is for local development and testing only.
# ⚠️ It builds RustFS from source code and is NOT used in CI/CD pipelines.
# ⚠️ CI/CD pipeline uses pre-built binaries from Dockerfile instead.
#
# Usage for local development:
#   docker build -f Dockerfile.source -t rustfs:dev-local .
#   docker run --rm -p 9000:9000 rustfs:dev-local
#
# Supports cross-compilation for amd64 and arm64 architectures
ARG TARGETPLATFORM
ARG BUILDPLATFORM

# Build stage
FROM --platform=$BUILDPLATFORM rust:1.85-bookworm AS builder
FROM --platform=$BUILDPLATFORM rust:1.88-bookworm AS builder

# Re-declare build arguments after FROM (required for multi-stage builds)
ARG TARGETPLATFORM
ARG BUILDPLATFORM

# Debug: Print platform information
RUN echo "🐳 Build Info: BUILDPLATFORM=$BUILDPLATFORM, TARGETPLATFORM=$TARGETPLATFORM"

# Install required build dependencies
RUN apt-get update && apt-get install -y \
@@ -18,6 +34,8 @@ RUN apt-get update && apt-get install -y \
    lld \
    && rm -rf /var/lib/apt/lists/*

# Note: sccache removed for simpler builds

# Install cross-compilation tools for ARM64
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
    apt-get update && \
@@ -37,10 +55,13 @@ RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.
    && mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip

# Set up Rust targets based on platform
RUN case "$TARGETPLATFORM" in \
RUN set -e && \
    PLATFORM="${TARGETPLATFORM:-linux/amd64}" && \
    echo "🎯 Setting up Rust target for platform: $PLATFORM" && \
    case "$PLATFORM" in \
        "linux/amd64") rustup target add x86_64-unknown-linux-gnu ;; \
        "linux/arm64") rustup target add aarch64-unknown-linux-gnu ;; \
        *) echo "Unsupported platform: $TARGETPLATFORM" && exit 1 ;; \
        *) echo "❌ Unsupported platform: $PLATFORM" && exit 1 ;; \
    esac

# Set up environment for cross-compilation
@@ -50,37 +71,37 @@ ENV CXX_aarch64_unknown_linux_gnu=aarch64-linux-gnu-g++

WORKDIR /usr/src/rustfs

# Copy Cargo files for dependency caching
COPY Cargo.toml Cargo.lock ./
COPY */Cargo.toml ./*/

# Create dummy main.rs files for dependency compilation
RUN find . -name "Cargo.toml" -not -path "./Cargo.toml" | \
    xargs -I {} dirname {} | \
    xargs -I {} sh -c 'mkdir -p {}/src && echo "fn main() {}" > {}/src/main.rs'

# Build dependencies only (cache layer)
RUN case "$TARGETPLATFORM" in \
    "linux/amd64") cargo build --release --target x86_64-unknown-linux-gnu ;; \
    "linux/arm64") cargo build --release --target aarch64-unknown-linux-gnu ;; \
    esac

# Copy source code
# Copy all source code
COPY . .

# Configure cargo for optimized builds
ENV CARGO_NET_GIT_FETCH_WITH_CLI=true \
    CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse \
    CARGO_INCREMENTAL=0 \
    CARGO_PROFILE_RELEASE_DEBUG=false \
    CARGO_PROFILE_RELEASE_SPLIT_DEBUGINFO=off \
    CARGO_PROFILE_RELEASE_STRIP=symbols

# Generate protobuf code
RUN cargo run --bin gproto

# Build the actual application
# Build the actual application with optimizations
RUN case "$TARGETPLATFORM" in \
    "linux/amd64") \
        cargo build --release --target x86_64-unknown-linux-gnu --bin rustfs && \
        echo "🔨 Building for amd64..." && \
        rustup target add x86_64-unknown-linux-gnu && \
        cargo build --release --target x86_64-unknown-linux-gnu --bin rustfs -j $(nproc) && \
        cp target/x86_64-unknown-linux-gnu/release/rustfs /usr/local/bin/rustfs \
        ;; \
    "linux/arm64") \
        cargo build --release --target aarch64-unknown-linux-gnu --bin rustfs && \
        echo "🔨 Building for arm64..." && \
        rustup target add aarch64-unknown-linux-gnu && \
        cargo build --release --target aarch64-unknown-linux-gnu --bin rustfs -j $(nproc) && \
        cp target/aarch64-unknown-linux-gnu/release/rustfs /usr/local/bin/rustfs \
        ;; \
    *) \
        echo "❌ Unsupported platform: $TARGETPLATFORM" && exit 1 \
        ;; \
    esac

# Runtime stage - Ubuntu minimal for better compatibility
@@ -91,6 +112,8 @@ RUN apt-get update && apt-get install -y \
    ca-certificates \
    tzdata \
    wget \
    coreutils \
    passwd \
    && rm -rf /var/lib/apt/lists/*

# Create rustfs user and group
@@ -107,15 +130,27 @@ RUN mkdir -p /data/rustfs{0,1,2,3} && \
COPY --from=builder /usr/local/bin/rustfs /app/rustfs
RUN chmod +x /app/rustfs && chown rustfs:rustfs /app/rustfs

# Copy entrypoint script
COPY entrypoint.sh /entrypoint.sh
RUN chmod +x /entrypoint.sh

# Switch to non-root user
USER rustfs

# Expose ports
EXPOSE 9000 9001
EXPOSE 9000

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --no-verbose --tries=1 --spider http://localhost:9000/health || exit 1
# Environment variables
ENV RUSTFS_ACCESS_KEY=rustfsadmin \
    RUSTFS_SECRET_KEY=rustfsadmin \
    RUSTFS_ADDRESS=":9000" \
    RUSTFS_CONSOLE_ENABLE=true \
    RUSTFS_VOLUMES=/data \
    RUST_LOG=warn

# Set default command
# Volume for data
VOLUME ["/data"]

# Set entrypoint and default command
ENTRYPOINT ["/entrypoint.sh"]
CMD ["/app/rustfs"]

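The TARGETPLATFORM/BUILDPLATFORM pattern above is what lets a single Dockerfile serve both architectures; a hedged cross-build invocation (tag name illustrative):

    # Sketch: cross-build the dev image for arm64 from an amd64 host
    docker buildx build \
      --platform linux/arm64 \
      -f Dockerfile.source \
      -t rustfs:dev-arm64 \
      --load .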
Justfile
@@ -0,0 +1,258 @@
DOCKER_CLI := env("DOCKER_CLI", "docker")
IMAGE_NAME := env("IMAGE_NAME", "rustfs:v1.0.0")
DOCKERFILE_SOURCE := env("DOCKERFILE_SOURCE", "Dockerfile.source")
DOCKERFILE_PRODUCTION := env("DOCKERFILE_PRODUCTION", "Dockerfile")
CONTAINER_NAME := env("CONTAINER_NAME", "rustfs-dev")

[group("📒 Help")]
[private]
default:
    @just --list --list-heading $'🦀 RustFS justfile manual page:\n'

[doc("show help")]
[group("📒 Help")]
help: default

[doc("run `cargo fmt` to format code")]
[group("👆 Code Quality")]
fmt:
    @echo "🔧 Formatting code..."
    cargo fmt --all

[doc("run `cargo fmt` in check mode")]
[group("👆 Code Quality")]
fmt-check:
    @echo "📝 Checking code formatting..."
    cargo fmt --all --check

[doc("run `cargo clippy`")]
[group("👆 Code Quality")]
clippy:
    @echo "🔍 Running clippy checks..."
    cargo clippy --all-targets --all-features --fix --allow-dirty -- -D warnings

[doc("run `cargo check`")]
[group("👆 Code Quality")]
check:
    @echo "🔨 Running compilation check..."
    cargo check --all-targets

[doc("run `cargo test`")]
[group("👆 Code Quality")]
test:
    @echo "🧪 Running tests..."
    cargo nextest run --all --exclude e2e_test
    cargo test --all --doc

[doc("run `fmt` `clippy` `check` `test` at once")]
[group("👆 Code Quality")]
pre-commit: fmt clippy check test
    @echo "✅ All pre-commit checks passed!"

[group("🤔 Git")]
setup-hooks:
    @echo "🔧 Setting up git hooks..."
    chmod +x .git/hooks/pre-commit
    @echo "✅ Git hooks setup complete!"

[doc("use `release` mode for building")]
[group("🔨 Build")]
build:
    @echo "🔨 Building RustFS using build-rustfs.sh script..."
    ./build-rustfs.sh

[doc("use `debug` mode for building")]
[group("🔨 Build")]
build-dev:
    @echo "🔨 Building RustFS in development mode..."
    ./build-rustfs.sh --dev

[group("🔨 Build")]
[private]
build-target target:
    @echo "🔨 Building rustfs for {{ target }}..."
    @echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
    ./build-rustfs.sh --platform {{ target }}

[doc("use `x86_64-unknown-linux-musl` target for building")]
[group("🔨 Build")]
build-musl: (build-target "x86_64-unknown-linux-musl")

[doc("use `x86_64-unknown-linux-gnu` target for building")]
[group("🔨 Build")]
build-gnu: (build-target "x86_64-unknown-linux-gnu")

[doc("use `aarch64-unknown-linux-musl` target for building")]
[group("🔨 Build")]
build-musl-arm64: (build-target "aarch64-unknown-linux-musl")

[doc("use `aarch64-unknown-linux-gnu` target for building")]
[group("🔨 Build")]
build-gnu-arm64: (build-target "aarch64-unknown-linux-gnu")

[doc("build and deploy to server")]
[group("🔨 Build")]
deploy-dev ip: build-musl
    @echo "🚀 Deploying to dev server: {{ ip }}"
    ./scripts/dev_deploy.sh {{ ip }}

[group("🔨 Build")]
[private]
build-cross-all-pre:
    @echo "🔧 Building all target architectures..."
    @echo "💡 On macOS/Windows, use 'make docker-dev' for reliable multi-arch builds"
    @echo "🔨 Generating protobuf code..."
    -cargo run --bin gproto

[doc("build all targets at once")]
[group("🔨 Build")]
build-cross-all: build-cross-all-pre && build-gnu build-gnu-arm64 build-musl build-musl-arm64

# ========================================================================================
# Docker Multi-Architecture Builds (Primary Methods)
# ========================================================================================

[doc("build an image and run it")]
[group("🐳 Build Image")]
build-docker os="rockylinux9.3" cli=(DOCKER_CLI) dockerfile=(DOCKERFILE_SOURCE):
    #!/usr/bin/env bash
    SOURCE_BUILD_IMAGE_NAME="rustfs/rustfs-{{ os }}:v1"
    SOURCE_BUILD_CONTAINER_NAME="rustfs-{{ os }}-build"
    BUILD_CMD="/root/.cargo/bin/cargo build --release --bin rustfs --target-dir /root/s3-rustfs/target/{{ os }}"
    echo "🐳 Building RustFS using Docker ({{ os }})..."
    {{ cli }} buildx build -t $SOURCE_BUILD_IMAGE_NAME -f {{ dockerfile }} .
    {{ cli }} run --rm --name $SOURCE_BUILD_CONTAINER_NAME -v $(pwd):/root/s3-rustfs -it $SOURCE_BUILD_IMAGE_NAME $BUILD_CMD

[doc("build an image")]
[group("🐳 Build Image")]
docker-buildx:
    @echo "🏗️ Building multi-architecture production Docker images with buildx..."
    ./docker-buildx.sh

[doc("build an image and push it")]
[group("🐳 Build Image")]
docker-buildx-push:
    @echo "🚀 Building and pushing multi-architecture production Docker images with buildx..."
    ./docker-buildx.sh --push

[doc("build an image with a version")]
[group("🐳 Build Image")]
docker-buildx-version version:
    @echo "🏗️ Building multi-architecture production Docker images (version: {{ version }})..."
    ./docker-buildx.sh --release {{ version }}

[doc("build an image with a version and push it")]
[group("🐳 Build Image")]
docker-buildx-push-version version:
    @echo "🚀 Building and pushing multi-architecture production Docker images (version: {{ version }})..."
    ./docker-buildx.sh --release {{ version }} --push

[doc("build an image with a version and push it to registry")]
[group("🐳 Build Image")]
docker-dev-push registry cli=(DOCKER_CLI) source=(DOCKERFILE_SOURCE):
    @echo "🚀 Building and pushing multi-architecture development Docker images..."
    @echo "💡 push to registry: {{ registry }}"
    {{ cli }} buildx build \
        --platform linux/amd64,linux/arm64 \
        --file {{ source }} \
        --tag {{ registry }}/rustfs:source-latest \
        --tag {{ registry }}/rustfs:dev-latest \
        --push \
        .

# Local production builds using direct buildx (alternative to docker-buildx.sh)

[group("🐳 Build Image")]
docker-buildx-production-local cli=(DOCKER_CLI) source=(DOCKERFILE_PRODUCTION):
    @echo "🏗️ Building single-architecture production Docker image locally..."
    @echo "💡 Alternative to docker-buildx.sh for local testing"
    {{ cli }} buildx build \
        --file {{ source }} \
        --tag rustfs:production-latest \
        --tag rustfs:latest \
        --load \
        --build-arg RELEASE=latest \
        .

# Development/Source builds using direct buildx commands

[group("🐳 Build Image")]
docker-dev cli=(DOCKER_CLI) source=(DOCKERFILE_SOURCE):
    @echo "🏗️ Building multi-architecture development Docker images with buildx..."
    @echo "💡 This builds from source code and is intended for local development and testing"
    @echo "⚠️ Multi-arch images cannot be loaded locally, use docker-dev-push to push to registry"
    {{ cli }} buildx build \
        --platform linux/amd64,linux/arm64 \
        --file {{ source }} \
        --tag rustfs:source-latest \
        --tag rustfs:dev-latest \
        .

[group("🐳 Build Image")]
docker-dev-local cli=(DOCKER_CLI) source=(DOCKERFILE_SOURCE):
    @echo "🏗️ Building single-architecture development Docker image for local use..."
    @echo "💡 This builds from source code for the current platform and loads locally"
    {{ cli }} buildx build \
        --file {{ source }} \
        --tag rustfs:source-latest \
        --tag rustfs:dev-latest \
        --load \
        .

# ========================================================================================
# Single Architecture Docker Builds (Traditional)
# ========================================================================================

[group("🐳 Build Image")]
docker-build-production cli=(DOCKER_CLI) source=(DOCKERFILE_PRODUCTION):
    @echo "🏗️ Building single-architecture production Docker image..."
    @echo "💡 Consider using 'make docker-buildx-production-local' for multi-arch support"
    {{ cli }} build -f {{ source }} -t rustfs:latest .

[group("🐳 Build Image")]
docker-build-source cli=(DOCKER_CLI) source=(DOCKERFILE_SOURCE):
    @echo "🏗️ Building single-architecture source Docker image..."
    @echo "💡 Consider using 'make docker-dev-local' for multi-arch support"
    {{ cli }} build -f {{ source }} -t rustfs:source .

# ========================================================================================
# Development Environment
# ========================================================================================

[group("🏃 Running")]
dev-env-start cli=(DOCKER_CLI) source=(DOCKERFILE_SOURCE) container=(CONTAINER_NAME):
    @echo "🚀 Starting development environment..."
    {{ cli }} buildx build \
        --file {{ source }} \
        --tag rustfs:dev \
        --load \
        .
    -{{ cli }} stop {{ container }} 2>/dev/null
    -{{ cli }} rm {{ container }} 2>/dev/null
    {{ cli }} run -d --name {{ container }} \
        -p 9010:9010 -p 9000:9000 \
        -v {{ invocation_directory() }}:/workspace \
        -it rustfs:dev

[group("🏃 Running")]
dev-env-stop cli=(DOCKER_CLI) container=(CONTAINER_NAME):
    @echo "🛑 Stopping development environment..."
    -{{ cli }} stop {{ container }} 2>/dev/null
    -{{ cli }} rm {{ container }} 2>/dev/null

[group("🏃 Running")]
dev-env-restart: dev-env-stop dev-env-start

[group("👍 E2E")]
e2e-server:
    sh scripts/run.sh

[group("👍 E2E")]
probe-e2e:
    sh scripts/probe.sh

[doc("inspect one image")]
[group("🚚 Other")]
docker-inspect-multiarch image cli=(DOCKER_CLI):
    @echo "🔍 Inspecting multi-architecture image: {{ image }}"
    {{ cli }} buildx imagetools inspect {{ image }}
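Because the variables at the top of the Justfile are declared with env(), each default can be overridden from the environment at invocation time; a hedged example:

    # Sketch: override Justfile defaults per invocation
    DOCKER_CLI=podman just docker-dev-local            # build with podman instead of docker
    just build-musl                                    # cross-target build via build-rustfs.sh
    just docker-inspect-multiarch rustfs/rustfs:latest # image name illustrative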
346
Makefile
@@ -5,7 +5,9 @@
|
||||
DOCKER_CLI ?= docker
|
||||
IMAGE_NAME ?= rustfs:v1.0.0
|
||||
CONTAINER_NAME ?= rustfs-dev
|
||||
DOCKERFILE_PATH = $(shell pwd)/.docker
|
||||
# Docker build configurations
|
||||
DOCKERFILE_PRODUCTION = Dockerfile
|
||||
DOCKERFILE_SOURCE = Dockerfile.source
|
||||
|
||||
# Code quality and formatting targets
|
||||
.PHONY: fmt
|
||||
@@ -21,6 +23,7 @@ fmt-check:
|
||||
.PHONY: clippy
|
||||
clippy:
|
||||
@echo "🔍 Running clippy checks..."
|
||||
cargo clippy --fix --allow-dirty
|
||||
cargo clippy --all-targets --all-features -- -D warnings
|
||||
|
||||
.PHONY: check
|
||||
@@ -31,7 +34,8 @@ check:
|
||||
.PHONY: test
|
||||
test:
|
||||
@echo "🧪 Running tests..."
|
||||
cargo test --all --exclude e2e_test
|
||||
cargo nextest run --all --exclude e2e_test
|
||||
cargo test --all --doc
|
||||
|
||||
.PHONY: pre-commit
|
||||
pre-commit: fmt clippy check test
|
||||
@@ -43,21 +47,6 @@ setup-hooks:
|
||||
chmod +x .git/hooks/pre-commit
|
||||
@echo "✅ Git hooks setup complete!"
|
||||
|
||||
.PHONY: init-devenv
|
||||
init-devenv:
|
||||
$(DOCKER_CLI) build -t $(IMAGE_NAME) -f $(DOCKERFILE_PATH)/Dockerfile.devenv .
|
||||
$(DOCKER_CLI) stop $(CONTAINER_NAME)
|
||||
$(DOCKER_CLI) rm $(CONTAINER_NAME)
|
||||
$(DOCKER_CLI) run -d --name $(CONTAINER_NAME) -p 9010:9010 -p 9000:9000 -v $(shell pwd):/root/s3-rustfs -it $(IMAGE_NAME)
|
||||
|
||||
.PHONY: start
|
||||
start:
|
||||
$(DOCKER_CLI) start $(CONTAINER_NAME)
|
||||
|
||||
.PHONY: stop
|
||||
stop:
|
||||
$(DOCKER_CLI) stop $(CONTAINER_NAME)
|
||||
|
||||
.PHONY: e2e-server
|
||||
e2e-server:
|
||||
sh $(shell pwd)/scripts/run.sh
|
||||
@@ -66,86 +55,196 @@ e2e-server:
|
||||
probe-e2e:
|
||||
sh $(shell pwd)/scripts/probe.sh
|
||||
|
||||
# make BUILD_OS=ubuntu22.04 build
|
||||
# in target/ubuntu22.04/release/rustfs
|
||||
|
||||
# make BUILD_OS=rockylinux9.3 build
|
||||
# in target/rockylinux9.3/release/rustfs
|
||||
BUILD_OS ?= rockylinux9.3
|
||||
# Native build using build-rustfs.sh script
|
||||
.PHONY: build
|
||||
build: ROCKYLINUX_BUILD_IMAGE_NAME = rustfs-$(BUILD_OS):v1
|
||||
build: ROCKYLINUX_BUILD_CONTAINER_NAME = rustfs-$(BUILD_OS)-build
|
||||
build: BUILD_CMD = /root/.cargo/bin/cargo build --release --bin rustfs --target-dir /root/s3-rustfs/target/$(BUILD_OS)
|
||||
build:
|
||||
$(DOCKER_CLI) build -t $(ROCKYLINUX_BUILD_IMAGE_NAME) -f $(DOCKERFILE_PATH)/Dockerfile.$(BUILD_OS) .
|
||||
$(DOCKER_CLI) run --rm --name $(ROCKYLINUX_BUILD_CONTAINER_NAME) -v $(shell pwd):/root/s3-rustfs -it $(ROCKYLINUX_BUILD_IMAGE_NAME) $(BUILD_CMD)
|
||||
@echo "🔨 Building RustFS using build-rustfs.sh script..."
|
||||
./build-rustfs.sh
|
||||
|
||||
.PHONY: build-dev
|
||||
build-dev:
|
||||
@echo "🔨 Building RustFS in development mode..."
|
||||
./build-rustfs.sh --dev
|
||||
|
||||
# Docker-based build (alternative approach)
|
||||
# Usage: make BUILD_OS=ubuntu22.04 build-docker
|
||||
# Output: target/ubuntu22.04/release/rustfs
|
||||
BUILD_OS ?= rockylinux9.3
|
||||
.PHONY: build-docker
|
||||
build-docker: SOURCE_BUILD_IMAGE_NAME = rustfs-$(BUILD_OS):v1
|
||||
build-docker: SOURCE_BUILD_CONTAINER_NAME = rustfs-$(BUILD_OS)-build
|
||||
build-docker: BUILD_CMD = /root/.cargo/bin/cargo build --release --bin rustfs --target-dir /root/s3-rustfs/target/$(BUILD_OS)
|
||||
build-docker:
|
||||
@echo "🐳 Building RustFS using Docker ($(BUILD_OS))..."
|
||||
$(DOCKER_CLI) buildx build -t $(SOURCE_BUILD_IMAGE_NAME) -f $(DOCKERFILE_SOURCE) .
|
||||
$(DOCKER_CLI) run --rm --name $(SOURCE_BUILD_CONTAINER_NAME) -v $(shell pwd):/root/s3-rustfs -it $(SOURCE_BUILD_IMAGE_NAME) $(BUILD_CMD)
|
||||
|
||||
.PHONY: build-musl
|
||||
build-musl:
|
||||
@echo "🔨 Building rustfs for x86_64-unknown-linux-musl..."
|
||||
cargo build --target x86_64-unknown-linux-musl --bin rustfs -r
|
||||
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
|
||||
./build-rustfs.sh --platform x86_64-unknown-linux-musl
|
||||
|
||||
.PHONY: build-gnu
|
||||
build-gnu:
|
||||
@echo "🔨 Building rustfs for x86_64-unknown-linux-gnu..."
|
||||
cargo build --target x86_64-unknown-linux-gnu --bin rustfs -r
|
||||
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
|
||||
./build-rustfs.sh --platform x86_64-unknown-linux-gnu
|
||||
|
||||
.PHONY: build-musl-arm64
|
||||
build-musl-arm64:
|
||||
@echo "🔨 Building rustfs for aarch64-unknown-linux-musl..."
|
||||
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
|
||||
./build-rustfs.sh --platform aarch64-unknown-linux-musl
|
||||
|
||||
.PHONY: build-gnu-arm64
|
||||
build-gnu-arm64:
|
||||
@echo "🔨 Building rustfs for aarch64-unknown-linux-gnu..."
|
||||
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
|
||||
./build-rustfs.sh --platform aarch64-unknown-linux-gnu
|
||||
|
||||
.PHONY: deploy-dev
|
||||
deploy-dev: build-musl
|
||||
@echo "🚀 Deploying to dev server: $${IP}"
|
||||
./scripts/dev_deploy.sh $${IP}
|
||||
|
||||
# Multi-architecture Docker build targets
|
||||
.PHONY: docker-build-multiarch
|
||||
docker-build-multiarch:
|
||||
@echo "🏗️ Building multi-architecture Docker images..."
|
||||
./scripts/build-docker-multiarch.sh
|
||||
# ========================================================================================
|
||||
# Docker Multi-Architecture Builds (Primary Methods)
|
||||
# ========================================================================================
|
||||
|
||||
.PHONY: docker-build-multiarch-push
|
||||
docker-build-multiarch-push:
|
||||
@echo "🚀 Building and pushing multi-architecture Docker images..."
|
||||
./scripts/build-docker-multiarch.sh --push
|
||||
# Production builds using docker-buildx.sh (for CI/CD and production)
|
||||
.PHONY: docker-buildx
|
||||
docker-buildx:
|
||||
@echo "🏗️ Building multi-architecture production Docker images with buildx..."
|
||||
./docker-buildx.sh
|
||||
|
||||
.PHONY: docker-build-multiarch-version
|
||||
docker-build-multiarch-version:
|
||||
.PHONY: docker-buildx-push
|
||||
docker-buildx-push:
|
||||
@echo "🚀 Building and pushing multi-architecture production Docker images with buildx..."
|
||||
./docker-buildx.sh --push
|
||||
|
||||
.PHONY: docker-buildx-version
|
||||
docker-buildx-version:
|
||||
@if [ -z "$(VERSION)" ]; then \
|
||||
echo "❌ 错误: 请指定版本, 例如: make docker-build-multiarch-version VERSION=v1.0.0"; \
|
||||
echo "❌ 错误: 请指定版本, 例如: make docker-buildx-version VERSION=v1.0.0"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "🏗️ Building multi-architecture Docker images (version: $(VERSION))..."
|
||||
./scripts/build-docker-multiarch.sh --version $(VERSION)
|
||||
@echo "🏗️ Building multi-architecture production Docker images (version: $(VERSION))..."
|
||||
./docker-buildx.sh --release $(VERSION)
|
||||
|
||||
.PHONY: docker-push-multiarch-version
|
||||
docker-push-multiarch-version:
|
||||
.PHONY: docker-buildx-push-version
|
||||
docker-buildx-push-version:
|
||||
@if [ -z "$(VERSION)" ]; then \
|
||||
echo "❌ 错误: 请指定版本, 例如: make docker-push-multiarch-version VERSION=v1.0.0"; \
|
||||
echo "❌ 错误: 请指定版本, 例如: make docker-buildx-push-version VERSION=v1.0.0"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "🚀 Building and pushing multi-architecture Docker images (version: $(VERSION))..."
|
||||
./scripts/build-docker-multiarch.sh --version $(VERSION) --push
|
||||
@echo "🚀 Building and pushing multi-architecture production Docker images (version: $(VERSION))..."
|
||||
./docker-buildx.sh --release $(VERSION) --push
|
||||
|
||||
.PHONY: docker-build-ubuntu
|
||||
docker-build-ubuntu:
|
||||
@echo "🏗️ Building multi-architecture Ubuntu Docker images..."
|
||||
./scripts/build-docker-multiarch.sh --type ubuntu
|
||||
# Development/Source builds using direct buildx commands
|
||||
.PHONY: docker-dev
|
||||
docker-dev:
|
||||
@echo "🏗️ Building multi-architecture development Docker images with buildx..."
|
||||
@echo "💡 This builds from source code and is intended for local development and testing"
|
||||
@echo "⚠️ Multi-arch images cannot be loaded locally, use docker-dev-push to push to registry"
|
||||
$(DOCKER_CLI) buildx build \
|
||||
--platform linux/amd64,linux/arm64 \
|
||||
--file $(DOCKERFILE_SOURCE) \
|
||||
--tag rustfs:source-latest \
|
||||
--tag rustfs:dev-latest \
|
||||
.
|
||||
|
||||
.PHONY: docker-build-rockylinux
|
||||
docker-build-rockylinux:
|
||||
@echo "🏗️ Building multi-architecture RockyLinux Docker images..."
|
||||
./scripts/build-docker-multiarch.sh --type rockylinux
|
||||
.PHONY: docker-dev-local
|
||||
docker-dev-local:
|
||||
@echo "🏗️ Building single-architecture development Docker image for local use..."
|
||||
@echo "💡 This builds from source code for the current platform and loads locally"
|
||||
$(DOCKER_CLI) buildx build \
|
||||
--file $(DOCKERFILE_SOURCE) \
|
||||
--tag rustfs:source-latest \
|
||||
--tag rustfs:dev-latest \
|
||||
--load \
|
||||
.
|
||||
|
||||
.PHONY: docker-build-devenv
|
||||
docker-build-devenv:
|
||||
@echo "🏗️ Building multi-architecture development environment Docker images..."
|
||||
./scripts/build-docker-multiarch.sh --type devenv
|
||||
.PHONY: docker-dev-push
|
||||
docker-dev-push:
|
||||
@if [ -z "$(REGISTRY)" ]; then \
|
||||
echo "❌ 错误: 请指定镜像仓库, 例如: make docker-dev-push REGISTRY=ghcr.io/username"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "🚀 Building and pushing multi-architecture development Docker images..."
|
||||
@echo "💡 推送到仓库: $(REGISTRY)"
|
||||
$(DOCKER_CLI) buildx build \
|
||||
--platform linux/amd64,linux/arm64 \
|
||||
--file $(DOCKERFILE_SOURCE) \
|
||||
--tag $(REGISTRY)/rustfs:source-latest \
|
||||
--tag $(REGISTRY)/rustfs:dev-latest \
|
||||
--push \
|
||||
.
|
||||
|
||||
.PHONY: docker-build-all-types
|
||||
docker-build-all-types:
|
||||
@echo "🏗️ Building all multi-architecture Docker image types..."
|
||||
./scripts/build-docker-multiarch.sh --type production
|
||||
./scripts/build-docker-multiarch.sh --type ubuntu
|
||||
./scripts/build-docker-multiarch.sh --type rockylinux
|
||||
./scripts/build-docker-multiarch.sh --type devenv
|
||||
|
||||
|
||||
# Local production builds using direct buildx (alternative to docker-buildx.sh)
|
||||
.PHONY: docker-buildx-production-local
|
||||
docker-buildx-production-local:
|
||||
@echo "🏗️ Building single-architecture production Docker image locally..."
|
||||
@echo "💡 Alternative to docker-buildx.sh for local testing"
|
||||
$(DOCKER_CLI) buildx build \
|
||||
--file $(DOCKERFILE_PRODUCTION) \
|
||||
--tag rustfs:production-latest \
|
||||
--tag rustfs:latest \
|
||||
--load \
|
||||
--build-arg RELEASE=latest \
|
||||
.
|
||||
|
||||
# ========================================================================================
|
||||
# Single Architecture Docker Builds (Traditional)
|
||||
# ========================================================================================
|
||||
|
||||
.PHONY: docker-build-production
|
||||
docker-build-production:
|
||||
@echo "🏗️ Building single-architecture production Docker image..."
|
||||
@echo "💡 Consider using 'make docker-buildx-production-local' for multi-arch support"
|
||||
$(DOCKER_CLI) build -f $(DOCKERFILE_PRODUCTION) -t rustfs:latest .
|
||||
|
||||
.PHONY: docker-build-source
|
||||
docker-build-source:
|
||||
@echo "🏗️ Building single-architecture source Docker image..."
|
||||
@echo "💡 Consider using 'make docker-dev-local' for multi-arch support"
|
||||
$(DOCKER_CLI) build -f $(DOCKERFILE_SOURCE) -t rustfs:source .
|
||||
|
||||
# ========================================================================================
|
||||
# Development Environment
|
||||
# ========================================================================================
|
||||
|
||||
.PHONY: dev-env-start
|
||||
dev-env-start:
|
||||
@echo "🚀 Starting development environment..."
|
||||
$(DOCKER_CLI) buildx build \
|
||||
--file $(DOCKERFILE_SOURCE) \
|
||||
--tag rustfs:dev \
|
||||
--load \
|
||||
.
|
||||
$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
|
||||
$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true
|
||||
$(DOCKER_CLI) run -d --name $(CONTAINER_NAME) \
|
||||
-p 9010:9010 -p 9000:9000 \
|
||||
-v $(shell pwd):/workspace \
|
||||
-it rustfs:dev

.PHONY: dev-env-stop
dev-env-stop:
	@echo "🛑 Stopping development environment..."
	$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
	$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true

.PHONY: dev-env-restart
dev-env-restart: dev-env-stop dev-env-start

# ========================================================================================
# Build Utilities
# ========================================================================================

.PHONY: docker-inspect-multiarch
docker-inspect-multiarch:
@@ -159,41 +258,112 @@ docker-inspect-multiarch:
.PHONY: build-cross-all
build-cross-all:
	@echo "🔧 Building all target architectures..."
	@if ! command -v cross &> /dev/null; then \
		echo "📦 Installing cross..."; \
		cargo install cross; \
	fi
	@echo "💡 On macOS/Windows, use 'make docker-dev' for reliable multi-arch builds"
	@echo "🔨 Generating protobuf code..."
	cargo run --bin gproto || true
	@echo "🔨 Building x86_64-unknown-linux-musl..."
	cargo build --release --target x86_64-unknown-linux-musl --bin rustfs
	@echo "🔨 Building x86_64-unknown-linux-gnu..."
	./build-rustfs.sh --platform x86_64-unknown-linux-gnu
	@echo "🔨 Building aarch64-unknown-linux-gnu..."
	cross build --release --target aarch64-unknown-linux-gnu --bin rustfs
	./build-rustfs.sh --platform aarch64-unknown-linux-gnu
	@echo "🔨 Building x86_64-unknown-linux-musl..."
	./build-rustfs.sh --platform x86_64-unknown-linux-musl
	@echo "🔨 Building aarch64-unknown-linux-musl..."
	./build-rustfs.sh --platform aarch64-unknown-linux-musl
	@echo "✅ All architectures built successfully!"

# ========================================================================================
# Help and Documentation
# ========================================================================================

.PHONY: help-build
help-build:
	@echo "🔨 RustFS 构建帮助:"
	@echo ""
	@echo "🚀 本地构建 (推荐使用):"
	@echo "  make build                  # 构建 RustFS 二进制文件 (默认包含 console)"
	@echo "  make build-dev              # 开发模式构建"
	@echo "  make build-musl             # 构建 x86_64 musl 版本"
	@echo "  make build-gnu              # 构建 x86_64 GNU 版本"
	@echo "  make build-musl-arm64       # 构建 aarch64 musl 版本"
	@echo "  make build-gnu-arm64        # 构建 aarch64 GNU 版本"
	@echo ""
	@echo "🐳 Docker 构建:"
	@echo "  make build-docker                        # 使用 Docker 容器构建"
	@echo "  make build-docker BUILD_OS=ubuntu22.04   # 指定构建系统"
	@echo ""
	@echo "🏗️ 跨架构构建:"
	@echo "  make build-cross-all        # 构建所有架构的二进制文件"
	@echo ""
	@echo "🔧 直接使用 build-rustfs.sh 脚本:"
	@echo "  ./build-rustfs.sh --help                   # 查看脚本帮助"
	@echo "  ./build-rustfs.sh --no-console             # 构建时跳过 console 资源"
	@echo "  ./build-rustfs.sh --force-console-update   # 强制更新 console 资源"
	@echo "  ./build-rustfs.sh --dev                    # 开发模式构建"
	@echo "  ./build-rustfs.sh --sign                   # 签名二进制文件"
	@echo "  ./build-rustfs.sh --platform x86_64-unknown-linux-gnu   # 指定目标平台"
	@echo "  ./build-rustfs.sh --skip-verification      # 跳过二进制验证"
	@echo ""
	@echo "💡 build-rustfs.sh 脚本提供了更多选项、智能检测和二进制验证功能"

.PHONY: help-docker
help-docker:
	@echo "🐳 Docker 多架构构建帮助:"
	@echo ""
	@echo "基本构建:"
	@echo "  make docker-build-multiarch        # 构建多架构镜像(不推送)"
	@echo "  make docker-build-multiarch-push   # 构建并推送多架构镜像"
	@echo "🚀 生产镜像构建 (推荐使用 docker-buildx.sh):"
	@echo "  make docker-buildx                               # 构建生产多架构镜像(不推送)"
	@echo "  make docker-buildx-push                          # 构建并推送生产多架构镜像"
	@echo "  make docker-buildx-version VERSION=v1.0.0        # 构建指定版本"
	@echo "  make docker-buildx-push-version VERSION=v1.0.0   # 构建并推送指定版本"
	@echo ""
	@echo "版本构建:"
	@echo "  make docker-build-multiarch-version VERSION=v1.0.0   # 构建指定版本"
	@echo "  make docker-push-multiarch-version VERSION=v1.0.0    # 构建并推送指定版本"
	@echo "🔧 开发/源码镜像构建 (本地开发测试):"
	@echo "  make docker-dev                     # 构建开发多架构镜像(无法本地加载)"
	@echo "  make docker-dev-local               # 构建开发单架构镜像(本地加载)"
	@echo "  make docker-dev-push REGISTRY=xxx   # 构建并推送开发镜像"
	@echo ""
	@echo "镜像类型:"
	@echo "  make docker-build-ubuntu            # 构建 Ubuntu 镜像"
	@echo "  make docker-build-rockylinux        # 构建 RockyLinux 镜像"
	@echo "  make docker-build-devenv            # 构建开发环境镜像"
	@echo "  make docker-build-all-types         # 构建所有类型镜像"
	@echo "🏗️ 本地生产镜像构建 (替代方案):"
	@echo "  make docker-buildx-production-local   # 本地构建生产单架构镜像"
	@echo ""
	@echo "辅助工具:"
	@echo "📦 单架构构建 (传统方式):"
	@echo "  make docker-build-production        # 构建单架构生产镜像"
	@echo "  make docker-build-source            # 构建单架构源码镜像"
	@echo ""
	@echo "🚀 开发环境管理:"
	@echo "  make dev-env-start                  # 启动开发容器环境"
	@echo "  make dev-env-stop                   # 停止开发容器环境"
	@echo "  make dev-env-restart                # 重启开发容器环境"
	@echo ""
	@echo "🔧 辅助工具:"
	@echo "  make build-cross-all                       # 构建所有架构的二进制文件"
	@echo "  make docker-inspect-multiarch IMAGE=xxx    # 检查镜像的架构支持"
	@echo ""
	@echo "环境变量 (在推送时需要设置):"
	@echo "📋 环境变量:"
	@echo "  REGISTRY             镜像仓库地址 (推送时需要)"
	@echo "  DOCKERHUB_USERNAME   Docker Hub 用户名"
	@echo "  DOCKERHUB_TOKEN      Docker Hub 访问令牌"
	@echo "  GITHUB_TOKEN         GitHub 访问令牌"
	@echo ""
	@echo "💡 建议:"
	@echo "  - 生产用途: 使用 docker-buildx* 命令 (基于预编译二进制)"
	@echo "  - 本地开发: 使用 docker-dev* 命令 (从源码构建)"
	@echo "  - 开发环境: 使用 dev-env-* 命令管理开发容器"

.PHONY: help
help:
	@echo "🦀 RustFS Makefile 帮助:"
	@echo ""
	@echo "📋 主要命令分类:"
	@echo "  make help-build           # 显示构建相关帮助"
	@echo "  make help-docker          # 显示 Docker 相关帮助"
	@echo ""
	@echo "🔧 代码质量:"
	@echo "  make fmt                  # 格式化代码"
	@echo "  make clippy               # 运行 clippy 检查"
	@echo "  make test                 # 运行测试"
	@echo "  make pre-commit           # 运行所有预提交检查"
	@echo ""
	@echo "🚀 快速开始:"
	@echo "  make build                # 构建 RustFS 二进制"
	@echo "  make docker-dev-local     # 构建开发 Docker 镜像(本地)"
	@echo "  make dev-env-start        # 启动开发环境"
	@echo ""
	@echo "💡 更多帮助请使用 'make help-build' 或 'make help-docker'"

72  README.md
@@ -7,29 +7,32 @@
<a href="https://github.com/rustfs/rustfs/actions/workflows/docker.yml"><img alt="Build and Push Docker Images" src="https://github.com/rustfs/rustfs/actions/workflows/docker.yml/badge.svg" /></a>
<img alt="GitHub commit activity" src="https://img.shields.io/github/commit-activity/m/rustfs/rustfs"/>
<img alt="Github Last Commit" src="https://img.shields.io/github/last-commit/rustfs/rustfs"/>
<a href="https://hellogithub.com/repository/rustfs/rustfs" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=b95bcb72bdc340b68f16fdf6790b7d5b&claim_uid=MsbvjYeLDKAH457&theme=small" alt="Featured|HelloGitHub" /></a>
</p>

<p align="center">
<a href="https://docs.rustfs.com/en/introduction.html">Getting Started</a>
· <a href="https://docs.rustfs.com/en/">Docs</a>
<a href="https://docs.rustfs.com/introduction.html">Getting Started</a>
· <a href="https://docs.rustfs.com/">Docs</a>
· <a href="https://github.com/rustfs/rustfs/issues">Bug reports</a>
· <a href="https://github.com/rustfs/rustfs/discussions">Discussions</a>
</p>

<p align="center">
English | <a href="https://github.com/rustfs/rustfs/blob/main/README_ZH.md">简体中文</a> |
<!-- Keep these links. Translations will automatically update with the README. -->
<a href="https://readme-i18n.com/rustfs/rustfs?lang=de">Deutsch</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=es">Español</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=fr">français</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ja">日本語</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ko">한국어</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=pt">Português</a> |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ru">Русский</a>
</p>

RustFS is a high-performance distributed object storage system built in Rust, one of the most popular programming languages worldwide. Like MinIO, it offers simplicity, S3 compatibility, an open-source codebase, and support for data lakes, AI, and big data workloads. Compared with other storage systems, it also ships under a friendlier open-source license, the Apache License. With Rust as its foundation, RustFS delivers fast, memory-safe distributed object storage.

> ⚠️ **RustFS is under rapid development. Do NOT use in production environments!**

## Features

- **High Performance**: Built with Rust, ensuring speed and efficiency.
@@ -69,7 +72,7 @@ Stress test server parameters

To get started with RustFS, follow these steps:

1. **One-click installation script (Option 1)**

   ```bash
   curl -O https://rustfs.com/install_rustfs.sh && bash install_rustfs.sh
@@ -78,13 +81,52 @@ To get started with RustFS, follow these steps:
2. **Docker Quick Start (Option 2)**

   ```bash
   podman run -d -p 9000:9000 -p 9001:9001 -v /data:/data quay.io/rustfs/rustfs
   # Latest stable release
   docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs:latest

   # Development version (main branch)
   docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs:main-latest

   # Specific version
   docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs:v1.0.0
   ```

3. **Build from Source (Option 3) - Advanced Users**

3. **Access the Console**: Open your web browser and navigate to `http://localhost:9001` to access the RustFS console; the default username and password are `rustfsadmin`.
4. **Create a Bucket**: Use the console to create a new bucket for your objects.
5. **Upload Objects**: You can upload files directly through the console or use S3-compatible APIs to interact with your RustFS instance.

For developers who want to build RustFS Docker images from source with multi-architecture support:

```bash
# Build multi-architecture images locally
./docker-buildx.sh --build-arg RELEASE=latest

# Build and push to registry
./docker-buildx.sh --push

# Build specific version
./docker-buildx.sh --release v1.0.0 --push

# Build for custom registry
./docker-buildx.sh --registry your-registry.com --namespace yourname --push
```

The `docker-buildx.sh` script supports:

- **Multi-architecture builds**: `linux/amd64`, `linux/arm64`
- **Automatic version detection**: Uses git tags or commit hashes
- **Registry flexibility**: Supports Docker Hub, GitHub Container Registry, etc.
- **Build optimization**: Includes caching and parallel builds

You can also use Make targets for convenience:

```bash
make docker-buildx                          # Build locally
make docker-buildx-push                     # Build and push
make docker-buildx-version VERSION=v1.0.0   # Build specific version
make help-docker                            # Show all Docker-related commands
```

4. **Access the Console**: Open your web browser and navigate to `http://localhost:9000` to access the RustFS console; the default username and password are `rustfsadmin`.
5. **Create a Bucket**: Use the console to create a new bucket for your objects.
6. **Upload Objects**: You can upload files directly through the console or use S3-compatible APIs to interact with your RustFS instance.
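
Beyond the console, any S3 client can talk to RustFS. A minimal sketch using the AWS CLI, assuming the default `rustfsadmin` credentials and the S3 endpoint on port 9000 (adjust both for your deployment):

```bash
# Point a dedicated profile at the local RustFS endpoint (hypothetical values)
aws configure set aws_access_key_id rustfsadmin --profile rustfs
aws configure set aws_secret_access_key rustfsadmin --profile rustfs

# Create a bucket and upload an object through the S3-compatible API
aws --profile rustfs --endpoint-url http://localhost:9000 s3 mb s3://demo
aws --profile rustfs --endpoint-url http://localhost:9000 s3 cp ./hello.txt s3://demo/hello.txt
```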

## Documentation

@@ -117,7 +159,7 @@ If you have any questions or need assistance, you can:
RustFS is a community-driven project, and we appreciate all contributions. Check out the [Contributors](https://github.com/rustfs/rustfs/graphs/contributors) page to see the amazing people who have helped make RustFS better.

<a href="https://github.com/rustfs/rustfs/graphs/contributors">
  <img src="https://contrib.rocks/image?repo=rustfs/rustfs" />
  <img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" />
</a>

## License

10  README_ZH.md
@@ -7,6 +7,7 @@
<a href="https://github.com/rustfs/rustfs/actions/workflows/docker.yml"><img alt="Build and Push Docker Images" src="https://github.com/rustfs/rustfs/actions/workflows/docker.yml/badge.svg" /></a>
<img alt="GitHub commit activity" src="https://img.shields.io/github/commit-activity/m/rustfs/rustfs"/>
<img alt="Github Last Commit" src="https://img.shields.io/github/last-commit/rustfs/rustfs"/>
<a href="https://hellogithub.com/repository/rustfs/rustfs" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=b95bcb72bdc340b68f16fdf6790b7d5b&claim_uid=MsbvjYeLDKAH457&theme=small" alt="Featured|HelloGitHub" /></a>
</p >

<p align="center">
@@ -61,7 +62,7 @@ RustFS 是一个使用 Rust(全球最受欢迎的编程语言之一)构建

要开始使用 RustFS,请按照以下步骤操作:

1. **一键脚本快速启动 (方案一)**

   ```bash
   curl -O https://rustfs.com/install_rustfs.sh && bash install_rustfs.sh
@@ -70,11 +71,10 @@ RustFS 是一个使用 Rust(全球最受欢迎的编程语言之一)构建
2. **Docker快速启动(方案二)**

   ```bash
   podman run -d -p 9000:9000 -p 9001:9001 -v /data:/data quay.io/rustfs/rustfs
   docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs
   ```

3. **访问控制台**:打开 Web 浏览器并导航到 `http://localhost:9001` 以访问 RustFS 控制台,默认的用户名和密码是 `rustfsadmin`。
3. **访问控制台**:打开 Web 浏览器并导航到 `http://localhost:9000` 以访问 RustFS 控制台,默认的用户名和密码是 `rustfsadmin`。
4. **创建存储桶**:使用控制台为您的对象创建新的存储桶。
5. **上传对象**:您可以直接通过控制台上传文件,或使用 S3 兼容的 API 与您的 RustFS 实例交互。

@@ -109,7 +109,7 @@ RustFS 是一个使用 Rust(全球最受欢迎的编程语言之一)构建
RustFS 是一个社区驱动的项目,我们感谢所有的贡献。查看[贡献者](https://github.com/rustfs/rustfs/graphs/contributors)页面,了解帮助 RustFS 变得更好的杰出人员。

<a href="https://github.com/rustfs/rustfs/graphs/contributors">
  <img src="https://contrib.rocks/image?repo=rustfs/rustfs" />
  <img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" />
</a >

## 许可证

41  _typos.toml  Normal file
@@ -0,0 +1,41 @@
[default]
# # Ignore specific spell checking patterns
# extend-ignore-identifiers-re = [
#     # Ignore common patterns in base64 encoding and hash values
#     "[A-Za-z0-9+/]{8,}={0,2}",   # base64 encoding
#     "[A-Fa-f0-9]{8,}",           # hexadecimal hash
#     "[A-Za-z0-9_-]{20,}",        # long random strings
# ]

# # Ignore specific regex patterns in content
# extend-ignore-re = [
#     # Ignore hash values and encoded strings (base64 patterns)
#     "(?i)[A-Za-z0-9+/]{8,}={0,2}",
#     # Ignore long strings in quotes (usually hash or base64)
#     '"[A-Za-z0-9+/=_-]{8,}"',
#     # Ignore IV values and similar cryptographic strings
#     '"[A-Za-z0-9+/=]{12,}"',
#     # Ignore cryptographic signatures and keys (including partial strings)
#     "[A-Za-z0-9+/]{6,}[A-Za-z0-9+/=]*",
#     # Ignore base64-like strings in comments (common in examples)
#     "//.*[A-Za-z0-9+/]{8,}[A-Za-z0-9+/=]*",
# ]
extend-ignore-re = [
    # Ignore long strings in quotes (usually hash or base64)
    '"[A-Za-z0-9+/=_-]{32,}"',
    # Ignore IV values and similar cryptographic strings
    '"[A-Za-z0-9+/=]{12,}"',
    # Ignore cryptographic signatures and keys (including partial strings)
    "[A-Za-z0-9+/]{16,}[A-Za-z0-9+/=]*",
]

[default.extend-words]
bui = "bui"
typ = "typ"
clen = "clen"
datas = "datas"
bre = "bre"
abd = "abd"

[files]
extend-exclude = []
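
# Note: the typos CLI (https://github.com/crate-ci/typos) picks up this file
# automatically when run from the repository root.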
579  build-rustfs.sh  Executable file
@@ -0,0 +1,579 @@
#!/bin/bash

# RustFS Binary Build Script
# This script compiles RustFS binaries for different platforms and architectures

set -e

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

# Auto-detect current platform
detect_platform() {
    local arch=$(uname -m)
    local os=$(uname -s | tr '[:upper:]' '[:lower:]')

    case "$os" in
        "linux")
            case "$arch" in
                "x86_64")
                    # Default to GNU for better compatibility
                    echo "x86_64-unknown-linux-gnu"
                    ;;
                "aarch64"|"arm64")
                    echo "aarch64-unknown-linux-gnu"
                    ;;
                "armv7l")
                    echo "armv7-unknown-linux-gnueabihf"
                    ;;
                "loongarch64")
                    echo "loongarch64-unknown-linux-musl"
                    ;;
                *)
                    echo "unknown-platform"
                    ;;
            esac
            ;;
        "darwin")
            case "$arch" in
                "x86_64")
                    echo "x86_64-apple-darwin"
                    ;;
                "arm64"|"aarch64")
                    echo "aarch64-apple-darwin"
                    ;;
                *)
                    echo "unknown-platform"
                    ;;
            esac
            ;;
        *)
            echo "unknown-platform"
            ;;
    esac
}
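# For example, on an Apple Silicon Mac this prints "aarch64-apple-darwin";
# unsupported combinations fall through to "unknown-platform".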

# Cross-platform SHA256 checksum generation
generate_sha256() {
    local file="$1"
    local output_file="$2"
    local os=$(uname -s | tr '[:upper:]' '[:lower:]')

    case "$os" in
        "linux")
            if command -v sha256sum &> /dev/null; then
                sha256sum "$file" > "$output_file"
            elif command -v shasum &> /dev/null; then
                shasum -a 256 "$file" > "$output_file"
            else
                print_message $RED "❌ No SHA256 command found (sha256sum or shasum)"
                return 1
            fi
            ;;
        "darwin")
            if command -v shasum &> /dev/null; then
                shasum -a 256 "$file" > "$output_file"
            elif command -v sha256sum &> /dev/null; then
                sha256sum "$file" > "$output_file"
            else
                print_message $RED "❌ No SHA256 command found (shasum or sha256sum)"
                return 1
            fi
            ;;
        *)
            # Try common commands in order
            if command -v sha256sum &> /dev/null; then
                sha256sum "$file" > "$output_file"
            elif command -v shasum &> /dev/null; then
                shasum -a 256 "$file" > "$output_file"
            else
                print_message $RED "❌ No SHA256 command found"
                return 1
            fi
            ;;
    esac
}
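# Example (hypothetical paths): generate_sha256 target/release/rustfs rustfs.sha256sum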

# Default values
OUTPUT_DIR="target/release"
PLATFORM=$(detect_platform) # Auto-detect current platform
BINARY_NAME="rustfs"
BUILD_TYPE="release"
SIGN=false
WITH_CONSOLE=true
FORCE_CONSOLE_UPDATE=false
CONSOLE_VERSION="latest"
SKIP_VERIFICATION=false
CUSTOM_PLATFORM=""

# Print usage
usage() {
    echo "Usage: $0 [OPTIONS]"
    echo ""
    echo "Description:"
    echo "  Build RustFS binary for the current platform. Designed for CI/CD pipelines"
    echo "  where different runners build platform-specific binaries natively."
    echo "  Includes automatic verification to ensure the built binary is functional."
    echo ""
    echo "Options:"
    echo "  -o, --output-dir DIR        Output directory (default: target/release)"
    echo "  -b, --binary-name NAME      Binary name (default: rustfs)"
    echo "  -p, --platform TARGET       Target platform (default: auto-detect)"
    echo "                              Supported platforms:"
    echo "                                x86_64-unknown-linux-gnu"
    echo "                                aarch64-unknown-linux-gnu"
    echo "                                armv7-unknown-linux-gnueabihf"
    echo "                                x86_64-unknown-linux-musl"
    echo "                                aarch64-unknown-linux-musl"
    echo "                                armv7-unknown-linux-musleabihf"
    echo "                                x86_64-apple-darwin"
    echo "                                aarch64-apple-darwin"
    echo "                                x86_64-pc-windows-msvc"
    echo "                                aarch64-pc-windows-msvc"
    echo "  --dev                       Build in dev mode"
    echo "  --sign                      Sign binaries after build"
    echo "  --with-console              Download console static assets (default)"
    echo "  --no-console                Skip console static assets"
    echo "  --force-console-update      Force update console assets even if they exist"
    echo "  --console-version VERSION   Console version to download (default: latest)"
    echo "  --skip-verification         Skip binary verification after build"
    echo "  -h, --help                  Show this help message"
    echo ""
    echo "Examples:"
    echo "  $0                            # Build for current platform (includes console assets)"
    echo "  $0 --dev                      # Development build"
    echo "  $0 --sign                     # Build and sign binary (release CI)"
    echo "  $0 --no-console               # Build without console static assets"
    echo "  $0 --force-console-update     # Force update console assets"
    echo "  $0 --platform x86_64-unknown-linux-musl   # Build for specific platform"
    echo "  $0 --skip-verification        # Skip binary verification (for cross-compilation)"
    echo ""
    echo "Detected platform: $(detect_platform)"
    echo "CI Usage: Run this script on each platform's runner to build native binaries"
}

# Print colored message
print_message() {
    local color=$1
    local message=$2
    echo -e "${color}${message}${NC}"
}

# Get version from git
get_version() {
    if git describe --abbrev=0 --tags >/dev/null 2>&1; then
        git describe --abbrev=0 --tags
    else
        git rev-parse --short HEAD
    fi
}
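# Prints the most recent tag if one exists, otherwise the short commit hash of HEAD.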

# Setup rust environment
setup_rust_environment() {
    print_message $BLUE "🔧 Setting up Rust environment..."

    # Install required target for current platform
    print_message $YELLOW "Installing target: $PLATFORM"
    rustup target add "$PLATFORM"

    # Set up environment variables for musl targets
    if [[ "$PLATFORM" == *"musl"* ]]; then
        print_message $YELLOW "Setting up environment for musl target..."
        export RUSTFLAGS="-C target-feature=-crt-static"

        # For cargo-zigbuild, set up additional environment variables
        if command -v cargo-zigbuild &> /dev/null; then
            print_message $YELLOW "Configuring cargo-zigbuild for musl target..."

            # Set environment variables for better musl support
            export CC_x86_64_unknown_linux_musl="zig cc -target x86_64-linux-musl"
            export CXX_x86_64_unknown_linux_musl="zig c++ -target x86_64-linux-musl"
            export AR_x86_64_unknown_linux_musl="zig ar"
            export CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_LINKER="zig cc -target x86_64-linux-musl"

            export CC_aarch64_unknown_linux_musl="zig cc -target aarch64-linux-musl"
            export CXX_aarch64_unknown_linux_musl="zig c++ -target aarch64-linux-musl"
            export AR_aarch64_unknown_linux_musl="zig ar"
            export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_LINKER="zig cc -target aarch64-linux-musl"

            # Set environment variables for zstd-sys to avoid target parsing issues
            export ZSTD_SYS_USE_PKG_CONFIG=1
            export PKG_CONFIG_ALLOW_CROSS=1
        fi
    fi

    # Install required tools
    if [ "$SIGN" = true ]; then
        if ! command -v minisign &> /dev/null; then
            print_message $YELLOW "Installing minisign for binary signing..."
            cargo install minisign
        fi
    fi
}

# Download console static assets
download_console_assets() {
    local static_dir="rustfs/static"
    local console_exists=false

    # Check if console assets already exist
    if [ -d "$static_dir" ] && [ -f "$static_dir/index.html" ]; then
        console_exists=true
        local static_size=$(du -sh "$static_dir" 2>/dev/null | cut -f1 || echo "unknown")
        print_message $YELLOW "Console static assets already exist ($static_size)"
    fi

    # Determine if we need to download
    local should_download=false
    if [ "$WITH_CONSOLE" = true ]; then
        if [ "$console_exists" = false ]; then
            print_message $BLUE "🎨 Console assets not found, downloading..."
            should_download=true
        elif [ "$FORCE_CONSOLE_UPDATE" = true ]; then
            print_message $BLUE "🎨 Force updating console assets..."
            should_download=true
        else
            print_message $GREEN "✅ Console assets already available, skipping download"
        fi
    else
        if [ "$console_exists" = true ]; then
            print_message $GREEN "✅ Using existing console assets"
        else
            print_message $YELLOW "⚠️ Console assets not found. Use --with-console to download them."
        fi
    fi

    if [ "$should_download" = true ]; then
        print_message $BLUE "📥 Downloading console static assets..."

        # Create static directory
        mkdir -p "$static_dir"

        # Download from GitHub Releases (consistent with Docker build)
        local download_url
        if [ "$CONSOLE_VERSION" = "latest" ]; then
            print_message $YELLOW "Getting latest console release info..."
            # For now, use dl.rustfs.com as fallback until GitHub Releases includes console assets
            download_url="https://dl.rustfs.com/artifacts/console/rustfs-console-latest.zip"
        else
            download_url="https://dl.rustfs.com/artifacts/console/rustfs-console-${CONSOLE_VERSION}.zip"
        fi

        print_message $YELLOW "Downloading from: $download_url"

        # Download with retries
        local temp_file="console-assets-temp.zip"
        local download_success=false

        for i in {1..3}; do
            if curl -L "$download_url" -o "$temp_file" --retry 3 --retry-delay 5 --max-time 300; then
                download_success=true
                break
            else
                print_message $YELLOW "Download attempt $i failed, retrying..."
                sleep 2
            fi
        done

        if [ "$download_success" = true ]; then
            # Verify the downloaded file
            if [ -f "$temp_file" ] && [ -s "$temp_file" ]; then
                print_message $BLUE "📦 Extracting console assets..."

                # Extract to static directory
                if unzip -o "$temp_file" -d "$static_dir"; then
                    rm "$temp_file"
                    local final_size=$(du -sh "$static_dir" 2>/dev/null | cut -f1 || echo "unknown")
                    print_message $GREEN "✅ Console assets downloaded successfully ($final_size)"
                else
                    print_message $RED "❌ Failed to extract console assets"
                    rm -f "$temp_file"
                    return 1
                fi
            else
                print_message $RED "❌ Downloaded file is empty or invalid"
                rm -f "$temp_file"
                return 1
            fi
        else
            print_message $RED "❌ Failed to download console assets after 3 attempts"
            print_message $YELLOW "💡 Console assets are optional. Build will continue without them."
            rm -f "$temp_file"
        fi
    fi
}
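# Downloaded assets are unpacked into rustfs/static/ inside the workspace.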

# Verify binary functionality
verify_binary() {
    local binary_path="$1"

    # Check if binary exists
    if [ ! -f "$binary_path" ]; then
        print_message $RED "❌ Binary file not found: $binary_path"
        return 1
    fi

    # Check if binary is executable
    if [ ! -x "$binary_path" ]; then
        print_message $RED "❌ Binary is not executable: $binary_path"
        return 1
    fi

    # Check basic functionality - try to run help command
    print_message $YELLOW "  Testing --help command..."
    if ! "$binary_path" --help >/dev/null 2>&1; then
        print_message $RED "❌ Binary failed to run --help command"
        return 1
    fi

    # Check version command
    print_message $YELLOW "  Testing --version command..."
    if ! "$binary_path" --version >/dev/null 2>&1; then
        print_message $YELLOW "⚠️ Binary does not support --version command (this is optional)"
    fi

    # Try to get some basic info about the binary
    local file_info=$(file "$binary_path" 2>/dev/null || echo "unknown")
    print_message $YELLOW "  Binary info: $file_info"

    # Check if it's a valid ELF/Mach-O binary
    if command -v readelf >/dev/null 2>&1; then
        if readelf -h "$binary_path" >/dev/null 2>&1; then
            print_message $YELLOW "  ELF binary structure: valid"
        fi
    elif command -v otool >/dev/null 2>&1; then
        if otool -h "$binary_path" >/dev/null 2>&1; then
            print_message $YELLOW "  Mach-O binary structure: valid"
        fi
    fi

    return 0
}
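# Note: these checks execute the freshly built binary, which is why main() enables
# --skip-verification automatically when cross-compiling for another platform.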

# Build binary for current platform
build_binary() {
    local version=$(get_version)
    local output_file="${OUTPUT_DIR}/${PLATFORM}/${BINARY_NAME}"

    print_message $BLUE "🏗️ Building for platform: $PLATFORM"
    print_message $YELLOW "  Version: $version"
    print_message $YELLOW "  Output: $output_file"

    # Create output directory
    mkdir -p "${OUTPUT_DIR}/${PLATFORM}"

    # Simple build logic matching the working version (4fb4b353)
    # Force rebuild by touching build.rs
    touch rustfs/build.rs

    # Determine build command based on platform and cross-compilation needs
    local build_cmd=""
    local current_platform=$(detect_platform)

    print_message $BLUE "📦 Using working version build logic..."

    # Check if we need cross-compilation
    if [ "$PLATFORM" != "$current_platform" ]; then
        # Cross-compilation needed
        if [[ "$PLATFORM" == *"apple-darwin"* ]]; then
            print_message $RED "❌ macOS cross-compilation not supported"
            print_message $YELLOW "💡 macOS targets must be built natively on macOS runners"
            return 1
        elif [[ "$PLATFORM" == *"windows"* ]]; then
            # Use cross for Windows ARM64
            if ! command -v cross &> /dev/null; then
                print_message $YELLOW "📦 Installing cross tool..."
                cargo install cross --git https://github.com/cross-rs/cross
            fi
            build_cmd="cross build"
        else
            # Use zigbuild for Linux ARM64 (matches working version)
            if ! command -v cargo-zigbuild &> /dev/null; then
                print_message $RED "❌ cargo-zigbuild not found. Please install it first."
                return 1
            fi
            build_cmd="cargo zigbuild"
        fi
    else
        # Native compilation
        build_cmd="RUSTFLAGS=-Clink-arg=-lm cargo build"
    fi

    if [ "$BUILD_TYPE" = "release" ]; then
        build_cmd+=" --release"
    fi

    build_cmd+=" --target $PLATFORM"
    build_cmd+=" -p rustfs --bins"

    print_message $BLUE "📦 Executing: $build_cmd"

    # Execute build (this matches exactly what the working version does)
    if eval $build_cmd; then
        print_message $GREEN "✅ Successfully built for $PLATFORM"

        # Copy binary to output directory
        cp "target/${PLATFORM}/${BUILD_TYPE}/${BINARY_NAME}" "$output_file"

        # Generate checksums
        print_message $BLUE "🔐 Generating checksums..."
        (cd "${OUTPUT_DIR}/${PLATFORM}" && generate_sha256 "${BINARY_NAME}" "${BINARY_NAME}.sha256sum")

        # Verify binary functionality (if not skipped)
        if [ "$SKIP_VERIFICATION" = false ]; then
            print_message $BLUE "🔍 Verifying binary functionality..."
            if verify_binary "$output_file"; then
                print_message $GREEN "✅ Binary verification passed"
            else
                print_message $RED "❌ Binary verification failed"
                return 1
            fi
        else
            print_message $YELLOW "⚠️ Binary verification skipped by user request"
        fi

        # Sign binary if requested
        if [ "$SIGN" = true ]; then
            print_message $BLUE "✍️ Signing binary..."
            (cd "${OUTPUT_DIR}/${PLATFORM}" && minisign -S -m "${BINARY_NAME}" -s ~/.minisign/minisign.key)
        fi

        print_message $GREEN "✅ Build completed successfully"
    else
        print_message $RED "❌ Failed to build for $PLATFORM"
        return 1
    fi
}

# Main build function
build_rustfs() {
    local version=$(get_version)

    print_message $BLUE "🚀 Starting RustFS binary build process..."
    print_message $YELLOW "  Version: $version"
    print_message $YELLOW "  Platform: $PLATFORM"
    print_message $YELLOW "  Output Directory: $OUTPUT_DIR"
    print_message $YELLOW "  Build Type: $BUILD_TYPE"
    print_message $YELLOW "  Sign: $SIGN"
    print_message $YELLOW "  With Console: $WITH_CONSOLE"
    if [ "$WITH_CONSOLE" = true ]; then
        print_message $YELLOW "  Console Version: $CONSOLE_VERSION"
        print_message $YELLOW "  Force Console Update: $FORCE_CONSOLE_UPDATE"
    fi
    print_message $YELLOW "  Skip Verification: $SKIP_VERIFICATION"
    echo ""

    # Setup environment
    setup_rust_environment
    echo ""

    # Download console assets if requested
    download_console_assets
    echo ""

    # Build binary
    build_binary
    echo ""

    print_message $GREEN "🎉 Build process completed successfully!"

    # Show built binary
    local binary_file="${OUTPUT_DIR}/${PLATFORM}/${BINARY_NAME}"
    if [ -f "$binary_file" ]; then
        local size=$(ls -lh "$binary_file" | awk '{print $5}')
        print_message $BLUE "📋 Built binary: $binary_file ($size)"
    fi
}

# Parse command line arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        -o|--output-dir)
            OUTPUT_DIR="$2"
            shift 2
            ;;
        -b|--binary-name)
            BINARY_NAME="$2"
            shift 2
            ;;
        -p|--platform)
            CUSTOM_PLATFORM="$2"
            shift 2
            ;;
        --dev)
            BUILD_TYPE="debug"
            shift
            ;;
        --sign)
            SIGN=true
            shift
            ;;
        --with-console)
            WITH_CONSOLE=true
            shift
            ;;
        --no-console)
            WITH_CONSOLE=false
            shift
            ;;
        --force-console-update)
            FORCE_CONSOLE_UPDATE=true
            WITH_CONSOLE=true # Auto-enable download when forcing update
            shift
            ;;
        --console-version)
            CONSOLE_VERSION="$2"
            shift 2
            ;;
        --skip-verification)
            SKIP_VERIFICATION=true
            shift
            ;;
        -h|--help)
            usage
            exit 0
            ;;
        *)
            print_message $RED "❌ Unknown option: $1"
            usage
            exit 1
            ;;
    esac
done

# Main execution
main() {
    print_message $BLUE "🦀 RustFS Binary Build Script"
    echo ""

    # Check if we're in a Rust project
    if [ ! -f "Cargo.toml" ]; then
        print_message $RED "❌ No Cargo.toml found. Are you in a Rust project directory?"
        exit 1
    fi

    # Override platform if specified
    if [ -n "$CUSTOM_PLATFORM" ]; then
        PLATFORM="$CUSTOM_PLATFORM"
        print_message $YELLOW "🎯 Using specified platform: $PLATFORM"

        # Auto-enable skip verification for cross-compilation
        if [ "$PLATFORM" != "$(detect_platform)" ]; then
            SKIP_VERIFICATION=true
            print_message $YELLOW "⚠️ Cross-compilation detected, enabling --skip-verification"
        fi
    fi

    # Start build process
    build_rustfs
}

# Run main function
main

@@ -1,35 +0,0 @@
#!/bin/bash
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

clear

# Get the current platform architecture
ARCH=$(uname -m)

# Set the target directory according to the architecture
if [ "$ARCH" == "x86_64" ]; then
    TARGET_DIR="target/x86_64"
elif [ "$ARCH" == "aarch64" ]; then
    TARGET_DIR="target/arm64"
else
    TARGET_DIR="target/unknown"
fi

# Set CARGO_TARGET_DIR and build the project
CARGO_TARGET_DIR=$TARGET_DIR RUSTFLAGS="-C link-arg=-fuse-ld=mold" cargo build --release --package rustfs

echo -e "\a"
echo -e "\a"
echo -e "\a"
@@ -26,7 +26,6 @@ dioxus = { workspace = true, features = ["router"] }
dirs = { workspace = true }
hex = { workspace = true }
keyring = { workspace = true }
lazy_static = { workspace = true }
rfd = { workspace = true }
rust-embed = { workspace = true, features = ["interpolate-folder-path"] }
rust-i18n = { workspace = true }

@@ -37,7 +37,9 @@ copyright = "Copyright 2025 rustfs.com"

icon = [
    "assets/icons/icon.icns",
    "assets/icons/icon.ico"
    "assets/icons/icon.ico",
    "assets/icons/icon.png",
    "assets/icons/rustfs-icon.png",
]
#[bundle.macos]
#provider_short_name = "RustFs"

BIN  cli/rustfs-gui/assets/icon.png                   Normal file  (Size: 23 KiB)
BIN  cli/rustfs-gui/assets/icons/icon.png             Normal file  (Size: 23 KiB)
BIN  cli/rustfs-gui/assets/icons/icon_128x128.png     Normal file  (Size: 4.5 KiB)
BIN  cli/rustfs-gui/assets/icons/icon_128x128@2x.png  Normal file  (Size: 9.9 KiB)
BIN  cli/rustfs-gui/assets/icons/icon_16x16.png       Normal file  (Size: 498 B)
BIN  cli/rustfs-gui/assets/icons/icon_16x16@2x.png    Normal file  (Size: 969 B)
BIN  cli/rustfs-gui/assets/icons/icon_256x256.png     Normal file  (Size: 9.9 KiB)
BIN  cli/rustfs-gui/assets/icons/icon_256x256@2x.png  Normal file  (Size: 23 KiB)
BIN  cli/rustfs-gui/assets/icons/icon_32x32.png       Normal file  (Size: 969 B)
BIN  cli/rustfs-gui/assets/icons/icon_32x32@2x.png    Normal file  (Size: 2.0 KiB)
BIN  cli/rustfs-gui/assets/icons/icon_512x512.png     Normal file  (Size: 23 KiB)
BIN  cli/rustfs-gui/assets/icons/icon_512x512@2x.png  Normal file  (Size: 47 KiB)
BIN  cli/rustfs-gui/assets/icons/rustfs-icon.png      Normal file  (Size: 23 KiB)
BIN  cli/rustfs-gui/assets/rustfs-icon.png            Normal file  (Size: 23 KiB)
@@ -1,20 +1,15 @@
<svg width="1558" height="260" viewBox="0 0 1558 260" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_0_3)">
<path d="M1288.5 112.905H1159.75V58.4404H1262L1270 0L1074 0V260H1159.75V162.997H1296.95L1288.5 112.905Z" fill="#0196D0"/>
<path d="M1058.62 58.4404V0H789V58.4404H881.133V260H966.885V58.4404H1058.62Z" fill="#0196D0"/>
<path d="M521 179.102V0L454.973 15V161C454.973 181.124 452.084 193.146 443.5 202C434.916 211.257 419.318 214.5 400.5 214.5C381.022 214.5 366.744 210.854 357.5 202C348.916 193.548 346.357 175.721 346.357 156V0L280 15V175.48C280 208.08 290.234 229.412 309.712 241.486C329.19 253.56 358.903 260 400.5 260C440.447 260 470.159 253.56 490.297 241.486C510.766 229.412 521 208.483 521 179.102Z" fill="#0196D0"/>
<path d="M172.84 84.2813C172.84 97.7982 168.249 107.737 158.41 113.303C149.883 118.471 137.092 121.254 120.693 122.049V162.997C129.876 163.792 138.076 166.177 144.307 176.514L184.647 260H265L225.316 180.489C213.181 155.046 201.374 149.48 178.744 143.517C212.197 138.349 241.386 118.471 241.386 73.1499C241.386 53.2722 233.843 30.2141 218.756 17.8899C203.998 5.56575 183.991 0 159.394 0H120.693V48.5015H127.58C142.23 48.5015 153.6 51.4169 161.689 57.2477C169.233 62.8135 172.84 71.5596 172.84 84.2813ZM120.693 122.049C119.163 122.049 117.741 122.049 116.43 122.049H68.5457V48.5015H120.693V0H0V260H70.5137V162.997H110.526C113.806 162.997 117.741 162.997 120.693 162.997V122.049Z" fill="#0196D0"/>
<path d="M774 179.297C774 160.829 766.671 144.669 752.013 131.972C738.127 119.66 712.025 110.169 673.708 103.5C662.136 101.191 651.722 99.6523 643.235 97.3437C586.532 84.6467 594.632 52.7118 650.564 52.7118C680.651 52.7118 709.582 61.946 738.127 66.9478C742.37 67.7174 743.913 68.1021 744.298 68.1021L750.47 12.697C720.383 3.46282 684.895 0 654.036 0C616.619 0 587.689 6.54088 567.245 19.2379C546.801 31.9349 536 57.7137 536 82.3382C536 103.5 543.715 119.66 559.916 131.972C575.731 143.515 604.276 152.749 645.55 160.059C658.279 162.368 668.694 163.907 676.794 166.215C685.023 168.524 691.066 170.704 694.924 172.756C702.253 176.604 706.11 182.375 706.11 188.531C706.11 196.611 701.481 202.767 692.224 207C664.836 220.081 587.689 212.001 556.83 198.15L543.715 247.784C547.186 248.169 552.972 249.323 559.916 250.477C616.619 259.327 690.681 270.869 741.212 238.935C762.814 225.468 774 206.23 774 179.297Z" fill="#0196D0"/>
<path d="M1558 179.568C1558 160.383 1550.42 144.268 1535.67 131.99C1521.32 119.968 1494.34 110.631 1454.74 103.981C1442.38 101.679 1432.01 99.3764 1422.84 97.8416C1422.44 97.8416 1422.04 97.8416 1422.04 97.4579V112.422L1361.04 75.2038L1422.04 38.3692V52.9496C1424.7 52.9496 1427.49 52.9496 1430.41 52.9496C1461.51 52.9496 1491.42 62.5419 1521.32 67.5299C1525.31 67.9136 1526.9 67.9136 1527.3 67.9136L1533.68 12.6619C1502.98 3.83692 1465.9 0 1434 0C1395.33 0 1365.43 6.52277 1345.09 19.5683C1323.16 32.6139 1312 57.9376 1312 82.8776C1312 103.981 1320.37 120.096 1336.72 131.607C1353.46 143.885 1382.97 153.093 1425.23 160.383C1434 161.535 1441.18 162.686 1447.56 164.22L1448.36 150.791L1507.36 190.312L1445.57 224.844L1445.96 212.949C1409.68 215.635 1357.45 209.112 1333.53 197.985L1320.37 247.482C1323.56 248.249 1329.54 248.633 1336.72 250.551C1395.33 259.376 1471.88 270.887 1524.11 238.657C1546.84 225.611 1558 205.659 1558 179.568Z" fill="#0196D0"/>
</g>
<defs>
<clipPath id="clip0_0_3">
<rect width="1558" height="260" fill="white"/>
</clipPath>
</defs>
</svg>

Before: 3.5 KiB · After: 3.4 KiB
@@ -14,12 +14,12 @@

use crate::utils::RustFSConfig;
use dioxus::logger::tracing::{debug, error, info};
use lazy_static::lazy_static;
use rust_embed::RustEmbed;
use sha2::{Digest, Sha256};
use std::error::Error;
use std::path::{Path, PathBuf};
use std::process::Command as StdCommand;
use std::sync::LazyLock;
use std::time::Duration;
use tokio::fs;
use tokio::fs::File;
@@ -31,15 +31,13 @@ use tokio::sync::{Mutex, mpsc};
#[folder = "$CARGO_MANIFEST_DIR/embedded-rustfs/"]
struct Asset;

// Use `lazy_static` to cache the checksum of embedded resources
lazy_static! {
    static ref RUSTFS_HASH: Mutex<String> = {
        let rustfs_file = if cfg!(windows) { "rustfs.exe" } else { "rustfs" };
        let rustfs_data = Asset::get(rustfs_file).expect("RustFs binary not embedded");
        let hash = hex::encode(Sha256::digest(&rustfs_data.data));
        Mutex::new(hash)
    };
}
// Use `LazyLock` to cache the checksum of embedded resources
static RUSTFS_HASH: LazyLock<Mutex<String>> = LazyLock::new(|| {
    let rustfs_file = if cfg!(windows) { "rustfs.exe" } else { "rustfs" };
    let rustfs_data = Asset::get(rustfs_file).expect("RustFs binary not embedded");
    let hash = hex::encode(Sha256::digest(&rustfs_data.data));
    Mutex::new(hash)
});
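// `LazyLock` (stable since Rust 1.80) provides the same one-time initialization as
// `lazy_static!` without the extra dependency; the hash is computed on first access.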

/// Service command
/// This enum represents the commands that can be sent to the service manager
@@ -373,7 +371,7 @@ impl ServiceManager {
            StdCommand::new("taskkill")
                .arg("/F")
                .arg("/PID")
                .arg(&service_pid.to_string())
                .arg(service_pid.to_string())
                .output()?;
        }

42  crates/ahm/Cargo.toml  Normal file
@@ -0,0 +1,42 @@
[package]
name = "rustfs-ahm"
version.workspace = true
edition.workspace = true
authors = ["RustFS Team"]
license.workspace = true
description = "RustFS AHM (Automatic Health Management) Scanner"
repository.workspace = true
rust-version.workspace = true
homepage.workspace = true
documentation = "https://docs.rs/rustfs-ahm/latest/rustfs_ahm/"
keywords = ["RustFS", "AHM", "health-management", "scanner", "Minio"]
categories = ["web-programming", "development-tools", "filesystem"]

[dependencies]
rustfs-ecstore = { workspace = true }
rustfs-common = { workspace = true }
rustfs-filemeta = { workspace = true }
rustfs-madmin = { workspace = true }
rustfs-utils = { workspace = true }
tokio = { workspace = true, features = ["full"] }
tokio-util = { workspace = true }
tracing = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
thiserror = { workspace = true }
uuid = { workspace = true, features = ["v4", "serde"] }
anyhow = { workspace = true }
async-trait = { workspace = true }
futures = { workspace = true }
url = { workspace = true }
rustfs-lock = { workspace = true }
s3s = { workspace = true }
lazy_static = { workspace = true }
chrono = { workspace = true }

[dev-dependencies]
serde_json = { workspace = true }
serial_test = "3.2.0"
tracing-subscriber = { workspace = true }
walkdir = "2.5.0"
tempfile = { workspace = true }
94  crates/ahm/src/error.rs  Normal file
@@ -0,0 +1,94 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use thiserror::Error;

/// Unified error type for RustFS AHM/heal/scanner
#[derive(Debug, Error)]
pub enum Error {
    // General
    #[error("I/O error: {0}")]
    Io(#[from] std::io::Error),

    #[error("Storage error: {0}")]
    Storage(#[from] rustfs_ecstore::error::Error),

    #[error("Disk error: {0}")]
    Disk(#[from] rustfs_ecstore::disk::error::DiskError),

    #[error("Configuration error: {0}")]
    Config(String),

    #[error("Heal configuration error: {message}")]
    ConfigurationError { message: String },

    #[error("Other error: {0}")]
    Other(String),

    #[error(transparent)]
    Anyhow(#[from] anyhow::Error),

    // Scanner-related
    #[error("Scanner error: {0}")]
    Scanner(String),

    #[error("Metrics error: {0}")]
    Metrics(String),

    // Heal-related
    #[error("Heal task not found: {task_id}")]
    TaskNotFound { task_id: String },

    #[error("Heal task already exists: {task_id}")]
    TaskAlreadyExists { task_id: String },

    #[error("Heal manager is not running")]
    ManagerNotRunning,

    #[error("Heal task execution failed: {message}")]
    TaskExecutionFailed { message: String },

    #[error("Invalid heal type: {heal_type}")]
    InvalidHealType { heal_type: String },

    #[error("Heal task cancelled")]
    TaskCancelled,

    #[error("Heal task timeout")]
    TaskTimeout,

    #[error("Heal event processing failed: {message}")]
    EventProcessingFailed { message: String },

    #[error("Heal progress tracking failed: {message}")]
    ProgressTrackingFailed { message: String },
}

pub type Result<T, E = Error> = std::result::Result<T, E>;
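
// Crate-wide alias: functions can return `Result<T>` and use `?`, relying on the
// `#[from]` conversions above to lift io/storage/disk errors into `Error`.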

impl Error {
    pub fn other<E>(error: E) -> Self
    where
        E: Into<Box<dyn std::error::Error + Send + Sync>>,
    {
        Error::Other(error.into().to_string())
    }
}

// Optional: allow converting this error back into std::io::Error
impl From<Error> for std::io::Error {
    fn from(err: Error) -> Self {
        std::io::Error::other(err)
    }
}
233
crates/ahm/src/heal/channel.rs
Normal file
@@ -0,0 +1,233 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::error::Result;
|
||||
use crate::heal::{
|
||||
manager::HealManager,
|
||||
task::{HealOptions, HealPriority, HealRequest, HealType},
|
||||
};
|
||||
|
||||
use rustfs_common::heal_channel::{
|
||||
HealChannelCommand, HealChannelPriority, HealChannelReceiver, HealChannelRequest, HealChannelResponse, HealScanMode,
|
||||
};
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::mpsc;
|
||||
use tracing::{error, info};
|
||||
|
||||
/// Heal channel processor
|
||||
pub struct HealChannelProcessor {
|
||||
/// Heal manager
|
||||
heal_manager: Arc<HealManager>,
|
||||
/// Response sender
|
||||
response_sender: mpsc::UnboundedSender<HealChannelResponse>,
|
||||
/// Response receiver
|
||||
response_receiver: mpsc::UnboundedReceiver<HealChannelResponse>,
|
||||
}
|
||||
|
||||
impl HealChannelProcessor {
|
||||
/// Create new HealChannelProcessor
|
||||
pub fn new(heal_manager: Arc<HealManager>) -> Self {
|
||||
let (response_tx, response_rx) = mpsc::unbounded_channel();
|
||||
Self {
|
||||
heal_manager,
|
||||
response_sender: response_tx,
|
||||
response_receiver: response_rx,
|
||||
}
|
||||
}
|
||||
|
||||
    /// Start processing heal channel requests
    pub async fn start(&mut self, mut receiver: HealChannelReceiver) -> Result<()> {
        info!("Starting heal channel processor");

        loop {
            tokio::select! {
                command = receiver.recv() => {
                    match command {
                        Some(command) => {
                            if let Err(e) = self.process_command(command).await {
                                error!("Failed to process heal command: {}", e);
                            }
                        }
                        None => {
                            info!("Heal channel receiver closed, stopping processor");
                            break;
                        }
                    }
                }
                response = self.response_receiver.recv() => {
                    if let Some(response) = response {
                        // Handle response if needed
                        info!("Received heal response for request: {}", response.request_id);
                    }
                }
            }
        }

        info!("Heal channel processor stopped");
        Ok(())
    }

    /// Process heal command
    async fn process_command(&self, command: HealChannelCommand) -> Result<()> {
        match command {
            HealChannelCommand::Start(request) => self.process_start_request(request).await,
            HealChannelCommand::Query { heal_path, client_token } => self.process_query_request(heal_path, client_token).await,
            HealChannelCommand::Cancel { heal_path } => self.process_cancel_request(heal_path).await,
        }
    }

    /// Process start request
    async fn process_start_request(&self, request: HealChannelRequest) -> Result<()> {
        info!("Processing heal start request: {} for bucket: {}", request.id, request.bucket);

        // Convert channel request to heal request
        let heal_request = self.convert_to_heal_request(request.clone())?;

        // Submit to heal manager
        match self.heal_manager.submit_heal_request(heal_request).await {
            Ok(task_id) => {
                info!("Successfully submitted heal request: {} as task: {}", request.id, task_id);

                // Send success response
                let response = HealChannelResponse {
                    request_id: request.id,
                    success: true,
                    data: Some(format!("Task ID: {task_id}").into_bytes()),
                    error: None,
                };

                if let Err(e) = self.response_sender.send(response) {
                    error!("Failed to send heal response: {}", e);
                }
            }
            Err(e) => {
                error!("Failed to submit heal request: {} - {}", request.id, e);

                // Send error response
                let response = HealChannelResponse {
                    request_id: request.id,
                    success: false,
                    data: None,
                    error: Some(e.to_string()),
                };

                if let Err(e) = self.response_sender.send(response) {
                    error!("Failed to send heal error response: {}", e);
                }
            }
        }

        Ok(())
    }

    /// Process query request
    async fn process_query_request(&self, heal_path: String, client_token: String) -> Result<()> {
        info!("Processing heal query request for path: {}", heal_path);

        // TODO: Implement query logic based on heal_path and client_token
        // For now, return a placeholder response
        let response = HealChannelResponse {
            request_id: client_token,
            success: true,
            data: Some(format!("Query result for path: {heal_path}").into_bytes()),
            error: None,
        };

        if let Err(e) = self.response_sender.send(response) {
            error!("Failed to send query response: {}", e);
        }

        Ok(())
    }

    /// Process cancel request
    async fn process_cancel_request(&self, heal_path: String) -> Result<()> {
        info!("Processing heal cancel request for path: {}", heal_path);

        // TODO: Implement cancel logic based on heal_path
        // For now, return a placeholder response
        let response = HealChannelResponse {
            request_id: heal_path.clone(),
            success: true,
            data: Some(format!("Cancel request for path: {heal_path}").into_bytes()),
            error: None,
        };

        if let Err(e) = self.response_sender.send(response) {
            error!("Failed to send cancel response: {}", e);
        }

        Ok(())
    }
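
    // Resolution precedence in `convert_to_heal_request`: an explicit disk id maps
    // to an erasure-set heal, a non-empty object prefix maps to an object heal, and
    // everything else falls back to a whole-bucket heal.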
    /// Convert channel request to heal request
    fn convert_to_heal_request(&self, request: HealChannelRequest) -> Result<HealRequest> {
        let heal_type = if let Some(disk_id) = &request.disk {
            HealType::ErasureSet {
                buckets: vec![],
                set_disk_id: disk_id.clone(),
            }
        } else if let Some(prefix) = &request.object_prefix {
            if !prefix.is_empty() {
                HealType::Object {
                    bucket: request.bucket.clone(),
                    object: prefix.clone(),
                    version_id: None,
                }
            } else {
                HealType::Bucket {
                    bucket: request.bucket.clone(),
                }
            }
        } else {
            HealType::Bucket {
                bucket: request.bucket.clone(),
            }
        };

        let priority = match request.priority {
            HealChannelPriority::Low => HealPriority::Low,
            HealChannelPriority::Normal => HealPriority::Normal,
            HealChannelPriority::High => HealPriority::High,
            HealChannelPriority::Critical => HealPriority::Urgent,
        };

        // Build HealOptions with all available fields
        let mut options = HealOptions {
            scan_mode: request.scan_mode.unwrap_or(HealScanMode::Normal),
            remove_corrupted: request.remove_corrupted.unwrap_or(false),
            recreate_missing: request.recreate_missing.unwrap_or(true),
            update_parity: request.update_parity.unwrap_or(true),
            recursive: request.recursive.unwrap_or(false),
            dry_run: request.dry_run.unwrap_or(false),
            timeout: request.timeout_seconds.map(std::time::Duration::from_secs),
            pool_index: request.pool_index,
            set_index: request.set_index,
        };

        // Apply force_start overrides
        if request.force_start {
            options.remove_corrupted = true;
            options.recreate_missing = true;
            options.update_parity = true;
        }

        Ok(HealRequest::new(heal_type, options, priority))
    }

    /// Get response sender for external use
    pub fn get_response_sender(&self) -> mpsc::UnboundedSender<HealChannelResponse> {
        self.response_sender.clone()
    }
}
crates/ahm/src/heal/erasure_healer.rs (new file, 456 lines)
@@ -0,0 +1,456 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::error::{Error, Result};
use crate::heal::{
    progress::HealProgress,
    resume::{CheckpointManager, ResumeManager, ResumeUtils},
    storage::HealStorageAPI,
};
use futures::future::join_all;
use rustfs_common::heal_channel::{HealOpts, HealScanMode};
use rustfs_ecstore::disk::DiskStore;
use std::sync::Arc;
use tokio::sync::RwLock;
use tracing::{error, info, warn};

/// Erasure Set Healer
pub struct ErasureSetHealer {
    storage: Arc<dyn HealStorageAPI>,
    progress: Arc<RwLock<HealProgress>>,
    cancel_token: tokio_util::sync::CancellationToken,
    disk: DiskStore,
}

impl ErasureSetHealer {
    pub fn new(
        storage: Arc<dyn HealStorageAPI>,
        progress: Arc<RwLock<HealProgress>>,
        cancel_token: tokio_util::sync::CancellationToken,
        disk: DiskStore,
    ) -> Self {
        Self {
            storage,
            progress,
            cancel_token,
            disk,
        }
    }
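
    // A minimal usage sketch (hypothetical caller; `storage`, `progress`,
    // `cancel_token` and `disk` are constructed elsewhere):
    //
    //     let healer = ErasureSetHealer::new(storage, progress, cancel_token, disk);
    //     healer.heal_erasure_set(&buckets, "pool_0_set_0").await?;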
    /// execute erasure set heal with resume
    pub async fn heal_erasure_set(&self, buckets: &[String], set_disk_id: &str) -> Result<()> {
        info!("Starting erasure set heal for {} buckets on set disk {}", buckets.len(), set_disk_id);

        // 1. generate or get task id
        let task_id = self.get_or_create_task_id(set_disk_id).await?;

        // 2. initialize or resume resume state
        let (resume_manager, checkpoint_manager) = self.initialize_resume_state(&task_id, buckets).await?;

        // 3. execute heal with resume
        let result = self
            .execute_heal_with_resume(buckets, &resume_manager, &checkpoint_manager)
            .await;

        // 4. cleanup resume state
        if result.is_ok() {
            if let Err(e) = resume_manager.cleanup().await {
                warn!("Failed to cleanup resume state: {}", e);
            }
            if let Err(e) = checkpoint_manager.cleanup().await {
                warn!("Failed to cleanup checkpoint: {}", e);
            }
        }

        result
    }

    /// get or create task id
    async fn get_or_create_task_id(&self, _set_disk_id: &str) -> Result<String> {
        // check if there are resumable tasks
        let resumable_tasks = ResumeUtils::get_resumable_tasks(&self.disk).await?;

        for task_id in resumable_tasks {
            if ResumeUtils::can_resume_task(&self.disk, &task_id).await {
                info!("Found resumable task: {}", task_id);
                return Ok(task_id);
            }
        }

        // create new task id
        let task_id = ResumeUtils::generate_task_id();
        info!("Created new heal task: {}", task_id);
        Ok(task_id)
    }

    /// initialize or resume resume state
    async fn initialize_resume_state(&self, task_id: &str, buckets: &[String]) -> Result<(ResumeManager, CheckpointManager)> {
        // check if resume state exists
        if ResumeManager::has_resume_state(&self.disk, task_id).await {
            info!("Loading existing resume state for task: {}", task_id);

            let resume_manager = ResumeManager::load_from_disk(self.disk.clone(), task_id).await?;
            let checkpoint_manager = if CheckpointManager::has_checkpoint(&self.disk, task_id).await {
                CheckpointManager::load_from_disk(self.disk.clone(), task_id).await?
            } else {
                CheckpointManager::new(self.disk.clone(), task_id.to_string()).await?
            };

            Ok((resume_manager, checkpoint_manager))
        } else {
            info!("Creating new resume state for task: {}", task_id);

            let resume_manager =
                ResumeManager::new(self.disk.clone(), task_id.to_string(), "erasure_set".to_string(), buckets.to_vec()).await?;

            let checkpoint_manager = CheckpointManager::new(self.disk.clone(), task_id.to_string()).await?;

            Ok((resume_manager, checkpoint_manager))
        }
    }

    /// execute heal with resume
    async fn execute_heal_with_resume(
        &self,
        buckets: &[String],
        resume_manager: &ResumeManager,
        checkpoint_manager: &CheckpointManager,
    ) -> Result<()> {
        // 1. get current state
        let state = resume_manager.get_state().await;
        let checkpoint = checkpoint_manager.get_checkpoint().await;

        info!(
            "Resuming from bucket {} object {}",
            checkpoint.current_bucket_index, checkpoint.current_object_index
        );

        // 2. initialize progress
        self.initialize_progress(buckets, &state).await;

        // 3. continue from checkpoint
        let current_bucket_index = checkpoint.current_bucket_index;
        let mut current_object_index = checkpoint.current_object_index;

        let mut processed_objects = state.processed_objects;
        let mut successful_objects = state.successful_objects;
        let mut failed_objects = state.failed_objects;
        let mut skipped_objects = state.skipped_objects;

        // 4. process remaining buckets
        for (bucket_idx, bucket) in buckets.iter().enumerate().skip(current_bucket_index) {
            // check if completed
            if state.completed_buckets.contains(bucket) {
                continue;
            }

            // update current bucket
            resume_manager.set_current_item(Some(bucket.clone()), None).await?;

            // process objects in bucket
            let bucket_result = self
                .heal_bucket_with_resume(
                    bucket,
                    &mut current_object_index,
                    &mut processed_objects,
                    &mut successful_objects,
                    &mut failed_objects,
                    &mut skipped_objects,
                    resume_manager,
                    checkpoint_manager,
                )
                .await;

            // update checkpoint position
            checkpoint_manager.update_position(bucket_idx, current_object_index).await?;

            // update progress
            resume_manager
                .update_progress(processed_objects, successful_objects, failed_objects, skipped_objects)
                .await?;

            // check cancel status
            if self.cancel_token.is_cancelled() {
                info!("Heal task cancelled");
                return Err(Error::TaskCancelled);
            }

            // process bucket result
            match bucket_result {
                Ok(_) => {
                    resume_manager.complete_bucket(bucket).await?;
                    info!("Completed heal for bucket: {}", bucket);
                }
                Err(e) => {
                    error!("Failed to heal bucket {}: {}", bucket, e);
                    // continue to next bucket, do not interrupt the whole process
                }
            }

            // reset object index
            current_object_index = 0;
        }

        // 5. mark task completed
        resume_manager.mark_completed().await?;

        info!("Erasure set heal completed successfully");
        Ok(())
    }

    /// heal single bucket with resume
    #[allow(clippy::too_many_arguments)]
    async fn heal_bucket_with_resume(
        &self,
        bucket: &str,
        current_object_index: &mut usize,
        processed_objects: &mut u64,
        successful_objects: &mut u64,
        failed_objects: &mut u64,
        _skipped_objects: &mut u64,
        resume_manager: &ResumeManager,
        checkpoint_manager: &CheckpointManager,
    ) -> Result<()> {
        info!("Starting heal for bucket: {} from object index {}", bucket, current_object_index);

        // 1. get bucket info
        let _bucket_info = match self.storage.get_bucket_info(bucket).await? {
            Some(info) => info,
            None => {
                warn!("Bucket {} not found, skipping", bucket);
                return Ok(());
            }
        };

        // 2. get objects to heal
        let objects = self.storage.list_objects_for_heal(bucket, "").await?;

        // 3. continue from checkpoint
        for (obj_idx, object) in objects.iter().enumerate().skip(*current_object_index) {
            // check if already processed
            if checkpoint_manager.get_checkpoint().await.processed_objects.contains(object) {
                continue;
            }

            // update current object
            resume_manager
                .set_current_item(Some(bucket.to_string()), Some(object.clone()))
                .await?;

            // heal object
            let heal_opts = HealOpts {
                scan_mode: HealScanMode::Normal,
                remove: true,
                recreate: true,
                ..Default::default()
            };

            match self.storage.heal_object(bucket, object, None, &heal_opts).await {
                Ok((_result, None)) => {
                    *successful_objects += 1;
                    checkpoint_manager.add_processed_object(object.clone()).await?;
                    info!("Successfully healed object {}/{}", bucket, object);
                }
                Ok((_, Some(err))) => {
                    *failed_objects += 1;
                    checkpoint_manager.add_failed_object(object.clone()).await?;
                    warn!("Failed to heal object {}/{}: {}", bucket, object, err);
                }
                Err(err) => {
                    *failed_objects += 1;
                    checkpoint_manager.add_failed_object(object.clone()).await?;
                    warn!("Error healing object {}/{}: {}", bucket, object, err);
                }
            }

            *processed_objects += 1;
            *current_object_index = obj_idx + 1;

            // check cancel status
            if self.cancel_token.is_cancelled() {
                info!("Heal task cancelled during object processing");
                return Err(Error::TaskCancelled);
            }

            // save checkpoint periodically (every 100 objects) so a crash replays at
            // most the last 100 objects; the bucket index is not tracked here, so only
            // the object position is refreshed
            if obj_idx % 100 == 0 {
                checkpoint_manager.update_position(0, *current_object_index).await?;
            }
        }

        Ok(())
    }

    /// initialize progress tracking
    async fn initialize_progress(&self, _buckets: &[String], state: &crate::heal::resume::ResumeState) {
        let mut progress = self.progress.write().await;
        progress.objects_scanned = state.total_objects;
        progress.objects_healed = state.successful_objects;
        progress.objects_failed = state.failed_objects;
        progress.bytes_processed = 0; // set to 0 for now, can be extended later
        progress.set_current_object(state.current_object.clone());
    }

    /// heal all buckets concurrently
    #[allow(dead_code)]
    async fn heal_buckets_concurrently(&self, buckets: &[String]) -> Vec<Result<()>> {
        // use semaphore to control concurrency, avoid too many concurrent healings
        let semaphore = Arc::new(tokio::sync::Semaphore::new(4)); // max 4 concurrent healings

        let heal_futures = buckets.iter().map(|bucket| {
            let bucket = bucket.clone();
            let storage = self.storage.clone();
            let progress = self.progress.clone();
            let semaphore = semaphore.clone();
            let cancel_token = self.cancel_token.clone();

            async move {
                let _permit = semaphore.acquire().await.unwrap();

                if cancel_token.is_cancelled() {
                    return Err(Error::TaskCancelled);
                }

                Self::heal_single_bucket(&storage, &bucket, &progress).await
            }
        });

        // use join_all to process concurrently
        join_all(heal_futures).await
    }

    /// heal single bucket
    #[allow(dead_code)]
    async fn heal_single_bucket(
        storage: &Arc<dyn HealStorageAPI>,
        bucket: &str,
        progress: &Arc<RwLock<HealProgress>>,
    ) -> Result<()> {
        info!("Starting heal for bucket: {}", bucket);

        // 1. get bucket info
        let _bucket_info = match storage.get_bucket_info(bucket).await? {
            Some(info) => info,
            None => {
                warn!("Bucket {} not found, skipping", bucket);
                return Ok(());
            }
        };

        // 2. get objects to heal
        let objects = storage.list_objects_for_heal(bucket, "").await?;

        // 3. update progress
        {
            let mut p = progress.write().await;
            p.objects_scanned += objects.len() as u64;
        }

        // 4. heal objects concurrently
        let heal_opts = HealOpts {
            scan_mode: HealScanMode::Normal,
            remove: true,   // remove corrupted data
            recreate: true, // recreate missing data
            ..Default::default()
        };

        let object_results = Self::heal_objects_concurrently(storage, bucket, &objects, &heal_opts, progress).await;

        // 5. count results
        let (success_count, failure_count) = object_results
            .into_iter()
            .fold((0, 0), |(success, failure), result| match result {
                Ok(_) => (success + 1, failure),
                Err(_) => (success, failure + 1),
            });

        // 6. update progress
        {
            let mut p = progress.write().await;
            p.objects_healed += success_count;
            p.objects_failed += failure_count;
            p.set_current_object(Some(format!("completed bucket: {bucket}")));
        }

        info!(
            "Completed heal for bucket {}: {} success, {} failures",
            bucket, success_count, failure_count
        );

        Ok(())
    }

    /// heal objects concurrently
    #[allow(dead_code)]
    async fn heal_objects_concurrently(
        storage: &Arc<dyn HealStorageAPI>,
        bucket: &str,
        objects: &[String],
        heal_opts: &HealOpts,
        _progress: &Arc<RwLock<HealProgress>>,
    ) -> Vec<Result<()>> {
        // use semaphore to control object healing concurrency
        let semaphore = Arc::new(tokio::sync::Semaphore::new(8)); // max 8 concurrent object healings

        let heal_futures = objects.iter().map(|object| {
            let object = object.clone();
            let bucket = bucket.to_string();
            let storage = storage.clone();
            let heal_opts = *heal_opts;
            let semaphore = semaphore.clone();

            async move {
                let _permit = semaphore.acquire().await.unwrap();

                match storage.heal_object(&bucket, &object, None, &heal_opts).await {
                    Ok((_result, None)) => {
                        info!("Successfully healed object {}/{}", bucket, object);
                        Ok(())
                    }
                    Ok((_, Some(err))) => {
                        warn!("Failed to heal object {}/{}: {}", bucket, object, err);
                        Err(Error::other(err))
                    }
                    Err(err) => {
                        warn!("Error healing object {}/{}: {}", bucket, object, err);
                        Err(err)
                    }
                }
            }
        });

        join_all(heal_futures).await
    }

    /// process results
    #[allow(dead_code)]
    async fn process_results(&self, results: Vec<Result<()>>) -> Result<()> {
        let (success_count, failure_count): (usize, usize) =
            results.into_iter().fold((0, 0), |(success, failure), result| match result {
                Ok(_) => (success + 1, failure),
                Err(_) => (success, failure + 1),
            });

        let total = success_count + failure_count;

        info!("Erasure set heal completed: {}/{} buckets successful", success_count, total);

        if failure_count > 0 {
            warn!("{} buckets failed to heal", failure_count);
            return Err(Error::other(format!("{failure_count} buckets failed to heal")));
        }

        Ok(())
    }
}
crates/ahm/src/heal/event.rs (new file, 359 lines)
@@ -0,0 +1,359 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::heal::task::{HealOptions, HealPriority, HealRequest, HealType};
use rustfs_ecstore::disk::endpoint::Endpoint;
use serde::{Deserialize, Serialize};
use std::time::SystemTime;

/// Corruption type
#[derive(Debug, Clone, Serialize, Deserialize)]
pub enum CorruptionType {
    /// Data corruption
    DataCorruption,
    /// Metadata corruption
    MetadataCorruption,
    /// Partial corruption
    PartialCorruption,
    /// Complete corruption
    CompleteCorruption,
}

/// Severity level
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub enum Severity {
    /// Low severity
    Low = 0,
    /// Medium severity
    Medium = 1,
    /// High severity
    High = 2,
    /// Critical severity
    Critical = 3,
}

/// Heal event
#[derive(Debug, Clone)]
pub enum HealEvent {
    /// Object corruption event
    ObjectCorruption {
        bucket: String,
        object: String,
        version_id: Option<String>,
        corruption_type: CorruptionType,
        severity: Severity,
    },
    /// Object missing event
    ObjectMissing {
        bucket: String,
        object: String,
        version_id: Option<String>,
        expected_locations: Vec<usize>,
        available_locations: Vec<usize>,
    },
    /// Metadata corruption event
    MetadataCorruption {
        bucket: String,
        object: String,
        corruption_type: CorruptionType,
    },
    /// Disk status change event
    DiskStatusChange {
        endpoint: Endpoint,
        old_status: String,
        new_status: String,
    },
    /// EC decode failure event
    ECDecodeFailure {
        bucket: String,
        object: String,
        version_id: Option<String>,
        missing_shards: Vec<usize>,
        available_shards: Vec<usize>,
    },
    /// Checksum mismatch event
    ChecksumMismatch {
        bucket: String,
        object: String,
        version_id: Option<String>,
        expected_checksum: String,
        actual_checksum: String,
    },
    /// Bucket metadata corruption event
    BucketMetadataCorruption {
        bucket: String,
        corruption_type: CorruptionType,
    },
    /// MRF metadata corruption event
    MRFMetadataCorruption {
        meta_path: String,
        corruption_type: CorruptionType,
    },
}

impl HealEvent {
    /// Convert HealEvent to HealRequest
    pub fn to_heal_request(&self) -> HealRequest {
        match self {
            HealEvent::ObjectCorruption {
                bucket,
                object,
                version_id,
                severity,
                ..
            } => HealRequest::new(
                HealType::Object {
                    bucket: bucket.clone(),
                    object: object.clone(),
                    version_id: version_id.clone(),
                },
                HealOptions::default(),
                Self::severity_to_priority(severity),
            ),
            HealEvent::ObjectMissing {
                bucket,
                object,
                version_id,
                ..
            } => HealRequest::new(
                HealType::Object {
                    bucket: bucket.clone(),
                    object: object.clone(),
                    version_id: version_id.clone(),
                },
                HealOptions::default(),
                HealPriority::High,
            ),
            HealEvent::MetadataCorruption { bucket, object, .. } => HealRequest::new(
                HealType::Metadata {
                    bucket: bucket.clone(),
                    object: object.clone(),
                },
                HealOptions::default(),
                HealPriority::High,
            ),
            HealEvent::DiskStatusChange { endpoint, .. } => {
                // Convert disk status change to erasure set heal
                // Note: This requires access to storage to get bucket list, which is not available here
                // The actual bucket list will need to be provided by the caller or retrieved differently
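                // NOTE (assumption): the "{pool}_{set}" id built below should match the
                // convention used to identify an erasure set elsewhere; HealManager's
                // auto disk scanner names sets "pool_{pool}_set_{set}", so keep the two in sync.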
                HealRequest::new(
                    HealType::ErasureSet {
                        buckets: vec![], // Empty bucket list - caller should populate this
                        set_disk_id: format!("{}_{}", endpoint.pool_idx, endpoint.set_idx),
                    },
                    HealOptions::default(),
                    HealPriority::High,
                )
            }
            HealEvent::ECDecodeFailure {
                bucket,
                object,
                version_id,
                ..
            } => HealRequest::new(
                HealType::ECDecode {
                    bucket: bucket.clone(),
                    object: object.clone(),
                    version_id: version_id.clone(),
                },
                HealOptions::default(),
                HealPriority::Urgent,
            ),
            HealEvent::ChecksumMismatch {
                bucket,
                object,
                version_id,
                ..
            } => HealRequest::new(
                HealType::Object {
                    bucket: bucket.clone(),
                    object: object.clone(),
                    version_id: version_id.clone(),
                },
                HealOptions::default(),
                HealPriority::High,
            ),
            HealEvent::BucketMetadataCorruption { bucket, .. } => {
                HealRequest::new(HealType::Bucket { bucket: bucket.clone() }, HealOptions::default(), HealPriority::High)
            }
            HealEvent::MRFMetadataCorruption { meta_path, .. } => HealRequest::new(
                HealType::MRF {
                    meta_path: meta_path.clone(),
                },
                HealOptions::default(),
                HealPriority::High,
            ),
        }
    }

    /// Convert severity to priority
    fn severity_to_priority(severity: &Severity) -> HealPriority {
        match severity {
            Severity::Low => HealPriority::Low,
            Severity::Medium => HealPriority::Normal,
            Severity::High => HealPriority::High,
            Severity::Critical => HealPriority::Urgent,
        }
    }

    /// Get event description
    pub fn description(&self) -> String {
        match self {
            HealEvent::ObjectCorruption {
                bucket,
                object,
                corruption_type,
                ..
            } => {
                format!("Object corruption detected: {bucket}/{object} - {corruption_type:?}")
            }
            HealEvent::ObjectMissing { bucket, object, .. } => {
                format!("Object missing: {bucket}/{object}")
            }
            HealEvent::MetadataCorruption {
                bucket,
                object,
                corruption_type,
                ..
            } => {
                format!("Metadata corruption: {bucket}/{object} - {corruption_type:?}")
            }
            HealEvent::DiskStatusChange {
                endpoint,
                old_status,
                new_status,
                ..
            } => {
                format!("Disk status changed: {endpoint:?} {old_status} -> {new_status}")
            }
            HealEvent::ECDecodeFailure {
                bucket,
                object,
                missing_shards,
                ..
            } => {
                format!("EC decode failure: {bucket}/{object} - missing shards: {missing_shards:?}")
            }
            HealEvent::ChecksumMismatch {
                bucket,
                object,
                expected_checksum,
                actual_checksum,
                ..
            } => {
                format!("Checksum mismatch: {bucket}/{object} - expected: {expected_checksum}, actual: {actual_checksum}")
            }
            HealEvent::BucketMetadataCorruption {
                bucket, corruption_type, ..
            } => {
                format!("Bucket metadata corruption: {bucket} - {corruption_type:?}")
            }
            HealEvent::MRFMetadataCorruption {
                meta_path,
                corruption_type,
                ..
            } => {
                format!("MRF metadata corruption: {meta_path} - {corruption_type:?}")
            }
        }
    }

    /// Get event severity
    pub fn severity(&self) -> Severity {
        match self {
            HealEvent::ObjectCorruption { severity, .. } => severity.clone(),
            HealEvent::ObjectMissing { .. } => Severity::High,
            HealEvent::MetadataCorruption { .. } => Severity::High,
            HealEvent::DiskStatusChange { .. } => Severity::High,
            HealEvent::ECDecodeFailure { .. } => Severity::Critical,
            HealEvent::ChecksumMismatch { .. } => Severity::High,
            HealEvent::BucketMetadataCorruption { .. } => Severity::High,
            HealEvent::MRFMetadataCorruption { .. } => Severity::High,
        }
    }

    /// Get event timestamp (events carry no stored time, so this returns the current time)
    pub fn timestamp(&self) -> SystemTime {
        SystemTime::now()
    }
}
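
// A hedged end-to-end sketch (hypothetical variables; `heal_manager` is a
// `HealManager` from the sibling `manager` module):
//
//     let request = event.to_heal_request();
//     let task_id = heal_manager.submit_heal_request(request).await?;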
/// Heal event handler
pub struct HealEventHandler {
    /// Event queue
    events: Vec<HealEvent>,
    /// Maximum number of events
    max_events: usize,
}

impl HealEventHandler {
    pub fn new(max_events: usize) -> Self {
        Self {
            events: Vec::new(),
            max_events,
        }
    }

    /// Add event
    pub fn add_event(&mut self, event: HealEvent) {
        if self.events.len() >= self.max_events {
            // Remove oldest event
            self.events.remove(0);
        }
        self.events.push(event);
    }

    /// Get all events
    pub fn get_events(&self) -> &[HealEvent] {
        &self.events
    }

    /// Clear events
    pub fn clear_events(&mut self) {
        self.events.clear();
    }

    /// Get event count
    pub fn event_count(&self) -> usize {
        self.events.len()
    }

    /// Filter events by severity
    pub fn filter_by_severity(&self, min_severity: Severity) -> Vec<&HealEvent> {
        self.events.iter().filter(|event| event.severity() >= min_severity).collect()
    }

    /// Filter events by type
    pub fn filter_by_type(&self, event_type: &str) -> Vec<&HealEvent> {
        self.events
            .iter()
            .filter(|event| match event {
                HealEvent::ObjectCorruption { .. } => event_type == "ObjectCorruption",
                HealEvent::ObjectMissing { .. } => event_type == "ObjectMissing",
                HealEvent::MetadataCorruption { .. } => event_type == "MetadataCorruption",
                HealEvent::DiskStatusChange { .. } => event_type == "DiskStatusChange",
                HealEvent::ECDecodeFailure { .. } => event_type == "ECDecodeFailure",
                HealEvent::ChecksumMismatch { .. } => event_type == "ChecksumMismatch",
                HealEvent::BucketMetadataCorruption { .. } => event_type == "BucketMetadataCorruption",
                HealEvent::MRFMetadataCorruption { .. } => event_type == "MRFMetadataCorruption",
            })
            .collect()
    }
}

impl Default for HealEventHandler {
    fn default() -> Self {
        Self::new(1000)
    }
}
crates/ahm/src/heal/manager.rs (new file, 422 lines)
@@ -0,0 +1,422 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::error::{Error, Result};
use crate::heal::{
    progress::{HealProgress, HealStatistics},
    storage::HealStorageAPI,
    task::{HealOptions, HealPriority, HealRequest, HealTask, HealTaskStatus, HealType},
};
use rustfs_ecstore::disk::DiskAPI;
use rustfs_ecstore::disk::error::DiskError;
use rustfs_ecstore::global::GLOBAL_LOCAL_DISK_MAP;
use std::{
    collections::{HashMap, VecDeque},
    sync::Arc,
    time::{Duration, SystemTime},
};
use tokio::{
    sync::{Mutex, RwLock},
    time::interval,
};
use tokio_util::sync::CancellationToken;
use tracing::{error, info, warn};

/// Heal config
#[derive(Debug, Clone)]
pub struct HealConfig {
    /// Whether to enable auto heal
    pub enable_auto_heal: bool,
    /// Heal interval
    pub heal_interval: Duration,
    /// Maximum concurrent heal tasks
    pub max_concurrent_heals: usize,
    /// Task timeout
    pub task_timeout: Duration,
    /// Queue size
    pub queue_size: usize,
}
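
// Overriding the defaults is a plain struct update (values here are illustrative):
//
//     let cfg = HealConfig { heal_interval: Duration::from_secs(60), ..Default::default() };
//     let manager = HealManager::new(storage, Some(cfg));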
impl Default for HealConfig {
    fn default() -> Self {
        Self {
            enable_auto_heal: true,
            heal_interval: Duration::from_secs(10), // 10 seconds
            max_concurrent_heals: 4,
            task_timeout: Duration::from_secs(300), // 5 minutes
            queue_size: 1000,
        }
    }
}

/// Heal state
#[derive(Debug, Default)]
pub struct HealState {
    /// Whether running
    pub is_running: bool,
    /// Current heal cycle
    pub current_cycle: u64,
    /// Last heal time
    pub last_heal_time: Option<SystemTime>,
    /// Total healed objects
    pub total_healed_objects: u64,
    /// Total heal failures
    pub total_heal_failures: u64,
    /// Current active heal tasks
    pub active_heal_count: usize,
}

/// Heal manager
pub struct HealManager {
    /// Heal config
    config: Arc<RwLock<HealConfig>>,
    /// Heal state
    state: Arc<RwLock<HealState>>,
    /// Active heal tasks
    active_heals: Arc<Mutex<HashMap<String, Arc<HealTask>>>>,
    /// Heal queue
    heal_queue: Arc<Mutex<VecDeque<HealRequest>>>,
    /// Storage layer interface
    storage: Arc<dyn HealStorageAPI>,
    /// Cancel token
    cancel_token: CancellationToken,
    /// Statistics
    statistics: Arc<RwLock<HealStatistics>>,
}

impl HealManager {
    /// Create new HealManager
    pub fn new(storage: Arc<dyn HealStorageAPI>, config: Option<HealConfig>) -> Self {
        let config = config.unwrap_or_default();
        Self {
            config: Arc::new(RwLock::new(config)),
            state: Arc::new(RwLock::new(HealState::default())),
            active_heals: Arc::new(Mutex::new(HashMap::new())),
            heal_queue: Arc::new(Mutex::new(VecDeque::new())),
            storage,
            cancel_token: CancellationToken::new(),
            statistics: Arc::new(RwLock::new(HealStatistics::new())),
        }
    }

    /// Start HealManager
    pub async fn start(&self) -> Result<()> {
        let mut state = self.state.write().await;
        if state.is_running {
            warn!("HealManager is already running");
            return Ok(());
        }
        state.is_running = true;
        drop(state);

        info!("Starting HealManager");

        // start scheduler
        self.start_scheduler().await?;

        // start auto disk scanner
        self.start_auto_disk_scanner().await?;

        info!("HealManager started successfully");
        Ok(())
    }

    /// Stop HealManager
    pub async fn stop(&self) -> Result<()> {
        info!("Stopping HealManager");

        // cancel all tasks
        self.cancel_token.cancel();

        // wait for all tasks to complete
        let mut active_heals = self.active_heals.lock().await;
        for task in active_heals.values() {
            if let Err(e) = task.cancel().await {
                warn!("Failed to cancel task {}: {}", task.id, e);
            }
        }
        active_heals.clear();

        // update state
        let mut state = self.state.write().await;
        state.is_running = false;

        info!("HealManager stopped successfully");
        Ok(())
    }
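
    // Submission sketch (hypothetical `storage`; the request types come from the
    // `task` module):
    //
    //     let manager = HealManager::new(storage, None);
    //     manager.start().await?;
    //     let task_id = manager
    //         .submit_heal_request(HealRequest::new(
    //             HealType::Bucket { bucket: "my-bucket".into() },
    //             HealOptions::default(),
    //             HealPriority::Normal,
    //         ))
    //         .await?;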
    /// Submit heal request
    pub async fn submit_heal_request(&self, request: HealRequest) -> Result<String> {
        let config = self.config.read().await;
        let mut queue = self.heal_queue.lock().await;

        if queue.len() >= config.queue_size {
            return Err(Error::ConfigurationError {
                message: "Heal queue is full".to_string(),
            });
        }

        let request_id = request.id.clone();
        queue.push_back(request);
        drop(queue);

        info!("Submitted heal request: {}", request_id);
        Ok(request_id)
    }

    /// Get task status
    pub async fn get_task_status(&self, task_id: &str) -> Result<HealTaskStatus> {
        let active_heals = self.active_heals.lock().await;
        if let Some(task) = active_heals.get(task_id) {
            Ok(task.get_status().await)
        } else {
            Err(Error::TaskNotFound {
                task_id: task_id.to_string(),
            })
        }
    }

    /// Get active tasks count
    pub async fn get_active_tasks_count(&self) -> usize {
        self.active_heals.lock().await.len()
    }

    /// Get task progress
    pub async fn get_task_progress(&self, task_id: &str) -> Result<HealProgress> {
        let active_heals = self.active_heals.lock().await;
        if let Some(task) = active_heals.get(task_id) {
            Ok(task.get_progress().await)
        } else {
            Err(Error::TaskNotFound {
                task_id: task_id.to_string(),
            })
        }
    }

    /// Cancel task
    pub async fn cancel_task(&self, task_id: &str) -> Result<()> {
        let mut active_heals = self.active_heals.lock().await;
        if let Some(task) = active_heals.get(task_id) {
            task.cancel().await?;
            active_heals.remove(task_id);
            info!("Cancelled heal task: {}", task_id);
            Ok(())
        } else {
            Err(Error::TaskNotFound {
                task_id: task_id.to_string(),
            })
        }
    }

    /// Get statistics
    pub async fn get_statistics(&self) -> HealStatistics {
        self.statistics.read().await.clone()
    }

    /// Get active task count
    pub async fn get_active_task_count(&self) -> usize {
        let active_heals = self.active_heals.lock().await;
        active_heals.len()
    }

    /// Get queue length
    pub async fn get_queue_length(&self) -> usize {
        let queue = self.heal_queue.lock().await;
        queue.len()
    }

    /// Start scheduler
    async fn start_scheduler(&self) -> Result<()> {
        let config = self.config.clone();
        let heal_queue = self.heal_queue.clone();
        let active_heals = self.active_heals.clone();
        let cancel_token = self.cancel_token.clone();
        let statistics = self.statistics.clone();
        let storage = self.storage.clone();

        tokio::spawn(async move {
            let mut interval = interval(config.read().await.heal_interval);

            loop {
                tokio::select! {
                    _ = cancel_token.cancelled() => {
                        info!("Heal scheduler received shutdown signal");
                        break;
                    }
                    _ = interval.tick() => {
                        Self::process_heal_queue(&heal_queue, &active_heals, &config, &statistics, &storage).await;
                    }
                }
            }
        });

        Ok(())
    }

    /// Start background task to auto scan local disks and enqueue erasure set heal requests
    async fn start_auto_disk_scanner(&self) -> Result<()> {
        let config = self.config.clone();
        let heal_queue = self.heal_queue.clone();
        let active_heals = self.active_heals.clone();
        let cancel_token = self.cancel_token.clone();
        let storage = self.storage.clone();

        tokio::spawn(async move {
            let mut interval = interval(config.read().await.heal_interval);

            loop {
                tokio::select! {
                    _ = cancel_token.cancelled() => {
                        info!("Auto disk scanner received shutdown signal");
                        break;
                    }
                    _ = interval.tick() => {
                        // Build list of endpoints that need healing
                        let mut endpoints = Vec::new();
                        for (_, disk_opt) in GLOBAL_LOCAL_DISK_MAP.read().await.iter() {
                            if let Some(disk) = disk_opt {
                                // detect unformatted disk via get_disk_id()
                                if let Err(err) = disk.get_disk_id().await {
                                    if err == DiskError::UnformattedDisk {
                                        endpoints.push(disk.endpoint());
                                        continue;
                                    }
                                }
                            }
                        }

                        if endpoints.is_empty() {
                            continue;
                        }

                        // Get bucket list for erasure set healing
                        let buckets = match storage.list_buckets().await {
                            Ok(buckets) => buckets.iter().map(|b| b.name.clone()).collect::<Vec<String>>(),
                            Err(e) => {
                                error!("Failed to get bucket list for auto healing: {}", e);
                                continue;
                            }
                        };

                        // Create erasure set heal requests for each endpoint
                        for ep in endpoints {
                            // the id used for dedup below must match the id we enqueue with
                            let set_disk_id = format!("pool_{}_set_{}", ep.pool_idx, ep.set_idx);

                            // skip if already queued or healing
                            let mut skip = false;
                            {
                                let queue = heal_queue.lock().await;
                                if queue.iter().any(|req| matches!(&req.heal_type, crate::heal::task::HealType::ErasureSet { set_disk_id: id, .. } if id == &set_disk_id)) {
                                    skip = true;
                                }
                            }
                            if !skip {
                                let active = active_heals.lock().await;
                                if active.values().any(|task| matches!(&task.heal_type, crate::heal::task::HealType::ErasureSet { set_disk_id: id, .. } if id == &set_disk_id)) {
                                    skip = true;
                                }
                            }

                            if skip {
                                continue;
                            }

                            // enqueue erasure set heal request for this disk
                            let req = HealRequest::new(
                                HealType::ErasureSet {
                                    buckets: buckets.clone(),
                                    set_disk_id: set_disk_id.clone()
                                },
                                HealOptions::default(),
                                HealPriority::Normal,
                            );
                            let mut queue = heal_queue.lock().await;
                            queue.push_back(req);
                            info!("Enqueued auto erasure set heal for endpoint: {} (set_disk_id: {})", ep, set_disk_id);
                        }
                    }
                }
            }
        });
        Ok(())
    }
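
    // NOTE: `process_heal_queue` below dequeues at most one request per
    // `heal_interval` tick, so sustained throughput is bounded by the tick rate
    // even when `max_concurrent_heals` would allow more tasks in flight.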
    /// Process heal queue
    async fn process_heal_queue(
        heal_queue: &Arc<Mutex<VecDeque<HealRequest>>>,
        active_heals: &Arc<Mutex<HashMap<String, Arc<HealTask>>>>,
        config: &Arc<RwLock<HealConfig>>,
        statistics: &Arc<RwLock<HealStatistics>>,
        storage: &Arc<dyn HealStorageAPI>,
    ) {
        let config = config.read().await;
        let mut active_heals_guard = active_heals.lock().await;

        // check if new heal tasks can be started
        if active_heals_guard.len() >= config.max_concurrent_heals {
            return;
        }

        let mut queue = heal_queue.lock().await;
        if let Some(request) = queue.pop_front() {
            let task = Arc::new(HealTask::from_request(request, storage.clone()));
            let task_id = task.id.clone();
            active_heals_guard.insert(task_id.clone(), task.clone());
            drop(active_heals_guard);
            let active_heals_clone = active_heals.clone();
            let statistics_clone = statistics.clone();

            // start heal task
            tokio::spawn(async move {
                info!("Starting heal task: {}", task_id);
                let result = task.execute().await;
                match result {
                    Ok(_) => {
                        info!("Heal task completed successfully: {}", task_id);
                    }
                    Err(e) => {
                        error!("Heal task failed: {} - {}", task_id, e);
                    }
                }
                let mut active_heals_guard = active_heals_clone.lock().await;
                if let Some(completed_task) = active_heals_guard.remove(&task_id) {
                    // update statistics
                    let mut stats = statistics_clone.write().await;
                    match completed_task.get_status().await {
                        HealTaskStatus::Completed => {
                            stats.update_task_completion(true);
                        }
                        _ => {
                            stats.update_task_completion(false);
                        }
                    }
                    stats.update_running_tasks(active_heals_guard.len() as u64);
                }
            });

            // update statistics
            let mut stats = statistics.write().await;
            stats.total_tasks += 1;
        }
    }
}

impl std::fmt::Debug for HealManager {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("HealManager")
            .field("config", &"<config>")
            .field("state", &"<state>")
            .field("active_heals_count", &"<active_heals>")
            .field("queue_length", &"<queue>")
            .finish()
    }
}
@@ -12,24 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use serde::{Deserialize, Serialize};
pub mod channel;
pub mod erasure_healer;
pub mod event;
pub mod manager;
pub mod progress;
pub mod resume;
pub mod storage;
pub mod task;

/// Logger configuration
#[derive(Debug, Deserialize, Serialize, Clone)]
pub struct LoggerConfig {
    pub queue_capacity: Option<usize>,
}

impl LoggerConfig {
    pub fn new() -> Self {
        Self {
            queue_capacity: Some(10000),
        }
    }
}

impl Default for LoggerConfig {
    fn default() -> Self {
        Self::new()
    }
}
pub use erasure_healer::ErasureSetHealer;
pub use manager::HealManager;
pub use resume::{CheckpointManager, ResumeCheckpoint, ResumeManager, ResumeState, ResumeUtils};
pub use task::{HealOptions, HealPriority, HealRequest, HealTask, HealType};
crates/ahm/src/heal/progress.rs (new file, 148 lines)
@@ -0,0 +1,148 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use serde::{Deserialize, Serialize};
use std::time::SystemTime;

#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct HealProgress {
    /// Objects scanned
    pub objects_scanned: u64,
    /// Objects healed
    pub objects_healed: u64,
    /// Objects failed
    pub objects_failed: u64,
    /// Bytes processed
    pub bytes_processed: u64,
    /// Current object
    pub current_object: Option<String>,
    /// Progress percentage
    pub progress_percentage: f64,
    /// Start time
    pub start_time: Option<SystemTime>,
    /// Last update time
    pub last_update_time: Option<SystemTime>,
    /// Estimated completion time
    pub estimated_completion_time: Option<SystemTime>,
}

impl HealProgress {
    pub fn new() -> Self {
        Self {
            start_time: Some(SystemTime::now()),
            last_update_time: Some(SystemTime::now()),
            ..Default::default()
        }
    }
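
    // NOTE: the percentage computed in `update_progress` is healed objects over the
    // sum of all three counters (scanned + healed + failed); if `scanned` already
    // counts healed and failed objects this understates progress, so treat it as a
    // rough indicator rather than an exact ratio.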
    pub fn update_progress(&mut self, scanned: u64, healed: u64, failed: u64, bytes: u64) {
        self.objects_scanned = scanned;
        self.objects_healed = healed;
        self.objects_failed = failed;
        self.bytes_processed = bytes;
        self.last_update_time = Some(SystemTime::now());

        // calculate progress percentage
        let total = scanned + healed + failed;
        if total > 0 {
            self.progress_percentage = (healed as f64 / total as f64) * 100.0;
        }
    }

    pub fn set_current_object(&mut self, object: Option<String>) {
        self.current_object = object;
        self.last_update_time = Some(SystemTime::now());
    }

    pub fn is_completed(&self) -> bool {
        self.progress_percentage >= 100.0
            || self.objects_scanned > 0 && self.objects_healed + self.objects_failed >= self.objects_scanned
    }

    pub fn get_success_rate(&self) -> f64 {
        let total = self.objects_healed + self.objects_failed;
        if total > 0 {
            (self.objects_healed as f64 / total as f64) * 100.0
        } else {
            0.0
        }
    }
}

#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HealStatistics {
    /// Total heal tasks
    pub total_tasks: u64,
    /// Successful tasks
    pub successful_tasks: u64,
    /// Failed tasks
    pub failed_tasks: u64,
    /// Running tasks
    pub running_tasks: u64,
    /// Total healed objects
    pub total_objects_healed: u64,
    /// Total healed bytes
    pub total_bytes_healed: u64,
    /// Last update time
    pub last_update_time: SystemTime,
}

impl Default for HealStatistics {
    fn default() -> Self {
        Self::new()
    }
}

impl HealStatistics {
    pub fn new() -> Self {
        Self {
            total_tasks: 0,
            successful_tasks: 0,
            failed_tasks: 0,
            running_tasks: 0,
            total_objects_healed: 0,
            total_bytes_healed: 0,
            last_update_time: SystemTime::now(),
        }
    }

    pub fn update_task_completion(&mut self, success: bool) {
        if success {
            self.successful_tasks += 1;
        } else {
            self.failed_tasks += 1;
        }
        self.last_update_time = SystemTime::now();
    }

    pub fn update_running_tasks(&mut self, count: u64) {
        self.running_tasks = count;
        self.last_update_time = SystemTime::now();
    }

    pub fn add_healed_objects(&mut self, count: u64, bytes: u64) {
        self.total_objects_healed += count;
        self.total_bytes_healed += bytes;
        self.last_update_time = SystemTime::now();
    }

    pub fn get_success_rate(&self) -> f64 {
        let total = self.successful_tasks + self.failed_tasks;
        if total > 0 {
            (self.successful_tasks as f64 / total as f64) * 100.0
        } else {
            0.0
        }
    }
}
crates/ahm/src/heal/resume.rs (new file, 696 lines)
@@ -0,0 +1,696 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::error::{Error, Result};
use rustfs_ecstore::disk::{BUCKET_META_PREFIX, DiskAPI, DiskStore, RUSTFS_META_BUCKET};
use serde::{Deserialize, Serialize};
use std::path::Path;
use std::sync::Arc;
use std::time::{SystemTime, UNIX_EPOCH};
use tokio::sync::RwLock;
use tracing::{debug, info, warn};
use uuid::Uuid;

/// resume state file constants
const RESUME_STATE_FILE: &str = "ahm_resume_state.json";
const RESUME_PROGRESS_FILE: &str = "ahm_progress.json";
const RESUME_CHECKPOINT_FILE: &str = "ahm_checkpoint.json";

/// resume state
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResumeState {
    /// task id
    pub task_id: String,
    /// task type
    pub task_type: String,
    /// start time
    pub start_time: u64,
    /// last update time
    pub last_update: u64,
    /// completed
    pub completed: bool,
    /// total objects
    pub total_objects: u64,
    /// processed objects
    pub processed_objects: u64,
    /// successful objects
    pub successful_objects: u64,
    /// failed objects
    pub failed_objects: u64,
    /// skipped objects
    pub skipped_objects: u64,
    /// current bucket
    pub current_bucket: Option<String>,
    /// current object
    pub current_object: Option<String>,
    /// completed buckets
    pub completed_buckets: Vec<String>,
    /// pending buckets
    pub pending_buckets: Vec<String>,
    /// error message
    pub error_message: Option<String>,
    /// retry count
    pub retry_count: u32,
    /// max retries
    pub max_retries: u32,
}

impl ResumeState {
    pub fn new(task_id: String, task_type: String, buckets: Vec<String>) -> Self {
        Self {
            task_id,
            task_type,
            start_time: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(),
            last_update: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(),
            completed: false,
            total_objects: 0,
            processed_objects: 0,
            successful_objects: 0,
            failed_objects: 0,
            skipped_objects: 0,
            current_bucket: None,
            current_object: None,
            completed_buckets: Vec::new(),
            pending_buckets: buckets,
            error_message: None,
            retry_count: 0,
            max_retries: 3,
        }
    }

    pub fn update_progress(&mut self, processed: u64, successful: u64, failed: u64, skipped: u64) {
        self.processed_objects = processed;
        self.successful_objects = successful;
        self.failed_objects = failed;
        self.skipped_objects = skipped;
        self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
    }

    pub fn set_current_item(&mut self, bucket: Option<String>, object: Option<String>) {
        self.current_bucket = bucket;
        self.current_object = object;
        self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
    }

    pub fn complete_bucket(&mut self, bucket: &str) {
        if !self.completed_buckets.contains(&bucket.to_string()) {
            self.completed_buckets.push(bucket.to_string());
        }
        if let Some(pos) = self.pending_buckets.iter().position(|b| b == bucket) {
            self.pending_buckets.remove(pos);
        }
        self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
    }

    pub fn mark_completed(&mut self) {
        self.completed = true;
        self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
    }

    pub fn set_error(&mut self, error: String) {
        self.error_message = Some(error);
        self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
    }

    pub fn increment_retry(&mut self) {
        self.retry_count += 1;
        self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
    }

    pub fn can_retry(&self) -> bool {
        self.retry_count < self.max_retries
    }

    pub fn get_progress_percentage(&self) -> f64 {
        if self.total_objects == 0 {
            return 0.0;
        }
        (self.processed_objects as f64 / self.total_objects as f64) * 100.0
    }

    pub fn get_success_rate(&self) -> f64 {
        let total = self.successful_objects + self.failed_objects;
        if total == 0 {
            return 0.0;
        }
        (self.successful_objects as f64 / total as f64) * 100.0
    }
}
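
// Resume state is persisted as JSON under RUSTFS_META_BUCKET, below
// BUCKET_META_PREFIX, in files named "{task_id}_ahm_resume_state.json"
// (see `save_state` and `read_state_file` below), so a restarted process
// can pick an interrupted task back up.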
/// resume manager
pub struct ResumeManager {
    disk: DiskStore,
    state: Arc<RwLock<ResumeState>>,
}

impl ResumeManager {
    /// create new resume manager
    pub async fn new(disk: DiskStore, task_id: String, task_type: String, buckets: Vec<String>) -> Result<Self> {
        let state = ResumeState::new(task_id, task_type, buckets);
        let manager = Self {
            disk,
            state: Arc::new(RwLock::new(state)),
        };

        // save initial state
        manager.save_state().await?;
        Ok(manager)
    }

    /// load resume state from disk
    pub async fn load_from_disk(disk: DiskStore, task_id: &str) -> Result<Self> {
        let state_data = Self::read_state_file(&disk, task_id).await?;
        let state: ResumeState = serde_json::from_slice(&state_data).map_err(|e| Error::TaskExecutionFailed {
            message: format!("Failed to deserialize resume state: {e}"),
        })?;

        Ok(Self {
            disk,
            state: Arc::new(RwLock::new(state)),
        })
    }

    /// check if resume state exists
    pub async fn has_resume_state(disk: &DiskStore, task_id: &str) -> bool {
        let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_STATE_FILE}"));
        match disk.read_all(RUSTFS_META_BUCKET, file_path.to_str().unwrap()).await {
            Ok(data) => !data.is_empty(),
            Err(_) => false,
        }
    }

    /// get current state
    pub async fn get_state(&self) -> ResumeState {
        self.state.read().await.clone()
    }

    /// update progress
    pub async fn update_progress(&self, processed: u64, successful: u64, failed: u64, skipped: u64) -> Result<()> {
        let mut state = self.state.write().await;
        state.update_progress(processed, successful, failed, skipped);
        drop(state);
        self.save_state().await
    }

    /// set current item
    pub async fn set_current_item(&self, bucket: Option<String>, object: Option<String>) -> Result<()> {
        let mut state = self.state.write().await;
        state.set_current_item(bucket, object);
        drop(state);
        self.save_state().await
    }

    /// complete bucket
    pub async fn complete_bucket(&self, bucket: &str) -> Result<()> {
        let mut state = self.state.write().await;
        state.complete_bucket(bucket);
        drop(state);
        self.save_state().await
    }

    /// mark task completed
    pub async fn mark_completed(&self) -> Result<()> {
        let mut state = self.state.write().await;
        state.mark_completed();
        drop(state);
        self.save_state().await
    }

    /// set error message
    pub async fn set_error(&self, error: String) -> Result<()> {
        let mut state = self.state.write().await;
        state.set_error(error);
        drop(state);
        self.save_state().await
    }

    /// increment retry count
    pub async fn increment_retry(&self) -> Result<()> {
        let mut state = self.state.write().await;
        state.increment_retry();
        drop(state);
        self.save_state().await
    }

    /// cleanup resume state
    pub async fn cleanup(&self) -> Result<()> {
        let state = self.state.read().await;
        let task_id = &state.task_id;

        // delete state files
        let state_file = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_STATE_FILE}"));
        let progress_file = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_PROGRESS_FILE}"));
        let checkpoint_file = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_CHECKPOINT_FILE}"));

        // ignore delete errors, files may not exist
        let _ = self
            .disk
            .delete(RUSTFS_META_BUCKET, state_file.to_str().unwrap(), Default::default())
            .await;
        let _ = self
            .disk
            .delete(RUSTFS_META_BUCKET, progress_file.to_str().unwrap(), Default::default())
            .await;
        let _ = self
            .disk
            .delete(RUSTFS_META_BUCKET, checkpoint_file.to_str().unwrap(), Default::default())
            .await;

        info!("Cleaned up resume state for task: {}", task_id);
        Ok(())
    }

    /// save state to disk
    async fn save_state(&self) -> Result<()> {
        let state = self.state.read().await;
        let state_data = serde_json::to_vec(&*state).map_err(|e| Error::TaskExecutionFailed {
            message: format!("Failed to serialize resume state: {e}"),
        })?;

        let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{}_{}", state.task_id, RESUME_STATE_FILE));

        self.disk
            .write_all(RUSTFS_META_BUCKET, file_path.to_str().unwrap(), state_data.into())
            .await
            .map_err(|e| Error::TaskExecutionFailed {
                message: format!("Failed to save resume state: {e}"),
            })?;

        debug!("Saved resume state for task: {}", state.task_id);
        Ok(())
    }

    /// read state file from disk
    async fn read_state_file(disk: &DiskStore, task_id: &str) -> Result<Vec<u8>> {
|
||||
let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_STATE_FILE}"));
|
||||
|
||||
disk.read_all(RUSTFS_META_BUCKET, file_path.to_str().unwrap())
|
||||
.await
|
||||
.map(|bytes| bytes.to_vec())
|
||||
.map_err(|e| Error::TaskExecutionFailed {
|
||||
message: format!("Failed to read resume state file: {e}"),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
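// NOTE (illustrative, not part of this changeset): a minimal usage sketch of the
// resume flow above — create a manager for a new task, record progress, then
// reload the state after a restart. The `disk` handle and bucket names are
// placeholders; every call below uses signatures defined in this file.
async fn resume_manager_demo(disk: DiskStore) -> Result<()> {
    let task_id = ResumeUtils::generate_task_id();
    let mgr = ResumeManager::new(
        disk.clone(),
        task_id.clone(),
        "erasure_set".to_string(),
        vec!["bucket1".to_string()],
    )
    .await?;

    // Each mutation is persisted immediately via save_state().
    mgr.update_progress(10, 9, 1, 0).await?;
    mgr.complete_bucket("bucket1").await?;

    // After a crash or restart, the state can be reloaded by task id.
    if ResumeManager::has_resume_state(&disk, &task_id).await {
        let resumed = ResumeManager::load_from_disk(disk, &task_id).await?;
        assert_eq!(resumed.get_state().await.processed_objects, 10);
    }
    Ok(())
}
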
/// resume checkpoint
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct ResumeCheckpoint {
    /// task id
    pub task_id: String,
    /// checkpoint time
    pub checkpoint_time: u64,
    /// current bucket index
    pub current_bucket_index: usize,
    /// current object index
    pub current_object_index: usize,
    /// processed objects
    pub processed_objects: Vec<String>,
    /// failed objects
    pub failed_objects: Vec<String>,
    /// skipped objects
    pub skipped_objects: Vec<String>,
}

impl ResumeCheckpoint {
    pub fn new(task_id: String) -> Self {
        Self {
            task_id,
            checkpoint_time: SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs(),
            current_bucket_index: 0,
            current_object_index: 0,
            processed_objects: Vec::new(),
            failed_objects: Vec::new(),
            skipped_objects: Vec::new(),
        }
    }

    pub fn update_position(&mut self, bucket_index: usize, object_index: usize) {
        self.current_bucket_index = bucket_index;
        self.current_object_index = object_index;
        self.checkpoint_time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
    }

    pub fn add_processed_object(&mut self, object: String) {
        if !self.processed_objects.contains(&object) {
            self.processed_objects.push(object);
        }
    }

    pub fn add_failed_object(&mut self, object: String) {
        if !self.failed_objects.contains(&object) {
            self.failed_objects.push(object);
        }
    }

    pub fn add_skipped_object(&mut self, object: String) {
        if !self.skipped_objects.contains(&object) {
            self.skipped_objects.push(object);
        }
    }
}

/// resume checkpoint manager
pub struct CheckpointManager {
    disk: DiskStore,
    checkpoint: Arc<RwLock<ResumeCheckpoint>>,
}

impl CheckpointManager {
    /// create new checkpoint manager
    pub async fn new(disk: DiskStore, task_id: String) -> Result<Self> {
        let checkpoint = ResumeCheckpoint::new(task_id);
        let manager = Self {
            disk,
            checkpoint: Arc::new(RwLock::new(checkpoint)),
        };

        // save initial checkpoint
        manager.save_checkpoint().await?;
        Ok(manager)
    }

    /// load checkpoint from disk
    pub async fn load_from_disk(disk: DiskStore, task_id: &str) -> Result<Self> {
        let checkpoint_data = Self::read_checkpoint_file(&disk, task_id).await?;
        let checkpoint: ResumeCheckpoint = serde_json::from_slice(&checkpoint_data).map_err(|e| Error::TaskExecutionFailed {
            message: format!("Failed to deserialize checkpoint: {e}"),
        })?;

        Ok(Self {
            disk,
            checkpoint: Arc::new(RwLock::new(checkpoint)),
        })
    }

    /// check if checkpoint exists
    pub async fn has_checkpoint(disk: &DiskStore, task_id: &str) -> bool {
        let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_CHECKPOINT_FILE}"));
        match disk.read_all(RUSTFS_META_BUCKET, file_path.to_str().unwrap()).await {
            Ok(data) => !data.is_empty(),
            Err(_) => false,
        }
    }

    /// get current checkpoint
    pub async fn get_checkpoint(&self) -> ResumeCheckpoint {
        self.checkpoint.read().await.clone()
    }

    /// update position
    pub async fn update_position(&self, bucket_index: usize, object_index: usize) -> Result<()> {
        let mut checkpoint = self.checkpoint.write().await;
        checkpoint.update_position(bucket_index, object_index);
        drop(checkpoint);
        self.save_checkpoint().await
    }

    /// add processed object
    pub async fn add_processed_object(&self, object: String) -> Result<()> {
        let mut checkpoint = self.checkpoint.write().await;
        checkpoint.add_processed_object(object);
        drop(checkpoint);
        self.save_checkpoint().await
    }

    /// add failed object
    pub async fn add_failed_object(&self, object: String) -> Result<()> {
        let mut checkpoint = self.checkpoint.write().await;
        checkpoint.add_failed_object(object);
        drop(checkpoint);
        self.save_checkpoint().await
    }

    /// add skipped object
    pub async fn add_skipped_object(&self, object: String) -> Result<()> {
        let mut checkpoint = self.checkpoint.write().await;
        checkpoint.add_skipped_object(object);
        drop(checkpoint);
        self.save_checkpoint().await
    }

    /// cleanup checkpoint
    pub async fn cleanup(&self) -> Result<()> {
        let checkpoint = self.checkpoint.read().await;
        let task_id = &checkpoint.task_id;

        let checkpoint_file = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_CHECKPOINT_FILE}"));
        let _ = self
            .disk
            .delete(RUSTFS_META_BUCKET, checkpoint_file.to_str().unwrap(), Default::default())
            .await;

        info!("Cleaned up checkpoint for task: {}", task_id);
        Ok(())
    }

    /// save checkpoint to disk
    async fn save_checkpoint(&self) -> Result<()> {
        let checkpoint = self.checkpoint.read().await;
        let checkpoint_data = serde_json::to_vec(&*checkpoint).map_err(|e| Error::TaskExecutionFailed {
            message: format!("Failed to serialize checkpoint: {e}"),
        })?;

        let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{}_{}", checkpoint.task_id, RESUME_CHECKPOINT_FILE));

        self.disk
            .write_all(RUSTFS_META_BUCKET, file_path.to_str().unwrap(), checkpoint_data.into())
            .await
            .map_err(|e| Error::TaskExecutionFailed {
                message: format!("Failed to save checkpoint: {e}"),
            })?;

        debug!("Saved checkpoint for task: {}", checkpoint.task_id);
        Ok(())
    }

    /// read checkpoint file from disk
    async fn read_checkpoint_file(disk: &DiskStore, task_id: &str) -> Result<Vec<u8>> {
        let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_CHECKPOINT_FILE}"));

        disk.read_all(RUSTFS_META_BUCKET, file_path.to_str().unwrap())
            .await
            .map(|bytes| bytes.to_vec())
            .map_err(|e| Error::TaskExecutionFailed {
                message: format!("Failed to read checkpoint file: {e}"),
            })
    }
}

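// NOTE (illustrative, not part of this changeset): a sketch of resuming an
// object walk from a persisted checkpoint. `objects` stands in for whatever
// listing the heal task iterates over; the single-bucket index is assumed.
async fn resume_walk(disk: DiskStore, task_id: &str, objects: Vec<String>) -> Result<()> {
    let mgr = if CheckpointManager::has_checkpoint(&disk, task_id).await {
        CheckpointManager::load_from_disk(disk, task_id).await?
    } else {
        CheckpointManager::new(disk, task_id.to_string()).await?
    };

    // Skip everything before the last recorded position.
    let start = mgr.get_checkpoint().await.current_object_index;
    for (idx, object) in objects.into_iter().enumerate().skip(start) {
        // ... heal `object` here ...
        mgr.add_processed_object(object).await?;
        mgr.update_position(0, idx + 1).await?; // bucket index 0 in this sketch
    }
    Ok(())
}
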
/// resume utils
pub struct ResumeUtils;

impl ResumeUtils {
    /// generate unique task id
    pub fn generate_task_id() -> String {
        Uuid::new_v4().to_string()
    }

    /// check if task can be resumed
    pub async fn can_resume_task(disk: &DiskStore, task_id: &str) -> bool {
        ResumeManager::has_resume_state(disk, task_id).await
    }

    /// get all resumable task ids
    pub async fn get_resumable_tasks(disk: &DiskStore) -> Result<Vec<String>> {
        // List all files in the buckets metadata directory
        let entries = match disk.list_dir("", RUSTFS_META_BUCKET, BUCKET_META_PREFIX, -1).await {
            Ok(entries) => entries,
            Err(e) => {
                debug!("Failed to list resume state files: {}", e);
                return Ok(Vec::new());
            }
        };

        let mut task_ids = Vec::new();

        // Filter files that end with the resume-state suffix and extract the task IDs
        for entry in entries {
            if entry.ends_with(&format!("_{RESUME_STATE_FILE}")) {
                // Extract the task ID from a filename of the form {task_id}_{RESUME_STATE_FILE}
                if let Some(task_id) = entry.strip_suffix(&format!("_{RESUME_STATE_FILE}")) {
                    if !task_id.is_empty() {
                        task_ids.push(task_id.to_string());
                    }
                }
            }
        }

        debug!("Found {} resumable tasks: {:?}", task_ids.len(), task_ids);
        Ok(task_ids)
    }

    /// cleanup expired resume states
    pub async fn cleanup_expired_states(disk: &DiskStore, max_age_hours: u64) -> Result<()> {
        let task_ids = Self::get_resumable_tasks(disk).await?;
        let current_time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();

        for task_id in task_ids {
            if let Ok(resume_manager) = ResumeManager::load_from_disk(disk.clone(), &task_id).await {
                let state = resume_manager.get_state().await;
                // saturating_sub guards against a last_update in the future (clock skew)
                let age_hours = current_time.saturating_sub(state.last_update) / 3600;

                if age_hours > max_age_hours {
                    info!("Cleaning up expired resume state for task: {} (age: {} hours)", task_id, age_hours);
                    if let Err(e) = resume_manager.cleanup().await {
                        warn!("Failed to cleanup expired resume state for task {}: {}", task_id, e);
                    }
                }
            }
        }

        Ok(())
    }
}

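// NOTE (illustrative, not part of this changeset): a background janitor that
// prunes stale resume state once an hour. The hourly cadence and the 24-hour
// retention value are illustrative choices, not defaults from this module.
async fn resume_state_janitor(disk: DiskStore) {
    let mut tick = tokio::time::interval(std::time::Duration::from_secs(3600));
    loop {
        tick.tick().await;
        if let Err(e) = ResumeUtils::cleanup_expired_states(&disk, 24).await {
            warn!("resume-state cleanup failed: {}", e);
        }
    }
}
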
#[cfg(test)]
mod tests {
    use super::*;

    #[tokio::test]
    async fn test_resume_state_creation() {
        let task_id = ResumeUtils::generate_task_id();
        let buckets = vec!["bucket1".to_string(), "bucket2".to_string()];
        let state = ResumeState::new(task_id.clone(), "erasure_set".to_string(), buckets);

        assert_eq!(state.task_id, task_id);
        assert_eq!(state.task_type, "erasure_set");
        assert!(!state.completed);
        assert_eq!(state.processed_objects, 0);
        assert_eq!(state.pending_buckets.len(), 2);
    }

    #[tokio::test]
    async fn test_resume_state_progress() {
        let task_id = ResumeUtils::generate_task_id();
        let buckets = vec!["bucket1".to_string()];
        let mut state = ResumeState::new(task_id, "erasure_set".to_string(), buckets);

        state.update_progress(10, 8, 1, 1);
        assert_eq!(state.processed_objects, 10);
        assert_eq!(state.successful_objects, 8);
        assert_eq!(state.failed_objects, 1);
        assert_eq!(state.skipped_objects, 1);

        let progress = state.get_progress_percentage();
        assert_eq!(progress, 0.0); // total_objects is still 0

        state.total_objects = 100;
        let progress = state.get_progress_percentage();
        assert_eq!(progress, 10.0);
    }

    #[tokio::test]
    async fn test_resume_state_bucket_completion() {
        let task_id = ResumeUtils::generate_task_id();
        let buckets = vec!["bucket1".to_string(), "bucket2".to_string()];
        let mut state = ResumeState::new(task_id, "erasure_set".to_string(), buckets);

        assert_eq!(state.pending_buckets.len(), 2);
        assert_eq!(state.completed_buckets.len(), 0);

        state.complete_bucket("bucket1");
        assert_eq!(state.pending_buckets.len(), 1);
        assert_eq!(state.completed_buckets.len(), 1);
        assert!(state.completed_buckets.contains(&"bucket1".to_string()));
    }

    #[tokio::test]
    async fn test_resume_utils() {
        let task_id1 = ResumeUtils::generate_task_id();
        let task_id2 = ResumeUtils::generate_task_id();

        assert_ne!(task_id1, task_id2);
        assert_eq!(task_id1.len(), 36); // canonical UUID string length
        assert_eq!(task_id2.len(), 36);
    }

    #[tokio::test]
    async fn test_get_resumable_tasks_integration() {
        use rustfs_ecstore::disk::{DiskOption, endpoint::Endpoint, new_disk};
        use tempfile::TempDir;

        // Create a temporary directory for testing
        let temp_dir = TempDir::new().unwrap();
        let disk_path = temp_dir.path().join("test_disk");
        std::fs::create_dir_all(&disk_path).unwrap();

        // Create a local disk for testing
        let endpoint = Endpoint::try_from(disk_path.to_string_lossy().as_ref()).unwrap();
        let disk_option = DiskOption {
            cleanup: false,
            health_check: false,
        };
        let disk = new_disk(&endpoint, &disk_option).await.unwrap();

        // Create the necessary directories first (ignore errors if they already exist)
        let _ = disk.make_volume(RUSTFS_META_BUCKET).await;
        let _ = disk.make_volume(&format!("{RUSTFS_META_BUCKET}/{BUCKET_META_PREFIX}")).await;

        // Create some test resume state files
        let task_ids = vec![
            "test-task-1".to_string(),
            "test-task-2".to_string(),
            "test-task-3".to_string(),
        ];

        // Save a resume state file for each task
        for task_id in &task_ids {
            let state = ResumeState::new(
                task_id.clone(),
                "erasure_set".to_string(),
                vec!["bucket1".to_string(), "bucket2".to_string()],
            );

            let state_data = serde_json::to_vec(&state).unwrap();
            let file_path = format!("{BUCKET_META_PREFIX}/{task_id}_{RESUME_STATE_FILE}");

            disk.write_all(RUSTFS_META_BUCKET, &file_path, state_data.into())
                .await
                .unwrap();
        }

        // Also create some non-resume-state files to test filtering
        let non_resume_files = vec![
            "other_file.txt",
            "task4_ahm_checkpoint.json",
            "task5_ahm_progress.json",
            "_ahm_resume_state.json", // Invalid: empty task ID
        ];

        for file_name in non_resume_files {
            let file_path = format!("{BUCKET_META_PREFIX}/{file_name}");
            disk.write_all(RUSTFS_META_BUCKET, &file_path, b"test data".to_vec().into())
                .await
                .unwrap();
        }

        // Now call get_resumable_tasks and check that it finds the correct files
        let found_task_ids = ResumeUtils::get_resumable_tasks(&disk).await.unwrap();

        // Verify that only the valid resume state files are found
        assert_eq!(found_task_ids.len(), 3);
        for task_id in &task_ids {
            assert!(found_task_ids.contains(task_id), "Task ID {task_id} not found");
        }

        // Verify that invalid files are not included
        assert!(!found_task_ids.contains(&"".to_string()));
        assert!(!found_task_ids.contains(&"task4".to_string()));
        assert!(!found_task_ids.contains(&"task5".to_string()));

        // Clean up
        temp_dir.close().unwrap();
    }
}
crates/ahm/src/heal/storage.rs (new file, 544 lines)
@@ -0,0 +1,544 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::error::{Error, Result};
use async_trait::async_trait;
use rustfs_common::heal_channel::{HealOpts, HealScanMode};
use rustfs_ecstore::{
    disk::{DiskStore, endpoint::Endpoint},
    store::ECStore,
    store_api::{BucketInfo, ObjectIO, StorageAPI},
};
use rustfs_madmin::heal_commands::HealResultItem;
use std::sync::Arc;
use tracing::{debug, error, info, warn};

/// Disk status for heal operations
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum DiskStatus {
    /// Ok
    Ok,
    /// Offline
    Offline,
    /// Corrupt
    Corrupt,
    /// Missing
    Missing,
    /// Permission denied
    PermissionDenied,
    /// Faulty
    Faulty,
    /// Root mount
    RootMount,
    /// Unknown
    Unknown,
    /// Unformatted
    Unformatted,
}

/// Heal storage layer interface
#[async_trait]
pub trait HealStorageAPI: Send + Sync {
    /// Get object meta
    async fn get_object_meta(&self, bucket: &str, object: &str) -> Result<Option<rustfs_ecstore::store_api::ObjectInfo>>;

    /// Get object data
    async fn get_object_data(&self, bucket: &str, object: &str) -> Result<Option<Vec<u8>>>;

    /// Put object data
    async fn put_object_data(&self, bucket: &str, object: &str, data: &[u8]) -> Result<()>;

    /// Delete object
    async fn delete_object(&self, bucket: &str, object: &str) -> Result<()>;

    /// Check object integrity
    async fn verify_object_integrity(&self, bucket: &str, object: &str) -> Result<bool>;

    /// EC decode rebuild
    async fn ec_decode_rebuild(&self, bucket: &str, object: &str) -> Result<Vec<u8>>;

    /// Get disk status
    async fn get_disk_status(&self, endpoint: &Endpoint) -> Result<DiskStatus>;

    /// Format disk
    async fn format_disk(&self, endpoint: &Endpoint) -> Result<()>;

    /// Get bucket info
    async fn get_bucket_info(&self, bucket: &str) -> Result<Option<BucketInfo>>;

    /// Fix bucket metadata
    async fn heal_bucket_metadata(&self, bucket: &str) -> Result<()>;

    /// Get all buckets
    async fn list_buckets(&self) -> Result<Vec<BucketInfo>>;

    /// Check object exists
    async fn object_exists(&self, bucket: &str, object: &str) -> Result<bool>;

    /// Get object size
    async fn get_object_size(&self, bucket: &str, object: &str) -> Result<Option<u64>>;

    /// Get object checksum
    async fn get_object_checksum(&self, bucket: &str, object: &str) -> Result<Option<String>>;

    /// Heal object using ecstore
    async fn heal_object(
        &self,
        bucket: &str,
        object: &str,
        version_id: Option<&str>,
        opts: &HealOpts,
    ) -> Result<(HealResultItem, Option<Error>)>;

    /// Heal bucket using ecstore
    async fn heal_bucket(&self, bucket: &str, opts: &HealOpts) -> Result<HealResultItem>;

    /// Heal format using ecstore
    async fn heal_format(&self, dry_run: bool) -> Result<(HealResultItem, Option<Error>)>;

    /// List objects for healing
    async fn list_objects_for_heal(&self, bucket: &str, prefix: &str) -> Result<Vec<String>>;

    /// Get disk for resume functionality
    async fn get_disk_for_resume(&self, set_disk_id: &str) -> Result<DiskStore>;
}

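// NOTE (illustrative, not part of this changeset): driving a heal through the
// trait object. The HealOpts field set mirrors the constructions used elsewhere
// in this file; the bucket and object names are placeholders.
async fn heal_one(storage: Arc<dyn HealStorageAPI>) -> Result<()> {
    let opts = HealOpts {
        recursive: false,
        dry_run: true, // inspect only; no writes
        remove: false,
        recreate: false,
        scan_mode: HealScanMode::Normal,
        update_parity: false,
        no_lock: false,
        pool: None,
        set: None,
    };
    let (result, err) = storage.heal_object("bucket1", "key1", None, &opts).await?;
    if let Some(e) = err {
        warn!("heal reported a per-object error: {}", e);
    }
    info!("heal touched {} drives", result.after.drives.len());
    Ok(())
}
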
/// ECStore heal storage layer implementation
pub struct ECStoreHealStorage {
    ecstore: Arc<ECStore>,
}

impl ECStoreHealStorage {
    pub fn new(ecstore: Arc<ECStore>) -> Self {
        Self { ecstore }
    }
}

#[async_trait]
impl HealStorageAPI for ECStoreHealStorage {
    async fn get_object_meta(&self, bucket: &str, object: &str) -> Result<Option<rustfs_ecstore::store_api::ObjectInfo>> {
        debug!("Getting object meta: {}/{}", bucket, object);

        match self.ecstore.get_object_info(bucket, object, &Default::default()).await {
            Ok(info) => Ok(Some(info)),
            Err(e) => {
                // Map ObjectNotFound to None to align with the Option return type
                if matches!(e, rustfs_ecstore::error::StorageError::ObjectNotFound(_, _)) {
                    debug!("Object meta not found: {}/{}", bucket, object);
                    Ok(None)
                } else {
                    error!("Failed to get object meta: {}/{} - {}", bucket, object, e);
                    Err(Error::other(e))
                }
            }
        }
    }

    async fn get_object_data(&self, bucket: &str, object: &str) -> Result<Option<Vec<u8>>> {
        debug!("Getting object data: {}/{}", bucket, object);

        let reader = match (*self.ecstore)
            .get_object_reader(bucket, object, None, Default::default(), &Default::default())
            .await
        {
            Ok(reader) => reader,
            Err(e) => {
                error!("Failed to get object: {}/{} - {}", bucket, object, e);
                return Err(Error::other(e));
            }
        };

        // WARNING: Returning Vec<u8> for large objects is dangerous. To avoid OOM, cap the read size.
        // If needed, refactor callers to stream instead of buffering the entire object.
        const MAX_READ_BYTES: usize = 16 * 1024 * 1024; // 16 MiB cap
        let mut buf = Vec::with_capacity(1024 * 1024);
        use tokio::io::AsyncReadExt as _;
        let mut n_read: usize = 0;
        let mut stream = reader.stream;
        loop {
            // Read in 1 MiB chunks
            let mut chunk = vec![0u8; 1024 * 1024];
            match stream.read(&mut chunk).await {
                Ok(0) => break,
                Ok(n) => {
                    buf.extend_from_slice(&chunk[..n]);
                    n_read += n;
                    if n_read > MAX_READ_BYTES {
                        warn!(
                            "Object data exceeds cap ({} bytes), aborting full read to prevent OOM: {}/{}",
                            MAX_READ_BYTES, bucket, object
                        );
                        return Ok(None);
                    }
                }
                Err(e) => {
                    error!("Failed to read object data: {}/{} - {}", bucket, object, e);
                    return Err(Error::other(e));
                }
            }
        }
        Ok(Some(buf))
    }

    async fn put_object_data(&self, bucket: &str, object: &str, data: &[u8]) -> Result<()> {
        debug!("Putting object data: {}/{} ({} bytes)", bucket, object, data.len());

        let mut reader = rustfs_ecstore::store_api::PutObjReader::from_vec(data.to_vec());
        match (*self.ecstore)
            .put_object(bucket, object, &mut reader, &Default::default())
            .await
        {
            Ok(_) => {
                info!("Successfully put object: {}/{}", bucket, object);
                Ok(())
            }
            Err(e) => {
                error!("Failed to put object: {}/{} - {}", bucket, object, e);
                Err(Error::other(e))
            }
        }
    }

    async fn delete_object(&self, bucket: &str, object: &str) -> Result<()> {
        debug!("Deleting object: {}/{}", bucket, object);

        match self.ecstore.delete_object(bucket, object, Default::default()).await {
            Ok(_) => {
                info!("Successfully deleted object: {}/{}", bucket, object);
                Ok(())
            }
            Err(e) => {
                error!("Failed to delete object: {}/{} - {}", bucket, object, e);
                Err(Error::other(e))
            }
        }
    }

    async fn verify_object_integrity(&self, bucket: &str, object: &str) -> Result<bool> {
        debug!("Verifying object integrity: {}/{}", bucket, object);

        // Check object metadata first
        match self.get_object_meta(bucket, object).await? {
            Some(obj_info) => {
                if obj_info.size < 0 {
                    warn!("Object has invalid size: {}/{}", bucket, object);
                    return Ok(false);
                }

                // Stream-read the object into a sink to avoid loading it into memory
                match (*self.ecstore)
                    .get_object_reader(bucket, object, None, Default::default(), &Default::default())
                    .await
                {
                    Ok(reader) => {
                        let mut stream = reader.stream;
                        match tokio::io::copy(&mut stream, &mut tokio::io::sink()).await {
                            Ok(_) => {
                                info!("Object integrity check passed: {}/{}", bucket, object);
                                Ok(true)
                            }
                            Err(e) => {
                                warn!("Object stream read failed: {}/{} - {}", bucket, object, e);
                                Ok(false)
                            }
                        }
                    }
                    Err(e) => {
                        warn!("Failed to get object reader: {}/{} - {}", bucket, object, e);
                        Ok(false)
                    }
                }
            }
            None => {
                warn!("Object metadata not found: {}/{}", bucket, object);
                Ok(false)
            }
        }
    }

    async fn ec_decode_rebuild(&self, bucket: &str, object: &str) -> Result<Vec<u8>> {
        debug!("EC decode rebuild: {}/{}", bucket, object);

        // Use ecstore's heal_object to rebuild the object
        let heal_opts = HealOpts {
            recursive: false,
            dry_run: false,
            remove: false,
            recreate: true,
            scan_mode: HealScanMode::Deep,
            update_parity: true,
            no_lock: false,
            pool: None,
            set: None,
        };

        match self.heal_object(bucket, object, None, &heal_opts).await {
            Ok((_result, error)) => {
                if error.is_some() {
                    return Err(Error::TaskExecutionFailed {
                        message: format!("Heal failed: {error:?}"),
                    });
                }

                // After healing, try to read the object data back
                match self.get_object_data(bucket, object).await? {
                    Some(data) => {
                        info!("EC decode rebuild successful: {}/{} ({} bytes)", bucket, object, data.len());
                        Ok(data)
                    }
                    None => {
                        error!("Object not found after heal: {}/{}", bucket, object);
                        Err(Error::TaskExecutionFailed {
                            message: format!("Object not found after heal: {bucket}/{object}"),
                        })
                    }
                }
            }
            Err(e) => {
                error!("Heal operation failed: {}/{} - {}", bucket, object, e);
                Err(e)
            }
        }
    }

    async fn get_disk_status(&self, endpoint: &Endpoint) -> Result<DiskStatus> {
        debug!("Getting disk status: {:?}", endpoint);

        // TODO: implement a real disk status check using ecstore
        // For now, always report Ok
        info!("Disk status check: {:?} - OK", endpoint);
        Ok(DiskStatus::Ok)
    }

    async fn format_disk(&self, endpoint: &Endpoint) -> Result<()> {
        debug!("Formatting disk: {:?}", endpoint);

        // Use ecstore's heal_format
        match self.heal_format(false).await {
            Ok((_, error)) => {
                if error.is_some() {
                    return Err(Error::other(format!("Format failed: {error:?}")));
                }
                info!("Successfully formatted disk: {:?}", endpoint);
                Ok(())
            }
            Err(e) => {
                error!("Failed to format disk: {:?} - {}", endpoint, e);
                Err(e)
            }
        }
    }

    async fn get_bucket_info(&self, bucket: &str) -> Result<Option<BucketInfo>> {
        debug!("Getting bucket info: {}", bucket);

        match self.ecstore.get_bucket_info(bucket, &Default::default()).await {
            Ok(info) => Ok(Some(info)),
            Err(e) => {
                error!("Failed to get bucket info: {} - {}", bucket, e);
                Err(Error::other(e))
            }
        }
    }

    async fn heal_bucket_metadata(&self, bucket: &str) -> Result<()> {
        debug!("Healing bucket metadata: {}", bucket);

        let heal_opts = HealOpts {
            recursive: true,
            dry_run: false,
            remove: false,
            recreate: false,
            scan_mode: HealScanMode::Normal,
            update_parity: false,
            no_lock: false,
            pool: None,
            set: None,
        };

        match self.heal_bucket(bucket, &heal_opts).await {
            Ok(_) => {
                info!("Successfully healed bucket metadata: {}", bucket);
                Ok(())
            }
            Err(e) => {
                error!("Failed to heal bucket metadata: {} - {}", bucket, e);
                Err(e)
            }
        }
    }

    async fn list_buckets(&self) -> Result<Vec<BucketInfo>> {
        debug!("Listing buckets");

        match self.ecstore.list_bucket(&Default::default()).await {
            Ok(buckets) => Ok(buckets),
            Err(e) => {
                error!("Failed to list buckets: {}", e);
                Err(Error::other(e))
            }
        }
    }

    async fn object_exists(&self, bucket: &str, object: &str) -> Result<bool> {
        debug!("Checking object exists: {}/{}", bucket, object);

        match self.get_object_meta(bucket, object).await {
            Ok(Some(_)) => Ok(true),
            Ok(None) => Ok(false),
            Err(_) => Ok(false),
        }
    }

    async fn get_object_size(&self, bucket: &str, object: &str) -> Result<Option<u64>> {
        debug!("Getting object size: {}/{}", bucket, object);

        match self.get_object_meta(bucket, object).await {
            Ok(Some(obj_info)) => Ok(Some(obj_info.size as u64)),
            Ok(None) => Ok(None),
            Err(e) => Err(e),
        }
    }

    async fn get_object_checksum(&self, bucket: &str, object: &str) -> Result<Option<String>> {
        debug!("Getting object checksum: {}/{}", bucket, object);

        match self.get_object_meta(bucket, object).await {
            Ok(Some(obj_info)) => {
                // Convert the checksum bytes to a hex string
                let checksum = obj_info.checksum.iter().map(|b| format!("{b:02x}")).collect::<String>();
                Ok(Some(checksum))
            }
            Ok(None) => Ok(None),
            Err(e) => Err(e),
        }
    }

    async fn heal_object(
        &self,
        bucket: &str,
        object: &str,
        version_id: Option<&str>,
        opts: &HealOpts,
    ) -> Result<(HealResultItem, Option<Error>)> {
        debug!("Healing object: {}/{}", bucket, object);

        let version_id_str = version_id.unwrap_or("");

        match self.ecstore.heal_object(bucket, object, version_id_str, opts).await {
            Ok((result, ecstore_error)) => {
                let error = ecstore_error.map(Error::other);
                info!("Heal object completed: {}/{} - result: {:?}, error: {:?}", bucket, object, result, error);
                Ok((result, error))
            }
            Err(e) => {
                error!("Heal object failed: {}/{} - {}", bucket, object, e);
                Err(Error::other(e))
            }
        }
    }

    async fn heal_bucket(&self, bucket: &str, opts: &HealOpts) -> Result<HealResultItem> {
        debug!("Healing bucket: {}", bucket);

        match self.ecstore.heal_bucket(bucket, opts).await {
            Ok(result) => {
                info!("Heal bucket completed: {} - result: {:?}", bucket, result);
                Ok(result)
            }
            Err(e) => {
                error!("Heal bucket failed: {} - {}", bucket, e);
                Err(Error::other(e))
            }
        }
    }

    async fn heal_format(&self, dry_run: bool) -> Result<(HealResultItem, Option<Error>)> {
        debug!("Healing format (dry_run: {})", dry_run);

        match self.ecstore.heal_format(dry_run).await {
            Ok((result, ecstore_error)) => {
                let error = ecstore_error.map(Error::other);
                info!("Heal format completed - result: {:?}, error: {:?}", result, error);
                Ok((result, error))
            }
            Err(e) => {
                error!("Heal format failed: {}", e);
                Err(Error::other(e))
            }
        }
    }

    async fn list_objects_for_heal(&self, bucket: &str, prefix: &str) -> Result<Vec<String>> {
        debug!("Listing objects for heal: {}/{}", bucket, prefix);

        // Use list_objects_v2 to enumerate objects
        match self
            .ecstore
            .clone()
            .list_objects_v2(bucket, prefix, None, None, 1000, false, None)
            .await
        {
            Ok(list_info) => {
                let objects: Vec<String> = list_info.objects.into_iter().map(|obj| obj.name).collect();
                info!("Found {} objects for heal in {}/{}", objects.len(), bucket, prefix);
                Ok(objects)
            }
            Err(e) => {
                error!("Failed to list objects for heal: {}/{} - {}", bucket, prefix, e);
                Err(Error::other(e))
            }
        }
    }

    async fn get_disk_for_resume(&self, set_disk_id: &str) -> Result<DiskStore> {
        debug!("Getting disk for resume: {}", set_disk_id);

        // Parse set_disk_id to extract the pool and set indices.
        // Expected format: "pool_{pool_idx}_set_{set_idx}"
        let parts: Vec<&str> = set_disk_id.split('_').collect();
        if parts.len() != 4 || parts[0] != "pool" || parts[2] != "set" {
            return Err(Error::TaskExecutionFailed {
                message: format!("Invalid set_disk_id format: {set_disk_id}"),
            });
        }

        let pool_idx: usize = parts[1].parse().map_err(|_| Error::TaskExecutionFailed {
            message: format!("Invalid pool index in set_disk_id: {set_disk_id}"),
        })?;

        let set_idx: usize = parts[3].parse().map_err(|_| Error::TaskExecutionFailed {
            message: format!("Invalid set index in set_disk_id: {set_disk_id}"),
        })?;

        // Get the disks of that set
        let disks = self
            .ecstore
            .get_disks(pool_idx, set_idx)
            .await
            .map_err(|e| Error::TaskExecutionFailed {
                message: format!("Failed to get disks for pool {pool_idx} set {set_idx}: {e}"),
            })?;

        // Return the first available disk
        if let Some(disk_store) = disks.into_iter().flatten().next() {
            info!("Found disk for resume: {:?}", disk_store);
            return Ok(disk_store);
        }

        Err(Error::TaskExecutionFailed {
            message: format!("No available disk found for set_disk_id: {set_disk_id}"),
        })
    }
}
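
// NOTE (illustrative, not part of this changeset): the `set_disk_id` convention
// above is just a string encoding of a (pool, set) pair. A small round-trip
// helper makes the contract explicit; these functions are hypothetical.
fn make_set_disk_id(pool_idx: usize, set_idx: usize) -> String {
    format!("pool_{pool_idx}_set_{set_idx}")
}

fn parse_set_disk_id(id: &str) -> Option<(usize, usize)> {
    let parts: Vec<&str> = id.split('_').collect();
    match parts.as_slice() {
        ["pool", pool, "set", set] => Some((pool.parse().ok()?, set.parse().ok()?)),
        _ => None,
    }
}
// make_set_disk_id(0, 2) == "pool_0_set_2"; parse_set_disk_id round-trips it.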
crates/ahm/src/heal/task.rs (new file, 855 lines)
@@ -0,0 +1,855 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::error::{Error, Result};
use crate::heal::ErasureSetHealer;
use crate::heal::{progress::HealProgress, storage::HealStorageAPI};
use rustfs_common::heal_channel::{HealOpts, HealScanMode};
use serde::{Deserialize, Serialize};
use std::sync::Arc;
use std::time::{Duration, SystemTime};
use tokio::sync::RwLock;
use tracing::{error, info, warn};
use uuid::Uuid;

/// Heal type
#[derive(Debug, Clone)]
pub enum HealType {
    /// Object heal
    Object {
        bucket: String,
        object: String,
        version_id: Option<String>,
    },
    /// Bucket heal
    Bucket { bucket: String },
    /// Erasure set heal (includes disk format repair)
    ErasureSet { buckets: Vec<String>, set_disk_id: String },
    /// Metadata heal
    Metadata { bucket: String, object: String },
    /// MRF heal
    MRF { meta_path: String },
    /// EC decode heal
    ECDecode {
        bucket: String,
        object: String,
        version_id: Option<String>,
    },
}

/// Heal priority
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
pub enum HealPriority {
    /// Low priority
    Low = 0,
    /// Normal priority
    Normal = 1,
    /// High priority
    High = 2,
    /// Urgent priority
    Urgent = 3,
}

impl Default for HealPriority {
    fn default() -> Self {
        Self::Normal
    }
}

/// Heal options
#[derive(Debug, Clone, Serialize, Deserialize)]
pub struct HealOptions {
    /// Scan mode
    pub scan_mode: HealScanMode,
    /// Whether to remove corrupted data
    pub remove_corrupted: bool,
    /// Whether to recreate missing data
    pub recreate_missing: bool,
    /// Whether to update parity
    pub update_parity: bool,
    /// Whether to process recursively
    pub recursive: bool,
    /// Whether to perform a dry run
    pub dry_run: bool,
    /// Timeout
    pub timeout: Option<Duration>,
    /// Pool index
    pub pool_index: Option<usize>,
    /// Set index
    pub set_index: Option<usize>,
}

impl Default for HealOptions {
    fn default() -> Self {
        Self {
            scan_mode: HealScanMode::Normal,
            remove_corrupted: false,
            recreate_missing: true,
            update_parity: true,
            recursive: false,
            dry_run: false,
            timeout: Some(Duration::from_secs(300)), // 5-minute default timeout
            pool_index: None,
            set_index: None,
        }
    }
}

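// NOTE (illustrative, not part of this changeset): callers can override just a
// few fields on top of the defaults above with struct-update syntax. This
// "audit" configuration is a hypothetical example, not a preset of the module.
fn audit_options() -> HealOptions {
    HealOptions {
        dry_run: true,                 // report only; make no changes
        scan_mode: HealScanMode::Deep, // bitrot-level verification
        ..HealOptions::default()
    }
}
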
/// Heal task status
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
pub enum HealTaskStatus {
    /// Pending
    Pending,
    /// Running
    Running,
    /// Completed
    Completed,
    /// Failed
    Failed { error: String },
    /// Cancelled
    Cancelled,
    /// Timeout
    Timeout,
}

/// Heal request
#[derive(Debug, Clone)]
pub struct HealRequest {
    /// Request ID
    pub id: String,
    /// Heal type
    pub heal_type: HealType,
    /// Heal options
    pub options: HealOptions,
    /// Priority
    pub priority: HealPriority,
    /// Created time
    pub created_at: SystemTime,
}

impl HealRequest {
    pub fn new(heal_type: HealType, options: HealOptions, priority: HealPriority) -> Self {
        Self {
            id: Uuid::new_v4().to_string(),
            heal_type,
            options,
            priority,
            created_at: SystemTime::now(),
        }
    }

    pub fn object(bucket: String, object: String, version_id: Option<String>) -> Self {
        Self::new(
            HealType::Object {
                bucket,
                object,
                version_id,
            },
            HealOptions::default(),
            HealPriority::Normal,
        )
    }

    pub fn bucket(bucket: String) -> Self {
        Self::new(HealType::Bucket { bucket }, HealOptions::default(), HealPriority::Normal)
    }

    pub fn metadata(bucket: String, object: String) -> Self {
        Self::new(HealType::Metadata { bucket, object }, HealOptions::default(), HealPriority::High)
    }

    pub fn ec_decode(bucket: String, object: String, version_id: Option<String>) -> Self {
        Self::new(
            HealType::ECDecode {
                bucket,
                object,
                version_id,
            },
            HealOptions::default(),
            HealPriority::Urgent,
        )
    }
}

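// NOTE (illustrative, not part of this changeset): turning a request into a
// task (defined below) and running it to completion. In real wiring, `storage`
// would typically be an Arc<ECStoreHealStorage>; the names are placeholders.
async fn run_object_heal(storage: Arc<dyn HealStorageAPI>) -> Result<()> {
    let request = HealRequest::object("bucket1".to_string(), "key1".to_string(), None);
    let task = HealTask::from_request(request, storage);
    task.execute().await?;
    assert_eq!(task.get_status().await, HealTaskStatus::Completed);
    Ok(())
}
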
||||
/// Heal task
|
||||
pub struct HealTask {
|
||||
/// Task ID
|
||||
pub id: String,
|
||||
/// Heal type
|
||||
pub heal_type: HealType,
|
||||
/// Heal options
|
||||
pub options: HealOptions,
|
||||
/// Task status
|
||||
pub status: Arc<RwLock<HealTaskStatus>>,
|
||||
/// Progress tracking
|
||||
pub progress: Arc<RwLock<HealProgress>>,
|
||||
/// Created time
|
||||
pub created_at: SystemTime,
|
||||
/// Started time
|
||||
pub started_at: Arc<RwLock<Option<SystemTime>>>,
|
||||
/// Completed time
|
||||
pub completed_at: Arc<RwLock<Option<SystemTime>>>,
|
||||
/// Cancel token
|
||||
pub cancel_token: tokio_util::sync::CancellationToken,
|
||||
/// Storage layer interface
|
||||
pub storage: Arc<dyn HealStorageAPI>,
|
||||
}
|
||||
|
||||
impl HealTask {
|
||||
pub fn from_request(request: HealRequest, storage: Arc<dyn HealStorageAPI>) -> Self {
|
||||
Self {
|
||||
id: request.id,
|
||||
heal_type: request.heal_type,
|
||||
options: request.options,
|
||||
status: Arc::new(RwLock::new(HealTaskStatus::Pending)),
|
||||
progress: Arc::new(RwLock::new(HealProgress::new())),
|
||||
created_at: request.created_at,
|
||||
started_at: Arc::new(RwLock::new(None)),
|
||||
completed_at: Arc::new(RwLock::new(None)),
|
||||
cancel_token: tokio_util::sync::CancellationToken::new(),
|
||||
storage,
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn execute(&self) -> Result<()> {
|
||||
// update status to running
|
||||
{
|
||||
let mut status = self.status.write().await;
|
||||
*status = HealTaskStatus::Running;
|
||||
}
|
||||
{
|
||||
let mut started_at = self.started_at.write().await;
|
||||
*started_at = Some(SystemTime::now());
|
||||
}
|
||||
|
||||
info!("Starting heal task: {} with type: {:?}", self.id, self.heal_type);
|
||||
|
||||
let result = match &self.heal_type {
|
||||
HealType::Object {
|
||||
bucket,
|
||||
object,
|
||||
version_id,
|
||||
} => self.heal_object(bucket, object, version_id.as_deref()).await,
|
||||
HealType::Bucket { bucket } => self.heal_bucket(bucket).await,
|
||||
|
||||
HealType::Metadata { bucket, object } => self.heal_metadata(bucket, object).await,
|
||||
HealType::MRF { meta_path } => self.heal_mrf(meta_path).await,
|
||||
HealType::ECDecode {
|
||||
bucket,
|
||||
object,
|
||||
version_id,
|
||||
} => self.heal_ec_decode(bucket, object, version_id.as_deref()).await,
|
||||
HealType::ErasureSet { buckets, set_disk_id } => self.heal_erasure_set(buckets.clone(), set_disk_id.clone()).await,
|
||||
};
|
||||
|
||||
// update completed time and status
|
||||
{
|
||||
let mut completed_at = self.completed_at.write().await;
|
||||
*completed_at = Some(SystemTime::now());
|
||||
}
|
||||
|
||||
match &result {
|
||||
Ok(_) => {
|
||||
let mut status = self.status.write().await;
|
||||
*status = HealTaskStatus::Completed;
|
||||
info!("Heal task completed successfully: {}", self.id);
|
||||
}
|
||||
Err(e) => {
|
||||
let mut status = self.status.write().await;
|
||||
*status = HealTaskStatus::Failed { error: e.to_string() };
|
||||
error!("Heal task failed: {} with error: {}", self.id, e);
|
||||
}
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
pub async fn cancel(&self) -> Result<()> {
|
||||
self.cancel_token.cancel();
|
||||
let mut status = self.status.write().await;
|
||||
*status = HealTaskStatus::Cancelled;
|
||||
info!("Heal task cancelled: {}", self.id);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn get_status(&self) -> HealTaskStatus {
|
||||
self.status.read().await.clone()
|
||||
}
|
||||
|
||||
pub async fn get_progress(&self) -> HealProgress {
|
||||
self.progress.read().await.clone()
|
||||
}
|
||||
|
||||
// specific heal implementation method
|
||||
async fn heal_object(&self, bucket: &str, object: &str, version_id: Option<&str>) -> Result<()> {
|
||||
info!("Healing object: {}/{}", bucket, object);
|
||||
|
||||
// update progress
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.set_current_object(Some(format!("{bucket}/{object}")));
|
||||
progress.update_progress(0, 4, 0, 0); // 开始heal,总共4个步骤
|
||||
}
|
||||
|
||||
// Step 1: Check if object exists and get metadata
|
||||
info!("Step 1: Checking object existence and metadata");
|
||||
let object_exists = self.storage.object_exists(bucket, object).await?;
|
||||
if !object_exists {
|
||||
warn!("Object does not exist: {}/{}", bucket, object);
|
||||
if self.options.recreate_missing {
|
||||
info!("Attempting to recreate missing object: {}/{}", bucket, object);
|
||||
return self.recreate_missing_object(bucket, object, version_id).await;
|
||||
} else {
|
||||
return Err(Error::TaskExecutionFailed {
|
||||
message: format!("Object not found: {bucket}/{object}"),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(1, 3, 0, 0);
|
||||
}
|
||||
|
||||
// Step 2: directly call ecstore to perform heal
|
||||
info!("Step 2: Performing heal using ecstore");
|
||||
let heal_opts = HealOpts {
|
||||
recursive: self.options.recursive,
|
||||
dry_run: self.options.dry_run,
|
||||
remove: self.options.remove_corrupted,
|
||||
recreate: self.options.recreate_missing,
|
||||
scan_mode: self.options.scan_mode,
|
||||
update_parity: self.options.update_parity,
|
||||
no_lock: false,
|
||||
pool: self.options.pool_index,
|
||||
set: self.options.set_index,
|
||||
};
|
||||
|
||||
match self.storage.heal_object(bucket, object, version_id, &heal_opts).await {
|
||||
Ok((result, error)) => {
|
||||
if let Some(e) = error {
|
||||
error!("Heal operation failed: {}/{} - {}", bucket, object, e);
|
||||
|
||||
// If heal failed and remove_corrupted is enabled, delete the corrupted object
|
||||
if self.options.remove_corrupted {
|
||||
warn!("Removing corrupted object: {}/{}", bucket, object);
|
||||
if !self.options.dry_run {
|
||||
self.storage.delete_object(bucket, object).await?;
|
||||
info!("Successfully deleted corrupted object: {}/{}", bucket, object);
|
||||
} else {
|
||||
info!("Dry run mode - would delete corrupted object: {}/{}", bucket, object);
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(3, 3, 0, 0);
|
||||
}
|
||||
|
||||
return Err(Error::TaskExecutionFailed {
|
||||
message: format!("Failed to heal object {bucket}/{object}: {e}"),
|
||||
});
|
||||
}
|
||||
|
||||
// Step 3: Verify heal result
|
||||
info!("Step 3: Verifying heal result");
|
||||
let object_size = result.object_size as u64;
|
||||
info!(
|
||||
"Heal completed successfully: {}/{} ({} bytes, {} drives healed)",
|
||||
bucket,
|
||||
object,
|
||||
object_size,
|
||||
result.after.drives.len()
|
||||
);
|
||||
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(3, 3, object_size, object_size);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Heal operation failed: {}/{} - {}", bucket, object, e);
|
||||
|
||||
// If heal failed and remove_corrupted is enabled, delete the corrupted object
|
||||
if self.options.remove_corrupted {
|
||||
warn!("Removing corrupted object: {}/{}", bucket, object);
|
||||
if !self.options.dry_run {
|
||||
self.storage.delete_object(bucket, object).await?;
|
||||
info!("Successfully deleted corrupted object: {}/{}", bucket, object);
|
||||
} else {
|
||||
info!("Dry run mode - would delete corrupted object: {}/{}", bucket, object);
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(3, 3, 0, 0);
|
||||
}
|
||||
|
||||
Err(Error::TaskExecutionFailed {
|
||||
message: format!("Failed to heal object {bucket}/{object}: {e}"),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Recreate missing object (for EC decode scenarios)
|
||||
async fn recreate_missing_object(&self, bucket: &str, object: &str, version_id: Option<&str>) -> Result<()> {
|
||||
info!("Attempting to recreate missing object: {}/{}", bucket, object);
|
||||
|
||||
// Use ecstore's heal_object with recreate option
|
||||
let heal_opts = HealOpts {
|
||||
recursive: false,
|
||||
dry_run: self.options.dry_run,
|
||||
remove: false,
|
||||
recreate: true,
|
||||
scan_mode: HealScanMode::Deep,
|
||||
update_parity: true,
|
||||
no_lock: false,
|
||||
pool: None,
|
||||
set: None,
|
||||
};
|
||||
|
||||
match self.storage.heal_object(bucket, object, version_id, &heal_opts).await {
|
||||
Ok((result, error)) => {
|
||||
if let Some(e) = error {
|
||||
error!("Failed to recreate missing object: {}/{} - {}", bucket, object, e);
|
||||
return Err(Error::TaskExecutionFailed {
|
||||
message: format!("Failed to recreate missing object {bucket}/{object}: {e}"),
|
||||
});
|
||||
}
|
||||
|
||||
let object_size = result.object_size as u64;
|
||||
info!("Successfully recreated missing object: {}/{} ({} bytes)", bucket, object, object_size);
|
||||
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(4, 4, object_size, object_size);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to recreate missing object: {}/{} - {}", bucket, object, e);
|
||||
Err(Error::TaskExecutionFailed {
|
||||
message: format!("Failed to recreate missing object {bucket}/{object}: {e}"),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn heal_bucket(&self, bucket: &str) -> Result<()> {
|
||||
info!("Healing bucket: {}", bucket);
|
||||
|
||||
// update progress
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.set_current_object(Some(format!("bucket: {bucket}")));
|
||||
progress.update_progress(0, 3, 0, 0);
|
||||
}
|
||||
|
||||
// Step 1: Check if bucket exists
|
||||
info!("Step 1: Checking bucket existence");
|
||||
let bucket_exists = self.storage.get_bucket_info(bucket).await?.is_some();
|
||||
if !bucket_exists {
|
||||
warn!("Bucket does not exist: {}", bucket);
|
||||
return Err(Error::TaskExecutionFailed {
|
||||
message: format!("Bucket not found: {bucket}"),
|
||||
});
|
||||
}
|
||||
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(1, 3, 0, 0);
|
||||
}
|
||||
|
||||
// Step 2: Perform bucket heal using ecstore
|
||||
info!("Step 2: Performing bucket heal using ecstore");
|
||||
let heal_opts = HealOpts {
|
||||
recursive: self.options.recursive,
|
||||
dry_run: self.options.dry_run,
|
||||
remove: self.options.remove_corrupted,
|
||||
recreate: self.options.recreate_missing,
|
||||
scan_mode: self.options.scan_mode,
|
||||
update_parity: self.options.update_parity,
|
||||
no_lock: false,
|
||||
pool: self.options.pool_index,
|
||||
set: self.options.set_index,
|
||||
};
|
||||
|
||||
match self.storage.heal_bucket(bucket, &heal_opts).await {
|
||||
Ok(result) => {
|
||||
info!("Bucket heal completed successfully: {} ({} drives)", bucket, result.after.drives.len());
|
||||
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(3, 3, 0, 0);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Bucket heal failed: {} - {}", bucket, e);
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(3, 3, 0, 0);
|
||||
}
|
||||
Err(Error::TaskExecutionFailed {
|
||||
message: format!("Failed to heal bucket {bucket}: {e}"),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn heal_metadata(&self, bucket: &str, object: &str) -> Result<()> {
|
||||
info!("Healing metadata: {}/{}", bucket, object);
|
||||
|
||||
// update progress
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.set_current_object(Some(format!("metadata: {bucket}/{object}")));
|
||||
progress.update_progress(0, 3, 0, 0);
|
||||
}
|
||||
|
||||
// Step 1: Check if object exists
|
||||
info!("Step 1: Checking object existence");
|
||||
let object_exists = self.storage.object_exists(bucket, object).await?;
|
||||
if !object_exists {
|
||||
warn!("Object does not exist: {}/{}", bucket, object);
|
||||
return Err(Error::TaskExecutionFailed {
|
||||
message: format!("Object not found: {bucket}/{object}"),
|
||||
});
|
||||
}
|
||||
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(1, 3, 0, 0);
|
||||
}
|
||||
|
||||
// Step 2: Perform metadata heal using ecstore
|
||||
info!("Step 2: Performing metadata heal using ecstore");
|
||||
let heal_opts = HealOpts {
|
||||
recursive: false,
|
||||
dry_run: self.options.dry_run,
|
||||
remove: false,
|
||||
recreate: false,
|
||||
scan_mode: HealScanMode::Deep,
|
||||
update_parity: false,
|
||||
no_lock: false,
|
||||
pool: self.options.pool_index,
|
||||
set: self.options.set_index,
|
||||
};
|
||||
|
||||
match self.storage.heal_object(bucket, object, None, &heal_opts).await {
|
||||
Ok((result, error)) => {
|
||||
if let Some(e) = error {
|
||||
error!("Metadata heal failed: {}/{} - {}", bucket, object, e);
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(3, 3, 0, 0);
|
||||
}
|
||||
return Err(Error::TaskExecutionFailed {
|
||||
message: format!("Failed to heal metadata {bucket}/{object}: {e}"),
|
||||
});
|
||||
}
|
||||
|
||||
info!(
|
||||
"Metadata heal completed successfully: {}/{} ({} drives)",
|
||||
bucket,
|
||||
object,
|
||||
result.after.drives.len()
|
||||
);
|
||||
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(3, 3, 0, 0);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Metadata heal failed: {}/{} - {}", bucket, object, e);
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(3, 3, 0, 0);
|
||||
}
|
||||
Err(Error::TaskExecutionFailed {
|
||||
message: format!("Failed to heal metadata {bucket}/{object}: {e}"),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
    async fn heal_mrf(&self, meta_path: &str) -> Result<()> {
        info!("Healing MRF: {}", meta_path);

        // update progress
        {
            let mut progress = self.progress.write().await;
            progress.set_current_object(Some(format!("mrf: {meta_path}")));
            progress.update_progress(0, 2, 0, 0);
        }

        // Parse meta_path to extract bucket and object: everything before the
        // first '/' is the bucket, the rest is the object key, e.g.
        // "my-bucket/logs/2024/app.log" -> ("my-bucket", "logs/2024/app.log").
        let parts: Vec<&str> = meta_path.split('/').collect();
        if parts.len() < 2 {
            return Err(Error::TaskExecutionFailed {
                message: format!("Invalid meta path format: {meta_path}"),
            });
        }

        let bucket = parts[0];
        let object = parts[1..].join("/");

        // Step 1: Perform MRF heal using ecstore
        info!("Step 1: Performing MRF heal using ecstore");
        let heal_opts = HealOpts {
            recursive: true,
            dry_run: self.options.dry_run,
            remove: self.options.remove_corrupted,
            recreate: self.options.recreate_missing,
            scan_mode: HealScanMode::Deep,
            update_parity: true,
            no_lock: false,
            pool: None,
            set: None,
        };

        match self.storage.heal_object(bucket, &object, None, &heal_opts).await {
            Ok((result, error)) => {
                if let Some(e) = error {
                    error!("MRF heal failed: {} - {}", meta_path, e);
                    {
                        let mut progress = self.progress.write().await;
                        progress.update_progress(2, 2, 0, 0);
                    }
                    return Err(Error::TaskExecutionFailed {
                        message: format!("Failed to heal MRF {meta_path}: {e}"),
                    });
                }

                info!("MRF heal completed successfully: {} ({} drives)", meta_path, result.after.drives.len());

                {
                    let mut progress = self.progress.write().await;
                    progress.update_progress(2, 2, 0, 0);
                }
                Ok(())
            }
            Err(e) => {
                error!("MRF heal failed: {} - {}", meta_path, e);
                {
                    let mut progress = self.progress.write().await;
                    progress.update_progress(2, 2, 0, 0);
                }
                Err(Error::TaskExecutionFailed {
                    message: format!("Failed to heal MRF {meta_path}: {e}"),
                })
            }
        }
    }

    async fn heal_ec_decode(&self, bucket: &str, object: &str, version_id: Option<&str>) -> Result<()> {
        info!("Healing EC decode: {}/{}", bucket, object);

        // update progress
        {
            let mut progress = self.progress.write().await;
            progress.set_current_object(Some(format!("ec_decode: {bucket}/{object}")));
            progress.update_progress(0, 3, 0, 0);
        }

        // Step 1: Check if object exists
        info!("Step 1: Checking object existence");
        let object_exists = self.storage.object_exists(bucket, object).await?;
        if !object_exists {
            warn!("Object does not exist: {}/{}", bucket, object);
            return Err(Error::TaskExecutionFailed {
                message: format!("Object not found: {bucket}/{object}"),
            });
        }

        {
            let mut progress = self.progress.write().await;
            progress.update_progress(1, 3, 0, 0);
        }

        // Step 2: Perform EC decode heal using ecstore
        info!("Step 2: Performing EC decode heal using ecstore");
        let heal_opts = HealOpts {
            recursive: false,
            dry_run: self.options.dry_run,
            remove: false,
            recreate: true,
            scan_mode: HealScanMode::Deep,
            update_parity: true,
            no_lock: false,
            pool: None,
            set: None,
        };

        match self.storage.heal_object(bucket, object, version_id, &heal_opts).await {
            Ok((result, error)) => {
                if let Some(e) = error {
                    error!("EC decode heal failed: {}/{} - {}", bucket, object, e);
                    {
                        let mut progress = self.progress.write().await;
                        progress.update_progress(3, 3, 0, 0);
                    }
                    return Err(Error::TaskExecutionFailed {
                        message: format!("Failed to heal EC decode {bucket}/{object}: {e}"),
                    });
                }

                let object_size = result.object_size as u64;
                info!(
                    "EC decode heal completed successfully: {}/{} ({} bytes, {} drives)",
                    bucket,
                    object,
                    object_size,
                    result.after.drives.len()
                );

                {
                    let mut progress = self.progress.write().await;
                    progress.update_progress(3, 3, object_size, object_size);
                }
                Ok(())
            }
            Err(e) => {
                error!("EC decode heal failed: {}/{} - {}", bucket, object, e);
                {
                    let mut progress = self.progress.write().await;
                    progress.update_progress(3, 3, 0, 0);
                }
                Err(Error::TaskExecutionFailed {
                    message: format!("Failed to heal EC decode {bucket}/{object}: {e}"),
                })
            }
        }
    }

    async fn heal_erasure_set(&self, buckets: Vec<String>, set_disk_id: String) -> Result<()> {
        info!("Healing Erasure Set: {} ({} buckets)", set_disk_id, buckets.len());

        // update progress
        {
            let mut progress = self.progress.write().await;
            progress.set_current_object(Some(format!("erasure_set: {} ({} buckets)", set_disk_id, buckets.len())));
            progress.update_progress(0, 4, 0, 0);
        }

        let buckets = if buckets.is_empty() {
            info!("No buckets specified, listing all buckets");
            let bucket_infos = self.storage.list_buckets().await?;
            bucket_infos.into_iter().map(|info| info.name).collect()
        } else {
            buckets
        };

        // Step 1: Perform disk format heal using ecstore
        info!("Step 1: Performing disk format heal using ecstore");
        match self.storage.heal_format(self.options.dry_run).await {
            Ok((result, error)) => {
                if let Some(e) = error {
                    error!("Disk format heal failed: {} - {}", set_disk_id, e);
                    {
                        let mut progress = self.progress.write().await;
                        progress.update_progress(4, 4, 0, 0);
                    }
                    return Err(Error::TaskExecutionFailed {
                        message: format!("Failed to heal disk format for {set_disk_id}: {e}"),
                    });
                }

                info!(
                    "Disk format heal completed successfully: {} ({} drives)",
                    set_disk_id,
                    result.after.drives.len()
                );
            }
            Err(e) => {
                error!("Disk format heal failed: {} - {}", set_disk_id, e);
                {
                    let mut progress = self.progress.write().await;
                    progress.update_progress(4, 4, 0, 0);
                }
                return Err(Error::TaskExecutionFailed {
                    message: format!("Failed to heal disk format for {set_disk_id}: {e}"),
                });
            }
        }

        {
            let mut progress = self.progress.write().await;
            progress.update_progress(1, 4, 0, 0);
        }

        // Step 2: Get disk for resume functionality
        info!("Step 2: Getting disk for resume functionality");
        let disk = self.storage.get_disk_for_resume(&set_disk_id).await?;

        {
            let mut progress = self.progress.write().await;
            progress.update_progress(2, 4, 0, 0);
        }

        // Step 3: Heal bucket structure
        info!("Step 3: Healing bucket structure");
        for bucket in buckets.iter() {
            if let Err(err) = self.heal_bucket(bucket).await {
                info!("{}", err.to_string());
            }
        }

        // Step 4: Create erasure set healer with resume support
        info!("Step 4: Creating erasure set healer with resume support");
        let erasure_healer = ErasureSetHealer::new(self.storage.clone(), self.progress.clone(), self.cancel_token.clone(), disk);

        {
            let mut progress = self.progress.write().await;
            progress.update_progress(3, 4, 0, 0);
        }

        // Step 5: Execute erasure set heal with resume
        info!("Step 5: Executing erasure set heal with resume");
        let result = erasure_healer.heal_erasure_set(&buckets, &set_disk_id).await;

        {
            let mut progress = self.progress.write().await;
            progress.update_progress(4, 4, 0, 0);
        }

        match result {
            Ok(_) => {
                info!("Erasure set heal completed successfully: {} ({} buckets)", set_disk_id, buckets.len());
                Ok(())
            }
            Err(e) => {
                error!("Erasure set heal failed: {} - {}", set_disk_id, e);
                Err(Error::TaskExecutionFailed {
                    message: format!("Failed to heal erasure set {set_disk_id}: {e}"),
                })
            }
        }
    }
}

impl std::fmt::Debug for HealTask {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("HealTask")
            .field("id", &self.id)
            .field("heal_type", &self.heal_type)
            .field("options", &self.options)
            .field("created_at", &self.created_at)
            .finish()
    }
}

112
crates/ahm/src/lib.rs
Normal file
@@ -0,0 +1,112 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::{Arc, OnceLock};
use tokio_util::sync::CancellationToken;
use tracing::{error, info};

pub mod error;
pub mod heal;
pub mod scanner;

pub use error::{Error, Result};
pub use heal::{HealManager, HealOptions, HealPriority, HealRequest, HealType, channel::HealChannelProcessor};
pub use scanner::Scanner;

// Global cancellation token for AHM services (scanner and other background tasks)
static GLOBAL_AHM_SERVICES_CANCEL_TOKEN: OnceLock<CancellationToken> = OnceLock::new();

/// Initialize the global AHM services cancellation token
pub fn init_ahm_services_cancel_token(cancel_token: CancellationToken) -> Result<()> {
    GLOBAL_AHM_SERVICES_CANCEL_TOKEN
        .set(cancel_token)
        .map_err(|_| Error::Config("AHM services cancel token already initialized".to_string()))
}

/// Get the global AHM services cancellation token
pub fn get_ahm_services_cancel_token() -> Option<&'static CancellationToken> {
    GLOBAL_AHM_SERVICES_CANCEL_TOKEN.get()
}

/// Create and initialize the global AHM services cancellation token
pub fn create_ahm_services_cancel_token() -> CancellationToken {
    let cancel_token = CancellationToken::new();
    init_ahm_services_cancel_token(cancel_token.clone()).expect("AHM services cancel token already initialized");
    cancel_token
}

/// Shutdown all AHM services gracefully
pub fn shutdown_ahm_services() {
    if let Some(cancel_token) = GLOBAL_AHM_SERVICES_CANCEL_TOKEN.get() {
        cancel_token.cancel();
    }
}

/// Global heal manager instance
static GLOBAL_HEAL_MANAGER: OnceLock<Arc<HealManager>> = OnceLock::new();

/// Global heal channel processor instance
static GLOBAL_HEAL_CHANNEL_PROCESSOR: OnceLock<Arc<tokio::sync::Mutex<HealChannelProcessor>>> = OnceLock::new();

/// Initialize and start heal manager with channel processor
pub async fn init_heal_manager(
    storage: Arc<dyn heal::storage::HealStorageAPI>,
    config: Option<heal::manager::HealConfig>,
) -> Result<Arc<HealManager>> {
    // Create heal manager
    let heal_manager = Arc::new(HealManager::new(storage, config));

    // Start heal manager
    heal_manager.start().await?;

    // Store global instance
    GLOBAL_HEAL_MANAGER
        .set(heal_manager.clone())
        .map_err(|_| Error::Config("Heal manager already initialized".to_string()))?;

    // Initialize heal channel
    let channel_receiver = rustfs_common::heal_channel::init_heal_channel();

    // Create channel processor
    let channel_processor = HealChannelProcessor::new(heal_manager.clone());

    // Store channel processor instance first
    GLOBAL_HEAL_CHANNEL_PROCESSOR
        .set(Arc::new(tokio::sync::Mutex::new(channel_processor)))
        .map_err(|_| Error::Config("Heal channel processor already initialized".to_string()))?;

    // Start channel processor in background
    let receiver = channel_receiver;
    tokio::spawn(async move {
        if let Some(processor_guard) = GLOBAL_HEAL_CHANNEL_PROCESSOR.get() {
            let mut processor = processor_guard.lock().await;
            if let Err(e) = processor.start(receiver).await {
                error!("Heal channel processor failed: {}", e);
            }
        }
    });

    info!("Heal manager with channel processor initialized successfully");
    Ok(heal_manager)
}

/// Get global heal manager instance
pub fn get_heal_manager() -> Option<&'static Arc<HealManager>> {
    GLOBAL_HEAL_MANAGER.get()
}

/// Get global heal channel processor instance
pub fn get_heal_channel_processor() -> Option<&'static Arc<tokio::sync::Mutex<HealChannelProcessor>>> {
    GLOBAL_HEAL_CHANNEL_PROCESSOR.get()
}

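A minimal wiring sketch of how a server binary might bring this module up, assuming an `ECStore` handle is already constructed (`ECStoreHealStorage` is the `HealStorageAPI` implementation exercised by the integration tests further down). Illustrative only, not part of the diff:

```rust
use std::sync::Arc;

use rustfs_ahm::heal::storage::ECStoreHealStorage;
use rustfs_ahm::{create_ahm_services_cancel_token, init_heal_manager, shutdown_ahm_services};
use rustfs_ecstore::store::ECStore;

async fn start_ahm(ecstore: Arc<ECStore>) -> rustfs_ahm::Result<()> {
    // One global token guards the scanner and other background services.
    let _cancel = create_ahm_services_cancel_token();

    // None picks the default HealConfig; pass Some(cfg) to tune intervals.
    let heal_storage = Arc::new(ECStoreHealStorage::new(ecstore));
    let _manager = init_heal_manager(heal_storage, None).await?;
    Ok(())
}

fn stop_ahm() {
    // Cancels everything listening on the global token.
    shutdown_ahm_services();
}
```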
2678
crates/ahm/src/scanner/data_scanner.rs
Normal file
306
crates/ahm/src/scanner/histogram.rs
Normal file
125
crates/ahm/src/scanner/lifecycle.rs
Normal file
@@ -0,0 +1,125 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::Arc;

use rustfs_common::metrics::IlmAction;
use rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_audit::LcEventSrc;
use rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::{apply_lifecycle_action, eval_action_from_lifecycle};
use rustfs_ecstore::bucket::metadata_sys::get_object_lock_config;
use rustfs_ecstore::cmd::bucket_targets::VersioningConfig;
use rustfs_ecstore::store_api::ObjectInfo;
use rustfs_filemeta::FileMetaVersion;
use rustfs_filemeta::metacache::MetaCacheEntry;
use s3s::dto::BucketLifecycleConfiguration as LifecycleConfig;
use tracing::info;

#[derive(Clone)]
pub struct ScannerItem {
    bucket: String,
    lifecycle: Option<Arc<LifecycleConfig>>,
    versioning: Option<Arc<VersioningConfig>>,
}

impl ScannerItem {
    pub fn new(bucket: String, lifecycle: Option<Arc<LifecycleConfig>>, versioning: Option<Arc<VersioningConfig>>) -> Self {
        Self {
            bucket,
            lifecycle,
            versioning,
        }
    }

    pub async fn apply_actions(&mut self, object: &str, mut meta: MetaCacheEntry) -> anyhow::Result<()> {
        info!("apply_actions called for object: {}", object);
        if self.lifecycle.is_none() {
            info!("No lifecycle config for object: {}", object);
            return Ok(());
        }
        info!("Lifecycle config exists for object: {}", object);

        let file_meta = match meta.xl_meta() {
            Ok(meta) => meta,
            Err(e) => {
                tracing::error!("Failed to get xl_meta for {}: {}", object, e);
                return Ok(());
            }
        };

        let latest_version = file_meta.versions.first().cloned().unwrap_or_default();
        let file_meta_version = FileMetaVersion::try_from(latest_version.meta.as_slice()).unwrap_or_default();

        let obj_info = ObjectInfo {
            bucket: self.bucket.clone(),
            name: object.to_string(),
            version_id: latest_version.header.version_id,
            mod_time: latest_version.header.mod_time,
            size: file_meta_version.object.as_ref().map_or(0, |o| o.size),
            user_defined: serde_json::from_slice(file_meta.data.as_slice()).unwrap_or_default(),
            ..Default::default()
        };

        self.apply_lifecycle(&obj_info).await;

        Ok(())
    }

    async fn apply_lifecycle(&mut self, oi: &ObjectInfo) -> (IlmAction, i64) {
        let size = oi.size;
        if self.lifecycle.is_none() {
            return (IlmAction::NoneAction, size);
        }

        let (olcfg, rcfg) = if self.bucket != ".minio.sys" {
            (
                get_object_lock_config(&self.bucket).await.ok(),
                None, // FIXME: replication config
            )
        } else {
            (None, None)
        };

        let lc_evt = eval_action_from_lifecycle(
            self.lifecycle.as_ref().unwrap(),
            olcfg
                .as_ref()
                .and_then(|(c, _)| c.rule.as_ref().and_then(|r| r.default_retention.clone())),
            rcfg.clone(),
            oi,
        )
        .await;

        info!("lifecycle: {} Initial scan: {}", oi.name, lc_evt.action);

        let mut new_size = size;
        match lc_evt.action {
            IlmAction::DeleteVersionAction | IlmAction::DeleteAllVersionsAction | IlmAction::DelMarkerDeleteAllVersionsAction => {
                new_size = 0;
            }
            IlmAction::DeleteAction => {
                if let Some(vcfg) = &self.versioning {
                    if !vcfg.is_enabled() {
                        new_size = 0;
                    }
                } else {
                    new_size = 0;
                }
            }
            _ => (),
        }

        apply_lifecycle_action(&lc_evt, &LcEventSrc::Scanner, oi).await;
        (lc_evt.action, new_size)
    }
}

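A sketch of how a scan loop might drive `ScannerItem`; the inputs `bucket_lc` (the bucket's parsed lifecycle configuration) and `entry` (a `MetaCacheEntry` from the disk walk) are assumed names, not from the diff:

```rust
use std::sync::Arc;

use rustfs_filemeta::metacache::MetaCacheEntry;
use s3s::dto::BucketLifecycleConfiguration as LifecycleConfig;

async fn scan_one(bucket_lc: Option<Arc<LifecycleConfig>>, entry: MetaCacheEntry) -> anyhow::Result<()> {
    // No versioning config (None): a DeleteAction then zeroes the size estimate.
    let mut item = ScannerItem::new("my-bucket".to_string(), bucket_lc, None);
    // Evaluates lifecycle rules against the object's latest version and applies
    // any resulting expiry action through ecstore.
    item.apply_actions("logs/2024/app.log", entry).await
}
```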
306
crates/ahm/src/scanner/metrics.rs
Normal file
@@ -0,0 +1,306 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::{
    collections::HashMap,
    sync::atomic::{AtomicU64, Ordering},
    time::{Duration, SystemTime},
};

use serde::{Deserialize, Serialize};
use tracing::info;

/// Scanner metrics
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ScannerMetrics {
    /// Total objects scanned since server start
    pub objects_scanned: u64,
    /// Total object versions scanned since server start
    pub versions_scanned: u64,
    /// Total directories scanned since server start
    pub directories_scanned: u64,
    /// Total bucket scans started since server start
    pub bucket_scans_started: u64,
    /// Total bucket scans finished since server start
    pub bucket_scans_finished: u64,
    /// Total objects with health issues found
    pub objects_with_issues: u64,
    /// Total heal tasks queued
    pub heal_tasks_queued: u64,
    /// Total heal tasks completed
    pub heal_tasks_completed: u64,
    /// Total heal tasks failed
    pub heal_tasks_failed: u64,
    /// Total healthy objects found
    pub healthy_objects: u64,
    /// Total corrupted objects found
    pub corrupted_objects: u64,
    /// Last scan activity time
    pub last_activity: Option<SystemTime>,
    /// Current scan cycle
    pub current_cycle: u64,
    /// Total scan cycles completed
    pub total_cycles: u64,
    /// Current scan duration
    pub current_scan_duration: Option<Duration>,
    /// Average scan duration
    pub avg_scan_duration: Duration,
    /// Objects scanned per second
    pub objects_per_second: f64,
    /// Buckets scanned per second
    pub buckets_per_second: f64,
    /// Storage metrics by bucket
    pub bucket_metrics: HashMap<String, BucketMetrics>,
    /// Disk metrics
    pub disk_metrics: HashMap<String, DiskMetrics>,
}

/// Bucket-specific metrics
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct BucketMetrics {
    /// Bucket name
    pub bucket: String,
    /// Total objects in bucket
    pub total_objects: u64,
    /// Total size of objects in bucket (bytes)
    pub total_size: u64,
    /// Objects with health issues
    pub objects_with_issues: u64,
    /// Last scan time
    pub last_scan_time: Option<SystemTime>,
    /// Scan duration
    pub scan_duration: Option<Duration>,
    /// Heal tasks queued for this bucket
    pub heal_tasks_queued: u64,
    /// Heal tasks completed for this bucket
    pub heal_tasks_completed: u64,
    /// Heal tasks failed for this bucket
    pub heal_tasks_failed: u64,
}

/// Disk-specific metrics
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct DiskMetrics {
    /// Disk path
    pub disk_path: String,
    /// Total disk space (bytes)
    pub total_space: u64,
    /// Used disk space (bytes)
    pub used_space: u64,
    /// Free disk space (bytes)
    pub free_space: u64,
    /// Objects scanned on this disk
    pub objects_scanned: u64,
    /// Objects with issues on this disk
    pub objects_with_issues: u64,
    /// Last scan time
    pub last_scan_time: Option<SystemTime>,
    /// Whether disk is online
    pub is_online: bool,
    /// Whether disk is being scanned
    pub is_scanning: bool,
}

/// Thread-safe metrics collector
pub struct MetricsCollector {
    /// Atomic counters for real-time metrics
    objects_scanned: AtomicU64,
    versions_scanned: AtomicU64,
    directories_scanned: AtomicU64,
    bucket_scans_started: AtomicU64,
    bucket_scans_finished: AtomicU64,
    objects_with_issues: AtomicU64,
    heal_tasks_queued: AtomicU64,
    heal_tasks_completed: AtomicU64,
    heal_tasks_failed: AtomicU64,
    current_cycle: AtomicU64,
    total_cycles: AtomicU64,
    healthy_objects: AtomicU64,
    corrupted_objects: AtomicU64,
}

impl MetricsCollector {
    /// Create a new metrics collector
    pub fn new() -> Self {
        Self {
            objects_scanned: AtomicU64::new(0),
            versions_scanned: AtomicU64::new(0),
            directories_scanned: AtomicU64::new(0),
            bucket_scans_started: AtomicU64::new(0),
            bucket_scans_finished: AtomicU64::new(0),
            objects_with_issues: AtomicU64::new(0),
            heal_tasks_queued: AtomicU64::new(0),
            heal_tasks_completed: AtomicU64::new(0),
            heal_tasks_failed: AtomicU64::new(0),
            current_cycle: AtomicU64::new(0),
            total_cycles: AtomicU64::new(0),
            healthy_objects: AtomicU64::new(0),
            corrupted_objects: AtomicU64::new(0),
        }
    }

    /// Increment objects scanned count
    pub fn increment_objects_scanned(&self, count: u64) {
        self.objects_scanned.fetch_add(count, Ordering::Relaxed);
    }

    /// Increment versions scanned count
    pub fn increment_versions_scanned(&self, count: u64) {
        self.versions_scanned.fetch_add(count, Ordering::Relaxed);
    }

    /// Increment directories scanned count
    pub fn increment_directories_scanned(&self, count: u64) {
        self.directories_scanned.fetch_add(count, Ordering::Relaxed);
    }

    /// Increment bucket scans started count
    pub fn increment_bucket_scans_started(&self, count: u64) {
        self.bucket_scans_started.fetch_add(count, Ordering::Relaxed);
    }

    /// Increment bucket scans finished count
    pub fn increment_bucket_scans_finished(&self, count: u64) {
        self.bucket_scans_finished.fetch_add(count, Ordering::Relaxed);
    }

    /// Increment objects with issues count
    pub fn increment_objects_with_issues(&self, count: u64) {
        self.objects_with_issues.fetch_add(count, Ordering::Relaxed);
    }

    /// Increment heal tasks queued count
    pub fn increment_heal_tasks_queued(&self, count: u64) {
        self.heal_tasks_queued.fetch_add(count, Ordering::Relaxed);
    }

    /// Increment heal tasks completed count
    pub fn increment_heal_tasks_completed(&self, count: u64) {
        self.heal_tasks_completed.fetch_add(count, Ordering::Relaxed);
    }

    /// Increment heal tasks failed count
    pub fn increment_heal_tasks_failed(&self, count: u64) {
        self.heal_tasks_failed.fetch_add(count, Ordering::Relaxed);
    }

    /// Set current cycle
    pub fn set_current_cycle(&self, cycle: u64) {
        self.current_cycle.store(cycle, Ordering::Relaxed);
    }

    /// Increment total cycles
    pub fn increment_total_cycles(&self) {
        self.total_cycles.fetch_add(1, Ordering::Relaxed);
    }

    /// Increment healthy objects count
    pub fn increment_healthy_objects(&self) {
        self.healthy_objects.fetch_add(1, Ordering::Relaxed);
    }

    /// Increment corrupted objects count
    pub fn increment_corrupted_objects(&self) {
        self.corrupted_objects.fetch_add(1, Ordering::Relaxed);
    }

    /// Get current metrics snapshot
    pub fn get_metrics(&self) -> ScannerMetrics {
        ScannerMetrics {
            objects_scanned: self.objects_scanned.load(Ordering::Relaxed),
            versions_scanned: self.versions_scanned.load(Ordering::Relaxed),
            directories_scanned: self.directories_scanned.load(Ordering::Relaxed),
            bucket_scans_started: self.bucket_scans_started.load(Ordering::Relaxed),
            bucket_scans_finished: self.bucket_scans_finished.load(Ordering::Relaxed),
            objects_with_issues: self.objects_with_issues.load(Ordering::Relaxed),
            heal_tasks_queued: self.heal_tasks_queued.load(Ordering::Relaxed),
            heal_tasks_completed: self.heal_tasks_completed.load(Ordering::Relaxed),
            heal_tasks_failed: self.heal_tasks_failed.load(Ordering::Relaxed),
            healthy_objects: self.healthy_objects.load(Ordering::Relaxed),
            corrupted_objects: self.corrupted_objects.load(Ordering::Relaxed),
            last_activity: Some(SystemTime::now()),
            current_cycle: self.current_cycle.load(Ordering::Relaxed),
            total_cycles: self.total_cycles.load(Ordering::Relaxed),
            current_scan_duration: None,       // Will be set by scanner
            avg_scan_duration: Duration::ZERO, // Will be calculated
            objects_per_second: 0.0,           // Will be calculated
            buckets_per_second: 0.0,           // Will be calculated
            bucket_metrics: HashMap::new(),    // Will be populated by scanner
            disk_metrics: HashMap::new(),      // Will be populated by scanner
        }
    }

    /// Reset all metrics
    pub fn reset(&self) {
        self.objects_scanned.store(0, Ordering::Relaxed);
        self.versions_scanned.store(0, Ordering::Relaxed);
        self.directories_scanned.store(0, Ordering::Relaxed);
        self.bucket_scans_started.store(0, Ordering::Relaxed);
        self.bucket_scans_finished.store(0, Ordering::Relaxed);
        self.objects_with_issues.store(0, Ordering::Relaxed);
        self.heal_tasks_queued.store(0, Ordering::Relaxed);
        self.heal_tasks_completed.store(0, Ordering::Relaxed);
        self.heal_tasks_failed.store(0, Ordering::Relaxed);
        self.current_cycle.store(0, Ordering::Relaxed);
        self.total_cycles.store(0, Ordering::Relaxed);
        self.healthy_objects.store(0, Ordering::Relaxed);
        self.corrupted_objects.store(0, Ordering::Relaxed);

        info!("Scanner metrics reset");
    }
}

impl Default for MetricsCollector {
    fn default() -> Self {
        Self::new()
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_metrics_collector_creation() {
        let collector = MetricsCollector::new();
        let metrics = collector.get_metrics();
        assert_eq!(metrics.objects_scanned, 0);
        assert_eq!(metrics.versions_scanned, 0);
    }

    #[test]
    fn test_metrics_increment() {
        let collector = MetricsCollector::new();

        collector.increment_objects_scanned(10);
        collector.increment_versions_scanned(5);
        collector.increment_objects_with_issues(2);

        let metrics = collector.get_metrics();
        assert_eq!(metrics.objects_scanned, 10);
        assert_eq!(metrics.versions_scanned, 5);
        assert_eq!(metrics.objects_with_issues, 2);
    }

    #[test]
    fn test_metrics_reset() {
        let collector = MetricsCollector::new();

        collector.increment_objects_scanned(10);
        collector.reset();

        let metrics = collector.get_metrics();
        assert_eq!(metrics.objects_scanned, 0);
    }
}

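Since every counter in `MetricsCollector` is an `AtomicU64`, the collector can be shared across tasks behind an `Arc` with no lock; a small usage sketch (not from the diff):

```rust
use std::sync::Arc;

#[tokio::main]
async fn main() {
    let collector = Arc::new(MetricsCollector::new());

    let c = collector.clone();
    let handle = tokio::spawn(async move {
        // Relaxed atomic increments: cheap enough for per-object updates.
        c.increment_objects_scanned(1);
        c.increment_healthy_objects();
    });
    handle.await.unwrap();

    // get_metrics() takes a point-in-time snapshot of all counters.
    let snapshot = collector.get_metrics();
    assert_eq!(snapshot.objects_scanned, 1);
    assert_eq!(snapshot.healthy_objects, 1);
}
```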
@@ -12,12 +12,10 @@
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod background_heal_ops;
pub mod data_scanner;
pub mod data_scanner_metric;
pub mod data_usage;
pub mod data_usage_cache;
pub mod error;
pub mod heal_commands;
pub mod heal_ops;
pub mod mrf;
pub mod histogram;
pub mod lifecycle;
pub mod metrics;

pub use data_scanner::Scanner;
pub use metrics::ScannerMetrics;

410
crates/ahm/tests/heal_integration_test.rs
Normal file
@@ -0,0 +1,410 @@
use rustfs_ahm::heal::{
    manager::{HealConfig, HealManager},
    storage::{ECStoreHealStorage, HealStorageAPI},
    task::{HealOptions, HealPriority, HealRequest, HealTaskStatus, HealType},
};
use rustfs_common::heal_channel::{HealOpts, HealScanMode};
use rustfs_ecstore::{
    disk::endpoint::Endpoint,
    endpoints::{EndpointServerPools, Endpoints, PoolEndpoints},
    store::ECStore,
    store_api::{ObjectIO, ObjectOptions, PutObjReader, StorageAPI},
};
use serial_test::serial;
use std::sync::Once;
use std::sync::OnceLock;
use std::{path::PathBuf, sync::Arc, time::Duration};
use tokio::fs;
use tracing::info;
use walkdir::WalkDir;

static GLOBAL_ENV: OnceLock<(Vec<PathBuf>, Arc<ECStore>, Arc<ECStoreHealStorage>)> = OnceLock::new();
static INIT: Once = Once::new();

fn init_tracing() {
    INIT.call_once(|| {
        let _ = tracing_subscriber::fmt::try_init();
    });
}

/// Test helper: Create test environment with ECStore
async fn setup_test_env() -> (Vec<PathBuf>, Arc<ECStore>, Arc<ECStoreHealStorage>) {
    init_tracing();

    // Fast path: already initialized, just clone and return
    if let Some((paths, ecstore, heal_storage)) = GLOBAL_ENV.get() {
        return (paths.clone(), ecstore.clone(), heal_storage.clone());
    }

    // create temp dir as 4 disks with unique base dir
    let test_base_dir = format!("/tmp/rustfs_ahm_heal_test_{}", uuid::Uuid::new_v4());
    let temp_dir = std::path::PathBuf::from(&test_base_dir);
    if temp_dir.exists() {
        fs::remove_dir_all(&temp_dir).await.ok();
    }
    fs::create_dir_all(&temp_dir).await.unwrap();

    // create 4 disk dirs
    let disk_paths = vec![
        temp_dir.join("disk1"),
        temp_dir.join("disk2"),
        temp_dir.join("disk3"),
        temp_dir.join("disk4"),
    ];

    for disk_path in &disk_paths {
        fs::create_dir_all(disk_path).await.unwrap();
    }

    // create EndpointServerPools
    let mut endpoints = Vec::new();
    for (i, disk_path) in disk_paths.iter().enumerate() {
        let mut endpoint = Endpoint::try_from(disk_path.to_str().unwrap()).unwrap();
        // set correct index
        endpoint.set_pool_index(0);
        endpoint.set_set_index(0);
        endpoint.set_disk_index(i);
        endpoints.push(endpoint);
    }

    let pool_endpoints = PoolEndpoints {
        legacy: false,
        set_count: 1,
        drives_per_set: 4,
        endpoints: Endpoints::from(endpoints),
        cmd_line: "test".to_string(),
        platform: format!("OS: {} | Arch: {}", std::env::consts::OS, std::env::consts::ARCH),
    };

    let endpoint_pools = EndpointServerPools(vec![pool_endpoints]);

    // format disks (only first time)
    rustfs_ecstore::store::init_local_disks(endpoint_pools.clone()).await.unwrap();

    // create ECStore with dynamic port 0 (let OS assign) or fixed 9001 if free
    let port = 9001; // for simplicity
    let server_addr: std::net::SocketAddr = format!("127.0.0.1:{port}").parse().unwrap();
    let ecstore = ECStore::new(server_addr, endpoint_pools).await.unwrap();

    // init bucket metadata system
    let buckets_list = ecstore
        .list_bucket(&rustfs_ecstore::store_api::BucketOptions {
            no_metadata: true,
            ..Default::default()
        })
        .await
        .unwrap();
    let buckets = buckets_list.into_iter().map(|v| v.name).collect();
    rustfs_ecstore::bucket::metadata_sys::init_bucket_metadata_sys(ecstore.clone(), buckets).await;

    // Create heal storage layer
    let heal_storage = Arc::new(ECStoreHealStorage::new(ecstore.clone()));

    // Store in global once lock
    let _ = GLOBAL_ENV.set((disk_paths.clone(), ecstore.clone(), heal_storage.clone()));

    (disk_paths, ecstore, heal_storage)
}

/// Test helper: Create a test bucket
async fn create_test_bucket(ecstore: &Arc<ECStore>, bucket_name: &str) {
    (**ecstore)
        .make_bucket(bucket_name, &Default::default())
        .await
        .expect("Failed to create test bucket");
    info!("Created test bucket: {}", bucket_name);
}

/// Test helper: Upload test object
async fn upload_test_object(ecstore: &Arc<ECStore>, bucket: &str, object: &str, data: &[u8]) {
    let mut reader = PutObjReader::from_vec(data.to_vec());
    let object_info = (**ecstore)
        .put_object(bucket, object, &mut reader, &ObjectOptions::default())
        .await
        .expect("Failed to upload test object");

    info!("Uploaded test object: {}/{} ({} bytes)", bucket, object, object_info.size);
}

#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
#[serial]
async fn test_heal_object_basic() {
    let (disk_paths, ecstore, heal_storage) = setup_test_env().await;

    // Create test bucket and object
    let bucket_name = "test-bucket";
    let object_name = "test-object.txt";
    let test_data = b"Hello, this is test data for healing!";

    create_test_bucket(&ecstore, bucket_name).await;
    upload_test_object(&ecstore, bucket_name, object_name, test_data).await;

    // ─── 1️⃣ delete single data shard file ─────────────────────────────────────
    let obj_dir = disk_paths[0].join(bucket_name).join(object_name);
    // find part file at depth 2, e.g. .../<uuid>/part.1
    let target_part = WalkDir::new(&obj_dir)
        .min_depth(2)
        .max_depth(2)
        .into_iter()
        .filter_map(Result::ok)
        .find(|e| e.file_type().is_file() && e.file_name().to_str().map(|n| n.starts_with("part.")).unwrap_or(false))
        .map(|e| e.into_path())
        .expect("Failed to locate part file to delete");

    std::fs::remove_file(&target_part).expect("failed to delete part file");
    assert!(!target_part.exists());
    println!("✅ Deleted shard part file: {target_part:?}");

    // Create heal manager with faster interval
    let cfg = HealConfig {
        heal_interval: Duration::from_millis(1),
        ..Default::default()
    };
    let heal_manager = HealManager::new(heal_storage.clone(), Some(cfg));
    heal_manager.start().await.unwrap();

    // Submit heal request for the object
    let heal_request = HealRequest::new(
        HealType::Object {
            bucket: bucket_name.to_string(),
            object: object_name.to_string(),
            version_id: None,
        },
        HealOptions {
            dry_run: false,
            recursive: false,
            remove_corrupted: false,
            recreate_missing: true,
            scan_mode: HealScanMode::Normal,
            update_parity: true,
            timeout: Some(Duration::from_secs(300)),
            pool_index: None,
            set_index: None,
        },
        HealPriority::Normal,
    );

    let task_id = heal_manager
        .submit_heal_request(heal_request)
        .await
        .expect("Failed to submit heal request");

    info!("Submitted heal request with task ID: {}", task_id);

    // Wait for task completion
    tokio::time::sleep(tokio::time::Duration::from_secs(8)).await;

    // Attempt to fetch task status (might be removed if finished)
    match heal_manager.get_task_status(&task_id).await {
        Ok(status) => info!("Task status: {:?}", status),
        Err(e) => info!("Task status not found (likely completed): {}", e),
    }

    // ─── 2️⃣ verify the deleted part file is restored ───────
    assert!(target_part.exists());

    info!("Heal object basic test passed");
}

#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
#[serial]
async fn test_heal_bucket_basic() {
    let (disk_paths, ecstore, heal_storage) = setup_test_env().await;

    // Create test bucket
    let bucket_name = "test-bucket-heal";
    create_test_bucket(&ecstore, bucket_name).await;

    // ─── 1️⃣ delete bucket dir on disk ──────────────
    let broken_bucket_path = disk_paths[0].join(bucket_name);
    assert!(broken_bucket_path.exists(), "bucket dir does not exist on disk");
    std::fs::remove_dir_all(&broken_bucket_path).expect("failed to delete bucket dir on disk");
    assert!(!broken_bucket_path.exists(), "bucket dir still exists after deletion");
    println!("✅ Deleted bucket directory on disk: {broken_bucket_path:?}");

    // Create heal manager with faster interval
    let cfg = HealConfig {
        heal_interval: Duration::from_millis(1),
        ..Default::default()
    };
    let heal_manager = HealManager::new(heal_storage.clone(), Some(cfg));
    heal_manager.start().await.unwrap();

    // Submit heal request for the bucket
    let heal_request = HealRequest::new(
        HealType::Bucket {
            bucket: bucket_name.to_string(),
        },
        HealOptions {
            dry_run: false,
            recursive: true,
            remove_corrupted: false,
            recreate_missing: false,
            scan_mode: HealScanMode::Normal,
            update_parity: false,
            timeout: Some(Duration::from_secs(300)),
            pool_index: None,
            set_index: None,
        },
        HealPriority::Normal,
    );

    let task_id = heal_manager
        .submit_heal_request(heal_request)
        .await
        .expect("Failed to submit bucket heal request");

    info!("Submitted bucket heal request with task ID: {}", task_id);

    // Wait for task completion
    tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;

    // Attempt to fetch task status (optional)
    if let Ok(status) = heal_manager.get_task_status(&task_id).await {
        if status == HealTaskStatus::Completed {
            info!("Bucket heal task status: {:?}", status);
        } else {
            panic!("Bucket heal task status: {status:?}");
        }
    }

    // ─── 2️⃣ Verify bucket directory is restored on every disk ───────
    assert!(broken_bucket_path.exists(), "bucket dir does not exist on disk");

    info!("Heal bucket basic test passed");
}

#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
#[serial]
async fn test_heal_format_basic() {
    let (disk_paths, _ecstore, heal_storage) = setup_test_env().await;

    // ─── 1️⃣ delete format.json on one disk ──────────────
    let format_path = disk_paths[0].join(".rustfs.sys").join("format.json");
    assert!(format_path.exists(), "format.json does not exist on disk");
    std::fs::remove_file(&format_path).expect("failed to delete format.json on disk");
    assert!(!format_path.exists(), "format.json still exists after deletion");
    println!("✅ Deleted format.json on disk: {format_path:?}");

    // Create heal manager with faster interval
    let cfg = HealConfig {
        heal_interval: Duration::from_secs(2),
        ..Default::default()
    };
    let heal_manager = HealManager::new(heal_storage.clone(), Some(cfg));
    heal_manager.start().await.unwrap();

    // Wait for task completion
    tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;

    // ─── 2️⃣ verify format.json is restored ───────
    assert!(format_path.exists(), "format.json does not exist on disk after heal");

    info!("Heal format basic test passed");
}

#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
#[serial]
async fn test_heal_format_with_data() {
    let (disk_paths, ecstore, heal_storage) = setup_test_env().await;

    // Create test bucket and object
    let bucket_name = "test-bucket";
    let object_name = "test-object.txt";
    let test_data = b"Hello, this is test data for healing!";

    create_test_bucket(&ecstore, bucket_name).await;
    upload_test_object(&ecstore, bucket_name, object_name, test_data).await;

    let obj_dir = disk_paths[0].join(bucket_name).join(object_name);
    let target_part = WalkDir::new(&obj_dir)
        .min_depth(2)
        .max_depth(2)
        .into_iter()
        .filter_map(Result::ok)
        .find(|e| e.file_type().is_file() && e.file_name().to_str().map(|n| n.starts_with("part.")).unwrap_or(false))
        .map(|e| e.into_path())
        .expect("Failed to locate part file to delete");

    // ─── 1️⃣ wipe the whole disk directory (format.json included) ──────────────
    let format_path = disk_paths[0].join(".rustfs.sys").join("format.json");
    std::fs::remove_dir_all(&disk_paths[0]).expect("failed to delete all contents under disk_paths[0]");
    std::fs::create_dir_all(&disk_paths[0]).expect("failed to recreate disk_paths[0] directory");
    println!("✅ Wiped disk (format.json included): {:?}", disk_paths[0]);

    // Create heal manager with faster interval
    let cfg = HealConfig {
        heal_interval: Duration::from_secs(2),
        ..Default::default()
    };
    let heal_manager = HealManager::new(heal_storage.clone(), Some(cfg));
    heal_manager.start().await.unwrap();

    // Wait for task completion
    tokio::time::sleep(tokio::time::Duration::from_secs(5)).await;

    // ─── 2️⃣ verify format.json is restored ───────
    assert!(format_path.exists(), "format.json does not exist on disk after heal");
    // ─── 3️⃣ verify the deleted part file is restored ───────
    assert!(target_part.exists());

    info!("Heal format with data test passed");
}

#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
#[serial]
async fn test_heal_storage_api_direct() {
    let (_disk_paths, ecstore, heal_storage) = setup_test_env().await;

    // Test direct heal storage API calls

    // Test heal_format
    let format_result = heal_storage.heal_format(true).await; // dry run
    assert!(format_result.is_ok());
    info!("Direct heal_format test passed");

    // Test heal_bucket
    let bucket_name = "test-bucket-direct";
    create_test_bucket(&ecstore, bucket_name).await;

    let heal_opts = HealOpts {
        recursive: true,
        dry_run: true,
        remove: false,
        recreate: false,
        scan_mode: HealScanMode::Normal,
        update_parity: false,
        no_lock: false,
        pool: None,
        set: None,
    };

    let bucket_result = heal_storage.heal_bucket(bucket_name, &heal_opts).await;
    assert!(bucket_result.is_ok());
    info!("Direct heal_bucket test passed");

    // Test heal_object
    let object_name = "test-object-direct.txt";
    let test_data = b"Test data for direct heal API";
    upload_test_object(&ecstore, bucket_name, object_name, test_data).await;

    let object_heal_opts = HealOpts {
        recursive: false,
        dry_run: true,
        remove: false,
        recreate: false,
        scan_mode: HealScanMode::Normal,
        update_parity: false,
        no_lock: false,
        pool: None,
        set: None,
    };

    let object_result = heal_storage
        .heal_object(bucket_name, object_name, None, &object_heal_opts)
        .await;
    assert!(object_result.is_ok());
    info!("Direct heal_object test passed");

    info!("Direct heal storage API test passed");
}

243
crates/ahm/tests/lifecycle_integration_test.rs
Normal file
@@ -0,0 +1,243 @@
use rustfs_ahm::scanner::{Scanner, data_scanner::ScannerConfig};
use rustfs_ecstore::{
    bucket::metadata::BUCKET_LIFECYCLE_CONFIG,
    bucket::metadata_sys,
    disk::endpoint::Endpoint,
    endpoints::{EndpointServerPools, Endpoints, PoolEndpoints},
    store::ECStore,
    store_api::{ObjectIO, ObjectOptions, PutObjReader, StorageAPI},
};
use serial_test::serial;
use std::sync::Once;
use std::sync::OnceLock;
use std::{path::PathBuf, sync::Arc, time::Duration};
use tokio::fs;
use tracing::info;

static GLOBAL_ENV: OnceLock<(Vec<PathBuf>, Arc<ECStore>)> = OnceLock::new();
static INIT: Once = Once::new();

fn init_tracing() {
    INIT.call_once(|| {
        let _ = tracing_subscriber::fmt::try_init();
    });
}

/// Test helper: Create test environment with ECStore
async fn setup_test_env() -> (Vec<PathBuf>, Arc<ECStore>) {
    init_tracing();

    // Fast path: already initialized, just clone and return
    if let Some((paths, ecstore)) = GLOBAL_ENV.get() {
        return (paths.clone(), ecstore.clone());
    }

    // create temp dir as 4 disks with unique base dir
    let test_base_dir = format!("/tmp/rustfs_ahm_lifecycle_test_{}", uuid::Uuid::new_v4());
    let temp_dir = std::path::PathBuf::from(&test_base_dir);
    if temp_dir.exists() {
        fs::remove_dir_all(&temp_dir).await.ok();
    }
    fs::create_dir_all(&temp_dir).await.unwrap();

    // create 4 disk dirs
    let disk_paths = vec![
        temp_dir.join("disk1"),
        temp_dir.join("disk2"),
        temp_dir.join("disk3"),
        temp_dir.join("disk4"),
    ];

    for disk_path in &disk_paths {
        fs::create_dir_all(disk_path).await.unwrap();
    }

    // create EndpointServerPools
    let mut endpoints = Vec::new();
    for (i, disk_path) in disk_paths.iter().enumerate() {
        let mut endpoint = Endpoint::try_from(disk_path.to_str().unwrap()).unwrap();
        // set correct index
        endpoint.set_pool_index(0);
        endpoint.set_set_index(0);
        endpoint.set_disk_index(i);
        endpoints.push(endpoint);
    }

    let pool_endpoints = PoolEndpoints {
        legacy: false,
        set_count: 1,
        drives_per_set: 4,
        endpoints: Endpoints::from(endpoints),
        cmd_line: "test".to_string(),
        platform: format!("OS: {} | Arch: {}", std::env::consts::OS, std::env::consts::ARCH),
    };

    let endpoint_pools = EndpointServerPools(vec![pool_endpoints]);

    // format disks (only first time)
    rustfs_ecstore::store::init_local_disks(endpoint_pools.clone()).await.unwrap();

    // create ECStore on a fixed local port (9002) for simplicity; a dynamic
    // port 0 (OS-assigned) would also work
    let port = 9002;
    let server_addr: std::net::SocketAddr = format!("127.0.0.1:{port}").parse().unwrap();
    let ecstore = ECStore::new(server_addr, endpoint_pools).await.unwrap();

    // init bucket metadata system
    let buckets_list = ecstore
        .list_bucket(&rustfs_ecstore::store_api::BucketOptions {
            no_metadata: true,
            ..Default::default()
        })
        .await
        .unwrap();
    let buckets = buckets_list.into_iter().map(|v| v.name).collect();
    rustfs_ecstore::bucket::metadata_sys::init_bucket_metadata_sys(ecstore.clone(), buckets).await;

    // Initialize background expiry workers
    rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::init_background_expiry(ecstore.clone()).await;

    // Store in global once lock
    let _ = GLOBAL_ENV.set((disk_paths.clone(), ecstore.clone()));

    (disk_paths, ecstore)
}

/// Test helper: Create a test bucket
async fn create_test_bucket(ecstore: &Arc<ECStore>, bucket_name: &str) {
    (**ecstore)
        .make_bucket(bucket_name, &Default::default())
        .await
        .expect("Failed to create test bucket");
    info!("Created test bucket: {}", bucket_name);
}

/// Test helper: Upload test object
async fn upload_test_object(ecstore: &Arc<ECStore>, bucket: &str, object: &str, data: &[u8]) {
    let mut reader = PutObjReader::from_vec(data.to_vec());
    let object_info = (**ecstore)
        .put_object(bucket, object, &mut reader, &ObjectOptions::default())
        .await
        .expect("Failed to upload test object");

    info!("Uploaded test object: {}/{} ({} bytes)", bucket, object, object_info.size);
}

/// Test helper: Set bucket lifecycle configuration
async fn set_bucket_lifecycle(bucket_name: &str) -> Result<(), Box<dyn std::error::Error>> {
    // Create a simple lifecycle configuration XML with 0 days expiry for immediate testing
    let lifecycle_xml = r#"<?xml version="1.0" encoding="UTF-8"?>
<LifecycleConfiguration>
    <Rule>
        <ID>test-rule</ID>
        <Status>Enabled</Status>
        <Filter>
            <Prefix>test/</Prefix>
        </Filter>
        <Expiration>
            <Days>0</Days>
        </Expiration>
    </Rule>
</LifecycleConfiguration>"#;

    metadata_sys::update(bucket_name, BUCKET_LIFECYCLE_CONFIG, lifecycle_xml.as_bytes().to_vec()).await?;

    Ok(())
}

/// Test helper: Check if object exists
async fn object_exists(ecstore: &Arc<ECStore>, bucket: &str, object: &str) -> bool {
    ((**ecstore).get_object_info(bucket, object, &ObjectOptions::default()).await).is_ok()
}

#[tokio::test(flavor = "multi_thread", worker_threads = 4)]
#[serial]
async fn test_lifecycle_expiry_basic() {
    let (_disk_paths, ecstore) = setup_test_env().await;

    // Create test bucket and object
    let bucket_name = "test-lifecycle-bucket";
    let object_name = "test/object.txt"; // Match the lifecycle rule prefix "test/"
    let test_data = b"Hello, this is test data for lifecycle expiry!";

    create_test_bucket(&ecstore, bucket_name).await;
    upload_test_object(&ecstore, bucket_name, object_name, test_data).await;

    // Verify object exists initially
    assert!(object_exists(&ecstore, bucket_name, object_name).await);
    println!("✅ Object exists before lifecycle processing");

    // Set lifecycle configuration with very short expiry (0 days = immediate expiry)
    set_bucket_lifecycle(bucket_name)
        .await
        .expect("Failed to set lifecycle configuration");
    println!("✅ Lifecycle configuration set for bucket: {bucket_name}");

    // Verify lifecycle configuration was set
    match rustfs_ecstore::bucket::metadata_sys::get(bucket_name).await {
        Ok(bucket_meta) => {
            assert!(bucket_meta.lifecycle_config.is_some());
            println!("✅ Bucket metadata retrieved successfully");
        }
        Err(e) => {
            println!("❌ Error retrieving bucket metadata: {e:?}");
        }
    }

    // Create scanner with very short intervals for testing
    let scanner_config = ScannerConfig {
        scan_interval: Duration::from_millis(100),
        deep_scan_interval: Duration::from_millis(500),
        max_concurrent_scans: 1,
        ..Default::default()
    };

    let scanner = Scanner::new(Some(scanner_config), None);

    // Start scanner
    scanner.start().await.expect("Failed to start scanner");
    println!("✅ Scanner started");

    // Wait for scanner to process lifecycle rules
    tokio::time::sleep(Duration::from_secs(2)).await;

    // Manually trigger a scan cycle to ensure lifecycle processing
    scanner.scan_cycle().await.expect("Failed to trigger scan cycle");
    println!("✅ Manual scan cycle completed");

    // Wait a bit more for background workers to process expiry tasks
    tokio::time::sleep(Duration::from_secs(5)).await;

    // Check if object has been expired (deleted)
    let object_still_exists = object_exists(&ecstore, bucket_name, object_name).await;
    println!("Object exists after lifecycle processing: {object_still_exists}");

    if object_still_exists {
        println!("❌ Object was not deleted by lifecycle processing");
        // Fetch the object info to see its details
        match ecstore
            .get_object_info(bucket_name, object_name, &rustfs_ecstore::store_api::ObjectOptions::default())
            .await
        {
            Ok(obj_info) => {
                println!(
                    "Object info: name={}, size={}, mod_time={:?}",
                    obj_info.name, obj_info.size, obj_info.mod_time
                );
            }
            Err(e) => {
                println!("Error getting object info: {e:?}");
            }
        }
    } else {
        println!("✅ Object was successfully deleted by lifecycle processing");
    }

    assert!(!object_still_exists);
    println!("✅ Object successfully expired");

    // Stop scanner
    let _ = scanner.stop().await;
    println!("✅ Scanner stopped");

    println!("Lifecycle expiry basic test completed");
}
crates/checksums/Cargo.toml (new file, 38 lines)
@@ -0,0 +1,38 @@
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

[package]
name = "rustfs-checksums"
edition.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true
homepage.workspace = true
description = "Checksum calculation and verification callbacks for HTTP request and response bodies sent by service clients generated by RustFS, ensuring data integrity and authenticity."
keywords = ["checksum-calculation", "verification", "integrity", "authenticity", "rustfs"]
categories = ["web-programming", "development-tools", "network-programming"]
documentation = "https://docs.rs/rustfs-checksums/latest/rustfs_checksums/"

[dependencies]
bytes = { workspace = true }
crc-fast = { workspace = true }
http = { workspace = true }
base64-simd = { workspace = true }
md-5 = { workspace = true }
sha1 = { workspace = true }
sha2 = { workspace = true }

[dev-dependencies]
pretty_assertions = { workspace = true }
crates/checksums/README.md (new file, 3 lines)
@@ -0,0 +1,3 @@
# rustfs-checksums

Checksum calculation and verification callbacks for HTTP request and response bodies sent by service clients generated by RustFS object storage.
crates/checksums/src/base64.rs (new file, 44 lines)
@@ -0,0 +1,44 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#![allow(dead_code)]

use base64_simd::STANDARD;
use std::error::Error;

#[derive(Debug)]
pub(crate) struct DecodeError(base64_simd::Error);

impl Error for DecodeError {
    fn source(&self) -> Option<&(dyn Error + 'static)> {
        Some(&self.0)
    }
}

impl std::fmt::Display for DecodeError {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "failed to decode base64")
    }
}

pub(crate) fn decode(input: impl AsRef<str>) -> Result<Vec<u8>, DecodeError> {
    STANDARD.decode_to_vec(input.as_ref()).map_err(DecodeError)
}

pub(crate) fn encode(input: impl AsRef<[u8]>) -> String {
    STANDARD.encode_to_string(input.as_ref())
}

pub(crate) fn encoded_length(length: usize) -> usize {
    STANDARD.encoded_length(length)
}
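The padded standard alphabet makes encoded lengths easy to predict; a quick sanity check (these values match the trailer-size tests in http.rs below):

    // STANDARD pads its output, so encoded_length(n) == n.div_ceil(3) * 4:
    // a 4-byte CRC32 digest encodes to 8 bytes, a 32-byte SHA-256 digest to 44.
    assert_eq!(base64_simd::STANDARD.encoded_length(4), 8);
    assert_eq!(base64_simd::STANDARD.encoded_length(32), 44);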
crates/checksums/src/error.rs (new file, 45 lines)
@@ -0,0 +1,45 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::error::Error;
use std::fmt;

#[derive(Debug)]
pub struct UnknownChecksumAlgorithmError {
    checksum_algorithm: String,
}

impl UnknownChecksumAlgorithmError {
    pub(crate) fn new(checksum_algorithm: impl Into<String>) -> Self {
        Self {
            checksum_algorithm: checksum_algorithm.into(),
        }
    }

    pub fn checksum_algorithm(&self) -> &str {
        &self.checksum_algorithm
    }
}

impl fmt::Display for UnknownChecksumAlgorithmError {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(
            f,
            r#"unknown checksum algorithm "{}", please pass a known algorithm name ("crc32", "crc32c", "sha1", "sha256", "md5")"#,
            self.checksum_algorithm
        )
    }
}

impl Error for UnknownChecksumAlgorithmError {}
crates/checksums/src/http.rs (new file, 197 lines)
@@ -0,0 +1,197 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::base64;
use http::header::{HeaderMap, HeaderValue};

use crate::Crc64Nvme;
use crate::{CRC_32_C_NAME, CRC_32_NAME, CRC_64_NVME_NAME, Checksum, Crc32, Crc32c, Md5, SHA_1_NAME, SHA_256_NAME, Sha1, Sha256};

pub const CRC_32_HEADER_NAME: &str = "x-amz-checksum-crc32";
pub const CRC_32_C_HEADER_NAME: &str = "x-amz-checksum-crc32c";
pub const SHA_1_HEADER_NAME: &str = "x-amz-checksum-sha1";
pub const SHA_256_HEADER_NAME: &str = "x-amz-checksum-sha256";
pub const CRC_64_NVME_HEADER_NAME: &str = "x-amz-checksum-crc64nvme";

#[allow(dead_code)]
pub(crate) static MD5_HEADER_NAME: &str = "content-md5";

pub const CHECKSUM_ALGORITHMS_IN_PRIORITY_ORDER: [&str; 5] =
    [CRC_64_NVME_NAME, CRC_32_C_NAME, CRC_32_NAME, SHA_1_NAME, SHA_256_NAME];

pub trait HttpChecksum: Checksum + Send + Sync {
    fn headers(self: Box<Self>) -> HeaderMap<HeaderValue> {
        let mut header_map = HeaderMap::new();
        header_map.insert(self.header_name(), self.header_value());

        header_map
    }

    fn header_name(&self) -> &'static str;

    fn header_value(self: Box<Self>) -> HeaderValue {
        let hash = self.finalize();
        HeaderValue::from_str(&base64::encode(&hash[..])).expect("base64 encoded bytes are always valid header values")
    }

    fn size(&self) -> u64 {
        let trailer_name_size_in_bytes = self.header_name().len();
        let base64_encoded_checksum_size_in_bytes = base64::encoded_length(Checksum::size(self) as usize);

        let size = trailer_name_size_in_bytes + ":".len() + base64_encoded_checksum_size_in_bytes;

        size as u64
    }
}
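Concretely, size() is the trailer-name length, plus one byte for the ":" separator, plus the padded-base64 length of the digest. Worked out for CRC32:

    // "x-amz-checksum-crc32" is 20 bytes, the separator 1 byte, and the
    // base64 of a 4-byte CRC32 digest 8 bytes: 20 + 1 + 8 = 29, which is
    // exactly what test_trailer_length_of_crc32_checksum_body expects below.
    assert_eq!("x-amz-checksum-crc32".len() + ":".len() + 8, 29);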
impl HttpChecksum for Crc32 {
    fn header_name(&self) -> &'static str {
        CRC_32_HEADER_NAME
    }
}

impl HttpChecksum for Crc32c {
    fn header_name(&self) -> &'static str {
        CRC_32_C_HEADER_NAME
    }
}

impl HttpChecksum for Crc64Nvme {
    fn header_name(&self) -> &'static str {
        CRC_64_NVME_HEADER_NAME
    }
}

impl HttpChecksum for Sha1 {
    fn header_name(&self) -> &'static str {
        SHA_1_HEADER_NAME
    }
}

impl HttpChecksum for Sha256 {
    fn header_name(&self) -> &'static str {
        SHA_256_HEADER_NAME
    }
}

impl HttpChecksum for Md5 {
    fn header_name(&self) -> &'static str {
        MD5_HEADER_NAME
    }
}

#[cfg(test)]
mod tests {
    use crate::base64;
    use bytes::Bytes;

    use crate::{CRC_32_C_NAME, CRC_32_NAME, CRC_64_NVME_NAME, ChecksumAlgorithm, SHA_1_NAME, SHA_256_NAME};

    use super::HttpChecksum;

    #[test]
    fn test_trailer_length_of_crc32_checksum_body() {
        let checksum = CRC_32_NAME.parse::<ChecksumAlgorithm>().unwrap().into_impl();
        let expected_size = 29;
        let actual_size = HttpChecksum::size(&*checksum);
        assert_eq!(expected_size, actual_size)
    }

    #[test]
    fn test_trailer_value_of_crc32_checksum_body() {
        let checksum = CRC_32_NAME.parse::<ChecksumAlgorithm>().unwrap().into_impl();
        // The CRC32 of an empty string is all zeroes
        let expected_value = Bytes::from_static(b"\0\0\0\0");
        let expected_value = base64::encode(&expected_value);
        let actual_value = checksum.header_value();
        assert_eq!(expected_value, actual_value)
    }

    #[test]
    fn test_trailer_length_of_crc32c_checksum_body() {
        let checksum = CRC_32_C_NAME.parse::<ChecksumAlgorithm>().unwrap().into_impl();
        let expected_size = 30;
        let actual_size = HttpChecksum::size(&*checksum);
        assert_eq!(expected_size, actual_size)
    }

    #[test]
    fn test_trailer_value_of_crc32c_checksum_body() {
        let checksum = CRC_32_C_NAME.parse::<ChecksumAlgorithm>().unwrap().into_impl();
        // The CRC32C of an empty string is all zeroes
        let expected_value = Bytes::from_static(b"\0\0\0\0");
        let expected_value = base64::encode(&expected_value);
        let actual_value = checksum.header_value();
        assert_eq!(expected_value, actual_value)
    }

    #[test]
    fn test_trailer_length_of_crc64nvme_checksum_body() {
        let checksum = CRC_64_NVME_NAME.parse::<ChecksumAlgorithm>().unwrap().into_impl();
        let expected_size = 37;
        let actual_size = HttpChecksum::size(&*checksum);
        assert_eq!(expected_size, actual_size)
    }

    #[test]
    fn test_trailer_value_of_crc64nvme_checksum_body() {
        let checksum = CRC_64_NVME_NAME.parse::<ChecksumAlgorithm>().unwrap().into_impl();
        // The CRC64NVME of an empty string is all zeroes
        let expected_value = Bytes::from_static(b"\0\0\0\0\0\0\0\0");
        let expected_value = base64::encode(&expected_value);
        let actual_value = checksum.header_value();
        assert_eq!(expected_value, actual_value)
    }

    #[test]
    fn test_trailer_length_of_sha1_checksum_body() {
        let checksum = SHA_1_NAME.parse::<ChecksumAlgorithm>().unwrap().into_impl();
        let expected_size = 48;
        let actual_size = HttpChecksum::size(&*checksum);
        assert_eq!(expected_size, actual_size)
    }

    #[test]
    fn test_trailer_value_of_sha1_checksum_body() {
        let checksum = SHA_1_NAME.parse::<ChecksumAlgorithm>().unwrap().into_impl();
        // The SHA1 of an empty string is da39a3ee5e6b4b0d3255bfef95601890afd80709
        let expected_value = Bytes::from_static(&[
            0xda, 0x39, 0xa3, 0xee, 0x5e, 0x6b, 0x4b, 0x0d, 0x32, 0x55, 0xbf, 0xef, 0x95, 0x60, 0x18, 0x90, 0xaf, 0xd8, 0x07,
            0x09,
        ]);
        let expected_value = base64::encode(&expected_value);
        let actual_value = checksum.header_value();
        assert_eq!(expected_value, actual_value)
    }

    #[test]
    fn test_trailer_length_of_sha256_checksum_body() {
        let checksum = SHA_256_NAME.parse::<ChecksumAlgorithm>().unwrap().into_impl();
        let expected_size = 66;
        let actual_size = HttpChecksum::size(&*checksum);
        assert_eq!(expected_size, actual_size)
    }

    #[test]
    fn test_trailer_value_of_sha256_checksum_body() {
        let checksum = SHA_256_NAME.parse::<ChecksumAlgorithm>().unwrap().into_impl();
        let expected_value = Bytes::from_static(&[
            0xe3, 0xb0, 0xc4, 0x42, 0x98, 0xfc, 0x1c, 0x14, 0x9a, 0xfb, 0xf4, 0xc8, 0x99, 0x6f, 0xb9, 0x24, 0x27, 0xae, 0x41,
            0xe4, 0x64, 0x9b, 0x93, 0x4c, 0xa4, 0x95, 0x99, 0x1b, 0x78, 0x52, 0xb8, 0x55,
        ]);
        let expected_value = base64::encode(&expected_value);
        let actual_value = checksum.header_value();
        assert_eq!(expected_value, actual_value)
    }
}
crates/checksums/src/lib.rs (new file, 446 lines)
@@ -0,0 +1,446 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#![cfg_attr(docsrs, feature(doc_auto_cfg))]
#![allow(clippy::derive_partial_eq_without_eq)]
#![warn(
    // missing_docs,
    rustdoc::missing_crate_level_docs,
    unreachable_pub,
    rust_2018_idioms
)]

use crate::error::UnknownChecksumAlgorithmError;

use bytes::Bytes;
use std::{fmt::Debug, str::FromStr};

mod base64;
pub mod error;
pub mod http;

pub const CRC_32_NAME: &str = "crc32";
pub const CRC_32_C_NAME: &str = "crc32c";
pub const CRC_64_NVME_NAME: &str = "crc64nvme";
pub const SHA_1_NAME: &str = "sha1";
pub const SHA_256_NAME: &str = "sha256";
pub const MD5_NAME: &str = "md5";

#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
#[non_exhaustive]
pub enum ChecksumAlgorithm {
    #[default]
    Crc32,
    Crc32c,
    #[deprecated]
    Md5,
    Sha1,
    Sha256,
    Crc64Nvme,
}

impl FromStr for ChecksumAlgorithm {
    type Err = UnknownChecksumAlgorithmError;

    fn from_str(checksum_algorithm: &str) -> Result<Self, Self::Err> {
        if checksum_algorithm.eq_ignore_ascii_case(CRC_32_NAME) {
            Ok(Self::Crc32)
        } else if checksum_algorithm.eq_ignore_ascii_case(CRC_32_C_NAME) {
            Ok(Self::Crc32c)
        } else if checksum_algorithm.eq_ignore_ascii_case(SHA_1_NAME) {
            Ok(Self::Sha1)
        } else if checksum_algorithm.eq_ignore_ascii_case(SHA_256_NAME) {
            Ok(Self::Sha256)
        } else if checksum_algorithm.eq_ignore_ascii_case(MD5_NAME) {
            // MD5 is now an alias for the default Crc32 since it is deprecated
            Ok(Self::Crc32)
        } else if checksum_algorithm.eq_ignore_ascii_case(CRC_64_NVME_NAME) {
            Ok(Self::Crc64Nvme)
        } else {
            Err(UnknownChecksumAlgorithmError::new(checksum_algorithm))
        }
    }
}

impl ChecksumAlgorithm {
    pub fn into_impl(self) -> Box<dyn http::HttpChecksum> {
        match self {
            Self::Crc32 => Box::<Crc32>::default(),
            Self::Crc32c => Box::<Crc32c>::default(),
            Self::Crc64Nvme => Box::<Crc64Nvme>::default(),
            #[allow(deprecated)]
            Self::Md5 => Box::<Crc32>::default(),
            Self::Sha1 => Box::<Sha1>::default(),
            Self::Sha256 => Box::<Sha256>::default(),
        }
    }

    pub fn as_str(&self) -> &'static str {
        match self {
            Self::Crc32 => CRC_32_NAME,
            Self::Crc32c => CRC_32_C_NAME,
            Self::Crc64Nvme => CRC_64_NVME_NAME,
            #[allow(deprecated)]
            Self::Md5 => MD5_NAME,
            Self::Sha1 => SHA_1_NAME,
            Self::Sha256 => SHA_256_NAME,
        }
    }
}
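A minimal usage sketch of the API above (assuming the crate is consumed under its package name, rustfs_checksums, and that the http crate is available for the return type):

    use rustfs_checksums::{Checksum, ChecksumAlgorithm, http::HttpChecksum};

    fn trailer_for(data: &[u8]) -> http::HeaderMap {
        // Parse an algorithm name, stream the payload through it, and emit
        // the corresponding x-amz-checksum-* header.
        let mut hasher = "crc32c".parse::<ChecksumAlgorithm>().unwrap().into_impl();
        hasher.update(data);
        hasher.headers()
    }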
pub trait Checksum: Send + Sync {
    fn update(&mut self, bytes: &[u8]);
    fn finalize(self: Box<Self>) -> Bytes;
    fn size(&self) -> u64;
}

#[derive(Debug)]
struct Crc32 {
    hasher: crc_fast::Digest,
}

impl Default for Crc32 {
    fn default() -> Self {
        Self {
            hasher: crc_fast::Digest::new(crc_fast::CrcAlgorithm::Crc32IsoHdlc),
        }
    }
}

impl Crc32 {
    fn update(&mut self, bytes: &[u8]) {
        self.hasher.update(bytes);
    }

    fn finalize(self) -> Bytes {
        let checksum = self.hasher.finalize() as u32;

        Bytes::copy_from_slice(checksum.to_be_bytes().as_slice())
    }

    fn size() -> u64 {
        4
    }
}

impl Checksum for Crc32 {
    fn update(&mut self, bytes: &[u8]) {
        Self::update(self, bytes)
    }
    fn finalize(self: Box<Self>) -> Bytes {
        Self::finalize(*self)
    }
    fn size(&self) -> u64 {
        Self::size()
    }
}

#[derive(Debug)]
struct Crc32c {
    hasher: crc_fast::Digest,
}

impl Default for Crc32c {
    fn default() -> Self {
        Self {
            hasher: crc_fast::Digest::new(crc_fast::CrcAlgorithm::Crc32Iscsi),
        }
    }
}

impl Crc32c {
    fn update(&mut self, bytes: &[u8]) {
        self.hasher.update(bytes);
    }

    fn finalize(self) -> Bytes {
        let checksum = self.hasher.finalize() as u32;

        Bytes::copy_from_slice(checksum.to_be_bytes().as_slice())
    }

    fn size() -> u64 {
        4
    }
}

impl Checksum for Crc32c {
    fn update(&mut self, bytes: &[u8]) {
        Self::update(self, bytes)
    }
    fn finalize(self: Box<Self>) -> Bytes {
        Self::finalize(*self)
    }
    fn size(&self) -> u64 {
        Self::size()
    }
}

#[derive(Debug)]
struct Crc64Nvme {
    hasher: crc_fast::Digest,
}

impl Default for Crc64Nvme {
    fn default() -> Self {
        Self {
            hasher: crc_fast::Digest::new(crc_fast::CrcAlgorithm::Crc64Nvme),
        }
    }
}

impl Crc64Nvme {
    fn update(&mut self, bytes: &[u8]) {
        self.hasher.update(bytes);
    }

    fn finalize(self) -> Bytes {
        Bytes::copy_from_slice(self.hasher.finalize().to_be_bytes().as_slice())
    }

    fn size() -> u64 {
        8
    }
}

impl Checksum for Crc64Nvme {
    fn update(&mut self, bytes: &[u8]) {
        Self::update(self, bytes)
    }
    fn finalize(self: Box<Self>) -> Bytes {
        Self::finalize(*self)
    }
    fn size(&self) -> u64 {
        Self::size()
    }
}

#[derive(Debug, Default)]
struct Sha1 {
    hasher: sha1::Sha1,
}

impl Sha1 {
    fn update(&mut self, bytes: &[u8]) {
        use sha1::Digest;
        self.hasher.update(bytes);
    }

    fn finalize(self) -> Bytes {
        use sha1::Digest;
        Bytes::copy_from_slice(self.hasher.finalize().as_slice())
    }

    fn size() -> u64 {
        use sha1::Digest;
        sha1::Sha1::output_size() as u64
    }
}

impl Checksum for Sha1 {
    fn update(&mut self, bytes: &[u8]) {
        Self::update(self, bytes)
    }

    fn finalize(self: Box<Self>) -> Bytes {
        Self::finalize(*self)
    }
    fn size(&self) -> u64 {
        Self::size()
    }
}

#[derive(Debug, Default)]
struct Sha256 {
    hasher: sha2::Sha256,
}

impl Sha256 {
    fn update(&mut self, bytes: &[u8]) {
        use sha2::Digest;
        self.hasher.update(bytes);
    }

    fn finalize(self) -> Bytes {
        use sha2::Digest;
        Bytes::copy_from_slice(self.hasher.finalize().as_slice())
    }

    fn size() -> u64 {
        use sha2::Digest;
        sha2::Sha256::output_size() as u64
    }
}

impl Checksum for Sha256 {
    fn update(&mut self, bytes: &[u8]) {
        Self::update(self, bytes);
    }
    fn finalize(self: Box<Self>) -> Bytes {
        Self::finalize(*self)
    }
    fn size(&self) -> u64 {
        Self::size()
    }
}

#[allow(dead_code)]
#[derive(Debug, Default)]
struct Md5 {
    hasher: md5::Md5,
}

impl Md5 {
    fn update(&mut self, bytes: &[u8]) {
        use md5::Digest;
        self.hasher.update(bytes);
    }

    fn finalize(self) -> Bytes {
        use md5::Digest;
        Bytes::copy_from_slice(self.hasher.finalize().as_slice())
    }

    fn size() -> u64 {
        use md5::Digest;
        md5::Md5::output_size() as u64
    }
}

impl Checksum for Md5 {
    fn update(&mut self, bytes: &[u8]) {
        Self::update(self, bytes)
    }
    fn finalize(self: Box<Self>) -> Bytes {
        Self::finalize(*self)
    }
    fn size(&self) -> u64 {
        Self::size()
    }
}

#[cfg(test)]
mod tests {
    use super::{
        Crc32, Crc32c, Md5, Sha1, Sha256,
        http::{CRC_32_C_HEADER_NAME, CRC_32_HEADER_NAME, MD5_HEADER_NAME, SHA_1_HEADER_NAME, SHA_256_HEADER_NAME},
    };

    use crate::ChecksumAlgorithm;
    use crate::http::HttpChecksum;

    use crate::base64;
    use http::HeaderValue;
    use pretty_assertions::assert_eq;
    use std::fmt::Write;

    const TEST_DATA: &str = r#"test data"#;

    fn base64_encoded_checksum_to_hex_string(header_value: &HeaderValue) -> String {
        let decoded_checksum = base64::decode(header_value.to_str().unwrap()).unwrap();
        let decoded_checksum = decoded_checksum.into_iter().fold(String::new(), |mut acc, byte| {
            write!(acc, "{byte:02X?}").expect("string will always be writeable");
            acc
        });

        format!("0x{decoded_checksum}")
    }

    #[test]
    fn test_crc32_checksum() {
        let mut checksum = Crc32::default();
        checksum.update(TEST_DATA.as_bytes());
        let checksum_result = Box::new(checksum).headers();
        let encoded_checksum = checksum_result.get(CRC_32_HEADER_NAME).unwrap();
        let decoded_checksum = base64_encoded_checksum_to_hex_string(encoded_checksum);

        let expected_checksum = "0xD308AEB2";

        assert_eq!(decoded_checksum, expected_checksum);
    }

    #[cfg(not(any(target_arch = "powerpc", target_arch = "powerpc64")))]
    #[test]
    fn test_crc32c_checksum() {
        let mut checksum = Crc32c::default();
        checksum.update(TEST_DATA.as_bytes());
        let checksum_result = Box::new(checksum).headers();
        let encoded_checksum = checksum_result.get(CRC_32_C_HEADER_NAME).unwrap();
        let decoded_checksum = base64_encoded_checksum_to_hex_string(encoded_checksum);

        let expected_checksum = "0x3379B4CA";

        assert_eq!(decoded_checksum, expected_checksum);
    }

    #[test]
    fn test_crc64nvme_checksum() {
        use crate::{Crc64Nvme, http::CRC_64_NVME_HEADER_NAME};
        let mut checksum = Crc64Nvme::default();
        checksum.update(TEST_DATA.as_bytes());
        let checksum_result = Box::new(checksum).headers();
        let encoded_checksum = checksum_result.get(CRC_64_NVME_HEADER_NAME).unwrap();
        let decoded_checksum = base64_encoded_checksum_to_hex_string(encoded_checksum);

        let expected_checksum = "0xAECAF3AF9C98A855";

        assert_eq!(decoded_checksum, expected_checksum);
    }

    #[test]
    fn test_sha1_checksum() {
        let mut checksum = Sha1::default();
        checksum.update(TEST_DATA.as_bytes());
        let checksum_result = Box::new(checksum).headers();
        let encoded_checksum = checksum_result.get(SHA_1_HEADER_NAME).unwrap();
        let decoded_checksum = base64_encoded_checksum_to_hex_string(encoded_checksum);

        let expected_checksum = "0xF48DD853820860816C75D54D0F584DC863327A7C";

        assert_eq!(decoded_checksum, expected_checksum);
    }

    #[test]
    fn test_sha256_checksum() {
        let mut checksum = Sha256::default();
        checksum.update(TEST_DATA.as_bytes());
        let checksum_result = Box::new(checksum).headers();
        let encoded_checksum = checksum_result.get(SHA_256_HEADER_NAME).unwrap();
        let decoded_checksum = base64_encoded_checksum_to_hex_string(encoded_checksum);

        let expected_checksum = "0x916F0027A575074CE72A331777C3478D6513F786A591BD892DA1A577BF2335F9";

        assert_eq!(decoded_checksum, expected_checksum);
    }

    #[test]
    fn test_md5_checksum() {
        let mut checksum = Md5::default();
        checksum.update(TEST_DATA.as_bytes());
        let checksum_result = Box::new(checksum).headers();
        let encoded_checksum = checksum_result.get(MD5_HEADER_NAME).unwrap();
        let decoded_checksum = base64_encoded_checksum_to_hex_string(encoded_checksum);

        let expected_checksum = "0xEB733A00C0C9D336E65691A37AB54293";

        assert_eq!(decoded_checksum, expected_checksum);
    }

    #[test]
    fn test_checksum_algorithm_returns_error_for_unknown() {
        let error = "some invalid checksum algorithm"
            .parse::<ChecksumAlgorithm>()
            .expect_err("it should error");
        assert_eq!("some invalid checksum algorithm", error.checksum_algorithm());
    }
}
@@ -28,6 +28,15 @@ categories = ["web-programming", "development-tools", "data-structures"]
workspace = true

[dependencies]
lazy_static.workspace = true
tokio.workspace = true
lazy_static = { workspace = true }
tokio = { workspace = true }
tonic = { workspace = true }
uuid = { workspace = true }
chrono = { workspace = true }
rustfs-madmin = { workspace = true }
rustfs-filemeta = { workspace = true }
serde = { workspace = true }
path-clean = { workspace = true }
rmp-serde = { workspace = true }
async-trait = { workspace = true }
s3s = { workspace = true }
crates/common/src/data_usage.rs (new file, 1281 lines)
@@ -12,19 +12,19 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;
#![allow(non_upper_case_globals)] // FIXME

use std::collections::HashMap;
use std::sync::LazyLock;

use lazy_static::lazy_static;
use tokio::sync::RwLock;
use tonic::transport::Channel;

lazy_static! {
    pub static ref GLOBAL_Local_Node_Name: RwLock<String> = RwLock::new("".to_string());
    pub static ref GLOBAL_Rustfs_Host: RwLock<String> = RwLock::new("".to_string());
    pub static ref GLOBAL_Rustfs_Port: RwLock<String> = RwLock::new("9000".to_string());
    pub static ref GLOBAL_Rustfs_Addr: RwLock<String> = RwLock::new("".to_string());
    pub static ref GLOBAL_Conn_Map: RwLock<HashMap<String, Channel>> = RwLock::new(HashMap::new());
}
pub static GLOBAL_Local_Node_Name: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
pub static GLOBAL_Rustfs_Host: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
pub static GLOBAL_Rustfs_Port: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("9000".to_string()));
pub static GLOBAL_Rustfs_Addr: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
pub static GLOBAL_Conn_Map: LazyLock<RwLock<HashMap<String, Channel>>> = LazyLock::new(|| RwLock::new(HashMap::new()));

pub async fn set_global_addr(addr: &str) {
    *GLOBAL_Rustfs_Addr.write().await = addr.to_string();
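The hunk above replaces the lazy_static! macro with std::sync::LazyLock (stable since Rust 1.80); the two forms are equivalent at the call site. A minimal sketch with a hypothetical global, not taken from this diff:

    use std::sync::LazyLock;
    use tokio::sync::RwLock;

    // Same shape as the globals above: lazily-initialized, async-guarded state
    // without the macro dependency or the hidden Deref wrapper that
    // lazy_static's `static ref` generates.
    static EXAMPLE_ADDR: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new(String::new()));

    async fn set_example(addr: &str) {
        *EXAMPLE_ADDR.write().await = addr.to_string();
    }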
crates/common/src/heal_channel.rs (new file, 427 lines)
@@ -0,0 +1,427 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use s3s::dto::{BucketLifecycleConfiguration, ExpirationStatus, LifecycleRule, ReplicationConfiguration, ReplicationRuleStatus};
use serde::{Deserialize, Serialize};
use std::{
    fmt::{self, Display},
    sync::OnceLock,
};
use tokio::sync::mpsc;
use uuid::Uuid;

pub const HEAL_DELETE_DANGLING: bool = true;
pub const RUSTFS_RESERVED_BUCKET: &str = "rustfs";
pub const RUSTFS_RESERVED_BUCKET_PATH: &str = "/rustfs";

#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub enum HealItemType {
    Metadata,
    Bucket,
    BucketMetadata,
    Object,
}

impl HealItemType {
    pub fn to_str(&self) -> &str {
        match self {
            HealItemType::Metadata => "metadata",
            HealItemType::Bucket => "bucket",
            HealItemType::BucketMetadata => "bucket-metadata",
            HealItemType::Object => "object",
        }
    }
}

impl Display for HealItemType {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.to_str())
    }
}

#[derive(Clone, Copy, Debug, Serialize, Deserialize)]
pub enum DriveState {
    Ok,
    Offline,
    Corrupt,
    Missing,
    PermissionDenied,
    Faulty,
    RootMount,
    Unknown,
    Unformatted, // only returned by disk
}

impl DriveState {
    pub fn to_str(&self) -> &str {
        match self {
            DriveState::Ok => "ok",
            DriveState::Offline => "offline",
            DriveState::Corrupt => "corrupt",
            DriveState::Missing => "missing",
            DriveState::PermissionDenied => "permission-denied",
            DriveState::Faulty => "faulty",
            DriveState::RootMount => "root-mount",
            DriveState::Unknown => "unknown",
            DriveState::Unformatted => "unformatted",
        }
    }
}

impl Display for DriveState {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        write!(f, "{}", self.to_str())
    }
}

#[derive(Clone, Copy, Debug, Serialize, Deserialize, PartialEq, Eq)]
pub enum HealScanMode {
    Unknown,
    Normal,
    Deep,
}

impl Default for HealScanMode {
    fn default() -> Self {
        Self::Normal
    }
}

#[derive(Clone, Copy, Debug, Default, Serialize, Deserialize)]
pub struct HealOpts {
    pub recursive: bool,
    #[serde(rename = "dryRun")]
    pub dry_run: bool,
    pub remove: bool,
    pub recreate: bool,
    #[serde(rename = "scanMode")]
    pub scan_mode: HealScanMode,
    #[serde(rename = "updateParity")]
    pub update_parity: bool,
    #[serde(rename = "nolock")]
    pub no_lock: bool,
    pub pool: Option<usize>,
    pub set: Option<usize>,
}
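The serde(rename) attributes above pin the wire names to camelCase; a sketch of the resulting JSON shape (serde_json is assumed to be available, it is not part of this diff):

    // Only the renamed fields differ from their Rust names on the wire.
    let opts = HealOpts {
        dry_run: true,
        scan_mode: HealScanMode::Deep,
        ..Default::default()
    };
    let json = serde_json::to_string(&opts).unwrap();
    assert!(json.contains(r#""dryRun":true"#));
    assert!(json.contains(r#""scanMode":"Deep""#));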
/// Heal channel command type
#[derive(Debug, Clone)]
pub enum HealChannelCommand {
    /// Start a new heal task
    Start(HealChannelRequest),
    /// Query heal task status
    Query { heal_path: String, client_token: String },
    /// Cancel heal task
    Cancel { heal_path: String },
}

/// Heal request from admin to ahm
#[derive(Debug, Clone, Default)]
pub struct HealChannelRequest {
    /// Unique request ID
    pub id: String,
    /// Disk ID for heal disk/erasure set task
    pub disk: Option<String>,
    /// Bucket name
    pub bucket: String,
    /// Object prefix (optional)
    pub object_prefix: Option<String>,
    /// Force start heal
    pub force_start: bool,
    /// Priority
    pub priority: HealChannelPriority,
    /// Pool index (optional)
    pub pool_index: Option<usize>,
    /// Set index (optional)
    pub set_index: Option<usize>,
    /// Scan mode (optional)
    pub scan_mode: Option<HealScanMode>,
    /// Whether to remove corrupted data
    pub remove_corrupted: Option<bool>,
    /// Whether to recreate missing data
    pub recreate_missing: Option<bool>,
    /// Whether to update parity
    pub update_parity: Option<bool>,
    /// Whether to process recursively
    pub recursive: Option<bool>,
    /// Whether to dry run
    pub dry_run: Option<bool>,
    /// Timeout in seconds (optional)
    pub timeout_seconds: Option<u64>,
}

/// Heal response from ahm to admin
#[derive(Debug, Clone)]
pub struct HealChannelResponse {
    /// Request ID
    pub request_id: String,
    /// Success status
    pub success: bool,
    /// Response data (if successful)
    pub data: Option<Vec<u8>>,
    /// Error message (if failed)
    pub error: Option<String>,
}

/// Heal priority
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum HealChannelPriority {
    /// Low priority
    Low,
    /// Normal priority
    Normal,
    /// High priority
    High,
    /// Critical priority
    Critical,
}

impl Default for HealChannelPriority {
    fn default() -> Self {
        Self::Normal
    }
}

/// Heal channel sender
pub type HealChannelSender = mpsc::UnboundedSender<HealChannelCommand>;

/// Heal channel receiver
pub type HealChannelReceiver = mpsc::UnboundedReceiver<HealChannelCommand>;

/// Global heal channel sender
static GLOBAL_HEAL_CHANNEL_SENDER: OnceLock<HealChannelSender> = OnceLock::new();

/// Initialize global heal channel
pub fn init_heal_channel() -> HealChannelReceiver {
    let (tx, rx) = mpsc::unbounded_channel();
    GLOBAL_HEAL_CHANNEL_SENDER
        .set(tx)
        .expect("Heal channel sender already initialized");
    rx
}

/// Get global heal channel sender
pub fn get_heal_channel_sender() -> Option<&'static HealChannelSender> {
    GLOBAL_HEAL_CHANNEL_SENDER.get()
}

/// Send heal command through global channel
pub async fn send_heal_command(command: HealChannelCommand) -> Result<(), String> {
    if let Some(sender) = get_heal_channel_sender() {
        sender
            .send(command)
            .map_err(|e| format!("Failed to send heal command: {e}"))?;
        Ok(())
    } else {
        Err("Heal channel not initialized".to_string())
    }
}

/// Send heal start request
pub async fn send_heal_request(request: HealChannelRequest) -> Result<(), String> {
    send_heal_command(HealChannelCommand::Start(request)).await
}

/// Send heal query request
pub async fn query_heal_status(heal_path: String, client_token: String) -> Result<(), String> {
    send_heal_command(HealChannelCommand::Query { heal_path, client_token }).await
}

/// Send heal cancel request
pub async fn cancel_heal_task(heal_path: String) -> Result<(), String> {
    send_heal_command(HealChannelCommand::Cancel { heal_path }).await
}
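A minimal sketch of wiring the global channel at startup, using the functions defined above (the receiver-loop body is illustrative, not part of this crate):

    use rustfs_common::heal_channel::{
        HealChannelCommand, create_heal_request, init_heal_channel, send_heal_request,
    };

    #[tokio::main]
    async fn main() {
        // Must run exactly once, before any sender is used.
        let mut rx = init_heal_channel();
        tokio::spawn(async move {
            while let Some(cmd) = rx.recv().await {
                match cmd {
                    HealChannelCommand::Start(_req) => { /* dispatch to the heal manager */ }
                    HealChannelCommand::Query { .. } | HealChannelCommand::Cancel { .. } => { /* ... */ }
                }
            }
        });

        // Any component can now enqueue work without holding a direct reference.
        let req = create_heal_request("my-bucket".to_string(), None, false, None);
        send_heal_request(req).await.unwrap();
    }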
/// Create a new heal request
pub fn create_heal_request(
    bucket: String,
    object_prefix: Option<String>,
    force_start: bool,
    priority: Option<HealChannelPriority>,
) -> HealChannelRequest {
    HealChannelRequest {
        id: Uuid::new_v4().to_string(),
        bucket,
        object_prefix,
        force_start,
        priority: priority.unwrap_or_default(),
        pool_index: None,
        set_index: None,
        scan_mode: None,
        remove_corrupted: None,
        recreate_missing: None,
        update_parity: None,
        recursive: None,
        dry_run: None,
        timeout_seconds: None,
        disk: None,
    }
}

/// Create a new heal request with advanced options
pub fn create_heal_request_with_options(
    bucket: String,
    object_prefix: Option<String>,
    force_start: bool,
    priority: Option<HealChannelPriority>,
    pool_index: Option<usize>,
    set_index: Option<usize>,
) -> HealChannelRequest {
    HealChannelRequest {
        id: Uuid::new_v4().to_string(),
        bucket,
        object_prefix,
        force_start,
        priority: priority.unwrap_or_default(),
        pool_index,
        set_index,
        ..Default::default()
    }
}

/// Create a heal response
pub fn create_heal_response(
    request_id: String,
    success: bool,
    data: Option<Vec<u8>>,
    error: Option<String>,
) -> HealChannelResponse {
    HealChannelResponse {
        request_id,
        success,
        data,
        error,
    }
}

fn lc_get_prefix(rule: &LifecycleRule) -> String {
    if let Some(p) = &rule.prefix {
        return p.to_string();
    } else if let Some(filter) = &rule.filter {
        if let Some(p) = &filter.prefix {
            return p.to_string();
        } else if let Some(and) = &filter.and {
            if let Some(p) = &and.prefix {
                return p.to_string();
            }
        }
    }

    "".into()
}

pub fn lc_has_active_rules(config: &BucketLifecycleConfiguration, prefix: &str) -> bool {
    if config.rules.is_empty() {
        return false;
    }

    for rule in config.rules.iter() {
        if rule.status == ExpirationStatus::from_static(ExpirationStatus::DISABLED) {
            continue;
        }
        let rule_prefix = lc_get_prefix(rule);
        if !prefix.is_empty() && !rule_prefix.is_empty() && !prefix.starts_with(&rule_prefix) && !rule_prefix.starts_with(prefix)
        {
            continue;
        }

        if let Some(e) = &rule.noncurrent_version_expiration {
            if let Some(true) = e.noncurrent_days.map(|d| d > 0) {
                return true;
            }
            if let Some(true) = e.newer_noncurrent_versions.map(|d| d > 0) {
                return true;
            }
        }

        if rule.noncurrent_version_transitions.is_some() {
            return true;
        }
        if let Some(true) = rule.expiration.as_ref().map(|e| e.date.is_some()) {
            return true;
        }

        if let Some(true) = rule.expiration.as_ref().map(|e| e.days.is_some()) {
            return true;
        }

        if let Some(Some(true)) = rule.expiration.as_ref().map(|e| e.expired_object_delete_marker) {
            return true;
        }

        if let Some(true) = rule.transitions.as_ref().map(|t| !t.is_empty()) {
            return true;
        }

        if rule.transitions.is_some() {
            return true;
        }
    }
    false
}

pub fn rep_has_active_rules(config: &ReplicationConfiguration, prefix: &str, recursive: bool) -> bool {
    if config.rules.is_empty() {
        return false;
    }

    for rule in config.rules.iter() {
        if rule
            .status
            .eq(&ReplicationRuleStatus::from_static(ReplicationRuleStatus::DISABLED))
        {
            continue;
        }
        if !prefix.is_empty() {
            if let Some(filter) = &rule.filter {
                if let Some(r_prefix) = &filter.prefix {
                    if !r_prefix.is_empty() {
                        // non-recursive: the incoming prefix must fall under the rule prefix
                        if !recursive && !prefix.starts_with(r_prefix) {
                            continue;
                        }
                        // recursive: skip the rule unless one prefix is an ancestor of the other
                        if recursive && !r_prefix.starts_with(prefix) && !prefix.starts_with(r_prefix) {
                            continue;
                        }
                    }
                }
            }
        }
        return true;
    }
    false
}
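Both rule scanners above use the same bidirectional prefix test; spelled out as a hypothetical helper (not part of this diff):

    /// A lifecycle/replication rule applies when either prefix is an
    /// ancestor of the other (an empty prefix matches everything).
    fn prefixes_overlap(rule_prefix: &str, query_prefix: &str) -> bool {
        rule_prefix.is_empty()
            || query_prefix.is_empty()
            || rule_prefix.starts_with(query_prefix)
            || query_prefix.starts_with(rule_prefix)
    }

    fn main() {
        assert!(prefixes_overlap("test/", "test/sub/"));
        assert!(!prefixes_overlap("logs/", "images/"));
    }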
pub async fn send_heal_disk(set_disk_id: String, priority: Option<HealChannelPriority>) -> Result<(), String> {
    let req = HealChannelRequest {
        id: Uuid::new_v4().to_string(),
        bucket: "".to_string(),
        object_prefix: None,
        disk: Some(set_disk_id),
        force_start: false,
        priority: priority.unwrap_or_default(),
        pool_index: None,
        set_index: None,
        scan_mode: None,
        remove_corrupted: None,
        recreate_missing: None,
        update_parity: None,
        recursive: None,
        dry_run: None,
        timeout_seconds: None,
    };
    send_heal_request(req).await
}
@@ -14,8 +14,11 @@

pub mod bucket_stats;
// pub mod error;
pub mod data_usage;
pub mod globals;
pub mod heal_channel;
pub mod last_minute;
pub mod metrics;

// ASCII ',' (comma)
pub static DEFAULT_DELIMITER: u8 = 44;
@@ -12,14 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use super::data_scanner::CurrentScannerCycle;
use crate::bucket::lifecycle::lifecycle;
use chrono::Utc;
use chrono::{DateTime, Utc};
use lazy_static::lazy_static;
use rustfs_common::last_minute::{AccElem, LastMinuteLatency};
use rustfs_madmin::metrics::ScannerMetrics as M_ScannerMetrics;
use std::{
    collections::HashMap,
    fmt::Display,
    pin::Pin,
    sync::{
        Arc,
@@ -29,12 +27,58 @@ use std::{
};
use tokio::sync::{Mutex, RwLock};

use crate::last_minute::{AccElem, LastMinuteLatency};

#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum IlmAction {
    NoneAction = 0,
    DeleteAction,
    DeleteVersionAction,
    TransitionAction,
    TransitionVersionAction,
    DeleteRestoredAction,
    DeleteRestoredVersionAction,
    DeleteAllVersionsAction,
    DelMarkerDeleteAllVersionsAction,
    ActionCount,
}

impl IlmAction {
    pub fn delete_restored(&self) -> bool {
        *self == Self::DeleteRestoredAction || *self == Self::DeleteRestoredVersionAction
    }

    pub fn delete_versioned(&self) -> bool {
        *self == Self::DeleteVersionAction || *self == Self::DeleteRestoredVersionAction
    }

    pub fn delete_all(&self) -> bool {
        *self == Self::DeleteAllVersionsAction || *self == Self::DelMarkerDeleteAllVersionsAction
    }

    pub fn delete(&self) -> bool {
        if self.delete_restored() {
            return true;
        }
        *self == Self::DeleteVersionAction
            || *self == Self::DeleteAction
            || *self == Self::DeleteAllVersionsAction
            || *self == Self::DelMarkerDeleteAllVersionsAction
    }
}

impl Display for IlmAction {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        write!(f, "{self:?}")
    }
}

lazy_static! {
    pub static ref globalScannerMetrics: Arc<ScannerMetrics> = Arc::new(ScannerMetrics::new());
    pub static ref globalMetrics: Arc<Metrics> = Arc::new(Metrics::new());
}

#[derive(Clone, Debug, PartialEq, PartialOrd)]
pub enum ScannerMetric {
pub enum Metric {
    // START Realtime metrics, that only records
    // last minute latencies and total operation count.
    ReadMetadata = 0,
@@ -69,7 +113,7 @@ pub enum ScannerMetric {
    Last,
}

impl ScannerMetric {
impl Metric {
    /// Convert to string representation for metrics
    pub fn as_str(self) -> &'static str {
        match self {
@@ -203,7 +247,7 @@ impl CurrentPathTracker {
}

/// Main scanner metrics structure
pub struct ScannerMetrics {
pub struct Metrics {
    // All fields must be accessed atomically and aligned.
    operations: Vec<AtomicU64>,
    latency: Vec<LockedLastMinuteLatency>,
@@ -213,94 +257,102 @@ pub struct ScannerMetrics {
    current_paths: Arc<RwLock<HashMap<String, Arc<CurrentPathTracker>>>>,

    // Cycle information
    cycle_info: Arc<RwLock<Option<CurrentScannerCycle>>>,
    cycle_info: Arc<RwLock<Option<CurrentCycle>>>,
}

impl ScannerMetrics {
    pub fn new() -> Self {
        let operations = (0..ScannerMetric::Last as usize).map(|_| AtomicU64::new(0)).collect();
// This is a placeholder. We'll need to define this struct.
#[derive(Clone, Debug)]
pub struct CurrentCycle {
    pub current: u64,
    pub cycle_completed: Vec<DateTime<Utc>>,
    pub started: DateTime<Utc>,
}

        let latency = (0..ScannerMetric::LastRealtime as usize)
impl Metrics {
    pub fn new() -> Self {
        let operations = (0..Metric::Last as usize).map(|_| AtomicU64::new(0)).collect();

        let latency = (0..Metric::LastRealtime as usize)
            .map(|_| LockedLastMinuteLatency::new())
            .collect();

        Self {
            operations,
            latency,
            actions: (0..ScannerMetric::Last as usize).map(|_| AtomicU64::new(0)).collect(),
            actions_latency: vec![LockedLastMinuteLatency::default(); ScannerMetric::LastRealtime as usize],
            actions: (0..IlmAction::ActionCount as usize).map(|_| AtomicU64::new(0)).collect(),
            actions_latency: vec![LockedLastMinuteLatency::default(); IlmAction::ActionCount as usize],
            current_paths: Arc::new(RwLock::new(HashMap::new())),
            cycle_info: Arc::new(RwLock::new(None)),
        }
    }

    /// Log scanner action with custom metadata - compatible with existing usage
    pub fn log(metric: ScannerMetric) -> impl Fn(&HashMap<String, String>) {
    pub fn log(metric: Metric) -> impl Fn(&HashMap<String, String>) {
        let metric = metric as usize;
        let start_time = SystemTime::now();
        move |_custom: &HashMap<String, String>| {
            let duration = SystemTime::now().duration_since(start_time).unwrap_or_default();

            // Update operation count
            globalScannerMetrics.operations[metric].fetch_add(1, Ordering::Relaxed);
            globalMetrics.operations[metric].fetch_add(1, Ordering::Relaxed);

            // Update latency for realtime metrics (spawn async task for this)
            if (metric) < ScannerMetric::LastRealtime as usize {
            if (metric) < Metric::LastRealtime as usize {
                let metric_index = metric;
                tokio::spawn(async move {
                    globalScannerMetrics.latency[metric_index].add(duration).await;
                    globalMetrics.latency[metric_index].add(duration).await;
                });
            }

            // Log trace metrics
            if metric as u8 > ScannerMetric::StartTrace as u8 {
            if metric as u8 > Metric::StartTrace as u8 {
                //debug!(metric = metric.as_str(), duration_ms = duration.as_millis(), "Scanner trace metric");
            }
        }
    }

    /// Time scanner action with size - returns function that takes size
    pub fn time_size(metric: ScannerMetric) -> impl Fn(u64) {
    pub fn time_size(metric: Metric) -> impl Fn(u64) {
        let metric = metric as usize;
        let start_time = SystemTime::now();
        move |size: u64| {
            let duration = SystemTime::now().duration_since(start_time).unwrap_or_default();

            // Update operation count
            globalScannerMetrics.operations[metric].fetch_add(1, Ordering::Relaxed);
            globalMetrics.operations[metric].fetch_add(1, Ordering::Relaxed);

            // Update latency for realtime metrics with size (spawn async task)
            if (metric) < ScannerMetric::LastRealtime as usize {
            if (metric) < Metric::LastRealtime as usize {
                let metric_index = metric;
                tokio::spawn(async move {
                    globalScannerMetrics.latency[metric_index].add_size(duration, size).await;
                    globalMetrics.latency[metric_index].add_size(duration, size).await;
                });
            }
        }
    }

    /// Time a scanner action - returns a closure to call when done
    pub fn time(metric: ScannerMetric) -> impl Fn() {
    pub fn time(metric: Metric) -> impl Fn() {
        let metric = metric as usize;
        let start_time = SystemTime::now();
        move || {
            let duration = SystemTime::now().duration_since(start_time).unwrap_or_default();

            // Update operation count
            globalScannerMetrics.operations[metric].fetch_add(1, Ordering::Relaxed);
            globalMetrics.operations[metric].fetch_add(1, Ordering::Relaxed);

            // Update latency for realtime metrics (spawn async task)
            if (metric) < ScannerMetric::LastRealtime as usize {
            if (metric) < Metric::LastRealtime as usize {
                let metric_index = metric;
                tokio::spawn(async move {
                    globalScannerMetrics.latency[metric_index].add(duration).await;
                    globalMetrics.latency[metric_index].add(duration).await;
                });
            }
        }
    }
|
||||
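All three helpers above share one pattern: capture a start time, bump the operation counter, and push latency into the last-minute window when the returned closure runs. A minimal call-site sketch, assuming the `Metric::ReadMetadata` variant shown above (`read_metadata` and `read_data` are hypothetical stand-ins, not functions from this diff):

// Hypothetical call site for the timing helpers above.
async fn scan_entry() {
    let done = Metrics::time(Metric::ReadMetadata);
    read_metadata().await; // stand-in for the real work being timed
    done(); // records one operation plus its latency

    let done_sized = Metrics::time_size(Metric::ReadMetadata);
    let bytes: u64 = read_data().await; // stand-in; returns bytes processed
    done_sized(bytes); // records latency together with the size
}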
    /// Time N scanner actions - returns function that takes count, then returns completion function
-    pub fn time_n(metric: ScannerMetric) -> Box<dyn Fn(usize) -> Box<dyn Fn() + Send + Sync> + Send + Sync> {
+    pub fn time_n(metric: Metric) -> Box<dyn Fn(usize) -> Box<dyn Fn() + Send + Sync> + Send + Sync> {
        let metric = metric as usize;
        let start_time = SystemTime::now();
        Box::new(move |count: usize| {
@@ -308,22 +360,23 @@ impl ScannerMetrics {
            let duration = SystemTime::now().duration_since(start_time).unwrap_or_default();

            // Update operation count
-            globalScannerMetrics.operations[metric].fetch_add(count as u64, Ordering::Relaxed);
+            globalMetrics.operations[metric].fetch_add(count as u64, Ordering::Relaxed);

            // Update latency for realtime metrics (spawn async task)
-            if (metric) < ScannerMetric::LastRealtime as usize {
+            if (metric) < Metric::LastRealtime as usize {
                let metric_index = metric;
                tokio::spawn(async move {
-                    globalScannerMetrics.latency[metric_index].add(duration).await;
+                    globalMetrics.latency[metric_index].add(duration).await;
                });
            }
            })
        })
    }

-    pub fn time_ilm(a: lifecycle::IlmAction) -> Box<dyn Fn(u64) -> Box<dyn Fn() + Send + Sync> + Send + Sync> {
+    /// Time ILM action with versions - returns function that takes versions, then returns completion function
+    pub fn time_ilm(a: IlmAction) -> Box<dyn Fn(u64) -> Box<dyn Fn() + Send + Sync> + Send + Sync> {
        let a_clone = a as usize;
-        if a_clone == lifecycle::IlmAction::NoneAction as usize || a_clone >= lifecycle::IlmAction::ActionCount as usize {
+        if a_clone == IlmAction::NoneAction as usize || a_clone >= IlmAction::ActionCount as usize {
            return Box::new(move |_: u64| Box::new(move || {}));
        }
        let start = SystemTime::now();
@@ -331,50 +384,50 @@ impl ScannerMetrics {
            Box::new(move || {
                let duration = SystemTime::now().duration_since(start).unwrap_or(Duration::from_secs(0));
                tokio::spawn(async move {
-                    globalScannerMetrics.actions[a_clone].fetch_add(versions, Ordering::Relaxed);
-                    globalScannerMetrics.actions_latency[a_clone].add(duration).await;
+                    globalMetrics.actions[a_clone].fetch_add(versions, Ordering::Relaxed);
+                    globalMetrics.actions_latency[a_clone].add(duration).await;
                });
            })
        })
    }
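`time_ilm` adds one level: the first call takes how many object versions the action touched and returns the completion closure, while `NoneAction` and out-of-range values short-circuit to a no-op as the guard above shows. A hedged sketch, assuming a `DeleteAction` variant exists on `IlmAction`:

// Hypothetical call site: an ILM delete covering 3 versions.
let record = Metrics::time_ilm(IlmAction::DeleteAction);
let done = record(3); // 3 versions affected by this action
// ... perform the lifecycle action itself ...
done(); // spawns a task that bumps the action count and latency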
    /// Increment time with specific duration
-    pub async fn inc_time(metric: ScannerMetric, duration: Duration) {
+    pub async fn inc_time(metric: Metric, duration: Duration) {
        let metric = metric as usize;
        // Update operation count
-        globalScannerMetrics.operations[metric].fetch_add(1, Ordering::Relaxed);
+        globalMetrics.operations[metric].fetch_add(1, Ordering::Relaxed);

        // Update latency for realtime metrics
-        if (metric) < ScannerMetric::LastRealtime as usize {
-            globalScannerMetrics.latency[metric].add(duration).await;
+        if (metric) < Metric::LastRealtime as usize {
+            globalMetrics.latency[metric].add(duration).await;
        }
    }

    /// Get lifetime operation count for a metric
-    pub fn lifetime(&self, metric: ScannerMetric) -> u64 {
+    pub fn lifetime(&self, metric: Metric) -> u64 {
        let metric = metric as usize;
-        if (metric) >= ScannerMetric::Last as usize {
+        if (metric) >= Metric::Last as usize {
            return 0;
        }
        self.operations[metric].load(Ordering::Relaxed)
    }

    /// Get last minute statistics for a metric
-    pub async fn last_minute(&self, metric: ScannerMetric) -> AccElem {
+    pub async fn last_minute(&self, metric: Metric) -> AccElem {
        let metric = metric as usize;
-        if (metric) >= ScannerMetric::LastRealtime as usize {
+        if (metric) >= Metric::LastRealtime as usize {
            return AccElem::default();
        }
        self.latency[metric].total().await
    }

    /// Set current cycle information
-    pub async fn set_cycle(&self, cycle: Option<CurrentScannerCycle>) {
+    pub async fn set_cycle(&self, cycle: Option<CurrentCycle>) {
        *self.cycle_info.write().await = cycle;
    }

    /// Get current cycle information
-    pub async fn get_cycle(&self) -> Option<CurrentScannerCycle> {
+    pub async fn get_cycle(&self) -> Option<CurrentCycle> {
        self.cycle_info.read().await.clone()
    }
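The read side goes through the same global instance. A short sketch combining the accessors above with the `CurrentCycle` placeholder defined earlier in this diff (the field values and `Utc::now()` call are illustrative; `acc.n` is the count field this diff itself reads):

async fn report() {
    let ops = globalMetrics.lifetime(Metric::ReadMetadata);
    let acc = globalMetrics.last_minute(Metric::ReadMetadata).await;
    println!("read_metadata: {ops} lifetime ops, {} in the last minute", acc.n);

    // Record the currently running scanner cycle.
    globalMetrics
        .set_cycle(Some(CurrentCycle {
            current: 1,
            cycle_completed: vec![],
            started: Utc::now(),
        }))
        .await;
}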
@@ -411,20 +464,20 @@ impl ScannerMetrics {
        metrics.active_paths = self.get_current_paths().await;

        // Lifetime operations
-        for i in 0..ScannerMetric::Last as usize {
+        for i in 0..Metric::Last as usize {
            let count = self.operations[i].load(Ordering::Relaxed);
            if count > 0 {
-                if let Some(metric) = ScannerMetric::from_index(i) {
+                if let Some(metric) = Metric::from_index(i) {
                    metrics.life_time_ops.insert(metric.as_str().to_string(), count);
                }
            }
        }

        // Last minute statistics for realtime metrics
-        for i in 0..ScannerMetric::LastRealtime as usize {
+        for i in 0..Metric::LastRealtime as usize {
            let last_min = self.latency[i].total().await;
            if last_min.n > 0 {
-                if let Some(_metric) = ScannerMetric::from_index(i) {
+                if let Some(_metric) = Metric::from_index(i) {
                    // Convert to madmin TimedAction format if needed
                    // This would require implementing the conversion
                }
@@ -448,11 +501,7 @@ pub fn current_path_updater(disk: &str, initial: &str) -> (UpdateCurrentPathFn,
    let tracker_clone = Arc::clone(&tracker);
    let disk_clone = disk_name.clone();
    tokio::spawn(async move {
-        globalScannerMetrics
-            .current_paths
-            .write()
-            .await
-            .insert(disk_clone, tracker_clone);
+        globalMetrics.current_paths.write().await.insert(disk_clone, tracker_clone);
    });

    let update_fn = {
@@ -471,7 +520,7 @@ pub fn current_path_updater(disk: &str, initial: &str) -> (UpdateCurrentPathFn,
        Arc::new(move || -> Pin<Box<dyn std::future::Future<Output = ()> + Send>> {
            let disk_name = disk_name.clone();
            Box::pin(async move {
-                globalScannerMetrics.current_paths.write().await.remove(&disk_name);
+                globalMetrics.current_paths.write().await.remove(&disk_name);
            })
        })
    };
@@ -479,7 +528,7 @@ pub fn current_path_updater(disk: &str, initial: &str) -> (UpdateCurrentPathFn,
    (update_fn, done_fn)
}
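`current_path_updater` registers a per-disk tracker in `globalMetrics.current_paths` and hands back an update/done pair to bracket a scan. A rough sketch under the signatures visible in this hunk (the `UpdateCurrentPathFn` alias is not shown in this diff, so the argument to `update` is an assumption):

async fn walk_disk() {
    let (update, done) = current_path_updater("disk-0", "/bucket/prefix");
    update("/bucket/prefix/obj-1"); // assumed: takes the path currently being scanned
    // ... keep walking, updating as we go ...
    done().await; // removes this disk's tracker from globalMetrics.current_paths
}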
-impl Default for ScannerMetrics {
+impl Default for Metrics {
    fn default() -> Self {
        Self::new()
    }
@@ -26,9 +26,6 @@ categories = ["web-programming", "development-tools", "config"]

[dependencies]
const-str = { workspace = true, optional = true }
serde = { workspace = true }
serde_json = { workspace = true }

[lints]
workspace = true
@@ -15,9 +15,9 @@
use const_str::concat;

/// Application name
-/// Default value: RustFs
+/// Default value: RustFS
/// Environment variable: RUSTFS_APP_NAME
-pub const APP_NAME: &str = "RustFs";
+pub const APP_NAME: &str = "RustFS";
/// Application version
/// Default value: 1.0.0
/// Environment variable: RUSTFS_VERSION
@@ -71,6 +71,16 @@ pub const DEFAULT_ACCESS_KEY: &str = "rustfsadmin";
/// Example: --secret-key rustfsadmin
pub const DEFAULT_SECRET_KEY: &str = "rustfsadmin";

+/// Default console enable
+/// This is the default value for the console server.
+/// It is used to enable or disable the console server.
+/// Default value: true
+/// Environment variable: RUSTFS_CONSOLE_ENABLE
+/// Command line argument: --console-enable
+/// Example: RUSTFS_CONSOLE_ENABLE=true
+/// Example: --console-enable true
+pub const DEFAULT_CONSOLE_ENABLE: bool = true;

/// Default OBS configuration endpoint
/// Environment variable: DEFAULT_OBS_ENDPOINT
/// Command line argument: --obs-endpoint
@@ -108,34 +118,46 @@ pub const DEFAULT_CONSOLE_ADDRESS: &str = concat!(":", DEFAULT_CONSOLE_PORT);
/// It is used to store the logs of the application.
/// Default value: rustfs.log
/// Environment variable: RUSTFS_OBSERVABILITY_LOG_FILENAME
-pub const DEFAULT_LOG_FILENAME: &str = "rustfs.log";
+pub const DEFAULT_LOG_FILENAME: &str = "rustfs";

+/// Default OBS log filename for rustfs
+/// This is the default log filename for OBS.
+/// It is used to store the logs of the application.
+/// Default value: rustfs.log
+pub const DEFAULT_OBS_LOG_FILENAME: &str = concat!(DEFAULT_LOG_FILENAME, ".log");

+/// Default sink file log file for rustfs
+/// This is the default sink file log file for rustfs.
+/// It is used to store the logs of the application.
+/// Default value: rustfs-sink.log
+pub const DEFAULT_SINK_FILE_LOG_FILE: &str = concat!(DEFAULT_LOG_FILENAME, "-sink.log");
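Because `DEFAULT_LOG_FILENAME` dropped its extension, the two derived constants are now assembled at compile time with `const_str::concat!`. A quick sanity-check sketch of the resulting values:

#[test]
fn derived_log_filenames() {
    // "rustfs" + ".log" and "rustfs" + "-sink.log", joined at compile time
    assert_eq!(DEFAULT_OBS_LOG_FILENAME, "rustfs.log");
    assert_eq!(DEFAULT_SINK_FILE_LOG_FILE, "rustfs-sink.log");
}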
/// Default log directory for rustfs
/// This is the default log directory for rustfs.
/// It is used to store the logs of the application.
/// Default value: logs
-/// Environment variable: RUSTFS_OBSERVABILITY_LOG_DIRECTORY
-pub const DEFAULT_LOG_DIR: &str = "deploy/logs";
+/// Environment variable: RUSTFS_LOG_DIRECTORY
+pub const DEFAULT_LOG_DIR: &str = "logs";

/// Default log rotation size mb for rustfs
/// This is the default log rotation size for rustfs.
/// It is used to rotate the logs of the application.
/// Default value: 100 MB
-/// Environment variable: RUSTFS_OBSERVABILITY_LOG_ROTATION_SIZE_MB
+/// Environment variable: RUSTFS_OBS_LOG_ROTATION_SIZE_MB
pub const DEFAULT_LOG_ROTATION_SIZE_MB: u64 = 100;

/// Default log rotation time for rustfs
/// This is the default log rotation time for rustfs.
/// It is used to rotate the logs of the application.
/// Default value: day (one of: day, hour, minute, second)
-/// Environment variable: RUSTFS_OBSERVABILITY_LOG_ROTATION_TIME
+/// Environment variable: RUSTFS_OBS_LOG_ROTATION_TIME
pub const DEFAULT_LOG_ROTATION_TIME: &str = "day";

/// Default log keep files for rustfs
/// This is the default log keep files for rustfs.
/// It is used to keep the logs of the application.
/// Default value: 30
-/// Environment variable: RUSTFS_OBSERVABILITY_LOG_KEEP_FILES
+/// Environment variable: RUSTFS_OBS_LOG_KEEP_FILES
pub const DEFAULT_LOG_KEEP_FILES: u16 = 30;

#[cfg(test)]
@@ -145,7 +167,7 @@ mod tests {
    #[test]
    fn test_app_basic_constants() {
        // Test application basic constants
-        assert_eq!(APP_NAME, "RustFs");
+        assert_eq!(APP_NAME, "RustFS");
        assert!(!APP_NAME.contains(' '), "App name should not contain spaces");

        assert_eq!(VERSION, "0.0.1");
@@ -19,3 +19,265 @@ pub const ENV_WORD_DELIMITER: &str = "_";
/// Dash separator
/// This is used to separate words in environment variable names.
pub const ENV_WORD_DELIMITER_DASH: &str = "-";

#[derive(Debug, PartialEq, Eq, Clone, Copy, Default)]
pub enum EnableState {
    True,
    False,
    #[default]
    Empty,
    Yes,
    No,
    On,
    Off,
    Enabled,
    Disabled,
    Ok,
    NotOk,
    Success,
    Failure,
    Active,
    Inactive,
    One,
    Zero,
}
impl std::fmt::Display for EnableState {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.write_str(self.as_str())
    }
}
impl std::str::FromStr for EnableState {
    type Err = ();
    fn from_str(s: &str) -> Result<Self, Self::Err> {
        match s.trim() {
            s if s.eq_ignore_ascii_case("true") => Ok(EnableState::True),
            s if s.eq_ignore_ascii_case("false") => Ok(EnableState::False),
            "" => Ok(EnableState::Empty),
            s if s.eq_ignore_ascii_case("yes") => Ok(EnableState::Yes),
            s if s.eq_ignore_ascii_case("no") => Ok(EnableState::No),
            s if s.eq_ignore_ascii_case("on") => Ok(EnableState::On),
            s if s.eq_ignore_ascii_case("off") => Ok(EnableState::Off),
            s if s.eq_ignore_ascii_case("enabled") => Ok(EnableState::Enabled),
            s if s.eq_ignore_ascii_case("disabled") => Ok(EnableState::Disabled),
            s if s.eq_ignore_ascii_case("ok") => Ok(EnableState::Ok),
            s if s.eq_ignore_ascii_case("not_ok") => Ok(EnableState::NotOk),
            s if s.eq_ignore_ascii_case("success") => Ok(EnableState::Success),
            s if s.eq_ignore_ascii_case("failure") => Ok(EnableState::Failure),
            s if s.eq_ignore_ascii_case("active") => Ok(EnableState::Active),
            s if s.eq_ignore_ascii_case("inactive") => Ok(EnableState::Inactive),
            "1" => Ok(EnableState::One),
            "0" => Ok(EnableState::Zero),
            _ => Err(()),
        }
    }
}

impl EnableState {
    /// Returns the default value for the enum.
    pub fn get_default() -> Self {
        Self::default()
    }
    /// Returns the string representation of the enum.
    pub fn as_str(&self) -> &str {
        match self {
            EnableState::True => "true",
            EnableState::False => "false",
            EnableState::Empty => "",
            EnableState::Yes => "yes",
            EnableState::No => "no",
            EnableState::On => "on",
            EnableState::Off => "off",
            EnableState::Enabled => "enabled",
            EnableState::Disabled => "disabled",
            EnableState::Ok => "ok",
            EnableState::NotOk => "not_ok",
            EnableState::Success => "success",
            EnableState::Failure => "failure",
            EnableState::Active => "active",
            EnableState::Inactive => "inactive",
            EnableState::One => "1",
            EnableState::Zero => "0",
        }
    }

    /// is_enabled checks if the state represents an enabled condition.
    pub fn is_enabled(self) -> bool {
        matches!(
            self,
            EnableState::True
                | EnableState::Yes
                | EnableState::On
                | EnableState::Enabled
                | EnableState::Ok
                | EnableState::Success
                | EnableState::Active
                | EnableState::One
        )
    }

    /// is_disabled checks if the state represents a disabled condition.
    pub fn is_disabled(self) -> bool {
        matches!(
            self,
            EnableState::False
                | EnableState::No
                | EnableState::Off
                | EnableState::Disabled
                | EnableState::NotOk
                | EnableState::Failure
                | EnableState::Inactive
                | EnableState::Zero
                | EnableState::Empty
        )
    }
}
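In practice the enum is parsed from environment values, with anything unrecognized treated as a parse error rather than silently mapped to disabled. A hedged sketch using the console flag documented earlier in this diff:

use std::str::FromStr;

fn console_enabled() -> bool {
    std::env::var("RUSTFS_CONSOLE_ENABLE")
        .ok()
        .and_then(|v| EnableState::from_str(&v).ok())
        .map(|s| s.is_enabled())
        // fall back to the documented default when unset or unparsable
        .unwrap_or(DEFAULT_CONSOLE_ENABLE)
}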
#[cfg(test)]
mod tests {
    use super::*;
    use std::str::FromStr;
    #[test]
    fn test_enable_state_display_and_fromstr() {
        let cases = [
            (EnableState::True, "true"),
            (EnableState::False, "false"),
            (EnableState::Empty, ""),
            (EnableState::Yes, "yes"),
            (EnableState::No, "no"),
            (EnableState::On, "on"),
            (EnableState::Off, "off"),
            (EnableState::Enabled, "enabled"),
            (EnableState::Disabled, "disabled"),
            (EnableState::Ok, "ok"),
            (EnableState::NotOk, "not_ok"),
            (EnableState::Success, "success"),
            (EnableState::Failure, "failure"),
            (EnableState::Active, "active"),
            (EnableState::Inactive, "inactive"),
            (EnableState::One, "1"),
            (EnableState::Zero, "0"),
        ];
        for (variant, string) in cases.iter() {
            assert_eq!(&variant.to_string(), string);
            assert_eq!(EnableState::from_str(string).unwrap(), *variant);
        }
        // Test invalid string
        assert!(EnableState::from_str("invalid").is_err());
    }
    #[test]
    fn test_enable_state_enum() {
        let cases = [
            (EnableState::True, "true"),
            (EnableState::False, "false"),
            (EnableState::Empty, ""),
            (EnableState::Yes, "yes"),
            (EnableState::No, "no"),
            (EnableState::On, "on"),
            (EnableState::Off, "off"),
            (EnableState::Enabled, "enabled"),
            (EnableState::Disabled, "disabled"),
            (EnableState::Ok, "ok"),
            (EnableState::NotOk, "not_ok"),
            (EnableState::Success, "success"),
            (EnableState::Failure, "failure"),
            (EnableState::Active, "active"),
            (EnableState::Inactive, "inactive"),
            (EnableState::One, "1"),
            (EnableState::Zero, "0"),
        ];
        for (variant, string) in cases.iter() {
            assert_eq!(variant.to_string(), *string);
        }
    }

    #[test]
    fn test_enable_state_enum_from_str() {
        let cases = [
            ("true", EnableState::True),
            ("false", EnableState::False),
            ("", EnableState::Empty),
            ("yes", EnableState::Yes),
            ("no", EnableState::No),
            ("on", EnableState::On),
            ("off", EnableState::Off),
            ("enabled", EnableState::Enabled),
            ("disabled", EnableState::Disabled),
            ("ok", EnableState::Ok),
            ("not_ok", EnableState::NotOk),
            ("success", EnableState::Success),
            ("failure", EnableState::Failure),
            ("active", EnableState::Active),
            ("inactive", EnableState::Inactive),
            ("1", EnableState::One),
            ("0", EnableState::Zero),
        ];
        for (string, variant) in cases.iter() {
            assert_eq!(EnableState::from_str(string).unwrap(), *variant);
        }
    }

    #[test]
    fn test_enable_state_default() {
        let default_state = EnableState::get_default();
        assert_eq!(default_state, EnableState::Empty);
        assert_eq!(default_state.as_str(), "");
    }

    #[test]
    fn test_enable_state_as_str() {
        let cases = [
            (EnableState::True, "true"),
            (EnableState::False, "false"),
            (EnableState::Empty, ""),
            (EnableState::Yes, "yes"),
            (EnableState::No, "no"),
            (EnableState::On, "on"),
            (EnableState::Off, "off"),
            (EnableState::Enabled, "enabled"),
            (EnableState::Disabled, "disabled"),
            (EnableState::Ok, "ok"),
            (EnableState::NotOk, "not_ok"),
            (EnableState::Success, "success"),
            (EnableState::Failure, "failure"),
            (EnableState::Active, "active"),
            (EnableState::Inactive, "inactive"),
            (EnableState::One, "1"),
            (EnableState::Zero, "0"),
        ];
        for (variant, string) in cases.iter() {
            assert_eq!(variant.as_str(), *string);
        }
    }

    #[test]
    fn test_enable_state_is_enabled() {
        let enabled_states = [
            EnableState::True,
            EnableState::Yes,
            EnableState::On,
            EnableState::Enabled,
            EnableState::Ok,
            EnableState::Success,
            EnableState::Active,
            EnableState::One,
        ];
        for state in enabled_states.iter() {
            assert!(state.is_enabled());
        }

        let disabled_states = [
            EnableState::False,
            EnableState::No,
            EnableState::Off,
            EnableState::Disabled,
            EnableState::NotOk,
            EnableState::Failure,
            EnableState::Inactive,
            EnableState::Zero,
            EnableState::Empty,
        ];
        for state in disabled_states.iter() {
            assert!(state.is_disabled());
        }
    }
}
@@ -12,5 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-pub(crate) mod app;
-pub(crate) mod env;
+pub mod app;
+pub mod env;
+pub mod tls;

crates/config/src/constants/tls.rs (new file, 15 lines)
@@ -0,0 +1,15 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

pub const ENV_TLS_KEYLOG: &str = "RUSTFS_TLS_KEYLOG";
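`ENV_TLS_KEYLOG` is only the variable name; a consumer would pair it with `EnableState` to decide whether to wire up an SSLKEYLOGFILE-style key logger. A minimal sketch under that assumption:

use std::str::FromStr;

fn keylog_requested() -> bool {
    std::env::var(ENV_TLS_KEYLOG)
        .ok()
        .and_then(|v| EnableState::from_str(&v).ok())
        .is_some_and(|s| s.is_enabled())
}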
@@ -18,6 +18,8 @@ pub mod constants;
pub use constants::app::*;
#[cfg(feature = "constants")]
pub use constants::env::*;
+#[cfg(feature = "constants")]
+pub use constants::tls::*;
#[cfg(feature = "notify")]
pub mod notify;
#[cfg(feature = "observability")]

@@ -27,7 +27,15 @@ pub const DEFAULT_TARGET: &str = "1";

pub const NOTIFY_PREFIX: &str = "notify";

-pub const NOTIFY_ROUTE_PREFIX: &str = "notify_";
+pub const NOTIFY_ROUTE_PREFIX: &str = const_str::concat!(NOTIFY_PREFIX, "_");
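Deriving the route prefix from `NOTIFY_PREFIX` keeps the two constants from drifting apart. A small sketch of splitting a subsystem name back out of a route key (the key format itself is an assumption, not shown in this diff):

fn notify_subsystem(route_key: &str) -> Option<&str> {
    // "notify_mqtt" -> Some("mqtt"); keys without the prefix -> None
    route_key.strip_prefix(NOTIFY_ROUTE_PREFIX)
}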
+/// Standard config keys and values.
+pub const ENABLE_KEY: &str = "enable";
+pub const COMMENT_KEY: &str = "comment";
+
+/// Enable values
+pub const ENABLE_ON: &str = "on";
+pub const ENABLE_OFF: &str = "off";

#[allow(dead_code)]
pub const NOTIFY_SUB_SYSTEMS: &[&str] = &[NOTIFY_MQTT_SUB_SYS, NOTIFY_WEBHOOK_SUB_SYS];

@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

+use crate::notify::{COMMENT_KEY, ENABLE_KEY};

// MQTT Keys
pub const MQTT_BROKER: &str = "broker";
pub const MQTT_TOPIC: &str = "topic";
@@ -23,6 +25,21 @@ pub const MQTT_KEEP_ALIVE_INTERVAL: &str = "keep_alive_interval";
pub const MQTT_QUEUE_DIR: &str = "queue_dir";
pub const MQTT_QUEUE_LIMIT: &str = "queue_limit";

+/// A list of all valid configuration keys for an MQTT target.
+pub const NOTIFY_MQTT_KEYS: &[&str] = &[
+    ENABLE_KEY, // "enable" is a common key
+    MQTT_BROKER,
+    MQTT_TOPIC,
+    MQTT_QOS,
+    MQTT_USERNAME,
+    MQTT_PASSWORD,
+    MQTT_RECONNECT_INTERVAL,
+    MQTT_KEEP_ALIVE_INTERVAL,
+    MQTT_QUEUE_DIR,
+    MQTT_QUEUE_LIMIT,
+    COMMENT_KEY,
+];

// MQTT Environment Variables
pub const ENV_MQTT_ENABLE: &str = "RUSTFS_NOTIFY_MQTT_ENABLE";
pub const ENV_MQTT_BROKER: &str = "RUSTFS_NOTIFY_MQTT_BROKER";
@@ -34,3 +51,16 @@ pub const ENV_MQTT_RECONNECT_INTERVAL: &str = "RUSTFS_NOTIFY_MQTT_RECONNECT_INTE
pub const ENV_MQTT_KEEP_ALIVE_INTERVAL: &str = "RUSTFS_NOTIFY_MQTT_KEEP_ALIVE_INTERVAL";
pub const ENV_MQTT_QUEUE_DIR: &str = "RUSTFS_NOTIFY_MQTT_QUEUE_DIR";
pub const ENV_MQTT_QUEUE_LIMIT: &str = "RUSTFS_NOTIFY_MQTT_QUEUE_LIMIT";

+pub const ENV_NOTIFY_MQTT_KEYS: &[&str; 10] = &[
+    ENV_MQTT_ENABLE,
+    ENV_MQTT_BROKER,
+    ENV_MQTT_TOPIC,
+    ENV_MQTT_QOS,
+    ENV_MQTT_USERNAME,
+    ENV_MQTT_PASSWORD,
+    ENV_MQTT_RECONNECT_INTERVAL,
+    ENV_MQTT_KEEP_ALIVE_INTERVAL,
+    ENV_MQTT_QUEUE_DIR,
+    ENV_MQTT_QUEUE_LIMIT,
+];
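A typical use of `NOTIFY_MQTT_KEYS` is rejecting unknown keys before an MQTT target is built. A minimal sketch (the `String` error is a stand-in for the crate's real error type):

fn validate_mqtt_key(key: &str) -> Result<(), String> {
    if NOTIFY_MQTT_KEYS.contains(&key) {
        Ok(())
    } else {
        Err(format!("unknown MQTT notify key: {key}"))
    }
}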
@@ -12,6 +12,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.

+use crate::notify::{COMMENT_KEY, ENABLE_KEY};

// Webhook Keys
pub const WEBHOOK_ENDPOINT: &str = "endpoint";
pub const WEBHOOK_AUTH_TOKEN: &str = "auth_token";
@@ -20,6 +22,18 @@ pub const WEBHOOK_QUEUE_DIR: &str = "queue_dir";
pub const WEBHOOK_CLIENT_CERT: &str = "client_cert";
pub const WEBHOOK_CLIENT_KEY: &str = "client_key";

+/// A list of all valid configuration keys for a webhook target.
+pub const NOTIFY_WEBHOOK_KEYS: &[&str] = &[
+    ENABLE_KEY, // "enable" is a common key
+    WEBHOOK_ENDPOINT,
+    WEBHOOK_AUTH_TOKEN,
+    WEBHOOK_QUEUE_LIMIT,
+    WEBHOOK_QUEUE_DIR,
+    WEBHOOK_CLIENT_CERT,
+    WEBHOOK_CLIENT_KEY,
+    COMMENT_KEY,
+];

// Webhook Environment Variables
pub const ENV_WEBHOOK_ENABLE: &str = "RUSTFS_NOTIFY_WEBHOOK_ENABLE";
pub const ENV_WEBHOOK_ENDPOINT: &str = "RUSTFS_NOTIFY_WEBHOOK_ENDPOINT";
@@ -28,3 +42,13 @@ pub const ENV_WEBHOOK_QUEUE_LIMIT: &str = "RUSTFS_NOTIFY_WEBHOOK_QUEUE_LIMIT";
pub const ENV_WEBHOOK_QUEUE_DIR: &str = "RUSTFS_NOTIFY_WEBHOOK_QUEUE_DIR";
pub const ENV_WEBHOOK_CLIENT_CERT: &str = "RUSTFS_NOTIFY_WEBHOOK_CLIENT_CERT";
pub const ENV_WEBHOOK_CLIENT_KEY: &str = "RUSTFS_NOTIFY_WEBHOOK_CLIENT_KEY";

+pub const ENV_NOTIFY_WEBHOOK_KEYS: &[&str; 7] = &[
+    ENV_WEBHOOK_ENABLE,
+    ENV_WEBHOOK_ENDPOINT,
+    ENV_WEBHOOK_AUTH_TOKEN,
+    ENV_WEBHOOK_QUEUE_LIMIT,
+    ENV_WEBHOOK_QUEUE_DIR,
+    ENV_WEBHOOK_CLIENT_CERT,
+    ENV_WEBHOOK_CLIENT_KEY,
+];
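The webhook config keys and environment variables line up one-to-one, so env overrides can be resolved per key. A hedged sketch for the endpoint:

fn webhook_endpoint_override() -> Option<String> {
    // mirrors the WEBHOOK_ENDPOINT file-config key
    std::env::var(ENV_WEBHOOK_ENDPOINT)
        .ok()
        .filter(|v| !v.trim().is_empty())
}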
@@ -12,279 +12,24 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use crate::observability::logger::LoggerConfig;
-use crate::observability::otel::OtelConfig;
-use crate::observability::sink::SinkConfig;
-use serde::{Deserialize, Serialize};
+// Observability Keys

-/// Observability configuration
-#[derive(Debug, Deserialize, Serialize, Clone)]
-pub struct ObservabilityConfig {
-    pub otel: OtelConfig,
-    pub sinks: Vec<SinkConfig>,
-    pub logger: Option<LoggerConfig>,
-}
+pub const ENV_OBS_ENDPOINT: &str = "RUSTFS_OBS_ENDPOINT";
+pub const ENV_OBS_USE_STDOUT: &str = "RUSTFS_OBS_USE_STDOUT";
+pub const ENV_OBS_SAMPLE_RATIO: &str = "RUSTFS_OBS_SAMPLE_RATIO";
+pub const ENV_OBS_METER_INTERVAL: &str = "RUSTFS_OBS_METER_INTERVAL";
+pub const ENV_OBS_SERVICE_NAME: &str = "RUSTFS_OBS_SERVICE_NAME";
+pub const ENV_OBS_SERVICE_VERSION: &str = "RUSTFS_OBS_SERVICE_VERSION";
+pub const ENV_OBS_ENVIRONMENT: &str = "RUSTFS_OBS_ENVIRONMENT";
+pub const ENV_OBS_LOGGER_LEVEL: &str = "RUSTFS_OBS_LOGGER_LEVEL";
+pub const ENV_OBS_LOCAL_LOGGING_ENABLED: &str = "RUSTFS_OBS_LOCAL_LOGGING_ENABLED";
+pub const ENV_OBS_LOG_DIRECTORY: &str = "RUSTFS_OBS_LOG_DIRECTORY";
+pub const ENV_OBS_LOG_FILENAME: &str = "RUSTFS_OBS_LOG_FILENAME";
+pub const ENV_OBS_LOG_ROTATION_SIZE_MB: &str = "RUSTFS_OBS_LOG_ROTATION_SIZE_MB";
+pub const ENV_OBS_LOG_ROTATION_TIME: &str = "RUSTFS_OBS_LOG_ROTATION_TIME";
+pub const ENV_OBS_LOG_KEEP_FILES: &str = "RUSTFS_OBS_LOG_KEEP_FILES";

-impl ObservabilityConfig {
-    pub fn new() -> Self {
-        Self {
-            otel: OtelConfig::new(),
-            sinks: vec![SinkConfig::new()],
-            logger: Some(LoggerConfig::new()),
-        }
-    }
-}
+pub const ENV_AUDIT_LOGGER_QUEUE_CAPACITY: &str = "RUSTFS_AUDIT_LOGGER_QUEUE_CAPACITY";

-impl Default for ObservabilityConfig {
-    fn default() -> Self {
-        Self::new()
-    }
-}
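With `ObservabilityConfig` removed from this file, the new constants are plain lookup names for the environment. A hedged sketch of reading one of them (the 1.0 fallback is an assumption, not a value from this diff):

fn obs_sample_ratio() -> f64 {
    std::env::var(ENV_OBS_SAMPLE_RATIO)
        .ok()
        .and_then(|v| v.parse::<f64>().ok())
        .unwrap_or(1.0) // assumed default: sample everything
        .clamp(0.0, 1.0)
}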
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_observability_config_new() {
        let config = ObservabilityConfig::new();

        // Verify OTEL config is initialized
        assert!(config.otel.use_stdout.is_some(), "OTEL use_stdout should be configured");
        assert!(config.otel.sample_ratio.is_some(), "OTEL sample_ratio should be configured");
        assert!(config.otel.meter_interval.is_some(), "OTEL meter_interval should be configured");
        assert!(config.otel.service_name.is_some(), "OTEL service_name should be configured");
        assert!(config.otel.service_version.is_some(), "OTEL service_version should be configured");
        assert!(config.otel.environment.is_some(), "OTEL environment should be configured");
        assert!(config.otel.logger_level.is_some(), "OTEL logger_level should be configured");

        // Verify sinks are initialized
        assert!(!config.sinks.is_empty(), "Sinks should not be empty");
        assert_eq!(config.sinks.len(), 1, "Should have exactly one default sink");

        // Verify logger is initialized
        assert!(config.logger.is_some(), "Logger should be configured");
    }

    #[test]
    fn test_observability_config_default() {
        let config = ObservabilityConfig::default();
        let new_config = ObservabilityConfig::new();

        // Default should be equivalent to new()
        assert_eq!(config.sinks.len(), new_config.sinks.len());
        assert_eq!(config.logger.is_some(), new_config.logger.is_some());

        // OTEL configs should be equivalent
        assert_eq!(config.otel.use_stdout, new_config.otel.use_stdout);
        assert_eq!(config.otel.sample_ratio, new_config.otel.sample_ratio);
        assert_eq!(config.otel.meter_interval, new_config.otel.meter_interval);
        assert_eq!(config.otel.service_name, new_config.otel.service_name);
        assert_eq!(config.otel.service_version, new_config.otel.service_version);
        assert_eq!(config.otel.environment, new_config.otel.environment);
        assert_eq!(config.otel.logger_level, new_config.otel.logger_level);
    }

    #[test]
    fn test_observability_config_otel_defaults() {
        let config = ObservabilityConfig::new();

        // Test OTEL default values
        if let Some(_use_stdout) = config.otel.use_stdout {
            // Test boolean values - any boolean value is valid
        }

        if let Some(sample_ratio) = config.otel.sample_ratio {
            assert!((0.0..=1.0).contains(&sample_ratio), "Sample ratio should be between 0.0 and 1.0");
        }

        if let Some(meter_interval) = config.otel.meter_interval {
            assert!(meter_interval > 0, "Meter interval should be positive");
            assert!(meter_interval <= 3600, "Meter interval should be reasonable (≤ 1 hour)");
        }

        if let Some(service_name) = &config.otel.service_name {
            assert!(!service_name.is_empty(), "Service name should not be empty");
            assert!(!service_name.contains(' '), "Service name should not contain spaces");
        }

        if let Some(service_version) = &config.otel.service_version {
            assert!(!service_version.is_empty(), "Service version should not be empty");
        }

        if let Some(environment) = &config.otel.environment {
            assert!(!environment.is_empty(), "Environment should not be empty");
            assert!(
                ["development", "staging", "production", "test"].contains(&environment.as_str()),
                "Environment should be a standard environment name"
            );
        }

        if let Some(logger_level) = &config.otel.logger_level {
            assert!(
                ["trace", "debug", "info", "warn", "error"].contains(&logger_level.as_str()),
                "Logger level should be a valid tracing level"
            );
        }
    }

    #[test]
    fn test_observability_config_sinks() {
        let config = ObservabilityConfig::new();

        // Test default sink configuration
        assert_eq!(config.sinks.len(), 1, "Should have exactly one default sink");

        let _default_sink = &config.sinks[0];
        // Test that the sink has valid configuration
        // Note: We can't test specific values without knowing SinkConfig implementation
        // but we can test that it's properly initialized

        // Test that we can add more sinks
        let mut config_mut = config.clone();
        config_mut.sinks.push(SinkConfig::new());
        assert_eq!(config_mut.sinks.len(), 2, "Should be able to add more sinks");
    }

    #[test]
    fn test_observability_config_logger() {
        let config = ObservabilityConfig::new();

        // Test logger configuration
        assert!(config.logger.is_some(), "Logger should be configured by default");

        if let Some(_logger) = &config.logger {
            // Test that logger has valid configuration
            // Note: We can't test specific values without knowing LoggerConfig implementation
            // but we can test that it's properly initialized
        }

        // Test that logger can be disabled
        let mut config_mut = config.clone();
        config_mut.logger = None;
        assert!(config_mut.logger.is_none(), "Logger should be able to be disabled");
    }

    #[test]
    fn test_observability_config_serialization() {
        let config = ObservabilityConfig::new();

        // Test serialization to JSON
        let json_result = serde_json::to_string(&config);
        assert!(json_result.is_ok(), "Config should be serializable to JSON");

        let json_str = json_result.unwrap();
        assert!(!json_str.is_empty(), "Serialized JSON should not be empty");
        assert!(json_str.contains("otel"), "JSON should contain otel configuration");
        assert!(json_str.contains("sinks"), "JSON should contain sinks configuration");
        assert!(json_str.contains("logger"), "JSON should contain logger configuration");

        // Test deserialization from JSON
        let deserialized_result: Result<ObservabilityConfig, _> = serde_json::from_str(&json_str);
        assert!(deserialized_result.is_ok(), "Config should be deserializable from JSON");

        let deserialized_config = deserialized_result.unwrap();
        assert_eq!(deserialized_config.sinks.len(), config.sinks.len());
        assert_eq!(deserialized_config.logger.is_some(), config.logger.is_some());
    }

    #[test]
    fn test_observability_config_debug_format() {
        let config = ObservabilityConfig::new();

        let debug_str = format!("{config:?}");
        assert!(!debug_str.is_empty(), "Debug output should not be empty");
        assert!(debug_str.contains("ObservabilityConfig"), "Debug output should contain struct name");
        assert!(debug_str.contains("otel"), "Debug output should contain otel field");
        assert!(debug_str.contains("sinks"), "Debug output should contain sinks field");
        assert!(debug_str.contains("logger"), "Debug output should contain logger field");
    }

    #[test]
    fn test_observability_config_clone() {
        let config = ObservabilityConfig::new();
        let cloned_config = config.clone();

        // Test that clone creates an independent copy
        assert_eq!(cloned_config.sinks.len(), config.sinks.len());
        assert_eq!(cloned_config.logger.is_some(), config.logger.is_some());
        assert_eq!(cloned_config.otel.endpoint, config.otel.endpoint);
        assert_eq!(cloned_config.otel.use_stdout, config.otel.use_stdout);
        assert_eq!(cloned_config.otel.sample_ratio, config.otel.sample_ratio);
        assert_eq!(cloned_config.otel.meter_interval, config.otel.meter_interval);
        assert_eq!(cloned_config.otel.service_name, config.otel.service_name);
        assert_eq!(cloned_config.otel.service_version, config.otel.service_version);
        assert_eq!(cloned_config.otel.environment, config.otel.environment);
        assert_eq!(cloned_config.otel.logger_level, config.otel.logger_level);
    }

    #[test]
    fn test_observability_config_modification() {
        let mut config = ObservabilityConfig::new();

        // Test modifying OTEL endpoint
        let original_endpoint = config.otel.endpoint.clone();
        config.otel.endpoint = "http://localhost:4317".to_string();
        assert_ne!(config.otel.endpoint, original_endpoint);
        assert_eq!(config.otel.endpoint, "http://localhost:4317");

        // Test modifying sinks
        let original_sinks_len = config.sinks.len();
        config.sinks.push(SinkConfig::new());
        assert_eq!(config.sinks.len(), original_sinks_len + 1);

        // Test disabling logger
        config.logger = None;
        assert!(config.logger.is_none());
    }

    #[test]
    fn test_observability_config_edge_cases() {
        // Test with empty sinks
        let mut config = ObservabilityConfig::new();
        config.sinks.clear();
        assert!(config.sinks.is_empty(), "Sinks should be empty after clearing");

        // Test serialization with empty sinks
        let json_result = serde_json::to_string(&config);
        assert!(json_result.is_ok(), "Config with empty sinks should be serializable");

        // Test with no logger
        config.logger = None;
        let json_result = serde_json::to_string(&config);
        assert!(json_result.is_ok(), "Config with no logger should be serializable");
    }

    #[test]
    fn test_observability_config_memory_efficiency() {
        let config = ObservabilityConfig::new();

        // Test that config doesn't use excessive memory
        let config_size = std::mem::size_of_val(&config);
        assert!(config_size < 5000, "Config should not use excessive memory");

        // Test that endpoint string is not excessively long
        assert!(config.otel.endpoint.len() < 1000, "Endpoint should not be excessively long");

        // Test that collections are reasonably sized
        assert!(config.sinks.len() < 100, "Sinks collection should be reasonably sized");
    }

    #[test]
    fn test_observability_config_consistency() {
        // Create multiple configs and ensure they're consistent
        let config1 = ObservabilityConfig::new();
        let config2 = ObservabilityConfig::new();

        // Both configs should have the same default structure
        assert_eq!(config1.sinks.len(), config2.sinks.len());
        assert_eq!(config1.logger.is_some(), config2.logger.is_some());
        assert_eq!(config1.otel.use_stdout, config2.otel.use_stdout);
        assert_eq!(config1.otel.sample_ratio, config2.otel.sample_ratio);
        assert_eq!(config1.otel.meter_interval, config2.otel.meter_interval);
        assert_eq!(config1.otel.service_name, config2.otel.service_name);
        assert_eq!(config1.otel.service_version, config2.otel.service_version);
        assert_eq!(config1.otel.environment, config2.otel.environment);
        assert_eq!(config1.otel.logger_level, config2.otel.logger_level);
    }
}
// Default values for observability configuration
pub const DEFAULT_AUDIT_LOGGER_QUEUE_CAPACITY: usize = 10000;
@@ -12,62 +12,17 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use serde::{Deserialize, Serialize};
-use std::env;
+// RUSTFS_SINKS_FILE_PATH
+pub const ENV_SINKS_FILE_PATH: &str = "RUSTFS_SINKS_FILE_PATH";
+// RUSTFS_SINKS_FILE_BUFFER_SIZE
+pub const ENV_SINKS_FILE_BUFFER_SIZE: &str = "RUSTFS_SINKS_FILE_BUFFER_SIZE";
+// RUSTFS_SINKS_FILE_FLUSH_INTERVAL_MS
+pub const ENV_SINKS_FILE_FLUSH_INTERVAL_MS: &str = "RUSTFS_SINKS_FILE_FLUSH_INTERVAL_MS";
+// RUSTFS_SINKS_FILE_FLUSH_THRESHOLD
+pub const ENV_SINKS_FILE_FLUSH_THRESHOLD: &str = "RUSTFS_SINKS_FILE_FLUSH_THRESHOLD";

-/// File sink configuration
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct FileSink {
-    pub path: String,
-    #[serde(default = "default_buffer_size")]
-    pub buffer_size: Option<usize>,
-    #[serde(default = "default_flush_interval_ms")]
-    pub flush_interval_ms: Option<u64>,
-    #[serde(default = "default_flush_threshold")]
-    pub flush_threshold: Option<usize>,
-}
+pub const DEFAULT_SINKS_FILE_BUFFER_SIZE: usize = 8192;

-impl FileSink {
-    pub fn new() -> Self {
-        Self {
-            path: env::var("RUSTFS_SINKS_FILE_PATH")
-                .ok()
-                .filter(|s| !s.trim().is_empty())
-                .unwrap_or_else(default_path),
-            buffer_size: default_buffer_size(),
-            flush_interval_ms: default_flush_interval_ms(),
-            flush_threshold: default_flush_threshold(),
-        }
-    }
-}
+pub const DEFAULT_SINKS_FILE_FLUSH_INTERVAL_MS: u64 = 1000;

-impl Default for FileSink {
-    fn default() -> Self {
-        Self::new()
-    }
-}
-
-fn default_buffer_size() -> Option<usize> {
-    Some(8192)
-}
-fn default_flush_interval_ms() -> Option<u64> {
-    Some(1000)
-}
-fn default_flush_threshold() -> Option<usize> {
-    Some(100)
-}
-
-fn default_path() -> String {
-    let temp_dir = env::temp_dir().join("rustfs");
-
-    if let Err(e) = std::fs::create_dir_all(&temp_dir) {
-        eprintln!("Failed to create log directory: {e}");
-        return "rustfs/rustfs.log".to_string();
-    }
-
-    temp_dir
-        .join("rustfs.log")
-        .to_str()
-        .unwrap_or("rustfs/rustfs.log")
-        .to_string()
-}
+pub const DEFAULT_SINKS_FILE_FLUSH_THRESHOLD: usize = 100;
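What survives in this file is the env-name/default-value pairing; the serde defaults travel with the removed struct. A sketch of resolving the buffer size the way the old `default_buffer_size` did:

fn sinks_file_buffer_size() -> usize {
    std::env::var(ENV_SINKS_FILE_BUFFER_SIZE)
        .ok()
        .and_then(|v| v.parse::<usize>().ok())
        .unwrap_or(DEFAULT_SINKS_FILE_BUFFER_SIZE) // 8192, matching the removed serde default
}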
@@ -12,39 +12,16 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-use serde::{Deserialize, Serialize};
+// RUSTFS_SINKS_KAFKA_BROKERS
+pub const ENV_SINKS_KAFKA_BROKERS: &str = "RUSTFS_SINKS_KAFKA_BROKERS";
+pub const ENV_SINKS_KAFKA_TOPIC: &str = "RUSTFS_SINKS_KAFKA_TOPIC";
+// batch_size
+pub const ENV_SINKS_KAFKA_BATCH_SIZE: &str = "RUSTFS_SINKS_KAFKA_BATCH_SIZE";
+// batch_timeout_ms
+pub const ENV_SINKS_KAFKA_BATCH_TIMEOUT_MS: &str = "RUSTFS_SINKS_KAFKA_BATCH_TIMEOUT_MS";

-/// Kafka sink configuration
-#[derive(Debug, Clone, Serialize, Deserialize)]
-pub struct KafkaSink {
-    pub brokers: String,
-    pub topic: String,
-    #[serde(default = "default_batch_size")]
-    pub batch_size: Option<usize>,
-    #[serde(default = "default_batch_timeout_ms")]
-    pub batch_timeout_ms: Option<u64>,
-}
-
-impl KafkaSink {
-    pub fn new() -> Self {
-        Self {
-            brokers: "localhost:9092".to_string(),
-            topic: "rustfs".to_string(),
-            batch_size: default_batch_size(),
-            batch_timeout_ms: default_batch_timeout_ms(),
-        }
-    }
-}
-
-impl Default for KafkaSink {
-    fn default() -> Self {
-        Self::new()
-    }
-}
-
-fn default_batch_size() -> Option<usize> {
-    Some(100)
-}
-fn default_batch_timeout_ms() -> Option<u64> {
-    Some(1000)
-}
+// brokers
+pub const DEFAULT_SINKS_KAFKA_BROKERS: &str = "localhost:9092";
+pub const DEFAULT_SINKS_KAFKA_TOPIC: &str = "rustfs-sinks";
+pub const DEFAULT_SINKS_KAFKA_BATCH_SIZE: usize = 100;
+pub const DEFAULT_SINKS_KAFKA_BATCH_TIMEOUT_MS: u64 = 1000;
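One behavioral wrinkle worth flagging: the removed `KafkaSink::new` used topic "rustfs", while the new `DEFAULT_SINKS_KAFKA_TOPIC` is "rustfs-sinks". A resolution sketch in the same style as the file-sink helper:

fn kafka_sink_target() -> (String, String) {
    let brokers = std::env::var(ENV_SINKS_KAFKA_BROKERS)
        .unwrap_or_else(|_| DEFAULT_SINKS_KAFKA_BROKERS.to_string());
    let topic = std::env::var(ENV_SINKS_KAFKA_TOPIC)
        .unwrap_or_else(|_| DEFAULT_SINKS_KAFKA_TOPIC.to_string());
    (brokers, topic)
}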
@@ -12,10 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.

-pub(crate) mod config;
-pub(crate) mod file;
-pub(crate) mod kafka;
-pub(crate) mod logger;
-pub(crate) mod otel;
-pub(crate) mod sink;
-pub(crate) mod webhook;
+mod config;
+mod file;
+mod kafka;
+mod webhook;

+pub use config::*;
+pub use file::*;
+pub use kafka::*;
+pub use webhook::*;