Compare commits
343 commits: 1.0.0-alph ... 1.0.0-alph
(Commit list: 343 commits — this export captured only abbreviated commit SHAs; the author, date, and message columns are empty in the source page.)
@@ -1,27 +0,0 @@
FROM ubuntu:22.04

ENV LANG C.UTF-8

RUN sed -i s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g /etc/apt/sources.list

RUN apt-get clean && apt-get update && apt-get install wget git curl unzip gcc pkg-config libssl-dev lld libdbus-1-dev libwayland-dev libwebkit2gtk-4.1-dev libxdo-dev -y

# install protoc
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
    && unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
    && mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
    && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3

# install flatc
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
    && unzip Linux.flatc.binary.g++-13.zip \
    && mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip

# install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

COPY .docker/cargo.config.toml /root/.cargo/config.toml

WORKDIR /root/s3-rustfs

CMD [ "bash", "-c", "while true; do sleep 1; done" ]
@@ -1,32 +0,0 @@
FROM rockylinux:9.3 AS builder

ENV LANG C.UTF-8

RUN sed -e 's|^mirrorlist=|#mirrorlist=|g' \
    -e 's|^#baseurl=http://dl.rockylinux.org/$contentdir|baseurl=https://mirrors.ustc.edu.cn/rocky|g' \
    -i.bak \
    /etc/yum.repos.d/rocky-extras.repo \
    /etc/yum.repos.d/rocky.repo

RUN dnf makecache

RUN yum install wget git unzip gcc openssl-devel pkgconf-pkg-config -y

# install protoc
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
    && unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
    && mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
    && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3

# install flatc
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
    && unzip Linux.flatc.binary.g++-13.zip \
    && mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc \
    && rm -rf Linux.flatc.binary.g++-13.zip

# install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

COPY .docker/cargo.config.toml /root/.cargo/config.toml

WORKDIR /root/s3-rustfs
@@ -1,25 +0,0 @@
FROM ubuntu:22.04

ENV LANG C.UTF-8

RUN sed -i s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g /etc/apt/sources.list

RUN apt-get clean && apt-get update && apt-get install wget git curl unzip gcc pkg-config libssl-dev lld libdbus-1-dev libwayland-dev libwebkit2gtk-4.1-dev libxdo-dev -y

# install protoc
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
    && unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
    && mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
    && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3

# install flatc
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
    && unzip Linux.flatc.binary.g++-13.zip \
    && mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip

# install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

COPY .docker/cargo.config.toml /root/.cargo/config.toml

WORKDIR /root/s3-rustfs
.docker/README.md (new file, 261 lines)
@@ -0,0 +1,261 @@
# RustFS Docker Images

This directory contains Docker configuration files and supporting infrastructure for building and running RustFS container images.

## 📁 Directory Structure

```
rustfs/
├── Dockerfile              # Production image (Alpine + pre-built binaries)
├── Dockerfile.source       # Development image (Debian + source build)
├── docker-buildx.sh        # Multi-architecture build script
├── Makefile                # Build automation with simplified commands
└── .docker/                # Supporting infrastructure
    ├── observability/      # Monitoring and observability configs
    ├── compose/            # Docker Compose configurations
    ├── mqtt/               # MQTT broker configs
    └── openobserve-otel/   # OpenObserve + OpenTelemetry configs
```

## 🎯 Image Variants

### Core Images

| Image | Base OS | Build Method | Size | Use Case |
|-------|---------|--------------|------|----------|
| `production` (default) | Alpine 3.18 | GitHub Releases | Smallest | Production deployment |
| `source` | Debian Bookworm | Source build | Medium | Custom builds with cross-compilation |
| `dev` | Debian Bookworm | Development tools | Large | Interactive development |

## 🚀 Usage Examples

### Quick Start (Production)

```bash
# Default production image (Alpine + GitHub Releases)
docker run -p 9000:9000 rustfs/rustfs:latest

# Specific version
docker run -p 9000:9000 rustfs/rustfs:1.2.3
```

### Complete Tag Strategy Examples

```bash
# Stable Releases
docker run rustfs/rustfs:1.2.3              # Main version (production)
docker run rustfs/rustfs:1.2.3-production   # Explicit production variant
docker run rustfs/rustfs:1.2.3-source       # Source build variant
docker run rustfs/rustfs:latest             # Latest stable

# Prerelease Versions
docker run rustfs/rustfs:1.3.0-alpha.2      # Specific alpha version
docker run rustfs/rustfs:alpha              # Latest alpha
docker run rustfs/rustfs:beta               # Latest beta
docker run rustfs/rustfs:rc                 # Latest release candidate

# Development Versions
docker run rustfs/rustfs:dev                # Latest main branch development
docker run rustfs/rustfs:dev-13e4a0b        # Specific commit
docker run rustfs/rustfs:dev-latest         # Latest development
docker run rustfs/rustfs:main-latest        # Main branch latest
```

### Development Environment

```bash
# Quick setup using Makefile (recommended)
make docker-dev-local      # Build development image locally
make dev-env-start         # Start development container

# Manual Docker commands
docker run -it -v $(pwd):/workspace -p 9000:9000 rustfs/rustfs:latest-dev

# Build from source locally
docker build -f Dockerfile.source -t rustfs:custom .

# Development with hot reload
docker-compose up rustfs-dev
```

## 🏗️ Build Arguments and Scripts

### Using Makefile Commands (Recommended)

The easiest way to build images is with the simplified Makefile commands:

```bash
# Development images (build from source)
make docker-dev-local                        # Build for local use (single arch)
make docker-dev                              # Build multi-arch (for CI/CD)
make docker-dev-push REGISTRY=xxx            # Build and push to registry

# Production images (using pre-built binaries)
make docker-buildx                           # Build multi-arch production images
make docker-buildx-push                      # Build and push production images
make docker-buildx-version VERSION=v1.0.0    # Build specific version

# Development environment
make dev-env-start                           # Start development container
make dev-env-stop                            # Stop development container
make dev-env-restart                         # Restart development container

# Help
make help-docker                             # Show all Docker-related commands
```

### Using docker-buildx.sh (Advanced)

For direct script usage and advanced scenarios:

```bash
# Build latest version for all architectures
./docker-buildx.sh

# Build and push to registry
./docker-buildx.sh --push

# Build specific version
./docker-buildx.sh --release v1.2.3

# Build and push specific version
./docker-buildx.sh --release v1.2.3 --push
```

### Manual Docker Builds

All images support dynamic version selection:

```bash
# Build production image with latest release
docker build --build-arg RELEASE="latest" -t rustfs:latest .

# Build from source with specific target
docker build -f Dockerfile.source \
  --build-arg TARGETPLATFORM="linux/amd64" \
  -t rustfs:source .

# Development build
docker build -f Dockerfile.source -t rustfs:dev .
```

## 🔧 Binary Download Sources

### Unified GitHub Releases

The production image downloads pre-built binaries from GitHub Releases for reliability and transparency:

- ✅ **production** → GitHub Releases API with automatic latest detection
- ✅ **Checksum verification** → SHA256SUMS validation when available
- ✅ **Multi-architecture** → Supports amd64 and arm64
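As a rough illustration of that flow, the sketch below fetches a release archive and verifies it against a published SHA256SUMS file, skipping verification when no checksum file is available. The artifact name, download URL layout, and checksum file name are assumptions for illustration, not the exact commands in the real Dockerfile:

```bash
#!/usr/bin/env bash
# Hypothetical download-and-verify step (names and URLs are assumed, not confirmed).
set -euo pipefail

VERSION="1.2.3"
ARCH="x86_64"                                # or aarch64
PKG="rustfs-linux-${ARCH}-v${VERSION}.zip"   # assumed artifact naming
BASE="https://github.com/rustfs/rustfs/releases/download/${VERSION}"

curl -fsSL -o "${PKG}" "${BASE}/${PKG}"
# Checksums are only validated "when available", so a missing file is tolerated.
curl -fsSL -o SHA256SUMS "${BASE}/SHA256SUMS" || true

if [ -s SHA256SUMS ]; then
    grep " ${PKG}\$" SHA256SUMS | sha256sum -c -
fi

unzip -o "${PKG}" -d /usr/local/bin/
```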
### Source Build

The source variant compiles from source code with advanced features:

- 🔧 **Cross-compilation** → Supports multiple target platforms via `TARGETPLATFORM`
- ⚡ **Build caching** → sccache for faster compilation
- 🎯 **Optimized builds** → Release optimizations with LTO and symbol stripping

## 📋 Architecture Support

All variants support multi-architecture builds:

- **linux/amd64** (x86_64)
- **linux/arm64** (aarch64)

Architecture is automatically detected during build using Docker's `TARGETARCH` build argument.
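A minimal sketch of that detection, written as the body of a hypothetical `RUN` step (after `ARG TARGETARCH`); the `x86_64`/`aarch64` artifact names are an assumption, not taken from the real Dockerfile:

```bash
# Map Docker's TARGETARCH value to the artifact architecture name (assumed scheme).
case "${TARGETARCH}" in
    amd64) RUSTFS_ARCH="x86_64" ;;
    arm64) RUSTFS_ARCH="aarch64" ;;
    *) echo "unsupported architecture: ${TARGETARCH}" >&2; exit 1 ;;
esac
echo "selected artifact architecture: ${RUSTFS_ARCH}"
```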
## 🔐 Security Features

- **Checksum Verification**: Production image verifies SHA256SUMS when available
- **Non-root User**: All images run as user `rustfs` (UID 1000)
- **Minimal Runtime**: Production image only includes necessary dependencies
- **Secure Defaults**: No hardcoded credentials or keys
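For reference, a minimal sketch of how such a non-root user might be created in an Alpine-based image; the exact user and group setup in the real Dockerfile may differ:

```bash
# Hypothetical Alpine RUN step: create the unprivileged rustfs user (UID/GID 1000).
addgroup -g 1000 rustfs \
    && adduser -D -u 1000 -G rustfs rustfs
# The image would then switch to it with a `USER rustfs` instruction.
```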
## 🛠️ Development Workflow

### Quick Start with Makefile (Recommended)

```bash
# 1. Start development environment
make dev-env-start

# 2. Your development container is now running with:
#    - Port 9000 exposed for RustFS
#    - Port 9010 exposed for the admin console
#    - The current directory mounted as /workspace

# 3. Stop when done
make dev-env-stop
```

### Manual Development Setup

```bash
# Build development image from source
make docker-dev-local

# Or use traditional Docker commands
docker build -f Dockerfile.source -t rustfs:dev .

# Run with development tools
docker run -it -v $(pwd):/workspace -p 9000:9000 rustfs:dev bash

# Or use docker-compose for complex setups
docker-compose up rustfs-dev
```

### Common Development Tasks

```bash
# Build and test locally
make build               # Build binary natively
make docker-dev-local    # Build development Docker image
make test                # Run tests
make fmt                 # Format code
make clippy              # Run linter

# Get help
make help                # General help
make help-docker         # Docker-specific help
make help-build          # Build-specific help
```

## 🚀 CI/CD Integration

The project uses GitHub Actions for automated multi-architecture Docker builds:

### Automated Builds

- **Tags**: Automatic builds triggered on version tags (e.g., `v1.2.3`)
- **Main Branch**: Development builds with `dev-latest` and `main-latest` tags
- **Pull Requests**: Test builds without registry push

### Build Variants

Each build creates three image variants:

- `rustfs/rustfs:v1.2.3` (production - Alpine-based)
- `rustfs/rustfs:v1.2.3-source` (source build - Debian-based)
- `rustfs/rustfs:v1.2.3-dev` (development - Debian-based with tools)

### Manual Builds

Trigger custom builds via GitHub Actions (a hedged CLI sketch follows below):

```bash
# Use workflow_dispatch to build specific versions
# Available options: latest, main-latest, dev-latest, v1.2.3, dev-abc123
```
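As an illustration only — the workflow file name and input name below are assumptions, not confirmed by this repository — a manual dispatch from the GitHub CLI might look like:

```bash
# Hypothetical manual trigger via the GitHub CLI (workflow and input names are assumed).
gh workflow run docker.yml --ref main -f version=latest
```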
## 📦 Supporting Infrastructure

The `.docker/` directory contains supporting configuration files:

- **observability/** - Prometheus, Grafana, OpenTelemetry configs
- **compose/** - Multi-service Docker Compose setups
- **mqtt/** - MQTT broker configurations
- **openobserve-otel/** - Log aggregation and tracing setup

See the individual README files in each subdirectory for specific usage instructions.
@@ -1,19 +0,0 @@
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

[source.crates-io]
registry = "https://github.com/rust-lang/crates.io-index"

[net]
git-fetch-with-cli = true
.docker/compose/README.md (new file, 80 lines)
@@ -0,0 +1,80 @@
# Docker Compose Configurations

This directory contains specialized Docker Compose configurations for different use cases.

## 📁 Configuration Files

This directory contains specialized Docker Compose configurations and their associated Dockerfiles, keeping related files organized together.

### Main Configuration (Root Directory)

- **`../../docker-compose.yml`** - **Default Production Setup**
  - Complete production-ready configuration
  - Includes RustFS server + full observability stack
  - Supports multiple profiles: `dev`, `observability`, `cache`, `proxy`
  - Recommended for most users

### Specialized Configurations

- **`docker-compose.cluster.yaml`** - **Distributed Testing**
  - 4-node cluster setup for testing distributed storage
  - Uses locally compiled binaries
  - Simulates a multi-node environment
  - Ideal for development and cluster testing

- **`docker-compose.observability.yaml`** - **Observability Focus**
  - Specialized setup for testing observability features
  - Includes OpenTelemetry, Jaeger, Prometheus, Loki, Grafana
  - Uses `../../Dockerfile.source` for builds
  - Perfect for observability development

## 🚀 Usage Examples

### Production Setup

```bash
# Start main service
docker-compose up -d

# Start with development profile
docker-compose --profile dev up -d

# Start with full observability
docker-compose --profile observability up -d
```

### Cluster Testing

```bash
# Build and start 4-node cluster (run from project root)
cd .docker/compose
docker-compose -f docker-compose.cluster.yaml up -d

# Or run directly from project root
docker-compose -f .docker/compose/docker-compose.cluster.yaml up -d
```

### Observability Testing

```bash
# Start observability-focused environment (run from project root)
cd .docker/compose
docker-compose -f docker-compose.observability.yaml up -d

# Or run directly from project root
docker-compose -f .docker/compose/docker-compose.observability.yaml up -d
```

## 🔧 Configuration Overview

| Configuration | Nodes | Storage | Observability | Use Case |
|---------------|-------|---------|---------------|----------|
| **Main** | 1 | Volume mounts | Full stack | Production |
| **Cluster** | 4 | HTTP endpoints | Basic | Testing |
| **Observability** | 4 | Local data | Advanced | Development |

## 📝 Notes

- Always ensure you have built the required binaries before starting cluster tests (see the build sketch after this list)
- The main configuration is sufficient for most use cases
- Specialized configurations are for specific testing scenarios
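A minimal sketch of that prerequisite build, assuming the cluster compose file mounts the `x86_64-unknown-linux-gnu` release binary as shown later in this diff:

```bash
# Build the release binary the cluster compose file expects to mount
# (target/x86_64-unknown-linux-gnu/release/rustfs).
rustup target add x86_64-unknown-linux-gnu
cargo build --release --target x86_64-unknown-linux-gnu -p rustfs --bins
```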
@@ -14,70 +14,69 @@
|
||||
|
||||
services:
|
||||
node0:
|
||||
image: rustfs:v1 # 替换为你的镜像名称和标签
|
||||
image: rustfs/rustfs:latest # Replace with your image name and label
|
||||
container_name: node0
|
||||
hostname: node0
|
||||
environment:
|
||||
- RUSTFS_VOLUMES=http://node{0...3}:9000/data/rustfs{0...3}
|
||||
- RUSTFS_ADDRESS=0.0.0.0:9000
|
||||
- RUSTFS_CONSOLE_ENABLE=true
|
||||
- RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
|
||||
- RUSTFS_ACCESS_KEY=rustfsadmin
|
||||
- RUSTFS_SECRET_KEY=rustfsadmin
|
||||
platform: linux/amd64
|
||||
ports:
|
||||
- "9000:9000" # 映射宿主机的 9001 端口到容器的 9000 端口
|
||||
- "8000:9001" # 映射宿主机的 9001 端口到容器的 9000 端口
|
||||
- "9000:9000" # Map port 9001 of the host to port 9000 of the container
|
||||
volumes:
|
||||
- ./target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
|
||||
# - ./data/node0:/data # 将当前路径挂载到容器内的 /root/data
|
||||
- ../../target/x86_64-unknown-linux-gnu/release/rustfs:/app/rustfs
|
||||
command: "/app/rustfs"
|
||||
|
||||
node1:
|
||||
image: rustfs:v1
|
||||
image: rustfs/rustfs:latest
|
||||
container_name: node1
|
||||
hostname: node1
|
||||
environment:
|
||||
- RUSTFS_VOLUMES=http://node{0...3}:9000/data/rustfs{0...3}
|
||||
- RUSTFS_ADDRESS=0.0.0.0:9000
|
||||
- RUSTFS_CONSOLE_ENABLE=true
|
||||
- RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
|
||||
- RUSTFS_ACCESS_KEY=rustfsadmin
|
||||
- RUSTFS_SECRET_KEY=rustfsadmin
|
||||
platform: linux/amd64
|
||||
ports:
|
||||
- "9001:9000" # 映射宿主机的 9002 端口到容器的 9000 端口
|
||||
- "9001:9000" # Map port 9002 of the host to port 9000 of the container
|
||||
volumes:
|
||||
- ./target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
|
||||
# - ./data/node1:/data
|
||||
- ../../target/x86_64-unknown-linux-gnu/release/rustfs:/app/rustfs
|
||||
command: "/app/rustfs"
|
||||
|
||||
node2:
|
||||
image: rustfs:v1
|
||||
image: rustfs/rustfs:latest
|
||||
container_name: node2
|
||||
hostname: node2
|
||||
environment:
|
||||
- RUSTFS_VOLUMES=http://node{0...3}:9000/data/rustfs{0...3}
|
||||
- RUSTFS_ADDRESS=0.0.0.0:9000
|
||||
- RUSTFS_CONSOLE_ENABLE=true
|
||||
- RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
|
||||
- RUSTFS_ACCESS_KEY=rustfsadmin
|
||||
- RUSTFS_SECRET_KEY=rustfsadmin
|
||||
platform: linux/amd64
|
||||
ports:
|
||||
- "9002:9000" # 映射宿主机的 9003 端口到容器的 9000 端口
|
||||
- "9002:9000" # Map port 9003 of the host to port 9000 of the container
|
||||
volumes:
|
||||
- ./target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
|
||||
# - ./data/node2:/data
|
||||
- ../../target/x86_64-unknown-linux-gnu/release/rustfs:/app/rustfs
|
||||
command: "/app/rustfs"
|
||||
|
||||
node3:
|
||||
image: rustfs:v1
|
||||
image: rustfs/rustfs:latest
|
||||
container_name: node3
|
||||
hostname: node3
|
||||
environment:
|
||||
- RUSTFS_VOLUMES=http://node{0...3}:9000/data/rustfs{0...3}
|
||||
- RUSTFS_ADDRESS=0.0.0.0:9000
|
||||
- RUSTFS_CONSOLE_ENABLE=true
|
||||
- RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
|
||||
- RUSTFS_ACCESS_KEY=rustfsadmin
|
||||
- RUSTFS_SECRET_KEY=rustfsadmin
|
||||
platform: linux/amd64
|
||||
ports:
|
||||
- "9003:9000" # 映射宿主机的 9004 端口到容器的 9000 端口
|
||||
- "9003:9000" # Map port 9004 of the host to port 9000 of the container
|
||||
volumes:
|
||||
- ./target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
|
||||
# - ./data/node3:/data
|
||||
- ../../target/x86_64-unknown-linux-gnu/release/rustfs:/app/rustfs
|
||||
command: "/app/rustfs"
|
||||
@@ -14,11 +14,11 @@
|
||||
|
||||
services:
|
||||
otel-collector:
|
||||
image: ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-contrib:0.127.0
|
||||
image: otel/opentelemetry-collector-contrib:0.129.1
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
volumes:
|
||||
- ./.docker/observability/otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml
|
||||
- ../../.docker/observability/otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml
|
||||
ports:
|
||||
- 1888:1888
|
||||
- 8888:8888
|
||||
@@ -30,7 +30,7 @@ services:
|
||||
networks:
|
||||
- rustfs-network
|
||||
jaeger:
|
||||
image: jaegertracing/jaeger:2.6.0
|
||||
image: jaegertracing/jaeger:2.8.0
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
ports:
|
||||
@@ -40,11 +40,11 @@ services:
|
||||
networks:
|
||||
- rustfs-network
|
||||
prometheus:
|
||||
image: prom/prometheus:v3.4.1
|
||||
image: prom/prometheus:v3.4.2
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
volumes:
|
||||
- ./.docker/observability/prometheus.yml:/etc/prometheus/prometheus.yml
|
||||
- ../../.docker/observability/prometheus.yml:/etc/prometheus/prometheus.yml
|
||||
ports:
|
||||
- "9090:9090"
|
||||
networks:
|
||||
@@ -54,16 +54,16 @@ services:
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
volumes:
|
||||
- ./.docker/observability/loki-config.yaml:/etc/loki/local-config.yaml
|
||||
- ../../.docker/observability/loki-config.yaml:/etc/loki/local-config.yaml
|
||||
ports:
|
||||
- "3100:3100"
|
||||
command: -config.file=/etc/loki/local-config.yaml
|
||||
networks:
|
||||
- rustfs-network
|
||||
grafana:
|
||||
image: grafana/grafana:12.0.1
|
||||
image: grafana/grafana:12.0.2
|
||||
ports:
|
||||
- "3000:3000" # Web UI
|
||||
- "3000:3000" # Web UI
|
||||
environment:
|
||||
- GF_SECURITY_ADMIN_PASSWORD=admin
|
||||
- TZ=Asia/Shanghai
|
||||
@@ -72,85 +72,69 @@ services:
|
||||
|
||||
node1:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile.obs
|
||||
context: ../..
|
||||
dockerfile: Dockerfile.source
|
||||
container_name: node1
|
||||
environment:
|
||||
- RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
|
||||
- RUSTFS_ADDRESS=:9000
|
||||
- RUSTFS_CONSOLE_ENABLE=true
|
||||
- RUSTFS_CONSOLE_ADDRESS=:9002
|
||||
- RUSTFS_OBS_CONFIG=/etc/observability/config/obs-multi.toml
|
||||
- RUSTFS_OBS_ENDPOINT=http://otel-collector:4317
|
||||
- RUSTFS_OBS_LOGGER_LEVEL=debug
|
||||
platform: linux/amd64
|
||||
ports:
|
||||
- "9001:9000" # 映射宿主机的 9001 端口到容器的 9000 端口
|
||||
- "9101:9002"
|
||||
volumes:
|
||||
# - ./data:/root/data # 将当前路径挂载到容器内的 /root/data
|
||||
- ./.docker/observability/config:/etc/observability/config
|
||||
- "9001:9000" # Map port 9001 of the host to port 9000 of the container
|
||||
networks:
|
||||
- rustfs-network
|
||||
|
||||
node2:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile.obs
|
||||
context: ../..
|
||||
dockerfile: Dockerfile.source
|
||||
container_name: node2
|
||||
environment:
|
||||
- RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
|
||||
- RUSTFS_ADDRESS=:9000
|
||||
- RUSTFS_CONSOLE_ENABLE=true
|
||||
- RUSTFS_CONSOLE_ADDRESS=:9002
|
||||
- RUSTFS_OBS_CONFIG=/etc/observability/config/obs-multi.toml
|
||||
- RUSTFS_OBS_ENDPOINT=http://otel-collector:4317
|
||||
- RUSTFS_OBS_LOGGER_LEVEL=debug
|
||||
platform: linux/amd64
|
||||
ports:
|
||||
- "9002:9000" # 映射宿主机的 9002 端口到容器的 9000 端口
|
||||
- "9102:9002"
|
||||
volumes:
|
||||
# - ./data:/root/data
|
||||
- ./.docker/observability/config:/etc/observability/config
|
||||
- "9002:9000" # Map port 9002 of the host to port 9000 of the container
|
||||
networks:
|
||||
- rustfs-network
|
||||
|
||||
node3:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile.obs
|
||||
context: ../..
|
||||
dockerfile: Dockerfile.source
|
||||
container_name: node3
|
||||
environment:
|
||||
- RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
|
||||
- RUSTFS_ADDRESS=:9000
|
||||
- RUSTFS_CONSOLE_ENABLE=true
|
||||
- RUSTFS_CONSOLE_ADDRESS=:9002
|
||||
- RUSTFS_OBS_CONFIG=/etc/observability/config/obs-multi.toml
|
||||
- RUSTFS_OBS_ENDPOINT=http://otel-collector:4317
|
||||
- RUSTFS_OBS_LOGGER_LEVEL=debug
|
||||
platform: linux/amd64
|
||||
ports:
|
||||
- "9003:9000" # 映射宿主机的 9003 端口到容器的 9000 端口
|
||||
- "9103:9002"
|
||||
volumes:
|
||||
# - ./data:/root/data
|
||||
- ./.docker/observability/config:/etc/observability/config
|
||||
- "9003:9000" # Map port 9003 of the host to port 9000 of the container
|
||||
networks:
|
||||
- rustfs-network
|
||||
|
||||
node4:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile.obs
|
||||
context: ../..
|
||||
dockerfile: Dockerfile.source
|
||||
container_name: node4
|
||||
environment:
|
||||
- RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
|
||||
- RUSTFS_ADDRESS=:9000
|
||||
- RUSTFS_CONSOLE_ENABLE=true
|
||||
- RUSTFS_CONSOLE_ADDRESS=:9002
|
||||
- RUSTFS_OBS_CONFIG=/etc/observability/config/obs-multi.toml
|
||||
- RUSTFS_OBS_ENDPOINT=http://otel-collector:4317
|
||||
- RUSTFS_OBS_LOGGER_LEVEL=debug
|
||||
platform: linux/amd64
|
||||
ports:
|
||||
- "9004:9000" # 映射宿主机的 9004 端口到容器的 9000 端口
|
||||
- "9104:9002"
|
||||
volumes:
|
||||
# - ./data:/root/data
|
||||
- ./.docker/observability/config:/etc/observability/config
|
||||
- "9004:9000" # Map port 9004 of the host to port 9000 of the container
|
||||
networks:
|
||||
- rustfs-network
|
||||
|
||||
@@ -13,24 +13,49 @@
|
||||
# limitations under the License.
|
||||
|
||||
services:
|
||||
|
||||
tempo-init:
|
||||
image: busybox:latest
|
||||
command: ["sh", "-c", "chown -R 10001:10001 /var/tempo"]
|
||||
volumes:
|
||||
- ./tempo-data:/var/tempo
|
||||
user: root
|
||||
networks:
|
||||
- otel-network
|
||||
restart: "no"
|
||||
|
||||
tempo:
|
||||
image: grafana/tempo:latest
|
||||
user: "10001" # The container must be started with root to execute chown in the script
|
||||
command: [ "-config.file=/etc/tempo.yaml" ] # This is passed as a parameter to the entry point script
|
||||
volumes:
|
||||
- ./tempo.yaml:/etc/tempo.yaml:ro
|
||||
- ./tempo-data:/var/tempo
|
||||
ports:
|
||||
- "3200:3200" # tempo
|
||||
- "24317:4317" # otlp grpc
|
||||
restart: unless-stopped
|
||||
networks:
|
||||
- otel-network
|
||||
|
||||
otel-collector:
|
||||
image: ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-contrib:0.127.0
|
||||
image: otel/opentelemetry-collector-contrib:0.129.1
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
volumes:
|
||||
- ./otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml
|
||||
ports:
|
||||
- 1888:1888
|
||||
- 8888:8888
|
||||
- 8889:8889
|
||||
- 13133:13133
|
||||
- 4317:4317
|
||||
- 4318:4318
|
||||
- 55679:55679
|
||||
- "1888:1888"
|
||||
- "8888:8888"
|
||||
- "8889:8889"
|
||||
- "13133:13133"
|
||||
- "4317:4317"
|
||||
- "4318:4318"
|
||||
- "55679:55679"
|
||||
networks:
|
||||
- otel-network
|
||||
jaeger:
|
||||
image: jaegertracing/jaeger:2.7.0
|
||||
image: jaegertracing/jaeger:2.8.0
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
ports:
|
||||
@@ -40,7 +65,7 @@ services:
|
||||
networks:
|
||||
- otel-network
|
||||
prometheus:
|
||||
image: prom/prometheus:v3.4.1
|
||||
image: prom/prometheus:v3.4.2
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
volumes:
|
||||
@@ -64,6 +89,8 @@ services:
|
||||
image: grafana/grafana:12.0.2
|
||||
ports:
|
||||
- "3000:3000" # Web UI
|
||||
volumes:
|
||||
- ./grafana-datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml
|
||||
environment:
|
||||
- GF_SECURITY_ADMIN_PASSWORD=admin
|
||||
- TZ=Asia/Shanghai
|
||||
@@ -76,4 +103,4 @@ networks:
|
||||
driver: bridge
|
||||
name: "network_otel_config"
|
||||
driver_opts:
|
||||
com.docker.network.enable_ipv6: "true"
|
||||
com.docker.network.enable_ipv6: "true"
|
||||
|
||||
32
.docker/observability/grafana-datasources.yaml
Normal file
@@ -0,0 +1,32 @@
|
||||
apiVersion: 1
|
||||
|
||||
datasources:
|
||||
- name: Prometheus
|
||||
type: prometheus
|
||||
uid: prometheus
|
||||
access: proxy
|
||||
orgId: 1
|
||||
url: http://prometheus:9090
|
||||
basicAuth: false
|
||||
isDefault: false
|
||||
version: 1
|
||||
editable: false
|
||||
jsonData:
|
||||
httpMethod: GET
|
||||
- name: Tempo
|
||||
type: tempo
|
||||
access: proxy
|
||||
orgId: 1
|
||||
url: http://tempo:3200
|
||||
basicAuth: false
|
||||
isDefault: true
|
||||
version: 1
|
||||
editable: false
|
||||
apiVersion: 1
|
||||
uid: tempo
|
||||
jsonData:
|
||||
httpMethod: GET
|
||||
serviceMap:
|
||||
datasourceUid: prometheus
|
||||
streamingEnabled:
|
||||
search: true
|
||||
@@ -33,14 +33,18 @@ exporters:
    endpoint: "jaeger:4317"          # Jaeger's OTLP gRPC endpoint
    tls:
      insecure: true                 # TLS disabled in development; configure certificates in production
  otlp/tempo:                        # OTLP exporter for trace data
    endpoint: "tempo:4317"           # Tempo's OTLP gRPC endpoint
    tls:
      insecure: true                 # TLS disabled in development; configure certificates in production
  prometheus:                        # Prometheus exporter for metrics data
    endpoint: "0.0.0.0:8889"         # Prometheus scrape endpoint
    namespace: "rustfs"              # metric prefix
    send_timestamps: true            # send timestamps
    # enable_open_metrics: true
  loki:                              # Loki exporter for log data
  otlphttp/loki:                     # Loki exporter for log data
    # endpoint: "http://loki:3100/otlp/v1/logs"
    endpoint: "http://loki:3100/loki/api/v1/push"
    endpoint: "http://loki:3100/otlp/v1/logs"
    tls:
      insecure: true
extensions:
@@ -53,7 +57,7 @@ service:
    traces:
      receivers: [ otlp ]
      processors: [ memory_limiter,batch ]
      exporters: [ otlp/traces ]
      exporters: [ otlp/traces,otlp/tempo ]
    metrics:
      receivers: [ otlp ]
      processors: [ batch ]
@@ -61,11 +65,17 @@ service:
    logs:
      receivers: [ otlp ]
      processors: [ batch ]
      exporters: [ loki ]
      exporters: [ otlphttp/loki ]
  telemetry:
    logs:
      level: "info"                  # Collector log level
    metrics:
      address: "0.0.0.0:8888"        # Collector's own metrics exposure
      level: "detailed"              # can be basic, normal, or detailed
      readers:
        - periodic:
            exporter:
              otlp:
                protocol: http/protobuf
                endpoint: http://otel-collector:4318
@@ -18,8 +18,11 @@ global:
scrape_configs:
  - job_name: 'otel-collector'
    static_configs:
      - targets: ['otel-collector:8888'] # scrape metrics from the Collector
      - targets: [ 'otel-collector:8888' ] # scrape metrics from the Collector
  - job_name: 'otel-metrics'
    static_configs:
      - targets: ['otel-collector:8889'] # application metrics
      - targets: [ 'otel-collector:8889' ] # application metrics
  - job_name: 'tempo'
    static_configs:
      - targets: [ 'tempo:3200' ]
1
.docker/observability/tempo-data/.gitignore
vendored
Normal file
@@ -0,0 +1 @@
|
||||
*
|
||||
55
.docker/observability/tempo.yaml
Normal file
@@ -0,0 +1,55 @@
|
||||
stream_over_http_enabled: true
|
||||
server:
|
||||
http_listen_port: 3200
|
||||
log_level: info
|
||||
|
||||
query_frontend:
|
||||
search:
|
||||
duration_slo: 5s
|
||||
throughput_bytes_slo: 1.073741824e+09
|
||||
metadata_slo:
|
||||
duration_slo: 5s
|
||||
throughput_bytes_slo: 1.073741824e+09
|
||||
trace_by_id:
|
||||
duration_slo: 5s
|
||||
|
||||
distributor:
|
||||
receivers:
|
||||
otlp:
|
||||
protocols:
|
||||
grpc:
|
||||
endpoint: "tempo:4317"
|
||||
|
||||
ingester:
|
||||
max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally
|
||||
|
||||
compactor:
|
||||
compaction:
|
||||
block_retention: 1h # overall Tempo trace retention. set for demo purposes
|
||||
|
||||
metrics_generator:
|
||||
registry:
|
||||
external_labels:
|
||||
source: tempo
|
||||
cluster: docker-compose
|
||||
storage:
|
||||
path: /var/tempo/generator/wal
|
||||
remote_write:
|
||||
- url: http://prometheus:9090/api/v1/write
|
||||
send_exemplars: true
|
||||
traces_storage:
|
||||
path: /var/tempo/generator/traces
|
||||
|
||||
storage:
|
||||
trace:
|
||||
backend: local # backend configuration to use
|
||||
wal:
|
||||
path: /var/tempo/wal # where to store the wal locally
|
||||
local:
|
||||
path: /var/tempo/blocks
|
||||
|
||||
overrides:
|
||||
defaults:
|
||||
metrics_generator:
|
||||
processors: [ service-graphs, span-metrics, local-blocks ] # enables metrics generator
|
||||
generate_native_histograms: both
|
||||
1
.dockerignore
Normal file
@@ -0,0 +1 @@
|
||||
target
|
||||
15
.github/actions/setup/action.yml
vendored
@@ -60,15 +60,7 @@ runs:
|
||||
pkg-config \
|
||||
libssl-dev
|
||||
|
||||
- name: Cache protoc binary
|
||||
id: cache-protoc
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ~/.local/bin/protoc
|
||||
key: protoc-31.1-${{ runner.os }}-${{ runner.arch }}
|
||||
|
||||
- name: Install protoc
|
||||
if: steps.cache-protoc.outputs.cache-hit != 'true'
|
||||
uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
version: "31.1"
|
||||
@@ -94,6 +86,9 @@ runs:
|
||||
if: inputs.install-cross-tools == 'true'
|
||||
uses: taiki-e/install-action@cargo-zigbuild
|
||||
|
||||
- name: Install cargo-nextest
|
||||
uses: taiki-e/install-action@cargo-nextest
|
||||
|
||||
- name: Setup Rust cache
|
||||
uses: Swatinem/rust-cache@v2
|
||||
with:
|
||||
@@ -101,7 +96,3 @@ runs:
|
||||
cache-on-failure: true
|
||||
shared-key: ${{ inputs.cache-shared-key }}
|
||||
save-if: ${{ inputs.cache-save-if }}
|
||||
# Cache workspace dependencies
|
||||
workspaces: |
|
||||
. -> target
|
||||
cli/rustfs-gui -> cli/rustfs-gui/target
|
||||
|
||||
4
.github/pull_request_template.md
vendored
@@ -19,9 +19,7 @@ Pull Request Template for RustFS
|
||||
|
||||
## Checklist
|
||||
- [ ] I have read and followed the [CONTRIBUTING.md](CONTRIBUTING.md) guidelines
|
||||
- [ ] Code is formatted with `cargo fmt --all`
|
||||
- [ ] Passed `cargo clippy --all-targets --all-features -- -D warnings`
|
||||
- [ ] Passed `cargo check --all-targets`
|
||||
- [ ] Passed `make pre-commit`
|
||||
- [ ] Added/updated necessary tests
|
||||
- [ ] Documentation updated (if needed)
|
||||
- [ ] CI/CD passed (if applicable)
|
||||
|
||||
11
.github/workflows/audit.yml
vendored
@@ -16,13 +16,13 @@ name: Security Audit
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
branches: [ main ]
|
||||
paths:
|
||||
- '**/Cargo.toml'
|
||||
- '**/Cargo.lock'
|
||||
- '.github/workflows/audit.yml'
|
||||
pull_request:
|
||||
branches: [main]
|
||||
branches: [ main ]
|
||||
paths:
|
||||
- '**/Cargo.toml'
|
||||
- '**/Cargo.lock'
|
||||
@@ -31,6 +31,9 @@ on:
|
||||
- cron: '0 0 * * 0' # Weekly on Sunday at midnight UTC
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
@@ -41,7 +44,7 @@ jobs:
|
||||
timeout-minutes: 15
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Install cargo-audit
|
||||
uses: taiki-e/install-action@v2
|
||||
@@ -69,7 +72,7 @@ jobs:
|
||||
pull-requests: write
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Dependency Review
|
||||
uses: actions/dependency-review-action@v4
|
||||
|
||||
838
.github/workflows/build.yml
vendored
@@ -12,12 +12,24 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Build and Release Workflow
|
||||
#
|
||||
# This workflow builds RustFS binaries and automatically triggers Docker image builds.
|
||||
#
|
||||
# Flow:
|
||||
# 1. Build binaries for multiple platforms
|
||||
# 2. Upload binaries to OSS storage
|
||||
# 3. Trigger docker.yml to build and push images using the uploaded binaries
|
||||
#
|
||||
# Manual Parameters:
|
||||
# - build_docker: Build and push Docker images (default: true)
|
||||
|
||||
name: Build and Release
|
||||
|
||||
on:
|
||||
push:
|
||||
tags: ["*"]
|
||||
branches: [main]
|
||||
tags: [ "*.*.*" ]
|
||||
branches: [ main ]
|
||||
paths-ignore:
|
||||
- "**.md"
|
||||
- "**.txt"
|
||||
@@ -33,7 +45,7 @@ on:
|
||||
- ".gitignore"
|
||||
- ".dockerignore"
|
||||
pull_request:
|
||||
branches: [main]
|
||||
branches: [ main ]
|
||||
paths-ignore:
|
||||
- "**.md"
|
||||
- "**.txt"
|
||||
@@ -52,12 +64,15 @@ on:
|
||||
- cron: "0 0 * * 0" # Weekly on Sunday at midnight UTC
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
force_build:
|
||||
description: "Force build even without changes"
|
||||
build_docker:
|
||||
description: "Build and push Docker images after binary build"
|
||||
required: false
|
||||
default: false
|
||||
default: true
|
||||
type: boolean
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
RUST_BACKTRACE: 1
|
||||
@@ -65,44 +80,83 @@ env:
|
||||
CARGO_INCREMENTAL: 0
|
||||
|
||||
jobs:
|
||||
# Second layer: Business logic level checks (handling build strategy)
|
||||
# Build strategy check - determine build type based on trigger
|
||||
build-check:
|
||||
name: Build Strategy Check
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
should_build: ${{ steps.check.outputs.should_build }}
|
||||
build_type: ${{ steps.check.outputs.build_type }}
|
||||
version: ${{ steps.check.outputs.version }}
|
||||
short_sha: ${{ steps.check.outputs.short_sha }}
|
||||
is_prerelease: ${{ steps.check.outputs.is_prerelease }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v5
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Determine build strategy
|
||||
id: check
|
||||
run: |
|
||||
should_build=false
|
||||
build_type="none"
|
||||
version=""
|
||||
short_sha=""
|
||||
is_prerelease=false
|
||||
|
||||
# Business logic: when we need to build
|
||||
if [[ "${{ github.event_name }}" == "schedule" ]] || \
|
||||
[[ "${{ github.event_name }}" == "workflow_dispatch" ]] || \
|
||||
[[ "${{ github.event.inputs.force_build }}" == "true" ]] || \
|
||||
[[ "${{ contains(github.event.head_commit.message, '--build') }}" == "true" ]]; then
|
||||
# Get short SHA for all builds
|
||||
short_sha=$(git rev-parse --short HEAD)
|
||||
|
||||
# Determine build type based on trigger
|
||||
if [[ "${{ startsWith(github.ref, 'refs/tags/') }}" == "true" ]]; then
|
||||
# Tag push - release or prerelease
|
||||
should_build=true
|
||||
tag_name="${GITHUB_REF#refs/tags/}"
|
||||
version="${tag_name}"
|
||||
|
||||
# Check if this is a prerelease
|
||||
if [[ "$tag_name" == *"alpha"* ]] || [[ "$tag_name" == *"beta"* ]] || [[ "$tag_name" == *"rc"* ]]; then
|
||||
build_type="prerelease"
|
||||
is_prerelease=true
|
||||
echo "🚀 Prerelease build detected: $tag_name"
|
||||
else
|
||||
build_type="release"
|
||||
echo "📦 Release build detected: $tag_name"
|
||||
fi
|
||||
elif [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
|
||||
# Main branch push - development build
|
||||
should_build=true
|
||||
build_type="development"
|
||||
fi
|
||||
|
||||
# Always build for tag pushes (version releases)
|
||||
if [[ "${{ startsWith(github.ref, 'refs/tags/') }}" == "true" ]]; then
|
||||
version="dev-${short_sha}"
|
||||
echo "🛠️ Development build detected"
|
||||
elif [[ "${{ github.event_name }}" == "schedule" ]] || \
|
||||
[[ "${{ github.event_name }}" == "workflow_dispatch" ]] || \
|
||||
[[ "${{ contains(github.event.head_commit.message, '--build') }}" == "true" ]]; then
|
||||
# Scheduled or manual build
|
||||
should_build=true
|
||||
build_type="release"
|
||||
echo "🏷️ Tag detected: forcing release build"
|
||||
build_type="development"
|
||||
version="dev-${short_sha}"
|
||||
echo "⚡ Manual/scheduled build detected"
|
||||
fi
|
||||
|
||||
echo "should_build=$should_build" >> $GITHUB_OUTPUT
|
||||
echo "build_type=$build_type" >> $GITHUB_OUTPUT
|
||||
echo "Build needed: $should_build (type: $build_type)"
|
||||
echo "version=$version" >> $GITHUB_OUTPUT
|
||||
echo "short_sha=$short_sha" >> $GITHUB_OUTPUT
|
||||
echo "is_prerelease=$is_prerelease" >> $GITHUB_OUTPUT
|
||||
|
||||
echo "📊 Build Summary:"
|
||||
echo " - Should build: $should_build"
|
||||
echo " - Build type: $build_type"
|
||||
echo " - Version: $version"
|
||||
echo " - Short SHA: $short_sha"
|
||||
echo " - Is prerelease: $is_prerelease"
|
||||
|
||||
# Build RustFS binaries
|
||||
build-rustfs:
|
||||
name: Build RustFS
|
||||
needs: [build-check]
|
||||
needs: [ build-check ]
|
||||
if: needs.build-check.outputs.should_build == 'true'
|
||||
runs-on: ${{ matrix.os }}
|
||||
timeout-minutes: 60
|
||||
@@ -121,6 +175,14 @@ jobs:
|
||||
target: aarch64-unknown-linux-musl
|
||||
cross: true
|
||||
platform: linux
|
||||
- os: ubuntu-latest
|
||||
target: x86_64-unknown-linux-gnu
|
||||
cross: false
|
||||
platform: linux
|
||||
- os: ubuntu-latest
|
||||
target: aarch64-unknown-linux-gnu
|
||||
cross: true
|
||||
platform: linux
|
||||
# macOS builds
|
||||
- os: macos-latest
|
||||
target: aarch64-apple-darwin
|
||||
@@ -130,18 +192,18 @@ jobs:
|
||||
target: x86_64-apple-darwin
|
||||
cross: false
|
||||
platform: macos
|
||||
# # Windows builds (temporarily disabled)
|
||||
# - os: windows-latest
|
||||
# target: x86_64-pc-windows-msvc
|
||||
# cross: false
|
||||
# platform: windows
|
||||
# - os: windows-latest
|
||||
# target: aarch64-pc-windows-msvc
|
||||
# cross: true
|
||||
# platform: windows
|
||||
# Windows builds (temporarily disabled)
|
||||
- os: windows-latest
|
||||
target: x86_64-pc-windows-msvc
|
||||
cross: false
|
||||
platform: windows
|
||||
#- os: windows-latest
|
||||
# target: aarch64-pc-windows-msvc
|
||||
# cross: true
|
||||
# platform: windows
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v5
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
@@ -156,6 +218,7 @@ jobs:
|
||||
install-cross-tools: ${{ matrix.cross }}
|
||||
|
||||
- name: Download static console assets
|
||||
shell: bash
|
||||
run: |
|
||||
mkdir -p ./rustfs/static
|
||||
if [[ "${{ matrix.platform }}" == "windows" ]]; then
|
||||
@@ -168,6 +231,7 @@ jobs:
|
||||
echo "// Static assets not available" > ./rustfs/static/empty.txt
|
||||
fi
|
||||
else
|
||||
chmod +w ./rustfs/static/LICENSE || true
|
||||
curl -L "https://dl.rustfs.com/artifacts/console/rustfs-console-latest.zip" \
|
||||
-o console.zip --retry 3 --retry-delay 5 --max-time 300
|
||||
if [[ $? -eq 0 ]]; then
|
||||
@@ -180,6 +244,7 @@ jobs:
|
||||
fi
|
||||
|
||||
- name: Build RustFS
|
||||
shell: bash
|
||||
run: |
|
||||
# Force rebuild by touching build.rs
|
||||
touch rustfs/build.rs
|
||||
@@ -190,7 +255,7 @@ jobs:
|
||||
cargo install cross --git https://github.com/cross-rs/cross
|
||||
cross build --release --target ${{ matrix.target }} -p rustfs --bins
|
||||
else
|
||||
# Use zigbuild for Linux ARM64
|
||||
# Use zigbuild for other cross-compilation
|
||||
cargo zigbuild --release --target ${{ matrix.target }} -p rustfs --bins
|
||||
fi
|
||||
else
|
||||
@@ -201,9 +266,65 @@ jobs:
|
||||
id: package
|
||||
shell: bash
|
||||
run: |
|
||||
PACKAGE_NAME="rustfs-${{ matrix.target }}"
|
||||
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
|
||||
VERSION="${{ needs.build-check.outputs.version }}"
|
||||
SHORT_SHA="${{ needs.build-check.outputs.short_sha }}"
|
||||
|
||||
# Create zip packages for all platforms
|
||||
# Extract platform and arch from target
|
||||
TARGET="${{ matrix.target }}"
|
||||
PLATFORM="${{ matrix.platform }}"
|
||||
# Map target to architecture and variant
|
||||
case "$TARGET" in
|
||||
*x86_64*musl*)
|
||||
ARCH="x86_64"
|
||||
VARIANT="musl"
|
||||
;;
|
||||
*x86_64*gnu*)
|
||||
ARCH="x86_64"
|
||||
VARIANT="gnu"
|
||||
;;
|
||||
*x86_64*)
|
||||
ARCH="x86_64"
|
||||
VARIANT=""
|
||||
;;
|
||||
*aarch64*musl*|*arm64*musl*)
|
||||
ARCH="aarch64"
|
||||
VARIANT="musl"
|
||||
;;
|
||||
*aarch64*gnu*|*arm64*gnu*)
|
||||
ARCH="aarch64"
|
||||
VARIANT="gnu"
|
||||
;;
|
||||
*aarch64*|*arm64*)
|
||||
ARCH="aarch64"
|
||||
VARIANT=""
|
||||
;;
|
||||
*armv7*)
|
||||
ARCH="armv7"
|
||||
VARIANT=""
|
||||
;;
|
||||
*)
|
||||
ARCH="unknown"
|
||||
VARIANT=""
|
||||
;;
|
||||
esac
|
||||
|
||||
# Generate package name based on build type
|
||||
if [[ -n "$VARIANT" ]]; then
|
||||
ARCH_WITH_VARIANT="${ARCH}-${VARIANT}"
|
||||
else
|
||||
ARCH_WITH_VARIANT="${ARCH}"
|
||||
fi
|
||||
|
||||
if [[ "$BUILD_TYPE" == "development" ]]; then
|
||||
# Development build: rustfs-${platform}-${arch}-${variant}-dev-${short_sha}.zip
|
||||
PACKAGE_NAME="rustfs-${PLATFORM}-${ARCH_WITH_VARIANT}-dev-${SHORT_SHA}"
|
||||
else
|
||||
# Release/Prerelease build: rustfs-${platform}-${arch}-${variant}-v${version}.zip
|
||||
PACKAGE_NAME="rustfs-${PLATFORM}-${ARCH_WITH_VARIANT}-v${VERSION}"
|
||||
fi
|
||||
|
||||
# Create zip packages for all platforms
|
||||
# Ensure zip is available
|
||||
if ! command -v zip &> /dev/null; then
|
||||
if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then
|
||||
@@ -212,27 +333,132 @@ jobs:
|
||||
fi
|
||||
|
||||
cd target/${{ matrix.target }}/release
|
||||
zip "../../../${PACKAGE_NAME}.zip" rustfs
|
||||
# Determine the binary name based on platform
|
||||
if [[ "${{ matrix.platform }}" == "windows" ]]; then
|
||||
BINARY_NAME="rustfs.exe"
|
||||
else
|
||||
BINARY_NAME="rustfs"
|
||||
fi
|
||||
|
||||
# Verify the binary exists before packaging
|
||||
if [[ ! -f "$BINARY_NAME" ]]; then
|
||||
echo "❌ Binary $BINARY_NAME not found in $(pwd)"
|
||||
if [[ "${{ matrix.platform }}" == "windows" ]]; then
|
||||
dir
|
||||
else
|
||||
ls -la
|
||||
fi
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Universal packaging function
|
||||
package_zip() {
|
||||
local src=$1
|
||||
local dst=$2
|
||||
if [[ "${{ matrix.platform }}" == "windows" ]]; then
|
||||
# Windows uses PowerShell Compress-Archive
|
||||
powershell -Command "Compress-Archive -Path '$src' -DestinationPath '$dst' -Force"
|
||||
elif command -v zip &> /dev/null; then
|
||||
# Unix systems use zip command
|
||||
zip "$dst" "$src"
|
||||
else
|
||||
echo "❌ No zip utility available"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Create the zip package
|
||||
echo "Start packaging: $BINARY_NAME -> ../../../${PACKAGE_NAME}.zip"
|
||||
package_zip "$BINARY_NAME" "../../../${PACKAGE_NAME}.zip"
|
||||
|
||||
cd ../../..
|
||||
|
||||
# Verify the package was created
|
||||
if [[ -f "${PACKAGE_NAME}.zip" ]]; then
|
||||
echo "✅ Package created successfully: ${PACKAGE_NAME}.zip"
|
||||
if [[ "${{ matrix.platform }}" == "windows" ]]; then
|
||||
dir
|
||||
else
|
||||
ls -lh ${PACKAGE_NAME}.zip
|
||||
fi
|
||||
else
|
||||
echo "❌ Failed to create package: ${PACKAGE_NAME}.zip"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Create latest version files right after the main package
|
||||
LATEST_FILES=""
|
||||
if [[ "$BUILD_TYPE" == "release" ]] || [[ "$BUILD_TYPE" == "prerelease" ]]; then
|
||||
# Create latest version filename
|
||||
# Convert from rustfs-linux-x86_64-musl-v1.0.0 to rustfs-linux-x86_64-musl-latest
|
||||
LATEST_FILE="${PACKAGE_NAME%-v*}-latest.zip"

echo "🔄 Creating latest version: ${PACKAGE_NAME}.zip -> $LATEST_FILE"
cp "${PACKAGE_NAME}.zip" "$LATEST_FILE"

if [[ -f "$LATEST_FILE" ]]; then
echo "✅ Latest version created: $LATEST_FILE"
LATEST_FILES="$LATEST_FILE"
fi
elif [[ "$BUILD_TYPE" == "development" ]]; then
# Development builds (only main branch triggers development builds)
# Create main-latest version filename
# Convert from rustfs-linux-x86_64-dev-abc123 to rustfs-linux-x86_64-main-latest
MAIN_LATEST_FILE="${PACKAGE_NAME%-dev-*}-main-latest.zip"

echo "🔄 Creating main-latest version: ${PACKAGE_NAME}.zip -> $MAIN_LATEST_FILE"
cp "${PACKAGE_NAME}.zip" "$MAIN_LATEST_FILE"

if [[ -f "$MAIN_LATEST_FILE" ]]; then
echo "✅ Main-latest version created: $MAIN_LATEST_FILE"
LATEST_FILES="$MAIN_LATEST_FILE"

# Also create a generic main-latest for Docker builds (Linux only)
if [[ "${{ matrix.platform }}" == "linux" ]]; then
DOCKER_MAIN_LATEST_FILE="rustfs-linux-${ARCH_WITH_VARIANT}-main-latest.zip"

echo "🔄 Creating Docker main-latest version: ${PACKAGE_NAME}.zip -> $DOCKER_MAIN_LATEST_FILE"
cp "${PACKAGE_NAME}.zip" "$DOCKER_MAIN_LATEST_FILE"

if [[ -f "$DOCKER_MAIN_LATEST_FILE" ]]; then
echo "✅ Docker main-latest version created: $DOCKER_MAIN_LATEST_FILE"
LATEST_FILES="$LATEST_FILES $DOCKER_MAIN_LATEST_FILE"
fi
fi
fi
fi

echo "package_name=${PACKAGE_NAME}" >> $GITHUB_OUTPUT
echo "package_file=${PACKAGE_NAME}.zip" >> $GITHUB_OUTPUT
echo "Package created: ${PACKAGE_NAME}.zip"
echo "latest_files=${LATEST_FILES}" >> $GITHUB_OUTPUT
echo "build_type=${BUILD_TYPE}" >> $GITHUB_OUTPUT
echo "version=${VERSION}" >> $GITHUB_OUTPUT

- name: Upload artifacts
echo "📦 Package created: ${PACKAGE_NAME}.zip"
if [[ -n "$LATEST_FILES" ]]; then
echo "📦 Latest files created: $LATEST_FILES"
fi
echo "🔧 Build type: ${BUILD_TYPE}"
echo "📊 Version: ${VERSION}"

- name: Upload to GitHub artifacts
uses: actions/upload-artifact@v4
with:
name: ${{ steps.package.outputs.package_name }}
path: ${{ steps.package.outputs.package_file }}
path: "rustfs-*.zip"
retention-days: ${{ startsWith(github.ref, 'refs/tags/') && 30 || 7 }}
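# Note: GitHub Actions expressions have no ternary operator; the `cond && A || B` pattern above
# yields A (30) when the ref is a tag and B (7) otherwise, so tag builds keep artifacts longer.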

- name: Upload to Aliyun OSS
if: needs.build-check.outputs.build_type == 'release' && env.OSS_ACCESS_KEY_ID != ''
if: env.OSS_ACCESS_KEY_ID != '' && (needs.build-check.outputs.build_type == 'release' || needs.build-check.outputs.build_type == 'prerelease' || needs.build-check.outputs.build_type == 'development')
env:
OSS_ACCESS_KEY_ID: ${{ secrets.ALICLOUDOSS_KEY_ID }}
OSS_ACCESS_KEY_SECRET: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
OSS_REGION: cn-beijing
OSS_ENDPOINT: https://oss-cn-beijing.aliyuncs.com
shell: bash
run: |
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"

# Install ossutil (platform-specific)
OSSUTIL_VERSION="2.1.1"
case "${{ matrix.platform }}" in
@@ -250,6 +476,7 @@ jobs:
mv "${OSSUTIL_DIR}/ossutil" /usr/local/bin/
rm -rf "$OSSUTIL_DIR" "$OSSUTIL_ZIP"
chmod +x /usr/local/bin/ossutil
OSSUTIL_BIN=ossutil
;;
macos)
if [[ "$(uname -m)" == "arm64" ]]; then
@@ -267,226 +494,357 @@ jobs:
chmod +x /usr/local/bin/ossutil
OSSUTIL_BIN=ossutil
;;
# windows)
# Windows ossutil is not supported yet
# ;;
windows)
OSSUTIL_ZIP="ossutil-${OSSUTIL_VERSION}-windows-amd64.zip"
OSSUTIL_DIR="ossutil-${OSSUTIL_VERSION}-windows-amd64"

curl -o "$OSSUTIL_ZIP" "https://gosspublic.alicdn.com/ossutil/v2/${OSSUTIL_VERSION}/${OSSUTIL_ZIP}"
unzip "$OSSUTIL_ZIP"
mv "${OSSUTIL_DIR}/ossutil.exe" ./ossutil.exe
rm -rf "$OSSUTIL_DIR" "$OSSUTIL_ZIP"
OSSUTIL_BIN=./ossutil.exe
;;
esac

# Upload the package file directly to OSS
echo "Uploading ${{ steps.package.outputs.package_file }} to OSS..."
$OSSUTIL_BIN cp "${{ steps.package.outputs.package_file }}" oss://rustfs-artifacts/artifacts/rustfs/ --force

# Create latest.json (only for the first Linux build to avoid duplication)
if [[ "${{ matrix.target }}" == "x86_64-unknown-linux-musl" ]]; then
VERSION="${GITHUB_REF#refs/tags/v}"
echo "{\"version\":\"${VERSION}\",\"release_date\":\"$(date -u +%Y-%m-%dT%H:%M:%SZ)\"}" > latest.json
$OSSUTIL_BIN cp latest.json oss://rustfs-version/latest.json --force
# Determine upload path based on build type
if [[ "$BUILD_TYPE" == "development" ]]; then
OSS_PATH="oss://rustfs-artifacts/artifacts/rustfs/dev/"
echo "📤 Uploading development build to OSS dev directory"
else
OSS_PATH="oss://rustfs-artifacts/artifacts/rustfs/release/"
echo "📤 Uploading release build to OSS release directory"
fi

# Release management
release:
name: GitHub Release
needs: [build-check, build-rustfs]
if: always() && needs.build-check.outputs.build_type == 'release'
# Upload all rustfs zip files to OSS using glob pattern
echo "📤 Uploading all rustfs-*.zip files to $OSS_PATH..."
for zip_file in rustfs-*.zip; do
if [[ -f "$zip_file" ]]; then
echo "Uploading: $zip_file to $OSS_PATH..."
$OSSUTIL_BIN cp "$zip_file" "$OSS_PATH" --force
echo "✅ Uploaded: $zip_file"
fi
done

echo "✅ Upload completed successfully"

# Build summary
build-summary:
name: Build Summary
needs: [ build-check, build-rustfs ]
if: always() && needs.build-check.outputs.should_build == 'true'
runs-on: ubuntu-latest
steps:
- name: Build completion summary
shell: bash
run: |
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
VERSION="${{ needs.build-check.outputs.version }}"

echo "🎉 Build completed successfully!"
echo "📦 Build type: $BUILD_TYPE"
echo "🔢 Version: $VERSION"
echo ""

# Check build status
BUILD_STATUS="${{ needs.build-rustfs.result }}"

echo "📊 Build Results:"
echo " 📦 All platforms: $BUILD_STATUS"
echo ""

case "$BUILD_TYPE" in
"development")
echo "🛠️ Development build artifacts have been uploaded to OSS dev directory"
echo "⚠️ This is a development build - not suitable for production use"
;;
"release")
echo "🚀 Release build artifacts have been uploaded to OSS release directory"
echo "✅ This build is ready for production use"
echo "🏷️ GitHub Release will be created in this workflow"
;;
"prerelease")
echo "🧪 Prerelease build artifacts have been uploaded to OSS release directory"
echo "⚠️ This is a prerelease build - use with caution"
echo "🏷️ GitHub Release will be created in this workflow"
;;
esac

echo ""
echo "🐳 Docker Images:"
if [[ "${{ github.event.inputs.build_docker }}" == "false" ]]; then
echo "⏭️ Docker image build was skipped (binary only build)"
elif [[ "$BUILD_STATUS" == "success" ]]; then
echo "🔄 Docker images will be built and pushed automatically via workflow_run event"
else
echo "❌ Docker image build will be skipped due to build failure"
fi

# Create GitHub Release (only for tag pushes)
create-release:
name: Create GitHub Release
needs: [ build-check, build-rustfs ]
if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
runs-on: ubuntu-latest
permissions:
contents: write
outputs:
release_id: ${{ steps.create.outputs.release_id }}
release_url: ${{ steps.create.outputs.release_url }}
steps:
- name: Checkout repository
uses: actions/checkout@v5
with:
fetch-depth: 0

- name: Create GitHub Release
id: create
env:
GH_TOKEN: ${{ github.token }}
shell: bash
run: |
TAG="${{ needs.build-check.outputs.version }}"
VERSION="${{ needs.build-check.outputs.version }}"
IS_PRERELEASE="${{ needs.build-check.outputs.is_prerelease }}"
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"

# Determine release type for title
if [[ "$BUILD_TYPE" == "prerelease" ]]; then
if [[ "$TAG" == *"alpha"* ]]; then
RELEASE_TYPE="alpha"
elif [[ "$TAG" == *"beta"* ]]; then
RELEASE_TYPE="beta"
elif [[ "$TAG" == *"rc"* ]]; then
RELEASE_TYPE="rc"
else
RELEASE_TYPE="prerelease"
fi
else
RELEASE_TYPE="release"
fi

# Check if release already exists
if gh release view "$TAG" >/dev/null 2>&1; then
echo "Release $TAG already exists"
RELEASE_ID=$(gh release view "$TAG" --json databaseId --jq '.databaseId')
RELEASE_URL=$(gh release view "$TAG" --json url --jq '.url')
else
# Get release notes from tag message
RELEASE_NOTES=$(git tag -l --format='%(contents)' "${TAG}")
if [[ -z "$RELEASE_NOTES" || "$RELEASE_NOTES" =~ ^[[:space:]]*$ ]]; then
if [[ "$IS_PRERELEASE" == "true" ]]; then
RELEASE_NOTES="Pre-release ${VERSION} (${RELEASE_TYPE})"
else
RELEASE_NOTES="Release ${VERSION}"
fi
fi

# Create release title
if [[ "$IS_PRERELEASE" == "true" ]]; then
TITLE="RustFS $VERSION (${RELEASE_TYPE})"
else
TITLE="RustFS $VERSION"
fi

# Create the release
PRERELEASE_FLAG=""
if [[ "$IS_PRERELEASE" == "true" ]]; then
PRERELEASE_FLAG="--prerelease"
fi

gh release create "$TAG" \
--title "$TITLE" \
--notes "$RELEASE_NOTES" \
$PRERELEASE_FLAG \
--draft

RELEASE_ID=$(gh release view "$TAG" --json databaseId --jq '.databaseId')
RELEASE_URL=$(gh release view "$TAG" --json url --jq '.url')
fi

echo "release_id=$RELEASE_ID" >> $GITHUB_OUTPUT
echo "release_url=$RELEASE_URL" >> $GITHUB_OUTPUT
echo "Created release: $RELEASE_URL"

# Prepare and upload release assets
upload-release-assets:
name: Upload Release Assets
needs: [ build-check, build-rustfs, create-release ]
if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
runs-on: ubuntu-latest
permissions:
contents: write
actions: read
steps:
- name: Checkout repository
uses: actions/checkout@v5

- name: Download all build artifacts
uses: actions/download-artifact@v5
with:
path: ./artifacts
pattern: rustfs-*
merge-multiple: true

- name: Prepare release assets
id: prepare
shell: bash
run: |
VERSION="${{ needs.build-check.outputs.version }}"
TAG="${{ needs.build-check.outputs.version }}"

mkdir -p ./release-assets

# Copy and verify artifacts (including latest files created during build)
ASSETS_COUNT=0
for file in ./artifacts/*.zip; do
if [[ -f "$file" ]]; then
cp "$file" ./release-assets/
ASSETS_COUNT=$((ASSETS_COUNT + 1))
fi
done

if [[ $ASSETS_COUNT -eq 0 ]]; then
echo "❌ No artifacts found!"
exit 1
fi

cd ./release-assets

# Generate checksums for all files (including latest versions)
if ls *.zip >/dev/null 2>&1; then
sha256sum *.zip > SHA256SUMS
sha512sum *.zip > SHA512SUMS
fi

# Create signature placeholder files
for file in *.zip; do
echo "# Signature for $file" > "${file}.asc"
echo "# GPG signature will be added in future versions" >> "${file}.asc"
done

echo "📦 Prepared assets:"
ls -la

echo "🔢 Total asset count: $ASSETS_COUNT"

- name: Upload to GitHub Release
env:
GH_TOKEN: ${{ github.token }}
shell: bash
run: |
TAG="${{ needs.build-check.outputs.version }}"

cd ./release-assets

# Upload all files
for file in *; do
if [[ -f "$file" ]]; then
echo "📤 Uploading $file..."
gh release upload "$TAG" "$file" --clobber
fi
done

echo "✅ All assets uploaded successfully"

# Update latest.json for stable releases only
update-latest-version:
name: Update Latest Version
needs: [ build-check, upload-release-assets ]
if: startsWith(github.ref, 'refs/tags/')
runs-on: ubuntu-latest
steps:
- name: Update latest.json
env:
OSS_ACCESS_KEY_ID: ${{ secrets.ALICLOUDOSS_KEY_ID }}
OSS_ACCESS_KEY_SECRET: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
OSS_REGION: cn-beijing
OSS_ENDPOINT: https://oss-cn-beijing.aliyuncs.com
shell: bash
run: |
if [[ -z "$OSS_ACCESS_KEY_ID" ]]; then
echo "⚠️ OSS credentials not available, skipping latest.json update"
exit 0
fi

VERSION="${{ needs.build-check.outputs.version }}"
TAG="${{ needs.build-check.outputs.version }}"

# Install ossutil
OSSUTIL_VERSION="2.1.1"
OSSUTIL_ZIP="ossutil-${OSSUTIL_VERSION}-linux-amd64.zip"
OSSUTIL_DIR="ossutil-${OSSUTIL_VERSION}-linux-amd64"

curl -o "$OSSUTIL_ZIP" "https://gosspublic.alicdn.com/ossutil/v2/${OSSUTIL_VERSION}/${OSSUTIL_ZIP}"
unzip "$OSSUTIL_ZIP"
mv "${OSSUTIL_DIR}/ossutil" /usr/local/bin/
rm -rf "$OSSUTIL_DIR" "$OSSUTIL_ZIP"
chmod +x /usr/local/bin/ossutil

# Create latest.json
cat > latest.json << EOF
{
"version": "${VERSION}",
"tag": "${TAG}",
"release_date": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
"release_type": "stable",
"download_url": "https://github.com/${{ github.repository }}/releases/tag/${TAG}"
}
EOF
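# For reference only, with hypothetical values VERSION=1.0.0 and TAG=1.0.0 the heredoc above
# would render roughly as:
# {"version":"1.0.0","tag":"1.0.0","release_date":"2025-01-01T00:00:00Z","release_type":"stable","download_url":"https://github.com/rustfs/rustfs/releases/tag/1.0.0"}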

# Upload to OSS
ossutil cp latest.json oss://rustfs-version/latest.json --force

echo "✅ Updated latest.json for stable release $VERSION"

# Publish release (remove draft status)
publish-release:
name: Publish Release
needs: [ build-check, create-release, upload-release-assets ]
if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0
uses: actions/checkout@v5

- name: Download all artifacts
uses: actions/download-artifact@v4
with:
path: ./release-artifacts

- name: Prepare release assets
id: release_prep
run: |
VERSION="${GITHUB_REF#refs/tags/}"
VERSION_CLEAN="${VERSION#v}"

echo "version=${VERSION}" >> $GITHUB_OUTPUT
echo "version_clean=${VERSION_CLEAN}" >> $GITHUB_OUTPUT

# Organize artifacts
mkdir -p ./release-files

# Copy all artifacts (.zip files)
find ./release-artifacts -name "*.zip" -exec cp {} ./release-files/ \;

# Generate checksums for all files
cd ./release-files
if ls *.zip >/dev/null 2>&1; then
sha256sum *.zip >> SHA256SUMS
sha512sum *.zip >> SHA512SUMS
fi
cd ..

# Display what we're releasing
echo "=== Release Files ==="
ls -la ./release-files/

- name: Create GitHub Release
- name: Update release notes and publish
env:
GH_TOKEN: ${{ github.token }}
shell: bash
run: |
VERSION="${{ steps.release_prep.outputs.version }}"
VERSION_CLEAN="${{ steps.release_prep.outputs.version_clean }}"
TAG="${{ needs.build-check.outputs.version }}"
VERSION="${{ needs.build-check.outputs.version }}"
IS_PRERELEASE="${{ needs.build-check.outputs.is_prerelease }}"
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"

# Check if release already exists
if gh release view "$VERSION" >/dev/null 2>&1; then
echo "Release $VERSION already exists, skipping creation"
# Determine release type
if [[ "$BUILD_TYPE" == "prerelease" ]]; then
if [[ "$TAG" == *"alpha"* ]]; then
RELEASE_TYPE="alpha"
elif [[ "$TAG" == *"beta"* ]]; then
RELEASE_TYPE="beta"
elif [[ "$TAG" == *"rc"* ]]; then
RELEASE_TYPE="rc"
else
RELEASE_TYPE="prerelease"
fi
else
# Get release notes from tag message
RELEASE_NOTES=$(git tag -l --format='%(contents)' "${VERSION}")
if [[ -z "$RELEASE_NOTES" || "$RELEASE_NOTES" =~ ^[[:space:]]*$ ]]; then
RELEASE_NOTES="Release ${VERSION_CLEAN}"
fi

# Determine if this is a prerelease
PRERELEASE_FLAG=""
if [[ "$VERSION" == *"alpha"* ]] || [[ "$VERSION" == *"beta"* ]] || [[ "$VERSION" == *"rc"* ]]; then
PRERELEASE_FLAG="--prerelease"
fi

# Create the release only if it doesn't exist
gh release create "$VERSION" \
--title "RustFS $VERSION_CLEAN" \
--notes "$RELEASE_NOTES" \
$PRERELEASE_FLAG
RELEASE_TYPE="release"
fi

- name: Upload release assets
env:
GH_TOKEN: ${{ github.token }}
run: |
VERSION="${{ steps.release_prep.outputs.version }}"

cd ./release-files

# Upload all binary files
for file in *.zip; do
if [[ -f "$file" ]]; then
echo "Uploading $file..."
gh release upload "$VERSION" "$file" --clobber
# Get original release notes from tag
ORIGINAL_NOTES=$(git tag -l --format='%(contents)' "${TAG}")
if [[ -z "$ORIGINAL_NOTES" || "$ORIGINAL_NOTES" =~ ^[[:space:]]*$ ]]; then
if [[ "$IS_PRERELEASE" == "true" ]]; then
ORIGINAL_NOTES="Pre-release ${VERSION} (${RELEASE_TYPE})"
else
ORIGINAL_NOTES="Release ${VERSION}"
fi
done

# Upload checksum files
if [[ -f "SHA256SUMS" ]]; then
echo "Uploading SHA256SUMS..."
gh release upload "$VERSION" "SHA256SUMS" --clobber
fi

if [[ -f "SHA512SUMS" ]]; then
echo "Uploading SHA512SUMS..."
gh release upload "$VERSION" "SHA512SUMS" --clobber
fi
# Publish the release (remove draft status)
gh release edit "$TAG" --draft=false

- name: Update release notes
env:
GH_TOKEN: ${{ github.token }}
run: |
VERSION="${{ steps.release_prep.outputs.version }}"
VERSION_CLEAN="${{ steps.release_prep.outputs.version_clean }}"

# Check if release already has custom notes (not auto-generated)
EXISTING_NOTES=$(gh release view "$VERSION" --json body --jq '.body' 2>/dev/null || echo "")

# Only update if release notes are empty or auto-generated
if [[ -z "$EXISTING_NOTES" ]] || [[ "$EXISTING_NOTES" == *"Release ${VERSION_CLEAN}"* ]]; then
echo "Updating release notes for $VERSION"

# Get original release notes from tag
ORIGINAL_NOTES=$(git tag -l --format='%(contents)' "${VERSION}")
if [[ -z "$ORIGINAL_NOTES" || "$ORIGINAL_NOTES" =~ ^[[:space:]]*$ ]]; then
ORIGINAL_NOTES="Release ${VERSION_CLEAN}"
fi

# Create comprehensive release notes
cat > enhanced_notes.md << EOF
## RustFS ${VERSION_CLEAN}

${ORIGINAL_NOTES}

---

### 🚀 Quick Download

**Linux (Static Binaries - No Dependencies):**
\`\`\`bash
# x86_64 (Intel/AMD)
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-x86_64-unknown-linux-musl.zip
unzip rustfs-x86_64-unknown-linux-musl.zip
sudo mv rustfs /usr/local/bin/

# ARM64 (Graviton, Apple Silicon VMs)
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-aarch64-unknown-linux-musl.zip
unzip rustfs-aarch64-unknown-linux-musl.zip
sudo mv rustfs /usr/local/bin/
\`\`\`

**macOS:**
\`\`\`bash
# Apple Silicon (M1/M2/M3)
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-aarch64-apple-darwin.zip
unzip rustfs-aarch64-apple-darwin.zip
sudo mv rustfs /usr/local/bin/

# Intel
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-x86_64-apple-darwin.zip
unzip rustfs-x86_64-apple-darwin.zip
sudo mv rustfs /usr/local/bin/
\`\`\`

### 📁 Available Downloads

| Platform | Architecture | File | Description |
|----------|-------------|------|-------------|
| Linux | x86_64 | \`rustfs-x86_64-unknown-linux-musl.zip\` | Static binary, no dependencies |
| Linux | ARM64 | \`rustfs-aarch64-unknown-linux-musl.zip\` | Static binary, no dependencies |
| macOS | Apple Silicon | \`rustfs-aarch64-apple-darwin.zip\` | Native binary, ZIP archive |
| macOS | Intel | \`rustfs-x86_64-apple-darwin.zip\` | Native binary, ZIP archive |

### 🔐 Verification

Download checksums and verify your download:
\`\`\`bash
# Download checksums
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/SHA256SUMS

# Verify (Linux)
sha256sum -c SHA256SUMS --ignore-missing

# Verify (macOS)
shasum -a 256 -c SHA256SUMS --ignore-missing
\`\`\`

### 🛠️ System Requirements

- **Linux**: Any distribution with glibc 2.17+ (CentOS 7+, Ubuntu 16.04+)
- **macOS**: 10.15+ (Catalina or later)
- **Windows**: Windows 10 version 1809 or later

### 📚 Documentation

- [Installation Guide](https://github.com/rustfs/rustfs#installation)
- [Quick Start](https://github.com/rustfs/rustfs#quick-start)
- [Configuration](https://github.com/rustfs/rustfs/blob/main/docs/)
- [API Documentation](https://docs.rs/rustfs)

### 🆘 Support

- 🐛 [Report Issues](https://github.com/rustfs/rustfs/issues)
- 💬 [Community Discussions](https://github.com/rustfs/rustfs/discussions)
- 📖 [Documentation](https://github.com/rustfs/rustfs/tree/main/docs)
EOF

# Update the release with enhanced notes
gh release edit "$VERSION" --notes-file enhanced_notes.md
else
echo "Release $VERSION already has custom notes, skipping update to preserve manual edits"
fi
echo "🎉 Released $TAG successfully!"
echo "📄 Release URL: ${{ needs.create-release.outputs.release_url }}"

29
.github/workflows/ci.yml
vendored
@@ -16,7 +16,7 @@ name: Continuous Integration
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
branches: [ main ]
|
||||
paths-ignore:
|
||||
- "**.md"
|
||||
- "**.txt"
|
||||
@@ -36,7 +36,7 @@ on:
|
||||
- ".github/workflows/audit.yml"
|
||||
- ".github/workflows/performance.yml"
|
||||
pull_request:
|
||||
branches: [main]
|
||||
branches: [ main ]
|
||||
paths-ignore:
|
||||
- "**.md"
|
||||
- "**.txt"
|
||||
@@ -59,6 +59,9 @@ on:
|
||||
- cron: "0 0 * * 0" # Weekly on Sunday at midnight UTC
|
||||
workflow_dispatch:
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
RUST_BACKTRACE: 1
|
||||
@@ -81,7 +84,17 @@ jobs:
|
||||
cancel_others: true
|
||||
paths_ignore: '["*.md", "docs/**", "deploy/**"]'
|
||||
# Never skip release events and tag pushes
|
||||
do_not_skip: '["release", "push"]'
|
||||
do_not_skip: '["workflow_dispatch", "schedule", "merge_group", "release", "push"]'
|
||||
|
||||
|
||||
typos:
|
||||
name: Typos
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v5
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- name: Typos check with custom config file
|
||||
uses: crate-ci/typos@master
|
||||
|
||||
test-and-lint:
|
||||
name: Test and Lint
|
||||
@@ -90,8 +103,10 @@ jobs:
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
steps:
|
||||
- name: Delete huge unnecessary tools folder
|
||||
run: rm -rf /opt/hostedtoolcache
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Setup Rust environment
|
||||
uses: ./.github/actions/setup
|
||||
@@ -102,7 +117,9 @@ jobs:
|
||||
cache-save-if: ${{ github.ref == 'refs/heads/main' }}
|
||||
|
||||
- name: Run tests
|
||||
run: cargo test --all --exclude e2e_test
|
||||
run: |
|
||||
cargo nextest run --all --exclude e2e_test
|
||||
cargo test --all --doc
|
||||
|
||||
- name: Check code formatting
|
||||
run: cargo fmt --all --check
|
||||
@@ -118,7 +135,7 @@ jobs:
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Setup Rust environment
|
||||
uses: ./.github/actions/setup
|
||||
|
||||
493
.github/workflows/docker.yml
vendored
@@ -12,42 +12,33 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Docker Images Workflow
|
||||
#
|
||||
# This workflow builds Docker images using pre-built binaries from the build workflow.
|
||||
#
|
||||
# Trigger Types:
|
||||
# 1. workflow_run: Automatically triggered when "Build and Release" workflow completes
|
||||
# 2. workflow_dispatch: Manual trigger for standalone Docker builds
|
||||
#
|
||||
# Key Features:
|
||||
# - Only triggers when Linux builds (x86_64 + aarch64) are successful
|
||||
# - Independent of macOS/Windows build status
|
||||
# - Uses workflow_run event for precise control
|
||||
# - Only builds Docker images for releases and prereleases (development builds are skipped)
|
||||
|
||||
name: Docker Images
|
||||
|
||||
# Permissions needed for workflow_run event and Docker registry access
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
on:
|
||||
push:
|
||||
tags: ["*"]
|
||||
branches: [main]
|
||||
paths-ignore:
|
||||
- "**.md"
|
||||
- "**.txt"
|
||||
- ".github/**"
|
||||
- "docs/**"
|
||||
- "deploy/**"
|
||||
- "scripts/dev_*.sh"
|
||||
- "LICENSE*"
|
||||
- "README*"
|
||||
- "**/*.png"
|
||||
- "**/*.jpg"
|
||||
- "**/*.svg"
|
||||
- ".gitignore"
|
||||
- ".dockerignore"
|
||||
pull_request:
|
||||
branches: [main]
|
||||
paths-ignore:
|
||||
- "**.md"
|
||||
- "**.txt"
|
||||
- ".github/**"
|
||||
- "docs/**"
|
||||
- "deploy/**"
|
||||
- "scripts/dev_*.sh"
|
||||
- "LICENSE*"
|
||||
- "README*"
|
||||
- "**/*.png"
|
||||
- "**/*.jpg"
|
||||
- "**/*.svg"
|
||||
- ".gitignore"
|
||||
- ".dockerignore"
|
||||
# Automatically triggered when build workflow completes
|
||||
workflow_run:
|
||||
workflows: [ "Build and Release" ]
|
||||
types: [ completed ]
|
||||
# Manual trigger with same parameters for consistency
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
push_images:
|
||||
@@ -55,156 +46,398 @@ on:
|
||||
required: false
|
||||
default: true
|
||||
type: boolean
|
||||
version:
|
||||
description: "Version to build (latest for stable release, or specific version like v1.0.0, v1.0.0-alpha1)"
|
||||
required: false
|
||||
default: "latest"
|
||||
type: string
|
||||
force_rebuild:
|
||||
description: "Force rebuild even if binary exists (useful for testing)"
|
||||
required: false
|
||||
default: false
|
||||
type: boolean
|
||||
|
||||
env:
|
||||
CONCLUSION: ${{ github.event.workflow_run.conclusion }}
|
||||
HEAD_BRANCH: ${{ github.event.workflow_run.head_branch }}
|
||||
HEAD_SHA: ${{ github.event.workflow_run.head_sha }}
|
||||
TRIGGERING_EVENT: ${{ github.event.workflow_run.event }}
|
||||
DOCKERHUB_USERNAME: rustfs
|
||||
CARGO_TERM_COLOR: always
|
||||
REGISTRY_DOCKERHUB: rustfs/rustfs
|
||||
REGISTRY_GHCR: ghcr.io/${{ github.repository }}
|
||||
DOCKER_PLATFORMS: linux/amd64,linux/arm64
|
||||
|
||||
jobs:
|
||||
# Check if we should build
|
||||
# Check if we should build Docker images
|
||||
build-check:
|
||||
name: Build Check
|
||||
name: Docker Build Check
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
should_build: ${{ steps.check.outputs.should_build }}
|
||||
should_push: ${{ steps.check.outputs.should_push }}
|
||||
build_type: ${{ steps.check.outputs.build_type }}
|
||||
version: ${{ steps.check.outputs.version }}
|
||||
short_sha: ${{ steps.check.outputs.short_sha }}
|
||||
is_prerelease: ${{ steps.check.outputs.is_prerelease }}
|
||||
create_latest: ${{ steps.check.outputs.create_latest }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v5
|
||||
with:
|
||||
fetch-depth: 0
|
||||
# For workflow_run events, checkout the specific commit that triggered the workflow
|
||||
ref: ${{ github.event.workflow_run.head_sha || github.sha }}
|
||||
|
||||
- name: Check build conditions
|
||||
id: check
|
||||
run: |
|
||||
should_build=false
|
||||
should_push=false
|
||||
build_type="none"
|
||||
version=""
|
||||
short_sha=""
|
||||
is_prerelease=false
|
||||
create_latest=false
|
||||
|
||||
# Always build on workflow_dispatch or when changes detected
|
||||
if [[ "${{ github.event_name }}" == "workflow_dispatch" ]] || \
|
||||
[[ "${{ github.event_name }}" == "push" ]] || \
|
||||
[[ "${{ github.event_name }}" == "pull_request" ]]; then
|
||||
if [[ "${{ github.event_name }}" == "workflow_run" ]]; then
|
||||
# Triggered by build workflow completion
|
||||
echo "🔗 Triggered by build workflow completion"
|
||||
|
||||
# Check if the triggering workflow was successful
|
||||
# If the workflow succeeded, it means ALL builds (including Linux x86_64 and aarch64) succeeded
|
||||
if [[ "$CONCLUSION" == "success" ]]; then
|
||||
echo "✅ Build workflow succeeded, all builds including Linux are successful"
|
||||
should_build=true
|
||||
should_push=true
|
||||
else
|
||||
echo "❌ Build workflow failed (conclusion: $CONCLUSION), skipping Docker build"
|
||||
should_build=false
|
||||
fi
|
||||
|
||||
# Extract version info from commit message or use commit SHA
|
||||
# Use Git to generate consistent short SHA (ensures uniqueness like build.yml)
|
||||
short_sha=$(git rev-parse --short "$HEAD_SHA")
|
||||
|
||||
# Determine build type based on triggering workflow event and ref
|
||||
triggering_event="$TRIGGERING_EVENT"
|
||||
head_branch="$HEAD_BRANCH"
|
||||
|
||||
echo "🔍 Analyzing triggering workflow:"
|
||||
echo " 📋 Event: $triggering_event"
|
||||
echo " 🌿 Head branch: $head_branch"
|
||||
echo " 📎 Head SHA: $HEAD_SHA"
|
||||
|
||||
# Check if this was triggered by a tag push
|
||||
if [[ "$triggering_event" == "push" ]]; then
|
||||
# For tag pushes, head_branch will be like "refs/tags/v1.0.0" or just "v1.0.0"
|
||||
if [[ "$head_branch" == refs/tags/* ]]; then
|
||||
# Extract tag name from refs/tags/TAG_NAME
|
||||
tag_name="${head_branch#refs/tags/}"
|
||||
version="$tag_name"
|
||||
elif [[ "$head_branch" =~ ^v?[0-9]+\.[0-9]+\.[0-9]+ ]]; then
|
||||
# Direct tag name like "v1.0.0" or "1.0.0-alpha.1"
|
||||
version="$head_branch"
|
||||
elif [[ "$head_branch" == "main" ]]; then
|
||||
# Regular branch push to main
|
||||
build_type="development"
|
||||
version="dev-${short_sha}"
|
||||
should_build=false
|
||||
echo "⏭️ Skipping Docker build for development version (main branch push)"
|
||||
else
|
||||
# Other branch push
|
||||
build_type="development"
|
||||
version="dev-${short_sha}"
|
||||
should_build=false
|
||||
echo "⏭️ Skipping Docker build for development version (branch: $head_branch)"
|
||||
fi
|
||||
|
||||
# If we extracted a version (tag), determine release type
|
||||
if [[ -n "$version" ]] && [[ "$version" != "dev-${short_sha}" ]]; then
|
||||
# Remove 'v' prefix if present for consistent version format
|
||||
if [[ "$version" == v* ]]; then
|
||||
version="${version#v}"
|
||||
fi
|
||||
|
||||
if [[ "$version" == *"alpha"* ]] || [[ "$version" == *"beta"* ]] || [[ "$version" == *"rc"* ]]; then
|
||||
build_type="prerelease"
|
||||
is_prerelease=true
|
||||
# TODO: Temporary change - currently allow alpha versions to also create the latest tag
# Once releases stabilize, remove the line below and restore the original logic (only stable versions create latest)
if [[ "$version" == *"alpha"* ]]; then
create_latest=true
echo "🧪 Building Docker image for prerelease: $version (temporarily also creating the latest tag)"
|
||||
else
|
||||
echo "🧪 Building Docker image for prerelease: $version"
|
||||
fi
|
||||
else
|
||||
build_type="release"
|
||||
create_latest=true
|
||||
echo "🚀 Building Docker image for release: $version"
|
||||
fi
|
||||
fi
|
||||
else
|
||||
# Non-push events
|
||||
build_type="development"
|
||||
version="dev-${short_sha}"
|
||||
should_build=false
|
||||
echo "⏭️ Skipping Docker build for development version (event: $triggering_event)"
|
||||
fi
|
||||
|
||||
echo "🔄 Build triggered by workflow_run:"
|
||||
echo " 📋 Conclusion: $CONCLUSION"
|
||||
echo " 🌿 Branch: $HEAD_BRANCH"
|
||||
echo " 📎 SHA: $HEAD_SHA"
|
||||
echo " 🎯 Event: $TRIGGERING_EVENT"
|
||||
|
||||
elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
|
||||
# Manual trigger
|
||||
input_version="${{ github.event.inputs.version }}"
|
||||
version="${input_version}"
|
||||
should_push="${{ github.event.inputs.push_images }}"
|
||||
should_build=true
|
||||
fi
|
||||
|
||||
# Push only on main branch, tags, or manual trigger
|
||||
if [[ "${{ github.ref }}" == "refs/heads/main" ]] || \
|
||||
[[ "${{ startsWith(github.ref, 'refs/tags/') }}" == "true" ]] || \
|
||||
[[ "${{ github.event.inputs.push_images }}" == "true" ]]; then
|
||||
should_push=true
|
||||
# Get short SHA
|
||||
short_sha=$(git rev-parse --short HEAD)
|
||||
|
||||
echo "🎯 Manual Docker build triggered:"
|
||||
echo " 📋 Requested version: $input_version"
|
||||
echo " 🔧 Force rebuild: ${{ github.event.inputs.force_rebuild }}"
|
||||
echo " 🚀 Push images: $should_push"
|
||||
|
||||
case "$input_version" in
|
||||
"latest")
|
||||
build_type="release"
|
||||
create_latest=true
|
||||
echo "🚀 Building with latest stable release version"
|
||||
;;
|
||||
# Prerelease versions (must match first, more specific)
|
||||
v*alpha*|v*beta*|v*rc*|*alpha*|*beta*|*rc*)
|
||||
build_type="prerelease"
|
||||
is_prerelease=true
|
||||
# TODO: Temporary change - currently allow alpha versions to also create the latest tag
# Once releases stabilize, remove the if block below and restore the original logic
if [[ "$input_version" == *"alpha"* ]]; then
create_latest=true
echo "🧪 Building with prerelease version: $input_version (temporarily also creating the latest tag)"
|
||||
else
|
||||
echo "🧪 Building with prerelease version: $input_version"
|
||||
fi
|
||||
;;
|
||||
# Release versions (match after prereleases, more general)
|
||||
v[0-9]*|[0-9]*.*.*)
|
||||
build_type="release"
|
||||
create_latest=true
|
||||
echo "📦 Building with specific release version: $input_version"
|
||||
;;
|
||||
*)
|
||||
# Invalid version for Docker build
|
||||
should_build=false
|
||||
echo "❌ Invalid version for Docker build: $input_version"
|
||||
echo "⚠️ Only release versions (latest, v1.0.0, 1.0.0) and prereleases (v1.0.0-alpha1, 1.0.0-beta2) are supported"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
echo "should_build=$should_build" >> $GITHUB_OUTPUT
|
||||
echo "should_push=$should_push" >> $GITHUB_OUTPUT
|
||||
echo "Build: $should_build, Push: $should_push"
|
||||
echo "build_type=$build_type" >> $GITHUB_OUTPUT
|
||||
echo "version=$version" >> $GITHUB_OUTPUT
|
||||
echo "short_sha=$short_sha" >> $GITHUB_OUTPUT
|
||||
echo "is_prerelease=$is_prerelease" >> $GITHUB_OUTPUT
|
||||
echo "create_latest=$create_latest" >> $GITHUB_OUTPUT
|
||||
|
||||
echo "🐳 Docker Build Summary:"
|
||||
echo " - Should build: $should_build"
|
||||
echo " - Should push: $should_push"
|
||||
echo " - Build type: $build_type"
|
||||
echo " - Version: $version"
|
||||
echo " - Short SHA: $short_sha"
|
||||
echo " - Is prerelease: $is_prerelease"
|
||||
echo " - Create latest: $create_latest"
|
||||
|
||||
# Build multi-arch Docker images
|
||||
# Strategy: Build images using pre-built binaries from dl.rustfs.com
|
||||
# Supports both release and dev channel binaries based on build context
|
||||
# Only runs when should_build is true (which includes workflow success check)
|
||||
build-docker:
|
||||
name: Build Docker Images
|
||||
needs: build-check
|
||||
if: needs.build-check.outputs.should_build == 'true'
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
variant:
|
||||
- name: production
|
||||
dockerfile: Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
- name: ubuntu
|
||||
dockerfile: .docker/Dockerfile.ubuntu22.04
|
||||
platforms: linux/amd64,linux/arm64
|
||||
- name: alpine
|
||||
dockerfile: .docker/Dockerfile.alpine
|
||||
platforms: linux/amd64,linux/arm64
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ env.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
# - name: Login to GitHub Container Registry
|
||||
# uses: docker/login-action@v3
|
||||
# with:
|
||||
# registry: ghcr.io
|
||||
# username: ${{ github.actor }}
|
||||
# password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Login to Docker Hub
|
||||
if: needs.build-check.outputs.should_push == 'true' && secrets.DOCKERHUB_USERNAME != ''
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
if: needs.build-check.outputs.should_push == 'true'
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract metadata
|
||||
- name: Extract metadata and generate tags
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: |
|
||||
${{ env.REGISTRY_DOCKERHUB }}
|
||||
${{ env.REGISTRY_GHCR }}
|
||||
tags: |
|
||||
type=ref,event=branch,suffix=-${{ matrix.variant.name }}
|
||||
type=ref,event=pr,suffix=-${{ matrix.variant.name }}
|
||||
type=semver,pattern={{version}},suffix=-${{ matrix.variant.name }}
|
||||
type=semver,pattern={{major}}.{{minor}},suffix=-${{ matrix.variant.name }}
|
||||
type=raw,value=latest,suffix=-${{ matrix.variant.name }},enable={{is_default_branch}}
|
||||
flavor: |
|
||||
latest=false
|
||||
run: |
|
||||
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
|
||||
VERSION="${{ needs.build-check.outputs.version }}"
|
||||
SHORT_SHA="${{ needs.build-check.outputs.short_sha }}"
|
||||
CREATE_LATEST="${{ needs.build-check.outputs.create_latest }}"
|
||||
|
||||
# Convert version format for Dockerfile compatibility
|
||||
case "$VERSION" in
|
||||
"latest")
|
||||
# For stable latest, use RELEASE=latest + release CHANNEL
|
||||
DOCKER_RELEASE="latest"
|
||||
DOCKER_CHANNEL="release"
|
||||
;;
|
||||
v*)
|
||||
# For versioned releases (v1.0.0), remove 'v' prefix for Dockerfile
|
||||
DOCKER_RELEASE="${VERSION#v}"
|
||||
DOCKER_CHANNEL="release"
|
||||
;;
|
||||
*)
|
||||
# For other versions, pass as-is
|
||||
DOCKER_RELEASE="${VERSION}"
|
||||
DOCKER_CHANNEL="release"
|
||||
;;
|
||||
esac
|
||||
|
||||
echo "docker_release=$DOCKER_RELEASE" >> $GITHUB_OUTPUT
|
||||
echo "docker_channel=$DOCKER_CHANNEL" >> $GITHUB_OUTPUT
|
||||
|
||||
echo "🐳 Docker build parameters:"
|
||||
echo " - Original version: $VERSION"
|
||||
echo " - Docker RELEASE: $DOCKER_RELEASE"
|
||||
echo " - Docker CHANNEL: $DOCKER_CHANNEL"
|
||||
|
||||
# Generate tags based on build type
|
||||
# Only support release and prerelease builds (no development builds)
|
||||
TAGS="${{ env.REGISTRY_DOCKERHUB }}:${VERSION}"
|
||||
|
||||
# Add channel tags for prereleases and latest for stable
|
||||
if [[ "$CREATE_LATEST" == "true" ]]; then
|
||||
# TODO: Temporary change - currently alpha versions also create the latest tag
# Once releases stabilize, keep this logic unchanged, but the upstream CREATE_LATEST setting must be restored
# Stable release (and, temporarily, alpha versions)
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:latest"
|
||||
elif [[ "$BUILD_TYPE" == "prerelease" ]]; then
|
||||
# Prerelease channel tags (alpha, beta, rc)
|
||||
if [[ "$VERSION" == *"alpha"* ]]; then
|
||||
CHANNEL="alpha"
|
||||
elif [[ "$VERSION" == *"beta"* ]]; then
|
||||
CHANNEL="beta"
|
||||
elif [[ "$VERSION" == *"rc"* ]]; then
|
||||
CHANNEL="rc"
|
||||
fi
|
||||
|
||||
if [[ -n "$CHANNEL" ]]; then
|
||||
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:${CHANNEL}"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Output tags
|
||||
echo "tags=$TAGS" >> $GITHUB_OUTPUT
|
||||
|
||||
# Generate labels
|
||||
LABELS="org.opencontainers.image.title=RustFS"
|
||||
LABELS="$LABELS,org.opencontainers.image.description=RustFS distributed object storage system"
|
||||
LABELS="$LABELS,org.opencontainers.image.version=$VERSION"
|
||||
LABELS="$LABELS,org.opencontainers.image.revision=${{ github.sha }}"
|
||||
LABELS="$LABELS,org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}"
|
||||
LABELS="$LABELS,org.opencontainers.image.created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')"
|
||||
LABELS="$LABELS,org.opencontainers.image.build-type=$BUILD_TYPE"
|
||||
|
||||
echo "labels=$LABELS" >> $GITHUB_OUTPUT
|
||||
|
||||
echo "🐳 Generated Docker tags:"
|
||||
echo "$TAGS" | tr ',' '\n' | sed 's/^/ - /'
|
||||
echo "📋 Build type: $BUILD_TYPE"
|
||||
echo "🔖 Version: $VERSION"
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v5
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
file: ${{ matrix.variant.dockerfile }}
|
||||
platforms: ${{ matrix.variant.platforms }}
|
||||
file: Dockerfile
|
||||
platforms: ${{ env.DOCKER_PLATFORMS }}
|
||||
push: ${{ needs.build-check.outputs.should_push == 'true' }}
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
cache-from: type=gha,scope=docker-${{ matrix.variant.name }}
|
||||
cache-to: type=gha,mode=max,scope=docker-${{ matrix.variant.name }}
|
||||
cache-from: |
|
||||
type=gha,scope=docker-binary
|
||||
cache-to: |
|
||||
type=gha,mode=max,scope=docker-binary
|
||||
build-args: |
|
||||
BUILDTIME=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.created'] }}
|
||||
VERSION=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.version'] }}
|
||||
REVISION=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }}
|
||||
BUILDTIME=$(date -u +'%Y-%m-%dT%H:%M:%SZ')
|
||||
VERSION=${{ needs.build-check.outputs.version }}
|
||||
BUILD_TYPE=${{ needs.build-check.outputs.build_type }}
|
||||
REVISION=${{ github.sha }}
|
||||
RELEASE=${{ steps.meta.outputs.docker_release }}
|
||||
CHANNEL=${{ steps.meta.outputs.docker_channel }}
|
||||
BUILDKIT_INLINE_CACHE=1
|
||||
# Enable advanced BuildKit features for better performance
|
||||
provenance: false
|
||||
sbom: false
|
||||
# Add retry mechanism by splitting the build process
|
||||
no-cache: false
|
||||
pull: true
|
||||
|
||||
# Create manifest for main production image
|
||||
create-manifest:
|
||||
name: Create Manifest
|
||||
needs: [build-check, build-docker]
|
||||
if: needs.build-check.outputs.should_push == 'true' && startsWith(github.ref, 'refs/tags/')
|
||||
# Note: Manifest creation is no longer needed as we only build one variant
|
||||
# Multi-arch manifests are automatically created by docker/build-push-action
|
||||
|
||||
# Docker build summary
|
||||
docker-summary:
|
||||
name: Docker Build Summary
|
||||
needs: [ build-check, build-docker ]
|
||||
if: always() && needs.build-check.outputs.should_build == 'true'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Login to Docker Hub
|
||||
if: secrets.DOCKERHUB_USERNAME != ''
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Create and push manifest
|
||||
- name: Docker build completion summary
|
||||
run: |
|
||||
VERSION=${GITHUB_REF#refs/tags/}
|
||||
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
|
||||
VERSION="${{ needs.build-check.outputs.version }}"
|
||||
CREATE_LATEST="${{ needs.build-check.outputs.create_latest }}"
|
||||
|
||||
# Create main image tag (without variant suffix)
|
||||
if [[ -n "${{ secrets.DOCKERHUB_USERNAME }}" ]]; then
|
||||
docker buildx imagetools create \
|
||||
-t ${{ env.REGISTRY_DOCKERHUB }}:${VERSION} \
|
||||
-t ${{ env.REGISTRY_DOCKERHUB }}:latest \
|
||||
${{ env.REGISTRY_DOCKERHUB }}:${VERSION}-production
|
||||
fi
|
||||
echo "🐳 Docker build completed successfully!"
|
||||
echo "📦 Build type: $BUILD_TYPE"
|
||||
echo "🔢 Version: $VERSION"
|
||||
echo "🚀 Strategy: Images using pre-built binaries (release channel only)"
|
||||
echo ""
|
||||
|
||||
docker buildx imagetools create \
|
||||
-t ${{ env.REGISTRY_GHCR }}:${VERSION} \
|
||||
-t ${{ env.REGISTRY_GHCR }}:latest \
|
||||
${{ env.REGISTRY_GHCR }}:${VERSION}-production
|
||||
case "$BUILD_TYPE" in
|
||||
"release")
|
||||
echo "🚀 Release Docker image has been built with ${VERSION} tags"
|
||||
echo "✅ This image is ready for production use"
|
||||
if [[ "$CREATE_LATEST" == "true" ]]; then
|
||||
echo "🏷️ Latest tag has been created for stable release"
|
||||
fi
|
||||
;;
|
||||
"prerelease")
|
||||
echo "🧪 Prerelease Docker image has been built with ${VERSION} tags"
|
||||
echo "⚠️ This is a prerelease image - use with caution"
|
||||
# TODO: Temporary change - alpha versions currently create the latest tag
# Once releases stabilize, restore the message below
if [[ "$VERSION" == *"alpha"* ]] && [[ "$CREATE_LATEST" == "true" ]]; then
echo "🏷️ Latest tag has been created for alpha version (temporary measure)"
|
||||
else
|
||||
echo "🚫 Latest tag NOT created for prerelease"
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
echo "❌ Unexpected build type: $BUILD_TYPE"
|
||||
;;
|
||||
esac
|
||||
|
||||
32
.github/workflows/issue-translator.yml
vendored
@@ -1,9 +1,27 @@
|
||||
name: 'issue-translator'
|
||||
on:
|
||||
issue_comment:
|
||||
types: [created]
|
||||
issues:
|
||||
types: [opened]
|
||||
# Copyright 2024 RustFS Team
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
name: "issue-translator"
|
||||
on:
|
||||
issue_comment:
|
||||
types: [ created ]
|
||||
issues:
|
||||
types: [ opened ]
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
issues: write
|
||||
|
||||
jobs:
|
||||
build:
|
||||
@@ -14,5 +32,5 @@ jobs:
|
||||
IS_MODIFY_TITLE: false
|
||||
# not require, default false, . Decide whether to modify the issue title
|
||||
# if true, the robot account @Issues-translate-bot must have modification permissions, invite @Issues-translate-bot to your project or use your custom bot.
|
||||
CUSTOM_BOT_NOTE: Bot detected the issue body's language is not English, translate it automatically.
|
||||
CUSTOM_BOT_NOTE: Bot detected the issue body's language is not English, translate it automatically.
|
||||
# not require. Customize the translation robot prefix message.
|
||||
|
||||
26
.github/workflows/performance.yml
vendored
@@ -16,12 +16,12 @@ name: Performance Testing
|
||||
|
||||
on:
|
||||
push:
|
||||
branches: [main]
|
||||
branches: [ main ]
|
||||
paths:
|
||||
- '**/*.rs'
|
||||
- '**/Cargo.toml'
|
||||
- '**/Cargo.lock'
|
||||
- '.github/workflows/performance.yml'
|
||||
- "**/*.rs"
|
||||
- "**/Cargo.toml"
|
||||
- "**/Cargo.lock"
|
||||
- ".github/workflows/performance.yml"
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
profile_duration:
|
||||
@@ -30,6 +30,9 @@ on:
|
||||
default: "120"
|
||||
type: string
|
||||
|
||||
permissions:
|
||||
contents: read
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
RUST_BACKTRACE: 1
|
||||
@@ -41,7 +44,7 @@ jobs:
|
||||
timeout-minutes: 30
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Setup Rust environment
|
||||
uses: ./.github/actions/setup
|
||||
@@ -73,12 +76,11 @@ jobs:
|
||||
echo "RUSTFS_VOLUMES=./target/volume/test{0...4}" >> $GITHUB_ENV
|
||||
echo "RUST_LOG=rustfs=info,ecstore=info,s3s=info,iam=info,rustfs-obs=info" >> $GITHUB_ENV
|
||||
|
||||
- name: Download static files
|
||||
- name: Verify console static assets
|
||||
run: |
|
||||
curl -L "https://dl.rustfs.com/artifacts/console/rustfs-console-latest.zip" \
|
||||
-o tempfile.zip --retry 3 --retry-delay 5
|
||||
unzip -o tempfile.zip -d ./rustfs/static
|
||||
rm tempfile.zip
|
||||
# Console static assets are already embedded in the repository
|
||||
echo "Console static assets size: $(du -sh rustfs/static/)"
|
||||
echo "Console static assets are embedded via rust-embed, no external download needed"
|
||||
|
||||
- name: Build with profiling optimizations
|
||||
run: |
|
||||
@@ -117,7 +119,7 @@ jobs:
|
||||
timeout-minutes: 45
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v5
|
||||
|
||||
- name: Setup Rust environment
|
||||
uses: ./.github/actions/setup
|
||||
|
||||
5
.gitignore
vendored
@@ -19,3 +19,8 @@ deploy/certs/*
|
||||
profile.json
|
||||
.docker/openobserve-otel/data
|
||||
*.zst
|
||||
.secrets
|
||||
*.go
|
||||
*.pb
|
||||
*.svg
|
||||
deploy/logs/*.log.*
|
||||
@@ -1,22 +1,80 @@
|
||||
# RustFS Project Cursor Rules
|
||||
# RustFS Project AI Coding Rules
|
||||
|
||||
## ⚠️ CRITICAL DEVELOPMENT RULES ⚠️
|
||||
## 🚨🚨🚨 CRITICAL DEVELOPMENT RULES - ZERO TOLERANCE 🚨🚨🚨
|
||||
|
||||
### 🚨 NEVER COMMIT DIRECTLY TO MASTER/MAIN BRANCH 🚨
|
||||
### ⛔️ ABSOLUTE PROHIBITION: NEVER COMMIT DIRECTLY TO MASTER/MAIN BRANCH ⛔️
|
||||
|
||||
- **This is the most important rule - NEVER modify code directly on main or master branch**
|
||||
- **ALL CHANGES MUST GO THROUGH PULL REQUESTS - NO EXCEPTIONS**
|
||||
- **Always work on feature branches and use pull requests for all changes**
|
||||
- **Any direct commits to master/main branch are strictly forbidden**
|
||||
- **Pull requests are the ONLY way to merge code to main branch**
|
||||
- Before starting any development, always:
|
||||
1. `git checkout main` (switch to main branch)
|
||||
2. `git pull` (get latest changes)
|
||||
3. `git checkout -b feat/your-feature-name` (create and switch to feature branch)
|
||||
4. Make your changes on the feature branch
|
||||
5. Commit and push to the feature branch
|
||||
6. **Create a pull request for review - THIS IS MANDATORY**
|
||||
7. **Wait for PR approval and merge through GitHub interface only**
|
||||
**🔥 THIS IS THE MOST CRITICAL RULE - VIOLATION WILL RESULT IN IMMEDIATE REVERSAL 🔥**
|
||||
|
||||
- **🚫 ZERO DIRECT COMMITS TO MAIN/MASTER BRANCH - ABSOLUTELY FORBIDDEN**
|
||||
- **🚫 ANY DIRECT COMMIT TO MAIN BRANCH MUST BE IMMEDIATELY REVERTED**
|
||||
- **🚫 NO EXCEPTIONS FOR HOTFIXES, EMERGENCIES, OR URGENT CHANGES**
|
||||
- **🚫 NO EXCEPTIONS FOR SMALL CHANGES, TYPOS, OR DOCUMENTATION UPDATES**
|
||||
- **🚫 NO EXCEPTIONS FOR ANYONE - MAINTAINERS, CONTRIBUTORS, OR ADMINS**
|
||||
|
||||
### 📋 MANDATORY WORKFLOW - STRICTLY ENFORCED
|
||||
|
||||
**EVERY SINGLE CHANGE MUST FOLLOW THIS WORKFLOW:**
|
||||
|
||||
1. **Check current branch**: `git branch` (MUST NOT be on main/master)
|
||||
2. **Switch to main**: `git checkout main`
|
||||
3. **Pull latest**: `git pull origin main`
|
||||
4. **Create feature branch**: `git checkout -b feat/your-feature-name`
|
||||
5. **Make changes ONLY on feature branch**
|
||||
6. **Test thoroughly before committing**
|
||||
7. **Commit and push to feature branch**: `git push origin feat/your-feature-name`
|
||||
8. **Create Pull Request**: Use `gh pr create` (MANDATORY)
|
||||
9. **Wait for PR approval**: NO self-merging allowed
|
||||
10. **Merge through GitHub interface**: ONLY after approval
|
||||
|
||||
### 🔒 ENFORCEMENT MECHANISMS
|
||||
|
||||
- **Branch protection rules**: Main branch is protected
|
||||
- **Pre-commit hooks**: Will block direct commits to main
|
||||
- **CI/CD checks**: All PRs must pass before merging
|
||||
- **Code review requirement**: At least one approval needed
|
||||
- **Automated reversal**: Direct commits to main will be automatically reverted
|
||||
|
||||
## 🎯 Core AI Development Principles
|
||||
|
||||
### Five Execution Steps
|
||||
|
||||
#### 1. Task Analysis and Planning
|
||||
- **Clear Objectives**: Deeply understand task requirements and expected results before starting coding
|
||||
- **Plan Development**: List specific files, components, and functions that need modification, explaining the reasons for changes
|
||||
- **Risk Assessment**: Evaluate the impact of changes on existing functionality, develop rollback plans
|
||||
|
||||
#### 2. Precise Code Location
|
||||
- **File Identification**: Determine specific files and line numbers that need modification
|
||||
- **Impact Analysis**: Avoid modifying irrelevant files, clearly state the reason for each file modification
|
||||
- **Minimization Principle**: Unless explicitly required by the task, do not create new abstraction layers or refactor existing code
|
||||
|
||||
#### 3. Minimal Code Changes
|
||||
- **Focus on Core**: Only write code directly required by the task
|
||||
- **Avoid Redundancy**: Do not add unnecessary logs, comments, tests, or error handling
|
||||
- **Isolation**: Ensure new code does not interfere with existing functionality, maintain code independence
|
||||
|
||||
#### 4. Strict Code Review
|
||||
- **Correctness Check**: Verify the correctness and completeness of code logic
|
||||
- **Style Consistency**: Ensure code conforms to established project coding style
|
||||
- **Side Effect Assessment**: Evaluate the impact of changes on downstream systems
|
||||
|
||||
#### 5. Clear Delivery Documentation
|
||||
- **Change Summary**: Detailed explanation of all modifications and reasons
|
||||
- **File List**: List all modified files and their specific changes
|
||||
- **Risk Statement**: Mark any assumptions or potential risk points
|
||||
|
||||
### Core Principles
|
||||
- **🎯 Precise Execution**: Strictly follow task requirements, no arbitrary innovation
|
||||
- **⚡ Efficient Development**: Avoid over-design, only do necessary work
|
||||
- **🛡️ Safe and Reliable**: Always follow development processes, ensure code quality and system stability
|
||||
- **🔒 Cautious Modification**: Only modify when clearly knowing what needs to be changed and having confidence
|
||||
|
||||
### Additional AI Behavior Rules
|
||||
|
||||
1. **Use English for all code comments and documentation** - All comments, variable names, function names, documentation, and user-facing text in code should be in English
|
||||
2. **Clean up temporary scripts after use** - Any temporary scripts, test files, or helper files created during AI work should be removed after task completion
|
||||
3. **Only make confident modifications** - Do not make speculative changes or "convenient" modifications outside the task scope. If uncertain about a change, ask for clarification rather than guessing
|
||||
|
||||
## Project Overview
|
||||
|
||||
@@ -49,15 +107,6 @@ RustFS is a high-performance distributed object storage system written in Rust,
|
||||
- Use `#[error("description")]` attributes for clear error messages
|
||||
- Support error downcasting when needed through `other()` helper methods
|
||||
- Implement `Clone` for errors when required by the domain logic
|
||||
- **Current module error types:**
|
||||
- `ecstore::error::StorageError` - Storage layer errors
|
||||
- `ecstore::disk::error::DiskError` - Disk operation errors
|
||||
- `iam::error::Error` - Identity and access management errors
|
||||
- `policy::error::Error` - Policy-related errors
|
||||
- `crypto::error::Error` - Cryptographic operation errors
|
||||
- `filemeta::error::Error` - File metadata errors
|
||||
- `rustfs::error::ApiError` - API layer errors
|
||||
- Module-specific error types for specialized functionality
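
As a minimal sketch of these conventions (not taken from the RustFS codebase; the variant names, the wrapped `std::io::Error`, and the exact `other()` signature are assumptions), a `thiserror`-based error type with `#[error(...)]` messages and an `other()` helper for wrapping arbitrary errors might look like this:

```rust
use std::io;
use thiserror::Error;

#[derive(Debug, Error)]
pub enum MyError {
    #[error("file not found: {path}")]
    FileNotFound { path: String },

    #[error("io error: {0}")]
    Io(#[from] io::Error),

    // Catch-all variant populated by the `other()` helper below.
    #[error("other error: {0}")]
    Other(Box<dyn std::error::Error + Send + Sync>),
}

impl MyError {
    /// Wrap any error type in the catch-all variant.
    pub fn other<E>(err: E) -> Self
    where
        E: Into<Box<dyn std::error::Error + Send + Sync>>,
    {
        MyError::Other(err.into())
    }
}
```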
|
||||
|
||||
## Code Style Guidelines
|
||||
|
||||
@@ -78,25 +127,21 @@ single_line_let_else_max_width = 100
|
||||
Before every commit, you **MUST**:
|
||||
|
||||
1. **Format your code**:
|
||||
|
||||
```bash
|
||||
cargo fmt --all
|
||||
```
|
||||
|
||||
2. **Verify formatting**:
|
||||
|
||||
```bash
|
||||
cargo fmt --all --check
|
||||
```
|
||||
|
||||
3. **Pass clippy checks**:
|
||||
|
||||
```bash
|
||||
cargo clippy --all-targets --all-features -- -D warnings
|
||||
```
|
||||
|
||||
4. **Ensure compilation**:
|
||||
|
||||
```bash
|
||||
cargo check --all-targets
|
||||
```
|
||||
@@ -128,48 +173,6 @@ make pre-commit
|
||||
make setup-hooks
|
||||
```
|
||||
|
||||
#### 🔒 Automated Pre-commit Hooks
|
||||
|
||||
This project includes a pre-commit hook that automatically runs before each commit to ensure:
|
||||
|
||||
- ✅ Code is properly formatted (`cargo fmt --all --check`)
|
||||
- ✅ No clippy warnings (`cargo clippy --all-targets --all-features -- -D warnings`)
|
||||
- ✅ Code compiles successfully (`cargo check --all-targets`)
|
||||
|
||||
**Setting Up Pre-commit Hooks** (MANDATORY for all developers):
|
||||
|
||||
Run this command once after cloning the repository:
|
||||
|
||||
```bash
|
||||
make setup-hooks
|
||||
```
|
||||
|
||||
Or manually:
|
||||
|
||||
```bash
|
||||
chmod +x .git/hooks/pre-commit
|
||||
```
|
||||
|
||||
#### 🚫 Commit Prevention
|
||||
|
||||
If your code doesn't meet the formatting requirements, the pre-commit hook will:
|
||||
|
||||
1. **Block the commit** and show clear error messages
|
||||
2. **Provide exact commands** to fix the issues
|
||||
3. **Guide you through** the resolution process
|
||||
|
||||
Example output when formatting fails:
|
||||
|
||||
```
|
||||
❌ Code formatting check failed!
|
||||
💡 Please run 'cargo fmt --all' to format your code before committing.
|
||||
|
||||
🔧 Quick fix:
|
||||
cargo fmt --all
|
||||
git add .
|
||||
git commit
|
||||
```
|
||||
|
||||
### 3. Naming Conventions
|
||||
|
||||
- Use `snake_case` for functions, variables, modules
|
||||
@@ -190,40 +193,6 @@ Example output when formatting fails:
|
||||
- Required for API boundaries (function signatures, public struct fields)
|
||||
- Needed to resolve ambiguity between multiple possible types
|
||||
|
||||
**Good examples (prefer these):**
|
||||
|
||||
```rust
|
||||
// Compiler can infer the type
|
||||
let items = vec![1, 2, 3, 4];
|
||||
let config = Config::default();
|
||||
let result = process_data(&input);
|
||||
|
||||
// Iterator chains with clear context
|
||||
let filtered: Vec<_> = items.iter().filter(|&&x| x > 2).collect();
|
||||
```
|
||||
|
||||
**Avoid unnecessary explicit types:**
|
||||
|
||||
```rust
|
||||
// Unnecessary - type is obvious
|
||||
let items: Vec<i32> = vec![1, 2, 3, 4];
|
||||
let config: Config = Config::default();
|
||||
let result: ProcessResult = process_data(&input);
|
||||
```
|
||||
|
||||
**When explicit types are beneficial:**
|
||||
|
||||
```rust
|
||||
// API boundaries - always specify types
|
||||
pub fn process_data(input: &[u8]) -> Result<ProcessResult, Error> { ... }
|
||||
|
||||
// Ambiguous cases - explicit type needed
|
||||
let value: f64 = "3.14".parse().unwrap();
|
||||
|
||||
// Complex generic types - explicit for clarity
|
||||
let cache: HashMap<String, Arc<Mutex<CacheEntry>>> = HashMap::new();
|
||||
```
|
||||
|
||||
### 5. Documentation Comments
|
||||
|
||||
- Public APIs must have documentation comments
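A brief illustration of the expected documentation-comment style (the function itself is hypothetical):

```rust
/// Returns the size in bytes of the object stored under `key`.
///
/// # Errors
///
/// Returns an I/O error if the underlying metadata cannot be read.
pub fn object_size(key: &str) -> std::io::Result<u64> {
    // Body kept trivial; only the documentation style matters here.
    std::fs::metadata(key).map(|m| m.len())
}
```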
|
||||
@@ -341,34 +310,7 @@ impl MyError {
|
||||
}
|
||||
```
|
||||
|
||||
### 3. Error Conversion Between Modules
|
||||
|
||||
```rust
|
||||
// Convert between different module error types
|
||||
impl From<ecstore::error::StorageError> for MyError {
|
||||
fn from(e: ecstore::error::StorageError) -> Self {
|
||||
match e {
|
||||
ecstore::error::StorageError::FileNotFound => {
|
||||
MyError::FileNotFound { path: "unknown".to_string() }
|
||||
}
|
||||
_ => MyError::Storage(e),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Provide reverse conversion when needed
|
||||
impl From<MyError> for ecstore::error::StorageError {
|
||||
fn from(e: MyError) -> Self {
|
||||
match e {
|
||||
MyError::FileNotFound { .. } => ecstore::error::StorageError::FileNotFound,
|
||||
MyError::Storage(e) => e,
|
||||
_ => ecstore::error::StorageError::other(e),
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 4. Error Context and Propagation
|
||||
### 3. Error Context and Propagation
|
||||
|
||||
```rust
|
||||
// Use ? operator for clean error propagation
|
||||
@@ -388,117 +330,6 @@ fn process_with_context(path: &str) -> Result<()> {
|
||||
}
|
||||
```
|
||||
|
||||
### 5. API Error Conversion (S3 Example)
|
||||
|
||||
```rust
|
||||
// Convert storage errors to API-specific errors
|
||||
use s3s::{S3Error, S3ErrorCode};
|
||||
|
||||
#[derive(Debug)]
|
||||
pub struct ApiError {
|
||||
pub code: S3ErrorCode,
|
||||
pub message: String,
|
||||
pub source: Option<Box<dyn std::error::Error + Send + Sync>>,
|
||||
}
|
||||
|
||||
impl From<ecstore::error::StorageError> for ApiError {
|
||||
fn from(err: ecstore::error::StorageError) -> Self {
|
||||
let code = match &err {
|
||||
ecstore::error::StorageError::BucketNotFound(_) => S3ErrorCode::NoSuchBucket,
|
||||
ecstore::error::StorageError::ObjectNotFound(_, _) => S3ErrorCode::NoSuchKey,
|
||||
ecstore::error::StorageError::BucketExists(_) => S3ErrorCode::BucketAlreadyExists,
|
||||
ecstore::error::StorageError::InvalidArgument(_, _, _) => S3ErrorCode::InvalidArgument,
|
||||
ecstore::error::StorageError::MethodNotAllowed => S3ErrorCode::MethodNotAllowed,
|
||||
ecstore::error::StorageError::StorageFull => S3ErrorCode::ServiceUnavailable,
|
||||
_ => S3ErrorCode::InternalError,
|
||||
};
|
||||
|
||||
ApiError {
|
||||
code,
|
||||
message: err.to_string(),
|
||||
source: Some(Box::new(err)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl From<ApiError> for S3Error {
|
||||
fn from(err: ApiError) -> Self {
|
||||
let mut s3e = S3Error::with_message(err.code, err.message);
|
||||
if let Some(source) = err.source {
|
||||
s3e.set_source(source);
|
||||
}
|
||||
s3e
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
### 6. Error Handling Best Practices
|
||||
|
||||
#### Pattern Matching and Error Classification
|
||||
|
||||
```rust
|
||||
// Use pattern matching for specific error handling
|
||||
async fn handle_storage_operation() -> Result<()> {
|
||||
match storage.get_object("bucket", "key").await {
|
||||
Ok(object) => process_object(object),
|
||||
Err(ecstore::error::StorageError::ObjectNotFound(bucket, key)) => {
|
||||
warn!("Object not found: {}/{}", bucket, key);
|
||||
create_default_object(bucket, key).await
|
||||
}
|
||||
Err(ecstore::error::StorageError::BucketNotFound(bucket)) => {
|
||||
error!("Bucket not found: {}", bucket);
|
||||
Err(MyError::Custom {
|
||||
message: format!("Bucket {} does not exist", bucket)
|
||||
})
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Storage operation failed: {}", e);
|
||||
Err(MyError::Storage(e))
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Error Aggregation and Reporting
|
||||
|
||||
```rust
|
||||
// Collect and report multiple errors
|
||||
pub fn validate_configuration(config: &Config) -> Result<()> {
|
||||
let mut errors = Vec::new();
|
||||
|
||||
if config.bucket_name.is_empty() {
|
||||
errors.push("Bucket name cannot be empty");
|
||||
}
|
||||
|
||||
if config.region.is_empty() {
|
||||
errors.push("Region must be specified");
|
||||
}
|
||||
|
||||
if !errors.is_empty() {
|
||||
return Err(MyError::Custom {
|
||||
message: format!("Configuration validation failed: {}", errors.join(", "))
|
||||
});
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
```
|
||||
|
||||
#### Contextual Error Information
|
||||
|
||||
```rust
|
||||
// Add operation context to errors
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn upload_file(&self, bucket: &str, key: &str, data: Vec<u8>) -> Result<()> {
|
||||
self.storage
|
||||
.put_object(bucket, key, data)
|
||||
.await
|
||||
.map_err(|e| MyError::Custom {
|
||||
message: format!("Failed to upload {}/{}: {}", bucket, key, e)
|
||||
})
|
||||
}
|
||||
```
|
||||
|
||||
## Performance Optimization Guidelines
|
||||
|
||||
### 1. Memory Management
|
||||
@@ -517,7 +348,7 @@ let results = join_all(futures).await;
|
||||
|
||||
### 3. Caching Strategy
|
||||
|
||||
- Use `lazy_static` or `OnceCell` for global caching
|
||||
- Use `LazyLock` for global caching (see the sketch below)
|
||||
- Implement LRU cache to avoid memory leaks
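A minimal sketch of a `LazyLock`-based global cache; the key and value types are placeholders, and a production cache would add eviction (for example an LRU policy, as noted above):

```rust
use std::collections::HashMap;
use std::sync::{LazyLock, RwLock};

// Global cache, initialized lazily on first access.
static CONFIG_CACHE: LazyLock<RwLock<HashMap<String, String>>> =
    LazyLock::new(|| RwLock::new(HashMap::new()));

fn cached_lookup(key: &str) -> Option<String> {
    // Fast path: shared read lock.
    CONFIG_CACHE.read().ok()?.get(key).cloned()
}

fn cache_insert(key: String, value: String) {
    if let Ok(mut cache) = CONFIG_CACHE.write() {
        cache.insert(key, value);
    }
}
```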
|
||||
|
||||
## Testing Guidelines
|
||||
@@ -541,45 +372,6 @@ mod tests {
|
||||
fn test_with_cases(input: &str, expected: &str) {
|
||||
assert_eq!(function(input), expected);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_conversion() {
|
||||
use ecstore::error::StorageError;
|
||||
|
||||
let storage_err = StorageError::BucketNotFound("test-bucket".to_string());
|
||||
let api_err: ApiError = storage_err.into();
|
||||
|
||||
assert_eq!(api_err.code, S3ErrorCode::NoSuchBucket);
|
||||
assert!(api_err.message.contains("test-bucket"));
|
||||
assert!(api_err.source.is_some());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_types() {
|
||||
let io_err = std::io::Error::new(std::io::ErrorKind::NotFound, "file not found");
|
||||
let my_err = MyError::Io(io_err);
|
||||
|
||||
// Test error matching
|
||||
match my_err {
|
||||
MyError::Io(_) => {}, // Expected
|
||||
_ => panic!("Unexpected error type"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_error_context() {
|
||||
let result = process_with_context("nonexistent_file.txt");
|
||||
assert!(result.is_err());
|
||||
|
||||
let err = result.unwrap_err();
|
||||
match err {
|
||||
MyError::Custom { message } => {
|
||||
assert!(message.contains("Failed to read"));
|
||||
assert!(message.contains("nonexistent_file.txt"));
|
||||
}
|
||||
_ => panic!("Expected Custom error"),
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
@@ -812,17 +604,15 @@ async fn shutdown_gracefully(shutdown_rx: &mut Receiver<()>) {
|
||||
- Support version control and migration
|
||||
- Implement metadata caching
|
||||
|
||||
These rules should serve as guiding principles when developing the RustFS project, ensuring code quality, performance, and maintainability.
|
||||
## Branch Management and Development Workflow
|
||||
|
||||
### 4. Code Operations
|
||||
|
||||
#### Branch Management
|
||||
### Branch Management
|
||||
|
||||
- **🚨 CRITICAL: NEVER modify code directly on main or master branch - THIS IS ABSOLUTELY FORBIDDEN 🚨**
|
||||
- **⚠️ ANY DIRECT COMMITS TO MASTER/MAIN WILL BE REJECTED AND MUST BE REVERTED IMMEDIATELY ⚠️**
|
||||
- **🔒 ALL CHANGES MUST GO THROUGH PULL REQUESTS - NO DIRECT COMMITS TO MAIN UNDER ANY CIRCUMSTANCES 🔒**
|
||||
- **Always work on feature branches - NO EXCEPTIONS**
|
||||
- Always check the .cursorrules file before starting to ensure you understand the project guidelines
|
||||
- Always check the .rules.md file before starting to ensure you understand the project guidelines
|
||||
- **MANDATORY workflow for ALL changes:**
|
||||
1. `git checkout main` (switch to main branch)
|
||||
2. `git pull` (get latest changes)
|
||||
@@ -845,7 +635,7 @@ These rules should serve as guiding principles when developing the RustFS projec
|
||||
- Direct pushes to main should be blocked by repository settings
|
||||
- Any accidental direct commits to main must be immediately reverted via PR
|
||||
|
||||
#### Development Workflow
|
||||
### Development Workflow
|
||||
|
||||
## 🎯 **Core Development Principles**
|
||||
|
||||
@@ -884,27 +674,29 @@ These rules should serve as guiding principles when developing the RustFS projec
|
||||
- Testing information and verification steps
|
||||
- **Provide PR descriptions in copyable markdown format** enclosed in code blocks for easy one-click copying
|
||||
|
||||
## 🚫 AI 文档生成限制
|
||||
## 🚫 AI Documentation Generation Restrictions
|
||||
|
||||
### 禁止生成总结文档
|
||||
### Forbidden Summary Documents
|
||||
|
||||
- **严格禁止创建任何形式的AI生成总结文档**
|
||||
- **不得创建包含大量表情符号、详细格式化表格和典型AI风格的文档**
|
||||
- **不得在项目中生成以下类型的文档:**
|
||||
- 基准测试总结文档(BENCHMARK*.md)
|
||||
- 实现对比分析文档(IMPLEMENTATION_COMPARISON*.md)
|
||||
- 性能分析报告文档
|
||||
- 架构总结文档
|
||||
- 功能对比文档
|
||||
- 任何带有大量表情符号和格式化内容的文档
|
||||
- **如果需要文档,请只在用户明确要求时创建,并保持简洁实用的风格**
|
||||
- **文档应当专注于实际需要的信息,避免过度格式化和装饰性内容**
|
||||
- **任何发现的AI生成总结文档都应该立即删除**
|
||||
- **Strictly forbidden to create any form of AI-generated summary documents**
|
||||
- **Do not create documents containing excessive emoji, heavily formatted tables, or a typical AI writing style**
|
||||
- **Do not generate the following types of documents in the project:**
|
||||
- Benchmark summary documents (BENCHMARK*.md)
|
||||
- Implementation comparison analysis documents (IMPLEMENTATION_COMPARISON*.md)
|
||||
- Performance analysis report documents
|
||||
- Architecture summary documents
|
||||
- Feature comparison documents
|
||||
- Any documents with large amounts of emoji and formatted content
|
||||
- **If documentation is needed, create it only when explicitly requested by the user, and keep the style concise and practical**
|
||||
- **Documentation should focus on the information that is actually needed, avoiding excessive formatting and decorative content**
|
||||
- **Any discovered AI-generated summary documents should be immediately deleted**
|
||||
|
||||
### 允许的文档类型
|
||||
### Allowed Documentation Types
|
||||
|
||||
- README.md(项目介绍,保持简洁)
|
||||
- 技术文档(仅在明确需要时创建)
|
||||
- 用户手册(仅在明确需要时创建)
|
||||
- API文档(从代码生成)
|
||||
- 变更日志(CHANGELOG.md)
|
||||
- README.md (project introduction, keep concise)
|
||||
- Technical documentation (only create when explicitly needed)
|
||||
- User manual (only create when explicitly needed)
|
||||
- API documentation (generated from code)
|
||||
- Changelog (CHANGELOG.md)
|
||||
|
||||
These rules should serve as guiding principles when developing the RustFS project, ensuring code quality, performance, and maintainability.
|
||||
33
.vscode/launch.json
vendored
@@ -20,18 +20,21 @@
|
||||
}
|
||||
},
|
||||
"env": {
|
||||
"RUST_LOG": "rustfs=debug,ecstore=info,s3s=debug"
|
||||
"RUST_LOG": "rustfs=debug,ecstore=info,s3s=debug,iam=debug",
|
||||
"RUSTFS_SKIP_BACKGROUND_TASK": "on",
|
||||
// "RUSTFS_POLICY_PLUGIN_URL":"http://localhost:8181/v1/data/rustfs/authz/allow",
|
||||
// "RUSTFS_POLICY_PLUGIN_AUTH_TOKEN":"your-opa-token"
|
||||
},
|
||||
"args": [
|
||||
"--access-key",
|
||||
"AKEXAMPLERUSTFS",
|
||||
"rustfsadmin",
|
||||
"--secret-key",
|
||||
"SKEXAMPLERUSTFS",
|
||||
"rustfsadmin",
|
||||
"--address",
|
||||
"0.0.0.0:9010",
|
||||
"--domain-name",
|
||||
"--server-domains",
|
||||
"127.0.0.1:9010",
|
||||
"./target/volume/test{0...4}"
|
||||
"./target/volume/test{1...4}"
|
||||
],
|
||||
"cwd": "${workspaceFolder}"
|
||||
},
|
||||
@@ -85,6 +88,26 @@
|
||||
"sourceLanguages": [
|
||||
"rust"
|
||||
],
|
||||
},
|
||||
{
|
||||
"name": "Debug executable target/debug/test",
|
||||
"type": "lldb",
|
||||
"request": "launch",
|
||||
"program": "${workspaceFolder}/target/debug/deps/lifecycle_integration_test-5915cbfcab491b3b",
|
||||
"args": [
|
||||
"--skip",
|
||||
"test_lifecycle_expiry_basic",
|
||||
"--skip",
|
||||
"test_lifecycle_expiry_deletemarker",
|
||||
//"--skip",
|
||||
//"test_lifecycle_transition_basic",
|
||||
],
|
||||
"cwd": "${workspaceFolder}",
|
||||
//"stopAtEntry": false,
|
||||
//"preLaunchTask": "cargo build",
|
||||
"sourceLanguages": [
|
||||
"rust"
|
||||
],
|
||||
}
|
||||
]
|
||||
}
|
||||
24
AGENTS.md
Normal file
@@ -0,0 +1,24 @@
|
||||
# Repository Guidelines
|
||||
|
||||
## Communication Rules
|
||||
- Respond to the user in Chinese; use English in all other contexts.
|
||||
|
||||
## Project Structure & Module Organization
|
||||
The workspace root hosts shared dependencies in `Cargo.toml`. The service binary lives under `rustfs/src/main.rs`, while reusable crates sit in `crates/` (`crypto`, `iam`, `kms`, and `e2e_test`). Local fixtures for standalone flows reside in `test_standalone/`, deployment manifests are under `deploy/`, Docker assets sit at the root, and automation lives in `scripts/`. Skim each crate’s README or module docs before contributing changes.
|
||||
|
||||
## Build, Test, and Development Commands
|
||||
Run `cargo check --all-targets` for fast validation. Build release binaries via `cargo build --release` or the pipeline-aligned `make build`. Use `./build-rustfs.sh --dev` for iterative development and `./build-rustfs.sh --platform <target>` for cross-compiles. Prefer `make pre-commit` before pushing to cover formatting, clippy, checks, and tests.
|
||||
Always ensure `cargo fmt --all --check`, `cargo test --workspace --exclude e2e_test`, and `cargo clippy --all-targets --all-features -- -D warnings` complete successfully after each code change to keep the tree healthy and warning-free.
|
||||
|
||||
## Coding Style & Naming Conventions
|
||||
Formatting follows the repo `rustfmt.toml` (130-column width). Use `snake_case` for items, `PascalCase` for types, and `SCREAMING_SNAKE_CASE` for constants. Avoid `unwrap()` or `expect()` outside tests; bubble errors with `Result` and crate-specific `thiserror` types. Keep async code non-blocking and offload CPU-heavy work with `tokio::task::spawn_blocking` when necessary.
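For instance, a CPU-heavy checksum can be moved off the async runtime as follows; this is a sketch with an illustrative function name, assuming the `crc32fast` crate already present in the workspace dependencies:

```rust
use anyhow::Result;

/// Computes a checksum for `data` without blocking the async runtime.
async fn checksum_off_thread(data: Vec<u8>) -> Result<u32> {
    // Offload the CPU-bound work to Tokio's blocking thread pool.
    let sum = tokio::task::spawn_blocking(move || crc32fast::hash(&data)).await?;
    Ok(sum)
}
```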
|
||||
|
||||
## Testing Guidelines
|
||||
Co-locate unit tests with their modules and give behavior-led names such as `handles_expired_token`. Integration suites belong in each crate’s `tests/` directory, while exhaustive end-to-end scenarios live in `crates/e2e_test/`. Run `cargo test --workspace --exclude e2e_test` during iteration, `cargo nextest run --all --exclude e2e_test` when available, and finish with `cargo test --all` before requesting review. Use `NO_PROXY=127.0.0.1,localhost HTTP_PROXY= HTTPS_PROXY=` for KMS e2e tests.
|
||||
When fixing bugs or adding features, include regression tests that capture the new behavior so future changes cannot silently break it.
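A sketch of the behavior-led naming style; `Token` and `validate_token` are hypothetical helpers used only for illustration:

```rust
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn handles_expired_token() {
        // Regression test: an expired token must be rejected, not silently accepted.
        // `Token::new_expired` and `validate_token` are placeholders for real helpers.
        let token = Token::new_expired();
        assert!(validate_token(&token).is_err());
    }
}
```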
|
||||
|
||||
## Commit & Pull Request Guidelines
|
||||
Work on feature branches (e.g., `feat/...`) after syncing `main`. Follow Conventional Commits under 72 characters (e.g., `feat: add kms key rotation`). Each commit must compile, format cleanly, and pass `make pre-commit`. Open PRs with a concise summary, note verification commands, link relevant issues, and wait for reviewer approval.
|
||||
|
||||
## Security & Configuration Tips
|
||||
Do not commit secrets or cloud credentials; prefer environment variables or vault tooling. Review IAM- and KMS-related changes with a second maintainer. Confirm proxy settings before running sensitive tests to avoid leaking traffic outside localhost.
|
||||
75
CLA.md
@@ -1,39 +1,88 @@
|
||||
RustFS Individual Contributor License Agreement
|
||||
|
||||
Thank you for your interest in contributing documentation and related software code to a project hosted or managed by RustFS. In order to clarify the intellectual property license granted with Contributions from any person or entity, RustFS must have a Contributor License Agreement (“CLA”) on file that has been signed by each Contributor, indicating agreement to the license terms below. This version of the Contributor License Agreement allows an individual to submit Contributions to the applicable project. If you are making a submission on behalf of a legal entity, then you should sign the separate Corporate Contributor License Agreement.
|
||||
Thank you for your interest in contributing documentation and related software code to a project hosted or managed by
|
||||
RustFS. In order to clarify the intellectual property license granted with Contributions from any person or entity,
|
||||
RustFS must have a Contributor License Agreement ("CLA") on file that has been signed by each Contributor, indicating
|
||||
agreement to the license terms below. This version of the Contributor License Agreement allows an individual to submit
|
||||
Contributions to the applicable project. If you are making a submission on behalf of a legal entity, then you should
|
||||
sign the separate Corporate Contributor License Agreement.
|
||||
|
||||
You accept and agree to the following terms and conditions for Your present and future Contributions submitted to RustFS. You hereby irrevocably assign and transfer to RustFS all right, title, and interest in and to Your Contributions, including all copyrights and other intellectual property rights therein.
|
||||
You accept and agree to the following terms and conditions for Your present and future Contributions submitted to
|
||||
RustFS. You hereby irrevocably assign and transfer to RustFS all right, title, and interest in and to Your
|
||||
Contributions, including all copyrights and other intellectual property rights therein.
|
||||
|
||||
Definitions
|
||||
|
||||
“You” (or “Your”) shall mean the copyright owner or legal entity authorized by the copyright owner that is making this Agreement with RustFS. For legal entities, the entity making a Contribution and all other entities that control, are controlled by, or are under common control with that entity are considered to be a single Contributor. For the purposes of this definition, “control” means (i) the power, direct or indirect, to cause the direction or management of such entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
“You” (or “Your”) shall mean the copyright owner or legal entity authorized by the copyright owner that is making this
|
||||
Agreement with RustFS. For legal entities, the entity making a Contribution and all other entities that control, are
|
||||
controlled by, or are under common control with that entity are considered to be a single Contributor. For the purposes
|
||||
of this definition, "control" means (i) the power, direct or indirect, to cause the direction or management of such
|
||||
entity, whether by contract or otherwise, or (ii) ownership of fifty percent (50%) or more of the outstanding shares,
|
||||
or (iii) beneficial ownership of such entity.
|
||||
|
||||
“Contribution” shall mean any original work of authorship, including any modifications or additions to an existing work, that is intentionally submitted by You to RustFS for inclusion in, or documentation of, any of the products or projects owned or managed by RustFS (the “Work”), including without limitation any Work described in Schedule A. For the purposes of this definition, “submitted” means any form of electronic or written communication sent to RustFS or its representatives, including but not limited to communication on electronic mailing lists, source code control systems, and issue tracking systems that are managed by, or on behalf of, RustFS for the purpose of discussing and improving the Work.
|
||||
“Contribution” shall mean any original work of authorship, including any modifications or additions to an existing work,
|
||||
that is intentionally submitted by You to RustFS for inclusion in, or documentation of, any of the products or projects
|
||||
owned or managed by RustFS (the "Work"), including without limitation any Work described in Schedule A. For the purposes
|
||||
of this definition, "submitted" means any form of electronic or written communication sent to RustFS or its
|
||||
representatives, including but not limited to communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, RustFS for the purpose of discussing and improving the
|
||||
Work.
|
||||
|
||||
Assignment of Copyright
|
||||
|
||||
Subject to the terms and conditions of this Agreement, You hereby irrevocably assign and transfer to RustFS all right, title, and interest in and to Your Contributions, including all copyrights and other intellectual property rights therein, for the entire term of such rights, including all renewals and extensions. You agree to execute all documents and take all actions as may be reasonably necessary to vest in RustFS the ownership of Your Contributions and to assist RustFS in perfecting, maintaining, and enforcing its rights in Your Contributions.
|
||||
Subject to the terms and conditions of this Agreement, You hereby irrevocably assign and transfer to RustFS all right,
|
||||
title, and interest in and to Your Contributions, including all copyrights and other intellectual property rights
|
||||
therein, for the entire term of such rights, including all renewals and extensions. You agree to execute all documents
|
||||
and take all actions as may be reasonably necessary to vest in RustFS the ownership of Your Contributions and to assist
|
||||
RustFS in perfecting, maintaining, and enforcing its rights in Your Contributions.
|
||||
|
||||
Grant of Patent License
|
||||
|
||||
Subject to the terms and conditions of this Agreement, You hereby grant to RustFS and to recipients of documentation and software distributed by RustFS a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the Work, where such license applies only to those patent claims licensable by You that are necessarily infringed by Your Contribution(s) alone or by combination of Your Contribution(s) with the Work to which such Contribution(s) was submitted. If any entity institutes patent litigation against You or any other entity (including a cross-claim or counterclaim in a lawsuit) alleging that your Contribution, or the Work to which you have contributed, constitutes direct or contributory patent infringement, then any patent licenses granted to that entity under this Agreement for that Contribution or Work shall terminate as of the date such litigation is filed.
|
||||
Subject to the terms and conditions of this Agreement, You hereby grant to RustFS and to recipients of documentation and
|
||||
software distributed by RustFS a perpetual, worldwide, non-exclusive, no-charge, royalty-free, irrevocable (except as
|
||||
stated in this section) patent license to make, have made, use, offer to sell, sell, import, and otherwise transfer the
|
||||
Work, where such license applies only to those patent claims licensable by You that are necessarily infringed by Your
|
||||
Contribution(s) alone or by combination of Your Contribution(s) with the Work to which such Contribution(s) was
|
||||
submitted. If any entity institutes patent litigation against You or any other entity (including a cross-claim or
|
||||
counterclaim in a lawsuit) alleging that your Contribution, or the Work to which you have contributed, constitutes
|
||||
direct or contributory patent infringement, then any patent licenses granted to that entity under this Agreement for
|
||||
that Contribution or Work shall terminate as of the date such litigation is filed.
|
||||
|
||||
You represent that you are legally entitled to grant the above assignment and license.
|
||||
|
||||
You represent that each of Your Contributions is Your original creation (see section 7 for submissions on behalf of others). You represent that Your Contribution submissions include complete details of any third-party license or other restriction (including, but not limited to, related patents and trademarks) of which you are personally aware and which are associated with any part of Your Contributions.
|
||||
You represent that each of Your Contributions is Your original creation (see section 7 for submissions on behalf of
|
||||
others). You represent that Your Contribution submissions include complete details of any third-party license or other
|
||||
restriction (including, but not limited to, related patents and trademarks) of which you are personally aware and which
|
||||
are associated with any part of Your Contributions.
|
||||
|
||||
You are not expected to provide support for Your Contributions, except to the extent You desire to provide support. You may provide support for free, for a fee, or not at all. Unless required by applicable law or agreed to in writing, You provide Your Contributions on an “AS IS” BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied, including, without limitation, any warranties or conditions of TITLE, NON- INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A PARTICULAR PURPOSE.
|
||||
You are not expected to provide support for Your Contributions, except to the extent You desire to provide support. You
|
||||
may provide support for free, for a fee, or not at all. Unless required by applicable law or agreed to in writing, You
|
||||
provide Your Contributions on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied,
|
||||
including, without limitation, any warranties or conditions of TITLE, NON- INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR
|
||||
A PARTICULAR PURPOSE.
|
||||
|
||||
Should You wish to submit work that is not Your original creation, You may submit it to RustFS separately from any Contribution, identifying the complete details of its source and of any license or other restriction (including, but not limited to, related patents, trademarks, and license agreements) of which you are personally aware, and conspicuously marking the work as “Submitted on behalf of a third-party: [named here]”.
|
||||
Should You wish to submit work that is not Your original creation, You may submit it to RustFS separately from any
|
||||
Contribution, identifying the complete details of its source and of any license or other restriction (including, but not
|
||||
limited to, related patents, trademarks, and license agreements) of which you are personally aware, and conspicuously
|
||||
marking the work as "Submitted on behalf of a third-party: [named here]".
|
||||
|
||||
You agree to notify RustFS of any facts or circumstances of which you become aware that would make these representations inaccurate in any respect.
|
||||
You agree to notify RustFS of any facts or circumstances of which you become aware that would make these representations
|
||||
inaccurate in any respect.
|
||||
|
||||
Modification of CLA
|
||||
|
||||
RustFS reserves the right to update or modify this CLA in the future. Any updates or modifications to this CLA shall apply only to Contributions made after the effective date of the revised CLA. Contributions made prior to the update shall remain governed by the version of the CLA that was in effect at the time of submission. It is not necessary for all Contributors to re-sign the CLA when the CLA is updated or modified.
|
||||
RustFS reserves the right to update or modify this CLA in the future. Any updates or modifications to this CLA shall
|
||||
apply only to Contributions made after the effective date of the revised CLA. Contributions made prior to the update
|
||||
shall remain governed by the version of the CLA that was in effect at the time of submission. It is not necessary for
|
||||
all Contributors to re-sign the CLA when the CLA is updated or modified.
|
||||
|
||||
Governing Law and Dispute Resolution
|
||||
|
||||
This Agreement will be governed by and construed in accordance with the laws of the People’s Republic of China excluding that body of laws known as conflict of laws. The parties expressly agree that the United Nations Convention on Contracts for the International Sale of Goods will not apply. Any legal action or proceeding arising under this Agreement will be brought exclusively in the courts located in Beijing, China, and the parties hereby irrevocably consent to the personal jurisdiction and venue therein.
|
||||
This Agreement will be governed by and construed in accordance with the laws of the People's Republic of China excluding
|
||||
that body of laws known as conflict of laws. The parties expressly agree that the United Nations Convention on Contracts
|
||||
for the International Sale of Goods will not apply. Any legal action or proceeding arising under this Agreement will be
|
||||
brought exclusively in the courts located in Beijing, China, and the parties hereby irrevocably consent to the personal
|
||||
jurisdiction and venue therein.
|
||||
|
||||
For your reading convenience, this Agreement is written in parallel English and Chinese sections. To the extent there is a conflict between the English and Chinese sections, the English sections shall govern.
|
||||
For your reading convenience, this Agreement is written in parallel English and Chinese sections. To the extent there is
|
||||
a conflict between the English and Chinese sections, the English sections shall govern.
|
||||
275
CLAUDE.md
Normal file
@@ -0,0 +1,275 @@
|
||||
# CLAUDE.md
|
||||
|
||||
This file provides guidance to Claude Code (claude.ai/code) when working with code in this repository.
|
||||
|
||||
## Project Overview
|
||||
|
||||
RustFS is a high-performance distributed object storage software built with Rust, providing S3-compatible APIs and
|
||||
advanced features like data lakes, AI, and big data support. It's designed as an alternative to MinIO with better
|
||||
performance and a more business-friendly Apache 2.0 license.
|
||||
|
||||
## Build Commands
|
||||
|
||||
### Primary Build Commands
|
||||
|
||||
- `cargo build --release` - Build the main RustFS binary
|
||||
- `./build-rustfs.sh` - Recommended build script that handles console resources and cross-platform compilation
|
||||
- `./build-rustfs.sh --dev` - Development build with debug symbols
|
||||
- `make build` or `just build` - Use Make/Just for standardized builds
|
||||
|
||||
### Platform-Specific Builds
|
||||
|
||||
- `./build-rustfs.sh --platform x86_64-unknown-linux-musl` - Build for musl target
|
||||
- `./build-rustfs.sh --platform aarch64-unknown-linux-gnu` - Build for ARM64
|
||||
- `make build-musl` or `just build-musl` - Build musl variant
|
||||
- `make build-cross-all` - Build all supported architectures
|
||||
|
||||
### Testing Commands
|
||||
|
||||
- `cargo test --workspace --exclude e2e_test` - Run unit tests (excluding e2e tests)
|
||||
- `cargo nextest run --all --exclude e2e_test` - Use nextest if available (faster)
|
||||
- `cargo test --all --doc` - Run documentation tests
|
||||
- `make test` or `just test` - Run full test suite
|
||||
- `make pre-commit` - Run all quality checks (fmt, clippy, check, test)
|
||||
|
||||
### End-to-End Testing
|
||||
|
||||
- `cargo test --package e2e_test` - Run all e2e tests
|
||||
- `./scripts/run_e2e_tests.sh` - Run e2e tests via script
|
||||
- `./scripts/run_scanner_benchmarks.sh` - Run scanner performance benchmarks
|
||||
|
||||
### KMS-Specific Testing (with proxy bypass)
|
||||
|
||||
- `NO_PROXY=127.0.0.1,localhost HTTP_PROXY= HTTPS_PROXY= http_proxy= https_proxy= cargo test --package e2e_test test_local_kms_end_to_end -- --nocapture --test-threads=1` - Run complete KMS end-to-end test
|
||||
- `NO_PROXY=127.0.0.1,localhost HTTP_PROXY= HTTPS_PROXY= http_proxy= https_proxy= cargo test --package e2e_test kms:: -- --nocapture --test-threads=1` - Run all KMS tests
|
||||
- `cargo test --package e2e_test test_local_kms_key_isolation -- --nocapture --test-threads=1` - Test KMS key isolation
|
||||
- `cargo test --package e2e_test test_local_kms_large_file -- --nocapture --test-threads=1` - Test KMS with large files
|
||||
|
||||
### Code Quality
|
||||
|
||||
- `cargo fmt --all` - Format code
|
||||
- `cargo clippy --all-targets --all-features -- -D warnings` - Lint code
|
||||
- `make pre-commit` or `just pre-commit` - Run all quality checks (fmt, clippy, check, test)
|
||||
|
||||
### Quick Development Commands
|
||||
|
||||
- `make help` or `just help` - Show all available commands with descriptions
|
||||
- `make help-build` - Show detailed build options and cross-compilation help
|
||||
- `make help-docker` - Show comprehensive Docker build and deployment options
|
||||
- `./scripts/dev_deploy.sh <IP>` - Deploy development build to remote server
|
||||
- `./scripts/run.sh` - Start local development server
|
||||
- `./scripts/probe.sh` - Health check and connectivity testing
|
||||
|
||||
### Docker Build Commands
|
||||
|
||||
- `make docker-buildx` - Build multi-architecture production images
|
||||
- `make docker-dev-local` - Build development image for local use
|
||||
- `./docker-buildx.sh --push` - Build and push production images
|
||||
|
||||
## Architecture Overview
|
||||
|
||||
### Core Components
|
||||
|
||||
**Main Binary (`rustfs/`):**
|
||||
|
||||
- Entry point at `rustfs/src/main.rs`
|
||||
- Core modules: admin, auth, config, server, storage, license management, profiling
|
||||
- HTTP server with S3-compatible APIs
|
||||
- Service state management and graceful shutdown
|
||||
- Parallel service initialization with DNS resolver, bucket metadata, and IAM
|
||||
|
||||
**Key Crates (`crates/`):**
|
||||
|
||||
- `ecstore` - Erasure coding storage implementation (core storage layer)
|
||||
- `iam` - Identity and Access Management
|
||||
- `kms` - Key Management Service for encryption and key handling
|
||||
- `madmin` - Management dashboard and admin API interface
|
||||
- `s3select-api` & `s3select-query` - S3 Select API and query engine
|
||||
- `config` - Configuration management with notify features
|
||||
- `crypto` - Cryptography and security features
|
||||
- `lock` - Distributed locking implementation
|
||||
- `filemeta` - File metadata management
|
||||
- `rio` - Rust I/O utilities and abstractions
|
||||
- `common` - Shared utilities and data structures
|
||||
- `protos` - Protocol buffer definitions
|
||||
- `audit-logger` - Audit logging for file operations
|
||||
- `notify` - Event notification system
|
||||
- `obs` - Observability utilities
|
||||
- `workers` - Worker thread pools and task scheduling
|
||||
- `appauth` - Application authentication and authorization
|
||||
- `ahm` - Asynchronous Hash Map for concurrent data structures
|
||||
- `mcp` - MCP server for S3 operations
|
||||
- `signer` - Client request signing utilities
|
||||
- `checksums` - Client checksum calculation utilities
|
||||
- `utils` - General utility functions and helpers
|
||||
- `zip` - ZIP file handling and compression
|
||||
- `targets` - Target-specific configurations and utilities
|
||||
|
||||
### Build System
|
||||
|
||||
- Cargo workspace with 25+ crates (including new KMS functionality)
|
||||
- Custom `build-rustfs.sh` script for advanced build options
|
||||
- Multi-architecture Docker builds via `docker-buildx.sh`
|
||||
- Both Make and Just task runners supported with comprehensive help
|
||||
- Cross-compilation support for multiple Linux targets
|
||||
- Automated CI/CD with GitHub Actions for testing, building, and Docker publishing
|
||||
- Performance benchmarking and audit workflows
|
||||
|
||||
### Key Dependencies
|
||||
|
||||
- `axum` - HTTP framework for S3 API server
|
||||
- `tokio` - Async runtime
|
||||
- `s3s` - S3 protocol implementation library
|
||||
- `datafusion` - For S3 Select query processing
|
||||
- `hyper`/`hyper-util` - HTTP client/server utilities
|
||||
- `rustls` - TLS implementation
|
||||
- `serde`/`serde_json` - Serialization
|
||||
- `tracing` - Structured logging and observability
|
||||
- `pprof` - Performance profiling with flamegraph support
|
||||
- `tikv-jemallocator` - Memory allocator for Linux GNU builds
|
||||
|
||||
### Development Workflow
|
||||
|
||||
- Console resources are embedded during build via `rust-embed`
|
||||
- Protocol buffers generated via custom `gproto` binary
|
||||
- E2E tests in separate crate (`e2e_test`) with comprehensive KMS testing
|
||||
- Shadow build for version/metadata embedding
|
||||
- Support for both GNU and musl libc targets
|
||||
- Development scripts in `scripts/` directory for common tasks
|
||||
- Git hooks setup available via `make setup-hooks` or `just setup-hooks`
|
||||
|
||||
### Performance & Observability
|
||||
|
||||
- Performance profiling available with `pprof` integration (disabled on Windows)
|
||||
- Profiling enabled via environment variables in production
|
||||
- Built-in observability with OpenTelemetry integration
|
||||
- Background services (scanner, heal) can be controlled via environment variables:
|
||||
- `RUSTFS_ENABLE_SCANNER` (default: true)
|
||||
- `RUSTFS_ENABLE_HEAL` (default: true)
|
||||
|
||||
### Service Architecture
|
||||
|
||||
- Service state management with graceful shutdown handling
|
||||
- Parallel initialization of core systems (DNS, bucket metadata, IAM) (see the sketch after this list)
|
||||
- Event notification system with MQTT and webhook support
|
||||
- Auto-heal and data scanner for storage integrity
|
||||
- Jemalloc allocator for Linux GNU targets for better performance
|
||||
|
||||
## Environment Variables
|
||||
|
||||
- `RUSTFS_ENABLE_SCANNER` - Enable/disable background data scanner (default: true)
|
||||
- `RUSTFS_ENABLE_HEAL` - Enable/disable auto-heal functionality (default: true)
|
||||
- Various profiling and observability controls
|
||||
- Build-time variables for Docker builds (RELEASE, REGISTRY, etc.)
|
||||
- Test environment configurations in `scripts/dev_rustfs.env`
|
||||
|
||||
### KMS Environment Variables
|
||||
|
||||
- `NO_PROXY=127.0.0.1,localhost` - Required for KMS E2E tests to bypass proxy
|
||||
- `HTTP_PROXY=` `HTTPS_PROXY=` `http_proxy=` `https_proxy=` - Clear proxy settings for local KMS testing
|
||||
|
||||
## KMS (Key Management Service) Architecture
|
||||
|
||||
### KMS Implementation Status
|
||||
|
||||
- **Full KMS Integration:** Complete implementation with Local and Vault backends
|
||||
- **Automatic Configuration:** KMS auto-configures on startup with `--kms-enable` flag
|
||||
- **Encryption Support:** Full S3-compatible server-side encryption (SSE-S3, SSE-KMS, SSE-C)
|
||||
- **Admin API:** Complete KMS management via HTTP admin endpoints
|
||||
- **Production Ready:** Comprehensive testing including large files and key isolation
|
||||
|
||||
### KMS Configuration
|
||||
|
||||
- **Local Backend:** `--kms-backend local --kms-key-dir <path> --kms-default-key-id <id>`
|
||||
- **Vault Backend:** `--kms-backend vault --kms-vault-endpoint <url> --kms-vault-key-name <name>`
|
||||
- **Auto-startup:** KMS automatically initializes when `--kms-enable` is provided
|
||||
- **Manual Configuration:** Also supports dynamic configuration via admin API
|
||||
|
||||
### S3 Encryption Support
|
||||
|
||||
- **SSE-S3:** Server-side encryption with S3-managed keys (`ServerSideEncryption: AES256`)
|
||||
- **SSE-KMS:** Server-side encryption with KMS-managed keys (`ServerSideEncryption: aws:kms`)
|
||||
- **SSE-C:** Server-side encryption with customer-provided keys
|
||||
- **Response Headers:** All encryption types return correct `server_side_encryption` headers in PUT/GET responses (see the client-side sketch below)
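A minimal client-side sketch of an SSE-S3 upload, assuming the `aws-sdk-s3` crate used by the e2e tests; the bucket name, key, and client construction are placeholders:

```rust
use aws_sdk_s3::{primitives::ByteStream, types::ServerSideEncryption, Client};

async fn put_encrypted(client: &Client) -> Result<(), aws_sdk_s3::Error> {
    // SSE-S3: server-managed keys; the response should echo `AES256`.
    let resp = client
        .put_object()
        .bucket("demo-bucket")
        .key("hello.txt")
        .body(ByteStream::from_static(b"hello"))
        .server_side_encryption(ServerSideEncryption::Aes256)
        .send()
        .await?;
    assert_eq!(resp.server_side_encryption(), Some(&ServerSideEncryption::Aes256));
    Ok(())
}
```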
|
||||
|
||||
### KMS Testing Architecture
|
||||
|
||||
- **Comprehensive E2E Tests:** Located in `crates/e2e_test/src/kms/`
|
||||
- **Test Environments:** Automated test environment setup with temporary directories
|
||||
- **Encryption Coverage:** Tests all three encryption types (SSE-S3, SSE-KMS, SSE-C)
|
||||
- **API Coverage:** Tests all KMS admin APIs (CreateKey, DescribeKey, ListKeys, etc.)
|
||||
- **Edge Cases:** Key isolation, large file handling, error scenarios
|
||||
|
||||
### Key Files for KMS
|
||||
|
||||
- `crates/kms/` - Core KMS implementation with Local/Vault backends
|
||||
- `rustfs/src/main.rs` - KMS auto-initialization in `init_kms_system()`
|
||||
- `rustfs/src/storage/ecfs.rs` - SSE encryption/decryption in PUT/GET operations
|
||||
- `rustfs/src/admin/handlers/kms*.rs` - KMS admin endpoints
|
||||
- `crates/e2e_test/src/kms/` - Comprehensive KMS test suite
|
||||
- `crates/rio/src/encrypt_reader.rs` - Streaming encryption for large files
|
||||
|
||||
## Code Style and Safety Requirements
|
||||
|
||||
- **Language Requirements:**
|
||||
- Communicate with me in Chinese, but **only English can be used in code files**
|
||||
- Code comments, function names, variable names, and all text in source files must be in English only
|
||||
- No Chinese characters, emojis, or non-ASCII characters are allowed in any source code files
|
||||
- This includes comments, strings, documentation, and any other text within code files
|
||||
- **Safety-Critical Rules:**
|
||||
- `unsafe_code = "deny"` enforced at workspace level
|
||||
- Never use `unwrap()`, `expect()`, or panic-inducing code except in tests
|
||||
- Avoid blocking I/O operations in async contexts
|
||||
- Use proper error handling with `Result<T, E>` and `Option<T>`
|
||||
- Follow Rust's ownership and borrowing rules strictly
|
||||
- **Performance Guidelines:**
|
||||
- Use `cargo clippy --all-targets --all-features -- -D warnings` to catch issues
|
||||
- Prefer `anyhow` for error handling in applications and `thiserror` for libraries (see the sketch at the end of this section)
|
||||
- Use appropriate async runtimes and avoid blocking calls
|
||||
- **Testing Standards:**
|
||||
- All new features must include comprehensive tests
|
||||
- Use `#[cfg(test)]` for test-only code that may use panic macros
|
||||
- E2E tests should cover KMS integration scenarios
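A compact sketch of that library/application split, with illustrative names and paths: a library crate defines a typed `thiserror` error, and the application layer adds context with `anyhow` instead of calling `unwrap()`:

```rust
// Library side: a typed error (names are illustrative).
#[derive(Debug, thiserror::Error)]
pub enum KeyStoreError {
    #[error("key `{0}` not found")]
    NotFound(String),
    #[error(transparent)]
    Io(#[from] std::io::Error),
}

pub fn load_key(id: &str) -> Result<Vec<u8>, KeyStoreError> {
    // The on-disk location is a placeholder for illustration only.
    Ok(std::fs::read(format!("/tmp/keys/{id}"))?)
}

// Application side: add context with `anyhow`; never unwrap in production code.
fn main() -> anyhow::Result<()> {
    use anyhow::Context;
    let key = load_key("default").context("failed to load default KMS key")?;
    println!("loaded {} bytes", key.len());
    Ok(())
}
```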
|
||||
|
||||
## Common Development Tasks
|
||||
|
||||
### Running KMS Tests Locally
|
||||
|
||||
1. **Clear proxy settings:** KMS tests require direct localhost connections
|
||||
2. **Use serial execution:** `--test-threads=1` prevents port conflicts
|
||||
3. **Enable output:** `--nocapture` shows detailed test logs
|
||||
4. **Full command:**
|
||||
`NO_PROXY=127.0.0.1,localhost HTTP_PROXY= HTTPS_PROXY= http_proxy= https_proxy= cargo test --package e2e_test test_local_kms_end_to_end -- --nocapture --test-threads=1`
|
||||
|
||||
### KMS Development Workflow
|
||||
|
||||
1. **Code changes:** Modify KMS-related code in `crates/kms/` or `rustfs/src/`
|
||||
2. **Compile:** Always run `cargo build` after changes
|
||||
3. **Test specific functionality:** Use targeted test commands for faster iteration
|
||||
4. **Full validation:** Run complete end-to-end tests before commits
|
||||
|
||||
### Debugging KMS Issues
|
||||
|
||||
- **Server startup:** Check that KMS auto-initializes with debug logs
|
||||
- **Encryption failures:** Verify SSE headers are correctly set in both PUT and GET responses
|
||||
- **Test failures:** Use `--nocapture` to see detailed error messages
|
||||
- **Key management:** Test admin API endpoints with proper authentication
|
||||
|
||||
## Important Reminders
|
||||
|
||||
- **Always compile after code changes:** Use `cargo build` to catch errors early
|
||||
- **Don't bypass tests:** All functionality must be properly tested, not worked around
|
||||
- **Use proper error handling:** Never use `unwrap()` or `expect()` in production code (except tests)
|
||||
- **Follow S3 compatibility:** Ensure all encryption types return correct HTTP response headers
|
||||
|
||||
# important-instruction-reminders
|
||||
|
||||
Do what has been asked; nothing more, nothing less.
|
||||
NEVER create files unless they're absolutely necessary for achieving your goal.
|
||||
ALWAYS prefer editing an existing file to creating a new one.
|
||||
NEVER proactively create documentation files (*.md) or README files. Only create documentation files if explicitly
|
||||
requested by the User.
|
||||
7672
Cargo.lock
generated
348
Cargo.toml
@@ -15,8 +15,8 @@
|
||||
[workspace]
|
||||
members = [
|
||||
"rustfs", # Core file system implementation
|
||||
"cli/rustfs-gui", # Graphical user interface client
|
||||
"crates/appauth", # Application authentication and authorization
|
||||
"crates/audit", # Audit target management system with multi-target fan-out
|
||||
"crates/common", # Shared utilities and data structures
|
||||
"crates/config", # Configuration management
|
||||
"crates/crypto", # Cryptography and security features
|
||||
@@ -28,14 +28,20 @@ members = [
|
||||
"crates/madmin", # Management dashboard and admin API interface
|
||||
"crates/notify", # Notification system for events
|
||||
"crates/obs", # Observability utilities
|
||||
"crates/policy", # Policy management
|
||||
"crates/protos", # Protocol buffer definitions
|
||||
"crates/rio", # Rust I/O utilities and abstractions
|
||||
"crates/targets", # Target-specific configurations and utilities
|
||||
"crates/s3select-api", # S3 Select API interface
|
||||
"crates/s3select-query", # S3 Select query engine
|
||||
"crates/signer", # client signer
|
||||
"crates/checksums", # client checksums
|
||||
"crates/utils", # Utility functions and helpers
|
||||
"crates/workers", # Worker thread pools and task scheduling
|
||||
"crates/zip", # ZIP file handling and compression
|
||||
"crates/ahm", # Asynchronous Hash Map for concurrent data structures
|
||||
"crates/mcp", # MCP server for S3 operations
|
||||
"crates/kms", # Key Management Service
|
||||
]
|
||||
resolver = "2"
|
||||
|
||||
@@ -56,221 +62,225 @@ unsafe_code = "deny"
|
||||
[workspace.lints.clippy]
|
||||
all = "warn"
|
||||
|
||||
[patch.crates-io]
|
||||
rustfs-utils = { path = "crates/utils" }
|
||||
rustfs-filemeta = { path = "crates/filemeta" }
|
||||
rustfs-rio = { path = "crates/rio" }
|
||||
|
||||
[workspace.dependencies]
|
||||
rustfs-s3select-api = { path = "crates/s3select-api", version = "0.0.5" }
|
||||
# RustFS Internal Crates
|
||||
rustfs = { path = "./rustfs", version = "0.0.5" }
|
||||
rustfs-ahm = { path = "crates/ahm", version = "0.0.5" }
|
||||
rustfs-appauth = { path = "crates/appauth", version = "0.0.5" }
|
||||
rustfs-audit = { path = "crates/audit", version = "0.0.5" }
|
||||
rustfs-checksums = { path = "crates/checksums", version = "0.0.5" }
|
||||
rustfs-common = { path = "crates/common", version = "0.0.5" }
|
||||
rustfs-config = { path = "./crates/config", version = "0.0.5" }
|
||||
rustfs-crypto = { path = "crates/crypto", version = "0.0.5" }
|
||||
rustfs-ecstore = { path = "crates/ecstore", version = "0.0.5" }
|
||||
rustfs-filemeta = { path = "crates/filemeta", version = "0.0.5" }
|
||||
rustfs-iam = { path = "crates/iam", version = "0.0.5" }
|
||||
rustfs-kms = { path = "crates/kms", version = "0.0.5" }
|
||||
rustfs-lock = { path = "crates/lock", version = "0.0.5" }
|
||||
rustfs-madmin = { path = "crates/madmin", version = "0.0.5" }
|
||||
rustfs-mcp = { path = "crates/mcp", version = "0.0.5" }
|
||||
rustfs-notify = { path = "crates/notify", version = "0.0.5" }
|
||||
rustfs-obs = { path = "crates/obs", version = "0.0.5" }
|
||||
rustfs-policy = { path = "crates/policy", version = "0.0.5" }
|
||||
rustfs-protos = { path = "crates/protos", version = "0.0.5" }
|
||||
rustfs-s3select-query = { path = "crates/s3select-query", version = "0.0.5" }
|
||||
rustfs = { path = "./rustfs", version = "0.0.5" }
|
||||
rustfs-zip = { path = "./crates/zip", version = "0.0.5" }
|
||||
rustfs-config = { path = "./crates/config", version = "0.0.5" }
|
||||
rustfs-obs = { path = "crates/obs", version = "0.0.5" }
|
||||
rustfs-notify = { path = "crates/notify", version = "0.0.5" }
|
||||
rustfs-utils = { path = "crates/utils", version = "0.0.5" }
|
||||
rustfs-rio = { path = "crates/rio", version = "0.0.5" }
|
||||
rustfs-filemeta = { path = "crates/filemeta", version = "0.0.5" }
|
||||
rustfs-s3select-api = { path = "crates/s3select-api", version = "0.0.5" }
|
||||
rustfs-s3select-query = { path = "crates/s3select-query", version = "0.0.5" }
|
||||
rustfs-signer = { path = "crates/signer", version = "0.0.5" }
|
||||
rustfs-targets = { path = "crates/targets", version = "0.0.5" }
|
||||
rustfs-utils = { path = "crates/utils", version = "0.0.5" }
|
||||
rustfs-workers = { path = "crates/workers", version = "0.0.5" }
|
||||
aes-gcm = { version = "0.10.3", features = ["std"] }
|
||||
arc-swap = "1.7.1"
|
||||
argon2 = { version = "0.5.3", features = ["std"] }
|
||||
atoi = "2.0.0"
|
||||
async-channel = "2.4.0"
|
||||
rustfs-zip = { path = "./crates/zip", version = "0.0.5" }
|
||||
|
||||
# Async Runtime and Networking
|
||||
async-channel = "2.5.0"
|
||||
async-compression = { version = "0.4.19" }
|
||||
async-recursion = "1.1.1"
|
||||
async-trait = "0.1.88"
|
||||
async-compression = { version = "0.4.0" }
|
||||
atomic_enum = "0.3.0"
|
||||
aws-sdk-s3 = "1.96.0"
|
||||
axum = "0.8.4"
|
||||
axum-extra = "0.10.1"
|
||||
axum-server = { version = "0.7.2", features = ["tls-rustls"] }
|
||||
base64-simd = "0.8.0"
|
||||
base64 = "0.22.1"
|
||||
brotli = "8.0.1"
|
||||
bytes = { version = "1.10.1", features = ["serde"] }
|
||||
bytesize = "2.0.1"
|
||||
byteorder = "1.5.0"
|
||||
cfg-if = "1.0.1"
|
||||
chacha20poly1305 = { version = "0.10.1" }
|
||||
chrono = { version = "0.4.41", features = ["serde"] }
|
||||
clap = { version = "4.5.40", features = ["derive", "env"] }
|
||||
const-str = { version = "0.6.2", features = ["std", "proc"] }
|
||||
crc32fast = "1.4.2"
|
||||
criterion = { version = "0.5", features = ["html_reports"] }
|
||||
dashmap = "6.1.0"
|
||||
datafusion = "46.0.1"
|
||||
derive_builder = "0.20.2"
|
||||
dioxus = { version = "0.6.3", features = ["router"] }
|
||||
dirs = "6.0.0"
|
||||
enumset = "1.1.6"
|
||||
flatbuffers = "25.2.10"
|
||||
flate2 = "1.1.2"
|
||||
flexi_logger = { version = "0.31.2", features = ["trc", "dont_minimize_extra_stacks"] }
|
||||
form_urlencoded = "1.2.1"
|
||||
async-trait = "0.1.89"
|
||||
axum = "0.8.6"
|
||||
axum-extra = "0.12.1"
|
||||
axum-server = { version = "0.7.2", features = ["tls-rustls-no-provider"], default-features = false }
|
||||
futures = "0.3.31"
|
||||
futures-core = "0.3.31"
|
||||
futures-util = "0.3.31"
|
||||
glob = "0.3.2"
|
||||
hex = "0.4.3"
|
||||
hex-simd = "0.8.0"
|
||||
highway = { version = "1.3.0" }
|
||||
hmac = "0.12.1"
|
||||
hyper = "1.6.0"
|
||||
hyper-util = { version = "0.1.14", features = [
|
||||
"tokio",
|
||||
"server-auto",
|
||||
"server-graceful",
|
||||
] }
|
||||
hyper-rustls = "0.27.7"
|
||||
hyper = { version = "1.7.0", features = ["http2", "http1", "server"] }
|
||||
hyper-rustls = { version = "0.27.7", default-features = false, features = ["native-tokio", "http1", "tls12", "logging", "http2", "ring", "webpki-roots"] }
|
||||
hyper-util = { version = "0.1.17", features = ["tokio", "server-auto", "server-graceful"] }
|
||||
http = "1.3.1"
|
||||
http-body = "1.0.1"
|
||||
humantime = "2.2.0"
|
||||
reqwest = { version = "0.12.24", default-features = false, features = ["rustls-tls-webpki-roots", "charset", "http2", "system-proxy", "stream", "json", "blocking"] }
|
||||
socket2 = "0.6.1"
|
||||
tokio = { version = "1.48.0", features = ["fs", "rt-multi-thread"] }
|
||||
tokio-rustls = { version = "0.26.4", default-features = false, features = ["logging", "tls12", "ring"] }
|
||||
tokio-stream = { version = "0.1.17" }
|
||||
tokio-test = "0.4.4"
|
||||
tokio-util = { version = "0.7.17", features = ["io", "compat"] }
|
||||
tonic = { version = "0.14.2", features = ["gzip"] }
|
||||
tonic-prost = { version = "0.14.2" }
|
||||
tonic-prost-build = { version = "0.14.2" }
|
||||
tower = { version = "0.5.2", features = ["timeout"] }
|
||||
tower-http = { version = "0.6.6", features = ["cors"] }
|
||||
|
||||
# Serialization and Data Formats
|
||||
bytes = { version = "1.10.1", features = ["serde"] }
|
||||
bytesize = "2.1.0"
|
||||
byteorder = "1.5.0"
|
||||
flatbuffers = "25.9.23"
|
||||
form_urlencoded = "1.2.2"
|
||||
prost = "0.14.1"
|
||||
quick-xml = "0.38.3"
|
||||
rmcp = { version = "0.8.4" }
|
||||
rmp = { version = "0.8.14" }
|
||||
rmp-serde = { version = "1.3.0" }
|
||||
serde = { version = "1.0.228", features = ["derive"] }
|
||||
serde_json = { version = "1.0.145", features = ["raw_value"] }
|
||||
serde_urlencoded = "0.7.1"
|
||||
schemars = "1.0.5"
|
||||
|
||||
# Cryptography and Security
|
||||
aes-gcm = { version = "0.10.3", features = ["std"] }
|
||||
argon2 = { version = "0.5.3", features = ["std"] }
|
||||
blake3 = { version = "1.8.2" }
|
||||
chacha20poly1305 = { version = "0.10.1" }
|
||||
crc-fast = "1.3.0"
|
||||
crc32c = "0.6.8"
|
||||
crc32fast = "1.5.0"
|
||||
crc64fast-nvme = "1.2.0"
|
||||
hmac = "0.12.1"
|
||||
jsonwebtoken = { version = "10.1.0", features = ["rust_crypto"] }
|
||||
pbkdf2 = "0.12.2"
|
||||
rsa = { version = "0.9.8" }
|
||||
rustls = { version = "0.23.35", features = ["ring", "logging", "std", "tls12"], default-features = false }
|
||||
rustls-pemfile = "2.2.0"
|
||||
rustls-pki-types = "1.13.0"
|
||||
sha1 = "0.10.6"
|
||||
sha2 = "0.10.9"
|
||||
zeroize = { version = "1.8.2", features = ["derive"] }
|
||||
|
||||
# Time and Date
|
||||
chrono = { version = "0.4.42", features = ["serde"] }
|
||||
humantime = "2.3.0"
|
||||
time = { version = "0.3.44", features = ["std", "parsing", "formatting", "macros", "serde"] }
|
||||
|
||||
# Utilities and Tools
|
||||
anyhow = "1.0.100"
|
||||
arc-swap = "1.7.1"
|
||||
astral-tokio-tar = "0.5.6"
|
||||
atoi = "2.0.0"
|
||||
atomic_enum = "0.3.0"
|
||||
aws-config = { version = "1.8.10" }
|
||||
aws-credential-types = { version = "1.2.8" }
|
||||
aws-sdk-s3 = { version = "1.110.0", default-features = false, features = ["sigv4a", "rustls", "rt-tokio"] }
|
||||
aws-smithy-types = { version = "1.3.4" }
|
||||
base64 = "0.22.1"
|
||||
base64-simd = "0.8.0"
|
||||
brotli = "8.0.2"
|
||||
cfg-if = "1.0.4"
|
||||
clap = { version = "4.5.51", features = ["derive", "env"] }
|
||||
const-str = { version = "0.7.0", features = ["std", "proc"] }
|
||||
convert_case = "0.8.0"
|
||||
criterion = { version = "0.7", features = ["html_reports"] }
|
||||
crossbeam-queue = "0.3.12"
|
||||
datafusion = "50.3.0"
|
||||
derive_builder = "0.20.2"
|
||||
enumset = "1.1.10"
|
||||
flate2 = "1.1.5"
|
||||
flexi_logger = { version = "0.31.7", features = ["trc", "dont_minimize_extra_stacks", "compress", "kv"] }
|
||||
glob = "0.3.3"
|
||||
google-cloud-storage = "1.2.0"
|
||||
google-cloud-auth = "1.1.0"
|
||||
hashbrown = { version = "0.16.0", features = ["serde", "rayon"] }
|
||||
heed = { version = "0.22.0" }
|
||||
hex-simd = "0.8.0"
|
||||
highway = { version = "1.3.0" }
|
||||
ipnetwork = { version = "0.21.1", features = ["serde"] }
|
||||
jsonwebtoken = "9.3.1"
|
||||
keyring = { version = "3.6.2", features = [
|
||||
"apple-native",
|
||||
"windows-native",
|
||||
"sync-secret-service",
|
||||
] }
|
||||
lazy_static = "1.5.0"
|
||||
libc = "0.2.177"
|
||||
libsystemd = { version = "0.7.2" }
|
||||
local-ip-address = "0.6.5"
|
||||
lz4 = "1.28.1"
|
||||
matchit = "0.8.4"
|
||||
matchit = "0.9.0"
|
||||
md-5 = "0.10.6"
|
||||
md5 = "0.8.0"
|
||||
metrics = "0.24.2"
|
||||
metrics-exporter-opentelemetry = "0.1.2"
|
||||
mime_guess = "2.0.5"
|
||||
moka = { version = "0.12.11", features = ["future"] }
|
||||
netif = "0.1.6"
|
||||
nix = { version = "0.30.1", features = ["fs"] }
|
||||
nu-ansi-term = "0.50.1"
|
||||
nu-ansi-term = "0.50.3"
|
||||
num_cpus = { version = "1.17.0" }
|
||||
nvml-wrapper = "0.11.0"
|
||||
object_store = "0.11.2"
|
||||
object_store = "0.12.4"
|
||||
once_cell = "1.21.3"
|
||||
opentelemetry = { version = "0.30.0" }
|
||||
opentelemetry-appender-tracing = { version = "0.30.1", features = [
|
||||
"experimental_use_tracing_span_context",
|
||||
"experimental_metadata_attributes",
|
||||
"spec_unstable_logs_enabled"
|
||||
] }
|
||||
opentelemetry_sdk = { version = "0.30.0" }
|
||||
opentelemetry-stdout = { version = "0.30.0" }
|
||||
opentelemetry-otlp = { version = "0.30.0", default-features = false, features = [
|
||||
"grpc-tonic", "gzip-tonic", "trace", "metrics", "logs", "internal-logs"
|
||||
] }
|
||||
opentelemetry-semantic-conventions = { version = "0.30.0", features = [
|
||||
"semconv_experimental",
|
||||
] }
|
||||
parking_lot = "0.12.4"
|
||||
parking_lot = "0.12.5"
|
||||
path-absolutize = "3.1.1"
|
||||
path-clean = "1.0.1"
|
||||
blake3 = { version = "1.8.2" }
|
||||
pbkdf2 = "0.12.2"
|
||||
percent-encoding = "2.3.1"
|
||||
pin-project-lite = "0.2.16"
|
||||
prost = "0.13.5"
|
||||
quick-xml = "0.37.5"
|
||||
rand = "0.9.1"
|
||||
rdkafka = { version = "0.37.0", features = ["tokio"] }
|
||||
reed-solomon-simd = { version = "3.0.1" }
|
||||
regex = { version = "1.11.1" }
|
||||
reqwest = { version = "0.12.22", default-features = false, features = [
|
||||
"rustls-tls",
|
||||
"charset",
|
||||
"http2",
|
||||
"system-proxy",
|
||||
"stream",
|
||||
"json",
|
||||
"blocking",
|
||||
] }
|
||||
rfd = { version = "0.15.3", default-features = false, features = [
|
||||
"xdg-portal",
|
||||
"tokio",
|
||||
] }
|
||||
rmp = "0.8.14"
|
||||
rmp-serde = "1.3.0"
|
||||
rsa = "0.9.8"
|
||||
rumqttc = { version = "0.24" }
|
||||
rust-embed = { version = "8.7.2" }
|
||||
rust-i18n = { version = "3.1.5" }
|
||||
rustfs-rsc = "2025.506.1"
|
||||
rustls = { version = "0.23.28" }
|
||||
rustls-pki-types = "1.12.0"
|
||||
rustls-pemfile = "2.2.0"
|
||||
s3s = { version = "0.12.0-minio-preview.1" }
|
||||
shadow-rs = { version = "1.2.0", default-features = false }
|
||||
serde = { version = "1.0.219", features = ["derive"] }
|
||||
serde_json = { version = "1.0.140", features = ["raw_value"] }
|
||||
serde-xml-rs = "0.8.1"
|
||||
serde_urlencoded = "0.7.1"
|
||||
sha1 = "0.10.6"
|
||||
sha2 = "0.10.9"
|
||||
pretty_assertions = "1.4.1"
|
||||
rand = "0.9.2"
|
||||
rayon = "1.11.0"
|
||||
reed-solomon-simd = { version = "3.1.0" }
|
||||
regex = { version = "1.12.2" }
|
||||
rumqttc = { version = "0.25.0" }
|
||||
rust-embed = { version = "8.9.0" }
|
||||
rustc-hash = { version = "2.1.1" }
|
||||
s3s = { version = "0.12.0-rc.3", features = ["minio"] }
|
||||
scopeguard = "1.2.0"
|
||||
serial_test = "3.2.0"
|
||||
shadow-rs = { version = "1.4.0", default-features = false }
|
||||
siphasher = "1.0.1"
|
||||
smallvec = { version = "1.15.1", features = ["serde"] }
|
||||
snafu = "0.8.6"
|
||||
smartstring = "1.0.1"
|
||||
snafu = "0.8.9"
|
||||
snap = "1.1.1"
|
||||
socket2 = "0.5.10"
|
||||
strum = { version = "0.27.1", features = ["derive"] }
|
||||
sysinfo = "0.35.2"
|
||||
tempfile = "3.20.0"
|
||||
starshard = { version = "0.5.0", features = ["rayon", "async", "serde"] }
|
||||
strum = { version = "0.27.2", features = ["derive"] }
|
||||
sysctl = "0.7.1"
|
||||
sysinfo = "0.37.2"
|
||||
temp-env = "0.3.6"
|
||||
tempfile = "3.23.0"
|
||||
test-case = "3.3.1"
|
||||
thiserror = "2.0.12"
|
||||
time = { version = "0.3.41", features = [
|
||||
"std",
|
||||
"parsing",
|
||||
"formatting",
|
||||
"macros",
|
||||
"serde",
|
||||
] }
|
||||
tokio = { version = "1.46.1", features = ["fs", "rt-multi-thread"] }
|
||||
tokio-rustls = { version = "0.26.2", default-features = false }
|
||||
tokio-stream = { version = "0.1.17" }
|
||||
tokio-tar = "0.3.1"
|
||||
tokio-util = { version = "0.7.15", features = ["io", "compat"] }
|
||||
tonic = { version = "0.13.1", features = ["gzip"] }
|
||||
tonic-build = { version = "0.13.1" }
|
||||
tower = { version = "0.5.2", features = ["timeout"] }
|
||||
tower-http = { version = "0.6.6", features = ["cors"] }
|
||||
tracing = "0.1.41"
|
||||
tracing-core = "0.1.34"
|
||||
tracing-error = "0.2.1"
|
||||
tracing-subscriber = { version = "0.3.19", features = ["env-filter", "time"] }
|
||||
thiserror = "2.0.17"
|
||||
tracing = { version = "0.1.41" }
|
||||
tracing-appender = "0.2.3"
|
||||
tracing-opentelemetry = "0.31.0"
|
||||
tracing-error = "0.2.1"
|
||||
tracing-opentelemetry = "0.32.0"
|
||||
tracing-subscriber = { version = "0.3.20", features = ["env-filter", "time"] }
|
||||
transform-stream = "0.3.1"
|
||||
url = "2.5.4"
|
||||
url = "2.5.7"
|
||||
urlencoding = "2.1.3"
|
||||
uuid = { version = "1.17.0", features = [
|
||||
"v4",
|
||||
"fast-rng",
|
||||
"macro-diagnostics",
|
||||
] }
|
||||
wildmatch = { version = "2.4.0", features = ["serde"] }
|
||||
uuid = { version = "1.18.1", features = ["v4", "fast-rng", "macro-diagnostics"] }
|
||||
vaultrs = { version = "0.7.4" }
|
||||
walkdir = "2.5.0"
|
||||
wildmatch = { version = "2.5.0", features = ["serde"] }
|
||||
winapi = { version = "0.3.9" }
|
||||
xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }
|
||||
zip = "2.4.2"
|
||||
zip = "6.0.0"
|
||||
zstd = "0.13.3"
|
||||
|
||||
[profile.wasm-dev]
|
||||
inherits = "dev"
|
||||
opt-level = 1
|
||||
# Observability and Metrics
|
||||
opentelemetry = { version = "0.31.0" }
|
||||
opentelemetry-appender-tracing = { version = "0.31.1", features = ["experimental_use_tracing_span_context", "experimental_metadata_attributes", "spec_unstable_logs_enabled"] }
|
||||
opentelemetry-otlp = { version = "0.31.0", default-features = false, features = ["grpc-tonic", "gzip-tonic", "trace", "metrics", "logs", "internal-logs"] }
|
||||
opentelemetry_sdk = { version = "0.31.0" }
|
||||
opentelemetry-semantic-conventions = { version = "0.31.0", features = ["semconv_experimental"] }
|
||||
opentelemetry-stdout = { version = "0.31.0" }
|
||||
|
||||
[profile.server-dev]
|
||||
inherits = "dev"
|
||||
# Performance Analysis and Memory Profiling
|
||||
# Use tikv-jemallocator as memory allocator and enable performance analysis
|
||||
tikv-jemallocator = { version = "0.6", features = ["profiling", "stats", "unprefixed_malloc_on_supported_platforms", "background_threads"] }
|
||||
# Used to control and obtain statistics for jemalloc at runtime
|
||||
tikv-jemalloc-ctl = { version = "0.6", features = ["use_std", "stats", "profiling"] }
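The two crates above are typically wired together in the binary: tikv-jemallocator is installed as the global allocator and tikv-jemalloc-ctl reads its statistics at runtime. A minimal Rust sketch, assuming the crates' documented APIs rather than code from this repository:
// Sketch only: illustrates how these dependencies are commonly used; not taken from this diff.
#[global_allocator]
static ALLOC: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;

fn log_heap_usage() {
    use tikv_jemalloc_ctl::{epoch, stats};
    // jemalloc caches its statistics; advancing the epoch refreshes them.
    epoch::advance().expect("advance jemalloc epoch");
    let allocated = stats::allocated::read().expect("read stats.allocated");
    let resident = stats::resident::read().expect("read stats.resident");
    println!("jemalloc: allocated={allocated} bytes, resident={resident} bytes");
}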
|
||||
# Used to generate pprof-compatible memory profiling data and support symbolization and flame graphs
|
||||
jemalloc_pprof = { version = "0.8.1", features = ["symbolize", "flamegraph"] }
|
||||
# Used to generate CPU performance analysis data and flame diagrams
|
||||
pprof = { version = "0.15.0", features = ["flamegraph", "protobuf-codec"] }
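The pprof dependency above is usually driven through a ProfilerGuard that samples the process and renders a flame graph, matching the "flamegraph" feature enabled here. A hedged sketch based on the crate's documented API (the output file name is illustrative, not from this repository):
// Sketch only: illustrative use of the pprof crate's guard/report API.
fn profile_hot_path() -> Result<(), Box<dyn std::error::Error>> {
    // Sample the current process at roughly 100 Hz while the guard is alive.
    let guard = pprof::ProfilerGuard::new(100)?;

    // ... run the workload to be profiled ...

    // Build a report and write it out as an SVG flame graph.
    let report = guard.report().build()?;
    let file = std::fs::File::create("flamegraph.svg")?;
    report.flamegraph(file)?;
    Ok(())
}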
|
||||
mimalloc = "0.1"
|
||||
|
||||
[profile.android-dev]
|
||||
inherits = "dev"
|
||||
|
||||
[workspace.metadata.cargo-shear]
|
||||
ignored = ["rustfs", "rustfs-mcp", "tokio-test", "scopeguard"]
|
||||
|
||||
[profile.release]
|
||||
opt-level = 3
|
||||
|
||||
Dockerfile (113 changed lines)
@@ -1,50 +1,89 @@
|
||||
# Copyright 2024 RustFS Team
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
FROM alpine:3.22 AS build
|
||||
|
||||
FROM alpine:3.18 AS builder
|
||||
ARG TARGETARCH
|
||||
ARG RELEASE=latest
|
||||
|
||||
RUN apk add -U --no-cache \
|
||||
ca-certificates \
|
||||
curl \
|
||||
bash \
|
||||
unzip
|
||||
RUN apk add --no-cache ca-certificates curl unzip
|
||||
WORKDIR /build
|
||||
|
||||
RUN set -eux; \
|
||||
case "$TARGETARCH" in \
|
||||
amd64) ARCH_SUBSTR="x86_64-musl" ;; \
|
||||
arm64) ARCH_SUBSTR="aarch64-musl" ;; \
|
||||
*) echo "Unsupported TARGETARCH=$TARGETARCH" >&2; exit 1 ;; \
|
||||
esac; \
|
||||
if [ "$RELEASE" = "latest" ]; then \
|
||||
TAG="$(curl -fsSL https://api.github.com/repos/rustfs/rustfs/releases \
|
||||
| grep -o '"tag_name": "[^"]*"' | cut -d'"' -f4 | head -n 1)"; \
|
||||
else \
|
||||
TAG="$RELEASE"; \
|
||||
fi; \
|
||||
echo "Using tag: $TAG (arch pattern: $ARCH_SUBSTR)"; \
|
||||
# Find download URL in assets list for this tag that contains arch substring and ends with .zip
|
||||
URL="$(curl -fsSL "https://api.github.com/repos/rustfs/rustfs/releases/tags/$TAG" \
|
||||
| grep -o "\"browser_download_url\": \"[^\"]*${ARCH_SUBSTR}[^\"]*\\.zip\"" \
|
||||
| cut -d'"' -f4 | head -n 1)"; \
|
||||
if [ -z "$URL" ]; then echo "Failed to locate release asset for $ARCH_SUBSTR at tag $TAG" >&2; exit 1; fi; \
|
||||
echo "Downloading: $URL"; \
|
||||
curl -fL "$URL" -o rustfs.zip; \
|
||||
unzip -q rustfs.zip -d /build; \
|
||||
# If binary is not in root directory, try to locate and move from zip to /build/rustfs
|
||||
if [ ! -x /build/rustfs ]; then \
|
||||
BIN_PATH="$(unzip -Z -1 rustfs.zip | grep -E '(^|/)rustfs$' | head -n 1 || true)"; \
|
||||
if [ -n "$BIN_PATH" ]; then \
|
||||
mkdir -p /build/.tmp && unzip -q rustfs.zip "$BIN_PATH" -d /build/.tmp && \
|
||||
mv "/build/.tmp/$BIN_PATH" /build/rustfs; \
|
||||
fi; \
|
||||
fi; \
|
||||
[ -x /build/rustfs ] || { echo "rustfs binary not found in asset" >&2; exit 1; }; \
|
||||
chmod +x /build/rustfs; \
|
||||
rm -rf rustfs.zip /build/.tmp || true
|
||||
|
||||
|
||||
RUN curl -Lo /tmp/rustfs.zip https://dl.rustfs.com/artifacts/rustfs/rustfs-x86_64-unknown-linux-musl.zip && \
|
||||
unzip -o /tmp/rustfs.zip -d /tmp && \
|
||||
mv /tmp/rustfs /rustfs && \
|
||||
chmod +x /rustfs && \
|
||||
rm -rf /tmp/*
|
||||
FROM alpine:3.22
|
||||
|
||||
FROM alpine:3.18
|
||||
ARG RELEASE=latest
|
||||
ARG BUILD_DATE
|
||||
ARG VCS_REF
|
||||
|
||||
RUN apk add -U --no-cache \
|
||||
ca-certificates \
|
||||
bash
|
||||
LABEL name="RustFS" \
|
||||
vendor="RustFS Team" \
|
||||
maintainer="RustFS Team <dev@rustfs.com>" \
|
||||
version="v${RELEASE#v}" \
|
||||
release="${RELEASE}" \
|
||||
build-date="${BUILD_DATE}" \
|
||||
vcs-ref="${VCS_REF}" \
|
||||
summary="High-performance distributed object storage system compatible with S3 API" \
|
||||
description="RustFS is a distributed object storage system written in Rust, supporting erasure coding, multi-tenant management, and observability." \
|
||||
url="https://rustfs.com" \
|
||||
license="Apache-2.0"
|
||||
|
||||
COPY --from=builder /rustfs /usr/local/bin/rustfs
|
||||
RUN apk add --no-cache ca-certificates coreutils curl
|
||||
|
||||
ENV RUSTFS_ACCESS_KEY=rustfsadmin \
|
||||
RUSTFS_SECRET_KEY=rustfsadmin \
|
||||
RUSTFS_ADDRESS=":9000" \
|
||||
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
|
||||
COPY --from=build /build/rustfs /usr/bin/rustfs
|
||||
COPY entrypoint.sh /entrypoint.sh
|
||||
|
||||
RUN chmod +x /usr/bin/rustfs /entrypoint.sh && \
|
||||
mkdir -p /data /logs && \
|
||||
chmod 0750 /data /logs
|
||||
|
||||
ENV RUSTFS_ADDRESS=":9000" \
|
||||
RUSTFS_CONSOLE_ADDRESS=":9001" \
|
||||
RUSTFS_CONSOLE_ENABLE=true \
|
||||
RUST_LOG=warn
|
||||
RUSTFS_ACCESS_KEY="rustfsadmin" \
|
||||
RUSTFS_SECRET_KEY="rustfsadmin" \
|
||||
RUSTFS_CONSOLE_ENABLE="true" \
|
||||
RUSTFS_EXTERNAL_ADDRESS="" \
|
||||
RUSTFS_CORS_ALLOWED_ORIGINS="*" \
|
||||
RUSTFS_CONSOLE_CORS_ALLOWED_ORIGINS="*" \
|
||||
RUSTFS_VOLUMES="/data" \
|
||||
RUST_LOG="warn" \
|
||||
RUSTFS_OBS_LOG_DIRECTORY="/logs" \
|
||||
RUSTFS_SINKS_FILE_PATH="/logs"
|
||||
|
||||
EXPOSE 9000 9001
|
||||
VOLUME ["/data", "/logs"]
|
||||
|
||||
RUN mkdir -p /data
|
||||
VOLUME /data
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
||||
|
||||
CMD ["rustfs", "/data"]
|
||||
CMD ["rustfs"]
|
||||
|
||||
@@ -1,121 +0,0 @@
|
||||
# Multi-stage Dockerfile for RustFS
|
||||
# Supports cross-compilation for amd64 and arm64 architectures
|
||||
ARG TARGETPLATFORM
|
||||
ARG BUILDPLATFORM
|
||||
|
||||
# Build stage
|
||||
FROM --platform=$BUILDPLATFORM rust:1.85-bookworm AS builder
|
||||
|
||||
# Install required build dependencies
|
||||
RUN apt-get update && apt-get install -y \
|
||||
wget \
|
||||
git \
|
||||
curl \
|
||||
unzip \
|
||||
gcc \
|
||||
pkg-config \
|
||||
libssl-dev \
|
||||
lld \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install cross-compilation tools for ARM64
|
||||
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
|
||||
apt-get update && \
|
||||
apt-get install -y gcc-aarch64-linux-gnu && \
|
||||
rm -rf /var/lib/apt/lists/*; \
|
||||
fi
|
||||
|
||||
# Install protoc
|
||||
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
|
||||
&& unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
|
||||
&& mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
|
||||
&& mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3
|
||||
|
||||
# Install flatc
|
||||
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
|
||||
&& unzip Linux.flatc.binary.g++-13.zip \
|
||||
&& mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip
|
||||
|
||||
# Set up Rust targets based on platform
|
||||
RUN case "$TARGETPLATFORM" in \
|
||||
"linux/amd64") rustup target add x86_64-unknown-linux-gnu ;; \
|
||||
"linux/arm64") rustup target add aarch64-unknown-linux-gnu ;; \
|
||||
*) echo "Unsupported platform: $TARGETPLATFORM" && exit 1 ;; \
|
||||
esac
|
||||
|
||||
# Set up environment for cross-compilation
|
||||
ENV CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc
|
||||
ENV CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc
|
||||
ENV CXX_aarch64_unknown_linux_gnu=aarch64-linux-gnu-g++
|
||||
|
||||
WORKDIR /usr/src/rustfs
|
||||
|
||||
# Copy Cargo files for dependency caching
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
COPY */Cargo.toml ./*/
|
||||
|
||||
# Create dummy main.rs files for dependency compilation
|
||||
RUN find . -name "Cargo.toml" -not -path "./Cargo.toml" | \
|
||||
xargs -I {} dirname {} | \
|
||||
xargs -I {} sh -c 'mkdir -p {}/src && echo "fn main() {}" > {}/src/main.rs'
|
||||
|
||||
# Build dependencies only (cache layer)
|
||||
RUN case "$TARGETPLATFORM" in \
|
||||
"linux/amd64") cargo build --release --target x86_64-unknown-linux-gnu ;; \
|
||||
"linux/arm64") cargo build --release --target aarch64-unknown-linux-gnu ;; \
|
||||
esac
|
||||
|
||||
# Copy source code
|
||||
COPY . .
|
||||
|
||||
# Generate protobuf code
|
||||
RUN cargo run --bin gproto
|
||||
|
||||
# Build the actual application
|
||||
RUN case "$TARGETPLATFORM" in \
|
||||
"linux/amd64") \
|
||||
cargo build --release --target x86_64-unknown-linux-gnu --bin rustfs && \
|
||||
cp target/x86_64-unknown-linux-gnu/release/rustfs /usr/local/bin/rustfs \
|
||||
;; \
|
||||
"linux/arm64") \
|
||||
cargo build --release --target aarch64-unknown-linux-gnu --bin rustfs && \
|
||||
cp target/aarch64-unknown-linux-gnu/release/rustfs /usr/local/bin/rustfs \
|
||||
;; \
|
||||
esac
|
||||
|
||||
# Runtime stage - Ubuntu minimal for better compatibility
|
||||
FROM ubuntu:22.04
|
||||
|
||||
# Install runtime dependencies
|
||||
RUN apt-get update && apt-get install -y \
|
||||
ca-certificates \
|
||||
tzdata \
|
||||
wget \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Create rustfs user and group
|
||||
RUN groupadd -g 1000 rustfs && \
|
||||
useradd -d /app -g rustfs -u 1000 -s /bin/bash rustfs
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Create data directories
|
||||
RUN mkdir -p /data/rustfs{0,1,2,3} && \
|
||||
chown -R rustfs:rustfs /data /app
|
||||
|
||||
# Copy binary from builder stage
|
||||
COPY --from=builder /usr/local/bin/rustfs /app/rustfs
|
||||
RUN chmod +x /app/rustfs && chown rustfs:rustfs /app/rustfs
|
||||
|
||||
# Switch to non-root user
|
||||
USER rustfs
|
||||
|
||||
# Expose ports
|
||||
EXPOSE 9000 9001
|
||||
|
||||
# Health check
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
CMD wget --no-verbose --tries=1 --spider http://localhost:9000/health || exit 1
|
||||
|
||||
# Set default command
|
||||
CMD ["/app/rustfs"]
|
||||
@@ -1,21 +0,0 @@
|
||||
FROM ubuntu:latest
|
||||
|
||||
# RUN apk add --no-cache <package-name>
# If rustfs has dependencies, add them here, for example:
# RUN apk add --no-cache openssl
# RUN apk add --no-cache bash # install Bash
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Create directories matching RUSTFS_VOLUMES
|
||||
RUN mkdir -p /root/data/target/volume/test1 /root/data/target/volume/test2 /root/data/target/volume/test3 /root/data/target/volume/test4
|
||||
|
||||
# COPY ./target/x86_64-unknown-linux-musl/release/rustfs /app/rustfs
|
||||
COPY ./target/x86_64-unknown-linux-gnu/release/rustfs /app/rustfs
|
||||
|
||||
RUN chmod +x /app/rustfs
|
||||
|
||||
EXPOSE 9000
|
||||
EXPOSE 9002
|
||||
|
||||
CMD ["/app/rustfs"]
|
||||
Dockerfile.source (new file, 181 lines)
@@ -0,0 +1,181 @@
|
||||
# syntax=docker/dockerfile:1.6
|
||||
# Multi-stage Dockerfile for RustFS - LOCAL DEVELOPMENT ONLY
|
||||
#
|
||||
# IMPORTANT: This Dockerfile builds RustFS from source for local development and testing.
|
||||
# CI/CD uses the production Dockerfile with prebuilt binaries instead.
|
||||
#
|
||||
# Example:
|
||||
# docker build -f Dockerfile.source -t rustfs:dev-local .
|
||||
# docker run --rm -p 9000:9000 rustfs:dev-local
|
||||
#
|
||||
# Supports cross-compilation for amd64 and arm64 via TARGETPLATFORM.
|
||||
|
||||
ARG TARGETPLATFORM
|
||||
ARG BUILDPLATFORM
|
||||
|
||||
# -----------------------------
|
||||
# Build stage
|
||||
# -----------------------------
|
||||
FROM rust:1.88-bookworm AS builder
|
||||
|
||||
# Re-declare args after FROM
|
||||
ARG TARGETPLATFORM
|
||||
ARG BUILDPLATFORM
|
||||
|
||||
# Debug: print platforms
|
||||
RUN echo "Build info -> BUILDPLATFORM=${BUILDPLATFORM}, TARGETPLATFORM=${TARGETPLATFORM}"
|
||||
|
||||
# Install build toolchain and headers
|
||||
# Use distro packages for protoc/flatc to avoid host-arch mismatch
|
||||
RUN set -eux; \
|
||||
export DEBIAN_FRONTEND=noninteractive; \
|
||||
apt-get update; \
|
||||
apt-get install -y --no-install-recommends \
|
||||
build-essential \
|
||||
ca-certificates \
|
||||
curl \
|
||||
git \
|
||||
pkg-config \
|
||||
libssl-dev \
|
||||
lld \
|
||||
protobuf-compiler \
|
||||
flatbuffers-compiler; \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Optional: cross toolchain for aarch64 (only when targeting linux/arm64)
|
||||
RUN set -eux; \
|
||||
if [ "${TARGETPLATFORM:-linux/amd64}" = "linux/arm64" ]; then \
|
||||
export DEBIAN_FRONTEND=noninteractive; \
|
||||
apt-get update; \
|
||||
apt-get install -y --no-install-recommends gcc-aarch64-linux-gnu; \
|
||||
rm -rf /var/lib/apt/lists/*; \
|
||||
fi
|
||||
|
||||
# Add Rust targets based on TARGETPLATFORM
|
||||
RUN set -eux; \
|
||||
case "${TARGETPLATFORM:-linux/amd64}" in \
|
||||
linux/amd64) rustup target add x86_64-unknown-linux-gnu ;; \
|
||||
linux/arm64) rustup target add aarch64-unknown-linux-gnu ;; \
|
||||
*) echo "Unsupported TARGETPLATFORM=${TARGETPLATFORM}" >&2; exit 1 ;; \
|
||||
esac
|
||||
|
||||
# Cross-compilation environment (used only when targeting aarch64)
|
||||
ENV CARGO_TARGET_AARCH64_UNKNOWN_LINUX_GNU_LINKER=aarch64-linux-gnu-gcc
|
||||
ENV CC_aarch64_unknown_linux_gnu=aarch64-linux-gnu-gcc
|
||||
ENV CXX_aarch64_unknown_linux_gnu=aarch64-linux-gnu-g++
|
||||
|
||||
WORKDIR /usr/src/rustfs
|
||||
|
||||
# Layered copy to maximize caching:
|
||||
# 1) top-level manifests
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
# 2) workspace member manifests (adjust if workspace layout changes)
|
||||
COPY rustfs/Cargo.toml rustfs/Cargo.toml
|
||||
COPY crates/*/Cargo.toml crates/
|
||||
COPY cli/rustfs-gui/Cargo.toml cli/rustfs-gui/Cargo.toml
|
||||
|
||||
# Pre-fetch dependencies for better caching
|
||||
RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
||||
--mount=type=cache,target=/usr/local/cargo/git \
|
||||
cargo fetch --locked || true
|
||||
|
||||
# 3) copy full sources (this is the main cache invalidation point)
|
||||
COPY . .
|
||||
|
||||
# Cargo build configuration for lean release artifacts
|
||||
ENV CARGO_NET_GIT_FETCH_WITH_CLI=true \
|
||||
CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse \
|
||||
CARGO_INCREMENTAL=0 \
|
||||
CARGO_PROFILE_RELEASE_DEBUG=false \
|
||||
CARGO_PROFILE_RELEASE_SPLIT_DEBUGINFO=off \
|
||||
CARGO_PROFILE_RELEASE_STRIP=symbols
|
||||
|
||||
# Generate protobuf/flatbuffers code (uses protoc/flatc from distro)
|
||||
RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
||||
--mount=type=cache,target=/usr/local/cargo/git \
|
||||
--mount=type=cache,target=/usr/src/rustfs/target \
|
||||
cargo run --bin gproto
|
||||
|
||||
# Build RustFS (target depends on TARGETPLATFORM)
|
||||
RUN --mount=type=cache,target=/usr/local/cargo/registry \
|
||||
--mount=type=cache,target=/usr/local/cargo/git \
|
||||
--mount=type=cache,target=/usr/src/rustfs/target \
|
||||
set -eux; \
|
||||
case "${TARGETPLATFORM:-linux/amd64}" in \
|
||||
linux/amd64) \
|
||||
echo "Building for x86_64-unknown-linux-gnu"; \
|
||||
cargo build --release --locked --target x86_64-unknown-linux-gnu --bin rustfs -j "$(nproc)"; \
|
||||
install -m 0755 target/x86_64-unknown-linux-gnu/release/rustfs /usr/local/bin/rustfs \
|
||||
;; \
|
||||
linux/arm64) \
|
||||
echo "Building for aarch64-unknown-linux-gnu"; \
|
||||
cargo build --release --locked --target aarch64-unknown-linux-gnu --bin rustfs -j "$(nproc)"; \
|
||||
install -m 0755 target/aarch64-unknown-linux-gnu/release/rustfs /usr/local/bin/rustfs \
|
||||
;; \
|
||||
*) \
|
||||
echo "Unsupported TARGETPLATFORM=${TARGETPLATFORM}" >&2; exit 1 \
|
||||
;; \
|
||||
esac
|
||||
|
||||
# -----------------------------
|
||||
# Runtime stage (Ubuntu minimal)
|
||||
# -----------------------------
|
||||
FROM ubuntu:22.04
|
||||
|
||||
ARG BUILD_DATE
|
||||
ARG VCS_REF
|
||||
|
||||
LABEL name="RustFS (dev-local)" \
|
||||
maintainer="RustFS Team" \
|
||||
build-date="${BUILD_DATE}" \
|
||||
vcs-ref="${VCS_REF}" \
|
||||
description="RustFS - local development image built from source (NOT for production)."
|
||||
|
||||
# Minimal runtime deps: certificates + tzdata + coreutils (for chroot --userspec)
|
||||
RUN set -eux; \
|
||||
export DEBIAN_FRONTEND=noninteractive; \
|
||||
apt-get update; \
|
||||
apt-get install -y --no-install-recommends \
|
||||
ca-certificates \
|
||||
tzdata \
|
||||
coreutils; \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Create a conventional runtime user/group (final switch happens in entrypoint via chroot --userspec)
|
||||
RUN set -eux; \
|
||||
groupadd -g 1000 rustfs; \
|
||||
useradd -u 1000 -g rustfs -M -s /usr/sbin/nologin rustfs
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Prepare data/log directories with sane defaults
|
||||
RUN set -eux; \
|
||||
mkdir -p /data /logs; \
|
||||
chown -R rustfs:rustfs /data /logs /app; \
|
||||
chmod 0750 /data /logs
|
||||
|
||||
# Copy the freshly built binary and the entrypoint
|
||||
COPY --from=builder /usr/local/bin/rustfs /usr/bin/rustfs
|
||||
COPY entrypoint.sh /entrypoint.sh
|
||||
RUN chmod +x /usr/bin/rustfs /entrypoint.sh
|
||||
|
||||
# Default environment (override in docker run/compose as needed)
|
||||
ENV RUSTFS_ADDRESS=":9000" \
|
||||
RUSTFS_ACCESS_KEY="rustfsadmin" \
|
||||
RUSTFS_SECRET_KEY="rustfsadmin" \
|
||||
RUSTFS_CONSOLE_ENABLE="true" \
|
||||
RUSTFS_VOLUMES="/data" \
|
||||
RUST_LOG="warn" \
|
||||
RUSTFS_OBS_LOG_DIRECTORY="/logs" \
|
||||
RUSTFS_SINKS_FILE_PATH="/logs" \
|
||||
RUSTFS_USERNAME="rustfs" \
|
||||
RUSTFS_GROUPNAME="rustfs" \
|
||||
RUSTFS_UID="1000" \
|
||||
RUSTFS_GID="1000"
|
||||
|
||||
EXPOSE 9000
|
||||
VOLUME ["/data", "/logs"]
|
||||
|
||||
# Keep root here; entrypoint will drop privileges using chroot --userspec
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
||||
CMD ["/usr/bin/rustfs"]
|
||||
Justfile (new file, 258 lines)
@@ -0,0 +1,258 @@
|
||||
DOCKER_CLI := env("DOCKER_CLI", "docker")
|
||||
IMAGE_NAME := env("IMAGE_NAME", "rustfs:v1.0.0")
|
||||
DOCKERFILE_SOURCE := env("DOCKERFILE_SOURCE", "Dockerfile.source")
|
||||
DOCKERFILE_PRODUCTION := env("DOCKERFILE_PRODUCTION", "Dockerfile")
|
||||
CONTAINER_NAME := env("CONTAINER_NAME", "rustfs-dev")
|
||||
|
||||
[group("📒 Help")]
|
||||
[private]
|
||||
default:
|
||||
@just --list --list-heading $'🦀 RustFS justfile manual page:\n'
|
||||
|
||||
[doc("show help")]
|
||||
[group("📒 Help")]
|
||||
help: default
|
||||
|
||||
[doc("run `cargo fmt` to format codes")]
|
||||
[group("👆 Code Quality")]
|
||||
fmt:
|
||||
@echo "🔧 Formatting code..."
|
||||
cargo fmt --all
|
||||
|
||||
[doc("run `cargo fmt` in check mode")]
|
||||
[group("👆 Code Quality")]
|
||||
fmt-check:
|
||||
@echo "📝 Checking code formatting..."
|
||||
cargo fmt --all --check
|
||||
|
||||
[doc("run `cargo clippy`")]
|
||||
[group("👆 Code Quality")]
|
||||
clippy:
|
||||
@echo "🔍 Running clippy checks..."
|
||||
cargo clippy --all-targets --all-features --fix --allow-dirty -- -D warnings
|
||||
|
||||
[doc("run `cargo check`")]
|
||||
[group("👆 Code Quality")]
|
||||
check:
|
||||
@echo "🔨 Running compilation check..."
|
||||
cargo check --all-targets
|
||||
|
||||
[doc("run `cargo test`")]
|
||||
[group("👆 Code Quality")]
|
||||
test:
|
||||
@echo "🧪 Running tests..."
|
||||
cargo nextest run --all --exclude e2e_test
|
||||
cargo test --all --doc
|
||||
|
||||
[doc("run `fmt` `clippy` `check` `test` at once")]
|
||||
[group("👆 Code Quality")]
|
||||
pre-commit: fmt clippy check test
|
||||
@echo "✅ All pre-commit checks passed!"
|
||||
|
||||
[group("🤔 Git")]
|
||||
setup-hooks:
|
||||
@echo "🔧 Setting up git hooks..."
|
||||
chmod +x .git/hooks/pre-commit
|
||||
@echo "✅ Git hooks setup complete!"
|
||||
|
||||
[doc("use `release` mode for building")]
|
||||
[group("🔨 Build")]
|
||||
build:
|
||||
@echo "🔨 Building RustFS using build-rustfs.sh script..."
|
||||
./build-rustfs.sh
|
||||
|
||||
[doc("use `debug` mode for building")]
|
||||
[group("🔨 Build")]
|
||||
build-dev:
|
||||
@echo "🔨 Building RustFS in development mode..."
|
||||
./build-rustfs.sh --dev
|
||||
|
||||
[group("🔨 Build")]
|
||||
[private]
|
||||
build-target target:
|
||||
@echo "🔨 Building rustfs for {{ target }}..."
|
||||
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
|
||||
./build-rustfs.sh --platform {{ target }}
|
||||
|
||||
[doc("use `x86_64-unknown-linux-musl` target for building")]
|
||||
[group("🔨 Build")]
|
||||
build-musl: (build-target "x86_64-unknown-linux-musl")
|
||||
|
||||
[doc("use `x86_64-unknown-linux-gnu` target for building")]
|
||||
[group("🔨 Build")]
|
||||
build-gnu: (build-target "x86_64-unknown-linux-gnu")
|
||||
|
||||
[doc("use `aarch64-unknown-linux-musl` target for building")]
|
||||
[group("🔨 Build")]
|
||||
build-musl-arm64: (build-target "aarch64-unknown-linux-musl")
|
||||
|
||||
[doc("use `aarch64-unknown-linux-gnu` target for building")]
|
||||
[group("🔨 Build")]
|
||||
build-gnu-arm64: (build-target "aarch64-unknown-linux-gnu")
|
||||
|
||||
[doc("build and deploy to server")]
|
||||
[group("🔨 Build")]
|
||||
deploy-dev ip: build-musl
|
||||
@echo "🚀 Deploying to dev server: {{ ip }}"
|
||||
./scripts/dev_deploy.sh {{ ip }}
|
||||
|
||||
[group("🔨 Build")]
|
||||
[private]
|
||||
build-cross-all-pre:
|
||||
@echo "🔧 Building all target architectures..."
|
||||
@echo "💡 On macOS/Windows, use 'make docker-dev' for reliable multi-arch builds"
|
||||
@echo "🔨 Generating protobuf code..."
|
||||
-cargo run --bin gproto
|
||||
|
||||
[doc("build all targets at once")]
|
||||
[group("🔨 Build")]
|
||||
build-cross-all: build-cross-all-pre && build-gnu build-gnu-arm64 build-musl build-musl-arm64
|
||||
|
||||
# ========================================================================================
|
||||
# Docker Multi-Architecture Builds (Primary Methods)
|
||||
# ========================================================================================
|
||||
|
||||
[doc("build an image and run it")]
|
||||
[group("🐳 Build Image")]
|
||||
build-docker os="rockylinux9.3" cli=(DOCKER_CLI) dockerfile=(DOCKERFILE_SOURCE):
|
||||
#!/usr/bin/env bash
|
||||
SOURCE_BUILD_IMAGE_NAME="rustfs/rustfs-{{ os }}:v1"
|
||||
SOURCE_BUILD_CONTAINER_NAME="rustfs-{{ os }}-build"
|
||||
BUILD_CMD="/root/.cargo/bin/cargo build --release --bin rustfs --target-dir /root/s3-rustfs/target/{{ os }}"
|
||||
echo "🐳 Building RustFS using Docker ({{ os }})..."
|
||||
{{ cli }} buildx build -t $SOURCE_BUILD_IMAGE_NAME -f {{ dockerfile }} .
|
||||
{{ cli }} run --rm --name $SOURCE_BUILD_CONTAINER_NAME -v $(pwd):/root/s3-rustfs -it $SOURCE_BUILD_IMAGE_NAME $BUILD_CMD
|
||||
|
||||
[doc("build an image")]
|
||||
[group("🐳 Build Image")]
|
||||
docker-buildx:
|
||||
@echo "🏗️ Building multi-architecture production Docker images with buildx..."
|
||||
./docker-buildx.sh
|
||||
|
||||
[doc("build an image and push it")]
|
||||
[group("🐳 Build Image")]
|
||||
docker-buildx-push:
|
||||
@echo "🚀 Building and pushing multi-architecture production Docker images with buildx..."
|
||||
./docker-buildx.sh --push
|
||||
|
||||
[doc("build an image with a version")]
|
||||
[group("🐳 Build Image")]
|
||||
docker-buildx-version version:
|
||||
@echo "🏗️ Building multi-architecture production Docker images (version: {{ version }}..."
|
||||
./docker-buildx.sh --release {{ version }}
|
||||
|
||||
[doc("build an image with a version and push it")]
|
||||
[group("🐳 Build Image")]
|
||||
docker-buildx-push-version version:
|
||||
@echo "🚀 Building and pushing multi-architecture production Docker images (version: {{ version }}..."
|
||||
./docker-buildx.sh --release {{ version }} --push
|
||||
|
||||
[doc("build an image with a version and push it to registry")]
|
||||
[group("🐳 Build Image")]
|
||||
docker-dev-push registry cli=(DOCKER_CLI) source=(DOCKERFILE_SOURCE):
|
||||
@echo "🚀 Building and pushing multi-architecture development Docker images..."
|
||||
@echo "💡 push to registry: {{ registry }}"
|
||||
{{ cli }} buildx build \
|
||||
--platform linux/amd64,linux/arm64 \
|
||||
--file {{ source }} \
|
||||
--tag {{ registry }}/rustfs:source-latest \
|
||||
--tag {{ registry }}/rustfs:dev-latest \
|
||||
--push \
|
||||
.
|
||||
|
||||
# Local production builds using direct buildx (alternative to docker-buildx.sh)
|
||||
|
||||
[group("🐳 Build Image")]
|
||||
docker-buildx-production-local cli=(DOCKER_CLI) source=(DOCKERFILE_PRODUCTION):
|
||||
@echo "🏗️ Building single-architecture production Docker image locally..."
|
||||
@echo "💡 Alternative to docker-buildx.sh for local testing"
|
||||
{{ cli }} buildx build \
|
||||
--file {{ source }} \
|
||||
--tag rustfs:production-latest \
|
||||
--tag rustfs:latest \
|
||||
--load \
|
||||
--build-arg RELEASE=latest \
|
||||
.
|
||||
|
||||
# Development/Source builds using direct buildx commands
|
||||
|
||||
[group("🐳 Build Image")]
|
||||
docker-dev cli=(DOCKER_CLI) source=(DOCKERFILE_SOURCE):
|
||||
@echo "🏗️ Building multi-architecture development Docker images with buildx..."
|
||||
@echo "💡 This builds from source code and is intended for local development and testing"
|
||||
@echo "⚠️ Multi-arch images cannot be loaded locally, use docker-dev-push to push to registry"
|
||||
{{ cli }} buildx build \
|
||||
--platform linux/amd64,linux/arm64 \
|
||||
--file {{ source }} \
|
||||
--tag rustfs:source-latest \
|
||||
--tag rustfs:dev-latest \
|
||||
.
|
||||
|
||||
[group("🐳 Build Image")]
|
||||
docker-dev-local cli=(DOCKER_CLI) source=(DOCKERFILE_SOURCE):
|
||||
@echo "🏗️ Building single-architecture development Docker image for local use..."
|
||||
@echo "💡 This builds from source code for the current platform and loads locally"
|
||||
{{ cli }} buildx build \
|
||||
--file {{ source }} \
|
||||
--tag rustfs:source-latest \
|
||||
--tag rustfs:dev-latest \
|
||||
--load \
|
||||
.
|
||||
|
||||
# ========================================================================================
|
||||
# Single Architecture Docker Builds (Traditional)
|
||||
# ========================================================================================
|
||||
|
||||
[group("🐳 Build Image")]
|
||||
docker-build-production cli=(DOCKER_CLI) source=(DOCKERFILE_PRODUCTION):
|
||||
@echo "🏗️ Building single-architecture production Docker image..."
|
||||
@echo "💡 Consider using 'make docker-buildx-production-local' for multi-arch support"
|
||||
{{ cli }} build -f {{ source }} -t rustfs:latest .
|
||||
|
||||
[group("🐳 Build Image")]
|
||||
docker-build-source cli=(DOCKER_CLI) source=(DOCKERFILE_SOURCE):
|
||||
@echo "🏗️ Building single-architecture source Docker image..."
|
||||
@echo "💡 Consider using 'make docker-dev-local' for multi-arch support"
|
||||
{{ cli }} build -f {{ source }} -t rustfs:source .
|
||||
|
||||
# ========================================================================================
|
||||
# Development Environment
|
||||
# ========================================================================================
|
||||
|
||||
[group("🏃 Running")]
|
||||
dev-env-start cli=(DOCKER_CLI) source=(DOCKERFILE_SOURCE) container=(CONTAINER_NAME):
|
||||
@echo "🚀 Starting development environment..."
|
||||
{{ cli }} buildx build \
|
||||
--file {{ source }} \
|
||||
--tag rustfs:dev \
|
||||
--load \
|
||||
.
|
||||
-{{ cli }} stop {{ container }} 2>/dev/null
|
||||
-{{ cli }} rm {{ container }} 2>/dev/null
|
||||
{{ cli }} run -d --name {{ container }} \
|
||||
-p 9010:9010 -p 9000:9000 \
|
||||
-v {{ invocation_directory() }}:/workspace \
|
||||
-it rustfs:dev
|
||||
|
||||
[group("🏃 Running")]
|
||||
dev-env-stop cli=(DOCKER_CLI) container=(CONTAINER_NAME):
|
||||
@echo "🛑 Stopping development environment..."
|
||||
-{{ cli }} stop {{ container }} 2>/dev/null
|
||||
-{{ cli }} rm {{ container }} 2>/dev/null
|
||||
|
||||
[group("🏃 Running")]
|
||||
dev-env-restart: dev-env-stop dev-env-start
|
||||
|
||||
[group("👍 E2E")]
|
||||
e2e-server:
|
||||
sh scripts/run.sh
|
||||
|
||||
[group("👍 E2E")]
|
||||
probe-e2e:
|
||||
sh scripts/probe.sh
|
||||
|
||||
[doc("inspect one image")]
|
||||
[group("🚚 Other")]
|
||||
docker-inspect-multiarch image cli=(DOCKER_CLI):
|
||||
@echo "🔍 Inspecting multi-architecture image: {{ image }}"
|
||||
{{ cli }} buildx imagetools inspect {{ image }}
|
||||
Makefile (369 changed lines)
@@ -1,11 +1,13 @@
|
||||
###########
|
||||
# 远程开发,需要 VSCode 安装 Dev Containers, Remote SSH, Remote Explorer
|
||||
# Remote development requires VSCode with Dev Containers, Remote SSH, Remote Explorer
|
||||
# https://code.visualstudio.com/docs/remote/containers
|
||||
###########
|
||||
DOCKER_CLI ?= docker
|
||||
IMAGE_NAME ?= rustfs:v1.0.0
|
||||
CONTAINER_NAME ?= rustfs-dev
|
||||
DOCKERFILE_PATH = $(shell pwd)/.docker
|
||||
# Docker build configurations
|
||||
DOCKERFILE_PRODUCTION = Dockerfile
|
||||
DOCKERFILE_SOURCE = Dockerfile.source
|
||||
|
||||
# Code quality and formatting targets
|
||||
.PHONY: fmt
|
||||
@@ -21,6 +23,7 @@ fmt-check:
|
||||
.PHONY: clippy
|
||||
clippy:
|
||||
@echo "🔍 Running clippy checks..."
|
||||
cargo clippy --fix --allow-dirty
|
||||
cargo clippy --all-targets --all-features -- -D warnings
|
||||
|
||||
.PHONY: check
|
||||
@@ -31,7 +34,13 @@ check:
|
||||
.PHONY: test
|
||||
test:
|
||||
@echo "🧪 Running tests..."
|
||||
cargo test --all --exclude e2e_test
|
||||
@if command -v cargo-nextest >/dev/null 2>&1; then \
|
||||
cargo nextest run --all --exclude e2e_test; \
|
||||
else \
|
||||
echo "ℹ️ cargo-nextest not found; falling back to 'cargo test'"; \
|
||||
cargo test --workspace --exclude e2e_test -- --nocapture; \
|
||||
fi
|
||||
cargo test --all --doc
|
||||
|
||||
.PHONY: pre-commit
|
||||
pre-commit: fmt clippy check test
|
||||
@@ -43,21 +52,6 @@ setup-hooks:
|
||||
chmod +x .git/hooks/pre-commit
|
||||
@echo "✅ Git hooks setup complete!"
|
||||
|
||||
.PHONY: init-devenv
|
||||
init-devenv:
|
||||
$(DOCKER_CLI) build -t $(IMAGE_NAME) -f $(DOCKERFILE_PATH)/Dockerfile.devenv .
|
||||
$(DOCKER_CLI) stop $(CONTAINER_NAME)
|
||||
$(DOCKER_CLI) rm $(CONTAINER_NAME)
|
||||
$(DOCKER_CLI) run -d --name $(CONTAINER_NAME) -p 9010:9010 -p 9000:9000 -v $(shell pwd):/root/s3-rustfs -it $(IMAGE_NAME)
|
||||
|
||||
.PHONY: start
|
||||
start:
|
||||
$(DOCKER_CLI) start $(CONTAINER_NAME)
|
||||
|
||||
.PHONY: stop
|
||||
stop:
|
||||
$(DOCKER_CLI) stop $(CONTAINER_NAME)
|
||||
|
||||
.PHONY: e2e-server
|
||||
e2e-server:
|
||||
sh $(shell pwd)/scripts/run.sh
|
||||
@@ -66,91 +60,203 @@ e2e-server:
|
||||
probe-e2e:
|
||||
sh $(shell pwd)/scripts/probe.sh
|
||||
|
||||
# make BUILD_OS=ubuntu22.04 build
|
||||
# in target/ubuntu22.04/release/rustfs
|
||||
|
||||
# make BUILD_OS=rockylinux9.3 build
|
||||
# in target/rockylinux9.3/release/rustfs
|
||||
BUILD_OS ?= rockylinux9.3
|
||||
# Native build using build-rustfs.sh script
|
||||
.PHONY: build
|
||||
build: ROCKYLINUX_BUILD_IMAGE_NAME = rustfs-$(BUILD_OS):v1
|
||||
build: ROCKYLINUX_BUILD_CONTAINER_NAME = rustfs-$(BUILD_OS)-build
|
||||
build: BUILD_CMD = /root/.cargo/bin/cargo build --release --bin rustfs --target-dir /root/s3-rustfs/target/$(BUILD_OS)
|
||||
build:
|
||||
$(DOCKER_CLI) build -t $(ROCKYLINUX_BUILD_IMAGE_NAME) -f $(DOCKERFILE_PATH)/Dockerfile.$(BUILD_OS) .
|
||||
$(DOCKER_CLI) run --rm --name $(ROCKYLINUX_BUILD_CONTAINER_NAME) -v $(shell pwd):/root/s3-rustfs -it $(ROCKYLINUX_BUILD_IMAGE_NAME) $(BUILD_CMD)
|
||||
@echo "🔨 Building RustFS using build-rustfs.sh script..."
|
||||
./build-rustfs.sh
|
||||
|
||||
.PHONY: build-dev
|
||||
build-dev:
|
||||
@echo "🔨 Building RustFS in development mode..."
|
||||
./build-rustfs.sh --dev
|
||||
|
||||
# Docker-based build (alternative approach)
|
||||
# Usage: make BUILD_OS=ubuntu22.04 build-docker
|
||||
# Output: target/ubuntu22.04/release/rustfs
|
||||
BUILD_OS ?= rockylinux9.3
|
||||
.PHONY: build-docker
|
||||
build-docker: SOURCE_BUILD_IMAGE_NAME = rustfs-$(BUILD_OS):v1
|
||||
build-docker: SOURCE_BUILD_CONTAINER_NAME = rustfs-$(BUILD_OS)-build
|
||||
build-docker: BUILD_CMD = /root/.cargo/bin/cargo build --release --bin rustfs --target-dir /root/s3-rustfs/target/$(BUILD_OS)
|
||||
build-docker:
|
||||
@echo "🐳 Building RustFS using Docker ($(BUILD_OS))..."
|
||||
$(DOCKER_CLI) buildx build -t $(SOURCE_BUILD_IMAGE_NAME) -f $(DOCKERFILE_SOURCE) .
|
||||
$(DOCKER_CLI) run --rm --name $(SOURCE_BUILD_CONTAINER_NAME) -v $(shell pwd):/root/s3-rustfs -it $(SOURCE_BUILD_IMAGE_NAME) $(BUILD_CMD)
|
||||
|
||||
.PHONY: build-musl
|
||||
build-musl:
|
||||
@echo "🔨 Building rustfs for x86_64-unknown-linux-musl..."
|
||||
cargo build --target x86_64-unknown-linux-musl --bin rustfs -r
|
||||
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
|
||||
./build-rustfs.sh --platform x86_64-unknown-linux-musl
|
||||
|
||||
.PHONY: build-gnu
|
||||
build-gnu:
|
||||
@echo "🔨 Building rustfs for x86_64-unknown-linux-gnu..."
|
||||
cargo build --target x86_64-unknown-linux-gnu --bin rustfs -r
|
||||
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
|
||||
./build-rustfs.sh --platform x86_64-unknown-linux-gnu
|
||||
|
||||
.PHONY: build-musl-arm64
|
||||
build-musl-arm64:
|
||||
@echo "🔨 Building rustfs for aarch64-unknown-linux-musl..."
|
||||
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
|
||||
./build-rustfs.sh --platform aarch64-unknown-linux-musl
|
||||
|
||||
.PHONY: build-gnu-arm64
|
||||
build-gnu-arm64:
|
||||
@echo "🔨 Building rustfs for aarch64-unknown-linux-gnu..."
|
||||
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
|
||||
./build-rustfs.sh --platform aarch64-unknown-linux-gnu
|
||||
|
||||
.PHONY: deploy-dev
|
||||
deploy-dev: build-musl
|
||||
@echo "🚀 Deploying to dev server: $${IP}"
|
||||
./scripts/dev_deploy.sh $${IP}
|
||||
|
||||
# Multi-architecture Docker build targets
|
||||
.PHONY: docker-build-multiarch
|
||||
docker-build-multiarch:
|
||||
@echo "🏗️ Building multi-architecture Docker images..."
|
||||
./scripts/build-docker-multiarch.sh
|
||||
# ========================================================================================
|
||||
# Docker Multi-Architecture Builds (Primary Methods)
|
||||
# ========================================================================================
|
||||
|
||||
.PHONY: docker-build-multiarch-push
|
||||
docker-build-multiarch-push:
|
||||
@echo "🚀 Building and pushing multi-architecture Docker images..."
|
||||
./scripts/build-docker-multiarch.sh --push
|
||||
# Production builds using docker-buildx.sh (for CI/CD and production)
|
||||
.PHONY: docker-buildx
|
||||
docker-buildx:
|
||||
@echo "🏗️ Building multi-architecture production Docker images with buildx..."
|
||||
./docker-buildx.sh
|
||||
|
||||
.PHONY: docker-build-multiarch-version
|
||||
docker-build-multiarch-version:
|
||||
.PHONY: docker-buildx-push
|
||||
docker-buildx-push:
|
||||
@echo "🚀 Building and pushing multi-architecture production Docker images with buildx..."
|
||||
./docker-buildx.sh --push
|
||||
|
||||
.PHONY: docker-buildx-version
|
||||
docker-buildx-version:
|
||||
@if [ -z "$(VERSION)" ]; then \
|
||||
echo "❌ 错误: 请指定版本, 例如: make docker-build-multiarch-version VERSION=v1.0.0"; \
|
||||
echo "❌ Error: Please specify version, example: make docker-buildx-version VERSION=v1.0.0"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "🏗️ Building multi-architecture Docker images (version: $(VERSION))..."
|
||||
./scripts/build-docker-multiarch.sh --version $(VERSION)
|
||||
@echo "🏗️ Building multi-architecture production Docker images (version: $(VERSION))..."
|
||||
./docker-buildx.sh --release $(VERSION)
|
||||
|
||||
.PHONY: docker-push-multiarch-version
|
||||
docker-push-multiarch-version:
|
||||
.PHONY: docker-buildx-push-version
|
||||
docker-buildx-push-version:
|
||||
@if [ -z "$(VERSION)" ]; then \
|
||||
echo "❌ 错误: 请指定版本, 例如: make docker-push-multiarch-version VERSION=v1.0.0"; \
|
||||
echo "❌ Error: Please specify version, example: make docker-buildx-push-version VERSION=v1.0.0"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "🚀 Building and pushing multi-architecture Docker images (version: $(VERSION))..."
|
||||
./scripts/build-docker-multiarch.sh --version $(VERSION) --push
|
||||
@echo "🚀 Building and pushing multi-architecture production Docker images (version: $(VERSION))..."
|
||||
./docker-buildx.sh --release $(VERSION) --push
|
||||
|
||||
.PHONY: docker-build-ubuntu
|
||||
docker-build-ubuntu:
|
||||
@echo "🏗️ Building multi-architecture Ubuntu Docker images..."
|
||||
./scripts/build-docker-multiarch.sh --type ubuntu
|
||||
# Development/Source builds using direct buildx commands
|
||||
.PHONY: docker-dev
|
||||
docker-dev:
|
||||
@echo "🏗️ Building multi-architecture development Docker images with buildx..."
|
||||
@echo "💡 This builds from source code and is intended for local development and testing"
|
||||
@echo "⚠️ Multi-arch images cannot be loaded locally, use docker-dev-push to push to registry"
|
||||
$(DOCKER_CLI) buildx build \
|
||||
--platform linux/amd64,linux/arm64 \
|
||||
--file $(DOCKERFILE_SOURCE) \
|
||||
--tag rustfs:source-latest \
|
||||
--tag rustfs:dev-latest \
|
||||
.
|
||||
|
||||
.PHONY: docker-build-rockylinux
|
||||
docker-build-rockylinux:
|
||||
@echo "🏗️ Building multi-architecture RockyLinux Docker images..."
|
||||
./scripts/build-docker-multiarch.sh --type rockylinux
|
||||
.PHONY: docker-dev-local
|
||||
docker-dev-local:
|
||||
@echo "🏗️ Building single-architecture development Docker image for local use..."
|
||||
@echo "💡 This builds from source code for the current platform and loads locally"
|
||||
$(DOCKER_CLI) buildx build \
|
||||
--file $(DOCKERFILE_SOURCE) \
|
||||
--tag rustfs:source-latest \
|
||||
--tag rustfs:dev-latest \
|
||||
--load \
|
||||
.
|
||||
|
||||
.PHONY: docker-build-devenv
|
||||
docker-build-devenv:
|
||||
@echo "🏗️ Building multi-architecture development environment Docker images..."
|
||||
./scripts/build-docker-multiarch.sh --type devenv
|
||||
.PHONY: docker-dev-push
|
||||
docker-dev-push:
|
||||
@if [ -z "$(REGISTRY)" ]; then \
|
||||
echo "❌ Error: Please specify registry, example: make docker-dev-push REGISTRY=ghcr.io/username"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "🚀 Building and pushing multi-architecture development Docker images..."
|
||||
@echo "💡 Pushing to registry: $(REGISTRY)"
|
||||
$(DOCKER_CLI) buildx build \
|
||||
--platform linux/amd64,linux/arm64 \
|
||||
--file $(DOCKERFILE_SOURCE) \
|
||||
--tag $(REGISTRY)/rustfs:source-latest \
|
||||
--tag $(REGISTRY)/rustfs:dev-latest \
|
||||
--push \
|
||||
.
|
||||
|
||||
.PHONY: docker-build-all-types
|
||||
docker-build-all-types:
|
||||
@echo "🏗️ Building all multi-architecture Docker image types..."
|
||||
./scripts/build-docker-multiarch.sh --type production
|
||||
./scripts/build-docker-multiarch.sh --type ubuntu
|
||||
./scripts/build-docker-multiarch.sh --type rockylinux
|
||||
./scripts/build-docker-multiarch.sh --type devenv
|
||||
|
||||
|
||||
# Local production builds using direct buildx (alternative to docker-buildx.sh)
|
||||
.PHONY: docker-buildx-production-local
|
||||
docker-buildx-production-local:
|
||||
@echo "🏗️ Building single-architecture production Docker image locally..."
|
||||
@echo "💡 Alternative to docker-buildx.sh for local testing"
|
||||
$(DOCKER_CLI) buildx build \
|
||||
--file $(DOCKERFILE_PRODUCTION) \
|
||||
--tag rustfs:production-latest \
|
||||
--tag rustfs:latest \
|
||||
--load \
|
||||
--build-arg RELEASE=latest \
|
||||
.
|
||||
|
||||
# ========================================================================================
|
||||
# Single Architecture Docker Builds (Traditional)
|
||||
# ========================================================================================
|
||||
|
||||
.PHONY: docker-build-production
|
||||
docker-build-production:
|
||||
@echo "🏗️ Building single-architecture production Docker image..."
|
||||
@echo "💡 Consider using 'make docker-buildx-production-local' for multi-arch support"
|
||||
$(DOCKER_CLI) build -f $(DOCKERFILE_PRODUCTION) -t rustfs:latest .
|
||||
|
||||
.PHONY: docker-build-source
|
||||
docker-build-source:
|
||||
@echo "🏗️ Building single-architecture source Docker image..."
|
||||
@echo "💡 Consider using 'make docker-dev-local' for multi-arch support"
|
||||
DOCKER_BUILDKIT=1 $(DOCKER_CLI) build \
|
||||
--build-arg BUILDKIT_INLINE_CACHE=1 \
|
||||
-f $(DOCKERFILE_SOURCE) -t rustfs:source .
|
||||
|
||||
# ========================================================================================
|
||||
# Development Environment
|
||||
# ========================================================================================
|
||||
|
||||
.PHONY: dev-env-start
|
||||
dev-env-start:
|
||||
@echo "🚀 Starting development environment..."
|
||||
$(DOCKER_CLI) buildx build \
|
||||
--file $(DOCKERFILE_SOURCE) \
|
||||
--tag rustfs:dev \
|
||||
--load \
|
||||
.
|
||||
$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
|
||||
$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true
|
||||
$(DOCKER_CLI) run -d --name $(CONTAINER_NAME) \
|
||||
-p 9010:9010 -p 9000:9000 \
|
||||
-v $(shell pwd):/workspace \
|
||||
-it rustfs:dev
|
||||
|
||||
.PHONY: dev-env-stop
|
||||
dev-env-stop:
|
||||
@echo "🛑 Stopping development environment..."
|
||||
$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
|
||||
$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true
|
||||
|
||||
.PHONY: dev-env-restart
|
||||
dev-env-restart: dev-env-stop dev-env-start
|
||||
|
||||
|
||||
|
||||
# ========================================================================================
|
||||
# Build Utilities
|
||||
# ========================================================================================
|
||||
|
||||
.PHONY: docker-inspect-multiarch
|
||||
docker-inspect-multiarch:
|
||||
@if [ -z "$(IMAGE)" ]; then \
|
||||
echo "❌ 错误: 请指定镜像, 例如: make docker-inspect-multiarch IMAGE=rustfs/rustfs:latest"; \
|
||||
echo "❌ Error: Please specify image, example: make docker-inspect-multiarch IMAGE=rustfs/rustfs:latest"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "🔍 Inspecting multi-architecture image: $(IMAGE)"
|
||||
@@ -159,41 +265,112 @@ docker-inspect-multiarch:
|
||||
.PHONY: build-cross-all
|
||||
build-cross-all:
|
||||
@echo "🔧 Building all target architectures..."
|
||||
@if ! command -v cross &> /dev/null; then \
|
||||
echo "📦 Installing cross..."; \
|
||||
cargo install cross; \
|
||||
fi
|
||||
@echo "💡 On macOS/Windows, use 'make docker-dev' for reliable multi-arch builds"
|
||||
@echo "🔨 Generating protobuf code..."
|
||||
cargo run --bin gproto || true
|
||||
@echo "🔨 Building x86_64-unknown-linux-musl..."
|
||||
cargo build --release --target x86_64-unknown-linux-musl --bin rustfs
|
||||
@echo "🔨 Building x86_64-unknown-linux-gnu..."
|
||||
./build-rustfs.sh --platform x86_64-unknown-linux-gnu
|
||||
@echo "🔨 Building aarch64-unknown-linux-gnu..."
|
||||
cross build --release --target aarch64-unknown-linux-gnu --bin rustfs
|
||||
./build-rustfs.sh --platform aarch64-unknown-linux-gnu
|
||||
@echo "🔨 Building x86_64-unknown-linux-musl..."
|
||||
./build-rustfs.sh --platform x86_64-unknown-linux-musl
|
||||
@echo "🔨 Building aarch64-unknown-linux-musl..."
|
||||
./build-rustfs.sh --platform aarch64-unknown-linux-musl
|
||||
@echo "✅ All architectures built successfully!"
|
||||
|
||||
# ========================================================================================
|
||||
# Help and Documentation
|
||||
# ========================================================================================
|
||||
|
||||
.PHONY: help-build
|
||||
help-build:
|
||||
@echo "🔨 RustFS Build Help:"
|
||||
@echo ""
|
||||
@echo "🚀 Local Build (Recommended):"
|
||||
@echo " make build # Build RustFS binary (includes console by default)"
|
||||
@echo " make build-dev # Development mode build"
|
||||
@echo " make build-musl # Build x86_64 musl version"
|
||||
@echo " make build-gnu # Build x86_64 GNU version"
|
||||
@echo " make build-musl-arm64 # Build aarch64 musl version"
|
||||
@echo " make build-gnu-arm64 # Build aarch64 GNU version"
|
||||
@echo ""
|
||||
@echo "🐳 Docker Build:"
|
||||
@echo " make build-docker # Build using Docker container"
|
||||
@echo " make build-docker BUILD_OS=ubuntu22.04 # Specify build system"
|
||||
@echo ""
|
||||
@echo "🏗️ Cross-architecture Build:"
|
||||
@echo " make build-cross-all # Build binaries for all architectures"
|
||||
@echo ""
|
||||
@echo "🔧 Direct usage of build-rustfs.sh script:"
|
||||
@echo " ./build-rustfs.sh --help # View script help"
|
||||
@echo " ./build-rustfs.sh --no-console # Build without console resources"
|
||||
@echo " ./build-rustfs.sh --force-console-update # Force update console resources"
|
||||
@echo " ./build-rustfs.sh --dev # Development mode build"
|
||||
@echo " ./build-rustfs.sh --sign # Sign binary files"
|
||||
@echo " ./build-rustfs.sh --platform x86_64-unknown-linux-gnu # Specify target platform"
|
||||
@echo " ./build-rustfs.sh --skip-verification # Skip binary verification"
|
||||
@echo ""
|
||||
@echo "💡 build-rustfs.sh script provides more options, smart detection and binary verification"
|
||||
|
||||
.PHONY: help-docker
|
||||
help-docker:
|
||||
@echo "🐳 Docker 多架构构建帮助:"
|
||||
@echo "🐳 Docker Multi-architecture Build Help:"
|
||||
@echo ""
|
||||
@echo "基本构建:"
|
||||
@echo " make docker-build-multiarch # 构建多架构镜像(不推送)"
|
||||
@echo " make docker-build-multiarch-push # 构建并推送多架构镜像"
|
||||
@echo "🚀 Production Image Build (Recommended to use docker-buildx.sh):"
|
||||
@echo " make docker-buildx # Build production multi-arch image (no push)"
|
||||
@echo " make docker-buildx-push # Build and push production multi-arch image"
|
||||
@echo " make docker-buildx-version VERSION=v1.0.0 # Build specific version"
|
||||
@echo " make docker-buildx-push-version VERSION=v1.0.0 # Build and push specific version"
|
||||
@echo ""
|
||||
@echo "版本构建:"
|
||||
@echo " make docker-build-multiarch-version VERSION=v1.0.0 # 构建指定版本"
|
||||
@echo " make docker-push-multiarch-version VERSION=v1.0.0 # 构建并推送指定版本"
|
||||
@echo "🔧 Development/Source Image Build (Local development testing):"
|
||||
@echo " make docker-dev # Build dev multi-arch image (cannot load locally)"
|
||||
@echo " make docker-dev-local # Build dev single-arch image (local load)"
|
||||
@echo " make docker-dev-push REGISTRY=xxx # Build and push dev image"
|
||||
@echo ""
|
||||
@echo "镜像类型:"
|
||||
@echo " make docker-build-ubuntu # 构建 Ubuntu 镜像"
|
||||
@echo " make docker-build-rockylinux # 构建 RockyLinux 镜像"
|
||||
@echo " make docker-build-devenv # 构建开发环境镜像"
|
||||
@echo " make docker-build-all-types # 构建所有类型镜像"
|
||||
@echo "🏗️ Local Production Image Build (Alternative):"
|
||||
@echo " make docker-buildx-production-local # Build production single-arch image locally"
|
||||
@echo ""
|
||||
@echo "辅助工具:"
|
||||
@echo " make build-cross-all # 构建所有架构的二进制文件"
|
||||
@echo " make docker-inspect-multiarch IMAGE=xxx # 检查镜像的架构支持"
|
||||
@echo "📦 Single-architecture Build (Traditional way):"
|
||||
@echo " make docker-build-production # Build single-arch production image"
|
||||
@echo " make docker-build-source # Build single-arch source image"
|
||||
@echo ""
|
||||
@echo "环境变量 (在推送时需要设置):"
|
||||
@echo " DOCKERHUB_USERNAME Docker Hub 用户名"
|
||||
@echo " DOCKERHUB_TOKEN Docker Hub 访问令牌"
|
||||
@echo " GITHUB_TOKEN GitHub 访问令牌"
|
||||
@echo "🚀 Development Environment Management:"
|
||||
@echo " make dev-env-start # Start development container environment"
|
||||
@echo " make dev-env-stop # Stop development container environment"
|
||||
@echo " make dev-env-restart # Restart development container environment"
|
||||
@echo ""
|
||||
@echo "🔧 Auxiliary Tools:"
|
||||
@echo " make build-cross-all # Build binaries for all architectures"
|
||||
@echo " make docker-inspect-multiarch IMAGE=xxx # Check image architecture support"
|
||||
@echo ""
|
||||
@echo "📋 Environment Variables:"
|
||||
@echo " REGISTRY Image registry address (required for push)"
|
||||
@echo " DOCKERHUB_USERNAME Docker Hub username"
|
||||
@echo " DOCKERHUB_TOKEN Docker Hub access token"
|
||||
@echo " GITHUB_TOKEN GitHub access token"
|
||||
@echo ""
|
||||
@echo "💡 Suggestions:"
|
||||
@echo " - Production use: Use docker-buildx* commands (based on precompiled binaries)"
|
||||
@echo " - Local development: Use docker-dev* commands (build from source)"
|
||||
@echo " - Development environment: Use dev-env-* commands to manage dev containers"
|
||||
|
||||
.PHONY: help
|
||||
help:
|
||||
@echo "🦀 RustFS Makefile Help:"
|
||||
@echo ""
|
||||
@echo "📋 Main Command Categories:"
|
||||
@echo " make help-build # Show build-related help"
|
||||
@echo " make help-docker # Show Docker-related help"
|
||||
@echo ""
|
||||
@echo "🔧 Code Quality:"
|
||||
@echo " make fmt # Format code"
|
||||
@echo " make clippy # Run clippy checks"
|
||||
@echo " make test # Run tests"
|
||||
@echo " make pre-commit # Run all pre-commit checks"
|
||||
@echo ""
|
||||
@echo "🚀 Quick Start:"
|
||||
@echo " make build # Build RustFS binary"
|
||||
@echo " make docker-dev-local # Build development Docker image (local)"
|
||||
@echo " make dev-env-start # Start development environment"
|
||||
@echo ""
|
||||
@echo "💡 For more help use 'make help-build' or 'make help-docker'"
|
||||
|
||||
README.md (162 changed lines)
@@ -1,37 +1,39 @@
|
||||
[](https://rustfs.com)
|
||||
|
||||
|
||||
<p align="center">RustFS is a high-performance distributed object storage software built using Rust</p>
|
||||
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
|
||||
<a href="https://github.com/rustfs/rustfs/actions/workflows/docker.yml"><img alt="Build and Push Docker Images" src="https://github.com/rustfs/rustfs/actions/workflows/docker.yml/badge.svg" /></a>
|
||||
<img alt="GitHub commit activity" src="https://img.shields.io/github/commit-activity/m/rustfs/rustfs"/>
|
||||
<img alt="Github Last Commit" src="https://img.shields.io/github/last-commit/rustfs/rustfs"/>
|
||||
<a href="https://hellogithub.com/repository/rustfs/rustfs" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=b95bcb72bdc340b68f16fdf6790b7d5b&claim_uid=MsbvjYeLDKAH457&theme=small" alt="Featured|HelloGitHub" /></a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
<a href="https://docs.rustfs.com/en/introduction.html">Getting Started</a>
|
||||
· <a href="https://docs.rustfs.com/en/">Docs</a>
|
||||
<a href="https://docs.rustfs.com/introduction.html">Getting Started</a>
|
||||
· <a href="https://docs.rustfs.com/">Docs</a>
|
||||
· <a href="https://github.com/rustfs/rustfs/issues">Bug reports</a>
|
||||
· <a href="https://github.com/rustfs/rustfs/discussions">Discussions</a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
English | <a href="https://github.com/rustfs/rustfs/blob/main/README_ZH.md">简体中文</a> |
|
||||
English | <a href="https://github.com/rustfs/rustfs/blob/main/README_ZH.md">简体中文</a> |
|
||||
<!-- Keep these links. Translations will automatically update with the README. -->
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=de">Deutsch</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=es">Español</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=fr">français</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ja">日本語</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ko">한국어</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=pt">Português</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=de">Deutsch</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=es">Español</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=fr">français</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ja">日本語</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ko">한국어</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=pt">Portuguese</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ru">Русский</a>
|
||||
</p>
|
||||
|
||||
RustFS is a high-performance distributed object storage software built using Rust, one of the most popular languages worldwide. Along with MinIO, it shares a range of advantages such as simplicity, S3 compatibility, open-source nature, support for data lakes, AI, and big data. Furthermore, it has a better and more user-friendly open-source license in comparison to other storage systems, being constructed under the Apache license. As Rust serves as its foundation, RustFS provides faster speed and safer distributed features for high-performance object storage.
|
||||
|
||||
RustFS is a high-performance distributed object storage software built using Rust, one of the most popular languages
|
||||
worldwide. Along with MinIO, it shares a range of advantages such as simplicity, S3 compatibility, open-source nature,
|
||||
support for data lakes, AI, and big data. Furthermore, it has a better and more user-friendly open-source license in
|
||||
comparison to other storage systems, being constructed under the Apache license. As Rust serves as its foundation,
|
||||
RustFS provides faster speed and safer distributed features for high-performance object storage.
|
||||
|
||||
> ⚠️ **RustFS is under rapid development. Do NOT use in production environments!**
|
||||
|
||||
@@ -48,60 +50,122 @@ RustFS is a high-performance distributed object storage software built using Rus
|
||||
|
||||
Stress test server parameters
|
||||
|
||||
| Type | parameter | Remark |
|
||||
| - | - | - |
|
||||
|CPU | 2 Core | Intel Xeon(Sapphire Rapids) Platinum 8475B , 2.7/3.2 GHz| |
|
||||
|Memory| 4GB | |
|
||||
|Network | 15Gbp | |
|
||||
|Driver | 40GB x 4 | IOPS 3800 / Driver |
|
||||
| Type | parameter | Remark |
|
||||
|---------|-----------|----------------------------------------------------------|
|
||||
| CPU     | 2 Core    | Intel Xeon (Sapphire Rapids) Platinum 8475B, 2.7/3.2 GHz |
|
||||
| Memory | 4GB | |
|
||||
| Network | 15Gbps    |                                                          |
|
||||
| Driver | 40GB x 4 | IOPS 3800 / Driver |
|
||||
|
||||
<https://github.com/user-attachments/assets/2e4979b5-260c-4f2c-ac12-c87fd558072a>
|
||||
|
||||
### RustFS vs Other object storage
|
||||
|
||||
| RustFS | Other object storage|
|
||||
| - | - |
|
||||
| Powerful Console | Simple and useless Console |
|
||||
| Developed based on Rust language, memory is safer | Developed in Go or C, with potential issues like memory GC/leaks |
|
||||
| Does not report logs to third-party countries | Reporting logs to other third countries may violate national security laws |
|
||||
| Licensed under Apache, more business-friendly | AGPL V3 License and other License, polluted open source and License traps, infringement of intellectual property rights |
|
||||
| Comprehensive S3 support, works with domestic and international cloud providers | Full support for S3, but no local cloud vendor support |
|
||||
| Rust-based development, strong support for secure and innovative devices | Poor support for edge gateways and secure innovative devices|
|
||||
| Stable commercial prices, free community support | High pricing, with costs up to $250,000 for 1PiB |
|
||||
| No risk | Intellectual property risks and risks of prohibited uses |
|
||||
| RustFS | Other object storage |
|
||||
|---------------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------|
|
||||
| Powerful Console | Simple and useless Console |
|
||||
| Developed based on Rust language, memory is safer | Developed in Go or C, with potential issues like memory GC/leaks |
|
||||
| Does not report logs to third-party countries | Reporting logs to other third countries may violate national security laws |
|
||||
| Licensed under Apache, more business-friendly | AGPL V3 License and other License, polluted open source and License traps, infringement of intellectual property rights |
|
||||
| Comprehensive S3 support, works with domestic and international cloud providers | Full support for S3, but no local cloud vendor support |
|
||||
| Rust-based development, strong support for secure and innovative devices | Poor support for edge gateways and secure innovative devices |
|
||||
| Stable commercial prices, free community support | High pricing, with costs up to $250,000 for 1PiB |
|
||||
| No risk | Intellectual property risks and risks of prohibited uses |
|
||||
|
||||
## Quickstart
|
||||
|
||||
To get started with RustFS, follow these steps:
|
||||
|
||||
1. **One-click installation script (Option 1)**
|
||||
1. **One-click installation script (Option 1)**
|
||||
|
||||
```bash
|
||||
curl -O https://rustfs.com/install_rustfs.sh && bash install_rustfs.sh
|
||||
```
|
||||
```bash
|
||||
curl -O https://rustfs.com/install_rustfs.sh && bash install_rustfs.sh
|
||||
```
|
||||
|
||||
2. **Docker Quick Start (Option 2)**
|
||||
|
||||
```bash
|
||||
podman run -d -p 9000:9000 -p 9001:9001 -v /data:/data quay.io/rustfs/rustfs
|
||||
# create data and logs directories
|
||||
mkdir -p data logs
|
||||
|
||||
# using latest alpha version
|
||||
docker run -d -p 9000:9000 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:alpha
|
||||
|
||||
# Specific version
|
||||
docker run -d -p 9000:9000 -v $(pwd)/data:/data -v $(pwd)/logs:/logs rustfs/rustfs:1.0.0.alpha.45
|
||||
```
|
||||
|
||||
For Docker installation, you can also run the container with Docker Compose. With the `docker-compose.yml` file in the repository root, run the following command:
|
||||
|
||||
3. **Access the Console**: Open your web browser and navigate to `http://localhost:9001` to access the RustFS console, default username and password is `rustfsadmin` .
|
||||
4. **Create a Bucket**: Use the console to create a new bucket for your objects.
|
||||
5. **Upload Objects**: You can upload files directly through the console or use S3-compatible APIs to interact with your RustFS instance.
|
||||
```
|
||||
docker compose --profile observability up -d
|
||||
```
|
||||
|
||||
**NOTE**: It is worth reviewing the `docker-compose.yaml` file first, because it defines several services. The Grafana, Prometheus, and Jaeger containers are launched by the compose file and support RustFS observability. If you also want to start the Redis and Nginx containers, specify the corresponding profiles.
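A minimal sketch of enabling extra profiles, assuming the compose file defines `redis` and `nginx` profiles as the note above suggests:

```bash
# Start the observability stack plus the optional redis and nginx services
docker compose --profile observability --profile redis --profile nginx up -d
```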
|
||||
|
||||
3. **Build from Source (Option 3) - Advanced Users**
|
||||
|
||||
For developers who want to build RustFS Docker images from source with multi-architecture support:
|
||||
|
||||
```bash
|
||||
# Build multi-architecture images locally
|
||||
./docker-buildx.sh --build-arg RELEASE=latest
|
||||
|
||||
# Build and push to registry
|
||||
./docker-buildx.sh --push
|
||||
|
||||
# Build specific version
|
||||
./docker-buildx.sh --release v1.0.0 --push
|
||||
|
||||
# Build for custom registry
|
||||
./docker-buildx.sh --registry your-registry.com --namespace yourname --push
|
||||
```
|
||||
|
||||
The `docker-buildx.sh` script supports:
|
||||
- **Multi-architecture builds**: `linux/amd64`, `linux/arm64`
|
||||
- **Automatic version detection**: Uses git tags or commit hashes
|
||||
- **Registry flexibility**: Supports Docker Hub, GitHub Container Registry, etc.
|
||||
- **Build optimization**: Includes caching and parallel builds
|
||||
|
||||
You can also use Make targets for convenience:
|
||||
|
||||
```bash
|
||||
make docker-buildx # Build locally
|
||||
make docker-buildx-push # Build and push
|
||||
make docker-buildx-version VERSION=v1.0.0 # Build specific version
|
||||
make help-docker # Show all Docker-related commands
|
||||
```
|
||||
|
||||
4. **Build with helm chart (Option 4) - Cloud Native environment**

Follow the instructions in the [helm chart README](./helm/README.md) to install RustFS on a Kubernetes cluster.
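A minimal sketch, assuming the chart in `./helm` is installed with default values and a hypothetical release name and namespace of `rustfs`:

```bash
# Install the local chart into its own namespace
helm install rustfs ./helm --namespace rustfs --create-namespace

# Verify that the RustFS pods start
kubectl get pods -n rustfs
```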
|
||||
|
||||
5. **Access the Console**: Open your web browser and navigate to `http://localhost:9000` to access the RustFS console. The default username and password are `rustfsadmin`.
|
||||
6. **Create a Bucket**: Use the console to create a new bucket for your objects.
|
||||
7. **Upload Objects**: You can upload files directly through the console or use S3-compatible APIs to interact with your RustFS instance, for example with the CLI sketch below.
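A minimal sketch using the AWS CLI, assuming the default endpoint `http://localhost:9000`, the default `rustfsadmin` credentials, and an example bucket name `my-bucket`:

```bash
# Point the AWS CLI at the local RustFS instance (assumed default credentials)
export AWS_ACCESS_KEY_ID=rustfsadmin
export AWS_SECRET_ACCESS_KEY=rustfsadmin
export AWS_DEFAULT_REGION=us-east-1   # any placeholder region works

# Create a bucket and upload a file through the S3-compatible API
aws --endpoint-url http://localhost:9000 s3 mb s3://my-bucket
aws --endpoint-url http://localhost:9000 s3 cp ./example.txt s3://my-bucket/
```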
|
||||
|
||||
**NOTE**: If you want to access the RustFS instance over `https`, refer to the [TLS configuration docs](https://docs.rustfs.com/integration/tls-configured.html).
|
||||
|
||||
## Documentation
|
||||
|
||||
For detailed documentation, including configuration options, API references, and advanced usage, please visit our [Documentation](https://docs.rustfs.com).
|
||||
For detailed documentation, including configuration options, API references, and advanced usage, please visit
|
||||
our [Documentation](https://docs.rustfs.com).
|
||||
|
||||
## Getting Help
|
||||
|
||||
If you have any questions or need assistance, you can:
|
||||
|
||||
- Check the [FAQ](https://github.com/rustfs/rustfs/discussions/categories/q-a) for common issues and solutions.
|
||||
- Join our [GitHub Discussions](https://github.com/rustfs/rustfs/discussions) to ask questions and share your experiences.
|
||||
- Open an issue on our [GitHub Issues](https://github.com/rustfs/rustfs/issues) page for bug reports or feature requests.
|
||||
- Join our [GitHub Discussions](https://github.com/rustfs/rustfs/discussions) to ask questions and share your
|
||||
experiences.
|
||||
- Open an issue on our [GitHub Issues](https://github.com/rustfs/rustfs/issues) page for bug reports or feature
|
||||
requests.
|
||||
|
||||
## Links
|
||||
|
||||
@@ -119,14 +183,28 @@ If you have any questions or need assistance, you can:
|
||||
|
||||
## Contributors
|
||||
|
||||
RustFS is a community-driven project, and we appreciate all contributions. Check out the [Contributors](https://github.com/rustfs/rustfs/graphs/contributors) page to see the amazing people who have helped make RustFS better.
|
||||
RustFS is a community-driven project, and we appreciate all contributions. Check out
|
||||
the [Contributors](https://github.com/rustfs/rustfs/graphs/contributors) page to see the amazing people who have helped
|
||||
make RustFS better.
|
||||
|
||||
<a href="https://github.com/rustfs/rustfs/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=rustfs/rustfs" />
|
||||
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="Contributors"/>
|
||||
</a>
|
||||
|
||||
## Github Trending Top
|
||||
|
||||
🚀 RustFS is beloved by open-source enthusiasts and enterprise users worldwide, often appearing on the GitHub Trending
|
||||
top charts.
|
||||
|
||||
<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>
|
||||
|
||||
## Star History
|
||||
|
||||
[](https://www.star-history.com/#rustfs/rustfs&type=date&legend=top-left)
|
||||
|
||||
## License
|
||||
|
||||
[Apache 2.0](https://opensource.org/licenses/Apache-2.0)
|
||||
|
||||
**RustFS** is a trademark of RustFS, Inc. All other trademarks are the property of their respective owners.
|
||||
|
||||
|
||||
114
README_ZH.md
@@ -7,6 +7,7 @@
|
||||
<a href="https://github.com/rustfs/rustfs/actions/workflows/docker.yml"><img alt="Build and Push Docker Images" src="https://github.com/rustfs/rustfs/actions/workflows/docker.yml/badge.svg" /></a>
|
||||
<img alt="GitHub commit activity" src="https://img.shields.io/github/commit-activity/m/rustfs/rustfs"/>
|
||||
<img alt="Github Last Commit" src="https://img.shields.io/github/last-commit/rustfs/rustfs"/>
|
||||
<a href="https://hellogithub.com/repository/rustfs/rustfs" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=b95bcb72bdc340b68f16fdf6790b7d5b&claim_uid=MsbvjYeLDKAH457&theme=small" alt="Featured|HelloGitHub" /></a>
|
||||
</p >
|
||||
|
||||
<p align="center">
|
||||
@@ -20,7 +21,9 @@
|
||||
<a href="https://github.com/rustfs/rustfs/blob/main/README.md">English</a > | 简体中文
|
||||
</p >
|
||||
|
||||
RustFS 是一个使用 Rust(全球最受欢迎的编程语言之一)构建的高性能分布式对象存储软件。与 MinIO 一样,它具有简单性、S3 兼容性、开源特性以及对数据湖、AI 和大数据的支持等一系列优势。此外,与其他存储系统相比,它采用 Apache 许可证构建,拥有更好、更用户友好的开源许可证。由于以 Rust 为基础,RustFS 为高性能对象存储提供了更快的速度和更安全的分布式功能。
|
||||
RustFS 是一个使用 Rust(全球最受欢迎的编程语言之一)构建的高性能分布式对象存储软件。与 MinIO 一样,它具有简单性、S3
|
||||
兼容性、开源特性以及对数据湖、AI 和大数据的支持等一系列优势。此外,与其他存储系统相比,它采用 Apache
|
||||
许可证构建,拥有更好、更用户友好的开源许可证。由于以 Rust 为基础,RustFS 为高性能对象存储提供了更快的速度和更安全的分布式功能。
|
||||
|
||||
## 特性
|
||||
|
||||
@@ -35,48 +38,98 @@ RustFS 是一个使用 Rust(全球最受欢迎的编程语言之一)构建
|
||||
|
||||
压力测试服务器参数
|
||||
|
||||
| 类型 | 参数 | 备注 |
|
||||
| - | - | - |
|
||||
|CPU | 2 核心 | Intel Xeon(Sapphire Rapids) Platinum 8475B , 2.7/3.2 GHz| |
|
||||
|内存| 4GB | |
|
||||
|网络 | 15Gbp | |
|
||||
|驱动器 | 40GB x 4 | IOPS 3800 / 驱动器 |
|
||||
| 类型 | 参数 | 备注 |
|
||||
|-----|----------|----------------------------------------------------------|
|
||||
| CPU | 2 核心 | Intel Xeon(Sapphire Rapids) Platinum 8475B , 2.7/3.2 GHz | |
|
||||
| 内存 | 4GB | |
|
||||
| 网络  | 15Gbps   |                                                          |
|
||||
| 驱动器 | 40GB x 4 | IOPS 3800 / 驱动器 |
|
||||
|
||||
<https://github.com/user-attachments/assets/2e4979b5-260c-4f2c-ac12-c87fd558072a>
|
||||
|
||||
### RustFS vs 其他对象存储
|
||||
|
||||
| RustFS | 其他对象存储|
|
||||
| - | - |
|
||||
| 强大的控制台 | 简单且无用的控制台 |
|
||||
| 基于 Rust 语言开发,内存更安全 | 使用 Go 或 C 开发,存在内存 GC/泄漏等潜在问题 |
|
||||
| 不向第三方国家报告日志 | 向其他第三方国家报告日志可能违反国家安全法律 |
|
||||
| 采用 Apache 许可证,对商业更友好 | AGPL V3 许可证等其他许可证,污染开源和许可证陷阱,侵犯知识产权 |
|
||||
| 全面的 S3 支持,适用于国内外云提供商 | 完全支持 S3,但不支持本地云厂商 |
|
||||
| 基于 Rust 开发,对安全和创新设备有强大支持 | 对边缘网关和安全创新设备支持较差|
|
||||
| 稳定的商业价格,免费社区支持 | 高昂的定价,1PiB 成本高达 $250,000 |
|
||||
| 无风险 | 知识产权风险和禁止使用的风险 |
|
||||
| RustFS | 其他对象存储 |
|
||||
|--------------------------|-------------------------------------|
|
||||
| 强大的控制台 | 简单且无用的控制台 |
|
||||
| 基于 Rust 语言开发,内存更安全 | 使用 Go 或 C 开发,存在内存 GC/泄漏等潜在问题 |
|
||||
| 不向第三方国家报告日志 | 向其他第三方国家报告日志可能违反国家安全法律 |
|
||||
| 采用 Apache 许可证,对商业更友好 | AGPL V3 许可证等其他许可证,污染开源和许可证陷阱,侵犯知识产权 |
|
||||
| 全面的 S3 支持,适用于国内外云提供商 | 完全支持 S3,但不支持本地云厂商 |
|
||||
| 基于 Rust 开发,对安全和创新设备有强大支持 | 对边缘网关和安全创新设备支持较差 |
|
||||
| 稳定的商业价格,免费社区支持 | 高昂的定价,1PiB 成本高达 $250,000 |
|
||||
| 无风险 | 知识产权风险和禁止使用的风险 |
|
||||
|
||||
## 快速开始
|
||||
|
||||
要开始使用 RustFS,请按照以下步骤操作:
|
||||
|
||||
1. **一键脚本快速启动 (方案一)**
|
||||
1. **一键脚本快速启动 (方案一)**
|
||||
|
||||
```bash
|
||||
curl -O https://rustfs.com/install_rustfs.sh && bash install_rustfs.sh
|
||||
```
|
||||
|
||||
2. **Docker快速启动(方案二)**
|
||||
2. **Docker 快速启动(方案二)**
|
||||
|
||||
```bash
|
||||
podman run -d -p 9000:9000 -p 9001:9001 -v /data:/data quay.io/rustfs/rustfs
|
||||
docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs
|
||||
```
|
||||
|
||||
对于使用 Docker 安装来讲,你还可以使用 `docker compose` 来启动 rustfs 实例。在仓库的根目录下面有一个 `docker-compose.yml`
|
||||
文件。运行如下命令即可:
|
||||
|
||||
3. **访问控制台**:打开 Web 浏览器并导航到 `http://localhost:9001` 以访问 RustFS 控制台,默认的用户名和密码是 `rustfsadmin` 。
|
||||
4. **创建存储桶**:使用控制台为您的对象创建新的存储桶。
|
||||
5. **上传对象**:您可以直接通过控制台上传文件,或使用 S3 兼容的 API 与您的 RustFS 实例交互。
|
||||
```
|
||||
docker compose --profile observability up -d
|
||||
```
|
||||
|
||||
**注意**:在使用 `docker compose` 之前,你应该仔细阅读一下 `docker-compose.yaml`,因为该文件中包含多个服务,除了 rustfs
|
||||
以外,还有 grafana、prometheus、jaeger 等,这些是为 rustfs 可观测性服务的,还有 redis 和 nginx。你想启动哪些容器,就需要用
|
||||
`--profile` 参数指定相应的 profile。
|
||||
|
||||
3. **从源码构建(方案三)- 高级用户**
|
||||
|
||||
面向希望从源码构建支持多架构 Docker 镜像的开发者:
|
||||
|
||||
```bash
|
||||
# 本地构建多架构镜像
|
||||
./docker-buildx.sh --build-arg RELEASE=latest
|
||||
|
||||
# 构建并推送至镜像仓库
|
||||
./docker-buildx.sh --push
|
||||
|
||||
# 构建指定版本
|
||||
./docker-buildx.sh --release v1.0.0 --push
|
||||
|
||||
# 构建并推送到自定义镜像仓库
|
||||
./docker-buildx.sh --registry your-registry.com --namespace yourname --push
|
||||
```
|
||||
|
||||
`docker-buildx.sh` 脚本支持:
|
||||
- **多架构构建**:`linux/amd64`、`linux/arm64`
|
||||
- **自动版本检测**:可使用 git 标签或提交哈希
|
||||
- **仓库灵活性**:支持 Docker Hub、GitHub Container Registry 等
|
||||
- **构建优化**:包含缓存和并行构建
|
||||
|
||||
你也可以使用 Makefile 提供的目标命令以提升便捷性:
|
||||
|
||||
```bash
|
||||
make docker-buildx # 本地构建
|
||||
make docker-buildx-push # 构建并推送
|
||||
make docker-buildx-version VERSION=v1.0.0 # 构建指定版本
|
||||
make help-docker # 显示全部 Docker 相关命令
|
||||
```
|
||||
|
||||
4. **使用 Helm Chart 部署(方案四)- 云原生环境**
|
||||
|
||||
按照 [helm chart 说明文档](./helm/README.md) 的指引,在 Kubernetes 集群中安装 RustFS。
|
||||
|
||||
5. **访问控制台**:打开 Web 浏览器并导航到 `http://localhost:9000` 以访问 RustFS 控制台,默认的用户名和密码是
|
||||
`rustfsadmin` 。
|
||||
6. **创建存储桶**:使用控制台为您的对象创建新的存储桶。
|
||||
7. **上传对象**:您可以直接通过控制台上传文件,或使用 S3 兼容的 API 与您的 RustFS 实例交互。
|
||||
|
||||
**注意**:如果你想通过 `https` 来访问 RustFS 实例,请参考 [TLS 配置文档](https://docs.rustfs.com/zh/integration/tls-configured.html)
|
||||
|
||||
## 文档
|
||||
|
||||
@@ -106,12 +159,23 @@ RustFS 是一个使用 Rust(全球最受欢迎的编程语言之一)构建
|
||||
|
||||
## 贡献者
|
||||
|
||||
RustFS 是一个社区驱动的项目,我们感谢所有的贡献。查看[贡献者](https://github.com/rustfs/rustfs/graphs/contributors)页面,了解帮助 RustFS 变得更好的杰出人员。
|
||||
RustFS 是一个社区驱动的项目,我们感谢所有的贡献。查看[贡献者](https://github.com/rustfs/rustfs/graphs/contributors)页面,了解帮助
|
||||
RustFS 变得更好的杰出人员。
|
||||
|
||||
<a href="https://github.com/rustfs/rustfs/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=rustfs/rustfs" />
|
||||
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="贡献者"/>
|
||||
</a >
|
||||
|
||||
## Github 全球推荐榜
|
||||
|
||||
🚀 RustFS 受到了全世界开源爱好者和企业用户的喜欢,多次登顶 Github Trending 全球榜。
|
||||
|
||||
<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>
|
||||
|
||||
## Star 历史图
|
||||
|
||||
[](https://www.star-history.com/#rustfs/rustfs&type=date&legend=top-left)
|
||||
|
||||
## 许可证
|
||||
|
||||
[Apache 2.0](https://opensource.org/licenses/Apache-2.0)
|
||||
|
||||
41
_typos.toml
Normal file
@@ -0,0 +1,41 @@
|
||||
[default]
|
||||
# # Ignore specific spell checking patterns
|
||||
# extend-ignore-identifiers-re = [
|
||||
# # Ignore common patterns in base64 encoding and hash values
|
||||
# "[A-Za-z0-9+/]{8,}={0,2}", # base64 encoding
|
||||
# "[A-Fa-f0-9]{8,}", # hexadecimal hash
|
||||
# "[A-Za-z0-9_-]{20,}", # long random strings
|
||||
# ]
|
||||
|
||||
# # Ignore specific regex patterns in content
|
||||
# extend-ignore-re = [
|
||||
# # Ignore hash values and encoded strings (base64 patterns)
|
||||
# "(?i)[A-Za-z0-9+/]{8,}={0,2}",
|
||||
# # Ignore long strings in quotes (usually hash or base64)
|
||||
# '"[A-Za-z0-9+/=_-]{8,}"',
|
||||
# # Ignore IV values and similar cryptographic strings
|
||||
# '"[A-Za-z0-9+/=]{12,}"',
|
||||
# # Ignore cryptographic signatures and keys (including partial strings)
|
||||
# "[A-Za-z0-9+/]{6,}[A-Za-z0-9+/=]*",
|
||||
# # Ignore base64-like strings in comments (common in examples)
|
||||
# "//.*[A-Za-z0-9+/]{8,}[A-Za-z0-9+/=]*",
|
||||
# ]
|
||||
extend-ignore-re = [
|
||||
# Ignore long strings in quotes (usually hash or base64)
|
||||
'"[A-Za-z0-9+/=_-]{32,}"',
|
||||
# Ignore IV values and similar cryptographic strings
|
||||
'"[A-Za-z0-9+/=]{12,}"',
|
||||
# Ignore cryptographic signatures and keys (including partial strings)
|
||||
"[A-Za-z0-9+/]{16,}[A-Za-z0-9+/=]*",
|
||||
]
|
||||
|
||||
[default.extend-words]
|
||||
bui = "bui"
|
||||
typ = "typ"
|
||||
clen = "clen"
|
||||
datas = "datas"
|
||||
bre = "bre"
|
||||
abd = "abd"
|
||||
|
||||
[files]
|
||||
extend-exclude = []
|
||||
579
build-rustfs.sh
Executable file
@@ -0,0 +1,579 @@
|
||||
#!/bin/bash
|
||||
|
||||
# RustFS Binary Build Script
|
||||
# This script compiles RustFS binaries for different platforms and architectures
|
||||
|
||||
set -e
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Auto-detect current platform
|
||||
detect_platform() {
|
||||
local arch=$(uname -m)
|
||||
local os=$(uname -s | tr '[:upper:]' '[:lower:]')
|
||||
|
||||
case "$os" in
|
||||
"linux")
|
||||
case "$arch" in
|
||||
"x86_64")
|
||||
# Default to GNU for better compatibility
|
||||
echo "x86_64-unknown-linux-gnu"
|
||||
;;
|
||||
"aarch64"|"arm64")
|
||||
echo "aarch64-unknown-linux-gnu"
|
||||
;;
|
||||
"armv7l")
|
||||
echo "armv7-unknown-linux-gnueabihf"
|
||||
;;
|
||||
"loongarch64")
|
||||
echo "loongarch64-unknown-linux-musl"
|
||||
;;
|
||||
*)
|
||||
echo "unknown-platform"
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
"darwin")
|
||||
case "$arch" in
|
||||
"x86_64")
|
||||
echo "x86_64-apple-darwin"
|
||||
;;
|
||||
"arm64"|"aarch64")
|
||||
echo "aarch64-apple-darwin"
|
||||
;;
|
||||
*)
|
||||
echo "unknown-platform"
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
*)
|
||||
echo "unknown-platform"
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# Cross-platform SHA256 checksum generation
|
||||
generate_sha256() {
|
||||
local file="$1"
|
||||
local output_file="$2"
|
||||
local os=$(uname -s | tr '[:upper:]' '[:lower:]')
|
||||
|
||||
case "$os" in
|
||||
"linux")
|
||||
if command -v sha256sum &> /dev/null; then
|
||||
sha256sum "$file" > "$output_file"
|
||||
elif command -v shasum &> /dev/null; then
|
||||
shasum -a 256 "$file" > "$output_file"
|
||||
else
|
||||
print_message $RED "❌ No SHA256 command found (sha256sum or shasum)"
|
||||
return 1
|
||||
fi
|
||||
;;
|
||||
"darwin")
|
||||
if command -v shasum &> /dev/null; then
|
||||
shasum -a 256 "$file" > "$output_file"
|
||||
elif command -v sha256sum &> /dev/null; then
|
||||
sha256sum "$file" > "$output_file"
|
||||
else
|
||||
print_message $RED "❌ No SHA256 command found (shasum or sha256sum)"
|
||||
return 1
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
# Try common commands in order
|
||||
if command -v sha256sum &> /dev/null; then
|
||||
sha256sum "$file" > "$output_file"
|
||||
elif command -v shasum &> /dev/null; then
|
||||
shasum -a 256 "$file" > "$output_file"
|
||||
else
|
||||
print_message $RED "❌ No SHA256 command found"
|
||||
return 1
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# Default values
|
||||
OUTPUT_DIR="target/release"
|
||||
PLATFORM=$(detect_platform) # Auto-detect current platform
|
||||
BINARY_NAME="rustfs"
|
||||
BUILD_TYPE="release"
|
||||
SIGN=false
|
||||
WITH_CONSOLE=true
|
||||
FORCE_CONSOLE_UPDATE=false
|
||||
CONSOLE_VERSION="latest"
|
||||
SKIP_VERIFICATION=false
|
||||
CUSTOM_PLATFORM=""
|
||||
|
||||
# Print usage
|
||||
usage() {
|
||||
echo "Usage: $0 [OPTIONS]"
|
||||
echo ""
|
||||
echo "Description:"
|
||||
echo " Build RustFS binary for the current platform. Designed for CI/CD pipelines"
|
||||
echo " where different runners build platform-specific binaries natively."
|
||||
echo " Includes automatic verification to ensure the built binary is functional."
|
||||
echo ""
|
||||
echo "Options:"
|
||||
echo " -o, --output-dir DIR Output directory (default: target/release)"
|
||||
echo " -b, --binary-name NAME Binary name (default: rustfs)"
|
||||
echo " -p, --platform TARGET Target platform (default: auto-detect)"
|
||||
echo " Supported platforms:"
|
||||
echo " x86_64-unknown-linux-gnu"
|
||||
echo " aarch64-unknown-linux-gnu"
|
||||
echo " armv7-unknown-linux-gnueabihf"
|
||||
echo " x86_64-unknown-linux-musl"
|
||||
echo " aarch64-unknown-linux-musl"
|
||||
echo " armv7-unknown-linux-musleabihf"
|
||||
echo " x86_64-apple-darwin"
|
||||
echo " aarch64-apple-darwin"
|
||||
echo " x86_64-pc-windows-msvc"
|
||||
echo " aarch64-pc-windows-msvc"
|
||||
echo " --dev Build in dev mode"
|
||||
echo " --sign Sign binaries after build"
|
||||
echo " --with-console Download console static assets (default)"
|
||||
echo " --no-console Skip console static assets"
|
||||
echo " --force-console-update Force update console assets even if they exist"
|
||||
echo " --console-version VERSION Console version to download (default: latest)"
|
||||
echo " --skip-verification Skip binary verification after build"
|
||||
echo " -h, --help Show this help message"
|
||||
echo ""
|
||||
echo "Examples:"
|
||||
echo " $0 # Build for current platform (includes console assets)"
|
||||
echo " $0 --dev # Development build"
|
||||
echo " $0 --sign # Build and sign binary (release CI)"
|
||||
echo " $0 --no-console # Build without console static assets"
|
||||
echo " $0 --force-console-update # Force update console assets"
|
||||
echo " $0 --platform x86_64-unknown-linux-musl # Build for specific platform"
|
||||
echo " $0 --skip-verification # Skip binary verification (for cross-compilation)"
|
||||
echo ""
|
||||
echo "Detected platform: $(detect_platform)"
|
||||
echo "CI Usage: Run this script on each platform's runner to build native binaries"
|
||||
}
|
||||
|
||||
# Print colored message
|
||||
print_message() {
|
||||
local color=$1
|
||||
local message=$2
|
||||
echo -e "${color}${message}${NC}"
|
||||
}
|
||||
|
||||
# Get version from git
|
||||
get_version() {
|
||||
if git describe --abbrev=0 --tags >/dev/null 2>&1; then
|
||||
git describe --abbrev=0 --tags
|
||||
else
|
||||
git rev-parse --short HEAD
|
||||
fi
|
||||
}
|
||||
|
||||
# Setup rust environment
|
||||
setup_rust_environment() {
|
||||
print_message $BLUE "🔧 Setting up Rust environment..."
|
||||
|
||||
# Install required target for current platform
|
||||
print_message $YELLOW "Installing target: $PLATFORM"
|
||||
rustup target add "$PLATFORM"
|
||||
|
||||
# Set up environment variables for musl targets
|
||||
if [[ "$PLATFORM" == *"musl"* ]]; then
|
||||
print_message $YELLOW "Setting up environment for musl target..."
|
||||
export RUSTFLAGS="-C target-feature=-crt-static"
|
||||
|
||||
# For cargo-zigbuild, set up additional environment variables
|
||||
if command -v cargo-zigbuild &> /dev/null; then
|
||||
print_message $YELLOW "Configuring cargo-zigbuild for musl target..."
|
||||
|
||||
# Set environment variables for better musl support
|
||||
export CC_x86_64_unknown_linux_musl="zig cc -target x86_64-linux-musl"
|
||||
export CXX_x86_64_unknown_linux_musl="zig c++ -target x86_64-linux-musl"
|
||||
export AR_x86_64_unknown_linux_musl="zig ar"
|
||||
export CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_LINKER="zig cc -target x86_64-linux-musl"
|
||||
|
||||
export CC_aarch64_unknown_linux_musl="zig cc -target aarch64-linux-musl"
|
||||
export CXX_aarch64_unknown_linux_musl="zig c++ -target aarch64-linux-musl"
|
||||
export AR_aarch64_unknown_linux_musl="zig ar"
|
||||
export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_LINKER="zig cc -target aarch64-linux-musl"
|
||||
|
||||
# Set environment variables for zstd-sys to avoid target parsing issues
|
||||
export ZSTD_SYS_USE_PKG_CONFIG=1
|
||||
export PKG_CONFIG_ALLOW_CROSS=1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Install required tools
|
||||
if [ "$SIGN" = true ]; then
|
||||
if ! command -v minisign &> /dev/null; then
|
||||
print_message $YELLOW "Installing minisign for binary signing..."
|
||||
cargo install minisign
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# Download console static assets
|
||||
download_console_assets() {
|
||||
local static_dir="rustfs/static"
|
||||
local console_exists=false
|
||||
|
||||
# Check if console assets already exist
|
||||
if [ -d "$static_dir" ] && [ -f "$static_dir/index.html" ]; then
|
||||
console_exists=true
|
||||
local static_size=$(du -sh "$static_dir" 2>/dev/null | cut -f1 || echo "unknown")
|
||||
print_message $YELLOW "Console static assets already exist ($static_size)"
|
||||
fi
|
||||
|
||||
# Determine if we need to download
|
||||
local should_download=false
|
||||
if [ "$WITH_CONSOLE" = true ]; then
|
||||
if [ "$console_exists" = false ]; then
|
||||
print_message $BLUE "🎨 Console assets not found, downloading..."
|
||||
should_download=true
|
||||
elif [ "$FORCE_CONSOLE_UPDATE" = true ]; then
|
||||
print_message $BLUE "🎨 Force updating console assets..."
|
||||
should_download=true
|
||||
else
|
||||
print_message $GREEN "✅ Console assets already available, skipping download"
|
||||
fi
|
||||
else
|
||||
if [ "$console_exists" = true ]; then
|
||||
print_message $GREEN "✅ Using existing console assets"
|
||||
else
|
||||
print_message $YELLOW "⚠️ Console assets not found. Use --download-console to download them."
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$should_download" = true ]; then
|
||||
print_message $BLUE "📥 Downloading console static assets..."
|
||||
|
||||
# Create static directory
|
||||
mkdir -p "$static_dir"
|
||||
|
||||
# Download from GitHub Releases (consistent with Docker build)
|
||||
local download_url
|
||||
if [ "$CONSOLE_VERSION" = "latest" ]; then
|
||||
print_message $YELLOW "Getting latest console release info..."
|
||||
# For now, use dl.rustfs.com as fallback until GitHub Releases includes console assets
|
||||
download_url="https://dl.rustfs.com/artifacts/console/rustfs-console-latest.zip"
|
||||
else
|
||||
download_url="https://dl.rustfs.com/artifacts/console/rustfs-console-${CONSOLE_VERSION}.zip"
|
||||
fi
|
||||
|
||||
print_message $YELLOW "Downloading from: $download_url"
|
||||
|
||||
# Download with retries
|
||||
local temp_file="console-assets-temp.zip"
|
||||
local download_success=false
|
||||
|
||||
for i in {1..3}; do
|
||||
if curl -L "$download_url" -o "$temp_file" --retry 3 --retry-delay 5 --max-time 300; then
|
||||
download_success=true
|
||||
break
|
||||
else
|
||||
print_message $YELLOW "Download attempt $i failed, retrying..."
|
||||
sleep 2
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "$download_success" = true ]; then
|
||||
# Verify the downloaded file
|
||||
if [ -f "$temp_file" ] && [ -s "$temp_file" ]; then
|
||||
print_message $BLUE "📦 Extracting console assets..."
|
||||
|
||||
# Extract to static directory
|
||||
if unzip -o "$temp_file" -d "$static_dir"; then
|
||||
rm "$temp_file"
|
||||
local final_size=$(du -sh "$static_dir" 2>/dev/null | cut -f1 || echo "unknown")
|
||||
print_message $GREEN "✅ Console assets downloaded successfully ($final_size)"
|
||||
else
|
||||
print_message $RED "❌ Failed to extract console assets"
|
||||
rm -f "$temp_file"
|
||||
return 1
|
||||
fi
|
||||
else
|
||||
print_message $RED "❌ Downloaded file is empty or invalid"
|
||||
rm -f "$temp_file"
|
||||
return 1
|
||||
fi
|
||||
else
|
||||
print_message $RED "❌ Failed to download console assets after 3 attempts"
|
||||
print_message $YELLOW "💡 Console assets are optional. Build will continue without them."
|
||||
rm -f "$temp_file"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# Verify binary functionality
|
||||
verify_binary() {
|
||||
local binary_path="$1"
|
||||
|
||||
# Check if binary exists
|
||||
if [ ! -f "$binary_path" ]; then
|
||||
print_message $RED "❌ Binary file not found: $binary_path"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Check if binary is executable
|
||||
if [ ! -x "$binary_path" ]; then
|
||||
print_message $RED "❌ Binary is not executable: $binary_path"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Check basic functionality - try to run help command
|
||||
print_message $YELLOW " Testing --help command..."
|
||||
if ! "$binary_path" --help >/dev/null 2>&1; then
|
||||
print_message $RED "❌ Binary failed to run --help command"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Check version command
|
||||
print_message $YELLOW " Testing --version command..."
|
||||
if ! "$binary_path" --version >/dev/null 2>&1; then
|
||||
print_message $YELLOW "⚠️ Binary does not support --version command (this is optional)"
|
||||
fi
|
||||
|
||||
# Try to get some basic info about the binary
|
||||
local file_info=$(file "$binary_path" 2>/dev/null || echo "unknown")
|
||||
print_message $YELLOW " Binary info: $file_info"
|
||||
|
||||
# Check if it's a valid ELF/Mach-O binary
|
||||
if command -v readelf >/dev/null 2>&1; then
|
||||
if readelf -h "$binary_path" >/dev/null 2>&1; then
|
||||
print_message $YELLOW " ELF binary structure: valid"
|
||||
fi
|
||||
elif command -v otool >/dev/null 2>&1; then
|
||||
if otool -h "$binary_path" >/dev/null 2>&1; then
|
||||
print_message $YELLOW " Mach-O binary structure: valid"
|
||||
fi
|
||||
fi
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
# Build binary for current platform
|
||||
build_binary() {
|
||||
local version=$(get_version)
|
||||
local output_file="${OUTPUT_DIR}/${PLATFORM}/${BINARY_NAME}"
|
||||
|
||||
print_message $BLUE "🏗️ Building for platform: $PLATFORM"
|
||||
print_message $YELLOW " Version: $version"
|
||||
print_message $YELLOW " Output: $output_file"
|
||||
|
||||
# Create output directory
|
||||
mkdir -p "${OUTPUT_DIR}/${PLATFORM}"
|
||||
|
||||
# Simple build logic matching the working version (4fb4b353)
|
||||
# Force rebuild by touching build.rs
|
||||
touch rustfs/build.rs
|
||||
|
||||
# Determine build command based on platform and cross-compilation needs
|
||||
local build_cmd=""
|
||||
local current_platform=$(detect_platform)
|
||||
|
||||
print_message $BLUE "📦 Using working version build logic..."
|
||||
|
||||
# Check if we need cross-compilation
|
||||
if [ "$PLATFORM" != "$current_platform" ]; then
|
||||
# Cross-compilation needed
|
||||
if [[ "$PLATFORM" == *"apple-darwin"* ]]; then
|
||||
print_message $RED "❌ macOS cross-compilation not supported"
|
||||
print_message $YELLOW "💡 macOS targets must be built natively on macOS runners"
|
||||
return 1
|
||||
elif [[ "$PLATFORM" == *"windows"* ]]; then
|
||||
# Use cross for Windows ARM64
|
||||
if ! command -v cross &> /dev/null; then
|
||||
print_message $YELLOW "📦 Installing cross tool..."
|
||||
cargo install cross --git https://github.com/cross-rs/cross
|
||||
fi
|
||||
build_cmd="cross build"
|
||||
else
|
||||
# Use zigbuild for Linux ARM64 (matches working version)
|
||||
if ! command -v cargo-zigbuild &> /dev/null; then
|
||||
print_message $RED "❌ cargo-zigbuild not found. Please install it first."
|
||||
return 1
|
||||
fi
|
||||
build_cmd="cargo zigbuild"
|
||||
fi
|
||||
else
|
||||
# Native compilation
|
||||
build_cmd="RUSTFLAGS=-Clink-arg=-lm cargo build"
|
||||
fi
|
||||
|
||||
if [ "$BUILD_TYPE" = "release" ]; then
|
||||
build_cmd+=" --release"
|
||||
fi
|
||||
|
||||
build_cmd+=" --target $PLATFORM"
|
||||
build_cmd+=" -p rustfs --bins"
|
||||
|
||||
print_message $BLUE "📦 Executing: $build_cmd"
|
||||
|
||||
# Execute build (this matches exactly what the working version does)
|
||||
if eval $build_cmd; then
|
||||
print_message $GREEN "✅ Successfully built for $PLATFORM"
|
||||
|
||||
# Copy binary to output directory
|
||||
cp "target/${PLATFORM}/${BUILD_TYPE}/${BINARY_NAME}" "$output_file"
|
||||
|
||||
# Generate checksums
|
||||
print_message $BLUE "🔐 Generating checksums..."
|
||||
(cd "${OUTPUT_DIR}/${PLATFORM}" && generate_sha256 "${BINARY_NAME}" "${BINARY_NAME}.sha256sum")
|
||||
|
||||
# Verify binary functionality (if not skipped)
|
||||
if [ "$SKIP_VERIFICATION" = false ]; then
|
||||
print_message $BLUE "🔍 Verifying binary functionality..."
|
||||
if verify_binary "$output_file"; then
|
||||
print_message $GREEN "✅ Binary verification passed"
|
||||
else
|
||||
print_message $RED "❌ Binary verification failed"
|
||||
return 1
|
||||
fi
|
||||
else
|
||||
print_message $YELLOW "⚠️ Binary verification skipped by user request"
|
||||
fi
|
||||
|
||||
# Sign binary if requested
|
||||
if [ "$SIGN" = true ]; then
|
||||
print_message $BLUE "✍️ Signing binary..."
|
||||
(cd "${OUTPUT_DIR}/${PLATFORM}" && minisign -S -m "${BINARY_NAME}" -s ~/.minisign/minisign.key)
|
||||
fi
|
||||
|
||||
print_message $GREEN "✅ Build completed successfully"
|
||||
else
|
||||
print_message $RED "❌ Failed to build for $PLATFORM"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
|
||||
# Main build function
|
||||
build_rustfs() {
|
||||
local version=$(get_version)
|
||||
|
||||
print_message $BLUE "🚀 Starting RustFS binary build process..."
|
||||
print_message $YELLOW " Version: $version"
|
||||
print_message $YELLOW " Platform: $PLATFORM"
|
||||
print_message $YELLOW " Output Directory: $OUTPUT_DIR"
|
||||
print_message $YELLOW " Build Type: $BUILD_TYPE"
|
||||
print_message $YELLOW " Sign: $SIGN"
|
||||
print_message $YELLOW " With Console: $WITH_CONSOLE"
|
||||
if [ "$WITH_CONSOLE" = true ]; then
|
||||
print_message $YELLOW " Console Version: $CONSOLE_VERSION"
|
||||
print_message $YELLOW " Force Console Update: $FORCE_CONSOLE_UPDATE"
|
||||
fi
|
||||
print_message $YELLOW " Skip Verification: $SKIP_VERIFICATION"
|
||||
echo ""
|
||||
|
||||
# Setup environment
|
||||
setup_rust_environment
|
||||
echo ""
|
||||
|
||||
# Download console assets if requested
|
||||
download_console_assets
|
||||
echo ""
|
||||
|
||||
# Build binary
|
||||
build_binary
|
||||
echo ""
|
||||
|
||||
print_message $GREEN "🎉 Build process completed successfully!"
|
||||
|
||||
# Show built binary
|
||||
local binary_file="${OUTPUT_DIR}/${PLATFORM}/${BINARY_NAME}"
|
||||
if [ -f "$binary_file" ]; then
|
||||
local size=$(ls -lh "$binary_file" | awk '{print $5}')
|
||||
print_message $BLUE "📋 Built binary: $binary_file ($size)"
|
||||
fi
|
||||
}
|
||||
|
||||
# Parse command line arguments
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
-o|--output-dir)
|
||||
OUTPUT_DIR="$2"
|
||||
shift 2
|
||||
;;
|
||||
-b|--binary-name)
|
||||
BINARY_NAME="$2"
|
||||
shift 2
|
||||
;;
|
||||
-p|--platform)
|
||||
CUSTOM_PLATFORM="$2"
|
||||
shift 2
|
||||
;;
|
||||
--dev)
|
||||
BUILD_TYPE="debug"
|
||||
shift
|
||||
;;
|
||||
--sign)
|
||||
SIGN=true
|
||||
shift
|
||||
;;
|
||||
--with-console)
|
||||
WITH_CONSOLE=true
|
||||
shift
|
||||
;;
|
||||
--no-console)
|
||||
WITH_CONSOLE=false
|
||||
shift
|
||||
;;
|
||||
--force-console-update)
|
||||
FORCE_CONSOLE_UPDATE=true
|
||||
WITH_CONSOLE=true # Auto-enable download when forcing update
|
||||
shift
|
||||
;;
|
||||
--console-version)
|
||||
CONSOLE_VERSION="$2"
|
||||
shift 2
|
||||
;;
|
||||
--skip-verification)
|
||||
SKIP_VERIFICATION=true
|
||||
shift
|
||||
;;
|
||||
-h|--help)
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
*)
|
||||
print_message $RED "❌ Unknown option: $1"
|
||||
usage
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Main execution
|
||||
main() {
|
||||
print_message $BLUE "🦀 RustFS Binary Build Script"
|
||||
echo ""
|
||||
|
||||
# Check if we're in a Rust project
|
||||
if [ ! -f "Cargo.toml" ]; then
|
||||
print_message $RED "❌ No Cargo.toml found. Are you in a Rust project directory?"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Override platform if specified
|
||||
if [ -n "$CUSTOM_PLATFORM" ]; then
|
||||
PLATFORM="$CUSTOM_PLATFORM"
|
||||
print_message $YELLOW "🎯 Using specified platform: $PLATFORM"
|
||||
|
||||
# Auto-enable skip verification for cross-compilation
|
||||
if [ "$PLATFORM" != "$(detect_platform)" ]; then
|
||||
SKIP_VERIFICATION=true
|
||||
print_message $YELLOW "⚠️ Cross-compilation detected, enabling --skip-verification"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Start build process
|
||||
build_rustfs
|
||||
}
|
||||
|
||||
# Run main function
|
||||
main
|
||||
|
||||
@@ -1,35 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Copyright 2024 RustFS Team
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
clear
|
||||
|
||||
# Get the current platform architecture
|
||||
ARCH=$(uname -m)
|
||||
|
||||
# Set the target directory according to the schema
|
||||
if [ "$ARCH" == "x86_64" ]; then
|
||||
TARGET_DIR="target/x86_64"
|
||||
elif [ "$ARCH" == "aarch64" ]; then
|
||||
TARGET_DIR="target/arm64"
|
||||
else
|
||||
TARGET_DIR="target/unknown"
|
||||
fi
|
||||
|
||||
# Set CARGO_TARGET_DIR and build the project
|
||||
CARGO_TARGET_DIR=$TARGET_DIR RUSTFLAGS="-C link-arg=-fuse-ld=mold" cargo build --release --package rustfs
|
||||
|
||||
echo -e "\a"
|
||||
echo -e "\a"
|
||||
echo -e "\a"
|
||||
@@ -1,50 +0,0 @@
|
||||
[application]
|
||||
|
||||
# App (Project) Name
|
||||
name = "rustfs-gui"
|
||||
|
||||
# The static resource path
|
||||
asset_dir = "public"
|
||||
|
||||
[web.app]
|
||||
|
||||
# HTML title tag content
|
||||
title = "rustfs-gui"
|
||||
|
||||
# include `assets` in web platform
|
||||
[web.resource]
|
||||
|
||||
# Additional CSS style files
|
||||
style = []
|
||||
|
||||
# Additional JavaScript files
|
||||
script = []
|
||||
|
||||
[web.resource.dev]
|
||||
|
||||
# Javascript code file
|
||||
# serve: [dev-server] only
|
||||
script = []
|
||||
|
||||
[bundle]
|
||||
identifier = "com.rustfs.cli.gui"
|
||||
|
||||
publisher = "RustFsGUI"
|
||||
|
||||
category = "Utility"
|
||||
|
||||
copyright = "Copyright 2025 rustfs.com"
|
||||
|
||||
icon = [
|
||||
"assets/icons/icon.icns",
|
||||
"assets/icons/icon.ico"
|
||||
]
|
||||
#[bundle.macos]
|
||||
#provider_short_name = "RustFs"
|
||||
[bundle.windows]
|
||||
tsp = true
|
||||
icon_path = "assets/icons/icon.ico"
|
||||
allow_downgrades = true
|
||||
[bundle.windows.webview_install_mode]
|
||||
[bundle.windows.webview_install_mode.EmbedBootstrapper]
|
||||
silent = true
|
||||
@@ -1,34 +0,0 @@
|
||||
## Rustfs GUI
|
||||
|
||||
### Tailwind
|
||||
|
||||
1. Install npm: https://docs.npmjs.com/downloading-and-installing-node-js-and-npm
|
||||
2. Install the Tailwind CSS CLI: https://tailwindcss.com/docs/installation
|
||||
3. Run the following command in the root of the project to start the Tailwind CSS compiler:
|
||||
|
||||
```bash
|
||||
npx tailwindcss -i ./input.css -o ./assets/tailwind.css --watch
|
||||
```
|
||||
|
||||
### Dioxus CLI
|
||||
|
||||
#### Install the stable version (recommended)
|
||||
|
||||
```shell
|
||||
cargo install dioxus-cli
|
||||
```
|
||||
|
||||
### Serving Your App
|
||||
|
||||
Run the following command in the root of your project to start developing with the default platform:
|
||||
|
||||
```bash
|
||||
dx serve
|
||||
```
|
||||
|
||||
To run for a different platform, use the `--platform platform` flag. E.g.
|
||||
|
||||
```bash
|
||||
dx serve --platform desktop
|
||||
```
|
||||
|
||||
|
[13 image assets deleted]
@@ -1,48 +0,0 @@
|
||||
/**
|
||||
* Copyright 2024 RustFS Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
window.switchTab = function (tabId) {
|
||||
// Hide everything
|
||||
document.querySelectorAll('.tab-content').forEach(content => {
|
||||
content.classList.add('hidden');
|
||||
});
|
||||
|
||||
// Reset all label styles
|
||||
document.querySelectorAll('.tab-btn').forEach(btn => {
|
||||
btn.classList.remove('border-b-2', 'border-black');
|
||||
btn.classList.add('text-gray-500');
|
||||
});
|
||||
|
||||
// Displays the selected content
|
||||
const activeContent = document.getElementById(tabId);
|
||||
if (activeContent) {
|
||||
activeContent.classList.remove('hidden');
|
||||
}
|
||||
|
||||
// Updates the selected label style
|
||||
const activeBtn = document.querySelector(`[data-tab="${tabId}"]`);
|
||||
if (activeBtn) {
|
||||
activeBtn.classList.add('border-b-2', 'border-black');
|
||||
activeBtn.classList.remove('text-gray-500');
|
||||
}
|
||||
};
|
||||
|
||||
window.togglePassword = function (button) {
|
||||
const input = button.parentElement.querySelector('input[type="password"], input[type="text"]');
|
||||
if (input) {
|
||||
input.type = input.type === 'password' ? 'text' : 'password';
|
||||
}
|
||||
};
|
||||
|
@@ -1,20 +0,0 @@
|
||||
<svg width="1558" height="260" viewBox="0 0 1558 260" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<g clip-path="url(#clip0_0_3)">
|
||||
<path d="M1288.5 112.905H1159.75V58.4404H1262L1270 0L1074 0V260H1159.75V162.997H1296.95L1288.5 112.905Z"
|
||||
fill="#0196D0"/>
|
||||
<path d="M1058.62 58.4404V0H789V58.4404H881.133V260H966.885V58.4404H1058.62Z" fill="#0196D0"/>
|
||||
<path d="M521 179.102V0L454.973 15V161C454.973 181.124 452.084 193.146 443.5 202C434.916 211.257 419.318 214.5 400.5 214.5C381.022 214.5 366.744 210.854 357.5 202C348.916 193.548 346.357 175.721 346.357 156V0L280 15V175.48C280 208.08 290.234 229.412 309.712 241.486C329.19 253.56 358.903 260 400.5 260C440.447 260 470.159 253.56 490.297 241.486C510.766 229.412 521 208.483 521 179.102Z"
|
||||
fill="#0196D0"/>
|
||||
<path d="M172.84 84.2813C172.84 97.7982 168.249 107.737 158.41 113.303C149.883 118.471 137.092 121.254 120.693 122.049V162.997C129.876 163.792 138.076 166.177 144.307 176.514L184.647 260H265L225.316 180.489C213.181 155.046 201.374 149.48 178.744 143.517C212.197 138.349 241.386 118.471 241.386 73.1499C241.386 53.2722 233.843 30.2141 218.756 17.8899C203.998 5.56575 183.991 0 159.394 0H120.693V48.5015H127.58C142.23 48.5015 153.6 51.4169 161.689 57.2477C169.233 62.8135 172.84 71.5596 172.84 84.2813ZM120.693 122.049C119.163 122.049 117.741 122.049 116.43 122.049H68.5457V48.5015H120.693V0H0V260H70.5137V162.997H110.526C113.806 162.997 117.741 162.997 120.693 162.997V122.049Z"
|
||||
fill="#0196D0"/>
|
||||
<path d="M774 179.297C774 160.829 766.671 144.669 752.013 131.972C738.127 119.66 712.025 110.169 673.708 103.5C662.136 101.191 651.722 99.6523 643.235 97.3437C586.532 84.6467 594.632 52.7118 650.564 52.7118C680.651 52.7118 709.582 61.946 738.127 66.9478C742.37 67.7174 743.913 68.1021 744.298 68.1021L750.47 12.697C720.383 3.46282 684.895 0 654.036 0C616.619 0 587.689 6.54088 567.245 19.2379C546.801 31.9349 536 57.7137 536 82.3382C536 103.5 543.715 119.66 559.916 131.972C575.731 143.515 604.276 152.749 645.55 160.059C658.279 162.368 668.694 163.907 676.794 166.215C685.023 168.524 691.066 170.704 694.924 172.756C702.253 176.604 706.11 182.375 706.11 188.531C706.11 196.611 701.481 202.767 692.224 207C664.836 220.081 587.689 212.001 556.83 198.15L543.715 247.784C547.186 248.169 552.972 249.323 559.916 250.477C616.619 259.327 690.681 270.869 741.212 238.935C762.814 225.468 774 206.23 774 179.297Z"
|
||||
fill="#0196D0"/>
|
||||
<path d="M1558 179.568C1558 160.383 1550.42 144.268 1535.67 131.99C1521.32 119.968 1494.34 110.631 1454.74 103.981C1442.38 101.679 1432.01 99.3764 1422.84 97.8416C1422.44 97.8416 1422.04 97.8416 1422.04 97.4579V112.422L1361.04 75.2038L1422.04 38.3692V52.9496C1424.7 52.9496 1427.49 52.9496 1430.41 52.9496C1461.51 52.9496 1491.42 62.5419 1521.32 67.5299C1525.31 67.9136 1526.9 67.9136 1527.3 67.9136L1533.68 12.6619C1502.98 3.83692 1465.9 0 1434 0C1395.33 0 1365.43 6.52277 1345.09 19.5683C1323.16 32.6139 1312 57.9376 1312 82.8776C1312 103.981 1320.37 120.096 1336.72 131.607C1353.46 143.885 1382.97 153.093 1425.23 160.383C1434 161.535 1441.18 162.686 1447.56 164.22L1448.36 150.791L1507.36 190.312L1445.57 224.844L1445.96 212.949C1409.68 215.635 1357.45 209.112 1333.53 197.985L1320.37 247.482C1323.56 248.249 1329.54 248.633 1336.72 250.551C1395.33 259.376 1471.88 270.887 1524.11 238.657C1546.84 225.611 1558 205.659 1558 179.568Z"
|
||||
fill="#0196D0"/>
|
||||
</g>
|
||||
<defs>
|
||||
<clipPath id="clip0_0_3">
|
||||
<rect width="1558" height="260" fill="white"/>
|
||||
</clipPath>
|
||||
</defs>
|
||||
</svg>
|
||||
|
@@ -1,33 +0,0 @@
|
||||
/**
|
||||
* Copyright 2024 RustFS Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
#navbar {
|
||||
display: flex;
|
||||
flex-direction: row;
|
||||
}
|
||||
|
||||
#navbar a {
|
||||
color: #ffffff;
|
||||
margin-right: 20px;
|
||||
text-decoration: none;
|
||||
transition: color 0.2s ease;
|
||||
}
|
||||
|
||||
#navbar a:hover {
|
||||
cursor: pointer;
|
||||
color: #ffffff;
|
||||
/ / #91a4d2;
|
||||
}
|
||||
@@ -1,972 +0,0 @@
|
||||
/**
|
||||
* Copyright 2024 RustFS Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
*, ::before, ::after {
|
||||
--tw-border-spacing-x: 0;
|
||||
--tw-border-spacing-y: 0;
|
||||
--tw-translate-x: 0;
|
||||
--tw-translate-y: 0;
|
||||
--tw-rotate: 0;
|
||||
--tw-skew-x: 0;
|
||||
--tw-skew-y: 0;
|
||||
--tw-scale-x: 1;
|
||||
--tw-scale-y: 1;
|
||||
--tw-pan-x: ;
|
||||
--tw-pan-y: ;
|
||||
--tw-pinch-zoom: ;
|
||||
--tw-scroll-snap-strictness: proximity;
|
||||
--tw-gradient-from-position: ;
|
||||
--tw-gradient-via-position: ;
|
||||
--tw-gradient-to-position: ;
|
||||
--tw-ordinal: ;
|
||||
--tw-slashed-zero: ;
|
||||
--tw-numeric-figure: ;
|
||||
--tw-numeric-spacing: ;
|
||||
--tw-numeric-fraction: ;
|
||||
--tw-ring-inset: ;
|
||||
--tw-ring-offset-width: 0px;
|
||||
--tw-ring-offset-color: #fff;
|
||||
--tw-ring-color: rgb(59 130 246 / 0.5);
|
||||
--tw-ring-offset-shadow: 0 0 #0000;
|
||||
--tw-ring-shadow: 0 0 #0000;
|
||||
--tw-shadow: 0 0 #0000;
|
||||
--tw-shadow-colored: 0 0 #0000;
|
||||
--tw-blur: ;
|
||||
--tw-brightness: ;
|
||||
--tw-contrast: ;
|
||||
--tw-grayscale: ;
|
||||
--tw-hue-rotate: ;
|
||||
--tw-invert: ;
|
||||
--tw-saturate: ;
|
||||
--tw-sepia: ;
|
||||
--tw-drop-shadow: ;
|
||||
--tw-backdrop-blur: ;
|
||||
--tw-backdrop-brightness: ;
|
||||
--tw-backdrop-contrast: ;
|
||||
--tw-backdrop-grayscale: ;
|
||||
--tw-backdrop-hue-rotate: ;
|
||||
--tw-backdrop-invert: ;
|
||||
--tw-backdrop-opacity: ;
|
||||
--tw-backdrop-saturate: ;
|
||||
--tw-backdrop-sepia: ;
|
||||
--tw-contain-size: ;
|
||||
--tw-contain-layout: ;
|
||||
--tw-contain-paint: ;
|
||||
--tw-contain-style: ;
|
||||
}
|
||||
|
||||
::backdrop {
|
||||
--tw-border-spacing-x: 0;
|
||||
--tw-border-spacing-y: 0;
|
||||
--tw-translate-x: 0;
|
||||
--tw-translate-y: 0;
|
||||
--tw-rotate: 0;
|
||||
--tw-skew-x: 0;
|
||||
--tw-skew-y: 0;
|
||||
--tw-scale-x: 1;
|
||||
--tw-scale-y: 1;
|
||||
--tw-pan-x: ;
|
||||
--tw-pan-y: ;
|
||||
--tw-pinch-zoom: ;
|
||||
--tw-scroll-snap-strictness: proximity;
|
||||
--tw-gradient-from-position: ;
|
||||
--tw-gradient-via-position: ;
|
||||
--tw-gradient-to-position: ;
|
||||
--tw-ordinal: ;
|
||||
--tw-slashed-zero: ;
|
||||
--tw-numeric-figure: ;
|
||||
--tw-numeric-spacing: ;
|
||||
--tw-numeric-fraction: ;
|
||||
--tw-ring-inset: ;
|
||||
--tw-ring-offset-width: 0px;
|
||||
--tw-ring-offset-color: #fff;
|
||||
--tw-ring-color: rgb(59 130 246 / 0.5);
|
||||
--tw-ring-offset-shadow: 0 0 #0000;
|
||||
--tw-ring-shadow: 0 0 #0000;
|
||||
--tw-shadow: 0 0 #0000;
|
||||
--tw-shadow-colored: 0 0 #0000;
|
||||
--tw-blur: ;
|
||||
--tw-brightness: ;
|
||||
--tw-contrast: ;
|
||||
--tw-grayscale: ;
|
||||
--tw-hue-rotate: ;
|
||||
--tw-invert: ;
|
||||
--tw-saturate: ;
|
||||
--tw-sepia: ;
|
||||
--tw-drop-shadow: ;
|
||||
--tw-backdrop-blur: ;
|
||||
--tw-backdrop-brightness: ;
|
||||
--tw-backdrop-contrast: ;
|
||||
--tw-backdrop-grayscale: ;
|
||||
--tw-backdrop-hue-rotate: ;
|
||||
--tw-backdrop-invert: ;
|
||||
--tw-backdrop-opacity: ;
|
||||
--tw-backdrop-saturate: ;
|
||||
--tw-backdrop-sepia: ;
|
||||
--tw-contain-size: ;
|
||||
--tw-contain-layout: ;
|
||||
--tw-contain-paint: ;
|
||||
--tw-contain-style: ;
|
||||
}
|
||||
|
||||
/*
|
||||
! tailwindcss v3.4.17 | MIT License | https://tailwindcss.com
|
||||
*/
|
||||
|
||||
/*
|
||||
1. Prevent padding and border from affecting element width. (https://github.com/mozdevs/cssremedy/issues/4)
|
||||
2. Allow adding a border to an element by just adding a border-width. (https://github.com/tailwindcss/tailwindcss/pull/116)
|
||||
*/
|
||||
|
||||
*,
|
||||
::before,
|
||||
::after {
|
||||
box-sizing: border-box;
|
||||
/* 1 */
|
||||
border-width: 0;
|
||||
/* 2 */
|
||||
border-style: solid;
|
||||
/* 2 */
|
||||
border-color: #e5e7eb;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
::before,
|
||||
::after {
|
||||
--tw-content: '';
|
||||
}
|
||||
|
||||
/*
|
||||
1. Use a consistent sensible line-height in all browsers.
|
||||
2. Prevent adjustments of font size after orientation changes in iOS.
|
||||
3. Use a more readable tab size.
|
||||
4. Use the user's configured `sans` font-family by default.
|
||||
5. Use the user's configured `sans` font-feature-settings by default.
|
||||
6. Use the user's configured `sans` font-variation-settings by default.
|
||||
7. Disable tap highlights on iOS
|
||||
*/
|
||||
|
||||
html,
|
||||
:host {
|
||||
line-height: 1.5;
|
||||
/* 1 */
|
||||
-webkit-text-size-adjust: 100%;
|
||||
/* 2 */
|
||||
-moz-tab-size: 4;
|
||||
/* 3 */
|
||||
-o-tab-size: 4;
|
||||
tab-size: 4;
|
||||
/* 3 */
|
||||
font-family: ui-sans-serif, system-ui, sans-serif, "Apple Color Emoji", "Segoe UI Emoji", "Segoe UI Symbol", "Noto Color Emoji";
|
||||
/* 4 */
|
||||
font-feature-settings: normal;
|
||||
/* 5 */
|
||||
font-variation-settings: normal;
|
||||
/* 6 */
|
||||
-webkit-tap-highlight-color: transparent;
|
||||
/* 7 */
|
||||
}
|
||||
|
||||
/*
|
||||
1. Remove the margin in all browsers.
|
||||
2. Inherit line-height from `html` so users can set them as a class directly on the `html` element.
|
||||
*/
|
||||
|
||||
body {
|
||||
margin: 0;
|
||||
/* 1 */
|
||||
line-height: inherit;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
/*
|
||||
1. Add the correct height in Firefox.
|
||||
2. Correct the inheritance of border color in Firefox. (https://bugzilla.mozilla.org/show_bug.cgi?id=190655)
|
||||
3. Ensure horizontal rules are visible by default.
|
||||
*/
|
||||
|
||||
hr {
|
||||
height: 0;
|
||||
/* 1 */
|
||||
color: inherit;
|
||||
/* 2 */
|
||||
border-top-width: 1px;
|
||||
/* 3 */
|
||||
}
|
||||
|
||||
/*
|
||||
Add the correct text decoration in Chrome, Edge, and Safari.
|
||||
*/
|
||||
|
||||
abbr:where([title]) {
|
||||
-webkit-text-decoration: underline dotted;
|
||||
text-decoration: underline dotted;
|
||||
}
|
||||
|
||||
/*
|
||||
Remove the default font size and weight for headings.
|
||||
*/
|
||||
|
||||
h1,
|
||||
h2,
|
||||
h3,
|
||||
h4,
|
||||
h5,
|
||||
h6 {
|
||||
font-size: inherit;
|
||||
font-weight: inherit;
|
||||
}
|
||||
|
||||
/*
|
||||
Reset links to optimize for opt-in styling instead of opt-out.
|
||||
*/
|
||||
|
||||
a {
|
||||
color: inherit;
|
||||
text-decoration: inherit;
|
||||
}
|
||||
|
||||
/*
|
||||
Add the correct font weight in Edge and Safari.
|
||||
*/
|
||||
|
||||
b,
|
||||
strong {
|
||||
font-weight: bolder;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Use the user's configured `mono` font-family by default.
|
||||
2. Use the user's configured `mono` font-feature-settings by default.
|
||||
3. Use the user's configured `mono` font-variation-settings by default.
|
||||
4. Correct the odd `em` font sizing in all browsers.
|
||||
*/
|
||||
|
||||
code,
|
||||
kbd,
|
||||
samp,
|
||||
pre {
|
||||
font-family: ui-monospace, SFMono-Regular, Menlo, Monaco, Consolas, "Liberation Mono", "Courier New", monospace;
|
||||
/* 1 */
|
||||
font-feature-settings: normal;
|
||||
/* 2 */
|
||||
font-variation-settings: normal;
|
||||
/* 3 */
|
||||
font-size: 1em;
|
||||
/* 4 */
|
||||
}
|
||||
|
||||
/*
|
||||
Add the correct font size in all browsers.
|
||||
*/
|
||||
|
||||
small {
|
||||
font-size: 80%;
|
||||
}
|
||||
|
||||
/*
|
||||
Prevent `sub` and `sup` elements from affecting the line height in all browsers.
|
||||
*/
|
||||
|
||||
sub,
|
||||
sup {
|
||||
font-size: 75%;
|
||||
line-height: 0;
|
||||
position: relative;
|
||||
vertical-align: baseline;
|
||||
}
|
||||
|
||||
sub {
|
||||
bottom: -0.25em;
|
||||
}
|
||||
|
||||
sup {
|
||||
top: -0.5em;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Remove text indentation from table contents in Chrome and Safari. (https://bugs.chromium.org/p/chromium/issues/detail?id=999088, https://bugs.webkit.org/show_bug.cgi?id=201297)
|
||||
2. Correct table border color inheritance in all Chrome and Safari. (https://bugs.chromium.org/p/chromium/issues/detail?id=935729, https://bugs.webkit.org/show_bug.cgi?id=195016)
|
||||
3. Remove gaps between table borders by default.
|
||||
*/
|
||||
|
||||
table {
|
||||
text-indent: 0;
|
||||
/* 1 */
|
||||
border-color: inherit;
|
||||
/* 2 */
|
||||
border-collapse: collapse;
|
||||
/* 3 */
|
||||
}
|
||||
|
||||
/*
|
||||
1. Change the font styles in all browsers.
|
||||
2. Remove the margin in Firefox and Safari.
|
||||
3. Remove default padding in all browsers.
|
||||
*/
|
||||
|
||||
button,
|
||||
input,
|
||||
optgroup,
|
||||
select,
|
||||
textarea {
|
||||
font-family: inherit;
|
||||
/* 1 */
|
||||
font-feature-settings: inherit;
|
||||
/* 1 */
|
||||
font-variation-settings: inherit;
|
||||
/* 1 */
|
||||
font-size: 100%;
|
||||
/* 1 */
|
||||
font-weight: inherit;
|
||||
/* 1 */
|
||||
line-height: inherit;
|
||||
/* 1 */
|
||||
letter-spacing: inherit;
|
||||
/* 1 */
|
||||
color: inherit;
|
||||
/* 1 */
|
||||
margin: 0;
|
||||
/* 2 */
|
||||
padding: 0;
|
||||
/* 3 */
|
||||
}
|
||||
|
||||
/*
|
||||
Remove the inheritance of text transform in Edge and Firefox.
|
||||
*/
|
||||
|
||||
button,
|
||||
select {
|
||||
text-transform: none;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Correct the inability to style clickable types in iOS and Safari.
|
||||
2. Remove default button styles.
|
||||
*/
|
||||
|
||||
button,
|
||||
input:where([type='button']),
|
||||
input:where([type='reset']),
|
||||
input:where([type='submit']) {
|
||||
-webkit-appearance: button;
|
||||
/* 1 */
|
||||
background-color: transparent;
|
||||
/* 2 */
|
||||
background-image: none;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
/*
|
||||
Use the modern Firefox focus style for all focusable elements.
|
||||
*/
|
||||
|
||||
:-moz-focusring {
|
||||
outline: auto;
|
||||
}
|
||||
|
||||
/*
|
||||
Remove the additional `:invalid` styles in Firefox. (https://github.com/mozilla/gecko-dev/blob/2f9eacd9d3d995c937b4251a5557d95d494c9be1/layout/style/res/forms.css#L728-L737)
|
||||
*/
|
||||
|
||||
:-moz-ui-invalid {
|
||||
box-shadow: none;
|
||||
}
|
||||
|
||||
/*
|
||||
Add the correct vertical alignment in Chrome and Firefox.
|
||||
*/
|
||||
|
||||
progress {
|
||||
vertical-align: baseline;
|
||||
}
|
||||
|
||||
/*
|
||||
Correct the cursor style of increment and decrement buttons in Safari.
|
||||
*/
|
||||
|
||||
::-webkit-inner-spin-button,
|
||||
::-webkit-outer-spin-button {
|
||||
height: auto;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Correct the odd appearance in Chrome and Safari.
|
||||
2. Correct the outline style in Safari.
|
||||
*/
|
||||
|
||||
[type='search'] {
|
||||
-webkit-appearance: textfield;
|
||||
/* 1 */
|
||||
outline-offset: -2px;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
/*
|
||||
Remove the inner padding in Chrome and Safari on macOS.
|
||||
*/
|
||||
|
||||
::-webkit-search-decoration {
|
||||
-webkit-appearance: none;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Correct the inability to style clickable types in iOS and Safari.
|
||||
2. Change font properties to `inherit` in Safari.
|
||||
*/
|
||||
|
||||
::-webkit-file-upload-button {
|
||||
-webkit-appearance: button;
|
||||
/* 1 */
|
||||
font: inherit;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
/*
|
||||
Add the correct display in Chrome and Safari.
|
||||
*/
|
||||
|
||||
summary {
|
||||
display: list-item;
|
||||
}
|
||||
|
||||
/*
|
||||
Removes the default spacing and border for appropriate elements.
|
||||
*/
|
||||
|
||||
blockquote,
|
||||
dl,
|
||||
dd,
|
||||
h1,
|
||||
h2,
|
||||
h3,
|
||||
h4,
|
||||
h5,
|
||||
h6,
|
||||
hr,
|
||||
figure,
|
||||
p,
|
||||
pre {
|
||||
margin: 0;
|
||||
}
|
||||
|
||||
fieldset {
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
legend {
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
ol,
|
||||
ul,
|
||||
menu {
|
||||
list-style: none;
|
||||
margin: 0;
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
/*
|
||||
Reset default styling for dialogs.
|
||||
*/
|
||||
|
||||
dialog {
|
||||
padding: 0;
|
||||
}
|
||||
|
||||
/*
|
||||
Prevent resizing textareas horizontally by default.
|
||||
*/
|
||||
|
||||
textarea {
|
||||
resize: vertical;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Reset the default placeholder opacity in Firefox. (https://github.com/tailwindlabs/tailwindcss/issues/3300)
|
||||
2. Set the default placeholder color to the user's configured gray 400 color.
|
||||
*/
|
||||
|
||||
input::-moz-placeholder, textarea::-moz-placeholder {
|
||||
opacity: 1;
|
||||
/* 1 */
|
||||
color: #9ca3af;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
input::placeholder,
|
||||
textarea::placeholder {
|
||||
opacity: 1;
|
||||
/* 1 */
|
||||
color: #9ca3af;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
/*
|
||||
Set the default cursor for buttons.
|
||||
*/
|
||||
|
||||
button,
|
||||
[role="button"] {
|
||||
cursor: pointer;
|
||||
}
|
||||
|
||||
/*
|
||||
Make sure disabled buttons don't get the pointer cursor.
|
||||
*/
|
||||
|
||||
:disabled {
|
||||
cursor: default;
|
||||
}
|
||||
|
||||
/*
|
||||
1. Make replaced elements `display: block` by default. (https://github.com/mozdevs/cssremedy/issues/14)
|
||||
2. Add `vertical-align: middle` to align replaced elements more sensibly by default. (https://github.com/jensimmons/cssremedy/issues/14#issuecomment-634934210)
|
||||
This can trigger a poorly considered lint error in some tools but is included by design.
|
||||
*/
|
||||
|
||||
img,
|
||||
svg,
|
||||
video,
|
||||
canvas,
|
||||
audio,
|
||||
iframe,
|
||||
embed,
|
||||
object {
|
||||
display: block;
|
||||
/* 1 */
|
||||
vertical-align: middle;
|
||||
/* 2 */
|
||||
}
|
||||
|
||||
/*
|
||||
Constrain images and videos to the parent width and preserve their intrinsic aspect ratio. (https://github.com/mozdevs/cssremedy/issues/14)
|
||||
*/
|
||||
|
||||
img,
|
||||
video {
|
||||
max-width: 100%;
|
||||
height: auto;
|
||||
}
|
||||
|
||||
/* Make elements with the HTML hidden attribute stay hidden by default */
|
||||
|
||||
[hidden]:where(:not([hidden="until-found"])) {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.static {
|
||||
position: static;
|
||||
}
|
||||
|
||||
.absolute {
|
||||
position: absolute;
|
||||
}
|
||||
|
||||
.relative {
|
||||
position: relative;
|
||||
}
|
||||
|
||||
.right-2 {
|
||||
right: 0.5rem;
|
||||
}
|
||||
|
||||
.right-6 {
|
||||
right: 1.5rem;
|
||||
}
|
||||
|
||||
.top-1\/2 {
|
||||
top: 50%;
|
||||
}
|
||||
|
||||
.top-4 {
|
||||
top: 1rem;
|
||||
}
|
||||
|
||||
.z-10 {
|
||||
z-index: 10;
|
||||
}
|
||||
|
||||
.mb-2 {
|
||||
margin-bottom: 0.5rem;
|
||||
}
|
||||
|
||||
.mb-4 {
|
||||
margin-bottom: 1rem;
|
||||
}
|
||||
|
||||
.mb-6 {
|
||||
margin-bottom: 1.5rem;
|
||||
}
|
||||
|
||||
.mb-8 {
|
||||
margin-bottom: 2rem;
|
||||
}
|
||||
|
||||
.ml-2 {
|
||||
margin-left: 0.5rem;
|
||||
}
|
||||
|
||||
.flex {
|
||||
display: flex;
|
||||
}
|
||||
|
||||
.hidden {
|
||||
display: none;
|
||||
}
|
||||
|
||||
.h-16 {
|
||||
height: 4rem;
|
||||
}
|
||||
|
||||
.h-24 {
|
||||
height: 6rem;
|
||||
}
|
||||
|
||||
.h-4 {
|
||||
height: 1rem;
|
||||
}
|
||||
|
||||
.h-5 {
|
||||
height: 1.25rem;
|
||||
}
|
||||
|
||||
.h-6 {
|
||||
height: 1.5rem;
|
||||
}
|
||||
|
||||
.min-h-screen {
|
||||
min-height: 100vh;
|
||||
}
|
||||
|
||||
.w-16 {
|
||||
width: 4rem;
|
||||
}
|
||||
|
||||
.w-20 {
|
||||
width: 5rem;
|
||||
}
|
||||
|
||||
.w-24 {
|
||||
width: 6rem;
|
||||
}
|
||||
|
||||
.w-4 {
|
||||
width: 1rem;
|
||||
}
|
||||
|
||||
.w-48 {
|
||||
width: 12rem;
|
||||
}
|
||||
|
||||
.w-5 {
|
||||
width: 1.25rem;
|
||||
}
|
||||
|
||||
.w-6 {
|
||||
width: 1.5rem;
|
||||
}
|
||||
|
||||
.w-full {
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
.flex-1 {
|
||||
flex: 1 1 0%;
|
||||
}
|
||||
|
||||
.-translate-y-1\/2 {
|
||||
--tw-translate-y: -50%;
|
||||
transform: translate(var(--tw-translate-x), var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y));
|
||||
}
|
||||
|
||||
.transform {
|
||||
transform: translate(var(--tw-translate-x), var(--tw-translate-y)) rotate(var(--tw-rotate)) skewX(var(--tw-skew-x)) skewY(var(--tw-skew-y)) scaleX(var(--tw-scale-x)) scaleY(var(--tw-scale-y));
|
||||
}
|
||||
|
||||
@keyframes spin {
|
||||
to {
|
||||
transform: rotate(360deg);
|
||||
}
|
||||
}
|
||||
|
||||
.animate-spin {
|
||||
animation: spin 1s linear infinite;
|
||||
}
|
||||
|
||||
.flex-col {
|
||||
flex-direction: column;
|
||||
}
|
||||
|
||||
.items-center {
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
.justify-center {
|
||||
justify-content: center;
|
||||
}
|
||||
|
||||
.space-x-2 > :not([hidden]) ~ :not([hidden]) {
|
||||
--tw-space-x-reverse: 0;
|
||||
margin-right: calc(0.5rem * var(--tw-space-x-reverse));
|
||||
margin-left: calc(0.5rem * calc(1 - var(--tw-space-x-reverse)));
|
||||
}
|
||||
|
||||
.space-x-4 > :not([hidden]) ~ :not([hidden]) {
|
||||
--tw-space-x-reverse: 0;
|
||||
margin-right: calc(1rem * var(--tw-space-x-reverse));
|
||||
margin-left: calc(1rem * calc(1 - var(--tw-space-x-reverse)));
|
||||
}
|
||||
|
||||
.space-x-8 > :not([hidden]) ~ :not([hidden]) {
|
||||
--tw-space-x-reverse: 0;
|
||||
margin-right: calc(2rem * var(--tw-space-x-reverse));
|
||||
margin-left: calc(2rem * calc(1 - var(--tw-space-x-reverse)));
|
||||
}
|
||||
|
||||
.space-y-4 > :not([hidden]) ~ :not([hidden]) {
|
||||
--tw-space-y-reverse: 0;
|
||||
margin-top: calc(1rem * calc(1 - var(--tw-space-y-reverse)));
|
||||
margin-bottom: calc(1rem * var(--tw-space-y-reverse));
|
||||
}
|
||||
|
||||
.space-y-6 > :not([hidden]) ~ :not([hidden]) {
|
||||
--tw-space-y-reverse: 0;
|
||||
margin-top: calc(1.5rem * calc(1 - var(--tw-space-y-reverse)));
|
||||
margin-bottom: calc(1.5rem * var(--tw-space-y-reverse));
|
||||
}
|
||||
|
||||
.rounded {
|
||||
border-radius: 0.25rem;
|
||||
}
|
||||
|
||||
.rounded-full {
|
||||
border-radius: 9999px;
|
||||
}
|
||||
|
||||
.rounded-lg {
|
||||
border-radius: 0.5rem;
|
||||
}
|
||||
|
||||
.rounded-md {
|
||||
border-radius: 0.375rem;
|
||||
}
|
||||
|
||||
.border {
|
||||
border-width: 1px;
|
||||
}
|
||||
|
||||
.border-b {
|
||||
border-bottom-width: 1px;
|
||||
}
|
||||
|
||||
.border-b-2 {
|
||||
border-bottom-width: 2px;
|
||||
}
|
||||
|
||||
.border-black {
|
||||
--tw-border-opacity: 1;
|
||||
border-color: rgb(0 0 0 / var(--tw-border-opacity, 1));
|
||||
}
|
||||
|
||||
.border-gray-200 {
|
||||
--tw-border-opacity: 1;
|
||||
border-color: rgb(229 231 235 / var(--tw-border-opacity, 1));
|
||||
}
|
||||
|
||||
.bg-\[\#111827\] {
|
||||
--tw-bg-opacity: 1;
|
||||
background-color: rgb(17 24 39 / var(--tw-bg-opacity, 1));
|
||||
}
|
||||
|
||||
.bg-gray-100 {
|
||||
--tw-bg-opacity: 1;
|
||||
background-color: rgb(243 244 246 / var(--tw-bg-opacity, 1));
|
||||
}
|
||||
|
||||
.bg-gray-900 {
|
||||
--tw-bg-opacity: 1;
|
||||
background-color: rgb(17 24 39 / var(--tw-bg-opacity, 1));
|
||||
}
|
||||
|
||||
.bg-red-500 {
|
||||
--tw-bg-opacity: 1;
|
||||
background-color: rgb(239 68 68 / var(--tw-bg-opacity, 1));
|
||||
}
|
||||
|
||||
.bg-white {
|
||||
--tw-bg-opacity: 1;
|
||||
background-color: rgb(255 255 255 / var(--tw-bg-opacity, 1));
|
||||
}
|
||||
|
||||
.p-2 {
|
||||
padding: 0.5rem;
|
||||
}
|
||||
|
||||
.p-4 {
|
||||
padding: 1rem;
|
||||
}
|
||||
|
||||
.p-8 {
|
||||
padding: 2rem;
|
||||
}
|
||||
|
||||
.px-1 {
|
||||
padding-left: 0.25rem;
|
||||
padding-right: 0.25rem;
|
||||
}
|
||||
|
||||
.px-3 {
|
||||
padding-left: 0.75rem;
|
||||
padding-right: 0.75rem;
|
||||
}
|
||||
|
||||
.px-4 {
|
||||
padding-left: 1rem;
|
||||
padding-right: 1rem;
|
||||
}
|
||||
|
||||
.py-0\.5 {
|
||||
padding-top: 0.125rem;
|
||||
padding-bottom: 0.125rem;
|
||||
}
|
||||
|
||||
.py-2 {
|
||||
padding-top: 0.5rem;
|
||||
padding-bottom: 0.5rem;
|
||||
}
|
||||
|
||||
.py-4 {
|
||||
padding-top: 1rem;
|
||||
padding-bottom: 1rem;
|
||||
}
|
||||
|
||||
.py-6 {
|
||||
padding-top: 1.5rem;
|
||||
padding-bottom: 1.5rem;
|
||||
}
|
||||
|
||||
.pr-10 {
|
||||
padding-right: 2.5rem;
|
||||
}
|
||||
|
||||
.text-2xl {
|
||||
font-size: 1.5rem;
|
||||
line-height: 2rem;
|
||||
}
|
||||
|
||||
.text-base {
|
||||
font-size: 1rem;
|
||||
line-height: 1.5rem;
|
||||
}
|
||||
|
||||
.text-sm {
|
||||
font-size: 0.875rem;
|
||||
line-height: 1.25rem;
|
||||
}
|
||||
|
||||
.font-medium {
|
||||
font-weight: 500;
|
||||
}
|
||||
|
||||
.font-semibold {
|
||||
font-weight: 600;
|
||||
}
|
||||
|
||||
.text-blue-500 {
|
||||
--tw-text-opacity: 1;
|
||||
color: rgb(59 130 246 / var(--tw-text-opacity, 1));
|
||||
}
|
||||
|
||||
.text-blue-600 {
|
||||
--tw-text-opacity: 1;
|
||||
color: rgb(37 99 235 / var(--tw-text-opacity, 1));
|
||||
}
|
||||
|
||||
.text-gray-400 {
|
||||
--tw-text-opacity: 1;
|
||||
color: rgb(156 163 175 / var(--tw-text-opacity, 1));
|
||||
}
|
||||
|
||||
.text-gray-500 {
|
||||
--tw-text-opacity: 1;
|
||||
color: rgb(107 114 128 / var(--tw-text-opacity, 1));
|
||||
}
|
||||
|
||||
.text-gray-600 {
|
||||
--tw-text-opacity: 1;
|
||||
color: rgb(75 85 99 / var(--tw-text-opacity, 1));
|
||||
}
|
||||
|
||||
.text-white {
|
||||
--tw-text-opacity: 1;
|
||||
color: rgb(255 255 255 / var(--tw-text-opacity, 1));
|
||||
}
|
||||
|
||||
.opacity-25 {
|
||||
opacity: 0.25;
|
||||
}
|
||||
|
||||
.opacity-75 {
|
||||
opacity: 0.75;
|
||||
}
|
||||
|
||||
.filter {
|
||||
filter: var(--tw-blur) var(--tw-brightness) var(--tw-contrast) var(--tw-grayscale) var(--tw-hue-rotate) var(--tw-invert) var(--tw-saturate) var(--tw-sepia) var(--tw-drop-shadow);
|
||||
}
|
||||
|
||||
.hover\:bg-\[\#1f2937\]:hover {
|
||||
--tw-bg-opacity: 1;
|
||||
background-color: rgb(31 41 55 / var(--tw-bg-opacity, 1));
|
||||
}
|
||||
|
||||
.hover\:bg-gray-100:hover {
|
||||
--tw-bg-opacity: 1;
|
||||
background-color: rgb(243 244 246 / var(--tw-bg-opacity, 1));
|
||||
}
|
||||
|
||||
.hover\:bg-red-600:hover {
|
||||
--tw-bg-opacity: 1;
|
||||
background-color: rgb(220 38 38 / var(--tw-bg-opacity, 1));
|
||||
}
|
||||
|
||||
.hover\:text-gray-700:hover {
|
||||
--tw-text-opacity: 1;
|
||||
color: rgb(55 65 81 / var(--tw-text-opacity, 1));
|
||||
}
|
||||
|
||||
.hover\:text-gray-900:hover {
|
||||
--tw-text-opacity: 1;
|
||||
color: rgb(17 24 39 / var(--tw-text-opacity, 1));
|
||||
}
|
||||
|
||||
.focus\:outline-none:focus {
|
||||
outline: 2px solid transparent;
|
||||
outline-offset: 2px;
|
||||
}
|
||||
|
||||
.focus\:ring-2:focus {
|
||||
--tw-ring-offset-shadow: var(--tw-ring-inset) 0 0 0 var(--tw-ring-offset-width) var(--tw-ring-offset-color);
|
||||
--tw-ring-shadow: var(--tw-ring-inset) 0 0 0 calc(2px + var(--tw-ring-offset-width)) var(--tw-ring-color);
|
||||
box-shadow: var(--tw-ring-offset-shadow), var(--tw-ring-shadow), var(--tw-shadow, 0 0 #0000);
|
||||
}
|
||||
|
||||
.focus\:ring-blue-500:focus {
|
||||
--tw-ring-opacity: 1;
|
||||
--tw-ring-color: rgb(59 130 246 / var(--tw-ring-opacity, 1));
|
||||
}
|
||||
@@ -1 +0,0 @@
|
||||
rustfs bin path, do not delete
|
||||
|
Before Width: | Height: | Size: 23 KiB |
@@ -1,19 +0,0 @@
|
||||
/**
|
||||
* Copyright 2024 RustFS Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
@tailwind base;
|
||||
@tailwind components;
|
||||
@tailwind utilities;
|
||||
|
Before Width: | Height: | Size: 23 KiB |
|
Before Width: | Height: | Size: 34 KiB |
@@ -1,15 +0,0 @@
|
||||
<svg width="1558" height="260" viewBox="0 0 1558 260" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<g clip-path="url(#clip0_0_3)">
|
||||
<path d="M1288.5 112.905H1159.75V58.4404H1262L1270 0L1074 0V260H1159.75V162.997H1296.95L1288.5 112.905Z" fill="#0196D0"/>
|
||||
<path d="M1058.62 58.4404V0H789V58.4404H881.133V260H966.885V58.4404H1058.62Z" fill="#0196D0"/>
|
||||
<path d="M521 179.102V0L454.973 15V161C454.973 181.124 452.084 193.146 443.5 202C434.916 211.257 419.318 214.5 400.5 214.5C381.022 214.5 366.744 210.854 357.5 202C348.916 193.548 346.357 175.721 346.357 156V0L280 15V175.48C280 208.08 290.234 229.412 309.712 241.486C329.19 253.56 358.903 260 400.5 260C440.447 260 470.159 253.56 490.297 241.486C510.766 229.412 521 208.483 521 179.102Z" fill="#0196D0"/>
|
||||
<path d="M172.84 84.2813C172.84 97.7982 168.249 107.737 158.41 113.303C149.883 118.471 137.092 121.254 120.693 122.049V162.997C129.876 163.792 138.076 166.177 144.307 176.514L184.647 260H265L225.316 180.489C213.181 155.046 201.374 149.48 178.744 143.517C212.197 138.349 241.386 118.471 241.386 73.1499C241.386 53.2722 233.843 30.2141 218.756 17.8899C203.998 5.56575 183.991 0 159.394 0H120.693V48.5015H127.58C142.23 48.5015 153.6 51.4169 161.689 57.2477C169.233 62.8135 172.84 71.5596 172.84 84.2813ZM120.693 122.049C119.163 122.049 117.741 122.049 116.43 122.049H68.5457V48.5015H120.693V0H0V260H70.5137V162.997H110.526C113.806 162.997 117.741 162.997 120.693 162.997V122.049Z" fill="#0196D0"/>
|
||||
<path d="M774 179.297C774 160.829 766.671 144.669 752.013 131.972C738.127 119.66 712.025 110.169 673.708 103.5C662.136 101.191 651.722 99.6523 643.235 97.3437C586.532 84.6467 594.632 52.7118 650.564 52.7118C680.651 52.7118 709.582 61.946 738.127 66.9478C742.37 67.7174 743.913 68.1021 744.298 68.1021L750.47 12.697C720.383 3.46282 684.895 0 654.036 0C616.619 0 587.689 6.54088 567.245 19.2379C546.801 31.9349 536 57.7137 536 82.3382C536 103.5 543.715 119.66 559.916 131.972C575.731 143.515 604.276 152.749 645.55 160.059C658.279 162.368 668.694 163.907 676.794 166.215C685.023 168.524 691.066 170.704 694.924 172.756C702.253 176.604 706.11 182.375 706.11 188.531C706.11 196.611 701.481 202.767 692.224 207C664.836 220.081 587.689 212.001 556.83 198.15L543.715 247.784C547.186 248.169 552.972 249.323 559.916 250.477C616.619 259.327 690.681 270.869 741.212 238.935C762.814 225.468 774 206.23 774 179.297Z" fill="#0196D0"/>
|
||||
<path d="M1558 179.568C1558 160.383 1550.42 144.268 1535.67 131.99C1521.32 119.968 1494.34 110.631 1454.74 103.981C1442.38 101.679 1432.01 99.3764 1422.84 97.8416C1422.44 97.8416 1422.04 97.8416 1422.04 97.4579V112.422L1361.04 75.2038L1422.04 38.3692V52.9496C1424.7 52.9496 1427.49 52.9496 1430.41 52.9496C1461.51 52.9496 1491.42 62.5419 1521.32 67.5299C1525.31 67.9136 1526.9 67.9136 1527.3 67.9136L1533.68 12.6619C1502.98 3.83692 1465.9 0 1434 0C1395.33 0 1365.43 6.52277 1345.09 19.5683C1323.16 32.6139 1312 57.9376 1312 82.8776C1312 103.981 1320.37 120.096 1336.72 131.607C1353.46 143.885 1382.97 153.093 1425.23 160.383C1434 161.535 1441.18 162.686 1447.56 164.22L1448.36 150.791L1507.36 190.312L1445.57 224.844L1445.96 212.949C1409.68 215.635 1357.45 209.112 1333.53 197.985L1320.37 247.482C1323.56 248.249 1329.54 248.633 1336.72 250.551C1395.33 259.376 1471.88 270.887 1524.11 238.657C1546.84 225.611 1558 205.659 1558 179.568Z" fill="#0196D0"/>
|
||||
</g>
|
||||
<defs>
|
||||
<clipPath id="clip0_0_3">
|
||||
<rect width="1558" height="260" fill="white"/>
|
||||
</clipPath>
|
||||
</defs>
|
||||
</svg>
|
||||
|
Before Width: | Height: | Size: 3.4 KiB |
@@ -1,330 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::components::navbar::LoadingSpinner;
|
||||
use crate::route::Route;
|
||||
use crate::utils::{RustFSConfig, ServiceManager};
|
||||
use chrono::Datelike;
|
||||
use dioxus::logger::tracing::debug;
|
||||
use dioxus::prelude::*;
|
||||
use std::time::Duration;
|
||||
|
||||
const HEADER_LOGO: Asset = asset!("/assets/rustfs-logo.svg");
|
||||
const TAILWIND_CSS: Asset = asset!("/assets/tailwind.css");
|
||||
|
||||
/// Define the state of the service
|
||||
#[derive(PartialEq, Debug, Clone)]
|
||||
enum ServiceState {
|
||||
Start,
|
||||
Stop,
|
||||
}
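Note: the variant names describe the action the toggle button will offer rather than the current run state: `ServiceState::Start` means the service is stopped and the UI offers "Start service", while `ServiceState::Stop` means the service is running and the UI offers "Stop service". A minimal sketch of that mapping, for illustration only (the helper below is not part of the original file):

fn button_label(state: &ServiceState) -> &'static str {
    match state {
        // service is currently stopped, so the button offers to start it
        ServiceState::Start => "Start service",
        // service is currently running, so the button offers to stop it
        ServiceState::Stop => "Stop service",
    }
}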
|
||||
|
||||
/// Define the Home component
/// The Home component is the main view of the application.
/// It starts and stops the service, shows the current service status, and provides a button to toggle the service.
/// It also renders the footer, which links to the official site, documentation, GitHub, and license, and shows the application version.
/// In addition, it exposes buttons to change the theme and to open the settings page.
|
||||
#[component]
|
||||
pub fn Home() -> Element {
|
||||
#[allow(clippy::redundant_closure)]
|
||||
let service = use_signal(|| ServiceManager::new());
|
||||
let conf = RustFSConfig::load().unwrap_or_else(|e| {
|
||||
ServiceManager::show_error(&format!("load config failed: {e}"));
|
||||
RustFSConfig::default()
|
||||
});
|
||||
|
||||
debug!("loaded configurations: {:?}", conf);
|
||||
let config = use_signal(|| conf.clone());
|
||||
|
||||
use dioxus_router::prelude::Link;
|
||||
use document::{Meta, Stylesheet, Title};
|
||||
let mut service_state = use_signal(|| ServiceState::Start);
|
||||
// Create a periodic check on the effect of the service status
|
||||
use_effect(move || {
|
||||
spawn(async move {
|
||||
loop {
|
||||
if let Some(pid) = ServiceManager::check_service_status().await {
|
||||
debug!("service_running true pid: {:?}", pid);
|
||||
service_state.set(ServiceState::Stop);
|
||||
} else {
|
||||
debug!("service_running true pid: 0");
|
||||
service_state.set(ServiceState::Start);
|
||||
}
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
}
|
||||
});
|
||||
});
|
||||
debug!("project start service_state: {:?}", service_state.read());
|
||||
// Use 'use_signal' to manage service status
|
||||
let mut loading = use_signal(|| false);
|
||||
let mut start_service = move |_| {
|
||||
let service = service;
|
||||
let config = config.read().clone();
|
||||
let mut service_state = service_state;
|
||||
// set the loading status
|
||||
loading.set(true);
|
||||
debug!("stop loading_state: {:?}", loading.read());
|
||||
spawn(async move {
|
||||
match service.read().start(config).await {
|
||||
Ok(result) => {
|
||||
if result.success {
|
||||
let duration = result.end_time - result.start_time;
|
||||
debug!("The service starts successfully and takes a long time:{}ms", duration.num_milliseconds());
|
||||
service_state.set(ServiceState::Stop);
|
||||
} else {
|
||||
ServiceManager::show_error(&result.message);
|
||||
service_state.set(ServiceState::Start);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
ServiceManager::show_error(&format!("start service failed: {e}"));
|
||||
}
|
||||
}
|
||||
// Only set loading to false when it's actually done
|
||||
loading.set(false);
|
||||
debug!("start loading_state: {:?}", loading.read());
|
||||
});
|
||||
};
|
||||
|
||||
let mut stop_service = move |_| {
|
||||
let service = service;
|
||||
let mut service_state = service_state;
|
||||
// set the loading status
|
||||
loading.set(true);
|
||||
spawn(async move {
|
||||
match service.read().stop().await {
|
||||
Ok(result) => {
|
||||
if result.success {
|
||||
let duration = result.end_time - result.start_time;
|
||||
debug!("The service stops successfully and takes a long time:{}ms", duration.num_milliseconds());
|
||||
service_state.set(ServiceState::Start);
|
||||
} else {
|
||||
ServiceManager::show_error(&result.message);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
ServiceManager::show_error(&format!("stop service failed: {e}"));
|
||||
}
|
||||
}
|
||||
debug!("service_state: {:?}", service_state.read());
|
||||
// Only set loading to false when it's actually done
|
||||
loading.set(false);
|
||||
debug!("stop loading_state: {:?}", loading.read());
|
||||
});
|
||||
};
|
||||
|
||||
// Toggle the state when the button is clicked
|
||||
let toggle_service = {
|
||||
let mut service_state = service_state;
|
||||
debug!("toggle_service service_state: {:?}", service_state.read());
|
||||
move |_| {
|
||||
if service_state.read().eq(&ServiceState::Stop) {
|
||||
// If the service status is started, you need to run a command to stop the service
|
||||
stop_service(());
|
||||
service_state.set(ServiceState::Start);
|
||||
} else {
|
||||
start_service(());
|
||||
service_state.set(ServiceState::Stop);
|
||||
}
|
||||
}
|
||||
};
|
||||
|
||||
// Define dynamic styles based on state
|
||||
let button_class = if service_state.read().eq(&ServiceState::Start) {
|
||||
"bg-[#111827] hover:bg-[#1f2937] text-white px-4 py-2 rounded-md flex items-center space-x-2"
|
||||
} else {
|
||||
"bg-red-500 hover:bg-red-600 text-white px-4 py-2 rounded-md flex items-center space-x-2"
|
||||
};
|
||||
|
||||
rsx! {
|
||||
// The Stylesheet component inserts a style link into the head of the document
|
||||
Stylesheet { href: TAILWIND_CSS }
|
||||
Title { "RustFS APP" }
|
||||
Meta {
|
||||
name: "description",
|
||||
content: "RustFS is developed in the popular and safe Rust language and is compatible with the S3 protocol. It is suitable for AI/ML and massive data storage, big data, internet, industrial, and confidential storage scenarios. Nearly free to use. Licensed under Apache 2.0, with support for domestic confidential devices and systems.",
|
||||
}
|
||||
div { class: "min-h-screen flex flex-col items-center bg-white",
|
||||
div { class: "absolute top-4 right-6 flex space-x-2",
|
||||
// change theme
|
||||
button { class: "p-2 hover:bg-gray-100 rounded-lg", ChangeThemeButton {} }
|
||||
// setting button
|
||||
Link {
|
||||
class: "p-2 hover:bg-gray-100 rounded-lg",
|
||||
to: Route::SettingViews {},
|
||||
SettingButton {}
|
||||
}
|
||||
}
|
||||
main { class: "flex-1 flex flex-col items-center justify-center space-y-6 p-4",
|
||||
div { class: "w-24 h-24 bg-gray-900 rounded-full flex items-center justify-center",
|
||||
img { alt: "Logo", class: "w-16 h-16", src: HEADER_LOGO }
|
||||
}
|
||||
div { class: "text-gray-600",
|
||||
"Service is running on "
|
||||
span { class: "text-blue-600", " 127.0.0.1:9000 " }
|
||||
}
|
||||
LoadingSpinner {
|
||||
loading: loading.read().to_owned(),
|
||||
text: "processing...",
|
||||
}
|
||||
button { class: button_class, onclick: toggle_service,
|
||||
svg {
|
||||
class: "h-4 w-4",
|
||||
fill: "none",
|
||||
stroke: "currentColor",
|
||||
view_box: "0 0 24 24",
|
||||
xmlns: "http://www.w3.org/2000/svg",
|
||||
if service_state.read().eq(&ServiceState::Start) {
|
||||
path {
|
||||
d: "M14.752 11.168l-3.197-2.132A1 1 0 0010 9.87v4.263a1 1 0 001.555.832l3.197-2.132a1 1 0 000-1.664z",
|
||||
stroke_linecap: "round",
|
||||
stroke_linejoin: "round",
|
||||
stroke_width: "2",
|
||||
}
|
||||
path {
|
||||
d: "M21 12a9 9 0 11-18 0 9 9 0 0118 0z",
|
||||
stroke_linecap: "round",
|
||||
stroke_linejoin: "round",
|
||||
stroke_width: "2",
|
||||
}
|
||||
} else {
|
||||
path {
|
||||
stroke_linecap: "round",
|
||||
stroke_linejoin: "round",
|
||||
stroke_width: "2",
|
||||
d: "M21 12a9 9 0 11-18 0 9 9 0 0118 0z",
|
||||
}
|
||||
path {
|
||||
stroke_linecap: "round",
|
||||
stroke_linejoin: "round",
|
||||
stroke_width: "2",
|
||||
d: "M9 10h6v4H9z",
|
||||
}
|
||||
}
|
||||
}
|
||||
span { id: "serviceStatus",
|
||||
if service_state.read().eq(&ServiceState::Start) {
|
||||
"Start service"
|
||||
} else {
|
||||
"Stop service"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
Footer { version: "v1.0.0".to_string() }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[component]
|
||||
pub fn Footer(version: String) -> Element {
|
||||
let now = chrono::Local::now();
|
||||
let year = now.naive_local().year();
|
||||
rsx! {
|
||||
footer { class: "w-full py-6 flex flex-col items-center space-y-4 mb-6",
|
||||
nav { class: "flex space-x-4 text-gray-600",
|
||||
a { class: "hover:text-gray-900", href: "https://rustfs.com", "Official Site" }
|
||||
a {
|
||||
class: "hover:text-gray-900",
|
||||
href: "https://rustfs.com/docs",
|
||||
"Documentation"
|
||||
}
|
||||
a {
|
||||
class: "hover:text-gray-900",
|
||||
href: "https://github.com/rustfs/rustfs",
|
||||
"GitHub"
|
||||
}
|
||||
a {
|
||||
class: "hover:text-gray-900",
|
||||
href: "https://rustfs.com/docs/license/",
|
||||
"License"
|
||||
}
|
||||
a { class: "hover:text-gray-900", href: "#", "Sponsors" }
|
||||
}
|
||||
div { class: "text-gray-500 text-sm", " © rustfs.com {year}, All rights reserved." }
|
||||
div { class: "text-gray-400 text-sm mb-8", " version {version} " }
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[component]
|
||||
pub fn GoBackButtons() -> Element {
|
||||
rsx! {
|
||||
button {
|
||||
class: "p-2 hover:bg-gray-100 rounded-lg",
|
||||
"onclick": "window.history.back()",
|
||||
"Back to the Past"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[component]
|
||||
pub fn GoForwardButtons() -> Element {
|
||||
rsx! {
|
||||
button {
|
||||
class: "p-2 hover:bg-gray-100 rounded-lg",
|
||||
"onclick": "window.history.forward()",
|
||||
"Back to the Future"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[component]
|
||||
pub fn ChangeThemeButton() -> Element {
|
||||
rsx! {
|
||||
svg {
|
||||
class: "h-6 w-6 text-gray-600",
|
||||
fill: "none",
|
||||
stroke: "currentColor",
|
||||
view_box: "0 0 24 24",
|
||||
xmlns: "http://www.w3.org/2000/svg",
|
||||
path {
|
||||
d: "M9 3v2m6-2v2M9 19v2m6-2v2M5 9H3m2 6H3m18-6h-2m2 6h-2M7 19h10a2 2 0 002-2V7a2 2 0 00-2-2H7a2 2 0 00-2 2v10a2 2 0 002 2zM9 9h6v6H9V9z",
|
||||
stroke_linecap: "round",
|
||||
stroke_linejoin: "round",
|
||||
stroke_width: "2",
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[component]
|
||||
pub fn SettingButton() -> Element {
|
||||
rsx! {
|
||||
svg {
|
||||
class: "h-6 w-6 text-gray-600",
|
||||
fill: "none",
|
||||
stroke: "currentColor",
|
||||
view_box: "0 0 24 24",
|
||||
xmlns: "http://www.w3.org/2000/svg",
|
||||
path {
|
||||
d: "M10.325 4.317c.426-1.756 2.924-1.756 3.35 0a1.724 1.724 0 002.573 1.066c1.543-.94 3.31.826 2.37 2.37a1.724 1.724 0 001.065 2.572c1.756.426 1.756 2.924 0 3.35a1.724 1.724 0 00-1.066 2.573c.94 1.543-.826 3.31-2.37 2.37a1.724 1.724 0 00-2.572 1.065c-.426 1.756-2.924 1.756-3.35 0a1.724 1.724 0 00-2.573-1.066c-1.543.94-3.31-.826-2.37-2.37a1.724 1.724 0 00-1.065-2.572c-1.756-.426-1.756-2.924 0-3.35a1.724 1.724 0 001.066-2.573c-.94-1.543.826-3.31 2.37-2.37.996.608 2.296.07 2.572-1.065z",
|
||||
stroke_linecap: "round",
|
||||
stroke_linejoin: "round",
|
||||
stroke_width: "2",
|
||||
}
|
||||
path {
|
||||
d: "M15 12a3 3 0 11-6 0 3 3 0 016 0z",
|
||||
stroke_linecap: "round",
|
||||
stroke_linejoin: "round",
|
||||
stroke_width: "2",
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,20 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod home;
|
||||
pub use home::Home;
|
||||
mod navbar;
|
||||
pub use navbar::Navbar;
|
||||
mod setting;
|
||||
pub use setting::Setting;
|
||||
@@ -1,74 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::route::Route;
|
||||
use dioxus::logger::tracing::debug;
|
||||
use dioxus::prelude::*;
|
||||
|
||||
const NAVBAR_CSS: Asset = asset!("/assets/styling/navbar.css");
|
||||
|
||||
#[component]
|
||||
pub fn Navbar() -> Element {
|
||||
rsx! {
|
||||
document::Link { rel: "stylesheet", href: NAVBAR_CSS }
|
||||
|
||||
div { id: "navbar", class: "hidden", style: "display: none;",
|
||||
Link { to: Route::HomeViews {}, "Home" }
|
||||
Link { to: Route::SettingViews {}, "Setting" }
|
||||
}
|
||||
|
||||
Outlet::<Route> {}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Props, PartialEq, Debug, Clone)]
|
||||
pub struct LoadingSpinnerProps {
|
||||
#[props(default = true)]
|
||||
loading: bool,
|
||||
#[props(default = "正在处理中...")]
|
||||
text: &'static str,
|
||||
}
|
||||
|
||||
#[component]
|
||||
pub fn LoadingSpinner(props: LoadingSpinnerProps) -> Element {
|
||||
debug!("loading: {}", props.loading);
|
||||
if !props.loading {
|
||||
debug!("LoadingSpinner false loading: {}", props.loading);
|
||||
return rsx! {};
|
||||
}
|
||||
rsx! {
|
||||
div { class: "flex items-center justify-center z-10",
|
||||
svg {
|
||||
class: "animate-spin h-5 w-5 text-blue-500",
|
||||
xmlns: "http://www.w3.org/2000/svg",
|
||||
fill: "none",
|
||||
view_box: "0 0 24 24",
|
||||
circle {
|
||||
class: "opacity-25",
|
||||
cx: "12",
|
||||
cy: "12",
|
||||
r: "10",
|
||||
stroke: "currentColor",
|
||||
stroke_width: "4",
|
||||
}
|
||||
path {
|
||||
class: "opacity-75",
|
||||
fill: "currentColor",
|
||||
d: "M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z",
|
||||
}
|
||||
}
|
||||
span { class: "ml-2 text-gray-600", "{props.text}" }
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,216 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::components::navbar::LoadingSpinner;
|
||||
use dioxus::logger::tracing::{debug, error};
|
||||
use dioxus::prelude::*;
|
||||
|
||||
const SETTINGS_JS: Asset = asset!("/assets/js/sts.js");
|
||||
const TAILWIND_CSS: Asset = asset!("/assets/tailwind.css");
|
||||
#[component]
|
||||
pub fn Setting() -> Element {
|
||||
use crate::utils::{RustFSConfig, ServiceManager};
|
||||
use document::{Meta, Script, Stylesheet, Title};
|
||||
|
||||
#[allow(clippy::redundant_closure)]
|
||||
let service = use_signal(|| ServiceManager::new());
|
||||
let conf = RustFSConfig::load().unwrap_or_else(|e| {
|
||||
error!("load config error: {}", e);
|
||||
RustFSConfig::default_config()
|
||||
});
|
||||
debug!("conf address: {:?}", conf.clone().address);
|
||||
|
||||
let config = use_signal(|| conf.clone());
|
||||
let address_state = use_signal(|| conf.address.to_string());
|
||||
let mut host_state = use_signal(|| conf.host.to_string());
|
||||
let mut port_state = use_signal(|| conf.port.to_string());
|
||||
let mut access_key_state = use_signal(|| conf.access_key.to_string());
|
||||
let mut secret_key_state = use_signal(|| conf.secret_key.to_string());
|
||||
let mut volume_name_state = use_signal(|| conf.volume_name.to_string());
|
||||
let loading = use_signal(|| false);
|
||||
|
||||
let save_and_restart = {
|
||||
let host_state = host_state;
|
||||
let port_state = port_state;
|
||||
let access_key_state = access_key_state;
|
||||
let secret_key_state = secret_key_state;
|
||||
let volume_name_state = volume_name_state;
|
||||
let mut loading = loading;
|
||||
debug!("save_and_restart access_key:{}", access_key_state.read());
|
||||
move |_| {
|
||||
// set the loading status
|
||||
loading.set(true);
|
||||
let mut config = config;
|
||||
config.write().address = format!("{}:{}", host_state.read(), port_state.read());
|
||||
config.write().host = host_state.read().to_string();
|
||||
config.write().port = port_state.read().to_string();
|
||||
config.write().access_key = access_key_state.read().to_string();
|
||||
config.write().secret_key = secret_key_state.read().to_string();
|
||||
config.write().volume_name = volume_name_state.read().to_string();
|
||||
// restart service
|
||||
let service = service;
|
||||
let config = config.read().clone();
|
||||
spawn(async move {
|
||||
if let Err(e) = service.read().restart(config).await {
|
||||
ServiceManager::show_error(&format!("发送重启命令失败:{e}"));
|
||||
}
|
||||
// reset the status when you're done
|
||||
loading.set(false);
|
||||
});
|
||||
}
|
||||
};
|
||||
|
||||
rsx! {
|
||||
Title { "Settings - RustFS App" }
|
||||
Meta { name: "description", content: "Settings - RustFS App." }
|
||||
// The Stylesheet component inserts a style link into the head of the document
|
||||
Stylesheet { href: TAILWIND_CSS }
|
||||
Script { src: SETTINGS_JS }
|
||||
div { class: "bg-white p-8",
|
||||
h1 { class: "text-2xl font-semibold mb-6", "Settings" }
|
||||
div { class: "border-b border-gray-200 mb-6",
|
||||
nav { class: "flex space-x-8",
|
||||
button {
|
||||
class: "tab-btn px-1 py-4 text-sm font-medium border-b-2 border-black",
|
||||
"data-tab": "service",
|
||||
"onclick": "switchTab('service')",
|
||||
"Service "
|
||||
}
|
||||
button {
|
||||
class: "tab-btn px-1 py-4 text-sm font-medium text-gray-500 hover:text-gray-700",
|
||||
"data-tab": "user",
|
||||
"onclick": "switchTab('user')",
|
||||
"User "
|
||||
}
|
||||
button {
|
||||
class: "tab-btn px-1 py-4 text-sm font-medium text-gray-500 hover:text-gray-700 hidden",
|
||||
"data-tab": "logs",
|
||||
"onclick": "switchTab('logs')",
|
||||
"Logs "
|
||||
}
|
||||
}
|
||||
}
|
||||
div { id: "tabContent",
|
||||
div { class: "tab-content", id: "service",
|
||||
div { class: "mb-8",
|
||||
h2 { class: "text-base font-medium mb-2", "Service address" }
|
||||
p { class: "text-gray-600 mb-4",
|
||||
" The service address is the IP address and port number of the service. the default address is "
|
||||
code { class: "bg-gray-100 px-1 py-0.5 rounded", {address_state} }
|
||||
". "
|
||||
}
|
||||
div { class: "flex space-x-2",
|
||||
input {
|
||||
class: "border rounded px-3 py-2 w-48 focus:outline-none focus:ring-2 focus:ring-blue-500",
|
||||
r#type: "text",
|
||||
value: host_state,
|
||||
oninput: move |evt| host_state.set(evt.value().clone()),
|
||||
}
|
||||
span { class: "flex items-center", ":" }
|
||||
input {
|
||||
class: "border rounded px-3 py-2 w-20 focus:outline-none focus:ring-2 focus:ring-blue-500",
|
||||
r#type: "text",
|
||||
value: port_state,
|
||||
oninput: move |evt| port_state.set(evt.value().clone()),
|
||||
}
|
||||
}
|
||||
}
|
||||
div { class: "mb-8",
|
||||
h2 { class: "text-base font-medium mb-2", "Storage path" }
|
||||
p { class: "text-gray-600 mb-4",
|
||||
"Update the storage path of the service. the default path is {volume_name_state}."
|
||||
}
|
||||
input {
|
||||
class: "border rounded px-3 py-2 w-full focus:outline-none focus:ring-2 focus:ring-blue-500",
|
||||
r#type: "text",
|
||||
value: volume_name_state,
|
||||
oninput: move |evt| volume_name_state.set(evt.value().clone()),
|
||||
}
|
||||
}
|
||||
}
|
||||
div { class: "tab-content hidden", id: "user",
|
||||
div { class: "mb-8",
|
||||
h2 { class: "text-base font-medium mb-2", "User" }
|
||||
p { class: "text-gray-600 mb-4",
|
||||
"The user is the owner of the service. the default user is "
|
||||
code { class: "bg-gray-100 px-1 py-0.5 rounded", {access_key_state} }
|
||||
}
|
||||
input {
|
||||
class: "border rounded px-3 py-2 w-full focus:outline-none focus:ring-2 focus:ring-blue-500",
|
||||
r#type: "text",
|
||||
value: access_key_state,
|
||||
oninput: move |evt| access_key_state.set(evt.value().clone()),
|
||||
}
|
||||
}
|
||||
div { class: "mb-8",
|
||||
h2 { class: "text-base font-medium mb-2", "Password" }
|
||||
p { class: "text-gray-600 mb-4",
|
||||
"The password is the password of the user. the default password is "
|
||||
code { class: "bg-gray-100 px-1 py-0.5 rounded", {secret_key_state} }
|
||||
}
|
||||
div { class: "relative",
|
||||
input {
|
||||
class: "border rounded px-3 py-2 w-full pr-10 focus:outline-none focus:ring-2 focus:ring-blue-500",
|
||||
r#type: "password",
|
||||
value: secret_key_state,
|
||||
oninput: move |evt| secret_key_state.set(evt.value().clone()),
|
||||
}
|
||||
button {
|
||||
class: "absolute right-2 top-1/2 transform -translate-y-1/2 text-gray-500 hover:text-gray-700",
|
||||
"onclick": "togglePassword(this)",
|
||||
svg {
|
||||
class: "h-5 w-5",
|
||||
fill: "currentColor",
|
||||
view_box: "0 0 20 20",
|
||||
xmlns: "http://www.w3.org/2000/svg",
|
||||
path { d: "M10 12a2 2 0 100-4 2 2 0 000 4z" }
|
||||
path {
|
||||
clip_rule: "evenodd",
|
||||
d: "M.458 10C1.732 5.943 5.522 3 10 3s8.268 2.943 9.542 7c-1.274 4.057-5.064 7-9.542 7S1.732 14.057.458 10zM14 10a4 4 0 11-8 0 4 4 0 018 0z",
|
||||
fill_rule: "evenodd",
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
div { class: "tab-content hidden", id: "logs",
|
||||
div { class: "mb-8",
|
||||
h2 { class: "text-base font-medium mb-2", "Logs storage path" }
|
||||
p { class: "text-gray-600 mb-4",
|
||||
"The logs storage path is the path where the logs are stored. the default path is /var/log/rustfs. "
|
||||
}
|
||||
input {
|
||||
class: "border rounded px-3 py-2 w-full focus:outline-none focus:ring-2 focus:ring-blue-500",
|
||||
r#type: "text",
|
||||
value: "/var/logs/rustfs",
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
div { class: "flex space-x-4",
|
||||
button {
|
||||
class: "bg-[#111827] text-white px-4 py-2 rounded hover:bg-[#1f2937]",
|
||||
onclick: save_and_restart,
|
||||
" Save and restart "
|
||||
}
|
||||
GoBackButton { "Back" }
|
||||
}
|
||||
LoadingSpinner {
|
||||
loading: loading.read().to_owned(),
|
||||
text: "服务处理中...",
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1,23 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod components;
|
||||
mod route;
|
||||
mod utils;
|
||||
mod views;
|
||||
|
||||
fn main() {
|
||||
let _worker_guard = utils::init_logger();
|
||||
dioxus::launch(views::App);
|
||||
}
|
||||
@@ -1,564 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use keyring::Entry;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::error::Error;
|
||||
|
||||
/// Configuration for the RustFS service
|
||||
///
|
||||
/// # Fields
|
||||
/// * `address` - The address of the RustFS service
|
||||
/// * `host` - The host of the RustFS service
|
||||
/// * `port` - The port of the RustFS service
|
||||
/// * `access_key` - The access key of the RustFS service
|
||||
/// * `secret_key` - The secret key of the RustFS service
|
||||
/// * `domain_name` - The domain name of the RustFS service
|
||||
/// * `volume_name` - The volume name of the RustFS service
|
||||
/// * `console_address` - The console address of the RustFS service
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let config = RustFSConfig {
|
||||
/// address: "127.0.0.1:9000".to_string(),
|
||||
/// host: "127.0.0.1".to_string(),
|
||||
/// port: "9000".to_string(),
|
||||
/// access_key: "rustfsadmin".to_string(),
|
||||
/// secret_key: "rustfsadmin".to_string(),
|
||||
/// domain_name: "demo.rustfs.com".to_string(),
|
||||
/// volume_name: "data".to_string(),
|
||||
/// console_address: "127.0.0.1:9001".to_string(),
|
||||
/// };
|
||||
/// println!("{:?}", config);
|
||||
/// assert_eq!(config.address, "127.0.0.1:9000");
|
||||
/// ```
|
||||
#[derive(Debug, Clone, Default, Deserialize, Serialize, Ord, PartialOrd, Eq, PartialEq)]
|
||||
pub struct RustFSConfig {
|
||||
pub address: String,
|
||||
pub host: String,
|
||||
pub port: String,
|
||||
pub access_key: String,
|
||||
pub secret_key: String,
|
||||
pub domain_name: String,
|
||||
pub volume_name: String,
|
||||
pub console_address: String,
|
||||
}
|
||||
|
||||
impl RustFSConfig {
|
||||
/// Keyring service name
|
||||
const SERVICE_NAME: &'static str = "rustfs-service";
|
||||
/// Keyring key under which the configuration is stored
|
||||
const SERVICE_KEY: &'static str = "rustfs_key";
|
||||
/// default domain name
|
||||
const DEFAULT_DOMAIN_NAME_VALUE: &'static str = "demo.rustfs.com";
|
||||
/// default address value
|
||||
const DEFAULT_ADDRESS_VALUE: &'static str = "127.0.0.1:9000";
|
||||
/// default port value
|
||||
const DEFAULT_PORT_VALUE: &'static str = "9000";
|
||||
/// default host value
|
||||
const DEFAULT_HOST_VALUE: &'static str = "127.0.0.1";
|
||||
/// default access key value
|
||||
const DEFAULT_ACCESS_KEY_VALUE: &'static str = "rustfsadmin";
|
||||
/// default secret key value
|
||||
const DEFAULT_SECRET_KEY_VALUE: &'static str = "rustfsadmin";
|
||||
/// default console address value
|
||||
const DEFAULT_CONSOLE_ADDRESS_VALUE: &'static str = "127.0.0.1:9001";
|
||||
|
||||
/// get the default volume_name
|
||||
///
|
||||
/// # Returns
|
||||
/// * The default volume name
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let volume_name = RustFSConfig::default_volume_name();
|
||||
/// ```
|
||||
pub fn default_volume_name() -> String {
|
||||
dirs::home_dir()
|
||||
.map(|home| home.join("rustfs").join("data"))
|
||||
.and_then(|path| path.to_str().map(String::from))
|
||||
.unwrap_or_else(|| "data".to_string())
|
||||
}
|
||||
|
||||
/// create a default configuration
|
||||
///
|
||||
/// # Returns
|
||||
/// * The default configuration
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let config = RustFSConfig::default_config();
|
||||
/// println!("{:?}", config);
|
||||
/// assert_eq!(config.address, "127.0.0.1:9000");
|
||||
/// ```
|
||||
pub fn default_config() -> Self {
|
||||
Self {
|
||||
address: Self::DEFAULT_ADDRESS_VALUE.to_string(),
|
||||
host: Self::DEFAULT_HOST_VALUE.to_string(),
|
||||
port: Self::DEFAULT_PORT_VALUE.to_string(),
|
||||
access_key: Self::DEFAULT_ACCESS_KEY_VALUE.to_string(),
|
||||
secret_key: Self::DEFAULT_SECRET_KEY_VALUE.to_string(),
|
||||
domain_name: Self::DEFAULT_DOMAIN_NAME_VALUE.to_string(),
|
||||
volume_name: Self::default_volume_name(),
|
||||
console_address: Self::DEFAULT_CONSOLE_ADDRESS_VALUE.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
/// Load the configuration from the keyring
|
||||
///
|
||||
/// # Errors
|
||||
/// * If the configuration cannot be loaded from the keyring
|
||||
/// * If the configuration cannot be deserialized
|
||||
/// * If the address cannot be extracted from the configuration
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let config = RustFSConfig::load().unwrap();
|
||||
/// println!("{:?}", config);
|
||||
/// assert_eq!(config.address, "127.0.0.1:9000");
|
||||
/// ```
|
||||
pub fn load() -> Result<Self, Box<dyn Error>> {
|
||||
let mut config = Self::default_config();
|
||||
|
||||
// Try to get the configuration of the storage from the keyring
|
||||
let entry = Entry::new(Self::SERVICE_NAME, Self::SERVICE_KEY)?;
|
||||
if let Ok(stored_json) = entry.get_password() {
|
||||
if let Ok(stored_config) = serde_json::from_str::<RustFSConfig>(&stored_json) {
|
||||
// update fields that are not empty and non default
|
||||
if !stored_config.address.is_empty() && stored_config.address != Self::DEFAULT_ADDRESS_VALUE {
|
||||
config.address = stored_config.address;
|
||||
let (host, port) = Self::extract_host_port(config.address.as_str())
|
||||
.ok_or_else(|| format!("无法从地址 '{}' 中提取主机和端口", config.address))?;
|
||||
config.host = host.to_string();
|
||||
config.port = port.to_string();
|
||||
}
|
||||
if !stored_config.access_key.is_empty() && stored_config.access_key != Self::DEFAULT_ACCESS_KEY_VALUE {
|
||||
config.access_key = stored_config.access_key;
|
||||
}
|
||||
if !stored_config.secret_key.is_empty() && stored_config.secret_key != Self::DEFAULT_SECRET_KEY_VALUE {
|
||||
config.secret_key = stored_config.secret_key;
|
||||
}
|
||||
if !stored_config.domain_name.is_empty() && stored_config.domain_name != Self::DEFAULT_DOMAIN_NAME_VALUE {
|
||||
config.domain_name = stored_config.domain_name;
|
||||
}
|
||||
// The stored volume_name is updated only if it is not empty and different from the default
|
||||
if !stored_config.volume_name.is_empty() && stored_config.volume_name != Self::default_volume_name() {
|
||||
config.volume_name = stored_config.volume_name;
|
||||
}
|
||||
if !stored_config.console_address.is_empty()
|
||||
&& stored_config.console_address != Self::DEFAULT_CONSOLE_ADDRESS_VALUE
|
||||
{
|
||||
config.console_address = stored_config.console_address;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(config)
|
||||
}
|
||||
|
||||
/// Helper: extract the host and port from an address string
|
||||
/// # Arguments
|
||||
/// * `address` - The address string
|
||||
///
|
||||
/// # Returns
|
||||
/// * `Some((host, port))` - The host and port
|
||||
///
|
||||
/// # Errors
|
||||
/// * If the address is not in the form 'host:port'
|
||||
/// * If the port is not a valid u16
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let (host, port) = RustFSConfig::extract_host_port("127.0.0.1:9000").unwrap();
|
||||
/// assert_eq!(host, "127.0.0.1");
|
||||
/// assert_eq!(port, 9000);
|
||||
/// ```
|
||||
pub fn extract_host_port(address: &str) -> Option<(&str, u16)> {
|
||||
let parts: Vec<&str> = address.split(':').collect();
|
||||
if parts.len() == 2 {
|
||||
if let Ok(port) = parts[1].parse::<u16>() {
|
||||
return Some((parts[0], port));
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// save the configuration to keyring
|
||||
///
|
||||
/// # Errors
|
||||
/// * If the configuration cannot be serialized
|
||||
/// * If the configuration cannot be saved to the keyring
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let config = RustFSConfig::default_config();
|
||||
/// config.save().unwrap();
|
||||
/// ```
|
||||
pub fn save(&self) -> Result<(), Box<dyn Error>> {
|
||||
let entry = Entry::new(Self::SERVICE_NAME, Self::SERVICE_KEY)?;
|
||||
let json = serde_json::to_string(self)?;
|
||||
entry.set_password(&json)?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Clear the stored configuration from the system keyring
|
||||
///
|
||||
/// # Returns
|
||||
/// `Ok(())` if the configuration was successfully cleared, or an error if the operation failed.
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// RustFSConfig::clear().unwrap();
|
||||
/// ```
|
||||
#[allow(dead_code)]
|
||||
pub fn clear() -> Result<(), Box<dyn Error>> {
|
||||
let entry = Entry::new(Self::SERVICE_NAME, Self::SERVICE_KEY)?;
|
||||
entry.delete_credential()?;
|
||||
Ok(())
|
||||
}
|
||||
}
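Taken together, `save` and `load` form a keyring-backed round trip: `save` serializes the configuration to JSON and stores it under the service entry, and `load` merges any stored, non-default values over `default_config()`. A minimal sketch of that flow, assuming a usable OS keyring backend; the chosen port and the `expect` messages are illustrative only:

let mut config = RustFSConfig::default_config();
config.port = "9100".to_string();
config.address = format!("{}:{}", config.host, config.port);
config.save().expect("failed to store config in the keyring");      // serialize to JSON and persist
let reloaded = RustFSConfig::load().expect("failed to load config"); // merge stored values over defaults
assert_eq!(reloaded.port, "9100");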
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_rustfs_config_default() {
|
||||
let config = RustFSConfig::default();
|
||||
assert!(config.address.is_empty());
|
||||
assert!(config.host.is_empty());
|
||||
assert!(config.port.is_empty());
|
||||
assert!(config.access_key.is_empty());
|
||||
assert!(config.secret_key.is_empty());
|
||||
assert!(config.domain_name.is_empty());
|
||||
assert!(config.volume_name.is_empty());
|
||||
assert!(config.console_address.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rustfs_config_creation() {
|
||||
let config = RustFSConfig {
|
||||
address: "192.168.1.100:9000".to_string(),
|
||||
host: "192.168.1.100".to_string(),
|
||||
port: "9000".to_string(),
|
||||
access_key: "testuser".to_string(),
|
||||
secret_key: "testpass".to_string(),
|
||||
domain_name: "test.rustfs.com".to_string(),
|
||||
volume_name: "/data/rustfs".to_string(),
|
||||
console_address: "192.168.1.100:9001".to_string(),
|
||||
};
|
||||
|
||||
assert_eq!(config.address, "192.168.1.100:9000");
|
||||
assert_eq!(config.host, "192.168.1.100");
|
||||
assert_eq!(config.port, "9000");
|
||||
assert_eq!(config.access_key, "testuser");
|
||||
assert_eq!(config.secret_key, "testpass");
|
||||
assert_eq!(config.domain_name, "test.rustfs.com");
|
||||
assert_eq!(config.volume_name, "/data/rustfs");
|
||||
assert_eq!(config.console_address, "192.168.1.100:9001");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_default_volume_name() {
|
||||
let volume_name = RustFSConfig::default_volume_name();
|
||||
assert!(!volume_name.is_empty());
|
||||
// Should either be the home directory path or fallback to "data"
|
||||
assert!(volume_name.contains("rustfs") || volume_name == "data");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_default_config() {
|
||||
let config = RustFSConfig::default_config();
|
||||
assert_eq!(config.address, RustFSConfig::DEFAULT_ADDRESS_VALUE);
|
||||
assert_eq!(config.host, RustFSConfig::DEFAULT_HOST_VALUE);
|
||||
assert_eq!(config.port, RustFSConfig::DEFAULT_PORT_VALUE);
|
||||
assert_eq!(config.access_key, RustFSConfig::DEFAULT_ACCESS_KEY_VALUE);
|
||||
assert_eq!(config.secret_key, RustFSConfig::DEFAULT_SECRET_KEY_VALUE);
|
||||
assert_eq!(config.domain_name, RustFSConfig::DEFAULT_DOMAIN_NAME_VALUE);
|
||||
assert_eq!(config.console_address, RustFSConfig::DEFAULT_CONSOLE_ADDRESS_VALUE);
|
||||
assert!(!config.volume_name.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_host_port_valid() {
|
||||
let test_cases = vec![
|
||||
("127.0.0.1:9000", Some(("127.0.0.1", 9000))),
|
||||
("localhost:8080", Some(("localhost", 8080))),
|
||||
("192.168.1.100:3000", Some(("192.168.1.100", 3000))),
|
||||
("0.0.0.0:80", Some(("0.0.0.0", 80))),
|
||||
("example.com:443", Some(("example.com", 443))),
|
||||
];
|
||||
|
||||
for (input, expected) in test_cases {
|
||||
let result = RustFSConfig::extract_host_port(input);
|
||||
assert_eq!(result, expected, "Failed for input: {input}");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_host_port_invalid() {
|
||||
let invalid_cases = vec![
|
||||
"127.0.0.1", // Missing port
|
||||
"127.0.0.1:", // Empty port
|
||||
"127.0.0.1:abc", // Invalid port
|
||||
"127.0.0.1:99999", // Port out of range
|
||||
"", // Empty string
|
||||
"127.0.0.1:9000:extra", // Too many parts
|
||||
"invalid", // No colon
|
||||
];
|
||||
|
||||
for input in invalid_cases {
|
||||
let result = RustFSConfig::extract_host_port(input);
|
||||
assert_eq!(result, None, "Should be None for input: {input}");
|
||||
}
|
||||
|
||||
// Special case: empty host but valid port should still work
|
||||
let result = RustFSConfig::extract_host_port(":9000");
|
||||
assert_eq!(result, Some(("", 9000)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_host_port_edge_cases() {
|
||||
// Test edge cases for port numbers
|
||||
assert_eq!(RustFSConfig::extract_host_port("host:0"), Some(("host", 0)));
|
||||
assert_eq!(RustFSConfig::extract_host_port("host:65535"), Some(("host", 65535)));
|
||||
assert_eq!(RustFSConfig::extract_host_port("host:65536"), None); // Out of range
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialization() {
|
||||
let config = RustFSConfig {
|
||||
address: "127.0.0.1:9000".to_string(),
|
||||
host: "127.0.0.1".to_string(),
|
||||
port: "9000".to_string(),
|
||||
access_key: "admin".to_string(),
|
||||
secret_key: "password".to_string(),
|
||||
domain_name: "test.com".to_string(),
|
||||
volume_name: "/data".to_string(),
|
||||
console_address: "127.0.0.1:9001".to_string(),
|
||||
};
|
||||
|
||||
let json = serde_json::to_string(&config).unwrap();
|
||||
assert!(json.contains("127.0.0.1:9000"));
|
||||
assert!(json.contains("admin"));
|
||||
assert!(json.contains("test.com"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_deserialization() {
|
||||
let json = r#"{
|
||||
"address": "192.168.1.100:9000",
|
||||
"host": "192.168.1.100",
|
||||
"port": "9000",
|
||||
"access_key": "testuser",
|
||||
"secret_key": "testpass",
|
||||
"domain_name": "example.com",
|
||||
"volume_name": "/opt/data",
|
||||
"console_address": "192.168.1.100:9001"
|
||||
}"#;
|
||||
|
||||
let config: RustFSConfig = serde_json::from_str(json).unwrap();
|
||||
assert_eq!(config.address, "192.168.1.100:9000");
|
||||
assert_eq!(config.host, "192.168.1.100");
|
||||
assert_eq!(config.port, "9000");
|
||||
assert_eq!(config.access_key, "testuser");
|
||||
assert_eq!(config.secret_key, "testpass");
|
||||
assert_eq!(config.domain_name, "example.com");
|
||||
assert_eq!(config.volume_name, "/opt/data");
|
||||
assert_eq!(config.console_address, "192.168.1.100:9001");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_serialization_deserialization_roundtrip() {
|
||||
let original_config = RustFSConfig {
|
||||
address: "10.0.0.1:8080".to_string(),
|
||||
host: "10.0.0.1".to_string(),
|
||||
port: "8080".to_string(),
|
||||
access_key: "roundtrip_user".to_string(),
|
||||
secret_key: "roundtrip_pass".to_string(),
|
||||
domain_name: "roundtrip.test".to_string(),
|
||||
volume_name: "/tmp/roundtrip".to_string(),
|
||||
console_address: "10.0.0.1:8081".to_string(),
|
||||
};
|
||||
|
||||
let json = serde_json::to_string(&original_config).unwrap();
|
||||
let deserialized_config: RustFSConfig = serde_json::from_str(&json).unwrap();
|
||||
|
||||
assert_eq!(original_config, deserialized_config);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_config_ordering() {
|
||||
let config1 = RustFSConfig {
|
||||
address: "127.0.0.1:9000".to_string(),
|
||||
host: "127.0.0.1".to_string(),
|
||||
port: "9000".to_string(),
|
||||
access_key: "admin".to_string(),
|
||||
secret_key: "password".to_string(),
|
||||
domain_name: "test.com".to_string(),
|
||||
volume_name: "/data".to_string(),
|
||||
console_address: "127.0.0.1:9001".to_string(),
|
||||
};
|
||||
|
||||
let config2 = RustFSConfig {
|
||||
address: "127.0.0.1:9000".to_string(),
|
||||
host: "127.0.0.1".to_string(),
|
||||
port: "9000".to_string(),
|
||||
access_key: "admin".to_string(),
|
||||
secret_key: "password".to_string(),
|
||||
domain_name: "test.com".to_string(),
|
||||
volume_name: "/data".to_string(),
|
||||
console_address: "127.0.0.1:9001".to_string(),
|
||||
};
|
||||
|
||||
let config3 = RustFSConfig {
|
||||
address: "127.0.0.1:9001".to_string(), // Different port
|
||||
host: "127.0.0.1".to_string(),
|
||||
port: "9001".to_string(),
|
||||
access_key: "admin".to_string(),
|
||||
secret_key: "password".to_string(),
|
||||
domain_name: "test.com".to_string(),
|
||||
volume_name: "/data".to_string(),
|
||||
console_address: "127.0.0.1:9002".to_string(),
|
||||
};
|
||||
|
||||
assert_eq!(config1, config2);
|
||||
assert_ne!(config1, config3);
|
||||
assert!(config1 < config3); // Lexicographic ordering
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_clone() {
|
||||
let original = RustFSConfig::default_config();
|
||||
let cloned = original.clone();
|
||||
|
||||
assert_eq!(original, cloned);
|
||||
assert_eq!(original.address, cloned.address);
|
||||
assert_eq!(original.access_key, cloned.access_key);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_debug_format() {
|
||||
let config = RustFSConfig::default_config();
|
||||
let debug_str = format!("{config:?}");
|
||||
|
||||
assert!(debug_str.contains("RustFSConfig"));
|
||||
assert!(debug_str.contains("address"));
|
||||
assert!(debug_str.contains("127.0.0.1:9000"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_constants() {
|
||||
assert_eq!(RustFSConfig::SERVICE_NAME, "rustfs-service");
|
||||
assert_eq!(RustFSConfig::SERVICE_KEY, "rustfs_key");
|
||||
assert_eq!(RustFSConfig::DEFAULT_DOMAIN_NAME_VALUE, "demo.rustfs.com");
|
||||
assert_eq!(RustFSConfig::DEFAULT_ADDRESS_VALUE, "127.0.0.1:9000");
|
||||
assert_eq!(RustFSConfig::DEFAULT_PORT_VALUE, "9000");
|
||||
assert_eq!(RustFSConfig::DEFAULT_HOST_VALUE, "127.0.0.1");
|
||||
assert_eq!(RustFSConfig::DEFAULT_ACCESS_KEY_VALUE, "rustfsadmin");
|
||||
assert_eq!(RustFSConfig::DEFAULT_SECRET_KEY_VALUE, "rustfsadmin");
|
||||
assert_eq!(RustFSConfig::DEFAULT_CONSOLE_ADDRESS_VALUE, "127.0.0.1:9001");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_empty_strings() {
|
||||
let config = RustFSConfig {
|
||||
address: "".to_string(),
|
||||
host: "".to_string(),
|
||||
port: "".to_string(),
|
||||
access_key: "".to_string(),
|
||||
secret_key: "".to_string(),
|
||||
domain_name: "".to_string(),
|
||||
volume_name: "".to_string(),
|
||||
console_address: "".to_string(),
|
||||
};
|
||||
|
||||
assert!(config.address.is_empty());
|
||||
assert!(config.host.is_empty());
|
||||
assert!(config.port.is_empty());
|
||||
assert!(config.access_key.is_empty());
|
||||
assert!(config.secret_key.is_empty());
|
||||
assert!(config.domain_name.is_empty());
|
||||
assert!(config.volume_name.is_empty());
|
||||
assert!(config.console_address.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_very_long_strings() {
|
||||
let long_string = "a".repeat(1000);
|
||||
let config = RustFSConfig {
|
||||
address: format!("{long_string}:9000"),
|
||||
host: long_string.clone(),
|
||||
port: "9000".to_string(),
|
||||
access_key: long_string.clone(),
|
||||
secret_key: long_string.clone(),
|
||||
domain_name: format!("{long_string}.com"),
|
||||
volume_name: format!("/data/{long_string}"),
|
||||
console_address: format!("{long_string}:9001"),
|
||||
};
|
||||
|
||||
assert_eq!(config.host.len(), 1000);
|
||||
assert_eq!(config.access_key.len(), 1000);
|
||||
assert_eq!(config.secret_key.len(), 1000);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_special_characters() {
|
||||
let config = RustFSConfig {
|
||||
address: "127.0.0.1:9000".to_string(),
|
||||
host: "127.0.0.1".to_string(),
|
||||
port: "9000".to_string(),
|
||||
access_key: "user@domain.com".to_string(),
|
||||
secret_key: "p@ssw0rd!#$%".to_string(),
|
||||
domain_name: "test-domain.example.com".to_string(),
|
||||
volume_name: "/data/rust-fs/storage".to_string(),
|
||||
console_address: "127.0.0.1:9001".to_string(),
|
||||
};
|
||||
|
||||
assert!(config.access_key.contains("@"));
|
||||
assert!(config.secret_key.contains("!#$%"));
|
||||
assert!(config.domain_name.contains("-"));
|
||||
assert!(config.volume_name.contains("/"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_unicode_strings() {
|
||||
let config = RustFSConfig {
|
||||
address: "127.0.0.1:9000".to_string(),
|
||||
host: "127.0.0.1".to_string(),
|
||||
port: "9000".to_string(),
|
||||
access_key: "用户名".to_string(),
|
||||
secret_key: "密码 123".to_string(),
|
||||
domain_name: "测试.com".to_string(),
|
||||
volume_name: "/数据/存储".to_string(),
|
||||
console_address: "127.0.0.1:9001".to_string(),
|
||||
};
|
||||
|
||||
assert_eq!(config.access_key, "用户名");
|
||||
assert_eq!(config.secret_key, "密码 123");
|
||||
assert_eq!(config.domain_name, "测试.com");
|
||||
assert_eq!(config.volume_name, "/数据/存储");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_memory_efficiency() {
|
||||
// Test that the structure doesn't use excessive memory
|
||||
assert!(std::mem::size_of::<RustFSConfig>() < 1000);
|
||||
}
|
||||
|
||||
// Note: Keyring-related tests (load, save, clear) are not included here
|
||||
// because they require actual keyring access and would be integration tests
|
||||
// rather than unit tests. They should be tested separately in an integration
|
||||
// test environment where keyring access can be properly mocked or controlled.
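    // A minimal, hedged sketch of such an integration test. It assumes a real,
    // writable OS keyring and that the `load()` method shown above returns
    // `Result<RustFSConfig, _>`, so it is marked #[ignore] and must be run
    // explicitly (e.g. `cargo test -- --ignored`).
    #[test]
    #[ignore]
    fn test_keyring_roundtrip_integration() {
        let config = RustFSConfig::default_config();
        // Persist the configuration to the system keyring.
        config.save().expect("save should write the config to the keyring");

        // Read it back and compare a representative field.
        let loaded = RustFSConfig::load().expect("load should read the config back");
        assert_eq!(loaded.address, config.address);

        // Remove the stored entry so repeated runs start clean.
        RustFSConfig::clear().expect("clear should remove the stored config");
    }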
|
||||
}
|
||||
@@ -1,901 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::utils::RustFSConfig;
|
||||
use dioxus::logger::tracing::{debug, error, info};
|
||||
use lazy_static::lazy_static;
|
||||
use rust_embed::RustEmbed;
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::error::Error;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::Command as StdCommand;
|
||||
use std::time::Duration;
|
||||
use tokio::fs;
|
||||
use tokio::fs::File;
|
||||
use tokio::io::AsyncWriteExt;
|
||||
use tokio::net::TcpStream;
|
||||
use tokio::sync::{Mutex, mpsc};
|
||||
|
||||
#[derive(RustEmbed)]
|
||||
#[folder = "$CARGO_MANIFEST_DIR/embedded-rustfs/"]
|
||||
struct Asset;
|
||||
|
||||
// Use `lazy_static` to cache the checksum of embedded resources
|
||||
lazy_static! {
|
||||
static ref RUSTFS_HASH: Mutex<String> = {
|
||||
let rustfs_file = if cfg!(windows) { "rustfs.exe" } else { "rustfs" };
|
||||
let rustfs_data = Asset::get(rustfs_file).expect("RustFS binary not embedded");
|
||||
let hash = hex::encode(Sha256::digest(&rustfs_data.data));
|
||||
Mutex::new(hash)
|
||||
};
|
||||
}
|
||||
|
||||
/// Service command
|
||||
/// This enum represents the commands that can be sent to the service manager
|
||||
/// to start, stop, or restart the service
|
||||
/// The `Start` variant contains the configuration for the service
|
||||
/// The `Restart` variant contains the configuration for the service
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let config = RustFSConfig {
|
||||
/// address: "127.0.0.1:9000".to_string(),
|
||||
/// host: "127.0.0.1".to_string(),
|
||||
/// port: "9000".to_string(),
|
||||
/// access_key: "rustfsadmin".to_string(),
|
||||
/// secret_key: "rustfsadmin".to_string(),
|
||||
/// domain_name: "demo.rustfs.com".to_string(),
|
||||
/// volume_name: "data".to_string(),
|
||||
/// console_address: "127.0.0.1:9001".to_string(),
|
||||
/// };
|
||||
///
|
||||
/// let command = ServiceCommand::Start(config);
|
||||
/// println!("{:?}", command);
|
||||
///
|
||||
/// assert!(matches!(command, ServiceCommand::Start(_)));
|
||||
/// ```
|
||||
pub enum ServiceCommand {
|
||||
Start(RustFSConfig),
|
||||
Stop,
|
||||
Restart(RustFSConfig),
|
||||
}
|
||||
|
||||
/// Service operation result
|
||||
/// This struct represents the result of a service operation
|
||||
/// It contains the success flag, the start and end times, and a result message.
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// use chrono::Local;
|
||||
///
|
||||
/// let result = ServiceOperationResult {
|
||||
/// success: true,
|
||||
/// start_time: chrono::Local::now(),
|
||||
/// end_time: chrono::Local::now(),
|
||||
/// message: "服务启动成功".to_string(),
|
||||
/// };
|
||||
///
|
||||
/// println!("{:?}", result);
|
||||
/// assert_eq!(result.success, true);
|
||||
/// ```
|
||||
#[derive(Debug)]
|
||||
pub struct ServiceOperationResult {
|
||||
pub success: bool,
|
||||
pub start_time: chrono::DateTime<chrono::Local>,
|
||||
pub end_time: chrono::DateTime<chrono::Local>,
|
||||
pub message: String,
|
||||
}
|
||||
|
||||
/// Service manager
|
||||
/// This struct represents a service manager that can be used to start, stop, or restart a service
|
||||
/// It contains a command sender that can be used to send commands to the service manager
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let service_manager = ServiceManager::new();
|
||||
/// println!("{:?}", service_manager);
|
||||
/// ```
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ServiceManager {
|
||||
command_tx: mpsc::Sender<ServiceCommand>,
|
||||
// process: Arc<Mutex<Option<Child>>>,
|
||||
// pid: Arc<Mutex<Option<u32>>>, // Add PID storage
|
||||
// current_config: Arc<Mutex<Option<RustFSConfig>>>, // Add configuration storage
|
||||
}
|
||||
|
||||
impl ServiceManager {
|
||||
/// Check whether the service is running and return its PID if it is
|
||||
/// This function is platform dependent
|
||||
/// On Unix systems, it uses the `ps` command to check for the service
|
||||
/// On Windows systems, it uses the `wmic` command to check for the service
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let pid = check_service_status().await;
|
||||
/// println!("{:?}", pid);
|
||||
/// ```
|
||||
pub async fn check_service_status() -> Option<u32> {
|
||||
#[cfg(unix)]
|
||||
{
|
||||
// use the ps command on a unix system
|
||||
if let Ok(output) = StdCommand::new("ps").arg("-ef").output() {
|
||||
let output_str = String::from_utf8_lossy(&output.stdout);
|
||||
for line in output_str.lines() {
|
||||
// match lines that contain `rustfs/bin/rustfs`
|
||||
if line.contains("rustfs/bin/rustfs") && !line.contains("grep") {
|
||||
if let Some(pid_str) = line.split_whitespace().nth(1) {
|
||||
if let Ok(pid) = pid_str.parse::<u32>() {
|
||||
return Some(pid);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
{
|
||||
if let Ok(output) = StdCommand::new("wmic")
|
||||
.arg("process")
|
||||
.arg("where")
|
||||
.arg("caption='rustfs.exe'")
|
||||
.arg("get")
|
||||
.arg("processid")
|
||||
.output()
|
||||
{
|
||||
let output_str = String::from_utf8_lossy(&output.stdout);
|
||||
for line in output_str.lines() {
|
||||
if let Ok(pid) = line.trim().parse::<u32>() {
|
||||
return Some(pid);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// Prepare the service
|
||||
/// This function extracts the embedded service executable if it is missing or outdated
|
||||
/// It also creates the necessary directories for the service
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let executable_path = prepare_service().await;
|
||||
/// println!("{:?}", executable_path);
|
||||
/// ```
|
||||
async fn prepare_service() -> Result<PathBuf, Box<dyn Error>> {
|
||||
// get the user directory
|
||||
let home_dir = dirs::home_dir().ok_or("无法获取用户目录")?;
|
||||
let rustfs_dir = home_dir.join("rustfs");
|
||||
let bin_dir = rustfs_dir.join("bin");
|
||||
let data_dir = rustfs_dir.join("data");
|
||||
let logs_dir = rustfs_dir.join("logs");
|
||||
|
||||
// create the necessary directories
|
||||
for dir in [&bin_dir, &data_dir, &logs_dir] {
|
||||
if !dir.exists() {
|
||||
tokio::fs::create_dir_all(dir).await?;
|
||||
}
|
||||
}
|
||||
|
||||
let rustfs_file = if cfg!(windows) { "rustfs.exe" } else { "rustfs" };
|
||||
let executable_path = bin_dir.join(rustfs_file);
|
||||
let hash_path = bin_dir.join("embedded_rustfs.sha256");
|
||||
|
||||
if executable_path.exists() && hash_path.exists() {
|
||||
let cached_hash = fs::read_to_string(&hash_path).await?;
|
||||
let expected_hash = RUSTFS_HASH.lock().await;
|
||||
if cached_hash == *expected_hash {
|
||||
println!("Use cached rustfs: {executable_path:?}");
|
||||
return Ok(executable_path);
|
||||
}
|
||||
}
|
||||
|
||||
// Extract the embedded binary and write it to disk
|
||||
let rustfs_data = Asset::get(rustfs_file).expect("RustFS binary not embedded");
|
||||
let mut file = File::create(&executable_path).await?;
|
||||
file.write_all(&rustfs_data.data).await?;
|
||||
let expected_hash = hex::encode(Sha256::digest(&rustfs_data.data));
|
||||
fs::write(&hash_path, expected_hash).await?;
|
||||
|
||||
// set execution permissions on unix systems
|
||||
#[cfg(unix)]
|
||||
{
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
let mut perms = std::fs::metadata(&executable_path)?.permissions();
|
||||
perms.set_mode(0o755);
|
||||
std::fs::set_permissions(&executable_path, perms)?;
|
||||
}
|
||||
|
||||
Ok(executable_path)
|
||||
}
|
||||
|
||||
/// Helper function: Extracts the port from the address string
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let address = "127.0.0.1:9000";
|
||||
/// let port = extract_port(address);
|
||||
/// println!("{:?}", port);
|
||||
/// ```
|
||||
fn extract_port(address: &str) -> Option<u16> {
|
||||
address.split(':').nth(1)?.parse().ok()
|
||||
}
|
||||
|
||||
/// Create a new instance of the service manager
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let service_manager = ServiceManager::new();
|
||||
/// println!("{:?}", service_manager);
|
||||
/// ```
|
||||
pub(crate) fn new() -> Self {
|
||||
let (command_tx, mut command_rx) = mpsc::channel(10);
|
||||
// Start the control loop
|
||||
tokio::spawn(async move {
|
||||
while let Some(cmd) = command_rx.recv().await {
|
||||
match cmd {
|
||||
ServiceCommand::Start(config) => {
|
||||
if let Err(e) = Self::start_service(&config).await {
|
||||
Self::show_error(&format!("启动服务失败:{e}"));
|
||||
}
|
||||
}
|
||||
ServiceCommand::Stop => {
|
||||
if let Err(e) = Self::stop_service().await {
|
||||
Self::show_error(&format!("停止服务失败:{e}"));
|
||||
}
|
||||
}
|
||||
ServiceCommand::Restart(config) => {
|
||||
if Self::check_service_status().await.is_some() {
|
||||
if let Err(e) = Self::stop_service().await {
|
||||
Self::show_error(&format!("重启服务失败:{e}"));
|
||||
continue;
|
||||
}
|
||||
}
|
||||
if let Err(e) = Self::start_service(&config).await {
|
||||
Self::show_error(&format!("重启服务失败:{e}"));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
ServiceManager { command_tx }
|
||||
}
|
||||
|
||||
/// Start the service
|
||||
/// This function starts the service with the given configuration
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let config = RustFSConfig {
|
||||
/// address: "127.0.0.1:9000".to_string(),
|
||||
/// host: "127.0.0.1".to_string(),
|
||||
/// port: "9000".to_string(),
|
||||
/// access_key: "rustfsadmin".to_string(),
|
||||
/// secret_key: "rustfsadmin".to_string(),
|
||||
/// domain_name: "demo.rustfs.com".to_string(),
|
||||
/// volume_name: "data".to_string(),
|
||||
/// console_address: "127.0.0.1:9001".to_string(),
|
||||
/// };
|
||||
///
|
||||
/// let result = start_service(&config).await;
|
||||
/// println!("{:?}", result);
|
||||
/// ```
|
||||
async fn start_service(config: &RustFSConfig) -> Result<(), Box<dyn Error>> {
|
||||
// Check if the service is already running
|
||||
if let Some(existing_pid) = Self::check_service_status().await {
|
||||
return Err(format!("服务已经在运行,PID: {existing_pid}").into());
|
||||
}
|
||||
|
||||
// Prepare the service program
|
||||
let executable_path = Self::prepare_service().await?;
|
||||
// Ensure the data directory exists
|
||||
let volume_name_path = Path::new(&config.volume_name);
|
||||
if !volume_name_path.exists() {
|
||||
tokio::fs::create_dir_all(&config.volume_name).await?;
|
||||
}
|
||||
|
||||
// Extract the port from the configuration
|
||||
let main_port = Self::extract_port(&config.address).ok_or("无法解析主服务端口")?;
|
||||
let console_port = Self::extract_port(&config.console_address).ok_or("无法解析控制台端口")?;
|
||||
|
||||
let host = config.address.split(':').next().ok_or("无法解析主机地址")?;
|
||||
|
||||
// Check the port
|
||||
let ports = vec![main_port, console_port];
|
||||
for port in ports {
|
||||
if Self::is_port_in_use(host, port).await {
|
||||
return Err(format!("端口 {port} 已被占用").into());
|
||||
}
|
||||
}
|
||||
|
||||
// Start the service
|
||||
let mut child = tokio::process::Command::new(executable_path)
|
||||
.arg("--address")
|
||||
.arg(&config.address)
|
||||
.arg("--access-key")
|
||||
.arg(&config.access_key)
|
||||
.arg("--secret-key")
|
||||
.arg(&config.secret_key)
|
||||
.arg("--console-address")
|
||||
.arg(&config.console_address)
|
||||
.arg(config.volume_name.clone())
|
||||
.spawn()?;
|
||||
|
||||
let process_pid = child.id().unwrap();
|
||||
// Wait for the service to start
|
||||
tokio::time::sleep(Duration::from_secs(2)).await;
|
||||
|
||||
// Check if the service started successfully
|
||||
if Self::is_port_in_use(host, main_port).await {
|
||||
Self::show_info(&format!("服务启动成功!进程 ID: {process_pid}"));
|
||||
|
||||
Ok(())
|
||||
} else {
|
||||
child.kill().await?;
|
||||
Err("服务启动失败".into())
|
||||
}
|
||||
}
|
||||
|
||||
/// Stop the service
|
||||
/// This function stops the service
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let result = stop_service().await;
|
||||
/// println!("{:?}", result);
|
||||
/// ```
|
||||
async fn stop_service() -> Result<(), Box<dyn Error>> {
|
||||
let existing_pid = Self::check_service_status().await;
|
||||
debug!("existing_pid: {:?}", existing_pid);
|
||||
if let Some(service_pid) = existing_pid {
|
||||
// Attempt to terminate the process
|
||||
#[cfg(unix)]
|
||||
{
|
||||
StdCommand::new("kill").arg("-9").arg(service_pid.to_string()).output()?;
|
||||
}
|
||||
|
||||
#[cfg(windows)]
|
||||
{
|
||||
StdCommand::new("taskkill")
|
||||
.arg("/F")
|
||||
.arg("/PID")
|
||||
.arg(&service_pid.to_string())
|
||||
.output()?;
|
||||
}
|
||||
|
||||
// Verify that the service is indeed stopped
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
if Self::check_service_status().await.is_some() {
|
||||
return Err("服务停止失败".into());
|
||||
}
|
||||
Self::show_info("服务已成功停止");
|
||||
|
||||
Ok(())
|
||||
} else {
|
||||
Err("服务未运行".into())
|
||||
}
|
||||
}
|
||||
|
||||
/// Check if the port is in use
|
||||
/// This function checks if the given port is in use on the given host
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let host = "127.0.0.1";
|
||||
/// let port = 9000;
|
||||
/// let result = is_port_in_use(host, port).await;
|
||||
/// println!("{:?}", result);
|
||||
/// ```
|
||||
async fn is_port_in_use(host: &str, port: u16) -> bool {
|
||||
TcpStream::connect(format!("{host}:{port}")).await.is_ok()
|
||||
}
|
||||
|
||||
/// Show an error message
|
||||
/// This function shows an error message dialog
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// show_error("This is an error message");
|
||||
/// ```
|
||||
pub(crate) fn show_error(message: &str) {
|
||||
rfd::MessageDialog::new()
|
||||
.set_title("错误")
|
||||
.set_description(message)
|
||||
.set_level(rfd::MessageLevel::Error)
|
||||
.show();
|
||||
}
|
||||
|
||||
/// Show an information message
|
||||
/// This function shows an information message dialog
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// show_info("This is an information message");
|
||||
/// ```
|
||||
pub(crate) fn show_info(message: &str) {
|
||||
rfd::MessageDialog::new()
|
||||
.set_title("成功")
|
||||
.set_description(message)
|
||||
.set_level(rfd::MessageLevel::Info)
|
||||
.show();
|
||||
}
|
||||
|
||||
/// Start the service
|
||||
/// This function sends a `Start` command to the service manager
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let config = RustFSConfig {
|
||||
/// address: "127.0.0.1:9000".to_string(),
|
||||
/// host: "127.0.0.1".to_string(),
|
||||
/// port: "9000".to_string(),
|
||||
/// access_key: "rustfsadmin".to_string(),
|
||||
/// secret_key: "rustfsadmin".to_string(),
|
||||
/// domain_name: "demo.rustfs.com".to_string(),
|
||||
/// volume_name: "data".to_string(),
|
||||
/// console_address: "127.0.0.1:9001".to_string(),
|
||||
/// };
|
||||
///
|
||||
/// let service_manager = ServiceManager::new();
|
||||
/// let result = service_manager.start(config).await;
|
||||
/// println!("{:?}", result);
|
||||
/// ```
|
||||
///
|
||||
/// # Errors
|
||||
/// This function returns an error if the service fails to start
|
||||
///
|
||||
/// # Panics
|
||||
/// This function panics if the port number is invalid
|
||||
///
|
||||
/// # Safety
|
||||
/// This function is not marked as unsafe
|
||||
///
|
||||
/// # Performance
|
||||
/// This function is not optimized for performance
|
||||
///
|
||||
/// # Design
|
||||
/// This function is designed to be simple and easy to use
|
||||
///
|
||||
/// # Security
|
||||
/// This function does not have any security implications
|
||||
pub async fn start(&self, config: RustFSConfig) -> Result<ServiceOperationResult, Box<dyn Error>> {
|
||||
let start_time = chrono::Local::now();
|
||||
self.command_tx.send(ServiceCommand::Start(config.clone())).await?;
|
||||
|
||||
let host = &config.host;
|
||||
let port = config.port.parse::<u16>().expect("无效的端口号");
|
||||
// wait for the service to actually start
|
||||
let mut retries = 0;
|
||||
while retries < 30 {
|
||||
// wait up to 30 seconds
|
||||
if Self::check_service_status().await.is_some() && Self::is_port_in_use(host, port).await {
|
||||
let end_time = chrono::Local::now();
|
||||
return Ok(ServiceOperationResult {
|
||||
success: true,
|
||||
start_time,
|
||||
end_time,
|
||||
message: "服务启动成功".to_string(),
|
||||
});
|
||||
}
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
retries += 1;
|
||||
}
|
||||
|
||||
Err("服务启动超时".into())
|
||||
}
|
||||
|
||||
/// Stop the service
|
||||
/// This function sends a `Stop` command to the service manager
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let service_manager = ServiceManager::new();
|
||||
/// let result = service_manager.stop().await;
|
||||
/// println!("{:?}", result);
|
||||
/// ```
|
||||
///
|
||||
/// # Errors
|
||||
/// This function returns an error if the service fails to stop
|
||||
///
|
||||
/// # Panics
|
||||
/// This function panics if the port number is invalid
|
||||
///
|
||||
/// # Safety
|
||||
/// This function is not marked as unsafe
|
||||
///
|
||||
/// # Performance
|
||||
/// This function is not optimized for performance
|
||||
///
|
||||
/// # Design
|
||||
/// This function is designed to be simple and easy to use
|
||||
///
|
||||
/// # Security
|
||||
/// This function does not have any security implications
|
||||
pub async fn stop(&self) -> Result<ServiceOperationResult, Box<dyn Error>> {
|
||||
let start_time = chrono::Local::now();
|
||||
self.command_tx.send(ServiceCommand::Stop).await?;
|
||||
|
||||
// Wait for the service to actually stop
|
||||
let mut retries = 0;
|
||||
while retries < 15 {
|
||||
// Wait up to 15 seconds
|
||||
if Self::check_service_status().await.is_none() {
|
||||
let end_time = chrono::Local::now();
|
||||
return Ok(ServiceOperationResult {
|
||||
success: true,
|
||||
start_time,
|
||||
end_time,
|
||||
message: "服务停止成功".to_string(),
|
||||
});
|
||||
}
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
retries += 1;
|
||||
}
|
||||
|
||||
Err("服务停止超时".into())
|
||||
}
|
||||
|
||||
/// Restart the service
|
||||
/// This function sends a `Restart` command to the service manager
|
||||
///
|
||||
/// # Example
|
||||
/// ```
|
||||
/// let config = RustFSConfig {
|
||||
/// address: "127.0.0.1:9000".to_string(),
|
||||
/// host: "127.0.0.1".to_string(),
|
||||
/// port: "9000".to_string(),
|
||||
/// access_key: "rustfsadmin".to_string(),
|
||||
/// secret_key: "rustfsadmin".to_string(),
|
||||
/// domain_name: "demo.rustfs.com".to_string(),
|
||||
/// volume_name: "data".to_string(),
|
||||
/// console_address: "127.0.0.1:9001".to_string(),
|
||||
/// };
|
||||
///
|
||||
/// let service_manager = ServiceManager::new();
|
||||
/// let result = service_manager.restart(config).await;
|
||||
/// println!("{:?}", result);
|
||||
/// ```
|
||||
///
|
||||
/// # Errors
|
||||
/// This function returns an error if the service fails to restart
|
||||
///
|
||||
/// # Panics
|
||||
/// This function panics if the port number is invalid
|
||||
///
|
||||
/// # Safety
|
||||
/// This function is not marked as unsafe
|
||||
///
|
||||
/// # Performance
|
||||
/// This function is not optimized for performance
|
||||
///
|
||||
/// # Design
|
||||
/// This function is designed to be simple and easy to use
|
||||
///
|
||||
/// # Security
|
||||
/// This function does not have any security implications
|
||||
pub async fn restart(&self, config: RustFSConfig) -> Result<ServiceOperationResult, Box<dyn Error>> {
|
||||
let start_time = chrono::Local::now();
|
||||
self.command_tx.send(ServiceCommand::Restart(config.clone())).await?;
|
||||
|
||||
let host = &config.host;
|
||||
let port = config.port.parse::<u16>().expect("无效的端口号");
|
||||
|
||||
// wait for the service to restart
|
||||
let mut retries = 0;
|
||||
while retries < 45 {
|
||||
// Allow a longer wait, since both the stop and start steps are involved
|
||||
if Self::check_service_status().await.is_some() && Self::is_port_in_use(host, port).await {
|
||||
match config.save() {
|
||||
Ok(_) => info!("save config success"),
|
||||
Err(e) => {
|
||||
error!("save config error: {}", e);
|
||||
self.command_tx.send(ServiceCommand::Stop).await?;
|
||||
Self::show_error("保存配置失败");
|
||||
return Err("保存配置失败".into());
|
||||
}
|
||||
}
|
||||
let end_time = chrono::Local::now();
|
||||
return Ok(ServiceOperationResult {
|
||||
success: true,
|
||||
start_time,
|
||||
end_time,
|
||||
message: "服务重启成功".to_string(),
|
||||
});
|
||||
}
|
||||
tokio::time::sleep(Duration::from_secs(1)).await;
|
||||
retries += 1;
|
||||
}
|
||||
Err("服务重启超时".into())
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::time::Duration;
|
||||
|
||||
#[test]
|
||||
fn test_service_command_creation() {
|
||||
let config = RustFSConfig::default_config();
|
||||
|
||||
let start_cmd = ServiceCommand::Start(config.clone());
|
||||
let stop_cmd = ServiceCommand::Stop;
|
||||
let restart_cmd = ServiceCommand::Restart(config);
|
||||
|
||||
// Test that commands can be created
|
||||
match start_cmd {
|
||||
ServiceCommand::Start(_) => {}
|
||||
_ => panic!("Expected Start command"),
|
||||
}
|
||||
|
||||
match stop_cmd {
|
||||
ServiceCommand::Stop => {}
|
||||
_ => panic!("Expected Stop command"),
|
||||
}
|
||||
|
||||
match restart_cmd {
|
||||
ServiceCommand::Restart(_) => {}
|
||||
_ => panic!("Expected Restart command"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_service_operation_result_creation() {
|
||||
let start_time = chrono::Local::now();
|
||||
let end_time = chrono::Local::now();
|
||||
|
||||
let success_result = ServiceOperationResult {
|
||||
success: true,
|
||||
start_time,
|
||||
end_time,
|
||||
message: "Operation successful".to_string(),
|
||||
};
|
||||
|
||||
let failure_result = ServiceOperationResult {
|
||||
success: false,
|
||||
start_time,
|
||||
end_time,
|
||||
message: "Operation failed".to_string(),
|
||||
};
|
||||
|
||||
assert!(success_result.success);
|
||||
assert_eq!(success_result.message, "Operation successful");
|
||||
|
||||
assert!(!failure_result.success);
|
||||
assert_eq!(failure_result.message, "Operation failed");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_service_operation_result_debug() {
|
||||
let result = ServiceOperationResult {
|
||||
success: true,
|
||||
start_time: chrono::Local::now(),
|
||||
end_time: chrono::Local::now(),
|
||||
message: "Test message".to_string(),
|
||||
};
|
||||
|
||||
let debug_str = format!("{result:?}");
|
||||
assert!(debug_str.contains("ServiceOperationResult"));
|
||||
assert!(debug_str.contains("success: true"));
|
||||
assert!(debug_str.contains("Test message"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_service_manager_creation() {
|
||||
// Test ServiceManager creation in a tokio runtime
|
||||
let rt = tokio::runtime::Runtime::new().unwrap();
|
||||
rt.block_on(async {
|
||||
let service_manager = ServiceManager::new();
|
||||
|
||||
// Test that ServiceManager can be created and cloned
|
||||
let cloned_manager = service_manager.clone();
|
||||
|
||||
// Both should be valid (we can't test much more without async runtime)
|
||||
assert!(format!("{service_manager:?}").contains("ServiceManager"));
|
||||
assert!(format!("{cloned_manager:?}").contains("ServiceManager"));
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_port_valid() {
|
||||
let test_cases = vec![
|
||||
("127.0.0.1:9000", Some(9000)),
|
||||
("localhost:8080", Some(8080)),
|
||||
("192.168.1.100:3000", Some(3000)),
|
||||
("0.0.0.0:80", Some(80)),
|
||||
("example.com:443", Some(443)),
|
||||
("host:65535", Some(65535)),
|
||||
("host:1", Some(1)),
|
||||
];
|
||||
|
||||
for (input, expected) in test_cases {
|
||||
let result = ServiceManager::extract_port(input);
|
||||
assert_eq!(result, expected, "Failed for input: {input}");
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_port_invalid() {
|
||||
let invalid_cases = vec![
|
||||
"127.0.0.1", // Missing port
|
||||
"127.0.0.1:", // Empty port
|
||||
"127.0.0.1:abc", // Invalid port
|
||||
"127.0.0.1:99999", // Port out of range
|
||||
"", // Empty string
|
||||
"invalid", // No colon
|
||||
"host:-1", // Negative port
|
||||
"host:0.5", // Decimal port
|
||||
];
|
||||
|
||||
for input in invalid_cases {
|
||||
let result = ServiceManager::extract_port(input);
|
||||
assert_eq!(result, None, "Should be None for input: {input}");
|
||||
}
|
||||
|
||||
// Special case: empty host but valid port should still work
|
||||
assert_eq!(ServiceManager::extract_port(":9000"), Some(9000));
|
||||
|
||||
// Special case: multiple colons - extract_port takes the second part
|
||||
// For "127.0.0.1:9000:extra", it takes "9000" which is valid
|
||||
assert_eq!(ServiceManager::extract_port("127.0.0.1:9000:extra"), Some(9000));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_port_edge_cases() {
|
||||
// Test edge cases for port numbers
|
||||
assert_eq!(ServiceManager::extract_port("host:0"), Some(0));
|
||||
assert_eq!(ServiceManager::extract_port("host:65535"), Some(65535));
|
||||
assert_eq!(ServiceManager::extract_port("host:65536"), None); // Out of range
|
||||
// IPv6-like address - extract_port takes the second part after split(':')
|
||||
// For "::1:8080", split(':') gives ["", "", "1", "8080"], nth(1) gives ""
|
||||
assert_eq!(ServiceManager::extract_port("::1:8080"), None); // Second part is empty
|
||||
// For "[::1]:8080", split(':') gives ["[", "", "1]", "8080"], nth(1) gives ""
|
||||
assert_eq!(ServiceManager::extract_port("[::1]:8080"), None); // Second part is empty
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_show_error() {
|
||||
// Test that show_error function exists and can be called
|
||||
// We can't actually test the dialog in a test environment
|
||||
// so we just verify the function signature
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_show_info() {
|
||||
// Test that show_info function exists and can be called
|
||||
// We can't actually test the dialog in a test environment
|
||||
// so we just verify the function signature
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_service_operation_result_timing() {
|
||||
let start_time = chrono::Local::now();
|
||||
std::thread::sleep(Duration::from_millis(10)); // Small delay
|
||||
let end_time = chrono::Local::now();
|
||||
|
||||
let result = ServiceOperationResult {
|
||||
success: true,
|
||||
start_time,
|
||||
end_time,
|
||||
message: "Timing test".to_string(),
|
||||
};
|
||||
|
||||
// End time should be after start time
|
||||
assert!(result.end_time >= result.start_time);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_service_operation_result_with_unicode() {
|
||||
let result = ServiceOperationResult {
|
||||
success: true,
|
||||
start_time: chrono::Local::now(),
|
||||
end_time: chrono::Local::now(),
|
||||
message: "操作成功 🎉".to_string(),
|
||||
};
|
||||
|
||||
assert_eq!(result.message, "操作成功 🎉");
|
||||
assert!(result.success);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_service_operation_result_with_long_message() {
|
||||
let long_message = "A".repeat(10000);
|
||||
let result = ServiceOperationResult {
|
||||
success: false,
|
||||
start_time: chrono::Local::now(),
|
||||
end_time: chrono::Local::now(),
|
||||
message: long_message.clone(),
|
||||
};
|
||||
|
||||
assert_eq!(result.message.len(), 10000);
|
||||
assert_eq!(result.message, long_message);
|
||||
assert!(!result.success);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_service_command_with_different_configs() {
|
||||
let config1 = RustFSConfig {
|
||||
address: "127.0.0.1:9000".to_string(),
|
||||
host: "127.0.0.1".to_string(),
|
||||
port: "9000".to_string(),
|
||||
access_key: "admin1".to_string(),
|
||||
secret_key: "pass1".to_string(),
|
||||
domain_name: "test1.com".to_string(),
|
||||
volume_name: "/data1".to_string(),
|
||||
console_address: "127.0.0.1:9001".to_string(),
|
||||
};
|
||||
|
||||
let config2 = RustFSConfig {
|
||||
address: "192.168.1.100:8080".to_string(),
|
||||
host: "192.168.1.100".to_string(),
|
||||
port: "8080".to_string(),
|
||||
access_key: "admin2".to_string(),
|
||||
secret_key: "pass2".to_string(),
|
||||
domain_name: "test2.com".to_string(),
|
||||
volume_name: "/data2".to_string(),
|
||||
console_address: "192.168.1.100:8081".to_string(),
|
||||
};
|
||||
|
||||
let start_cmd1 = ServiceCommand::Start(config1);
|
||||
let restart_cmd2 = ServiceCommand::Restart(config2);
|
||||
|
||||
// Test that different configs can be used
|
||||
match start_cmd1 {
|
||||
ServiceCommand::Start(config) => {
|
||||
assert_eq!(config.address, "127.0.0.1:9000");
|
||||
assert_eq!(config.access_key, "admin1");
|
||||
}
|
||||
_ => panic!("Expected Start command"),
|
||||
}
|
||||
|
||||
match restart_cmd2 {
|
||||
ServiceCommand::Restart(config) => {
|
||||
assert_eq!(config.address, "192.168.1.100:8080");
|
||||
assert_eq!(config.access_key, "admin2");
|
||||
}
|
||||
_ => panic!("Expected Restart command"),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_memory_efficiency() {
|
||||
// Test that structures don't use excessive memory
|
||||
assert!(std::mem::size_of::<ServiceCommand>() < 2000);
|
||||
assert!(std::mem::size_of::<ServiceOperationResult>() < 1000);
|
||||
assert!(std::mem::size_of::<ServiceManager>() < 1000);
|
||||
}
|
||||
|
||||
// Note: The following methods are not tested here because they require:
|
||||
// - Async runtime (tokio)
|
||||
// - File system access
|
||||
// - Network access
|
||||
// - Process management
|
||||
// - External dependencies (embedded assets)
|
||||
//
|
||||
// These should be tested in integration tests:
|
||||
// - check_service_status()
|
||||
// - prepare_service()
|
||||
// - start_service()
|
||||
// - stop_service()
|
||||
// - is_port_in_use()
|
||||
// - ServiceManager::start()
|
||||
// - ServiceManager::stop()
|
||||
// - ServiceManager::restart()
|
||||
//
|
||||
// The RUSTFS_HASH lazy_static is also not tested here as it depends
|
||||
// on embedded assets that may not be available in unit test environment.
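    // A hedged sketch of one such integration test: it binds a throwaway local
    // listener and checks is_port_in_use() against it. It assumes the tokio test
    // macro is available and real loopback networking works, so it is marked
    // #[ignore] (run with `cargo test -- --ignored`).
    #[tokio::test]
    #[ignore]
    async fn test_is_port_in_use_integration() {
        use tokio::net::TcpListener;

        // Bind to port 0 so the OS picks a free port for us.
        let listener = TcpListener::bind("127.0.0.1:0").await.unwrap();
        let port = listener.local_addr().unwrap().port();

        // While the listener is alive, the port should be reported as in use.
        assert!(ServiceManager::is_port_in_use("127.0.0.1", port).await);

        // After the listener is dropped, a new connection should be refused.
        drop(listener);
        assert!(!ServiceManager::is_port_in_use("127.0.0.1", port).await);
    }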
|
||||
}
|
||||
@@ -1,300 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use dioxus::logger::tracing::debug;
|
||||
use tracing_appender::non_blocking::WorkerGuard;
|
||||
use tracing_appender::rolling::{RollingFileAppender, Rotation};
|
||||
use tracing_subscriber::fmt;
|
||||
use tracing_subscriber::layer::SubscriberExt;
|
||||
use tracing_subscriber::util::SubscriberInitExt;
|
||||
|
||||
/// Initialize the logger with a rolling file appender
|
||||
/// that rotates log files daily
|
||||
pub fn init_logger() -> WorkerGuard {
|
||||
// configure rolling logs, rotated daily
|
||||
let home_dir = dirs::home_dir().expect("无法获取用户目录");
|
||||
let rustfs_dir = home_dir.join("rustfs");
|
||||
let logs_dir = rustfs_dir.join("logs");
|
||||
let file_appender = RollingFileAppender::builder()
|
||||
.rotation(Rotation::DAILY) // rotate log files once per day
|
||||
.filename_prefix("rustfs-cli") // log file names will be prefixed with `rustfs-cli.`
|
||||
.filename_suffix("log") // log file names will be suffixed with `.log`
|
||||
.build(logs_dir) // build an appender that stores log files in ~/rustfs/logs
|
||||
.expect("initializing rolling file appender failed");
|
||||
// non-blocking writer for improved performance
|
||||
let (non_blocking_file, worker_guard) = tracing_appender::non_blocking(file_appender);
|
||||
|
||||
// console output layer
|
||||
let console_layer = fmt::layer()
|
||||
.with_writer(std::io::stdout)
|
||||
.with_ansi(true)
|
||||
.with_line_number(true); // include line numbers (ANSI colors are enabled above)
|
||||
|
||||
// file output layer
|
||||
let file_layer = fmt::layer()
|
||||
.with_writer(non_blocking_file)
|
||||
.with_ansi(false)
|
||||
.with_thread_names(true)
|
||||
.with_target(true)
|
||||
.with_thread_ids(true)
|
||||
.with_level(true)
|
||||
.with_line_number(true); // include line numbers (ANSI colors are disabled for the file)
|
||||
|
||||
// Combine all layers and initialize the global subscriber
|
||||
tracing_subscriber::registry()
|
||||
.with(console_layer)
|
||||
.with(file_layer)
|
||||
.with(tracing_subscriber::EnvFilter::new("info")) // default log level filter ("info")
|
||||
.init();
|
||||
debug!("Logger initialized");
|
||||
worker_guard
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use std::sync::Once;
|
||||
|
||||
static INIT: Once = Once::new();
|
||||
|
||||
// Helper function to ensure logger is only initialized once in tests
|
||||
fn ensure_logger_init() {
|
||||
INIT.call_once(|| {
|
||||
// Initialize a simple test logger to avoid conflicts
|
||||
let _ = tracing_subscriber::fmt().with_test_writer().try_init();
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_logger_initialization_components() {
|
||||
ensure_logger_init();
|
||||
|
||||
// Test that we can create the components used in init_logger
|
||||
// without actually initializing the global logger again
|
||||
|
||||
// Test home directory access
|
||||
let home_dir_result = dirs::home_dir();
|
||||
assert!(home_dir_result.is_some(), "Should be able to get home directory");
|
||||
|
||||
let home_dir = home_dir_result.unwrap();
|
||||
let rustfs_dir = home_dir.join("rustfs");
|
||||
let logs_dir = rustfs_dir.join("logs");
|
||||
|
||||
// Test path construction
|
||||
assert!(rustfs_dir.to_string_lossy().contains("rustfs"));
|
||||
assert!(logs_dir.to_string_lossy().contains("logs"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rolling_file_appender_builder() {
|
||||
ensure_logger_init();
|
||||
|
||||
// Test that we can create a RollingFileAppender builder
|
||||
let builder = RollingFileAppender::builder()
|
||||
.rotation(Rotation::DAILY)
|
||||
.filename_prefix("test-rustfs-cli")
|
||||
.filename_suffix("log");
|
||||
|
||||
// We can't actually build it without creating directories,
|
||||
// but we can verify the builder pattern works
|
||||
let debug_str = format!("{builder:?}");
|
||||
// The actual debug format might be different, so just check it's not empty
|
||||
assert!(!debug_str.is_empty());
|
||||
// Check that it contains some expected parts
|
||||
assert!(debug_str.contains("Builder") || debug_str.contains("builder") || debug_str.contains("RollingFileAppender"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_rotation_types() {
|
||||
ensure_logger_init();
|
||||
|
||||
// Test different rotation types
|
||||
let daily = Rotation::DAILY;
|
||||
let hourly = Rotation::HOURLY;
|
||||
let minutely = Rotation::MINUTELY;
|
||||
let never = Rotation::NEVER;
|
||||
|
||||
// Test that rotation types can be created and formatted
|
||||
assert!(!format!("{daily:?}").is_empty());
|
||||
assert!(!format!("{hourly:?}").is_empty());
|
||||
assert!(!format!("{minutely:?}").is_empty());
|
||||
assert!(!format!("{never:?}").is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_fmt_layer_configuration() {
|
||||
ensure_logger_init();
|
||||
|
||||
// Test that we can create fmt layers with different configurations
|
||||
// We can't actually test the layers directly due to type complexity,
|
||||
// but we can test that the configuration values are correct
|
||||
|
||||
// Test console layer settings
|
||||
let console_ansi = true;
|
||||
let console_line_number = true;
|
||||
assert!(console_ansi);
|
||||
assert!(console_line_number);
|
||||
|
||||
// Test file layer settings
|
||||
let file_ansi = false;
|
||||
let file_thread_names = true;
|
||||
let file_target = true;
|
||||
let file_thread_ids = true;
|
||||
let file_level = true;
|
||||
let file_line_number = true;
|
||||
|
||||
assert!(!file_ansi);
|
||||
assert!(file_thread_names);
|
||||
assert!(file_target);
|
||||
assert!(file_thread_ids);
|
||||
assert!(file_level);
|
||||
assert!(file_line_number);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_env_filter_creation() {
|
||||
ensure_logger_init();
|
||||
|
||||
// Test that EnvFilter can be created with different levels
|
||||
let info_filter = tracing_subscriber::EnvFilter::new("info");
|
||||
let debug_filter = tracing_subscriber::EnvFilter::new("debug");
|
||||
let warn_filter = tracing_subscriber::EnvFilter::new("warn");
|
||||
let error_filter = tracing_subscriber::EnvFilter::new("error");
|
||||
|
||||
// Test that filters can be created
|
||||
assert!(!format!("{info_filter:?}").is_empty());
|
||||
assert!(!format!("{debug_filter:?}").is_empty());
|
||||
assert!(!format!("{warn_filter:?}").is_empty());
|
||||
assert!(!format!("{error_filter:?}").is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_path_construction() {
|
||||
ensure_logger_init();
|
||||
|
||||
// Test path construction logic used in init_logger
|
||||
if let Some(home_dir) = dirs::home_dir() {
|
||||
let rustfs_dir = home_dir.join("rustfs");
|
||||
let logs_dir = rustfs_dir.join("logs");
|
||||
|
||||
// Test that paths are constructed correctly
|
||||
assert!(rustfs_dir.ends_with("rustfs"));
|
||||
assert!(logs_dir.ends_with("logs"));
|
||||
assert!(logs_dir.parent().unwrap().ends_with("rustfs"));
|
||||
|
||||
// Test path string representation
|
||||
let rustfs_str = rustfs_dir.to_string_lossy();
|
||||
let logs_str = logs_dir.to_string_lossy();
|
||||
|
||||
assert!(rustfs_str.contains("rustfs"));
|
||||
assert!(logs_str.contains("rustfs"));
|
||||
assert!(logs_str.contains("logs"));
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_filename_patterns() {
|
||||
ensure_logger_init();
|
||||
|
||||
// Test the filename patterns used in the logger
|
||||
let prefix = "rustfs-cli";
|
||||
let suffix = "log";
|
||||
|
||||
assert_eq!(prefix, "rustfs-cli");
|
||||
assert_eq!(suffix, "log");
|
||||
|
||||
// Test that these would create valid filenames
|
||||
let sample_filename = format!("{prefix}.2024-01-01.{suffix}");
|
||||
assert_eq!(sample_filename, "rustfs-cli.2024-01-01.log");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_worker_guard_type() {
|
||||
ensure_logger_init();
|
||||
|
||||
// Test that WorkerGuard type exists and can be referenced
|
||||
// We can't actually create one without the full setup, but we can test the type
|
||||
let guard_size = std::mem::size_of::<WorkerGuard>();
|
||||
assert!(guard_size > 0, "WorkerGuard should have non-zero size");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_logger_configuration_constants() {
|
||||
ensure_logger_init();
|
||||
|
||||
// Test the configuration values used in the logger
|
||||
let default_log_level = "info";
|
||||
let filename_prefix = "rustfs-cli";
|
||||
let filename_suffix = "log";
|
||||
let rotation = Rotation::DAILY;
|
||||
|
||||
assert_eq!(default_log_level, "info");
|
||||
assert_eq!(filename_prefix, "rustfs-cli");
|
||||
assert_eq!(filename_suffix, "log");
|
||||
assert!(matches!(rotation, Rotation::DAILY));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_directory_names() {
|
||||
ensure_logger_init();
|
||||
|
||||
// Test the directory names used in the logger setup
|
||||
let rustfs_dir_name = "rustfs";
|
||||
let logs_dir_name = "logs";
|
||||
|
||||
assert_eq!(rustfs_dir_name, "rustfs");
|
||||
assert_eq!(logs_dir_name, "logs");
|
||||
|
||||
// Test path joining
|
||||
let combined = format!("{rustfs_dir_name}/{logs_dir_name}");
|
||||
assert_eq!(combined, "rustfs/logs");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_layer_settings() {
|
||||
ensure_logger_init();
|
||||
|
||||
// Test the boolean settings used in layer configuration
|
||||
let console_ansi = true;
|
||||
let console_line_number = true;
|
||||
let file_ansi = false;
|
||||
let file_thread_names = true;
|
||||
let file_target = true;
|
||||
let file_thread_ids = true;
|
||||
let file_level = true;
|
||||
let file_line_number = true;
|
||||
|
||||
// Verify the settings
|
||||
assert!(console_ansi);
|
||||
assert!(console_line_number);
|
||||
assert!(!file_ansi);
|
||||
assert!(file_thread_names);
|
||||
assert!(file_target);
|
||||
assert!(file_thread_ids);
|
||||
assert!(file_level);
|
||||
assert!(file_line_number);
|
||||
}
|
||||
|
||||
// Note: The actual init_logger() function is not tested here because:
|
||||
// 1. It initializes a global tracing subscriber which can only be done once
|
||||
// 2. It requires file system access to create directories
|
||||
// 3. It has side effects that would interfere with other tests
|
||||
// 4. It returns a WorkerGuard that needs to be kept alive
|
||||
//
|
||||
// This function should be tested in integration tests where:
|
||||
// - File system access can be properly controlled
|
||||
// - The global state can be managed
|
||||
// - The actual logging behavior can be verified
|
||||
// - The WorkerGuard lifecycle can be properly managed
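    // A hedged sketch of such an integration test. It assumes it runs in its own
    // test binary (so the global subscriber is set exactly once) and that the
    // ~/rustfs/logs directory may be created beforehand. Marked #[ignore] so it
    // only runs on demand.
    #[test]
    #[ignore]
    fn test_init_logger_integration() {
        // Make sure the expected log directory exists before initializing.
        let logs_dir = dirs::home_dir().unwrap().join("rustfs").join("logs");
        std::fs::create_dir_all(&logs_dir).unwrap();

        // Keep the guard alive so the non-blocking writer flushes on drop.
        let _guard = init_logger();
        dioxus::logger::tracing::info!("integration log line");

        // At minimum, the logs directory should still be present and usable.
        assert!(logs_dir.exists());
    }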
|
||||
}
|
||||
@@ -1,38 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::route::Route;
|
||||
use dioxus::logger::tracing::info;
|
||||
use dioxus::prelude::*;
|
||||
|
||||
const FAVICON: Asset = asset!("/assets/favicon.ico");
|
||||
const TAILWIND_CSS: Asset = asset!("/assets/tailwind.css");
|
||||
|
||||
/// The main application component
|
||||
/// This is the root component of the application
|
||||
/// It contains the global resources and the router
|
||||
/// for the application
|
||||
#[component]
|
||||
pub fn App() -> Element {
|
||||
// Build cool things ✌️
|
||||
use document::{Link, Title};
|
||||
info!("App rendered");
|
||||
rsx! {
|
||||
// Global app resources
|
||||
Link { rel: "icon", href: FAVICON }
|
||||
Link { rel: "stylesheet", href: TAILWIND_CSS }
|
||||
Title { "RustFS" }
|
||||
Router::<Route> {}
|
||||
}
|
||||
}
|
||||
@@ -1,23 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::components::Home;
|
||||
use dioxus::prelude::*;
|
||||
|
||||
#[component]
|
||||
pub fn HomeViews() -> Element {
|
||||
rsx! {
|
||||
Home {}
|
||||
}
|
||||
}
|
||||
@@ -1,23 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::components::Setting;
|
||||
use dioxus::prelude::*;
|
||||
|
||||
#[component]
|
||||
pub fn SettingViews() -> Element {
|
||||
rsx! {
|
||||
Setting {}
|
||||
}
|
||||
}
|
||||
@@ -1,24 +0,0 @@
|
||||
/**
|
||||
* Copyright 2024 RustFS Team
|
||||
*
|
||||
* Licensed under the Apache License, Version 2.0 (the "License");
|
||||
* you may not use this file except in compliance with the License.
|
||||
* You may obtain a copy of the License at
|
||||
*
|
||||
* http://www.apache.org/licenses/LICENSE-2.0
|
||||
*
|
||||
* Unless required by applicable law or agreed to in writing, software
|
||||
* distributed under the License is distributed on an "AS IS" BASIS,
|
||||
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
* See the License for the specific language governing permissions and
|
||||
* limitations under the License.
|
||||
*/
|
||||
|
||||
module.exports = {
|
||||
mode: "all",
|
||||
content: ["./src/**/*.{rs,html,css}", "./dist/**/*.html"],
|
||||
theme: {
|
||||
extend: {},
|
||||
},
|
||||
plugins: [],
|
||||
};
|
||||
43
crates/ahm/Cargo.toml
Normal file
@@ -0,0 +1,43 @@
|
||||
[package]
|
||||
name = "rustfs-ahm"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors = ["RustFS Team"]
|
||||
license.workspace = true
|
||||
description = "RustFS AHM (Automatic Health Management) Scanner"
|
||||
repository.workspace = true
|
||||
rust-version.workspace = true
|
||||
homepage.workspace = true
|
||||
documentation = "https://docs.rs/rustfs-ahm/latest/rustfs_ahm/"
|
||||
keywords = ["RustFS", "AHM", "health-management", "scanner", "Minio"]
|
||||
categories = ["web-programming", "development-tools", "filesystem"]
|
||||
|
||||
[dependencies]
|
||||
rustfs-ecstore = { workspace = true }
|
||||
rustfs-common = { workspace = true }
|
||||
rustfs-filemeta = { workspace = true }
|
||||
rustfs-madmin = { workspace = true }
|
||||
tokio = { workspace = true, features = ["full"] }
|
||||
tokio-util = { workspace = true }
|
||||
tracing = { workspace = true }
|
||||
serde = { workspace = true, features = ["derive"] }
|
||||
time = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
thiserror = { workspace = true }
|
||||
uuid = { workspace = true, features = ["v4", "serde"] }
|
||||
anyhow = { workspace = true }
|
||||
async-trait = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
s3s = { workspace = true }
|
||||
chrono = { workspace = true }
|
||||
rand = { workspace = true }
|
||||
reqwest = { workspace = true }
|
||||
tempfile = { workspace = true }
|
||||
walkdir = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
serde_json = { workspace = true }
|
||||
serial_test = { workspace = true }
|
||||
tracing-subscriber = { workspace = true }
|
||||
tempfile = { workspace = true }
|
||||
heed = { workspace = true }
|
||||
111
crates/ahm/src/error.rs
Normal file
@@ -0,0 +1,111 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use thiserror::Error;
|
||||
|
||||
/// Custom error type for AHM operations
|
||||
/// This enum defines various error variants that can occur during
|
||||
/// the execution of AHM-related tasks, such as I/O errors, storage errors,
|
||||
/// configuration errors, and specific errors related to healing operations.
|
||||
#[derive(Debug, Error)]
|
||||
pub enum Error {
|
||||
#[error("I/O error: {0}")]
|
||||
Io(#[from] std::io::Error),
|
||||
|
||||
#[error("Storage error: {0}")]
|
||||
Storage(#[from] rustfs_ecstore::error::Error),
|
||||
|
||||
#[error("Disk error: {0}")]
|
||||
Disk(#[from] rustfs_ecstore::disk::error::DiskError),
|
||||
|
||||
#[error("Configuration error: {0}")]
|
||||
Config(String),
|
||||
|
||||
#[error("Heal configuration error: {message}")]
|
||||
ConfigurationError { message: String },
|
||||
|
||||
#[error("Other error: {0}")]
|
||||
Other(String),
|
||||
|
||||
#[error(transparent)]
|
||||
Anyhow(#[from] anyhow::Error),
|
||||
|
||||
// Scanner
|
||||
#[error("Scanner error: {0}")]
|
||||
Scanner(String),
|
||||
|
||||
#[error("Metrics error: {0}")]
|
||||
Metrics(String),
|
||||
|
||||
#[error("Serialization error: {0}")]
|
||||
Serialization(String),
|
||||
|
||||
#[error("IO error: {0}")]
|
||||
IO(String),
|
||||
|
||||
#[error("Not found: {0}")]
|
||||
NotFound(String),
|
||||
|
||||
#[error("Invalid checkpoint: {0}")]
|
||||
InvalidCheckpoint(String),
|
||||
|
||||
// Heal
|
||||
#[error("Heal task not found: {task_id}")]
|
||||
TaskNotFound { task_id: String },
|
||||
|
||||
#[error("Heal task already exists: {task_id}")]
|
||||
TaskAlreadyExists { task_id: String },
|
||||
|
||||
#[error("Heal manager is not running")]
|
||||
ManagerNotRunning,
|
||||
|
||||
#[error("Heal task execution failed: {message}")]
|
||||
TaskExecutionFailed { message: String },
|
||||
|
||||
#[error("Invalid heal type: {heal_type}")]
|
||||
InvalidHealType { heal_type: String },
|
||||
|
||||
#[error("Heal task cancelled")]
|
||||
TaskCancelled,
|
||||
|
||||
#[error("Heal task timeout")]
|
||||
TaskTimeout,
|
||||
|
||||
#[error("Heal event processing failed: {message}")]
|
||||
EventProcessingFailed { message: String },
|
||||
|
||||
#[error("Heal progress tracking failed: {message}")]
|
||||
ProgressTrackingFailed { message: String },
|
||||
}
|
||||
|
||||
/// A specialized Result type for AHM operations
|
||||
/// This type is a convenient alias for results returned by functions in the AHM crate,
|
||||
/// using the custom Error type defined above.
|
||||
pub type Result<T, E = Error> = std::result::Result<T, E>;
|
||||
|
||||
impl Error {
|
||||
/// Create an Other error from any error type
|
||||
pub fn other<E>(error: E) -> Self
|
||||
where
|
||||
E: Into<Box<dyn std::error::Error + Send + Sync>>,
|
||||
{
|
||||
Error::Other(error.into().to_string())
|
||||
}
|
||||
}
|
||||
|
||||
impl From<Error> for std::io::Error {
|
||||
fn from(err: Error) -> Self {
|
||||
std::io::Error::other(err)
|
||||
}
|
||||
}
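A minimal sketch of how the alias and conversions above are meant to be used. The `rustfs_ahm` module path is assumed from the package name in `Cargo.toml` and is not shown in this diff.

```rust
// Minimal usage sketch (not part of the diff). `Error::Io` is produced
// automatically by `?` thanks to `#[from] std::io::Error`, and arbitrary
// errors can be wrapped with `Error::other`.
use rustfs_ahm::{Error, Result};

fn read_checkpoint(path: &std::path::Path) -> Result<Vec<u8>> {
    let bytes = std::fs::read(path)?; // io::Error -> Error::Io via #[from]
    if bytes.is_empty() {
        return Err(Error::other("checkpoint file is empty"));
    }
    Ok(bytes)
}
```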
|
||||
552
crates/ahm/src/heal/channel.rs
Normal file
@@ -0,0 +1,552 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::heal::{
|
||||
manager::HealManager,
|
||||
task::{HealOptions, HealPriority, HealRequest, HealType},
|
||||
utils,
|
||||
};
|
||||
use crate::{Error, Result};
|
||||
use rustfs_common::heal_channel::{
|
||||
HealChannelCommand, HealChannelPriority, HealChannelReceiver, HealChannelRequest, HealChannelResponse, HealScanMode,
|
||||
publish_heal_response,
|
||||
};
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::mpsc;
|
||||
use tracing::{debug, error, info};
|
||||
|
||||
/// Heal channel processor
|
||||
pub struct HealChannelProcessor {
|
||||
/// Heal manager
|
||||
heal_manager: Arc<HealManager>,
|
||||
/// Response sender
|
||||
response_sender: mpsc::UnboundedSender<HealChannelResponse>,
|
||||
/// Response receiver
|
||||
response_receiver: mpsc::UnboundedReceiver<HealChannelResponse>,
|
||||
}
|
||||
|
||||
impl HealChannelProcessor {
|
||||
/// Create new HealChannelProcessor
|
||||
pub fn new(heal_manager: Arc<HealManager>) -> Self {
|
||||
let (response_tx, response_rx) = mpsc::unbounded_channel();
|
||||
Self {
|
||||
heal_manager,
|
||||
response_sender: response_tx,
|
||||
response_receiver: response_rx,
|
||||
}
|
||||
}
|
||||
|
||||
/// Start processing heal channel requests
|
||||
pub async fn start(&mut self, mut receiver: HealChannelReceiver) -> Result<()> {
|
||||
info!("Starting heal channel processor");
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
command = receiver.recv() => {
|
||||
match command {
|
||||
Some(command) => {
|
||||
if let Err(e) = self.process_command(command).await {
|
||||
error!("Failed to process heal command: {}", e);
|
||||
}
|
||||
}
|
||||
None => {
|
||||
debug!("Heal channel receiver closed, stopping processor");
|
||||
break;
|
||||
}
|
||||
}
|
||||
}
|
||||
response = self.response_receiver.recv() => {
|
||||
if let Some(response) = response {
|
||||
// Handle response if needed
|
||||
info!("Received heal response for request: {}", response.request_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
info!("Heal channel processor stopped");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Process heal command
|
||||
async fn process_command(&self, command: HealChannelCommand) -> Result<()> {
|
||||
match command {
|
||||
HealChannelCommand::Start(request) => self.process_start_request(request).await,
|
||||
HealChannelCommand::Query { heal_path, client_token } => self.process_query_request(heal_path, client_token).await,
|
||||
HealChannelCommand::Cancel { heal_path } => self.process_cancel_request(heal_path).await,
|
||||
}
|
||||
}
|
||||
|
||||
/// Process start request
|
||||
async fn process_start_request(&self, request: HealChannelRequest) -> Result<()> {
|
||||
info!("Processing heal start request: {} for bucket: {}", request.id, request.bucket);
|
||||
|
||||
// Convert channel request to heal request
|
||||
let heal_request = self.convert_to_heal_request(request.clone())?;
|
||||
|
||||
// Submit to heal manager
|
||||
match self.heal_manager.submit_heal_request(heal_request).await {
|
||||
Ok(task_id) => {
|
||||
info!("Successfully submitted heal request: {} as task: {}", request.id, task_id);
|
||||
|
||||
let response = HealChannelResponse {
|
||||
request_id: request.id,
|
||||
success: true,
|
||||
data: Some(format!("Task ID: {task_id}").into_bytes()),
|
||||
error: None,
|
||||
};
|
||||
|
||||
self.publish_response(response);
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to submit heal request: {} - {}", request.id, e);
|
||||
|
||||
// Send error response
|
||||
let response = HealChannelResponse {
|
||||
request_id: request.id,
|
||||
success: false,
|
||||
data: None,
|
||||
error: Some(e.to_string()),
|
||||
};
|
||||
|
||||
self.publish_response(response);
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Process query request
|
||||
async fn process_query_request(&self, heal_path: String, client_token: String) -> Result<()> {
|
||||
info!("Processing heal query request for path: {}", heal_path);
|
||||
|
||||
// TODO: Implement query logic based on heal_path and client_token
|
||||
// For now, return a placeholder response
|
||||
let response = HealChannelResponse {
|
||||
request_id: client_token,
|
||||
success: true,
|
||||
data: Some(format!("Query result for path: {heal_path}").into_bytes()),
|
||||
error: None,
|
||||
};
|
||||
|
||||
self.publish_response(response);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Process cancel request
|
||||
async fn process_cancel_request(&self, heal_path: String) -> Result<()> {
|
||||
info!("Processing heal cancel request for path: {}", heal_path);
|
||||
|
||||
// TODO: Implement cancel logic based on heal_path
|
||||
// For now, return a placeholder response
|
||||
let response = HealChannelResponse {
|
||||
request_id: heal_path.clone(),
|
||||
success: true,
|
||||
data: Some(format!("Cancel request for path: {heal_path}").into_bytes()),
|
||||
error: None,
|
||||
};
|
||||
|
||||
self.publish_response(response);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Convert channel request to heal request
|
||||
fn convert_to_heal_request(&self, request: HealChannelRequest) -> Result<HealRequest> {
|
||||
let heal_type = if let Some(disk_id) = &request.disk {
|
||||
let set_disk_id = utils::normalize_set_disk_id(disk_id).ok_or_else(|| Error::InvalidHealType {
|
||||
heal_type: format!("erasure-set({disk_id})"),
|
||||
})?;
|
||||
HealType::ErasureSet {
|
||||
buckets: vec![],
|
||||
set_disk_id,
|
||||
}
|
||||
} else if let Some(prefix) = &request.object_prefix {
|
||||
if !prefix.is_empty() {
|
||||
HealType::Object {
|
||||
bucket: request.bucket.clone(),
|
||||
object: prefix.clone(),
|
||||
version_id: None,
|
||||
}
|
||||
} else {
|
||||
HealType::Bucket {
|
||||
bucket: request.bucket.clone(),
|
||||
}
|
||||
}
|
||||
} else {
|
||||
HealType::Bucket {
|
||||
bucket: request.bucket.clone(),
|
||||
}
|
||||
};
|
||||
|
||||
let priority = match request.priority {
|
||||
HealChannelPriority::Low => HealPriority::Low,
|
||||
HealChannelPriority::Normal => HealPriority::Normal,
|
||||
HealChannelPriority::High => HealPriority::High,
|
||||
HealChannelPriority::Critical => HealPriority::Urgent,
|
||||
};
|
||||
|
||||
// Build HealOptions with all available fields
|
||||
let mut options = HealOptions {
|
||||
scan_mode: request.scan_mode.unwrap_or(HealScanMode::Normal),
|
||||
remove_corrupted: request.remove_corrupted.unwrap_or(false),
|
||||
recreate_missing: request.recreate_missing.unwrap_or(true),
|
||||
update_parity: request.update_parity.unwrap_or(true),
|
||||
recursive: request.recursive.unwrap_or(false),
|
||||
dry_run: request.dry_run.unwrap_or(false),
|
||||
timeout: request.timeout_seconds.map(std::time::Duration::from_secs),
|
||||
pool_index: request.pool_index,
|
||||
set_index: request.set_index,
|
||||
};
|
||||
|
||||
// Apply force_start overrides
|
||||
if request.force_start {
|
||||
options.remove_corrupted = true;
|
||||
options.recreate_missing = true;
|
||||
options.update_parity = true;
|
||||
}
|
||||
|
||||
Ok(HealRequest::new(heal_type, options, priority))
|
||||
}
|
||||
|
||||
fn publish_response(&self, response: HealChannelResponse) {
|
||||
// Try to send to local channel first, but don't block broadcast on failure
|
||||
if let Err(e) = self.response_sender.send(response.clone()) {
|
||||
error!("Failed to enqueue heal response locally: {}", e);
|
||||
}
|
||||
// Always attempt to broadcast, even if local send failed
|
||||
// Use the original response for broadcast; local send uses a clone
|
||||
if let Err(e) = publish_heal_response(response) {
|
||||
error!("Failed to broadcast heal response: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
/// Get response sender for external use
|
||||
pub fn get_response_sender(&self) -> mpsc::UnboundedSender<HealChannelResponse> {
|
||||
self.response_sender.clone()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::heal::storage::HealStorageAPI;
|
||||
use rustfs_common::heal_channel::{HealChannelPriority, HealChannelRequest, HealScanMode};
|
||||
use std::sync::Arc;
|
||||
|
||||
// Mock storage for testing
|
||||
struct MockStorage;
|
||||
#[async_trait::async_trait]
|
||||
impl HealStorageAPI for MockStorage {
|
||||
async fn get_object_meta(
|
||||
&self,
|
||||
_bucket: &str,
|
||||
_object: &str,
|
||||
) -> crate::Result<Option<rustfs_ecstore::store_api::ObjectInfo>> {
|
||||
Ok(None)
|
||||
}
|
||||
async fn get_object_data(&self, _bucket: &str, _object: &str) -> crate::Result<Option<Vec<u8>>> {
|
||||
Ok(None)
|
||||
}
|
||||
async fn put_object_data(&self, _bucket: &str, _object: &str, _data: &[u8]) -> crate::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
async fn delete_object(&self, _bucket: &str, _object: &str) -> crate::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
async fn verify_object_integrity(&self, _bucket: &str, _object: &str) -> crate::Result<bool> {
|
||||
Ok(true)
|
||||
}
|
||||
async fn ec_decode_rebuild(&self, _bucket: &str, _object: &str) -> crate::Result<Vec<u8>> {
|
||||
Ok(vec![])
|
||||
}
|
||||
async fn get_disk_status(
|
||||
&self,
|
||||
_endpoint: &rustfs_ecstore::disk::endpoint::Endpoint,
|
||||
) -> crate::Result<crate::heal::storage::DiskStatus> {
|
||||
Ok(crate::heal::storage::DiskStatus::Ok)
|
||||
}
|
||||
async fn format_disk(&self, _endpoint: &rustfs_ecstore::disk::endpoint::Endpoint) -> crate::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
async fn get_bucket_info(&self, _bucket: &str) -> crate::Result<Option<rustfs_ecstore::store_api::BucketInfo>> {
|
||||
Ok(None)
|
||||
}
|
||||
async fn heal_bucket_metadata(&self, _bucket: &str) -> crate::Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
async fn list_buckets(&self) -> crate::Result<Vec<rustfs_ecstore::store_api::BucketInfo>> {
|
||||
Ok(vec![])
|
||||
}
|
||||
async fn object_exists(&self, _bucket: &str, _object: &str) -> crate::Result<bool> {
|
||||
Ok(false)
|
||||
}
|
||||
async fn get_object_size(&self, _bucket: &str, _object: &str) -> crate::Result<Option<u64>> {
|
||||
Ok(None)
|
||||
}
|
||||
async fn get_object_checksum(&self, _bucket: &str, _object: &str) -> crate::Result<Option<String>> {
|
||||
Ok(None)
|
||||
}
|
||||
async fn heal_object(
|
||||
&self,
|
||||
_bucket: &str,
|
||||
_object: &str,
|
||||
_version_id: Option<&str>,
|
||||
_opts: &rustfs_common::heal_channel::HealOpts,
|
||||
) -> crate::Result<(rustfs_madmin::heal_commands::HealResultItem, Option<crate::Error>)> {
|
||||
Ok((rustfs_madmin::heal_commands::HealResultItem::default(), None))
|
||||
}
|
||||
async fn heal_bucket(
|
||||
&self,
|
||||
_bucket: &str,
|
||||
_opts: &rustfs_common::heal_channel::HealOpts,
|
||||
) -> crate::Result<rustfs_madmin::heal_commands::HealResultItem> {
|
||||
Ok(rustfs_madmin::heal_commands::HealResultItem::default())
|
||||
}
|
||||
async fn heal_format(
|
||||
&self,
|
||||
_dry_run: bool,
|
||||
) -> crate::Result<(rustfs_madmin::heal_commands::HealResultItem, Option<crate::Error>)> {
|
||||
Ok((rustfs_madmin::heal_commands::HealResultItem::default(), None))
|
||||
}
|
||||
async fn list_objects_for_heal(&self, _bucket: &str, _prefix: &str) -> crate::Result<Vec<String>> {
|
||||
Ok(vec![])
|
||||
}
|
||||
async fn get_disk_for_resume(&self, _set_disk_id: &str) -> crate::Result<rustfs_ecstore::disk::DiskStore> {
|
||||
Err(crate::Error::other("Not implemented in mock"))
|
||||
}
|
||||
}
|
||||
|
||||
fn create_test_heal_manager() -> Arc<HealManager> {
|
||||
let storage: Arc<dyn HealStorageAPI> = Arc::new(MockStorage);
|
||||
Arc::new(HealManager::new(storage, None))
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_channel_processor_new() {
|
||||
let heal_manager = create_test_heal_manager();
|
||||
let processor = HealChannelProcessor::new(heal_manager);
|
||||
|
||||
// Verify processor is created successfully
|
||||
let _sender = processor.get_response_sender();
|
||||
// If we can get the sender, processor was created correctly
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_convert_to_heal_request_bucket() {
|
||||
let heal_manager = create_test_heal_manager();
|
||||
let processor = HealChannelProcessor::new(heal_manager);
|
||||
|
||||
let channel_request = HealChannelRequest {
|
||||
id: "test-id".to_string(),
|
||||
bucket: "test-bucket".to_string(),
|
||||
object_prefix: None,
|
||||
disk: None,
|
||||
priority: HealChannelPriority::Normal,
|
||||
scan_mode: None,
|
||||
remove_corrupted: None,
|
||||
recreate_missing: None,
|
||||
update_parity: None,
|
||||
recursive: None,
|
||||
dry_run: None,
|
||||
timeout_seconds: None,
|
||||
pool_index: None,
|
||||
set_index: None,
|
||||
force_start: false,
|
||||
};
|
||||
|
||||
let heal_request = processor.convert_to_heal_request(channel_request).unwrap();
|
||||
assert!(matches!(heal_request.heal_type, HealType::Bucket { .. }));
|
||||
assert_eq!(heal_request.priority, HealPriority::Normal);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_convert_to_heal_request_object() {
|
||||
let heal_manager = create_test_heal_manager();
|
||||
let processor = HealChannelProcessor::new(heal_manager);
|
||||
|
||||
let channel_request = HealChannelRequest {
|
||||
id: "test-id".to_string(),
|
||||
bucket: "test-bucket".to_string(),
|
||||
object_prefix: Some("test-object".to_string()),
|
||||
disk: None,
|
||||
priority: HealChannelPriority::High,
|
||||
scan_mode: Some(HealScanMode::Deep),
|
||||
remove_corrupted: Some(true),
|
||||
recreate_missing: Some(true),
|
||||
update_parity: Some(true),
|
||||
recursive: Some(false),
|
||||
dry_run: Some(false),
|
||||
timeout_seconds: Some(300),
|
||||
pool_index: Some(0),
|
||||
set_index: Some(1),
|
||||
force_start: false,
|
||||
};
|
||||
|
||||
let heal_request = processor.convert_to_heal_request(channel_request).unwrap();
|
||||
assert!(matches!(heal_request.heal_type, HealType::Object { .. }));
|
||||
assert_eq!(heal_request.priority, HealPriority::High);
|
||||
assert_eq!(heal_request.options.scan_mode, HealScanMode::Deep);
|
||||
assert!(heal_request.options.remove_corrupted);
|
||||
assert!(heal_request.options.recreate_missing);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_convert_to_heal_request_erasure_set() {
|
||||
let heal_manager = create_test_heal_manager();
|
||||
let processor = HealChannelProcessor::new(heal_manager);
|
||||
|
||||
let channel_request = HealChannelRequest {
|
||||
id: "test-id".to_string(),
|
||||
bucket: "test-bucket".to_string(),
|
||||
object_prefix: None,
|
||||
disk: Some("pool_0_set_1".to_string()),
|
||||
priority: HealChannelPriority::Critical,
|
||||
scan_mode: None,
|
||||
remove_corrupted: None,
|
||||
recreate_missing: None,
|
||||
update_parity: None,
|
||||
recursive: None,
|
||||
dry_run: None,
|
||||
timeout_seconds: None,
|
||||
pool_index: None,
|
||||
set_index: None,
|
||||
force_start: false,
|
||||
};
|
||||
|
||||
let heal_request = processor.convert_to_heal_request(channel_request).unwrap();
|
||||
assert!(matches!(heal_request.heal_type, HealType::ErasureSet { .. }));
|
||||
assert_eq!(heal_request.priority, HealPriority::Urgent);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_convert_to_heal_request_invalid_disk_id() {
|
||||
let heal_manager = create_test_heal_manager();
|
||||
let processor = HealChannelProcessor::new(heal_manager);
|
||||
|
||||
let channel_request = HealChannelRequest {
|
||||
id: "test-id".to_string(),
|
||||
bucket: "test-bucket".to_string(),
|
||||
object_prefix: None,
|
||||
disk: Some("invalid-disk-id".to_string()),
|
||||
priority: HealChannelPriority::Normal,
|
||||
scan_mode: None,
|
||||
remove_corrupted: None,
|
||||
recreate_missing: None,
|
||||
update_parity: None,
|
||||
recursive: None,
|
||||
dry_run: None,
|
||||
timeout_seconds: None,
|
||||
pool_index: None,
|
||||
set_index: None,
|
||||
force_start: false,
|
||||
};
|
||||
|
||||
let result = processor.convert_to_heal_request(channel_request);
|
||||
assert!(result.is_err());
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_convert_to_heal_request_priority_mapping() {
|
||||
let heal_manager = create_test_heal_manager();
|
||||
let processor = HealChannelProcessor::new(heal_manager);
|
||||
|
||||
let priorities = vec![
|
||||
(HealChannelPriority::Low, HealPriority::Low),
|
||||
(HealChannelPriority::Normal, HealPriority::Normal),
|
||||
(HealChannelPriority::High, HealPriority::High),
|
||||
(HealChannelPriority::Critical, HealPriority::Urgent),
|
||||
];
|
||||
|
||||
for (channel_priority, expected_heal_priority) in priorities {
|
||||
let channel_request = HealChannelRequest {
|
||||
id: "test-id".to_string(),
|
||||
bucket: "test-bucket".to_string(),
|
||||
object_prefix: None,
|
||||
disk: None,
|
||||
priority: channel_priority,
|
||||
scan_mode: None,
|
||||
remove_corrupted: None,
|
||||
recreate_missing: None,
|
||||
update_parity: None,
|
||||
recursive: None,
|
||||
dry_run: None,
|
||||
timeout_seconds: None,
|
||||
pool_index: None,
|
||||
set_index: None,
|
||||
force_start: false,
|
||||
};
|
||||
|
||||
let heal_request = processor.convert_to_heal_request(channel_request).unwrap();
|
||||
assert_eq!(heal_request.priority, expected_heal_priority);
|
||||
}
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_convert_to_heal_request_force_start() {
|
||||
let heal_manager = create_test_heal_manager();
|
||||
let processor = HealChannelProcessor::new(heal_manager);
|
||||
|
||||
let channel_request = HealChannelRequest {
|
||||
id: "test-id".to_string(),
|
||||
bucket: "test-bucket".to_string(),
|
||||
object_prefix: None,
|
||||
disk: None,
|
||||
priority: HealChannelPriority::Normal,
|
||||
scan_mode: None,
|
||||
remove_corrupted: Some(false),
|
||||
recreate_missing: Some(false),
|
||||
update_parity: Some(false),
|
||||
recursive: None,
|
||||
dry_run: None,
|
||||
timeout_seconds: None,
|
||||
pool_index: None,
|
||||
set_index: None,
|
||||
force_start: true, // Should override the above false values
|
||||
};
|
||||
|
||||
let heal_request = processor.convert_to_heal_request(channel_request).unwrap();
|
||||
assert!(heal_request.options.remove_corrupted);
|
||||
assert!(heal_request.options.recreate_missing);
|
||||
assert!(heal_request.options.update_parity);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_convert_to_heal_request_empty_object_prefix() {
|
||||
let heal_manager = create_test_heal_manager();
|
||||
let processor = HealChannelProcessor::new(heal_manager);
|
||||
|
||||
let channel_request = HealChannelRequest {
|
||||
id: "test-id".to_string(),
|
||||
bucket: "test-bucket".to_string(),
|
||||
object_prefix: Some("".to_string()), // Empty prefix should be treated as bucket heal
|
||||
disk: None,
|
||||
priority: HealChannelPriority::Normal,
|
||||
scan_mode: None,
|
||||
remove_corrupted: None,
|
||||
recreate_missing: None,
|
||||
update_parity: None,
|
||||
recursive: None,
|
||||
dry_run: None,
|
||||
timeout_seconds: None,
|
||||
pool_index: None,
|
||||
set_index: None,
|
||||
force_start: false,
|
||||
};
|
||||
|
||||
let heal_request = processor.convert_to_heal_request(channel_request).unwrap();
|
||||
assert!(matches!(heal_request.heal_type, HealType::Bucket { .. }));
|
||||
}
|
||||
}
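A rough sketch of how the processor above might be driven. How a `HealChannelReceiver` is created lives in `rustfs_common::heal_channel` and is not part of this diff, so the receiver is taken as a parameter here.

```rust
// Sketch only: wiring HealChannelProcessor into a running service. Imports
// follow the paths used in channel.rs; the receiver is assumed to be provided
// by rustfs_common and is deliberately left outside this snippet.
use std::sync::Arc;

async fn run_heal_channel(heal_manager: Arc<HealManager>, receiver: HealChannelReceiver) {
    let mut processor = HealChannelProcessor::new(heal_manager);
    // start() loops until the command channel is closed, so it is typically
    // spawned on its own task.
    if let Err(e) = processor.start(receiver).await {
        tracing::error!("heal channel processor exited with error: {e}");
    }
}
```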
|
||||
506
crates/ahm/src/heal/erasure_healer.rs
Normal file
@@ -0,0 +1,506 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::heal::{
|
||||
progress::HealProgress,
|
||||
resume::{CheckpointManager, ResumeManager, ResumeUtils},
|
||||
storage::HealStorageAPI,
|
||||
};
|
||||
use crate::{Error, Result};
|
||||
use futures::future::join_all;
|
||||
use rustfs_common::heal_channel::{HealOpts, HealScanMode};
|
||||
use rustfs_ecstore::disk::DiskStore;
|
||||
use std::sync::Arc;
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::{error, info, warn};
|
||||
|
||||
/// Erasure Set Healer
|
||||
pub struct ErasureSetHealer {
|
||||
storage: Arc<dyn HealStorageAPI>,
|
||||
progress: Arc<RwLock<HealProgress>>,
|
||||
cancel_token: tokio_util::sync::CancellationToken,
|
||||
disk: DiskStore,
|
||||
}
|
||||
|
||||
impl ErasureSetHealer {
|
||||
pub fn new(
|
||||
storage: Arc<dyn HealStorageAPI>,
|
||||
progress: Arc<RwLock<HealProgress>>,
|
||||
cancel_token: tokio_util::sync::CancellationToken,
|
||||
disk: DiskStore,
|
||||
) -> Self {
|
||||
Self {
|
||||
storage,
|
||||
progress,
|
||||
cancel_token,
|
||||
disk,
|
||||
}
|
||||
}
|
||||
|
||||
/// execute erasure set heal with resume
|
||||
pub async fn heal_erasure_set(&self, buckets: &[String], set_disk_id: &str) -> Result<()> {
|
||||
info!("Starting erasure set heal for {} buckets on set disk {}", buckets.len(), set_disk_id);
|
||||
|
||||
// 1. generate or get task id
|
||||
let task_id = self.get_or_create_task_id(set_disk_id).await?;
|
||||
|
||||
// 2. initialize a new resume state or load an existing one
|
||||
let (resume_manager, checkpoint_manager) = self.initialize_resume_state(&task_id, set_disk_id, buckets).await?;
|
||||
|
||||
// 3. execute heal with resume
|
||||
let result = self
|
||||
.execute_heal_with_resume(buckets, &resume_manager, &checkpoint_manager)
|
||||
.await;
|
||||
|
||||
// 4. cleanup resume state
|
||||
if result.is_ok() {
|
||||
if let Err(e) = resume_manager.cleanup().await {
|
||||
warn!("Failed to cleanup resume state: {}", e);
|
||||
}
|
||||
if let Err(e) = checkpoint_manager.cleanup().await {
|
||||
warn!("Failed to cleanup checkpoint: {}", e);
|
||||
}
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
/// get or create task id
|
||||
async fn get_or_create_task_id(&self, set_disk_id: &str) -> Result<String> {
|
||||
// check if there are resumable tasks
|
||||
let resumable_tasks = ResumeUtils::get_resumable_tasks(&self.disk).await?;
|
||||
|
||||
for task_id in resumable_tasks {
|
||||
match ResumeManager::load_from_disk(self.disk.clone(), &task_id).await {
|
||||
Ok(manager) => {
|
||||
let state = manager.get_state().await;
|
||||
if state.set_disk_id == set_disk_id && ResumeUtils::can_resume_task(&self.disk, &task_id).await {
|
||||
info!("Found resumable task: {} for set {}", task_id, set_disk_id);
|
||||
return Ok(task_id);
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("Failed to load resume state for task {}: {}", task_id, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// create new task id
|
||||
let task_id = format!("{}_{}", set_disk_id, ResumeUtils::generate_task_id());
|
||||
info!("Created new heal task: {}", task_id);
|
||||
Ok(task_id)
|
||||
}
|
||||
|
||||
/// initialize a new resume state or load an existing one
|
||||
async fn initialize_resume_state(
|
||||
&self,
|
||||
task_id: &str,
|
||||
set_disk_id: &str,
|
||||
buckets: &[String],
|
||||
) -> Result<(ResumeManager, CheckpointManager)> {
|
||||
// check if resume state exists
|
||||
if ResumeManager::has_resume_state(&self.disk, task_id).await {
|
||||
info!("Loading existing resume state for task: {}", task_id);
|
||||
|
||||
let resume_manager = ResumeManager::load_from_disk(self.disk.clone(), task_id).await?;
|
||||
let checkpoint_manager = if CheckpointManager::has_checkpoint(&self.disk, task_id).await {
|
||||
CheckpointManager::load_from_disk(self.disk.clone(), task_id).await?
|
||||
} else {
|
||||
CheckpointManager::new(self.disk.clone(), task_id.to_string()).await?
|
||||
};
|
||||
|
||||
Ok((resume_manager, checkpoint_manager))
|
||||
} else {
|
||||
info!("Creating new resume state for task: {}", task_id);
|
||||
|
||||
let resume_manager = ResumeManager::new(
|
||||
self.disk.clone(),
|
||||
task_id.to_string(),
|
||||
"erasure_set".to_string(),
|
||||
set_disk_id.to_string(),
|
||||
buckets.to_vec(),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let checkpoint_manager = CheckpointManager::new(self.disk.clone(), task_id.to_string()).await?;
|
||||
|
||||
Ok((resume_manager, checkpoint_manager))
|
||||
}
|
||||
}
|
||||
|
||||
/// execute heal with resume
|
||||
async fn execute_heal_with_resume(
|
||||
&self,
|
||||
buckets: &[String],
|
||||
resume_manager: &ResumeManager,
|
||||
checkpoint_manager: &CheckpointManager,
|
||||
) -> Result<()> {
|
||||
// 1. get current state
|
||||
let state = resume_manager.get_state().await;
|
||||
let checkpoint = checkpoint_manager.get_checkpoint().await;
|
||||
|
||||
info!(
|
||||
"Resuming from bucket {} object {}",
|
||||
checkpoint.current_bucket_index, checkpoint.current_object_index
|
||||
);
|
||||
|
||||
// 2. initialize progress
|
||||
self.initialize_progress(buckets, &state).await;
|
||||
|
||||
// 3. continue from checkpoint
|
||||
let current_bucket_index = checkpoint.current_bucket_index;
|
||||
let mut current_object_index = checkpoint.current_object_index;
|
||||
|
||||
let mut processed_objects = state.processed_objects;
|
||||
let mut successful_objects = state.successful_objects;
|
||||
let mut failed_objects = state.failed_objects;
|
||||
let mut skipped_objects = state.skipped_objects;
|
||||
|
||||
// 4. process remaining buckets
|
||||
for (bucket_idx, bucket) in buckets.iter().enumerate().skip(current_bucket_index) {
|
||||
// check if completed
|
||||
if state.completed_buckets.contains(bucket) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// update current bucket
|
||||
resume_manager.set_current_item(Some(bucket.clone()), None).await?;
|
||||
|
||||
// process objects in bucket
|
||||
let bucket_result = self
|
||||
.heal_bucket_with_resume(
|
||||
bucket,
|
||||
bucket_idx,
|
||||
&mut current_object_index,
|
||||
&mut processed_objects,
|
||||
&mut successful_objects,
|
||||
&mut failed_objects,
|
||||
&mut skipped_objects,
|
||||
resume_manager,
|
||||
checkpoint_manager,
|
||||
)
|
||||
.await;
|
||||
|
||||
// update checkpoint position
|
||||
checkpoint_manager.update_position(bucket_idx, current_object_index).await?;
|
||||
|
||||
// update progress
|
||||
resume_manager
|
||||
.update_progress(processed_objects, successful_objects, failed_objects, skipped_objects)
|
||||
.await?;
|
||||
|
||||
// check cancel status
|
||||
if self.cancel_token.is_cancelled() {
|
||||
warn!("Heal task cancelled");
|
||||
return Err(Error::TaskCancelled);
|
||||
}
|
||||
|
||||
// process bucket result
|
||||
match bucket_result {
|
||||
Ok(_) => {
|
||||
resume_manager.complete_bucket(bucket).await?;
|
||||
info!("Completed heal for bucket: {}", bucket);
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to heal bucket {}: {}", bucket, e);
|
||||
// continue to next bucket, do not interrupt the whole process
|
||||
}
|
||||
}
|
||||
|
||||
// reset object index
|
||||
current_object_index = 0;
|
||||
}
|
||||
|
||||
// 5. mark task completed
|
||||
resume_manager.mark_completed().await?;
|
||||
|
||||
info!("Erasure set heal completed successfully");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// heal single bucket with resume
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
async fn heal_bucket_with_resume(
|
||||
&self,
|
||||
bucket: &str,
|
||||
bucket_index: usize,
|
||||
current_object_index: &mut usize,
|
||||
processed_objects: &mut u64,
|
||||
successful_objects: &mut u64,
|
||||
failed_objects: &mut u64,
|
||||
_skipped_objects: &mut u64,
|
||||
resume_manager: &ResumeManager,
|
||||
checkpoint_manager: &CheckpointManager,
|
||||
) -> Result<()> {
|
||||
info!(target: "rustfs:ahm:heal_bucket_with_resume", "Starting heal for bucket: {} from object index {}", bucket, current_object_index);
|
||||
|
||||
// 1. get bucket info
|
||||
let _bucket_info = match self.storage.get_bucket_info(bucket).await? {
|
||||
Some(info) => info,
|
||||
None => {
|
||||
warn!("Bucket {} not found, skipping", bucket);
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
|
||||
// 2. get objects to heal
|
||||
let objects = self.storage.list_objects_for_heal(bucket, "").await?;
|
||||
|
||||
// 3. continue from checkpoint
|
||||
for (obj_idx, object) in objects.iter().enumerate().skip(*current_object_index) {
|
||||
// check if already processed
|
||||
if checkpoint_manager.get_checkpoint().await.processed_objects.contains(object) {
|
||||
continue;
|
||||
}
|
||||
|
||||
// update current object
|
||||
resume_manager
|
||||
.set_current_item(Some(bucket.to_string()), Some(object.clone()))
|
||||
.await?;
|
||||
|
||||
// Check if object still exists before attempting heal
|
||||
let object_exists = match self.storage.object_exists(bucket, object).await {
|
||||
Ok(exists) => exists,
|
||||
Err(e) => {
|
||||
warn!("Failed to check existence of {}/{}: {}, skipping", bucket, object, e);
|
||||
*current_object_index = obj_idx + 1;
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
if !object_exists {
|
||||
info!(
|
||||
target: "rustfs:ahm:heal_bucket_with_resume" ,"Object {}/{} no longer exists, skipping heal (likely deleted intentionally)",
|
||||
bucket, object
|
||||
);
|
||||
checkpoint_manager.add_processed_object(object.clone()).await?;
|
||||
*successful_objects += 1; // Treat as successful - object is gone as intended
|
||||
*current_object_index = obj_idx + 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
// heal object
|
||||
let heal_opts = HealOpts {
|
||||
scan_mode: HealScanMode::Normal,
|
||||
remove: true,
|
||||
recreate: true, // Keep recreate enabled for legitimate heal scenarios
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
match self.storage.heal_object(bucket, object, None, &heal_opts).await {
|
||||
Ok((_result, None)) => {
|
||||
*successful_objects += 1;
|
||||
checkpoint_manager.add_processed_object(object.clone()).await?;
|
||||
info!("Successfully healed object {}/{}", bucket, object);
|
||||
}
|
||||
Ok((_, Some(err))) => {
|
||||
*failed_objects += 1;
|
||||
checkpoint_manager.add_failed_object(object.clone()).await?;
|
||||
warn!("Failed to heal object {}/{}: {}", bucket, object, err);
|
||||
}
|
||||
Err(err) => {
|
||||
*failed_objects += 1;
|
||||
checkpoint_manager.add_failed_object(object.clone()).await?;
|
||||
warn!("Error healing object {}/{}: {}", bucket, object, err);
|
||||
}
|
||||
}
|
||||
|
||||
*processed_objects += 1;
|
||||
*current_object_index = obj_idx + 1;
|
||||
|
||||
// check cancel status
|
||||
if self.cancel_token.is_cancelled() {
|
||||
info!("Heal task cancelled during object processing");
|
||||
return Err(Error::TaskCancelled);
|
||||
}
|
||||
|
||||
// save checkpoint periodically
|
||||
if obj_idx % 100 == 0 {
|
||||
checkpoint_manager
|
||||
.update_position(bucket_index, *current_object_index)
|
||||
.await?;
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// initialize progress tracking
|
||||
async fn initialize_progress(&self, _buckets: &[String], state: &crate::heal::resume::ResumeState) {
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.objects_scanned = state.total_objects;
|
||||
progress.objects_healed = state.successful_objects;
|
||||
progress.objects_failed = state.failed_objects;
|
||||
progress.bytes_processed = 0; // set to 0 for now, can be extended later
|
||||
progress.set_current_object(state.current_object.clone());
|
||||
}
|
||||
|
||||
/// heal all buckets concurrently
|
||||
#[allow(dead_code)]
|
||||
async fn heal_buckets_concurrently(&self, buckets: &[String]) -> Vec<Result<()>> {
|
||||
// use a semaphore to limit concurrency and avoid too many simultaneous bucket healings
|
||||
let semaphore = Arc::new(tokio::sync::Semaphore::new(4)); // max 4 concurrent healings
|
||||
|
||||
let heal_futures = buckets.iter().map(|bucket| {
|
||||
let bucket = bucket.clone();
|
||||
let storage = self.storage.clone();
|
||||
let progress = self.progress.clone();
|
||||
let semaphore = semaphore.clone();
|
||||
let cancel_token = self.cancel_token.clone();
|
||||
|
||||
async move {
|
||||
let _permit = semaphore
|
||||
.acquire()
|
||||
.await
|
||||
.map_err(|e| Error::other(format!("Failed to acquire semaphore for bucket heal: {}", e)))?;
|
||||
|
||||
if cancel_token.is_cancelled() {
|
||||
return Err(Error::TaskCancelled);
|
||||
}
|
||||
|
||||
Self::heal_single_bucket(&storage, &bucket, &progress).await
|
||||
}
|
||||
});
|
||||
|
||||
// use join_all to process concurrently
|
||||
join_all(heal_futures).await
|
||||
}
|
||||
|
||||
/// heal single bucket
|
||||
#[allow(dead_code)]
|
||||
async fn heal_single_bucket(
|
||||
storage: &Arc<dyn HealStorageAPI>,
|
||||
bucket: &str,
|
||||
progress: &Arc<RwLock<HealProgress>>,
|
||||
) -> Result<()> {
|
||||
info!("Starting heal for bucket: {}", bucket);
|
||||
|
||||
// 1. get bucket info
|
||||
let _bucket_info = match storage.get_bucket_info(bucket).await? {
|
||||
Some(info) => info,
|
||||
None => {
|
||||
warn!("Bucket {} not found, skipping", bucket);
|
||||
return Ok(());
|
||||
}
|
||||
};
|
||||
|
||||
// 2. get objects to heal
|
||||
let objects = storage.list_objects_for_heal(bucket, "").await?;
|
||||
|
||||
// 3. update progress
|
||||
{
|
||||
let mut p = progress.write().await;
|
||||
p.objects_scanned += objects.len() as u64;
|
||||
}
|
||||
|
||||
// 4. heal objects concurrently
|
||||
let heal_opts = HealOpts {
|
||||
scan_mode: HealScanMode::Normal,
|
||||
remove: true, // remove corrupted data
|
||||
recreate: true, // recreate missing data
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
let object_results = Self::heal_objects_concurrently(storage, bucket, &objects, &heal_opts, progress).await;
|
||||
|
||||
// 5. count results
|
||||
let (success_count, failure_count) = object_results
|
||||
.into_iter()
|
||||
.fold((0, 0), |(success, failure), result| match result {
|
||||
Ok(_) => (success + 1, failure),
|
||||
Err(_) => (success, failure + 1),
|
||||
});
|
||||
|
||||
// 6. update progress
|
||||
{
|
||||
let mut p = progress.write().await;
|
||||
p.objects_healed += success_count;
|
||||
p.objects_failed += failure_count;
|
||||
p.set_current_object(Some(format!("completed bucket: {bucket}")));
|
||||
}
|
||||
|
||||
info!(
|
||||
"Completed heal for bucket {}: {} success, {} failures",
|
||||
bucket, success_count, failure_count
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// heal objects concurrently
|
||||
#[allow(dead_code)]
|
||||
async fn heal_objects_concurrently(
|
||||
storage: &Arc<dyn HealStorageAPI>,
|
||||
bucket: &str,
|
||||
objects: &[String],
|
||||
heal_opts: &HealOpts,
|
||||
_progress: &Arc<RwLock<HealProgress>>,
|
||||
) -> Vec<Result<()>> {
|
||||
// use a semaphore to limit per-object healing concurrency
|
||||
let semaphore = Arc::new(tokio::sync::Semaphore::new(8)); // max 8 concurrent object healings
|
||||
|
||||
let heal_futures = objects.iter().map(|object| {
|
||||
let object = object.clone();
|
||||
let bucket = bucket.to_string();
|
||||
let storage = storage.clone();
|
||||
let heal_opts = *heal_opts;
|
||||
let semaphore = semaphore.clone();
|
||||
|
||||
async move {
|
||||
let _permit = semaphore
|
||||
.acquire()
|
||||
.await
|
||||
.map_err(|e| Error::other(format!("Failed to acquire semaphore for object heal: {}", e)))?;
|
||||
|
||||
match storage.heal_object(&bucket, &object, None, &heal_opts).await {
|
||||
Ok((_result, None)) => {
|
||||
info!("Successfully healed object {}/{}", bucket, object);
|
||||
Ok(())
|
||||
}
|
||||
Ok((_, Some(err))) => {
|
||||
warn!("Failed to heal object {}/{}: {}", bucket, object, err);
|
||||
Err(Error::other(err))
|
||||
}
|
||||
Err(err) => {
|
||||
warn!("Error healing object {}/{}: {}", bucket, object, err);
|
||||
Err(err)
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
join_all(heal_futures).await
|
||||
}
|
||||
|
||||
/// process results
|
||||
#[allow(dead_code)]
|
||||
async fn process_results(&self, results: Vec<Result<()>>) -> Result<()> {
|
||||
let (success_count, failure_count): (usize, usize) =
|
||||
results.into_iter().fold((0, 0), |(success, failure), result| match result {
|
||||
Ok(_) => (success + 1, failure),
|
||||
Err(_) => (success, failure + 1),
|
||||
});
|
||||
|
||||
let total = success_count + failure_count;
|
||||
|
||||
info!("Erasure set heal completed: {}/{} buckets successful", success_count, total);
|
||||
|
||||
if failure_count > 0 {
|
||||
warn!("{} buckets failed to heal", failure_count);
|
||||
return Err(Error::other(format!("{failure_count} buckets failed to heal")));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
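A short sketch of how the healer above is intended to be invoked; the storage backend, progress tracker, and `DiskStore` are assumed to be supplied by the caller, since their construction is not part of this file.

```rust
// Sketch only: running a resumable erasure-set heal. The inputs are assumed
// to come from the surrounding ahm/ecstore wiring, which this file does not
// show; the types and calls below are the ones defined in erasure_healer.rs.
async fn heal_one_set(
    storage: Arc<dyn HealStorageAPI>,
    progress: Arc<RwLock<HealProgress>>,
    disk: DiskStore,
    buckets: Vec<String>,
    set_disk_id: &str,
) -> Result<()> {
    let cancel = tokio_util::sync::CancellationToken::new();
    let healer = ErasureSetHealer::new(storage, progress, cancel.clone(), disk);
    // Cancelling `cancel` from another task makes the healer stop at the next
    // checkpoint and return Error::TaskCancelled; on success it cleans up its
    // resume state automatically.
    healer.heal_erasure_set(&buckets, set_disk_id).await
}
```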
|
||||
682
crates/ahm/src/heal/event.rs
Normal file
@@ -0,0 +1,682 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::heal::{HealOptions, HealPriority, HealRequest, HealType};
|
||||
use crate::{Error, Result};
|
||||
use rustfs_ecstore::disk::endpoint::Endpoint;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::time::SystemTime;
|
||||
|
||||
/// Corruption type
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub enum CorruptionType {
|
||||
/// Data corruption
|
||||
DataCorruption,
|
||||
/// Metadata corruption
|
||||
MetadataCorruption,
|
||||
/// Partial corruption
|
||||
PartialCorruption,
|
||||
/// Complete corruption
|
||||
CompleteCorruption,
|
||||
}
|
||||
|
||||
/// Severity level
|
||||
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
||||
pub enum Severity {
|
||||
/// Low severity
|
||||
Low = 0,
|
||||
/// Medium severity
|
||||
Medium = 1,
|
||||
/// High severity
|
||||
High = 2,
|
||||
/// Critical severity
|
||||
Critical = 3,
|
||||
}
|
||||
|
||||
/// Heal event
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum HealEvent {
|
||||
/// Object corruption event
|
||||
ObjectCorruption {
|
||||
bucket: String,
|
||||
object: String,
|
||||
version_id: Option<String>,
|
||||
corruption_type: CorruptionType,
|
||||
severity: Severity,
|
||||
},
|
||||
/// Object missing event
|
||||
ObjectMissing {
|
||||
bucket: String,
|
||||
object: String,
|
||||
version_id: Option<String>,
|
||||
expected_locations: Vec<usize>,
|
||||
available_locations: Vec<usize>,
|
||||
},
|
||||
/// Metadata corruption event
|
||||
MetadataCorruption {
|
||||
bucket: String,
|
||||
object: String,
|
||||
corruption_type: CorruptionType,
|
||||
},
|
||||
/// Disk status change event
|
||||
DiskStatusChange {
|
||||
endpoint: Endpoint,
|
||||
old_status: String,
|
||||
new_status: String,
|
||||
},
|
||||
/// EC decode failure event
|
||||
ECDecodeFailure {
|
||||
bucket: String,
|
||||
object: String,
|
||||
version_id: Option<String>,
|
||||
missing_shards: Vec<usize>,
|
||||
available_shards: Vec<usize>,
|
||||
},
|
||||
/// Checksum mismatch event
|
||||
ChecksumMismatch {
|
||||
bucket: String,
|
||||
object: String,
|
||||
version_id: Option<String>,
|
||||
expected_checksum: String,
|
||||
actual_checksum: String,
|
||||
},
|
||||
/// Bucket metadata corruption event
|
||||
BucketMetadataCorruption {
|
||||
bucket: String,
|
||||
corruption_type: CorruptionType,
|
||||
},
|
||||
/// MRF metadata corruption event
|
||||
MRFMetadataCorruption {
|
||||
meta_path: String,
|
||||
corruption_type: CorruptionType,
|
||||
},
|
||||
}
|
||||
|
||||
impl HealEvent {
|
||||
/// Convert HealEvent to HealRequest
|
||||
pub fn to_heal_request(&self) -> Result<HealRequest> {
|
||||
match self {
|
||||
HealEvent::ObjectCorruption {
|
||||
bucket,
|
||||
object,
|
||||
version_id,
|
||||
severity,
|
||||
..
|
||||
} => Ok(HealRequest::new(
|
||||
HealType::Object {
|
||||
bucket: bucket.clone(),
|
||||
object: object.clone(),
|
||||
version_id: version_id.clone(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
Self::severity_to_priority(severity),
|
||||
)),
|
||||
HealEvent::ObjectMissing {
|
||||
bucket,
|
||||
object,
|
||||
version_id,
|
||||
..
|
||||
} => Ok(HealRequest::new(
|
||||
HealType::Object {
|
||||
bucket: bucket.clone(),
|
||||
object: object.clone(),
|
||||
version_id: version_id.clone(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::High,
|
||||
)),
|
||||
HealEvent::MetadataCorruption { bucket, object, .. } => Ok(HealRequest::new(
|
||||
HealType::Metadata {
|
||||
bucket: bucket.clone(),
|
||||
object: object.clone(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::High,
|
||||
)),
|
||||
HealEvent::DiskStatusChange { endpoint, .. } => {
|
||||
// Convert disk status change to erasure set heal
|
||||
// Note: This requires access to storage to get bucket list, which is not available here
|
||||
// The actual bucket list will need to be provided by the caller or retrieved differently
|
||||
let set_disk_id = crate::heal::utils::format_set_disk_id_from_i32(endpoint.pool_idx, endpoint.set_idx)
|
||||
.ok_or_else(|| Error::InvalidHealType {
|
||||
heal_type: format!("erasure-set(pool={}, set={})", endpoint.pool_idx, endpoint.set_idx),
|
||||
})?;
|
||||
Ok(HealRequest::new(
|
||||
HealType::ErasureSet {
|
||||
buckets: vec![], // Empty bucket list - caller should populate this
|
||||
set_disk_id,
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::High,
|
||||
))
|
||||
}
|
||||
HealEvent::ECDecodeFailure {
|
||||
bucket,
|
||||
object,
|
||||
version_id,
|
||||
..
|
||||
} => Ok(HealRequest::new(
|
||||
HealType::ECDecode {
|
||||
bucket: bucket.clone(),
|
||||
object: object.clone(),
|
||||
version_id: version_id.clone(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::Urgent,
|
||||
)),
|
||||
HealEvent::ChecksumMismatch {
|
||||
bucket,
|
||||
object,
|
||||
version_id,
|
||||
..
|
||||
} => Ok(HealRequest::new(
|
||||
HealType::Object {
|
||||
bucket: bucket.clone(),
|
||||
object: object.clone(),
|
||||
version_id: version_id.clone(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::High,
|
||||
)),
|
||||
HealEvent::BucketMetadataCorruption { bucket, .. } => Ok(HealRequest::new(
|
||||
HealType::Bucket { bucket: bucket.clone() },
|
||||
HealOptions::default(),
|
||||
HealPriority::High,
|
||||
)),
|
||||
HealEvent::MRFMetadataCorruption { meta_path, .. } => Ok(HealRequest::new(
|
||||
HealType::MRF {
|
||||
meta_path: meta_path.clone(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::High,
|
||||
)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Convert severity to priority
|
||||
fn severity_to_priority(severity: &Severity) -> HealPriority {
|
||||
match severity {
|
||||
Severity::Low => HealPriority::Low,
|
||||
Severity::Medium => HealPriority::Normal,
|
||||
Severity::High => HealPriority::High,
|
||||
Severity::Critical => HealPriority::Urgent,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get event description
|
||||
pub fn description(&self) -> String {
|
||||
match self {
|
||||
HealEvent::ObjectCorruption {
|
||||
bucket,
|
||||
object,
|
||||
corruption_type,
|
||||
..
|
||||
} => {
|
||||
format!("Object corruption detected: {bucket}/{object} - {corruption_type:?}")
|
||||
}
|
||||
HealEvent::ObjectMissing { bucket, object, .. } => {
|
||||
format!("Object missing: {bucket}/{object}")
|
||||
}
|
||||
HealEvent::MetadataCorruption {
|
||||
bucket,
|
||||
object,
|
||||
corruption_type,
|
||||
..
|
||||
} => {
|
||||
format!("Metadata corruption: {bucket}/{object} - {corruption_type:?}")
|
||||
}
|
||||
HealEvent::DiskStatusChange {
|
||||
endpoint,
|
||||
old_status,
|
||||
new_status,
|
||||
..
|
||||
} => {
|
||||
format!("Disk status changed: {endpoint:?} {old_status} -> {new_status}")
|
||||
}
|
||||
HealEvent::ECDecodeFailure {
|
||||
bucket,
|
||||
object,
|
||||
missing_shards,
|
||||
..
|
||||
} => {
|
||||
format!("EC decode failure: {bucket}/{object} - missing shards: {missing_shards:?}")
|
||||
}
|
||||
HealEvent::ChecksumMismatch {
|
||||
bucket,
|
||||
object,
|
||||
expected_checksum,
|
||||
actual_checksum,
|
||||
..
|
||||
} => {
|
||||
format!("Checksum mismatch: {bucket}/{object} - expected: {expected_checksum}, actual: {actual_checksum}")
|
||||
}
|
||||
HealEvent::BucketMetadataCorruption {
|
||||
bucket, corruption_type, ..
|
||||
} => {
|
||||
format!("Bucket metadata corruption: {bucket} - {corruption_type:?}")
|
||||
}
|
||||
HealEvent::MRFMetadataCorruption {
|
||||
meta_path,
|
||||
corruption_type,
|
||||
..
|
||||
} => {
|
||||
format!("MRF metadata corruption: {meta_path} - {corruption_type:?}")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Get event severity
|
||||
pub fn severity(&self) -> Severity {
|
||||
match self {
|
||||
HealEvent::ObjectCorruption { severity, .. } => severity.clone(),
|
||||
HealEvent::ObjectMissing { .. } => Severity::High,
|
||||
HealEvent::MetadataCorruption { .. } => Severity::High,
|
||||
HealEvent::DiskStatusChange { .. } => Severity::High,
|
||||
HealEvent::ECDecodeFailure { .. } => Severity::Critical,
|
||||
HealEvent::ChecksumMismatch { .. } => Severity::High,
|
||||
HealEvent::BucketMetadataCorruption { .. } => Severity::High,
|
||||
HealEvent::MRFMetadataCorruption { .. } => Severity::High,
|
||||
}
|
||||
}
|
||||
|
||||
/// Get event timestamp (currently the time of the call; events do not store a creation time)
|
||||
pub fn timestamp(&self) -> SystemTime {
|
||||
SystemTime::now()
|
||||
}
|
||||
}
|
||||
|
||||
/// Heal event handler
|
||||
pub struct HealEventHandler {
|
||||
/// Event queue
|
||||
events: Vec<HealEvent>,
|
||||
/// Maximum number of events
|
||||
max_events: usize,
|
||||
}
|
||||
|
||||
impl HealEventHandler {
|
||||
pub fn new(max_events: usize) -> Self {
|
||||
Self {
|
||||
events: Vec::new(),
|
||||
max_events,
|
||||
}
|
||||
}
|
||||
|
||||
/// Add event
|
||||
pub fn add_event(&mut self, event: HealEvent) {
|
||||
if self.events.len() >= self.max_events {
|
||||
// Remove oldest event
|
||||
self.events.remove(0);
|
||||
}
|
||||
self.events.push(event);
|
||||
}
|
||||
|
||||
/// Get all events
|
||||
pub fn get_events(&self) -> &[HealEvent] {
|
||||
&self.events
|
||||
}
|
||||
|
||||
/// Clear events
|
||||
pub fn clear_events(&mut self) {
|
||||
self.events.clear();
|
||||
}
|
||||
|
||||
/// Get event count
|
||||
pub fn event_count(&self) -> usize {
|
||||
self.events.len()
|
||||
}
|
||||
|
||||
/// Filter events by severity
|
||||
pub fn filter_by_severity(&self, min_severity: Severity) -> Vec<&HealEvent> {
|
||||
self.events.iter().filter(|event| event.severity() >= min_severity).collect()
|
||||
}
|
||||
|
||||
/// Filter events by type
|
||||
pub fn filter_by_type(&self, event_type: &str) -> Vec<&HealEvent> {
|
||||
self.events
|
||||
.iter()
|
||||
.filter(|event| match event {
|
||||
HealEvent::ObjectCorruption { .. } => event_type == "ObjectCorruption",
|
||||
HealEvent::ObjectMissing { .. } => event_type == "ObjectMissing",
|
||||
HealEvent::MetadataCorruption { .. } => event_type == "MetadataCorruption",
|
||||
HealEvent::DiskStatusChange { .. } => event_type == "DiskStatusChange",
|
||||
HealEvent::ECDecodeFailure { .. } => event_type == "ECDecodeFailure",
|
||||
HealEvent::ChecksumMismatch { .. } => event_type == "ChecksumMismatch",
|
||||
HealEvent::BucketMetadataCorruption { .. } => event_type == "BucketMetadataCorruption",
|
||||
HealEvent::MRFMetadataCorruption { .. } => event_type == "MRFMetadataCorruption",
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for HealEventHandler {
|
||||
fn default() -> Self {
|
||||
Self::new(1000)
|
||||
}
|
||||
}
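The handler above keeps at most `max_events` entries, dropping the oldest first. A small illustrative snippet (not from the diff) of that bounded buffer plus severity filtering:

```rust
// Illustrative only. Uses just the HealEventHandler / HealEvent APIs defined
// above; the bucket and object names are made up.
fn demo_event_handler() {
    let mut handler = HealEventHandler::new(100);
    handler.add_event(HealEvent::ObjectMissing {
        bucket: "photos".to_string(),
        object: "2024/img.png".to_string(),
        version_id: None,
        expected_locations: vec![0, 1],
        available_locations: vec![2],
    });

    // ObjectMissing events are classified as High severity, so this filter
    // returns the event just added.
    let important = handler.filter_by_severity(Severity::High);
    assert_eq!(important.len(), 1);
    assert_eq!(handler.event_count(), 1);
}
```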
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::heal::task::{HealPriority, HealType};
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_object_corruption_to_request() {
|
||||
let event = HealEvent::ObjectCorruption {
|
||||
bucket: "test-bucket".to_string(),
|
||||
object: "test-object".to_string(),
|
||||
version_id: None,
|
||||
corruption_type: CorruptionType::DataCorruption,
|
||||
severity: Severity::High,
|
||||
};
|
||||
|
||||
let request = event.to_heal_request().unwrap();
|
||||
assert!(matches!(request.heal_type, HealType::Object { .. }));
|
||||
assert_eq!(request.priority, HealPriority::High);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_object_missing_to_request() {
|
||||
let event = HealEvent::ObjectMissing {
|
||||
bucket: "test-bucket".to_string(),
|
||||
object: "test-object".to_string(),
|
||||
version_id: Some("v1".to_string()),
|
||||
expected_locations: vec![0, 1],
|
||||
available_locations: vec![2, 3],
|
||||
};
|
||||
|
||||
let request = event.to_heal_request().unwrap();
|
||||
assert!(matches!(request.heal_type, HealType::Object { .. }));
|
||||
assert_eq!(request.priority, HealPriority::High);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_metadata_corruption_to_request() {
|
||||
let event = HealEvent::MetadataCorruption {
|
||||
bucket: "test-bucket".to_string(),
|
||||
object: "test-object".to_string(),
|
||||
corruption_type: CorruptionType::MetadataCorruption,
|
||||
};
|
||||
|
||||
let request = event.to_heal_request().unwrap();
|
||||
assert!(matches!(request.heal_type, HealType::Metadata { .. }));
|
||||
assert_eq!(request.priority, HealPriority::High);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_ec_decode_failure_to_request() {
|
||||
let event = HealEvent::ECDecodeFailure {
|
||||
bucket: "test-bucket".to_string(),
|
||||
object: "test-object".to_string(),
|
||||
version_id: None,
|
||||
missing_shards: vec![0, 1],
|
||||
available_shards: vec![2, 3, 4],
|
||||
};
|
||||
|
||||
let request = event.to_heal_request().unwrap();
|
||||
assert!(matches!(request.heal_type, HealType::ECDecode { .. }));
|
||||
assert_eq!(request.priority, HealPriority::Urgent);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_checksum_mismatch_to_request() {
|
||||
let event = HealEvent::ChecksumMismatch {
|
||||
bucket: "test-bucket".to_string(),
|
||||
object: "test-object".to_string(),
|
||||
version_id: None,
|
||||
expected_checksum: "abc123".to_string(),
|
||||
actual_checksum: "def456".to_string(),
|
||||
};
|
||||
|
||||
let request = event.to_heal_request().unwrap();
|
||||
assert!(matches!(request.heal_type, HealType::Object { .. }));
|
||||
assert_eq!(request.priority, HealPriority::High);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_bucket_metadata_corruption_to_request() {
|
||||
let event = HealEvent::BucketMetadataCorruption {
|
||||
bucket: "test-bucket".to_string(),
|
||||
corruption_type: CorruptionType::MetadataCorruption,
|
||||
};
|
||||
|
||||
let request = event.to_heal_request().unwrap();
|
||||
assert!(matches!(request.heal_type, HealType::Bucket { .. }));
|
||||
assert_eq!(request.priority, HealPriority::High);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_mrf_metadata_corruption_to_request() {
|
||||
let event = HealEvent::MRFMetadataCorruption {
|
||||
meta_path: "test-bucket/test-object".to_string(),
|
||||
corruption_type: CorruptionType::MetadataCorruption,
|
||||
};
|
||||
|
||||
let request = event.to_heal_request().unwrap();
|
||||
assert!(matches!(request.heal_type, HealType::MRF { .. }));
|
||||
assert_eq!(request.priority, HealPriority::High);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_severity_to_priority() {
|
||||
let event_low = HealEvent::ObjectCorruption {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
corruption_type: CorruptionType::DataCorruption,
|
||||
severity: Severity::Low,
|
||||
};
|
||||
let request = event_low.to_heal_request().unwrap();
|
||||
assert_eq!(request.priority, HealPriority::Low);
|
||||
|
||||
let event_medium = HealEvent::ObjectCorruption {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
corruption_type: CorruptionType::DataCorruption,
|
||||
severity: Severity::Medium,
|
||||
};
|
||||
let request = event_medium.to_heal_request().unwrap();
|
||||
assert_eq!(request.priority, HealPriority::Normal);
|
||||
|
||||
let event_high = HealEvent::ObjectCorruption {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
corruption_type: CorruptionType::DataCorruption,
|
||||
severity: Severity::High,
|
||||
};
|
||||
let request = event_high.to_heal_request().unwrap();
|
||||
assert_eq!(request.priority, HealPriority::High);
|
||||
|
||||
let event_critical = HealEvent::ObjectCorruption {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
corruption_type: CorruptionType::DataCorruption,
|
||||
severity: Severity::Critical,
|
||||
};
|
||||
let request = event_critical.to_heal_request().unwrap();
|
||||
assert_eq!(request.priority, HealPriority::Urgent);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_description() {
|
||||
let event = HealEvent::ObjectCorruption {
|
||||
bucket: "test-bucket".to_string(),
|
||||
object: "test-object".to_string(),
|
||||
version_id: None,
|
||||
corruption_type: CorruptionType::DataCorruption,
|
||||
severity: Severity::High,
|
||||
};
|
||||
|
||||
let desc = event.description();
|
||||
assert!(desc.contains("Object corruption detected"));
|
||||
assert!(desc.contains("test-bucket/test-object"));
|
||||
assert!(desc.contains("DataCorruption"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_severity() {
|
||||
let event = HealEvent::ECDecodeFailure {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
missing_shards: vec![],
|
||||
available_shards: vec![],
|
||||
};
|
||||
assert_eq!(event.severity(), Severity::Critical);
|
||||
|
||||
let event = HealEvent::ObjectMissing {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
expected_locations: vec![],
|
||||
available_locations: vec![],
|
||||
};
|
||||
assert_eq!(event.severity(), Severity::High);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_handler_new() {
|
||||
let handler = HealEventHandler::new(10);
|
||||
assert_eq!(handler.event_count(), 0);
|
||||
assert_eq!(handler.max_events, 10);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_handler_default() {
|
||||
let handler = HealEventHandler::default();
|
||||
assert_eq!(handler.max_events, 1000);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_handler_add_event() {
|
||||
let mut handler = HealEventHandler::new(3);
|
||||
let event = HealEvent::ObjectCorruption {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
corruption_type: CorruptionType::DataCorruption,
|
||||
severity: Severity::High,
|
||||
};
|
||||
|
||||
handler.add_event(event.clone());
|
||||
assert_eq!(handler.event_count(), 1);
|
||||
|
||||
handler.add_event(event.clone());
|
||||
handler.add_event(event.clone());
|
||||
assert_eq!(handler.event_count(), 3);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_handler_max_events() {
|
||||
let mut handler = HealEventHandler::new(2);
|
||||
let event = HealEvent::ObjectCorruption {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
corruption_type: CorruptionType::DataCorruption,
|
||||
severity: Severity::High,
|
||||
};
|
||||
|
||||
handler.add_event(event.clone());
|
||||
handler.add_event(event.clone());
|
||||
handler.add_event(event.clone()); // Should remove oldest
|
||||
|
||||
assert_eq!(handler.event_count(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_handler_get_events() {
|
||||
let mut handler = HealEventHandler::new(10);
|
||||
let event = HealEvent::ObjectCorruption {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
corruption_type: CorruptionType::DataCorruption,
|
||||
severity: Severity::High,
|
||||
};
|
||||
|
||||
handler.add_event(event.clone());
|
||||
handler.add_event(event.clone());
|
||||
|
||||
let events = handler.get_events();
|
||||
assert_eq!(events.len(), 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_handler_clear_events() {
|
||||
let mut handler = HealEventHandler::new(10);
|
||||
let event = HealEvent::ObjectCorruption {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
corruption_type: CorruptionType::DataCorruption,
|
||||
severity: Severity::High,
|
||||
};
|
||||
|
||||
handler.add_event(event);
|
||||
assert_eq!(handler.event_count(), 1);
|
||||
|
||||
handler.clear_events();
|
||||
assert_eq!(handler.event_count(), 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_handler_filter_by_severity() {
|
||||
let mut handler = HealEventHandler::new(10);
|
||||
handler.add_event(HealEvent::ObjectCorruption {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
corruption_type: CorruptionType::DataCorruption,
|
||||
severity: Severity::Low,
|
||||
});
|
||||
handler.add_event(HealEvent::ECDecodeFailure {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
missing_shards: vec![],
|
||||
available_shards: vec![],
|
||||
});
|
||||
|
||||
let high_severity = handler.filter_by_severity(Severity::High);
|
||||
assert_eq!(high_severity.len(), 1); // only ECDecodeFailure (Critical) meets the High threshold
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_event_handler_filter_by_type() {
|
||||
let mut handler = HealEventHandler::new(10);
|
||||
handler.add_event(HealEvent::ObjectCorruption {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
corruption_type: CorruptionType::DataCorruption,
|
||||
severity: Severity::High,
|
||||
});
|
||||
handler.add_event(HealEvent::ObjectMissing {
|
||||
bucket: "test".to_string(),
|
||||
object: "test".to_string(),
|
||||
version_id: None,
|
||||
expected_locations: vec![],
|
||||
available_locations: vec![],
|
||||
});
|
||||
|
||||
let corruption_events = handler.filter_by_type("ObjectCorruption");
|
||||
assert_eq!(corruption_events.len(), 1);
|
||||
|
||||
let missing_events = handler.filter_by_type("ObjectMissing");
|
||||
assert_eq!(missing_events.len(), 1);
|
||||
}
|
||||
}
|
||||
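Taken together, the event types and handler above are meant to be used roughly as follows. This is an illustrative sketch only, not part of the change set; it assumes the HealEvent variants, CorruptionType and Severity definitions introduced earlier in event.rs.

// Sketch: collect events, then turn the severe ones into heal requests.
let mut handler = HealEventHandler::default(); // keeps at most 1000 events

handler.add_event(HealEvent::ObjectCorruption {
    bucket: "photos".to_string(),
    object: "2024/cat.jpg".to_string(),
    version_id: None,
    corruption_type: CorruptionType::DataCorruption,
    severity: Severity::High,
});

for event in handler.filter_by_severity(Severity::High) {
    let request = event.to_heal_request().unwrap();
    println!("heal request with priority {:?}: {}", request.priority, event.description());
}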
crates/ahm/src/heal/manager.rs (new file, 440 lines)
@@ -0,0 +1,440 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::heal::{
|
||||
progress::{HealProgress, HealStatistics},
|
||||
storage::HealStorageAPI,
|
||||
task::{HealOptions, HealPriority, HealRequest, HealTask, HealTaskStatus, HealType},
|
||||
};
|
||||
use crate::{Error, Result};
|
||||
use rustfs_ecstore::disk::DiskAPI;
|
||||
use rustfs_ecstore::disk::error::DiskError;
|
||||
use rustfs_ecstore::global::GLOBAL_LOCAL_DISK_MAP;
|
||||
use std::{
|
||||
collections::{HashMap, VecDeque},
|
||||
sync::Arc,
|
||||
time::{Duration, SystemTime},
|
||||
};
|
||||
use tokio::{
|
||||
sync::{Mutex, RwLock},
|
||||
time::interval,
|
||||
};
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::{error, info, warn};
|
||||
|
||||
/// Heal config
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct HealConfig {
|
||||
/// Whether to enable auto heal
|
||||
pub enable_auto_heal: bool,
|
||||
/// Heal interval
|
||||
pub heal_interval: Duration,
|
||||
/// Maximum concurrent heal tasks
|
||||
pub max_concurrent_heals: usize,
|
||||
/// Task timeout
|
||||
pub task_timeout: Duration,
|
||||
/// Queue size
|
||||
pub queue_size: usize,
|
||||
}
|
||||
|
||||
impl Default for HealConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
enable_auto_heal: true,
|
||||
heal_interval: Duration::from_secs(10), // 10 seconds
|
||||
max_concurrent_heals: 4,
|
||||
task_timeout: Duration::from_secs(300), // 5 minutes
|
||||
queue_size: 1000,
|
||||
}
|
||||
}
|
||||
}
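Callers that want something other than the defaults can use struct-update syntax; a minimal illustrative sketch (not part of this diff):

// Sketch: tighten the heal loop and allow more parallelism than the defaults.
let config = HealConfig {
    heal_interval: Duration::from_secs(5),
    max_concurrent_heals: 8,
    ..Default::default()
};
assert!(config.enable_auto_heal); // untouched fields keep their default values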
|
||||
|
||||
/// Heal state
|
||||
#[derive(Debug, Default)]
|
||||
pub struct HealState {
|
||||
/// Whether running
|
||||
pub is_running: bool,
|
||||
/// Current heal cycle
|
||||
pub current_cycle: u64,
|
||||
/// Last heal time
|
||||
pub last_heal_time: Option<SystemTime>,
|
||||
/// Total healed objects
|
||||
pub total_healed_objects: u64,
|
||||
/// Total heal failures
|
||||
pub total_heal_failures: u64,
|
||||
/// Current active heal tasks
|
||||
pub active_heal_count: usize,
|
||||
}
|
||||
|
||||
/// Heal manager
|
||||
pub struct HealManager {
|
||||
/// Heal config
|
||||
config: Arc<RwLock<HealConfig>>,
|
||||
/// Heal state
|
||||
state: Arc<RwLock<HealState>>,
|
||||
/// Active heal tasks
|
||||
active_heals: Arc<Mutex<HashMap<String, Arc<HealTask>>>>,
|
||||
/// Heal queue
|
||||
heal_queue: Arc<Mutex<VecDeque<HealRequest>>>,
|
||||
/// Storage layer interface
|
||||
storage: Arc<dyn HealStorageAPI>,
|
||||
/// Cancel token
|
||||
cancel_token: CancellationToken,
|
||||
/// Statistics
|
||||
statistics: Arc<RwLock<HealStatistics>>,
|
||||
}
|
||||
|
||||
impl HealManager {
|
||||
/// Create new HealManager
|
||||
pub fn new(storage: Arc<dyn HealStorageAPI>, config: Option<HealConfig>) -> Self {
|
||||
let config = config.unwrap_or_default();
|
||||
Self {
|
||||
config: Arc::new(RwLock::new(config)),
|
||||
state: Arc::new(RwLock::new(HealState::default())),
|
||||
active_heals: Arc::new(Mutex::new(HashMap::new())),
|
||||
heal_queue: Arc::new(Mutex::new(VecDeque::new())),
|
||||
storage,
|
||||
cancel_token: CancellationToken::new(),
|
||||
statistics: Arc::new(RwLock::new(HealStatistics::new())),
|
||||
}
|
||||
}
|
||||
|
||||
/// Start HealManager
|
||||
pub async fn start(&self) -> Result<()> {
|
||||
let mut state = self.state.write().await;
|
||||
if state.is_running {
|
||||
warn!("HealManager is already running");
|
||||
return Ok(());
|
||||
}
|
||||
state.is_running = true;
|
||||
drop(state);
|
||||
|
||||
info!("Starting HealManager");
|
||||
|
||||
// start scheduler
|
||||
self.start_scheduler().await?;
|
||||
|
||||
// start auto disk scanner
|
||||
self.start_auto_disk_scanner().await?;
|
||||
|
||||
info!("HealManager started successfully");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Stop HealManager
|
||||
pub async fn stop(&self) -> Result<()> {
|
||||
info!("Stopping HealManager");
|
||||
|
||||
// cancel all tasks
|
||||
self.cancel_token.cancel();
|
||||
|
||||
// wait for all tasks to complete
|
||||
let mut active_heals = self.active_heals.lock().await;
|
||||
for task in active_heals.values() {
|
||||
if let Err(e) = task.cancel().await {
|
||||
warn!("Failed to cancel task {}: {}", task.id, e);
|
||||
}
|
||||
}
|
||||
active_heals.clear();
|
||||
|
||||
// update state
|
||||
let mut state = self.state.write().await;
|
||||
state.is_running = false;
|
||||
|
||||
info!("HealManager stopped successfully");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Submit heal request
|
||||
pub async fn submit_heal_request(&self, request: HealRequest) -> Result<String> {
|
||||
let config = self.config.read().await;
|
||||
let mut queue = self.heal_queue.lock().await;
|
||||
|
||||
if queue.len() >= config.queue_size {
|
||||
return Err(Error::ConfigurationError {
|
||||
message: "Heal queue is full".to_string(),
|
||||
});
|
||||
}
|
||||
|
||||
let request_id = request.id.clone();
|
||||
queue.push_back(request);
|
||||
drop(queue);
|
||||
|
||||
info!("Submitted heal request: {}", request_id);
|
||||
Ok(request_id)
|
||||
}
|
||||
|
||||
/// Get task status
|
||||
pub async fn get_task_status(&self, task_id: &str) -> Result<HealTaskStatus> {
|
||||
let active_heals = self.active_heals.lock().await;
|
||||
if let Some(task) = active_heals.get(task_id) {
|
||||
Ok(task.get_status().await)
|
||||
} else {
|
||||
Err(Error::TaskNotFound {
|
||||
task_id: task_id.to_string(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Get count of currently active heal tasks
|
||||
pub async fn get_active_tasks_count(&self) -> usize {
|
||||
self.active_heals.lock().await.len()
|
||||
}
|
||||
|
||||
/// Get task progress
pub async fn get_task_progress(&self, task_id: &str) -> Result<HealProgress> {
|
||||
let active_heals = self.active_heals.lock().await;
|
||||
if let Some(task) = active_heals.get(task_id) {
|
||||
Ok(task.get_progress().await)
|
||||
} else {
|
||||
Err(Error::TaskNotFound {
|
||||
task_id: task_id.to_string(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Cancel task
|
||||
pub async fn cancel_task(&self, task_id: &str) -> Result<()> {
|
||||
let mut active_heals = self.active_heals.lock().await;
|
||||
if let Some(task) = active_heals.get(task_id) {
|
||||
task.cancel().await?;
|
||||
active_heals.remove(task_id);
|
||||
info!("Cancelled heal task: {}", task_id);
|
||||
Ok(())
|
||||
} else {
|
||||
Err(Error::TaskNotFound {
|
||||
task_id: task_id.to_string(),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// Get statistics
|
||||
pub async fn get_statistics(&self) -> HealStatistics {
|
||||
self.statistics.read().await.clone()
|
||||
}
|
||||
|
||||
/// Get active task count
|
||||
pub async fn get_active_task_count(&self) -> usize {
|
||||
let active_heals = self.active_heals.lock().await;
|
||||
active_heals.len()
|
||||
}
|
||||
|
||||
/// Get queue length
|
||||
pub async fn get_queue_length(&self) -> usize {
|
||||
let queue = self.heal_queue.lock().await;
|
||||
queue.len()
|
||||
}
|
||||
|
||||
/// Start scheduler
|
||||
async fn start_scheduler(&self) -> Result<()> {
|
||||
let config = self.config.clone();
|
||||
let heal_queue = self.heal_queue.clone();
|
||||
let active_heals = self.active_heals.clone();
|
||||
let cancel_token = self.cancel_token.clone();
|
||||
let statistics = self.statistics.clone();
|
||||
let storage = self.storage.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
let mut interval = interval(config.read().await.heal_interval);
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = cancel_token.cancelled() => {
|
||||
info!("Heal scheduler received shutdown signal");
|
||||
break;
|
||||
}
|
||||
_ = interval.tick() => {
|
||||
Self::process_heal_queue(&heal_queue, &active_heals, &config, &statistics, &storage).await;
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Start the background task that automatically scans local disks and enqueues erasure set heal requests
|
||||
async fn start_auto_disk_scanner(&self) -> Result<()> {
|
||||
let config = self.config.clone();
|
||||
let heal_queue = self.heal_queue.clone();
|
||||
let active_heals = self.active_heals.clone();
|
||||
let cancel_token = self.cancel_token.clone();
|
||||
let storage = self.storage.clone();
|
||||
|
||||
tokio::spawn(async move {
|
||||
let mut interval = interval(config.read().await.heal_interval);
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = cancel_token.cancelled() => {
|
||||
info!("Auto disk scanner received shutdown signal");
|
||||
break;
|
||||
}
|
||||
_ = interval.tick() => {
|
||||
// Build list of endpoints that need healing
|
||||
let mut endpoints = Vec::new();
|
||||
for (_, disk_opt) in GLOBAL_LOCAL_DISK_MAP.read().await.iter() {
|
||||
if let Some(disk) = disk_opt {
|
||||
// detect unformatted disk via get_disk_id()
|
||||
if let Err(err) = disk.get_disk_id().await {
|
||||
if err == DiskError::UnformattedDisk {
|
||||
endpoints.push(disk.endpoint());
|
||||
continue;
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if endpoints.is_empty() {
|
||||
continue;
|
||||
}
|
||||
|
||||
// Get bucket list for erasure set healing
|
||||
let buckets = match storage.list_buckets().await {
|
||||
Ok(buckets) => buckets.iter().map(|b| b.name.clone()).collect::<Vec<String>>(),
|
||||
Err(e) => {
|
||||
error!("Failed to get bucket list for auto healing: {}", e);
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
// Create erasure set heal requests for each endpoint
|
||||
for ep in endpoints {
|
||||
let Some(set_disk_id) =
|
||||
crate::heal::utils::format_set_disk_id_from_i32(ep.pool_idx, ep.set_idx)
|
||||
else {
|
||||
warn!("Skipping endpoint {} without valid pool/set index", ep);
|
||||
continue;
|
||||
};
|
||||
// skip if already queued or healing
|
||||
// Use consistent lock order: queue first, then active_heals to avoid deadlock
|
||||
let mut skip = false;
|
||||
{
|
||||
let queue = heal_queue.lock().await;
|
||||
if queue.iter().any(|req| {
|
||||
matches!(
|
||||
&req.heal_type,
|
||||
crate::heal::task::HealType::ErasureSet { set_disk_id: queued_id, .. }
|
||||
if queued_id == &set_disk_id
|
||||
)
|
||||
}) {
|
||||
skip = true;
|
||||
}
|
||||
}
|
||||
if !skip {
|
||||
let active = active_heals.lock().await;
|
||||
if active.values().any(|task| {
|
||||
matches!(
|
||||
&task.heal_type,
|
||||
crate::heal::task::HealType::ErasureSet { set_disk_id: active_id, .. }
|
||||
if active_id == &set_disk_id
|
||||
)
|
||||
}) {
|
||||
skip = true;
|
||||
}
|
||||
}
|
||||
|
||||
if skip {
|
||||
continue;
|
||||
}
|
||||
|
||||
// enqueue erasure set heal request for this disk
|
||||
let req = HealRequest::new(
|
||||
HealType::ErasureSet {
|
||||
buckets: buckets.clone(),
|
||||
set_disk_id: set_disk_id.clone(),
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::Normal,
|
||||
);
|
||||
let mut queue = heal_queue.lock().await;
|
||||
queue.push_back(req);
|
||||
info!("Enqueued auto erasure set heal for endpoint: {} (set_disk_id: {})", ep, set_disk_id);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
});
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Process heal queue
|
||||
async fn process_heal_queue(
|
||||
heal_queue: &Arc<Mutex<VecDeque<HealRequest>>>,
|
||||
active_heals: &Arc<Mutex<HashMap<String, Arc<HealTask>>>>,
|
||||
config: &Arc<RwLock<HealConfig>>,
|
||||
statistics: &Arc<RwLock<HealStatistics>>,
|
||||
storage: &Arc<dyn HealStorageAPI>,
|
||||
) {
|
||||
let config = config.read().await;
|
||||
let mut active_heals_guard = active_heals.lock().await;
|
||||
|
||||
// check if new heal tasks can be started
|
||||
if active_heals_guard.len() >= config.max_concurrent_heals {
|
||||
return;
|
||||
}
|
||||
|
||||
let mut queue = heal_queue.lock().await;
|
||||
if let Some(request) = queue.pop_front() {
|
||||
let task = Arc::new(HealTask::from_request(request, storage.clone()));
|
||||
let task_id = task.id.clone();
|
||||
active_heals_guard.insert(task_id.clone(), task.clone());
|
||||
drop(active_heals_guard);
|
||||
let active_heals_clone = active_heals.clone();
|
||||
let statistics_clone = statistics.clone();
|
||||
|
||||
// start heal task
|
||||
tokio::spawn(async move {
|
||||
info!("Starting heal task: {}", task_id);
|
||||
let result = task.execute().await;
|
||||
match result {
|
||||
Ok(_) => {
|
||||
info!("Heal task completed successfully: {}", task_id);
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Heal task failed: {} - {}", task_id, e);
|
||||
}
|
||||
}
|
||||
let mut active_heals_guard = active_heals_clone.lock().await;
|
||||
if let Some(completed_task) = active_heals_guard.remove(&task_id) {
|
||||
// update statistics
|
||||
let mut stats = statistics_clone.write().await;
|
||||
match completed_task.get_status().await {
|
||||
HealTaskStatus::Completed => {
|
||||
stats.update_task_completion(true);
|
||||
}
|
||||
_ => {
|
||||
stats.update_task_completion(false);
|
||||
}
|
||||
}
|
||||
stats.update_running_tasks(active_heals_guard.len() as u64);
|
||||
}
|
||||
});
|
||||
|
||||
// update statistics
|
||||
let mut stats = statistics.write().await;
|
||||
stats.total_tasks += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for HealManager {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("HealManager")
|
||||
.field("config", &"<config>")
|
||||
.field("state", &"<state>")
|
||||
.field("active_heals_count", &"<active_heals>")
|
||||
.field("queue_length", &"<queue>")
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
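End to end, the manager is created with a storage backend, started, fed requests, and stopped. The sketch below is illustrative only; `storage` stands for some `Arc<dyn HealStorageAPI>` obtained from the surrounding server wiring, which is an assumption and not shown in this diff.

// Sketch only: `storage` is an assumed Arc<dyn HealStorageAPI>; error handling elided.
async fn run_heal_manager(storage: Arc<dyn HealStorageAPI>) -> Result<()> {
    let manager = HealManager::new(storage, Some(HealConfig::default()));
    manager.start().await?;

    // Requests can come from HealEvent::to_heal_request() or be built directly;
    // HealRequest construction is shown in the auto disk scanner above.
    println!(
        "queued: {}, active: {}",
        manager.get_queue_length().await,
        manager.get_active_task_count().await
    );

    manager.stop().await?;
    Ok(())
}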
@@ -12,17 +12,17 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::components::Navbar;
use crate::views::{HomeViews, SettingViews};
use dioxus::prelude::*;

/// The router for the application
#[derive(Debug, Clone, Routable, PartialEq)]
#[rustfmt::skip]
pub enum Route {
    #[layout(Navbar)]
    #[route("/")]
    HomeViews {},
    #[route("/settings")]
    SettingViews {},
}

pub mod channel;
pub mod erasure_healer;
pub mod event;
pub mod manager;
pub mod progress;
pub mod resume;
pub mod storage;
pub mod task;
pub mod utils;

pub use erasure_healer::ErasureSetHealer;
pub use manager::HealManager;
pub use resume::{CheckpointManager, ResumeCheckpoint, ResumeManager, ResumeState, ResumeUtils};
pub use task::{HealOptions, HealPriority, HealRequest, HealTask, HealType};
|
||||
crates/ahm/src/heal/progress.rs (new file, 389 lines)
@@ -0,0 +1,389 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::time::SystemTime;
|
||||
|
||||
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
|
||||
pub struct HealProgress {
|
||||
/// Objects scanned
|
||||
pub objects_scanned: u64,
|
||||
/// Objects healed
|
||||
pub objects_healed: u64,
|
||||
/// Objects failed
|
||||
pub objects_failed: u64,
|
||||
/// Bytes processed
|
||||
pub bytes_processed: u64,
|
||||
/// Current object
|
||||
pub current_object: Option<String>,
|
||||
/// Progress percentage
|
||||
pub progress_percentage: f64,
|
||||
/// Start time
|
||||
pub start_time: Option<SystemTime>,
|
||||
/// Last update time
|
||||
pub last_update_time: Option<SystemTime>,
|
||||
/// Estimated completion time
|
||||
pub estimated_completion_time: Option<SystemTime>,
|
||||
}
|
||||
|
||||
impl HealProgress {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
start_time: Some(SystemTime::now()),
|
||||
last_update_time: Some(SystemTime::now()),
|
||||
..Default::default()
|
||||
}
|
||||
}
|
||||
|
||||
pub fn update_progress(&mut self, scanned: u64, healed: u64, failed: u64, bytes: u64) {
|
||||
self.objects_scanned = scanned;
|
||||
self.objects_healed = healed;
|
||||
self.objects_failed = failed;
|
||||
self.bytes_processed = bytes;
|
||||
self.last_update_time = Some(SystemTime::now());
|
||||
|
||||
// calculate progress percentage
|
||||
let total = scanned + healed + failed;
|
||||
if total > 0 {
|
||||
self.progress_percentage = (healed as f64 / total as f64) * 100.0;
|
||||
}
|
||||
}
|
||||
|
||||
pub fn set_current_object(&mut self, object: Option<String>) {
|
||||
self.current_object = object;
|
||||
self.last_update_time = Some(SystemTime::now());
|
||||
}
|
||||
|
||||
pub fn is_completed(&self) -> bool {
|
||||
self.progress_percentage >= 100.0
|
||||
|| (self.objects_scanned > 0 && self.objects_healed + self.objects_failed >= self.objects_scanned)
|
||||
}
|
||||
|
||||
pub fn get_success_rate(&self) -> f64 {
|
||||
let total = self.objects_healed + self.objects_failed;
|
||||
if total > 0 {
|
||||
(self.objects_healed as f64 / total as f64) * 100.0
|
||||
} else {
|
||||
0.0
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct HealStatistics {
|
||||
/// Total heal tasks
|
||||
pub total_tasks: u64,
|
||||
/// Successful tasks
|
||||
pub successful_tasks: u64,
|
||||
/// Failed tasks
|
||||
pub failed_tasks: u64,
|
||||
/// Running tasks
|
||||
pub running_tasks: u64,
|
||||
/// Total healed objects
|
||||
pub total_objects_healed: u64,
|
||||
/// Total healed bytes
|
||||
pub total_bytes_healed: u64,
|
||||
/// Last update time
|
||||
pub last_update_time: SystemTime,
|
||||
}
|
||||
|
||||
impl Default for HealStatistics {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
impl HealStatistics {
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
total_tasks: 0,
|
||||
successful_tasks: 0,
|
||||
failed_tasks: 0,
|
||||
running_tasks: 0,
|
||||
total_objects_healed: 0,
|
||||
total_bytes_healed: 0,
|
||||
last_update_time: SystemTime::now(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn update_task_completion(&mut self, success: bool) {
|
||||
if success {
|
||||
self.successful_tasks += 1;
|
||||
} else {
|
||||
self.failed_tasks += 1;
|
||||
}
|
||||
self.last_update_time = SystemTime::now();
|
||||
}
|
||||
|
||||
pub fn update_running_tasks(&mut self, count: u64) {
|
||||
self.running_tasks = count;
|
||||
self.last_update_time = SystemTime::now();
|
||||
}
|
||||
|
||||
pub fn add_healed_objects(&mut self, count: u64, bytes: u64) {
|
||||
self.total_objects_healed += count;
|
||||
self.total_bytes_healed += bytes;
|
||||
self.last_update_time = SystemTime::now();
|
||||
}
|
||||
|
||||
pub fn get_success_rate(&self) -> f64 {
|
||||
let total = self.successful_tasks + self.failed_tasks;
|
||||
if total > 0 {
|
||||
(self.successful_tasks as f64 / total as f64) * 100.0
|
||||
} else {
|
||||
0.0
|
||||
}
|
||||
}
|
||||
}
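A minimal illustrative sketch of how a heal task would drive these two types, using only the methods defined above (not part of this diff):

// Per-task progress tracking.
let mut progress = HealProgress::new();
progress.set_current_object(Some("bucket/object".to_string()));
progress.update_progress(10, 8, 2, 4096); // scanned, healed, failed, bytes
assert!(progress.is_completed());         // 8 healed + 2 failed >= 10 scanned
assert!((progress.get_success_rate() - 80.0).abs() < 0.001);

// Manager-wide statistics, aggregated across tasks.
let mut stats = HealStatistics::new();
stats.update_task_completion(true);
stats.add_healed_objects(8, 4096);
assert_eq!(stats.get_success_rate(), 100.0);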
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_heal_progress_new() {
|
||||
let progress = HealProgress::new();
|
||||
assert_eq!(progress.objects_scanned, 0);
|
||||
assert_eq!(progress.objects_healed, 0);
|
||||
assert_eq!(progress.objects_failed, 0);
|
||||
assert_eq!(progress.bytes_processed, 0);
|
||||
assert_eq!(progress.progress_percentage, 0.0);
|
||||
assert!(progress.start_time.is_some());
|
||||
assert!(progress.last_update_time.is_some());
|
||||
assert!(progress.current_object.is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_progress_update_progress() {
|
||||
let mut progress = HealProgress::new();
|
||||
progress.update_progress(10, 8, 2, 1024);
|
||||
|
||||
assert_eq!(progress.objects_scanned, 10);
|
||||
assert_eq!(progress.objects_healed, 8);
|
||||
assert_eq!(progress.objects_failed, 2);
|
||||
assert_eq!(progress.bytes_processed, 1024);
|
||||
// Progress percentage should be calculated based on healed/total
|
||||
// total = scanned + healed + failed = 10 + 8 + 2 = 20
|
||||
// healed/total = 8/20 = 0.4 = 40%
|
||||
assert!((progress.progress_percentage - 40.0).abs() < 0.001);
|
||||
assert!(progress.last_update_time.is_some());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_progress_update_progress_zero_total() {
|
||||
let mut progress = HealProgress::new();
|
||||
progress.update_progress(0, 0, 0, 0);
|
||||
|
||||
assert_eq!(progress.progress_percentage, 0.0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_progress_update_progress_all_healed() {
|
||||
let mut progress = HealProgress::new();
|
||||
// When scanned=0, healed=10, failed=0: total=10, progress = 10/10 = 100%
|
||||
progress.update_progress(0, 10, 0, 2048);
|
||||
|
||||
// All healed, should be 100%
|
||||
assert!((progress.progress_percentage - 100.0).abs() < 0.001);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_progress_set_current_object() {
|
||||
let mut progress = HealProgress::new();
|
||||
let initial_time = progress.last_update_time;
|
||||
|
||||
// Small delay to ensure time difference
|
||||
std::thread::sleep(std::time::Duration::from_millis(10));
|
||||
|
||||
progress.set_current_object(Some("test-bucket/test-object".to_string()));
|
||||
|
||||
assert_eq!(progress.current_object, Some("test-bucket/test-object".to_string()));
|
||||
assert!(progress.last_update_time.is_some());
|
||||
// last_update_time should be updated
|
||||
assert_ne!(progress.last_update_time, initial_time);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_progress_set_current_object_none() {
|
||||
let mut progress = HealProgress::new();
|
||||
progress.set_current_object(Some("test".to_string()));
|
||||
progress.set_current_object(None);
|
||||
|
||||
assert!(progress.current_object.is_none());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_progress_is_completed_by_percentage() {
|
||||
let mut progress = HealProgress::new();
|
||||
progress.update_progress(10, 10, 0, 1024);
|
||||
|
||||
assert!(progress.is_completed());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_progress_is_completed_by_processed() {
|
||||
let mut progress = HealProgress::new();
|
||||
progress.objects_scanned = 10;
|
||||
progress.objects_healed = 8;
|
||||
progress.objects_failed = 2;
|
||||
// healed + failed = 8 + 2 = 10 >= scanned = 10
|
||||
assert!(progress.is_completed());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_progress_is_not_completed() {
|
||||
let mut progress = HealProgress::new();
|
||||
progress.objects_scanned = 10;
|
||||
progress.objects_healed = 5;
|
||||
progress.objects_failed = 2;
|
||||
// healed + failed = 5 + 2 = 7 < scanned = 10
|
||||
assert!(!progress.is_completed());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_progress_get_success_rate() {
|
||||
let mut progress = HealProgress::new();
|
||||
progress.objects_healed = 8;
|
||||
progress.objects_failed = 2;
|
||||
|
||||
// success_rate = 8 / (8 + 2) * 100 = 80%
|
||||
assert!((progress.get_success_rate() - 80.0).abs() < 0.001);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_progress_get_success_rate_zero_total() {
|
||||
let progress = HealProgress::new();
|
||||
// No healed or failed objects
|
||||
assert_eq!(progress.get_success_rate(), 0.0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_progress_get_success_rate_all_success() {
|
||||
let mut progress = HealProgress::new();
|
||||
progress.objects_healed = 10;
|
||||
progress.objects_failed = 0;
|
||||
|
||||
assert!((progress.get_success_rate() - 100.0).abs() < 0.001);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_statistics_new() {
|
||||
let stats = HealStatistics::new();
|
||||
assert_eq!(stats.total_tasks, 0);
|
||||
assert_eq!(stats.successful_tasks, 0);
|
||||
assert_eq!(stats.failed_tasks, 0);
|
||||
assert_eq!(stats.running_tasks, 0);
|
||||
assert_eq!(stats.total_objects_healed, 0);
|
||||
assert_eq!(stats.total_bytes_healed, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_statistics_default() {
|
||||
let stats = HealStatistics::default();
|
||||
assert_eq!(stats.total_tasks, 0);
|
||||
assert_eq!(stats.successful_tasks, 0);
|
||||
assert_eq!(stats.failed_tasks, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_statistics_update_task_completion_success() {
|
||||
let mut stats = HealStatistics::new();
|
||||
let initial_time = stats.last_update_time;
|
||||
|
||||
std::thread::sleep(std::time::Duration::from_millis(10));
|
||||
stats.update_task_completion(true);
|
||||
|
||||
assert_eq!(stats.successful_tasks, 1);
|
||||
assert_eq!(stats.failed_tasks, 0);
|
||||
assert!(stats.last_update_time > initial_time);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_statistics_update_task_completion_failure() {
|
||||
let mut stats = HealStatistics::new();
|
||||
stats.update_task_completion(false);
|
||||
|
||||
assert_eq!(stats.successful_tasks, 0);
|
||||
assert_eq!(stats.failed_tasks, 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_statistics_update_running_tasks() {
|
||||
let mut stats = HealStatistics::new();
|
||||
let initial_time = stats.last_update_time;
|
||||
|
||||
std::thread::sleep(std::time::Duration::from_millis(10));
|
||||
stats.update_running_tasks(5);
|
||||
|
||||
assert_eq!(stats.running_tasks, 5);
|
||||
assert!(stats.last_update_time > initial_time);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_statistics_add_healed_objects() {
|
||||
let mut stats = HealStatistics::new();
|
||||
let initial_time = stats.last_update_time;
|
||||
|
||||
std::thread::sleep(std::time::Duration::from_millis(10));
|
||||
stats.add_healed_objects(10, 10240);
|
||||
|
||||
assert_eq!(stats.total_objects_healed, 10);
|
||||
assert_eq!(stats.total_bytes_healed, 10240);
|
||||
assert!(stats.last_update_time > initial_time);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_statistics_add_healed_objects_accumulative() {
|
||||
let mut stats = HealStatistics::new();
|
||||
stats.add_healed_objects(5, 5120);
|
||||
stats.add_healed_objects(3, 3072);
|
||||
|
||||
assert_eq!(stats.total_objects_healed, 8);
|
||||
assert_eq!(stats.total_bytes_healed, 8192);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_statistics_get_success_rate() {
|
||||
let mut stats = HealStatistics::new();
|
||||
stats.successful_tasks = 8;
|
||||
stats.failed_tasks = 2;
|
||||
|
||||
// success_rate = 8 / (8 + 2) * 100 = 80%
|
||||
assert!((stats.get_success_rate() - 80.0).abs() < 0.001);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_statistics_get_success_rate_zero_total() {
|
||||
let stats = HealStatistics::new();
|
||||
assert_eq!(stats.get_success_rate(), 0.0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_statistics_get_success_rate_all_success() {
|
||||
let mut stats = HealStatistics::new();
|
||||
stats.successful_tasks = 10;
|
||||
stats.failed_tasks = 0;
|
||||
|
||||
assert!((stats.get_success_rate() - 100.0).abs() < 0.001);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_heal_statistics_get_success_rate_all_failure() {
|
||||
let mut stats = HealStatistics::new();
|
||||
stats.successful_tasks = 0;
|
||||
stats.failed_tasks = 5;
|
||||
|
||||
assert_eq!(stats.get_success_rate(), 0.0);
|
||||
}
|
||||
}
|
||||
crates/ahm/src/heal/resume.rs (new file, 719 lines)
@@ -0,0 +1,719 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::{Error, Result};
|
||||
use rustfs_ecstore::disk::{BUCKET_META_PREFIX, DiskAPI, DiskStore, RUSTFS_META_BUCKET};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::path::Path;
|
||||
use std::sync::Arc;
|
||||
use std::time::{SystemTime, UNIX_EPOCH};
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::{debug, info, warn};
|
||||
use uuid::Uuid;
|
||||
|
||||
/// resume state file constants
|
||||
const RESUME_STATE_FILE: &str = "ahm_resume_state.json";
|
||||
const RESUME_PROGRESS_FILE: &str = "ahm_progress.json";
|
||||
const RESUME_CHECKPOINT_FILE: &str = "ahm_checkpoint.json";
|
||||
|
||||
/// Helper function to convert Path to &str, returning an error if conversion fails
|
||||
fn path_to_str(path: &Path) -> Result<&str> {
|
||||
path.to_str()
|
||||
.ok_or_else(|| Error::other(format!("Invalid UTF-8 path: {:?}", path)))
|
||||
}
|
||||
|
||||
/// resume state
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ResumeState {
|
||||
/// task id
|
||||
pub task_id: String,
|
||||
/// task type
|
||||
pub task_type: String,
|
||||
/// set disk identifier (for erasure set tasks)
|
||||
#[serde(default)]
|
||||
pub set_disk_id: String,
|
||||
/// start time
|
||||
pub start_time: u64,
|
||||
/// last update time
|
||||
pub last_update: u64,
|
||||
/// completed
|
||||
pub completed: bool,
|
||||
/// total objects
|
||||
pub total_objects: u64,
|
||||
/// processed objects
|
||||
pub processed_objects: u64,
|
||||
/// successful objects
|
||||
pub successful_objects: u64,
|
||||
/// failed objects
|
||||
pub failed_objects: u64,
|
||||
/// skipped objects
|
||||
pub skipped_objects: u64,
|
||||
/// current bucket
|
||||
pub current_bucket: Option<String>,
|
||||
/// current object
|
||||
pub current_object: Option<String>,
|
||||
/// completed buckets
|
||||
pub completed_buckets: Vec<String>,
|
||||
/// pending buckets
|
||||
pub pending_buckets: Vec<String>,
|
||||
/// error message
|
||||
pub error_message: Option<String>,
|
||||
/// retry count
|
||||
pub retry_count: u32,
|
||||
/// max retries
|
||||
pub max_retries: u32,
|
||||
}
|
||||
|
||||
impl ResumeState {
|
||||
pub fn new(task_id: String, task_type: String, set_disk_id: String, buckets: Vec<String>) -> Self {
|
||||
Self {
|
||||
task_id,
|
||||
task_type,
|
||||
set_disk_id,
|
||||
start_time: SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(),
|
||||
last_update: SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(),
|
||||
completed: false,
|
||||
total_objects: 0,
|
||||
processed_objects: 0,
|
||||
successful_objects: 0,
|
||||
failed_objects: 0,
|
||||
skipped_objects: 0,
|
||||
current_bucket: None,
|
||||
current_object: None,
|
||||
completed_buckets: Vec::new(),
|
||||
pending_buckets: buckets,
|
||||
error_message: None,
|
||||
retry_count: 0,
|
||||
max_retries: 3,
|
||||
}
|
||||
}
|
||||
|
||||
pub fn update_progress(&mut self, processed: u64, successful: u64, failed: u64, skipped: u64) {
|
||||
self.processed_objects = processed;
|
||||
self.successful_objects = successful;
|
||||
self.failed_objects = failed;
|
||||
self.skipped_objects = skipped;
|
||||
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
|
||||
}
|
||||
|
||||
pub fn set_current_item(&mut self, bucket: Option<String>, object: Option<String>) {
|
||||
self.current_bucket = bucket;
|
||||
self.current_object = object;
|
||||
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
|
||||
}
|
||||
|
||||
pub fn complete_bucket(&mut self, bucket: &str) {
|
||||
if !self.completed_buckets.contains(&bucket.to_string()) {
|
||||
self.completed_buckets.push(bucket.to_string());
|
||||
}
|
||||
if let Some(pos) = self.pending_buckets.iter().position(|b| b == bucket) {
|
||||
self.pending_buckets.remove(pos);
|
||||
}
|
||||
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
|
||||
}
|
||||
|
||||
pub fn mark_completed(&mut self) {
|
||||
self.completed = true;
|
||||
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
|
||||
}
|
||||
|
||||
pub fn set_error(&mut self, error: String) {
|
||||
self.error_message = Some(error);
|
||||
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
|
||||
}
|
||||
|
||||
pub fn increment_retry(&mut self) {
|
||||
self.retry_count += 1;
|
||||
self.last_update = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
|
||||
}
|
||||
|
||||
pub fn can_retry(&self) -> bool {
|
||||
self.retry_count < self.max_retries
|
||||
}
|
||||
|
||||
pub fn get_progress_percentage(&self) -> f64 {
|
||||
if self.total_objects == 0 {
|
||||
return 0.0;
|
||||
}
|
||||
(self.processed_objects as f64 / self.total_objects as f64) * 100.0
|
||||
}
|
||||
|
||||
pub fn get_success_rate(&self) -> f64 {
|
||||
let total = self.successful_objects + self.failed_objects;
|
||||
if total == 0 {
|
||||
return 0.0;
|
||||
}
|
||||
(self.successful_objects as f64 / total as f64) * 100.0
|
||||
}
|
||||
}
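The state type is a plain data record; a short illustrative sketch of the lifecycle it is meant to capture (the set-disk id "pool_0_set_0" is just a placeholder, mirroring the tests below):

// Sketch: track an erasure-set heal across two buckets.
let mut state = ResumeState::new(
    ResumeUtils::generate_task_id(),
    "erasure_set".to_string(),
    "pool_0_set_0".to_string(),
    vec!["bucket1".to_string(), "bucket2".to_string()],
);
state.total_objects = 100;
state.update_progress(40, 38, 1, 1); // processed, successful, failed, skipped
state.complete_bucket("bucket1");
assert!((state.get_progress_percentage() - 40.0).abs() < 0.001);
state.mark_completed();
assert!(state.completed);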
|
||||
|
||||
/// resume manager
|
||||
pub struct ResumeManager {
|
||||
disk: DiskStore,
|
||||
state: Arc<RwLock<ResumeState>>,
|
||||
}
|
||||
|
||||
impl ResumeManager {
|
||||
/// create new resume manager
|
||||
pub async fn new(
|
||||
disk: DiskStore,
|
||||
task_id: String,
|
||||
task_type: String,
|
||||
set_disk_id: String,
|
||||
buckets: Vec<String>,
|
||||
) -> Result<Self> {
|
||||
let state = ResumeState::new(task_id, task_type, set_disk_id, buckets);
|
||||
let manager = Self {
|
||||
disk,
|
||||
state: Arc::new(RwLock::new(state)),
|
||||
};
|
||||
|
||||
// save initial state
|
||||
manager.save_state().await?;
|
||||
Ok(manager)
|
||||
}
|
||||
|
||||
/// load resume state from disk
|
||||
pub async fn load_from_disk(disk: DiskStore, task_id: &str) -> Result<Self> {
|
||||
let state_data = Self::read_state_file(&disk, task_id).await?;
|
||||
let state: ResumeState = serde_json::from_slice(&state_data).map_err(|e| Error::TaskExecutionFailed {
|
||||
message: format!("Failed to deserialize resume state: {e}"),
|
||||
})?;
|
||||
|
||||
Ok(Self {
|
||||
disk,
|
||||
state: Arc::new(RwLock::new(state)),
|
||||
})
|
||||
}
|
||||
|
||||
/// check if resume state exists
|
||||
pub async fn has_resume_state(disk: &DiskStore, task_id: &str) -> bool {
|
||||
let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_STATE_FILE}"));
|
||||
match path_to_str(&file_path) {
|
||||
Ok(path_str) => match disk.read_all(RUSTFS_META_BUCKET, path_str).await {
|
||||
Ok(data) => !data.is_empty(),
|
||||
Err(_) => false,
|
||||
},
|
||||
Err(_) => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// get current state
|
||||
pub async fn get_state(&self) -> ResumeState {
|
||||
self.state.read().await.clone()
|
||||
}
|
||||
|
||||
/// update progress
|
||||
pub async fn update_progress(&self, processed: u64, successful: u64, failed: u64, skipped: u64) -> Result<()> {
|
||||
let mut state = self.state.write().await;
|
||||
state.update_progress(processed, successful, failed, skipped);
|
||||
drop(state);
|
||||
self.save_state().await
|
||||
}
|
||||
|
||||
/// set current item
|
||||
pub async fn set_current_item(&self, bucket: Option<String>, object: Option<String>) -> Result<()> {
|
||||
let mut state = self.state.write().await;
|
||||
state.set_current_item(bucket, object);
|
||||
drop(state);
|
||||
self.save_state().await
|
||||
}
|
||||
|
||||
/// complete bucket
|
||||
pub async fn complete_bucket(&self, bucket: &str) -> Result<()> {
|
||||
let mut state = self.state.write().await;
|
||||
state.complete_bucket(bucket);
|
||||
drop(state);
|
||||
self.save_state().await
|
||||
}
|
||||
|
||||
/// mark task completed
|
||||
pub async fn mark_completed(&self) -> Result<()> {
|
||||
let mut state = self.state.write().await;
|
||||
state.mark_completed();
|
||||
drop(state);
|
||||
self.save_state().await
|
||||
}
|
||||
|
||||
/// set error message
|
||||
pub async fn set_error(&self, error: String) -> Result<()> {
|
||||
let mut state = self.state.write().await;
|
||||
state.set_error(error);
|
||||
drop(state);
|
||||
self.save_state().await
|
||||
}
|
||||
|
||||
/// increment retry count
|
||||
pub async fn increment_retry(&self) -> Result<()> {
|
||||
let mut state = self.state.write().await;
|
||||
state.increment_retry();
|
||||
drop(state);
|
||||
self.save_state().await
|
||||
}
|
||||
|
||||
/// cleanup resume state
|
||||
pub async fn cleanup(&self) -> Result<()> {
|
||||
let state = self.state.read().await;
|
||||
let task_id = &state.task_id;
|
||||
|
||||
// delete state files
|
||||
let state_file = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_STATE_FILE}"));
|
||||
let progress_file = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_PROGRESS_FILE}"));
|
||||
let checkpoint_file = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_CHECKPOINT_FILE}"));
|
||||
|
||||
// ignore delete errors, files may not exist
|
||||
if let Ok(path_str) = path_to_str(&state_file) {
|
||||
let _ = self.disk.delete(RUSTFS_META_BUCKET, path_str, Default::default()).await;
|
||||
}
|
||||
if let Ok(path_str) = path_to_str(&progress_file) {
|
||||
let _ = self.disk.delete(RUSTFS_META_BUCKET, path_str, Default::default()).await;
|
||||
}
|
||||
if let Ok(path_str) = path_to_str(&checkpoint_file) {
|
||||
let _ = self.disk.delete(RUSTFS_META_BUCKET, path_str, Default::default()).await;
|
||||
}
|
||||
|
||||
info!("Cleaned up resume state for task: {}", task_id);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// save state to disk
|
||||
async fn save_state(&self) -> Result<()> {
|
||||
let state = self.state.read().await;
|
||||
let state_data = serde_json::to_vec(&*state).map_err(|e| Error::TaskExecutionFailed {
|
||||
message: format!("Failed to serialize resume state: {e}"),
|
||||
})?;
|
||||
|
||||
let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{}_{}", state.task_id, RESUME_STATE_FILE));
|
||||
|
||||
let path_str = path_to_str(&file_path)?;
|
||||
self.disk
|
||||
.write_all(RUSTFS_META_BUCKET, path_str, state_data.into())
|
||||
.await
|
||||
.map_err(|e| Error::TaskExecutionFailed {
|
||||
message: format!("Failed to save resume state: {e}"),
|
||||
})?;
|
||||
|
||||
debug!("Saved resume state for task: {}", state.task_id);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// read state file from disk
|
||||
async fn read_state_file(disk: &DiskStore, task_id: &str) -> Result<Vec<u8>> {
|
||||
let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_STATE_FILE}"));
|
||||
|
||||
let path_str = path_to_str(&file_path)?;
|
||||
disk.read_all(RUSTFS_META_BUCKET, path_str)
|
||||
.await
|
||||
.map(|bytes| bytes.to_vec())
|
||||
.map_err(|e| Error::TaskExecutionFailed {
|
||||
message: format!("Failed to read resume state file: {e}"),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
/// resume checkpoint
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct ResumeCheckpoint {
|
||||
/// task id
|
||||
pub task_id: String,
|
||||
/// checkpoint time
|
||||
pub checkpoint_time: u64,
|
||||
/// current bucket index
|
||||
pub current_bucket_index: usize,
|
||||
/// current object index
|
||||
pub current_object_index: usize,
|
||||
/// processed objects
|
||||
pub processed_objects: Vec<String>,
|
||||
/// failed objects
|
||||
pub failed_objects: Vec<String>,
|
||||
/// skipped objects
|
||||
pub skipped_objects: Vec<String>,
|
||||
}
|
||||
|
||||
impl ResumeCheckpoint {
|
||||
pub fn new(task_id: String) -> Self {
|
||||
Self {
|
||||
task_id,
|
||||
checkpoint_time: SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs(),
|
||||
current_bucket_index: 0,
|
||||
current_object_index: 0,
|
||||
processed_objects: Vec::new(),
|
||||
failed_objects: Vec::new(),
|
||||
skipped_objects: Vec::new(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn update_position(&mut self, bucket_index: usize, object_index: usize) {
|
||||
self.current_bucket_index = bucket_index;
|
||||
self.current_object_index = object_index;
|
||||
self.checkpoint_time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap_or_default().as_secs();
|
||||
}
|
||||
|
||||
pub fn add_processed_object(&mut self, object: String) {
|
||||
if !self.processed_objects.contains(&object) {
|
||||
self.processed_objects.push(object);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_failed_object(&mut self, object: String) {
|
||||
if !self.failed_objects.contains(&object) {
|
||||
self.failed_objects.push(object);
|
||||
}
|
||||
}
|
||||
|
||||
pub fn add_skipped_object(&mut self, object: String) {
|
||||
if !self.skipped_objects.contains(&object) {
|
||||
self.skipped_objects.push(object);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// resume checkpoint manager
|
||||
pub struct CheckpointManager {
|
||||
disk: DiskStore,
|
||||
checkpoint: Arc<RwLock<ResumeCheckpoint>>,
|
||||
}
|
||||
|
||||
impl CheckpointManager {
|
||||
/// create new checkpoint manager
|
||||
pub async fn new(disk: DiskStore, task_id: String) -> Result<Self> {
|
||||
let checkpoint = ResumeCheckpoint::new(task_id);
|
||||
let manager = Self {
|
||||
disk,
|
||||
checkpoint: Arc::new(RwLock::new(checkpoint)),
|
||||
};
|
||||
|
||||
// save initial checkpoint
|
||||
manager.save_checkpoint().await?;
|
||||
Ok(manager)
|
||||
}
|
||||
|
||||
/// load checkpoint from disk
|
||||
pub async fn load_from_disk(disk: DiskStore, task_id: &str) -> Result<Self> {
|
||||
let checkpoint_data = Self::read_checkpoint_file(&disk, task_id).await?;
|
||||
let checkpoint: ResumeCheckpoint = serde_json::from_slice(&checkpoint_data).map_err(|e| Error::TaskExecutionFailed {
|
||||
message: format!("Failed to deserialize checkpoint: {e}"),
|
||||
})?;
|
||||
|
||||
Ok(Self {
|
||||
disk,
|
||||
checkpoint: Arc::new(RwLock::new(checkpoint)),
|
||||
})
|
||||
}
|
||||
|
||||
/// check if checkpoint exists
|
||||
pub async fn has_checkpoint(disk: &DiskStore, task_id: &str) -> bool {
|
||||
let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_CHECKPOINT_FILE}"));
|
||||
match path_to_str(&file_path) {
|
||||
Ok(path_str) => match disk.read_all(RUSTFS_META_BUCKET, path_str).await {
|
||||
Ok(data) => !data.is_empty(),
|
||||
Err(_) => false,
|
||||
},
|
||||
Err(_) => false,
|
||||
}
|
||||
}
|
||||
|
||||
/// get current checkpoint
|
||||
pub async fn get_checkpoint(&self) -> ResumeCheckpoint {
|
||||
self.checkpoint.read().await.clone()
|
||||
}
|
||||
|
||||
/// update position
|
||||
pub async fn update_position(&self, bucket_index: usize, object_index: usize) -> Result<()> {
|
||||
let mut checkpoint = self.checkpoint.write().await;
|
||||
checkpoint.update_position(bucket_index, object_index);
|
||||
drop(checkpoint);
|
||||
self.save_checkpoint().await
|
||||
}
|
||||
|
||||
/// add processed object
|
||||
pub async fn add_processed_object(&self, object: String) -> Result<()> {
|
||||
let mut checkpoint = self.checkpoint.write().await;
|
||||
checkpoint.add_processed_object(object);
|
||||
drop(checkpoint);
|
||||
self.save_checkpoint().await
|
||||
}
|
||||
|
||||
/// add failed object
|
||||
pub async fn add_failed_object(&self, object: String) -> Result<()> {
|
||||
let mut checkpoint = self.checkpoint.write().await;
|
||||
checkpoint.add_failed_object(object);
|
||||
drop(checkpoint);
|
||||
self.save_checkpoint().await
|
||||
}
|
||||
|
||||
/// add skipped object
|
||||
pub async fn add_skipped_object(&self, object: String) -> Result<()> {
|
||||
let mut checkpoint = self.checkpoint.write().await;
|
||||
checkpoint.add_skipped_object(object);
|
||||
drop(checkpoint);
|
||||
self.save_checkpoint().await
|
||||
}
|
||||
|
||||
/// cleanup checkpoint
|
||||
pub async fn cleanup(&self) -> Result<()> {
|
||||
let checkpoint = self.checkpoint.read().await;
|
||||
let task_id = &checkpoint.task_id;
|
||||
|
||||
let checkpoint_file = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_CHECKPOINT_FILE}"));
|
||||
if let Ok(path_str) = path_to_str(&checkpoint_file) {
|
||||
let _ = self.disk.delete(RUSTFS_META_BUCKET, path_str, Default::default()).await;
|
||||
}
|
||||
|
||||
info!("Cleaned up checkpoint for task: {}", task_id);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// save checkpoint to disk
|
||||
async fn save_checkpoint(&self) -> Result<()> {
|
||||
let checkpoint = self.checkpoint.read().await;
|
||||
let checkpoint_data = serde_json::to_vec(&*checkpoint).map_err(|e| Error::TaskExecutionFailed {
|
||||
message: format!("Failed to serialize checkpoint: {e}"),
|
||||
})?;
|
||||
|
||||
let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{}_{}", checkpoint.task_id, RESUME_CHECKPOINT_FILE));
|
||||
|
||||
let path_str = path_to_str(&file_path)?;
|
||||
self.disk
|
||||
.write_all(RUSTFS_META_BUCKET, path_str, checkpoint_data.into())
|
||||
.await
|
||||
.map_err(|e| Error::TaskExecutionFailed {
|
||||
message: format!("Failed to save checkpoint: {e}"),
|
||||
})?;
|
||||
|
||||
debug!("Saved checkpoint for task: {}", checkpoint.task_id);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// read checkpoint file from disk
|
||||
async fn read_checkpoint_file(disk: &DiskStore, task_id: &str) -> Result<Vec<u8>> {
|
||||
let file_path = Path::new(BUCKET_META_PREFIX).join(format!("{task_id}_{RESUME_CHECKPOINT_FILE}"));
|
||||
|
||||
let path_str = path_to_str(&file_path)?;
|
||||
disk.read_all(RUSTFS_META_BUCKET, path_str)
|
||||
.await
|
||||
.map(|bytes| bytes.to_vec())
|
||||
.map_err(|e| Error::TaskExecutionFailed {
|
||||
message: format!("Failed to read checkpoint file: {e}"),
|
||||
})
|
||||
}
|
||||
}
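A usage sketch for the checkpoint manager; illustrative only, with `disk` standing for an assumed DiskStore handle obtained elsewhere:

// Sketch only: `disk` is an assumed DiskStore; every mutation persists the checkpoint.
async fn checkpoint_example(disk: DiskStore, task_id: String) -> Result<()> {
    let manager = if CheckpointManager::has_checkpoint(&disk, &task_id).await {
        CheckpointManager::load_from_disk(disk, &task_id).await?
    } else {
        CheckpointManager::new(disk, task_id).await?
    };

    manager.update_position(0, 42).await?; // bucket index, object index
    manager.add_processed_object("bucket1/obj".to_string()).await?;

    let checkpoint = manager.get_checkpoint().await;
    println!(
        "resuming at bucket {} object {}",
        checkpoint.current_bucket_index, checkpoint.current_object_index
    );
    Ok(())
}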
|
||||
|
||||
/// resume utils
|
||||
pub struct ResumeUtils;
|
||||
|
||||
impl ResumeUtils {
|
||||
/// generate unique task id
|
||||
pub fn generate_task_id() -> String {
|
||||
Uuid::new_v4().to_string()
|
||||
}
|
||||
|
||||
/// check if task can be resumed
|
||||
pub async fn can_resume_task(disk: &DiskStore, task_id: &str) -> bool {
|
||||
ResumeManager::has_resume_state(disk, task_id).await
|
||||
}
|
||||
|
||||
/// get all resumable task ids
|
||||
pub async fn get_resumable_tasks(disk: &DiskStore) -> Result<Vec<String>> {
|
||||
// List all files in the buckets metadata directory
|
||||
let entries = match disk.list_dir("", RUSTFS_META_BUCKET, BUCKET_META_PREFIX, -1).await {
|
||||
Ok(entries) => entries,
|
||||
Err(e) => {
|
||||
debug!("Failed to list resume state files: {}", e);
|
||||
return Ok(Vec::new());
|
||||
}
|
||||
};
|
||||
|
||||
let mut task_ids = Vec::new();
|
||||
|
||||
// Filter files that end with ahm_resume_state.json and extract task IDs
|
||||
for entry in entries {
|
||||
if entry.ends_with(&format!("_{RESUME_STATE_FILE}")) {
|
||||
// Extract task ID from filename: {task_id}_ahm_resume_state.json
|
||||
if let Some(task_id) = entry.strip_suffix(&format!("_{RESUME_STATE_FILE}")) {
|
||||
if !task_id.is_empty() {
|
||||
task_ids.push(task_id.to_string());
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
debug!("Found {} resumable tasks: {:?}", task_ids.len(), task_ids);
|
||||
Ok(task_ids)
|
||||
}
|
||||
|
||||
/// Remove resume states that have not been updated for more than `max_age_hours` hours.
|
||||
pub async fn cleanup_expired_states(disk: &DiskStore, max_age_hours: u64) -> Result<()> {
|
||||
let task_ids = Self::get_resumable_tasks(disk).await?;
|
||||
let current_time = SystemTime::now().duration_since(UNIX_EPOCH).unwrap().as_secs();
|
||||
|
||||
for task_id in task_ids {
|
||||
if let Ok(resume_manager) = ResumeManager::load_from_disk(disk.clone(), &task_id).await {
|
||||
let state = resume_manager.get_state().await;
|
||||
let age_hours = current_time.saturating_sub(state.last_update) / 3600; // saturating_sub guards against clock skew
|
||||
|
||||
if age_hours > max_age_hours {
|
||||
info!("Cleaning up expired resume state for task: {} (age: {} hours)", task_id, age_hours);
|
||||
if let Err(e) = resume_manager.cleanup().await {
|
||||
warn!("Failed to cleanup expired resume state for task {}: {}", task_id, e);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
}
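
/// Example (sketch): how a startup routine might use `ResumeUtils` to prune stale
/// state and then discover tasks worth resuming. The 24-hour retention and the
/// function name are illustrative choices, not requirements of this module.
pub async fn discover_resumable_tasks(disk: &DiskStore) -> Result<Vec<String>> {
    // Drop resume states that have not been updated for more than 24 hours.
    ResumeUtils::cleanup_expired_states(disk, 24).await?;

    let mut resumable = Vec::new();
    for task_id in ResumeUtils::get_resumable_tasks(disk).await? {
        if ResumeUtils::can_resume_task(disk, &task_id).await {
            info!("Found resumable heal task: {}", task_id);
            resumable.push(task_id);
        }
    }
    Ok(resumable)
}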
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_resume_state_creation() {
|
||||
let task_id = ResumeUtils::generate_task_id();
|
||||
let buckets = vec!["bucket1".to_string(), "bucket2".to_string()];
|
||||
let state = ResumeState::new(task_id.clone(), "erasure_set".to_string(), "pool_0_set_0".to_string(), buckets);
|
||||
|
||||
assert_eq!(state.task_id, task_id);
|
||||
assert_eq!(state.task_type, "erasure_set");
|
||||
assert!(!state.completed);
|
||||
assert_eq!(state.processed_objects, 0);
|
||||
assert_eq!(state.pending_buckets.len(), 2);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_resume_state_progress() {
|
||||
let task_id = ResumeUtils::generate_task_id();
|
||||
let buckets = vec!["bucket1".to_string()];
|
||||
let mut state = ResumeState::new(task_id, "erasure_set".to_string(), "pool_0_set_0".to_string(), buckets);
|
||||
|
||||
state.update_progress(10, 8, 1, 1);
|
||||
assert_eq!(state.processed_objects, 10);
|
||||
assert_eq!(state.successful_objects, 8);
|
||||
assert_eq!(state.failed_objects, 1);
|
||||
assert_eq!(state.skipped_objects, 1);
|
||||
|
||||
let progress = state.get_progress_percentage();
|
||||
assert_eq!(progress, 0.0); // total_objects is 0
|
||||
|
||||
state.total_objects = 100;
|
||||
let progress = state.get_progress_percentage();
|
||||
assert_eq!(progress, 10.0);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_resume_state_bucket_completion() {
|
||||
let task_id = ResumeUtils::generate_task_id();
|
||||
let buckets = vec!["bucket1".to_string(), "bucket2".to_string()];
|
||||
let mut state = ResumeState::new(task_id, "erasure_set".to_string(), "pool_0_set_0".to_string(), buckets);
|
||||
|
||||
assert_eq!(state.pending_buckets.len(), 2);
|
||||
assert_eq!(state.completed_buckets.len(), 0);
|
||||
|
||||
state.complete_bucket("bucket1");
|
||||
assert_eq!(state.pending_buckets.len(), 1);
|
||||
assert_eq!(state.completed_buckets.len(), 1);
|
||||
assert!(state.completed_buckets.contains(&"bucket1".to_string()));
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_resume_utils() {
|
||||
let task_id1 = ResumeUtils::generate_task_id();
|
||||
let task_id2 = ResumeUtils::generate_task_id();
|
||||
|
||||
assert_ne!(task_id1, task_id2);
|
||||
assert_eq!(task_id1.len(), 36); // UUID length
|
||||
assert_eq!(task_id2.len(), 36);
|
||||
}
|
||||
|
||||
#[tokio::test]
|
||||
async fn test_get_resumable_tasks_integration() {
|
||||
use rustfs_ecstore::disk::{DiskOption, endpoint::Endpoint, new_disk};
|
||||
use tempfile::TempDir;
|
||||
|
||||
// Create a temporary directory for testing
|
||||
let temp_dir = TempDir::new().unwrap();
|
||||
let disk_path = temp_dir.path().join("test_disk");
|
||||
std::fs::create_dir_all(&disk_path).unwrap();
|
||||
|
||||
// Create a local disk for testing
|
||||
let endpoint = Endpoint::try_from(disk_path.to_string_lossy().as_ref()).unwrap();
|
||||
let disk_option = DiskOption {
|
||||
cleanup: false,
|
||||
health_check: false,
|
||||
};
|
||||
let disk = new_disk(&endpoint, &disk_option).await.unwrap();
|
||||
|
||||
// Create necessary directories first (ignore if already exist)
|
||||
let _ = disk.make_volume(RUSTFS_META_BUCKET).await;
|
||||
let _ = disk.make_volume(&format!("{RUSTFS_META_BUCKET}/{BUCKET_META_PREFIX}")).await;
|
||||
|
||||
// Create some test resume state files
|
||||
let task_ids = vec![
|
||||
"test-task-1".to_string(),
|
||||
"test-task-2".to_string(),
|
||||
"test-task-3".to_string(),
|
||||
];
|
||||
|
||||
// Save resume state files for each task
|
||||
for task_id in &task_ids {
|
||||
let state = ResumeState::new(
|
||||
task_id.clone(),
|
||||
"erasure_set".to_string(),
|
||||
"pool_0_set_0".to_string(),
|
||||
vec!["bucket1".to_string(), "bucket2".to_string()],
|
||||
);
|
||||
|
||||
let state_data = serde_json::to_vec(&state).unwrap();
|
||||
let file_path = format!("{BUCKET_META_PREFIX}/{task_id}_{RESUME_STATE_FILE}");
|
||||
|
||||
disk.write_all(RUSTFS_META_BUCKET, &file_path, state_data.into())
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
// Also create some non-resume state files to test filtering
|
||||
let non_resume_files = vec![
|
||||
"other_file.txt",
|
||||
"task4_ahm_checkpoint.json",
|
||||
"task5_ahm_progress.json",
|
||||
"_ahm_resume_state.json", // Invalid: empty task ID
|
||||
];
|
||||
|
||||
for file_name in non_resume_files {
|
||||
let file_path = format!("{BUCKET_META_PREFIX}/{file_name}");
|
||||
disk.write_all(RUSTFS_META_BUCKET, &file_path, b"test data".to_vec().into())
|
||||
.await
|
||||
.unwrap();
|
||||
}
|
||||
|
||||
// Now call get_resumable_tasks to see if it finds the correct files
|
||||
let found_task_ids = ResumeUtils::get_resumable_tasks(&disk).await.unwrap();
|
||||
|
||||
// Verify that only the valid resume state files are found
|
||||
assert_eq!(found_task_ids.len(), 3);
|
||||
for task_id in &task_ids {
|
||||
assert!(found_task_ids.contains(task_id), "Task ID {task_id} not found");
|
||||
}
|
||||
|
||||
// Verify that invalid files are not included
|
||||
assert!(!found_task_ids.contains(&"".to_string()));
|
||||
assert!(!found_task_ids.contains(&"task4".to_string()));
|
||||
assert!(!found_task_ids.contains(&"task5".to_string()));
|
||||
|
||||
// Clean up
|
||||
temp_dir.close().unwrap();
|
||||
}
|
||||
}
|
||||
542
crates/ahm/src/heal/storage.rs
Normal file
@@ -0,0 +1,542 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::{Error, Result};
|
||||
use async_trait::async_trait;
|
||||
use rustfs_common::heal_channel::{HealOpts, HealScanMode};
|
||||
use rustfs_ecstore::{
|
||||
disk::{DiskStore, endpoint::Endpoint},
|
||||
store::ECStore,
|
||||
store_api::{BucketInfo, ObjectIO, StorageAPI},
|
||||
};
|
||||
use rustfs_madmin::heal_commands::HealResultItem;
|
||||
use std::sync::Arc;
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
/// Disk status for heal operations
|
||||
#[derive(Debug, Clone, PartialEq, Eq)]
|
||||
pub enum DiskStatus {
|
||||
/// Ok
|
||||
Ok,
|
||||
/// Offline
|
||||
Offline,
|
||||
/// Corrupt
|
||||
Corrupt,
|
||||
/// Missing
|
||||
Missing,
|
||||
/// Permission denied
|
||||
PermissionDenied,
|
||||
/// Faulty
|
||||
Faulty,
|
||||
/// Root mount
|
||||
RootMount,
|
||||
/// Unknown
|
||||
Unknown,
|
||||
/// Unformatted
|
||||
Unformatted,
|
||||
}
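
/// Example (sketch): a convenience predicate a scheduler might layer on top of
/// `DiskStatus`. Treating everything except `Ok` as heal-worthy is one plausible
/// policy, not a rule defined by this module.
impl DiskStatus {
    pub fn needs_heal(&self) -> bool {
        !matches!(self, DiskStatus::Ok)
    }
}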
|
||||
|
||||
/// Heal storage layer interface
|
||||
#[async_trait]
|
||||
pub trait HealStorageAPI: Send + Sync {
|
||||
/// Get object meta
|
||||
async fn get_object_meta(&self, bucket: &str, object: &str) -> Result<Option<rustfs_ecstore::store_api::ObjectInfo>>;
|
||||
|
||||
/// Get object data
|
||||
async fn get_object_data(&self, bucket: &str, object: &str) -> Result<Option<Vec<u8>>>;
|
||||
|
||||
/// Put object data
|
||||
async fn put_object_data(&self, bucket: &str, object: &str, data: &[u8]) -> Result<()>;
|
||||
|
||||
/// Delete object
|
||||
async fn delete_object(&self, bucket: &str, object: &str) -> Result<()>;
|
||||
|
||||
/// Check object integrity
|
||||
async fn verify_object_integrity(&self, bucket: &str, object: &str) -> Result<bool>;
|
||||
|
||||
/// EC decode rebuild
|
||||
async fn ec_decode_rebuild(&self, bucket: &str, object: &str) -> Result<Vec<u8>>;
|
||||
|
||||
/// Get disk status
|
||||
async fn get_disk_status(&self, endpoint: &Endpoint) -> Result<DiskStatus>;
|
||||
|
||||
/// Format disk
|
||||
async fn format_disk(&self, endpoint: &Endpoint) -> Result<()>;
|
||||
|
||||
/// Get bucket info
|
||||
async fn get_bucket_info(&self, bucket: &str) -> Result<Option<BucketInfo>>;
|
||||
|
||||
/// Fix bucket metadata
|
||||
async fn heal_bucket_metadata(&self, bucket: &str) -> Result<()>;
|
||||
|
||||
/// Get all buckets
|
||||
async fn list_buckets(&self) -> Result<Vec<BucketInfo>>;
|
||||
|
||||
/// Check object exists
|
||||
async fn object_exists(&self, bucket: &str, object: &str) -> Result<bool>;
|
||||
|
||||
/// Get object size
|
||||
async fn get_object_size(&self, bucket: &str, object: &str) -> Result<Option<u64>>;
|
||||
|
||||
/// Get object checksum
|
||||
async fn get_object_checksum(&self, bucket: &str, object: &str) -> Result<Option<String>>;
|
||||
|
||||
/// Heal object using ecstore
|
||||
async fn heal_object(
|
||||
&self,
|
||||
bucket: &str,
|
||||
object: &str,
|
||||
version_id: Option<&str>,
|
||||
opts: &HealOpts,
|
||||
) -> Result<(HealResultItem, Option<Error>)>;
|
||||
|
||||
/// Heal bucket using ecstore
|
||||
async fn heal_bucket(&self, bucket: &str, opts: &HealOpts) -> Result<HealResultItem>;
|
||||
|
||||
/// Heal format using ecstore
|
||||
async fn heal_format(&self, dry_run: bool) -> Result<(HealResultItem, Option<Error>)>;
|
||||
|
||||
/// List objects for healing
|
||||
async fn list_objects_for_heal(&self, bucket: &str, prefix: &str) -> Result<Vec<String>>;
|
||||
|
||||
/// Get disk for resume functionality
|
||||
async fn get_disk_for_resume(&self, set_disk_id: &str) -> Result<DiskStore>;
|
||||
}
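
/// Example (sketch): a routine written against the trait object, so it works with any
/// `HealStorageAPI` implementation. The deep, recreate-oriented options mirror those
/// used elsewhere in this crate and are illustrative rather than prescriptive.
pub async fn verify_and_heal(storage: &dyn HealStorageAPI, bucket: &str, object: &str) -> Result<bool> {
    if !storage.object_exists(bucket, object).await? {
        return Ok(false);
    }
    if storage.verify_object_integrity(bucket, object).await? {
        return Ok(true);
    }
    let opts = HealOpts {
        recursive: false,
        dry_run: false,
        remove: false,
        recreate: true,
        scan_mode: HealScanMode::Deep,
        update_parity: true,
        no_lock: false,
        pool: None,
        set: None,
    };
    let (_result, err) = storage.heal_object(bucket, object, None, &opts).await?;
    Ok(err.is_none())
}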
|
||||
|
||||
/// ECStore Heal storage layer implementation
|
||||
pub struct ECStoreHealStorage {
|
||||
ecstore: Arc<ECStore>,
|
||||
}
|
||||
|
||||
impl ECStoreHealStorage {
|
||||
pub fn new(ecstore: Arc<ECStore>) -> Self {
|
||||
Self { ecstore }
|
||||
}
|
||||
}
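
// Example (sketch): wiring the implementation into the trait object consumed by heal
// tasks; the `ecstore` handle is assumed to come from server startup.
//
//     let storage: Arc<dyn HealStorageAPI> = Arc::new(ECStoreHealStorage::new(ecstore.clone()));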
|
||||
|
||||
#[async_trait]
|
||||
impl HealStorageAPI for ECStoreHealStorage {
|
||||
async fn get_object_meta(&self, bucket: &str, object: &str) -> Result<Option<rustfs_ecstore::store_api::ObjectInfo>> {
|
||||
debug!("Getting object meta: {}/{}", bucket, object);
|
||||
|
||||
match self.ecstore.get_object_info(bucket, object, &Default::default()).await {
|
||||
Ok(info) => Ok(Some(info)),
|
||||
Err(e) => {
|
||||
// Map ObjectNotFound to None to align with Option return type
|
||||
if matches!(e, rustfs_ecstore::error::StorageError::ObjectNotFound(_, _)) {
|
||||
debug!("Object meta not found: {}/{}", bucket, object);
|
||||
Ok(None)
|
||||
} else {
|
||||
error!("Failed to get object meta: {}/{} - {}", bucket, object, e);
|
||||
Err(Error::other(e))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_object_data(&self, bucket: &str, object: &str) -> Result<Option<Vec<u8>>> {
|
||||
debug!("Getting object data: {}/{}", bucket, object);
|
||||
|
||||
let reader = match (*self.ecstore)
|
||||
.get_object_reader(bucket, object, None, Default::default(), &Default::default())
|
||||
.await
|
||||
{
|
||||
Ok(reader) => reader,
|
||||
Err(e) => {
|
||||
error!("Failed to get object: {}/{} - {}", bucket, object, e);
|
||||
return Err(Error::other(e));
|
||||
}
|
||||
};
|
||||
|
||||
// WARNING: Returning Vec<u8> for large objects is dangerous. To avoid OOM, cap the read size.
|
||||
// If needed, refactor callers to stream instead of buffering entire object.
|
||||
const MAX_READ_BYTES: usize = 16 * 1024 * 1024; // 16 MiB cap
|
||||
let mut buf = Vec::with_capacity(1024 * 1024);
|
||||
use tokio::io::AsyncReadExt as _;
|
||||
let mut n_read: usize = 0;
|
||||
let mut stream = reader.stream;
|
||||
loop {
|
||||
// Read in chunks
|
||||
let mut chunk = vec![0u8; 1024 * 1024];
|
||||
match stream.read(&mut chunk).await {
|
||||
Ok(0) => break,
|
||||
Ok(n) => {
|
||||
buf.extend_from_slice(&chunk[..n]);
|
||||
n_read += n;
|
||||
if n_read > MAX_READ_BYTES {
|
||||
warn!(
|
||||
"Object data exceeds cap ({} bytes), aborting full read to prevent OOM: {}/{}",
|
||||
MAX_READ_BYTES, bucket, object
|
||||
);
|
||||
return Err(Error::other(format!(
|
||||
"Object too large: {} bytes (max: {} bytes) for {}/{}",
|
||||
n_read, MAX_READ_BYTES, bucket, object
|
||||
)));
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to read object data: {}/{} - {}", bucket, object, e);
|
||||
return Err(Error::other(e));
|
||||
}
|
||||
}
|
||||
}
|
||||
Ok(Some(buf))
|
||||
}
|
||||
|
||||
async fn put_object_data(&self, bucket: &str, object: &str, data: &[u8]) -> Result<()> {
|
||||
debug!("Putting object data: {}/{} ({} bytes)", bucket, object, data.len());
|
||||
|
||||
let mut reader = rustfs_ecstore::store_api::PutObjReader::from_vec(data.to_vec());
|
||||
match (*self.ecstore)
|
||||
.put_object(bucket, object, &mut reader, &Default::default())
|
||||
.await
|
||||
{
|
||||
Ok(_) => {
|
||||
info!("Successfully put object: {}/{}", bucket, object);
|
||||
Ok(())
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to put object: {}/{} - {}", bucket, object, e);
|
||||
Err(Error::other(e))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn delete_object(&self, bucket: &str, object: &str) -> Result<()> {
|
||||
debug!("Deleting object: {}/{}", bucket, object);
|
||||
|
||||
match self.ecstore.delete_object(bucket, object, Default::default()).await {
|
||||
Ok(_) => {
|
||||
info!("Successfully deleted object: {}/{}", bucket, object);
|
||||
Ok(())
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to delete object: {}/{} - {}", bucket, object, e);
|
||||
Err(Error::other(e))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn verify_object_integrity(&self, bucket: &str, object: &str) -> Result<bool> {
|
||||
debug!("Verifying object integrity: {}/{}", bucket, object);
|
||||
|
||||
// Check object metadata first
|
||||
match self.get_object_meta(bucket, object).await? {
|
||||
Some(obj_info) => {
|
||||
if obj_info.size < 0 {
|
||||
warn!("Object has invalid size: {}/{}", bucket, object);
|
||||
return Ok(false);
|
||||
}
|
||||
|
||||
// Stream-read the object to a sink to avoid loading into memory
|
||||
match (*self.ecstore)
|
||||
.get_object_reader(bucket, object, None, Default::default(), &Default::default())
|
||||
.await
|
||||
{
|
||||
Ok(reader) => {
|
||||
let mut stream = reader.stream;
|
||||
match tokio::io::copy(&mut stream, &mut tokio::io::sink()).await {
|
||||
Ok(_) => {
|
||||
info!("Object integrity check passed: {}/{}", bucket, object);
|
||||
Ok(true)
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("Object stream read failed: {}/{} - {}", bucket, object, e);
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("Failed to get object reader: {}/{} - {}", bucket, object, e);
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
}
|
||||
None => {
|
||||
warn!("Object metadata not found: {}/{}", bucket, object);
|
||||
Ok(false)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn ec_decode_rebuild(&self, bucket: &str, object: &str) -> Result<Vec<u8>> {
|
||||
debug!("EC decode rebuild: {}/{}", bucket, object);
|
||||
|
||||
// Use ecstore's heal_object to rebuild the object
|
||||
let heal_opts = HealOpts {
|
||||
recursive: false,
|
||||
dry_run: false,
|
||||
remove: false,
|
||||
recreate: true,
|
||||
scan_mode: HealScanMode::Deep,
|
||||
update_parity: true,
|
||||
no_lock: false,
|
||||
pool: None,
|
||||
set: None,
|
||||
};
|
||||
|
||||
match self.heal_object(bucket, object, None, &heal_opts).await {
|
||||
Ok((_result, error)) => {
|
||||
if error.is_some() {
|
||||
return Err(Error::TaskExecutionFailed {
|
||||
message: format!("Heal failed: {error:?}"),
|
||||
});
|
||||
}
|
||||
|
||||
// After healing, try to read the object data
|
||||
match self.get_object_data(bucket, object).await? {
|
||||
Some(data) => {
|
||||
info!("EC decode rebuild successful: {}/{} ({} bytes)", bucket, object, data.len());
|
||||
Ok(data)
|
||||
}
|
||||
None => {
|
||||
error!("Object not found after heal: {}/{}", bucket, object);
|
||||
Err(Error::TaskExecutionFailed {
|
||||
message: format!("Object not found after heal: {bucket}/{object}"),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Heal operation failed: {}/{} - {}", bucket, object, e);
|
||||
Err(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_disk_status(&self, endpoint: &Endpoint) -> Result<DiskStatus> {
|
||||
debug!("Getting disk status: {:?}", endpoint);
|
||||
|
||||
// TODO: implement disk status check using ecstore
|
||||
// For now, return Ok status
|
||||
info!("Disk status check: {:?} - OK", endpoint);
|
||||
Ok(DiskStatus::Ok)
|
||||
}
|
||||
|
||||
async fn format_disk(&self, endpoint: &Endpoint) -> Result<()> {
|
||||
debug!("Formatting disk: {:?}", endpoint);
|
||||
|
||||
// Use ecstore's heal_format
|
||||
match self.heal_format(false).await {
|
||||
Ok((_, error)) => {
|
||||
if error.is_some() {
|
||||
return Err(Error::other(format!("Format failed: {error:?}")));
|
||||
}
|
||||
info!("Successfully formatted disk: {:?}", endpoint);
|
||||
Ok(())
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to format disk: {:?} - {}", endpoint, e);
|
||||
Err(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_bucket_info(&self, bucket: &str) -> Result<Option<BucketInfo>> {
|
||||
debug!("Getting bucket info: {}", bucket);
|
||||
|
||||
match self.ecstore.get_bucket_info(bucket, &Default::default()).await {
|
||||
Ok(info) => Ok(Some(info)),
|
||||
Err(e) => {
|
||||
error!("Failed to get bucket info: {} - {}", bucket, e);
|
||||
Err(Error::other(e))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn heal_bucket_metadata(&self, bucket: &str) -> Result<()> {
|
||||
debug!("Healing bucket metadata: {}", bucket);
|
||||
|
||||
let heal_opts = HealOpts {
|
||||
recursive: true,
|
||||
dry_run: false,
|
||||
remove: false,
|
||||
recreate: false,
|
||||
scan_mode: HealScanMode::Normal,
|
||||
update_parity: false,
|
||||
no_lock: false,
|
||||
pool: None,
|
||||
set: None,
|
||||
};
|
||||
|
||||
match self.heal_bucket(bucket, &heal_opts).await {
|
||||
Ok(_) => {
|
||||
info!("Successfully healed bucket metadata: {}", bucket);
|
||||
Ok(())
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to heal bucket metadata: {} - {}", bucket, e);
|
||||
Err(e)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn list_buckets(&self) -> Result<Vec<BucketInfo>> {
|
||||
debug!("Listing buckets");
|
||||
|
||||
match self.ecstore.list_bucket(&Default::default()).await {
|
||||
Ok(buckets) => Ok(buckets),
|
||||
Err(e) => {
|
||||
error!("Failed to list buckets: {}", e);
|
||||
Err(Error::other(e))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn object_exists(&self, bucket: &str, object: &str) -> Result<bool> {
|
||||
debug!("Checking object exists: {}/{}", bucket, object);
|
||||
|
||||
// Use get_object_info for efficient existence check without heavy heal operations
|
||||
match self.ecstore.get_object_info(bucket, object, &Default::default()).await {
|
||||
Ok(_) => Ok(true), // Object exists
|
||||
Err(e) => {
|
||||
// Map ObjectNotFound to false, other errors to false as well for safety
|
||||
if matches!(e, rustfs_ecstore::error::StorageError::ObjectNotFound(_, _)) {
|
||||
debug!("Object not found: {}/{}", bucket, object);
|
||||
Ok(false)
|
||||
} else {
|
||||
debug!("Error checking object existence {}/{}: {}", bucket, object, e);
|
||||
Ok(false) // Treat errors as non-existence to be safe
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_object_size(&self, bucket: &str, object: &str) -> Result<Option<u64>> {
|
||||
debug!("Getting object size: {}/{}", bucket, object);
|
||||
|
||||
match self.get_object_meta(bucket, object).await {
|
||||
Ok(Some(obj_info)) => Ok(Some(obj_info.size as u64)),
|
||||
Ok(None) => Ok(None),
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_object_checksum(&self, bucket: &str, object: &str) -> Result<Option<String>> {
|
||||
debug!("Getting object checksum: {}/{}", bucket, object);
|
||||
|
||||
match self.get_object_meta(bucket, object).await {
|
||||
Ok(Some(obj_info)) => {
|
||||
// Convert checksum bytes to hex string
|
||||
let checksum = obj_info.checksum.iter().map(|b| format!("{b:02x}")).collect::<String>();
|
||||
Ok(Some(checksum))
|
||||
}
|
||||
Ok(None) => Ok(None),
|
||||
Err(e) => Err(e),
|
||||
}
|
||||
}
|
||||
|
||||
async fn heal_object(
|
||||
&self,
|
||||
bucket: &str,
|
||||
object: &str,
|
||||
version_id: Option<&str>,
|
||||
opts: &HealOpts,
|
||||
) -> Result<(HealResultItem, Option<Error>)> {
|
||||
debug!("Healing object: {}/{}", bucket, object);
|
||||
|
||||
let version_id_str = version_id.unwrap_or("");
|
||||
|
||||
match self.ecstore.heal_object(bucket, object, version_id_str, opts).await {
|
||||
Ok((result, ecstore_error)) => {
|
||||
let error = ecstore_error.map(Error::other);
|
||||
info!("Heal object completed: {}/{} - result: {:?}, error: {:?}", bucket, object, result, error);
|
||||
Ok((result, error))
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Heal object failed: {}/{} - {}", bucket, object, e);
|
||||
Err(Error::other(e))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn heal_bucket(&self, bucket: &str, opts: &HealOpts) -> Result<HealResultItem> {
|
||||
debug!("Healing bucket: {}", bucket);
|
||||
|
||||
match self.ecstore.heal_bucket(bucket, opts).await {
|
||||
Ok(result) => {
|
||||
info!("Heal bucket completed: {} - result: {:?}", bucket, result);
|
||||
Ok(result)
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Heal bucket failed: {} - {}", bucket, e);
|
||||
Err(Error::other(e))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn heal_format(&self, dry_run: bool) -> Result<(HealResultItem, Option<Error>)> {
|
||||
debug!("Healing format (dry_run: {})", dry_run);
|
||||
|
||||
match self.ecstore.heal_format(dry_run).await {
|
||||
Ok((result, ecstore_error)) => {
|
||||
let error = ecstore_error.map(Error::other);
|
||||
info!("Heal format completed - result: {:?}, error: {:?}", result, error);
|
||||
Ok((result, error))
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Heal format failed: {}", e);
|
||||
Err(Error::other(e))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn list_objects_for_heal(&self, bucket: &str, prefix: &str) -> Result<Vec<String>> {
|
||||
debug!("Listing objects for heal: {}/{}", bucket, prefix);
|
||||
|
||||
// Use list_objects_v2 to get objects
|
||||
match self
|
||||
.ecstore
|
||||
.clone()
|
||||
.list_objects_v2(bucket, prefix, None, None, 1000, false, None)
|
||||
.await
|
||||
{
|
||||
Ok(list_info) => {
|
||||
let objects: Vec<String> = list_info.objects.into_iter().map(|obj| obj.name).collect();
|
||||
info!("Found {} objects for heal in {}/{}", objects.len(), bucket, prefix);
|
||||
Ok(objects)
|
||||
}
|
||||
Err(e) => {
|
||||
error!("Failed to list objects for heal: {}/{} - {}", bucket, prefix, e);
|
||||
Err(Error::other(e))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn get_disk_for_resume(&self, set_disk_id: &str) -> Result<DiskStore> {
|
||||
debug!("Getting disk for resume: {}", set_disk_id);
|
||||
|
||||
// Parse set_disk_id to extract pool and set indices
|
||||
let (pool_idx, set_idx) = crate::heal::utils::parse_set_disk_id(set_disk_id)?;
|
||||
|
||||
// Get the first available disk from the set
|
||||
let disks = self
|
||||
.ecstore
|
||||
.get_disks(pool_idx, set_idx)
|
||||
.await
|
||||
.map_err(|e| Error::TaskExecutionFailed {
|
||||
message: format!("Failed to get disks for pool {pool_idx} set {set_idx}: {e}"),
|
||||
})?;
|
||||
|
||||
// Find the first available disk
|
||||
if let Some(disk_store) = disks.into_iter().flatten().next() {
|
||||
info!("Found disk for resume: {:?}", disk_store);
|
||||
return Ok(disk_store);
|
||||
}
|
||||
|
||||
Err(Error::TaskExecutionFailed {
|
||||
message: format!("No available disk found for set_disk_id: {set_disk_id}"),
|
||||
})
|
||||
}
|
||||
}
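
// Example (sketch): the set-disk identifier passed to `get_disk_for_resume` is the
// `pool_{i}_set_{j}` string produced by `crate::heal::utils::format_set_disk_id`,
// so a resume caller would typically do:
//
//     let set_disk_id = crate::heal::utils::format_set_disk_id(0, 0);
//     let disk = storage.get_disk_for_resume(&set_disk_id).await?;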
|
||||
999
crates/ahm/src/heal/task.rs
Normal file
@@ -0,0 +1,999 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::heal::{ErasureSetHealer, progress::HealProgress, storage::HealStorageAPI};
|
||||
use crate::{Error, Result};
|
||||
use rustfs_common::heal_channel::{HealOpts, HealScanMode};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
future::Future,
|
||||
sync::Arc,
|
||||
time::{Duration, Instant, SystemTime},
|
||||
};
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::{error, info, warn};
|
||||
use uuid::Uuid;
|
||||
|
||||
/// Heal type
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum HealType {
|
||||
/// Object heal
|
||||
Object {
|
||||
bucket: String,
|
||||
object: String,
|
||||
version_id: Option<String>,
|
||||
},
|
||||
/// Bucket heal
|
||||
Bucket { bucket: String },
|
||||
/// Erasure Set heal (includes disk format repair)
|
||||
ErasureSet { buckets: Vec<String>, set_disk_id: String },
|
||||
/// Metadata heal
|
||||
Metadata { bucket: String, object: String },
|
||||
/// MRF heal
|
||||
MRF { meta_path: String },
|
||||
/// EC decode heal
|
||||
ECDecode {
|
||||
bucket: String,
|
||||
object: String,
|
||||
version_id: Option<String>,
|
||||
},
|
||||
}
|
||||
|
||||
/// Heal priority
|
||||
#[derive(Debug, Default, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize, Deserialize)]
|
||||
pub enum HealPriority {
|
||||
/// Low priority
|
||||
Low = 0,
|
||||
/// Normal priority
|
||||
#[default]
|
||||
Normal = 1,
|
||||
/// High priority
|
||||
High = 2,
|
||||
/// Urgent priority
|
||||
Urgent = 3,
|
||||
}
|
||||
|
||||
/// Heal options
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct HealOptions {
|
||||
/// Scan mode
|
||||
pub scan_mode: HealScanMode,
|
||||
/// Whether to remove corrupted data
|
||||
pub remove_corrupted: bool,
|
||||
/// Whether to recreate
|
||||
pub recreate_missing: bool,
|
||||
/// Whether to update parity
|
||||
pub update_parity: bool,
|
||||
/// Whether to recursively process
|
||||
pub recursive: bool,
|
||||
/// Whether to dry run
|
||||
pub dry_run: bool,
|
||||
/// Timeout
|
||||
pub timeout: Option<Duration>,
|
||||
/// pool index
|
||||
pub pool_index: Option<usize>,
|
||||
/// set index
|
||||
pub set_index: Option<usize>,
|
||||
}
|
||||
|
||||
impl Default for HealOptions {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
scan_mode: HealScanMode::Normal,
|
||||
remove_corrupted: false,
|
||||
recreate_missing: true,
|
||||
update_parity: true,
|
||||
recursive: false,
|
||||
dry_run: false,
|
||||
timeout: Some(Duration::from_secs(300)), // 5 minutes default timeout
|
||||
pool_index: None,
|
||||
set_index: None,
|
||||
}
|
||||
}
|
||||
}
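
// Example (sketch): callers usually start from the defaults and override only what a
// particular heal needs, e.g. a deep-scan dry run:
//
//     let opts = HealOptions {
//         dry_run: true,
//         scan_mode: HealScanMode::Deep,
//         ..Default::default()
//     };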
|
||||
|
||||
/// Heal task status
|
||||
#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)]
|
||||
pub enum HealTaskStatus {
|
||||
/// Pending
|
||||
Pending,
|
||||
/// Running
|
||||
Running,
|
||||
/// Completed
|
||||
Completed,
|
||||
/// Failed
|
||||
Failed { error: String },
|
||||
/// Cancelled
|
||||
Cancelled,
|
||||
/// Timeout
|
||||
Timeout,
|
||||
}
|
||||
|
||||
/// Heal request
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct HealRequest {
|
||||
/// Request ID
|
||||
pub id: String,
|
||||
/// Heal type
|
||||
pub heal_type: HealType,
|
||||
/// Heal options
|
||||
pub options: HealOptions,
|
||||
/// Priority
|
||||
pub priority: HealPriority,
|
||||
/// Created time
|
||||
pub created_at: SystemTime,
|
||||
}
|
||||
|
||||
impl HealRequest {
|
||||
pub fn new(heal_type: HealType, options: HealOptions, priority: HealPriority) -> Self {
|
||||
Self {
|
||||
id: Uuid::new_v4().to_string(),
|
||||
heal_type,
|
||||
options,
|
||||
priority,
|
||||
created_at: SystemTime::now(),
|
||||
}
|
||||
}
|
||||
|
||||
pub fn object(bucket: String, object: String, version_id: Option<String>) -> Self {
|
||||
Self::new(
|
||||
HealType::Object {
|
||||
bucket,
|
||||
object,
|
||||
version_id,
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::Normal,
|
||||
)
|
||||
}
|
||||
|
||||
pub fn bucket(bucket: String) -> Self {
|
||||
Self::new(HealType::Bucket { bucket }, HealOptions::default(), HealPriority::Normal)
|
||||
}
|
||||
|
||||
pub fn metadata(bucket: String, object: String) -> Self {
|
||||
Self::new(HealType::Metadata { bucket, object }, HealOptions::default(), HealPriority::High)
|
||||
}
|
||||
|
||||
pub fn ec_decode(bucket: String, object: String, version_id: Option<String>) -> Self {
|
||||
Self::new(
|
||||
HealType::ECDecode {
|
||||
bucket,
|
||||
object,
|
||||
version_id,
|
||||
},
|
||||
HealOptions::default(),
|
||||
HealPriority::Urgent,
|
||||
)
|
||||
}
|
||||
}
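
// Example (sketch): the convenience constructors cover the common cases, while
// `HealRequest::new` is the escape hatch for custom options or priorities. Names
// such as "mybucket" are placeholders.
//
//     let req = HealRequest::object("mybucket".into(), "a/b/object".into(), None);
//     let urgent = HealRequest::new(
//         HealType::Bucket { bucket: "mybucket".into() },
//         HealOptions::default(),
//         HealPriority::Urgent,
//     );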
|
||||
|
||||
/// Heal task
|
||||
pub struct HealTask {
|
||||
/// Task ID
|
||||
pub id: String,
|
||||
/// Heal type
|
||||
pub heal_type: HealType,
|
||||
/// Heal options
|
||||
pub options: HealOptions,
|
||||
/// Task status
|
||||
pub status: Arc<RwLock<HealTaskStatus>>,
|
||||
/// Progress tracking
|
||||
pub progress: Arc<RwLock<HealProgress>>,
|
||||
/// Created time
|
||||
pub created_at: SystemTime,
|
||||
/// Started time
|
||||
pub started_at: Arc<RwLock<Option<SystemTime>>>,
|
||||
/// Completed time
|
||||
pub completed_at: Arc<RwLock<Option<SystemTime>>>,
|
||||
/// Task start instant for timeout calculation (monotonic)
|
||||
task_start_instant: Arc<RwLock<Option<Instant>>>,
|
||||
/// Cancel token
|
||||
pub cancel_token: tokio_util::sync::CancellationToken,
|
||||
/// Storage layer interface
|
||||
pub storage: Arc<dyn HealStorageAPI>,
|
||||
}
|
||||
|
||||
impl HealTask {
|
||||
pub fn from_request(request: HealRequest, storage: Arc<dyn HealStorageAPI>) -> Self {
|
||||
Self {
|
||||
id: request.id,
|
||||
heal_type: request.heal_type,
|
||||
options: request.options,
|
||||
status: Arc::new(RwLock::new(HealTaskStatus::Pending)),
|
||||
progress: Arc::new(RwLock::new(HealProgress::new())),
|
||||
created_at: request.created_at,
|
||||
started_at: Arc::new(RwLock::new(None)),
|
||||
completed_at: Arc::new(RwLock::new(None)),
|
||||
task_start_instant: Arc::new(RwLock::new(None)),
|
||||
cancel_token: tokio_util::sync::CancellationToken::new(),
|
||||
storage,
|
||||
}
|
||||
}
|
||||
|
||||
async fn remaining_timeout(&self) -> Result<Option<Duration>> {
|
||||
if let Some(total) = self.options.timeout {
|
||||
let start_instant = { *self.task_start_instant.read().await };
|
||||
if let Some(started_at) = start_instant {
|
||||
let elapsed = started_at.elapsed();
|
||||
if elapsed >= total {
|
||||
return Err(Error::TaskTimeout);
|
||||
}
|
||||
return Ok(Some(total - elapsed));
|
||||
}
|
||||
Ok(Some(total))
|
||||
} else {
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
|
||||
async fn check_control_flags(&self) -> Result<()> {
|
||||
if self.cancel_token.is_cancelled() {
|
||||
return Err(Error::TaskCancelled);
|
||||
}
|
||||
// Only interested in propagating an error if the timeout has expired;
|
||||
// the actual Duration value is not needed here
|
||||
let _ = self.remaining_timeout().await?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
async fn await_with_control<F, T>(&self, fut: F) -> Result<T>
|
||||
where
|
||||
F: Future<Output = Result<T>> + Send,
|
||||
T: Send,
|
||||
{
|
||||
let cancel_token = self.cancel_token.clone();
|
||||
if let Some(remaining) = self.remaining_timeout().await? {
|
||||
if remaining.is_zero() {
|
||||
return Err(Error::TaskTimeout);
|
||||
}
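// Box::pin makes the future Unpin so it can be polled by reference in the select
// below, racing it against cancellation and the remaining timeout budget.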
|
||||
let mut fut = Box::pin(fut);
|
||||
tokio::select! {
|
||||
_ = cancel_token.cancelled() => Err(Error::TaskCancelled),
|
||||
_ = tokio::time::sleep(remaining) => Err(Error::TaskTimeout),
|
||||
result = &mut fut => result,
|
||||
}
|
||||
} else {
|
||||
tokio::select! {
|
||||
_ = cancel_token.cancelled() => Err(Error::TaskCancelled),
|
||||
result = fut => result,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn execute(&self) -> Result<()> {
|
||||
// update status and timestamps atomically to avoid race conditions
|
||||
let now = SystemTime::now();
|
||||
let start_instant = Instant::now();
|
||||
{
|
||||
let mut status = self.status.write().await;
|
||||
let mut started_at = self.started_at.write().await;
|
||||
let mut task_start_instant = self.task_start_instant.write().await;
|
||||
*status = HealTaskStatus::Running;
|
||||
*started_at = Some(now);
|
||||
*task_start_instant = Some(start_instant);
|
||||
}
|
||||
|
||||
info!("Starting heal task: {} with type: {:?}", self.id, self.heal_type);
|
||||
|
||||
let result = match &self.heal_type {
|
||||
HealType::Object {
|
||||
bucket,
|
||||
object,
|
||||
version_id,
|
||||
} => self.heal_object(bucket, object, version_id.as_deref()).await,
|
||||
HealType::Bucket { bucket } => self.heal_bucket(bucket).await,
|
||||
|
||||
HealType::Metadata { bucket, object } => self.heal_metadata(bucket, object).await,
|
||||
HealType::MRF { meta_path } => self.heal_mrf(meta_path).await,
|
||||
HealType::ECDecode {
|
||||
bucket,
|
||||
object,
|
||||
version_id,
|
||||
} => self.heal_ec_decode(bucket, object, version_id.as_deref()).await,
|
||||
HealType::ErasureSet { buckets, set_disk_id } => self.heal_erasure_set(buckets.clone(), set_disk_id.clone()).await,
|
||||
};
|
||||
|
||||
// update completed time and status
|
||||
{
|
||||
let mut completed_at = self.completed_at.write().await;
|
||||
*completed_at = Some(SystemTime::now());
|
||||
}
|
||||
|
||||
match &result {
|
||||
Ok(_) => {
|
||||
let mut status = self.status.write().await;
|
||||
*status = HealTaskStatus::Completed;
|
||||
info!("Heal task completed successfully: {}", self.id);
|
||||
}
|
||||
Err(Error::TaskCancelled) => {
|
||||
let mut status = self.status.write().await;
|
||||
*status = HealTaskStatus::Cancelled;
|
||||
info!("Heal task was cancelled: {}", self.id);
|
||||
}
|
||||
Err(Error::TaskTimeout) => {
|
||||
let mut status = self.status.write().await;
|
||||
*status = HealTaskStatus::Timeout;
|
||||
warn!("Heal task timed out: {}", self.id);
|
||||
}
|
||||
Err(e) => {
|
||||
let mut status = self.status.write().await;
|
||||
*status = HealTaskStatus::Failed { error: e.to_string() };
|
||||
error!("Heal task failed: {} with error: {}", self.id, e);
|
||||
}
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
pub async fn cancel(&self) -> Result<()> {
|
||||
self.cancel_token.cancel();
|
||||
let mut status = self.status.write().await;
|
||||
*status = HealTaskStatus::Cancelled;
|
||||
info!("Heal task cancelled: {}", self.id);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn get_status(&self) -> HealTaskStatus {
|
||||
self.status.read().await.clone()
|
||||
}
|
||||
|
||||
pub async fn get_progress(&self) -> HealProgress {
|
||||
self.progress.read().await.clone()
|
||||
}
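
// Example (sketch): a typical end-to-end flow for driving a task, assuming a
// `storage: Arc<dyn HealStorageAPI>` handle built during startup.
//
//     let task = HealTask::from_request(
//         HealRequest::object("mybucket".into(), "a/b/object".into(), None),
//         storage.clone(),
//     );
//     match task.execute().await {
//         Ok(()) => info!("heal finished: {:?}", task.get_status().await),
//         Err(e) => warn!("heal failed: {}", e),
//     }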
|
||||
|
||||
// Specific heal implementation methods
|
||||
async fn heal_object(&self, bucket: &str, object: &str, version_id: Option<&str>) -> Result<()> {
|
||||
info!("Healing object: {}/{}", bucket, object);
|
||||
|
||||
// update progress
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.set_current_object(Some(format!("{bucket}/{object}")));
|
||||
progress.update_progress(0, 3, 0, 0);
|
||||
}
|
||||
|
||||
// Step 1: Check if object exists and get metadata
|
||||
info!("Step 1: Checking object existence and metadata");
|
||||
self.check_control_flags().await?;
|
||||
let object_exists = self.await_with_control(self.storage.object_exists(bucket, object)).await?;
|
||||
if !object_exists {
|
||||
warn!("Object does not exist: {}/{}", bucket, object);
|
||||
if self.options.recreate_missing {
|
||||
info!("Attempting to recreate missing object: {}/{}", bucket, object);
|
||||
return self.recreate_missing_object(bucket, object, version_id).await;
|
||||
} else {
|
||||
return Err(Error::TaskExecutionFailed {
|
||||
message: format!("Object not found: {bucket}/{object}"),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(1, 3, 0, 0);
|
||||
}
|
||||
|
||||
// Step 2: directly call ecstore to perform heal
|
||||
info!("Step 2: Performing heal using ecstore");
|
||||
let heal_opts = HealOpts {
|
||||
recursive: self.options.recursive,
|
||||
dry_run: self.options.dry_run,
|
||||
remove: self.options.remove_corrupted,
|
||||
recreate: self.options.recreate_missing,
|
||||
scan_mode: self.options.scan_mode,
|
||||
update_parity: self.options.update_parity,
|
||||
no_lock: false,
|
||||
pool: self.options.pool_index,
|
||||
set: self.options.set_index,
|
||||
};
|
||||
|
||||
let heal_result = self
|
||||
.await_with_control(self.storage.heal_object(bucket, object, version_id, &heal_opts))
|
||||
.await;
|
||||
|
||||
match heal_result {
|
||||
Ok((result, error)) => {
|
||||
if let Some(e) = error {
|
||||
// Check if this is a "File not found" error during delete operations
|
||||
let error_msg = format!("{e}");
|
||||
if error_msg.contains("File not found") || error_msg.contains("not found") {
|
||||
info!(
|
||||
"Object {}/{} not found during heal - likely deleted intentionally, treating as successful",
|
||||
bucket, object
|
||||
);
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(3, 3, 0, 0);
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
error!("Heal operation failed: {}/{} - {}", bucket, object, e);
|
||||
|
||||
// If heal failed and remove_corrupted is enabled, delete the corrupted object
|
||||
if self.options.remove_corrupted {
|
||||
warn!("Removing corrupted object: {}/{}", bucket, object);
|
||||
if !self.options.dry_run {
|
||||
self.await_with_control(self.storage.delete_object(bucket, object)).await?;
|
||||
info!("Successfully deleted corrupted object: {}/{}", bucket, object);
|
||||
} else {
|
||||
info!("Dry run mode - would delete corrupted object: {}/{}", bucket, object);
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(3, 3, 0, 0);
|
||||
}
|
||||
|
||||
return Err(Error::TaskExecutionFailed {
|
||||
message: format!("Failed to heal object {bucket}/{object}: {e}"),
|
||||
});
|
||||
}
|
||||
|
||||
// Step 3: Verify heal result
|
||||
info!("Step 3: Verifying heal result");
|
||||
let object_size = result.object_size as u64;
|
||||
info!(
|
||||
"Heal completed successfully: {}/{} ({} bytes, {} drives healed)",
|
||||
bucket,
|
||||
object,
|
||||
object_size,
|
||||
result.after.drives.len()
|
||||
);
|
||||
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(3, 3, object_size, object_size);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
|
||||
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
|
||||
Err(e) => {
|
||||
// Check if this is a "File not found" error during delete operations
|
||||
let error_msg = format!("{e}");
|
||||
if error_msg.contains("File not found") || error_msg.contains("not found") {
|
||||
info!(
|
||||
"Object {}/{} not found during heal - likely deleted intentionally, treating as successful",
|
||||
bucket, object
|
||||
);
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(3, 3, 0, 0);
|
||||
}
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
error!("Heal operation failed: {}/{} - {}", bucket, object, e);
|
||||
|
||||
// If heal failed and remove_corrupted is enabled, delete the corrupted object
|
||||
if self.options.remove_corrupted {
|
||||
warn!("Removing corrupted object: {}/{}", bucket, object);
|
||||
if !self.options.dry_run {
|
||||
self.await_with_control(self.storage.delete_object(bucket, object)).await?;
|
||||
info!("Successfully deleted corrupted object: {}/{}", bucket, object);
|
||||
} else {
|
||||
info!("Dry run mode - would delete corrupted object: {}/{}", bucket, object);
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(3, 3, 0, 0);
|
||||
}
|
||||
|
||||
Err(Error::TaskExecutionFailed {
|
||||
message: format!("Failed to heal object {bucket}/{object}: {e}"),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Recreate missing object (for EC decode scenarios)
|
||||
async fn recreate_missing_object(&self, bucket: &str, object: &str, version_id: Option<&str>) -> Result<()> {
|
||||
info!("Attempting to recreate missing object: {}/{}", bucket, object);
|
||||
|
||||
// Use ecstore's heal_object with recreate option
|
||||
let heal_opts = HealOpts {
|
||||
recursive: false,
|
||||
dry_run: self.options.dry_run,
|
||||
remove: false,
|
||||
recreate: true,
|
||||
scan_mode: HealScanMode::Deep,
|
||||
update_parity: true,
|
||||
no_lock: false,
|
||||
pool: None,
|
||||
set: None,
|
||||
};
|
||||
|
||||
match self
|
||||
.await_with_control(self.storage.heal_object(bucket, object, version_id, &heal_opts))
|
||||
.await
|
||||
{
|
||||
Ok((result, error)) => {
|
||||
if let Some(e) = error {
|
||||
error!("Failed to recreate missing object: {}/{} - {}", bucket, object, e);
|
||||
return Err(Error::TaskExecutionFailed {
|
||||
message: format!("Failed to recreate missing object {bucket}/{object}: {e}"),
|
||||
});
|
||||
}
|
||||
|
||||
let object_size = result.object_size as u64;
|
||||
info!("Successfully recreated missing object: {}/{} ({} bytes)", bucket, object, object_size);
|
||||
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(4, 4, object_size, object_size);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
|
||||
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
|
||||
Err(e) => {
|
||||
error!("Failed to recreate missing object: {}/{} - {}", bucket, object, e);
|
||||
Err(Error::TaskExecutionFailed {
|
||||
message: format!("Failed to recreate missing object {bucket}/{object}: {e}"),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn heal_bucket(&self, bucket: &str) -> Result<()> {
|
||||
info!("Healing bucket: {}", bucket);
|
||||
|
||||
// update progress
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.set_current_object(Some(format!("bucket: {bucket}")));
|
||||
progress.update_progress(0, 3, 0, 0);
|
||||
}
|
||||
|
||||
// Step 1: Check if bucket exists
|
||||
info!("Step 1: Checking bucket existence");
|
||||
self.check_control_flags().await?;
|
||||
let bucket_exists = self.await_with_control(self.storage.get_bucket_info(bucket)).await?.is_some();
|
||||
if !bucket_exists {
|
||||
warn!("Bucket does not exist: {}", bucket);
|
||||
return Err(Error::TaskExecutionFailed {
|
||||
message: format!("Bucket not found: {bucket}"),
|
||||
});
|
||||
}
|
||||
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(1, 3, 0, 0);
|
||||
}
|
||||
|
||||
// Step 2: Perform bucket heal using ecstore
|
||||
info!("Step 2: Performing bucket heal using ecstore");
|
||||
let heal_opts = HealOpts {
|
||||
recursive: self.options.recursive,
|
||||
dry_run: self.options.dry_run,
|
||||
remove: self.options.remove_corrupted,
|
||||
recreate: self.options.recreate_missing,
|
||||
scan_mode: self.options.scan_mode,
|
||||
update_parity: self.options.update_parity,
|
||||
no_lock: false,
|
||||
pool: self.options.pool_index,
|
||||
set: self.options.set_index,
|
||||
};
|
||||
|
||||
let heal_result = self.await_with_control(self.storage.heal_bucket(bucket, &heal_opts)).await;
|
||||
|
||||
match heal_result {
|
||||
Ok(result) => {
|
||||
info!("Bucket heal completed successfully: {} ({} drives)", bucket, result.after.drives.len());
|
||||
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(3, 3, 0, 0);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
|
||||
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
|
||||
Err(e) => {
|
||||
error!("Bucket heal failed: {} - {}", bucket, e);
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(3, 3, 0, 0);
|
||||
}
|
||||
Err(Error::TaskExecutionFailed {
|
||||
message: format!("Failed to heal bucket {bucket}: {e}"),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn heal_metadata(&self, bucket: &str, object: &str) -> Result<()> {
|
||||
info!("Healing metadata: {}/{}", bucket, object);
|
||||
|
||||
// update progress
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.set_current_object(Some(format!("metadata: {bucket}/{object}")));
|
||||
progress.update_progress(0, 3, 0, 0);
|
||||
}
|
||||
|
||||
// Step 1: Check if object exists
|
||||
info!("Step 1: Checking object existence");
|
||||
self.check_control_flags().await?;
|
||||
let object_exists = self.await_with_control(self.storage.object_exists(bucket, object)).await?;
|
||||
if !object_exists {
|
||||
warn!("Object does not exist: {}/{}", bucket, object);
|
||||
return Err(Error::TaskExecutionFailed {
|
||||
message: format!("Object not found: {bucket}/{object}"),
|
||||
});
|
||||
}
|
||||
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(1, 3, 0, 0);
|
||||
}
|
||||
|
||||
// Step 2: Perform metadata heal using ecstore
|
||||
info!("Step 2: Performing metadata heal using ecstore");
|
||||
let heal_opts = HealOpts {
|
||||
recursive: false,
|
||||
dry_run: self.options.dry_run,
|
||||
remove: false,
|
||||
recreate: false,
|
||||
scan_mode: HealScanMode::Deep,
|
||||
update_parity: false,
|
||||
no_lock: false,
|
||||
pool: self.options.pool_index,
|
||||
set: self.options.set_index,
|
||||
};
|
||||
|
||||
let heal_result = self
|
||||
.await_with_control(self.storage.heal_object(bucket, object, None, &heal_opts))
|
||||
.await;
|
||||
|
||||
match heal_result {
|
||||
Ok((result, error)) => {
|
||||
if let Some(e) = error {
|
||||
error!("Metadata heal failed: {}/{} - {}", bucket, object, e);
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(3, 3, 0, 0);
|
||||
}
|
||||
return Err(Error::TaskExecutionFailed {
|
||||
message: format!("Failed to heal metadata {bucket}/{object}: {e}"),
|
||||
});
|
||||
}
|
||||
|
||||
info!(
|
||||
"Metadata heal completed successfully: {}/{} ({} drives)",
|
||||
bucket,
|
||||
object,
|
||||
result.after.drives.len()
|
||||
);
|
||||
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(3, 3, 0, 0);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
|
||||
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
|
||||
Err(e) => {
|
||||
error!("Metadata heal failed: {}/{} - {}", bucket, object, e);
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(3, 3, 0, 0);
|
||||
}
|
||||
Err(Error::TaskExecutionFailed {
|
||||
message: format!("Failed to heal metadata {bucket}/{object}: {e}"),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn heal_mrf(&self, meta_path: &str) -> Result<()> {
|
||||
info!("Healing MRF: {}", meta_path);
|
||||
|
||||
// update progress
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.set_current_object(Some(format!("mrf: {meta_path}")));
|
||||
progress.update_progress(0, 2, 0, 0);
|
||||
}
|
||||
|
||||
// Parse meta_path to extract bucket and object
|
||||
let parts: Vec<&str> = meta_path.split('/').collect();
|
||||
if parts.len() < 2 {
|
||||
return Err(Error::TaskExecutionFailed {
|
||||
message: format!("Invalid meta path format: {meta_path}"),
|
||||
});
|
||||
}
|
||||
|
||||
let bucket = parts[0];
|
||||
let object = parts[1..].join("/");
|
||||
|
||||
// Step 1: Perform MRF heal using ecstore
|
||||
info!("Step 1: Performing MRF heal using ecstore");
|
||||
let heal_opts = HealOpts {
|
||||
recursive: true,
|
||||
dry_run: self.options.dry_run,
|
||||
remove: self.options.remove_corrupted,
|
||||
recreate: self.options.recreate_missing,
|
||||
scan_mode: HealScanMode::Deep,
|
||||
update_parity: true,
|
||||
no_lock: false,
|
||||
pool: None,
|
||||
set: None,
|
||||
};
|
||||
|
||||
let heal_result = self
|
||||
.await_with_control(self.storage.heal_object(bucket, &object, None, &heal_opts))
|
||||
.await;
|
||||
|
||||
match heal_result {
|
||||
Ok((result, error)) => {
|
||||
if let Some(e) = error {
|
||||
error!("MRF heal failed: {} - {}", meta_path, e);
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(2, 2, 0, 0);
|
||||
}
|
||||
return Err(Error::TaskExecutionFailed {
|
||||
message: format!("Failed to heal MRF {meta_path}: {e}"),
|
||||
});
|
||||
}
|
||||
|
||||
info!("MRF heal completed successfully: {} ({} drives)", meta_path, result.after.drives.len());
|
||||
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(2, 2, 0, 0);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
|
||||
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
|
||||
Err(e) => {
|
||||
error!("MRF heal failed: {} - {}", meta_path, e);
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(2, 2, 0, 0);
|
||||
}
|
||||
Err(Error::TaskExecutionFailed {
|
||||
message: format!("Failed to heal MRF {meta_path}: {e}"),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn heal_ec_decode(&self, bucket: &str, object: &str, version_id: Option<&str>) -> Result<()> {
|
||||
info!("Healing EC decode: {}/{}", bucket, object);
|
||||
|
||||
// update progress
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.set_current_object(Some(format!("ec_decode: {bucket}/{object}")));
|
||||
progress.update_progress(0, 3, 0, 0);
|
||||
}
|
||||
|
||||
// Step 1: Check if object exists
|
||||
info!("Step 1: Checking object existence");
|
||||
self.check_control_flags().await?;
|
||||
let object_exists = self.await_with_control(self.storage.object_exists(bucket, object)).await?;
|
||||
if !object_exists {
|
||||
warn!("Object does not exist: {}/{}", bucket, object);
|
||||
return Err(Error::TaskExecutionFailed {
|
||||
message: format!("Object not found: {bucket}/{object}"),
|
||||
});
|
||||
}
|
||||
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(1, 3, 0, 0);
|
||||
}
|
||||
|
||||
// Step 2: Perform EC decode heal using ecstore
|
||||
info!("Step 2: Performing EC decode heal using ecstore");
|
||||
let heal_opts = HealOpts {
|
||||
recursive: false,
|
||||
dry_run: self.options.dry_run,
|
||||
remove: false,
|
||||
recreate: true,
|
||||
scan_mode: HealScanMode::Deep,
|
||||
update_parity: true,
|
||||
no_lock: false,
|
||||
pool: None,
|
||||
set: None,
|
||||
};
|
||||
|
||||
let heal_result = self
|
||||
.await_with_control(self.storage.heal_object(bucket, object, version_id, &heal_opts))
|
||||
.await;
|
||||
|
||||
match heal_result {
|
||||
Ok((result, error)) => {
|
||||
if let Some(e) = error {
|
||||
error!("EC decode heal failed: {}/{} - {}", bucket, object, e);
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(3, 3, 0, 0);
|
||||
}
|
||||
return Err(Error::TaskExecutionFailed {
|
||||
message: format!("Failed to heal EC decode {bucket}/{object}: {e}"),
|
||||
});
|
||||
}
|
||||
|
||||
let object_size = result.object_size as u64;
|
||||
info!(
|
||||
"EC decode heal completed successfully: {}/{} ({} bytes, {} drives)",
|
||||
bucket,
|
||||
object,
|
||||
object_size,
|
||||
result.after.drives.len()
|
||||
);
|
||||
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(3, 3, object_size, object_size);
|
||||
}
|
||||
Ok(())
|
||||
}
|
||||
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
|
||||
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
|
||||
Err(e) => {
|
||||
error!("EC decode heal failed: {}/{} - {}", bucket, object, e);
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(3, 3, 0, 0);
|
||||
}
|
||||
Err(Error::TaskExecutionFailed {
|
||||
message: format!("Failed to heal EC decode {bucket}/{object}: {e}"),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn heal_erasure_set(&self, buckets: Vec<String>, set_disk_id: String) -> Result<()> {
|
||||
info!("Healing Erasure Set: {} ({} buckets)", set_disk_id, buckets.len());
|
||||
|
||||
// update progress
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.set_current_object(Some(format!("erasure_set: {} ({} buckets)", set_disk_id, buckets.len())));
|
||||
progress.update_progress(0, 4, 0, 0);
|
||||
}
|
||||
|
||||
let buckets = if buckets.is_empty() {
|
||||
info!("No buckets specified, listing all buckets");
|
||||
let bucket_infos = self.await_with_control(self.storage.list_buckets()).await?;
|
||||
bucket_infos.into_iter().map(|info| info.name).collect()
|
||||
} else {
|
||||
buckets
|
||||
};
|
||||
|
||||
// Step 1: Perform disk format heal using ecstore
|
||||
info!("Step 1: Performing disk format heal using ecstore");
|
||||
let format_result = self.await_with_control(self.storage.heal_format(self.options.dry_run)).await;
|
||||
|
||||
match format_result {
|
||||
Ok((result, error)) => {
|
||||
if let Some(e) = error {
|
||||
error!("Disk format heal failed: {} - {}", set_disk_id, e);
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(4, 4, 0, 0);
|
||||
}
|
||||
return Err(Error::TaskExecutionFailed {
|
||||
message: format!("Failed to heal disk format for {set_disk_id}: {e}"),
|
||||
});
|
||||
}
|
||||
|
||||
info!(
|
||||
"Disk format heal completed successfully: {} ({} drives)",
|
||||
set_disk_id,
|
||||
result.after.drives.len()
|
||||
);
|
||||
}
|
||||
Err(Error::TaskCancelled) => return Err(Error::TaskCancelled),
|
||||
Err(Error::TaskTimeout) => return Err(Error::TaskTimeout),
|
||||
Err(e) => {
|
||||
error!("Disk format heal failed: {} - {}", set_disk_id, e);
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(4, 4, 0, 0);
|
||||
}
|
||||
return Err(Error::TaskExecutionFailed {
|
||||
message: format!("Failed to heal disk format for {set_disk_id}: {e}"),
|
||||
});
|
||||
}
|
||||
}
|
||||
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(1, 4, 0, 0);
|
||||
}
|
||||
|
||||
// Step 2: Get disk for resume functionality
|
||||
info!("Step 2: Getting disk for resume functionality");
|
||||
let disk = self
|
||||
.await_with_control(self.storage.get_disk_for_resume(&set_disk_id))
|
||||
.await?;
|
||||
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(2, 4, 0, 0);
|
||||
}
|
||||
|
||||
// Step 3: Heal bucket structure
|
||||
// Check control flags before each iteration to ensure timely cancellation.
|
||||
// Each heal_bucket call may handle timeout/cancellation internally, see its implementation for details.
|
||||
for bucket in buckets.iter() {
|
||||
// Check control flags before starting each bucket heal
|
||||
self.check_control_flags().await?;
|
||||
// heal_bucket internally uses await_with_control for timeout/cancellation handling
|
||||
if let Err(err) = self.heal_bucket(bucket).await {
|
||||
// Check if error is due to cancellation or timeout
|
||||
if matches!(err, Error::TaskCancelled | Error::TaskTimeout) {
|
||||
return Err(err);
|
||||
}
|
||||
info!("Bucket heal failed: {}", err.to_string());
|
||||
}
|
||||
}
|
||||
|
||||
// Step 4: Create erasure set healer with resume support
info!("Step 4: Creating erasure set healer with resume support");
|
||||
let erasure_healer = ErasureSetHealer::new(self.storage.clone(), self.progress.clone(), self.cancel_token.clone(), disk);
|
||||
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(3, 4, 0, 0);
|
||||
}
|
||||
|
||||
// Step 5: Execute erasure set heal with resume
info!("Step 5: Executing erasure set heal with resume");
|
||||
let result = erasure_healer.heal_erasure_set(&buckets, &set_disk_id).await;
|
||||
|
||||
{
|
||||
let mut progress = self.progress.write().await;
|
||||
progress.update_progress(4, 4, 0, 0);
|
||||
}
|
||||
|
||||
match result {
|
||||
Ok(_) => {
|
||||
info!("Erasure set heal completed successfully: {} ({} buckets)", set_disk_id, buckets.len());
|
||||
Ok(())
|
||||
}
|
||||
Err(Error::TaskCancelled) => Err(Error::TaskCancelled),
|
||||
Err(Error::TaskTimeout) => Err(Error::TaskTimeout),
|
||||
Err(e) => {
|
||||
error!("Erasure set heal failed: {} - {}", set_disk_id, e);
|
||||
Err(Error::TaskExecutionFailed {
|
||||
message: format!("Failed to heal erasure set {set_disk_id}: {e}"),
|
||||
})
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl std::fmt::Debug for HealTask {
|
||||
fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
|
||||
f.debug_struct("HealTask")
|
||||
.field("id", &self.id)
|
||||
.field("heal_type", &self.heal_type)
|
||||
.field("options", &self.options)
|
||||
.field("created_at", &self.created_at)
|
||||
.finish()
|
||||
}
|
||||
}
|
||||
110
crates/ahm/src/heal/utils.rs
Normal file
@@ -0,0 +1,110 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::{Error, Result};
|
||||
|
||||
/// Prefix for pool index in set disk identifiers.
|
||||
const POOL_PREFIX: &str = "pool";
|
||||
/// Prefix for set index in set disk identifiers.
|
||||
const SET_PREFIX: &str = "set";
|
||||
|
||||
/// Format a set disk identifier using unsigned indices.
|
||||
pub fn format_set_disk_id(pool_idx: usize, set_idx: usize) -> String {
|
||||
format!("{POOL_PREFIX}_{pool_idx}_{SET_PREFIX}_{set_idx}")
|
||||
}
|
||||
|
||||
/// Format a set disk identifier from signed indices.
|
||||
pub fn format_set_disk_id_from_i32(pool_idx: i32, set_idx: i32) -> Option<String> {
|
||||
if pool_idx < 0 || set_idx < 0 {
|
||||
None
|
||||
} else {
|
||||
Some(format_set_disk_id(pool_idx as usize, set_idx as usize))
|
||||
}
|
||||
}
|
||||
|
||||
/// Normalize external set disk identifiers into the canonical format.
|
||||
pub fn normalize_set_disk_id(raw: &str) -> Option<String> {
|
||||
if raw.starts_with(&format!("{POOL_PREFIX}_")) {
|
||||
Some(raw.to_string())
|
||||
} else {
|
||||
parse_compact_set_disk_id(raw).map(|(pool, set)| format_set_disk_id(pool, set))
|
||||
}
|
||||
}
|
||||
|
||||
/// Parse a canonical set disk identifier into pool/set indices.
|
||||
pub fn parse_set_disk_id(raw: &str) -> Result<(usize, usize)> {
|
||||
let parts: Vec<&str> = raw.split('_').collect();
|
||||
if parts.len() != 4 || parts[0] != POOL_PREFIX || parts[2] != SET_PREFIX {
|
||||
return Err(Error::TaskExecutionFailed {
|
||||
message: format!("Invalid set_disk_id format: {raw}"),
|
||||
});
|
||||
}
|
||||
|
||||
let pool_idx = parts[1].parse::<usize>().map_err(|_| Error::TaskExecutionFailed {
|
||||
message: format!("Invalid pool index in set_disk_id: {raw}"),
|
||||
})?;
|
||||
let set_idx = parts[3].parse::<usize>().map_err(|_| Error::TaskExecutionFailed {
|
||||
message: format!("Invalid set index in set_disk_id: {raw}"),
|
||||
})?;
|
||||
Ok((pool_idx, set_idx))
|
||||
}
|
||||
|
||||
fn parse_compact_set_disk_id(raw: &str) -> Option<(usize, usize)> {
|
||||
let (pool, set) = raw.split_once('_')?;
|
||||
let pool_idx = pool.parse::<usize>().ok()?;
|
||||
let set_idx = set.parse::<usize>().ok()?;
|
||||
Some((pool_idx, set_idx))
|
||||
}
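For reference, a minimal round-trip sketch of the helpers above (illustrative only, not part of this diff); it simply restates the parsing rules the unit tests below exercise one function at a time, assuming the module's `Result` alias:

    // Compact ids expand to the canonical form, and parsing recovers the indices.
    fn set_disk_id_round_trip() -> Result<()> {
        let canonical = format_set_disk_id(2, 7); // "pool_2_set_7"
        assert_eq!(normalize_set_disk_id("2_7").as_deref(), Some(canonical.as_str()));
        assert_eq!(parse_set_disk_id(&canonical)?, (2, 7));
        Ok(())
    }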
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn format_from_unsigned_indices() {
|
||||
assert_eq!(format_set_disk_id(1, 2), "pool_1_set_2");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn format_from_signed_indices() {
|
||||
assert_eq!(format_set_disk_id_from_i32(3, 4), Some("pool_3_set_4".into()));
|
||||
assert_eq!(format_set_disk_id_from_i32(-1, 4), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn normalize_compact_identifier() {
|
||||
assert_eq!(normalize_set_disk_id("3_5"), Some("pool_3_set_5".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn normalize_prefixed_identifier() {
|
||||
assert_eq!(normalize_set_disk_id("pool_7_set_1"), Some("pool_7_set_1".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn normalize_invalid_identifier() {
|
||||
assert_eq!(normalize_set_disk_id("invalid"), None);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_prefixed_identifier() {
|
||||
assert_eq!(parse_set_disk_id("pool_9_set_3").unwrap(), (9, 3));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn parse_invalid_identifier() {
|
||||
assert!(parse_set_disk_id("bad").is_err());
|
||||
assert!(parse_set_disk_id("pool_X_set_1").is_err());
|
||||
}
|
||||
}
|
||||
111
crates/ahm/src/lib.rs
Normal file
@@ -0,0 +1,111 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
mod error;
|
||||
pub mod heal;
|
||||
pub mod scanner;
|
||||
|
||||
pub use error::{Error, Result};
|
||||
pub use heal::{HealManager, HealOptions, HealPriority, HealRequest, HealType, channel::HealChannelProcessor};
|
||||
pub use scanner::Scanner;
|
||||
use std::sync::{Arc, OnceLock};
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::{error, info};
|
||||
|
||||
// Global cancellation token for AHM services (scanner and other background tasks)
|
||||
static GLOBAL_AHM_SERVICES_CANCEL_TOKEN: OnceLock<CancellationToken> = OnceLock::new();
|
||||
|
||||
/// Initialize the global AHM services cancellation token
|
||||
pub fn init_ahm_services_cancel_token(cancel_token: CancellationToken) -> Result<()> {
|
||||
GLOBAL_AHM_SERVICES_CANCEL_TOKEN
|
||||
.set(cancel_token)
|
||||
.map_err(|_| Error::Config("AHM services cancel token already initialized".to_string()))
|
||||
}
|
||||
|
||||
/// Get the global AHM services cancellation token
|
||||
pub fn get_ahm_services_cancel_token() -> Option<&'static CancellationToken> {
|
||||
GLOBAL_AHM_SERVICES_CANCEL_TOKEN.get()
|
||||
}
|
||||
|
||||
/// Create and initialize the global AHM services cancellation token
|
||||
pub fn create_ahm_services_cancel_token() -> CancellationToken {
|
||||
let cancel_token = CancellationToken::new();
|
||||
init_ahm_services_cancel_token(cancel_token.clone()).expect("AHM services cancel token already initialized");
|
||||
cancel_token
|
||||
}
|
||||
|
||||
/// Shutdown all AHM services gracefully
|
||||
pub fn shutdown_ahm_services() {
|
||||
if let Some(cancel_token) = GLOBAL_AHM_SERVICES_CANCEL_TOKEN.get() {
|
||||
cancel_token.cancel();
|
||||
}
|
||||
}
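A minimal sketch of the intended lifecycle for this global token (illustrative only; the wrapper function and the signal-handling step are assumptions, not part of this diff):

    // Hypothetical startup/shutdown wiring for AHM background services.
    async fn run_ahm_services() {
        // Create and register the global token once at startup.
        let cancel_token = create_ahm_services_cancel_token();

        // Background services clone the token and exit when it is cancelled.
        let worker_token = cancel_token.clone();
        let worker = tokio::spawn(async move {
            worker_token.cancelled().await;
            // ... flush state and return ...
        });

        // On shutdown (e.g. after SIGTERM) cancel every AHM service at once.
        shutdown_ahm_services();
        let _ = worker.await;
    }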
|
||||
/// Global heal manager instance
|
||||
static GLOBAL_HEAL_MANAGER: OnceLock<Arc<HealManager>> = OnceLock::new();
|
||||
|
||||
/// Global heal channel processor instance
|
||||
static GLOBAL_HEAL_CHANNEL_PROCESSOR: OnceLock<Arc<tokio::sync::Mutex<HealChannelProcessor>>> = OnceLock::new();
|
||||
|
||||
/// Initialize and start heal manager with channel processor
|
||||
pub async fn init_heal_manager(
|
||||
storage: Arc<dyn heal::storage::HealStorageAPI>,
|
||||
config: Option<heal::manager::HealConfig>,
|
||||
) -> Result<Arc<HealManager>> {
|
||||
// Create heal manager
|
||||
let heal_manager = Arc::new(HealManager::new(storage, config));
|
||||
|
||||
// Start heal manager
|
||||
heal_manager.start().await?;
|
||||
|
||||
// Store global instance
|
||||
GLOBAL_HEAL_MANAGER
|
||||
.set(heal_manager.clone())
|
||||
.map_err(|_| Error::Config("Heal manager already initialized".to_string()))?;
|
||||
|
||||
// Initialize heal channel
|
||||
let channel_receiver = rustfs_common::heal_channel::init_heal_channel();
|
||||
|
||||
// Create channel processor
|
||||
let channel_processor = HealChannelProcessor::new(heal_manager.clone());
|
||||
|
||||
// Store channel processor instance first
|
||||
GLOBAL_HEAL_CHANNEL_PROCESSOR
|
||||
.set(Arc::new(tokio::sync::Mutex::new(channel_processor)))
|
||||
.map_err(|_| Error::Config("Heal channel processor already initialized".to_string()))?;
|
||||
|
||||
// Start channel processor in background
|
||||
let receiver = channel_receiver;
|
||||
tokio::spawn(async move {
|
||||
if let Some(processor_guard) = GLOBAL_HEAL_CHANNEL_PROCESSOR.get() {
|
||||
let mut processor = processor_guard.lock().await;
|
||||
if let Err(e) = processor.start(receiver).await {
|
||||
error!("Heal channel processor failed: {}", e);
|
||||
}
|
||||
}
|
||||
});
|
||||
|
||||
info!("Heal manager with channel processor initialized successfully");
|
||||
Ok(heal_manager)
|
||||
}
|
||||
|
||||
/// Get global heal manager instance
|
||||
pub fn get_heal_manager() -> Option<&'static Arc<HealManager>> {
|
||||
GLOBAL_HEAL_MANAGER.get()
|
||||
}
|
||||
|
||||
/// Get global heal channel processor instance
|
||||
pub fn get_heal_channel_processor() -> Option<&'static Arc<tokio::sync::Mutex<HealChannelProcessor>>> {
|
||||
GLOBAL_HEAL_CHANNEL_PROCESSOR.get()
|
||||
}
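A sketch of the expected call sequence at server startup (illustrative only; `storage` is assumed to be an `Arc<dyn heal::storage::HealStorageAPI>` provided elsewhere by the server, and the wrapper function is hypothetical):

    async fn wire_up_heal(storage: std::sync::Arc<dyn heal::storage::HealStorageAPI>) -> Result<()> {
        // Start the manager (default HealConfig) and register the global instances.
        let _manager = init_heal_manager(storage, None).await?;
        // Later, other components fetch the same instance through the global accessor.
        assert!(get_heal_manager().is_some());
        Ok(())
    }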
326
crates/ahm/src/scanner/checkpoint.rs
Normal file
@@ -0,0 +1,326 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::scanner::node_scanner::ScanProgress;
|
||||
use crate::{Error, Result};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
path::{Path, PathBuf},
|
||||
time::{Duration, SystemTime},
|
||||
};
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
#[derive(Debug, Serialize, Deserialize, Clone)]
|
||||
pub struct CheckpointData {
|
||||
pub version: u32,
|
||||
pub timestamp: SystemTime,
|
||||
pub progress: ScanProgress,
|
||||
pub node_id: String,
|
||||
pub checksum: u64,
|
||||
}
|
||||
|
||||
impl CheckpointData {
|
||||
pub fn new(progress: ScanProgress, node_id: String) -> Self {
|
||||
let mut checkpoint = Self {
|
||||
version: 1,
|
||||
timestamp: SystemTime::now(),
|
||||
progress,
|
||||
node_id,
|
||||
checksum: 0,
|
||||
};
|
||||
|
||||
checkpoint.checksum = checkpoint.calculate_checksum();
|
||||
checkpoint
|
||||
}
|
||||
|
||||
fn calculate_checksum(&self) -> u64 {
|
||||
use std::collections::hash_map::DefaultHasher;
|
||||
use std::hash::{Hash, Hasher};
|
||||
|
||||
let mut hasher = DefaultHasher::new();
|
||||
self.version.hash(&mut hasher);
|
||||
self.node_id.hash(&mut hasher);
|
||||
self.progress.current_cycle.hash(&mut hasher);
|
||||
self.progress.current_disk_index.hash(&mut hasher);
|
||||
|
||||
if let Some(ref bucket) = self.progress.current_bucket {
|
||||
bucket.hash(&mut hasher);
|
||||
}
|
||||
|
||||
if let Some(ref key) = self.progress.last_scan_key {
|
||||
key.hash(&mut hasher);
|
||||
}
|
||||
|
||||
hasher.finish()
|
||||
}
|
||||
|
||||
pub fn verify_integrity(&self) -> bool {
|
||||
let calculated_checksum = self.calculate_checksum();
|
||||
self.checksum == calculated_checksum
|
||||
}
|
||||
}
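The checksum only covers the fields hashed in calculate_checksum (version, node id, cycle, disk index, current bucket, last scan key), so verify_integrity flags tampering with any of them. A small sketch, assuming `progress` is an existing ScanProgress captured from the scanner:

    fn checkpoint_integrity_example(progress: ScanProgress) {
        let mut cp = CheckpointData::new(progress, "node-1".to_string());
        assert!(cp.verify_integrity());

        // Changing any hashed field invalidates the stored checksum.
        cp.node_id = "node-2".to_string();
        assert!(!cp.verify_integrity());
    }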
|
||||
pub struct CheckpointManager {
|
||||
checkpoint_file: PathBuf,
|
||||
backup_file: PathBuf,
|
||||
temp_file: PathBuf,
|
||||
save_interval: Duration,
|
||||
last_save: RwLock<SystemTime>,
|
||||
node_id: String,
|
||||
}
|
||||
|
||||
impl CheckpointManager {
|
||||
pub fn new(node_id: &str, data_dir: &Path) -> Self {
|
||||
if !data_dir.exists() {
|
||||
if let Err(e) = std::fs::create_dir_all(data_dir) {
|
||||
error!("create data dir failed {:?}: {}", data_dir, e);
|
||||
}
|
||||
}
|
||||
|
||||
let checkpoint_file = data_dir.join(format!("scanner_checkpoint_{node_id}.json"));
|
||||
let backup_file = data_dir.join(format!("scanner_checkpoint_{node_id}.backup"));
|
||||
let temp_file = data_dir.join(format!("scanner_checkpoint_{node_id}.tmp"));
|
||||
|
||||
Self {
|
||||
checkpoint_file,
|
||||
backup_file,
|
||||
temp_file,
|
||||
save_interval: Duration::from_secs(30), // 30s
|
||||
last_save: RwLock::new(SystemTime::UNIX_EPOCH),
|
||||
node_id: node_id.to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
pub async fn save_checkpoint(&self, progress: &ScanProgress) -> Result<()> {
|
||||
let now = SystemTime::now();
|
||||
let last_save = *self.last_save.read().await;
|
||||
|
||||
if now.duration_since(last_save).unwrap_or(Duration::ZERO) < self.save_interval {
|
||||
return Ok(());
|
||||
}
|
||||
|
||||
let checkpoint_data = CheckpointData::new(progress.clone(), self.node_id.clone());
|
||||
|
||||
let json_data = serde_json::to_string_pretty(&checkpoint_data)
|
||||
.map_err(|e| Error::Serialization(format!("serialize checkpoint failed: {e}")))?;
|
||||
|
||||
tokio::fs::write(&self.temp_file, json_data)
|
||||
.await
|
||||
.map_err(|e| Error::IO(format!("write temp checkpoint file failed: {e}")))?;
|
||||
|
||||
if self.checkpoint_file.exists() {
|
||||
tokio::fs::copy(&self.checkpoint_file, &self.backup_file)
|
||||
.await
|
||||
.map_err(|e| Error::IO(format!("backup checkpoint file failed: {e}")))?;
|
||||
}
|
||||
|
||||
tokio::fs::rename(&self.temp_file, &self.checkpoint_file)
|
||||
.await
|
||||
.map_err(|e| Error::IO(format!("replace checkpoint file failed: {e}")))?;
|
||||
|
||||
*self.last_save.write().await = now;
|
||||
|
||||
debug!(
|
||||
"save checkpoint to {:?}, cycle: {}, disk index: {}",
|
||||
self.checkpoint_file, checkpoint_data.progress.current_cycle, checkpoint_data.progress.current_disk_index
|
||||
);
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
pub async fn load_checkpoint(&self) -> Result<Option<ScanProgress>> {
|
||||
// first try main checkpoint file
|
||||
match self.load_checkpoint_from_file(&self.checkpoint_file).await {
|
||||
Ok(checkpoint) => {
|
||||
info!(
|
||||
"restore scan progress from main checkpoint file: cycle={}, disk index={}, last scan key={:?}",
|
||||
checkpoint.current_cycle, checkpoint.current_disk_index, checkpoint.last_scan_key
|
||||
);
|
||||
Ok(Some(checkpoint))
|
||||
}
|
||||
Err(e) => {
|
||||
warn!("main checkpoint file is corrupted or not exists: {}", e);
|
||||
|
||||
// try backup file
|
||||
match self.load_checkpoint_from_file(&self.backup_file).await {
|
||||
Ok(checkpoint) => {
|
||||
warn!(
|
||||
"restore scan progress from backup file: cycle={}, disk index={}",
|
||||
checkpoint.current_cycle, checkpoint.current_disk_index
|
||||
);
|
||||
|
||||
// copy backup file to main checkpoint file
|
||||
if let Err(copy_err) = tokio::fs::copy(&self.backup_file, &self.checkpoint_file).await {
|
||||
warn!("restore main checkpoint file failed: {}", copy_err);
|
||||
}
|
||||
|
||||
Ok(Some(checkpoint))
|
||||
}
|
||||
Err(backup_e) => {
|
||||
warn!("backup file is corrupted or not exists: {}", backup_e);
|
||||
info!("cannot restore scan progress, will start fresh scan");
|
||||
Ok(None)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// load checkpoint from file
|
||||
async fn load_checkpoint_from_file(&self, file_path: &Path) -> Result<ScanProgress> {
|
||||
if !file_path.exists() {
|
||||
return Err(Error::NotFound(format!("checkpoint file does not exist: {file_path:?}")));
|
||||
}
|
||||
|
||||
// read file content
|
||||
let content = tokio::fs::read_to_string(file_path)
|
||||
.await
|
||||
.map_err(|e| Error::IO(format!("read checkpoint file failed: {e}")))?;
|
||||
|
||||
// deserialize
|
||||
let checkpoint_data: CheckpointData =
|
||||
serde_json::from_str(&content).map_err(|e| Error::Serialization(format!("deserialize checkpoint failed: {e}")))?;
|
||||
|
||||
// validate checkpoint data
|
||||
self.validate_checkpoint(&checkpoint_data)?;
|
||||
|
||||
Ok(checkpoint_data.progress)
|
||||
}
|
||||
|
||||
/// validate checkpoint data
|
||||
fn validate_checkpoint(&self, checkpoint: &CheckpointData) -> Result<()> {
|
||||
// validate data integrity
|
||||
if !checkpoint.verify_integrity() {
|
||||
return Err(Error::InvalidCheckpoint(
|
||||
"checkpoint data verification failed, may be corrupted".to_string(),
|
||||
));
|
||||
}
|
||||
|
||||
// validate node id match
|
||||
if checkpoint.node_id != self.node_id {
|
||||
return Err(Error::InvalidCheckpoint(format!(
|
||||
"checkpoint node id not match: expected {}, actual {}",
|
||||
self.node_id, checkpoint.node_id
|
||||
)));
|
||||
}
|
||||
|
||||
let now = SystemTime::now();
|
||||
let checkpoint_age = now.duration_since(checkpoint.timestamp).unwrap_or(Duration::MAX);
|
||||
|
||||
// checkpoint is too old (more than 24 hours), may be data expired
|
||||
if checkpoint_age > Duration::from_secs(24 * 3600) {
|
||||
return Err(Error::InvalidCheckpoint(format!("checkpoint data is too old: {checkpoint_age:?}")));
|
||||
}
|
||||
|
||||
// validate version compatibility
|
||||
if checkpoint.version > 1 {
|
||||
return Err(Error::InvalidCheckpoint(format!(
|
||||
"unsupported checkpoint version: {}",
|
||||
checkpoint.version
|
||||
)));
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// clean checkpoint file
|
||||
///
|
||||
/// called when scanner stops or resets
|
||||
pub async fn cleanup_checkpoint(&self) -> Result<()> {
|
||||
// delete main file
|
||||
if self.checkpoint_file.exists() {
|
||||
tokio::fs::remove_file(&self.checkpoint_file)
|
||||
.await
|
||||
.map_err(|e| Error::IO(format!("delete main checkpoint file failed: {e}")))?;
|
||||
}
|
||||
|
||||
// delete backup file
|
||||
if self.backup_file.exists() {
|
||||
tokio::fs::remove_file(&self.backup_file)
|
||||
.await
|
||||
.map_err(|e| Error::IO(format!("delete backup checkpoint file failed: {e}")))?;
|
||||
}
|
||||
|
||||
// delete temp file
|
||||
if self.temp_file.exists() {
|
||||
tokio::fs::remove_file(&self.temp_file)
|
||||
.await
|
||||
.map_err(|e| Error::IO(format!("delete temp checkpoint file failed: {e}")))?;
|
||||
}
|
||||
|
||||
info!("cleaned up all checkpoint files");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// get checkpoint file info
|
||||
pub async fn get_checkpoint_info(&self) -> Result<Option<CheckpointInfo>> {
|
||||
if !self.checkpoint_file.exists() {
|
||||
return Ok(None);
|
||||
}
|
||||
|
||||
let metadata = tokio::fs::metadata(&self.checkpoint_file)
|
||||
.await
|
||||
.map_err(|e| Error::IO(format!("get checkpoint file metadata failed: {e}")))?;
|
||||
|
||||
let content = tokio::fs::read_to_string(&self.checkpoint_file)
|
||||
.await
|
||||
.map_err(|e| Error::IO(format!("read checkpoint file failed: {e}")))?;
|
||||
|
||||
let checkpoint_data: CheckpointData =
|
||||
serde_json::from_str(&content).map_err(|e| Error::Serialization(format!("deserialize checkpoint failed: {e}")))?;
|
||||
|
||||
Ok(Some(CheckpointInfo {
|
||||
file_size: metadata.len(),
|
||||
last_modified: metadata.modified().unwrap_or(SystemTime::UNIX_EPOCH),
|
||||
checkpoint_timestamp: checkpoint_data.timestamp,
|
||||
current_cycle: checkpoint_data.progress.current_cycle,
|
||||
current_disk_index: checkpoint_data.progress.current_disk_index,
|
||||
completed_disks_count: checkpoint_data.progress.completed_disks.len(),
|
||||
is_valid: checkpoint_data.verify_integrity(),
|
||||
}))
|
||||
}
|
||||
|
||||
/// force save checkpoint (ignore time interval limit)
|
||||
pub async fn force_save_checkpoint(&self, progress: &ScanProgress) -> Result<()> {
|
||||
// temporarily reset last save time, force save
|
||||
*self.last_save.write().await = SystemTime::UNIX_EPOCH;
|
||||
self.save_checkpoint(progress).await
|
||||
}
|
||||
|
||||
/// set save interval
|
||||
pub async fn set_save_interval(&mut self, interval: Duration) {
|
||||
self.save_interval = interval;
|
||||
info!("checkpoint save interval set to: {:?}", interval);
|
||||
}
|
||||
}
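A typical save/restore cycle using the manager above (illustrative only; `progress` is assumed to come from the running scanner and the data directory is a placeholder):

    async fn checkpoint_round_trip(progress: &ScanProgress) -> Result<()> {
        let manager = CheckpointManager::new("node-1", std::path::Path::new("/tmp/rustfs-ahm"));

        // Periodic saves are rate-limited by save_interval; force_save bypasses it.
        manager.force_save_checkpoint(progress).await?;

        // On restart, a valid checkpoint (matching node id, less than 24h old) is restored.
        if let Some(restored) = manager.load_checkpoint().await? {
            println!("resuming at cycle {}", restored.current_cycle);
        }

        // Remove all checkpoint files once the scan finishes or is reset.
        manager.cleanup_checkpoint().await
    }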
|
||||
/// checkpoint info
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct CheckpointInfo {
|
||||
/// file size
|
||||
pub file_size: u64,
|
||||
/// file last modified time
|
||||
pub last_modified: SystemTime,
|
||||
/// checkpoint creation time
|
||||
pub checkpoint_timestamp: SystemTime,
|
||||
/// current scan cycle
|
||||
pub current_cycle: u64,
|
||||
/// current disk index
|
||||
pub current_disk_index: usize,
|
||||
/// completed disks count
|
||||
pub completed_disks_count: usize,
|
||||
/// checkpoint is valid
|
||||
pub is_valid: bool,
|
||||
}
|
||||
3586
crates/ahm/src/scanner/data_scanner.rs
Normal file
305
crates/ahm/src/scanner/histogram.rs
Normal file
@@ -0,0 +1,305 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
collections::HashMap,
|
||||
sync::atomic::{AtomicU64, Ordering},
|
||||
time::{Duration, SystemTime},
|
||||
};
|
||||
use tracing::info;
|
||||
|
||||
/// Scanner metrics
|
||||
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
|
||||
pub struct ScannerMetrics {
|
||||
/// Total objects scanned since server start
|
||||
pub objects_scanned: u64,
|
||||
/// Total object versions scanned since server start
|
||||
pub versions_scanned: u64,
|
||||
/// Total directories scanned since server start
|
||||
pub directories_scanned: u64,
|
||||
/// Total bucket scans started since server start
|
||||
pub bucket_scans_started: u64,
|
||||
/// Total bucket scans finished since server start
|
||||
pub bucket_scans_finished: u64,
|
||||
/// Total objects with health issues found
|
||||
pub objects_with_issues: u64,
|
||||
/// Total heal tasks queued
|
||||
pub heal_tasks_queued: u64,
|
||||
/// Total heal tasks completed
|
||||
pub heal_tasks_completed: u64,
|
||||
/// Total heal tasks failed
|
||||
pub heal_tasks_failed: u64,
|
||||
/// Total healthy objects found
|
||||
pub healthy_objects: u64,
|
||||
/// Total corrupted objects found
|
||||
pub corrupted_objects: u64,
|
||||
/// Last scan activity time
|
||||
pub last_activity: Option<SystemTime>,
|
||||
/// Current scan cycle
|
||||
pub current_cycle: u64,
|
||||
/// Total scan cycles completed
|
||||
pub total_cycles: u64,
|
||||
/// Current scan duration
|
||||
pub current_scan_duration: Option<Duration>,
|
||||
/// Average scan duration
|
||||
pub avg_scan_duration: Duration,
|
||||
/// Objects scanned per second
|
||||
pub objects_per_second: f64,
|
||||
/// Buckets scanned per second
|
||||
pub buckets_per_second: f64,
|
||||
/// Storage metrics by bucket
|
||||
pub bucket_metrics: HashMap<String, BucketMetrics>,
|
||||
/// Disk metrics
|
||||
pub disk_metrics: HashMap<String, DiskMetrics>,
|
||||
}
|
||||
|
||||
/// Bucket-specific metrics
|
||||
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
|
||||
pub struct BucketMetrics {
|
||||
/// Bucket name
|
||||
pub bucket: String,
|
||||
/// Total objects in bucket
|
||||
pub total_objects: u64,
|
||||
/// Total size of objects in bucket (bytes)
|
||||
pub total_size: u64,
|
||||
/// Objects with health issues
|
||||
pub objects_with_issues: u64,
|
||||
/// Last scan time
|
||||
pub last_scan_time: Option<SystemTime>,
|
||||
/// Scan duration
|
||||
pub scan_duration: Option<Duration>,
|
||||
/// Heal tasks queued for this bucket
|
||||
pub heal_tasks_queued: u64,
|
||||
/// Heal tasks completed for this bucket
|
||||
pub heal_tasks_completed: u64,
|
||||
/// Heal tasks failed for this bucket
|
||||
pub heal_tasks_failed: u64,
|
||||
}
|
||||
|
||||
/// Disk-specific metrics
|
||||
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
|
||||
pub struct DiskMetrics {
|
||||
/// Disk path
|
||||
pub disk_path: String,
|
||||
/// Total disk space (bytes)
|
||||
pub total_space: u64,
|
||||
/// Used disk space (bytes)
|
||||
pub used_space: u64,
|
||||
/// Free disk space (bytes)
|
||||
pub free_space: u64,
|
||||
/// Objects scanned on this disk
|
||||
pub objects_scanned: u64,
|
||||
/// Objects with issues on this disk
|
||||
pub objects_with_issues: u64,
|
||||
/// Last scan time
|
||||
pub last_scan_time: Option<SystemTime>,
|
||||
/// Whether disk is online
|
||||
pub is_online: bool,
|
||||
/// Whether disk is being scanned
|
||||
pub is_scanning: bool,
|
||||
}
|
||||
|
||||
/// Thread-safe metrics collector
|
||||
pub struct MetricsCollector {
|
||||
/// Atomic counters for real-time metrics
|
||||
objects_scanned: AtomicU64,
|
||||
versions_scanned: AtomicU64,
|
||||
directories_scanned: AtomicU64,
|
||||
bucket_scans_started: AtomicU64,
|
||||
bucket_scans_finished: AtomicU64,
|
||||
objects_with_issues: AtomicU64,
|
||||
heal_tasks_queued: AtomicU64,
|
||||
heal_tasks_completed: AtomicU64,
|
||||
heal_tasks_failed: AtomicU64,
|
||||
current_cycle: AtomicU64,
|
||||
total_cycles: AtomicU64,
|
||||
healthy_objects: AtomicU64,
|
||||
corrupted_objects: AtomicU64,
|
||||
}
|
||||
|
||||
impl MetricsCollector {
|
||||
/// Create a new metrics collector
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
objects_scanned: AtomicU64::new(0),
|
||||
versions_scanned: AtomicU64::new(0),
|
||||
directories_scanned: AtomicU64::new(0),
|
||||
bucket_scans_started: AtomicU64::new(0),
|
||||
bucket_scans_finished: AtomicU64::new(0),
|
||||
objects_with_issues: AtomicU64::new(0),
|
||||
heal_tasks_queued: AtomicU64::new(0),
|
||||
heal_tasks_completed: AtomicU64::new(0),
|
||||
heal_tasks_failed: AtomicU64::new(0),
|
||||
current_cycle: AtomicU64::new(0),
|
||||
total_cycles: AtomicU64::new(0),
|
||||
healthy_objects: AtomicU64::new(0),
|
||||
corrupted_objects: AtomicU64::new(0),
|
||||
}
|
||||
}
|
||||
|
||||
/// Increment objects scanned count
|
||||
pub fn increment_objects_scanned(&self, count: u64) {
|
||||
self.objects_scanned.fetch_add(count, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Increment versions scanned count
|
||||
pub fn increment_versions_scanned(&self, count: u64) {
|
||||
self.versions_scanned.fetch_add(count, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Increment directories scanned count
|
||||
pub fn increment_directories_scanned(&self, count: u64) {
|
||||
self.directories_scanned.fetch_add(count, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Increment bucket scans started count
|
||||
pub fn increment_bucket_scans_started(&self, count: u64) {
|
||||
self.bucket_scans_started.fetch_add(count, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Increment bucket scans finished count
|
||||
pub fn increment_bucket_scans_finished(&self, count: u64) {
|
||||
self.bucket_scans_finished.fetch_add(count, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Increment objects with issues count
|
||||
pub fn increment_objects_with_issues(&self, count: u64) {
|
||||
self.objects_with_issues.fetch_add(count, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Increment heal tasks queued count
|
||||
pub fn increment_heal_tasks_queued(&self, count: u64) {
|
||||
self.heal_tasks_queued.fetch_add(count, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Increment heal tasks completed count
|
||||
pub fn increment_heal_tasks_completed(&self, count: u64) {
|
||||
self.heal_tasks_completed.fetch_add(count, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Increment heal tasks failed count
|
||||
pub fn increment_heal_tasks_failed(&self, count: u64) {
|
||||
self.heal_tasks_failed.fetch_add(count, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Set current cycle
|
||||
pub fn set_current_cycle(&self, cycle: u64) {
|
||||
self.current_cycle.store(cycle, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Increment total cycles
|
||||
pub fn increment_total_cycles(&self) {
|
||||
self.total_cycles.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Increment healthy objects count
|
||||
pub fn increment_healthy_objects(&self) {
|
||||
self.healthy_objects.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Increment corrupted objects count
|
||||
pub fn increment_corrupted_objects(&self) {
|
||||
self.corrupted_objects.fetch_add(1, Ordering::Relaxed);
|
||||
}
|
||||
|
||||
/// Get current metrics snapshot
|
||||
pub fn get_metrics(&self) -> ScannerMetrics {
|
||||
ScannerMetrics {
|
||||
objects_scanned: self.objects_scanned.load(Ordering::Relaxed),
|
||||
versions_scanned: self.versions_scanned.load(Ordering::Relaxed),
|
||||
directories_scanned: self.directories_scanned.load(Ordering::Relaxed),
|
||||
bucket_scans_started: self.bucket_scans_started.load(Ordering::Relaxed),
|
||||
bucket_scans_finished: self.bucket_scans_finished.load(Ordering::Relaxed),
|
||||
objects_with_issues: self.objects_with_issues.load(Ordering::Relaxed),
|
||||
heal_tasks_queued: self.heal_tasks_queued.load(Ordering::Relaxed),
|
||||
heal_tasks_completed: self.heal_tasks_completed.load(Ordering::Relaxed),
|
||||
heal_tasks_failed: self.heal_tasks_failed.load(Ordering::Relaxed),
|
||||
healthy_objects: self.healthy_objects.load(Ordering::Relaxed),
|
||||
corrupted_objects: self.corrupted_objects.load(Ordering::Relaxed),
|
||||
last_activity: Some(SystemTime::now()),
|
||||
current_cycle: self.current_cycle.load(Ordering::Relaxed),
|
||||
total_cycles: self.total_cycles.load(Ordering::Relaxed),
|
||||
current_scan_duration: None, // Will be set by scanner
|
||||
avg_scan_duration: Duration::ZERO, // Will be calculated
|
||||
objects_per_second: 0.0, // Will be calculated
|
||||
buckets_per_second: 0.0, // Will be calculated
|
||||
bucket_metrics: HashMap::new(), // Will be populated by scanner
|
||||
disk_metrics: HashMap::new(), // Will be populated by scanner
|
||||
}
|
||||
}
|
||||
|
||||
/// Reset all metrics
|
||||
pub fn reset(&self) {
|
||||
self.objects_scanned.store(0, Ordering::Relaxed);
|
||||
self.versions_scanned.store(0, Ordering::Relaxed);
|
||||
self.directories_scanned.store(0, Ordering::Relaxed);
|
||||
self.bucket_scans_started.store(0, Ordering::Relaxed);
|
||||
self.bucket_scans_finished.store(0, Ordering::Relaxed);
|
||||
self.objects_with_issues.store(0, Ordering::Relaxed);
|
||||
self.heal_tasks_queued.store(0, Ordering::Relaxed);
|
||||
self.heal_tasks_completed.store(0, Ordering::Relaxed);
|
||||
self.heal_tasks_failed.store(0, Ordering::Relaxed);
|
||||
self.current_cycle.store(0, Ordering::Relaxed);
|
||||
self.total_cycles.store(0, Ordering::Relaxed);
|
||||
self.healthy_objects.store(0, Ordering::Relaxed);
|
||||
self.corrupted_objects.store(0, Ordering::Relaxed);
|
||||
|
||||
info!("Scanner metrics reset");
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for MetricsCollector {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
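Because all counters are atomics, one collector can be shared across scan tasks behind an Arc with no extra locking; a minimal concurrency sketch (illustrative only, not part of this diff):

    async fn shared_collector_example() {
        let collector = std::sync::Arc::new(MetricsCollector::new());

        let handles: Vec<_> = (0..4)
            .map(|_| {
                let c = collector.clone();
                tokio::spawn(async move {
                    c.increment_objects_scanned(100);
                    c.increment_healthy_objects();
                })
            })
            .collect();
        for h in handles {
            let _ = h.await;
        }

        let snapshot = collector.get_metrics();
        assert_eq!(snapshot.objects_scanned, 400);
        assert_eq!(snapshot.healthy_objects, 4);
    }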
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_metrics_collector_creation() {
|
||||
let collector = MetricsCollector::new();
|
||||
let metrics = collector.get_metrics();
|
||||
assert_eq!(metrics.objects_scanned, 0);
|
||||
assert_eq!(metrics.versions_scanned, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_metrics_increment() {
|
||||
let collector = MetricsCollector::new();
|
||||
|
||||
collector.increment_objects_scanned(10);
|
||||
collector.increment_versions_scanned(5);
|
||||
collector.increment_objects_with_issues(2);
|
||||
|
||||
let metrics = collector.get_metrics();
|
||||
assert_eq!(metrics.objects_scanned, 10);
|
||||
assert_eq!(metrics.versions_scanned, 5);
|
||||
assert_eq!(metrics.objects_with_issues, 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_metrics_reset() {
|
||||
let collector = MetricsCollector::new();
|
||||
|
||||
collector.increment_objects_scanned(10);
|
||||
collector.reset();
|
||||
|
||||
let metrics = collector.get_metrics();
|
||||
assert_eq!(metrics.objects_scanned, 0);
|
||||
}
|
||||
}
|
||||
555
crates/ahm/src/scanner/io_monitor.rs
Normal file
@@ -0,0 +1,555 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::Result;
|
||||
use crate::scanner::LoadLevel;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{
|
||||
collections::VecDeque,
|
||||
sync::{
|
||||
Arc,
|
||||
atomic::{AtomicU64, Ordering},
|
||||
},
|
||||
time::{Duration, SystemTime},
|
||||
};
|
||||
use tokio::sync::RwLock;
|
||||
use tokio_util::sync::CancellationToken;
|
||||
use tracing::{debug, error, info, warn};
|
||||
|
||||
/// IO monitor config
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct IOMonitorConfig {
|
||||
/// monitor interval
|
||||
pub monitor_interval: Duration,
|
||||
/// history data retention time
|
||||
pub history_retention: Duration,
|
||||
/// load evaluation window size
|
||||
pub load_window_size: usize,
|
||||
/// whether to enable actual system monitoring
|
||||
pub enable_system_monitoring: bool,
|
||||
/// disk path list (for monitoring specific disks)
|
||||
pub disk_paths: Vec<String>,
|
||||
}
|
||||
|
||||
impl Default for IOMonitorConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
monitor_interval: Duration::from_secs(1), // 1 second monitor interval
|
||||
history_retention: Duration::from_secs(300), // keep 5 minutes history
|
||||
load_window_size: 30, // 30 sample points sliding window
|
||||
enable_system_monitoring: false, // default use simulated data
|
||||
disk_paths: Vec::new(),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// IO monitor metrics
|
||||
#[derive(Debug, Clone, Serialize, Deserialize)]
|
||||
pub struct IOMetrics {
|
||||
/// timestamp
|
||||
pub timestamp: SystemTime,
|
||||
/// disk IOPS (read + write)
|
||||
pub iops: u64,
|
||||
/// read IOPS
|
||||
pub read_iops: u64,
|
||||
/// write IOPS
|
||||
pub write_iops: u64,
|
||||
/// disk queue depth
|
||||
pub queue_depth: u64,
|
||||
/// average latency (milliseconds)
|
||||
pub avg_latency: u64,
|
||||
/// read latency (milliseconds)
|
||||
pub read_latency: u64,
|
||||
/// write latency (milliseconds)
|
||||
pub write_latency: u64,
|
||||
/// CPU usage (0-100)
|
||||
pub cpu_usage: u8,
|
||||
/// memory usage (0-100)
|
||||
pub memory_usage: u8,
|
||||
/// disk usage (0-100)
|
||||
pub disk_utilization: u8,
|
||||
/// network IO (Mbps)
|
||||
pub network_io: u64,
|
||||
}
|
||||
|
||||
impl Default for IOMetrics {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
timestamp: SystemTime::now(),
|
||||
iops: 0,
|
||||
read_iops: 0,
|
||||
write_iops: 0,
|
||||
queue_depth: 0,
|
||||
avg_latency: 0,
|
||||
read_latency: 0,
|
||||
write_latency: 0,
|
||||
cpu_usage: 0,
|
||||
memory_usage: 0,
|
||||
disk_utilization: 0,
|
||||
network_io: 0,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// load level stats
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct LoadLevelStats {
|
||||
/// low load duration (seconds)
|
||||
pub low_load_duration: u64,
|
||||
/// medium load duration (seconds)
|
||||
pub medium_load_duration: u64,
|
||||
/// high load duration (seconds)
|
||||
pub high_load_duration: u64,
|
||||
/// critical load duration (seconds)
|
||||
pub critical_load_duration: u64,
|
||||
/// load transitions
|
||||
pub load_transitions: u64,
|
||||
}
|
||||
|
||||
/// advanced IO monitor
|
||||
pub struct AdvancedIOMonitor {
|
||||
/// config
|
||||
config: Arc<RwLock<IOMonitorConfig>>,
|
||||
/// current metrics
|
||||
current_metrics: Arc<RwLock<IOMetrics>>,
|
||||
/// history metrics (sliding window)
|
||||
history_metrics: Arc<RwLock<VecDeque<IOMetrics>>>,
|
||||
/// current load level
|
||||
current_load_level: Arc<RwLock<LoadLevel>>,
|
||||
/// load level history
|
||||
load_level_history: Arc<RwLock<VecDeque<(SystemTime, LoadLevel)>>>,
|
||||
/// load level stats
|
||||
load_stats: Arc<RwLock<LoadLevelStats>>,
|
||||
/// business IO metrics (updated by external)
|
||||
business_metrics: Arc<BusinessIOMetrics>,
|
||||
/// cancel token
|
||||
cancel_token: CancellationToken,
|
||||
}
|
||||
|
||||
/// business IO metrics
|
||||
pub struct BusinessIOMetrics {
|
||||
/// business request latency (milliseconds)
|
||||
pub request_latency: AtomicU64,
|
||||
/// business request QPS
|
||||
pub request_qps: AtomicU64,
|
||||
/// business error rate (0-10000, 0.00%-100.00%)
|
||||
pub error_rate: AtomicU64,
|
||||
/// active connections
|
||||
pub active_connections: AtomicU64,
|
||||
/// last update time
|
||||
pub last_update: Arc<RwLock<SystemTime>>,
|
||||
}
|
||||
|
||||
impl Default for BusinessIOMetrics {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
request_latency: AtomicU64::new(0),
|
||||
request_qps: AtomicU64::new(0),
|
||||
error_rate: AtomicU64::new(0),
|
||||
active_connections: AtomicU64::new(0),
|
||||
last_update: Arc::new(RwLock::new(SystemTime::UNIX_EPOCH)),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl AdvancedIOMonitor {
|
||||
/// create new advanced IO monitor
|
||||
pub fn new(config: IOMonitorConfig) -> Self {
|
||||
Self {
|
||||
config: Arc::new(RwLock::new(config)),
|
||||
current_metrics: Arc::new(RwLock::new(IOMetrics::default())),
|
||||
history_metrics: Arc::new(RwLock::new(VecDeque::new())),
|
||||
current_load_level: Arc::new(RwLock::new(LoadLevel::Low)),
|
||||
load_level_history: Arc::new(RwLock::new(VecDeque::new())),
|
||||
load_stats: Arc::new(RwLock::new(LoadLevelStats::default())),
|
||||
business_metrics: Arc::new(BusinessIOMetrics::default()),
|
||||
cancel_token: CancellationToken::new(),
|
||||
}
|
||||
}
|
||||
|
||||
/// start monitoring
|
||||
pub async fn start(&self) -> Result<()> {
|
||||
info!("start advanced IO monitor");
|
||||
|
||||
let monitor = self.clone_for_background();
|
||||
tokio::spawn(async move {
|
||||
if let Err(e) = monitor.monitoring_loop().await {
|
||||
error!("IO monitoring loop failed: {}", e);
|
||||
}
|
||||
});
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// stop monitoring
|
||||
pub async fn stop(&self) {
|
||||
info!("stop IO monitor");
|
||||
self.cancel_token.cancel();
|
||||
}
|
||||
|
||||
/// monitoring loop
|
||||
async fn monitoring_loop(&self) -> Result<()> {
|
||||
let mut interval = {
|
||||
let config = self.config.read().await;
|
||||
tokio::time::interval(config.monitor_interval)
|
||||
};
|
||||
|
||||
let mut last_load_level = LoadLevel::Low;
|
||||
let mut load_level_start_time = SystemTime::now();
|
||||
|
||||
loop {
|
||||
tokio::select! {
|
||||
_ = self.cancel_token.cancelled() => {
|
||||
info!("IO monitoring loop cancelled");
|
||||
break;
|
||||
}
|
||||
_ = interval.tick() => {
|
||||
// collect system metrics
|
||||
let metrics = self.collect_system_metrics().await;
|
||||
|
||||
// update current metrics
|
||||
*self.current_metrics.write().await = metrics.clone();
|
||||
|
||||
// update history metrics
|
||||
self.update_metrics_history(metrics.clone()).await;
|
||||
|
||||
// calculate load level
|
||||
let new_load_level = self.calculate_load_level(&metrics).await;
|
||||
|
||||
// check if load level changed
|
||||
if new_load_level != last_load_level {
|
||||
self.handle_load_level_change(last_load_level, new_load_level, load_level_start_time).await;
|
||||
last_load_level = new_load_level;
|
||||
load_level_start_time = SystemTime::now();
|
||||
}
|
||||
|
||||
// update current load level
|
||||
*self.current_load_level.write().await = new_load_level;
|
||||
|
||||
debug!("IO monitor updated: IOPS={}, queue depth={}, latency={}ms, load level={:?}",
|
||||
metrics.iops, metrics.queue_depth, metrics.avg_latency, new_load_level);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// collect system metrics
|
||||
async fn collect_system_metrics(&self) -> IOMetrics {
|
||||
let config = self.config.read().await;
|
||||
|
||||
if config.enable_system_monitoring {
|
||||
// actual system monitoring implementation
|
||||
self.collect_real_system_metrics().await
|
||||
} else {
|
||||
// simulated data
|
||||
self.generate_simulated_metrics().await
|
||||
}
|
||||
}
|
||||
|
||||
/// collect real system metrics (to be implemented per target platform)
async fn collect_real_system_metrics(&self) -> IOMetrics {
// TODO: implement actual system metrics collection
// could use procfs, sysfs, or other system APIs
|
||||
|
||||
let metrics = IOMetrics {
|
||||
timestamp: SystemTime::now(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
// example: read /proc/diskstats
|
||||
if let Ok(diskstats) = tokio::fs::read_to_string("/proc/diskstats").await {
|
||||
// parse disk stats info
// specific parsing logic still to be implemented
|
||||
debug!("read disk stats info: {} bytes", diskstats.len());
|
||||
}
|
||||
|
||||
// example: read /proc/stat to get CPU info
|
||||
if let Ok(stat) = tokio::fs::read_to_string("/proc/stat").await {
|
||||
// parse CPU stats info
|
||||
debug!("read CPU stats info: {} bytes", stat.len());
|
||||
}
|
||||
|
||||
// example: read /proc/meminfo to get memory info
|
||||
if let Ok(meminfo) = tokio::fs::read_to_string("/proc/meminfo").await {
|
||||
// parse memory stats info
|
||||
debug!("read memory stats info: {} bytes", meminfo.len());
|
||||
}
|
||||
|
||||
metrics
|
||||
}
|
||||
|
||||
/// generate simulated metrics (for testing and development)
|
||||
async fn generate_simulated_metrics(&self) -> IOMetrics {
|
||||
use rand::Rng;
|
||||
let mut rng = rand::rng();
|
||||
|
||||
// get business metrics impact
|
||||
let business_latency = self.business_metrics.request_latency.load(Ordering::Relaxed);
|
||||
let business_qps = self.business_metrics.request_qps.load(Ordering::Relaxed);
|
||||
|
||||
// generate simulated system metrics based on business load
|
||||
let base_iops = 100 + (business_qps / 10);
|
||||
let base_latency = 5 + (business_latency / 10);
|
||||
|
||||
IOMetrics {
|
||||
timestamp: SystemTime::now(),
|
||||
iops: base_iops + rng.random_range(0..50),
|
||||
read_iops: (base_iops * 6 / 10) + rng.random_range(0..20),
|
||||
write_iops: (base_iops * 4 / 10) + rng.random_range(0..20),
|
||||
queue_depth: rng.random_range(1..20),
|
||||
avg_latency: base_latency + rng.random_range(0..10),
|
||||
read_latency: base_latency + rng.random_range(0..5),
|
||||
write_latency: base_latency + rng.random_range(0..15),
|
||||
cpu_usage: rng.random_range(10..70),
|
||||
memory_usage: rng.random_range(30..80),
|
||||
disk_utilization: rng.random_range(20..90),
|
||||
network_io: rng.random_range(10..1000),
|
||||
}
|
||||
}
|
||||
|
||||
/// update metrics history
|
||||
async fn update_metrics_history(&self, metrics: IOMetrics) {
|
||||
let mut history = self.history_metrics.write().await;
|
||||
let config = self.config.read().await;
|
||||
|
||||
// add new metrics
|
||||
history.push_back(metrics);
|
||||
|
||||
// clean expired data
|
||||
let retention_cutoff = SystemTime::now() - config.history_retention;
|
||||
while let Some(front) = history.front() {
|
||||
if front.timestamp < retention_cutoff {
|
||||
history.pop_front();
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
// limit window size
|
||||
while history.len() > config.load_window_size {
|
||||
history.pop_front();
|
||||
}
|
||||
}
|
||||
|
||||
/// calculate load level
|
||||
async fn calculate_load_level(&self, metrics: &IOMetrics) -> LoadLevel {
|
||||
// multi-dimensional load evaluation algorithm
|
||||
let mut load_score = 0u32;
|
||||
|
||||
// IOPS load evaluation (weight: 25%)
|
||||
let iops_score = match metrics.iops {
|
||||
0..=200 => 0,
|
||||
201..=500 => 15,
|
||||
501..=1000 => 25,
|
||||
_ => 35,
|
||||
};
|
||||
load_score += iops_score;
|
||||
|
||||
// latency load evaluation (weight: 30%)
|
||||
let latency_score = match metrics.avg_latency {
|
||||
0..=10 => 0,
|
||||
11..=50 => 20,
|
||||
51..=100 => 30,
|
||||
_ => 40,
|
||||
};
|
||||
load_score += latency_score;
|
||||
|
||||
// queue depth evaluation (weight: 20%)
|
||||
let queue_score = match metrics.queue_depth {
|
||||
0..=5 => 0,
|
||||
6..=15 => 10,
|
||||
16..=30 => 20,
|
||||
_ => 25,
|
||||
};
|
||||
load_score += queue_score;
|
||||
|
||||
// CPU usage evaluation (weight: 15%)
|
||||
let cpu_score = match metrics.cpu_usage {
|
||||
0..=30 => 0,
|
||||
31..=60 => 8,
|
||||
61..=80 => 12,
|
||||
_ => 15,
|
||||
};
|
||||
load_score += cpu_score;
|
||||
|
||||
// disk usage evaluation (weight: 10%)
|
||||
let disk_score = match metrics.disk_utilization {
|
||||
0..=50 => 0,
|
||||
51..=75 => 5,
|
||||
76..=90 => 8,
|
||||
_ => 10,
|
||||
};
|
||||
load_score += disk_score;
|
||||
|
||||
// business metrics impact
|
||||
let business_latency = self.business_metrics.request_latency.load(Ordering::Relaxed);
|
||||
let business_error_rate = self.business_metrics.error_rate.load(Ordering::Relaxed);
|
||||
|
||||
if business_latency > 100 {
|
||||
load_score += 20; // business latency too high
|
||||
}
|
||||
if business_error_rate > 100 {
|
||||
// > 1%
|
||||
load_score += 15; // business error rate too high
|
||||
}
|
||||
|
||||
// history trend analysis
|
||||
let trend_score = self.calculate_trend_score().await;
|
||||
load_score += trend_score;
|
||||
|
||||
// determine load level based on total score
|
||||
match load_score {
|
||||
0..=30 => LoadLevel::Low,
|
||||
31..=60 => LoadLevel::Medium,
|
||||
61..=90 => LoadLevel::High,
|
||||
_ => LoadLevel::Critical,
|
||||
}
|
||||
}
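A worked example of the scoring above: 600 IOPS scores 25, 60ms average latency scores 30, queue depth 10 scores 10, 65% CPU scores 12, and 80% disk utilization scores 8, for a total of 85, which falls in 61..=90 and maps to LoadLevel::High; a fresh monitor has no history and zero business metrics, so no extra points are added. As a sketch of how this could be checked in this module's tests (assuming the crate's tokio dev-dependency enables the test macro):

    #[tokio::test]
    async fn load_level_scoring_example() {
        let monitor = AdvancedIOMonitor::new(IOMonitorConfig::default());
        let metrics = IOMetrics {
            iops: 600,
            avg_latency: 60,
            queue_depth: 10,
            cpu_usage: 65,
            disk_utilization: 80,
            ..Default::default()
        };
        assert_eq!(monitor.calculate_load_level(&metrics).await, LoadLevel::High);
    }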
|
||||
/// calculate trend score
|
||||
async fn calculate_trend_score(&self) -> u32 {
|
||||
let history = self.history_metrics.read().await;
|
||||
|
||||
if history.len() < 5 {
|
||||
return 0; // data insufficient, cannot analyze trend
|
||||
}
|
||||
|
||||
// analyze trend of last 5 samples
|
||||
let recent: Vec<_> = history.iter().rev().take(5).collect();
|
||||
|
||||
// check IOPS rising trend
|
||||
let mut iops_trend = 0;
|
||||
for i in 1..recent.len() {
|
||||
if recent[i - 1].iops > recent[i].iops {
|
||||
iops_trend += 1;
|
||||
}
|
||||
}
|
||||
|
||||
// check latency rising trend
|
||||
let mut latency_trend = 0;
|
||||
for i in 1..recent.len() {
|
||||
if recent[i - 1].avg_latency > recent[i].avg_latency {
|
||||
latency_trend += 1;
|
||||
}
|
||||
}
|
||||
|
||||
// if IOPS and latency are both rising, increase load score
|
||||
if iops_trend >= 3 && latency_trend >= 3 {
|
||||
15 // obvious rising trend
|
||||
} else if iops_trend >= 2 || latency_trend >= 2 {
|
||||
5 // slight rising trend
|
||||
} else {
|
||||
0 // no obvious trend
|
||||
}
|
||||
}
|
||||
|
||||
/// handle load level change
|
||||
async fn handle_load_level_change(&self, old_level: LoadLevel, new_level: LoadLevel, start_time: SystemTime) {
|
||||
let duration = SystemTime::now().duration_since(start_time).unwrap_or(Duration::ZERO);
|
||||
|
||||
// update stats
|
||||
{
|
||||
let mut stats = self.load_stats.write().await;
|
||||
match old_level {
|
||||
LoadLevel::Low => stats.low_load_duration += duration.as_secs(),
|
||||
LoadLevel::Medium => stats.medium_load_duration += duration.as_secs(),
|
||||
LoadLevel::High => stats.high_load_duration += duration.as_secs(),
|
||||
LoadLevel::Critical => stats.critical_load_duration += duration.as_secs(),
|
||||
}
|
||||
stats.load_transitions += 1;
|
||||
}
|
||||
|
||||
// update history
|
||||
{
|
||||
let mut history = self.load_level_history.write().await;
|
||||
history.push_back((SystemTime::now(), new_level));
|
||||
|
||||
// keep history record in reasonable range
|
||||
while history.len() > 100 {
|
||||
history.pop_front();
|
||||
}
|
||||
}
|
||||
|
||||
info!("load level changed: {:?} -> {:?}, duration: {:?}", old_level, new_level, duration);
|
||||
|
||||
// if enter critical load state, record warning
|
||||
if new_level == LoadLevel::Critical {
|
||||
warn!("system entered critical load state, Scanner will pause running");
|
||||
}
|
||||
}
|
||||
|
||||
/// get current load level
|
||||
pub async fn get_business_load_level(&self) -> LoadLevel {
|
||||
*self.current_load_level.read().await
|
||||
}
|
||||
|
||||
/// get current metrics
|
||||
pub async fn get_current_metrics(&self) -> IOMetrics {
|
||||
self.current_metrics.read().await.clone()
|
||||
}
|
||||
|
||||
/// get history metrics
|
||||
pub async fn get_history_metrics(&self) -> Vec<IOMetrics> {
|
||||
self.history_metrics.read().await.iter().cloned().collect()
|
||||
}
|
||||
|
||||
/// get load stats
|
||||
pub async fn get_load_stats(&self) -> LoadLevelStats {
|
||||
self.load_stats.read().await.clone()
|
||||
}
|
||||
|
||||
/// update business IO metrics
|
||||
pub async fn update_business_metrics(&self, latency: u64, qps: u64, error_rate: u64, connections: u64) {
|
||||
self.business_metrics.request_latency.store(latency, Ordering::Relaxed);
|
||||
self.business_metrics.request_qps.store(qps, Ordering::Relaxed);
|
||||
self.business_metrics.error_rate.store(error_rate, Ordering::Relaxed);
|
||||
self.business_metrics.active_connections.store(connections, Ordering::Relaxed);
|
||||
|
||||
*self.business_metrics.last_update.write().await = SystemTime::now();
|
||||
|
||||
debug!(
|
||||
"update business metrics: latency={}ms, QPS={}, error rate={}‰, connections={}",
|
||||
latency, qps, error_rate, connections
|
||||
);
|
||||
}
|
||||
|
||||
/// clone for background task
|
||||
fn clone_for_background(&self) -> Self {
|
||||
Self {
|
||||
config: self.config.clone(),
|
||||
current_metrics: self.current_metrics.clone(),
|
||||
history_metrics: self.history_metrics.clone(),
|
||||
current_load_level: self.current_load_level.clone(),
|
||||
load_level_history: self.load_level_history.clone(),
|
||||
load_stats: self.load_stats.clone(),
|
||||
business_metrics: self.business_metrics.clone(),
|
||||
cancel_token: self.cancel_token.clone(),
|
||||
}
|
||||
}
|
||||
|
||||
/// reset stats
|
||||
pub async fn reset_stats(&self) {
|
||||
*self.load_stats.write().await = LoadLevelStats::default();
|
||||
self.load_level_history.write().await.clear();
|
||||
self.history_metrics.write().await.clear();
|
||||
info!("IO monitor stats reset");
|
||||
}
|
||||
|
||||
/// get load level history
|
||||
pub async fn get_load_level_history(&self) -> Vec<(SystemTime, LoadLevel)> {
|
||||
self.load_level_history.read().await.iter().cloned().collect()
|
||||
}
|
||||
}
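A sketch of how a server might drive the monitor (illustrative only; the function and the sample numbers are assumptions): start it, push business metrics from the request path, and let the scanner poll the load level before each batch of work.

    async fn monitor_usage_example() -> Result<()> {
        let monitor = AdvancedIOMonitor::new(IOMonitorConfig::default());
        monitor.start().await?;

        // The request path reports latency (ms), QPS, error rate (basis points) and connections.
        monitor.update_business_metrics(12, 850, 0, 42).await;

        // The scanner consults the current level before scheduling more work.
        match monitor.get_business_load_level().await {
            LoadLevel::Critical => { /* skip this batch */ }
            _ => { /* proceed with scanning */ }
        }

        monitor.stop().await;
        Ok(())
    }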
499
crates/ahm/src/scanner/io_throttler.rs
Normal file
@@ -0,0 +1,499 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::scanner::LoadLevel;
|
||||
use std::{
|
||||
sync::{
|
||||
Arc,
|
||||
atomic::{AtomicU8, AtomicU64, Ordering},
|
||||
},
|
||||
time::{Duration, SystemTime},
|
||||
};
|
||||
use tokio::sync::RwLock;
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
/// IO throttler config
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct IOThrottlerConfig {
|
||||
/// max IOPS limit
|
||||
pub max_iops: u64,
|
||||
/// business priority baseline (percentage)
|
||||
pub base_business_priority: u8,
|
||||
/// scanner minimum delay (milliseconds)
|
||||
pub min_scan_delay: u64,
|
||||
/// scanner maximum delay (milliseconds)
|
||||
pub max_scan_delay: u64,
|
||||
/// whether enable dynamic adjustment
|
||||
pub enable_dynamic_adjustment: bool,
|
||||
/// adjustment response time (seconds)
|
||||
pub adjustment_response_time: u64,
|
||||
}
|
||||
|
||||
impl Default for IOThrottlerConfig {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
max_iops: 1000, // default max 1000 IOPS
|
||||
base_business_priority: 95, // business priority 95%
|
||||
min_scan_delay: 5000, // minimum 5s delay
|
||||
max_scan_delay: 60000, // maximum 60s delay
|
||||
enable_dynamic_adjustment: true,
|
||||
adjustment_response_time: 5, // 5 seconds response time
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// resource allocation strategy
|
||||
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
|
||||
pub enum ResourceAllocationStrategy {
|
||||
/// business priority strategy
|
||||
BusinessFirst,
|
||||
/// balanced strategy
|
||||
Balanced,
|
||||
/// maintenance priority strategy (only used in special cases)
|
||||
MaintenanceFirst,
|
||||
}
|
||||
|
||||
/// throttle decision
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ThrottleDecision {
|
||||
/// whether should pause scanning
|
||||
pub should_pause: bool,
|
||||
/// suggested scanning delay
|
||||
pub suggested_delay: Duration,
|
||||
/// resource allocation suggestion
|
||||
pub resource_allocation: ResourceAllocation,
|
||||
/// decision reason
|
||||
pub reason: String,
|
||||
}
|
||||
|
||||
/// resource allocation
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct ResourceAllocation {
|
||||
/// business IO allocation percentage (0-100)
|
||||
pub business_percentage: u8,
|
||||
/// scanner IO allocation percentage (0-100)
|
||||
pub scanner_percentage: u8,
|
||||
/// allocation strategy
|
||||
pub strategy: ResourceAllocationStrategy,
|
||||
}
|
||||
|
||||
/// enhanced IO throttler
///
/// Dynamically adjusts the scanner's resource usage based on real-time system load and business demand,
/// ensuring that business IO gets priority protection.
pub struct AdvancedIOThrottler {
    /// config
    config: Arc<RwLock<IOThrottlerConfig>>,
    /// current IOPS usage (reserved field)
    #[allow(dead_code)]
    current_iops: Arc<AtomicU64>,
    /// business priority weight (0-100)
    business_priority: Arc<AtomicU8>,
    /// scanning operation delay (milliseconds)
    scan_delay: Arc<AtomicU64>,
    /// resource allocation strategy
    allocation_strategy: Arc<RwLock<ResourceAllocationStrategy>>,
    /// throttle history record
    throttle_history: Arc<RwLock<Vec<ThrottleRecord>>>,
    /// last adjustment time (reserved field)
    #[allow(dead_code)]
    last_adjustment: Arc<RwLock<SystemTime>>,
}

/// throttle record
#[derive(Debug, Clone)]
pub struct ThrottleRecord {
    /// timestamp
    pub timestamp: SystemTime,
    /// load level
    pub load_level: LoadLevel,
    /// decision
    pub decision: ThrottleDecision,
    /// system metrics snapshot
    pub metrics_snapshot: MetricsSnapshot,
}

/// metrics snapshot
#[derive(Debug, Clone)]
pub struct MetricsSnapshot {
    /// IOPS
    pub iops: u64,
    /// latency
    pub latency: u64,
    /// CPU usage
    pub cpu_usage: u8,
    /// memory usage
    pub memory_usage: u8,
}
impl AdvancedIOThrottler {
    /// create a new advanced IO throttler
    pub fn new(config: IOThrottlerConfig) -> Self {
        Self {
            config: Arc::new(RwLock::new(config)),
            current_iops: Arc::new(AtomicU64::new(0)),
            business_priority: Arc::new(AtomicU8::new(95)),
            scan_delay: Arc::new(AtomicU64::new(5000)),
            allocation_strategy: Arc::new(RwLock::new(ResourceAllocationStrategy::BusinessFirst)),
            throttle_history: Arc::new(RwLock::new(Vec::new())),
            last_adjustment: Arc::new(RwLock::new(SystemTime::UNIX_EPOCH)),
        }
    }

    /// adjust scanning delay based on load level
    pub async fn adjust_for_load_level(&self, load_level: LoadLevel) -> Duration {
        let config = self.config.read().await;

        let delay_ms = match load_level {
            LoadLevel::Low => {
                // low load: use minimum delay
                self.scan_delay.store(config.min_scan_delay, Ordering::Relaxed);
                self.business_priority
                    .store(config.base_business_priority.saturating_sub(5), Ordering::Relaxed);
                config.min_scan_delay
            }
            LoadLevel::Medium => {
                // medium load: increase delay moderately (25s with the default 5s minimum delay)
                let delay = config.min_scan_delay * 5;
                self.scan_delay.store(delay, Ordering::Relaxed);
                self.business_priority.store(config.base_business_priority, Ordering::Relaxed);
                delay
            }
            LoadLevel::High => {
                // high load: increase delay significantly (50s with the default 5s minimum delay)
                let delay = config.min_scan_delay * 10;
                self.scan_delay.store(delay, Ordering::Relaxed);
                self.business_priority
                    .store(config.base_business_priority.saturating_add(3), Ordering::Relaxed);
                delay
            }
            LoadLevel::Critical => {
                // critical load: maximum delay or pause (60s by default)
                let delay = config.max_scan_delay;
                self.scan_delay.store(delay, Ordering::Relaxed);
                self.business_priority.store(99, Ordering::Relaxed);
                delay
            }
        };

        let duration = Duration::from_millis(delay_ms);

        debug!("Adjust scanning delay based on load level {:?}: {:?}", load_level, duration);

        duration
    }
    /// create throttle decision
    pub async fn make_throttle_decision(&self, load_level: LoadLevel, metrics: Option<MetricsSnapshot>) -> ThrottleDecision {
        let _config = self.config.read().await;

        let should_pause = matches!(load_level, LoadLevel::Critical);

        let suggested_delay = self.adjust_for_load_level(load_level).await;

        let resource_allocation = self.calculate_resource_allocation(load_level).await;

        let reason = match load_level {
            LoadLevel::Low => "system load is low, scanner can run normally".to_string(),
            LoadLevel::Medium => "system load is moderate, scanner is running at reduced speed".to_string(),
            LoadLevel::High => "system load is high, scanner is running at significantly reduced speed".to_string(),
            LoadLevel::Critical => "system load is too high, scanner is paused".to_string(),
        };

        let decision = ThrottleDecision {
            should_pause,
            suggested_delay,
            resource_allocation,
            reason,
        };

        // record decision history
        if let Some(snapshot) = metrics {
            self.record_throttle_decision(load_level, decision.clone(), snapshot).await;
        }

        decision
    }

    /// calculate resource allocation
    async fn calculate_resource_allocation(&self, load_level: LoadLevel) -> ResourceAllocation {
        let strategy = *self.allocation_strategy.read().await;

        let (business_pct, scanner_pct) = match (strategy, load_level) {
            (ResourceAllocationStrategy::BusinessFirst, LoadLevel::Low) => (90, 10),
            (ResourceAllocationStrategy::BusinessFirst, LoadLevel::Medium) => (95, 5),
            (ResourceAllocationStrategy::BusinessFirst, LoadLevel::High) => (98, 2),
            (ResourceAllocationStrategy::BusinessFirst, LoadLevel::Critical) => (99, 1),

            (ResourceAllocationStrategy::Balanced, LoadLevel::Low) => (80, 20),
            (ResourceAllocationStrategy::Balanced, LoadLevel::Medium) => (85, 15),
            (ResourceAllocationStrategy::Balanced, LoadLevel::High) => (90, 10),
            (ResourceAllocationStrategy::Balanced, LoadLevel::Critical) => (95, 5),

            (ResourceAllocationStrategy::MaintenanceFirst, _) => (70, 30), // special maintenance mode
        };

        ResourceAllocation {
            business_percentage: business_pct,
            scanner_percentage: scanner_pct,
            strategy,
        }
    }
    /// check whether scanning should be paused
    pub async fn should_pause_scanning(&self, load_level: LoadLevel) -> bool {
        match load_level {
            LoadLevel::Critical => {
                warn!("System load reached critical level, pausing scanner");
                true
            }
            _ => false,
        }
    }
    /// record throttle decision
    async fn record_throttle_decision(&self, load_level: LoadLevel, decision: ThrottleDecision, metrics: MetricsSnapshot) {
        let record = ThrottleRecord {
            timestamp: SystemTime::now(),
            load_level,
            decision,
            metrics_snapshot: metrics,
        };

        let mut history = self.throttle_history.write().await;
        history.push(record);

        // keep the history bounded (retain only the last 1000 records)
        while history.len() > 1000 {
            history.remove(0);
        }
    }
    /// set resource allocation strategy
    pub async fn set_allocation_strategy(&self, strategy: ResourceAllocationStrategy) {
        *self.allocation_strategy.write().await = strategy;
        info!("Set resource allocation strategy: {:?}", strategy);
    }

    /// get current resource allocation
    pub async fn get_current_allocation(&self) -> ResourceAllocation {
        let current_load = LoadLevel::Low; // placeholder: the current load level needs to be obtained externally
        self.calculate_resource_allocation(current_load).await
    }
    /// get throttle history
    pub async fn get_throttle_history(&self) -> Vec<ThrottleRecord> {
        self.throttle_history.read().await.clone()
    }

    /// get throttle stats
    pub async fn get_throttle_stats(&self) -> ThrottleStats {
        let history = self.throttle_history.read().await;

        let total_decisions = history.len();
        let pause_decisions = history.iter().filter(|r| r.decision.should_pause).count();

        let mut delay_sum = Duration::ZERO;
        for record in history.iter() {
            delay_sum += record.decision.suggested_delay;
        }

        let avg_delay = if total_decisions > 0 {
            delay_sum / total_decisions as u32
        } else {
            Duration::ZERO
        };

        // count by load level
        let low_count = history.iter().filter(|r| r.load_level == LoadLevel::Low).count();
        let medium_count = history.iter().filter(|r| r.load_level == LoadLevel::Medium).count();
        let high_count = history.iter().filter(|r| r.load_level == LoadLevel::High).count();
        let critical_count = history.iter().filter(|r| r.load_level == LoadLevel::Critical).count();

        ThrottleStats {
            total_decisions,
            pause_decisions,
            average_delay: avg_delay,
            load_level_distribution: LoadLevelDistribution {
                low_count,
                medium_count,
                high_count,
                critical_count,
            },
        }
    }

    /// reset throttle history
    pub async fn reset_history(&self) {
        self.throttle_history.write().await.clear();
        info!("Reset throttle history");
    }

    /// update config
    pub async fn update_config(&self, new_config: IOThrottlerConfig) {
        *self.config.write().await = new_config;
        info!("Updated IO throttler configuration");
    }

    /// get current scanning delay
    pub fn get_current_scan_delay(&self) -> Duration {
        let delay_ms = self.scan_delay.load(Ordering::Relaxed);
        Duration::from_millis(delay_ms)
    }

    /// get current business priority
    pub fn get_current_business_priority(&self) -> u8 {
        self.business_priority.load(Ordering::Relaxed)
    }
    /// simulate business load pressure test
    pub async fn simulate_business_pressure(&self, duration: Duration) -> SimulationResult {
        info!("Start simulating business load pressure test, duration: {:?}", duration);

        let start_time = SystemTime::now();
        let mut simulation_records = Vec::new();

        // simulate different load level changes
        let load_levels = [
            LoadLevel::Low,
            LoadLevel::Medium,
            LoadLevel::High,
            LoadLevel::Critical,
            LoadLevel::High,
            LoadLevel::Medium,
            LoadLevel::Low,
        ];

        let step_duration = duration / load_levels.len() as u32;

        for (i, &load_level) in load_levels.iter().enumerate() {
            let _step_start = SystemTime::now();

            // simulate metrics for this load level
            let metrics = MetricsSnapshot {
                iops: match load_level {
                    LoadLevel::Low => 200,
                    LoadLevel::Medium => 500,
                    LoadLevel::High => 800,
                    LoadLevel::Critical => 1200,
                },
                latency: match load_level {
                    LoadLevel::Low => 10,
                    LoadLevel::Medium => 25,
                    LoadLevel::High => 60,
                    LoadLevel::Critical => 150,
                },
                cpu_usage: match load_level {
                    LoadLevel::Low => 30,
                    LoadLevel::Medium => 50,
                    LoadLevel::High => 75,
                    LoadLevel::Critical => 95,
                },
                memory_usage: match load_level {
                    LoadLevel::Low => 40,
                    LoadLevel::Medium => 60,
                    LoadLevel::High => 80,
                    LoadLevel::Critical => 90,
                },
            };

            let decision = self.make_throttle_decision(load_level, Some(metrics.clone())).await;

            simulation_records.push(SimulationRecord {
                step: i + 1,
                load_level,
                metrics,
                decision: decision.clone(),
                step_duration,
            });

            info!(
                "simulate step {}: load={:?}, delay={:?}, pause={}",
                i + 1,
                load_level,
                decision.suggested_delay,
                decision.should_pause
            );

            // wait for step duration
            tokio::time::sleep(step_duration).await;
        }

        let total_duration = SystemTime::now().duration_since(start_time).unwrap_or(Duration::ZERO);

        SimulationResult {
            total_duration,
            simulation_records,
            final_stats: self.get_throttle_stats().await,
        }
    }
}
/// throttle stats
#[derive(Debug, Clone)]
pub struct ThrottleStats {
    /// total decisions
    pub total_decisions: usize,
    /// pause decisions
    pub pause_decisions: usize,
    /// average delay
    pub average_delay: Duration,
    /// load level distribution
    pub load_level_distribution: LoadLevelDistribution,
}

/// load level distribution
#[derive(Debug, Clone)]
pub struct LoadLevelDistribution {
    /// low load count
    pub low_count: usize,
    /// medium load count
    pub medium_count: usize,
    /// high load count
    pub high_count: usize,
    /// critical load count
    pub critical_count: usize,
}

/// simulation result
#[derive(Debug, Clone)]
pub struct SimulationResult {
    /// total duration
    pub total_duration: Duration,
    /// simulation records
    pub simulation_records: Vec<SimulationRecord>,
    /// final stats
    pub final_stats: ThrottleStats,
}

/// simulation record
#[derive(Debug, Clone)]
pub struct SimulationRecord {
    /// step number
    pub step: usize,
    /// load level
    pub load_level: LoadLevel,
    /// metrics snapshot
    pub metrics: MetricsSnapshot,
    /// throttle decision
    pub decision: ThrottleDecision,
    /// step duration
    pub step_duration: Duration,
}

impl Default for AdvancedIOThrottler {
    fn default() -> Self {
        Self::new(IOThrottlerConfig::default())
    }
}
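Editorial sketch (not part of the diff): one way the new throttler could be driven from a scanner loop. Only `AdvancedIOThrottler`, `IOThrottlerConfig`, `make_throttle_decision`, and the `ThrottleDecision` fields come from the file above; the module path, the `throttled_scan_step` wrapper, and the commented-out scan-work call are assumptions for illustration.

```rust
use crate::scanner::LoadLevel;
use crate::scanner::io_throttler::{AdvancedIOThrottler, IOThrottlerConfig};

// Build a throttler with a tighter IOPS budget than the default config.
fn build_throttler() -> AdvancedIOThrottler {
    AdvancedIOThrottler::new(IOThrottlerConfig {
        max_iops: 500,
        ..IOThrottlerConfig::default()
    })
}

// Pace one scanner iteration according to the throttler's decision.
async fn throttled_scan_step(throttler: &AdvancedIOThrottler, load_level: LoadLevel) {
    let decision = throttler.make_throttle_decision(load_level, None).await;

    if decision.should_pause {
        // Critical load: skip this round and wait out the suggested delay.
        tokio::time::sleep(decision.suggested_delay).await;
        return;
    }

    // do_one_scan_unit().await; // hypothetical placeholder for the actual scan work

    // Respect the suggested pacing before the next iteration.
    tokio::time::sleep(decision.suggested_delay).await;
}
```

The sketch leans on the decision struct rather than the lower-level `adjust_for_load_level` call, since `make_throttle_decision` already folds in pause handling, delay selection, and history recording.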