Compare commits: 1.0.0-alph ... 1.0.0-alph (28 commits)
| SHA1 |
|---|
| 6267872ddb |
| 618779a89d |
| b3ec2325ed |
| 49a5643e76 |
| 657395af8a |
| 4de62ed77e |
| 505f493729 |
| be05b704b0 |
| b33c2fa3cf |
| 98674c60d4 |
| e39eb86967 |
| 646070ae7a |
| 2525b66658 |
| 58c5a633e2 |
| aefd894fc2 |
| 1e1d4646a2 |
| b97845fffd |
| 84f5a4cb48 |
| 2832f0e089 |
| a3b5445824 |
| 363e37c791 |
| 1b0b041530 |
| 7d5fc87002 |
| 13130e9dd4 |
| 1061ce11a3 |
| 9f9a74000d |
| d1863018df |
| 166080aac8 |
@@ -517,7 +517,7 @@ let results = join_all(futures).await;

 ### 3. Caching Strategy

-- Use `lazy_static` or `OnceCell` for global caching
+- Use `LazyLock` for global caching
 - Implement LRU cache to avoid memory leaks

 ## Testing Guidelines
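The switch above reflects that `std::sync::LazyLock` has been stable since Rust 1.80, removing the need for the `lazy_static` crate. A minimal sketch of the recommended pattern (the cache name and contents are illustrative, not taken from the RustFS codebase):

```rust
use std::collections::HashMap;
use std::sync::LazyLock;

// Global cache, initialized exactly once on first access; thread-safe by construction.
static MIME_TYPES: LazyLock<HashMap<&'static str, &'static str>> = LazyLock::new(|| {
    HashMap::from([("html", "text/html"), ("png", "image/png")])
});

fn mime_for(ext: &str) -> &'static str {
    MIME_TYPES.get(ext).copied().unwrap_or("application/octet-stream")
}
```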
@@ -1,27 +0,0 @@
FROM ubuntu:22.04

ENV LANG C.UTF-8

RUN sed -i s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g /etc/apt/sources.list

RUN apt-get clean && apt-get update && apt-get install wget git curl unzip gcc pkg-config libssl-dev lld libdbus-1-dev libwayland-dev libwebkit2gtk-4.1-dev libxdo-dev -y

# install protoc
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
    && unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
    && mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
    && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3

# install flatc
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
    && unzip Linux.flatc.binary.g++-13.zip \
    && mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip

# install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

COPY .docker/cargo.config.toml /root/.cargo/config.toml

WORKDIR /root/s3-rustfs

CMD [ "bash", "-c", "while true; do sleep 1; done" ]
@@ -1,32 +0,0 @@
FROM rockylinux:9.3 AS builder

ENV LANG C.UTF-8

RUN sed -e 's|^mirrorlist=|#mirrorlist=|g' \
    -e 's|^#baseurl=http://dl.rockylinux.org/$contentdir|baseurl=https://mirrors.ustc.edu.cn/rocky|g' \
    -i.bak \
    /etc/yum.repos.d/rocky-extras.repo \
    /etc/yum.repos.d/rocky.repo

RUN dnf makecache

RUN yum install wget git unzip gcc openssl-devel pkgconf-pkg-config -y

# install protoc
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
    && unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
    && mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
    && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3

# install flatc
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
    && unzip Linux.flatc.binary.g++-13.zip \
    && mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc \
    && rm -rf Linux.flatc.binary.g++-13.zip

# install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

COPY .docker/cargo.config.toml /root/.cargo/config.toml

WORKDIR /root/s3-rustfs
@@ -1,25 +0,0 @@
FROM ubuntu:22.04

ENV LANG C.UTF-8

RUN sed -i s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g /etc/apt/sources.list

RUN apt-get clean && apt-get update && apt-get install wget git curl unzip gcc pkg-config libssl-dev lld libdbus-1-dev libwayland-dev libwebkit2gtk-4.1-dev libxdo-dev -y

# install protoc
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
    && unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
    && mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
    && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3

# install flatc
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
    && unzip Linux.flatc.binary.g++-13.zip \
    && mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip

# install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

COPY .docker/cargo.config.toml /root/.cargo/config.toml

WORKDIR /root/s3-rustfs
.docker/README.md (new file, 157 lines)
@@ -0,0 +1,157 @@
# RustFS Docker Images

This directory contains organized Dockerfile configurations for building RustFS container images across multiple platforms and system versions.

## 📁 Directory Structure

```
.docker/
├── alpine/                  # Alpine Linux variants
│   ├── Dockerfile.prebuild  # Alpine + pre-built binaries
│   └── Dockerfile.source    # Alpine + source compilation
├── ubuntu/                  # Ubuntu variants
│   ├── Dockerfile.prebuild  # Ubuntu + pre-built binaries
│   ├── Dockerfile.source    # Ubuntu + source compilation
│   └── Dockerfile.dev       # Ubuntu + development environment
└── cargo.config.toml        # Rust cargo configuration
```

## 🎯 Image Variants

### Production Images

| Variant | Base OS | Build Method | Size | Use Case |
|---------|---------|--------------|------|----------|
| `production` (default) | Alpine 3.18 | Pre-built | Smallest | Production deployment |
| `alpine` | Alpine 3.18 | Pre-built | Small | Explicit Alpine choice |
| `alpine-source` | Alpine 3.18 | Source build | Small | Custom Alpine builds |
| `ubuntu` | Ubuntu 22.04 | Pre-built | Medium | Ubuntu environments |
| `ubuntu-source` | Ubuntu 22.04 | Source build | Medium | Full Ubuntu compatibility |

### Development Images

| Variant | Base OS | Features | Use Case |
|---------|---------|----------|----------|
| `ubuntu-dev` | Ubuntu 22.04 | Full toolchain + dev tools | Interactive development |

## 🚀 Usage Examples

### Quick Start (Production)

```bash
# Default production image (Alpine + pre-built)
docker run -p 9000:9000 rustfs/rustfs:latest

# Specific version with production variant
docker run -p 9000:9000 rustfs/rustfs:1.2.3-production

# Explicit Alpine variant
docker run -p 9000:9000 rustfs/rustfs:latest-alpine

# Ubuntu-based production
docker run -p 9000:9000 rustfs/rustfs:latest-ubuntu
```

### Complete Tag Strategy Examples

```bash
# Stable Releases
docker run rustfs/rustfs:1.2.3                # Main version (production)
docker run rustfs/rustfs:1.2.3-production    # Explicit production variant
docker run rustfs/rustfs:1.2.3-alpine        # Explicit Alpine variant
docker run rustfs/rustfs:1.2.3-alpine-source # Alpine source build
docker run rustfs/rustfs:latest              # Latest stable

# Prerelease Versions
docker run rustfs/rustfs:1.3.0-alpha.2        # Specific alpha version
docker run rustfs/rustfs:1.3.0-alpha.2-alpine # Alpha with Alpine
docker run rustfs/rustfs:alpha                # Latest alpha
docker run rustfs/rustfs:beta                 # Latest beta
docker run rustfs/rustfs:rc                   # Latest release candidate

# Development Versions
docker run rustfs/rustfs:dev          # Latest development
docker run rustfs/rustfs:dev-13e4a0b  # Specific commit
docker run rustfs/rustfs:dev-alpine   # Development Alpine
```

### Development Environment

```bash
# Start development container
docker run -it -v $(pwd):/app -p 9000:9000 rustfs/rustfs:latest-ubuntu-dev

# Inside container:
cd /app
cargo build --release
cargo run
```

## 🏗️ Build Arguments

All images support dynamic version selection:

```bash
# Build with specific version
docker build \
  --build-arg VERSION="1.0.0" \
  --build-arg BUILD_TYPE="release" \
  -f .docker/alpine/Dockerfile.prebuild \
  -t rustfs:1.0.0-alpine .
```

## 🌐 Multi-Platform Support
All images support multiple architectures (see the buildx sketch below):

- `linux/amd64` (Intel/AMD 64-bit)
- `linux/arm64` (ARM 64-bit, Apple Silicon, etc.)
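Multi-arch images of this kind are typically produced with Docker Buildx; a sketch under the assumption that you push to the `rustfs/rustfs` repository (builder name and tag are illustrative):

```bash
# One-time setup: create and select a builder that can target both platforms
docker buildx create --name rustfs-builder --use

# Build both architectures in one invocation and push the manifest list
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  --build-arg VERSION="1.0.0" \
  -f .docker/alpine/Dockerfile.prebuild \
  -t rustfs/rustfs:1.0.0-alpine \
  --push .
```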
## ⚡ Build Speed Optimizations

### Docker Build Optimizations

- **Multi-layer caching**: GitHub Actions cache + Registry cache
- **Parallel matrix builds**: All 5 variants build simultaneously
- **Multi-platform builds**: amd64/arm64 built in parallel
- **BuildKit features**: Advanced caching and inline cache

### Rust Compilation Optimizations

- **sccache**: Distributed compilation cache for Rust builds
- **Parallel compilation**: Uses all available CPU cores (`-j $(nproc)`)
- **Optimized cargo config**: Sparse registry protocol, fast linker (lld)
- **Dependency caching**: Separate Docker layers for dependencies vs. source code
- **Release optimizations**: LTO, stripped symbols, optimized codegen (see the profile sketch after this list)
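The release settings in the last bullet correspond to the `[profile.release]` block this PR adds to `cargo.config.toml`; the equivalent `Cargo.toml` form, shown here as a sketch:

```toml
[profile.release]
codegen-units = 1  # fewer codegen units: slower builds, better optimization
lto = true         # link-time optimization across crate boundaries
strip = true       # strip symbols from the final binary
panic = "abort"    # drop unwinding machinery for a smaller binary
debug = false      # no debug info in release artifacts
```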
### Cache Strategy

```yaml
# GitHub Actions cache
cache-from: type=gha,scope=docker-{variant}
cache-to: type=gha,mode=max,scope=docker-{variant}

# Registry cache (persistent across runs)
cache-from: type=registry,ref=ghcr.io/rustfs/rustfs:buildcache-{variant}
cache-to: type=registry,ref=ghcr.io/rustfs/rustfs:buildcache-{variant}
```

### Build Performance Comparison

| Build Type | Time (Est.) | Cache Hit | Cache Miss |
|------------|-------------|-----------|------------|
| Production (Alpine pre-built) | ~2-3 min | ~1 min | ~2 min |
| Alpine pre-built | ~2-3 min | ~1 min | ~2 min |
| Alpine source | ~8-12 min | ~3-5 min | ~10 min |
| Ubuntu pre-built | ~3-4 min | ~1-2 min | ~3 min |
| Ubuntu source | ~10-15 min | ~4-6 min | ~12 min |

## 📋 Build Matrix

| Trigger | Version Format | Download Path | Image Tags |
|---------|----------------|---------------|------------|
| `push main` | `dev-{sha}` | `artifacts/rustfs/dev/` | `dev-{sha}-{variant}`, `dev-{variant}`, `dev` |
| `push 1.2.3` | `1.2.3` | `artifacts/rustfs/release/` | `1.2.3-{variant}`, `1.2.3`, `latest-{variant}`, `latest` |
| `push 1.3.0-alpha.2` | `1.3.0-alpha.2` | `artifacts/rustfs/release/` | `1.3.0-alpha.2-{variant}`, `alpha-{variant}`, `alpha` |
| `push 1.3.0-beta.1` | `1.3.0-beta.1` | `artifacts/rustfs/release/` | `1.3.0-beta.1-{variant}`, `beta-{variant}`, `beta` |
| `push 1.3.0-rc.1` | `1.3.0-rc.1` | `artifacts/rustfs/release/` | `1.3.0-rc.1-{variant}`, `rc-{variant}`, `rc` |
.docker/alpine/Dockerfile.prebuild (new file, 117 lines)
@@ -0,0 +1,117 @@
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Multi-stage Alpine build for minimal runtime image
FROM rust:1.88-alpine AS builder

# Build arguments for dynamic artifact download
ARG VERSION=""
ARG BUILD_TYPE="release"
ARG TARGETARCH

# Install build dependencies
RUN apk add --no-cache \
    musl-dev \
    pkgconfig \
    openssl-dev \
    openssl-libs-static \
    curl \
    unzip \
    bash \
    wget \
    ca-certificates

# Install protoc
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
    && unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
    && mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
    && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3

# Install flatc
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
    && unzip Linux.flatc.binary.g++-13.zip \
    && mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc \
    && rm -rf Linux.flatc.binary.g++-13.zip

# Option A: Download pre-built binary (faster)
RUN if [ -n "$VERSION" ]; then \
        # Map TARGETARCH to our naming convention
        case "${TARGETARCH}" in \
            amd64) ARCH="x86_64" ;; \
            arm64) ARCH="aarch64" ;; \
            *) echo "Unsupported architecture: ${TARGETARCH}" && exit 1 ;; \
        esac; \
        \
        # Determine download path and filename
        if [ "${BUILD_TYPE}" = "development" ]; then \
            DOWNLOAD_PATH="artifacts/rustfs/dev"; \
            FILENAME="rustfs-linux-${ARCH}-dev-${VERSION}.zip"; \
        else \
            DOWNLOAD_PATH="artifacts/rustfs/release"; \
            FILENAME="rustfs-linux-${ARCH}-v${VERSION}.zip"; \
        fi; \
        \
        # Download the binary
        DOWNLOAD_URL="https://dl.rustfs.com/${DOWNLOAD_PATH}/${FILENAME}"; \
        echo "Downloading RustFS binary from: ${DOWNLOAD_URL}"; \
        curl -Lo /tmp/rustfs.zip "${DOWNLOAD_URL}"; \
        unzip -o /tmp/rustfs.zip -d /tmp; \
        mv /tmp/rustfs /usr/local/bin/rustfs; \
        chmod +x /usr/local/bin/rustfs; \
        rm -rf /tmp/*; \
    else \
        echo "No VERSION provided, will build from source"; \
        echo "Source build not yet implemented in Alpine variant"; \
        exit 1; \
    fi

# Final Alpine runtime image
FROM alpine:3.18

RUN apk add --no-cache \
    ca-certificates \
    tzdata \
    bash

# Create rustfs user for security
RUN addgroup -g 1000 rustfs && \
    adduser -D -u 1000 -G rustfs rustfs

WORKDIR /app

# Copy binary from builder
COPY --from=builder /usr/local/bin/rustfs /app/rustfs
RUN chmod +x /app/rustfs && chown rustfs:rustfs /app/rustfs

# Create data directories
RUN mkdir -p /data && chown -R rustfs:rustfs /data /app

# Switch to non-root user
USER rustfs

# Environment variables
ENV RUSTFS_ACCESS_KEY=rustfsadmin \
    RUSTFS_SECRET_KEY=rustfsadmin \
    RUSTFS_ADDRESS=":9000" \
    RUSTFS_CONSOLE_ENABLE=true \
    RUSTFS_VOLUMES=/data \
    RUST_LOG=warn

EXPOSE 9000

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --no-verbose --tries=1 --spider http://localhost:9000/health || exit 1

CMD ["/app/rustfs"]
.docker/alpine/Dockerfile.source (new file, 126 lines)
@@ -0,0 +1,126 @@
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Multi-stage Alpine build from source
FROM rust:1.88-alpine AS builder

# Install build dependencies
RUN apk add --no-cache \
    musl-dev \
    pkgconfig \
    openssl-dev \
    openssl-libs-static \
    curl \
    unzip \
    bash \
    wget \
    ca-certificates \
    git

# Install sccache for Rust compilation caching
RUN wget https://github.com/mozilla/sccache/releases/download/v0.8.1/sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz \
    && tar -xzf sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz \
    && mv sccache-v0.8.1-x86_64-unknown-linux-musl/sccache /usr/local/bin/ \
    && chmod +x /usr/local/bin/sccache \
    && rm -rf sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz sccache-v0.8.1-x86_64-unknown-linux-musl

# Set up sccache environment
ENV RUSTC_WRAPPER=sccache \
    SCCACHE_DIR=/tmp/sccache \
    SCCACHE_CACHE_SIZE=2G

# Install protoc
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
    && unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
    && mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
    && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3

# Install flatc
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
    && unzip Linux.flatc.binary.g++-13.zip \
    && mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc \
    && rm -rf Linux.flatc.binary.g++-13.zip

WORKDIR /usr/src/rustfs

# Copy cargo configuration for optimized builds
COPY .docker/cargo.config.toml ./.cargo/config.toml

# Copy cargo files for dependency caching
COPY Cargo.toml Cargo.lock ./
COPY */Cargo.toml ./*/

# Create dummy main.rs files for dependency compilation
RUN find . -name "Cargo.toml" -not -path "./Cargo.toml" | \
    xargs -I {} dirname {} | \
    xargs -I {} sh -c 'mkdir -p {}/src && echo "fn main() {}" > {}/src/main.rs'

# Configure cargo for optimized builds
ENV CARGO_NET_GIT_FETCH_WITH_CLI=true \
    CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse \
    CARGO_INCREMENTAL=0 \
    CARGO_PROFILE_RELEASE_DEBUG=false \
    CARGO_PROFILE_RELEASE_SPLIT_DEBUGINFO=off \
    CARGO_PROFILE_RELEASE_STRIP=symbols

# Build dependencies only (cache layer) with optimizations
RUN cargo build --release --target x86_64-unknown-linux-musl -j $(nproc)

# Copy source code
COPY . .

# Build the actual application with optimizations
RUN sccache --start-server 2>/dev/null || true && \
    cargo build --release --target x86_64-unknown-linux-musl --bin rustfs -j $(nproc) && \
    sccache --show-stats || true

# Final Alpine runtime image
FROM alpine:3.18

RUN apk add --no-cache \
    ca-certificates \
    tzdata \
    bash

# Create rustfs user for security
RUN addgroup -g 1000 rustfs && \
    adduser -D -u 1000 -G rustfs rustfs

WORKDIR /app

# Copy binary from builder
COPY --from=builder /usr/src/rustfs/target/x86_64-unknown-linux-musl/release/rustfs /app/rustfs
RUN chmod +x /app/rustfs && chown rustfs:rustfs /app/rustfs

# Create data directories
RUN mkdir -p /data && chown -R rustfs:rustfs /data /app

# Switch to non-root user
USER rustfs

# Environment variables
ENV RUSTFS_ACCESS_KEY=rustfsadmin \
    RUSTFS_SECRET_KEY=rustfsadmin \
    RUSTFS_ADDRESS=":9000" \
    RUSTFS_CONSOLE_ENABLE=true \
    RUSTFS_VOLUMES=/data \
    RUST_LOG=warn

EXPOSE 9000

# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD wget --no-verbose --tries=1 --spider http://localhost:9000/health || exit 1

CMD ["/app/rustfs"]
@@ -12,8 +12,44 @@
# See the License for the specific language governing permissions and
# limitations under the License.

# Build optimization settings
[build]
# Parallel compilation: cargo already defaults to all available CPU cores,
# and `jobs` must be a non-zero value when set, so it is left unset here

[target.x86_64-unknown-linux-musl]
# Use lld linker for faster linking
linker = "lld"

[target.x86_64-unknown-linux-gnu]
# Use lld linker for faster linking
linker = "lld"

[target.aarch64-unknown-linux-gnu]
# Use lld linker for faster linking
linker = "lld"

[profile.release]
# Optimize for size and speed
codegen-units = 1
lto = true
panic = "abort"
strip = true
debug = false

[profile.dev]
# Faster incremental builds during development
incremental = true
debug = true

[source.crates-io]
registry = "https://github.com/rust-lang/crates.io-index"
# Use the sparse registry protocol for faster dependency resolution
# (`replace-with` must name a source, not a URL)
replace-with = "sparse-index"

[source.sparse-index]
registry = "sparse+https://index.crates.io/"

[net]
# Use git CLI for better performance with private repos
git-fetch-with-cli = true
.docker/compose/README.md (new file, 80 lines)
@@ -0,0 +1,80 @@
# Docker Compose Configurations

This directory contains specialized Docker Compose configurations for different use cases.

## 📁 Configuration Files

This directory contains specialized Docker Compose configurations and their associated Dockerfiles, keeping related files organized together.

### Main Configuration (Root Directory)

- **`../../docker-compose.yml`** - **Default Production Setup**
  - Complete production-ready configuration
  - Includes RustFS server + full observability stack
  - Supports multiple profiles: `dev`, `observability`, `cache`, `proxy`
  - Recommended for most users

### Specialized Configurations

- **`docker-compose.cluster.yaml`** - **Distributed Testing**
  - 4-node cluster setup for testing distributed storage
  - Uses locally compiled binaries
  - Simulates a multi-node environment
  - Ideal for development and cluster testing

- **`docker-compose.observability.yaml`** - **Observability Focus**
  - Specialized setup for testing observability features
  - Includes OpenTelemetry, Jaeger, Prometheus, Loki, Grafana
  - Uses `../../Dockerfile.obs` for builds
  - Perfect for observability development

## 🚀 Usage Examples

### Production Setup

```bash
# Start main service
docker-compose up -d

# Start with development profile
docker-compose --profile dev up -d

# Start with full observability
docker-compose --profile observability up -d
```

### Cluster Testing

```bash
# Build and start 4-node cluster (run from project root)
cd .docker/compose
docker-compose -f docker-compose.cluster.yaml up -d

# Or run directly from project root
docker-compose -f .docker/compose/docker-compose.cluster.yaml up -d
```

### Observability Testing

```bash
# Start observability-focused environment (run from project root)
cd .docker/compose
docker-compose -f docker-compose.observability.yaml up -d

# Or run directly from project root
docker-compose -f .docker/compose/docker-compose.observability.yaml up -d
```

## 🔧 Configuration Overview

| Configuration | Nodes | Storage | Observability | Use Case |
|---------------|-------|---------|---------------|----------|
| **Main** | 1 | Volume mounts | Full stack | Production |
| **Cluster** | 4 | HTTP endpoints | Basic | Testing |
| **Observability** | 4 | Local data | Advanced | Development |

## 📝 Notes

- Always ensure you have built the required binaries before starting cluster tests
- The main configuration is sufficient for most use cases
- Specialized configurations are for specific testing scenarios
@@ -14,70 +14,69 @@

 services:
   node0:
-    image: rustfs:v1 # Replace with your image name and tag
+    image: rustfs/rustfs:latest # Replace with your image name and label
     container_name: node0
     hostname: node0
     environment:
       - RUSTFS_VOLUMES=http://node{0...3}:9000/data/rustfs{0...3}
       - RUSTFS_ADDRESS=0.0.0.0:9000
       - RUSTFS_CONSOLE_ENABLE=true
       - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
       - RUSTFS_ACCESS_KEY=rustfsadmin
       - RUSTFS_SECRET_KEY=rustfsadmin
     platform: linux/amd64
     ports:
-      - "9000:9000" # Map host port 9001 to container port 9000
-      - "8000:9001" # Map host port 9001 to container port 9000
+      - "9000:9000" # Map port 9001 of the host to port 9000 of the container
     volumes:
-      - ./target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
-      # - ./data/node0:/data # Mount the current path to /root/data inside the container
+      - ../../target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
     command: "/app/rustfs"

   node1:
-    image: rustfs:v1
+    image: rustfs/rustfs:latest
     container_name: node1
     hostname: node1
     environment:
       - RUSTFS_VOLUMES=http://node{0...3}:9000/data/rustfs{0...3}
       - RUSTFS_ADDRESS=0.0.0.0:9000
       - RUSTFS_CONSOLE_ENABLE=true
       - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
       - RUSTFS_ACCESS_KEY=rustfsadmin
       - RUSTFS_SECRET_KEY=rustfsadmin
     platform: linux/amd64
     ports:
-      - "9001:9000" # Map host port 9002 to container port 9000
+      - "9001:9000" # Map port 9002 of the host to port 9000 of the container
     volumes:
-      - ./target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
-      # - ./data/node1:/data
+      - ../../target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
     command: "/app/rustfs"

   node2:
-    image: rustfs:v1
+    image: rustfs/rustfs:latest
     container_name: node2
     hostname: node2
     environment:
       - RUSTFS_VOLUMES=http://node{0...3}:9000/data/rustfs{0...3}
       - RUSTFS_ADDRESS=0.0.0.0:9000
       - RUSTFS_CONSOLE_ENABLE=true
       - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
       - RUSTFS_ACCESS_KEY=rustfsadmin
       - RUSTFS_SECRET_KEY=rustfsadmin
     platform: linux/amd64
     ports:
-      - "9002:9000" # Map host port 9003 to container port 9000
+      - "9002:9000" # Map port 9003 of the host to port 9000 of the container
     volumes:
-      - ./target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
-      # - ./data/node2:/data
+      - ../../target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
     command: "/app/rustfs"

   node3:
-    image: rustfs:v1
+    image: rustfs/rustfs:latest
     container_name: node3
     hostname: node3
     environment:
       - RUSTFS_VOLUMES=http://node{0...3}:9000/data/rustfs{0...3}
       - RUSTFS_ADDRESS=0.0.0.0:9000
       - RUSTFS_CONSOLE_ENABLE=true
       - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
       - RUSTFS_ACCESS_KEY=rustfsadmin
       - RUSTFS_SECRET_KEY=rustfsadmin
     platform: linux/amd64
     ports:
-      - "9003:9000" # Map host port 9004 to container port 9000
+      - "9003:9000" # Map port 9004 of the host to port 9000 of the container
     volumes:
-      - ./target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
-      # - ./data/node3:/data
+      - ../../target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
     command: "/app/rustfs"
@@ -14,11 +14,11 @@

 services:
   otel-collector:
-    image: ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-contrib:0.127.0
+    image: otel/opentelemetry-collector-contrib:0.129.1
     environment:
       - TZ=Asia/Shanghai
     volumes:
-      - ./.docker/observability/otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml
+      - ../../.docker/observability/otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml
     ports:
       - 1888:1888
       - 8888:8888
@@ -30,7 +30,7 @@ services:
     networks:
       - rustfs-network
   jaeger:
-    image: jaegertracing/jaeger:2.6.0
+    image: jaegertracing/jaeger:2.8.0
     environment:
       - TZ=Asia/Shanghai
     ports:
@@ -40,11 +40,11 @@ services:
     networks:
       - rustfs-network
   prometheus:
-    image: prom/prometheus:v3.4.1
+    image: prom/prometheus:v3.4.2
     environment:
       - TZ=Asia/Shanghai
     volumes:
-      - ./.docker/observability/prometheus.yml:/etc/prometheus/prometheus.yml
+      - ../../.docker/observability/prometheus.yml:/etc/prometheus/prometheus.yml
     ports:
       - "9090:9090"
     networks:
@@ -54,16 +54,16 @@ services:
     environment:
       - TZ=Asia/Shanghai
     volumes:
-      - ./.docker/observability/loki-config.yaml:/etc/loki/local-config.yaml
+      - ../../.docker/observability/loki-config.yaml:/etc/loki/local-config.yaml
     ports:
       - "3100:3100"
     command: -config.file=/etc/loki/local-config.yaml
     networks:
       - rustfs-network
   grafana:
-    image: grafana/grafana:12.0.1
+    image: grafana/grafana:12.0.2
     ports:
-      - "3000:3000" # Web UI
+      - "3000:3000" # Web UI
     environment:
       - GF_SECURITY_ADMIN_PASSWORD=admin
       - TZ=Asia/Shanghai
@@ -72,85 +72,69 @@

   node1:
     build:
-      context: .
+      context: ../..
       dockerfile: Dockerfile.obs
     container_name: node1
     environment:
       - RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
       - RUSTFS_ADDRESS=:9000
       - RUSTFS_CONSOLE_ENABLE=true
       - RUSTFS_CONSOLE_ADDRESS=:9002
       - RUSTFS_OBS_CONFIG=/etc/observability/config/obs-multi.toml
       - RUSTFS_OBS_ENDPOINT=http://otel-collector:4317
       - RUSTFS_OBS_LOGGER_LEVEL=debug
     platform: linux/amd64
     ports:
-      - "9001:9000" # Map host port 9001 to container port 9000
+      - "9001:9000" # Map port 9001 of the host to port 9000 of the container
       - "9101:9002"
-    volumes:
-      # - ./data:/root/data # Mount the current path to /root/data inside the container
-      - ./.docker/observability/config:/etc/observability/config
     networks:
       - rustfs-network

   node2:
     build:
-      context: .
+      context: ../..
       dockerfile: Dockerfile.obs
     container_name: node2
     environment:
       - RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
       - RUSTFS_ADDRESS=:9000
       - RUSTFS_CONSOLE_ENABLE=true
       - RUSTFS_CONSOLE_ADDRESS=:9002
       - RUSTFS_OBS_CONFIG=/etc/observability/config/obs-multi.toml
       - RUSTFS_OBS_ENDPOINT=http://otel-collector:4317
       - RUSTFS_OBS_LOGGER_LEVEL=debug
     platform: linux/amd64
     ports:
-      - "9002:9000" # Map host port 9002 to container port 9000
+      - "9002:9000" # Map port 9002 of the host to port 9000 of the container
       - "9102:9002"
-    volumes:
-      # - ./data:/root/data
-      - ./.docker/observability/config:/etc/observability/config
     networks:
       - rustfs-network

   node3:
     build:
-      context: .
+      context: ../..
       dockerfile: Dockerfile.obs
     container_name: node3
     environment:
       - RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
       - RUSTFS_ADDRESS=:9000
       - RUSTFS_CONSOLE_ENABLE=true
       - RUSTFS_CONSOLE_ADDRESS=:9002
       - RUSTFS_OBS_CONFIG=/etc/observability/config/obs-multi.toml
       - RUSTFS_OBS_ENDPOINT=http://otel-collector:4317
       - RUSTFS_OBS_LOGGER_LEVEL=debug
     platform: linux/amd64
     ports:
-      - "9003:9000" # Map host port 9003 to container port 9000
+      - "9003:9000" # Map port 9003 of the host to port 9000 of the container
       - "9103:9002"
-    volumes:
-      # - ./data:/root/data
-      - ./.docker/observability/config:/etc/observability/config
     networks:
       - rustfs-network

   node4:
     build:
-      context: .
+      context: ../..
       dockerfile: Dockerfile.obs
     container_name: node4
     environment:
       - RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
       - RUSTFS_ADDRESS=:9000
       - RUSTFS_CONSOLE_ENABLE=true
       - RUSTFS_CONSOLE_ADDRESS=:9002
       - RUSTFS_OBS_CONFIG=/etc/observability/config/obs-multi.toml
       - RUSTFS_OBS_ENDPOINT=http://otel-collector:4317
       - RUSTFS_OBS_LOGGER_LEVEL=debug
     platform: linux/amd64
     ports:
-      - "9004:9000" # Map host port 9004 to container port 9000
+      - "9004:9000" # Map port 9004 of the host to port 9000 of the container
       - "9104:9002"
-    volumes:
-      # - ./data:/root/data
-      - ./.docker/observability/config:/etc/observability/config
     networks:
       - rustfs-network
@@ -14,7 +14,7 @@

 services:
   otel-collector:
-    image: ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-contrib:0.127.0
+    image: otel/opentelemetry-collector-contrib:0.129.1
     environment:
       - TZ=Asia/Shanghai
     volumes:
@@ -30,7 +30,7 @@ services:
     networks:
       - otel-network
   jaeger:
-    image: jaegertracing/jaeger:2.7.0
+    image: jaegertracing/jaeger:2.8.0
     environment:
       - TZ=Asia/Shanghai
     ports:
@@ -40,7 +40,7 @@ services:
     networks:
       - otel-network
   prometheus:
-    image: prom/prometheus:v3.4.1
+    image: prom/prometheus:v3.4.2
     environment:
       - TZ=Asia/Shanghai
     volumes:
@@ -66,6 +66,12 @@ service:
   logs:
     level: "info" # Collector log level
   metrics:
     address: "0.0.0.0:8888" # Exposes the collector's own metrics
     level: "detailed" # Can be basic, normal, or detailed
+    readers:
+      - periodic:
+          exporter:
+            otlp:
+              protocol: http/protobuf
+              endpoint: http://otel-collector:4318
.docker/ubuntu/Dockerfile.dev (new file, 96 lines)
@@ -0,0 +1,96 @@
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Ubuntu-based development environment
# Provides full development toolchain for building RustFS from source
FROM ubuntu:22.04

ENV LANG=C.UTF-8
ENV DEBIAN_FRONTEND=noninteractive

# Use faster mirrors for better build performance
RUN sed -i s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g /etc/apt/sources.list

# Install development dependencies
RUN apt-get clean && apt-get update && apt-get install -y \
    wget \
    git \
    curl \
    unzip \
    gcc \
    pkg-config \
    libssl-dev \
    lld \
    libdbus-1-dev \
    libwayland-dev \
    libwebkit2gtk-4.1-dev \
    libxdo-dev \
    ca-certificates \
    bash \
    vim \
    nano \
    htop \
    tree \
    && rm -rf /var/lib/apt/lists/*

# Install protoc
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
    && unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
    && mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
    && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3

# Install flatc
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
    && unzip Linux.flatc.binary.g++-13.zip \
    && mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip

# Install rust for development
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"

# Install additional Rust tools for development
RUN /root/.cargo/bin/cargo install \
    cargo-watch \
    cargo-nextest \
    cargo-audit \
    cargo-outdated

# Copy cargo config for Chinese users
COPY .docker/cargo.config.toml /root/.cargo/config.toml

# Create development user
RUN groupadd -g 1000 rustfs && \
    useradd -d /app -g rustfs -u 1000 -s /bin/bash rustfs

WORKDIR /app

# Create data directories for testing
RUN mkdir -p /data && chown -R rustfs:rustfs /data /app

# Environment variables for development
ENV RUSTFS_ACCESS_KEY=devadmin \
    RUSTFS_SECRET_KEY=devadmin \
    RUSTFS_ADDRESS=":9000" \
    RUSTFS_CONSOLE_ENABLE=true \
    RUSTFS_VOLUMES=/data \
    RUST_LOG=debug \
    RUST_BACKTRACE=1

EXPOSE 9000

# Development mode: keep container alive for interactive development
CMD echo "RustFS Development Environment" && \
    echo "Source code should be mounted at /app" && \
    echo "Use 'cargo build' to build, 'cargo run' to run" && \
    exec bash -c "while true; do sleep 1; done"
.docker/ubuntu/Dockerfile.prebuild (new file, 130 lines)
@@ -0,0 +1,130 @@
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# Multi-purpose Ubuntu-based Dockerfile
# Can be used as development environment or ubuntu runtime variant
FROM ubuntu:22.04

# Build arguments for dynamic artifact download (when used as runtime)
ARG VERSION=""
ARG BUILD_TYPE="release"
ARG TARGETARCH

ENV LANG=C.UTF-8
ENV DEBIAN_FRONTEND=noninteractive

# Use faster mirrors for better build performance
RUN sed -i s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g /etc/apt/sources.list

# Install dependencies
RUN apt-get clean && apt-get update && apt-get install -y \
    wget \
    git \
    curl \
    unzip \
    gcc \
    pkg-config \
    libssl-dev \
    lld \
    libdbus-1-dev \
    libwayland-dev \
    libwebkit2gtk-4.1-dev \
    libxdo-dev \
    ca-certificates \
    bash \
    && rm -rf /var/lib/apt/lists/*

# Install protoc
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
    && unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
    && mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
    && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3

# Install flatc
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
    && unzip Linux.flatc.binary.g++-13.zip \
    && mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip

# Install rust (for development use)
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
ENV PATH="/root/.cargo/bin:${PATH}"

# Copy cargo config for Chinese users
COPY .docker/cargo.config.toml /root/.cargo/config.toml

# If VERSION is provided, download pre-built binary (for runtime use)
# Otherwise, this acts as a development environment
RUN if [ -n "$VERSION" ]; then \
        # Map TARGETARCH to our naming convention
        case "${TARGETARCH}" in \
            amd64) ARCH="x86_64" ;; \
            arm64) ARCH="aarch64" ;; \
            *) echo "Unsupported architecture: ${TARGETARCH}" && exit 1 ;; \
        esac; \
        \
        # Determine download path and filename
        if [ "${BUILD_TYPE}" = "development" ]; then \
            DOWNLOAD_PATH="artifacts/rustfs/dev"; \
            FILENAME="rustfs-linux-${ARCH}-dev-${VERSION}.zip"; \
        else \
            DOWNLOAD_PATH="artifacts/rustfs/release"; \
            FILENAME="rustfs-linux-${ARCH}-v${VERSION}.zip"; \
        fi; \
        \
        # Download the binary
        DOWNLOAD_URL="https://dl.rustfs.com/${DOWNLOAD_PATH}/${FILENAME}"; \
        echo "Downloading RustFS binary from: ${DOWNLOAD_URL}"; \
        curl -Lo /tmp/rustfs.zip "${DOWNLOAD_URL}" || { \
            echo "Failed to download, continuing as development environment"; \
        }; \
        if [ -f /tmp/rustfs.zip ]; then \
            unzip -o /tmp/rustfs.zip -d /tmp; \
            mv /tmp/rustfs /usr/local/bin/rustfs; \
            chmod +x /usr/local/bin/rustfs; \
            rm -rf /tmp/*; \
        fi; \
    fi

# Create rustfs user for security
RUN groupadd -g 1000 rustfs && \
    useradd -d /app -g rustfs -u 1000 -s /bin/bash rustfs

WORKDIR /app

# Create data directories
RUN mkdir -p /data && chown -R rustfs:rustfs /data /app

# Environment variables
ENV RUSTFS_ACCESS_KEY=rustfsadmin \
    RUSTFS_SECRET_KEY=rustfsadmin \
    RUSTFS_ADDRESS=":9000" \
    RUSTFS_CONSOLE_ENABLE=true \
    RUSTFS_VOLUMES=/data \
    RUST_LOG=warn

EXPOSE 9000

# Health check (only if rustfs binary exists)
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
    CMD if [ -f /usr/local/bin/rustfs ]; then wget --no-verbose --tries=1 --spider http://localhost:9000/health || exit 1; else exit 0; fi

# Default command: if rustfs binary exists, run it; otherwise, keep container alive for development
CMD if [ -f /usr/local/bin/rustfs ]; then \
        echo "Starting RustFS server..."; \
        exec /usr/local/bin/rustfs; \
    else \
        echo "Running in development mode..."; \
        echo "RustFS source code should be mounted at /app"; \
        exec bash -c "while true; do sleep 1; done"; \
    fi
@@ -4,7 +4,7 @@ ARG TARGETPLATFORM
 ARG BUILDPLATFORM

 # Build stage
-FROM --platform=$BUILDPLATFORM rust:1.85-bookworm AS builder
+FROM --platform=$BUILDPLATFORM rust:1.88-bookworm AS builder

 # Install required build dependencies
 RUN apt-get update && apt-get install -y \
@@ -18,6 +18,18 @@ RUN apt-get update && apt-get install -y \
     lld \
     && rm -rf /var/lib/apt/lists/*

+# Install sccache for Rust compilation caching
+RUN wget https://github.com/mozilla/sccache/releases/download/v0.8.1/sccache-v0.8.1-x86_64-unknown-linux-gnu.tar.gz \
+    && tar -xzf sccache-v0.8.1-x86_64-unknown-linux-gnu.tar.gz \
+    && mv sccache-v0.8.1-x86_64-unknown-linux-gnu/sccache /usr/local/bin/ \
+    && chmod +x /usr/local/bin/sccache \
+    && rm -rf sccache-v0.8.1-x86_64-unknown-linux-gnu.tar.gz sccache-v0.8.1-x86_64-unknown-linux-gnu
+
+# Set up sccache environment
+ENV RUSTC_WRAPPER=sccache \
+    SCCACHE_DIR=/tmp/sccache \
+    SCCACHE_CACHE_SIZE=2G
+
 # Install cross-compilation tools for ARM64
 RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
     apt-get update && \
@@ -50,6 +62,9 @@ ENV CXX_aarch64_unknown_linux_gnu=aarch64-linux-gnu-g++

 WORKDIR /usr/src/rustfs

+# Copy cargo configuration for optimized builds
+COPY .docker/cargo.config.toml ./.cargo/config.toml
+
 # Copy Cargo files for dependency caching
 COPY Cargo.toml Cargo.lock ./
 COPY */Cargo.toml ./*/
@@ -59,10 +74,19 @@ RUN find . -name "Cargo.toml" -not -path "./Cargo.toml" | \
     xargs -I {} dirname {} | \
     xargs -I {} sh -c 'mkdir -p {}/src && echo "fn main() {}" > {}/src/main.rs'

-# Build dependencies only (cache layer)
-RUN case "$TARGETPLATFORM" in \
-    "linux/amd64") cargo build --release --target x86_64-unknown-linux-gnu ;; \
-    "linux/arm64") cargo build --release --target aarch64-unknown-linux-gnu ;; \
+# Configure cargo for optimized builds
+ENV CARGO_NET_GIT_FETCH_WITH_CLI=true \
+    CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse \
+    CARGO_INCREMENTAL=0 \
+    CARGO_PROFILE_RELEASE_DEBUG=false \
+    CARGO_PROFILE_RELEASE_SPLIT_DEBUGINFO=off \
+    CARGO_PROFILE_RELEASE_STRIP=symbols
+
+# Build dependencies only (cache layer) with optimizations
+RUN sccache --start-server 2>/dev/null || true && \
+    case "$TARGETPLATFORM" in \
+    "linux/amd64") cargo build --release --target x86_64-unknown-linux-gnu -j $(nproc) ;; \
+    "linux/arm64") cargo build --release --target aarch64-unknown-linux-gnu -j $(nproc) ;; \
     esac

 # Copy source code
@@ -71,17 +95,19 @@ COPY . .
 # Generate protobuf code
 RUN cargo run --bin gproto

-# Build the actual application
-RUN case "$TARGETPLATFORM" in \
+# Build the actual application with optimizations
+RUN sccache --start-server 2>/dev/null || true && \
+    case "$TARGETPLATFORM" in \
     "linux/amd64") \
-        cargo build --release --target x86_64-unknown-linux-gnu --bin rustfs && \
+        cargo build --release --target x86_64-unknown-linux-gnu --bin rustfs -j $(nproc) && \
         cp target/x86_64-unknown-linux-gnu/release/rustfs /usr/local/bin/rustfs \
         ;; \
     "linux/arm64") \
-        cargo build --release --target aarch64-unknown-linux-gnu --bin rustfs && \
+        cargo build --release --target aarch64-unknown-linux-gnu --bin rustfs -j $(nproc) && \
         cp target/aarch64-unknown-linux-gnu/release/rustfs /usr/local/bin/rustfs \
         ;; \
-    esac
+    esac && \
+    sccache --show-stats || true

 # Runtime stage - Ubuntu minimal for better compatibility
 FROM ubuntu:22.04
@@ -111,7 +137,15 @@ RUN chmod +x /app/rustfs && chown rustfs:rustfs /app/rustfs
 USER rustfs

 # Expose ports
-EXPOSE 9000 9001
+EXPOSE 9000

+# Environment variables
+ENV RUSTFS_ACCESS_KEY=rustfsadmin \
+    RUSTFS_SECRET_KEY=rustfsadmin \
+    RUSTFS_ADDRESS=":9000" \
+    RUSTFS_CONSOLE_ENABLE=true \
+    RUSTFS_VOLUMES=/data \
+    RUST_LOG=warn
+
 # Health check
 HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
.github/actions/setup/action.yml (vendored, 15 lines changed)
@@ -60,15 +60,7 @@ runs:
         pkg-config \
         libssl-dev

-    - name: Cache protoc binary
-      id: cache-protoc
-      uses: actions/cache@v4
-      with:
-        path: ~/.local/bin/protoc
-        key: protoc-31.1-${{ runner.os }}-${{ runner.arch }}
-
     - name: Install protoc
-      if: steps.cache-protoc.outputs.cache-hit != 'true'
       uses: arduino/setup-protoc@v3
       with:
         version: "31.1"
@@ -94,6 +86,9 @@ runs:
       if: inputs.install-cross-tools == 'true'
       uses: taiki-e/install-action@cargo-zigbuild

+    - name: Install cargo-nextest
+      uses: taiki-e/install-action@cargo-nextest
+
     - name: Setup Rust cache
       uses: Swatinem/rust-cache@v2
       with:
@@ -101,7 +96,3 @@ runs:
         cache-on-failure: true
         shared-key: ${{ inputs.cache-shared-key }}
         save-if: ${{ inputs.cache-save-if }}
-      # Cache workspace dependencies
-      workspaces: |
-        . -> target
-        cli/rustfs-gui -> cli/rustfs-gui/target
.github/workflows/build.yml (vendored, 423 lines changed)
@@ -16,7 +16,7 @@ name: Build and Release

 on:
   push:
-    tags: ["*"]
+    tags: ["*.*.*"]
     branches: [main]
     paths-ignore:
       - "**.md"
@@ -65,39 +65,79 @@ env:
   CARGO_INCREMENTAL: 0

 jobs:
-  # Second layer: Business logic level checks (handling build strategy)
+  # Build strategy check - determine build type based on trigger
   build-check:
     name: Build Strategy Check
     runs-on: ubuntu-latest
     outputs:
       should_build: ${{ steps.check.outputs.should_build }}
       build_type: ${{ steps.check.outputs.build_type }}
+      version: ${{ steps.check.outputs.version }}
+      short_sha: ${{ steps.check.outputs.short_sha }}
+      is_prerelease: ${{ steps.check.outputs.is_prerelease }}
     steps:
+      - name: Checkout repository
+        uses: actions/checkout@v4
+        with:
+          fetch-depth: 0
+
       - name: Determine build strategy
         id: check
         run: |
           should_build=false
           build_type="none"
+          version=""
+          short_sha=""
+          is_prerelease=false

-          # Business logic: when we need to build
-          if [[ "${{ github.event_name }}" == "schedule" ]] || \
-             [[ "${{ github.event_name }}" == "workflow_dispatch" ]] || \
-             [[ "${{ github.event.inputs.force_build }}" == "true" ]] || \
-             [[ "${{ contains(github.event.head_commit.message, '--build') }}" == "true" ]]; then
-            should_build=true
-            build_type="release"
-          fi
-
-          # Always build for tag pushes (version releases)
-          if [[ "${{ startsWith(github.ref, 'refs/tags/') }}" == "true" ]]; then
-            should_build=true
-            build_type="release"
-            echo "🏷️ Tag detected: forcing release build"
-          fi
+          # Get short SHA for all builds
+          short_sha=$(git rev-parse --short HEAD)
+
+          # Determine build type based on trigger
+          if [[ "${{ startsWith(github.ref, 'refs/tags/') }}" == "true" ]]; then
+            # Tag push - release or prerelease
+            should_build=true
+            tag_name="${GITHUB_REF#refs/tags/}"
+            version="${tag_name}"
+
+            # Check if this is a prerelease
+            if [[ "$tag_name" == *"alpha"* ]] || [[ "$tag_name" == *"beta"* ]] || [[ "$tag_name" == *"rc"* ]]; then
+              build_type="prerelease"
+              is_prerelease=true
+              echo "🚀 Prerelease build detected: $tag_name"
+            else
+              build_type="release"
+              echo "📦 Release build detected: $tag_name"
+            fi
+          elif [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
+            # Main branch push - development build
+            should_build=true
+            build_type="development"
+            version="dev-${short_sha}"
+            echo "🛠️ Development build detected"
+          elif [[ "${{ github.event_name }}" == "schedule" ]] || \
+               [[ "${{ github.event_name }}" == "workflow_dispatch" ]] || \
+               [[ "${{ github.event.inputs.force_build }}" == "true" ]] || \
+               [[ "${{ contains(github.event.head_commit.message, '--build') }}" == "true" ]]; then
+            # Scheduled or manual build
+            should_build=true
+            build_type="development"
+            version="dev-${short_sha}"
+            echo "⚡ Manual/scheduled build detected"
+          fi

           echo "should_build=$should_build" >> $GITHUB_OUTPUT
           echo "build_type=$build_type" >> $GITHUB_OUTPUT
-          echo "Build needed: $should_build (type: $build_type)"
+          echo "version=$version" >> $GITHUB_OUTPUT
+          echo "short_sha=$short_sha" >> $GITHUB_OUTPUT
+          echo "is_prerelease=$is_prerelease" >> $GITHUB_OUTPUT
+
+          echo "📊 Build Summary:"
+          echo "  - Should build: $should_build"
+          echo "  - Build type: $build_type"
+          echo "  - Version: $version"
+          echo "  - Short SHA: $short_sha"
+          echo "  - Is prerelease: $is_prerelease"

   # Build RustFS binaries
   build-rustfs:
@@ -168,6 +208,7 @@ jobs:
             echo "// Static assets not available" > ./rustfs/static/empty.txt
           fi
         else
+          chmod +w ./rustfs/static/LICENSE || true
           curl -L "https://dl.rustfs.com/artifacts/console/rustfs-console-latest.zip" \
             -o console.zip --retry 3 --retry-delay 5 --max-time 300
           if [[ $? -eq 0 ]]; then
@@ -201,9 +242,40 @@ jobs:
         id: package
         shell: bash
         run: |
-          PACKAGE_NAME="rustfs-${{ matrix.target }}"
+          BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
+          VERSION="${{ needs.build-check.outputs.version }}"
+          SHORT_SHA="${{ needs.build-check.outputs.short_sha }}"
+
+          # Extract platform and arch from target
+          TARGET="${{ matrix.target }}"
+          PLATFORM="${{ matrix.platform }}"
+
+          # Map target to architecture
+          case "$TARGET" in
+            *x86_64*)
+              ARCH="x86_64"
+              ;;
+            *aarch64*|*arm64*)
+              ARCH="aarch64"
+              ;;
+            *armv7*)
+              ARCH="armv7"
+              ;;
+            *)
+              ARCH="unknown"
+              ;;
+          esac
+
+          # Generate package name based on build type
+          if [[ "$BUILD_TYPE" == "development" ]]; then
+            # Development build: rustfs-${platform}-${arch}-dev-${short_sha}.zip
+            PACKAGE_NAME="rustfs-${PLATFORM}-${ARCH}-dev-${SHORT_SHA}"
+          else
+            # Release/Prerelease build: rustfs-${platform}-${arch}-v${version}.zip
+            PACKAGE_NAME="rustfs-${PLATFORM}-${ARCH}-v${VERSION}"
+          fi

           # Create zip packages for all platforms
           # Ensure zip is available
           if ! command -v zip &> /dev/null; then
             if [[ "${{ matrix.os }}" == "ubuntu-latest" ]]; then
@@ -214,9 +286,15 @@ jobs:
           cd target/${{ matrix.target }}/release
           zip "../../../${PACKAGE_NAME}.zip" rustfs
           cd ../../..

           echo "package_name=${PACKAGE_NAME}" >> $GITHUB_OUTPUT
           echo "package_file=${PACKAGE_NAME}.zip" >> $GITHUB_OUTPUT
-          echo "Package created: ${PACKAGE_NAME}.zip"
+          echo "build_type=${BUILD_TYPE}" >> $GITHUB_OUTPUT
+          echo "version=${VERSION}" >> $GITHUB_OUTPUT
+
+          echo "📦 Package created: ${PACKAGE_NAME}.zip"
+          echo "🔧 Build type: ${BUILD_TYPE}"
+          echo "📊 Version: ${VERSION}"

       - name: Upload artifacts
         uses: actions/upload-artifact@v4
@@ -226,242 +304,117 @@ jobs:
|
||||
retention-days: ${{ startsWith(github.ref, 'refs/tags/') && 30 || 7 }}
|
||||
|
||||
- name: Upload to Aliyun OSS
|
||||
if: needs.build-check.outputs.build_type == 'release' && env.OSS_ACCESS_KEY_ID != ''
|
||||
if: env.OSS_ACCESS_KEY_ID != '' && (needs.build-check.outputs.build_type == 'release' || needs.build-check.outputs.build_type == 'prerelease' || needs.build-check.outputs.build_type == 'development')
|
||||
env:
|
||||
OSS_ACCESS_KEY_ID: ${{ secrets.ALICLOUDOSS_KEY_ID }}
|
||||
OSS_ACCESS_KEY_SECRET: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
|
||||
OSS_REGION: cn-beijing
|
||||
OSS_ENDPOINT: https://oss-cn-beijing.aliyuncs.com
|
||||
run: |
|
||||
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
|
||||
|
||||
# Install ossutil (platform-specific)
|
||||
OSSUTIL_VERSION="2.1.1"
|
||||
case "${{ matrix.platform }}" in
|
||||
linux|macos)
|
||||
sudo -v ; curl https://gosspublic.alicdn.com/ossutil/install.sh | sudo bash
|
||||
linux)
|
||||
if [[ "$(uname -m)" == "arm64" ]]; then
|
||||
ARCH="arm64"
|
||||
else
|
||||
ARCH="amd64"
|
||||
fi
|
||||
OSSUTIL_ZIP="ossutil-${OSSUTIL_VERSION}-linux-${ARCH}.zip"
|
||||
OSSUTIL_DIR="ossutil-${OSSUTIL_VERSION}-linux-${ARCH}"
|
||||
|
||||
curl -o "$OSSUTIL_ZIP" "https://gosspublic.alicdn.com/ossutil/v2/${OSSUTIL_VERSION}/${OSSUTIL_ZIP}"
|
||||
unzip "$OSSUTIL_ZIP"
|
||||
mv "${OSSUTIL_DIR}/ossutil" /usr/local/bin/
|
||||
rm -rf "$OSSUTIL_DIR" "$OSSUTIL_ZIP"
|
||||
chmod +x /usr/local/bin/ossutil
|
||||
OSSUTIL_BIN=ossutil
|
||||
;;
|
||||
macos)
|
||||
if [[ "$(uname -m)" == "arm64" ]]; then
|
||||
ARCH="arm64"
|
||||
else
|
||||
ARCH="amd64"
|
||||
fi
|
||||
OSSUTIL_ZIP="ossutil-${OSSUTIL_VERSION}-mac-${ARCH}.zip"
|
||||
OSSUTIL_DIR="ossutil-${OSSUTIL_VERSION}-mac-${ARCH}"
|
||||
|
||||
curl -o "$OSSUTIL_ZIP" "https://gosspublic.alicdn.com/ossutil/v2/${OSSUTIL_VERSION}/${OSSUTIL_ZIP}"
|
||||
unzip "$OSSUTIL_ZIP"
|
||||
mv "${OSSUTIL_DIR}/ossutil" /usr/local/bin/
|
||||
rm -rf "$OSSUTIL_DIR" "$OSSUTIL_ZIP"
|
||||
chmod +x /usr/local/bin/ossutil
|
||||
OSSUTIL_BIN=ossutil
|
||||
;;
|
||||
# windows)
# Windows ossutil is not supported yet
# ;;
esac

# Upload the package file directly to OSS
echo "Uploading ${{ steps.package.outputs.package_file }} to OSS..."
$OSSUTIL_BIN cp "${{ steps.package.outputs.package_file }}" oss://rustfs-artifacts/artifacts/rustfs/ --force

# Create latest.json (only for the first Linux build to avoid duplication)
if [[ "${{ matrix.target }}" == "x86_64-unknown-linux-musl" ]]; then
VERSION="${GITHUB_REF#refs/tags/v}"
echo "{\"version\":\"${VERSION}\",\"release_date\":\"$(date -u +%Y-%m-%dT%H:%M:%SZ)\"}" > latest.json
$OSSUTIL_BIN cp latest.json oss://rustfs-version/latest.json --force
# Determine upload path based on build type
if [[ "$BUILD_TYPE" == "development" ]]; then
OSS_PATH="oss://rustfs-artifacts/artifacts/rustfs/dev/"
echo "📤 Uploading development build to OSS dev directory"
else
OSS_PATH="oss://rustfs-artifacts/artifacts/rustfs/release/"
echo "📤 Uploading release build to OSS release directory"
fi

# Upload the package file to OSS
echo "Uploading ${{ steps.package.outputs.package_file }} to $OSS_PATH..."
$OSSUTIL_BIN cp "${{ steps.package.outputs.package_file }}" "$OSS_PATH" --force

# Release management
release:
name: GitHub Release
# For release and prerelease builds, also create a latest version
if [[ "$BUILD_TYPE" == "release" ]] || [[ "$BUILD_TYPE" == "prerelease" ]]; then
# Extract platform and arch from package name
PACKAGE_NAME="${{ steps.package.outputs.package_name }}"

# Create latest version filename
# Convert from rustfs-linux-x86_64-v1.0.0 to rustfs-linux-x86_64-latest
LATEST_NAME=$(echo "$PACKAGE_NAME" | sed 's/-v[0-9].*$/-latest/')
LATEST_FILE="${LATEST_NAME}.zip"

# Copy the original file to latest version
cp "${{ steps.package.outputs.package_file }}" "$LATEST_FILE"

# Upload the latest version
echo "Uploading latest version: $LATEST_FILE to $OSS_PATH..."
$OSSUTIL_BIN cp "$LATEST_FILE" "$OSS_PATH" --force

echo "✅ Latest version uploaded: $LATEST_FILE"
fi

echo "✅ Upload completed successfully"

# Build summary
build-summary:
name: Build Summary
needs: [build-check, build-rustfs]
if: always() && needs.build-check.outputs.build_type == 'release'
if: always() && needs.build-check.outputs.should_build == 'true'
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Download all artifacts
uses: actions/download-artifact@v4
with:
path: ./release-artifacts

- name: Prepare release assets
id: release_prep
- name: Build completion summary
run: |
VERSION="${GITHUB_REF#refs/tags/}"
VERSION_CLEAN="${VERSION#v}"

echo "version=${VERSION}" >> $GITHUB_OUTPUT
echo "version_clean=${VERSION_CLEAN}" >> $GITHUB_OUTPUT

# Organize artifacts
mkdir -p ./release-files

# Copy all artifacts (.zip files)
find ./release-artifacts -name "*.zip" -exec cp {} ./release-files/ \;

# Generate checksums for all files
cd ./release-files
if ls *.zip >/dev/null 2>&1; then
sha256sum *.zip >> SHA256SUMS
sha512sum *.zip >> SHA512SUMS
fi
cd ..

# Display what we're releasing
echo "=== Release Files ==="
ls -la ./release-files/

- name: Create GitHub Release
env:
GH_TOKEN: ${{ github.token }}
run: |
VERSION="${{ steps.release_prep.outputs.version }}"
VERSION_CLEAN="${{ steps.release_prep.outputs.version_clean }}"

# Check if release already exists
if gh release view "$VERSION" >/dev/null 2>&1; then
echo "Release $VERSION already exists, skipping creation"
else
# Get release notes from tag message
RELEASE_NOTES=$(git tag -l --format='%(contents)' "${VERSION}")
if [[ -z "$RELEASE_NOTES" || "$RELEASE_NOTES" =~ ^[[:space:]]*$ ]]; then
RELEASE_NOTES="Release ${VERSION_CLEAN}"
fi

# Determine if this is a prerelease
PRERELEASE_FLAG=""
if [[ "$VERSION" == *"alpha"* ]] || [[ "$VERSION" == *"beta"* ]] || [[ "$VERSION" == *"rc"* ]]; then
PRERELEASE_FLAG="--prerelease"
fi

# Create the release only if it doesn't exist
gh release create "$VERSION" \
--title "RustFS $VERSION_CLEAN" \
--notes "$RELEASE_NOTES" \
$PRERELEASE_FLAG
fi

- name: Upload release assets
env:
GH_TOKEN: ${{ github.token }}
run: |
VERSION="${{ steps.release_prep.outputs.version }}"

cd ./release-files

# Upload all binary files
for file in *.zip; do
if [[ -f "$file" ]]; then
echo "Uploading $file..."
gh release upload "$VERSION" "$file" --clobber
fi
done

# Upload checksum files
if [[ -f "SHA256SUMS" ]]; then
echo "Uploading SHA256SUMS..."
gh release upload "$VERSION" "SHA256SUMS" --clobber
fi

if [[ -f "SHA512SUMS" ]]; then
echo "Uploading SHA512SUMS..."
gh release upload "$VERSION" "SHA512SUMS" --clobber
fi

- name: Update release notes
env:
GH_TOKEN: ${{ github.token }}
run: |
VERSION="${{ steps.release_prep.outputs.version }}"
VERSION_CLEAN="${{ steps.release_prep.outputs.version_clean }}"

# Check if release already has custom notes (not auto-generated)
EXISTING_NOTES=$(gh release view "$VERSION" --json body --jq '.body' 2>/dev/null || echo "")

# Only update if release notes are empty or auto-generated
if [[ -z "$EXISTING_NOTES" ]] || [[ "$EXISTING_NOTES" == *"Release ${VERSION_CLEAN}"* ]]; then
echo "Updating release notes for $VERSION"

# Get original release notes from tag
ORIGINAL_NOTES=$(git tag -l --format='%(contents)' "${VERSION}")
if [[ -z "$ORIGINAL_NOTES" || "$ORIGINAL_NOTES" =~ ^[[:space:]]*$ ]]; then
ORIGINAL_NOTES="Release ${VERSION_CLEAN}"
fi

# Create comprehensive release notes
cat > enhanced_notes.md << EOF
## RustFS ${VERSION_CLEAN}

${ORIGINAL_NOTES}

---

### 🚀 Quick Download

**Linux (Static Binaries - No Dependencies):**
\`\`\`bash
# x86_64 (Intel/AMD)
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-x86_64-unknown-linux-musl.zip
unzip rustfs-x86_64-unknown-linux-musl.zip
sudo mv rustfs /usr/local/bin/

# ARM64 (Graviton, Apple Silicon VMs)
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-aarch64-unknown-linux-musl.zip
unzip rustfs-aarch64-unknown-linux-musl.zip
sudo mv rustfs /usr/local/bin/
\`\`\`

**macOS:**
\`\`\`bash
# Apple Silicon (M1/M2/M3)
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-aarch64-apple-darwin.zip
unzip rustfs-aarch64-apple-darwin.zip
sudo mv rustfs /usr/local/bin/

# Intel
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-x86_64-apple-darwin.zip
unzip rustfs-x86_64-apple-darwin.zip
sudo mv rustfs /usr/local/bin/
\`\`\`

### 📁 Available Downloads

| Platform | Architecture | File | Description |
|----------|-------------|------|-------------|
| Linux | x86_64 | \`rustfs-x86_64-unknown-linux-musl.zip\` | Static binary, no dependencies |
| Linux | ARM64 | \`rustfs-aarch64-unknown-linux-musl.zip\` | Static binary, no dependencies |
| macOS | Apple Silicon | \`rustfs-aarch64-apple-darwin.zip\` | Native binary, ZIP archive |
| macOS | Intel | \`rustfs-x86_64-apple-darwin.zip\` | Native binary, ZIP archive |

### 🔐 Verification

Download checksums and verify your download:
\`\`\`bash
# Download checksums
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/SHA256SUMS

# Verify (Linux)
sha256sum -c SHA256SUMS --ignore-missing

# Verify (macOS)
shasum -a 256 -c SHA256SUMS --ignore-missing
\`\`\`

### 🛠️ System Requirements

- **Linux**: Any distribution with glibc 2.17+ (CentOS 7+, Ubuntu 16.04+)
- **macOS**: 10.15+ (Catalina or later)
- **Windows**: Windows 10 version 1809 or later

### 📚 Documentation

- [Installation Guide](https://github.com/rustfs/rustfs#installation)
- [Quick Start](https://github.com/rustfs/rustfs#quick-start)
- [Configuration](https://github.com/rustfs/rustfs/blob/main/docs/)
- [API Documentation](https://docs.rs/rustfs)

### 🆘 Support

- 🐛 [Report Issues](https://github.com/rustfs/rustfs/issues)
- 💬 [Community Discussions](https://github.com/rustfs/rustfs/discussions)
- 📖 [Documentation](https://github.com/rustfs/rustfs/tree/main/docs)
EOF

# Update the release with enhanced notes
gh release edit "$VERSION" --notes-file enhanced_notes.md
else
echo "Release $VERSION already has custom notes, skipping update to preserve manual edits"
fi
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
VERSION="${{ needs.build-check.outputs.version }}"

echo "🎉 Build completed successfully!"
echo "📦 Build type: $BUILD_TYPE"
echo "🔢 Version: $VERSION"
echo ""

case "$BUILD_TYPE" in
"development")
echo "🛠️ Development build artifacts have been uploaded to OSS dev directory"
echo "⚠️ This is a development build - not suitable for production use"
;;
"release")
echo "🚀 Release build artifacts have been uploaded to OSS release directory"
echo "✅ This build is ready for production use"
echo "🏷️ GitHub Release will be created automatically by the release workflow"
;;
"prerelease")
echo "🧪 Prerelease build artifacts have been uploaded to OSS release directory"
echo "⚠️ This is a prerelease build - use with caution"
echo "🏷️ GitHub Release will be created automatically by the release workflow"
;;
esac
6 .github/workflows/ci.yml vendored
@@ -81,7 +81,7 @@ jobs:
cancel_others: true
paths_ignore: '["*.md", "docs/**", "deploy/**"]'
# Never skip release events and tag pushes
do_not_skip: '["release", "push"]'
do_not_skip: '["workflow_dispatch", "schedule", "merge_group", "release", "push"]'

test-and-lint:
name: Test and Lint
@@ -102,7 +102,9 @@ jobs:
cache-save-if: ${{ github.ref == 'refs/heads/main' }}

- name: Run tests
run: cargo test --all --exclude e2e_test
run: |
cargo nextest run --all --exclude e2e_test
cargo test --all --doc

- name: Check code formatting
run: cargo fmt --all --check
264 .github/workflows/docker.yml vendored
@@ -16,7 +16,7 @@ name: Docker Images

on:
push:
tags: ["*"]
tags: ["*.*.*"]
branches: [main]
paths-ignore:
- "**.md"
@@ -62,19 +62,37 @@ env:
REGISTRY_GHCR: ghcr.io/${{ github.repository }}

jobs:
# Check if we should build
# Docker build strategy check
build-check:
name: Build Check
name: Docker Build Check
runs-on: ubuntu-latest
outputs:
should_build: ${{ steps.check.outputs.should_build }}
should_push: ${{ steps.check.outputs.should_push }}
build_type: ${{ steps.check.outputs.build_type }}
version: ${{ steps.check.outputs.version }}
short_sha: ${{ steps.check.outputs.short_sha }}
is_prerelease: ${{ steps.check.outputs.is_prerelease }}
create_latest: ${{ steps.check.outputs.create_latest }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Check build conditions
id: check
run: |
should_build=false
should_push=false
build_type="none"
version=""
short_sha=""
is_prerelease=false
create_latest=false

# Get short SHA for all builds
short_sha=$(git rev-parse --short HEAD)

# Always build on workflow_dispatch or when changes detected
if [[ "${{ github.event_name }}" == "workflow_dispatch" ]] || \
@@ -83,6 +101,34 @@ jobs:
should_build=true
fi

# Determine build type and version
if [[ "${{ startsWith(github.ref, 'refs/tags/') }}" == "true" ]]; then
# Tag push - release or prerelease
tag_name="${GITHUB_REF#refs/tags/}"
version="${tag_name}"

# Check if this is a prerelease
if [[ "$tag_name" == *"alpha"* ]] || [[ "$tag_name" == *"beta"* ]] || [[ "$tag_name" == *"rc"* ]]; then
build_type="prerelease"
is_prerelease=true
echo "🚀 Docker prerelease build detected: $tag_name"
else
build_type="release"
create_latest=true
echo "📦 Docker release build detected: $tag_name"
fi
elif [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
# Main branch push - development build
build_type="development"
version="dev-${short_sha}"
echo "🛠️ Docker development build detected"
else
# Other branches - development build
build_type="development"
version="dev-${short_sha}"
echo "🔧 Docker development build detected"
fi

# Push only on main branch, tags, or manual trigger
if [[ "${{ github.ref }}" == "refs/heads/main" ]] || \
[[ "${{ startsWith(github.ref, 'refs/tags/') }}" == "true" ]] || \
@@ -92,7 +138,20 @@ jobs:

echo "should_build=$should_build" >> $GITHUB_OUTPUT
echo "should_push=$should_push" >> $GITHUB_OUTPUT
echo "Build: $should_build, Push: $should_push"
echo "build_type=$build_type" >> $GITHUB_OUTPUT
echo "version=$version" >> $GITHUB_OUTPUT
echo "short_sha=$short_sha" >> $GITHUB_OUTPUT
echo "is_prerelease=$is_prerelease" >> $GITHUB_OUTPUT
echo "create_latest=$create_latest" >> $GITHUB_OUTPUT

echo "🐳 Docker Build Summary:"
echo " - Should build: $should_build"
echo " - Should push: $should_push"
echo " - Build type: $build_type"
echo " - Version: $version"
echo " - Short SHA: $short_sha"
echo " - Is prerelease: $is_prerelease"
echo " - Create latest: $create_latest"

# Build multi-arch Docker images
build-docker:
@@ -108,11 +167,17 @@ jobs:
- name: production
dockerfile: Dockerfile
platforms: linux/amd64,linux/arm64
- name: ubuntu
dockerfile: .docker/Dockerfile.ubuntu22.04
platforms: linux/amd64,linux/arm64
- name: alpine
dockerfile: .docker/Dockerfile.alpine
dockerfile: .docker/alpine/Dockerfile.prebuild
platforms: linux/amd64,linux/arm64
- name: alpine-source
dockerfile: .docker/alpine/Dockerfile.source
platforms: linux/amd64,linux/arm64
- name: ubuntu
dockerfile: .docker/ubuntu/Dockerfile.prebuild
platforms: linux/amd64,linux/arm64
- name: ubuntu-source
dockerfile: .docker/ubuntu/Dockerfile.source
platforms: linux/amd64,linux/arm64
steps:
- name: Checkout repository
@@ -139,21 +204,96 @@ jobs:
username: ${{ github.actor }}
password: ${{ secrets.GITHUB_TOKEN }}

- name: Extract metadata
- name: Extract metadata and generate tags
id: meta
uses: docker/metadata-action@v5
with:
images: |
${{ env.REGISTRY_DOCKERHUB }}
${{ env.REGISTRY_GHCR }}
tags: |
type=ref,event=branch,suffix=-${{ matrix.variant.name }}
type=ref,event=pr,suffix=-${{ matrix.variant.name }}
type=semver,pattern={{version}},suffix=-${{ matrix.variant.name }}
type=semver,pattern={{major}}.{{minor}},suffix=-${{ matrix.variant.name }}
type=raw,value=latest,suffix=-${{ matrix.variant.name }},enable={{is_default_branch}}
flavor: |
latest=false
run: |
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
VERSION="${{ needs.build-check.outputs.version }}"
SHORT_SHA="${{ needs.build-check.outputs.short_sha }}"
CREATE_LATEST="${{ needs.build-check.outputs.create_latest }}"
VARIANT="${{ matrix.variant.name }}"

# Generate tags based on build type
TAGS=""

if [[ "$BUILD_TYPE" == "development" ]]; then
# Development build: dev-${short_sha}-${variant} and dev-${variant}
TAGS="${{ env.REGISTRY_DOCKERHUB }}:dev-${SHORT_SHA}-${VARIANT}"
TAGS="$TAGS,${{ env.REGISTRY_GHCR }}:dev-${SHORT_SHA}-${VARIANT}"

# Add rolling dev tag for each variant
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:dev-${VARIANT}"
TAGS="$TAGS,${{ env.REGISTRY_GHCR }}:dev-${VARIANT}"

# Special handling for production variant
if [[ "$VARIANT" == "production" ]]; then
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:dev-${SHORT_SHA}"
TAGS="$TAGS,${{ env.REGISTRY_GHCR }}:dev-${SHORT_SHA}"
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:dev"
TAGS="$TAGS,${{ env.REGISTRY_GHCR }}:dev"
fi
else
# Release/Prerelease build: ${version}-${variant}
TAGS="${{ env.REGISTRY_DOCKERHUB }}:${VERSION}-${VARIANT}"
TAGS="$TAGS,${{ env.REGISTRY_GHCR }}:${VERSION}-${VARIANT}"

# Special handling for production variant - create main version tag
if [[ "$VARIANT" == "production" ]]; then
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:${VERSION}"
TAGS="$TAGS,${{ env.REGISTRY_GHCR }}:${VERSION}"
fi

# Add channel tags for prereleases and latest for stable
if [[ "$CREATE_LATEST" == "true" ]]; then
# Stable release
if [[ "$VARIANT" == "production" ]]; then
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:latest"
TAGS="$TAGS,${{ env.REGISTRY_GHCR }}:latest"
else
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:latest-${VARIANT}"
TAGS="$TAGS,${{ env.REGISTRY_GHCR }}:latest-${VARIANT}"
fi
elif [[ "$BUILD_TYPE" == "prerelease" ]]; then
# Prerelease channel tags (alpha, beta, rc)
if [[ "$VERSION" == *"alpha"* ]]; then
CHANNEL="alpha"
elif [[ "$VERSION" == *"beta"* ]]; then
CHANNEL="beta"
elif [[ "$VERSION" == *"rc"* ]]; then
CHANNEL="rc"
fi

if [[ -n "$CHANNEL" ]]; then
if [[ "$VARIANT" == "production" ]]; then
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:${CHANNEL}"
TAGS="$TAGS,${{ env.REGISTRY_GHCR }}:${CHANNEL}"
else
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:${CHANNEL}-${VARIANT}"
TAGS="$TAGS,${{ env.REGISTRY_GHCR }}:${CHANNEL}-${VARIANT}"
fi
fi
fi
fi
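# Example (illustrative, values are hypothetical): a stable release with
# VERSION=1.0.0, VARIANT=production and CREATE_LATEST=true produces
#   ${REGISTRY_DOCKERHUB}:1.0.0-production, ${REGISTRY_DOCKERHUB}:1.0.0, ${REGISTRY_DOCKERHUB}:latest
# plus the matching ${REGISTRY_GHCR} tags.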
# Output tags
echo "tags=$TAGS" >> $GITHUB_OUTPUT

# Generate labels
LABELS="org.opencontainers.image.title=RustFS"
LABELS="$LABELS,org.opencontainers.image.description=RustFS distributed object storage system"
LABELS="$LABELS,org.opencontainers.image.version=$VERSION"
LABELS="$LABELS,org.opencontainers.image.revision=${{ github.sha }}"
LABELS="$LABELS,org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}"
LABELS="$LABELS,org.opencontainers.image.created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')"
LABELS="$LABELS,org.opencontainers.image.variant=$VARIANT"
LABELS="$LABELS,org.opencontainers.image.build-type=$BUILD_TYPE"

echo "labels=$LABELS" >> $GITHUB_OUTPUT

echo "🐳 Generated Docker tags:"
echo "$TAGS" | tr ',' '\n' | sed 's/^/ - /'
echo "📋 Build type: $BUILD_TYPE"
echo "🔖 Version: $VERSION"

- name: Build and push Docker image
uses: docker/build-push-action@v5
@@ -164,18 +304,27 @@ jobs:
push: ${{ needs.build-check.outputs.should_push == 'true' }}
tags: ${{ steps.meta.outputs.tags }}
labels: ${{ steps.meta.outputs.labels }}
cache-from: type=gha,scope=docker-${{ matrix.variant.name }}
cache-to: type=gha,mode=max,scope=docker-${{ matrix.variant.name }}
cache-from: |
type=gha,scope=docker-${{ matrix.variant.name }}
type=registry,ref=${{ env.REGISTRY_GHCR }}:buildcache-${{ matrix.variant.name }}
cache-to: |
type=gha,mode=max,scope=docker-${{ matrix.variant.name }}
type=registry,ref=${{ env.REGISTRY_GHCR }}:buildcache-${{ matrix.variant.name }},mode=max
build-args: |
BUILDTIME=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.created'] }}
VERSION=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.version'] }}
REVISION=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }}
BUILDTIME=$(date -u +'%Y-%m-%dT%H:%M:%SZ')
VERSION=${{ needs.build-check.outputs.version }}
BUILD_TYPE=${{ needs.build-check.outputs.build_type }}
REVISION=${{ github.sha }}
BUILDKIT_INLINE_CACHE=1
# Enable advanced BuildKit features for better performance
provenance: false
sbom: false

# Create manifest for main production image
# Create manifest for main production image (only for stable releases)
create-manifest:
name: Create Manifest
needs: [build-check, build-docker]
if: needs.build-check.outputs.should_push == 'true' && startsWith(github.ref, 'refs/tags/')
if: needs.build-check.outputs.should_push == 'true' && needs.build-check.outputs.create_latest == 'true' && needs.build-check.outputs.build_type == 'release'
runs-on: ubuntu-latest
steps:
- name: Login to Docker Hub
@@ -194,17 +343,50 @@ jobs:

- name: Create and push manifest
run: |
VERSION=${GITHUB_REF#refs/tags/}
VERSION="${{ needs.build-check.outputs.version }}"

# Create main image tag (without variant suffix)
if [[ -n "${{ secrets.DOCKERHUB_USERNAME }}" ]]; then
docker buildx imagetools create \
-t ${{ env.REGISTRY_DOCKERHUB }}:${VERSION} \
-t ${{ env.REGISTRY_DOCKERHUB }}:latest \
${{ env.REGISTRY_DOCKERHUB }}:${VERSION}-production
fi
echo "🐳 Creating manifest for stable release: $VERSION"

docker buildx imagetools create \
-t ${{ env.REGISTRY_GHCR }}:${VERSION} \
-t ${{ env.REGISTRY_GHCR }}:latest \
${{ env.REGISTRY_GHCR }}:${VERSION}-production
# Create main image tag (without variant suffix) for stable releases only
# Note: The "production" variant already creates the main tags without suffix
echo "Manifest creation is handled by the production variant build step"
echo "Main tags ${VERSION} and latest are created directly by the production variant"

echo "✅ Manifest created successfully for stable release"

# Docker build summary
docker-summary:
name: Docker Build Summary
needs: [build-check, build-docker]
if: always() && needs.build-check.outputs.should_build == 'true'
runs-on: ubuntu-latest
steps:
- name: Docker build completion summary
run: |
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
VERSION="${{ needs.build-check.outputs.version }}"
CREATE_LATEST="${{ needs.build-check.outputs.create_latest }}"

echo "🐳 Docker build completed successfully!"
echo "📦 Build type: $BUILD_TYPE"
echo "🔢 Version: $VERSION"
echo ""

case "$BUILD_TYPE" in
"development")
echo "🛠️ Development Docker images have been built with dev-${VERSION} tags"
echo "⚠️ These are development images - not suitable for production use"
;;
"release")
echo "🚀 Release Docker images have been built with v${VERSION} tags"
echo "✅ These images are ready for production use"
if [[ "$CREATE_LATEST" == "true" ]]; then
echo "🏷️ Latest tags have been created for stable release"
fi
;;
"prerelease")
echo "🧪 Prerelease Docker images have been built with v${VERSION} tags"
echo "⚠️ These are prerelease images - use with caution"
echo "🚫 Latest tags NOT created for prerelease"
;;
esac
24 .github/workflows/issue-translator.yml vendored
@@ -1,8 +1,22 @@
name: 'issue-translator'
on:
issue_comment:
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: "issue-translator"
on:
issue_comment:
types: [created]
issues:
issues:
types: [opened]

jobs:
@@ -14,5 +28,5 @@ jobs:
IS_MODIFY_TITLE: false
# not required, default false. Decide whether to modify the issue title
# if true, the robot account @Issues-translate-bot must have modification permissions; invite @Issues-translate-bot to your project or use your custom bot.
CUSTOM_BOT_NOTE: Bot detected the issue body's language is not English, translate it automatically.
CUSTOM_BOT_NOTE: Bot detected the issue body's language is not English, translate it automatically.
# not required. Customize the translation robot prefix message.

78 .github/workflows/release-notes-template.md vendored Normal file
@@ -0,0 +1,78 @@
## RustFS ${VERSION_CLEAN}

${ORIGINAL_NOTES}

---

### 🚀 Quick Download

**Linux (Static Binaries - No Dependencies):**

```bash
# x86_64 (Intel/AMD)
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-x86_64-unknown-linux-musl.zip
unzip rustfs-x86_64-unknown-linux-musl.zip
sudo mv rustfs /usr/local/bin/

# ARM64 (Graviton, Apple Silicon VMs)
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-aarch64-unknown-linux-musl.zip
unzip rustfs-aarch64-unknown-linux-musl.zip
sudo mv rustfs /usr/local/bin/
```

**macOS:**

```bash
# Apple Silicon (M1/M2/M3)
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-aarch64-apple-darwin.zip
unzip rustfs-aarch64-apple-darwin.zip
sudo mv rustfs /usr/local/bin/

# Intel
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-x86_64-apple-darwin.zip
unzip rustfs-x86_64-apple-darwin.zip
sudo mv rustfs /usr/local/bin/
```

### 📁 Available Downloads

| Platform | Architecture | File | Description |
|----------|-------------|------|-------------|
| Linux | x86_64 | `rustfs-x86_64-unknown-linux-musl.zip` | Static binary, no dependencies |
| Linux | ARM64 | `rustfs-aarch64-unknown-linux-musl.zip` | Static binary, no dependencies |
| macOS | Apple Silicon | `rustfs-aarch64-apple-darwin.zip` | Native binary, ZIP archive |
| macOS | Intel | `rustfs-x86_64-apple-darwin.zip` | Native binary, ZIP archive |

### 🔐 Verification

Download checksums and verify your download:

```bash
# Download checksums
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/SHA256SUMS

# Verify (Linux)
sha256sum -c SHA256SUMS --ignore-missing

# Verify (macOS)
shasum -a 256 -c SHA256SUMS --ignore-missing
```

### 🛠️ System Requirements

- **Linux**: Any distribution with glibc 2.17+ (CentOS 7+, Ubuntu 16.04+)
- **macOS**: 10.15+ (Catalina or later)
- **Windows**: Windows 10 version 1809 or later

### 📚 Documentation

- [Installation Guide](https://github.com/rustfs/rustfs#installation)
- [Quick Start](https://github.com/rustfs/rustfs#quick-start)
- [Configuration](https://github.com/rustfs/rustfs/blob/main/docs/)
- [API Documentation](https://docs.rs/rustfs)

### 🆘 Support

- 🐛 [Report Issues](https://github.com/rustfs/rustfs/issues)
- 💬 [Community Discussions](https://github.com/rustfs/rustfs/discussions)
- 📖 [Documentation](https://github.com/rustfs/rustfs/tree/main/docs)
353 .github/workflows/release.yml vendored Normal file
@@ -0,0 +1,353 @@
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: Release

on:
push:
tags: ["*.*.*"]
workflow_dispatch:
inputs:
tag:
description: "Tag to create release for"
required: true
type: string

env:
CARGO_TERM_COLOR: always

jobs:
# Determine release type
release-check:
name: Release Type Check
runs-on: ubuntu-latest
outputs:
tag: ${{ steps.check.outputs.tag }}
version: ${{ steps.check.outputs.version }}
is_prerelease: ${{ steps.check.outputs.is_prerelease }}
release_type: ${{ steps.check.outputs.release_type }}
steps:
- name: Determine release type
id: check
run: |
if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
TAG="${{ github.event.inputs.tag }}"
else
TAG="${GITHUB_REF#refs/tags/}"
fi

VERSION="${TAG}"

# Check if this is a prerelease
IS_PRERELEASE=false
RELEASE_TYPE="release"

if [[ "$TAG" == *"alpha"* ]] || [[ "$TAG" == *"beta"* ]] || [[ "$TAG" == *"rc"* ]]; then
IS_PRERELEASE=true
if [[ "$TAG" == *"alpha"* ]]; then
RELEASE_TYPE="alpha"
elif [[ "$TAG" == *"beta"* ]]; then
RELEASE_TYPE="beta"
elif [[ "$TAG" == *"rc"* ]]; then
RELEASE_TYPE="rc"
fi
fi

echo "tag=$TAG" >> $GITHUB_OUTPUT
echo "version=$VERSION" >> $GITHUB_OUTPUT
echo "is_prerelease=$IS_PRERELEASE" >> $GITHUB_OUTPUT
echo "release_type=$RELEASE_TYPE" >> $GITHUB_OUTPUT

echo "📦 Release Type: $RELEASE_TYPE"
echo "🏷️ Tag: $TAG"
echo "🔢 Version: $VERSION"
echo "🚀 Is Prerelease: $IS_PRERELEASE"

# Create GitHub Release
create-release:
name: Create GitHub Release
needs: release-check
runs-on: ubuntu-latest
permissions:
contents: write
outputs:
release_id: ${{ steps.create.outputs.release_id }}
release_url: ${{ steps.create.outputs.release_url }}
steps:
- name: Checkout repository
uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Create GitHub Release
id: create
env:
GH_TOKEN: ${{ github.token }}
run: |
TAG="${{ needs.release-check.outputs.tag }}"
VERSION="${{ needs.release-check.outputs.version }}"
IS_PRERELEASE="${{ needs.release-check.outputs.is_prerelease }}"
RELEASE_TYPE="${{ needs.release-check.outputs.release_type }}"

# Check if release already exists
if gh release view "$TAG" >/dev/null 2>&1; then
echo "Release $TAG already exists"
RELEASE_ID=$(gh release view "$TAG" --json databaseId --jq '.databaseId')
RELEASE_URL=$(gh release view "$TAG" --json url --jq '.url')
else
# Get release notes from tag message
RELEASE_NOTES=$(git tag -l --format='%(contents)' "${TAG}")
if [[ -z "$RELEASE_NOTES" || "$RELEASE_NOTES" =~ ^[[:space:]]*$ ]]; then
if [[ "$IS_PRERELEASE" == "true" ]]; then
RELEASE_NOTES="Pre-release ${VERSION} (${RELEASE_TYPE})"
else
RELEASE_NOTES="Release ${VERSION}"
fi
fi

# Create release title
if [[ "$IS_PRERELEASE" == "true" ]]; then
TITLE="RustFS $VERSION (${RELEASE_TYPE})"
else
TITLE="RustFS $VERSION"
fi

# Create the release
PRERELEASE_FLAG=""
if [[ "$IS_PRERELEASE" == "true" ]]; then
PRERELEASE_FLAG="--prerelease"
fi

gh release create "$TAG" \
--title "$TITLE" \
--notes "$RELEASE_NOTES" \
$PRERELEASE_FLAG \
--draft

RELEASE_ID=$(gh release view "$TAG" --json databaseId --jq '.databaseId')
RELEASE_URL=$(gh release view "$TAG" --json url --jq '.url')
fi

echo "release_id=$RELEASE_ID" >> $GITHUB_OUTPUT
echo "release_url=$RELEASE_URL" >> $GITHUB_OUTPUT
echo "Created release: $RELEASE_URL"

# Wait for build artifacts from build.yml
wait-for-artifacts:
name: Wait for Build Artifacts
needs: release-check
runs-on: ubuntu-latest
steps:
- name: Wait for build workflow
uses: lewagon/wait-on-check-action@v1.3.1
with:
ref: ${{ needs.release-check.outputs.tag }}
check-name: "Build RustFS"
repo-token: ${{ secrets.GITHUB_TOKEN }}
wait-interval: 30
allowed-conclusions: success

# Download and prepare release assets
prepare-assets:
name: Prepare Release Assets
needs: [release-check, wait-for-artifacts]
runs-on: ubuntu-latest
outputs:
assets_prepared: ${{ steps.prepare.outputs.assets_prepared }}
steps:
- name: Checkout repository
uses: actions/checkout@v4

- name: Download artifacts from build workflow
uses: actions/download-artifact@v4
with:
path: ./artifacts
pattern: rustfs-*
merge-multiple: true

- name: Prepare release assets
id: prepare
run: |
VERSION="${{ needs.release-check.outputs.version }}"
TAG="${{ needs.release-check.outputs.tag }}"

mkdir -p ./release-assets

# Copy and verify artifacts
ASSETS_COUNT=0
for file in ./artifacts/rustfs-*.zip; do
if [[ -f "$file" ]]; then
cp "$file" ./release-assets/
ASSETS_COUNT=$((ASSETS_COUNT + 1))
fi
done

if [[ $ASSETS_COUNT -eq 0 ]]; then
echo "❌ No artifacts found!"
exit 1
fi

cd ./release-assets

# Generate checksums
if ls *.zip >/dev/null 2>&1; then
sha256sum *.zip > SHA256SUMS
sha512sum *.zip > SHA512SUMS
fi

# TODO: Add GPG signing for signatures
# For now, create placeholder signature files
for file in *.zip; do
echo "# Signature for $file" > "${file}.asc"
echo "# GPG signature will be added in future versions" >> "${file}.asc"
done

echo "assets_prepared=true" >> $GITHUB_OUTPUT

echo "📦 Prepared assets:"
ls -la

echo "🔢 Asset count: $ASSETS_COUNT"

- name: Upload prepared assets
uses: actions/upload-artifact@v4
with:
name: release-assets-${{ needs.release-check.outputs.tag }}
path: ./release-assets/
retention-days: 30

# Upload assets to GitHub Release
upload-assets:
name: Upload Release Assets
needs: [release-check, create-release, prepare-assets]
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- name: Download prepared assets
uses: actions/download-artifact@v4
with:
name: release-assets-${{ needs.release-check.outputs.tag }}
path: ./release-assets

- name: Upload to GitHub Release
env:
GH_TOKEN: ${{ github.token }}
run: |
TAG="${{ needs.release-check.outputs.tag }}"

cd ./release-assets

# Upload all files
for file in *; do
if [[ -f "$file" ]]; then
echo "📤 Uploading $file..."
gh release upload "$TAG" "$file" --clobber
fi
done

echo "✅ All assets uploaded successfully"

# Update latest.json for stable releases only
update-latest:
name: Update Latest Version
needs: [release-check, upload-assets]
if: needs.release-check.outputs.is_prerelease == 'false'
runs-on: ubuntu-latest
steps:
- name: Update latest.json
env:
OSS_ACCESS_KEY_ID: ${{ secrets.ALICLOUDOSS_KEY_ID }}
OSS_ACCESS_KEY_SECRET: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
run: |
if [[ -z "$OSS_ACCESS_KEY_ID" ]]; then
echo "⚠️ OSS credentials not available, skipping latest.json update"
exit 0
fi

VERSION="${{ needs.release-check.outputs.version }}"
TAG="${{ needs.release-check.outputs.tag }}"

# Install ossutil
OSSUTIL_VERSION="2.1.1"
OSSUTIL_ZIP="ossutil-${OSSUTIL_VERSION}-linux-amd64.zip"
OSSUTIL_DIR="ossutil-${OSSUTIL_VERSION}-linux-amd64"

curl -o "$OSSUTIL_ZIP" "https://gosspublic.alicdn.com/ossutil/v2/${OSSUTIL_VERSION}/${OSSUTIL_ZIP}"
unzip "$OSSUTIL_ZIP"
chmod +x "${OSSUTIL_DIR}/ossutil"

# Create latest.json
cat > latest.json << EOF
{
"version": "${VERSION}",
"tag": "${TAG}",
"release_date": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
"release_type": "stable",
"download_url": "https://github.com/${{ github.repository }}/releases/tag/${TAG}"
}
EOF

# Upload to OSS
./${OSSUTIL_DIR}/ossutil cp latest.json oss://rustfs-version/latest.json --force

echo "✅ Updated latest.json for stable release $VERSION"

# Publish release (remove draft status)
publish-release:
name: Publish Release
needs: [release-check, create-release, upload-assets]
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- name: Checkout repository
uses: actions/checkout@v4

- name: Update release notes and publish
env:
GH_TOKEN: ${{ github.token }}
run: |
TAG="${{ needs.release-check.outputs.tag }}"
VERSION="${{ needs.release-check.outputs.version }}"
IS_PRERELEASE="${{ needs.release-check.outputs.is_prerelease }}"
RELEASE_TYPE="${{ needs.release-check.outputs.release_type }}"

# Get original release notes from tag
ORIGINAL_NOTES=$(git tag -l --format='%(contents)' "${TAG}")
if [[ -z "$ORIGINAL_NOTES" || "$ORIGINAL_NOTES" =~ ^[[:space:]]*$ ]]; then
if [[ "$IS_PRERELEASE" == "true" ]]; then
ORIGINAL_NOTES="Pre-release ${VERSION} (${RELEASE_TYPE})"
else
ORIGINAL_NOTES="Release ${VERSION}"
fi
fi

# Use release notes template if available
if [[ -f ".github/workflows/release-notes-template.md" ]]; then
# Substitute variables in template
sed -e "s/\${VERSION}/$TAG/g" \
-e "s/\${VERSION_CLEAN}/$VERSION/g" \
-e "s/\${ORIGINAL_NOTES}/$(echo "$ORIGINAL_NOTES" | sed 's/[[\.*^$()+?{|]/\\&/g')/g" \
.github/workflows/release-notes-template.md > enhanced_notes.md

# Update release notes
gh release edit "$TAG" --notes-file enhanced_notes.md
fi

# Publish the release (remove draft status)
gh release edit "$TAG" --draft=false

echo "🎉 Released $TAG successfully!"
echo "📄 Release URL: ${{ needs.create-release.outputs.release_url }}"
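The `update-latest` job above publishes a small JSON version marker alongside each stable release. A minimal consumer sketch, assuming the `rustfs-version` bucket is served over HTTP at some `$BASE_URL` (the public URL is not stated in this diff):

```bash
# Resolve the current stable RustFS version from the published marker.
# $BASE_URL is an assumption; point it at wherever oss://rustfs-version is exposed.
curl -s "$BASE_URL/latest.json" | jq -r '.version'
```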
679 Cargo.lock generated
23 Cargo.toml
@@ -36,6 +36,7 @@ members = [
"crates/utils", # Utility functions and helpers
"crates/workers", # Worker thread pools and task scheduling
"crates/zip", # ZIP file handling and compression
"crates/ahm",
]
resolver = "2"

@@ -62,6 +63,7 @@ rustfs-filemeta = { path = "crates/filemeta" }
rustfs-rio = { path = "crates/rio" }

[workspace.dependencies]
rustfs-ahm = { path = "crates/ahm", version = "0.0.5" }
rustfs-s3select-api = { path = "crates/s3select-api", version = "0.0.5" }
rustfs-appauth = { path = "crates/appauth", version = "0.0.5" }
rustfs-common = { path = "crates/common", version = "0.0.5" }
@@ -87,7 +89,7 @@ aes-gcm = { version = "0.10.3", features = ["std"] }
arc-swap = "1.7.1"
argon2 = { version = "0.5.3", features = ["std"] }
atoi = "2.0.0"
async-channel = "2.4.0"
async-channel = "2.5.0"
async-recursion = "1.1.1"
async-trait = "0.1.88"
async-compression = { version = "0.4.0" }
@@ -105,7 +107,7 @@ byteorder = "1.5.0"
cfg-if = "1.0.1"
chacha20poly1305 = { version = "0.10.1" }
chrono = { version = "0.4.41", features = ["serde"] }
clap = { version = "4.5.40", features = ["derive", "env"] }
clap = { version = "4.5.41", features = ["derive", "env"] }
const-str = { version = "0.6.2", features = ["std", "proc"] }
crc32fast = "1.4.2"
criterion = { version = "0.5", features = ["html_reports"] }
@@ -114,7 +116,7 @@ datafusion = "46.0.1"
derive_builder = "0.20.2"
dioxus = { version = "0.6.3", features = ["router"] }
dirs = "6.0.0"
enumset = "1.1.6"
enumset = "1.1.7"
flatbuffers = "25.2.10"
flate2 = "1.1.2"
flexi_logger = { version = "0.31.2", features = ["trc", "dont_minimize_extra_stacks"] }
@@ -128,7 +130,7 @@ hex-simd = "0.8.0"
highway = { version = "1.3.0" }
hmac = "0.12.1"
hyper = "1.6.0"
hyper-util = { version = "0.1.14", features = [
hyper-util = { version = "0.1.15", features = [
"tokio",
"server-auto",
"server-graceful",
@@ -180,9 +182,9 @@ pbkdf2 = "0.12.2"
percent-encoding = "2.3.1"
pin-project-lite = "0.2.16"
prost = "0.13.5"
quick-xml = "0.37.5"
quick-xml = "0.38.0"
rand = "0.9.1"
rdkafka = { version = "0.37.0", features = ["tokio"] }
rdkafka = { version = "0.38.0", features = ["tokio"] }
reed-solomon-simd = { version = "3.0.1" }
regex = { version = "1.11.1" }
reqwest = { version = "0.12.22", default-features = false, features = [
@@ -205,7 +207,7 @@ rumqttc = { version = "0.24" }
rust-embed = { version = "8.7.2" }
rust-i18n = { version = "3.1.5" }
rustfs-rsc = "2025.506.1"
rustls = { version = "0.23.28" }
rustls = { version = "0.23.29" }
rustls-pki-types = "1.12.0"
rustls-pemfile = "2.2.0"
s3s = { version = "0.12.0-minio-preview.1" }
@@ -220,10 +222,11 @@ siphasher = "1.0.1"
smallvec = { version = "1.15.1", features = ["serde"] }
snafu = "0.8.6"
snap = "1.1.1"
socket2 = "0.5.10"
socket2 = "0.6.0"
strum = { version = "0.27.1", features = ["derive"] }
sysinfo = "0.35.2"
sysinfo = "0.36.0"
tempfile = "3.20.0"
temp-env = "0.3.6"
test-case = "3.3.1"
thiserror = "2.0.12"
time = { version = "0.3.41", features = [
@@ -237,6 +240,7 @@ tokio = { version = "1.46.1", features = ["fs", "rt-multi-thread"] }
tokio-rustls = { version = "0.26.2", default-features = false }
tokio-stream = { version = "0.1.17" }
tokio-tar = "0.3.1"
tokio-test = "0.4.4"
tokio-util = { version = "0.7.15", features = ["io", "compat"] }
tonic = { version = "0.13.1", features = ["gzip"] }
tonic-build = { version = "0.13.1" }
@@ -261,6 +265,7 @@ winapi = { version = "0.3.9" }
xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }
zip = "2.4.2"
zstd = "0.13.3"
anyhow = "1.0.98"

[profile.wasm-dev]
inherits = "dev"
100 Dockerfile
@@ -12,40 +12,106 @@
# See the License for the specific language governing permissions and
# limitations under the License.

FROM alpine:3.18 AS builder
# Multi-stage Alpine build for minimal runtime image
FROM rust:1.85-alpine AS builder

RUN apk add -U --no-cache \
ca-certificates \
# Build arguments for dynamic artifact download
ARG VERSION=""
ARG BUILD_TYPE="release"
ARG TARGETARCH

# Install build dependencies
RUN apk add --no-cache \
musl-dev \
pkgconfig \
openssl-dev \
openssl-libs-static \
curl \
unzip \
bash \
unzip
wget \
ca-certificates

# Install protoc
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
&& unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
&& mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
&& mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3

RUN curl -Lo /tmp/rustfs.zip https://dl.rustfs.com/artifacts/rustfs/rustfs-release-x86_64-unknown-linux-musl.latest.zip && \
unzip -o /tmp/rustfs.zip -d /tmp && \
tar -xzf /tmp/rustfs-x86_64-unknown-linux-musl.tar.gz -C /tmp && \
mv /tmp/rustfs-x86_64-unknown-linux-musl/bin/rustfs /rustfs && \
chmod +x /rustfs && \
rm -rf /tmp/*
# Install flatc
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
&& unzip Linux.flatc.binary.g++-13.zip \
&& mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc \
&& rm -rf Linux.flatc.binary.g++-13.zip

# Option A: Download pre-built binary (faster)
RUN if [ -n "$VERSION" ]; then \
# Map TARGETARCH to our naming convention
case "${TARGETARCH}" in \
amd64) ARCH="x86_64" ;; \
arm64) ARCH="aarch64" ;; \
*) echo "Unsupported architecture: ${TARGETARCH}" && exit 1 ;; \
esac; \
\
# Determine download path and filename
if [ "${BUILD_TYPE}" = "development" ]; then \
DOWNLOAD_PATH="artifacts/rustfs/dev"; \
FILENAME="rustfs-linux-${ARCH}-dev-${VERSION}.zip"; \
else \
DOWNLOAD_PATH="artifacts/rustfs/release"; \
FILENAME="rustfs-linux-${ARCH}-v${VERSION}.zip"; \
fi; \
\
# Download the binary
DOWNLOAD_URL="https://dl.rustfs.com/${DOWNLOAD_PATH}/${FILENAME}"; \
echo "Downloading RustFS binary from: ${DOWNLOAD_URL}"; \
curl -Lo /tmp/rustfs.zip "${DOWNLOAD_URL}"; \
unzip -o /tmp/rustfs.zip -d /tmp; \
mv /tmp/rustfs /usr/local/bin/rustfs; \
chmod +x /usr/local/bin/rustfs; \
rm -rf /tmp/*; \
else \
echo "No VERSION provided, will build from source"; \
echo "Source build not yet implemented in Alpine variant"; \
exit 1; \
fi

# Final Alpine runtime image
FROM alpine:3.18

RUN apk add -U --no-cache \
RUN apk add --no-cache \
ca-certificates \
tzdata \
bash

COPY --from=builder /rustfs /usr/local/bin/rustfs
# Create rustfs user for security
RUN addgroup -g 1000 rustfs && \
adduser -D -u 1000 -G rustfs rustfs

WORKDIR /app

# Copy binary from builder
COPY --from=builder /usr/local/bin/rustfs /app/rustfs
RUN chmod +x /app/rustfs && chown rustfs:rustfs /app/rustfs

# Create data directories
RUN mkdir -p /data && chown -R rustfs:rustfs /data /app

# Switch to non-root user
USER rustfs

# Environment variables
ENV RUSTFS_ACCESS_KEY=rustfsadmin \
RUSTFS_SECRET_KEY=rustfsadmin \
RUSTFS_ADDRESS=":9000" \
RUSTFS_CONSOLE_ADDRESS=":9001" \
RUSTFS_CONSOLE_ENABLE=true \
RUSTFS_VOLUMES=/data \
RUST_LOG=warn

EXPOSE 9000 9001
EXPOSE 9000

RUN mkdir -p /data
VOLUME /data
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
CMD wget --no-verbose --tries=1 --spider http://localhost:9000/health || exit 1

CMD ["rustfs", "/data"]
CMD ["/app/rustfs"]
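The ARG-driven download in the Alpine builder stage above can be exercised with Docker Buildx; a minimal sketch, assuming a published release artifact exists for the given version (image tag and version values are placeholders):

```bash
# Build the multi-arch Alpine image from a pre-built release artifact.
# VERSION and BUILD_TYPE map to the Dockerfile ARGs; TARGETARCH is set by buildx.
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  --build-arg VERSION=1.0.0 \
  --build-arg BUILD_TYPE=release \
  -t rustfs/rustfs:1.0.0-alpine \
  .
```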
@@ -1,21 +1,61 @@
FROM ubuntu:latest
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# RUN apk add --no-cache <package-name>
# If rustfs has dependencies, add them here, for example:
# RUN apk add --no-cache openssl
# RUN apk add --no-cache bash  # install Bash
# Dockerfile for RustFS with observability features
FROM ubuntu:22.04

# Avoid interactive prompts during build
ENV DEBIAN_FRONTEND=noninteractive

# Install runtime dependencies
RUN apt-get update && apt-get install -y \
ca-certificates \
wget \
curl \
&& rm -rf /var/lib/apt/lists/*

WORKDIR /app

# Create directories matching RUSTFS_VOLUMES
RUN mkdir -p /root/data/target/volume/test1 /root/data/target/volume/test2 /root/data/target/volume/test3 /root/data/target/volume/test4
# Create rustfs user for security
RUN groupadd -g 1000 rustfs && \
useradd -d /app -g rustfs -u 1000 -s /bin/bash rustfs

# COPY ./target/x86_64-unknown-linux-musl/release/rustfs /app/rustfs
# Create data directories matching RUSTFS_VOLUMES pattern
RUN mkdir -p /data/rustfs{0,1,2,3} && \
chown -R rustfs:rustfs /data /app

# Copy RustFS binary (expects it to be built with observability features)
# Note: This assumes the binary is built locally with observability features enabled
COPY ./target/x86_64-unknown-linux-gnu/release/rustfs /app/rustfs
RUN chmod +x /app/rustfs && chown rustfs:rustfs /app/rustfs

RUN chmod +x /app/rustfs
# Switch to non-root user
USER rustfs

# Environment variables for observability
ENV RUSTFS_ACCESS_KEY=rustfsadmin \
RUSTFS_SECRET_KEY=rustfsadmin \
RUSTFS_ADDRESS=":9000" \
RUSTFS_CONSOLE_ENABLE=true \
RUSTFS_VOLUMES=/data/rustfs0,/data/rustfs1,/data/rustfs2,/data/rustfs3 \
RUSTFS_OBS_ENDPOINT=http://otel-collector:4317 \
RUST_LOG=info

EXPOSE 9000
EXPOSE 9002

CMD ["/app/rustfs"]
# Health check
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
CMD wget --no-verbose --tries=1 --spider http://localhost:9000/health || exit 1

CMD ["/app/rustfs"]
3 Makefile
@@ -31,7 +31,8 @@ check:
.PHONY: test
test:
@echo "🧪 Running tests..."
cargo test --all --exclude e2e_test
cargo nextest run --all --exclude e2e_test
cargo test --all --doc

.PHONY: pre-commit
pre-commit: fmt clippy check test

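Both the Makefile target above and CI now invoke cargo-nextest, which is an external cargo subcommand; a one-time install sketch in case it is missing locally:

```bash
# Install the nextest runner used by `make test` and CI.
cargo install cargo-nextest --locked
```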
@@ -83,11 +83,11 @@ To get started with RustFS, follow these steps:
2. **Docker Quick Start (Option 2)**

```bash
podman run -d -p 9000:9000 -p 9001:9001 -v /data:/data quay.io/rustfs/rustfs
docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs
```


3. **Access the Console**: Open your web browser and navigate to `http://localhost:9001` to access the RustFS console; the default username and password are `rustfsadmin`.
3. **Access the Console**: Open your web browser and navigate to `http://localhost:9000` to access the RustFS console; the default username and password are `rustfsadmin`.
4. **Create a Bucket**: Use the console to create a new bucket for your objects.
5. **Upload Objects**: You can upload files directly through the console or use S3-compatible APIs to interact with your RustFS instance.

@@ -122,7 +122,7 @@ If you have any questions or need assistance, you can:
RustFS is a community-driven project, and we appreciate all contributions. Check out the [Contributors](https://github.com/rustfs/rustfs/graphs/contributors) page to see the amazing people who have helped make RustFS better.

<a href="https://github.com/rustfs/rustfs/graphs/contributors">
<img src="https://contrib.rocks/image?repo=rustfs/rustfs" />
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" />
</a>

## License

@@ -70,11 +70,11 @@ RustFS is built with Rust (one of the world's most popular programming languages)
2. **Docker Quick Start (Option 2)**

```bash
podman run -d -p 9000:9000 -p 9001:9001 -v /data:/data quay.io/rustfs/rustfs
docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs
```


3. **Access the Console**: Open a web browser and navigate to `http://localhost:9001` to access the RustFS console; the default username and password are `rustfsadmin`.
3. **Access the Console**: Open a web browser and navigate to `http://localhost:9000` to access the RustFS console; the default username and password are `rustfsadmin`.
4. **Create a Bucket**: Use the console to create a new bucket for your objects.
5. **Upload Objects**: Upload files directly through the console, or use S3-compatible APIs to interact with your RustFS instance.

@@ -26,7 +26,6 @@ dioxus = { workspace = true, features = ["router"] }
dirs = { workspace = true }
hex = { workspace = true }
keyring = { workspace = true }
lazy_static = { workspace = true }
rfd = { workspace = true }
rust-embed = { workspace = true, features = ["interpolate-folder-path"] }
rust-i18n = { workspace = true }
@@ -37,7 +37,9 @@ copyright = "Copyright 2025 rustfs.com"

icon = [
    "assets/icons/icon.icns",
    "assets/icons/icon.ico"
    "assets/icons/icon.ico",
    "assets/icons/icon.png",
    "assets/icons/rustfs-icon.png",
]
#[bundle.macos]
#provider_short_name = "RustFs"
[image changed: 23 KiB before, 23 KiB after]
[image changed: 23 KiB before, 23 KiB after]
BIN cli/rustfs-gui/assets/icons/rustfs-icon.png (new file, 23 KiB)
BIN cli/rustfs-gui/assets/rustfs-icon.png (new file, 23 KiB)
@@ -1,20 +1,15 @@
<svg width="1558" height="260" viewBox="0 0 1558 260" fill="none" xmlns="http://www.w3.org/2000/svg">
<g clip-path="url(#clip0_0_3)">
<path d="M1288.5 112.905H1159.75V58.4404H1262L1270 0L1074 0V260H1159.75V162.997H1296.95L1288.5 112.905Z"
      fill="#0196D0"/>
<path d="M1058.62 58.4404V0H789V58.4404H881.133V260H966.885V58.4404H1058.62Z" fill="#0196D0"/>
<path d="M521 179.102V0L454.973 15V161C454.973 181.124 452.084 193.146 443.5 202C434.916 211.257 419.318 214.5 400.5 214.5C381.022 214.5 366.744 210.854 357.5 202C348.916 193.548 346.357 175.721 346.357 156V0L280 15V175.48C280 208.08 290.234 229.412 309.712 241.486C329.19 253.56 358.903 260 400.5 260C440.447 260 470.159 253.56 490.297 241.486C510.766 229.412 521 208.483 521 179.102Z"
      fill="#0196D0"/>
<path d="M172.84 84.2813C172.84 97.7982 168.249 107.737 158.41 113.303C149.883 118.471 137.092 121.254 120.693 122.049V162.997C129.876 163.792 138.076 166.177 144.307 176.514L184.647 260H265L225.316 180.489C213.181 155.046 201.374 149.48 178.744 143.517C212.197 138.349 241.386 118.471 241.386 73.1499C241.386 53.2722 233.843 30.2141 218.756 17.8899C203.998 5.56575 183.991 0 159.394 0H120.693V48.5015H127.58C142.23 48.5015 153.6 51.4169 161.689 57.2477C169.233 62.8135 172.84 71.5596 172.84 84.2813ZM120.693 122.049C119.163 122.049 117.741 122.049 116.43 122.049H68.5457V48.5015H120.693V0H0V260H70.5137V162.997H110.526C113.806 162.997 117.741 162.997 120.693 162.997V122.049Z"
      fill="#0196D0"/>
<path d="M774 179.297C774 160.829 766.671 144.669 752.013 131.972C738.127 119.66 712.025 110.169 673.708 103.5C662.136 101.191 651.722 99.6523 643.235 97.3437C586.532 84.6467 594.632 52.7118 650.564 52.7118C680.651 52.7118 709.582 61.946 738.127 66.9478C742.37 67.7174 743.913 68.1021 744.298 68.1021L750.47 12.697C720.383 3.46282 684.895 0 654.036 0C616.619 0 587.689 6.54088 567.245 19.2379C546.801 31.9349 536 57.7137 536 82.3382C536 103.5 543.715 119.66 559.916 131.972C575.731 143.515 604.276 152.749 645.55 160.059C658.279 162.368 668.694 163.907 676.794 166.215C685.023 168.524 691.066 170.704 694.924 172.756C702.253 176.604 706.11 182.375 706.11 188.531C706.11 196.611 701.481 202.767 692.224 207C664.836 220.081 587.689 212.001 556.83 198.15L543.715 247.784C547.186 248.169 552.972 249.323 559.916 250.477C616.619 259.327 690.681 270.869 741.212 238.935C762.814 225.468 774 206.23 774 179.297Z"
      fill="#0196D0"/>
<path d="M1558 179.568C1558 160.383 1550.42 144.268 1535.67 131.99C1521.32 119.968 1494.34 110.631 1454.74 103.981C1442.38 101.679 1432.01 99.3764 1422.84 97.8416C1422.44 97.8416 1422.04 97.8416 1422.04 97.4579V112.422L1361.04 75.2038L1422.04 38.3692V52.9496C1424.7 52.9496 1427.49 52.9496 1430.41 52.9496C1461.51 52.9496 1491.42 62.5419 1521.32 67.5299C1525.31 67.9136 1526.9 67.9136 1527.3 67.9136L1533.68 12.6619C1502.98 3.83692 1465.9 0 1434 0C1395.33 0 1365.43 6.52277 1345.09 19.5683C1323.16 32.6139 1312 57.9376 1312 82.8776C1312 103.981 1320.37 120.096 1336.72 131.607C1353.46 143.885 1382.97 153.093 1425.23 160.383C1434 161.535 1441.18 162.686 1447.56 164.22L1448.36 150.791L1507.36 190.312L1445.57 224.844L1445.96 212.949C1409.68 215.635 1357.45 209.112 1333.53 197.985L1320.37 247.482C1323.56 248.249 1329.54 248.633 1336.72 250.551C1395.33 259.376 1471.88 270.887 1524.11 238.657C1546.84 225.611 1558 205.659 1558 179.568Z"
      fill="#0196D0"/>
</g>
<defs>
<clipPath id="clip0_0_3">
<rect width="1558" height="260" fill="white"/>
</clipPath>
</defs>
<g clip-path="url(#clip0_0_3)">
<path d="M1288.5 112.905H1159.75V58.4404H1262L1270 0L1074 0V260H1159.75V162.997H1296.95L1288.5 112.905Z" fill="#0196D0"/>
<path d="M1058.62 58.4404V0H789V58.4404H881.133V260H966.885V58.4404H1058.62Z" fill="#0196D0"/>
<path d="M521 179.102V0L454.973 15V161C454.973 181.124 452.084 193.146 443.5 202C434.916 211.257 419.318 214.5 400.5 214.5C381.022 214.5 366.744 210.854 357.5 202C348.916 193.548 346.357 175.721 346.357 156V0L280 15V175.48C280 208.08 290.234 229.412 309.712 241.486C329.19 253.56 358.903 260 400.5 260C440.447 260 470.159 253.56 490.297 241.486C510.766 229.412 521 208.483 521 179.102Z" fill="#0196D0"/>
<path d="M172.84 84.2813C172.84 97.7982 168.249 107.737 158.41 113.303C149.883 118.471 137.092 121.254 120.693 122.049V162.997C129.876 163.792 138.076 166.177 144.307 176.514L184.647 260H265L225.316 180.489C213.181 155.046 201.374 149.48 178.744 143.517C212.197 138.349 241.386 118.471 241.386 73.1499C241.386 53.2722 233.843 30.2141 218.756 17.8899C203.998 5.56575 183.991 0 159.394 0H120.693V48.5015H127.58C142.23 48.5015 153.6 51.4169 161.689 57.2477C169.233 62.8135 172.84 71.5596 172.84 84.2813ZM120.693 122.049C119.163 122.049 117.741 122.049 116.43 122.049H68.5457V48.5015H120.693V0H0V260H70.5137V162.997H110.526C113.806 162.997 117.741 162.997 120.693 162.997V122.049Z" fill="#0196D0"/>
<path d="M774 179.297C774 160.829 766.671 144.669 752.013 131.972C738.127 119.66 712.025 110.169 673.708 103.5C662.136 101.191 651.722 99.6523 643.235 97.3437C586.532 84.6467 594.632 52.7118 650.564 52.7118C680.651 52.7118 709.582 61.946 738.127 66.9478C742.37 67.7174 743.913 68.1021 744.298 68.1021L750.47 12.697C720.383 3.46282 684.895 0 654.036 0C616.619 0 587.689 6.54088 567.245 19.2379C546.801 31.9349 536 57.7137 536 82.3382C536 103.5 543.715 119.66 559.916 131.972C575.731 143.515 604.276 152.749 645.55 160.059C658.279 162.368 668.694 163.907 676.794 166.215C685.023 168.524 691.066 170.704 694.924 172.756C702.253 176.604 706.11 182.375 706.11 188.531C706.11 196.611 701.481 202.767 692.224 207C664.836 220.081 587.689 212.001 556.83 198.15L543.715 247.784C547.186 248.169 552.972 249.323 559.916 250.477C616.619 259.327 690.681 270.869 741.212 238.935C762.814 225.468 774 206.23 774 179.297Z" fill="#0196D0"/>
<path d="M1558 179.568C1558 160.383 1550.42 144.268 1535.67 131.99C1521.32 119.968 1494.34 110.631 1454.74 103.981C1442.38 101.679 1432.01 99.3764 1422.84 97.8416C1422.44 97.8416 1422.04 97.8416 1422.04 97.4579V112.422L1361.04 75.2038L1422.04 38.3692V52.9496C1424.7 52.9496 1427.49 52.9496 1430.41 52.9496C1461.51 52.9496 1491.42 62.5419 1521.32 67.5299C1525.31 67.9136 1526.9 67.9136 1527.3 67.9136L1533.68 12.6619C1502.98 3.83692 1465.9 0 1434 0C1395.33 0 1365.43 6.52277 1345.09 19.5683C1323.16 32.6139 1312 57.9376 1312 82.8776C1312 103.981 1320.37 120.096 1336.72 131.607C1353.46 143.885 1382.97 153.093 1425.23 160.383C1434 161.535 1441.18 162.686 1447.56 164.22L1448.36 150.791L1507.36 190.312L1445.57 224.844L1445.96 212.949C1409.68 215.635 1357.45 209.112 1333.53 197.985L1320.37 247.482C1323.56 248.249 1329.54 248.633 1336.72 250.551C1395.33 259.376 1471.88 270.887 1524.11 238.657C1546.84 225.611 1558 205.659 1558 179.568Z" fill="#0196D0"/>
</g>
<defs>
<clipPath id="clip0_0_3">
<rect width="1558" height="260" fill="white"/>
</clipPath>
</defs>
</svg>
[image changed: 3.5 KiB before, 3.4 KiB after]
[image removed: 34 KiB]
@@ -1,15 +0,0 @@
|
||||
<svg width="1558" height="260" viewBox="0 0 1558 260" fill="none" xmlns="http://www.w3.org/2000/svg">
|
||||
<g clip-path="url(#clip0_0_3)">
|
||||
<path d="M1288.5 112.905H1159.75V58.4404H1262L1270 0L1074 0V260H1159.75V162.997H1296.95L1288.5 112.905Z" fill="#0196D0"/>
|
||||
<path d="M1058.62 58.4404V0H789V58.4404H881.133V260H966.885V58.4404H1058.62Z" fill="#0196D0"/>
|
||||
<path d="M521 179.102V0L454.973 15V161C454.973 181.124 452.084 193.146 443.5 202C434.916 211.257 419.318 214.5 400.5 214.5C381.022 214.5 366.744 210.854 357.5 202C348.916 193.548 346.357 175.721 346.357 156V0L280 15V175.48C280 208.08 290.234 229.412 309.712 241.486C329.19 253.56 358.903 260 400.5 260C440.447 260 470.159 253.56 490.297 241.486C510.766 229.412 521 208.483 521 179.102Z" fill="#0196D0"/>
|
||||
<path d="M172.84 84.2813C172.84 97.7982 168.249 107.737 158.41 113.303C149.883 118.471 137.092 121.254 120.693 122.049V162.997C129.876 163.792 138.076 166.177 144.307 176.514L184.647 260H265L225.316 180.489C213.181 155.046 201.374 149.48 178.744 143.517C212.197 138.349 241.386 118.471 241.386 73.1499C241.386 53.2722 233.843 30.2141 218.756 17.8899C203.998 5.56575 183.991 0 159.394 0H120.693V48.5015H127.58C142.23 48.5015 153.6 51.4169 161.689 57.2477C169.233 62.8135 172.84 71.5596 172.84 84.2813ZM120.693 122.049C119.163 122.049 117.741 122.049 116.43 122.049H68.5457V48.5015H120.693V0H0V260H70.5137V162.997H110.526C113.806 162.997 117.741 162.997 120.693 162.997V122.049Z" fill="#0196D0"/>
|
||||
<path d="M774 179.297C774 160.829 766.671 144.669 752.013 131.972C738.127 119.66 712.025 110.169 673.708 103.5C662.136 101.191 651.722 99.6523 643.235 97.3437C586.532 84.6467 594.632 52.7118 650.564 52.7118C680.651 52.7118 709.582 61.946 738.127 66.9478C742.37 67.7174 743.913 68.1021 744.298 68.1021L750.47 12.697C720.383 3.46282 684.895 0 654.036 0C616.619 0 587.689 6.54088 567.245 19.2379C546.801 31.9349 536 57.7137 536 82.3382C536 103.5 543.715 119.66 559.916 131.972C575.731 143.515 604.276 152.749 645.55 160.059C658.279 162.368 668.694 163.907 676.794 166.215C685.023 168.524 691.066 170.704 694.924 172.756C702.253 176.604 706.11 182.375 706.11 188.531C706.11 196.611 701.481 202.767 692.224 207C664.836 220.081 587.689 212.001 556.83 198.15L543.715 247.784C547.186 248.169 552.972 249.323 559.916 250.477C616.619 259.327 690.681 270.869 741.212 238.935C762.814 225.468 774 206.23 774 179.297Z" fill="#0196D0"/>
|
||||
<path d="M1558 179.568C1558 160.383 1550.42 144.268 1535.67 131.99C1521.32 119.968 1494.34 110.631 1454.74 103.981C1442.38 101.679 1432.01 99.3764 1422.84 97.8416C1422.44 97.8416 1422.04 97.8416 1422.04 97.4579V112.422L1361.04 75.2038L1422.04 38.3692V52.9496C1424.7 52.9496 1427.49 52.9496 1430.41 52.9496C1461.51 52.9496 1491.42 62.5419 1521.32 67.5299C1525.31 67.9136 1526.9 67.9136 1527.3 67.9136L1533.68 12.6619C1502.98 3.83692 1465.9 0 1434 0C1395.33 0 1365.43 6.52277 1345.09 19.5683C1323.16 32.6139 1312 57.9376 1312 82.8776C1312 103.981 1320.37 120.096 1336.72 131.607C1353.46 143.885 1382.97 153.093 1425.23 160.383C1434 161.535 1441.18 162.686 1447.56 164.22L1448.36 150.791L1507.36 190.312L1445.57 224.844L1445.96 212.949C1409.68 215.635 1357.45 209.112 1333.53 197.985L1320.37 247.482C1323.56 248.249 1329.54 248.633 1336.72 250.551C1395.33 259.376 1471.88 270.887 1524.11 238.657C1546.84 225.611 1558 205.659 1558 179.568Z" fill="#0196D0"/>
|
||||
</g>
|
||||
<defs>
|
||||
<clipPath id="clip0_0_3">
|
||||
<rect width="1558" height="260" fill="white"/>
|
||||
</clipPath>
|
||||
</defs>
|
||||
</svg>
|
||||
|
Before Width: | Height: | Size: 3.4 KiB |
@@ -14,12 +14,12 @@

use crate::utils::RustFSConfig;
use dioxus::logger::tracing::{debug, error, info};
use lazy_static::lazy_static;
use rust_embed::RustEmbed;
use sha2::{Digest, Sha256};
use std::error::Error;
use std::path::{Path, PathBuf};
use std::process::Command as StdCommand;
use std::sync::LazyLock;
use std::time::Duration;
use tokio::fs;
use tokio::fs::File;
@@ -31,15 +31,13 @@ use tokio::sync::{Mutex, mpsc};
#[folder = "$CARGO_MANIFEST_DIR/embedded-rustfs/"]
struct Asset;

// Use `lazy_static` to cache the checksum of embedded resources
lazy_static! {
    static ref RUSTFS_HASH: Mutex<String> = {
        let rustfs_file = if cfg!(windows) { "rustfs.exe" } else { "rustfs" };
        let rustfs_data = Asset::get(rustfs_file).expect("RustFs binary not embedded");
        let hash = hex::encode(Sha256::digest(&rustfs_data.data));
        Mutex::new(hash)
    };
}
// Use `LazyLock` to cache the checksum of embedded resources
static RUSTFS_HASH: LazyLock<Mutex<String>> = LazyLock::new(|| {
    let rustfs_file = if cfg!(windows) { "rustfs.exe" } else { "rustfs" };
    let rustfs_data = Asset::get(rustfs_file).expect("RustFs binary not embedded");
    let hash = hex::encode(Sha256::digest(&rustfs_data.data));
    Mutex::new(hash)
});

/// Service command
/// This enum represents the commands that can be sent to the service manager
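The hunk above swaps the `lazy_static!` macro for the standard library's `std::sync::LazyLock`. As a minimal standalone sketch of the same pattern (the payload bytes are a stand-in for `Asset::get`, not part of the diff):

```rust
use std::sync::LazyLock;
use sha2::{Digest, Sha256};

// Compute an expensive value once, on first access; every later access is a cheap deref.
static PAYLOAD_HASH: LazyLock<String> = LazyLock::new(|| {
    let data = b"embedded payload bytes"; // stand-in for the embedded binary
    hex::encode(Sha256::digest(data))
});

fn main() {
    // First access runs the closure; subsequent accesses reuse the cached String.
    println!("payload sha256 = {}", &*PAYLOAD_HASH);
}
```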
41
crates/ahm/Cargo.toml
Normal file
@@ -0,0 +1,41 @@
[package]
name = "rustfs-ahm"
version.workspace = true
edition.workspace = true
authors = ["RustFS Team"]
license.workspace = true
description = "RustFS AHM (Automatic Health Management) Scanner"
repository.workspace = true
rust-version.workspace = true
homepage.workspace = true
documentation = "https://docs.rs/rustfs-ahm/latest/rustfs_ahm/"
keywords = ["RustFS", "AHM", "health-management", "scanner", "Minio"]
categories = ["web-programming", "development-tools", "filesystem"]

[dependencies]
rustfs-ecstore = { workspace = true }
rustfs-common = { workspace = true }
rustfs-filemeta = { workspace = true }
rustfs-madmin = { workspace = true }
rustfs-utils = { workspace = true }
tokio = { workspace = true, features = ["full"] }
tokio-util = { workspace = true }
tracing = { workspace = true }
serde = { workspace = true, features = ["derive"] }
serde_json = { workspace = true }
thiserror = { workspace = true }
bytes = { workspace = true }
time = { workspace = true, features = ["serde"] }
uuid = { workspace = true, features = ["v4", "serde"] }
anyhow = { workspace = true }
async-trait = { workspace = true }
futures = { workspace = true }
url = { workspace = true }
rustfs-lock = { workspace = true }

lazy_static = { workspace = true }

[dev-dependencies]
rmp-serde = { workspace = true }
tokio-test = { workspace = true }
serde_json = { workspace = true }
45
crates/ahm/src/error.rs
Normal file
@@ -0,0 +1,45 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use thiserror::Error;

#[derive(Debug, Error)]
pub enum Error {
    #[error("I/O error: {0}")]
    Io(#[from] std::io::Error),

    #[error("Storage error: {0}")]
    Storage(#[from] rustfs_ecstore::error::Error),

    #[error("Configuration error: {0}")]
    Config(String),

    #[error("Scanner error: {0}")]
    Scanner(String),

    #[error("Metrics error: {0}")]
    Metrics(String),

    #[error(transparent)]
    Other(#[from] anyhow::Error),
}

pub type Result<T, E = Error> = std::result::Result<T, E>;

// Implement conversion from ahm::Error to std::io::Error for use in main.rs
impl From<Error> for std::io::Error {
    fn from(err: Error) -> Self {
        std::io::Error::other(err)
    }
}
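Since `main.rs` typically returns `std::io::Result<()>`, the `From` impl above lets `?` lift crate errors across that boundary. A hedged sketch (the `load_config` helper is illustrative, not part of the crate):

```rust
// Illustrative only: shows `?` converting rustfs_ahm::Error into std::io::Error
// through the From impl shown above.
fn load_config() -> Result<(), Error> {
    Err(Error::Config("missing ahm section".to_string()))
}

fn main() -> std::io::Result<()> {
    load_config()?; // Error -> io::Error via From<Error> for std::io::Error
    Ok(())
}
```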
54
crates/ahm/src/lib.rs
Normal file
@@ -0,0 +1,54 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::sync::OnceLock;
use tokio_util::sync::CancellationToken;

pub mod error;
pub mod scanner;

pub use error::{Error, Result};
pub use scanner::{
    BucketTargetUsageInfo, BucketUsageInfo, DataUsageInfo, Scanner, ScannerMetrics, load_data_usage_from_backend,
    store_data_usage_in_backend,
};

// Global cancellation token for AHM services (scanner and other background tasks)
static GLOBAL_AHM_SERVICES_CANCEL_TOKEN: OnceLock<CancellationToken> = OnceLock::new();

/// Initialize the global AHM services cancellation token
pub fn init_ahm_services_cancel_token(cancel_token: CancellationToken) -> Result<()> {
    GLOBAL_AHM_SERVICES_CANCEL_TOKEN
        .set(cancel_token)
        .map_err(|_| Error::Config("AHM services cancel token already initialized".to_string()))
}

/// Get the global AHM services cancellation token
pub fn get_ahm_services_cancel_token() -> Option<&'static CancellationToken> {
    GLOBAL_AHM_SERVICES_CANCEL_TOKEN.get()
}

/// Create and initialize the global AHM services cancellation token
pub fn create_ahm_services_cancel_token() -> CancellationToken {
    let cancel_token = CancellationToken::new();
    init_ahm_services_cancel_token(cancel_token.clone()).expect("AHM services cancel token already initialized");
    cancel_token
}

/// Shutdown all AHM services gracefully
pub fn shutdown_ahm_services() {
    if let Some(cancel_token) = GLOBAL_AHM_SERVICES_CANCEL_TOKEN.get() {
        cancel_token.cancel();
    }
}
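A background task would typically hold a clone of this token and exit when it fires. A minimal sketch, assuming the functions above and a tokio runtime (the scan body is a placeholder):

```rust
use tokio_util::sync::CancellationToken;

// Illustrative worker loop: runs until shutdown_ahm_services() cancels the token.
async fn run_scanner_loop(cancel: CancellationToken) {
    let mut ticker = tokio::time::interval(std::time::Duration::from_secs(60));
    loop {
        tokio::select! {
            _ = cancel.cancelled() => break, // graceful shutdown requested
            _ = ticker.tick() => {
                // one scan cycle would go here
            }
        }
    }
}
```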
1247
crates/ahm/src/scanner/data_scanner.rs
Normal file
671
crates/ahm/src/scanner/data_usage.rs
Normal file
@@ -0,0 +1,671 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::{collections::HashMap, sync::Arc, time::SystemTime};

use rustfs_ecstore::{bucket::metadata_sys::get_replication_config, config::com::read_config, store::ECStore};
use rustfs_utils::path::SLASH_SEPARATOR;
use serde::{Deserialize, Serialize};
use tracing::{error, info, warn};

use crate::error::{Error, Result};

// Data usage storage constants
pub const DATA_USAGE_ROOT: &str = SLASH_SEPARATOR;
const DATA_USAGE_OBJ_NAME: &str = ".usage.json";
const DATA_USAGE_BLOOM_NAME: &str = ".bloomcycle.bin";
pub const DATA_USAGE_CACHE_NAME: &str = ".usage-cache.bin";

// Data usage storage paths
lazy_static::lazy_static! {
    pub static ref DATA_USAGE_BUCKET: String = format!("{}{}{}",
        rustfs_ecstore::disk::RUSTFS_META_BUCKET,
        SLASH_SEPARATOR,
        rustfs_ecstore::disk::BUCKET_META_PREFIX
    );
    pub static ref DATA_USAGE_OBJ_NAME_PATH: String = format!("{}{}{}",
        rustfs_ecstore::disk::BUCKET_META_PREFIX,
        SLASH_SEPARATOR,
        DATA_USAGE_OBJ_NAME
    );
    pub static ref DATA_USAGE_BLOOM_NAME_PATH: String = format!("{}{}{}",
        rustfs_ecstore::disk::BUCKET_META_PREFIX,
        SLASH_SEPARATOR,
        DATA_USAGE_BLOOM_NAME
    );
}

/// Bucket target usage info provides replication statistics
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct BucketTargetUsageInfo {
    pub replication_pending_size: u64,
    pub replication_failed_size: u64,
    pub replicated_size: u64,
    pub replica_size: u64,
    pub replication_pending_count: u64,
    pub replication_failed_count: u64,
    pub replicated_count: u64,
}

/// Bucket usage info provides bucket-level statistics
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct BucketUsageInfo {
    pub size: u64,
    // The following five fields suffixed with V1 are here for backward compatibility
    // Total size for objects that have not yet been replicated
    pub replication_pending_size_v1: u64,
    // Total size for objects that have witnessed one or more failures and will be retried
    pub replication_failed_size_v1: u64,
    // Total size for objects that have been replicated to the destination
    pub replicated_size_v1: u64,
    // Total number of objects pending replication
    pub replication_pending_count_v1: u64,
    // Total number of objects that failed replication
    pub replication_failed_count_v1: u64,

    pub objects_count: u64,
    pub object_size_histogram: HashMap<String, u64>,
    pub object_versions_histogram: HashMap<String, u64>,
    pub versions_count: u64,
    pub delete_markers_count: u64,
    pub replica_size: u64,
    pub replica_count: u64,
    pub replication_info: HashMap<String, BucketTargetUsageInfo>,
}

/// DataUsageInfo represents data usage stats of the underlying storage
#[derive(Debug, Default, Clone, Serialize, Deserialize)]
pub struct DataUsageInfo {
    /// Total capacity
    pub total_capacity: u64,
    /// Total used capacity
    pub total_used_capacity: u64,
    /// Total free capacity
    pub total_free_capacity: u64,

    /// LastUpdate is the timestamp of when the data usage info was last updated
    pub last_update: Option<SystemTime>,

    /// Objects total count across all buckets
    pub objects_total_count: u64,
    /// Versions total count across all buckets
    pub versions_total_count: u64,
    /// Delete markers total count across all buckets
    pub delete_markers_total_count: u64,
    /// Objects total size across all buckets
    pub objects_total_size: u64,
    /// Replication info across all buckets
    pub replication_info: HashMap<String, BucketTargetUsageInfo>,

    /// Total number of buckets in this cluster
    pub buckets_count: u64,
    /// Per-bucket usage info across all buckets
    pub buckets_usage: HashMap<String, BucketUsageInfo>,
    /// Deprecated, kept here for backward compatibility reasons
    pub bucket_sizes: HashMap<String, u64>,
}

/// Size summary for a single object or group of objects
#[derive(Debug, Default, Clone)]
pub struct SizeSummary {
    /// Total size
    pub total_size: usize,
    /// Number of versions
    pub versions: usize,
    /// Number of delete markers
    pub delete_markers: usize,
    /// Replicated size
    pub replicated_size: usize,
    /// Replicated count
    pub replicated_count: usize,
    /// Pending size
    pub pending_size: usize,
    /// Failed size
    pub failed_size: usize,
    /// Replica size
    pub replica_size: usize,
    /// Replica count
    pub replica_count: usize,
    /// Pending count
    pub pending_count: usize,
    /// Failed count
    pub failed_count: usize,
    /// Replication target stats
    pub repl_target_stats: HashMap<String, ReplTargetSizeSummary>,
}

/// Replication target size summary
#[derive(Debug, Default, Clone)]
pub struct ReplTargetSizeSummary {
    /// Replicated size
    pub replicated_size: usize,
    /// Replicated count
    pub replicated_count: usize,
    /// Pending size
    pub pending_size: usize,
    /// Failed size
    pub failed_size: usize,
    /// Pending count
    pub pending_count: usize,
    /// Failed count
    pub failed_count: usize,
}

impl DataUsageInfo {
    /// Create a new DataUsageInfo
    pub fn new() -> Self {
        Self::default()
    }

    /// Add object metadata to data usage statistics
    pub fn add_object(&mut self, object_path: &str, meta_object: &rustfs_filemeta::MetaObject) {
        // This method is kept for backward compatibility
        // For accurate version counting, use add_object_from_file_meta instead
        let bucket_name = match self.extract_bucket_from_path(object_path) {
            Ok(name) => name,
            Err(_) => return,
        };

        // Update bucket statistics
        if let Some(bucket_usage) = self.buckets_usage.get_mut(&bucket_name) {
            bucket_usage.size += meta_object.size as u64;
            bucket_usage.objects_count += 1;
            bucket_usage.versions_count += 1; // Simplified: assume 1 version per object

            // Update size histogram
            let total_size = meta_object.size as u64;
            let size_ranges = [
                ("0-1KB", 0, 1024),
                ("1KB-1MB", 1024, 1024 * 1024),
                ("1MB-10MB", 1024 * 1024, 10 * 1024 * 1024),
                ("10MB-100MB", 10 * 1024 * 1024, 100 * 1024 * 1024),
                ("100MB-1GB", 100 * 1024 * 1024, 1024 * 1024 * 1024),
                ("1GB+", 1024 * 1024 * 1024, u64::MAX),
            ];

            for (range_name, min_size, max_size) in size_ranges {
                if total_size >= min_size && total_size < max_size {
                    *bucket_usage.object_size_histogram.entry(range_name.to_string()).or_insert(0) += 1;
                    break;
                }
            }

            // Update version histogram (simplified - count as single version)
            *bucket_usage
                .object_versions_histogram
                .entry("SINGLE_VERSION".to_string())
                .or_insert(0) += 1;
        } else {
            // Create new bucket usage
            let mut bucket_usage = BucketUsageInfo {
                size: meta_object.size as u64,
                objects_count: 1,
                versions_count: 1,
                ..Default::default()
            };
            bucket_usage.object_size_histogram.insert("0-1KB".to_string(), 1);
            bucket_usage.object_versions_histogram.insert("SINGLE_VERSION".to_string(), 1);
            self.buckets_usage.insert(bucket_name, bucket_usage);
        }

        // Update global statistics
        self.objects_total_size += meta_object.size as u64;
        self.objects_total_count += 1;
        self.versions_total_count += 1;
    }

    /// Add object from FileMeta for accurate version counting
    pub fn add_object_from_file_meta(&mut self, object_path: &str, file_meta: &rustfs_filemeta::FileMeta) {
        let bucket_name = match self.extract_bucket_from_path(object_path) {
            Ok(name) => name,
            Err(_) => return,
        };

        // Calculate accurate statistics from all versions
        let mut total_size = 0u64;
        let mut versions_count = 0u64;
        let mut delete_markers_count = 0u64;
        let mut latest_object_size = 0u64;

        // Process all versions to get accurate counts
        for version in &file_meta.versions {
            match rustfs_filemeta::FileMetaVersion::try_from(version.clone()) {
                Ok(ver) => {
                    if let Some(obj) = ver.object {
                        total_size += obj.size as u64;
                        versions_count += 1;
                        latest_object_size = obj.size as u64; // Keep track of latest object size
                    } else if ver.delete_marker.is_some() {
                        delete_markers_count += 1;
                    }
                }
                Err(_) => {
                    // Skip invalid versions
                    continue;
                }
            }
        }

        // Update bucket statistics
        if let Some(bucket_usage) = self.buckets_usage.get_mut(&bucket_name) {
            bucket_usage.size += total_size;
            bucket_usage.objects_count += 1;
            bucket_usage.versions_count += versions_count;
            bucket_usage.delete_markers_count += delete_markers_count;

            // Update size histogram based on latest object size
            let size_ranges = [
                ("0-1KB", 0, 1024),
                ("1KB-1MB", 1024, 1024 * 1024),
                ("1MB-10MB", 1024 * 1024, 10 * 1024 * 1024),
                ("10MB-100MB", 10 * 1024 * 1024, 100 * 1024 * 1024),
                ("100MB-1GB", 100 * 1024 * 1024, 1024 * 1024 * 1024),
                ("1GB+", 1024 * 1024 * 1024, u64::MAX),
            ];

            for (range_name, min_size, max_size) in size_ranges {
                if latest_object_size >= min_size && latest_object_size < max_size {
                    *bucket_usage.object_size_histogram.entry(range_name.to_string()).or_insert(0) += 1;
                    break;
                }
            }

            // Update version histogram based on actual version count
            let version_ranges = [
                ("1", 1, 1),
                ("2-5", 2, 5),
                ("6-10", 6, 10),
                ("11-50", 11, 50),
                ("51-100", 51, 100),
                ("100+", 101, usize::MAX),
            ];

            for (range_name, min_versions, max_versions) in version_ranges {
                if versions_count as usize >= min_versions && versions_count as usize <= max_versions {
                    *bucket_usage
                        .object_versions_histogram
                        .entry(range_name.to_string())
                        .or_insert(0) += 1;
                    break;
                }
            }
        } else {
            // Create new bucket usage
            let mut bucket_usage = BucketUsageInfo {
                size: total_size,
                objects_count: 1,
                versions_count,
                delete_markers_count,
                ..Default::default()
            };

            // Set size histogram
            let size_ranges = [
                ("0-1KB", 0, 1024),
                ("1KB-1MB", 1024, 1024 * 1024),
                ("1MB-10MB", 1024 * 1024, 10 * 1024 * 1024),
                ("10MB-100MB", 10 * 1024 * 1024, 100 * 1024 * 1024),
                ("100MB-1GB", 100 * 1024 * 1024, 1024 * 1024 * 1024),
                ("1GB+", 1024 * 1024 * 1024, u64::MAX),
            ];

            for (range_name, min_size, max_size) in size_ranges {
                if latest_object_size >= min_size && latest_object_size < max_size {
                    bucket_usage.object_size_histogram.insert(range_name.to_string(), 1);
                    break;
                }
            }

            // Set version histogram
            let version_ranges = [
                ("1", 1, 1),
                ("2-5", 2, 5),
                ("6-10", 6, 10),
                ("11-50", 11, 50),
                ("51-100", 51, 100),
                ("100+", 101, usize::MAX),
            ];

            for (range_name, min_versions, max_versions) in version_ranges {
                if versions_count as usize >= min_versions && versions_count as usize <= max_versions {
                    bucket_usage.object_versions_histogram.insert(range_name.to_string(), 1);
                    break;
                }
            }

            self.buckets_usage.insert(bucket_name, bucket_usage);
            // Update buckets count when adding new bucket
            self.buckets_count = self.buckets_usage.len() as u64;
        }

        // Update global statistics
        self.objects_total_size += total_size;
        self.objects_total_count += 1;
        self.versions_total_count += versions_count;
        self.delete_markers_total_count += delete_markers_count;
    }

    /// Extract bucket name from object path
    fn extract_bucket_from_path(&self, object_path: &str) -> Result<String> {
        let parts: Vec<&str> = object_path.split('/').collect();
        if parts.is_empty() {
            return Err(Error::Scanner("Invalid object path: empty".to_string()));
        }
        Ok(parts[0].to_string())
    }

    /// Update capacity information
    pub fn update_capacity(&mut self, total: u64, used: u64, free: u64) {
        self.total_capacity = total;
        self.total_used_capacity = used;
        self.total_free_capacity = free;
        self.last_update = Some(SystemTime::now());
    }

    /// Add bucket usage info
    pub fn add_bucket_usage(&mut self, bucket: String, usage: BucketUsageInfo) {
        self.buckets_usage.insert(bucket.clone(), usage);
        self.buckets_count = self.buckets_usage.len() as u64;
        self.last_update = Some(SystemTime::now());
    }

    /// Get bucket usage info
    pub fn get_bucket_usage(&self, bucket: &str) -> Option<&BucketUsageInfo> {
        self.buckets_usage.get(bucket)
    }

    /// Calculate total statistics from all buckets
    pub fn calculate_totals(&mut self) {
        self.objects_total_count = 0;
        self.versions_total_count = 0;
        self.delete_markers_total_count = 0;
        self.objects_total_size = 0;

        for usage in self.buckets_usage.values() {
            self.objects_total_count += usage.objects_count;
            self.versions_total_count += usage.versions_count;
            self.delete_markers_total_count += usage.delete_markers_count;
            self.objects_total_size += usage.size;
        }
    }

    /// Merge another DataUsageInfo into this one
    pub fn merge(&mut self, other: &DataUsageInfo) {
        // Merge bucket usage
        for (bucket, usage) in &other.buckets_usage {
            if let Some(existing) = self.buckets_usage.get_mut(bucket) {
                existing.merge(usage);
            } else {
                self.buckets_usage.insert(bucket.clone(), usage.clone());
            }
        }

        // Recalculate totals
        self.calculate_totals();

        // Ensure buckets_count stays consistent with buckets_usage
        self.buckets_count = self.buckets_usage.len() as u64;

        // Update last update time
        if let Some(other_update) = other.last_update {
            if self.last_update.is_none() || other_update > self.last_update.unwrap() {
                self.last_update = Some(other_update);
            }
        }
    }
}

impl BucketUsageInfo {
    /// Create a new BucketUsageInfo
    pub fn new() -> Self {
        Self::default()
    }

    /// Add size summary to this bucket usage
    pub fn add_size_summary(&mut self, summary: &SizeSummary) {
        self.size += summary.total_size as u64;
        self.versions_count += summary.versions as u64;
        self.delete_markers_count += summary.delete_markers as u64;
        self.replica_size += summary.replica_size as u64;
        self.replica_count += summary.replica_count as u64;
    }

    /// Merge another BucketUsageInfo into this one
    pub fn merge(&mut self, other: &BucketUsageInfo) {
        self.size += other.size;
        self.objects_count += other.objects_count;
        self.versions_count += other.versions_count;
        self.delete_markers_count += other.delete_markers_count;
        self.replica_size += other.replica_size;
        self.replica_count += other.replica_count;

        // Merge histograms
        for (key, value) in &other.object_size_histogram {
            *self.object_size_histogram.entry(key.clone()).or_insert(0) += value;
        }

        for (key, value) in &other.object_versions_histogram {
            *self.object_versions_histogram.entry(key.clone()).or_insert(0) += value;
        }

        // Merge replication info
        for (target, info) in &other.replication_info {
            let entry = self.replication_info.entry(target.clone()).or_default();
            entry.replicated_size += info.replicated_size;
            entry.replica_size += info.replica_size;
            entry.replication_pending_size += info.replication_pending_size;
            entry.replication_failed_size += info.replication_failed_size;
            entry.replication_pending_count += info.replication_pending_count;
            entry.replication_failed_count += info.replication_failed_count;
            entry.replicated_count += info.replicated_count;
        }

        // Merge backward compatibility fields
        self.replication_pending_size_v1 += other.replication_pending_size_v1;
        self.replication_failed_size_v1 += other.replication_failed_size_v1;
        self.replicated_size_v1 += other.replicated_size_v1;
        self.replication_pending_count_v1 += other.replication_pending_count_v1;
        self.replication_failed_count_v1 += other.replication_failed_count_v1;
    }
}

impl SizeSummary {
    /// Create a new SizeSummary
    pub fn new() -> Self {
        Self::default()
    }

    /// Add another SizeSummary to this one
    pub fn add(&mut self, other: &SizeSummary) {
        self.total_size += other.total_size;
        self.versions += other.versions;
        self.delete_markers += other.delete_markers;
        self.replicated_size += other.replicated_size;
        self.replicated_count += other.replicated_count;
        self.pending_size += other.pending_size;
        self.failed_size += other.failed_size;
        self.replica_size += other.replica_size;
        self.replica_count += other.replica_count;
        self.pending_count += other.pending_count;
        self.failed_count += other.failed_count;

        // Merge replication target stats
        for (target, stats) in &other.repl_target_stats {
            let entry = self.repl_target_stats.entry(target.clone()).or_default();
            entry.replicated_size += stats.replicated_size;
            entry.replicated_count += stats.replicated_count;
            entry.pending_size += stats.pending_size;
            entry.failed_size += stats.failed_size;
            entry.pending_count += stats.pending_count;
            entry.failed_count += stats.failed_count;
        }
    }
}

/// Store data usage info to backend storage
pub async fn store_data_usage_in_backend(data_usage_info: DataUsageInfo, store: Arc<ECStore>) -> Result<()> {
    let data =
        serde_json::to_vec(&data_usage_info).map_err(|e| Error::Config(format!("Failed to serialize data usage info: {e}")))?;

    // Save to backend using the same mechanism as original code
    rustfs_ecstore::config::com::save_config(store, &DATA_USAGE_OBJ_NAME_PATH, data)
        .await
        .map_err(Error::Storage)?;

    Ok(())
}

/// Load data usage info from backend storage
pub async fn load_data_usage_from_backend(store: Arc<ECStore>) -> Result<DataUsageInfo> {
    let buf = match read_config(store, &DATA_USAGE_OBJ_NAME_PATH).await {
        Ok(data) => data,
        Err(e) => {
            error!("Failed to read data usage info from backend: {}", e);
            if e == rustfs_ecstore::error::Error::ConfigNotFound {
                return Ok(DataUsageInfo::default());
            }
            return Err(Error::Storage(e));
        }
    };

    let mut data_usage_info: DataUsageInfo =
        serde_json::from_slice(&buf).map_err(|e| Error::Config(format!("Failed to deserialize data usage info: {e}")))?;

    warn!("Loaded data usage info from backend {:?}", &data_usage_info);

    // Handle backward compatibility like original code
    if data_usage_info.buckets_usage.is_empty() {
        data_usage_info.buckets_usage = data_usage_info
            .bucket_sizes
            .iter()
            .map(|(bucket, &size)| {
                (
                    bucket.clone(),
                    BucketUsageInfo {
                        size,
                        ..Default::default()
                    },
                )
            })
            .collect();
    }

    if data_usage_info.bucket_sizes.is_empty() {
        data_usage_info.bucket_sizes = data_usage_info
            .buckets_usage
            .iter()
            .map(|(bucket, bui)| (bucket.clone(), bui.size))
            .collect();
    }

    for (bucket, bui) in &data_usage_info.buckets_usage {
        if bui.replicated_size_v1 > 0
            || bui.replication_failed_count_v1 > 0
            || bui.replication_failed_size_v1 > 0
            || bui.replication_pending_count_v1 > 0
        {
            if let Ok((cfg, _)) = get_replication_config(bucket).await {
                if !cfg.role.is_empty() {
                    data_usage_info.replication_info.insert(
                        cfg.role.clone(),
                        BucketTargetUsageInfo {
                            replication_failed_size: bui.replication_failed_size_v1,
                            replication_failed_count: bui.replication_failed_count_v1,
                            replicated_size: bui.replicated_size_v1,
                            replication_pending_count: bui.replication_pending_count_v1,
                            replication_pending_size: bui.replication_pending_size_v1,
                            ..Default::default()
                        },
                    );
                }
            }
        }
    }

    Ok(data_usage_info)
}

/// Example function showing how to use AHM data usage functionality
/// This demonstrates the integration pattern for DataUsageInfoHandler
pub async fn example_data_usage_integration() -> Result<()> {
    // Get the global storage instance
    let Some(store) = rustfs_ecstore::new_object_layer_fn() else {
        return Err(Error::Config("Storage not initialized".to_string()));
    };

    // Load data usage from backend (this replaces the original load_data_usage_from_backend)
    let data_usage = load_data_usage_from_backend(store).await?;

    info!(
        "Loaded data usage info: {} buckets, {} total objects",
        data_usage.buckets_count, data_usage.objects_total_count
    );

    // Example: Store updated data usage back to backend
    // This would typically be called by the scanner after collecting new statistics
    // store_data_usage_in_backend(data_usage, store).await?;

    Ok(())
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_data_usage_info_creation() {
        let mut info = DataUsageInfo::new();
        info.update_capacity(1000, 500, 500);

        assert_eq!(info.total_capacity, 1000);
        assert_eq!(info.total_used_capacity, 500);
        assert_eq!(info.total_free_capacity, 500);
        assert!(info.last_update.is_some());
    }

    #[test]
    fn test_bucket_usage_info_merge() {
        let mut usage1 = BucketUsageInfo::new();
        usage1.size = 100;
        usage1.objects_count = 10;
        usage1.versions_count = 5;

        let mut usage2 = BucketUsageInfo::new();
        usage2.size = 200;
        usage2.objects_count = 20;
        usage2.versions_count = 10;

        usage1.merge(&usage2);

        assert_eq!(usage1.size, 300);
        assert_eq!(usage1.objects_count, 30);
        assert_eq!(usage1.versions_count, 15);
    }

    #[test]
    fn test_size_summary_add() {
        let mut summary1 = SizeSummary::new();
        summary1.total_size = 100;
        summary1.versions = 5;

        let mut summary2 = SizeSummary::new();
        summary2.total_size = 200;
        summary2.versions = 10;

        summary1.add(&summary2);

        assert_eq!(summary1.total_size, 300);
        assert_eq!(summary1.versions, 15);
    }
}
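Because `merge` recalculates totals and keeps `buckets_count` consistent with `buckets_usage`, a cluster-wide view can be built by folding per-disk snapshots together. A minimal sketch, assuming the `DataUsageInfo` type above (the `shards` source is illustrative):

```rust
// Illustrative aggregation: fold per-disk snapshots into one cluster view.
fn aggregate_usage(shards: &[DataUsageInfo]) -> DataUsageInfo {
    let mut total = DataUsageInfo::new();
    for shard in shards {
        total.merge(shard); // merges buckets, then recalculates totals and counts
    }
    total
}
```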
277
crates/ahm/src/scanner/histogram.rs
Normal file
@@ -0,0 +1,277 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::collections::HashMap;

/// Size interval for object size histogram
#[derive(Debug, Clone)]
pub struct SizeInterval {
    pub start: u64,
    pub end: u64,
    pub name: &'static str,
}

/// Version interval for object versions histogram
#[derive(Debug, Clone)]
pub struct VersionInterval {
    pub start: u64,
    pub end: u64,
    pub name: &'static str,
}

/// Object size histogram intervals
pub const OBJECTS_HISTOGRAM_INTERVALS: &[SizeInterval] = &[
    SizeInterval {
        start: 0,
        end: 1024 - 1,
        name: "LESS_THAN_1_KiB",
    },
    SizeInterval {
        start: 1024,
        end: 1024 * 1024 - 1,
        name: "1_KiB_TO_1_MiB",
    },
    SizeInterval {
        start: 1024 * 1024,
        end: 10 * 1024 * 1024 - 1,
        name: "1_MiB_TO_10_MiB",
    },
    SizeInterval {
        start: 10 * 1024 * 1024,
        end: 64 * 1024 * 1024 - 1,
        name: "10_MiB_TO_64_MiB",
    },
    SizeInterval {
        start: 64 * 1024 * 1024,
        end: 128 * 1024 * 1024 - 1,
        name: "64_MiB_TO_128_MiB",
    },
    SizeInterval {
        start: 128 * 1024 * 1024,
        end: 512 * 1024 * 1024 - 1,
        name: "128_MiB_TO_512_MiB",
    },
    SizeInterval {
        start: 512 * 1024 * 1024,
        end: u64::MAX,
        name: "MORE_THAN_512_MiB",
    },
];

/// Object version count histogram intervals
pub const OBJECTS_VERSION_COUNT_INTERVALS: &[VersionInterval] = &[
    VersionInterval {
        start: 1,
        end: 1,
        name: "1_VERSION",
    },
    VersionInterval {
        start: 2,
        end: 10,
        name: "2_TO_10_VERSIONS",
    },
    VersionInterval {
        start: 11,
        end: 100,
        name: "11_TO_100_VERSIONS",
    },
    VersionInterval {
        start: 101,
        end: 1000,
        name: "101_TO_1000_VERSIONS",
    },
    VersionInterval {
        start: 1001,
        end: u64::MAX,
        name: "MORE_THAN_1000_VERSIONS",
    },
];

/// Size histogram for object size distribution
#[derive(Debug, Clone, Default)]
pub struct SizeHistogram {
    counts: Vec<u64>,
}

/// Versions histogram for object version count distribution
#[derive(Debug, Clone, Default)]
pub struct VersionsHistogram {
    counts: Vec<u64>,
}

impl SizeHistogram {
    /// Create a new size histogram
    pub fn new() -> Self {
        Self {
            counts: vec![0; OBJECTS_HISTOGRAM_INTERVALS.len()],
        }
    }

    /// Add a size to the histogram
    pub fn add(&mut self, size: u64) {
        for (idx, interval) in OBJECTS_HISTOGRAM_INTERVALS.iter().enumerate() {
            if size >= interval.start && size <= interval.end {
                self.counts[idx] += 1;
                break;
            }
        }
    }

    /// Get the histogram as a map
    pub fn to_map(&self) -> HashMap<String, u64> {
        let mut result = HashMap::new();
        for (idx, count) in self.counts.iter().enumerate() {
            let interval = &OBJECTS_HISTOGRAM_INTERVALS[idx];
            result.insert(interval.name.to_string(), *count);
        }
        result
    }

    /// Merge another histogram into this one
    pub fn merge(&mut self, other: &SizeHistogram) {
        for (idx, count) in other.counts.iter().enumerate() {
            self.counts[idx] += count;
        }
    }

    /// Get total count
    pub fn total_count(&self) -> u64 {
        self.counts.iter().sum()
    }

    /// Reset the histogram
    pub fn reset(&mut self) {
        for count in &mut self.counts {
            *count = 0;
        }
    }
}

impl VersionsHistogram {
    /// Create a new versions histogram
    pub fn new() -> Self {
        Self {
            counts: vec![0; OBJECTS_VERSION_COUNT_INTERVALS.len()],
        }
    }

    /// Add a version count to the histogram
    pub fn add(&mut self, versions: u64) {
        for (idx, interval) in OBJECTS_VERSION_COUNT_INTERVALS.iter().enumerate() {
            if versions >= interval.start && versions <= interval.end {
                self.counts[idx] += 1;
                break;
            }
        }
    }

    /// Get the histogram as a map
    pub fn to_map(&self) -> HashMap<String, u64> {
        let mut result = HashMap::new();
        for (idx, count) in self.counts.iter().enumerate() {
            let interval = &OBJECTS_VERSION_COUNT_INTERVALS[idx];
            result.insert(interval.name.to_string(), *count);
        }
        result
    }

    /// Merge another histogram into this one
    pub fn merge(&mut self, other: &VersionsHistogram) {
        for (idx, count) in other.counts.iter().enumerate() {
            self.counts[idx] += count;
        }
    }

    /// Get total count
    pub fn total_count(&self) -> u64 {
        self.counts.iter().sum()
    }

    /// Reset the histogram
    pub fn reset(&mut self) {
        for count in &mut self.counts {
            *count = 0;
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_size_histogram() {
        let mut histogram = SizeHistogram::new();

        // Add some sizes
        histogram.add(512); // LESS_THAN_1_KiB
        histogram.add(1024); // 1_KiB_TO_1_MiB
        histogram.add(1024 * 1024); // 1_MiB_TO_10_MiB
        histogram.add(5 * 1024 * 1024); // 1_MiB_TO_10_MiB

        let map = histogram.to_map();

        assert_eq!(map.get("LESS_THAN_1_KiB"), Some(&1));
        assert_eq!(map.get("1_KiB_TO_1_MiB"), Some(&1));
        assert_eq!(map.get("1_MiB_TO_10_MiB"), Some(&2));
        assert_eq!(map.get("10_MiB_TO_64_MiB"), Some(&0));
    }

    #[test]
    fn test_versions_histogram() {
        let mut histogram = VersionsHistogram::new();

        // Add some version counts
        histogram.add(1); // 1_VERSION
        histogram.add(5); // 2_TO_10_VERSIONS
        histogram.add(50); // 11_TO_100_VERSIONS
        histogram.add(500); // 101_TO_1000_VERSIONS

        let map = histogram.to_map();

        assert_eq!(map.get("1_VERSION"), Some(&1));
        assert_eq!(map.get("2_TO_10_VERSIONS"), Some(&1));
        assert_eq!(map.get("11_TO_100_VERSIONS"), Some(&1));
        assert_eq!(map.get("101_TO_1000_VERSIONS"), Some(&1));
    }

    #[test]
    fn test_histogram_merge() {
        let mut histogram1 = SizeHistogram::new();
        histogram1.add(1024);
        histogram1.add(1024 * 1024);

        let mut histogram2 = SizeHistogram::new();
        histogram2.add(1024);
        histogram2.add(5 * 1024 * 1024);

        histogram1.merge(&histogram2);

        let map = histogram1.to_map();
        assert_eq!(map.get("1_KiB_TO_1_MiB"), Some(&2)); // 1 from histogram1 + 1 from histogram2
        assert_eq!(map.get("1_MiB_TO_10_MiB"), Some(&2)); // 1 from histogram1 + 1 from histogram2
    }

    #[test]
    fn test_histogram_reset() {
        let mut histogram = SizeHistogram::new();
        histogram.add(1024);
        histogram.add(1024 * 1024);

        assert_eq!(histogram.total_count(), 2);

        histogram.reset();
        assert_eq!(histogram.total_count(), 0);
    }
}
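The typed histograms here can feed the `HashMap<String, u64>` fields that `BucketUsageInfo` serializes. A hedged bridge sketch, assuming the types above (`size_map_for` is illustrative, not a function the crate defines):

```rust
use std::collections::HashMap;

// Illustrative: tally object sizes with SizeHistogram, then export the map
// shape used by BucketUsageInfo::object_size_histogram.
fn size_map_for(sizes: &[u64]) -> HashMap<String, u64> {
    let mut histogram = SizeHistogram::new();
    for &size in sizes {
        histogram.add(size);
    }
    histogram.to_map()
}
```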
284
crates/ahm/src/scanner/metrics.rs
Normal file
@@ -0,0 +1,284 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use std::{
    collections::HashMap,
    sync::atomic::{AtomicU64, Ordering},
    time::{Duration, SystemTime},
};

use serde::{Deserialize, Serialize};
use tracing::info;

/// Scanner metrics
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct ScannerMetrics {
    /// Total objects scanned since server start
    pub objects_scanned: u64,
    /// Total object versions scanned since server start
    pub versions_scanned: u64,
    /// Total directories scanned since server start
    pub directories_scanned: u64,
    /// Total bucket scans started since server start
    pub bucket_scans_started: u64,
    /// Total bucket scans finished since server start
    pub bucket_scans_finished: u64,
    /// Total objects with health issues found
    pub objects_with_issues: u64,
    /// Total heal tasks queued
    pub heal_tasks_queued: u64,
    /// Total heal tasks completed
    pub heal_tasks_completed: u64,
    /// Total heal tasks failed
    pub heal_tasks_failed: u64,
    /// Last scan activity time
    pub last_activity: Option<SystemTime>,
    /// Current scan cycle
    pub current_cycle: u64,
    /// Total scan cycles completed
    pub total_cycles: u64,
    /// Current scan duration
    pub current_scan_duration: Option<Duration>,
    /// Average scan duration
    pub avg_scan_duration: Duration,
    /// Objects scanned per second
    pub objects_per_second: f64,
    /// Buckets scanned per second
    pub buckets_per_second: f64,
    /// Storage metrics by bucket
    pub bucket_metrics: HashMap<String, BucketMetrics>,
    /// Disk metrics
    pub disk_metrics: HashMap<String, DiskMetrics>,
}

/// Bucket-specific metrics
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct BucketMetrics {
    /// Bucket name
    pub bucket: String,
    /// Total objects in bucket
    pub total_objects: u64,
    /// Total size of objects in bucket (bytes)
    pub total_size: u64,
    /// Objects with health issues
    pub objects_with_issues: u64,
    /// Last scan time
    pub last_scan_time: Option<SystemTime>,
    /// Scan duration
    pub scan_duration: Option<Duration>,
    /// Heal tasks queued for this bucket
    pub heal_tasks_queued: u64,
    /// Heal tasks completed for this bucket
    pub heal_tasks_completed: u64,
    /// Heal tasks failed for this bucket
    pub heal_tasks_failed: u64,
}

/// Disk-specific metrics
#[derive(Debug, Clone, Default, Serialize, Deserialize)]
pub struct DiskMetrics {
    /// Disk path
    pub disk_path: String,
    /// Total disk space (bytes)
    pub total_space: u64,
    /// Used disk space (bytes)
    pub used_space: u64,
    /// Free disk space (bytes)
    pub free_space: u64,
    /// Objects scanned on this disk
    pub objects_scanned: u64,
    /// Objects with issues on this disk
    pub objects_with_issues: u64,
    /// Last scan time
    pub last_scan_time: Option<SystemTime>,
    /// Whether disk is online
    pub is_online: bool,
    /// Whether disk is being scanned
    pub is_scanning: bool,
}

/// Thread-safe metrics collector
pub struct MetricsCollector {
    /// Atomic counters for real-time metrics
    objects_scanned: AtomicU64,
    versions_scanned: AtomicU64,
    directories_scanned: AtomicU64,
    bucket_scans_started: AtomicU64,
    bucket_scans_finished: AtomicU64,
    objects_with_issues: AtomicU64,
    heal_tasks_queued: AtomicU64,
    heal_tasks_completed: AtomicU64,
    heal_tasks_failed: AtomicU64,
    current_cycle: AtomicU64,
    total_cycles: AtomicU64,
}

impl MetricsCollector {
    /// Create a new metrics collector
    pub fn new() -> Self {
        Self {
            objects_scanned: AtomicU64::new(0),
            versions_scanned: AtomicU64::new(0),
            directories_scanned: AtomicU64::new(0),
            bucket_scans_started: AtomicU64::new(0),
            bucket_scans_finished: AtomicU64::new(0),
            objects_with_issues: AtomicU64::new(0),
            heal_tasks_queued: AtomicU64::new(0),
            heal_tasks_completed: AtomicU64::new(0),
            heal_tasks_failed: AtomicU64::new(0),
            current_cycle: AtomicU64::new(0),
            total_cycles: AtomicU64::new(0),
        }
    }

    /// Increment objects scanned count
    pub fn increment_objects_scanned(&self, count: u64) {
        self.objects_scanned.fetch_add(count, Ordering::Relaxed);
    }

    /// Increment versions scanned count
    pub fn increment_versions_scanned(&self, count: u64) {
        self.versions_scanned.fetch_add(count, Ordering::Relaxed);
    }

    /// Increment directories scanned count
    pub fn increment_directories_scanned(&self, count: u64) {
        self.directories_scanned.fetch_add(count, Ordering::Relaxed);
    }

    /// Increment bucket scans started count
    pub fn increment_bucket_scans_started(&self, count: u64) {
        self.bucket_scans_started.fetch_add(count, Ordering::Relaxed);
    }

    /// Increment bucket scans finished count
    pub fn increment_bucket_scans_finished(&self, count: u64) {
        self.bucket_scans_finished.fetch_add(count, Ordering::Relaxed);
    }

    /// Increment objects with issues count
    pub fn increment_objects_with_issues(&self, count: u64) {
        self.objects_with_issues.fetch_add(count, Ordering::Relaxed);
    }

    /// Increment heal tasks queued count
    pub fn increment_heal_tasks_queued(&self, count: u64) {
        self.heal_tasks_queued.fetch_add(count, Ordering::Relaxed);
    }

    /// Increment heal tasks completed count
    pub fn increment_heal_tasks_completed(&self, count: u64) {
        self.heal_tasks_completed.fetch_add(count, Ordering::Relaxed);
    }

    /// Increment heal tasks failed count
    pub fn increment_heal_tasks_failed(&self, count: u64) {
        self.heal_tasks_failed.fetch_add(count, Ordering::Relaxed);
    }

    /// Set current cycle
    pub fn set_current_cycle(&self, cycle: u64) {
        self.current_cycle.store(cycle, Ordering::Relaxed);
    }

    /// Increment total cycles
    pub fn increment_total_cycles(&self) {
        self.total_cycles.fetch_add(1, Ordering::Relaxed);
    }

    /// Get current metrics snapshot
    pub fn get_metrics(&self) -> ScannerMetrics {
        ScannerMetrics {
            objects_scanned: self.objects_scanned.load(Ordering::Relaxed),
            versions_scanned: self.versions_scanned.load(Ordering::Relaxed),
            directories_scanned: self.directories_scanned.load(Ordering::Relaxed),
            bucket_scans_started: self.bucket_scans_started.load(Ordering::Relaxed),
            bucket_scans_finished: self.bucket_scans_finished.load(Ordering::Relaxed),
            objects_with_issues: self.objects_with_issues.load(Ordering::Relaxed),
            heal_tasks_queued: self.heal_tasks_queued.load(Ordering::Relaxed),
            heal_tasks_completed: self.heal_tasks_completed.load(Ordering::Relaxed),
            heal_tasks_failed: self.heal_tasks_failed.load(Ordering::Relaxed),
            last_activity: Some(SystemTime::now()),
            current_cycle: self.current_cycle.load(Ordering::Relaxed),
            total_cycles: self.total_cycles.load(Ordering::Relaxed),
            current_scan_duration: None, // Will be set by scanner
            avg_scan_duration: Duration::ZERO, // Will be calculated
|
||||
objects_per_second: 0.0, // Will be calculated
|
||||
buckets_per_second: 0.0, // Will be calculated
|
||||
bucket_metrics: HashMap::new(), // Will be populated by scanner
|
||||
disk_metrics: HashMap::new(), // Will be populated by scanner
|
||||
}
|
||||
}
|
||||
|
||||
/// Reset all metrics
|
||||
pub fn reset(&self) {
|
||||
self.objects_scanned.store(0, Ordering::Relaxed);
|
||||
self.versions_scanned.store(0, Ordering::Relaxed);
|
||||
self.directories_scanned.store(0, Ordering::Relaxed);
|
||||
self.bucket_scans_started.store(0, Ordering::Relaxed);
|
||||
self.bucket_scans_finished.store(0, Ordering::Relaxed);
|
||||
self.objects_with_issues.store(0, Ordering::Relaxed);
|
||||
self.heal_tasks_queued.store(0, Ordering::Relaxed);
|
||||
self.heal_tasks_completed.store(0, Ordering::Relaxed);
|
||||
self.heal_tasks_failed.store(0, Ordering::Relaxed);
|
||||
self.current_cycle.store(0, Ordering::Relaxed);
|
||||
self.total_cycles.store(0, Ordering::Relaxed);
|
||||
|
||||
info!("Scanner metrics reset");
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for MetricsCollector {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
|
||||
#[test]
|
||||
fn test_metrics_collector_creation() {
|
||||
let collector = MetricsCollector::new();
|
||||
let metrics = collector.get_metrics();
|
||||
assert_eq!(metrics.objects_scanned, 0);
|
||||
assert_eq!(metrics.versions_scanned, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_metrics_increment() {
|
||||
let collector = MetricsCollector::new();
|
||||
|
||||
collector.increment_objects_scanned(10);
|
||||
collector.increment_versions_scanned(5);
|
||||
collector.increment_objects_with_issues(2);
|
||||
|
||||
let metrics = collector.get_metrics();
|
||||
assert_eq!(metrics.objects_scanned, 10);
|
||||
assert_eq!(metrics.versions_scanned, 5);
|
||||
assert_eq!(metrics.objects_with_issues, 2);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_metrics_reset() {
|
||||
let collector = MetricsCollector::new();
|
||||
|
||||
collector.increment_objects_scanned(10);
|
||||
collector.reset();
|
||||
|
||||
let metrics = collector.get_metrics();
|
||||
assert_eq!(metrics.objects_scanned, 0);
|
||||
}
|
||||
}
|
||||
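The collector is meant to be shared across concurrent scan tasks. A minimal usage sketch (the `Arc` wrapper and the task wiring below are illustrative, not part of this diff):

```rust
use std::sync::Arc;

// Hypothetical wiring: one collector shared by many scan tasks.
// The counters are AtomicU64, so the &self increment methods can be
// called from any number of tasks without extra locking.
async fn scan_with_metrics(collector: Arc<MetricsCollector>) {
    collector.increment_bucket_scans_started(1);
    // ... scan one bucket ...
    collector.increment_objects_scanned(128);
    collector.increment_bucket_scans_finished(1);

    // Point-in-time snapshot; rates and per-bucket maps are filled in
    // later by the scanner itself (see the comments in get_metrics).
    let snapshot = collector.get_metrics();
    println!("objects scanned so far: {}", snapshot.objects_scanned);
}
```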
@@ -11,3 +11,15 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.
+
+pub mod data_scanner;
+pub mod data_usage;
+pub mod histogram;
+pub mod metrics;
+
+// Re-export main types for convenience
+pub use data_scanner::Scanner;
+pub use data_usage::{
+    BucketTargetUsageInfo, BucketUsageInfo, DataUsageInfo, load_data_usage_from_backend, store_data_usage_in_backend,
+};
+pub use metrics::ScannerMetrics;
@@ -28,6 +28,5 @@ categories = ["web-programming", "development-tools", "data-structures"]
 workspace = true

 [dependencies]
-lazy_static.workspace = true
 tokio.workspace = true
 tonic = { workspace = true }
@@ -12,19 +12,19 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

+#![allow(non_upper_case_globals)] // FIXME
+
 use std::collections::HashMap;
+use std::sync::LazyLock;

-use lazy_static::lazy_static;
 use tokio::sync::RwLock;
 use tonic::transport::Channel;

-lazy_static! {
-    pub static ref GLOBAL_Local_Node_Name: RwLock<String> = RwLock::new("".to_string());
-    pub static ref GLOBAL_Rustfs_Host: RwLock<String> = RwLock::new("".to_string());
-    pub static ref GLOBAL_Rustfs_Port: RwLock<String> = RwLock::new("9000".to_string());
-    pub static ref GLOBAL_Rustfs_Addr: RwLock<String> = RwLock::new("".to_string());
-    pub static ref GLOBAL_Conn_Map: RwLock<HashMap<String, Channel>> = RwLock::new(HashMap::new());
-}
+pub static GLOBAL_Local_Node_Name: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
+pub static GLOBAL_Rustfs_Host: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
+pub static GLOBAL_Rustfs_Port: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("9000".to_string()));
+pub static GLOBAL_Rustfs_Addr: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
+pub static GLOBAL_Conn_Map: LazyLock<RwLock<HashMap<String, Channel>>> = LazyLock::new(|| RwLock::new(HashMap::new()));

 pub async fn set_global_addr(addr: &str) {
     *GLOBAL_Rustfs_Addr.write().await = addr.to_string();
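`std::sync::LazyLock` (stable since Rust 1.80) provides the same lazy, thread-safe one-time initialization as `lazy_static!` without the macro or the extra dependency, and call sites are unchanged because both deref to the inner value. A minimal sketch of the pattern this migration applies throughout the repo:

```rust
use std::sync::LazyLock;
use tokio::sync::RwLock;

// Initialized on first access, exactly once, from any thread.
static NAME: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new(String::new()));

async fn demo() {
    // Reads and writes look exactly as they did under lazy_static:
    *NAME.write().await = "node-1".to_string();
    assert_eq!(NAME.read().await.as_str(), "node-1");
}
```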
@@ -109,8 +109,8 @@ winapi = { workspace = true }

 [dev-dependencies]
 tokio = { workspace = true, features = ["rt-multi-thread", "macros"] }
-criterion = { version = "0.5", features = ["html_reports"] }
-temp-env = "0.3.6"
+criterion = { workspace = true, features = ["html_reports"] }
+temp-env = { workspace = true }

 [build-dependencies]
 shadow-rs = { workspace = true, features = ["build", "metadata"] }
@@ -145,8 +145,8 @@ impl Debug for LocalDisk {
 impl LocalDisk {
     pub async fn new(ep: &Endpoint, cleanup: bool) -> Result<Self> {
        debug!("Creating local disk");
-        let root = match fs::canonicalize(ep.get_file_path()).await {
-            Ok(path) => path,
+        let root = match PathBuf::from(ep.get_file_path()).absolutize() {
+            Ok(path) => path.into_owned(),
             Err(e) => {
                 if e.kind() == ErrorKind::NotFound {
                     return Err(DiskError::VolumeNotFound);
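The switch matters on first start: `fs::canonicalize` hits the filesystem and fails with `NotFound` when the disk root does not exist yet, while `absolutize` (from the `path-absolutize` crate, which this call appears to use) resolves `.` and `..` purely lexically. A sketch of the difference, assuming `path-absolutize` is in scope:

```rust
use path_absolutize::Absolutize;
use std::path::PathBuf;

fn demo() -> std::io::Result<()> {
    // Works even if /data/rustfs/vol1 has not been created yet:
    let abs = PathBuf::from("/data/rustfs/../rustfs/vol1").absolutize()?.into_owned();
    assert_eq!(abs, PathBuf::from("/data/rustfs/vol1"));

    // std::fs::canonicalize would return ErrorKind::NotFound here instead.
    Ok(())
}
```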
@@ -30,6 +30,7 @@ use std::{
     time::SystemTime,
 };
 use tokio::sync::{OnceCell, RwLock};
+use tokio_util::sync::CancellationToken;
 use uuid::Uuid;

 pub const DISK_ASSUME_UNKNOWN_SIZE: u64 = 1 << 30;

@@ -66,6 +67,9 @@ pub static ref GLOBAL_NodeNamesHex: HashMap<String, ()> = HashMap::new();
 pub static ref GLOBAL_REGION: OnceLock<String> = OnceLock::new();
 }

+// Global cancellation token for background services (data scanner and auto heal)
+static GLOBAL_BACKGROUND_SERVICES_CANCEL_TOKEN: OnceLock<CancellationToken> = OnceLock::new();
+
 static GLOBAL_ACTIVE_CRED: OnceLock<Credentials> = OnceLock::new();

 pub fn init_global_action_cred(ak: Option<String>, sk: Option<String>) {

@@ -192,3 +196,27 @@ pub fn set_global_region(region: String) {
 pub fn get_global_region() -> Option<String> {
     GLOBAL_REGION.get().cloned()
 }
+
+/// Initialize the global background services cancellation token
+pub fn init_background_services_cancel_token(cancel_token: CancellationToken) -> Result<(), CancellationToken> {
+    GLOBAL_BACKGROUND_SERVICES_CANCEL_TOKEN.set(cancel_token)
+}
+
+/// Get the global background services cancellation token
+pub fn get_background_services_cancel_token() -> Option<&'static CancellationToken> {
+    GLOBAL_BACKGROUND_SERVICES_CANCEL_TOKEN.get()
+}
+
+/// Create and initialize the global background services cancellation token
+pub fn create_background_services_cancel_token() -> CancellationToken {
+    let cancel_token = CancellationToken::new();
+    init_background_services_cancel_token(cancel_token.clone()).expect("Background services cancel token already initialized");
+    cancel_token
+}
+
+/// Shutdown all background services gracefully
+pub fn shutdown_background_services() {
+    if let Some(cancel_token) = GLOBAL_BACKGROUND_SERVICES_CANCEL_TOKEN.get() {
+        cancel_token.cancel();
+    }
+}
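Taken together, these helpers centralize shutdown for every background service behind one token. A hedged sketch of how a server binary might wire them up (the ctrl-c handler and surrounding function are illustrative, not part of this diff):

```rust
use tokio::signal;

async fn run_server() {
    // Create the token once at startup; later init_* calls fetch it
    // through get_background_services_cancel_token().
    let _cancel_token = create_background_services_cancel_token();

    // ... init_data_scanner().await, init_auto_heal().await, serve requests ...

    // On SIGINT, cancel every background service through the one token.
    signal::ctrl_c().await.expect("failed to listen for ctrl-c");
    shutdown_background_services();
}
```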
@@ -24,6 +24,7 @@ use tokio::{
     },
     time::interval,
 };
+use tokio_util::sync::CancellationToken;
 use tracing::{error, info};
 use uuid::Uuid;

@@ -32,7 +33,7 @@ use super::{
     heal_ops::{HealSequence, new_bg_heal_sequence},
 };
 use crate::error::{Error, Result};
-use crate::global::GLOBAL_MRFState;
+use crate::global::{GLOBAL_MRFState, get_background_services_cancel_token};
 use crate::heal::error::ERR_RETRY_HEALING;
 use crate::heal::heal_commands::{HEAL_ITEM_BUCKET, HealScanMode};
 use crate::heal::heal_ops::{BG_HEALING_UUID, HealSource};

@@ -54,6 +55,13 @@ use crate::{
 pub static DEFAULT_MONITOR_NEW_DISK_INTERVAL: Duration = Duration::from_secs(10);

 pub async fn init_auto_heal() {
+    info!("Initializing auto heal background task");
+
+    let Some(cancel_token) = get_background_services_cancel_token() else {
+        error!("Background services cancel token not initialized");
+        return;
+    };
+
     init_background_healing().await;
     let v = env::var("_RUSTFS_AUTO_DRIVE_HEALING").unwrap_or("on".to_string());
     if v == "on" {

@@ -61,12 +69,16 @@ pub async fn init_auto_heal() {
         GLOBAL_BackgroundHealState
             .push_heal_local_disks(&get_local_disks_to_heal().await)
             .await;
-        spawn(async {
-            monitor_local_disks_and_heal().await;
+
+        let cancel_clone = cancel_token.clone();
+        spawn(async move {
+            monitor_local_disks_and_heal(cancel_clone).await;
         });
     }
-    spawn(async {
-        GLOBAL_MRFState.heal_routine().await;
+
+    let cancel_clone = cancel_token.clone();
+    spawn(async move {
+        GLOBAL_MRFState.heal_routine_with_cancel(cancel_clone).await;
     });
 }

@@ -108,50 +120,66 @@ pub async fn get_local_disks_to_heal() -> Vec<Endpoint> {
     disks_to_heal
 }

-async fn monitor_local_disks_and_heal() {
+async fn monitor_local_disks_and_heal(cancel_token: CancellationToken) {
+    info!("Auto heal monitor started");
     let mut interval = interval(DEFAULT_MONITOR_NEW_DISK_INTERVAL);

     loop {
-        interval.tick().await;
-        let heal_disks = GLOBAL_BackgroundHealState.get_heal_local_disk_endpoints().await;
-        if heal_disks.is_empty() {
-            info!("heal local disks is empty");
-            interval.reset();
-            continue;
-        }
+        tokio::select! {
+            _ = cancel_token.cancelled() => {
+                info!("Auto heal monitor received shutdown signal, exiting gracefully");
+                break;
+            }
+            _ = interval.tick() => {
+                let heal_disks = GLOBAL_BackgroundHealState.get_heal_local_disk_endpoints().await;
+                if heal_disks.is_empty() {
+                    info!("heal local disks is empty");
+                    interval.reset();
+                    continue;
+                }

-        info!("heal local disks: {:?}", heal_disks);
+                info!("heal local disks: {:?}", heal_disks);

-        let store = new_object_layer_fn().expect("errServerNotInitialized");
-        if let (_result, Some(err)) = store.heal_format(false).await.expect("heal format failed") {
-            error!("heal local disk format error: {}", err);
-            if err == Error::NoHealRequired {
-            } else {
-                info!("heal format err: {}", err.to_string());
-                interval.reset();
-                continue;
-            }
-        }
+                let store = new_object_layer_fn().expect("errServerNotInitialized");
+                if let (_result, Some(err)) = store.heal_format(false).await.expect("heal format failed") {
+                    error!("heal local disk format error: {}", err);
+                    if err == Error::NoHealRequired {
+                    } else {
+                        info!("heal format err: {}", err.to_string());
+                        interval.reset();
+                        continue;
+                    }
+                }

-        let mut futures = Vec::new();
-        for disk in heal_disks.into_ref().iter() {
-            let disk_clone = disk.clone();
-            futures.push(async move {
-                GLOBAL_BackgroundHealState
-                    .set_disk_healing_status(disk_clone.clone(), true)
-                    .await;
-                if heal_fresh_disk(&disk_clone).await.is_err() {
-                    info!("heal_fresh_disk is err");
-                    GLOBAL_BackgroundHealState
-                        .set_disk_healing_status(disk_clone.clone(), false)
-                        .await;
-                    return;
-                }
-                GLOBAL_BackgroundHealState.pop_heal_local_disks(&[disk_clone]).await;
-            });
-        }
-        let _ = join_all(futures).await;
-        interval.reset();
+                let mut futures = Vec::new();
+                for disk in heal_disks.into_ref().iter() {
+                    let disk_clone = disk.clone();
+                    let cancel_clone = cancel_token.clone();
+                    futures.push(async move {
+                        let disk_for_cancel = disk_clone.clone();
+                        tokio::select! {
+                            _ = cancel_clone.cancelled() => {
+                                info!("Disk healing task cancelled for disk: {}", disk_for_cancel);
+                            }
+                            _ = async {
+                                GLOBAL_BackgroundHealState
+                                    .set_disk_healing_status(disk_clone.clone(), true)
+                                    .await;
+                                if heal_fresh_disk(&disk_clone).await.is_err() {
+                                    info!("heal_fresh_disk is err");
+                                    GLOBAL_BackgroundHealState
+                                        .set_disk_healing_status(disk_clone.clone(), false)
+                                        .await;
+                                }
+                                GLOBAL_BackgroundHealState.pop_heal_local_disks(&[disk_clone]).await;
+                            } => {}
+                        }
+                    });
+                }
+                let _ = join_all(futures).await;
+                interval.reset();
+                continue;
+            }
+        }
     }
 }
@@ -20,14 +20,13 @@ use std::{
     path::{Path, PathBuf},
     pin::Pin,
     sync::{
-        Arc, OnceLock,
+        Arc,
         atomic::{AtomicBool, AtomicU32, AtomicU64, Ordering},
     },
     time::{Duration, SystemTime},
 };

 use time::{self, OffsetDateTime};
-use tokio_util::sync::CancellationToken;

 use super::{
     data_scanner_metric::{ScannerMetric, ScannerMetrics, globalScannerMetrics},

@@ -51,7 +50,7 @@ use crate::{
         metadata_sys,
     },
     event_notification::{EventArgs, send_event},
-    global::GLOBAL_LocalNodeName,
+    global::{GLOBAL_LocalNodeName, get_background_services_cancel_token},
     store_api::{ObjectOptions, ObjectToDelete, StorageAPI},
 };
 use crate::{

@@ -128,8 +127,6 @@ lazy_static! {
     pub static ref globalHealConfig: Arc<RwLock<Config>> = Arc::new(RwLock::new(Config::default()));
 }

-static GLOBAL_SCANNER_CANCEL_TOKEN: OnceLock<CancellationToken> = OnceLock::new();
-
 struct DynamicSleeper {
     factor: f64,
     max_sleep: Duration,

@@ -198,21 +195,18 @@ fn new_dynamic_sleeper(factor: f64, max_wait: Duration, is_scanner: bool) -> DynamicSleeper {
 /// - Minimum sleep duration to avoid excessive CPU usage
 /// - Proper error handling and logging
 ///
-/// # Returns
-/// A CancellationToken that can be used to gracefully shutdown the scanner
-///
 /// # Architecture
 /// 1. Initialize with random seed for sleep intervals
 /// 2. Run scanner cycles in a loop
 /// 3. Use randomized sleep between cycles to avoid thundering herd
 /// 4. Ensure minimum sleep duration to prevent CPU thrashing
-pub async fn init_data_scanner() -> CancellationToken {
+pub async fn init_data_scanner() {
     info!("Initializing data scanner background task");

-    let cancel_token = CancellationToken::new();
-    GLOBAL_SCANNER_CANCEL_TOKEN
-        .set(cancel_token.clone())
-        .expect("Scanner already initialized");
+    let Some(cancel_token) = get_background_services_cancel_token() else {
+        error!("Background services cancel token not initialized");
+        return;
+    };

     let cancel_clone = cancel_token.clone();
     tokio::spawn(async move {

@@ -256,8 +250,6 @@ pub async fn init_data_scanner() -> CancellationToken {

         info!("Data scanner background task stopped gracefully");
     });
-
-    cancel_token
 }

 /// Run a single data scanner cycle

@@ -282,7 +274,7 @@ async fn run_data_scanner_cycle() {
     };

     // Check for cancellation before starting expensive operations
-    if let Some(token) = GLOBAL_SCANNER_CANCEL_TOKEN.get() {
+    if let Some(token) = get_background_services_cancel_token() {
         if token.is_cancelled() {
             debug!("Scanner cancelled before starting cycle");
             return;

@@ -397,9 +389,8 @@ async fn execute_namespace_scan(
     cycle: u64,
     scan_mode: HealScanMode,
 ) -> Result<()> {
-    let cancel_token = GLOBAL_SCANNER_CANCEL_TOKEN
-        .get()
-        .ok_or_else(|| Error::other("Scanner not initialized"))?;
+    let cancel_token =
+        get_background_services_cancel_token().ok_or_else(|| Error::other("Background services not initialized"))?;

     tokio::select! {
         result = store.ns_scanner(tx, cycle as usize, scan_mode) => {
@@ -25,7 +25,8 @@ use std::time::Duration;
 use tokio::sync::RwLock;
 use tokio::sync::mpsc::{Receiver, Sender};
 use tokio::time::sleep;
-use tracing::error;
+use tokio_util::sync::CancellationToken;
+use tracing::{error, info};
 use uuid::Uuid;

 pub const MRF_OPS_QUEUE_SIZE: u64 = 100000;

@@ -87,56 +88,96 @@ impl MRFState {
         let _ = self.tx.send(op).await;
     }

-    pub async fn heal_routine(&self) {
-        loop {
-            // rx used only there,
-            if let Some(op) = self.rx.write().await.recv().await {
-                if op.bucket == RUSTFS_META_BUCKET {
-                    for pattern in &*PATTERNS {
-                        if pattern.is_match(&op.object) {
-                            return;
-                        }
-                    }
-                }
-
-                let now = Utc::now();
-                if now.sub(op.queued).num_seconds() < 1 {
-                    sleep(Duration::from_secs(1)).await;
-                }
-
-                let scan_mode = if op.bitrot_scan { HEAL_DEEP_SCAN } else { HEAL_NORMAL_SCAN };
-                if op.object.is_empty() {
-                    if let Err(err) = heal_bucket(&op.bucket).await {
-                        error!("heal bucket failed, bucket: {}, err: {:?}", op.bucket, err);
-                    }
-                } else if op.versions.is_empty() {
-                    if let Err(err) =
-                        heal_object(&op.bucket, &op.object, &op.version_id.clone().unwrap_or_default(), scan_mode).await
-                    {
-                        error!("heal object failed, bucket: {}, object: {}, err: {:?}", op.bucket, op.object, err);
-                    }
-                } else {
-                    let vers = op.versions.len() / 16;
-                    if vers > 0 {
-                        for i in 0..vers {
-                            let start = i * 16;
-                            let end = start + 16;
-                            if let Err(err) = heal_object(
-                                &op.bucket,
-                                &op.object,
-                                &Uuid::from_slice(&op.versions[start..end]).expect("").to_string(),
-                                scan_mode,
-                            )
-                            .await
-                            {
-                                error!("heal object failed, bucket: {}, object: {}, err: {:?}", op.bucket, op.object, err);
-                            }
-                        }
-                    }
-                }
-            } else {
-                return;
-            }
-        }
-    }
+    /// Enhanced heal routine with cancellation support
+    ///
+    /// This method implements the same healing logic as the original heal_routine,
+    /// but adds proper cancellation support via CancellationToken.
+    /// The core logic remains identical to maintain compatibility.
+    pub async fn heal_routine_with_cancel(&self, cancel_token: CancellationToken) {
+        info!("MRF heal routine started with cancellation support");
+
+        loop {
+            tokio::select! {
+                _ = cancel_token.cancelled() => {
+                    info!("MRF heal routine received shutdown signal, exiting gracefully");
+                    break;
+                }
+                op_result = async {
+                    let mut rx_guard = self.rx.write().await;
+                    rx_guard.recv().await
+                } => {
+                    if let Some(op) = op_result {
+                        // Special path filtering (original logic)
+                        if op.bucket == RUSTFS_META_BUCKET {
+                            for pattern in &*PATTERNS {
+                                if pattern.is_match(&op.object) {
+                                    continue; // Skip this operation, continue with next
+                                }
+                            }
+                        }
+
+                        // Network reconnection delay (original logic)
+                        let now = Utc::now();
+                        if now.sub(op.queued).num_seconds() < 1 {
+                            tokio::select! {
+                                _ = cancel_token.cancelled() => {
+                                    info!("MRF heal routine cancelled during reconnection delay");
+                                    break;
+                                }
+                                _ = sleep(Duration::from_secs(1)) => {}
+                            }
+                        }
+
+                        // Core healing logic (original logic preserved)
+                        let scan_mode = if op.bitrot_scan { HEAL_DEEP_SCAN } else { HEAL_NORMAL_SCAN };
+
+                        if op.object.is_empty() {
+                            // Heal bucket (original logic)
+                            if let Err(err) = heal_bucket(&op.bucket).await {
+                                error!("heal bucket failed, bucket: {}, err: {:?}", op.bucket, err);
+                            }
+                        } else if op.versions.is_empty() {
+                            // Heal single object (original logic)
+                            if let Err(err) = heal_object(
+                                &op.bucket,
+                                &op.object,
+                                &op.version_id.clone().unwrap_or_default(),
+                                scan_mode
+                            ).await {
+                                error!("heal object failed, bucket: {}, object: {}, err: {:?}", op.bucket, op.object, err);
+                            }
+                        } else {
+                            // Heal multiple versions (original logic)
+                            let vers = op.versions.len() / 16;
+                            if vers > 0 {
+                                for i in 0..vers {
+                                    // Check for cancellation before each version
+                                    if cancel_token.is_cancelled() {
+                                        info!("MRF heal routine cancelled during version processing");
+                                        return;
+                                    }
+
+                                    let start = i * 16;
+                                    let end = start + 16;
+                                    if let Err(err) = heal_object(
+                                        &op.bucket,
+                                        &op.object,
+                                        &Uuid::from_slice(&op.versions[start..end]).expect("").to_string(),
+                                        scan_mode,
+                                    ).await {
+                                        error!("heal object failed, bucket: {}, object: {}, err: {:?}", op.bucket, op.object, err);
+                                    }
+                                }
+                            }
+                        }
+                    } else {
+                        info!("MRF heal routine channel closed, exiting");
+                        break;
+                    }
+                }
+            }
+        }
+
+        info!("MRF heal routine stopped gracefully");
+    }
 }
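The core pattern here is draining an `mpsc` receiver while staying responsive to shutdown. Distilled to its essentials (a sketch; `handle` stands in for the healing logic):

```rust
use tokio::sync::mpsc::Receiver;
use tokio_util::sync::CancellationToken;

async fn drain_with_cancel<T>(mut rx: Receiver<T>, cancel: CancellationToken, mut handle: impl FnMut(T)) {
    loop {
        tokio::select! {
            _ = cancel.cancelled() => break,   // shutdown requested
            op = rx.recv() => match op {
                Some(op) => handle(op),        // process one queued op
                None => break,                 // all senders dropped: channel closed
            },
        }
    }
}
```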
@@ -2508,14 +2508,14 @@ fn check_object_name_for_length_and_slash(bucket: &str, object: &str) -> Result<()> {

     #[cfg(target_os = "windows")]
     {
-        if object.contains('\\')
-            || object.contains(':')
+        if object.contains(':')
             || object.contains('*')
             || object.contains('?')
             || object.contains('"')
             || object.contains('|')
             || object.contains('<')
             || object.contains('>')
+        // || object.contains('\\')
         {
             return Err(StorageError::ObjectNameInvalid(bucket.to_owned(), object.to_owned()));
         }

@@ -2549,9 +2549,9 @@ fn check_bucket_and_object_names(bucket: &str, object: &str) -> Result<()> {
         return Err(StorageError::ObjectNameInvalid(bucket.to_string(), object.to_string()));
     }

-    if cfg!(target_os = "windows") && object.contains('\\') {
-        return Err(StorageError::ObjectNameInvalid(bucket.to_string(), object.to_string()));
-    }
+    // if cfg!(target_os = "windows") && object.contains('\\') {
+    //     return Err(StorageError::ObjectNameInvalid(bucket.to_string(), object.to_string()));
+    // }

     Ok(())
 }
@@ -45,7 +45,6 @@ base64-simd = { workspace = true }
 jsonwebtoken = { workspace = true }
 tracing.workspace = true
 rustfs-madmin.workspace = true
-lazy_static.workspace = true
 rustfs-utils = { workspace = true, features = ["path"] }

 [dev-dependencies]
@@ -20,7 +20,6 @@ use crate::{
     manager::{extract_jwt_claims, get_default_policyes},
 };
 use futures::future::join_all;
-use lazy_static::lazy_static;
 use rustfs_ecstore::{
     config::{
         RUSTFS_CONFIG_PREFIX,

@@ -34,25 +33,28 @@ use rustfs_ecstore::{
 use rustfs_policy::{auth::UserIdentity, policy::PolicyDoc};
 use rustfs_utils::path::{SLASH_SEPARATOR, path_join_buf};
 use serde::{Serialize, de::DeserializeOwned};
+use std::sync::LazyLock;
 use std::{collections::HashMap, sync::Arc};
 use tokio::sync::broadcast::{self, Receiver as B_Receiver};
 use tokio::sync::mpsc::{self, Sender};
-use tracing::{info, warn};
+use tracing::{debug, info, warn};

-lazy_static! {
-    pub static ref IAM_CONFIG_PREFIX: String = format!("{}/iam", RUSTFS_CONFIG_PREFIX);
-    pub static ref IAM_CONFIG_USERS_PREFIX: String = format!("{}/iam/users/", RUSTFS_CONFIG_PREFIX);
-    pub static ref IAM_CONFIG_SERVICE_ACCOUNTS_PREFIX: String = format!("{}/iam/service-accounts/", RUSTFS_CONFIG_PREFIX);
-    pub static ref IAM_CONFIG_GROUPS_PREFIX: String = format!("{}/iam/groups/", RUSTFS_CONFIG_PREFIX);
-    pub static ref IAM_CONFIG_POLICIES_PREFIX: String = format!("{}/iam/policies/", RUSTFS_CONFIG_PREFIX);
-    pub static ref IAM_CONFIG_STS_PREFIX: String = format!("{}/iam/sts/", RUSTFS_CONFIG_PREFIX);
-    pub static ref IAM_CONFIG_POLICY_DB_PREFIX: String = format!("{}/iam/policydb/", RUSTFS_CONFIG_PREFIX);
-    pub static ref IAM_CONFIG_POLICY_DB_USERS_PREFIX: String = format!("{}/iam/policydb/users/", RUSTFS_CONFIG_PREFIX);
-    pub static ref IAM_CONFIG_POLICY_DB_STS_USERS_PREFIX: String = format!("{}/iam/policydb/sts-users/", RUSTFS_CONFIG_PREFIX);
-    pub static ref IAM_CONFIG_POLICY_DB_SERVICE_ACCOUNTS_PREFIX: String =
-        format!("{}/iam/policydb/service-accounts/", RUSTFS_CONFIG_PREFIX);
-    pub static ref IAM_CONFIG_POLICY_DB_GROUPS_PREFIX: String = format!("{}/iam/policydb/groups/", RUSTFS_CONFIG_PREFIX);
-}
+pub static IAM_CONFIG_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam"));
+pub static IAM_CONFIG_USERS_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/users/"));
+pub static IAM_CONFIG_SERVICE_ACCOUNTS_PREFIX: LazyLock<String> =
+    LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/service-accounts/"));
+pub static IAM_CONFIG_GROUPS_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/groups/"));
+pub static IAM_CONFIG_POLICIES_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/policies/"));
+pub static IAM_CONFIG_STS_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/sts/"));
+pub static IAM_CONFIG_POLICY_DB_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/policydb/"));
+pub static IAM_CONFIG_POLICY_DB_USERS_PREFIX: LazyLock<String> =
+    LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/policydb/users/"));
+pub static IAM_CONFIG_POLICY_DB_STS_USERS_PREFIX: LazyLock<String> =
+    LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/policydb/sts-users/"));
+pub static IAM_CONFIG_POLICY_DB_SERVICE_ACCOUNTS_PREFIX: LazyLock<String> =
+    LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/policydb/service-accounts/"));
+pub static IAM_CONFIG_POLICY_DB_GROUPS_PREFIX: LazyLock<String> =
+    LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/policydb/groups/"));

 const IAM_IDENTITY_FILE: &str = "identity.json";
 const IAM_POLICY_FILE: &str = "policy.json";

@@ -370,7 +372,15 @@ impl Store for ObjectStore {
     async fn load_iam_config<Item: DeserializeOwned>(&self, path: impl AsRef<str> + Send) -> Result<Item> {
         let mut data = read_config(self.object_api.clone(), path.as_ref()).await?;

-        data = Self::decrypt_data(&data)?;
+        data = match Self::decrypt_data(&data) {
+            Ok(v) => v,
+            Err(err) => {
+                debug!("decrypt_data failed: {}", err);
+                // delete the config file when decrypt failed
+                let _ = self.delete_iam_config(path.as_ref()).await;
+                return Err(Error::ConfigNotFound);
+            }
+        };

         Ok(serde_json::from_slice(&data)?)
     }
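With this change a corrupt or undecryptable entry is self-healing: the stored file is deleted and the failure is downgraded to `ConfigNotFound`, which callers can treat as an ordinary miss instead of a fatal error. A hedched caller-side sketch (the `load_user` function is illustrative, not from this diff):

```rust
// Before: a single undecryptable identity.json aborted IAM loading.
// After: the bad entry is removed and lookup proceeds as a miss.
async fn load_user(store: &ObjectStore, path: &str) -> Option<UserIdentity> {
    match store.load_iam_config::<UserIdentity>(path).await {
        Ok(user) => Some(user),
        Err(Error::ConfigNotFound) => None, // includes freshly-deleted corrupt entries
        Err(err) => {
            tracing::warn!("unexpected IAM load error: {err}");
            None
        }
    }
}
```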
@@ -30,7 +30,6 @@ workspace = true

 [dependencies]
 async-trait.workspace = true
-lazy_static.workspace = true
 rustfs-protos.workspace = true
 rand.workspace = true
 serde.workspace = true
@@ -14,12 +14,12 @@
 // limitations under the License.

 use async_trait::async_trait;
-use lazy_static::lazy_static;
 use local_locker::LocalLocker;
 use lock_args::LockArgs;
 use remote_client::RemoteClient;
 use std::io::Result;
 use std::sync::Arc;
+use std::sync::LazyLock;
 use tokio::sync::RwLock;

 pub mod drwmutex;

@@ -29,9 +29,7 @@ pub mod lrwmutex;
 pub mod namespace_lock;
 pub mod remote_client;

-lazy_static! {
-    pub static ref GLOBAL_LOCAL_SERVER: Arc<RwLock<LocalLocker>> = Arc::new(RwLock::new(LocalLocker::new()));
-}
+pub static GLOBAL_LOCAL_SERVER: LazyLock<Arc<RwLock<LocalLocker>>> = LazyLock::new(|| Arc::new(RwLock::new(LocalLocker::new())));

 type LockClient = dyn Locker;
@@ -1,4 +1,3 @@
-#![allow(unused_imports)]
 // Copyright 2024 RustFS Team
 //
 // Licensed under the Apache License, Version 2.0 (the "License");

@@ -12,6 +11,8 @@
 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 // See the License for the specific language governing permissions and
 // limitations under the License.

+#![allow(unused_imports)]
+#![allow(clippy::all)]
 pub mod proto_gen;
@@ -45,5 +45,4 @@ serde_json.workspace = true
 md-5 = { workspace = true }

 [dev-dependencies]
-#criterion = { version = "0.5.1", features = ["async", "async_tokio", "tokio"] }
-tokio-test = "0.4"
+tokio-test = { workspace = true }
@@ -32,7 +32,6 @@ async-trait.workspace = true
 datafusion = { workspace = true }
 derive_builder = { workspace = true }
 futures = { workspace = true }
-lazy_static = { workspace = true }
 parking_lot = { workspace = true }
 s3s.workspace = true
 snafu = { workspace = true, features = ["backtrace"] }
@@ -33,7 +33,6 @@ use datafusion::{
     execution::{RecordBatchStream, SendableRecordBatchStream},
 };
 use futures::{Stream, StreamExt};
-use lazy_static::lazy_static;
 use rustfs_s3select_api::{
     QueryError, QueryResult,
     query::{

@@ -48,6 +47,7 @@ use rustfs_s3select_api::{
     },
 };
 use s3s::dto::{FileHeaderInfo, SelectObjectContentInput};
+use std::sync::LazyLock;

 use crate::{
     execution::factory::QueryExecutionFactoryRef,

@@ -55,11 +55,9 @@ use crate::{
     sql::logical::planner::DefaultLogicalPlanner,
 };

-lazy_static! {
-    static ref IGNORE: FileHeaderInfo = FileHeaderInfo::from_static(FileHeaderInfo::IGNORE);
-    static ref NONE: FileHeaderInfo = FileHeaderInfo::from_static(FileHeaderInfo::NONE);
-    static ref USE: FileHeaderInfo = FileHeaderInfo::from_static(FileHeaderInfo::USE);
-}
+static IGNORE: LazyLock<FileHeaderInfo> = LazyLock::new(|| FileHeaderInfo::from_static(FileHeaderInfo::IGNORE));
+static NONE: LazyLock<FileHeaderInfo> = LazyLock::new(|| FileHeaderInfo::from_static(FileHeaderInfo::NONE));
+static USE: LazyLock<FileHeaderInfo> = LazyLock::new(|| FileHeaderInfo::from_static(FileHeaderInfo::USE));

 #[derive(Clone)]
 pub struct SimpleQueryDispatcher {
@@ -27,7 +27,6 @@ documentation = "https://docs.rs/rustfs-signer/latest/rustfs_signer/"

 [dependencies]
 tracing.workspace = true
-lazy_static.workspace = true
 bytes = { workspace = true }
 http.workspace = true
 time.workspace = true
@@ -13,8 +13,6 @@
 // limitations under the License.

 use http::{HeaderMap, HeaderValue, request};
-use lazy_static::lazy_static;
-use std::collections::HashMap;
 use time::{OffsetDateTime, macros::format_description};

 use super::request_signature_v4::{SERVICE_TYPE_S3, get_scope, get_signature, get_signing_key};

@@ -32,15 +30,13 @@ const _CRLF_LEN: i64 = 2;
 const _TRAILER_KV_SEPARATOR: &str = ":";
 const _TRAILER_SIGNATURE: &str = "x-amz-trailer-signature";

-lazy_static! {
-    static ref ignored_streaming_headers: HashMap<String, bool> = {
-        let mut m = <HashMap<String, bool>>::new();
-        m.insert("authorization".to_string(), true);
-        m.insert("user-agent".to_string(), true);
-        m.insert("content-type".to_string(), true);
-        m
-    };
-}
+// static ignored_streaming_headers: LazyLock<HashMap<String, bool>> = LazyLock::new(|| {
+//     let mut m = <HashMap<String, bool>>::new();
+//     m.insert("authorization".to_string(), true);
+//     m.insert("user-agent".to_string(), true);
+//     m.insert("content-type".to_string(), true);
+//     m
+// });

 #[allow(dead_code)]
 fn build_chunk_string_to_sign(t: OffsetDateTime, region: &str, previous_sig: &str, chunk_check_sum: &str) -> String {
@@ -16,9 +16,9 @@ use bytes::BytesMut;
 use http::HeaderMap;
 use http::Uri;
 use http::request;
-use lazy_static::lazy_static;
 use std::collections::HashMap;
 use std::fmt::Write;
+use std::sync::LazyLock;
 use time::{OffsetDateTime, macros::format_description};
 use tracing::debug;

@@ -32,15 +32,14 @@ pub const SIGN_V4_ALGORITHM: &str = "AWS4-HMAC-SHA256";
 pub const SERVICE_TYPE_S3: &str = "s3";
 pub const SERVICE_TYPE_STS: &str = "sts";

-lazy_static! {
-    static ref v4_ignored_headers: HashMap<String, bool> = {
-        let mut m = <HashMap<String, bool>>::new();
-        m.insert("accept-encoding".to_string(), true);
-        m.insert("authorization".to_string(), true);
-        m.insert("user-agent".to_string(), true);
-        m
-    };
-}
+#[allow(non_upper_case_globals)] // FIXME
+static v4_ignored_headers: LazyLock<HashMap<String, bool>> = LazyLock::new(|| {
+    let mut m = <HashMap<String, bool>>::new();
+    m.insert("accept-encoding".to_string(), true);
+    m.insert("authorization".to_string(), true);
+    m.insert("user-agent".to_string(), true);
+    m
+});

 pub fn get_signing_key(secret: &str, loc: &str, t: OffsetDateTime, service_type: &str) -> [u8; 32] {
     let mut s = "AWS4".to_string();
@@ -30,7 +30,6 @@ blake3 = { workspace = true, optional = true }
 crc32fast.workspace = true
 hex-simd = { workspace = true, optional = true }
 highway = { workspace = true, optional = true }
-lazy_static = { workspace = true, optional = true }
 local-ip-address = { workspace = true, optional = true }
 md-5 = { workspace = true, optional = true }
 netif = { workspace = true, optional = true }

@@ -77,12 +76,12 @@ workspace = true
 default = ["ip"] # features that are enabled by default
 ip = ["dep:local-ip-address"] # ip characteristics and their dependencies
 tls = ["dep:rustls", "dep:rustls-pemfile", "dep:rustls-pki-types"] # tls characteristics and their dependencies
-net = ["ip", "dep:url", "dep:netif", "dep:lazy_static", "dep:futures", "dep:transform-stream", "dep:bytes", "dep:s3s", "dep:hyper", "dep:hyper-util"] # empty network features
+net = ["ip", "dep:url", "dep:netif", "dep:futures", "dep:transform-stream", "dep:bytes", "dep:s3s", "dep:hyper", "dep:hyper-util"] # empty network features
 io = ["dep:tokio"]
 path = []
 notify = ["dep:hyper", "dep:s3s"] # file system notification features
 compress = ["dep:flate2", "dep:brotli", "dep:snap", "dep:lz4", "dep:zstd"]
-string = ["dep:regex", "dep:lazy_static", "dep:rand"]
+string = ["dep:regex", "dep:rand"]
 crypto = ["dep:base64-simd", "dep:hex-simd", "dep:hmac", "dep:hyper", "dep:sha1"]
 hash = ["dep:highway", "dep:md-5", "dep:sha2", "dep:blake3", "dep:serde", "dep:siphasher", "dep:hex-simd", "dep:base64-simd"]
 os = ["dep:nix", "dep:tempfile", "winapi"] # operating system utilities
@@ -17,8 +17,8 @@ use futures::pin_mut;
 use futures::{Stream, StreamExt};
 use hyper::client::conn::http2::Builder;
 use hyper_util::rt::TokioExecutor;
-use lazy_static::lazy_static;
 use std::net::Ipv6Addr;
+use std::sync::LazyLock;
 use std::{
     collections::HashSet,
     fmt::Display,

@@ -27,9 +27,7 @@ use std::{
 use transform_stream::AsyncTryStream;
 use url::{Host, Url};

-lazy_static! {
-    static ref LOCAL_IPS: Vec<IpAddr> = must_get_local_ips().unwrap();
-}
+static LOCAL_IPS: LazyLock<Vec<IpAddr>> = LazyLock::new(|| must_get_local_ips().unwrap());

 /// helper for validating if the provided arg is an ip address.
 pub fn is_socket_addr(addr: &str) -> bool {

@@ -178,7 +176,7 @@ impl Display for XHost {
 impl TryFrom<String> for XHost {
     type Error = std::io::Error;

-    fn try_from(value: String) -> std::result::Result<Self, Self::Error> {
+    fn try_from(value: String) -> Result<Self, Self::Error> {
         if let Some(addr) = value.to_socket_addrs()?.next() {
             Ok(Self {
                 name: addr.ip().to_string(),

@@ -214,9 +212,9 @@ pub fn parse_and_resolve_address(addr_str: &str) -> std::io::Result<SocketAddr> {
 }

 #[allow(dead_code)]
-pub fn bytes_stream<S, E>(stream: S, content_length: usize) -> impl Stream<Item = std::result::Result<Bytes, E>> + Send + 'static
+pub fn bytes_stream<S, E>(stream: S, content_length: usize) -> impl Stream<Item = Result<Bytes, E>> + Send + 'static
 where
-    S: Stream<Item = std::result::Result<Bytes, E>> + Send + 'static,
+    S: Stream<Item = Result<Bytes, E>> + Send + 'static,
     E: Send + 'static,
 {
     AsyncTryStream::<Bytes, E, _>::new(|mut y| async move {
@@ -12,10 +12,10 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use lazy_static::*;
 use rand::{Rng, RngCore};
 use regex::Regex;
 use std::io::{Error, Result};
+use std::sync::LazyLock;

 pub fn parse_bool(str: &str) -> Result<bool> {
     match str {

@@ -116,9 +116,7 @@ pub fn match_as_pattern_prefix(pattern: &str, text: &str) -> bool {
     text.len() <= pattern.len()
 }

-lazy_static! {
-    static ref ELLIPSES_RE: Regex = Regex::new(r"(.*)(\{[0-9a-z]*\.\.\.[0-9a-z]*\})(.*)").unwrap();
-}
+static ELLIPSES_RE: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"(.*)(\{[0-9a-z]*\.\.\.[0-9a-z]*\})(.*)").unwrap());

 /// Ellipses constants
 const OPEN_BRACES: &str = "{";
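As with the other conversions, the regex is still compiled exactly once, on first dereference. A small sketch of the behavior (the sample path is illustrative):

```rust
fn demo() {
    // First access compiles the pattern; later calls reuse the cached Regex.
    let caps = ELLIPSES_RE.captures("/data/vol{1...4}").expect("pattern matches");
    assert_eq!(&caps[2], "{1...4}"); // the ellipses group
}
```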
@@ -25,17 +25,16 @@ managing and monitoring the system.
 |--certs
 |   ├── rustfs_cert.pem // Default|fallback certificate
 |   ├── rustfs_key.pem  // Default|fallback private key
-|   ├── example.com/    // certificate directory of specific domain names
+|   ├── rustfs.com/     // certificate directory of specific domain names
 |   │   ├── rustfs_cert.pem
 |   │   └── rustfs_key.pem
-|   ├── api.example.com/
+|   ├── api.rustfs.com/
 |   │   ├── rustfs_cert.pem
 |   │   └── rustfs_key.pem
-|   └── cdn.example.com/
+|   └── cdn.rustfs.com/
 |       ├── rustfs_cert.pem
 |       └── rustfs_key.pem
 |--config
 |   |--rustfs.env // env config
-|   |--rustfs-zh.env // env config in Chinese
 |   |--event.example.toml // event config
 ```
@@ -36,15 +36,11 @@ Environment=RUSTFS_SECRET_KEY=rustfsadmin
 ExecStart=/usr/local/bin/rustfs \
     --address 0.0.0.0:9000 \
     --volumes /data/rustfs/vol1,/data/rustfs/vol2 \
-    --obs-config /etc/rustfs/obs.yaml \
-    --console-enable \
-    --console-address 0.0.0.0:9001
+    --console-enable
 # Startup command: run /usr/local/bin/rustfs with these arguments:
 # --address 0.0.0.0:9000: the service listens on port 9000 on all interfaces.
 # --volumes: storage volume paths /data/rustfs/vol1 and /data/rustfs/vol2.
-# --obs-config: configuration file path /etc/rustfs/obs.yaml.
 # --console-enable: enable the console.
-# --console-address 0.0.0.0:9001: the console listens on port 9001 on all interfaces.

 # Environment variable configuration passed to the service; recommended and concise
 # rustfs example file, see: `../config/rustfs-zh.env`
@@ -83,7 +83,6 @@ sudo journalctl -u rustfs --since today
 ```bash
 # Check service ports
 ss -tunlp | grep 9000
-ss -tunlp | grep 9001

 # Test service availability
 curl -I http://localhost:9000
@@ -83,7 +83,6 @@ sudo journalctl -u rustfs --since today
 ```bash
 # Check service ports
 ss -tunlp | grep 9000
-ss -tunlp | grep 9001

 # Test service availability
 curl -I http://localhost:9000
@@ -22,9 +22,7 @@ Environment=RUSTFS_SECRET_KEY=rustfsadmin
 ExecStart=/usr/local/bin/rustfs \
     --address 0.0.0.0:9000 \
     --volumes /data/rustfs/vol1,/data/rustfs/vol2 \
-    --obs-config /etc/rustfs/obs.yaml \
-    --console-enable \
-    --console-address 0.0.0.0:9001
+    --console-enable

 # environment variable configuration (Option 2: Use environment variables)
 # rustfs example file see: `../config/rustfs.env`
@@ -36,13 +36,13 @@ cd deploy/certs/
 ls -la
 ├── rustfs_cert.pem // Default|fallback certificate
 ├── rustfs_key.pem  // Default|fallback private key
-├── example.com/    // certificate directory of specific domain names
+├── rustfs.com/     // certificate directory of specific domain names
 │   ├── rustfs_cert.pem
 │   └── rustfs_key.pem
-├── api.example.com/
+├── api.rustfs.com/
 │   ├── rustfs_cert.pem
 │   └── rustfs_key.pem
-└── cdn.example.com/
+└── cdn.rustfs.com/
     ├── rustfs_cert.pem
     └── rustfs_key.pem
 ```
@@ -7,22 +7,16 @@ RUSTFS_ROOT_PASSWORD=rustfsadmin
 # RustFS data volume storage paths; multiple volumes supported, vol1 through vol4
 RUSTFS_VOLUMES="./deploy/deploy/vol{1...4}"
 # RustFS service startup parameters, specifying the listen address and port
-RUSTFS_OPTS="--address 0.0.0.0:9000"
+RUSTFS_OPTS="--address :9000"
 # RustFS service listen address and port
-RUSTFS_ADDRESS="0.0.0.0:9000"
+RUSTFS_ADDRESS=":9000"
 # Enable RustFS console functionality
 RUSTFS_CONSOLE_ENABLE=true
-# RustFS console listen address and port
-RUSTFS_CONSOLE_ADDRESS="0.0.0.0:9001"
 # RustFS service endpoint for client access
 RUSTFS_SERVER_ENDPOINT="http://127.0.0.1:9000"
 # RustFS service domain configuration
-RUSTFS_SERVER_DOMAINS=127.0.0.1:9001
+RUSTFS_SERVER_DOMAINS=127.0.0.1:9000
 # RustFS license content
 RUSTFS_LICENSE="license content"
 # Observability configuration endpoint: http://localhost:4317
 RUSTFS_OBS_ENDPOINT=http://localhost:4317
-# TLS certificates directory path: deploy/certs
-RUSTFS_TLS_PATH=/etc/default/tls
-# Event notification configuration file path: deploy/config/event.example.toml
-RUSTFS_EVENT_CONFIG=/etc/default/event.toml
+RUSTFS_TLS_PATH=/etc/default/tls
@@ -7,22 +7,16 @@ RUSTFS_ROOT_PASSWORD=rustfsadmin
 # RustFS data volume storage paths, supports multiple volumes from vol1 to vol4
 RUSTFS_VOLUMES="./deploy/deploy/vol{1...4}"
 # RustFS service startup parameters, specifying listen address and port
-RUSTFS_OPTS="--address 0.0.0.0:9000"
+RUSTFS_OPTS="--address :9000"
 # RustFS service listen address and port
-RUSTFS_ADDRESS="0.0.0.0:9000"
+RUSTFS_ADDRESS=":9000"
 # Enable RustFS console functionality
 RUSTFS_CONSOLE_ENABLE=true
-# RustFS console listen address and port
-RUSTFS_CONSOLE_ADDRESS="0.0.0.0:9001"
 # RustFS service endpoint for client access
 RUSTFS_SERVER_ENDPOINT="http://127.0.0.1:9000"
 # RustFS service domain configuration
-RUSTFS_SERVER_DOMAINS=127.0.0.1:9001
+RUSTFS_SERVER_DOMAINS=127.0.0.1:9000
 # RustFS license content
 RUSTFS_LICENSE="license content"
 # Observability configuration endpoint: RUSTFS_OBS_ENDPOINT
 RUSTFS_OBS_ENDPOINT=http://localhost:4317
-# TLS certificates directory path: deploy/certs
-RUSTFS_TLS_PATH=/etc/default/tls
-# event notification configuration file path: deploy/config/event.example.toml
-RUSTFS_EVENT_CONFIG=/etc/default/event.toml
+RUSTFS_TLS_PATH=/etc/default/tls
@@ -12,7 +12,7 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.

-version: '3.8'
+version: "3.8"

 services:
   # RustFS main service

@@ -23,17 +23,15 @@ services:
     container_name: rustfs-server
     build:
       context: .
-      dockerfile: Dockerfile.multi-stage
+      dockerfile: .docker/ubuntu/Dockerfile.source
       args:
         TARGETPLATFORM: linux/amd64
     ports:
-      - "9000:9000"   # S3 API port
-      - "9001:9001"   # Console port
+      - "9000:9000" # S3 API port
     environment:
       - RUSTFS_VOLUMES=/data/rustfs0,/data/rustfs1,/data/rustfs2,/data/rustfs3
      - RUSTFS_ADDRESS=0.0.0.0:9000
       - RUSTFS_CONSOLE_ENABLE=true
-      - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9001
       - RUSTFS_ACCESS_KEY=rustfsadmin
       - RUSTFS_SECRET_KEY=rustfsadmin
       - RUSTFS_LOG_LEVEL=info

@@ -48,7 +46,15 @@ services:
       - rustfs-network
     restart: unless-stopped
     healthcheck:
-      test: [ "CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:9000/health" ]
+      test:
+        [
+          "CMD",
+          "wget",
+          "--no-verbose",
+          "--tries=1",
+          "--spider",
+          "http://localhost:9000/health",
+        ]
       interval: 30s
       timeout: 10s
       retries: 3

@@ -62,20 +68,19 @@ services:
     container_name: rustfs-dev
     build:
       context: .
-      dockerfile: .docker/Dockerfile.devenv
+      dockerfile: .docker/ubuntu/Dockerfile.dev
+    # Pure development environment
     ports:
       - "9010:9000"
-      - "9011:9001"
     environment:
       - RUSTFS_VOLUMES=/data/rustfs0,/data/rustfs1
       - RUSTFS_ADDRESS=0.0.0.0:9000
       - RUSTFS_CONSOLE_ENABLE=true
-      - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9001
       - RUSTFS_ACCESS_KEY=devadmin
       - RUSTFS_SECRET_KEY=devadmin
       - RUSTFS_LOG_LEVEL=debug
     volumes:
-      - .:/root/s3-rustfs
+      - .:/app # Mount source code to /app for development
       - rustfs_dev_data:/data
     networks:
       - rustfs-network

@@ -92,10 +97,10 @@ services:
     volumes:
       - ./.docker/observability/otel-collector.yml:/etc/otelcol-contrib/otel-collector.yml:ro
     ports:
-      - "4317:4317"   # OTLP gRPC receiver
-      - "4318:4318"   # OTLP HTTP receiver
-      - "8888:8888"   # Prometheus metrics
-      - "8889:8889"   # Prometheus exporter metrics
+      - "4317:4317" # OTLP gRPC receiver
+      - "4318:4318" # OTLP HTTP receiver
+      - "8888:8888" # Prometheus metrics
+      - "8889:8889" # Prometheus exporter metrics
     networks:
       - rustfs-network
     restart: unless-stopped

@@ -107,8 +112,8 @@ services:
     image: jaegertracing/all-in-one:latest
     container_name: jaeger
     ports:
-      - "16686:16686"   # Jaeger UI
-      - "14250:14250"   # Jaeger gRPC
+      - "16686:16686" # Jaeger UI
+      - "14250:14250" # Jaeger gRPC
     environment:
       - COLLECTOR_OTLP_ENABLED=true
     networks:

@@ -127,12 +132,12 @@ services:
       - ./.docker/observability/prometheus.yml:/etc/prometheus/prometheus.yml:ro
       - prometheus_data:/prometheus
     command:
-      - '--config.file=/etc/prometheus/prometheus.yml'
-      - '--storage.tsdb.path=/prometheus'
-      - '--web.console.libraries=/etc/prometheus/console_libraries'
-      - '--web.console.templates=/etc/prometheus/consoles'
-      - '--storage.tsdb.retention.time=200h'
-      - '--web.enable-lifecycle'
+      - "--config.file=/etc/prometheus/prometheus.yml"
+      - "--storage.tsdb.path=/prometheus"
+      - "--web.console.libraries=/etc/prometheus/console_libraries"
+      - "--web.console.templates=/etc/prometheus/consoles"
+      - "--storage.tsdb.retention.time=200h"
+      - "--web.enable-lifecycle"
     networks:
       - rustfs-network
     restart: unless-stopped
@@ -1,530 +0,0 @@
|
||||
# RustFS Docker Build and Deployment Guide
|
||||
|
||||
This document describes how to build and deploy RustFS using Docker, including the automated GitHub Actions workflow for building and pushing images to Docker Hub and GitHub Container Registry.
|
||||
|
||||
## 🚀 Quick Start
|
||||
|
||||
### Using Pre-built Images
|
||||
|
||||
```bash
|
||||
# Pull and run the latest RustFS image
|
||||
docker run -d \
|
||||
--name rustfs \
|
||||
-p 9000:9000 \
|
||||
-p 9001:9001 \
|
||||
-v rustfs_data:/data \
|
||||
-e RUSTFS_VOLUMES=/data/rustfs0,/data/rustfs1,/data/rustfs2,/data/rustfs3 \
|
||||
-e RUSTFS_ACCESS_KEY=rustfsadmin \
|
||||
-e RUSTFS_SECRET_KEY=rustfsadmin \
|
||||
-e RUSTFS_CONSOLE_ENABLE=true \
|
||||
rustfs/rustfs:latest
|
||||
```
|
||||
|
||||
### Using Docker Compose
|
||||
|
||||
```bash
|
||||
# Basic deployment
|
||||
docker-compose up -d
|
||||
|
||||
# Development environment
|
||||
docker-compose --profile dev up -d
|
||||
|
||||
# With observability stack
|
||||
docker-compose --profile observability up -d
|
||||
|
||||
# Full stack with all services
|
||||
docker-compose --profile dev --profile observability --profile testing up -d
|
||||
```
|
||||
|
||||
## 📦 Available Images
|
||||
|
||||
Our GitHub Actions workflow builds multiple image variants:
|
||||
|
||||
### Image Registries
|
||||
|
||||
- **Docker Hub**: `rustfs/rustfs`
|
||||
- **GitHub Container Registry**: `ghcr.io/rustfs/s3-rustfs`
|
||||
|
||||
### Image Variants
|
||||
|
||||
| Variant | Tag Suffix | Description | Use Case |
|
||||
|---------|------------|-------------|----------|
|
||||
| Production | *(none)* | Minimal Ubuntu-based runtime | Production deployment |
|
||||
| Ubuntu | `-ubuntu22.04` | Ubuntu 22.04 based build environment | Development/Testing |
|
||||
| Rocky Linux | `-rockylinux9.3` | Rocky Linux 9.3 based build environment | Enterprise environments |
|
||||
| Development | `-devenv` | Full development environment | Development/Debugging |
|
||||
|
||||
### Supported Architectures
|
||||
|
||||
All images support multi-architecture:
|
||||
- `linux/amd64` (x86_64-unknown-linux-musl)
|
||||
- `linux/arm64` (aarch64-unknown-linux-gnu)
|
||||
|
||||
### Tag Examples
|
||||
|
||||
```bash
|
||||
# Latest production image
|
||||
rustfs/rustfs:latest
|
||||
rustfs/rustfs:main
|
||||
|
||||
# Specific version
|
||||
rustfs/rustfs:v1.0.0
|
||||
rustfs/rustfs:v1.0.0-ubuntu22.04
|
||||
|
||||
# Development environment
|
||||
rustfs/rustfs:latest-devenv
|
||||
rustfs/rustfs:main-devenv
|
||||
```
|
||||
|
||||
## 🔧 GitHub Actions Workflow
|
||||
|
||||
The Docker build workflow (`.github/workflows/docker.yml`) automatically:
|
||||
|
||||
1. **Builds cross-platform binaries** for `amd64` and `arm64`
|
||||
2. **Creates Docker images** for all variants
|
||||
3. **Pushes to registries** (Docker Hub and GitHub Container Registry)
|
||||
4. **Creates multi-arch manifests** for seamless platform selection
|
||||
5. **Performs security scanning** using Trivy
|
||||
|
||||
### Cross-Compilation Strategy
|
||||
|
||||
To handle complex native dependencies, we use different compilation strategies:
|
||||
|
||||
- **x86_64**: Native compilation with `x86_64-unknown-linux-musl` for static linking
|
||||
- **aarch64**: Cross-compilation with `aarch64-unknown-linux-gnu` using the `cross` tool
|
||||
|
||||
This approach ensures compatibility with various C libraries while maintaining performance.
|
||||
|
||||
### Workflow Triggers
|
||||
|
||||
- **Push to main branch**: Builds and pushes `main` and `latest` tags
|
||||
- **Tag push** (`v*`): Builds and pushes version tags
|
||||
- **Pull requests**: Builds images without pushing
|
||||
- **Manual trigger**: Workflow dispatch with options
|
||||
|
||||
### Required Secrets
|
||||
|
||||
Configure these secrets in your GitHub repository:
|
||||
|
||||
```bash
|
||||
# Docker Hub credentials
|
||||
DOCKERHUB_USERNAME=your-dockerhub-username
|
||||
DOCKERHUB_TOKEN=your-dockerhub-access-token
|
||||
|
||||
# GitHub token is automatically available
|
||||
GITHUB_TOKEN=automatically-provided
|
||||
```
|
||||
|
||||
## 🏗️ Building Locally
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- Docker with BuildKit enabled
|
||||
- Rust toolchain (1.85+)
|
||||
- Protocol Buffers compiler (protoc 31.1+)
|
||||
- FlatBuffers compiler (flatc 25.2.10+)
|
||||
- `cross` tool for ARM64 compilation
|
||||
|
||||
### Installation Commands
|
||||
|
||||
```bash
|
||||
# Install Rust targets
|
||||
rustup target add x86_64-unknown-linux-musl
|
||||
rustup target add aarch64-unknown-linux-gnu
|
||||
|
||||
# Install cross for ARM64 compilation
|
||||
cargo install cross --git https://github.com/cross-rs/cross
|
||||
|
||||
# Install protoc (macOS)
|
||||
brew install protobuf
|
||||
|
||||
# Install protoc (Ubuntu)
|
||||
sudo apt-get install protobuf-compiler
|
||||
|
||||
# Install flatc
|
||||
# Download from: https://github.com/google/flatbuffers/releases
|
||||
```
|
||||
|
||||
### Build Commands

```bash
# Test cross-compilation setup
./scripts/test-cross-build.sh

# Build production image for local platform
docker build -t rustfs:local .

# Build multi-stage production image
docker build -f Dockerfile.multi-stage -t rustfs:multi-stage .

# Build specific variant
docker build -f .docker/Dockerfile.ubuntu22.04 -t rustfs:ubuntu .

# Build for specific platform
docker build --platform linux/amd64 -t rustfs:amd64 .
docker build --platform linux/arm64 -t rustfs:arm64 .

# Build multi-platform image
docker buildx build --platform linux/amd64,linux/arm64 -t rustfs:multi .
```
### Cross-Compilation

```bash
# Generate protobuf code first
cargo run --bin gproto

# Native x86_64 build
cargo build --release --target x86_64-unknown-linux-musl --bin rustfs

# Cross-compile for ARM64
cross build --release --target aarch64-unknown-linux-gnu --bin rustfs
```
### Build with Docker Compose

```bash
# Build all services
docker-compose build

# Build specific service
docker-compose build rustfs

# Build development environment
docker-compose build rustfs-dev
```
## 🚀 Deployment Options

### 1. Single Container

```bash
docker run -d \
  --name rustfs \
  --restart unless-stopped \
  -p 9000:9000 \
  -p 9001:9001 \
  -v /data/rustfs:/data \
  -e RUSTFS_VOLUMES=/data/rustfs0,/data/rustfs1,/data/rustfs2,/data/rustfs3 \
  -e RUSTFS_ADDRESS=0.0.0.0:9000 \
  -e RUSTFS_CONSOLE_ENABLE=true \
  -e RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9001 \
  -e RUSTFS_ACCESS_KEY=rustfsadmin \
  -e RUSTFS_SECRET_KEY=rustfsadmin \
  rustfs/rustfs:latest
```
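Once the container is up, a quick smoke test against the S3 endpoint can be run with any S3 client. The sketch below assumes the AWS CLI is installed and reuses the credentials from the environment variables above:

```bash
export AWS_ACCESS_KEY_ID=rustfsadmin
export AWS_SECRET_ACCESS_KEY=rustfsadmin
export AWS_DEFAULT_REGION=us-east-1

# Create a bucket and list buckets through the local endpoint
aws --endpoint-url http://localhost:9000 s3 mb s3://smoke-test
aws --endpoint-url http://localhost:9000 s3 ls
```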
### 2. Docker Compose Profiles

```bash
# Production deployment
docker-compose up -d

# Development with debugging
docker-compose --profile dev up -d

# With monitoring stack
docker-compose --profile observability up -d

# Complete testing environment
docker-compose --profile dev --profile observability --profile testing up -d
```
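After bringing a profile up, service state and logs can be checked with the standard Compose commands:

```bash
docker-compose ps
docker-compose logs -f rustfs
```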
### 3. Kubernetes Deployment

```yaml
apiVersion: apps/v1
kind: Deployment
metadata:
  name: rustfs
spec:
  replicas: 3
  selector:
    matchLabels:
      app: rustfs
  template:
    metadata:
      labels:
        app: rustfs
    spec:
      containers:
        - name: rustfs
          image: rustfs/rustfs:latest
          ports:
            - containerPort: 9000
            - containerPort: 9001
          env:
            - name: RUSTFS_VOLUMES
              value: "/data/rustfs0,/data/rustfs1,/data/rustfs2,/data/rustfs3"
            - name: RUSTFS_ADDRESS
              value: "0.0.0.0:9000"
            - name: RUSTFS_CONSOLE_ENABLE
              value: "true"
            - name: RUSTFS_CONSOLE_ADDRESS
              value: "0.0.0.0:9001"
          volumeMounts:
            - name: data
              mountPath: /data
      volumes:
        - name: data
          persistentVolumeClaim:
            claimName: rustfs-data
```
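The deployment above assumes a `rustfs-data` claim already exists. A minimal sketch of that claim follows; the access mode and size are placeholders to adjust for your cluster (with `replicas: 3` sharing one claim, the chosen storage class would need to support `ReadWriteMany`):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rustfs-data
spec:
  accessModes:
    - ReadWriteMany
  resources:
    requests:
      storage: 100Gi
```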
## ⚙️ Configuration

### Environment Variables

| Variable | Description | Default |
|----------|-------------|---------|
| `RUSTFS_VOLUMES` | Comma-separated list of data volumes | Required |
| `RUSTFS_ADDRESS` | Server bind address | `0.0.0.0:9000` |
| `RUSTFS_CONSOLE_ENABLE` | Enable web console | `false` |
| `RUSTFS_CONSOLE_ADDRESS` | Console bind address | `0.0.0.0:9001` |
| `RUSTFS_ACCESS_KEY` | S3 access key | `rustfsadmin` |
| `RUSTFS_SECRET_KEY` | S3 secret key | `rustfsadmin` |
| `RUSTFS_LOG_LEVEL` | Log level | `info` |
| `RUSTFS_OBS_ENDPOINT` | Observability endpoint | `""` |
| `RUSTFS_TLS_PATH` | TLS certificates path | `""` |
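For repeatable deployments these settings can live in an env file instead of the command line; the file name and values below are illustrative:

```bash
# Write an illustrative env file and pass it to docker run
cat > rustfs.env <<'EOF'
RUSTFS_VOLUMES=/data/rustfs0,/data/rustfs1,/data/rustfs2,/data/rustfs3
RUSTFS_ADDRESS=0.0.0.0:9000
RUSTFS_CONSOLE_ENABLE=true
RUSTFS_LOG_LEVEL=info
EOF

docker run -d --env-file rustfs.env -p 9000:9000 rustfs/rustfs:latest
```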
### Volume Mounts

- **Data volumes**: `/data/rustfs{0,1,2,3}` - RustFS data storage
- **Logs**: `/app/logs` - Application logs
- **Config**: `/etc/rustfs/` - Configuration files
- **TLS**: `/etc/ssl/rustfs/` - TLS certificates

### Ports

- **9000**: S3 API endpoint
- **9001**: Web console (if enabled)
- **9002**: Admin API (if enabled)
- **50051**: gRPC API (if enabled)
## 🔍 Monitoring and Observability

### Health Checks

The Docker images include built-in health checks:

```bash
# Check container health
docker ps --filter "name=rustfs" --format "table {{.Names}}\t{{.Status}}"

# View health check logs
docker inspect rustfs --format='{{json .State.Health}}'
```
### Metrics and Tracing

When using the observability profile:

- **Prometheus**: http://localhost:9090
- **Grafana**: http://localhost:3000 (admin/admin)
- **Jaeger**: http://localhost:16686
- **OpenTelemetry Collector**: http://localhost:8888/metrics
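If you run your own Prometheus instead of the bundled profile, a scrape job along these lines can pick up the collector's metrics endpoint listed above (job name and target are illustrative):

```yaml
scrape_configs:
  - job_name: otel-collector
    static_configs:
      - targets: ["localhost:8888"]
```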
### Log Collection

```bash
# View container logs
docker logs rustfs -f

# Export logs
docker logs rustfs > rustfs.log 2>&1
```
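To keep container logs from growing without bound, Docker's standard json-file rotation options can be set at run time:

```bash
docker run -d --name rustfs \
  --log-driver json-file \
  --log-opt max-size=10m \
  --log-opt max-file=3 \
  rustfs/rustfs:latest
```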
## 🛠️ Development

### Development Environment

```bash
# Start development container
docker-compose --profile dev up -d rustfs-dev

# Access development container
docker exec -it rustfs-dev bash

# Mount source code for live development
docker run -it --rm \
  -v $(pwd):/root/s3-rustfs \
  -p 9000:9000 \
  rustfs/rustfs:devenv \
  bash
```
### Building from Source in Container

```bash
# Use development image for building
docker run --rm \
  -v $(pwd):/root/s3-rustfs \
  -w /root/s3-rustfs \
  rustfs/rustfs:ubuntu22.04 \
  cargo build --release --bin rustfs
```
### Testing Cross-Compilation

```bash
# Run the test script to verify cross-compilation setup
./scripts/test-cross-build.sh

# This will test:
# - x86_64-unknown-linux-musl compilation
# - aarch64-unknown-linux-gnu cross-compilation
# - Docker builds for both architectures
```
## 🔐 Security

### Security Scanning

The workflow includes Trivy security scanning:

```bash
# Run security scan locally
# (the cache mount below is the macOS path; use ~/.cache on Linux)
docker run --rm -v /var/run/docker.sock:/var/run/docker.sock \
  -v $HOME/Library/Caches:/root/.cache/ \
  aquasec/trivy:latest image rustfs/rustfs:latest
```
### Security Best Practices

1. **Use non-root user**: Images run as `rustfs` user (UID 1000)
2. **Minimal base images**: Ubuntu minimal for production
3. **Security updates**: Regular base image updates
4. **Secret management**: Use Docker secrets or environment files
5. **Network security**: Use Docker networks and proper firewall rules
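As one concrete example of the network point, a user-defined bridge network lets other containers reach RustFS by name while only the S3 port is published to the host:

```bash
docker network create rustfs-net
docker run -d --name rustfs --network rustfs-net -p 9000:9000 rustfs/rustfs:latest
# Containers on rustfs-net can now reach http://rustfs:9000 directly
```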
## 📝 Troubleshooting

### Common Issues

#### 1. Cross-Compilation Failures

**Problem**: ARM64 build fails with linking errors

```bash
error: linking with `aarch64-linux-gnu-gcc` failed
```

**Solution**: Use the `cross` tool instead of native cross-compilation:

```bash
# Install cross tool
cargo install cross --git https://github.com/cross-rs/cross

# Use cross for ARM64 builds
cross build --release --target aarch64-unknown-linux-gnu --bin rustfs
```

#### 2. Protobuf Generation Issues

**Problem**: Missing protobuf definitions

```bash
error: failed to run custom build command for `protos`
```

**Solution**: Generate protobuf code first:

```bash
cargo run --bin gproto
```

#### 3. Docker Build Failures

**Problem**: Binary not found in Docker build

```bash
COPY failed: file not found in build context
```

**Solution**: Ensure binaries are built before the Docker build:

```bash
# Build binaries first
cargo build --release --target x86_64-unknown-linux-musl --bin rustfs
cross build --release --target aarch64-unknown-linux-gnu --bin rustfs

# Then build Docker image
docker build .
```
### Debug Commands

```bash
# Check container status
docker ps -a

# View container logs
docker logs rustfs --tail 100

# Access container shell
docker exec -it rustfs bash

# Check resource usage
docker stats rustfs

# Inspect container configuration
docker inspect rustfs

# Test cross-compilation setup
./scripts/test-cross-build.sh
```
## 🔄 CI/CD Integration

### GitHub Actions

The provided workflow can be customized:

```yaml
# Override image names
env:
  REGISTRY_IMAGE_DOCKERHUB: myorg/rustfs
  REGISTRY_IMAGE_GHCR: ghcr.io/myorg/rustfs
```

### GitLab CI

```yaml
build:
  stage: build
  image: docker:latest
  services:
    - docker:dind
  script:
    - docker build -t $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA .
    - docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
```

### Jenkins Pipeline

```groovy
pipeline {
    agent any
    stages {
        stage('Build') {
            steps {
                script {
                    docker.build("rustfs:${env.BUILD_ID}")
                }
            }
        }
        stage('Push') {
            steps {
                script {
                    docker.withRegistry('https://registry.hub.docker.com', 'dockerhub-credentials') {
                        docker.image("rustfs:${env.BUILD_ID}").push()
                    }
                }
            }
        }
    }
}
```
## 📚 Additional Resources

- [Docker Official Documentation](https://docs.docker.com/)
- [Docker Compose Reference](https://docs.docker.com/compose/)
- [GitHub Actions Documentation](https://docs.github.com/en/actions)
- [Cross-compilation with Rust](https://rust-lang.github.io/rustup/cross-compilation.html)
- [Cross tool documentation](https://github.com/cross-rs/cross)
- [RustFS Configuration Guide](../README.md)

@@ -1,57 +0,0 @@
## Summary

This PR modifies the GitHub Actions workflows to ensure that **version releases never get skipped** during CI/CD execution, addressing the issue where duplicate action detection could skip important release processes.

## Changes Made

### 🔧 Core Modifications

1. **Modified skip-duplicate-actions configuration**:
   - Added the `skip_after_successful_duplicate: ${{ !startsWith(github.ref, 'refs/tags/') }}` parameter
   - This ensures tag pushes (version releases) are never skipped due to duplicate detection

2. **Updated workflow job conditions**:
   - **CI Workflow** (`ci.yml`): Modified `test-and-lint` and `e2e-tests` jobs
   - **Build Workflow** (`build.yml`): Modified `build-check`, `build-rustfs`, `build-gui`, `release`, and `upload-oss` jobs
   - All jobs now use the condition: `startsWith(github.ref, 'refs/tags/') || needs.skip-check.outputs.should_skip != 'true'`

### 🎯 Problem Solved

- **Before**: Version releases could be skipped if there were concurrent workflows or duplicate actions
- **After**: Tag pushes always trigger complete CI/CD pipeline execution, ensuring:
  - ✅ Full test suite execution
  - ✅ Code quality checks (fmt, clippy)
  - ✅ Multi-platform builds (Linux, macOS, Windows)
  - ✅ GUI builds for releases
  - ✅ Release asset creation
  - ✅ OSS uploads

### 🚀 Benefits

1. **Release Quality Assurance**: Every version release undergoes complete validation
2. **Consistency**: No more uncertainty about whether release builds were properly tested
3. **Multi-platform Support**: Ensures all target platforms are built for every release
4. **Backward Compatibility**: Non-release workflows still benefit from duplicate-skip optimization

## Testing

- [x] Workflow syntax validated
- [x] Logic conditions verified for both tag and non-tag scenarios
- [x] Maintains existing optimization for development builds
- [x] Follows project coding standards and commit conventions

## Related Issues

This resolves the concern about workflow skipping during version releases, ensuring complete CI/CD execution for all published versions.

## Checklist

- [x] Code follows project formatting standards
- [x] Commit message follows Conventional Commits format
- [x] Changes are backwards compatible
- [x] No breaking changes introduced
- [x] All workflow conditions properly tested

---

**Note**: This change only affects the execution logic for tag pushes (version releases). Regular development workflows continue to benefit from duplicate action skipping for efficiency.
@@ -34,6 +34,7 @@ path = "src/main.rs"
workspace = true

[dependencies]
rustfs-ahm = { workspace = true }
rustfs-zip = { workspace = true }
rustfs-madmin = { workspace = true }
rustfs-s3select-api = { workspace = true }
@@ -66,7 +67,6 @@ hyper.workspace = true
hyper-util.workspace = true
http.workspace = true
http-body.workspace = true
lazy_static.workspace = true
matchit = { workspace = true }
mime_guess = { workspace = true }
opentelemetry = { workspace = true }
rustfs/src/admin/console.rs — new file, 401 lines
@@ -0,0 +1,401 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// use crate::license::get_license;
use axum::{
    // Router,
    body::Body,
    http::{Response, StatusCode},
    response::IntoResponse,
    // routing::get,
};
// use axum_extra::extract::Host;
// use rustfs_config::{RUSTFS_TLS_CERT, RUSTFS_TLS_KEY};
// use rustfs_utils::net::parse_and_resolve_address;
// use std::io;

use http::Uri;
// use axum::response::Redirect;
// use axum_server::tls_rustls::RustlsConfig;
// use http::{HeaderMap, HeaderName, Uri, header};
use mime_guess::from_path;
use rust_embed::RustEmbed;
// use serde::Serialize;
// use shadow_rs::shadow;
// use std::net::{IpAddr, SocketAddr};
// use std::sync::OnceLock;
// use std::time::Duration;
// use tokio::signal;
// use tower_http::cors::{Any, CorsLayer};
// use tower_http::trace::TraceLayer;
// use tracing::{debug, error, info, instrument};

// shadow!(build);

// const RUSTFS_ADMIN_PREFIX: &str = "/rustfs/admin/v3";

#[derive(RustEmbed)]
#[folder = "$CARGO_MANIFEST_DIR/static"]
struct StaticFiles;

/// Static file handler
pub(crate) async fn static_handler(uri: Uri) -> impl IntoResponse {
    let mut path = uri.path().trim_start_matches('/');
    if path.is_empty() {
        path = "index.html"
    }
    if let Some(file) = StaticFiles::get(path) {
        let mime_type = from_path(path).first_or_octet_stream();
        Response::builder()
            .status(StatusCode::OK)
            .header("Content-Type", mime_type.to_string())
            .body(Body::from(file.data))
            .unwrap()
    } else if let Some(file) = StaticFiles::get("index.html") {
        let mime_type = from_path("index.html").first_or_octet_stream();
        Response::builder()
            .status(StatusCode::OK)
            .header("Content-Type", mime_type.to_string())
            .body(Body::from(file.data))
            .unwrap()
    } else {
        Response::builder()
            .status(StatusCode::NOT_FOUND)
            .body(Body::from("404 Not Found"))
            .unwrap()
    }
}

// #[derive(Debug, Serialize, Clone)]
// pub(crate) struct Config {
//     #[serde(skip)]
//     port: u16,
//     api: Api,
//     s3: S3,
//     release: Release,
//     license: License,
//     doc: String,
// }

// impl Config {
//     fn new(local_ip: IpAddr, port: u16, version: &str, date: &str) -> Self {
//         Config {
//             port,
//             api: Api {
//                 base_url: format!("http://{local_ip}:{port}/{RUSTFS_ADMIN_PREFIX}"),
//             },
//             s3: S3 {
//                 endpoint: format!("http://{local_ip}:{port}"),
//                 region: "cn-east-1".to_owned(),
//             },
//             release: Release {
//                 version: version.to_string(),
//                 date: date.to_string(),
//             },
//             license: License {
//                 name: "Apache-2.0".to_string(),
//                 url: "https://www.apache.org/licenses/LICENSE-2.0".to_string(),
//             },
//             doc: "https://rustfs.com/docs/".to_string(),
//         }
//     }

//     fn to_json(&self) -> String {
//         serde_json::to_string(self).unwrap_or_default()
//     }

//     pub(crate) fn version_info(&self) -> String {
//         format!(
//             "RELEASE.{}@{} (rust {} {})",
//             self.release.date.clone(),
//             self.release.version.clone().trim_start_matches('@'),
//             build::RUST_VERSION,
//             build::BUILD_TARGET
//         )
//     }

//     pub(crate) fn version(&self) -> String {
//         self.release.version.clone()
//     }

//     pub(crate) fn license(&self) -> String {
//         format!("{} {}", self.license.name.clone(), self.license.url.clone())
//     }

//     pub(crate) fn doc(&self) -> String {
//         self.doc.clone()
//     }
// }

// #[derive(Debug, Serialize, Clone)]
// struct Api {
//     #[serde(rename = "baseURL")]
//     base_url: String,
// }

// #[derive(Debug, Serialize, Clone)]
// struct S3 {
//     endpoint: String,
//     region: String,
// }

// #[derive(Debug, Serialize, Clone)]
// struct Release {
//     version: String,
//     date: String,
// }

// #[derive(Debug, Serialize, Clone)]
// struct License {
//     name: String,
//     url: String,
// }

// pub(crate) static CONSOLE_CONFIG: OnceLock<Config> = OnceLock::new();

// #[allow(clippy::const_is_empty)]
// pub(crate) fn init_console_cfg(local_ip: IpAddr, port: u16) {
//     CONSOLE_CONFIG.get_or_init(|| {
//         let ver = {
//             if !build::TAG.is_empty() {
//                 build::TAG.to_string()
//             } else if !build::SHORT_COMMIT.is_empty() {
//                 format!("@{}", build::SHORT_COMMIT)
//             } else {
//                 build::PKG_VERSION.to_string()
//             }
//         };

//         Config::new(local_ip, port, ver.as_str(), build::COMMIT_DATE_3339)
//     });
// }

// // fn is_socket_addr_or_ip_addr(host: &str) -> bool {
// //     host.parse::<SocketAddr>().is_ok() || host.parse::<IpAddr>().is_ok()
// // }

// #[allow(dead_code)]
// async fn license_handler() -> impl IntoResponse {
//     let license = get_license().unwrap_or_default();

//     Response::builder()
//         .header("content-type", "application/json")
//         .status(StatusCode::OK)
//         .body(Body::from(serde_json::to_string(&license).unwrap_or_default()))
//         .unwrap()
// }

// fn _is_private_ip(ip: IpAddr) -> bool {
//     match ip {
//         IpAddr::V4(ip) => {
//             let octets = ip.octets();
//             // 10.0.0.0/8
//             octets[0] == 10 ||
//             // 172.16.0.0/12
//             (octets[0] == 172 && (octets[1] >= 16 && octets[1] <= 31)) ||
//             // 192.168.0.0/16
//             (octets[0] == 192 && octets[1] == 168)
//         }
//         IpAddr::V6(_) => false,
//     }
// }

// #[allow(clippy::const_is_empty)]
// #[allow(dead_code)]
// #[instrument(fields(host))]
// async fn config_handler(uri: Uri, Host(host): Host, headers: HeaderMap) -> impl IntoResponse {
//     // Get the scheme from the headers or use the URI scheme
//     let scheme = headers
//         .get(HeaderName::from_static("x-forwarded-proto"))
//         .and_then(|value| value.to_str().ok())
//         .unwrap_or_else(|| uri.scheme().map(|s| s.as_str()).unwrap_or("http"));

//     // Print logs for debugging
//     info!("Scheme: {}, ", scheme);

//     // Get the host from the uri and use the value of the host extractor if it doesn't have one
//     let host = uri.host().unwrap_or(host.as_str());

//     let host = if let Ok(socket_addr) = host.parse::<SocketAddr>() {
//         // Successfully parsed, it's in IP:Port format.
//         // For IPv6, we need to enclose it in brackets to form a valid URL.
//         let ip = socket_addr.ip();
//         if ip.is_ipv6() { format!("[{ip}]") } else { format!("{ip}") }
//     } else {
//         // Failed to parse, it might be a domain name or a bare IP, use it as is.
//         host.to_string()
//     };

//     // Make a copy of the current configuration
//     let mut cfg = match CONSOLE_CONFIG.get() {
//         Some(cfg) => cfg.clone(),
//         None => {
//             error!("Console configuration not initialized");
//             return Response::builder()
//                 .status(StatusCode::INTERNAL_SERVER_ERROR)
//                 .body(Body::from("Console configuration not initialized"))
//                 .unwrap();
//         }
//     };

//     let url = format!("{}://{}:{}", scheme, host, cfg.port);
//     cfg.api.base_url = format!("{url}{RUSTFS_ADMIN_PREFIX}");
//     cfg.s3.endpoint = url;

//     Response::builder()
//         .header("content-type", "application/json")
//         .status(StatusCode::OK)
//         .body(Body::from(cfg.to_json()))
//         .unwrap()
// }

// pub fn register_router() -> Router {
//     Router::new()
//         // .route("/license", get(license_handler))
//         // .route("/config.json", get(config_handler))
//         .fallback_service(get(static_handler))
// }

// #[allow(dead_code)]
// pub async fn start_static_file_server(
//     addrs: &str,
//     local_ip: IpAddr,
//     access_key: &str,
//     secret_key: &str,
//     tls_path: Option<String>,
// ) {
//     // Configure CORS
//     let cors = CorsLayer::new()
//         .allow_origin(Any) // In the production environment, we recommend that you specify a specific domain name
//         .allow_methods([http::Method::GET, http::Method::POST])
//         .allow_headers([header::CONTENT_TYPE]);

//     // Create a route
//     let app = register_router()
//         .layer(cors)
//         .layer(tower_http::compression::CompressionLayer::new().gzip(true).deflate(true))
//         .layer(TraceLayer::new_for_http());

//     let server_addr = parse_and_resolve_address(addrs).expect("Failed to parse socket address");
//     let server_port = server_addr.port();
//     let server_address = server_addr.to_string();

//     info!(
//         "WebUI: http://{}:{} http://127.0.0.1:{} http://{}",
//         local_ip, server_port, server_port, server_address
//     );
//     info!(" RootUser: {}", access_key);
//     info!(" RootPass: {}", secret_key);

//     // Check and start the HTTPS/HTTP server
//     match start_server(server_addr, tls_path, app.clone()).await {
//         Ok(_) => info!("Server shutdown gracefully"),
//         Err(e) => error!("Server error: {}", e),
//     }
// }

// async fn start_server(server_addr: SocketAddr, tls_path: Option<String>, app: Router) -> io::Result<()> {
//     let tls_path = tls_path.unwrap_or_default();
//     let key_path = format!("{tls_path}/{RUSTFS_TLS_KEY}");
//     let cert_path = format!("{tls_path}/{RUSTFS_TLS_CERT}");
//     let handle = axum_server::Handle::new();
//     // create a signal off listening task
//     let handle_clone = handle.clone();
//     tokio::spawn(async move {
//         shutdown_signal().await;
//         info!("Initiating graceful shutdown...");
//         handle_clone.graceful_shutdown(Some(Duration::from_secs(10)));
//     });

//     let has_tls_certs = tokio::try_join!(tokio::fs::metadata(&key_path), tokio::fs::metadata(&cert_path)).is_ok();
//     info!("Console TLS certs: {:?}", has_tls_certs);
//     if has_tls_certs {
//         info!("Found TLS certificates, starting with HTTPS");
//         match RustlsConfig::from_pem_file(cert_path, key_path).await {
//             Ok(config) => {
//                 info!("Starting HTTPS server...");
//                 axum_server::bind_rustls(server_addr, config)
//                     .handle(handle.clone())
//                     .serve(app.into_make_service())
//                     .await
//                     .map_err(io::Error::other)?;

//                 info!("HTTPS server running on https://{}", server_addr);

//                 Ok(())
//             }
//             Err(e) => {
//                 error!("Failed to create TLS config: {}", e);
//                 start_http_server(server_addr, app, handle).await
//             }
//         }
//     } else {
//         info!("TLS certificates not found at {} and {}", key_path, cert_path);
//         start_http_server(server_addr, app, handle).await
//     }
// }

// #[allow(dead_code)]
// /// 308 redirect for HTTP to HTTPS
// fn redirect_to_https(https_port: u16) -> Router {
//     Router::new().route(
//         "/*path",
//         get({
//             move |uri: Uri, req: http::Request<Body>| async move {
//                 let host = req
//                     .headers()
//                     .get("host")
//                     .map_or("localhost", |h| h.to_str().unwrap_or("localhost"));
//                 let path = uri.path_and_query().map(|pq| pq.as_str()).unwrap_or("");
//                 let https_url = format!("https://{host}:{https_port}{path}");
//                 Redirect::permanent(&https_url)
//             }
//         }),
//     )
// }

// async fn start_http_server(addr: SocketAddr, app: Router, handle: axum_server::Handle) -> io::Result<()> {
//     debug!("Starting HTTP server...");
//     axum_server::bind(addr)
//         .handle(handle)
//         .serve(app.into_make_service())
//         .await
//         .map_err(io::Error::other)
// }

// async fn shutdown_signal() {
//     let ctrl_c = async {
//         signal::ctrl_c().await.expect("failed to install Ctrl+C handler");
//     };

//     #[cfg(unix)]
//     let terminate = async {
//         signal::unix::signal(signal::unix::SignalKind::terminate())
//             .expect("failed to install signal handler")
//             .recv()
//             .await;
//     };

//     #[cfg(not(unix))]
//     let terminate = std::future::pending::<()>();

//     tokio::select! {
//         _ = ctrl_c => {
//             info!("shutdown_signal ctrl_c")
//         },
//         _ = terminate => {
//             info!("shutdown_signal terminate")
//         },
//     }
// }
@@ -31,6 +31,7 @@ use rustfs_ecstore::cmd::bucket_targets::{self, GLOBAL_Bucket_Target_Sys};
use rustfs_ecstore::error::StorageError;
use rustfs_ecstore::global::GLOBAL_ALlHealState;
use rustfs_ecstore::global::get_global_action_cred;
// use rustfs_ecstore::heal::data_usage::load_data_usage_from_backend;
use rustfs_ecstore::heal::data_usage::load_data_usage_from_backend;
use rustfs_ecstore::heal::heal_commands::HealOpts;
use rustfs_ecstore::heal::heal_ops::new_heal_sequence;

@@ -596,6 +596,7 @@ impl Operation for ImportBucketMetadata {

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}

@@ -19,6 +19,7 @@ use matchit::Params;
use rustfs_config::notify::{NOTIFY_MQTT_SUB_SYS, NOTIFY_WEBHOOK_SUB_SYS};
use rustfs_notify::EventName;
use rustfs_notify::rules::{BucketNotificationConfig, PatternRules};
use s3s::header::CONTENT_LENGTH;
use s3s::{Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result, header::CONTENT_TYPE, s3_error};
use serde::{Deserialize, Serialize};
use serde_urlencoded::from_bytes;
@@ -103,6 +104,7 @@ impl Operation for SetNotificationTarget {

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
@@ -181,6 +183,7 @@ impl Operation for RemoveNotificationTarget {

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
@@ -226,6 +229,7 @@ impl Operation for SetBucketNotification {

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
@@ -289,6 +293,7 @@ impl Operation for RemoveBucketNotification {

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}

@@ -17,7 +17,11 @@ use matchit::Params;
use rustfs_ecstore::global::get_global_action_cred;
use rustfs_iam::error::{is_err_no_such_group, is_err_no_such_user};
use rustfs_madmin::GroupAddRemove;
use s3s::{Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result, header::CONTENT_TYPE, s3_error};
use s3s::{
    Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result,
    header::{CONTENT_LENGTH, CONTENT_TYPE},
    s3_error,
};
use serde::Deserialize;
use serde_urlencoded::from_bytes;
use tracing::warn;
@@ -129,7 +133,7 @@ impl Operation for SetGroupStatus {

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());

        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
@@ -214,7 +218,7 @@ impl Operation for UpdateGroupMembers {

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());

        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}

@@ -19,7 +19,11 @@ use rustfs_ecstore::global::get_global_action_cred;
use rustfs_iam::error::is_err_no_such_user;
use rustfs_iam::store::MappedPolicy;
use rustfs_policy::policy::Policy;
use s3s::{Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result, header::CONTENT_TYPE, s3_error};
use s3s::{
    Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result,
    header::{CONTENT_LENGTH, CONTENT_TYPE},
    s3_error,
};
use serde::Deserialize;
use serde_urlencoded::from_bytes;
use std::collections::HashMap;
@@ -123,7 +127,7 @@ impl Operation for AddCannedPolicy {

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());

        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
@@ -198,7 +202,7 @@ impl Operation for RemoveCannedPolicy {

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());

        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
@@ -284,7 +288,7 @@ impl Operation for SetPolicyForUserOrGroup {

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());

        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}

@@ -22,7 +22,11 @@ use rustfs_ecstore::{
    rebalance::{DiskStat, RebalSaveOpt},
    store_api::BucketOptions,
};
use s3s::{Body, S3Request, S3Response, S3Result, header::CONTENT_TYPE, s3_error};
use s3s::{
    Body, S3Request, S3Response, S3Result,
    header::{CONTENT_LENGTH, CONTENT_TYPE},
    s3_error,
};
use serde::{Deserialize, Serialize};
use std::time::Duration;
use time::OffsetDateTime;
@@ -265,7 +269,10 @@ impl Operation for RebalanceStop {
            warn!("handle RebalanceStop notification_sys load_rebalance_meta done");
        }

        Ok(S3Response::new((StatusCode::OK, Body::empty())))
        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}

@@ -28,6 +28,7 @@ use rustfs_madmin::{
use rustfs_policy::policy::action::{Action, AdminAction};
use rustfs_policy::policy::{Args, Policy};
use s3s::S3ErrorCode::InvalidRequest;
use s3s::header::CONTENT_LENGTH;
use s3s::{Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result, header::CONTENT_TYPE, s3_error};
use serde::Deserialize;
use serde_urlencoded::from_bytes;
@@ -306,7 +307,7 @@ impl Operation for UpdateServiceAccount {

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());

        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
@@ -607,7 +608,7 @@ impl Operation for DeleteServiceAccount {

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());

        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}

@@ -16,7 +16,11 @@
use http::{HeaderMap, StatusCode};
//use iam::get_global_action_cred;
use matchit::Params;
use s3s::{Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result, header::CONTENT_TYPE, s3_error};
use s3s::{
    Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result,
    header::{CONTENT_LENGTH, CONTENT_TYPE},
    s3_error,
};
use serde_urlencoded::from_bytes;
use time::OffsetDateTime;
use tracing::{debug, warn};
@@ -169,7 +173,7 @@ impl Operation for AddTier {

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());

        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
@@ -236,7 +240,7 @@ impl Operation for EditTier {

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());

        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
@@ -332,7 +336,7 @@ impl Operation for RemoveTier {

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());

        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
@@ -366,7 +370,7 @@ impl Operation for VerifyTier {

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());

        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
@@ -457,7 +461,7 @@ impl Operation for ClearTier {

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());

        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
@@ -636,7 +640,7 @@ impl Operation for PostRestoreObject {

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());

        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}*/

@@ -146,7 +146,7 @@ impl Operation for AddUser {

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());

        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
@@ -193,7 +193,7 @@ impl Operation for SetUserStatus {

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());

        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}
@@ -310,7 +310,7 @@ impl Operation for RemoveUser {

        let mut header = HeaderMap::new();
        header.insert(CONTENT_TYPE, "application/json".parse().unwrap());

        header.insert(CONTENT_LENGTH, "0".parse().unwrap());
        Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
    }
}

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

pub mod console;
pub mod handlers;
pub mod router;
mod rpc;
@@ -32,8 +33,8 @@ use s3s::route::S3Route;

const ADMIN_PREFIX: &str = "/rustfs/admin";

pub fn make_admin_route() -> std::io::Result<impl S3Route> {
    let mut r: S3Router<AdminOperation> = S3Router::new();
pub fn make_admin_route(console_enabled: bool) -> std::io::Result<impl S3Route> {
    let mut r: S3Router<AdminOperation> = S3Router::new(console_enabled);

    // 1
    r.insert(Method::POST, "/", AdminOperation(&sts::AssumeRoleHandle {}))?;

@@ -12,6 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use axum::routing::get;
use hyper::HeaderMap;
use hyper::Method;
use hyper::StatusCode;
@@ -27,20 +28,41 @@ use s3s::S3Result;
use s3s::header;
use s3s::route::S3Route;
use s3s::s3_error;
use tower::Service;
use tracing::error;

use super::ADMIN_PREFIX;
use super::rpc::RPC_PREFIX;
use crate::admin::ADMIN_PREFIX;
use crate::admin::console;
use crate::admin::rpc::RPC_PREFIX;

const CONSOLE_PREFIX: &str = "/rustfs/console";

pub struct S3Router<T> {
    router: Router<T>,
    console_enabled: bool,
    console_router: Option<axum::routing::RouterIntoService<Body>>,
}

impl<T: Operation> S3Router<T> {
    pub fn new() -> Self {
    pub fn new(console_enabled: bool) -> Self {
        let router = Router::new();

        Self { router }
        let console_router = if console_enabled {
            Some(
                axum::Router::new()
                    .nest(CONSOLE_PREFIX, axum::Router::new().fallback_service(get(console::static_handler)))
                    .fallback_service(get(console::static_handler))
                    .into_service::<Body>(),
            )
        } else {
            None
        };

        Self {
            router,
            console_enabled,
            console_router,
        }
    }

    pub fn insert(&mut self, method: Method, path: &str, operation: T) -> std::io::Result<()> {
@@ -60,7 +82,7 @@ impl<T: Operation> S3Router<T> {

impl<T: Operation> Default for S3Router<T> {
    fn default() -> Self {
        Self::new()
        Self::new(false)
    }
}

@@ -79,10 +101,23 @@ where
        }
    }

    uri.path().starts_with(ADMIN_PREFIX) || uri.path().starts_with(RPC_PREFIX)
    uri.path().starts_with(ADMIN_PREFIX) || uri.path().starts_with(RPC_PREFIX) || uri.path().starts_with(CONSOLE_PREFIX)
    }

    async fn call(&self, req: S3Request<Body>) -> S3Result<S3Response<Body>> {
        if self.console_enabled && req.uri.path().starts_with(CONSOLE_PREFIX) {
            if let Some(console_router) = &self.console_router {
                let mut console_router = console_router.clone();
                let req = convert_request(req);
                let result = console_router.call(req).await;
                return match result {
                    Ok(resp) => Ok(convert_response(resp)),
                    Err(e) => Err(s3_error!(InternalError, "{}", e)),
                };
            }
            return Err(s3_error!(InternalError, "console is not enabled"));
        }

        let uri = format!("{}|{}", &req.method, req.uri.path());

        // warn!("get uri {}", &uri);
@@ -99,6 +134,10 @@ where

    // check_access before call
    async fn check_access(&self, req: &mut S3Request<Body>) -> S3Result<()> {
        if self.console_enabled && req.uri.path().starts_with(CONSOLE_PREFIX) {
            return Ok(());
        }

        // Check RPC signature verification
        if req.uri.path().starts_with(RPC_PREFIX) {
            // Skip signature verification for HEAD requests (health checks)
@@ -134,3 +173,34 @@ impl Operation for AdminOperation {
        self.0.call(req, params).await
    }
}

#[allow(dead_code)]
#[derive(Debug, Clone)]
pub struct Extra {
    pub credentials: Option<s3s::auth::Credentials>,
    pub region: Option<String>,
    pub service: Option<String>,
}

fn convert_request(req: S3Request<Body>) -> http::Request<Body> {
    let (mut parts, _) = http::Request::new(Body::empty()).into_parts();
    parts.method = req.method;
    parts.uri = req.uri;
    parts.headers = req.headers;
    parts.extensions = req.extensions;
    parts.extensions.insert(Extra {
        credentials: req.credentials,
        region: req.region,
        service: req.service,
    });
    http::Request::from_parts(parts, req.input)
}

fn convert_response(resp: http::Response<axum::body::Body>) -> S3Response<Body> {
    let (parts, body) = resp.into_parts();
    let mut s3_resp = S3Response::new(Body::http_body_unsync(body));
    s3_resp.status = Some(parts.status);
    s3_resp.headers = parts.headers;
    s3_resp.extensions = parts.extensions;
    s3_resp
}

@@ -68,15 +68,7 @@ pub struct Opt {
    #[arg(long, default_value_t = true, env = "RUSTFS_CONSOLE_ENABLE")]
    pub console_enable: bool,

    /// Console server bind address
    #[arg(long, default_value_t = rustfs_config::DEFAULT_CONSOLE_ADDRESS.to_string(), env = "RUSTFS_CONSOLE_ADDRESS")]
    pub console_address: String,

    /// rustfs endpoint for console
    #[arg(long, env = "RUSTFS_CONSOLE_FS_ENDPOINT")]
    pub console_fs_endpoint: Option<String>,

    /// Observability configuration file
    /// Observability endpoint for trace, metrics and logs; only supports grpc mode.
    #[arg(long, default_value_t = rustfs_config::DEFAULT_OBS_ENDPOINT.to_string(), env = "RUSTFS_OBS_ENDPOINT")]
    pub obs_endpoint: String,

@@ -1,391 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use crate::license::get_license;
|
||||
use axum::{
|
||||
Router,
|
||||
body::Body,
|
||||
http::{Response, StatusCode},
|
||||
response::IntoResponse,
|
||||
routing::get,
|
||||
};
|
||||
use axum_extra::extract::Host;
|
||||
use rustfs_config::{RUSTFS_TLS_CERT, RUSTFS_TLS_KEY};
|
||||
use rustfs_utils::net::parse_and_resolve_address;
|
||||
use std::io;
|
||||
|
||||
use axum::response::Redirect;
|
||||
use axum_server::tls_rustls::RustlsConfig;
|
||||
use http::{HeaderMap, HeaderName, Uri, header};
|
||||
use mime_guess::from_path;
|
||||
use rust_embed::RustEmbed;
|
||||
use serde::Serialize;
|
||||
use shadow_rs::shadow;
|
||||
use std::net::{IpAddr, SocketAddr};
|
||||
use std::sync::OnceLock;
|
||||
use std::time::Duration;
|
||||
use tokio::signal;
|
||||
use tower_http::cors::{Any, CorsLayer};
|
||||
use tower_http::trace::TraceLayer;
|
||||
use tracing::{debug, error, info, instrument};
|
||||
|
||||
shadow!(build);
|
||||
|
||||
const RUSTFS_ADMIN_PREFIX: &str = "/rustfs/admin/v3";
|
||||
|
||||
#[derive(RustEmbed)]
|
||||
#[folder = "$CARGO_MANIFEST_DIR/static"]
|
||||
struct StaticFiles;
|
||||
|
||||
/// Static file handler
|
||||
async fn static_handler(uri: Uri) -> impl IntoResponse {
|
||||
let mut path = uri.path().trim_start_matches('/');
|
||||
if path.is_empty() {
|
||||
path = "index.html"
|
||||
}
|
||||
if let Some(file) = StaticFiles::get(path) {
|
||||
let mime_type = from_path(path).first_or_octet_stream();
|
||||
Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header("Content-Type", mime_type.to_string())
|
||||
.body(Body::from(file.data))
|
||||
.unwrap()
|
||||
} else if let Some(file) = StaticFiles::get("index.html") {
|
||||
let mime_type = from_path("index.html").first_or_octet_stream();
|
||||
Response::builder()
|
||||
.status(StatusCode::OK)
|
||||
.header("Content-Type", mime_type.to_string())
|
||||
.body(Body::from(file.data))
|
||||
.unwrap()
|
||||
} else {
|
||||
Response::builder()
|
||||
.status(StatusCode::NOT_FOUND)
|
||||
.body(Body::from("404 Not Found"))
|
||||
.unwrap()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Clone)]
|
||||
pub(crate) struct Config {
|
||||
#[serde(skip)]
|
||||
port: u16,
|
||||
api: Api,
|
||||
s3: S3,
|
||||
release: Release,
|
||||
license: License,
|
||||
doc: String,
|
||||
}
|
||||
|
||||
impl Config {
|
||||
fn new(local_ip: IpAddr, port: u16, version: &str, date: &str) -> Self {
|
||||
Config {
|
||||
port,
|
||||
api: Api {
|
||||
base_url: format!("http://{local_ip}:{port}/{RUSTFS_ADMIN_PREFIX}"),
|
||||
},
|
||||
s3: S3 {
|
||||
endpoint: format!("http://{local_ip}:{port}"),
|
||||
region: "cn-east-1".to_owned(),
|
||||
},
|
||||
release: Release {
|
||||
version: version.to_string(),
|
||||
date: date.to_string(),
|
||||
},
|
||||
license: License {
|
||||
name: "Apache-2.0".to_string(),
|
||||
url: "https://www.apache.org/licenses/LICENSE-2.0".to_string(),
|
||||
},
|
||||
doc: "https://rustfs.com/docs/".to_string(),
|
||||
}
|
||||
}
|
||||
|
||||
fn to_json(&self) -> String {
|
||||
serde_json::to_string(self).unwrap_or_default()
|
||||
}
|
||||
|
||||
pub(crate) fn version_info(&self) -> String {
|
||||
format!(
|
||||
"RELEASE.{}@{} (rust {} {})",
|
||||
self.release.date.clone(),
|
||||
self.release.version.clone().trim_start_matches('@'),
|
||||
build::RUST_VERSION,
|
||||
build::BUILD_TARGET
|
||||
)
|
||||
}
|
||||
|
||||
pub(crate) fn version(&self) -> String {
|
||||
self.release.version.clone()
|
||||
}
|
||||
|
||||
pub(crate) fn license(&self) -> String {
|
||||
format!("{} {}", self.license.name.clone(), self.license.url.clone())
|
||||
}
|
||||
|
||||
pub(crate) fn doc(&self) -> String {
|
||||
self.doc.clone()
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Clone)]
|
||||
struct Api {
|
||||
#[serde(rename = "baseURL")]
|
||||
base_url: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Clone)]
|
||||
struct S3 {
|
||||
endpoint: String,
|
||||
region: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Clone)]
|
||||
struct Release {
|
||||
version: String,
|
||||
date: String,
|
||||
}
|
||||
|
||||
#[derive(Debug, Serialize, Clone)]
|
||||
struct License {
|
||||
name: String,
|
||||
url: String,
|
||||
}
|
||||
|
||||
pub(crate) static CONSOLE_CONFIG: OnceLock<Config> = OnceLock::new();
|
||||
|
||||
#[allow(clippy::const_is_empty)]
|
||||
pub(crate) fn init_console_cfg(local_ip: IpAddr, port: u16) {
|
||||
CONSOLE_CONFIG.get_or_init(|| {
|
||||
let ver = {
|
||||
if !build::TAG.is_empty() {
|
||||
build::TAG.to_string()
|
||||
} else if !build::SHORT_COMMIT.is_empty() {
|
||||
format!("@{}", build::SHORT_COMMIT)
|
||||
} else {
|
||||
build::PKG_VERSION.to_string()
|
||||
}
|
||||
};
|
||||
|
||||
Config::new(local_ip, port, ver.as_str(), build::COMMIT_DATE_3339)
|
||||
});
|
||||
}
|
||||
|
||||
// fn is_socket_addr_or_ip_addr(host: &str) -> bool {
|
||||
// host.parse::<SocketAddr>().is_ok() || host.parse::<IpAddr>().is_ok()
|
||||
// }
|
||||
|
||||
async fn license_handler() -> impl IntoResponse {
|
||||
let license = get_license().unwrap_or_default();
|
||||
|
||||
Response::builder()
|
||||
.header("content-type", "application/json")
|
||||
.status(StatusCode::OK)
|
||||
.body(Body::from(serde_json::to_string(&license).unwrap_or_default()))
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
fn _is_private_ip(ip: IpAddr) -> bool {
|
||||
match ip {
|
||||
IpAddr::V4(ip) => {
|
||||
let octets = ip.octets();
|
||||
// 10.0.0.0/8
|
||||
octets[0] == 10 ||
|
||||
// 172.16.0.0/12
|
||||
(octets[0] == 172 && (octets[1] >= 16 && octets[1] <= 31)) ||
|
||||
// 192.168.0.0/16
|
||||
(octets[0] == 192 && octets[1] == 168)
|
||||
}
|
||||
IpAddr::V6(_) => false,
|
||||
}
|
||||
}
|
||||
|
||||
#[allow(clippy::const_is_empty)]
|
||||
#[instrument(fields(host))]
|
||||
async fn config_handler(uri: Uri, Host(host): Host, headers: HeaderMap) -> impl IntoResponse {
|
||||
// Get the scheme from the headers or use the URI scheme
|
||||
let scheme = headers
|
||||
.get(HeaderName::from_static("x-forwarded-proto"))
|
||||
.and_then(|value| value.to_str().ok())
|
||||
.unwrap_or_else(|| uri.scheme().map(|s| s.as_str()).unwrap_or("http"));
|
||||
|
||||
// Print logs for debugging
|
||||
info!("Scheme: {}, ", scheme);
|
||||
|
||||
// Get the host from the uri and use the value of the host extractor if it doesn't have one
|
||||
let host = uri.host().unwrap_or(host.as_str());
|
||||
|
||||
let host = if let Ok(socket_addr) = host.parse::<SocketAddr>() {
|
||||
// Successfully parsed, it's in IP:Port format.
|
||||
// For IPv6, we need to enclose it in brackets to form a valid URL.
|
||||
let ip = socket_addr.ip();
|
||||
if ip.is_ipv6() { format!("[{ip}]") } else { format!("{ip}") }
|
||||
} else {
|
||||
// Failed to parse, it might be a domain name or a bare IP, use it as is.
|
||||
host.to_string()
|
||||
};
|
||||
|
||||
// Make a copy of the current configuration
|
||||
let mut cfg = match CONSOLE_CONFIG.get() {
|
||||
Some(cfg) => cfg.clone(),
|
||||
None => {
|
||||
error!("Console configuration not initialized");
|
||||
return Response::builder()
|
||||
.status(StatusCode::INTERNAL_SERVER_ERROR)
|
||||
.body(Body::from("Console configuration not initialized"))
|
||||
.unwrap();
|
||||
}
|
||||
};
|
||||
|
||||
let url = format!("{}://{}:{}", scheme, host, cfg.port);
|
||||
cfg.api.base_url = format!("{url}{RUSTFS_ADMIN_PREFIX}");
|
||||
cfg.s3.endpoint = url;
|
||||
|
||||
Response::builder()
|
||||
.header("content-type", "application/json")
|
||||
.status(StatusCode::OK)
|
||||
.body(Body::from(cfg.to_json()))
|
||||
.unwrap()
|
||||
}
|
||||
|
||||
pub async fn start_static_file_server(
|
||||
addrs: &str,
|
||||
local_ip: IpAddr,
|
||||
access_key: &str,
|
||||
secret_key: &str,
|
||||
tls_path: Option<String>,
|
||||
) {
|
||||
// Configure CORS
|
||||
let cors = CorsLayer::new()
|
||||
.allow_origin(Any) // In the production environment, we recommend that you specify a specific domain name
|
||||
.allow_methods([http::Method::GET, http::Method::POST])
|
||||
.allow_headers([header::CONTENT_TYPE]);
|
||||
// Create a route
|
||||
let app = Router::new()
|
||||
.route("/license", get(license_handler))
|
||||
.route("/config.json", get(config_handler))
|
||||
.fallback_service(get(static_handler))
|
||||
.layer(cors)
|
||||
.layer(tower_http::compression::CompressionLayer::new().gzip(true).deflate(true))
|
||||
.layer(TraceLayer::new_for_http());
|
||||
|
||||
let server_addr = parse_and_resolve_address(addrs).expect("Failed to parse socket address");
|
||||
let server_port = server_addr.port();
|
||||
let server_address = server_addr.to_string();
|
||||
|
||||
info!(
|
||||
"WebUI: http://{}:{} http://127.0.0.1:{} http://{}",
|
||||
local_ip, server_port, server_port, server_address
|
||||
);
|
||||
info!(" RootUser: {}", access_key);
|
||||
info!(" RootPass: {}", secret_key);
|
||||
|
||||
// Check and start the HTTPS/HTTP server
|
||||
match start_server(server_addr, tls_path, app.clone()).await {
|
||||
Ok(_) => info!("Server shutdown gracefully"),
|
||||
Err(e) => error!("Server error: {}", e),
|
||||
}
|
||||
}
|
||||
async fn start_server(server_addr: SocketAddr, tls_path: Option<String>, app: Router) -> io::Result<()> {
    let tls_path = tls_path.unwrap_or_default();
    let key_path = format!("{tls_path}/{RUSTFS_TLS_KEY}");
    let cert_path = format!("{tls_path}/{RUSTFS_TLS_CERT}");
    let handle = axum_server::Handle::new();
    // Spawn a task that listens for the shutdown signal
    let handle_clone = handle.clone();
    tokio::spawn(async move {
        shutdown_signal().await;
        info!("Initiating graceful shutdown...");
        handle_clone.graceful_shutdown(Some(Duration::from_secs(10)));
    });

    let has_tls_certs = tokio::try_join!(tokio::fs::metadata(&key_path), tokio::fs::metadata(&cert_path)).is_ok();
    info!("Console TLS certs: {:?}", has_tls_certs);
    if has_tls_certs {
        info!("Found TLS certificates, starting with HTTPS");
        match RustlsConfig::from_pem_file(cert_path, key_path).await {
            Ok(config) => {
                info!("Starting HTTPS server...");
                axum_server::bind_rustls(server_addr, config)
                    .handle(handle.clone())
                    .serve(app.into_make_service())
                    .await
                    .map_err(io::Error::other)?;

                info!("HTTPS server running on https://{}", server_addr);

                Ok(())
            }
            Err(e) => {
                error!("Failed to create TLS config: {}", e);
                start_http_server(server_addr, app, handle).await
            }
        }
    } else {
        info!("TLS certificates not found at {} and {}", key_path, cert_path);
        start_http_server(server_addr, app, handle).await
    }
}

#[allow(dead_code)]
/// 308 redirect for HTTP to HTTPS
fn redirect_to_https(https_port: u16) -> Router {
    Router::new().route(
        "/*path",
        get({
            move |uri: Uri, req: http::Request<Body>| async move {
                let host = req
                    .headers()
                    .get("host")
                    .map_or("localhost", |h| h.to_str().unwrap_or("localhost"));
                let path = uri.path_and_query().map(|pq| pq.as_str()).unwrap_or("");
                let https_url = format!("https://{host}:{https_port}{path}");
                Redirect::permanent(&https_url)
            }
        }),
    )
}

async fn start_http_server(addr: SocketAddr, app: Router, handle: axum_server::Handle) -> io::Result<()> {
    debug!("Starting HTTP server...");
    axum_server::bind(addr)
        .handle(handle)
        .serve(app.into_make_service())
        .await
        .map_err(io::Error::other)
}

async fn shutdown_signal() {
    let ctrl_c = async {
        signal::ctrl_c().await.expect("failed to install Ctrl+C handler");
    };

    #[cfg(unix)]
    let terminate = async {
        signal::unix::signal(signal::unix::SignalKind::terminate())
            .expect("failed to install signal handler")
            .recv()
            .await;
    };

    #[cfg(not(unix))]
    let terminate = std::future::pending::<()>();

    tokio::select! {
        _ = ctrl_c => {
            info!("shutdown_signal ctrl_c")
        },
        _ = terminate => {
            info!("shutdown_signal terminate")
        },
    }
}
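The same shutdown_signal future can also drive graceful shutdown of a plain axum server, without the axum_server Handle; a sketch assuming axum 0.7's serve API and a placeholder bind address:

    // serve() resolves once shutdown_signal() completes and in-flight
    // requests have drained.
    let listener = tokio::net::TcpListener::bind("0.0.0.0:9001").await?;
    axum::serve(listener, app).with_graceful_shutdown(shutdown_signal()).await?;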
@@ -1,78 +0,0 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use rustfs_config::DEFAULT_DELIMITER;
use rustfs_ecstore::config::GLOBAL_ServerConfig;
use tracing::{error, info, instrument};

#[instrument]
pub(crate) async fn init_event_notifier() {
    info!("Initializing event notifier...");

    // 1. Get the global configuration loaded by ecstore
    let server_config = match GLOBAL_ServerConfig.get() {
        Some(config) => config.clone(), // Clone the config to pass ownership
        None => {
            error!("Event notifier initialization failed: Global server config not loaded.");
            return;
        }
    };

    info!("Global server configuration loaded successfully. config: {:?}", server_config);
    // 2. Skip initialization unless both the MQTT and webhook notify subsystems are configured
    if server_config
        .get_value(rustfs_config::notify::NOTIFY_MQTT_SUB_SYS, DEFAULT_DELIMITER)
        .is_none()
        || server_config
            .get_value(rustfs_config::notify::NOTIFY_WEBHOOK_SUB_SYS, DEFAULT_DELIMITER)
            .is_none()
    {
        info!("'notify' subsystem not configured, skipping event notifier initialization.");
        return;
    }

    info!("Event notifier configuration found, proceeding with initialization.");

    // 3. Initialize the notification system asynchronously with the global configuration,
    //    in a separate task to avoid blocking the main initialization process
    tokio::spawn(async move {
        if let Err(e) = rustfs_notify::initialize(server_config).await {
            error!("Failed to initialize event notifier system: {}", e);
        } else {
            info!("Event notifier system initialized successfully.");
        }
    });
}

/// Shuts down the event notifier system gracefully
pub async fn shutdown_event_notifier() {
    info!("Shutting down event notifier system...");

    if !rustfs_notify::is_notification_system_initialized() {
        info!("Event notifier system is not initialized, nothing to shut down.");
        return;
    }

    let system = match rustfs_notify::notification_system() {
        Some(sys) => sys,
        None => {
            error!("Event notifier system is not initialized.");
            return;
        }
    };

    // Call the shutdown function from the rustfs_notify module
    system.shutdown().await;
    info!("Event notifier system shut down successfully.");
}
@@ -20,9 +20,7 @@ use std::time::UNIX_EPOCH;
use tracing::error;
use tracing::info;

lazy_static::lazy_static! {
    static ref LICENSE: OnceLock<Token> = OnceLock::new();
}
static LICENSE: OnceLock<Token> = OnceLock::new();

/// Initialize the license
pub fn init_license(license: Option<String>) {
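The hunk above drops a redundant lazy_static wrapper: OnceLock::new() is const, so a plain static needs no macro or extra dependency. A minimal sketch of the resulting pattern (Token is the license type used above; the two helper functions are hypothetical illustrations):

    use std::sync::OnceLock;

    static LICENSE: OnceLock<Token> = OnceLock::new();

    fn set_license(token: Token) {
        // set() returns Err if the cell is already filled; ignore repeat initialization here.
        let _ = LICENSE.set(token);
    }

    fn get_license() -> Option<&'static Token> {
        LICENSE.get()
    }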
@@ -15,98 +15,59 @@
mod admin;
mod auth;
mod config;
mod console;
mod error;
mod event;
// mod grpc;
pub mod license;
mod logging;
mod server;
mod service;
mod storage;
mod update_checker;
mod update;
mod version;

use crate::auth::IAMAuth;
use crate::console::{CONSOLE_CONFIG, init_console_cfg};
// Ensure the correct path for parse_license is imported
use crate::event::shutdown_event_notifier;
use crate::server::{SHUTDOWN_TIMEOUT, ServiceState, ServiceStateManager, ShutdownSignal, wait_for_shutdown};
use bytes::Bytes;
use crate::server::{SHUTDOWN_TIMEOUT, ServiceState, ServiceStateManager, ShutdownSignal, start_http_server, wait_for_shutdown};
use chrono::Datelike;
use clap::Parser;
use http::{HeaderMap, Request as HttpRequest, Response};
use hyper_util::server::graceful::GracefulShutdown;
use hyper_util::{
    rt::{TokioExecutor, TokioIo},
    server::conn::auto::Builder as ConnBuilder,
    service::TowerToHyperService,
};
use license::init_license;
use rustfs_ahm::scanner::data_scanner::ScannerConfig;
use rustfs_ahm::{Scanner, create_ahm_services_cancel_token, shutdown_ahm_services};
use rustfs_common::globals::set_global_addr;
use rustfs_config::{DEFAULT_ACCESS_KEY, DEFAULT_SECRET_KEY, RUSTFS_TLS_CERT, RUSTFS_TLS_KEY};
use rustfs_config::DEFAULT_DELIMITER;
use rustfs_ecstore::bucket::metadata_sys::init_bucket_metadata_sys;
use rustfs_ecstore::cmd::bucket_replication::init_bucket_replication_pool;
use rustfs_ecstore::config as ecconfig;
use rustfs_ecstore::config::GLOBAL_ConfigSys;
use rustfs_ecstore::heal::background_heal_ops::init_auto_heal;
use rustfs_ecstore::rpc::make_server;
use rustfs_ecstore::config::GLOBAL_ServerConfig;
use rustfs_ecstore::store_api::BucketOptions;
use rustfs_ecstore::{
    StorageAPI, endpoints::EndpointServerPools, global::set_global_rustfs_port, heal::data_scanner::init_data_scanner,
    notification_sys::new_global_notification_sys, set_global_endpoints, store::ECStore, store::init_local_disks,
    StorageAPI,
    endpoints::EndpointServerPools,
    global::{set_global_rustfs_port, shutdown_background_services},
    notification_sys::new_global_notification_sys,
    set_global_endpoints,
    store::ECStore,
    store::init_local_disks,
    update_erasure_type,
};
use rustfs_iam::init_iam_sys;
use rustfs_obs::{SystemObserver, init_obs, set_global_guard};
use rustfs_protos::proto_gen::node_service::node_service_server::NodeServiceServer;
use rustfs_obs::{init_obs, set_global_guard};
use rustfs_utils::net::parse_and_resolve_address;
use rustls::ServerConfig;
use s3s::service::S3Service;
use s3s::{host::MultiDomain, service::S3ServiceBuilder};
use service::hybrid;
use socket2::SockRef;
use std::io::{Error, Result};
use std::net::SocketAddr;
use std::sync::Arc;
use std::time::Duration;
use tokio::net::{TcpListener, TcpStream};
#[cfg(unix)]
use tokio::signal::unix::{SignalKind, signal};
use tokio_rustls::TlsAcceptor;
use tonic::{Request, Status, metadata::MetadataValue};
use tower::ServiceBuilder;
use tower_http::catch_panic::CatchPanicLayer;
use tower_http::cors::CorsLayer;
use tower_http::trace::TraceLayer;
use tracing::{Span, debug, error, info, instrument, warn};

const MI_B: usize = 1024 * 1024;
use tracing::{debug, error, info, instrument, warn};

#[cfg(all(target_os = "linux", target_env = "gnu"))]
#[global_allocator]
static GLOBAL: tikv_jemallocator::Jemalloc = tikv_jemallocator::Jemalloc;

#[allow(clippy::result_large_err)]
fn check_auth(req: Request<()>) -> std::result::Result<Request<()>, Status> {
    let token: MetadataValue<_> = "rustfs rpc".parse().unwrap();

    match req.metadata().get("authorization") {
        Some(t) if token == t => Ok(req),
        _ => Err(Status::unauthenticated("No valid auth token")),
    }
}
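For check_auth to pass, callers must attach the same static token as gRPC metadata. A client-side sketch (the request payload is hypothetical):

    let mut req = tonic::Request::new(payload);
    req.metadata_mut()
        .insert("authorization", "rustfs rpc".parse().unwrap());
    // Any request without this metadata is rejected with Status::unauthenticated.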
#[instrument]
fn print_server_info() {
    let cfg = CONSOLE_CONFIG.get().unwrap();
    let current_year = chrono::Utc::now().year();

    // Use custom macros to print server information
    info!("RustFS Object Storage Server");
    info!("Copyright: 2024-{} RustFS, Inc", current_year);
    info!("License: {}", cfg.license());
    info!("Version: {}", cfg.version_info());
    info!("Docs: {}", cfg.doc());
    info!("License: Apache-2.0 https://www.apache.org/licenses/LICENSE-2.0");
    info!("Version: {}", version::get_version());
    info!("Docs: https://rustfs.com/docs/");
}

#[tokio::main]
@@ -127,55 +88,12 @@ async fn main() -> Result<()> {
    run(opt).await
}

/// Sets up the TLS acceptor if certificates are available.
#[instrument(skip(tls_path))]
async fn setup_tls_acceptor(tls_path: &str) -> Result<Option<TlsAcceptor>> {
    if tls_path.is_empty() || tokio::fs::metadata(tls_path).await.is_err() {
        debug!("TLS path is not provided or does not exist, starting with HTTP");
        return Ok(None);
    }

    debug!("Found TLS directory, checking for certificates");

    // 1. Try to load all certificates from the directory (multi-cert support)
    if let Ok(cert_key_pairs) = rustfs_utils::load_all_certs_from_directory(tls_path) {
        if !cert_key_pairs.is_empty() {
            debug!("Found {} certificates, creating multi-cert resolver", cert_key_pairs.len());
            let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
            let mut server_config = ServerConfig::builder()
                .with_no_client_auth()
                .with_cert_resolver(Arc::new(rustfs_utils::create_multi_cert_resolver(cert_key_pairs)?));
            server_config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec(), b"http/1.0".to_vec()];
            return Ok(Some(TlsAcceptor::from(Arc::new(server_config))));
        }
    }

    // 2. Fall back to legacy single-certificate mode
    let key_path = format!("{tls_path}/{RUSTFS_TLS_KEY}");
    let cert_path = format!("{tls_path}/{RUSTFS_TLS_CERT}");
    if tokio::try_join!(tokio::fs::metadata(&key_path), tokio::fs::metadata(&cert_path)).is_ok() {
        debug!("Found legacy single TLS certificate, starting with HTTPS");
        let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();
        let certs = rustfs_utils::load_certs(&cert_path).map_err(|e| rustfs_utils::certs_error(e.to_string()))?;
        let key = rustfs_utils::load_private_key(&key_path).map_err(|e| rustfs_utils::certs_error(e.to_string()))?;
        let mut server_config = ServerConfig::builder()
            .with_no_client_auth()
            .with_single_cert(certs, key)
            .map_err(|e| rustfs_utils::certs_error(e.to_string()))?;
        server_config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec(), b"http/1.0".to_vec()];
        return Ok(Some(TlsAcceptor::from(Arc::new(server_config))));
    }

    debug!("No valid TLS certificates found in the directory, starting with HTTP");
    Ok(None)
}
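One detail worth noting in setup_tls_acceptor: the ALPN list is ordered by server preference, so rustls negotiates HTTP/2 whenever the client offers it and falls back to HTTP/1.x otherwise. The relevant line, annotated:

    // Preference order: h2 first, then http/1.1, then http/1.0.
    server_config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec(), b"http/1.0".to_vec()];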
#[instrument(skip(opt))]
async fn run(opt: config::Opt) -> Result<()> {
    debug!("opt: {:?}", &opt);

    if let Some(region) = opt.region {
        rustfs_ecstore::global::set_global_region(region);
    if let Some(region) = &opt.region {
        rustfs_ecstore::global::set_global_region(region.clone());
    }

    let server_addr = parse_and_resolve_address(opt.address.as_str()).map_err(Error::other)?;
@@ -189,21 +107,7 @@ async fn run(opt: config::Opt) -> Result<()> {

    set_global_rustfs_port(server_port);

    // The listening address and port are obtained from the parameters
    let listener = TcpListener::bind(server_address.clone()).await?;
    // Obtain the listener address
    let local_addr: SocketAddr = listener.local_addr()?;
    debug!("Listening on: {}", local_addr);
    let local_ip = match rustfs_utils::get_local_ip() {
        Some(ip) => {
            debug!("Obtained local IP address: {}", ip);
            ip
        }
        None => {
            warn!("Unable to obtain local IP address, using fallback IP: {}", local_addr.ip());
            local_addr.ip()
        }
    };
    set_global_addr(&opt.address).await;

    // For RPC
    let (endpoint_pools, setup_type) =
@@ -222,19 +126,6 @@ async fn run(opt: config::Opt) -> Result<()> {
        }
    }

    // Detailed endpoint information (showing all API endpoints)
    let api_endpoints = format!("http://{local_ip}:{server_port}");
    let localhost_endpoint = format!("http://127.0.0.1:{server_port}");
    info!("   API: {} {}", api_endpoints, localhost_endpoint);
    info!("   RootUser: {}", opt.access_key.clone());
    info!("   RootPass: {}", opt.secret_key.clone());
    if DEFAULT_ACCESS_KEY.eq(&opt.access_key) && DEFAULT_SECRET_KEY.eq(&opt.secret_key) {
        warn!(
            "Detected default credentials '{}:{}', we recommend that you change these values with 'RUSTFS_ACCESS_KEY' and 'RUSTFS_SECRET_KEY' environment variables",
            DEFAULT_ACCESS_KEY, DEFAULT_SECRET_KEY
        );
    }

    for (i, eps) in endpoint_pools.as_ref().iter().enumerate() {
        info!(
            "created endpoints {}, set_count:{}, drives_per_set: {}, cmd: {:?}",
@@ -246,7 +137,11 @@ async fn run(opt: config::Opt) -> Result<()> {
        }
    }

    set_global_addr(&opt.address).await;
    let state_manager = ServiceStateManager::new();
    // Update service status to Starting
    state_manager.update(ServiceState::Starting);

    let shutdown_tx = start_http_server(&opt, state_manager.clone()).await?;

    set_global_endpoints(endpoint_pools.as_ref().clone());
    update_erasure_type(setup_type).await;
@@ -254,163 +149,6 @@ async fn run(opt: config::Opt) -> Result<()> {
    // Initialize the local disk
    init_local_disks(endpoint_pools.clone()).await.map_err(Error::other)?;

    // Setup S3 service
    // This project uses the S3S library to implement S3 services
    let s3_service = {
        let store = storage::ecfs::FS::new();
        let mut b = S3ServiceBuilder::new(store.clone());

        let access_key = opt.access_key.clone();
        let secret_key = opt.secret_key.clone();
        debug!("authentication is enabled {}, {}", &access_key, &secret_key);

        b.set_auth(IAMAuth::new(access_key, secret_key));
        b.set_access(store.clone());
        b.set_route(admin::make_admin_route()?);

        if !opt.server_domains.is_empty() {
            info!("virtual-hosted-style requests are enabled use domain_name {:?}", &opt.server_domains);
            b.set_host(MultiDomain::new(&opt.server_domains).map_err(Error::other)?);
        }

        b.build()
    };

    tokio::spawn(async move {
        // Record the PID-related metrics of the current process
        let meter = opentelemetry::global::meter("system");
        let obs_result = SystemObserver::init_process_observer(meter).await;
        match obs_result {
            Ok(_) => {
                info!("Process observer initialized successfully");
            }
            Err(e) => {
                error!("Failed to initialize process observer: {}", e);
            }
        }
    });

    let tls_acceptor = setup_tls_acceptor(opt.tls_path.as_deref().unwrap_or_default()).await?;

    let state_manager = ServiceStateManager::new();
    let worker_state_manager = state_manager.clone();
    // Update service status to Starting
    state_manager.update(ServiceState::Starting);

    // Create shutdown channel
    let (shutdown_tx, mut shutdown_rx) = tokio::sync::broadcast::channel(1);
    let shutdown_tx_clone = shutdown_tx.clone();

    tokio::spawn(async move {
        #[cfg(unix)]
        let (mut sigterm_inner, mut sigint_inner) = {
            // Unix platform specific code
            let sigterm_inner = signal(SignalKind::terminate()).expect("Failed to create SIGTERM signal handler");
            let sigint_inner = signal(SignalKind::interrupt()).expect("Failed to create SIGINT signal handler");
            (sigterm_inner, sigint_inner)
        };

        let http_server = Arc::new(ConnBuilder::new(TokioExecutor::new()));
        let mut ctrl_c = std::pin::pin!(tokio::signal::ctrl_c());
        let graceful = Arc::new(GracefulShutdown::new());
        debug!("graceful initiated");

        // service ready
        worker_state_manager.update(ServiceState::Ready);
        let tls_acceptor = tls_acceptor.map(Arc::new);

        loop {
            debug!("Waiting for new connection...");
            let (socket, _) = {
                #[cfg(unix)]
                {
                    tokio::select! {
                        res = listener.accept() => match res {
                            Ok(conn) => conn,
                            Err(err) => {
                                error!("error accepting connection: {err}");
                                continue;
                            }
                        },
                        _ = ctrl_c.as_mut() => {
                            info!("Ctrl-C received in worker thread");
                            let _ = shutdown_tx_clone.send(());
                            break;
                        },
                        Some(_) = sigint_inner.recv() => {
                            info!("SIGINT received in worker thread");
                            let _ = shutdown_tx_clone.send(());
                            break;
                        },
                        Some(_) = sigterm_inner.recv() => {
                            info!("SIGTERM received in worker thread");
                            let _ = shutdown_tx_clone.send(());
                            break;
                        },
                        _ = shutdown_rx.recv() => {
                            info!("Shutdown signal received in worker thread");
                            break;
                        }
                    }
                }
                #[cfg(not(unix))]
                {
                    tokio::select! {
                        res = listener.accept() => match res {
                            Ok(conn) => conn,
                            Err(err) => {
                                error!("error accepting connection: {err}");
                                continue;
                            }
                        },
                        _ = ctrl_c.as_mut() => {
                            info!("Ctrl-C received in worker thread");
                            let _ = shutdown_tx_clone.send(());
                            break;
                        },
                        _ = shutdown_rx.recv() => {
                            info!("Shutdown signal received in worker thread");
                            break;
                        }
                    }
                }
            };

            let socket_ref = SockRef::from(&socket);
            if let Err(err) = socket_ref.set_nodelay(true) {
                warn!(?err, "Failed to set TCP_NODELAY");
            }
            if let Err(err) = socket_ref.set_recv_buffer_size(4 * MI_B) {
                warn!(?err, "Failed to set recv_buffer_size");
            }
            if let Err(err) = socket_ref.set_send_buffer_size(4 * MI_B) {
                warn!(?err, "Failed to set send_buffer_size");
            }

            process_connection(socket, tls_acceptor.clone(), http_server.clone(), s3_service.clone(), graceful.clone());
        }

        worker_state_manager.update(ServiceState::Stopping);
        match Arc::try_unwrap(graceful) {
            Ok(g) => {
                tokio::select! {
                    () = g.shutdown() => {
                        debug!("Gracefully shutdown!");
                    },
                    () = tokio::time::sleep(Duration::from_secs(10)) => {
                        debug!("Waited 10 seconds for graceful shutdown, aborting...");
                    }
                }
            }
            Err(arc_graceful) => {
                error!("Cannot perform graceful shutdown, other references exist err: {:?}", arc_graceful);
                tokio::time::sleep(Duration::from_secs(10)).await;
                debug!("Timeout reached, forcing shutdown");
            }
        }
        worker_state_manager.update(ServiceState::Stopped);
    });

    // init store
    let store = ECStore::new(server_addr, endpoint_pools.clone()).await.inspect_err(|err| {
        error!("ECStore::new {:?}", err);
@@ -421,7 +159,7 @@ async fn run(opt: config::Opt) -> Result<()> {
    GLOBAL_ConfigSys.init(store.clone()).await?;

    // Initialize event notifier
    event::init_event_notifier().await;
    init_event_notifier().await;

    let buckets_list = store
        .list_bucket(&BucketOptions {
@@ -442,19 +180,19 @@ async fn run(opt: config::Opt) -> Result<()> {
            Error::other(err)
        })?;

    // init scanner
    let scanner_cancel_token = init_data_scanner().await;
    // init auto heal
    init_auto_heal().await;
    // init console configuration
    init_console_cfg(local_ip, server_port);

    // init scanner and auto heal with unified cancellation token
    // let _background_services_cancel_token = create_background_services_cancel_token();
    // init_data_scanner().await;
    // init_auto_heal().await;
    let _ = create_ahm_services_cancel_token();
    let scanner = Scanner::new(Some(ScannerConfig::default()));
    scanner.start().await?;
    print_server_info();
    init_bucket_replication_pool().await;

    // Async update check (optional)
    tokio::spawn(async {
        use crate::update_checker::{UpdateCheckError, check_updates};
        use crate::update::{UpdateCheckError, check_updates};

        match check_updates().await {
            Ok(result) => {
@@ -484,34 +222,17 @@ async fn run(opt: config::Opt) -> Result<()> {
        }
    });

    if opt.console_enable {
        debug!("console is enabled");
        let access_key = opt.access_key.clone();
        let secret_key = opt.secret_key.clone();
        let console_address = opt.console_address.clone();
        let tls_path = opt.tls_path.clone();

        if console_address.is_empty() {
            error!("console_address is empty");
            return Err(Error::other("console_address is empty".to_string()));
        }

        tokio::spawn(async move {
            console::start_static_file_server(&console_address, local_ip, &access_key, &secret_key, tls_path).await;
        });
    }

    // Sleep for SHUTDOWN_TIMEOUT before waiting on shutdown signals
    tokio::time::sleep(SHUTDOWN_TIMEOUT).await;
    // listen to the shutdown signal
    match wait_for_shutdown().await {
        #[cfg(unix)]
        ShutdownSignal::CtrlC | ShutdownSignal::Sigint | ShutdownSignal::Sigterm => {
            handle_shutdown(&state_manager, &shutdown_tx, &scanner_cancel_token).await;
            handle_shutdown(&state_manager, &shutdown_tx).await;
        }
        #[cfg(not(unix))]
        ShutdownSignal::CtrlC => {
            handle_shutdown(&state_manager, &shutdown_tx, &scanner_cancel_token).await;
            handle_shutdown(&state_manager, &shutdown_tx).await;
        }
    }

@@ -519,116 +240,19 @@ async fn run(opt: config::Opt) -> Result<()> {
    Ok(())
}

/// Process a single incoming TCP connection.
///
/// This function is executed in a new Tokio task and it will:
/// 1. If TLS is configured, perform the TLS handshake.
/// 2. Build a complete service stack for this connection, including S3, RPC services, and all middleware.
/// 3. Use Hyper to handle HTTP requests on this connection.
/// 4. Register the connection with the graceful-shutdown watcher.
#[instrument(skip_all, fields(peer_addr = %socket.peer_addr().map(|a| a.to_string()).unwrap_or_else(|_| "unknown".to_string())))]
fn process_connection(
    socket: TcpStream,
    tls_acceptor: Option<Arc<TlsAcceptor>>,
    http_server: Arc<ConnBuilder<TokioExecutor>>,
    s3_service: S3Service,
    graceful: Arc<GracefulShutdown>,
) {
    tokio::spawn(async move {
        // Build services inside each connection task to avoid passing complex service types across tasks;
        // it also ensures that each connection has an independent service instance.
        let rpc_service = NodeServiceServer::with_interceptor(make_server(), check_auth);
        let hybrid_service = ServiceBuilder::new()
            .layer(CatchPanicLayer::new())
            .layer(
                TraceLayer::new_for_http()
                    .make_span_with(|request: &HttpRequest<_>| {
                        let span = tracing::info_span!("http-request",
                            status_code = tracing::field::Empty,
                            method = %request.method(),
                            uri = %request.uri(),
                            version = ?request.version(),
                        );
                        // Note: span.record only writes to fields declared in info_span! above;
                        // ad-hoc field names are silently dropped by tracing.
                        for (header_name, header_value) in request.headers() {
                            if header_name == "user-agent" || header_name == "content-type" || header_name == "content-length" {
                                span.record(header_name.as_str(), header_value.to_str().unwrap_or("invalid"));
                            }
                        }

                        span
                    })
                    .on_request(|request: &HttpRequest<_>, _span: &Span| {
                        info!(
                            counter.rustfs_api_requests_total = 1_u64,
                            key_request_method = %request.method().to_string(),
                            key_request_uri_path = %request.uri().path().to_owned(),
                            "handle request api total",
                        );
                        debug!("http started method: {}, url path: {}", request.method(), request.uri().path())
                    })
                    .on_response(|response: &Response<_>, latency: Duration, _span: &Span| {
                        // Record into the pre-declared `status_code` field
                        _span.record("status_code", tracing::field::display(response.status()));
                        debug!("http response generated in {:?}", latency)
                    })
                    .on_body_chunk(|chunk: &Bytes, latency: Duration, _span: &Span| {
                        info!(histogram.request.body.len = chunk.len(), "histogram request body length",);
                        debug!("http body sending {} bytes in {:?}", chunk.len(), latency)
                    })
                    .on_eos(|_trailers: Option<&HeaderMap>, stream_duration: Duration, _span: &Span| {
                        debug!("http stream closed after {:?}", stream_duration)
                    })
                    .on_failure(|_error, latency: Duration, _span: &Span| {
                        info!(counter.rustfs_api_requests_failure_total = 1_u64, "handle request api failure total");
                        debug!("http request failure error: {:?} in {:?}", _error, latency)
                    }),
            )
            .layer(CorsLayer::permissive())
            .service(hybrid(s3_service, rpc_service));
        let hybrid_service = TowerToHyperService::new(hybrid_service);

        // Decide whether to handle HTTPS or HTTP connections based on the existence of the TLS acceptor
        if let Some(acceptor) = tls_acceptor {
            debug!("TLS handshake start");
            match acceptor.accept(socket).await {
                Ok(tls_socket) => {
                    debug!("TLS handshake successful");
                    let stream = TokioIo::new(tls_socket);
                    let conn = http_server.serve_connection(stream, hybrid_service);
                    if let Err(err) = graceful.watch(conn).await {
                        handle_connection_error(&*err);
                    }
                }
                Err(err) => {
                    error!(?err, "TLS handshake failed");
                    return; // Handshake failed, end the task directly
                }
            }
            debug!("TLS handshake success");
        } else {
            debug!("Http handshake start");
            let stream = TokioIo::new(socket);
            let conn = http_server.serve_connection(stream, hybrid_service);
            if let Err(err) = graceful.watch(conn).await {
                handle_connection_error(&*err);
            }
            debug!("Http handshake success");
        };
    });
}
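process_connection stacks its middleware with tower's ServiceBuilder, where the first layer added becomes the outermost wrapper. A reduced sketch of the ordering (inner_service is a stand-in for the S3/RPC hybrid service):

    let svc = ServiceBuilder::new()
        .layer(CatchPanicLayer::new())      // outermost: converts panics into 500 responses
        .layer(TraceLayer::new_for_http())  // spans plus the request/response hooks above
        .layer(CorsLayer::permissive())     // CORS headers, innermost wrapper
        .service(inner_service);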
/// Handles the shutdown process of the server
async fn handle_shutdown(
    state_manager: &ServiceStateManager,
    shutdown_tx: &tokio::sync::broadcast::Sender<()>,
    scanner_cancel_token: &tokio_util::sync::CancellationToken,
) {
async fn handle_shutdown(state_manager: &ServiceStateManager, shutdown_tx: &tokio::sync::broadcast::Sender<()>) {
    info!("Shutdown signal received in main thread");
    // Update the status to Stopping first
    state_manager.update(ServiceState::Stopping);

    // Stop the data scanner gracefully
    info!("Stopping data scanner...");
    scanner_cancel_token.cancel();
    // Stop background services (data scanner and auto heal) gracefully
    info!("Stopping background services (data scanner and auto heal)...");
    shutdown_background_services();

    // Stop AHM services gracefully
    info!("Stopping AHM services...");
    shutdown_ahm_services();

    // Stop the notification system
    shutdown_event_notifier().await;
@@ -644,25 +268,63 @@
    info!("Server stopped");
}
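handle_shutdown fans the stop request out through the tokio broadcast channel created in run(); every worker holding a receiver sees it. A minimal sketch of that mechanism:

    let (shutdown_tx, _) = tokio::sync::broadcast::channel::<()>(1);
    let mut shutdown_rx = shutdown_tx.subscribe();

    tokio::spawn(async move {
        // Each worker waits on its own receiver.
        let _ = shutdown_rx.recv().await;
        // ... stop accepting work and drain ...
    });

    let _ = shutdown_tx.send(()); // one send wakes every subscriber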
/// Handles connection errors by logging them with appropriate severity
fn handle_connection_error(err: &(dyn std::error::Error + 'static)) {
    if let Some(hyper_err) = err.downcast_ref::<hyper::Error>() {
        if hyper_err.is_incomplete_message() {
            warn!("The HTTP connection was closed prematurely, before the message completed: {}", hyper_err);
        } else if hyper_err.is_closed() {
            warn!("The HTTP connection is closed: {}", hyper_err);
        } else if hyper_err.is_parse() {
            error!("HTTP message parsing failed: {}", hyper_err);
        } else if hyper_err.is_user() {
            error!("HTTP user-custom error: {}", hyper_err);
        } else if hyper_err.is_canceled() {
            warn!("The HTTP connection was canceled: {}", hyper_err);
        } else {
            error!("Unknown hyper error: {:?}", hyper_err);
        }
    } else if let Some(io_err) = err.downcast_ref::<Error>() {
        error!("Unknown connection IO error: {}", io_err);
    } else {
        error!("Unknown connection error type: {:?}", err);
    }
}

#[instrument]
pub(crate) async fn init_event_notifier() {
    info!("Initializing event notifier...");

    // 1. Get the global configuration loaded by ecstore
    let server_config = match GLOBAL_ServerConfig.get() {
        Some(config) => config.clone(), // Clone the config to pass ownership
        None => {
            error!("Event notifier initialization failed: Global server config not loaded.");
            return;
        }
    };

    info!("Global server configuration loaded successfully. config: {:?}", server_config);
    // 2. Skip initialization unless both the MQTT and webhook notify subsystems are configured
    if server_config
        .get_value(rustfs_config::notify::NOTIFY_MQTT_SUB_SYS, DEFAULT_DELIMITER)
        .is_none()
        || server_config
            .get_value(rustfs_config::notify::NOTIFY_WEBHOOK_SUB_SYS, DEFAULT_DELIMITER)
            .is_none()
    {
        info!("'notify' subsystem not configured, skipping event notifier initialization.");
        return;
    }

    info!("Event notifier configuration found, proceeding with initialization.");

    // 3. Initialize the notification system asynchronously with the global configuration,
    //    in a separate task to avoid blocking the main initialization process
    tokio::spawn(async move {
        if let Err(e) = rustfs_notify::initialize(server_config).await {
            error!("Failed to initialize event notifier system: {}", e);
        } else {
            info!("Event notifier system initialized successfully.");
        }
    });
}

/// Shuts down the event notifier system gracefully
pub async fn shutdown_event_notifier() {
    info!("Shutting down event notifier system...");

    if !rustfs_notify::is_notification_system_initialized() {
        info!("Event notifier system is not initialized, nothing to shut down.");
        return;
    }

    let system = match rustfs_notify::notification_system() {
        Some(sys) => sys,
        None => {
            error!("Event notifier system is not initialized.");
            return;
        }
    };

    // Call the shutdown function from the rustfs_notify module
    system.shutdown().await;
    info!("Event notifier system shut down successfully.");
}
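Taken together, the relocated notifier functions give main.rs a simple lifecycle; a sketch of the intended call order during startup and shutdown:

    // After GLOBAL_ServerConfig is loaded:
    init_event_notifier().await;        // no-op if 'notify' is not configured

    // ... serve traffic ...

    // During handle_shutdown, before the final state transition:
    shutdown_event_notifier().await;    // no-op if never initialized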