Mirror of https://github.com/rustfs/rustfs.git (synced 2026-01-17 09:40:32 +00:00)

Compare commits: 15 commits, 1.0.0-alph ... 1.0.0-alph

| SHA1 |
|---|
| 6267872ddb |
| 618779a89d |
| b3ec2325ed |
| 49a5643e76 |
| 657395af8a |
| 4de62ed77e |
| 505f493729 |
| be05b704b0 |
| b33c2fa3cf |
| 98674c60d4 |
| e39eb86967 |
| 646070ae7a |
| 2525b66658 |
| 58c5a633e2 |
| aefd894fc2 |
@@ -517,7 +517,7 @@ let results = join_all(futures).await;

### 3. Caching Strategy

- Use `lazy_static` or `OnceCell` for global caching
- Use `LazyLock` for global caching
- Implement LRU cache to avoid memory leaks
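The change above drops `lazy_static`/`OnceCell` in favour of the standard library's `LazyLock`. A minimal sketch of that pattern, assuming Rust 1.80+ where `std::sync::LazyLock` is stable (names are illustrative, not taken from the RustFS codebase):

```rust
use std::collections::HashMap;
use std::sync::{LazyLock, Mutex};

// Global cache created lazily on first access; no external crate needed.
static CONFIG_CACHE: LazyLock<Mutex<HashMap<String, String>>> =
    LazyLock::new(|| Mutex::new(HashMap::new()));

fn cache_insert(key: &str, value: &str) {
    CONFIG_CACHE.lock().unwrap().insert(key.to_string(), value.to_string());
}

fn cached_lookup(key: &str) -> Option<String> {
    CONFIG_CACHE.lock().unwrap().get(key).cloned()
}

fn main() {
    cache_insert("region", "cn-beijing");
    assert_eq!(cached_lookup("region").as_deref(), Some("cn-beijing"));
}
```

An unbounded `HashMap` like this grows without limit under churn, which is what the LRU bullet guards against; a fixed-capacity structure (for example the `lru` crate) bounds memory instead.
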
## Testing Guidelines
@@ -1,27 +0,0 @@
|
||||
FROM ubuntu:22.04
|
||||
|
||||
ENV LANG C.UTF-8
|
||||
|
||||
RUN sed -i s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g /etc/apt/sources.list
|
||||
|
||||
RUN apt-get clean && apt-get update && apt-get install wget git curl unzip gcc pkg-config libssl-dev lld libdbus-1-dev libwayland-dev libwebkit2gtk-4.1-dev libxdo-dev -y
|
||||
|
||||
# install protoc
|
||||
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
|
||||
&& unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
|
||||
&& mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
|
||||
&& mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3
|
||||
|
||||
# install flatc
|
||||
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
|
||||
&& unzip Linux.flatc.binary.g++-13.zip \
|
||||
&& mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip
|
||||
|
||||
# install rust
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
|
||||
|
||||
COPY .docker/cargo.config.toml /root/.cargo/config.toml
|
||||
|
||||
WORKDIR /root/s3-rustfs
|
||||
|
||||
CMD [ "bash", "-c", "while true; do sleep 1; done" ]
|
||||
@@ -1,32 +0,0 @@
|
||||
FROM rockylinux:9.3 AS builder
|
||||
|
||||
ENV LANG C.UTF-8
|
||||
|
||||
RUN sed -e 's|^mirrorlist=|#mirrorlist=|g' \
|
||||
-e 's|^#baseurl=http://dl.rockylinux.org/$contentdir|baseurl=https://mirrors.ustc.edu.cn/rocky|g' \
|
||||
-i.bak \
|
||||
/etc/yum.repos.d/rocky-extras.repo \
|
||||
/etc/yum.repos.d/rocky.repo
|
||||
|
||||
RUN dnf makecache
|
||||
|
||||
RUN yum install wget git unzip gcc openssl-devel pkgconf-pkg-config -y
|
||||
|
||||
# install protoc
|
||||
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
|
||||
&& unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
|
||||
&& mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
|
||||
&& mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3
|
||||
|
||||
# install flatc
|
||||
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
|
||||
&& unzip Linux.flatc.binary.g++-13.zip \
|
||||
&& mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc \
|
||||
&& rm -rf Linux.flatc.binary.g++-13.zip
|
||||
|
||||
# install rust
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
|
||||
|
||||
COPY .docker/cargo.config.toml /root/.cargo/config.toml
|
||||
|
||||
WORKDIR /root/s3-rustfs
|
||||
@@ -1,25 +0,0 @@
|
||||
FROM ubuntu:22.04
|
||||
|
||||
ENV LANG C.UTF-8
|
||||
|
||||
RUN sed -i s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g /etc/apt/sources.list
|
||||
|
||||
RUN apt-get clean && apt-get update && apt-get install wget git curl unzip gcc pkg-config libssl-dev lld libdbus-1-dev libwayland-dev libwebkit2gtk-4.1-dev libxdo-dev -y
|
||||
|
||||
# install protoc
|
||||
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
|
||||
&& unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
|
||||
&& mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
|
||||
&& mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3
|
||||
|
||||
# install flatc
|
||||
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
|
||||
&& unzip Linux.flatc.binary.g++-13.zip \
|
||||
&& mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip
|
||||
|
||||
# install rust
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
|
||||
|
||||
COPY .docker/cargo.config.toml /root/.cargo/config.toml
|
||||
|
||||
WORKDIR /root/s3-rustfs
|
||||
.docker/README.md (new file, 157 lines)
@@ -0,0 +1,157 @@
|
||||
# RustFS Docker Images
|
||||
|
||||
This directory contains organized Dockerfile configurations for building RustFS container images across multiple platforms and system versions.
|
||||
|
||||
## 📁 Directory Structure
|
||||
|
||||
```
|
||||
.docker/
|
||||
├── alpine/ # Alpine Linux variants
|
||||
│ ├── Dockerfile.prebuild # Alpine + pre-built binaries
|
||||
│ └── Dockerfile.source # Alpine + source compilation
|
||||
├── ubuntu/ # Ubuntu variants
|
||||
│ ├── Dockerfile.prebuild # Ubuntu + pre-built binaries
|
||||
│ ├── Dockerfile.source # Ubuntu + source compilation
|
||||
│ └── Dockerfile.dev # Ubuntu + development environment
|
||||
└── cargo.config.toml # Rust cargo configuration
|
||||
```
|
||||
|
||||
## 🎯 Image Variants
|
||||
|
||||
### Production Images
|
||||
|
||||
| Variant | Base OS | Build Method | Size | Use Case |
|
||||
|---------|---------|--------------|------|----------|
|
||||
| `production` (default) | Alpine 3.18 | Pre-built | Smallest | Production deployment |
|
||||
| `alpine` | Alpine 3.18 | Pre-built | Small | Explicit Alpine choice |
|
||||
| `alpine-source` | Alpine 3.18 | Source build | Small | Custom Alpine builds |
|
||||
| `ubuntu` | Ubuntu 22.04 | Pre-built | Medium | Ubuntu environments |
|
||||
| `ubuntu-source` | Ubuntu 22.04 | Source build | Medium | Full Ubuntu compatibility |
|
||||
|
||||
### Development Images
|
||||
|
||||
| Variant | Base OS | Features | Use Case |
|
||||
|---------|---------|----------|----------|
|
||||
| `ubuntu-dev` | Ubuntu 22.04 | Full toolchain + dev tools | Interactive development |
|
||||
|
||||
## 🚀 Usage Examples
|
||||
|
||||
### Quick Start (Production)
|
||||
|
||||
```bash
|
||||
# Default production image (Alpine + pre-built)
|
||||
docker run -p 9000:9000 rustfs/rustfs:latest
|
||||
|
||||
# Specific version with production variant
|
||||
docker run -p 9000:9000 rustfs/rustfs:1.2.3-production
|
||||
|
||||
# Explicit Alpine variant
|
||||
docker run -p 9000:9000 rustfs/rustfs:latest-alpine
|
||||
|
||||
# Ubuntu-based production
|
||||
docker run -p 9000:9000 rustfs/rustfs:latest-ubuntu
|
||||
```
|
||||
|
||||
### Complete Tag Strategy Examples
|
||||
|
||||
```bash
|
||||
# Stable Releases
|
||||
docker run rustfs/rustfs:1.2.3 # Main version (production)
|
||||
docker run rustfs/rustfs:1.2.3-production # Explicit production variant
|
||||
docker run rustfs/rustfs:1.2.3-alpine # Explicit Alpine variant
|
||||
docker run rustfs/rustfs:1.2.3-alpine-source # Alpine source build
|
||||
docker run rustfs/rustfs:latest # Latest stable
|
||||
|
||||
# Prerelease Versions
|
||||
docker run rustfs/rustfs:1.3.0-alpha.2 # Specific alpha version
|
||||
docker run rustfs/rustfs:1.3.0-alpha.2-alpine # Alpha with Alpine
|
||||
docker run rustfs/rustfs:alpha # Latest alpha
|
||||
docker run rustfs/rustfs:beta # Latest beta
|
||||
docker run rustfs/rustfs:rc # Latest release candidate
|
||||
|
||||
# Development Versions
|
||||
docker run rustfs/rustfs:dev # Latest development
|
||||
docker run rustfs/rustfs:dev-13e4a0b # Specific commit
|
||||
docker run rustfs/rustfs:dev-alpine # Development Alpine
|
||||
```
|
||||
|
||||
### Development Environment
|
||||
|
||||
```bash
|
||||
# Start development container
|
||||
docker run -it -v $(pwd):/app -p 9000:9000 rustfs/rustfs:latest-ubuntu-dev
|
||||
|
||||
# Inside container:
|
||||
cd /app
|
||||
cargo build --release
|
||||
cargo run
|
||||
```
|
||||
|
||||
## 🏗️ Build Arguments
|
||||
|
||||
All images support dynamic version selection:
|
||||
|
||||
```bash
|
||||
# Build with specific version
|
||||
docker build \
|
||||
--build-arg VERSION="1.0.0" \
|
||||
--build-arg BUILD_TYPE="release" \
|
||||
-f .docker/alpine/Dockerfile.prebuild \
|
||||
-t rustfs:1.0.0-alpine .
|
||||
```
|
||||
|
||||
## 🌐 Multi-Platform Support
|
||||
|
||||
All images support multiple architectures:
|
||||
|
||||
- `linux/amd64` (Intel/AMD 64-bit)
|
||||
- `linux/arm64` (ARM 64-bit, Apple Silicon, etc.)
|
||||
|
||||
## ⚡ Build Speed Optimizations
|
||||
|
||||
### Docker Build Optimizations
|
||||
|
||||
- **Multi-layer caching**: GitHub Actions cache + Registry cache
|
||||
- **Parallel matrix builds**: All 5 variants build simultaneously
|
||||
- **Multi-platform builds**: amd64/arm64 built in parallel
|
||||
- **BuildKit features**: Advanced caching and inline cache
|
||||
|
||||
### Rust Compilation Optimizations
|
||||
|
||||
- **sccache**: Distributed compilation cache for Rust builds
|
||||
- **Parallel compilation**: Uses all available CPU cores (`-j $(nproc)`)
|
||||
- **Optimized cargo config**: Sparse registry protocol, fast linker (lld)
|
||||
- **Dependency caching**: Separate Docker layers for dependencies vs. source code
|
||||
- **Release optimizations**: LTO, strip symbols, optimized codegen
|
||||
|
||||
### Cache Strategy
|
||||
|
||||
```yaml
|
||||
# GitHub Actions cache
|
||||
cache-from: type=gha,scope=docker-{variant}
|
||||
cache-to: type=gha,mode=max,scope=docker-{variant}
|
||||
|
||||
# Registry cache (persistent across runs)
|
||||
cache-from: type=registry,ref=ghcr.io/rustfs/rustfs:buildcache-{variant}
|
||||
cache-to: type=registry,ref=ghcr.io/rustfs/rustfs:buildcache-{variant}
|
||||
```
|
||||
|
||||
### Build Performance Comparison
|
||||
|
||||
| Build Type | Time (Est.) | Cache Hit | Cache Miss |
|
||||
|------------|-------------|-----------|-----------|
|
||||
| Production (Alpine pre-built) | ~2-3 min | ~1 min | ~2 min |
|
||||
| Alpine pre-built | ~2-3 min | ~1 min | ~2 min |
|
||||
| Alpine source | ~8-12 min | ~3-5 min | ~10 min |
|
||||
| Ubuntu pre-built | ~3-4 min | ~1-2 min | ~3 min |
|
||||
| Ubuntu source | ~10-15 min | ~4-6 min | ~12 min |
|
||||
|
||||
## 📋 Build Matrix
|
||||
|
||||
| Trigger | Version Format | Download Path | Image Tags |
|
||||
|---------|---------------|---------------|------------|
|
||||
| `push main` | `dev-{sha}` | `artifacts/rustfs/dev/` | `dev-{sha}-{variant}`, `dev-{variant}`, `dev` |
|
||||
| `push 1.2.3` | `1.2.3` | `artifacts/rustfs/release/` | `1.2.3-{variant}`, `1.2.3`, `latest-{variant}`, `latest` |
|
||||
| `push 1.3.0-alpha.2` | `1.3.0-alpha.2` | `artifacts/rustfs/release/` | `1.3.0-alpha.2-{variant}`, `alpha-{variant}`, `alpha` |
|
||||
| `push 1.3.0-beta.1` | `1.3.0-beta.1` | `artifacts/rustfs/release/` | `1.3.0-beta.1-{variant}`, `beta-{variant}`, `beta` |
|
||||
| `push 1.3.0-rc.1` | `1.3.0-rc.1` | `artifacts/rustfs/release/` | `1.3.0-rc.1-{variant}`, `rc-{variant}`, `rc` |
|
||||
.docker/alpine/Dockerfile.prebuild (new file, 117 lines)
@@ -0,0 +1,117 @@
|
||||
# Copyright 2024 RustFS Team
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Multi-stage Alpine build for minimal runtime image
|
||||
FROM rust:1.88-alpine AS builder
|
||||
|
||||
# Build arguments for dynamic artifact download
|
||||
ARG VERSION=""
|
||||
ARG BUILD_TYPE="release"
|
||||
ARG TARGETARCH
|
||||
|
||||
# Install build dependencies
|
||||
RUN apk add --no-cache \
|
||||
musl-dev \
|
||||
pkgconfig \
|
||||
openssl-dev \
|
||||
openssl-libs-static \
|
||||
curl \
|
||||
unzip \
|
||||
bash \
|
||||
wget \
|
||||
ca-certificates
|
||||
|
||||
# Install protoc
|
||||
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
|
||||
&& unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
|
||||
&& mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
|
||||
&& mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3
|
||||
|
||||
# Install flatc
|
||||
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
|
||||
&& unzip Linux.flatc.binary.g++-13.zip \
|
||||
&& mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc \
|
||||
&& rm -rf Linux.flatc.binary.g++-13.zip
|
||||
|
||||
# Option A: Download pre-built binary (faster)
|
||||
RUN if [ -n "$VERSION" ]; then \
|
||||
# Map TARGETARCH to our naming convention
|
||||
case "${TARGETARCH}" in \
|
||||
amd64) ARCH="x86_64" ;; \
|
||||
arm64) ARCH="aarch64" ;; \
|
||||
*) echo "Unsupported architecture: ${TARGETARCH}" && exit 1 ;; \
|
||||
esac; \
|
||||
\
|
||||
# Determine download path and filename
|
||||
if [ "${BUILD_TYPE}" = "development" ]; then \
|
||||
DOWNLOAD_PATH="artifacts/rustfs/dev"; \
|
||||
FILENAME="rustfs-linux-${ARCH}-dev-${VERSION}.zip"; \
|
||||
else \
|
||||
DOWNLOAD_PATH="artifacts/rustfs/release"; \
|
||||
FILENAME="rustfs-linux-${ARCH}-v${VERSION}.zip"; \
|
||||
fi; \
|
||||
\
|
||||
# Download the binary
|
||||
DOWNLOAD_URL="https://dl.rustfs.com/${DOWNLOAD_PATH}/${FILENAME}"; \
|
||||
echo "Downloading RustFS binary from: ${DOWNLOAD_URL}"; \
|
||||
curl -Lo /tmp/rustfs.zip "${DOWNLOAD_URL}"; \
|
||||
unzip -o /tmp/rustfs.zip -d /tmp; \
|
||||
mv /tmp/rustfs /usr/local/bin/rustfs; \
|
||||
chmod +x /usr/local/bin/rustfs; \
|
||||
rm -rf /tmp/*; \
|
||||
else \
|
||||
echo "No VERSION provided, will build from source"; \
|
||||
echo "Source build not yet implemented in Alpine variant"; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
# Final Alpine runtime image
|
||||
FROM alpine:3.18
|
||||
|
||||
RUN apk add --no-cache \
|
||||
ca-certificates \
|
||||
tzdata \
|
||||
bash
|
||||
|
||||
# Create rustfs user for security
|
||||
RUN addgroup -g 1000 rustfs && \
|
||||
adduser -D -u 1000 -G rustfs rustfs
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Copy binary from builder
|
||||
COPY --from=builder /usr/local/bin/rustfs /app/rustfs
|
||||
RUN chmod +x /app/rustfs && chown rustfs:rustfs /app/rustfs
|
||||
|
||||
# Create data directories
|
||||
RUN mkdir -p /data && chown -R rustfs:rustfs /data /app
|
||||
|
||||
# Switch to non-root user
|
||||
USER rustfs
|
||||
|
||||
# Environment variables
|
||||
ENV RUSTFS_ACCESS_KEY=rustfsadmin \
|
||||
RUSTFS_SECRET_KEY=rustfsadmin \
|
||||
RUSTFS_ADDRESS=":9000" \
|
||||
RUSTFS_CONSOLE_ENABLE=true \
|
||||
RUSTFS_VOLUMES=/data \
|
||||
RUST_LOG=warn
|
||||
|
||||
EXPOSE 9000
|
||||
|
||||
# Health check
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
CMD wget --no-verbose --tries=1 --spider http://localhost:9000/health || exit 1
|
||||
|
||||
CMD ["/app/rustfs"]
|
||||
.docker/alpine/Dockerfile.source (new file, 126 lines)
@@ -0,0 +1,126 @@
|
||||
# Copyright 2024 RustFS Team
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Multi-stage Alpine build from source
|
||||
FROM rust:1.88-alpine AS builder
|
||||
|
||||
# Install build dependencies
|
||||
RUN apk add --no-cache \
|
||||
musl-dev \
|
||||
pkgconfig \
|
||||
openssl-dev \
|
||||
openssl-libs-static \
|
||||
curl \
|
||||
unzip \
|
||||
bash \
|
||||
wget \
|
||||
ca-certificates \
|
||||
git
|
||||
|
||||
# Install sccache for Rust compilation caching
|
||||
RUN wget https://github.com/mozilla/sccache/releases/download/v0.8.1/sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz \
|
||||
&& tar -xzf sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz \
|
||||
&& mv sccache-v0.8.1-x86_64-unknown-linux-musl/sccache /usr/local/bin/ \
|
||||
&& chmod +x /usr/local/bin/sccache \
|
||||
&& rm -rf sccache-v0.8.1-x86_64-unknown-linux-musl.tar.gz sccache-v0.8.1-x86_64-unknown-linux-musl
|
||||
|
||||
# Set up sccache environment
|
||||
ENV RUSTC_WRAPPER=sccache \
|
||||
SCCACHE_DIR=/tmp/sccache \
|
||||
SCCACHE_CACHE_SIZE=2G
|
||||
|
||||
# Install protoc
|
||||
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
|
||||
&& unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
|
||||
&& mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
|
||||
&& mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3
|
||||
|
||||
# Install flatc
|
||||
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
|
||||
&& unzip Linux.flatc.binary.g++-13.zip \
|
||||
&& mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc \
|
||||
&& rm -rf Linux.flatc.binary.g++-13.zip
|
||||
|
||||
WORKDIR /usr/src/rustfs
|
||||
|
||||
# Copy cargo configuration for optimized builds
|
||||
COPY .docker/cargo.config.toml ./.cargo/config.toml
|
||||
|
||||
# Copy cargo files for dependency caching
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
COPY */Cargo.toml ./*/
|
||||
|
||||
# Create dummy main.rs files for dependency compilation
|
||||
RUN find . -name "Cargo.toml" -not -path "./Cargo.toml" | \
|
||||
xargs -I {} dirname {} | \
|
||||
xargs -I {} sh -c 'mkdir -p {}/src && echo "fn main() {}" > {}/src/main.rs'
|
||||
|
||||
# Configure cargo for optimized builds
|
||||
ENV CARGO_NET_GIT_FETCH_WITH_CLI=true \
|
||||
CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse \
|
||||
CARGO_INCREMENTAL=0 \
|
||||
CARGO_PROFILE_RELEASE_DEBUG=false \
|
||||
CARGO_PROFILE_RELEASE_SPLIT_DEBUGINFO=off \
|
||||
CARGO_PROFILE_RELEASE_STRIP=symbols
|
||||
|
||||
# Build dependencies only (cache layer) with optimizations
|
||||
RUN cargo build --release --target x86_64-unknown-linux-musl -j $(nproc)
|
||||
|
||||
# Copy source code
|
||||
COPY . .
|
||||
|
||||
# Build the actual application with optimizations
|
||||
RUN sccache --start-server 2>/dev/null || true && \
|
||||
cargo build --release --target x86_64-unknown-linux-musl --bin rustfs -j $(nproc) && \
|
||||
sccache --show-stats || true
|
||||
|
||||
# Final Alpine runtime image
|
||||
FROM alpine:3.18
|
||||
|
||||
RUN apk add --no-cache \
|
||||
ca-certificates \
|
||||
tzdata \
|
||||
bash
|
||||
|
||||
# Create rustfs user for security
|
||||
RUN addgroup -g 1000 rustfs && \
|
||||
adduser -D -u 1000 -G rustfs rustfs
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Copy binary from builder
|
||||
COPY --from=builder /usr/src/rustfs/target/x86_64-unknown-linux-musl/release/rustfs /app/rustfs
|
||||
RUN chmod +x /app/rustfs && chown rustfs:rustfs /app/rustfs
|
||||
|
||||
# Create data directories
|
||||
RUN mkdir -p /data && chown -R rustfs:rustfs /data /app
|
||||
|
||||
# Switch to non-root user
|
||||
USER rustfs
|
||||
|
||||
# Environment variables
|
||||
ENV RUSTFS_ACCESS_KEY=rustfsadmin \
|
||||
RUSTFS_SECRET_KEY=rustfsadmin \
|
||||
RUSTFS_ADDRESS=":9000" \
|
||||
RUSTFS_CONSOLE_ENABLE=true \
|
||||
RUSTFS_VOLUMES=/data \
|
||||
RUST_LOG=warn
|
||||
|
||||
EXPOSE 9000
|
||||
|
||||
# Health check
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
CMD wget --no-verbose --tries=1 --spider http://localhost:9000/health || exit 1
|
||||
|
||||
CMD ["/app/rustfs"]
|
||||
@@ -12,8 +12,44 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Build optimization settings
|
||||
[build]
|
||||
# Use all available CPU cores for parallel compilation
|
||||
jobs = 0 # 0 = use all cores
|
||||
|
||||
[target.x86_64-unknown-linux-musl]
|
||||
# Use lld linker for faster linking
|
||||
linker = "lld"
|
||||
|
||||
[target.x86_64-unknown-linux-gnu]
|
||||
# Use lld linker for faster linking
|
||||
linker = "lld"
|
||||
|
||||
[target.aarch64-unknown-linux-gnu]
|
||||
# Use lld linker for faster linking
|
||||
linker = "lld"
|
||||
|
||||
[profile.release]
|
||||
# Optimize for size and speed
|
||||
codegen-units = 1
|
||||
lto = true
|
||||
panic = "abort"
|
||||
strip = true
|
||||
debug = false
|
||||
|
||||
[profile.dev]
|
||||
# Faster incremental builds during development
|
||||
incremental = true
|
||||
debug = true
|
||||
|
||||
[registry]
|
||||
# Use sparse registry protocol for faster dependency resolution
|
||||
default = "sparse+https://index.crates.io/"
|
||||
|
||||
[source.crates-io]
|
||||
registry = "https://github.com/rust-lang/crates.io-index"
|
||||
# Use sparse registry protocol
|
||||
replace-with = "sparse+https://index.crates.io/"
|
||||
|
||||
[net]
|
||||
# Use git CLI for better performance with private repos
|
||||
git-fetch-with-cli = true
|
||||
|
||||
.docker/compose/README.md (new file, 80 lines)
@@ -0,0 +1,80 @@
|
||||
# Docker Compose Configurations
|
||||
|
||||
This directory contains specialized Docker Compose configurations for different use cases.
|
||||
|
||||
## 📁 Configuration Files
|
||||
|
||||
This directory contains specialized Docker Compose configurations and their associated Dockerfiles, keeping related files organized together.
|
||||
|
||||
### Main Configuration (Root Directory)
|
||||
|
||||
- **`../../docker-compose.yml`** - **Default Production Setup**
|
||||
- Complete production-ready configuration
|
||||
- Includes RustFS server + full observability stack
|
||||
- Supports multiple profiles: `dev`, `observability`, `cache`, `proxy`
|
||||
- Recommended for most users
|
||||
|
||||
### Specialized Configurations
|
||||
|
||||
- **`docker-compose.cluster.yaml`** - **Distributed Testing**
|
||||
- 4-node cluster setup for testing distributed storage
|
||||
- Uses local compiled binaries
|
||||
- Simulates multi-node environment
|
||||
- Ideal for development and cluster testing
|
||||
|
||||
- **`docker-compose.observability.yaml`** - **Observability Focus**
|
||||
- Specialized setup for testing observability features
|
||||
- Includes OpenTelemetry, Jaeger, Prometheus, Loki, Grafana
|
||||
- Uses `../../Dockerfile.obs` for builds
|
||||
- Perfect for observability development
|
||||
|
||||
## 🚀 Usage Examples
|
||||
|
||||
### Production Setup
|
||||
|
||||
```bash
|
||||
# Start main service
|
||||
docker-compose up -d
|
||||
|
||||
# Start with development profile
|
||||
docker-compose --profile dev up -d
|
||||
|
||||
# Start with full observability
|
||||
docker-compose --profile observability up -d
|
||||
```
|
||||
|
||||
### Cluster Testing
|
||||
|
||||
```bash
|
||||
# Build and start 4-node cluster (run from project root)
|
||||
cd .docker/compose
|
||||
docker-compose -f docker-compose.cluster.yaml up -d
|
||||
|
||||
# Or run directly from project root
|
||||
docker-compose -f .docker/compose/docker-compose.cluster.yaml up -d
|
||||
```
|
||||
|
||||
### Observability Testing
|
||||
|
||||
```bash
|
||||
# Start observability-focused environment (run from project root)
|
||||
cd .docker/compose
|
||||
docker-compose -f docker-compose.observability.yaml up -d
|
||||
|
||||
# Or run directly from project root
|
||||
docker-compose -f .docker/compose/docker-compose.observability.yaml up -d
|
||||
```
|
||||
|
||||
## 🔧 Configuration Overview
|
||||
|
||||
| Configuration | Nodes | Storage | Observability | Use Case |
|
||||
|---------------|-------|---------|---------------|----------|
|
||||
| **Main** | 1 | Volume mounts | Full stack | Production |
|
||||
| **Cluster** | 4 | HTTP endpoints | Basic | Testing |
|
||||
| **Observability** | 4 | Local data | Advanced | Development |
|
||||
|
||||
## 📝 Notes
|
||||
|
||||
- Always ensure you have built the required binaries before starting cluster tests (the cluster compose mounts `../../target/x86_64-unknown-linux-musl/release/rustfs`, so build for the `x86_64-unknown-linux-musl` target first)
|
||||
- The main configuration is sufficient for most use cases
|
||||
- Specialized configurations are for specific testing scenarios
|
||||
@@ -14,70 +14,69 @@
|
||||
|
||||
services:
|
||||
node0:
|
||||
image: rustfs:v1 # Replace with your image name and tag
|
||||
image: rustfs/rustfs:latest # Replace with your image name and label
|
||||
container_name: node0
|
||||
hostname: node0
|
||||
environment:
|
||||
- RUSTFS_VOLUMES=http://node{0...3}:9000/data/rustfs{0...3}
|
||||
- RUSTFS_ADDRESS=0.0.0.0:9000
|
||||
- RUSTFS_CONSOLE_ENABLE=true
|
||||
- RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
|
||||
- RUSTFS_ACCESS_KEY=rustfsadmin
|
||||
- RUSTFS_SECRET_KEY=rustfsadmin
|
||||
platform: linux/amd64
|
||||
ports:
|
||||
- "9000:9000" # 映射宿主机的 9001 端口到容器的 9000 端口
|
||||
- "8000:9001" # 映射宿主机的 9001 端口到容器的 9000 端口
|
||||
- "9000:9000" # Map port 9001 of the host to port 9000 of the container
|
||||
volumes:
|
||||
- ./target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
|
||||
# - ./data/node0:/data # Mount the current path to /root/data inside the container
|
||||
- ../../target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
|
||||
command: "/app/rustfs"
|
||||
|
||||
node1:
|
||||
image: rustfs:v1
|
||||
image: rustfs/rustfs:latest
|
||||
container_name: node1
|
||||
hostname: node1
|
||||
environment:
|
||||
- RUSTFS_VOLUMES=http://node{0...3}:9000/data/rustfs{0...3}
|
||||
- RUSTFS_ADDRESS=0.0.0.0:9000
|
||||
- RUSTFS_CONSOLE_ENABLE=true
|
||||
- RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
|
||||
- RUSTFS_ACCESS_KEY=rustfsadmin
|
||||
- RUSTFS_SECRET_KEY=rustfsadmin
|
||||
platform: linux/amd64
|
||||
ports:
|
||||
- "9001:9000" # 映射宿主机的 9002 端口到容器的 9000 端口
|
||||
- "9001:9000" # Map port 9002 of the host to port 9000 of the container
|
||||
volumes:
|
||||
- ./target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
|
||||
# - ./data/node1:/data
|
||||
- ../../target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
|
||||
command: "/app/rustfs"
|
||||
|
||||
node2:
|
||||
image: rustfs:v1
|
||||
image: rustfs/rustfs:latest
|
||||
container_name: node2
|
||||
hostname: node2
|
||||
environment:
|
||||
- RUSTFS_VOLUMES=http://node{0...3}:9000/data/rustfs{0...3}
|
||||
- RUSTFS_ADDRESS=0.0.0.0:9000
|
||||
- RUSTFS_CONSOLE_ENABLE=true
|
||||
- RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
|
||||
- RUSTFS_ACCESS_KEY=rustfsadmin
|
||||
- RUSTFS_SECRET_KEY=rustfsadmin
|
||||
platform: linux/amd64
|
||||
ports:
|
||||
- "9002:9000" # 映射宿主机的 9003 端口到容器的 9000 端口
|
||||
- "9002:9000" # Map port 9003 of the host to port 9000 of the container
|
||||
volumes:
|
||||
- ./target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
|
||||
# - ./data/node2:/data
|
||||
- ../../target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
|
||||
command: "/app/rustfs"
|
||||
|
||||
node3:
|
||||
image: rustfs:v1
|
||||
image: rustfs/rustfs:latest
|
||||
container_name: node3
|
||||
hostname: node3
|
||||
environment:
|
||||
- RUSTFS_VOLUMES=http://node{0...3}:9000/data/rustfs{0...3}
|
||||
- RUSTFS_ADDRESS=0.0.0.0:9000
|
||||
- RUSTFS_CONSOLE_ENABLE=true
|
||||
- RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
|
||||
- RUSTFS_ACCESS_KEY=rustfsadmin
|
||||
- RUSTFS_SECRET_KEY=rustfsadmin
|
||||
platform: linux/amd64
|
||||
ports:
|
||||
- "9003:9000" # 映射宿主机的 9004 端口到容器的 9000 端口
|
||||
- "9003:9000" # Map port 9004 of the host to port 9000 of the container
|
||||
volumes:
|
||||
- ./target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
|
||||
# - ./data/node3:/data
|
||||
- ../../target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
|
||||
command: "/app/rustfs"
|
||||
@@ -14,11 +14,11 @@
|
||||
|
||||
services:
|
||||
otel-collector:
|
||||
image: ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-contrib:0.127.0
|
||||
image: otel/opentelemetry-collector-contrib:0.129.1
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
volumes:
|
||||
- ./.docker/observability/otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml
|
||||
- ../../.docker/observability/otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml
|
||||
ports:
|
||||
- 1888:1888
|
||||
- 8888:8888
|
||||
@@ -30,7 +30,7 @@ services:
|
||||
networks:
|
||||
- rustfs-network
|
||||
jaeger:
|
||||
image: jaegertracing/jaeger:2.6.0
|
||||
image: jaegertracing/jaeger:2.8.0
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
ports:
|
||||
@@ -40,11 +40,11 @@ services:
|
||||
networks:
|
||||
- rustfs-network
|
||||
prometheus:
|
||||
image: prom/prometheus:v3.4.1
|
||||
image: prom/prometheus:v3.4.2
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
volumes:
|
||||
- ./.docker/observability/prometheus.yml:/etc/prometheus/prometheus.yml
|
||||
- ../../.docker/observability/prometheus.yml:/etc/prometheus/prometheus.yml
|
||||
ports:
|
||||
- "9090:9090"
|
||||
networks:
|
||||
@@ -54,16 +54,16 @@ services:
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
volumes:
|
||||
- ./.docker/observability/loki-config.yaml:/etc/loki/local-config.yaml
|
||||
- ../../.docker/observability/loki-config.yaml:/etc/loki/local-config.yaml
|
||||
ports:
|
||||
- "3100:3100"
|
||||
command: -config.file=/etc/loki/local-config.yaml
|
||||
networks:
|
||||
- rustfs-network
|
||||
grafana:
|
||||
image: grafana/grafana:12.0.1
|
||||
image: grafana/grafana:12.0.2
|
||||
ports:
|
||||
- "3000:3000" # Web UI
|
||||
- "3000:3000" # Web UI
|
||||
environment:
|
||||
- GF_SECURITY_ADMIN_PASSWORD=admin
|
||||
- TZ=Asia/Shanghai
|
||||
@@ -72,85 +72,69 @@ services:
|
||||
|
||||
node1:
|
||||
build:
|
||||
context: .
|
||||
context: ../..
|
||||
dockerfile: Dockerfile.obs
|
||||
container_name: node1
|
||||
environment:
|
||||
- RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
|
||||
- RUSTFS_ADDRESS=:9000
|
||||
- RUSTFS_CONSOLE_ENABLE=true
|
||||
- RUSTFS_CONSOLE_ADDRESS=:9002
|
||||
- RUSTFS_OBS_CONFIG=/etc/observability/config/obs-multi.toml
|
||||
- RUSTFS_OBS_ENDPOINT=http://otel-collector:4317
|
||||
- RUSTFS_OBS_LOGGER_LEVEL=debug
|
||||
platform: linux/amd64
|
||||
ports:
|
||||
- "9001:9000" # 映射宿主机的 9001 端口到容器的 9000 端口
|
||||
- "9101:9002"
|
||||
volumes:
|
||||
# - ./data:/root/data # Mount the current path to /root/data inside the container
|
||||
- ./.docker/observability/config:/etc/observability/config
|
||||
- "9001:9000" # Map port 9001 of the host to port 9000 of the container
|
||||
networks:
|
||||
- rustfs-network
|
||||
|
||||
node2:
|
||||
build:
|
||||
context: .
|
||||
context: ../..
|
||||
dockerfile: Dockerfile.obs
|
||||
container_name: node2
|
||||
environment:
|
||||
- RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
|
||||
- RUSTFS_ADDRESS=:9000
|
||||
- RUSTFS_CONSOLE_ENABLE=true
|
||||
- RUSTFS_CONSOLE_ADDRESS=:9002
|
||||
- RUSTFS_OBS_CONFIG=/etc/observability/config/obs-multi.toml
|
||||
- RUSTFS_OBS_ENDPOINT=http://otel-collector:4317
|
||||
- RUSTFS_OBS_LOGGER_LEVEL=debug
|
||||
platform: linux/amd64
|
||||
ports:
|
||||
- "9002:9000" # 映射宿主机的 9002 端口到容器的 9000 端口
|
||||
- "9102:9002"
|
||||
volumes:
|
||||
# - ./data:/root/data
|
||||
- ./.docker/observability/config:/etc/observability/config
|
||||
- "9002:9000" # Map port 9002 of the host to port 9000 of the container
|
||||
networks:
|
||||
- rustfs-network
|
||||
|
||||
node3:
|
||||
build:
|
||||
context: .
|
||||
context: ../..
|
||||
dockerfile: Dockerfile.obs
|
||||
container_name: node3
|
||||
environment:
|
||||
- RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
|
||||
- RUSTFS_ADDRESS=:9000
|
||||
- RUSTFS_CONSOLE_ENABLE=true
|
||||
- RUSTFS_CONSOLE_ADDRESS=:9002
|
||||
- RUSTFS_OBS_CONFIG=/etc/observability/config/obs-multi.toml
|
||||
- RUSTFS_OBS_ENDPOINT=http://otel-collector:4317
|
||||
- RUSTFS_OBS_LOGGER_LEVEL=debug
|
||||
platform: linux/amd64
|
||||
ports:
|
||||
- "9003:9000" # 映射宿主机的 9003 端口到容器的 9000 端口
|
||||
- "9103:9002"
|
||||
volumes:
|
||||
# - ./data:/root/data
|
||||
- ./.docker/observability/config:/etc/observability/config
|
||||
- "9003:9000" # Map port 9003 of the host to port 9000 of the container
|
||||
networks:
|
||||
- rustfs-network
|
||||
|
||||
node4:
|
||||
build:
|
||||
context: .
|
||||
context: ../..
|
||||
dockerfile: Dockerfile.obs
|
||||
container_name: node4
|
||||
environment:
|
||||
- RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
|
||||
- RUSTFS_ADDRESS=:9000
|
||||
- RUSTFS_CONSOLE_ENABLE=true
|
||||
- RUSTFS_CONSOLE_ADDRESS=:9002
|
||||
- RUSTFS_OBS_CONFIG=/etc/observability/config/obs-multi.toml
|
||||
- RUSTFS_OBS_ENDPOINT=http://otel-collector:4317
|
||||
- RUSTFS_OBS_LOGGER_LEVEL=debug
|
||||
platform: linux/amd64
|
||||
ports:
|
||||
- "9004:9000" # 映射宿主机的 9004 端口到容器的 9000 端口
|
||||
- "9104:9002"
|
||||
volumes:
|
||||
# - ./data:/root/data
|
||||
- ./.docker/observability/config:/etc/observability/config
|
||||
- "9004:9000" # Map port 9004 of the host to port 9000 of the container
|
||||
networks:
|
||||
- rustfs-network
|
||||
|
||||
@@ -14,7 +14,7 @@
|
||||
|
||||
services:
|
||||
otel-collector:
|
||||
image: ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-contrib:0.127.0
|
||||
image: otel/opentelemetry-collector-contrib:0.129.1
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
volumes:
|
||||
@@ -30,7 +30,7 @@ services:
|
||||
networks:
|
||||
- otel-network
|
||||
jaeger:
|
||||
image: jaegertracing/jaeger:2.7.0
|
||||
image: jaegertracing/jaeger:2.8.0
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
ports:
|
||||
@@ -40,7 +40,7 @@ services:
|
||||
networks:
|
||||
- otel-network
|
||||
prometheus:
|
||||
image: prom/prometheus:v3.4.1
|
||||
image: prom/prometheus:v3.4.2
|
||||
environment:
|
||||
- TZ=Asia/Shanghai
|
||||
volumes:
|
||||
|
||||
@@ -66,6 +66,12 @@ service:
|
||||
logs:
|
||||
level: "info" # Collector 日志级别
|
||||
metrics:
|
||||
address: "0.0.0.0:8888" # Collector 自身指标暴露
|
||||
level: "detailed" # 可以是 basic, normal, detailed
|
||||
readers:
|
||||
- periodic:
|
||||
exporter:
|
||||
otlp:
|
||||
protocol: http/protobuf
|
||||
endpoint: http://otel-collector:4318
|
||||
|
||||
|
||||
|
||||
.docker/ubuntu/Dockerfile.dev (new file, 96 lines)
@@ -0,0 +1,96 @@
|
||||
# Copyright 2024 RustFS Team
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Ubuntu-based development environment
|
||||
# Provides full development toolchain for building RustFS from source
|
||||
FROM ubuntu:22.04
|
||||
|
||||
ENV LANG=C.UTF-8
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
# Use faster mirrors for better build performance
|
||||
RUN sed -i s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g /etc/apt/sources.list
|
||||
|
||||
# Install development dependencies
|
||||
RUN apt-get clean && apt-get update && apt-get install -y \
|
||||
wget \
|
||||
git \
|
||||
curl \
|
||||
unzip \
|
||||
gcc \
|
||||
pkg-config \
|
||||
libssl-dev \
|
||||
lld \
|
||||
libdbus-1-dev \
|
||||
libwayland-dev \
|
||||
libwebkit2gtk-4.1-dev \
|
||||
libxdo-dev \
|
||||
ca-certificates \
|
||||
bash \
|
||||
vim \
|
||||
nano \
|
||||
htop \
|
||||
tree \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install protoc
|
||||
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
|
||||
&& unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
|
||||
&& mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
|
||||
&& mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3
|
||||
|
||||
# Install flatc
|
||||
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
|
||||
&& unzip Linux.flatc.binary.g++-13.zip \
|
||||
&& mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip
|
||||
|
||||
# Install rust for development
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
|
||||
ENV PATH="/root/.cargo/bin:${PATH}"
|
||||
|
||||
# Install additional Rust tools for development
|
||||
RUN /root/.cargo/bin/cargo install \
|
||||
cargo-watch \
|
||||
cargo-nextest \
|
||||
cargo-audit \
|
||||
cargo-outdated
|
||||
|
||||
# Copy cargo config for Chinese users
|
||||
COPY .docker/cargo.config.toml /root/.cargo/config.toml
|
||||
|
||||
# Create development user
|
||||
RUN groupadd -g 1000 rustfs && \
|
||||
useradd -d /app -g rustfs -u 1000 -s /bin/bash rustfs
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Create data directories for testing
|
||||
RUN mkdir -p /data && chown -R rustfs:rustfs /data /app
|
||||
|
||||
# Environment variables for development
|
||||
ENV RUSTFS_ACCESS_KEY=devadmin \
|
||||
RUSTFS_SECRET_KEY=devadmin \
|
||||
RUSTFS_ADDRESS=":9000" \
|
||||
RUSTFS_CONSOLE_ENABLE=true \
|
||||
RUSTFS_VOLUMES=/data \
|
||||
RUST_LOG=debug \
|
||||
RUST_BACKTRACE=1
|
||||
|
||||
EXPOSE 9000
|
||||
|
||||
# Development mode: keep container alive for interactive development
|
||||
CMD echo "RustFS Development Environment" && \
|
||||
echo "Source code should be mounted at /app" && \
|
||||
echo "Use 'cargo build' to build, 'cargo run' to run" && \
|
||||
exec bash -c "while true; do sleep 1; done"
|
||||
.docker/ubuntu/Dockerfile.prebuild (new file, 130 lines)
@@ -0,0 +1,130 @@
|
||||
# Copyright 2024 RustFS Team
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Multi-purpose Ubuntu-based Dockerfile
|
||||
# Can be used as development environment or ubuntu runtime variant
|
||||
FROM ubuntu:22.04
|
||||
|
||||
# Build arguments for dynamic artifact download (when used as runtime)
|
||||
ARG VERSION=""
|
||||
ARG BUILD_TYPE="release"
|
||||
ARG TARGETARCH
|
||||
|
||||
ENV LANG=C.UTF-8
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
# Use faster mirrors for better build performance
|
||||
RUN sed -i s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g /etc/apt/sources.list
|
||||
|
||||
# Install dependencies
|
||||
RUN apt-get clean && apt-get update && apt-get install -y \
|
||||
wget \
|
||||
git \
|
||||
curl \
|
||||
unzip \
|
||||
gcc \
|
||||
pkg-config \
|
||||
libssl-dev \
|
||||
lld \
|
||||
libdbus-1-dev \
|
||||
libwayland-dev \
|
||||
libwebkit2gtk-4.1-dev \
|
||||
libxdo-dev \
|
||||
ca-certificates \
|
||||
bash \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install protoc
|
||||
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
|
||||
&& unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
|
||||
&& mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
|
||||
&& mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3
|
||||
|
||||
# Install flatc
|
||||
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
|
||||
&& unzip Linux.flatc.binary.g++-13.zip \
|
||||
&& mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip
|
||||
|
||||
# Install rust (for development use)
|
||||
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y
|
||||
ENV PATH="/root/.cargo/bin:${PATH}"
|
||||
|
||||
# Copy cargo config for Chinese users
|
||||
COPY .docker/cargo.config.toml /root/.cargo/config.toml
|
||||
|
||||
# If VERSION is provided, download pre-built binary (for runtime use)
|
||||
# Otherwise, this acts as a development environment
|
||||
RUN if [ -n "$VERSION" ]; then \
|
||||
# Map TARGETARCH to our naming convention
|
||||
case "${TARGETARCH}" in \
|
||||
amd64) ARCH="x86_64" ;; \
|
||||
arm64) ARCH="aarch64" ;; \
|
||||
*) echo "Unsupported architecture: ${TARGETARCH}" && exit 1 ;; \
|
||||
esac; \
|
||||
\
|
||||
# Determine download path and filename
|
||||
if [ "${BUILD_TYPE}" = "development" ]; then \
|
||||
DOWNLOAD_PATH="artifacts/rustfs/dev"; \
|
||||
FILENAME="rustfs-linux-${ARCH}-dev-${VERSION}.zip"; \
|
||||
else \
|
||||
DOWNLOAD_PATH="artifacts/rustfs/release"; \
|
||||
FILENAME="rustfs-linux-${ARCH}-v${VERSION}.zip"; \
|
||||
fi; \
|
||||
\
|
||||
# Download the binary
|
||||
DOWNLOAD_URL="https://dl.rustfs.com/${DOWNLOAD_PATH}/${FILENAME}"; \
|
||||
echo "Downloading RustFS binary from: ${DOWNLOAD_URL}"; \
|
||||
curl -Lo /tmp/rustfs.zip "${DOWNLOAD_URL}" || { \
|
||||
echo "Failed to download, continuing as development environment"; \
|
||||
}; \
|
||||
if [ -f /tmp/rustfs.zip ]; then \
|
||||
unzip -o /tmp/rustfs.zip -d /tmp; \
|
||||
mv /tmp/rustfs /usr/local/bin/rustfs; \
|
||||
chmod +x /usr/local/bin/rustfs; \
|
||||
rm -rf /tmp/*; \
|
||||
fi; \
|
||||
fi
|
||||
|
||||
# Create rustfs user for security
|
||||
RUN groupadd -g 1000 rustfs && \
|
||||
useradd -d /app -g rustfs -u 1000 -s /bin/bash rustfs
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Create data directories
|
||||
RUN mkdir -p /data && chown -R rustfs:rustfs /data /app
|
||||
|
||||
# Environment variables
|
||||
ENV RUSTFS_ACCESS_KEY=rustfsadmin \
|
||||
RUSTFS_SECRET_KEY=rustfsadmin \
|
||||
RUSTFS_ADDRESS=":9000" \
|
||||
RUSTFS_CONSOLE_ENABLE=true \
|
||||
RUSTFS_VOLUMES=/data \
|
||||
RUST_LOG=warn
|
||||
|
||||
EXPOSE 9000
|
||||
|
||||
# Health check (only if rustfs binary exists)
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
CMD if [ -f /usr/local/bin/rustfs ]; then wget --no-verbose --tries=1 --spider http://localhost:9000/health || exit 1; else exit 0; fi
|
||||
|
||||
# Default command: if rustfs binary exists, run it; otherwise, keep container alive for development
|
||||
CMD if [ -f /usr/local/bin/rustfs ]; then \
|
||||
echo "Starting RustFS server..."; \
|
||||
exec /usr/local/bin/rustfs; \
|
||||
else \
|
||||
echo "Running in development mode..."; \
|
||||
echo "RustFS source code should be mounted at /app"; \
|
||||
exec bash -c "while true; do sleep 1; done"; \
|
||||
fi
|
||||
@@ -4,7 +4,7 @@ ARG TARGETPLATFORM
|
||||
ARG BUILDPLATFORM
|
||||
|
||||
# Build stage
|
||||
FROM --platform=$BUILDPLATFORM rust:1.85-bookworm AS builder
|
||||
FROM --platform=$BUILDPLATFORM rust:1.88-bookworm AS builder
|
||||
|
||||
# Install required build dependencies
|
||||
RUN apt-get update && apt-get install -y \
|
||||
@@ -18,6 +18,18 @@ RUN apt-get update && apt-get install -y \
|
||||
lld \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Install sccache for Rust compilation caching
|
||||
RUN wget https://github.com/mozilla/sccache/releases/download/v0.8.1/sccache-v0.8.1-x86_64-unknown-linux-gnu.tar.gz \
|
||||
&& tar -xzf sccache-v0.8.1-x86_64-unknown-linux-gnu.tar.gz \
|
||||
&& mv sccache-v0.8.1-x86_64-unknown-linux-gnu/sccache /usr/local/bin/ \
|
||||
&& chmod +x /usr/local/bin/sccache \
|
||||
&& rm -rf sccache-v0.8.1-x86_64-unknown-linux-gnu.tar.gz sccache-v0.8.1-x86_64-unknown-linux-gnu
|
||||
|
||||
# Set up sccache environment
|
||||
ENV RUSTC_WRAPPER=sccache \
|
||||
SCCACHE_DIR=/tmp/sccache \
|
||||
SCCACHE_CACHE_SIZE=2G
|
||||
|
||||
# Install cross-compilation tools for ARM64
|
||||
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
|
||||
apt-get update && \
|
||||
@@ -50,6 +62,9 @@ ENV CXX_aarch64_unknown_linux_gnu=aarch64-linux-gnu-g++
|
||||
|
||||
WORKDIR /usr/src/rustfs
|
||||
|
||||
# Copy cargo configuration for optimized builds
|
||||
COPY .docker/cargo.config.toml ./.cargo/config.toml
|
||||
|
||||
# Copy Cargo files for dependency caching
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
COPY */Cargo.toml ./*/
|
||||
@@ -59,10 +74,19 @@ RUN find . -name "Cargo.toml" -not -path "./Cargo.toml" | \
|
||||
xargs -I {} dirname {} | \
|
||||
xargs -I {} sh -c 'mkdir -p {}/src && echo "fn main() {}" > {}/src/main.rs'
|
||||
|
||||
# Build dependencies only (cache layer)
|
||||
RUN case "$TARGETPLATFORM" in \
|
||||
"linux/amd64") cargo build --release --target x86_64-unknown-linux-gnu ;; \
|
||||
"linux/arm64") cargo build --release --target aarch64-unknown-linux-gnu ;; \
|
||||
# Configure cargo for optimized builds
|
||||
ENV CARGO_NET_GIT_FETCH_WITH_CLI=true \
|
||||
CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse \
|
||||
CARGO_INCREMENTAL=0 \
|
||||
CARGO_PROFILE_RELEASE_DEBUG=false \
|
||||
CARGO_PROFILE_RELEASE_SPLIT_DEBUGINFO=off \
|
||||
CARGO_PROFILE_RELEASE_STRIP=symbols
|
||||
|
||||
# Build dependencies only (cache layer) with optimizations
|
||||
RUN sccache --start-server 2>/dev/null || true && \
|
||||
case "$TARGETPLATFORM" in \
|
||||
"linux/amd64") cargo build --release --target x86_64-unknown-linux-gnu -j $(nproc) ;; \
|
||||
"linux/arm64") cargo build --release --target aarch64-unknown-linux-gnu -j $(nproc) ;; \
|
||||
esac
|
||||
|
||||
# Copy source code
|
||||
@@ -71,17 +95,19 @@ COPY . .
|
||||
# Generate protobuf code
|
||||
RUN cargo run --bin gproto
|
||||
|
||||
# Build the actual application
|
||||
RUN case "$TARGETPLATFORM" in \
|
||||
# Build the actual application with optimizations
|
||||
RUN sccache --start-server 2>/dev/null || true && \
|
||||
case "$TARGETPLATFORM" in \
|
||||
"linux/amd64") \
|
||||
cargo build --release --target x86_64-unknown-linux-gnu --bin rustfs && \
|
||||
cargo build --release --target x86_64-unknown-linux-gnu --bin rustfs -j $(nproc) && \
|
||||
cp target/x86_64-unknown-linux-gnu/release/rustfs /usr/local/bin/rustfs \
|
||||
;; \
|
||||
"linux/arm64") \
|
||||
cargo build --release --target aarch64-unknown-linux-gnu --bin rustfs && \
|
||||
cargo build --release --target aarch64-unknown-linux-gnu --bin rustfs -j $(nproc) && \
|
||||
cp target/aarch64-unknown-linux-gnu/release/rustfs /usr/local/bin/rustfs \
|
||||
;; \
|
||||
esac
|
||||
esac && \
|
||||
sccache --show-stats || true
|
||||
|
||||
# Runtime stage - Ubuntu minimal for better compatibility
|
||||
FROM ubuntu:22.04
|
||||
@@ -111,7 +137,15 @@ RUN chmod +x /app/rustfs && chown rustfs:rustfs /app/rustfs
|
||||
USER rustfs
|
||||
|
||||
# Expose ports
|
||||
EXPOSE 9000 9001
|
||||
EXPOSE 9000
|
||||
|
||||
# Environment variables
|
||||
ENV RUSTFS_ACCESS_KEY=rustfsadmin \
|
||||
RUSTFS_SECRET_KEY=rustfsadmin \
|
||||
RUSTFS_ADDRESS=":9000" \
|
||||
RUSTFS_CONSOLE_ENABLE=true \
|
||||
RUSTFS_VOLUMES=/data \
|
||||
RUST_LOG=warn
|
||||
|
||||
# Health check
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
.github/actions/setup/action.yml (vendored, 12 lines changed)
@@ -60,15 +60,7 @@ runs:
|
||||
pkg-config \
|
||||
libssl-dev
|
||||
|
||||
- name: Cache protoc binary
|
||||
id: cache-protoc
|
||||
uses: actions/cache@v4
|
||||
with:
|
||||
path: ~/.local/bin/protoc
|
||||
key: protoc-31.1-${{ runner.os }}-${{ runner.arch }}
|
||||
|
||||
- name: Install protoc
|
||||
if: steps.cache-protoc.outputs.cache-hit != 'true'
|
||||
uses: arduino/setup-protoc@v3
|
||||
with:
|
||||
version: "31.1"
|
||||
@@ -104,7 +96,3 @@ runs:
|
||||
cache-on-failure: true
|
||||
shared-key: ${{ inputs.cache-shared-key }}
|
||||
save-if: ${{ inputs.cache-save-if }}
|
||||
# Cache workspace dependencies
|
||||
workspaces: |
|
||||
. -> target
|
||||
cli/rustfs-gui -> cli/rustfs-gui/target
|
||||
|
||||
.github/workflows/build.yml (vendored, 312 lines changed)
@@ -16,7 +16,7 @@ name: Build and Release
|
||||
|
||||
on:
|
||||
push:
|
||||
tags: ["*"]
|
||||
tags: ["*.*.*"]
|
||||
branches: [main]
|
||||
paths-ignore:
|
||||
- "**.md"
|
||||
@@ -65,39 +65,79 @@ env:
|
||||
CARGO_INCREMENTAL: 0
|
||||
|
||||
jobs:
|
||||
# Second layer: Business logic level checks (handling build strategy)
|
||||
# Build strategy check - determine build type based on trigger
|
||||
build-check:
|
||||
name: Build Strategy Check
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
should_build: ${{ steps.check.outputs.should_build }}
|
||||
build_type: ${{ steps.check.outputs.build_type }}
|
||||
version: ${{ steps.check.outputs.version }}
|
||||
short_sha: ${{ steps.check.outputs.short_sha }}
|
||||
is_prerelease: ${{ steps.check.outputs.is_prerelease }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Determine build strategy
|
||||
id: check
|
||||
run: |
|
||||
should_build=false
|
||||
build_type="none"
|
||||
version=""
|
||||
short_sha=""
|
||||
is_prerelease=false
|
||||
|
||||
# Business logic: when we need to build
|
||||
if [[ "${{ github.event_name }}" == "schedule" ]] || \
|
||||
[[ "${{ github.event_name }}" == "workflow_dispatch" ]] || \
|
||||
[[ "${{ github.event.inputs.force_build }}" == "true" ]] || \
|
||||
[[ "${{ contains(github.event.head_commit.message, '--build') }}" == "true" ]]; then
|
||||
# Get short SHA for all builds
|
||||
short_sha=$(git rev-parse --short HEAD)
|
||||
|
||||
# Determine build type based on trigger
|
||||
if [[ "${{ startsWith(github.ref, 'refs/tags/') }}" == "true" ]]; then
|
||||
# Tag push - release or prerelease
|
||||
should_build=true
|
||||
tag_name="${GITHUB_REF#refs/tags/}"
|
||||
version="${tag_name}"
|
||||
|
||||
# Check if this is a prerelease
|
||||
if [[ "$tag_name" == *"alpha"* ]] || [[ "$tag_name" == *"beta"* ]] || [[ "$tag_name" == *"rc"* ]]; then
|
||||
build_type="prerelease"
|
||||
is_prerelease=true
|
||||
echo "🚀 Prerelease build detected: $tag_name"
|
||||
else
|
||||
build_type="release"
|
||||
echo "📦 Release build detected: $tag_name"
|
||||
fi
|
||||
elif [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
|
||||
# Main branch push - development build
|
||||
should_build=true
|
||||
build_type="development"
|
||||
fi
|
||||
|
||||
# Always build for tag pushes (version releases)
|
||||
if [[ "${{ startsWith(github.ref, 'refs/tags/') }}" == "true" ]]; then
|
||||
version="dev-${short_sha}"
|
||||
echo "🛠️ Development build detected"
|
||||
elif [[ "${{ github.event_name }}" == "schedule" ]] || \
|
||||
[[ "${{ github.event_name }}" == "workflow_dispatch" ]] || \
|
||||
[[ "${{ github.event.inputs.force_build }}" == "true" ]] || \
|
||||
[[ "${{ contains(github.event.head_commit.message, '--build') }}" == "true" ]]; then
|
||||
# Scheduled or manual build
|
||||
should_build=true
|
||||
build_type="release"
|
||||
echo "🏷️ Tag detected: forcing release build"
|
||||
build_type="development"
|
||||
version="dev-${short_sha}"
|
||||
echo "⚡ Manual/scheduled build detected"
|
||||
fi
|
||||
|
||||
echo "should_build=$should_build" >> $GITHUB_OUTPUT
|
||||
echo "build_type=$build_type" >> $GITHUB_OUTPUT
|
||||
echo "Build needed: $should_build (type: $build_type)"
|
||||
echo "version=$version" >> $GITHUB_OUTPUT
|
||||
echo "short_sha=$short_sha" >> $GITHUB_OUTPUT
|
||||
echo "is_prerelease=$is_prerelease" >> $GITHUB_OUTPUT
|
||||
|
||||
echo "📊 Build Summary:"
|
||||
echo " - Should build: $should_build"
|
||||
echo " - Build type: $build_type"
|
||||
echo " - Version: $version"
|
||||
echo " - Short SHA: $short_sha"
|
||||
echo " - Is prerelease: $is_prerelease"
|
||||
|
||||
# Build RustFS binaries
|
||||
build-rustfs:
|
||||
@@ -158,7 +198,6 @@ jobs:
|
||||
- name: Download static console assets
|
||||
run: |
|
||||
mkdir -p ./rustfs/static
|
||||
rm -rf ./rustfs/static/*
|
||||
if [[ "${{ matrix.platform }}" == "windows" ]]; then
|
||||
curl.exe -L "https://dl.rustfs.com/artifacts/console/rustfs-console-latest.zip" -o console.zip --retry 3 --retry-delay 5 --max-time 300
|
||||
if [[ $? -eq 0 ]]; then
|
||||
@@ -169,6 +208,7 @@ jobs:
|
||||
echo "// Static assets not available" > ./rustfs/static/empty.txt
|
||||
fi
|
||||
else
|
||||
chmod +w ./rustfs/static/LICENSE || true
|
||||
curl -L "https://dl.rustfs.com/artifacts/console/rustfs-console-latest.zip" \
|
||||
-o console.zip --retry 3 --retry-delay 5 --max-time 300
|
||||
if [[ $? -eq 0 ]]; then
|
||||
@@ -202,7 +242,38 @@ jobs:
|
||||
id: package
|
||||
shell: bash
|
||||
run: |
|
||||
PACKAGE_NAME="rustfs-${{ matrix.target }}"
|
||||
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
|
||||
VERSION="${{ needs.build-check.outputs.version }}"
|
||||
SHORT_SHA="${{ needs.build-check.outputs.short_sha }}"
|
||||
|
||||
# Extract platform and arch from target
|
||||
TARGET="${{ matrix.target }}"
|
||||
PLATFORM="${{ matrix.platform }}"
|
||||
|
||||
# Map target to architecture
|
||||
case "$TARGET" in
|
||||
*x86_64*)
|
||||
ARCH="x86_64"
|
||||
;;
|
||||
*aarch64*|*arm64*)
|
||||
ARCH="aarch64"
|
||||
;;
|
||||
*armv7*)
|
||||
ARCH="armv7"
|
||||
;;
|
||||
*)
|
||||
ARCH="unknown"
|
||||
;;
|
||||
esac
|
||||
|
||||
# Generate package name based on build type
|
||||
if [[ "$BUILD_TYPE" == "development" ]]; then
|
||||
# Development build: rustfs-${platform}-${arch}-dev-${short_sha}.zip
|
||||
PACKAGE_NAME="rustfs-${PLATFORM}-${ARCH}-dev-${SHORT_SHA}"
|
||||
else
|
||||
# Release/Prerelease build: rustfs-${platform}-${arch}-v${version}.zip
|
||||
PACKAGE_NAME="rustfs-${PLATFORM}-${ARCH}-v${VERSION}"
|
||||
fi
|
||||
|
||||
# Create zip packages for all platforms
|
||||
# Ensure zip is available
|
||||
@@ -215,9 +286,15 @@ jobs:
|
||||
cd target/${{ matrix.target }}/release
|
||||
zip "../../../${PACKAGE_NAME}.zip" rustfs
|
||||
cd ../../..
|
||||
|
||||
echo "package_name=${PACKAGE_NAME}" >> $GITHUB_OUTPUT
|
||||
echo "package_file=${PACKAGE_NAME}.zip" >> $GITHUB_OUTPUT
|
||||
echo "Package created: ${PACKAGE_NAME}.zip"
|
||||
echo "build_type=${BUILD_TYPE}" >> $GITHUB_OUTPUT
|
||||
echo "version=${VERSION}" >> $GITHUB_OUTPUT
|
||||
|
||||
echo "📦 Package created: ${PACKAGE_NAME}.zip"
|
||||
echo "🔧 Build type: ${BUILD_TYPE}"
|
||||
echo "📊 Version: ${VERSION}"
|
||||
|
||||
- name: Upload artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
@@ -227,13 +304,15 @@ jobs:
|
||||
retention-days: ${{ startsWith(github.ref, 'refs/tags/') && 30 || 7 }}
|
||||
|
||||
- name: Upload to Aliyun OSS
|
||||
if: needs.build-check.outputs.build_type == 'release' && env.OSS_ACCESS_KEY_ID != ''
|
||||
if: env.OSS_ACCESS_KEY_ID != '' && (needs.build-check.outputs.build_type == 'release' || needs.build-check.outputs.build_type == 'prerelease' || needs.build-check.outputs.build_type == 'development')
|
||||
env:
|
||||
OSS_ACCESS_KEY_ID: ${{ secrets.ALICLOUDOSS_KEY_ID }}
|
||||
OSS_ACCESS_KEY_SECRET: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
|
||||
OSS_REGION: cn-beijing
|
||||
OSS_ENDPOINT: https://oss-cn-beijing.aliyuncs.com
|
||||
run: |
|
||||
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
|
||||
|
||||
# Install ossutil (platform-specific)
|
||||
OSSUTIL_VERSION="2.1.1"
|
||||
case "${{ matrix.platform }}" in
|
||||
@@ -271,148 +350,71 @@ jobs:
|
||||
;;
|
||||
esac
|
||||
|
||||
# Upload the package file directly to OSS
|
||||
echo "Uploading ${{ steps.package.outputs.package_file }} to OSS..."
|
||||
$OSSUTIL_BIN cp "${{ steps.package.outputs.package_file }}" oss://rustfs-artifacts/artifacts/rustfs/ --force
|
||||
|
||||
# Create latest.json (only for the first Linux build to avoid duplication)
|
||||
if [[ "${{ matrix.target }}" == "x86_64-unknown-linux-musl" ]]; then
|
||||
VERSION="${GITHUB_REF#refs/tags/v}"
|
||||
echo "{\"version\":\"${VERSION}\",\"release_date\":\"$(date -u +%Y-%m-%dT%H:%M:%SZ)\"}" > latest.json
|
||||
$OSSUTIL_BIN cp latest.json oss://rustfs-version/latest.json --force
|
||||
# Determine upload path based on build type
|
||||
if [[ "$BUILD_TYPE" == "development" ]]; then
|
||||
OSS_PATH="oss://rustfs-artifacts/artifacts/rustfs/dev/"
|
||||
echo "📤 Uploading development build to OSS dev directory"
|
||||
else
|
||||
OSS_PATH="oss://rustfs-artifacts/artifacts/rustfs/release/"
|
||||
echo "📤 Uploading release build to OSS release directory"
|
||||
fi
|
||||
|
||||
# Release management
|
||||
release:
|
||||
name: GitHub Release
|
||||
# Upload the package file to OSS
|
||||
echo "Uploading ${{ steps.package.outputs.package_file }} to $OSS_PATH..."
|
||||
$OSSUTIL_BIN cp "${{ steps.package.outputs.package_file }}" "$OSS_PATH" --force
|
||||
|
||||
# For release and prerelease builds, also create a latest version
|
||||
if [[ "$BUILD_TYPE" == "release" ]] || [[ "$BUILD_TYPE" == "prerelease" ]]; then
|
||||
# Extract platform and arch from package name
|
||||
PACKAGE_NAME="${{ steps.package.outputs.package_name }}"
|
||||
|
||||
# Create latest version filename
|
||||
# Convert from rustfs-linux-x86_64-v1.0.0 to rustfs-linux-x86_64-latest
|
||||
LATEST_NAME=$(echo "$PACKAGE_NAME" | sed 's/-v[0-9].*$/-latest/')
|
||||
LATEST_FILE="${LATEST_NAME}.zip"
|
||||
|
||||
# Copy the original file to latest version
|
||||
cp "${{ steps.package.outputs.package_file }}" "$LATEST_FILE"
|
||||
|
||||
# Upload the latest version
|
||||
echo "Uploading latest version: $LATEST_FILE to $OSS_PATH..."
|
||||
$OSSUTIL_BIN cp "$LATEST_FILE" "$OSS_PATH" --force
|
||||
|
||||
echo "✅ Latest version uploaded: $LATEST_FILE"
|
||||
fi
|
||||
|
||||
echo "✅ Upload completed successfully"
|
||||
|
||||
# Build summary
|
||||
build-summary:
|
||||
name: Build Summary
|
||||
needs: [build-check, build-rustfs]
|
||||
if: always() && needs.build-check.outputs.build_type == 'release'
|
||||
if: always() && needs.build-check.outputs.should_build == 'true'
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Download all artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
path: ./release-artifacts
|
||||
|
||||
- name: Prepare release assets
|
||||
id: release_prep
|
||||
- name: Build completion summary
|
||||
run: |
|
||||
VERSION="${GITHUB_REF#refs/tags/}"
|
||||
VERSION_CLEAN="${VERSION#v}"
|
||||
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
|
||||
VERSION="${{ needs.build-check.outputs.version }}"
|
||||
|
||||
echo "version=${VERSION}" >> $GITHUB_OUTPUT
|
||||
echo "version_clean=${VERSION_CLEAN}" >> $GITHUB_OUTPUT
|
||||
echo "🎉 Build completed successfully!"
|
||||
echo "📦 Build type: $BUILD_TYPE"
|
||||
echo "🔢 Version: $VERSION"
|
||||
echo ""
|
||||
|
||||
# Organize artifacts
|
||||
mkdir -p ./release-files
|
||||
|
||||
# Copy all artifacts (.zip files)
|
||||
find ./release-artifacts -name "*.zip" -exec cp {} ./release-files/ \;
|
||||
|
||||
# Generate checksums for all files
|
||||
cd ./release-files
|
||||
if ls *.zip >/dev/null 2>&1; then
|
||||
sha256sum *.zip >> SHA256SUMS
|
||||
sha512sum *.zip >> SHA512SUMS
|
||||
fi
|
||||
cd ..
|
||||
|
||||
# Display what we're releasing
|
||||
echo "=== Release Files ==="
|
||||
ls -la ./release-files/
|
||||
|
||||
- name: Create GitHub Release
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
run: |
|
||||
VERSION="${{ steps.release_prep.outputs.version }}"
|
||||
VERSION_CLEAN="${{ steps.release_prep.outputs.version_clean }}"
|
||||
|
||||
# Check if release already exists
|
||||
if gh release view "$VERSION" >/dev/null 2>&1; then
|
||||
echo "Release $VERSION already exists, skipping creation"
|
||||
else
|
||||
# Get release notes from tag message
|
||||
RELEASE_NOTES=$(git tag -l --format='%(contents)' "${VERSION}")
|
||||
if [[ -z "$RELEASE_NOTES" || "$RELEASE_NOTES" =~ ^[[:space:]]*$ ]]; then
|
||||
RELEASE_NOTES="Release ${VERSION_CLEAN}"
|
||||
fi
|
||||
|
||||
# Determine if this is a prerelease
|
||||
PRERELEASE_FLAG=""
|
||||
if [[ "$VERSION" == *"alpha"* ]] || [[ "$VERSION" == *"beta"* ]] || [[ "$VERSION" == *"rc"* ]]; then
|
||||
PRERELEASE_FLAG="--prerelease"
|
||||
fi
|
||||
|
||||
# Create the release only if it doesn't exist
|
||||
gh release create "$VERSION" \
|
||||
--title "RustFS $VERSION_CLEAN" \
|
||||
--notes "$RELEASE_NOTES" \
|
||||
$PRERELEASE_FLAG
|
||||
fi
|
||||
|
||||
- name: Upload release assets
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
run: |
|
||||
VERSION="${{ steps.release_prep.outputs.version }}"
|
||||
|
||||
cd ./release-files
|
||||
|
||||
# Upload all binary files
|
||||
for file in *.zip; do
|
||||
if [[ -f "$file" ]]; then
|
||||
echo "Uploading $file..."
|
||||
gh release upload "$VERSION" "$file" --clobber
|
||||
fi
|
||||
done
|
||||
|
||||
# Upload checksum files
|
||||
if [[ -f "SHA256SUMS" ]]; then
|
||||
echo "Uploading SHA256SUMS..."
|
||||
gh release upload "$VERSION" "SHA256SUMS" --clobber
|
||||
fi
|
||||
|
||||
if [[ -f "SHA512SUMS" ]]; then
|
||||
echo "Uploading SHA512SUMS..."
|
||||
gh release upload "$VERSION" "SHA512SUMS" --clobber
|
||||
fi
|
||||
|
||||
- name: Update release notes
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
run: |
|
||||
VERSION="${{ steps.release_prep.outputs.version }}"
|
||||
VERSION_CLEAN="${{ steps.release_prep.outputs.version_clean }}"
|
||||
|
||||
# Check if release already has custom notes (not auto-generated)
|
||||
EXISTING_NOTES=$(gh release view "$VERSION" --json body --jq '.body' 2>/dev/null || echo "")
|
||||
|
||||
# Only update if release notes are empty or auto-generated
|
||||
if [[ -z "$EXISTING_NOTES" ]] || [[ "$EXISTING_NOTES" == *"Release ${VERSION_CLEAN}"* ]]; then
|
||||
echo "Updating release notes for $VERSION"
|
||||
|
||||
# Get original release notes from tag
|
||||
ORIGINAL_NOTES=$(git tag -l --format='%(contents)' "${VERSION}")
|
||||
if [[ -z "$ORIGINAL_NOTES" || "$ORIGINAL_NOTES" =~ ^[[:space:]]*$ ]]; then
|
||||
ORIGINAL_NOTES="Release ${VERSION_CLEAN}"
|
||||
fi
|
||||
|
||||
# Use external template file and substitute variables
|
||||
sed -e "s/\${VERSION}/$VERSION/g" \
|
||||
-e "s/\${VERSION_CLEAN}/$VERSION_CLEAN/g" \
|
||||
-e "s/\${ORIGINAL_NOTES}/$(echo "$ORIGINAL_NOTES" | sed 's/[[\.*^$()+?{|]/\\&/g')/g" \
|
||||
.github/workflows/release-notes-template.md > enhanced_notes.md
|
||||
|
||||
# Update the release with enhanced notes
|
||||
gh release edit "$VERSION" --notes-file enhanced_notes.md
|
||||
else
|
||||
echo "Release $VERSION already has custom notes, skipping update to preserve manual edits"
|
||||
fi
|
||||
case "$BUILD_TYPE" in
|
||||
"development")
|
||||
echo "🛠️ Development build artifacts have been uploaded to OSS dev directory"
|
||||
echo "⚠️ This is a development build - not suitable for production use"
|
||||
;;
|
||||
"release")
|
||||
echo "🚀 Release build artifacts have been uploaded to OSS release directory"
|
||||
echo "✅ This build is ready for production use"
|
||||
echo "🏷️ GitHub Release will be created automatically by the release workflow"
|
||||
;;
|
||||
"prerelease")
|
||||
echo "🧪 Prerelease build artifacts have been uploaded to OSS release directory"
|
||||
echo "⚠️ This is a prerelease build - use with caution"
|
||||
echo "🏷️ GitHub Release will be created automatically by the release workflow"
|
||||
;;
|
||||
esac
|
||||
|
||||
2 .github/workflows/ci.yml vendored
@@ -81,7 +81,7 @@ jobs:
|
||||
cancel_others: true
|
||||
paths_ignore: '["*.md", "docs/**", "deploy/**"]'
|
||||
# Never skip release events and tag pushes
|
||||
do_not_skip: '["release", "push"]'
|
||||
do_not_skip: '["workflow_dispatch", "schedule", "merge_group", "release", "push"]'
|
||||
|
||||
test-and-lint:
|
||||
name: Test and Lint
|
||||
|
||||
264 .github/workflows/docker.yml vendored
@@ -16,7 +16,7 @@ name: Docker Images
|
||||
|
||||
on:
|
||||
push:
|
||||
tags: ["*"]
|
||||
tags: ["*.*.*"]
|
||||
branches: [main]
|
||||
paths-ignore:
|
||||
- "**.md"
|
||||
@@ -62,19 +62,37 @@ env:
|
||||
REGISTRY_GHCR: ghcr.io/${{ github.repository }}
|
||||
|
||||
jobs:
|
||||
# Check if we should build
|
||||
# Docker build strategy check
|
||||
build-check:
|
||||
name: Build Check
|
||||
name: Docker Build Check
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
should_build: ${{ steps.check.outputs.should_build }}
|
||||
should_push: ${{ steps.check.outputs.should_push }}
|
||||
build_type: ${{ steps.check.outputs.build_type }}
|
||||
version: ${{ steps.check.outputs.version }}
|
||||
short_sha: ${{ steps.check.outputs.short_sha }}
|
||||
is_prerelease: ${{ steps.check.outputs.is_prerelease }}
|
||||
create_latest: ${{ steps.check.outputs.create_latest }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Check build conditions
|
||||
id: check
|
||||
run: |
|
||||
should_build=false
|
||||
should_push=false
|
||||
build_type="none"
|
||||
version=""
|
||||
short_sha=""
|
||||
is_prerelease=false
|
||||
create_latest=false
|
||||
|
||||
# Get short SHA for all builds
|
||||
short_sha=$(git rev-parse --short HEAD)
|
||||
|
||||
# Always build on workflow_dispatch or when changes detected
|
||||
if [[ "${{ github.event_name }}" == "workflow_dispatch" ]] || \
|
||||
@@ -83,6 +101,34 @@ jobs:
|
||||
should_build=true
|
||||
fi
|
||||
|
||||
# Determine build type and version
|
||||
if [[ "${{ startsWith(github.ref, 'refs/tags/') }}" == "true" ]]; then
|
||||
# Tag push - release or prerelease
|
||||
tag_name="${GITHUB_REF#refs/tags/}"
|
||||
version="${tag_name}"
|
||||
|
||||
# Check if this is a prerelease
|
||||
if [[ "$tag_name" == *"alpha"* ]] || [[ "$tag_name" == *"beta"* ]] || [[ "$tag_name" == *"rc"* ]]; then
|
||||
build_type="prerelease"
|
||||
is_prerelease=true
|
||||
echo "🚀 Docker prerelease build detected: $tag_name"
|
||||
else
|
||||
build_type="release"
|
||||
create_latest=true
|
||||
echo "📦 Docker release build detected: $tag_name"
|
||||
fi
|
||||
elif [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
|
||||
# Main branch push - development build
|
||||
build_type="development"
|
||||
version="dev-${short_sha}"
|
||||
echo "🛠️ Docker development build detected"
|
||||
else
|
||||
# Other branches - development build
|
||||
build_type="development"
|
||||
version="dev-${short_sha}"
|
||||
echo "🔧 Docker development build detected"
|
||||
fi
|
||||
|
||||
# Push only on main branch, tags, or manual trigger
|
||||
if [[ "${{ github.ref }}" == "refs/heads/main" ]] || \
|
||||
[[ "${{ startsWith(github.ref, 'refs/tags/') }}" == "true" ]] || \
|
||||
@@ -92,7 +138,20 @@ jobs:
|
||||
|
||||
echo "should_build=$should_build" >> $GITHUB_OUTPUT
|
||||
echo "should_push=$should_push" >> $GITHUB_OUTPUT
|
||||
echo "Build: $should_build, Push: $should_push"
|
||||
echo "build_type=$build_type" >> $GITHUB_OUTPUT
|
||||
echo "version=$version" >> $GITHUB_OUTPUT
|
||||
echo "short_sha=$short_sha" >> $GITHUB_OUTPUT
|
||||
echo "is_prerelease=$is_prerelease" >> $GITHUB_OUTPUT
|
||||
echo "create_latest=$create_latest" >> $GITHUB_OUTPUT
|
||||
|
||||
echo "🐳 Docker Build Summary:"
|
||||
echo " - Should build: $should_build"
|
||||
echo " - Should push: $should_push"
|
||||
echo " - Build type: $build_type"
|
||||
echo " - Version: $version"
|
||||
echo " - Short SHA: $short_sha"
|
||||
echo " - Is prerelease: $is_prerelease"
|
||||
echo " - Create latest: $create_latest"
|
||||
|
||||
# Build multi-arch Docker images
|
||||
build-docker:
|
||||
@@ -108,11 +167,17 @@ jobs:
|
||||
- name: production
|
||||
dockerfile: Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
- name: ubuntu
|
||||
dockerfile: .docker/Dockerfile.ubuntu22.04
|
||||
platforms: linux/amd64,linux/arm64
|
||||
- name: alpine
|
||||
dockerfile: .docker/Dockerfile.alpine
|
||||
dockerfile: .docker/alpine/Dockerfile.prebuild
|
||||
platforms: linux/amd64,linux/arm64
|
||||
- name: alpine-source
|
||||
dockerfile: .docker/alpine/Dockerfile.source
|
||||
platforms: linux/amd64,linux/arm64
|
||||
- name: ubuntu
|
||||
dockerfile: .docker/ubuntu/Dockerfile.prebuild
|
||||
platforms: linux/amd64,linux/arm64
|
||||
- name: ubuntu-source
|
||||
dockerfile: .docker/ubuntu/Dockerfile.source
|
||||
platforms: linux/amd64,linux/arm64
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
@@ -139,21 +204,96 @@ jobs:
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract metadata
|
||||
- name: Extract metadata and generate tags
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: |
|
||||
${{ env.REGISTRY_DOCKERHUB }}
|
||||
${{ env.REGISTRY_GHCR }}
|
||||
tags: |
|
||||
type=ref,event=branch,suffix=-${{ matrix.variant.name }}
|
||||
type=ref,event=pr,suffix=-${{ matrix.variant.name }}
|
||||
type=semver,pattern={{version}},suffix=-${{ matrix.variant.name }}
|
||||
type=semver,pattern={{major}}.{{minor}},suffix=-${{ matrix.variant.name }}
|
||||
type=raw,value=latest,suffix=-${{ matrix.variant.name }},enable={{is_default_branch}}
|
||||
flavor: |
|
||||
latest=false
|
||||
run: |
|
||||
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
|
||||
VERSION="${{ needs.build-check.outputs.version }}"
|
||||
SHORT_SHA="${{ needs.build-check.outputs.short_sha }}"
|
||||
CREATE_LATEST="${{ needs.build-check.outputs.create_latest }}"
|
||||
VARIANT="${{ matrix.variant.name }}"
|
||||
|
||||
# Generate tags based on build type
|
||||
TAGS=""
|
||||
|
||||
if [[ "$BUILD_TYPE" == "development" ]]; then
|
||||
# Development build: dev-${short_sha}-${variant} and dev-${variant}
|
||||
TAGS="${{ env.REGISTRY_DOCKERHUB }}:dev-${SHORT_SHA}-${VARIANT}"
|
||||
TAGS="$TAGS,${{ env.REGISTRY_GHCR }}:dev-${SHORT_SHA}-${VARIANT}"
|
||||
|
||||
# Add rolling dev tag for each variant
|
||||
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:dev-${VARIANT}"
|
||||
TAGS="$TAGS,${{ env.REGISTRY_GHCR }}:dev-${VARIANT}"
|
||||
|
||||
# Special handling for production variant
|
||||
if [[ "$VARIANT" == "production" ]]; then
|
||||
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:dev-${SHORT_SHA}"
|
||||
TAGS="$TAGS,${{ env.REGISTRY_GHCR }}:dev-${SHORT_SHA}"
|
||||
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:dev"
|
||||
TAGS="$TAGS,${{ env.REGISTRY_GHCR }}:dev"
|
||||
fi
|
||||
else
|
||||
# Release/Prerelease build: ${version}-${variant}
|
||||
TAGS="${{ env.REGISTRY_DOCKERHUB }}:${VERSION}-${VARIANT}"
|
||||
TAGS="$TAGS,${{ env.REGISTRY_GHCR }}:${VERSION}-${VARIANT}"
|
||||
|
||||
# Special handling for production variant - create main version tag
|
||||
if [[ "$VARIANT" == "production" ]]; then
|
||||
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:${VERSION}"
|
||||
TAGS="$TAGS,${{ env.REGISTRY_GHCR }}:${VERSION}"
|
||||
fi
|
||||
|
||||
# Add channel tags for prereleases and latest for stable
|
||||
if [[ "$CREATE_LATEST" == "true" ]]; then
|
||||
# Stable release
|
||||
if [[ "$VARIANT" == "production" ]]; then
|
||||
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:latest"
|
||||
TAGS="$TAGS,${{ env.REGISTRY_GHCR }}:latest"
|
||||
else
|
||||
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:latest-${VARIANT}"
|
||||
TAGS="$TAGS,${{ env.REGISTRY_GHCR }}:latest-${VARIANT}"
|
||||
fi
|
||||
elif [[ "$BUILD_TYPE" == "prerelease" ]]; then
|
||||
# Prerelease channel tags (alpha, beta, rc)
|
||||
if [[ "$VERSION" == *"alpha"* ]]; then
|
||||
CHANNEL="alpha"
|
||||
elif [[ "$VERSION" == *"beta"* ]]; then
|
||||
CHANNEL="beta"
|
||||
elif [[ "$VERSION" == *"rc"* ]]; then
|
||||
CHANNEL="rc"
|
||||
fi
|
||||
|
||||
if [[ -n "$CHANNEL" ]]; then
|
||||
if [[ "$VARIANT" == "production" ]]; then
|
||||
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:${CHANNEL}"
|
||||
TAGS="$TAGS,${{ env.REGISTRY_GHCR }}:${CHANNEL}"
|
||||
else
|
||||
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:${CHANNEL}-${VARIANT}"
|
||||
TAGS="$TAGS,${{ env.REGISTRY_GHCR }}:${CHANNEL}-${VARIANT}"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
# Output tags
|
||||
echo "tags=$TAGS" >> $GITHUB_OUTPUT
|
||||
|
||||
# Generate labels
|
||||
LABELS="org.opencontainers.image.title=RustFS"
|
||||
LABELS="$LABELS,org.opencontainers.image.description=RustFS distributed object storage system"
|
||||
LABELS="$LABELS,org.opencontainers.image.version=$VERSION"
|
||||
LABELS="$LABELS,org.opencontainers.image.revision=${{ github.sha }}"
|
||||
LABELS="$LABELS,org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}"
|
||||
LABELS="$LABELS,org.opencontainers.image.created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')"
|
||||
LABELS="$LABELS,org.opencontainers.image.variant=$VARIANT"
|
||||
LABELS="$LABELS,org.opencontainers.image.build-type=$BUILD_TYPE"
|
||||
|
||||
echo "labels=$LABELS" >> $GITHUB_OUTPUT
|
||||
|
||||
echo "🐳 Generated Docker tags:"
|
||||
echo "$TAGS" | tr ',' '\n' | sed 's/^/ - /'
|
||||
echo "📋 Build type: $BUILD_TYPE"
|
||||
echo "🔖 Version: $VERSION"
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v5
|
||||
@@ -164,18 +304,27 @@ jobs:
|
||||
push: ${{ needs.build-check.outputs.should_push == 'true' }}
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
cache-from: type=gha,scope=docker-${{ matrix.variant.name }}
|
||||
cache-to: type=gha,mode=max,scope=docker-${{ matrix.variant.name }}
|
||||
cache-from: |
|
||||
type=gha,scope=docker-${{ matrix.variant.name }}
|
||||
type=registry,ref=${{ env.REGISTRY_GHCR }}:buildcache-${{ matrix.variant.name }}
|
||||
cache-to: |
|
||||
type=gha,mode=max,scope=docker-${{ matrix.variant.name }}
|
||||
type=registry,ref=${{ env.REGISTRY_GHCR }}:buildcache-${{ matrix.variant.name }},mode=max
|
||||
build-args: |
|
||||
BUILDTIME=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.created'] }}
|
||||
VERSION=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.version'] }}
|
||||
REVISION=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }}
|
||||
BUILDTIME=$(date -u +'%Y-%m-%dT%H:%M:%SZ')
|
||||
VERSION=${{ needs.build-check.outputs.version }}
|
||||
BUILD_TYPE=${{ needs.build-check.outputs.build_type }}
|
||||
REVISION=${{ github.sha }}
|
||||
BUILDKIT_INLINE_CACHE=1
|
||||
# Enable advanced BuildKit features for better performance
|
||||
provenance: false
|
||||
sbom: false
|
||||
|
||||
# Create manifest for main production image
|
||||
# Create manifest for main production image (only for stable releases)
|
||||
create-manifest:
|
||||
name: Create Manifest
|
||||
needs: [build-check, build-docker]
|
||||
if: needs.build-check.outputs.should_push == 'true' && startsWith(github.ref, 'refs/tags/')
|
||||
if: needs.build-check.outputs.should_push == 'true' && needs.build-check.outputs.create_latest == 'true' && needs.build-check.outputs.build_type == 'release'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Login to Docker Hub
|
||||
@@ -194,17 +343,50 @@ jobs:
|
||||
|
||||
- name: Create and push manifest
|
||||
run: |
|
||||
VERSION=${GITHUB_REF#refs/tags/}
|
||||
VERSION="${{ needs.build-check.outputs.version }}"
|
||||
|
||||
# Create main image tag (without variant suffix)
|
||||
if [[ -n "${{ secrets.DOCKERHUB_USERNAME }}" ]]; then
|
||||
docker buildx imagetools create \
|
||||
-t ${{ env.REGISTRY_DOCKERHUB }}:${VERSION} \
|
||||
-t ${{ env.REGISTRY_DOCKERHUB }}:latest \
|
||||
${{ env.REGISTRY_DOCKERHUB }}:${VERSION}-production
|
||||
fi
|
||||
echo "🐳 Creating manifest for stable release: $VERSION"
|
||||
|
||||
docker buildx imagetools create \
|
||||
-t ${{ env.REGISTRY_GHCR }}:${VERSION} \
|
||||
-t ${{ env.REGISTRY_GHCR }}:latest \
|
||||
${{ env.REGISTRY_GHCR }}:${VERSION}-production
|
||||
# Create main image tag (without variant suffix) for stable releases only
|
||||
# Note: The "production" variant already creates the main tags without suffix
|
||||
echo "Manifest creation is handled by the production variant build step"
|
||||
echo "Main tags ${VERSION} and latest are created directly by the production variant"
|
||||
|
||||
echo "✅ Manifest created successfully for stable release"
|
||||
|
||||
# Docker build summary
|
||||
docker-summary:
|
||||
name: Docker Build Summary
|
||||
needs: [build-check, build-docker]
|
||||
if: always() && needs.build-check.outputs.should_build == 'true'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Docker build completion summary
|
||||
run: |
|
||||
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
|
||||
VERSION="${{ needs.build-check.outputs.version }}"
|
||||
CREATE_LATEST="${{ needs.build-check.outputs.create_latest }}"
|
||||
|
||||
echo "🐳 Docker build completed successfully!"
|
||||
echo "📦 Build type: $BUILD_TYPE"
|
||||
echo "🔢 Version: $VERSION"
|
||||
echo ""
|
||||
|
||||
case "$BUILD_TYPE" in
|
||||
"development")
|
||||
echo "🛠️ Development Docker images have been built with dev-${VERSION} tags"
|
||||
echo "⚠️ These are development images - not suitable for production use"
|
||||
;;
|
||||
"release")
|
||||
echo "🚀 Release Docker images have been built with v${VERSION} tags"
|
||||
echo "✅ These images are ready for production use"
|
||||
if [[ "$CREATE_LATEST" == "true" ]]; then
|
||||
echo "🏷️ Latest tags have been created for stable release"
|
||||
fi
|
||||
;;
|
||||
"prerelease")
|
||||
echo "🧪 Prerelease Docker images have been built with v${VERSION} tags"
|
||||
echo "⚠️ These are prerelease images - use with caution"
|
||||
echo "🚫 Latest tags NOT created for prerelease"
|
||||
;;
|
||||
esac
|
||||
|
||||
24 .github/workflows/issue-translator.yml vendored
@@ -1,8 +1,22 @@
|
||||
name: 'issue-translator'
|
||||
on:
|
||||
issue_comment:
|
||||
# Copyright 2024 RustFS Team
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
name: "issue-translator"
|
||||
on:
|
||||
issue_comment:
|
||||
types: [created]
|
||||
issues:
|
||||
issues:
|
||||
types: [opened]
|
||||
|
||||
jobs:
|
||||
@@ -14,5 +28,5 @@ jobs:
|
||||
IS_MODIFY_TITLE: false
|
||||
# not required, default false. Decide whether to modify the issue title
|
||||
# if true, the robot account @Issues-translate-bot must have modification permissions, invite @Issues-translate-bot to your project or use your custom bot.
|
||||
CUSTOM_BOT_NOTE: Bot detected the issue body's language is not English, translate it automatically.
|
||||
CUSTOM_BOT_NOTE: Bot detected the issue body's language is not English, translate it automatically.
|
||||
# not required. Customize the translation robot prefix message.
|
||||
|
||||
353 .github/workflows/release.yml vendored Normal file
@@ -0,0 +1,353 @@
|
||||
# Copyright 2024 RustFS Team
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
name: Release
|
||||
|
||||
on:
|
||||
push:
|
||||
tags: ["*.*.*"]
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
tag:
|
||||
description: "Tag to create release for"
|
||||
required: true
|
||||
type: string
|
||||
|
||||
env:
|
||||
CARGO_TERM_COLOR: always
|
||||
|
||||
jobs:
|
||||
# Determine release type
|
||||
release-check:
|
||||
name: Release Type Check
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
tag: ${{ steps.check.outputs.tag }}
|
||||
version: ${{ steps.check.outputs.version }}
|
||||
is_prerelease: ${{ steps.check.outputs.is_prerelease }}
|
||||
release_type: ${{ steps.check.outputs.release_type }}
|
||||
steps:
|
||||
- name: Determine release type
|
||||
id: check
|
||||
run: |
|
||||
if [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
|
||||
TAG="${{ github.event.inputs.tag }}"
|
||||
else
|
||||
TAG="${GITHUB_REF#refs/tags/}"
|
||||
fi
|
||||
|
||||
VERSION="${TAG}"
|
||||
|
||||
# Check if this is a prerelease
|
||||
IS_PRERELEASE=false
|
||||
RELEASE_TYPE="release"
|
||||
|
||||
if [[ "$TAG" == *"alpha"* ]] || [[ "$TAG" == *"beta"* ]] || [[ "$TAG" == *"rc"* ]]; then
|
||||
IS_PRERELEASE=true
|
||||
if [[ "$TAG" == *"alpha"* ]]; then
|
||||
RELEASE_TYPE="alpha"
|
||||
elif [[ "$TAG" == *"beta"* ]]; then
|
||||
RELEASE_TYPE="beta"
|
||||
elif [[ "$TAG" == *"rc"* ]]; then
|
||||
RELEASE_TYPE="rc"
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "tag=$TAG" >> $GITHUB_OUTPUT
|
||||
echo "version=$VERSION" >> $GITHUB_OUTPUT
|
||||
echo "is_prerelease=$IS_PRERELEASE" >> $GITHUB_OUTPUT
|
||||
echo "release_type=$RELEASE_TYPE" >> $GITHUB_OUTPUT
|
||||
|
||||
echo "📦 Release Type: $RELEASE_TYPE"
|
||||
echo "🏷️ Tag: $TAG"
|
||||
echo "🔢 Version: $VERSION"
|
||||
echo "🚀 Is Prerelease: $IS_PRERELEASE"
|
||||
|
||||
# Create GitHub Release
|
||||
create-release:
|
||||
name: Create GitHub Release
|
||||
needs: release-check
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
outputs:
|
||||
release_id: ${{ steps.create.outputs.release_id }}
|
||||
release_url: ${{ steps.create.outputs.release_url }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Create GitHub Release
|
||||
id: create
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
run: |
|
||||
TAG="${{ needs.release-check.outputs.tag }}"
|
||||
VERSION="${{ needs.release-check.outputs.version }}"
|
||||
IS_PRERELEASE="${{ needs.release-check.outputs.is_prerelease }}"
|
||||
RELEASE_TYPE="${{ needs.release-check.outputs.release_type }}"
|
||||
|
||||
# Check if release already exists
|
||||
if gh release view "$TAG" >/dev/null 2>&1; then
|
||||
echo "Release $TAG already exists"
|
||||
RELEASE_ID=$(gh release view "$TAG" --json databaseId --jq '.databaseId')
|
||||
RELEASE_URL=$(gh release view "$TAG" --json url --jq '.url')
|
||||
else
|
||||
# Get release notes from tag message
|
||||
RELEASE_NOTES=$(git tag -l --format='%(contents)' "${TAG}")
|
||||
if [[ -z "$RELEASE_NOTES" || "$RELEASE_NOTES" =~ ^[[:space:]]*$ ]]; then
|
||||
if [[ "$IS_PRERELEASE" == "true" ]]; then
|
||||
RELEASE_NOTES="Pre-release ${VERSION} (${RELEASE_TYPE})"
|
||||
else
|
||||
RELEASE_NOTES="Release ${VERSION}"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Create release title
|
||||
if [[ "$IS_PRERELEASE" == "true" ]]; then
|
||||
TITLE="RustFS $VERSION (${RELEASE_TYPE})"
|
||||
else
|
||||
TITLE="RustFS $VERSION"
|
||||
fi
|
||||
|
||||
# Create the release
|
||||
PRERELEASE_FLAG=""
|
||||
if [[ "$IS_PRERELEASE" == "true" ]]; then
|
||||
PRERELEASE_FLAG="--prerelease"
|
||||
fi
|
||||
|
||||
gh release create "$TAG" \
|
||||
--title "$TITLE" \
|
||||
--notes "$RELEASE_NOTES" \
|
||||
$PRERELEASE_FLAG \
|
||||
--draft
|
||||
|
||||
RELEASE_ID=$(gh release view "$TAG" --json databaseId --jq '.databaseId')
|
||||
RELEASE_URL=$(gh release view "$TAG" --json url --jq '.url')
|
||||
fi
|
||||
|
||||
echo "release_id=$RELEASE_ID" >> $GITHUB_OUTPUT
|
||||
echo "release_url=$RELEASE_URL" >> $GITHUB_OUTPUT
|
||||
echo "Created release: $RELEASE_URL"
|
||||
|
||||
# Wait for build artifacts from build.yml
|
||||
wait-for-artifacts:
|
||||
name: Wait for Build Artifacts
|
||||
needs: release-check
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Wait for build workflow
|
||||
uses: lewagon/wait-on-check-action@v1.3.1
|
||||
with:
|
||||
ref: ${{ needs.release-check.outputs.tag }}
|
||||
check-name: "Build RustFS"
|
||||
repo-token: ${{ secrets.GITHUB_TOKEN }}
|
||||
wait-interval: 30
|
||||
allowed-conclusions: success
|
||||
|
||||
# Download and prepare release assets
|
||||
prepare-assets:
|
||||
name: Prepare Release Assets
|
||||
needs: [release-check, wait-for-artifacts]
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
assets_prepared: ${{ steps.prepare.outputs.assets_prepared }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Download artifacts from build workflow
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
path: ./artifacts
|
||||
pattern: rustfs-*
|
||||
merge-multiple: true
|
||||
|
||||
- name: Prepare release assets
|
||||
id: prepare
|
||||
run: |
|
||||
VERSION="${{ needs.release-check.outputs.version }}"
|
||||
TAG="${{ needs.release-check.outputs.tag }}"
|
||||
|
||||
mkdir -p ./release-assets
|
||||
|
||||
# Copy and verify artifacts
|
||||
ASSETS_COUNT=0
|
||||
for file in ./artifacts/rustfs-*.zip; do
|
||||
if [[ -f "$file" ]]; then
|
||||
cp "$file" ./release-assets/
|
||||
ASSETS_COUNT=$((ASSETS_COUNT + 1))
|
||||
fi
|
||||
done
|
||||
|
||||
if [[ $ASSETS_COUNT -eq 0 ]]; then
|
||||
echo "❌ No artifacts found!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
cd ./release-assets
|
||||
|
||||
# Generate checksums
|
||||
if ls *.zip >/dev/null 2>&1; then
|
||||
sha256sum *.zip > SHA256SUMS
|
||||
sha512sum *.zip > SHA512SUMS
|
||||
fi
|
||||
|
||||
# TODO: Add GPG signing for signatures
|
||||
# For now, create placeholder signature files
|
||||
for file in *.zip; do
|
||||
echo "# Signature for $file" > "${file}.asc"
|
||||
echo "# GPG signature will be added in future versions" >> "${file}.asc"
|
||||
done
|
||||
|
||||
echo "assets_prepared=true" >> $GITHUB_OUTPUT
|
||||
|
||||
echo "📦 Prepared assets:"
|
||||
ls -la
|
||||
|
||||
echo "🔢 Asset count: $ASSETS_COUNT"
|
||||
|
||||
- name: Upload prepared assets
|
||||
uses: actions/upload-artifact@v4
|
||||
with:
|
||||
name: release-assets-${{ needs.release-check.outputs.tag }}
|
||||
path: ./release-assets/
|
||||
retention-days: 30
|
||||
|
||||
# Upload assets to GitHub Release
|
||||
upload-assets:
|
||||
name: Upload Release Assets
|
||||
needs: [release-check, create-release, prepare-assets]
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
steps:
|
||||
- name: Download prepared assets
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
name: release-assets-${{ needs.release-check.outputs.tag }}
|
||||
path: ./release-assets
|
||||
|
||||
- name: Upload to GitHub Release
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
run: |
|
||||
TAG="${{ needs.release-check.outputs.tag }}"
|
||||
|
||||
cd ./release-assets
|
||||
|
||||
# Upload all files
|
||||
for file in *; do
|
||||
if [[ -f "$file" ]]; then
|
||||
echo "📤 Uploading $file..."
|
||||
gh release upload "$TAG" "$file" --clobber
|
||||
fi
|
||||
done
|
||||
|
||||
echo "✅ All assets uploaded successfully"
|
||||
|
||||
# Update latest.json for stable releases only
|
||||
update-latest:
|
||||
name: Update Latest Version
|
||||
needs: [release-check, upload-assets]
|
||||
if: needs.release-check.outputs.is_prerelease == 'false'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Update latest.json
|
||||
env:
|
||||
OSS_ACCESS_KEY_ID: ${{ secrets.ALICLOUDOSS_KEY_ID }}
|
||||
OSS_ACCESS_KEY_SECRET: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
|
||||
run: |
|
||||
if [[ -z "$OSS_ACCESS_KEY_ID" ]]; then
|
||||
echo "⚠️ OSS credentials not available, skipping latest.json update"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
VERSION="${{ needs.release-check.outputs.version }}"
|
||||
TAG="${{ needs.release-check.outputs.tag }}"
|
||||
|
||||
# Install ossutil
|
||||
OSSUTIL_VERSION="2.1.1"
|
||||
OSSUTIL_ZIP="ossutil-${OSSUTIL_VERSION}-linux-amd64.zip"
|
||||
OSSUTIL_DIR="ossutil-${OSSUTIL_VERSION}-linux-amd64"
|
||||
|
||||
curl -o "$OSSUTIL_ZIP" "https://gosspublic.alicdn.com/ossutil/v2/${OSSUTIL_VERSION}/${OSSUTIL_ZIP}"
|
||||
unzip "$OSSUTIL_ZIP"
|
||||
chmod +x "${OSSUTIL_DIR}/ossutil"
|
||||
|
||||
# Create latest.json
|
||||
cat > latest.json << EOF
|
||||
{
|
||||
"version": "${VERSION}",
|
||||
"tag": "${TAG}",
|
||||
"release_date": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
|
||||
"release_type": "stable",
|
||||
"download_url": "https://github.com/${{ github.repository }}/releases/tag/${TAG}"
|
||||
}
|
||||
EOF
|
||||
|
||||
# Upload to OSS
|
||||
./${OSSUTIL_DIR}/ossutil cp latest.json oss://rustfs-version/latest.json --force
|
||||
|
||||
echo "✅ Updated latest.json for stable release $VERSION"
|
||||
|
||||
# Publish release (remove draft status)
|
||||
publish-release:
|
||||
name: Publish Release
|
||||
needs: [release-check, create-release, upload-assets]
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Update release notes and publish
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
run: |
|
||||
TAG="${{ needs.release-check.outputs.tag }}"
|
||||
VERSION="${{ needs.release-check.outputs.version }}"
|
||||
IS_PRERELEASE="${{ needs.release-check.outputs.is_prerelease }}"
|
||||
RELEASE_TYPE="${{ needs.release-check.outputs.release_type }}"
|
||||
|
||||
# Get original release notes from tag
|
||||
ORIGINAL_NOTES=$(git tag -l --format='%(contents)' "${TAG}")
|
||||
if [[ -z "$ORIGINAL_NOTES" || "$ORIGINAL_NOTES" =~ ^[[:space:]]*$ ]]; then
|
||||
if [[ "$IS_PRERELEASE" == "true" ]]; then
|
||||
ORIGINAL_NOTES="Pre-release ${VERSION} (${RELEASE_TYPE})"
|
||||
else
|
||||
ORIGINAL_NOTES="Release ${VERSION}"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Use release notes template if available
|
||||
if [[ -f ".github/workflows/release-notes-template.md" ]]; then
|
||||
# Substitute variables in template
|
||||
sed -e "s/\${VERSION}/$TAG/g" \
|
||||
-e "s/\${VERSION_CLEAN}/$VERSION/g" \
|
||||
-e "s/\${ORIGINAL_NOTES}/$(echo "$ORIGINAL_NOTES" | sed 's/[[\.*^$()+?{|]/\\&/g')/g" \
|
||||
.github/workflows/release-notes-template.md > enhanced_notes.md
|
||||
|
||||
# Update release notes
|
||||
gh release edit "$TAG" --notes-file enhanced_notes.md
|
||||
fi
|
||||
|
||||
# Publish the release (remove draft status)
|
||||
gh release edit "$TAG" --draft=false
|
||||
|
||||
echo "🎉 Released $TAG successfully!"
|
||||
echo "📄 Release URL: ${{ needs.create-release.outputs.release_url }}"
|
||||
652 Cargo.lock generated
File diff suppressed because it is too large
24 Cargo.toml
@@ -63,7 +63,7 @@ rustfs-filemeta = { path = "crates/filemeta" }
|
||||
rustfs-rio = { path = "crates/rio" }
|
||||
|
||||
[workspace.dependencies]
|
||||
rustfs-ahm = { path = "crates/ahm", version = "0.0.3" }
|
||||
rustfs-ahm = { path = "crates/ahm", version = "0.0.5" }
|
||||
rustfs-s3select-api = { path = "crates/s3select-api", version = "0.0.5" }
|
||||
rustfs-appauth = { path = "crates/appauth", version = "0.0.5" }
|
||||
rustfs-common = { path = "crates/common", version = "0.0.5" }
|
||||
@@ -89,7 +89,7 @@ aes-gcm = { version = "0.10.3", features = ["std"] }
|
||||
arc-swap = "1.7.1"
|
||||
argon2 = { version = "0.5.3", features = ["std"] }
|
||||
atoi = "2.0.0"
|
||||
async-channel = "2.4.0"
|
||||
async-channel = "2.5.0"
|
||||
async-recursion = "1.1.1"
|
||||
async-trait = "0.1.88"
|
||||
async-compression = { version = "0.4.0" }
|
||||
@@ -107,7 +107,7 @@ byteorder = "1.5.0"
|
||||
cfg-if = "1.0.1"
|
||||
chacha20poly1305 = { version = "0.10.1" }
|
||||
chrono = { version = "0.4.41", features = ["serde"] }
|
||||
clap = { version = "4.5.40", features = ["derive", "env"] }
|
||||
clap = { version = "4.5.41", features = ["derive", "env"] }
|
||||
const-str = { version = "0.6.2", features = ["std", "proc"] }
|
||||
crc32fast = "1.4.2"
|
||||
criterion = { version = "0.5", features = ["html_reports"] }
|
||||
@@ -116,7 +116,7 @@ datafusion = "46.0.1"
|
||||
derive_builder = "0.20.2"
|
||||
dioxus = { version = "0.6.3", features = ["router"] }
|
||||
dirs = "6.0.0"
|
||||
enumset = "1.1.6"
|
||||
enumset = "1.1.7"
|
||||
flatbuffers = "25.2.10"
|
||||
flate2 = "1.1.2"
|
||||
flexi_logger = { version = "0.31.2", features = ["trc", "dont_minimize_extra_stacks"] }
|
||||
@@ -130,7 +130,7 @@ hex-simd = "0.8.0"
|
||||
highway = { version = "1.3.0" }
|
||||
hmac = "0.12.1"
|
||||
hyper = "1.6.0"
|
||||
hyper-util = { version = "0.1.14", features = [
|
||||
hyper-util = { version = "0.1.15", features = [
|
||||
"tokio",
|
||||
"server-auto",
|
||||
"server-graceful",
|
||||
@@ -182,9 +182,9 @@ pbkdf2 = "0.12.2"
|
||||
percent-encoding = "2.3.1"
|
||||
pin-project-lite = "0.2.16"
|
||||
prost = "0.13.5"
|
||||
quick-xml = "0.37.5"
|
||||
quick-xml = "0.38.0"
|
||||
rand = "0.9.1"
|
||||
rdkafka = { version = "0.37.0", features = ["tokio"] }
|
||||
rdkafka = { version = "0.38.0", features = ["tokio"] }
|
||||
reed-solomon-simd = { version = "3.0.1" }
|
||||
regex = { version = "1.11.1" }
|
||||
reqwest = { version = "0.12.22", default-features = false, features = [
|
||||
@@ -207,7 +207,7 @@ rumqttc = { version = "0.24" }
|
||||
rust-embed = { version = "8.7.2" }
|
||||
rust-i18n = { version = "3.1.5" }
|
||||
rustfs-rsc = "2025.506.1"
|
||||
rustls = { version = "0.23.28" }
|
||||
rustls = { version = "0.23.29" }
|
||||
rustls-pki-types = "1.12.0"
|
||||
rustls-pemfile = "2.2.0"
|
||||
s3s = { version = "0.12.0-minio-preview.1" }
|
||||
@@ -222,10 +222,11 @@ siphasher = "1.0.1"
|
||||
smallvec = { version = "1.15.1", features = ["serde"] }
|
||||
snafu = "0.8.6"
|
||||
snap = "1.1.1"
|
||||
socket2 = "0.5.10"
|
||||
socket2 = "0.6.0"
|
||||
strum = { version = "0.27.1", features = ["derive"] }
|
||||
sysinfo = "0.35.2"
|
||||
sysinfo = "0.36.0"
|
||||
tempfile = "3.20.0"
|
||||
temp-env = "0.3.6"
|
||||
test-case = "3.3.1"
|
||||
thiserror = "2.0.12"
|
||||
time = { version = "0.3.41", features = [
|
||||
@@ -239,6 +240,7 @@ tokio = { version = "1.46.1", features = ["fs", "rt-multi-thread"] }
|
||||
tokio-rustls = { version = "0.26.2", default-features = false }
|
||||
tokio-stream = { version = "0.1.17" }
|
||||
tokio-tar = "0.3.1"
|
||||
tokio-test = "0.4.4"
|
||||
tokio-util = { version = "0.7.15", features = ["io", "compat"] }
|
||||
tonic = { version = "0.13.1", features = ["gzip"] }
|
||||
tonic-build = { version = "0.13.1" }
|
||||
@@ -263,7 +265,7 @@ winapi = { version = "0.3.9" }
|
||||
xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }
|
||||
zip = "2.4.2"
|
||||
zstd = "0.13.3"
|
||||
anyhow = "1.0.86"
|
||||
anyhow = "1.0.98"
|
||||
|
||||
[profile.wasm-dev]
|
||||
inherits = "dev"
|
||||
|
||||
96 Dockerfile
@@ -12,38 +12,106 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
FROM alpine:3.18 AS builder
|
||||
# Multi-stage Alpine build for minimal runtime image
|
||||
FROM rust:1.85-alpine AS builder
|
||||
|
||||
RUN apk add -U --no-cache \
|
||||
ca-certificates \
|
||||
# Build arguments for dynamic artifact download
|
||||
ARG VERSION=""
|
||||
ARG BUILD_TYPE="release"
|
||||
ARG TARGETARCH
|
||||
|
||||
# Install build dependencies
|
||||
RUN apk add --no-cache \
|
||||
musl-dev \
|
||||
pkgconfig \
|
||||
openssl-dev \
|
||||
openssl-libs-static \
|
||||
curl \
|
||||
unzip \
|
||||
bash \
|
||||
unzip
|
||||
wget \
|
||||
ca-certificates
|
||||
|
||||
# Install protoc
|
||||
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
|
||||
&& unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
|
||||
&& mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
|
||||
&& mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3
|
||||
|
||||
RUN curl -Lo /tmp/rustfs.zip https://dl.rustfs.com/artifacts/rustfs/rustfs-x86_64-unknown-linux-musl.zip && \
|
||||
unzip -o /tmp/rustfs.zip -d /tmp && \
|
||||
mv /tmp/rustfs /rustfs && \
|
||||
chmod +x /rustfs && \
|
||||
rm -rf /tmp/*
|
||||
# Install flatc
|
||||
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
|
||||
&& unzip Linux.flatc.binary.g++-13.zip \
|
||||
&& mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc \
|
||||
&& rm -rf Linux.flatc.binary.g++-13.zip
|
||||
|
||||
# Option A: Download pre-built binary (faster)
|
||||
RUN if [ -n "$VERSION" ]; then \
|
||||
# Map TARGETARCH to our naming convention
|
||||
case "${TARGETARCH}" in \
|
||||
amd64) ARCH="x86_64" ;; \
|
||||
arm64) ARCH="aarch64" ;; \
|
||||
*) echo "Unsupported architecture: ${TARGETARCH}" && exit 1 ;; \
|
||||
esac; \
|
||||
\
|
||||
# Determine download path and filename
|
||||
if [ "${BUILD_TYPE}" = "development" ]; then \
|
||||
DOWNLOAD_PATH="artifacts/rustfs/dev"; \
|
||||
FILENAME="rustfs-linux-${ARCH}-dev-${VERSION}.zip"; \
|
||||
else \
|
||||
DOWNLOAD_PATH="artifacts/rustfs/release"; \
|
||||
FILENAME="rustfs-linux-${ARCH}-v${VERSION}.zip"; \
|
||||
fi; \
|
||||
\
|
||||
# Download the binary
|
||||
DOWNLOAD_URL="https://dl.rustfs.com/${DOWNLOAD_PATH}/${FILENAME}"; \
|
||||
echo "Downloading RustFS binary from: ${DOWNLOAD_URL}"; \
|
||||
curl -Lo /tmp/rustfs.zip "${DOWNLOAD_URL}"; \
|
||||
unzip -o /tmp/rustfs.zip -d /tmp; \
|
||||
mv /tmp/rustfs /usr/local/bin/rustfs; \
|
||||
chmod +x /usr/local/bin/rustfs; \
|
||||
rm -rf /tmp/*; \
|
||||
else \
|
||||
echo "No VERSION provided, will build from source"; \
|
||||
echo "Source build not yet implemented in Alpine variant"; \
|
||||
exit 1; \
|
||||
fi
|
||||
|
||||
# Final Alpine runtime image
|
||||
FROM alpine:3.18
|
||||
|
||||
RUN apk add -U --no-cache \
|
||||
RUN apk add --no-cache \
|
||||
ca-certificates \
|
||||
tzdata \
|
||||
bash
|
||||
|
||||
COPY --from=builder /rustfs /usr/local/bin/rustfs
|
||||
# Create rustfs user for security
|
||||
RUN addgroup -g 1000 rustfs && \
|
||||
adduser -D -u 1000 -G rustfs rustfs
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Copy binary from builder
|
||||
COPY --from=builder /usr/local/bin/rustfs /app/rustfs
|
||||
RUN chmod +x /app/rustfs && chown rustfs:rustfs /app/rustfs
|
||||
|
||||
# Create data directories
|
||||
RUN mkdir -p /data && chown -R rustfs:rustfs /data /app
|
||||
|
||||
# Switch to non-root user
|
||||
USER rustfs
|
||||
|
||||
# Environment variables
|
||||
ENV RUSTFS_ACCESS_KEY=rustfsadmin \
|
||||
RUSTFS_SECRET_KEY=rustfsadmin \
|
||||
RUSTFS_ADDRESS=":9000" \
|
||||
RUSTFS_CONSOLE_ENABLE=true \
|
||||
RUSTFS_VOLUMES=/data \
|
||||
RUST_LOG=warn
|
||||
|
||||
EXPOSE 9000
|
||||
|
||||
RUN mkdir -p /data
|
||||
VOLUME /data
|
||||
# Health check
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
CMD wget --no-verbose --tries=1 --spider http://localhost:9000/health || exit 1
|
||||
|
||||
CMD ["rustfs", "/data"]
|
||||
CMD ["/app/rustfs"]
|
||||
|
||||
@@ -1,21 +1,61 @@
|
||||
FROM ubuntu:latest
|
||||
# Copyright 2024 RustFS Team
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# RUN apk add --no-cache <package-name>
|
||||
# If rustfs has dependencies, add them here, for example:
|
||||
# RUN apk add --no-cache openssl
|
||||
# RUN apk add --no-cache bash # install Bash
|
||||
# Dockerfile for RustFS with observability features
|
||||
FROM ubuntu:22.04
|
||||
|
||||
# Avoid interactive prompts during build
|
||||
ENV DEBIAN_FRONTEND=noninteractive
|
||||
|
||||
# Install runtime dependencies
|
||||
RUN apt-get update && apt-get install -y \
|
||||
ca-certificates \
|
||||
wget \
|
||||
curl \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Create directories matching RUSTFS_VOLUMES
|
||||
RUN mkdir -p /root/data/target/volume/test1 /root/data/target/volume/test2 /root/data/target/volume/test3 /root/data/target/volume/test4
|
||||
# Create rustfs user for security
|
||||
RUN groupadd -g 1000 rustfs && \
|
||||
useradd -d /app -g rustfs -u 1000 -s /bin/bash rustfs
|
||||
|
||||
# COPY ./target/x86_64-unknown-linux-musl/release/rustfs /app/rustfs
|
||||
# Create data directories matching RUSTFS_VOLUMES pattern
|
||||
RUN mkdir -p /data/rustfs{0,1,2,3} && \
|
||||
chown -R rustfs:rustfs /data /app
|
||||
|
||||
# Copy RustFS binary (expects it to be built with observability features)
|
||||
# Note: This assumes the binary is built locally with observability features enabled
|
||||
COPY ./target/x86_64-unknown-linux-gnu/release/rustfs /app/rustfs
|
||||
RUN chmod +x /app/rustfs && chown rustfs:rustfs /app/rustfs
|
||||
|
||||
RUN chmod +x /app/rustfs
|
||||
# Switch to non-root user
|
||||
USER rustfs
|
||||
|
||||
# Environment variables for observability
|
||||
ENV RUSTFS_ACCESS_KEY=rustfsadmin \
|
||||
RUSTFS_SECRET_KEY=rustfsadmin \
|
||||
RUSTFS_ADDRESS=":9000" \
|
||||
RUSTFS_CONSOLE_ENABLE=true \
|
||||
RUSTFS_VOLUMES=/data/rustfs0,/data/rustfs1,/data/rustfs2,/data/rustfs3 \
|
||||
RUSTFS_OBS_ENDPOINT=http://otel-collector:4317 \
|
||||
RUST_LOG=info
|
||||
|
||||
EXPOSE 9000
|
||||
EXPOSE 9002
|
||||
|
||||
CMD ["/app/rustfs"]
|
||||
# Health check
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
CMD wget --no-verbose --tries=1 --spider http://localhost:9000/health || exit 1
|
||||
|
||||
CMD ["/app/rustfs"]
|
||||
|
||||
@@ -83,11 +83,11 @@ To get started with RustFS, follow these steps:
|
||||
2. **Docker Quick Start (Option 2)**
|
||||
|
||||
```bash
|
||||
podman run -d -p 9000:9000 -p 9001:9001 -v /data:/data quay.io/rustfs/rustfs
|
||||
docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs
|
||||
```
|
||||
|
||||
|
||||
3. **Access the Console**: Open your web browser and navigate to `http://localhost:9001` to access the RustFS console, default username and password is `rustfsadmin` .
|
||||
3. **Access the Console**: Open your web browser and navigate to `http://localhost:9000` to access the RustFS console, default username and password is `rustfsadmin` .
|
||||
4. **Create a Bucket**: Use the console to create a new bucket for your objects.
|
||||
5. **Upload Objects**: You can upload files directly through the console or use S3-compatible APIs to interact with your RustFS instance.
|
||||
|
||||
@@ -122,7 +122,7 @@ If you have any questions or need assistance, you can:
|
||||
RustFS is a community-driven project, and we appreciate all contributions. Check out the [Contributors](https://github.com/rustfs/rustfs/graphs/contributors) page to see the amazing people who have helped make RustFS better.
|
||||
|
||||
<a href="https://github.com/rustfs/rustfs/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=rustfs/rustfs" />
|
||||
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" />
|
||||
</a>
|
||||
|
||||
## License
|
||||
|
||||
@@ -70,11 +70,11 @@ RustFS 是一个使用 Rust(全球最受欢迎的编程语言之一)构建
|
||||
2. **Docker快速启动(方案二)**
|
||||
|
||||
```bash
|
||||
podman run -d -p 9000:9000 -p 9001:9001 -v /data:/data quay.io/rustfs/rustfs
|
||||
docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs
|
||||
```
|
||||
|
||||
|
||||
3. **访问控制台**:打开 Web 浏览器并导航到 `http://localhost:9001` 以访问 RustFS 控制台,默认的用户名和密码是 `rustfsadmin` 。
|
||||
3. **访问控制台**:打开 Web 浏览器并导航到 `http://localhost:9000` 以访问 RustFS 控制台,默认的用户名和密码是 `rustfsadmin` 。
|
||||
4. **创建存储桶**:使用控制台为您的对象创建新的存储桶。
|
||||
5. **上传对象**:您可以直接通过控制台上传文件,或使用 S3 兼容的 API 与您的 RustFS 实例交互。
|
||||
|
||||
|
||||
@@ -26,7 +26,6 @@ dioxus = { workspace = true, features = ["router"] }
|
||||
dirs = { workspace = true }
|
||||
hex = { workspace = true }
|
||||
keyring = { workspace = true }
|
||||
lazy_static = { workspace = true }
|
||||
rfd = { workspace = true }
|
||||
rust-embed = { workspace = true, features = ["interpolate-folder-path"] }
|
||||
rust-i18n = { workspace = true }
|
||||
|
||||
@@ -14,12 +14,12 @@
|
||||
|
||||
use crate::utils::RustFSConfig;
|
||||
use dioxus::logger::tracing::{debug, error, info};
|
||||
use lazy_static::lazy_static;
|
||||
use rust_embed::RustEmbed;
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::error::Error;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::Command as StdCommand;
|
||||
use std::sync::LazyLock;
|
||||
use std::time::Duration;
|
||||
use tokio::fs;
|
||||
use tokio::fs::File;
|
||||
@@ -31,15 +31,13 @@ use tokio::sync::{Mutex, mpsc};
|
||||
#[folder = "$CARGO_MANIFEST_DIR/embedded-rustfs/"]
|
||||
struct Asset;
|
||||
|
||||
// Use `lazy_static` to cache the checksum of embedded resources
|
||||
lazy_static! {
|
||||
static ref RUSTFS_HASH: Mutex<String> = {
|
||||
let rustfs_file = if cfg!(windows) { "rustfs.exe" } else { "rustfs" };
|
||||
let rustfs_data = Asset::get(rustfs_file).expect("RustFs binary not embedded");
|
||||
let hash = hex::encode(Sha256::digest(&rustfs_data.data));
|
||||
Mutex::new(hash)
|
||||
};
|
||||
}
|
||||
// Use `LazyLock` to cache the checksum of embedded resources
|
||||
static RUSTFS_HASH: LazyLock<Mutex<String>> = LazyLock::new(|| {
|
||||
let rustfs_file = if cfg!(windows) { "rustfs.exe" } else { "rustfs" };
|
||||
let rustfs_data = Asset::get(rustfs_file).expect("RustFs binary not embedded");
|
||||
let hash = hex::encode(Sha256::digest(&rustfs_data.data));
|
||||
Mutex::new(hash)
|
||||
});
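
For context, this is the `std::sync::LazyLock` pattern (stable since Rust 1.80) that the commit adopts in place of the `lazy_static!` macro. A minimal, self-contained sketch with hypothetical names, not taken from the crate:

```rust
use std::sync::LazyLock;

// The initializer runs on first access; afterwards the cached value is reused.
static CACHED_ID: LazyLock<String> = LazyLock::new(|| {
    // Stand-in for an expensive one-time computation (e.g. hashing an embedded file).
    format!("host-{}", std::env::consts::OS)
});

fn main() {
    // First dereference runs the closure; later accesses reuse the stored String.
    println!("{}", *CACHED_ID);
    println!("len = {}", CACHED_ID.len());
}
```

Since the standard library now covers this, the `lazy_static` dependency can be dropped, which is why it also disappears from the crate's Cargo.toml earlier in this diff.
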
|
||||
|
||||
/// Service command
|
||||
/// This enum represents the commands that can be sent to the service manager
|
||||
|
||||
@@ -1,10 +1,16 @@
|
||||
[package]
|
||||
name = "rustfs-ahm"
|
||||
version = "0.0.3"
|
||||
edition = "2021"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors = ["RustFS Team"]
|
||||
license = "Apache-2.0"
|
||||
license.workspace = true
|
||||
description = "RustFS AHM (Automatic Health Management) Scanner"
|
||||
repository.workspace = true
|
||||
rust-version.workspace = true
|
||||
homepage.workspace = true
|
||||
documentation = "https://docs.rs/rustfs-ahm/latest/rustfs_ahm/"
|
||||
keywords = ["RustFS", "AHM", "health-management", "scanner", "Minio"]
|
||||
categories = ["web-programming", "development-tools", "filesystem"]
|
||||
|
||||
[dependencies]
|
||||
rustfs-ecstore = { workspace = true }
|
||||
@@ -31,5 +37,5 @@ lazy_static = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
rmp-serde = { workspace = true }
|
||||
tokio-test = "0.4"
|
||||
serde_json = "1.0"
|
||||
tokio-test = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
|
||||
@@ -20,8 +20,8 @@ pub mod scanner;
|
||||
|
||||
pub use error::{Error, Result};
|
||||
pub use scanner::{
|
||||
load_data_usage_from_backend, store_data_usage_in_backend, BucketTargetUsageInfo, BucketUsageInfo, DataUsageInfo, Scanner,
|
||||
ScannerMetrics,
|
||||
BucketTargetUsageInfo, BucketUsageInfo, DataUsageInfo, Scanner, ScannerMetrics, load_data_usage_from_backend,
|
||||
store_data_usage_in_backend,
|
||||
};
|
||||
|
||||
// Global cancellation token for AHM services (scanner and other background tasks)
|
||||
|
||||
@@ -1016,8 +1016,8 @@ mod tests {
|
||||
use rustfs_ecstore::endpoints::{EndpointServerPools, Endpoints, PoolEndpoints};
|
||||
use rustfs_ecstore::store::ECStore;
|
||||
use rustfs_ecstore::{
|
||||
store_api::{MakeBucketOptions, ObjectIO, PutObjReader},
|
||||
StorageAPI,
|
||||
store_api::{MakeBucketOptions, ObjectIO, PutObjReader},
|
||||
};
|
||||
use std::fs;
|
||||
use std::net::SocketAddr;
|
||||
|
||||
@@ -20,6 +20,6 @@ pub mod metrics;
|
||||
// Re-export main types for convenience
|
||||
pub use data_scanner::Scanner;
|
||||
pub use data_usage::{
|
||||
load_data_usage_from_backend, store_data_usage_in_backend, BucketTargetUsageInfo, BucketUsageInfo, DataUsageInfo,
|
||||
BucketTargetUsageInfo, BucketUsageInfo, DataUsageInfo, load_data_usage_from_backend, store_data_usage_in_backend,
|
||||
};
|
||||
pub use metrics::ScannerMetrics;
|
||||
|
||||
@@ -28,6 +28,5 @@ categories = ["web-programming", "development-tools", "data-structures"]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
lazy_static.workspace = true
|
||||
tokio.workspace = true
|
||||
tonic = { workspace = true }
|
||||
|
||||
@@ -12,19 +12,19 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
#![allow(non_upper_case_globals)] // FIXME
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::LazyLock;
|
||||
|
||||
use lazy_static::lazy_static;
|
||||
use tokio::sync::RwLock;
|
||||
use tonic::transport::Channel;
|
||||
|
||||
lazy_static! {
|
||||
pub static ref GLOBAL_Local_Node_Name: RwLock<String> = RwLock::new("".to_string());
|
||||
pub static ref GLOBAL_Rustfs_Host: RwLock<String> = RwLock::new("".to_string());
|
||||
pub static ref GLOBAL_Rustfs_Port: RwLock<String> = RwLock::new("9000".to_string());
|
||||
pub static ref GLOBAL_Rustfs_Addr: RwLock<String> = RwLock::new("".to_string());
|
||||
pub static ref GLOBAL_Conn_Map: RwLock<HashMap<String, Channel>> = RwLock::new(HashMap::new());
|
||||
}
|
||||
pub static GLOBAL_Local_Node_Name: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
|
||||
pub static GLOBAL_Rustfs_Host: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
|
||||
pub static GLOBAL_Rustfs_Port: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("9000".to_string()));
|
||||
pub static GLOBAL_Rustfs_Addr: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
|
||||
pub static GLOBAL_Conn_Map: LazyLock<RwLock<HashMap<String, Channel>>> = LazyLock::new(|| RwLock::new(HashMap::new()));
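
The same replacement applies to these async-guarded globals. A hedged sketch of the pattern (hypothetical names; assumes the `tokio` dependency with the `macros` and `rt-multi-thread` features already present in this workspace), mirroring the `set_global_addr` helper shown just below:

```rust
use std::{collections::HashMap, sync::LazyLock};
use tokio::sync::RwLock;

// Async-aware globals behind LazyLock instead of lazy_static!.
static GLOBAL_ADDR: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new(String::new()));
static GLOBAL_CONNS: LazyLock<RwLock<HashMap<String, u32>>> =
    LazyLock::new(|| RwLock::new(HashMap::new()));

#[tokio::main]
async fn main() {
    // Readers and writers go through the tokio RwLock exactly as before.
    *GLOBAL_ADDR.write().await = "127.0.0.1:9000".to_string();
    GLOBAL_CONNS.write().await.insert("node-1".to_string(), 1);
    println!("addr = {}", &*GLOBAL_ADDR.read().await);
}
```
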
|
||||
|
||||
pub async fn set_global_addr(addr: &str) {
|
||||
*GLOBAL_Rustfs_Addr.write().await = addr.to_string();
|
||||
|
||||
@@ -109,8 +109,8 @@ winapi = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { workspace = true, features = ["rt-multi-thread", "macros"] }
|
||||
criterion = { version = "0.5", features = ["html_reports"] }
|
||||
temp-env = "0.3.6"
|
||||
criterion = { workspace = true, features = ["html_reports"] }
|
||||
temp-env = { workspace = true }
|
||||
|
||||
[build-dependencies]
|
||||
shadow-rs = { workspace = true, features = ["build", "metadata"] }
|
||||
|
||||
@@ -45,7 +45,6 @@ base64-simd = { workspace = true }
|
||||
jsonwebtoken = { workspace = true }
|
||||
tracing.workspace = true
|
||||
rustfs-madmin.workspace = true
|
||||
lazy_static.workspace = true
|
||||
rustfs-utils = { workspace = true, features = ["path"] }
|
||||
|
||||
[dev-dependencies]
|
||||
|
||||
@@ -20,7 +20,6 @@ use crate::{
|
||||
manager::{extract_jwt_claims, get_default_policyes},
|
||||
};
|
||||
use futures::future::join_all;
|
||||
use lazy_static::lazy_static;
|
||||
use rustfs_ecstore::{
|
||||
config::{
|
||||
RUSTFS_CONFIG_PREFIX,
|
||||
@@ -34,25 +33,28 @@ use rustfs_ecstore::{
|
||||
use rustfs_policy::{auth::UserIdentity, policy::PolicyDoc};
|
||||
use rustfs_utils::path::{SLASH_SEPARATOR, path_join_buf};
|
||||
use serde::{Serialize, de::DeserializeOwned};
|
||||
use std::sync::LazyLock;
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
use tokio::sync::broadcast::{self, Receiver as B_Receiver};
|
||||
use tokio::sync::mpsc::{self, Sender};
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
lazy_static! {
|
||||
pub static ref IAM_CONFIG_PREFIX: String = format!("{}/iam", RUSTFS_CONFIG_PREFIX);
|
||||
pub static ref IAM_CONFIG_USERS_PREFIX: String = format!("{}/iam/users/", RUSTFS_CONFIG_PREFIX);
|
||||
pub static ref IAM_CONFIG_SERVICE_ACCOUNTS_PREFIX: String = format!("{}/iam/service-accounts/", RUSTFS_CONFIG_PREFIX);
|
||||
pub static ref IAM_CONFIG_GROUPS_PREFIX: String = format!("{}/iam/groups/", RUSTFS_CONFIG_PREFIX);
|
||||
pub static ref IAM_CONFIG_POLICIES_PREFIX: String = format!("{}/iam/policies/", RUSTFS_CONFIG_PREFIX);
|
||||
pub static ref IAM_CONFIG_STS_PREFIX: String = format!("{}/iam/sts/", RUSTFS_CONFIG_PREFIX);
|
||||
pub static ref IAM_CONFIG_POLICY_DB_PREFIX: String = format!("{}/iam/policydb/", RUSTFS_CONFIG_PREFIX);
|
||||
pub static ref IAM_CONFIG_POLICY_DB_USERS_PREFIX: String = format!("{}/iam/policydb/users/", RUSTFS_CONFIG_PREFIX);
|
||||
pub static ref IAM_CONFIG_POLICY_DB_STS_USERS_PREFIX: String = format!("{}/iam/policydb/sts-users/", RUSTFS_CONFIG_PREFIX);
|
||||
pub static ref IAM_CONFIG_POLICY_DB_SERVICE_ACCOUNTS_PREFIX: String =
|
||||
format!("{}/iam/policydb/service-accounts/", RUSTFS_CONFIG_PREFIX);
|
||||
pub static ref IAM_CONFIG_POLICY_DB_GROUPS_PREFIX: String = format!("{}/iam/policydb/groups/", RUSTFS_CONFIG_PREFIX);
|
||||
}
|
||||
pub static IAM_CONFIG_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam"));
|
||||
pub static IAM_CONFIG_USERS_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/users/"));
|
||||
pub static IAM_CONFIG_SERVICE_ACCOUNTS_PREFIX: LazyLock<String> =
|
||||
LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/service-accounts/"));
|
||||
pub static IAM_CONFIG_GROUPS_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/groups/"));
|
||||
pub static IAM_CONFIG_POLICIES_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/policies/"));
|
||||
pub static IAM_CONFIG_STS_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/sts/"));
|
||||
pub static IAM_CONFIG_POLICY_DB_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/policydb/"));
|
||||
pub static IAM_CONFIG_POLICY_DB_USERS_PREFIX: LazyLock<String> =
|
||||
LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/policydb/users/"));
|
||||
pub static IAM_CONFIG_POLICY_DB_STS_USERS_PREFIX: LazyLock<String> =
|
||||
LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/policydb/sts-users/"));
|
||||
pub static IAM_CONFIG_POLICY_DB_SERVICE_ACCOUNTS_PREFIX: LazyLock<String> =
|
||||
LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/policydb/service-accounts/"));
|
||||
pub static IAM_CONFIG_POLICY_DB_GROUPS_PREFIX: LazyLock<String> =
|
||||
LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/policydb/groups/"));
|
||||
|
||||
const IAM_IDENTITY_FILE: &str = "identity.json";
|
||||
const IAM_POLICY_FILE: &str = "policy.json";
|
||||
|
||||
@@ -30,7 +30,6 @@ workspace = true
|
||||
|
||||
[dependencies]
|
||||
async-trait.workspace = true
|
||||
lazy_static.workspace = true
|
||||
rustfs-protos.workspace = true
|
||||
rand.workspace = true
|
||||
serde.workspace = true
|
||||
|
||||
@@ -14,12 +14,12 @@
|
||||
// limitations under the License.
|
||||
|
||||
use async_trait::async_trait;
|
||||
use lazy_static::lazy_static;
|
||||
use local_locker::LocalLocker;
|
||||
use lock_args::LockArgs;
|
||||
use remote_client::RemoteClient;
|
||||
use std::io::Result;
|
||||
use std::sync::Arc;
|
||||
use std::sync::LazyLock;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
pub mod drwmutex;
|
||||
@@ -29,9 +29,7 @@ pub mod lrwmutex;
|
||||
pub mod namespace_lock;
|
||||
pub mod remote_client;
|
||||
|
||||
lazy_static! {
|
||||
pub static ref GLOBAL_LOCAL_SERVER: Arc<RwLock<LocalLocker>> = Arc::new(RwLock::new(LocalLocker::new()));
|
||||
}
|
||||
pub static GLOBAL_LOCAL_SERVER: LazyLock<Arc<RwLock<LocalLocker>>> = LazyLock::new(|| Arc::new(RwLock::new(LocalLocker::new())));
|
||||
|
||||
type LockClient = dyn Locker;
|
||||
|
||||
|
||||
@@ -1,4 +1,3 @@
|
||||
#![allow(unused_imports)]
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
@@ -12,6 +11,8 @@
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
#![allow(unused_imports)]
|
||||
#![allow(clippy::all)]
|
||||
pub mod proto_gen;
|
||||
|
||||
|
||||
@@ -45,5 +45,4 @@ serde_json.workspace = true
|
||||
md-5 = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
#criterion = { version = "0.5.1", features = ["async", "async_tokio", "tokio"] }
|
||||
tokio-test = "0.4"
|
||||
tokio-test = { workspace = true }
|
||||
|
||||
@@ -32,7 +32,6 @@ async-trait.workspace = true
|
||||
datafusion = { workspace = true }
|
||||
derive_builder = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
lazy_static = { workspace = true }
|
||||
parking_lot = { workspace = true }
|
||||
s3s.workspace = true
|
||||
snafu = { workspace = true, features = ["backtrace"] }
|
||||
|
||||
@@ -33,7 +33,6 @@ use datafusion::{
|
||||
execution::{RecordBatchStream, SendableRecordBatchStream},
|
||||
};
|
||||
use futures::{Stream, StreamExt};
|
||||
use lazy_static::lazy_static;
|
||||
use rustfs_s3select_api::{
|
||||
QueryError, QueryResult,
|
||||
query::{
|
||||
@@ -48,6 +47,7 @@ use rustfs_s3select_api::{
|
||||
},
|
||||
};
|
||||
use s3s::dto::{FileHeaderInfo, SelectObjectContentInput};
|
||||
use std::sync::LazyLock;
|
||||
|
||||
use crate::{
|
||||
execution::factory::QueryExecutionFactoryRef,
|
||||
@@ -55,11 +55,9 @@ use crate::{
|
||||
sql::logical::planner::DefaultLogicalPlanner,
|
||||
};
|
||||
|
||||
lazy_static! {
|
||||
static ref IGNORE: FileHeaderInfo = FileHeaderInfo::from_static(FileHeaderInfo::IGNORE);
|
||||
static ref NONE: FileHeaderInfo = FileHeaderInfo::from_static(FileHeaderInfo::NONE);
|
||||
static ref USE: FileHeaderInfo = FileHeaderInfo::from_static(FileHeaderInfo::USE);
|
||||
}
|
||||
static IGNORE: LazyLock<FileHeaderInfo> = LazyLock::new(|| FileHeaderInfo::from_static(FileHeaderInfo::IGNORE));
|
||||
static NONE: LazyLock<FileHeaderInfo> = LazyLock::new(|| FileHeaderInfo::from_static(FileHeaderInfo::NONE));
|
||||
static USE: LazyLock<FileHeaderInfo> = LazyLock::new(|| FileHeaderInfo::from_static(FileHeaderInfo::USE));
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct SimpleQueryDispatcher {
|
||||
|
||||
@@ -27,7 +27,6 @@ documentation = "https://docs.rs/rustfs-signer/latest/rustfs_signer/"
|
||||
|
||||
[dependencies]
|
||||
tracing.workspace = true
|
||||
lazy_static.workspace = true
|
||||
bytes = { workspace = true }
|
||||
http.workspace = true
|
||||
time.workspace = true
|
||||
|
||||
@@ -13,8 +13,6 @@
|
||||
// limitations under the License.
|
||||
|
||||
use http::{HeaderMap, HeaderValue, request};
|
||||
use lazy_static::lazy_static;
|
||||
use std::collections::HashMap;
|
||||
use time::{OffsetDateTime, macros::format_description};
|
||||
|
||||
use super::request_signature_v4::{SERVICE_TYPE_S3, get_scope, get_signature, get_signing_key};
|
||||
@@ -32,15 +30,13 @@ const _CRLF_LEN: i64 = 2;
|
||||
const _TRAILER_KV_SEPARATOR: &str = ":";
|
||||
const _TRAILER_SIGNATURE: &str = "x-amz-trailer-signature";
|
||||
|
||||
lazy_static! {
|
||||
static ref ignored_streaming_headers: HashMap<String, bool> = {
|
||||
let mut m = <HashMap<String, bool>>::new();
|
||||
m.insert("authorization".to_string(), true);
|
||||
m.insert("user-agent".to_string(), true);
|
||||
m.insert("content-type".to_string(), true);
|
||||
m
|
||||
};
|
||||
}
|
||||
// static ignored_streaming_headers: LazyLock<HashMap<String, bool>> = LazyLock::new(|| {
|
||||
// let mut m = <HashMap<String, bool>>::new();
|
||||
// m.insert("authorization".to_string(), true);
|
||||
// m.insert("user-agent".to_string(), true);
|
||||
// m.insert("content-type".to_string(), true);
|
||||
// m
|
||||
// });
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn build_chunk_string_to_sign(t: OffsetDateTime, region: &str, previous_sig: &str, chunk_check_sum: &str) -> String {
|
||||
|
||||
@@ -16,9 +16,9 @@ use bytes::BytesMut;
|
||||
use http::HeaderMap;
|
||||
use http::Uri;
|
||||
use http::request;
|
||||
use lazy_static::lazy_static;
|
||||
use std::collections::HashMap;
|
||||
use std::fmt::Write;
|
||||
use std::sync::LazyLock;
|
||||
use time::{OffsetDateTime, macros::format_description};
|
||||
use tracing::debug;
|
||||
|
||||
@@ -32,15 +32,14 @@ pub const SIGN_V4_ALGORITHM: &str = "AWS4-HMAC-SHA256";
|
||||
pub const SERVICE_TYPE_S3: &str = "s3";
|
||||
pub const SERVICE_TYPE_STS: &str = "sts";
|
||||
|
||||
lazy_static! {
|
||||
static ref v4_ignored_headers: HashMap<String, bool> = {
|
||||
let mut m = <HashMap<String, bool>>::new();
|
||||
m.insert("accept-encoding".to_string(), true);
|
||||
m.insert("authorization".to_string(), true);
|
||||
m.insert("user-agent".to_string(), true);
|
||||
m
|
||||
};
|
||||
}
|
||||
#[allow(non_upper_case_globals)] // FIXME
|
||||
static v4_ignored_headers: LazyLock<HashMap<String, bool>> = LazyLock::new(|| {
|
||||
let mut m = <HashMap<String, bool>>::new();
|
||||
m.insert("accept-encoding".to_string(), true);
|
||||
m.insert("authorization".to_string(), true);
|
||||
m.insert("user-agent".to_string(), true);
|
||||
m
|
||||
});
|
||||
|
||||
pub fn get_signing_key(secret: &str, loc: &str, t: OffsetDateTime, service_type: &str) -> [u8; 32] {
|
||||
let mut s = "AWS4".to_string();
|
||||
|
||||
@@ -30,7 +30,6 @@ blake3 = { workspace = true, optional = true }
|
||||
crc32fast.workspace = true
|
||||
hex-simd = { workspace = true, optional = true }
|
||||
highway = { workspace = true, optional = true }
|
||||
lazy_static = { workspace = true, optional = true }
|
||||
local-ip-address = { workspace = true, optional = true }
|
||||
md-5 = { workspace = true, optional = true }
|
||||
netif = { workspace = true, optional = true }
|
||||
@@ -77,12 +76,12 @@ workspace = true
|
||||
default = ["ip"] # features that are enabled by default
|
||||
ip = ["dep:local-ip-address"] # ip characteristics and their dependencies
|
||||
tls = ["dep:rustls", "dep:rustls-pemfile", "dep:rustls-pki-types"] # tls characteristics and their dependencies
|
||||
net = ["ip", "dep:url", "dep:netif", "dep:lazy_static", "dep:futures", "dep:transform-stream", "dep:bytes", "dep:s3s", "dep:hyper", "dep:hyper-util"] # empty network features
|
||||
net = ["ip", "dep:url", "dep:netif", "dep:futures", "dep:transform-stream", "dep:bytes", "dep:s3s", "dep:hyper", "dep:hyper-util"] # empty network features
|
||||
io = ["dep:tokio"]
|
||||
path = []
|
||||
notify = ["dep:hyper", "dep:s3s"] # file system notification features
|
||||
compress = ["dep:flate2", "dep:brotli", "dep:snap", "dep:lz4", "dep:zstd"]
|
||||
string = ["dep:regex", "dep:lazy_static", "dep:rand"]
|
||||
string = ["dep:regex", "dep:rand"]
|
||||
crypto = ["dep:base64-simd", "dep:hex-simd", "dep:hmac", "dep:hyper", "dep:sha1"]
|
||||
hash = ["dep:highway", "dep:md-5", "dep:sha2", "dep:blake3", "dep:serde", "dep:siphasher", "dep:hex-simd", "dep:base64-simd"]
|
||||
os = ["dep:nix", "dep:tempfile", "winapi"] # operating system utilities
|
||||
|
||||
@@ -17,8 +17,8 @@ use futures::pin_mut;
|
||||
use futures::{Stream, StreamExt};
|
||||
use hyper::client::conn::http2::Builder;
|
||||
use hyper_util::rt::TokioExecutor;
|
||||
use lazy_static::lazy_static;
|
||||
use std::net::Ipv6Addr;
|
||||
use std::sync::LazyLock;
|
||||
use std::{
|
||||
collections::HashSet,
|
||||
fmt::Display,
|
||||
@@ -27,9 +27,7 @@ use std::{
|
||||
use transform_stream::AsyncTryStream;
|
||||
use url::{Host, Url};
|
||||
|
||||
lazy_static! {
|
||||
static ref LOCAL_IPS: Vec<IpAddr> = must_get_local_ips().unwrap();
|
||||
}
|
||||
static LOCAL_IPS: LazyLock<Vec<IpAddr>> = LazyLock::new(|| must_get_local_ips().unwrap());
|
||||
|
||||
/// helper for validating if the provided arg is an ip address.
|
||||
pub fn is_socket_addr(addr: &str) -> bool {
|
||||
@@ -178,7 +176,7 @@ impl Display for XHost {
|
||||
impl TryFrom<String> for XHost {
|
||||
type Error = std::io::Error;
|
||||
|
||||
fn try_from(value: String) -> std::result::Result<Self, Self::Error> {
|
||||
fn try_from(value: String) -> Result<Self, Self::Error> {
|
||||
if let Some(addr) = value.to_socket_addrs()?.next() {
|
||||
Ok(Self {
|
||||
name: addr.ip().to_string(),
|
||||
@@ -214,9 +212,9 @@ pub fn parse_and_resolve_address(addr_str: &str) -> std::io::Result<SocketAddr>
|
||||
}
|
||||
|
||||
#[allow(dead_code)]
|
||||
pub fn bytes_stream<S, E>(stream: S, content_length: usize) -> impl Stream<Item = std::result::Result<Bytes, E>> + Send + 'static
|
||||
pub fn bytes_stream<S, E>(stream: S, content_length: usize) -> impl Stream<Item = Result<Bytes, E>> + Send + 'static
|
||||
where
|
||||
S: Stream<Item = std::result::Result<Bytes, E>> + Send + 'static,
|
||||
S: Stream<Item = Result<Bytes, E>> + Send + 'static,
|
||||
E: Send + 'static,
|
||||
{
|
||||
AsyncTryStream::<Bytes, E, _>::new(|mut y| async move {
|
||||
|
||||
@@ -12,10 +12,10 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use lazy_static::*;
|
||||
use rand::{Rng, RngCore};
|
||||
use regex::Regex;
|
||||
use std::io::{Error, Result};
|
||||
use std::sync::LazyLock;
|
||||
|
||||
pub fn parse_bool(str: &str) -> Result<bool> {
|
||||
match str {
|
||||
@@ -116,9 +116,7 @@ pub fn match_as_pattern_prefix(pattern: &str, text: &str) -> bool {
|
||||
text.len() <= pattern.len()
|
||||
}
|
||||
|
||||
lazy_static! {
|
||||
static ref ELLIPSES_RE: Regex = Regex::new(r"(.*)(\{[0-9a-z]*\.\.\.[0-9a-z]*\})(.*)").unwrap();
|
||||
}
|
||||
static ELLIPSES_RE: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"(.*)(\{[0-9a-z]*\.\.\.[0-9a-z]*\})(.*)").unwrap());
|
||||
|
||||
/// Ellipses constants
|
||||
const OPEN_BRACES: &str = "{";
|
||||
|
||||
@@ -25,17 +25,16 @@ managing and monitoring the system.
|
||||
|--certs
|
||||
| ├── rustfs_cert.pem // Default|fallback certificate
|
||||
| ├── rustfs_key.pem // Default|fallback private key
|
||||
| ├── example.com/ // certificate directory of specific domain names
|
||||
| ├── rustfs.com/ // certificate directory of specific domain names
|
||||
| │ ├── rustfs_cert.pem
|
||||
| │ └── rustfs_key.pem
|
||||
| ├── api.example.com/
|
||||
| ├── api.rustfs.com/
|
||||
| │ ├── rustfs_cert.pem
|
||||
| │ └── rustfs_key.pem
|
||||
| └── cdn.example.com/
|
||||
| └── cdn.rustfs.com/
|
||||
| ├── rustfs_cert.pem
|
||||
| └── rustfs_key.pem
|
||||
|--config
|
||||
| |--rustfs.env // env config
|
||||
| |--rustfs-zh.env // env config in Chinese
|
||||
| |--event.example.toml // event config
|
||||
```
|
||||
@@ -36,15 +36,11 @@ Environment=RUSTFS_SECRET_KEY=rustfsadmin
|
||||
ExecStart=/usr/local/bin/rustfs \
|
||||
--address 0.0.0.0:9000 \
|
||||
--volumes /data/rustfs/vol1,/data/rustfs/vol2 \
|
||||
--obs-config /etc/rustfs/obs.yaml \
|
||||
--console-enable \
|
||||
--console-address 0.0.0.0:9001
|
||||
--console-enable
|
||||
# Defines the startup command: run /usr/local/bin/rustfs with the following arguments:
|
||||
# --address 0.0.0.0:9000: the service listens on port 9000 on all interfaces.
|
||||
# --volumes: sets the storage volume paths to /data/rustfs/vol1 and /data/rustfs/vol2.
|
||||
# --obs-config: sets the configuration file path to /etc/rustfs/obs.yaml.
|
||||
# --console-enable: enables the console.
|
||||
# --console-address 0.0.0.0:9001: the console listens on port 9001 on all interfaces.
|
||||
|
||||
# Defines the environment variable configuration passed to the service; recommended and concise
|
||||
# rustfs sample file, see: `../config/rustfs-zh.env`
|
||||
|
||||
@@ -83,7 +83,6 @@ sudo journalctl -u rustfs --since today
|
||||
```bash
|
||||
# Check service ports
|
||||
ss -tunlp | grep 9000
|
||||
ss -tunlp | grep 9001
|
||||
|
||||
# Test service availability
|
||||
curl -I http://localhost:9000
|
||||
|
||||
@@ -83,7 +83,6 @@ sudo journalctl -u rustfs --since today
|
||||
```bash
|
||||
# Check service ports
|
||||
ss -tunlp | grep 9000
|
||||
ss -tunlp | grep 9001
|
||||
|
||||
# Test service availability
|
||||
curl -I http://localhost:9000
|
||||
|
||||
@@ -22,9 +22,7 @@ Environment=RUSTFS_SECRET_KEY=rustfsadmin
|
||||
ExecStart=/usr/local/bin/rustfs \
|
||||
--address 0.0.0.0:9000 \
|
||||
--volumes /data/rustfs/vol1,/data/rustfs/vol2 \
|
||||
--obs-config /etc/rustfs/obs.yaml \
|
||||
--console-enable \
|
||||
--console-address 0.0.0.0:9001
|
||||
--console-enable
|
||||
|
||||
# environment variable configuration (Option 2: Use environment variables)
|
||||
# rustfs example file see: `../config/rustfs.env`
|
||||
|
||||
@@ -36,13 +36,13 @@ cd deploy/certs/
|
||||
ls -la
|
||||
├── rustfs_cert.pem // Default|fallback certificate
|
||||
├── rustfs_key.pem // Default|fallback private key
|
||||
├── example.com/ // certificate directory of specific domain names
|
||||
├── rustfs.com/ // certificate directory of specific domain names
|
||||
│ ├── rustfs_cert.pem
|
||||
│ └── rustfs_key.pem
|
||||
├── api.example.com/
|
||||
├── api.rustfs.com/
|
||||
│ ├── rustfs_cert.pem
|
||||
│ └── rustfs_key.pem
|
||||
└── cdn.example.com/
|
||||
└── cdn.rustfs.com/
|
||||
├── rustfs_cert.pem
|
||||
└── rustfs_key.pem
|
||||
```
|
||||
@@ -7,22 +7,16 @@ RUSTFS_ROOT_PASSWORD=rustfsadmin
|
||||
# RustFS 数据卷存储路径,支持多卷配置,vol1 到 vol4
|
||||
RUSTFS_VOLUMES="./deploy/deploy/vol{1...4}"
|
||||
# RustFS 服务启动参数,指定监听地址和端口
|
||||
RUSTFS_OPTS="--address 0.0.0.0:9000"
|
||||
RUSTFS_OPTS="--address :9000"
|
||||
# RustFS 服务监听地址和端口
|
||||
RUSTFS_ADDRESS="0.0.0.0:9000"
|
||||
RUSTFS_ADDRESS=":9000"
|
||||
# 是否启用 RustFS 控制台功能
|
||||
RUSTFS_CONSOLE_ENABLE=true
|
||||
# RustFS 控制台监听地址和端口
|
||||
RUSTFS_CONSOLE_ADDRESS="0.0.0.0:9001"
|
||||
# RustFS 服务端点地址,用于客户端访问
|
||||
RUSTFS_SERVER_ENDPOINT="http://127.0.0.1:9000"
|
||||
# RustFS 服务域名配置
|
||||
RUSTFS_SERVER_DOMAINS=127.0.0.1:9001
|
||||
RUSTFS_SERVER_DOMAINS=127.0.0.1:9000
|
||||
# RustFS 许可证内容
|
||||
RUSTFS_LICENSE="license content"
|
||||
# 可观测性配置Endpoint:http://localhost:4317
|
||||
RUSTFS_OBS_ENDPOINT=http://localhost:4317
|
||||
# TLS 证书目录路径:deploy/certs
|
||||
RUSTFS_TLS_PATH=/etc/default/tls
|
||||
# 事件通知配置文件路径:deploy/config/event.example.toml
|
||||
RUSTFS_EVENT_CONFIG=/etc/default/event.toml
|
||||
RUSTFS_TLS_PATH=/etc/default/tls
|
||||
@@ -7,22 +7,16 @@ RUSTFS_ROOT_PASSWORD=rustfsadmin
|
||||
# RustFS data volume storage paths, supports multiple volumes from vol1 to vol4
|
||||
RUSTFS_VOLUMES="./deploy/deploy/vol{1...4}"
|
||||
# RustFS service startup parameters, specifying listen address and port
|
||||
RUSTFS_OPTS="--address 0.0.0.0:9000"
|
||||
RUSTFS_OPTS="--address :9000"
|
||||
# RustFS service listen address and port
|
||||
RUSTFS_ADDRESS="0.0.0.0:9000"
|
||||
RUSTFS_ADDRESS=":9000"
|
||||
# Enable RustFS console functionality
|
||||
RUSTFS_CONSOLE_ENABLE=true
|
||||
# RustFS console listen address and port
|
||||
RUSTFS_CONSOLE_ADDRESS="0.0.0.0:9001"
|
||||
# RustFS service endpoint for client access
|
||||
RUSTFS_SERVER_ENDPOINT="http://127.0.0.1:9000"
|
||||
# RustFS service domain configuration
|
||||
RUSTFS_SERVER_DOMAINS=127.0.0.1:9001
|
||||
RUSTFS_SERVER_DOMAINS=127.0.0.1:9000
|
||||
# RustFS license content
|
||||
RUSTFS_LICENSE="license content"
|
||||
# Observability configuration endpoint: RUSTFS_OBS_ENDPOINT
|
||||
RUSTFS_OBS_ENDPOINT=http://localhost:4317
|
||||
# TLS certificates directory path: deploy/certs
|
||||
RUSTFS_TLS_PATH=/etc/default/tls
|
||||
# event notification configuration file path: deploy/config/event.example.toml
|
||||
RUSTFS_EVENT_CONFIG=/etc/default/event.toml
|
||||
RUSTFS_TLS_PATH=/etc/default/tls
|
||||
@@ -12,7 +12,7 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
version: '3.8'
|
||||
version: "3.8"
|
||||
|
||||
services:
|
||||
# RustFS main service
|
||||
@@ -23,17 +23,15 @@ services:
|
||||
container_name: rustfs-server
|
||||
build:
|
||||
context: .
|
||||
dockerfile: Dockerfile.multi-stage
|
||||
dockerfile: .docker/ubuntu/Dockerfile.source
|
||||
args:
|
||||
TARGETPLATFORM: linux/amd64
|
||||
ports:
|
||||
- "9000:9000" # S3 API port
|
||||
- "9001:9001" # Console port
|
||||
- "9000:9000" # S3 API port
|
||||
environment:
|
||||
- RUSTFS_VOLUMES=/data/rustfs0,/data/rustfs1,/data/rustfs2,/data/rustfs3
|
||||
- RUSTFS_ADDRESS=0.0.0.0:9000
|
||||
- RUSTFS_CONSOLE_ENABLE=true
|
||||
- RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9001
|
||||
- RUSTFS_ACCESS_KEY=rustfsadmin
|
||||
- RUSTFS_SECRET_KEY=rustfsadmin
|
||||
- RUSTFS_LOG_LEVEL=info
|
||||
@@ -48,7 +46,15 @@ services:
|
||||
- rustfs-network
|
||||
restart: unless-stopped
|
||||
healthcheck:
|
||||
test: [ "CMD", "wget", "--no-verbose", "--tries=1", "--spider", "http://localhost:9000/health" ]
|
||||
test:
|
||||
[
|
||||
"CMD",
|
||||
"wget",
|
||||
"--no-verbose",
|
||||
"--tries=1",
|
||||
"--spider",
|
||||
"http://localhost:9000/health",
|
||||
]
|
||||
interval: 30s
|
||||
timeout: 10s
|
||||
retries: 3
|
||||
@@ -62,20 +68,19 @@ services:
|
||||
container_name: rustfs-dev
|
||||
build:
|
||||
context: .
|
||||
dockerfile: .docker/Dockerfile.devenv
|
||||
dockerfile: .docker/ubuntu/Dockerfile.dev
|
||||
# Pure development environment
|
||||
ports:
|
||||
- "9010:9000"
|
||||
- "9011:9001"
|
||||
environment:
|
||||
- RUSTFS_VOLUMES=/data/rustfs0,/data/rustfs1
|
||||
- RUSTFS_ADDRESS=0.0.0.0:9000
|
||||
- RUSTFS_CONSOLE_ENABLE=true
|
||||
- RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9001
|
||||
- RUSTFS_ACCESS_KEY=devadmin
|
||||
- RUSTFS_SECRET_KEY=devadmin
|
||||
- RUSTFS_LOG_LEVEL=debug
|
||||
volumes:
|
||||
- .:/root/s3-rustfs
|
||||
- .:/app # Mount source code to /app for development
|
||||
- rustfs_dev_data:/data
|
||||
networks:
|
||||
- rustfs-network
|
||||
@@ -92,10 +97,10 @@ services:
|
||||
volumes:
|
||||
- ./.docker/observability/otel-collector.yml:/etc/otelcol-contrib/otel-collector.yml:ro
|
||||
ports:
|
||||
- "4317:4317" # OTLP gRPC receiver
|
||||
- "4318:4318" # OTLP HTTP receiver
|
||||
- "8888:8888" # Prometheus metrics
|
||||
- "8889:8889" # Prometheus exporter metrics
|
||||
- "4317:4317" # OTLP gRPC receiver
|
||||
- "4318:4318" # OTLP HTTP receiver
|
||||
- "8888:8888" # Prometheus metrics
|
||||
- "8889:8889" # Prometheus exporter metrics
|
||||
networks:
|
||||
- rustfs-network
|
||||
restart: unless-stopped
|
||||
@@ -107,8 +112,8 @@ services:
|
||||
image: jaegertracing/all-in-one:latest
|
||||
container_name: jaeger
|
||||
ports:
|
||||
- "16686:16686" # Jaeger UI
|
||||
- "14250:14250" # Jaeger gRPC
|
||||
- "16686:16686" # Jaeger UI
|
||||
- "14250:14250" # Jaeger gRPC
|
||||
environment:
|
||||
- COLLECTOR_OTLP_ENABLED=true
|
||||
networks:
|
||||
@@ -127,12 +132,12 @@ services:
|
||||
- ./.docker/observability/prometheus.yml:/etc/prometheus/prometheus.yml:ro
|
||||
- prometheus_data:/prometheus
|
||||
command:
|
||||
- '--config.file=/etc/prometheus/prometheus.yml'
|
||||
- '--storage.tsdb.path=/prometheus'
|
||||
- '--web.console.libraries=/etc/prometheus/console_libraries'
|
||||
- '--web.console.templates=/etc/prometheus/consoles'
|
||||
- '--storage.tsdb.retention.time=200h'
|
||||
- '--web.enable-lifecycle'
|
||||
- "--config.file=/etc/prometheus/prometheus.yml"
|
||||
- "--storage.tsdb.path=/prometheus"
|
||||
- "--web.console.libraries=/etc/prometheus/console_libraries"
|
||||
- "--web.console.templates=/etc/prometheus/consoles"
|
||||
- "--storage.tsdb.retention.time=200h"
|
||||
- "--web.enable-lifecycle"
|
||||
networks:
|
||||
- rustfs-network
|
||||
restart: unless-stopped
|
||||
|
||||
@@ -1,530 +0,0 @@
|
||||
# RustFS Docker Build and Deployment Guide
|
||||
|
||||
This document describes how to build and deploy RustFS using Docker, including the automated GitHub Actions workflow for building and pushing images to Docker Hub and GitHub Container Registry.
|
||||
|
||||
## 🚀 Quick Start
|
||||
|
||||
### Using Pre-built Images
|
||||
|
||||
```bash
|
||||
# Pull and run the latest RustFS image
|
||||
docker run -d \
|
||||
--name rustfs \
|
||||
-p 9000:9000 \
|
||||
-p 9001:9001 \
|
||||
-v rustfs_data:/data \
|
||||
-e RUSTFS_VOLUMES=/data/rustfs0,/data/rustfs1,/data/rustfs2,/data/rustfs3 \
|
||||
-e RUSTFS_ACCESS_KEY=rustfsadmin \
|
||||
-e RUSTFS_SECRET_KEY=rustfsadmin \
|
||||
-e RUSTFS_CONSOLE_ENABLE=true \
|
||||
rustfs/rustfs:latest
|
||||
```
|
||||
|
||||
### Using Docker Compose
|
||||
|
||||
```bash
|
||||
# Basic deployment
|
||||
docker-compose up -d
|
||||
|
||||
# Development environment
|
||||
docker-compose --profile dev up -d
|
||||
|
||||
# With observability stack
|
||||
docker-compose --profile observability up -d
|
||||
|
||||
# Full stack with all services
|
||||
docker-compose --profile dev --profile observability --profile testing up -d
|
||||
```
|
||||
|
||||
## 📦 Available Images
|
||||
|
||||
Our GitHub Actions workflow builds multiple image variants:
|
||||
|
||||
### Image Registries
|
||||
|
||||
- **Docker Hub**: `rustfs/rustfs`
|
||||
- **GitHub Container Registry**: `ghcr.io/rustfs/s3-rustfs`
|
||||
|
||||
### Image Variants
|
||||
|
||||
| Variant | Tag Suffix | Description | Use Case |
|
||||
|---------|------------|-------------|----------|
|
||||
| Production | *(none)* | Minimal Ubuntu-based runtime | Production deployment |
|
||||
| Ubuntu | `-ubuntu22.04` | Ubuntu 22.04 based build environment | Development/Testing |
|
||||
| Rocky Linux | `-rockylinux9.3` | Rocky Linux 9.3 based build environment | Enterprise environments |
|
||||
| Development | `-devenv` | Full development environment | Development/Debugging |
|
||||
|
||||
### Supported Architectures
|
||||
|
||||
All images are published as multi-architecture builds:
|
||||
- `linux/amd64` (x86_64-unknown-linux-musl)
|
||||
- `linux/arm64` (aarch64-unknown-linux-gnu)
|
||||
|
||||
### Tag Examples
|
||||
|
||||
```bash
|
||||
# Latest production image
|
||||
rustfs/rustfs:latest
|
||||
rustfs/rustfs:main
|
||||
|
||||
# Specific version
|
||||
rustfs/rustfs:v1.0.0
|
||||
rustfs/rustfs:v1.0.0-ubuntu22.04
|
||||
|
||||
# Development environment
|
||||
rustfs/rustfs:latest-devenv
|
||||
rustfs/rustfs:main-devenv
|
||||
```
|
||||
|
||||
## 🔧 GitHub Actions Workflow
|
||||
|
||||
The Docker build workflow (`.github/workflows/docker.yml`) automatically:
|
||||
|
||||
1. **Builds cross-platform binaries** for `amd64` and `arm64`
|
||||
2. **Creates Docker images** for all variants
|
||||
3. **Pushes to registries** (Docker Hub and GitHub Container Registry)
|
||||
4. **Creates multi-arch manifests** for seamless platform selection
|
||||
5. **Performs security scanning** using Trivy
|
||||
|
||||
### Cross-Compilation Strategy
|
||||
|
||||
To handle complex native dependencies, we use different compilation strategies:
|
||||
|
||||
- **x86_64**: Native compilation with `x86_64-unknown-linux-musl` for static linking
|
||||
- **aarch64**: Cross-compilation with `aarch64-unknown-linux-gnu` using the `cross` tool
|
||||
|
||||
This approach ensures compatibility with various C libraries while maintaining performance.
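
In the workflow this split is naturally expressed as a per-target build matrix. The snippet below is only a sketch of that idea; the step names, matrix keys, and the `use-cross` flag are illustrative and not taken from the actual `docker.yml`:

```yaml
# Illustrative matrix — not the actual docker.yml contents
strategy:
  matrix:
    include:
      - target: x86_64-unknown-linux-musl    # native build, statically linked against musl
        use-cross: false
      - target: aarch64-unknown-linux-gnu    # built with the `cross` tool
        use-cross: true
steps:
  - name: Build rustfs binary
    run: |
      if [ "${{ matrix.use-cross }}" = "true" ]; then
        cross build --release --target ${{ matrix.target }} --bin rustfs
      else
        cargo build --release --target ${{ matrix.target }} --bin rustfs
      fi
```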
|
||||
|
||||
### Workflow Triggers
|
||||
|
||||
- **Push to main branch**: Builds and pushes `main` and `latest` tags
|
||||
- **Tag push** (`v*`): Builds and pushes version tags
|
||||
- **Pull requests**: Builds images without pushing
|
||||
- **Manual trigger**: Workflow dispatch with options
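
Expressed as a workflow trigger block, the list above corresponds roughly to the following sketch (the `workflow_dispatch` input is an assumed example; the real workflow may define different options and filters):

```yaml
# Sketch of an `on:` block matching the triggers above — not copied from docker.yml
on:
  push:
    branches: [main]   # builds and pushes `main` / `latest`
    tags: ["v*"]       # builds and pushes version tags
  pull_request:         # builds images without pushing
  workflow_dispatch:    # manual trigger with options
    inputs:
      push_images:
        description: "Push the built images to the registries"
        type: boolean
        default: false
```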
|
||||
|
||||
### Required Secrets
|
||||
|
||||
Configure these secrets in your GitHub repository:
|
||||
|
||||
```bash
|
||||
# Docker Hub credentials
|
||||
DOCKERHUB_USERNAME=your-dockerhub-username
|
||||
DOCKERHUB_TOKEN=your-dockerhub-access-token
|
||||
|
||||
# GitHub token is automatically available
|
||||
GITHUB_TOKEN=automatically-provided
|
||||
```
|
||||
|
||||
## 🏗️ Building Locally
|
||||
|
||||
### Prerequisites
|
||||
|
||||
- Docker with BuildKit enabled
|
||||
- Rust toolchain (1.85+)
|
||||
- Protocol Buffers compiler (protoc 31.1+)
|
||||
- FlatBuffers compiler (flatc 25.2.10+)
|
||||
- `cross` tool for ARM64 compilation
|
||||
|
||||
### Installation Commands
|
||||
|
||||
```bash
|
||||
# Install Rust targets
|
||||
rustup target add x86_64-unknown-linux-musl
|
||||
rustup target add aarch64-unknown-linux-gnu
|
||||
|
||||
# Install cross for ARM64 compilation
|
||||
cargo install cross --git https://github.com/cross-rs/cross
|
||||
|
||||
# Install protoc (macOS)
|
||||
brew install protobuf
|
||||
|
||||
# Install protoc (Ubuntu)
|
||||
sudo apt-get install protobuf-compiler
|
||||
|
||||
# Install flatc
|
||||
# Download from: https://github.com/google/flatbuffers/releases
|
||||
```
|
||||
|
||||
### Build Commands
|
||||
|
||||
```bash
|
||||
# Test cross-compilation setup
|
||||
./scripts/test-cross-build.sh
|
||||
|
||||
# Build production image for local platform
|
||||
docker build -t rustfs:local .
|
||||
|
||||
# Build multi-stage production image
|
||||
docker build -f Dockerfile.multi-stage -t rustfs:multi-stage .
|
||||
|
||||
# Build specific variant
|
||||
docker build -f .docker/Dockerfile.ubuntu22.04 -t rustfs:ubuntu .
|
||||
|
||||
# Build for specific platform
|
||||
docker build --platform linux/amd64 -t rustfs:amd64 .
|
||||
docker build --platform linux/arm64 -t rustfs:arm64 .
|
||||
|
||||
# Build multi-platform image
|
||||
docker buildx build --platform linux/amd64,linux/arm64 -t rustfs:multi .
|
||||
```
|
||||
|
||||
### Cross-Compilation
|
||||
|
||||
```bash
|
||||
# Generate protobuf code first
|
||||
cargo run --bin gproto
|
||||
|
||||
# Native x86_64 build
|
||||
cargo build --release --target x86_64-unknown-linux-musl --bin rustfs
|
||||
|
||||
# Cross-compile for ARM64
|
||||
cross build --release --target aarch64-unknown-linux-gnu --bin rustfs
|
||||
```
|
||||
|
||||
### Build with Docker Compose
|
||||
|
||||
```bash
|
||||
# Build all services
|
||||
docker-compose build
|
||||
|
||||
# Build specific service
|
||||
docker-compose build rustfs
|
||||
|
||||
# Build development environment
|
||||
docker-compose build rustfs-dev
|
||||
```
|
||||
|
||||
## 🚀 Deployment Options
|
||||
|
||||
### 1. Single Container
|
||||
|
||||
```bash
|
||||
docker run -d \
|
||||
--name rustfs \
|
||||
--restart unless-stopped \
|
||||
-p 9000:9000 \
|
||||
-p 9001:9001 \
|
||||
-v /data/rustfs:/data \
|
||||
-e RUSTFS_VOLUMES=/data/rustfs0,/data/rustfs1,/data/rustfs2,/data/rustfs3 \
|
||||
-e RUSTFS_ADDRESS=0.0.0.0:9000 \
|
||||
-e RUSTFS_CONSOLE_ENABLE=true \
|
||||
-e RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9001 \
|
||||
-e RUSTFS_ACCESS_KEY=rustfsadmin \
|
||||
-e RUSTFS_SECRET_KEY=rustfsadmin \
|
||||
rustfs/rustfs:latest
|
||||
```
|
||||
|
||||
### 2. Docker Compose Profiles
|
||||
|
||||
```bash
|
||||
# Production deployment
|
||||
docker-compose up -d
|
||||
|
||||
# Development with debugging
|
||||
docker-compose --profile dev up -d
|
||||
|
||||
# With monitoring stack
|
||||
docker-compose --profile observability up -d
|
||||
|
||||
# Complete testing environment
|
||||
docker-compose --profile dev --profile observability --profile testing up -d
|
||||
```
|
||||
|
||||
### 3. Kubernetes Deployment
|
||||
|
||||
```yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: rustfs
|
||||
spec:
|
||||
replicas: 3
|
||||
selector:
|
||||
matchLabels:
|
||||
app: rustfs
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
app: rustfs
|
||||
spec:
|
||||
containers:
|
||||
- name: rustfs
|
||||
image: rustfs/rustfs:latest
|
||||
ports:
|
||||
- containerPort: 9000
|
||||
- containerPort: 9001
|
||||
env:
|
||||
- name: RUSTFS_VOLUMES
|
||||
value: "/data/rustfs0,/data/rustfs1,/data/rustfs2,/data/rustfs3"
|
||||
- name: RUSTFS_ADDRESS
|
||||
value: "0.0.0.0:9000"
|
||||
- name: RUSTFS_CONSOLE_ENABLE
|
||||
value: "true"
|
||||
- name: RUSTFS_CONSOLE_ADDRESS
|
||||
value: "0.0.0.0:9001"
|
||||
volumeMounts:
|
||||
- name: data
|
||||
mountPath: /data
|
||||
volumes:
|
||||
- name: data
|
||||
persistentVolumeClaim:
|
||||
claimName: rustfs-data
|
||||
```
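
The Deployment above assumes a `rustfs-data` claim and does not expose the pod by itself. A minimal PersistentVolumeClaim and Service to pair with it could look like the following sketch (storage size and access mode are assumptions to adjust for your cluster):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: rustfs-data
spec:
  accessModes: ["ReadWriteOnce"]   # assumption; sharing across replicas needs a suitable storage class
  resources:
    requests:
      storage: 100Gi               # assumption: size to your data volumes
---
apiVersion: v1
kind: Service
metadata:
  name: rustfs
spec:
  selector:
    app: rustfs
  ports:
    - name: s3-api
      port: 9000
      targetPort: 9000
    - name: console
      port: 9001
      targetPort: 9001
```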
|
||||
|
||||
## ⚙️ Configuration
|
||||
|
||||
### Environment Variables
|
||||
|
||||
| Variable | Description | Default |
|
||||
|----------|-------------|---------|
|
||||
| `RUSTFS_VOLUMES` | Comma-separated list of data volumes | Required |
|
||||
| `RUSTFS_ADDRESS` | Server bind address | `0.0.0.0:9000` |
|
||||
| `RUSTFS_CONSOLE_ENABLE` | Enable web console | `false` |
|
||||
| `RUSTFS_CONSOLE_ADDRESS` | Console bind address | `0.0.0.0:9001` |
|
||||
| `RUSTFS_ACCESS_KEY` | S3 access key | `rustfsadmin` |
|
||||
| `RUSTFS_SECRET_KEY` | S3 secret key | `rustfsadmin` |
|
||||
| `RUSTFS_LOG_LEVEL` | Log level | `info` |
|
||||
| `RUSTFS_OBS_ENDPOINT` | Observability endpoint | `""` |
|
||||
| `RUSTFS_TLS_PATH` | TLS certificates path | `""` |
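
For reference, the same variables map directly onto a Compose `environment:` block; the sketch below simply restates the defaults from the table and is not a recommended production configuration (credentials in particular should come from a secret store):

```yaml
services:
  rustfs:
    image: rustfs/rustfs:latest
    environment:
      - RUSTFS_VOLUMES=/data/rustfs0,/data/rustfs1,/data/rustfs2,/data/rustfs3
      - RUSTFS_ADDRESS=0.0.0.0:9000
      - RUSTFS_CONSOLE_ENABLE=true
      - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9001
      - RUSTFS_ACCESS_KEY=rustfsadmin
      - RUSTFS_SECRET_KEY=rustfsadmin
      - RUSTFS_LOG_LEVEL=info
    volumes:
      - rustfs_data:/data
volumes:
  rustfs_data: {}
```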
|
||||
|
||||
### Volume Mounts
|
||||
|
||||
- **Data volumes**: `/data/rustfs{0,1,2,3}` - RustFS data storage
|
||||
- **Logs**: `/app/logs` - Application logs
|
||||
- **Config**: `/etc/rustfs/` - Configuration files
|
||||
- **TLS**: `/etc/ssl/rustfs/` - TLS certificates
|
||||
|
||||
### Ports
|
||||
|
||||
- **9000**: S3 API endpoint
|
||||
- **9001**: Web console (if enabled)
|
||||
- **9002**: Admin API (if enabled)
|
||||
- **50051**: gRPC API (if enabled)
|
||||
|
||||
## 🔍 Monitoring and Observability
|
||||
|
||||
### Health Checks
|
||||
|
||||
The Docker images include built-in health checks:
|
||||
|
||||
```bash
|
||||
# Check container health
|
||||
docker ps --filter "name=rustfs" --format "table {{.Names}}\t{{.Status}}"
|
||||
|
||||
# View health check logs
|
||||
docker inspect rustfs --format='{{json .State.Health}}'
|
||||
```
|
||||
|
||||
### Metrics and Tracing
|
||||
|
||||
When using the observability profile:
|
||||
|
||||
- **Prometheus**: http://localhost:9090
|
||||
- **Grafana**: http://localhost:3000 (admin/admin)
|
||||
- **Jaeger**: http://localhost:16686
|
||||
- **OpenTelemetry Collector**: http://localhost:8888/metrics
|
||||
|
||||
### Log Collection
|
||||
|
||||
```bash
|
||||
# View container logs
|
||||
docker logs rustfs -f
|
||||
|
||||
# Export logs
|
||||
docker logs rustfs > rustfs.log 2>&1
|
||||
```
|
||||
|
||||
## 🛠️ Development
|
||||
|
||||
### Development Environment
|
||||
|
||||
```bash
|
||||
# Start development container
|
||||
docker-compose --profile dev up -d rustfs-dev
|
||||
|
||||
# Access development container
|
||||
docker exec -it rustfs-dev bash
|
||||
|
||||
# Mount source code for live development
|
||||
docker run -it --rm \
|
||||
-v $(pwd):/root/s3-rustfs \
|
||||
-p 9000:9000 \
|
||||
rustfs/rustfs:devenv \
|
||||
bash
|
||||
```
|
||||
|
||||
### Building from Source in Container
|
||||
|
||||
```bash
|
||||
# Use development image for building
|
||||
docker run --rm \
|
||||
-v $(pwd):/root/s3-rustfs \
|
||||
-w /root/s3-rustfs \
|
||||
rustfs/rustfs:ubuntu22.04 \
|
||||
cargo build --release --bin rustfs
|
||||
```
|
||||
|
||||
### Testing Cross-Compilation
|
||||
|
||||
```bash
|
||||
# Run the test script to verify cross-compilation setup
|
||||
./scripts/test-cross-build.sh
|
||||
|
||||
# This will test:
|
||||
# - x86_64-unknown-linux-musl compilation
|
||||
# - aarch64-unknown-linux-gnu cross-compilation
|
||||
# - Docker builds for both architectures
|
||||
```
|
||||
|
||||
## 🔐 Security
|
||||
|
||||
### Security Scanning
|
||||
|
||||
The workflow includes Trivy security scanning:
|
||||
|
||||
```bash
|
||||
# Run security scan locally
|
||||
docker run --rm -v /var/run/docker.sock:/var/run/docker.sock \
|
||||
-v $HOME/Library/Caches:/root/.cache/ \
|
||||
aquasec/trivy:latest image rustfs/rustfs:latest
|
||||
```
|
||||
|
||||
### Security Best Practices
|
||||
|
||||
1. **Use non-root user**: Images run as `rustfs` user (UID 1000)
|
||||
2. **Minimal base images**: Ubuntu minimal for production
|
||||
3. **Security updates**: Regular base image updates
|
||||
4. **Secret management**: Use Docker secrets or environment files (see the sketch after this list)
|
||||
5. **Network security**: Use Docker networks and proper firewall rules
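
As a concrete example for point 4, one way to keep credentials out of a committed Compose file is an `env_file` entry; the file name below is an assumption, not part of this repository:

```yaml
# Sketch: load credentials from an untracked env file instead of hard-coding them
services:
  rustfs:
    image: rustfs/rustfs:latest
    env_file:
      - ./rustfs.secrets.env   # assumed file holding RUSTFS_ACCESS_KEY / RUSTFS_SECRET_KEY
```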
|
||||
|
||||
## 📝 Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
#### 1. Cross-Compilation Failures
|
||||
|
||||
**Problem**: ARM64 build fails with linking errors
|
||||
```bash
|
||||
error: linking with `aarch64-linux-gnu-gcc` failed
|
||||
```
|
||||
|
||||
**Solution**: Use the `cross` tool instead of native cross-compilation:
|
||||
```bash
|
||||
# Install cross tool
|
||||
cargo install cross --git https://github.com/cross-rs/cross
|
||||
|
||||
# Use cross for ARM64 builds
|
||||
cross build --release --target aarch64-unknown-linux-gnu --bin rustfs
|
||||
```
|
||||
|
||||
#### 2. Protobuf Generation Issues
|
||||
|
||||
**Problem**: Missing protobuf definitions
|
||||
```bash
|
||||
error: failed to run custom build command for `protos`
|
||||
```
|
||||
|
||||
**Solution**: Generate protobuf code first:
|
||||
```bash
|
||||
cargo run --bin gproto
|
||||
```
|
||||
|
||||
#### 3. Docker Build Failures
|
||||
|
||||
**Problem**: Binary not found in Docker build
|
||||
```bash
|
||||
COPY failed: file not found in build context
|
||||
```
|
||||
|
||||
**Solution**: Ensure binaries are built before Docker build:
|
||||
```bash
|
||||
# Build binaries first
|
||||
cargo build --release --target x86_64-unknown-linux-musl --bin rustfs
|
||||
cross build --release --target aarch64-unknown-linux-gnu --bin rustfs
|
||||
|
||||
# Then build Docker image
|
||||
docker build .
|
||||
```
|
||||
|
||||
### Debug Commands
|
||||
|
||||
```bash
|
||||
# Check container status
|
||||
docker ps -a
|
||||
|
||||
# View container logs
|
||||
docker logs rustfs --tail 100
|
||||
|
||||
# Access container shell
|
||||
docker exec -it rustfs bash
|
||||
|
||||
# Check resource usage
|
||||
docker stats rustfs
|
||||
|
||||
# Inspect container configuration
|
||||
docker inspect rustfs
|
||||
|
||||
# Test cross-compilation setup
|
||||
./scripts/test-cross-build.sh
|
||||
```
|
||||
|
||||
## 🔄 CI/CD Integration
|
||||
|
||||
### GitHub Actions
|
||||
|
||||
The provided workflow can be customized:
|
||||
|
||||
```yaml
|
||||
# Override image names
|
||||
env:
|
||||
REGISTRY_IMAGE_DOCKERHUB: myorg/rustfs
|
||||
REGISTRY_IMAGE_GHCR: ghcr.io/myorg/rustfs
|
||||
```
|
||||
|
||||
### GitLab CI
|
||||
|
||||
```yaml
|
||||
build:
|
||||
stage: build
|
||||
image: docker:latest
|
||||
services:
|
||||
- docker:dind
|
||||
script:
|
||||
- docker build -t $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA .
|
||||
- docker push $CI_REGISTRY_IMAGE:$CI_COMMIT_SHA
|
||||
```
|
||||
|
||||
### Jenkins Pipeline
|
||||
|
||||
```groovy
|
||||
pipeline {
|
||||
agent any
|
||||
stages {
|
||||
stage('Build') {
|
||||
steps {
|
||||
script {
|
||||
docker.build("rustfs:${env.BUILD_ID}")
|
||||
}
|
||||
}
|
||||
}
|
||||
stage('Push') {
|
||||
steps {
|
||||
script {
|
||||
docker.withRegistry('https://registry.hub.docker.com', 'dockerhub-credentials') {
|
||||
docker.image("rustfs:${env.BUILD_ID}").push()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## 📚 Additional Resources
|
||||
|
||||
- [Docker Official Documentation](https://docs.docker.com/)
|
||||
- [Docker Compose Reference](https://docs.docker.com/compose/)
|
||||
- [GitHub Actions Documentation](https://docs.github.com/en/actions)
|
||||
- [Cross-compilation with Rust](https://rust-lang.github.io/rustup/cross-compilation.html)
|
||||
- [Cross tool documentation](https://github.com/cross-rs/cross)
|
||||
- [RustFS Configuration Guide](../README.md)
|
||||
@@ -1,57 +0,0 @@
|
||||
## Summary
|
||||
|
||||
This PR modifies the GitHub Actions workflows to ensure that **version releases never get skipped** during CI/CD execution, addressing the issue where duplicate action detection could skip important release processes.
|
||||
|
||||
## Changes Made
|
||||
|
||||
### 🔧 Core Modifications
|
||||
|
||||
1. **Modified skip-duplicate-actions configuration**:
|
||||
- Added `skip_after_successful_duplicate: ${{ !startsWith(github.ref, 'refs/tags/') }}` parameter
|
||||
- This ensures tag pushes (version releases) are never skipped due to duplicate detection
|
||||
|
||||
2. **Updated workflow job conditions**:
|
||||
- **CI Workflow** (`ci.yml`): Modified `test-and-lint` and `e2e-tests` jobs
|
||||
- **Build Workflow** (`build.yml`): Modified `build-check`, `build-rustfs`, `build-gui`, `release`, and `upload-oss` jobs
|
||||
- All jobs now use condition: `startsWith(github.ref, 'refs/tags/') || needs.skip-check.outputs.should_skip != 'true'`
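
Put together, the two changes look roughly like the sketch below, assuming the commonly used `fkirc/skip-duplicate-actions` action; the job and step names are illustrative, not copied from `ci.yml` or `build.yml`:

```yaml
# Sketch only — illustrative job names, not the actual workflow files
jobs:
  skip-check:
    runs-on: ubuntu-latest
    outputs:
      should_skip: ${{ steps.skip.outputs.should_skip }}
    steps:
      - id: skip
        uses: fkirc/skip-duplicate-actions@v5
        with:
          # never treat a tag push (version release) as a skippable duplicate
          skip_after_successful_duplicate: ${{ !startsWith(github.ref, 'refs/tags/') }}

  build-rustfs:
    needs: skip-check
    # tag pushes always run; other refs respect the duplicate check
    if: startsWith(github.ref, 'refs/tags/') || needs.skip-check.outputs.should_skip != 'true'
    runs-on: ubuntu-latest
    steps:
      - uses: actions/checkout@v4
      - run: cargo build --release --bin rustfs
```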
|
||||
|
||||
### 🎯 Problem Solved
|
||||
|
||||
- **Before**: Version releases could be skipped if there were concurrent workflows or duplicate actions
|
||||
- **After**: Tag pushes always trigger complete CI/CD pipeline execution, ensuring:
|
||||
- ✅ Full test suite execution
|
||||
- ✅ Code quality checks (fmt, clippy)
|
||||
- ✅ Multi-platform builds (Linux, macOS, Windows)
|
||||
- ✅ GUI builds for releases
|
||||
- ✅ Release asset creation
|
||||
- ✅ OSS uploads
|
||||
|
||||
### 🚀 Benefits
|
||||
|
||||
1. **Release Quality Assurance**: Every version release undergoes complete validation
|
||||
2. **Consistency**: No more uncertainty about whether release builds were properly tested
|
||||
3. **Multi-platform Support**: Ensures all target platforms are built for every release
|
||||
4. **Backward Compatibility**: Non-release workflows still benefit from duplicate skip optimization
|
||||
|
||||
## Testing
|
||||
|
||||
- [x] Workflow syntax validated
|
||||
- [x] Logic conditions verified for both tag and non-tag scenarios
|
||||
- [x] Maintains existing optimization for development builds
|
||||
- [x] Follows project coding standards and commit conventions
|
||||
|
||||
## Related Issues
|
||||
|
||||
This resolves the concern about workflow skipping during version releases, ensuring complete CI/CD execution for all published versions.
|
||||
|
||||
## Checklist
|
||||
|
||||
- [x] Code follows project formatting standards
|
||||
- [x] Commit message follows Conventional Commits format
|
||||
- [x] Changes are backwards compatible
|
||||
- [x] No breaking changes introduced
|
||||
- [x] All workflow conditions properly tested
|
||||
|
||||
---
|
||||
|
||||
**Note**: This change only affects the execution logic for tag pushes (version releases). Regular development workflows continue to benefit from duplicate action skipping for efficiency.
|
||||
@@ -67,7 +67,6 @@ hyper.workspace = true
|
||||
hyper-util.workspace = true
|
||||
http.workspace = true
|
||||
http-body.workspace = true
|
||||
lazy_static.workspace = true
|
||||
matchit = { workspace = true }
|
||||
mime_guess = { workspace = true }
|
||||
opentelemetry = { workspace = true }
|
||||
|
||||
@@ -596,6 +596,7 @@ impl Operation for ImportBucketMetadata {
|
||||
|
||||
let mut header = HeaderMap::new();
|
||||
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
|
||||
header.insert(CONTENT_LENGTH, "0".parse().unwrap());
|
||||
Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,6 +19,7 @@ use matchit::Params;
|
||||
use rustfs_config::notify::{NOTIFY_MQTT_SUB_SYS, NOTIFY_WEBHOOK_SUB_SYS};
|
||||
use rustfs_notify::EventName;
|
||||
use rustfs_notify::rules::{BucketNotificationConfig, PatternRules};
|
||||
use s3s::header::CONTENT_LENGTH;
|
||||
use s3s::{Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result, header::CONTENT_TYPE, s3_error};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_urlencoded::from_bytes;
|
||||
@@ -103,6 +104,7 @@ impl Operation for SetNotificationTarget {
|
||||
|
||||
let mut header = HeaderMap::new();
|
||||
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
|
||||
header.insert(CONTENT_LENGTH, "0".parse().unwrap());
|
||||
Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
|
||||
}
|
||||
}
|
||||
@@ -181,6 +183,7 @@ impl Operation for RemoveNotificationTarget {
|
||||
|
||||
let mut header = HeaderMap::new();
|
||||
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
|
||||
header.insert(CONTENT_LENGTH, "0".parse().unwrap());
|
||||
Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
|
||||
}
|
||||
}
|
||||
@@ -226,6 +229,7 @@ impl Operation for SetBucketNotification {
|
||||
|
||||
let mut header = HeaderMap::new();
|
||||
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
|
||||
header.insert(CONTENT_LENGTH, "0".parse().unwrap());
|
||||
Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
|
||||
}
|
||||
}
|
||||
@@ -289,6 +293,7 @@ impl Operation for RemoveBucketNotification {
|
||||
|
||||
let mut header = HeaderMap::new();
|
||||
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
|
||||
header.insert(CONTENT_LENGTH, "0".parse().unwrap());
|
||||
Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,7 +17,11 @@ use matchit::Params;
|
||||
use rustfs_ecstore::global::get_global_action_cred;
|
||||
use rustfs_iam::error::{is_err_no_such_group, is_err_no_such_user};
|
||||
use rustfs_madmin::GroupAddRemove;
|
||||
use s3s::{Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result, header::CONTENT_TYPE, s3_error};
|
||||
use s3s::{
|
||||
Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result,
|
||||
header::{CONTENT_LENGTH, CONTENT_TYPE},
|
||||
s3_error,
|
||||
};
|
||||
use serde::Deserialize;
|
||||
use serde_urlencoded::from_bytes;
|
||||
use tracing::warn;
|
||||
@@ -129,7 +133,7 @@ impl Operation for SetGroupStatus {
|
||||
|
||||
let mut header = HeaderMap::new();
|
||||
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
|
||||
|
||||
header.insert(CONTENT_LENGTH, "0".parse().unwrap());
|
||||
Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
|
||||
}
|
||||
}
|
||||
@@ -214,7 +218,7 @@ impl Operation for UpdateGroupMembers {
|
||||
|
||||
let mut header = HeaderMap::new();
|
||||
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
|
||||
|
||||
header.insert(CONTENT_LENGTH, "0".parse().unwrap());
|
||||
Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -19,7 +19,11 @@ use rustfs_ecstore::global::get_global_action_cred;
|
||||
use rustfs_iam::error::is_err_no_such_user;
|
||||
use rustfs_iam::store::MappedPolicy;
|
||||
use rustfs_policy::policy::Policy;
|
||||
use s3s::{Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result, header::CONTENT_TYPE, s3_error};
|
||||
use s3s::{
|
||||
Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result,
|
||||
header::{CONTENT_LENGTH, CONTENT_TYPE},
|
||||
s3_error,
|
||||
};
|
||||
use serde::Deserialize;
|
||||
use serde_urlencoded::from_bytes;
|
||||
use std::collections::HashMap;
|
||||
@@ -123,7 +127,7 @@ impl Operation for AddCannedPolicy {
|
||||
|
||||
let mut header = HeaderMap::new();
|
||||
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
|
||||
|
||||
header.insert(CONTENT_LENGTH, "0".parse().unwrap());
|
||||
Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
|
||||
}
|
||||
}
|
||||
@@ -198,7 +202,7 @@ impl Operation for RemoveCannedPolicy {
|
||||
|
||||
let mut header = HeaderMap::new();
|
||||
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
|
||||
|
||||
header.insert(CONTENT_LENGTH, "0".parse().unwrap());
|
||||
Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
|
||||
}
|
||||
}
|
||||
@@ -284,7 +288,7 @@ impl Operation for SetPolicyForUserOrGroup {
|
||||
|
||||
let mut header = HeaderMap::new();
|
||||
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
|
||||
|
||||
header.insert(CONTENT_LENGTH, "0".parse().unwrap());
|
||||
Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -22,7 +22,11 @@ use rustfs_ecstore::{
|
||||
rebalance::{DiskStat, RebalSaveOpt},
|
||||
store_api::BucketOptions,
|
||||
};
|
||||
use s3s::{Body, S3Request, S3Response, S3Result, header::CONTENT_TYPE, s3_error};
|
||||
use s3s::{
|
||||
Body, S3Request, S3Response, S3Result,
|
||||
header::{CONTENT_LENGTH, CONTENT_TYPE},
|
||||
s3_error,
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::time::Duration;
|
||||
use time::OffsetDateTime;
|
||||
@@ -265,7 +269,10 @@ impl Operation for RebalanceStop {
|
||||
warn!("handle RebalanceStop notification_sys load_rebalance_meta done");
|
||||
}
|
||||
|
||||
Ok(S3Response::new((StatusCode::OK, Body::empty())))
|
||||
let mut header = HeaderMap::new();
|
||||
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
|
||||
header.insert(CONTENT_LENGTH, "0".parse().unwrap());
|
||||
Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -28,6 +28,7 @@ use rustfs_madmin::{
|
||||
use rustfs_policy::policy::action::{Action, AdminAction};
|
||||
use rustfs_policy::policy::{Args, Policy};
|
||||
use s3s::S3ErrorCode::InvalidRequest;
|
||||
use s3s::header::CONTENT_LENGTH;
|
||||
use s3s::{Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result, header::CONTENT_TYPE, s3_error};
|
||||
use serde::Deserialize;
|
||||
use serde_urlencoded::from_bytes;
|
||||
@@ -306,7 +307,7 @@ impl Operation for UpdateServiceAccount {
|
||||
|
||||
let mut header = HeaderMap::new();
|
||||
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
|
||||
|
||||
header.insert(CONTENT_LENGTH, "0".parse().unwrap());
|
||||
Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
|
||||
}
|
||||
}
|
||||
@@ -607,7 +608,7 @@ impl Operation for DeleteServiceAccount {
|
||||
|
||||
let mut header = HeaderMap::new();
|
||||
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
|
||||
|
||||
header.insert(CONTENT_LENGTH, "0".parse().unwrap());
|
||||
Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -16,7 +16,11 @@
|
||||
use http::{HeaderMap, StatusCode};
|
||||
//use iam::get_global_action_cred;
|
||||
use matchit::Params;
|
||||
use s3s::{Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result, header::CONTENT_TYPE, s3_error};
|
||||
use s3s::{
|
||||
Body, S3Error, S3ErrorCode, S3Request, S3Response, S3Result,
|
||||
header::{CONTENT_LENGTH, CONTENT_TYPE},
|
||||
s3_error,
|
||||
};
|
||||
use serde_urlencoded::from_bytes;
|
||||
use time::OffsetDateTime;
|
||||
use tracing::{debug, warn};
|
||||
@@ -169,7 +173,7 @@ impl Operation for AddTier {
|
||||
|
||||
let mut header = HeaderMap::new();
|
||||
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
|
||||
|
||||
header.insert(CONTENT_LENGTH, "0".parse().unwrap());
|
||||
Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
|
||||
}
|
||||
}
|
||||
@@ -236,7 +240,7 @@ impl Operation for EditTier {
|
||||
|
||||
let mut header = HeaderMap::new();
|
||||
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
|
||||
|
||||
header.insert(CONTENT_LENGTH, "0".parse().unwrap());
|
||||
Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
|
||||
}
|
||||
}
|
||||
@@ -332,7 +336,7 @@ impl Operation for RemoveTier {
|
||||
|
||||
let mut header = HeaderMap::new();
|
||||
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
|
||||
|
||||
header.insert(CONTENT_LENGTH, "0".parse().unwrap());
|
||||
Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
|
||||
}
|
||||
}
|
||||
@@ -366,7 +370,7 @@ impl Operation for VerifyTier {
|
||||
|
||||
let mut header = HeaderMap::new();
|
||||
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
|
||||
|
||||
header.insert(CONTENT_LENGTH, "0".parse().unwrap());
|
||||
Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
|
||||
}
|
||||
}
|
||||
@@ -457,7 +461,7 @@ impl Operation for ClearTier {
|
||||
|
||||
let mut header = HeaderMap::new();
|
||||
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
|
||||
|
||||
header.insert(CONTENT_LENGTH, "0".parse().unwrap());
|
||||
Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
|
||||
}
|
||||
}
|
||||
@@ -636,7 +640,7 @@ impl Operation for PostRestoreObject {
|
||||
|
||||
let mut header = HeaderMap::new();
|
||||
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
|
||||
|
||||
header.insert(CONTENT_LENGTH, "0".parse().unwrap());
|
||||
Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
|
||||
}
|
||||
}*/
|
||||
|
||||
@@ -146,7 +146,7 @@ impl Operation for AddUser {
|
||||
|
||||
let mut header = HeaderMap::new();
|
||||
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
|
||||
|
||||
header.insert(CONTENT_LENGTH, "0".parse().unwrap());
|
||||
Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
|
||||
}
|
||||
}
|
||||
@@ -193,7 +193,7 @@ impl Operation for SetUserStatus {
|
||||
|
||||
let mut header = HeaderMap::new();
|
||||
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
|
||||
|
||||
header.insert(CONTENT_LENGTH, "0".parse().unwrap());
|
||||
Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
|
||||
}
|
||||
}
|
||||
@@ -310,7 +310,7 @@ impl Operation for RemoveUser {
|
||||
|
||||
let mut header = HeaderMap::new();
|
||||
header.insert(CONTENT_TYPE, "application/json".parse().unwrap());
|
||||
|
||||
header.insert(CONTENT_LENGTH, "0".parse().unwrap());
|
||||
Ok(S3Response::with_headers((StatusCode::OK, Body::empty()), header))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -68,15 +68,7 @@ pub struct Opt {
|
||||
#[arg(long, default_value_t = true, env = "RUSTFS_CONSOLE_ENABLE")]
|
||||
pub console_enable: bool,
|
||||
|
||||
/// Console server bind address
|
||||
#[arg(long, default_value_t = rustfs_config::DEFAULT_CONSOLE_ADDRESS.to_string(), env = "RUSTFS_CONSOLE_ADDRESS")]
|
||||
pub console_address: String,
|
||||
|
||||
/// rustfs endpoint for console
|
||||
#[arg(long, env = "RUSTFS_CONSOLE_FS_ENDPOINT")]
|
||||
pub console_fs_endpoint: Option<String>,
|
||||
|
||||
/// Observability configuration file
|
||||
/// Observability endpoint for trace, metrics and logs,only support grpc mode.
|
||||
#[arg(long, default_value_t = rustfs_config::DEFAULT_OBS_ENDPOINT.to_string(), env = "RUSTFS_OBS_ENDPOINT")]
|
||||
pub obs_endpoint: String,
|
||||
|
||||
|
||||
@@ -20,9 +20,7 @@ use std::time::UNIX_EPOCH;
|
||||
use tracing::error;
|
||||
use tracing::info;
|
||||
|
||||
lazy_static::lazy_static! {
|
||||
static ref LICENSE: OnceLock<Token> = OnceLock::new();
|
||||
}
|
||||
static LICENSE: OnceLock<Token> = OnceLock::new();
|
||||
|
||||
/// Initialize the license
|
||||
pub fn init_license(license: Option<String>) {
|
||||
|
||||
@@ -1,13 +0,0 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
@@ -18,14 +18,11 @@ mod config;
|
||||
mod error;
|
||||
// mod grpc;
|
||||
pub mod license;
|
||||
mod logging;
|
||||
mod server;
|
||||
mod storage;
|
||||
mod update;
|
||||
mod version;
|
||||
|
||||
use rustfs_config::DEFAULT_DELIMITER;
|
||||
use rustfs_ecstore::config::GLOBAL_ServerConfig;
|
||||
// Ensure the correct path for parse_license is imported
|
||||
use crate::server::{SHUTDOWN_TIMEOUT, ServiceState, ServiceStateManager, ShutdownSignal, start_http_server, wait_for_shutdown};
|
||||
use chrono::Datelike;
|
||||
@@ -34,10 +31,12 @@ use license::init_license;
|
||||
use rustfs_ahm::scanner::data_scanner::ScannerConfig;
|
||||
use rustfs_ahm::{Scanner, create_ahm_services_cancel_token, shutdown_ahm_services};
|
||||
use rustfs_common::globals::set_global_addr;
|
||||
use rustfs_config::DEFAULT_DELIMITER;
|
||||
use rustfs_ecstore::bucket::metadata_sys::init_bucket_metadata_sys;
|
||||
use rustfs_ecstore::cmd::bucket_replication::init_bucket_replication_pool;
|
||||
use rustfs_ecstore::config as ecconfig;
|
||||
use rustfs_ecstore::config::GLOBAL_ConfigSys;
|
||||
use rustfs_ecstore::config::GLOBAL_ServerConfig;
|
||||
use rustfs_ecstore::store_api::BucketOptions;
|
||||
use rustfs_ecstore::{
|
||||
StorageAPI,
|
||||
|
||||
@@ -1,9 +1,24 @@
// use crate::admin::console::{CONSOLE_CONFIG, init_console_cfg};
use crate::auth::IAMAuth;
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Ensure the correct path for parse_license is imported
use crate::admin;
// use crate::admin::console::{CONSOLE_CONFIG, init_console_cfg};
use crate::auth::IAMAuth;
use crate::config;
use crate::server::hybrid::hybrid;
use crate::server::layer::RedirectLayer;
use crate::server::{ServiceState, ServiceStateManager};
use crate::storage;
use bytes::Bytes;

@@ -43,7 +58,7 @@ const MI_B: usize = 1024 * 1024;
pub async fn start_http_server(
    opt: &config::Opt,
    worker_state_manager: ServiceStateManager,
) -> std::io::Result<tokio::sync::broadcast::Sender<()>> {
) -> Result<tokio::sync::broadcast::Sender<()>> {
    let server_addr = parse_and_resolve_address(opt.address.as_str()).map_err(Error::other)?;
    let server_port = server_addr.port();
    let server_address = server_addr.to_string();

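The signature now returns a shorter `Result` alias instead of spelling out `std::io::Result`. Since the body still converts errors with `Error::other`, this is presumably an alias that defaults to `std::io::Error`; a hedged sketch of that kind of alias (the real definition in rustfs may live elsewhere or differ):

```rust
use std::io::Error;

// Hypothetical module-level alias, shown only to illustrate the shorter signature.
pub type Result<T, E = Error> = std::result::Result<T, E>;

fn parse_port(s: &str) -> Result<u16> {
    s.parse::<u16>().map_err(Error::other)
}

fn main() -> Result<()> {
    let port = parse_port("7000")?;
    println!("port = {port}");
    Ok(())
}
```
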
@@ -200,7 +215,7 @@ pub async fn start_http_server(
    };

    let socket_ref = SockRef::from(&socket);
    if let Err(err) = socket_ref.set_nodelay(true) {
    if let Err(err) = socket_ref.set_tcp_nodelay(true) {
        warn!(?err, "Failed to set TCP_NODELAY");
    }
    if let Err(err) = socket_ref.set_recv_buffer_size(4 * MI_B) {

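The rename from `set_nodelay` to `set_tcp_nodelay` appears to track the method name used by newer `socket2` releases; the behaviour (disabling Nagle's algorithm on the accepted connection) is unchanged. A standalone sketch of the same tuning, assuming socket2 0.6+ and a tokio `TcpStream`:

```rust
use socket2::SockRef;
use tokio::net::TcpStream;
use tracing::warn;

const MI_B: usize = 1024 * 1024;

fn tune_accepted_socket(socket: &TcpStream) {
    let socket_ref = SockRef::from(socket);
    // Disable Nagle's algorithm so small responses are not delayed.
    if let Err(err) = socket_ref.set_tcp_nodelay(true) {
        warn!(?err, "Failed to set TCP_NODELAY");
    }
    // Enlarge the kernel receive buffer for large-object traffic.
    if let Err(err) = socket_ref.set_recv_buffer_size(4 * MI_B) {
        warn!(?err, "Failed to set receive buffer size");
    }
}
```
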
@@ -346,6 +361,7 @@ fn process_connection(
        }),
    )
    .layer(CorsLayer::permissive())
    .layer(RedirectLayer)
    .service(service);
    let hybrid_service = TowerToHyperService::new(hybrid_service);

rustfs/src/server/layer.rs (new file, 91 lines)
@@ -0,0 +1,91 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::server::hybrid::HybridBody;
use http::{Request as HttpRequest, Response, StatusCode};
use hyper::body::Incoming;
use std::future::Future;
use std::pin::Pin;
use std::task::{Context, Poll};
use tower::{Layer, Service};
use tracing::debug;

/// Redirect layer that redirects browser requests to the console
#[derive(Clone)]
pub struct RedirectLayer;

impl<S> Layer<S> for RedirectLayer {
    type Service = RedirectService<S>;

    fn layer(&self, inner: S) -> Self::Service {
        RedirectService { inner }
    }
}

/// Service implementation for redirect functionality
#[derive(Clone)]
pub struct RedirectService<S> {
    inner: S,
}

impl<S, RestBody, GrpcBody> Service<HttpRequest<Incoming>> for RedirectService<S>
where
    S: Service<HttpRequest<Incoming>, Response = Response<HybridBody<RestBody, GrpcBody>>> + Clone + Send + 'static,
    S::Future: Send + 'static,
    S::Error: Into<Box<dyn std::error::Error + Send + Sync>> + Send + 'static,
    RestBody: Default + Send + 'static,
    GrpcBody: Send + 'static,
{
    type Response = Response<HybridBody<RestBody, GrpcBody>>;
    type Error = Box<dyn std::error::Error + Send + Sync>;
    type Future = Pin<Box<dyn Future<Output = std::result::Result<Self::Response, Self::Error>> + Send>>;

    fn poll_ready(&mut self, cx: &mut Context<'_>) -> Poll<std::result::Result<(), Self::Error>> {
        self.inner.poll_ready(cx).map_err(Into::into)
    }

    fn call(&mut self, req: HttpRequest<Incoming>) -> Self::Future {
        // Check if this is a GET request without Authorization header and User-Agent contains Mozilla
        // and the path is either "/" or "/index.html"
        let path = req.uri().path().trim_end_matches('/');
        let should_redirect = req.method() == http::Method::GET
            && !req.headers().contains_key(http::header::AUTHORIZATION)
            && req
                .headers()
                .get(http::header::USER_AGENT)
                .and_then(|v| v.to_str().ok())
                .map(|ua| ua.contains("Mozilla"))
                .unwrap_or(false)
            && (path.is_empty() || path == "/rustfs" || path == "/index.html");

        if should_redirect {
            debug!("Redirecting browser request from {} to console", path);

            // Create redirect response
            let redirect_response = Response::builder()
                .status(StatusCode::FOUND)
                .header(http::header::LOCATION, "/rustfs/console/")
                .body(HybridBody::Rest {
                    rest_body: RestBody::default(),
                })
                .expect("failed to build redirect response");

            return Box::pin(async move { Ok(redirect_response) });
        }

        // Otherwise, forward to the next service
        let mut inner = self.inner.clone();
        Box::pin(async move { inner.call(req).await.map_err(Into::into) })
    }
}

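The redirect condition packed into `call` is easy to get wrong: the method, the missing Authorization header, the browser User-Agent, and one of three paths must all line up. A standalone sketch that lifts the same predicate out so it can be unit-tested without the tower/hyper plumbing (the function name and tests are illustrative, not part of the rustfs API):

```rust
use http::{header, Method, Request};

/// Same predicate as RedirectService::call, extracted for testing.
fn should_redirect<B>(req: &Request<B>) -> bool {
    let path = req.uri().path().trim_end_matches('/');
    req.method() == Method::GET
        && !req.headers().contains_key(header::AUTHORIZATION)
        && req
            .headers()
            .get(header::USER_AGENT)
            .and_then(|v| v.to_str().ok())
            .map(|ua| ua.contains("Mozilla"))
            .unwrap_or(false)
        && (path.is_empty() || path == "/rustfs" || path == "/index.html")
}

#[cfg(test)]
mod tests {
    use super::*;

    fn browser_get(uri: &str) -> Request<()> {
        Request::builder()
            .method(Method::GET)
            .uri(uri)
            .header(header::USER_AGENT, "Mozilla/5.0")
            .body(())
            .unwrap()
    }

    #[test]
    fn root_browser_request_redirects() {
        assert!(should_redirect(&browser_get("/")));
    }

    #[test]
    fn signed_s3_request_is_untouched() {
        let mut req = browser_get("/");
        req.headers_mut()
            .insert(header::AUTHORIZATION, "AWS4-HMAC-SHA256 ...".parse().unwrap());
        assert!(!should_redirect(&req));
    }
}
```
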
@@ -14,6 +14,7 @@

mod http;
mod hybrid;
mod layer;
mod service_state;
pub(crate) use http::start_http_server;
pub(crate) use service_state::SHUTDOWN_TIMEOUT;

@@ -36,7 +36,6 @@ use rustfs_s3select_api::server::dbms::DatabaseManagerSystem;
// use rustfs_ecstore::store_api::RESERVED_METADATA_PREFIX;
use futures::StreamExt;
use http::HeaderMap;
use lazy_static::lazy_static;
use rustfs_ecstore::bucket::lifecycle::bucket_lifecycle_ops::validate_transition_tier;
use rustfs_ecstore::bucket::lifecycle::lifecycle::Lifecycle;
use rustfs_ecstore::bucket::metadata::BUCKET_LIFECYCLE_CONFIG;

@@ -102,6 +101,7 @@ use std::fmt::Debug;
use std::path::Path;
use std::str::FromStr;
use std::sync::Arc;
use std::sync::LazyLock;
use time::OffsetDateTime;
use time::format_description::well_known::Rfc3339;
use tokio::sync::mpsc;

@@ -126,12 +126,10 @@ macro_rules! try_ {
    };
}

lazy_static! {
    static ref RUSTFS_OWNER: Owner = Owner {
        display_name: Some("rustfs".to_owned()),
        id: Some("c19050dbcee97fda828689dda99097a6321af2248fa760517237346e5d9c8a66".to_owned()),
    };
}
static RUSTFS_OWNER: LazyLock<Owner> = LazyLock::new(|| Owner {
    display_name: Some("rustfs".to_owned()),
    id: Some("c19050dbcee97fda828689dda99097a6321af2248fa760517237346e5d9c8a66".to_owned()),
});

#[derive(Debug, Clone)]
pub struct FS {

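This is the same `lazy_static` to `std::sync::LazyLock` migration applied across the change set; `LazyLock` has been in the standard library since Rust 1.80, so the extra crate and macro can go. A self-contained sketch of the resulting shape (with `Owner` stubbed locally rather than the type rustfs actually uses):

```rust
use std::sync::LazyLock;

// Stand-in for the Owner type referenced above.
#[derive(Debug)]
struct Owner {
    display_name: Option<String>,
    id: Option<String>,
}

// Initialized lazily on first access, then cached for the life of the process.
static RUSTFS_OWNER: LazyLock<Owner> = LazyLock::new(|| Owner {
    display_name: Some("rustfs".to_owned()),
    id: Some("c19050dbcee97fda828689dda99097a6321af2248fa760517237346e5d9c8a66".to_owned()),
});

fn main() {
    // The closure above runs exactly once, on this first dereference.
    println!("{:?}", RUSTFS_OWNER.display_name);
}
```
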
@@ -13,13 +13,13 @@
// limitations under the License.

use http::{HeaderMap, HeaderValue};
use lazy_static::lazy_static;
use rustfs_ecstore::bucket::versioning_sys::BucketVersioningSys;
use rustfs_ecstore::error::Result;
use rustfs_ecstore::error::StorageError;
use rustfs_ecstore::store_api::ObjectOptions;
use rustfs_utils::path::is_dir_object;
use std::collections::HashMap;
use std::sync::LazyLock;
use uuid::Uuid;

/// Creates options for deleting an object in a bucket.

@@ -214,9 +214,9 @@ pub fn extract_metadata_from_mime(headers: &HeaderMap<HeaderValue>, metadata: &m
    }
}

lazy_static! {
    /// List of supported headers.
    static ref SUPPORTED_HEADERS: Vec<&'static str> = vec![
/// List of supported headers.
static SUPPORTED_HEADERS: LazyLock<Vec<&'static str>> = LazyLock::new(|| {
    vec![
        "content-type",
        "cache-control",
        "content-language",

@@ -225,9 +225,9 @@ lazy_static! {
        "x-amz-storage-class",
        "x-amz-tagging",
        "expires",
        "x-amz-replication-status"
    ];
}
        "x-amz-replication-status",
    ]
});

#[cfg(test)]
mod tests {

@@ -4,7 +4,6 @@ RUSTFS_ROOT_PASSWORD=rustfsadmin
RUSTFS_VOLUMES="http://node{1...4}:7000/data/rustfs{0...3} http://node{5...8}:7000/data/rustfs{0...3}"
RUSTFS_ADDRESS=":7000"
RUSTFS_CONSOLE_ENABLE=true
RUSTFS_CONSOLE_ADDRESS=":7001"
RUST_LOG=warn
RUSTFS_OBS_LOG_DIRECTORY="/var/logs/rustfs/"
RUSTFS_NS_SCANNER_INTERVAL=60

@@ -57,7 +57,7 @@ export RUSTFS_OBS_ENDPOINT=http://localhost:4317 # OpenTelemetry Collector endpoint
#export RUSTFS_OBS_METER_INTERVAL=1 # Sampling interval, in seconds
#export RUSTFS_OBS_SERVICE_NAME=rustfs # Service name
#export RUSTFS_OBS_SERVICE_VERSION=0.1.0 # Service version
#export RUSTFS_OBS_ENVIRONMENT=develop # Environment name
export RUSTFS_OBS_ENVIRONMENT=develop # Environment name
export RUSTFS_OBS_LOGGER_LEVEL=debug # Log level; supports trace, debug, info, warn, error
export RUSTFS_OBS_LOCAL_LOGGING_ENABLED=true # Whether to enable local logging
export RUSTFS_OBS_LOG_DIRECTORY="$current_dir/deploy/logs" # Log directory