Mirror of https://github.com/rustfs/rustfs.git (synced 2026-01-17 09:40:32 +00:00)

Compare commits: 1.0.0-alph ... 1.0.0-alph (84 commits)
Commit SHA1s (84):

bebd78fbbb, 3f095e75cb, f7d30da9e0, 823d4b6f79, 051ea7786f, 42b645e355,
f27ee96014, 20cd117aa6, fc8931d69f, 0167b2decd, e67980ff3c, 96760bba5a,
2501d7d241, 55b84262b5, ce4252eb1a, db708917b4, 8ddb45627d, 550c225b79,
0d46b550a8, 0693cca1a4, 0d9f9e381a, 6c7aa5a7ae, a27d935925, b4f87a4fee,
ee5f94a2e2, 9c3cf554d3, addbfa5487, 5eb461d7b7, 1ea45afcd7, dbd86f6aee,
af693f7b3f, 3be5ee6445, 0acc8fe26a, ecf40eb86c, 48ce7055f8, 749f55d688,
e5d17f5382, 982cc66c74, 74bf4909c8, 9c956b4445, 4c1fc9317e, a9d77a618f,
38cdc87e93, f5ff93b65e, 6ef6f188e5, ccad91a4a9, 63b79ae151, 9284f64e2a,
b9bbae27de, 36e3efb5a5, 04d1c8724d, 4fb4b353f8, 564a02f344, 5b582a4234,
2e9792577f, 2066e0a03b, a4d49a500f, a8fbced928, 99ca405279, 2e1d1018aa,
c57b4be1c7, 238a016242, 2c0c7fafa3, ee4962fe31, 55895d0a10, 676897d389,
5205ff6695, 15cf3ce92b, c0441b2412, 6267872ddb, 618779a89d, b3ec2325ed,
49a5643e76, 657395af8a, 4de62ed77e, 505f493729, be05b704b0, b33c2fa3cf,
98674c60d4, e39eb86967, 646070ae7a, 2525b66658, 58c5a633e2, aefd894fc2
.cursorrules (49 lines changed)

@@ -1,22 +1,39 @@

# RustFS Project Cursor Rules

## ⚠️ CRITICAL DEVELOPMENT RULES ⚠️
## 🚨🚨🚨 CRITICAL DEVELOPMENT RULES - ZERO TOLERANCE 🚨🚨🚨

### 🚨 NEVER COMMIT DIRECTLY TO MASTER/MAIN BRANCH 🚨
### ⛔️ ABSOLUTE PROHIBITION: NEVER COMMIT DIRECTLY TO MASTER/MAIN BRANCH ⛔️

- **This is the most important rule - NEVER modify code directly on main or master branch**
- **ALL CHANGES MUST GO THROUGH PULL REQUESTS - NO EXCEPTIONS**
- **Always work on feature branches and use pull requests for all changes**
- **Any direct commits to master/main branch are strictly forbidden**
- **Pull requests are the ONLY way to merge code to main branch**
- Before starting any development, always:
  1. `git checkout main` (switch to main branch)
  2. `git pull` (get latest changes)
  3. `git checkout -b feat/your-feature-name` (create and switch to feature branch)
  4. Make your changes on the feature branch
  5. Commit and push to the feature branch
  6. **Create a pull request for review - THIS IS MANDATORY**
  7. **Wait for PR approval and merge through GitHub interface only**

**🔥 THIS IS THE MOST CRITICAL RULE - VIOLATION WILL RESULT IN IMMEDIATE REVERSAL 🔥**

- **🚫 ZERO DIRECT COMMITS TO MAIN/MASTER BRANCH - ABSOLUTELY FORBIDDEN**
- **🚫 ANY DIRECT COMMIT TO MAIN BRANCH MUST BE IMMEDIATELY REVERTED**
- **🚫 NO EXCEPTIONS FOR HOTFIXES, EMERGENCIES, OR URGENT CHANGES**
- **🚫 NO EXCEPTIONS FOR SMALL CHANGES, TYPOS, OR DOCUMENTATION UPDATES**
- **🚫 NO EXCEPTIONS FOR ANYONE - MAINTAINERS, CONTRIBUTORS, OR ADMINS**

### 📋 MANDATORY WORKFLOW - STRICTLY ENFORCED

**EVERY SINGLE CHANGE MUST FOLLOW THIS WORKFLOW:**

1. **Check current branch**: `git branch` (MUST NOT be on main/master)
2. **Switch to main**: `git checkout main`
3. **Pull latest**: `git pull origin main`
4. **Create feature branch**: `git checkout -b feat/your-feature-name`
5. **Make changes ONLY on feature branch**
6. **Test thoroughly before committing**
7. **Commit and push to feature branch**: `git push origin feat/your-feature-name`
8. **Create Pull Request**: Use `gh pr create` (MANDATORY)
9. **Wait for PR approval**: NO self-merging allowed
10. **Merge through GitHub interface**: ONLY after approval
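Step 8 uses the GitHub CLI. As a rough illustration of the push-and-PR sequence (the branch name, title, and body are placeholders, not project conventions):

```bash
# Illustrative only - branch name, title, and body are placeholders.
git checkout -b feat/your-feature-name
git push -u origin feat/your-feature-name
gh pr create --base main \
  --title "feat: describe your change" \
  --body "Summary of the change and how it was tested"
```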
### 🔒 ENFORCEMENT MECHANISMS

- **Branch protection rules**: Main branch is protected
- **Pre-commit hooks**: Will block direct commits to main
- **CI/CD checks**: All PRs must pass before merging
- **Code review requirement**: At least one approval needed
- **Automated reversal**: Direct commits to main will be automatically reverted

## Project Overview

@@ -517,7 +534,7 @@ let results = join_all(futures).await;

### 3. Caching Strategy

- Use `lazy_static` or `OnceCell` for global caching
- Use `LazyLock` for global caching
- Implement LRU cache to avoid memory leaks

## Testing Guidelines
@@ -1,27 +0,0 @@

FROM ubuntu:22.04

ENV LANG C.UTF-8

RUN sed -i s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g /etc/apt/sources.list

RUN apt-get clean && apt-get update && apt-get install wget git curl unzip gcc pkg-config libssl-dev lld libdbus-1-dev libwayland-dev libwebkit2gtk-4.1-dev libxdo-dev -y

# install protoc
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
    && unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
    && mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
    && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3

# install flatc
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
    && unzip Linux.flatc.binary.g++-13.zip \
    && mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip

# install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

COPY .docker/cargo.config.toml /root/.cargo/config.toml

WORKDIR /root/s3-rustfs

CMD [ "bash", "-c", "while true; do sleep 1; done" ]

@@ -1,32 +0,0 @@

FROM rockylinux:9.3 AS builder

ENV LANG C.UTF-8

RUN sed -e 's|^mirrorlist=|#mirrorlist=|g' \
    -e 's|^#baseurl=http://dl.rockylinux.org/$contentdir|baseurl=https://mirrors.ustc.edu.cn/rocky|g' \
    -i.bak \
    /etc/yum.repos.d/rocky-extras.repo \
    /etc/yum.repos.d/rocky.repo

RUN dnf makecache

RUN yum install wget git unzip gcc openssl-devel pkgconf-pkg-config -y

# install protoc
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
    && unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
    && mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
    && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3

# install flatc
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
    && unzip Linux.flatc.binary.g++-13.zip \
    && mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc \
    && rm -rf Linux.flatc.binary.g++-13.zip

# install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

COPY .docker/cargo.config.toml /root/.cargo/config.toml

WORKDIR /root/s3-rustfs

@@ -1,25 +0,0 @@

FROM ubuntu:22.04

ENV LANG C.UTF-8

RUN sed -i s@http://.*archive.ubuntu.com@http://repo.huaweicloud.com@g /etc/apt/sources.list

RUN apt-get clean && apt-get update && apt-get install wget git curl unzip gcc pkg-config libssl-dev lld libdbus-1-dev libwayland-dev libwebkit2gtk-4.1-dev libxdo-dev -y

# install protoc
RUN wget https://github.com/protocolbuffers/protobuf/releases/download/v31.1/protoc-31.1-linux-x86_64.zip \
    && unzip protoc-31.1-linux-x86_64.zip -d protoc3 \
    && mv protoc3/bin/* /usr/local/bin/ && chmod +x /usr/local/bin/protoc \
    && mv protoc3/include/* /usr/local/include/ && rm -rf protoc-31.1-linux-x86_64.zip protoc3

# install flatc
RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.flatc.binary.g++-13.zip \
    && unzip Linux.flatc.binary.g++-13.zip \
    && mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip

# install rust
RUN curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y

COPY .docker/cargo.config.toml /root/.cargo/config.toml

WORKDIR /root/s3-rustfs
.docker/README.md (new file, 261 lines)

@@ -0,0 +1,261 @@

# RustFS Docker Images

This directory contains Docker configuration files and supporting infrastructure for building and running RustFS container images.

## 📁 Directory Structure

```
rustfs/
├── Dockerfile              # Production image (Alpine + pre-built binaries)
├── Dockerfile.source       # Development image (Debian + source build)
├── docker-buildx.sh        # Multi-architecture build script
├── Makefile                # Build automation with simplified commands
└── .docker/                # Supporting infrastructure
    ├── observability/      # Monitoring and observability configs
    ├── compose/            # Docker Compose configurations
    ├── mqtt/               # MQTT broker configs
    └── openobserve-otel/   # OpenObserve + OpenTelemetry configs
```

## 🎯 Image Variants

### Core Images

| Image | Base OS | Build Method | Size | Use Case |
|-------|---------|--------------|------|----------|
| `production` (default) | Alpine 3.18 | GitHub Releases | Smallest | Production deployment |
| `source` | Debian Bookworm | Source build | Medium | Custom builds with cross-compilation |
| `dev` | Debian Bookworm | Development tools | Large | Interactive development |

## 🚀 Usage Examples

### Quick Start (Production)

```bash
# Default production image (Alpine + GitHub Releases)
docker run -p 9000:9000 rustfs/rustfs:latest

# Specific version
docker run -p 9000:9000 rustfs/rustfs:1.2.3
```

### Complete Tag Strategy Examples

```bash
# Stable Releases
docker run rustfs/rustfs:1.2.3              # Main version (production)
docker run rustfs/rustfs:1.2.3-production   # Explicit production variant
docker run rustfs/rustfs:1.2.3-source       # Source build variant
docker run rustfs/rustfs:latest             # Latest stable

# Prerelease Versions
docker run rustfs/rustfs:1.3.0-alpha.2      # Specific alpha version
docker run rustfs/rustfs:alpha              # Latest alpha
docker run rustfs/rustfs:beta               # Latest beta
docker run rustfs/rustfs:rc                 # Latest release candidate

# Development Versions
docker run rustfs/rustfs:dev                # Latest main branch development
docker run rustfs/rustfs:dev-13e4a0b        # Specific commit
docker run rustfs/rustfs:dev-latest         # Latest development
docker run rustfs/rustfs:main-latest        # Main branch latest
```

### Development Environment

```bash
# Quick setup using Makefile (recommended)
make docker-dev-local    # Build development image locally
make dev-env-start       # Start development container

# Manual Docker commands
docker run -it -v $(pwd):/workspace -p 9000:9000 rustfs/rustfs:latest-dev

# Build from source locally
docker build -f Dockerfile.source -t rustfs:custom .

# Development with hot reload
docker-compose up rustfs-dev
```

## 🏗️ Build Arguments and Scripts

### Using Makefile Commands (Recommended)

The easiest way to build images is with the simplified Makefile commands:

```bash
# Development images (build from source)
make docker-dev-local               # Build for local use (single arch)
make docker-dev                     # Build multi-arch (for CI/CD)
make docker-dev-push REGISTRY=xxx   # Build and push to registry

# Production images (using pre-built binaries)
make docker-buildx                          # Build multi-arch production images
make docker-buildx-push                     # Build and push production images
make docker-buildx-version VERSION=v1.0.0   # Build specific version

# Development environment
make dev-env-start     # Start development container
make dev-env-stop      # Stop development container
make dev-env-restart   # Restart development container

# Help
make help-docker       # Show all Docker-related commands
```

### Using docker-buildx.sh (Advanced)

For direct script usage and advanced scenarios:

```bash
# Build latest version for all architectures
./docker-buildx.sh

# Build and push to registry
./docker-buildx.sh --push

# Build specific version
./docker-buildx.sh --release v1.2.3

# Build and push specific version
./docker-buildx.sh --release v1.2.3 --push
```

### Manual Docker Builds

All images support dynamic version selection:

```bash
# Build production image with latest release
docker build --build-arg RELEASE="latest" -t rustfs:latest .

# Build from source with specific target
docker build -f Dockerfile.source \
  --build-arg TARGETPLATFORM="linux/amd64" \
  -t rustfs:source .

# Development build
docker build -f Dockerfile.source -t rustfs:dev .
```

## 🔧 Binary Download Sources

### Unified GitHub Releases

The production image downloads from GitHub Releases for reliability and transparency:

- ✅ **production** → GitHub Releases API with automatic latest detection
- ✅ **Checksum verification** → SHA256SUMS validation when available
- ✅ **Multi-architecture** → Supports amd64 and arm64
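The same download path can be exercised by hand. A minimal sketch, assuming the release assets follow the `rustfs-<platform>-<arch>-v<version>.zip` naming used by the build workflow and that a `SHA256SUMS` file is attached to the release:

```bash
#!/usr/bin/env bash
# Minimal sketch: fetch the newest RustFS release asset and verify its checksum.
# The asset naming and the presence of SHA256SUMS are assumptions; adjust as needed.
set -euo pipefail

ARCH="x86_64"   # or aarch64
TAG=$(curl -fsSL https://api.github.com/repos/rustfs/rustfs/releases/latest \
      | grep -m1 '"tag_name"' | cut -d '"' -f 4)
ASSET="rustfs-linux-${ARCH}-v${TAG#v}.zip"

curl -fsSLO "https://github.com/rustfs/rustfs/releases/download/${TAG}/${ASSET}"
curl -fsSLO "https://github.com/rustfs/rustfs/releases/download/${TAG}/SHA256SUMS"
grep "${ASSET}" SHA256SUMS | sha256sum -c -   # fails if the checksum does not match
```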
### Source Build

The source variant compiles from source code with advanced features:

- 🔧 **Cross-compilation** → Supports multiple target platforms via `TARGETPLATFORM`
- ⚡ **Build caching** → sccache for faster compilation
- 🎯 **Optimized builds** → Release optimizations with LTO and symbol stripping

## 📋 Architecture Support

All variants support multi-architecture builds:

- **linux/amd64** (x86_64)
- **linux/arm64** (aarch64)

Architecture is automatically detected during build using Docker's `TARGETARCH` build argument.
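For local multi-architecture experiments, `docker buildx` is the usual way to drive `TARGETARCH`/`TARGETPLATFORM`. A minimal sketch; the builder name and image tag are illustrative, not project conventions:

```bash
# Minimal sketch: build both supported architectures with Buildx.
# Buildx sets TARGETARCH/TARGETPLATFORM for each platform automatically.
docker buildx create --name rustfs-builder --use || true
docker buildx build --platform linux/amd64,linux/arm64 -t rustfs:multiarch .
# add --push to publish the multi-arch manifest to a registry
```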
## 🔐 Security Features

- **Checksum Verification**: Production image verifies SHA256SUMS when available
- **Non-root User**: All images run as user `rustfs` (UID 1000)
- **Minimal Runtime**: Production image only includes necessary dependencies
- **Secure Defaults**: No hardcoded credentials or keys

## 🛠️ Development Workflow

### Quick Start with Makefile (Recommended)

```bash
# 1. Start development environment
make dev-env-start

# 2. Your development container is now running with:
#    - Port 9000 exposed for RustFS
#    - Port 9010 exposed for admin console
#    - Current directory mounted as /workspace

# 3. Stop when done
make dev-env-stop
```

### Manual Development Setup

```bash
# Build development image from source
make docker-dev-local

# Or use traditional Docker commands
docker build -f Dockerfile.source -t rustfs:dev .

# Run with development tools
docker run -it -v $(pwd):/workspace -p 9000:9000 rustfs:dev bash

# Or use docker-compose for complex setups
docker-compose up rustfs-dev
```

### Common Development Tasks

```bash
# Build and test locally
make build              # Build binary natively
make docker-dev-local   # Build development Docker image
make test               # Run tests
make fmt                # Format code
make clippy             # Run linter

# Get help
make help               # General help
make help-docker        # Docker-specific help
make help-build         # Build-specific help
```

## 🚀 CI/CD Integration

The project uses GitHub Actions for automated multi-architecture Docker builds:

### Automated Builds

- **Tags**: Automatic builds triggered on version tags (e.g., `v1.2.3`)
- **Main Branch**: Development builds with `dev-latest` and `main-latest` tags
- **Pull Requests**: Test builds without registry push
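For the tag-triggered path, pushing an annotated tag is all that is required; the version number below is illustrative only:

```bash
# Illustrative only: pushing a version tag triggers the automated release build.
git tag -a 1.2.3 -m "RustFS 1.2.3"
git push origin 1.2.3
```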
### Build Variants

Each build creates three image variants:

- `rustfs/rustfs:v1.2.3` (production - Alpine-based)
- `rustfs/rustfs:v1.2.3-source` (source build - Debian-based)
- `rustfs/rustfs:v1.2.3-dev` (development - Debian-based with tools)

### Manual Builds

Trigger custom builds via GitHub Actions:

```bash
# Use workflow_dispatch to build specific versions
# Available options: latest, main-latest, dev-latest, v1.2.3, dev-abc123
```
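One way to start such a `workflow_dispatch` run from a terminal is the GitHub CLI. This is a sketch only: the workflow file name (`docker.yml`) and its `version` input are assumptions based on the flow described in `build.yml`, so check the actual workflow inputs first:

```bash
# Hypothetical invocation - confirm the workflow name and its inputs before use.
gh workflow run docker.yml -f version=dev-latest
gh run watch   # follow the triggered run
```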
## 📦 Supporting Infrastructure

The `.docker/` directory contains supporting configuration files:

- **observability/** - Prometheus, Grafana, OpenTelemetry configs
- **compose/** - Multi-service Docker Compose setups
- **mqtt/** - MQTT broker configurations
- **openobserve-otel/** - Log aggregation and tracing setup

See individual README files in each subdirectory for specific usage instructions.

@@ -1,19 +0,0 @@

# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

[source.crates-io]
registry = "https://github.com/rust-lang/crates.io-index"

[net]
git-fetch-with-cli = true
.docker/compose/README.md (new file, 80 lines)

@@ -0,0 +1,80 @@

# Docker Compose Configurations

This directory contains specialized Docker Compose configurations for different use cases.

## 📁 Configuration Files

This directory contains specialized Docker Compose configurations and their associated Dockerfiles, keeping related files organized together.

### Main Configuration (Root Directory)

- **`../../docker-compose.yml`** - **Default Production Setup**
  - Complete production-ready configuration
  - Includes RustFS server + full observability stack
  - Supports multiple profiles: `dev`, `observability`, `cache`, `proxy`
  - Recommended for most users

### Specialized Configurations

- **`docker-compose.cluster.yaml`** - **Distributed Testing**
  - 4-node cluster setup for testing distributed storage
  - Uses local compiled binaries
  - Simulates multi-node environment
  - Ideal for development and cluster testing

- **`docker-compose.observability.yaml`** - **Observability Focus**
  - Specialized setup for testing observability features
  - Includes OpenTelemetry, Jaeger, Prometheus, Loki, Grafana
  - Uses `../../Dockerfile.source` for builds
  - Perfect for observability development

## 🚀 Usage Examples

### Production Setup

```bash
# Start main service
docker-compose up -d

# Start with development profile
docker-compose --profile dev up -d

# Start with full observability
docker-compose --profile observability up -d
```

### Cluster Testing

```bash
# Build and start 4-node cluster (run from project root)
cd .docker/compose
docker-compose -f docker-compose.cluster.yaml up -d

# Or run directly from project root
docker-compose -f .docker/compose/docker-compose.cluster.yaml up -d
```
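After the cluster is up, a quick way to confirm that all four nodes answer is to probe the host ports mapped in `docker-compose.cluster.yaml` (9000-9003). This only checks reachability, not the S3 API:

```bash
# Quick sanity check: each node maps its S3 port to host ports 9000-9003.
for port in 9000 9001 9002 9003; do
  if curl -s -o /dev/null "http://localhost:${port}"; then
    echo "node on port ${port}: reachable"
  else
    echo "node on port ${port}: no response"
  fi
done
```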
### Observability Testing

```bash
# Start observability-focused environment (run from project root)
cd .docker/compose
docker-compose -f docker-compose.observability.yaml up -d

# Or run directly from project root
docker-compose -f .docker/compose/docker-compose.observability.yaml up -d
```

## 🔧 Configuration Overview

| Configuration | Nodes | Storage | Observability | Use Case |
|---------------|-------|---------|---------------|----------|
| **Main** | 1 | Volume mounts | Full stack | Production |
| **Cluster** | 4 | HTTP endpoints | Basic | Testing |
| **Observability** | 4 | Local data | Advanced | Development |

## 📝 Notes

- Always ensure you have built the required binaries before starting cluster tests
- The main configuration is sufficient for most use cases
- Specialized configurations are for specific testing scenarios
@@ -14,70 +14,69 @@

services:
  node0:
    image: rustfs:v1 # Replace with your image name and tag
    image: rustfs/rustfs:latest # Replace with your image name and label
    container_name: node0
    hostname: node0
    environment:
      - RUSTFS_VOLUMES=http://node{0...3}:9000/data/rustfs{0...3}
      - RUSTFS_ADDRESS=0.0.0.0:9000
      - RUSTFS_CONSOLE_ENABLE=true
      - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
      - RUSTFS_ACCESS_KEY=rustfsadmin
      - RUSTFS_SECRET_KEY=rustfsadmin
    platform: linux/amd64
    ports:
      - "9000:9000" # Map host port 9001 to container port 9000
      - "8000:9001" # Map host port 9001 to container port 9000
      - "9000:9000" # Map port 9001 of the host to port 9000 of the container
    volumes:
      - ./target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
      # - ./data/node0:/data # Mount the current path to /root/data inside the container
      - ../../target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
    command: "/app/rustfs"

  node1:
    image: rustfs:v1
    image: rustfs/rustfs:latest
    container_name: node1
    hostname: node1
    environment:
      - RUSTFS_VOLUMES=http://node{0...3}:9000/data/rustfs{0...3}
      - RUSTFS_ADDRESS=0.0.0.0:9000
      - RUSTFS_CONSOLE_ENABLE=true
      - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
      - RUSTFS_ACCESS_KEY=rustfsadmin
      - RUSTFS_SECRET_KEY=rustfsadmin
    platform: linux/amd64
    ports:
      - "9001:9000" # Map host port 9002 to container port 9000
      - "9001:9000" # Map port 9002 of the host to port 9000 of the container
    volumes:
      - ./target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
      # - ./data/node1:/data
      - ../../target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
    command: "/app/rustfs"

  node2:
    image: rustfs:v1
    image: rustfs/rustfs:latest
    container_name: node2
    hostname: node2
    environment:
      - RUSTFS_VOLUMES=http://node{0...3}:9000/data/rustfs{0...3}
      - RUSTFS_ADDRESS=0.0.0.0:9000
      - RUSTFS_CONSOLE_ENABLE=true
      - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
      - RUSTFS_ACCESS_KEY=rustfsadmin
      - RUSTFS_SECRET_KEY=rustfsadmin
    platform: linux/amd64
    ports:
      - "9002:9000" # Map host port 9003 to container port 9000
      - "9002:9000" # Map port 9003 of the host to port 9000 of the container
    volumes:
      - ./target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
      # - ./data/node2:/data
      - ../../target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
    command: "/app/rustfs"

  node3:
    image: rustfs:v1
    image: rustfs/rustfs:latest
    container_name: node3
    hostname: node3
    environment:
      - RUSTFS_VOLUMES=http://node{0...3}:9000/data/rustfs{0...3}
      - RUSTFS_ADDRESS=0.0.0.0:9000
      - RUSTFS_CONSOLE_ENABLE=true
      - RUSTFS_CONSOLE_ADDRESS=0.0.0.0:9002
      - RUSTFS_ACCESS_KEY=rustfsadmin
      - RUSTFS_SECRET_KEY=rustfsadmin
    platform: linux/amd64
    ports:
      - "9003:9000" # Map host port 9004 to container port 9000
      - "9003:9000" # Map port 9004 of the host to port 9000 of the container
    volumes:
      - ./target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
      # - ./data/node3:/data
      - ../../target/x86_64-unknown-linux-musl/release/rustfs:/app/rustfs
    command: "/app/rustfs"
@@ -14,11 +14,11 @@

services:
  otel-collector:
    image: ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-contrib:0.127.0
    image: otel/opentelemetry-collector-contrib:0.129.1
    environment:
      - TZ=Asia/Shanghai
    volumes:
      - ./.docker/observability/otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml
      - ../../.docker/observability/otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml
    ports:
      - 1888:1888
      - 8888:8888

@@ -30,7 +30,7 @@ services:

    networks:
      - rustfs-network
  jaeger:
    image: jaegertracing/jaeger:2.6.0
    image: jaegertracing/jaeger:2.8.0
    environment:
      - TZ=Asia/Shanghai
    ports:

@@ -40,11 +40,11 @@ services:

    networks:
      - rustfs-network
  prometheus:
    image: prom/prometheus:v3.4.1
    image: prom/prometheus:v3.4.2
    environment:
      - TZ=Asia/Shanghai
    volumes:
      - ./.docker/observability/prometheus.yml:/etc/prometheus/prometheus.yml
      - ../../.docker/observability/prometheus.yml:/etc/prometheus/prometheus.yml
    ports:
      - "9090:9090"
    networks:

@@ -54,16 +54,16 @@ services:

    environment:
      - TZ=Asia/Shanghai
    volumes:
      - ./.docker/observability/loki-config.yaml:/etc/loki/local-config.yaml
      - ../../.docker/observability/loki-config.yaml:/etc/loki/local-config.yaml
    ports:
      - "3100:3100"
    command: -config.file=/etc/loki/local-config.yaml
    networks:
      - rustfs-network
  grafana:
    image: grafana/grafana:12.0.1
    image: grafana/grafana:12.0.2
    ports:
      - "3000:3000" # Web UI
      - "3000:3000" # Web UI
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=admin
      - TZ=Asia/Shanghai

@@ -72,85 +72,69 @@ services:

  node1:
    build:
      context: .
      dockerfile: Dockerfile.obs
      context: ../..
      dockerfile: Dockerfile.source
    container_name: node1
    environment:
      - RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
      - RUSTFS_ADDRESS=:9000
      - RUSTFS_CONSOLE_ENABLE=true
      - RUSTFS_CONSOLE_ADDRESS=:9002
      - RUSTFS_OBS_CONFIG=/etc/observability/config/obs-multi.toml
      - RUSTFS_OBS_ENDPOINT=http://otel-collector:4317
      - RUSTFS_OBS_LOGGER_LEVEL=debug
    platform: linux/amd64
    ports:
      - "9001:9000" # Map host port 9001 to container port 9000
      - "9101:9002"
    volumes:
      # - ./data:/root/data # Mount the current path to /root/data inside the container
      - ./.docker/observability/config:/etc/observability/config
      - "9001:9000" # Map port 9001 of the host to port 9000 of the container
    networks:
      - rustfs-network

  node2:
    build:
      context: .
      dockerfile: Dockerfile.obs
      context: ../..
      dockerfile: Dockerfile.source
    container_name: node2
    environment:
      - RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
      - RUSTFS_ADDRESS=:9000
      - RUSTFS_CONSOLE_ENABLE=true
      - RUSTFS_CONSOLE_ADDRESS=:9002
      - RUSTFS_OBS_CONFIG=/etc/observability/config/obs-multi.toml
      - RUSTFS_OBS_ENDPOINT=http://otel-collector:4317
      - RUSTFS_OBS_LOGGER_LEVEL=debug
    platform: linux/amd64
    ports:
      - "9002:9000" # Map host port 9002 to container port 9000
      - "9102:9002"
    volumes:
      # - ./data:/root/data
      - ./.docker/observability/config:/etc/observability/config
      - "9002:9000" # Map port 9002 of the host to port 9000 of the container
    networks:
      - rustfs-network

  node3:
    build:
      context: .
      dockerfile: Dockerfile.obs
      context: ../..
      dockerfile: Dockerfile.source
    container_name: node3
    environment:
      - RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
      - RUSTFS_ADDRESS=:9000
      - RUSTFS_CONSOLE_ENABLE=true
      - RUSTFS_CONSOLE_ADDRESS=:9002
      - RUSTFS_OBS_CONFIG=/etc/observability/config/obs-multi.toml
      - RUSTFS_OBS_ENDPOINT=http://otel-collector:4317
      - RUSTFS_OBS_LOGGER_LEVEL=debug
    platform: linux/amd64
    ports:
      - "9003:9000" # Map host port 9003 to container port 9000
      - "9103:9002"
    volumes:
      # - ./data:/root/data
      - ./.docker/observability/config:/etc/observability/config
      - "9003:9000" # Map port 9003 of the host to port 9000 of the container
    networks:
      - rustfs-network

  node4:
    build:
      context: .
      dockerfile: Dockerfile.obs
      context: ../..
      dockerfile: Dockerfile.source
    container_name: node4
    environment:
      - RUSTFS_VOLUMES=http://node{1...4}:9000/root/data/target/volume/test{1...4}
      - RUSTFS_ADDRESS=:9000
      - RUSTFS_CONSOLE_ENABLE=true
      - RUSTFS_CONSOLE_ADDRESS=:9002
      - RUSTFS_OBS_CONFIG=/etc/observability/config/obs-multi.toml
      - RUSTFS_OBS_ENDPOINT=http://otel-collector:4317
      - RUSTFS_OBS_LOGGER_LEVEL=debug
    platform: linux/amd64
    ports:
      - "9004:9000" # Map host port 9004 to container port 9000
      - "9104:9002"
    volumes:
      # - ./data:/root/data
      - ./.docker/observability/config:/etc/observability/config
      - "9004:9000" # Map port 9004 of the host to port 9000 of the container
    networks:
      - rustfs-network
@@ -13,24 +13,40 @@

# limitations under the License.

services:

  tempo:
    image: grafana/tempo:latest
    #user: root # The container must be started with root to execute chown in the script
    #entrypoint: [ "/etc/tempo/entrypoint.sh" ] # Specify a custom entry point
    command: [ "-config.file=/etc/tempo.yaml" ] # This is passed as a parameter to the entry point script
    volumes:
      - ./tempo-entrypoint.sh:/etc/tempo/entrypoint.sh # Mount entry point script
      - ./tempo.yaml:/etc/tempo.yaml
      - ./tempo-data:/var/tempo
    ports:
      - "3200:3200" # tempo
      - "24317:4317" # otlp grpc
    networks:
      - otel-network

  otel-collector:
    image: ghcr.io/open-telemetry/opentelemetry-collector-releases/opentelemetry-collector-contrib:0.127.0
    image: otel/opentelemetry-collector-contrib:0.129.1
    environment:
      - TZ=Asia/Shanghai
    volumes:
      - ./otel-collector-config.yaml:/etc/otelcol-contrib/config.yaml
    ports:
      - 1888:1888
      - 8888:8888
      - 8889:8889
      - 13133:13133
      - 4317:4317
      - 4318:4318
      - 55679:55679
      - "1888:1888"
      - "8888:8888"
      - "8889:8889"
      - "13133:13133"
      - "4317:4317"
      - "4318:4318"
      - "55679:55679"
    networks:
      - otel-network
  jaeger:
    image: jaegertracing/jaeger:2.7.0
    image: jaegertracing/jaeger:2.8.0
    environment:
      - TZ=Asia/Shanghai
    ports:

@@ -40,7 +56,7 @@ services:

    networks:
      - otel-network
  prometheus:
    image: prom/prometheus:v3.4.1
    image: prom/prometheus:v3.4.2
    environment:
      - TZ=Asia/Shanghai
    volumes:

@@ -64,6 +80,8 @@ services:

    image: grafana/grafana:12.0.2
    ports:
      - "3000:3000" # Web UI
    volumes:
      - ./grafana-datasources.yaml:/etc/grafana/provisioning/datasources/datasources.yaml
    environment:
      - GF_SECURITY_ADMIN_PASSWORD=admin
      - TZ=Asia/Shanghai
.docker/observability/grafana-datasources.yaml (new file, 32 lines)

@@ -0,0 +1,32 @@

apiVersion: 1

datasources:
  - name: Prometheus
    type: prometheus
    uid: prometheus
    access: proxy
    orgId: 1
    url: http://prometheus:9090
    basicAuth: false
    isDefault: false
    version: 1
    editable: false
    jsonData:
      httpMethod: GET
  - name: Tempo
    type: tempo
    access: proxy
    orgId: 1
    url: http://tempo:3200
    basicAuth: false
    isDefault: true
    version: 1
    editable: false
    apiVersion: 1
    uid: tempo
    jsonData:
      httpMethod: GET
      serviceMap:
        datasourceUid: prometheus
      streamingEnabled:
        search: true
@@ -33,6 +33,10 @@ exporters:

    endpoint: "jaeger:4317" # Jaeger's OTLP gRPC endpoint
    tls:
      insecure: true # TLS disabled for development; configure certificates in production
  otlp/tempo: # OTLP exporter for trace data
    endpoint: "tempo:4317" # Tempo's OTLP gRPC endpoint
    tls:
      insecure: true # TLS disabled for development; configure certificates in production
  prometheus: # Prometheus exporter for metric data
    endpoint: "0.0.0.0:8889" # Prometheus scrape endpoint
    namespace: "rustfs" # metric prefix

@@ -53,7 +57,7 @@ service:

    traces:
      receivers: [ otlp ]
      processors: [ memory_limiter,batch ]
      exporters: [ otlp/traces ]
      exporters: [ otlp/traces,otlp/tempo ]
    metrics:
      receivers: [ otlp ]
      processors: [ batch ]

@@ -66,6 +70,12 @@ service:

    logs:
      level: "info" # Collector log level
    metrics:
      address: "0.0.0.0:8888" # expose the Collector's own metrics
      level: "detailed" # can be basic, normal, or detailed
      readers:
        - periodic:
            exporter:
              otlp:
                protocol: http/protobuf
                endpoint: http://otel-collector:4318

@@ -18,8 +18,11 @@ global:

scrape_configs:
  - job_name: 'otel-collector'
    static_configs:
      - targets: ['otel-collector:8888'] # scrape metrics from the Collector
      - targets: [ 'otel-collector:8888' ] # scrape metrics from the Collector
  - job_name: 'otel-metrics'
    static_configs:
      - targets: ['otel-collector:8889'] # application metrics
      - targets: [ 'otel-collector:8889' ] # application metrics
  - job_name: 'tempo'
    static_configs:
      - targets: [ 'tempo:3200' ]
.docker/observability/tempo-data/.gitignore (new file, vendored, 1 line)

@@ -0,0 +1 @@

*

.docker/observability/tempo-entrypoint.sh (new executable file, 8 lines)

@@ -0,0 +1,8 @@

#!/bin/sh
# Run as root to fix directory permissions
chown -R 10001:10001 /var/tempo

# Use su-exec (a lightweight sudo/gosu alternative, commonly used in Alpine images)
# Switch to user 10001 and execute the original command (CMD) passed to the script
# "$@" represents all parameters passed to this script, i.e. command in docker-compose
exec su-exec 10001:10001 /tempo "$@"

.docker/observability/tempo.yaml (new file, 55 lines)

@@ -0,0 +1,55 @@

stream_over_http_enabled: true
server:
  http_listen_port: 3200
  log_level: info

query_frontend:
  search:
    duration_slo: 5s
    throughput_bytes_slo: 1.073741824e+09
    metadata_slo:
      duration_slo: 5s
      throughput_bytes_slo: 1.073741824e+09
  trace_by_id:
    duration_slo: 5s

distributor:
  receivers:
    otlp:
      protocols:
        grpc:
          endpoint: "tempo:4317"

ingester:
  max_block_duration: 5m # cut the headblock when this much time passes. this is being set for demo purposes and should probably be left alone normally

compactor:
  compaction:
    block_retention: 1h # overall Tempo trace retention. set for demo purposes

metrics_generator:
  registry:
    external_labels:
      source: tempo
      cluster: docker-compose
  storage:
    path: /var/tempo/generator/wal
    remote_write:
      - url: http://prometheus:9090/api/v1/write
        send_exemplars: true
  traces_storage:
    path: /var/tempo/generator/traces

storage:
  trace:
    backend: local # backend configuration to use
    wal:
      path: /var/tempo/wal # where to store the wal locally
    local:
      path: /var/tempo/blocks

overrides:
  defaults:
    metrics_generator:
      processors: [ service-graphs, span-metrics, local-blocks ] # enables metrics generator
      generate_native_histograms: both
.github/actions/setup/action.yml (vendored, 12 lines changed)

@@ -60,15 +60,7 @@ runs:

          pkg-config \
          libssl-dev

    - name: Cache protoc binary
      id: cache-protoc
      uses: actions/cache@v4
      with:
        path: ~/.local/bin/protoc
        key: protoc-31.1-${{ runner.os }}-${{ runner.arch }}

    - name: Install protoc
      if: steps.cache-protoc.outputs.cache-hit != 'true'
      uses: arduino/setup-protoc@v3
      with:
        version: "31.1"

@@ -104,7 +96,3 @@ runs:

        cache-on-failure: true
        shared-key: ${{ inputs.cache-shared-key }}
        save-if: ${{ inputs.cache-save-if }}
        # Cache workspace dependencies
        workspaces: |
          . -> target
          cli/rustfs-gui -> cli/rustfs-gui/target
.github/workflows/build.yml (vendored, 598 lines changed)

@@ -12,11 +12,23 @@

# See the License for the specific language governing permissions and
# limitations under the License.

# Build and Release Workflow
#
# This workflow builds RustFS binaries and automatically triggers Docker image builds.
#
# Flow:
# 1. Build binaries for multiple platforms
# 2. Upload binaries to OSS storage
# 3. Trigger docker.yml to build and push images using the uploaded binaries
#
# Manual Parameters:
# - build_docker: Build and push Docker images (default: true)

name: Build and Release

on:
  push:
    tags: ["*"]
    tags: ["*.*.*"]
    branches: [main]
    paths-ignore:
      - "**.md"

@@ -52,10 +64,10 @@ on:

    - cron: "0 0 * * 0" # Weekly on Sunday at midnight UTC
  workflow_dispatch:
    inputs:
      force_build:
        description: "Force build even without changes"
      build_docker:
        description: "Build and push Docker images after binary build"
        required: false
        default: false
        default: true
        type: boolean

env:
@@ -65,39 +77,78 @@ env:
|
||||
CARGO_INCREMENTAL: 0
|
||||
|
||||
jobs:
|
||||
# Second layer: Business logic level checks (handling build strategy)
|
||||
# Build strategy check - determine build type based on trigger
|
||||
build-check:
|
||||
name: Build Strategy Check
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
should_build: ${{ steps.check.outputs.should_build }}
|
||||
build_type: ${{ steps.check.outputs.build_type }}
|
||||
version: ${{ steps.check.outputs.version }}
|
||||
short_sha: ${{ steps.check.outputs.short_sha }}
|
||||
is_prerelease: ${{ steps.check.outputs.is_prerelease }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Determine build strategy
|
||||
id: check
|
||||
run: |
|
||||
should_build=false
|
||||
build_type="none"
|
||||
version=""
|
||||
short_sha=""
|
||||
is_prerelease=false
|
||||
|
||||
# Business logic: when we need to build
|
||||
if [[ "${{ github.event_name }}" == "schedule" ]] || \
|
||||
[[ "${{ github.event_name }}" == "workflow_dispatch" ]] || \
|
||||
[[ "${{ github.event.inputs.force_build }}" == "true" ]] || \
|
||||
[[ "${{ contains(github.event.head_commit.message, '--build') }}" == "true" ]]; then
|
||||
# Get short SHA for all builds
|
||||
short_sha=$(git rev-parse --short HEAD)
|
||||
|
||||
# Determine build type based on trigger
|
||||
if [[ "${{ startsWith(github.ref, 'refs/tags/') }}" == "true" ]]; then
|
||||
# Tag push - release or prerelease
|
||||
should_build=true
|
||||
tag_name="${GITHUB_REF#refs/tags/}"
|
||||
version="${tag_name}"
|
||||
|
||||
# Check if this is a prerelease
|
||||
if [[ "$tag_name" == *"alpha"* ]] || [[ "$tag_name" == *"beta"* ]] || [[ "$tag_name" == *"rc"* ]]; then
|
||||
build_type="prerelease"
|
||||
is_prerelease=true
|
||||
echo "🚀 Prerelease build detected: $tag_name"
|
||||
else
|
||||
build_type="release"
|
||||
echo "📦 Release build detected: $tag_name"
|
||||
fi
|
||||
elif [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
|
||||
# Main branch push - development build
|
||||
should_build=true
|
||||
build_type="development"
|
||||
fi
|
||||
|
||||
# Always build for tag pushes (version releases)
|
||||
if [[ "${{ startsWith(github.ref, 'refs/tags/') }}" == "true" ]]; then
|
||||
version="dev-${short_sha}"
|
||||
echo "🛠️ Development build detected"
|
||||
elif [[ "${{ github.event_name }}" == "schedule" ]] || \
|
||||
[[ "${{ github.event_name }}" == "workflow_dispatch" ]] || \
|
||||
[[ "${{ contains(github.event.head_commit.message, '--build') }}" == "true" ]]; then
|
||||
# Scheduled or manual build
|
||||
should_build=true
|
||||
build_type="release"
|
||||
echo "🏷️ Tag detected: forcing release build"
|
||||
build_type="development"
|
||||
version="dev-${short_sha}"
|
||||
echo "⚡ Manual/scheduled build detected"
|
||||
fi
|
||||
|
||||
echo "should_build=$should_build" >> $GITHUB_OUTPUT
|
||||
echo "build_type=$build_type" >> $GITHUB_OUTPUT
|
||||
echo "Build needed: $should_build (type: $build_type)"
|
||||
echo "version=$version" >> $GITHUB_OUTPUT
|
||||
echo "short_sha=$short_sha" >> $GITHUB_OUTPUT
|
||||
echo "is_prerelease=$is_prerelease" >> $GITHUB_OUTPUT
|
||||
|
||||
echo "📊 Build Summary:"
|
||||
echo " - Should build: $should_build"
|
||||
echo " - Build type: $build_type"
|
||||
echo " - Version: $version"
|
||||
echo " - Short SHA: $short_sha"
|
||||
echo " - Is prerelease: $is_prerelease"
|
||||
|
||||
# Build RustFS binaries
|
||||
build-rustfs:
|
||||
@@ -121,6 +172,14 @@ jobs:
|
||||
target: aarch64-unknown-linux-musl
|
||||
cross: true
|
||||
platform: linux
|
||||
- os: ubuntu-latest
|
||||
target: x86_64-unknown-linux-gnu
|
||||
cross: false
|
||||
platform: linux
|
||||
- os: ubuntu-latest
|
||||
target: aarch64-unknown-linux-gnu
|
||||
cross: true
|
||||
platform: linux
|
||||
# macOS builds
|
||||
- os: macos-latest
|
||||
target: aarch64-apple-darwin
|
||||
@@ -130,15 +189,15 @@ jobs:
|
||||
target: x86_64-apple-darwin
|
||||
cross: false
|
||||
platform: macos
|
||||
# # Windows builds (temporarily disabled)
|
||||
# - os: windows-latest
|
||||
# target: x86_64-pc-windows-msvc
|
||||
# cross: false
|
||||
# platform: windows
|
||||
# - os: windows-latest
|
||||
# target: aarch64-pc-windows-msvc
|
||||
# cross: true
|
||||
# platform: windows
|
||||
# Windows builds (temporarily disabled)
|
||||
- os: windows-latest
|
||||
target: x86_64-pc-windows-msvc
|
||||
cross: false
|
||||
platform: windows
|
||||
- os: windows-latest
|
||||
target: aarch64-pc-windows-msvc
|
||||
cross: true
|
||||
platform: windows
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
@@ -158,7 +217,6 @@ jobs:
|
||||
- name: Download static console assets
|
||||
run: |
|
||||
mkdir -p ./rustfs/static
|
||||
rm -rf ./rustfs/static/*
|
||||
if [[ "${{ matrix.platform }}" == "windows" ]]; then
|
||||
curl.exe -L "https://dl.rustfs.com/artifacts/console/rustfs-console-latest.zip" -o console.zip --retry 3 --retry-delay 5 --max-time 300
|
||||
if [[ $? -eq 0 ]]; then
|
||||
@@ -169,6 +227,7 @@ jobs:
|
||||
echo "// Static assets not available" > ./rustfs/static/empty.txt
|
||||
fi
|
||||
else
|
||||
chmod +w ./rustfs/static/LICENSE || true
|
||||
curl -L "https://dl.rustfs.com/artifacts/console/rustfs-console-latest.zip" \
|
||||
-o console.zip --retry 3 --retry-delay 5 --max-time 300
|
||||
if [[ $? -eq 0 ]]; then
|
||||
@@ -191,7 +250,7 @@ jobs:
|
||||
cargo install cross --git https://github.com/cross-rs/cross
|
||||
cross build --release --target ${{ matrix.target }} -p rustfs --bins
|
||||
else
|
||||
# Use zigbuild for Linux ARM64
|
||||
# Use zigbuild for other cross-compilation
|
||||
cargo zigbuild --release --target ${{ matrix.target }} -p rustfs --bins
|
||||
fi
|
||||
else
|
||||
@@ -202,7 +261,38 @@ jobs:
|
||||
id: package
|
||||
shell: bash
|
||||
run: |
|
||||
PACKAGE_NAME="rustfs-${{ matrix.target }}"
|
||||
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
|
||||
VERSION="${{ needs.build-check.outputs.version }}"
|
||||
SHORT_SHA="${{ needs.build-check.outputs.short_sha }}"
|
||||
|
||||
# Extract platform and arch from target
|
||||
TARGET="${{ matrix.target }}"
|
||||
PLATFORM="${{ matrix.platform }}"
|
||||
|
||||
# Map target to architecture
|
||||
case "$TARGET" in
|
||||
*x86_64*)
|
||||
ARCH="x86_64"
|
||||
;;
|
||||
*aarch64*|*arm64*)
|
||||
ARCH="aarch64"
|
||||
;;
|
||||
*armv7*)
|
||||
ARCH="armv7"
|
||||
;;
|
||||
*)
|
||||
ARCH="unknown"
|
||||
;;
|
||||
esac
|
||||
|
||||
# Generate package name based on build type
|
||||
if [[ "$BUILD_TYPE" == "development" ]]; then
|
||||
# Development build: rustfs-${platform}-${arch}-dev-${short_sha}.zip
|
||||
PACKAGE_NAME="rustfs-${PLATFORM}-${ARCH}-dev-${SHORT_SHA}"
|
||||
else
|
||||
# Release/Prerelease build: rustfs-${platform}-${arch}-v${version}.zip
|
||||
PACKAGE_NAME="rustfs-${PLATFORM}-${ARCH}-v${VERSION}"
|
||||
fi
|
||||
|
||||
# Create zip packages for all platforms
|
||||
# Ensure zip is available
|
||||
@@ -215,9 +305,15 @@ jobs:
|
||||
cd target/${{ matrix.target }}/release
|
||||
zip "../../../${PACKAGE_NAME}.zip" rustfs
|
||||
cd ../../..
|
||||
|
||||
echo "package_name=${PACKAGE_NAME}" >> $GITHUB_OUTPUT
|
||||
echo "package_file=${PACKAGE_NAME}.zip" >> $GITHUB_OUTPUT
|
||||
echo "Package created: ${PACKAGE_NAME}.zip"
|
||||
echo "build_type=${BUILD_TYPE}" >> $GITHUB_OUTPUT
|
||||
echo "version=${VERSION}" >> $GITHUB_OUTPUT
|
||||
|
||||
echo "📦 Package created: ${PACKAGE_NAME}.zip"
|
||||
echo "🔧 Build type: ${BUILD_TYPE}"
|
||||
echo "📊 Version: ${VERSION}"
|
||||
|
||||
- name: Upload artifacts
|
||||
uses: actions/upload-artifact@v4
|
||||
@@ -227,13 +323,15 @@ jobs:
|
||||
retention-days: ${{ startsWith(github.ref, 'refs/tags/') && 30 || 7 }}
|
||||
|
||||
- name: Upload to Aliyun OSS
|
||||
if: needs.build-check.outputs.build_type == 'release' && env.OSS_ACCESS_KEY_ID != ''
|
||||
if: env.OSS_ACCESS_KEY_ID != '' && (needs.build-check.outputs.build_type == 'release' || needs.build-check.outputs.build_type == 'prerelease' || needs.build-check.outputs.build_type == 'development')
|
||||
env:
|
||||
OSS_ACCESS_KEY_ID: ${{ secrets.ALICLOUDOSS_KEY_ID }}
|
||||
OSS_ACCESS_KEY_SECRET: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
|
||||
OSS_REGION: cn-beijing
|
||||
OSS_ENDPOINT: https://oss-cn-beijing.aliyuncs.com
|
||||
run: |
|
||||
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
|
||||
|
||||
# Install ossutil (platform-specific)
|
||||
OSSUTIL_VERSION="2.1.1"
|
||||
case "${{ matrix.platform }}" in
|
||||
@@ -271,148 +369,396 @@ jobs:
|
||||
;;
|
||||
esac
|
||||
|
||||
# Upload the package file directly to OSS
|
||||
echo "Uploading ${{ steps.package.outputs.package_file }} to OSS..."
|
||||
$OSSUTIL_BIN cp "${{ steps.package.outputs.package_file }}" oss://rustfs-artifacts/artifacts/rustfs/ --force
|
||||
|
||||
# Create latest.json (only for the first Linux build to avoid duplication)
|
||||
if [[ "${{ matrix.target }}" == "x86_64-unknown-linux-musl" ]]; then
|
||||
VERSION="${GITHUB_REF#refs/tags/v}"
|
||||
echo "{\"version\":\"${VERSION}\",\"release_date\":\"$(date -u +%Y-%m-%dT%H:%M:%SZ)\"}" > latest.json
|
||||
$OSSUTIL_BIN cp latest.json oss://rustfs-version/latest.json --force
|
||||
# Determine upload path based on build type
|
||||
if [[ "$BUILD_TYPE" == "development" ]]; then
|
||||
OSS_PATH="oss://rustfs-artifacts/artifacts/rustfs/dev/"
|
||||
echo "📤 Uploading development build to OSS dev directory"
|
||||
else
|
||||
OSS_PATH="oss://rustfs-artifacts/artifacts/rustfs/release/"
|
||||
echo "📤 Uploading release build to OSS release directory"
|
||||
fi
|
||||
|
||||
# Release management
|
||||
release:
|
||||
name: GitHub Release
|
||||
# Upload the package file to OSS
|
||||
echo "Uploading ${{ steps.package.outputs.package_file }} to $OSS_PATH..."
|
||||
$OSSUTIL_BIN cp "${{ steps.package.outputs.package_file }}" "$OSS_PATH" --force
|
||||
|
||||
# For release and prerelease builds, also create a latest version
|
||||
if [[ "$BUILD_TYPE" == "release" ]] || [[ "$BUILD_TYPE" == "prerelease" ]]; then
|
||||
# Extract platform and arch from package name
|
||||
PACKAGE_NAME="${{ steps.package.outputs.package_name }}"
|
||||
|
||||
# Create latest version filename
|
||||
# Convert from rustfs-linux-x86_64-v1.0.0 to rustfs-linux-x86_64-latest
|
||||
LATEST_FILE="${PACKAGE_NAME%-v*}-latest.zip"
|
||||
|
||||
# Copy the original file to latest version
|
||||
cp "${{ steps.package.outputs.package_file }}" "$LATEST_FILE"
|
||||
|
||||
# Upload the latest version
|
||||
echo "Uploading latest version: $LATEST_FILE to $OSS_PATH..."
|
||||
$OSSUTIL_BIN cp "$LATEST_FILE" "$OSS_PATH" --force
|
||||
|
||||
echo "✅ Latest version uploaded: $LATEST_FILE"
|
||||
fi
|
||||
|
||||
# For development builds, create dev-latest version
|
||||
if [[ "$BUILD_TYPE" == "development" ]]; then
|
||||
# Extract platform and arch from package name
|
||||
PACKAGE_NAME="${{ steps.package.outputs.package_name }}"
|
||||
|
||||
# Create dev-latest version filename
|
||||
# Convert from rustfs-linux-x86_64-dev-abc123 to rustfs-linux-x86_64-dev-latest
|
||||
DEV_LATEST_FILE="${PACKAGE_NAME%-*}-latest.zip"
|
||||
|
||||
# Copy the original file to dev-latest version
|
||||
cp "${{ steps.package.outputs.package_file }}" "$DEV_LATEST_FILE"
|
||||
|
||||
# Upload the dev-latest version
|
||||
echo "Uploading dev-latest version: $DEV_LATEST_FILE to $OSS_PATH..."
|
||||
$OSSUTIL_BIN cp "$DEV_LATEST_FILE" "$OSS_PATH" --force
|
||||
|
||||
echo "✅ Dev-latest version uploaded: $DEV_LATEST_FILE"
|
||||
|
||||
# For main branch builds, also create a main-latest version
|
||||
if [[ "${{ github.ref }}" == "refs/heads/main" ]]; then
|
||||
# Create main-latest version filename
|
||||
# Convert from rustfs-linux-x86_64-dev-abc123 to rustfs-linux-x86_64-main-latest
|
||||
MAIN_LATEST_FILE="${PACKAGE_NAME%-dev-*}-main-latest.zip"
|
||||
|
||||
# Copy the original file to main-latest version
|
||||
cp "${{ steps.package.outputs.package_file }}" "$MAIN_LATEST_FILE"
|
||||
|
||||
# Upload the main-latest version
|
||||
echo "Uploading main-latest version: $MAIN_LATEST_FILE to $OSS_PATH..."
|
||||
$OSSUTIL_BIN cp "$MAIN_LATEST_FILE" "$OSS_PATH" --force
|
||||
|
||||
echo "✅ Main-latest version uploaded: $MAIN_LATEST_FILE"
|
||||
|
||||
# Also create a generic main-latest for Docker builds
|
||||
if [[ "${{ matrix.platform }}" == "linux" ]]; then
|
||||
DOCKER_MAIN_LATEST_FILE="rustfs-linux-${{ matrix.target == 'x86_64-unknown-linux-musl' && 'x86_64' || 'aarch64' }}-main-latest.zip"
|
||||
|
||||
cp "${{ steps.package.outputs.package_file }}" "$DOCKER_MAIN_LATEST_FILE"
|
||||
$OSSUTIL_BIN cp "$DOCKER_MAIN_LATEST_FILE" "$OSS_PATH" --force
|
||||
echo "✅ Docker main-latest version uploaded: $DOCKER_MAIN_LATEST_FILE"
|
||||
fi
|
||||
fi
|
||||
fi
|
||||
|
||||
echo "✅ Upload completed successfully"
|
||||
|
||||
# Build summary
|
||||
build-summary:
|
||||
name: Build Summary
|
||||
needs: [build-check, build-rustfs]
|
||||
if: always() && needs.build-check.outputs.build_type == 'release'
|
||||
if: always() && needs.build-check.outputs.should_build == 'true'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Build completion summary
|
||||
run: |
|
||||
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
|
||||
VERSION="${{ needs.build-check.outputs.version }}"
|
||||
|
||||
echo "🎉 Build completed successfully!"
|
||||
echo "📦 Build type: $BUILD_TYPE"
|
||||
echo "🔢 Version: $VERSION"
|
||||
echo ""
|
||||
|
||||
# Check build status
|
||||
BUILD_STATUS="${{ needs.build-rustfs.result }}"
|
||||
|
||||
echo "📊 Build Results:"
|
||||
echo " 📦 All platforms: $BUILD_STATUS"
|
||||
echo ""
|
||||
|
||||
case "$BUILD_TYPE" in
|
||||
"development")
|
||||
echo "🛠️ Development build artifacts have been uploaded to OSS dev directory"
|
||||
echo "⚠️ This is a development build - not suitable for production use"
|
||||
;;
|
||||
"release")
|
||||
echo "🚀 Release build artifacts have been uploaded to OSS release directory"
|
||||
echo "✅ This build is ready for production use"
|
||||
echo "🏷️ GitHub Release will be created in this workflow"
|
||||
;;
|
||||
"prerelease")
|
||||
echo "🧪 Prerelease build artifacts have been uploaded to OSS release directory"
|
||||
echo "⚠️ This is a prerelease build - use with caution"
|
||||
echo "🏷️ GitHub Release will be created in this workflow"
|
||||
;;
|
||||
esac
|
||||
|
||||
echo ""
|
||||
echo "🐳 Docker Images:"
|
||||
if [[ "${{ github.event.inputs.build_docker }}" == "false" ]]; then
|
||||
echo "⏭️ Docker image build was skipped (binary only build)"
|
||||
elif [[ "$BUILD_STATUS" == "success" ]]; then
|
||||
echo "🔄 Docker images will be built and pushed automatically via workflow_run event"
|
||||
else
|
||||
echo "❌ Docker image build will be skipped due to build failure"
|
||||
fi
|
||||
|
||||
# Create GitHub Release (only for tag pushes)
|
||||
create-release:
|
||||
name: Create GitHub Release
|
||||
needs: [build-check, build-rustfs]
|
||||
if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
outputs:
|
||||
release_id: ${{ steps.create.outputs.release_id }}
|
||||
release_url: ${{ steps.create.outputs.release_url }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
- name: Download all artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
path: ./release-artifacts
|
||||
|
||||
- name: Prepare release assets
|
||||
id: release_prep
|
||||
run: |
|
||||
VERSION="${GITHUB_REF#refs/tags/}"
|
||||
VERSION_CLEAN="${VERSION#v}"
|
||||
|
||||
echo "version=${VERSION}" >> $GITHUB_OUTPUT
|
||||
echo "version_clean=${VERSION_CLEAN}" >> $GITHUB_OUTPUT
|
||||
|
||||
# Organize artifacts
|
||||
mkdir -p ./release-files
|
||||
|
||||
# Copy all artifacts (.zip files)
|
||||
find ./release-artifacts -name "*.zip" -exec cp {} ./release-files/ \;
|
||||
|
||||
# Generate checksums for all files
|
||||
cd ./release-files
|
||||
if ls *.zip >/dev/null 2>&1; then
|
||||
sha256sum *.zip >> SHA256SUMS
|
||||
sha512sum *.zip >> SHA512SUMS
|
||||
fi
|
||||
cd ..
|
||||
|
||||
# Display what we're releasing
|
||||
echo "=== Release Files ==="
|
||||
ls -la ./release-files/
|
||||
|
||||
- name: Create GitHub Release
|
||||
id: create
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
run: |
|
||||
VERSION="${{ steps.release_prep.outputs.version }}"
|
||||
VERSION_CLEAN="${{ steps.release_prep.outputs.version_clean }}"
|
||||
TAG="${{ needs.build-check.outputs.version }}"
|
||||
VERSION="${{ needs.build-check.outputs.version }}"
|
||||
IS_PRERELEASE="${{ needs.build-check.outputs.is_prerelease }}"
|
||||
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
|
||||
|
||||
# Determine release type for title
|
||||
if [[ "$BUILD_TYPE" == "prerelease" ]]; then
|
||||
if [[ "$TAG" == *"alpha"* ]]; then
|
||||
RELEASE_TYPE="alpha"
|
||||
elif [[ "$TAG" == *"beta"* ]]; then
|
||||
RELEASE_TYPE="beta"
|
||||
elif [[ "$TAG" == *"rc"* ]]; then
|
||||
RELEASE_TYPE="rc"
|
||||
else
|
||||
RELEASE_TYPE="prerelease"
|
||||
fi
|
||||
else
|
||||
RELEASE_TYPE="release"
|
||||
fi
|
||||
|
||||
# Check if release already exists
|
||||
if gh release view "$VERSION" >/dev/null 2>&1; then
|
||||
echo "Release $VERSION already exists, skipping creation"
|
||||
if gh release view "$TAG" >/dev/null 2>&1; then
|
||||
echo "Release $TAG already exists"
|
||||
RELEASE_ID=$(gh release view "$TAG" --json databaseId --jq '.databaseId')
|
||||
RELEASE_URL=$(gh release view "$TAG" --json url --jq '.url')
|
||||
else
|
||||
# Get release notes from tag message
|
||||
RELEASE_NOTES=$(git tag -l --format='%(contents)' "${VERSION}")
|
||||
RELEASE_NOTES=$(git tag -l --format='%(contents)' "${TAG}")
|
||||
if [[ -z "$RELEASE_NOTES" || "$RELEASE_NOTES" =~ ^[[:space:]]*$ ]]; then
|
||||
RELEASE_NOTES="Release ${VERSION_CLEAN}"
|
||||
if [[ "$IS_PRERELEASE" == "true" ]]; then
|
||||
RELEASE_NOTES="Pre-release ${VERSION} (${RELEASE_TYPE})"
|
||||
else
|
||||
RELEASE_NOTES="Release ${VERSION}"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Determine if this is a prerelease
|
||||
# Create release title
|
||||
if [[ "$IS_PRERELEASE" == "true" ]]; then
|
||||
TITLE="RustFS $VERSION (${RELEASE_TYPE})"
|
||||
else
|
||||
TITLE="RustFS $VERSION"
|
||||
fi
|
||||
|
||||
# Create the release
|
||||
PRERELEASE_FLAG=""
|
||||
if [[ "$VERSION" == *"alpha"* ]] || [[ "$VERSION" == *"beta"* ]] || [[ "$VERSION" == *"rc"* ]]; then
|
||||
if [[ "$IS_PRERELEASE" == "true" ]]; then
|
||||
PRERELEASE_FLAG="--prerelease"
|
||||
fi
|
||||
|
||||
# Create the release only if it doesn't exist
|
||||
gh release create "$VERSION" \
|
||||
--title "RustFS $VERSION_CLEAN" \
|
||||
gh release create "$TAG" \
|
||||
--title "$TITLE" \
|
||||
--notes "$RELEASE_NOTES" \
|
||||
$PRERELEASE_FLAG
|
||||
$PRERELEASE_FLAG \
|
||||
--draft
|
||||
|
||||
RELEASE_ID=$(gh release view "$TAG" --json databaseId --jq '.databaseId')
|
||||
RELEASE_URL=$(gh release view "$TAG" --json url --jq '.url')
|
||||
fi
|
||||
|
||||
- name: Upload release assets
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
echo "release_id=$RELEASE_ID" >> $GITHUB_OUTPUT
|
||||
echo "release_url=$RELEASE_URL" >> $GITHUB_OUTPUT
|
||||
echo "Created release: $RELEASE_URL"
|
||||
|
||||
# Prepare and upload release assets
|
||||
upload-release-assets:
|
||||
name: Upload Release Assets
|
||||
needs: [build-check, build-rustfs, create-release]
|
||||
if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
actions: read
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Download all build artifacts
|
||||
uses: actions/download-artifact@v4
|
||||
with:
|
||||
path: ./artifacts
|
||||
pattern: rustfs-*
|
||||
merge-multiple: true
|
||||
|
||||
- name: Prepare release assets
|
||||
id: prepare
|
||||
run: |
|
||||
VERSION="${{ steps.release_prep.outputs.version }}"
|
||||
VERSION="${{ needs.build-check.outputs.version }}"
|
||||
TAG="${{ needs.build-check.outputs.version }}"
|
||||
|
||||
cd ./release-files
|
||||
mkdir -p ./release-assets
|
||||
|
||||
# Upload all binary files
|
||||
for file in *.zip; do
|
||||
# Copy and verify artifacts
|
||||
ASSETS_COUNT=0
|
||||
for file in ./artifacts/*.zip; do
|
||||
if [[ -f "$file" ]]; then
|
||||
echo "Uploading $file..."
|
||||
gh release upload "$VERSION" "$file" --clobber
|
||||
cp "$file" ./release-assets/
|
||||
ASSETS_COUNT=$((ASSETS_COUNT + 1))
|
||||
fi
|
||||
done
|
||||
|
||||
# Upload checksum files
|
||||
if [[ -f "SHA256SUMS" ]]; then
|
||||
echo "Uploading SHA256SUMS..."
|
||||
gh release upload "$VERSION" "SHA256SUMS" --clobber
|
||||
if [[ $ASSETS_COUNT -eq 0 ]]; then
|
||||
echo "❌ No artifacts found!"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
if [[ -f "SHA512SUMS" ]]; then
|
||||
echo "Uploading SHA512SUMS..."
|
||||
gh release upload "$VERSION" "SHA512SUMS" --clobber
|
||||
cd ./release-assets
|
||||
|
||||
# Generate checksums
|
||||
if ls *.zip >/dev/null 2>&1; then
|
||||
sha256sum *.zip > SHA256SUMS
|
||||
sha512sum *.zip > SHA512SUMS
|
||||
fi
|
||||
|
||||
- name: Update release notes
|
||||
# Create signature placeholder files
|
||||
for file in *.zip; do
|
||||
echo "# Signature for $file" > "${file}.asc"
|
||||
echo "# GPG signature will be added in future versions" >> "${file}.asc"
|
||||
done
|
||||
|
||||
echo "📦 Prepared assets:"
|
||||
ls -la
|
||||
|
||||
echo "🔢 Asset count: $ASSETS_COUNT"
|
||||
|
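The `.asc` files created above are placeholders only. If real signing is added later, a plausible shape for that step is sketched below; the GPG key import is assumed to have happened earlier (for example from a repository secret) and is not part of the current workflow.

```bash
# Hypothetical signing step (not in the current workflow): produce detached,
# ASCII-armored signatures instead of the placeholder .asc files above.
for file in *.zip; do
  gpg --batch --yes --armor --detach-sign --output "${file}.asc" "$file"
done

# Consumers could then verify a download with:
#   gpg --verify rustfs-x86_64-unknown-linux-musl.zip.asc rustfs-x86_64-unknown-linux-musl.zip
```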
||||
- name: Upload to GitHub Release
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
run: |
|
||||
VERSION="${{ steps.release_prep.outputs.version }}"
|
||||
VERSION_CLEAN="${{ steps.release_prep.outputs.version_clean }}"
|
||||
TAG="${{ needs.build-check.outputs.version }}"
|
||||
|
||||
# Check if release already has custom notes (not auto-generated)
|
||||
EXISTING_NOTES=$(gh release view "$VERSION" --json body --jq '.body' 2>/dev/null || echo "")
|
||||
cd ./release-assets
|
||||
|
||||
# Only update if release notes are empty or auto-generated
|
||||
if [[ -z "$EXISTING_NOTES" ]] || [[ "$EXISTING_NOTES" == *"Release ${VERSION_CLEAN}"* ]]; then
|
||||
echo "Updating release notes for $VERSION"
|
||||
|
||||
# Get original release notes from tag
|
||||
ORIGINAL_NOTES=$(git tag -l --format='%(contents)' "${VERSION}")
|
||||
if [[ -z "$ORIGINAL_NOTES" || "$ORIGINAL_NOTES" =~ ^[[:space:]]*$ ]]; then
|
||||
ORIGINAL_NOTES="Release ${VERSION_CLEAN}"
|
||||
# Upload all files
|
||||
for file in *; do
|
||||
if [[ -f "$file" ]]; then
|
||||
echo "📤 Uploading $file..."
|
||||
gh release upload "$TAG" "$file" --clobber
|
||||
fi
|
||||
done
|
||||
|
||||
# Use external template file and substitute variables
|
||||
sed -e "s/\${VERSION}/$VERSION/g" \
|
||||
-e "s/\${VERSION_CLEAN}/$VERSION_CLEAN/g" \
|
||||
-e "s/\${ORIGINAL_NOTES}/$(echo "$ORIGINAL_NOTES" | sed 's/[[\.*^$()+?{|]/\\&/g')/g" \
|
||||
.github/workflows/release-notes-template.md > enhanced_notes.md
|
||||
echo "✅ All assets uploaded successfully"
|
||||
|
||||
# Update the release with enhanced notes
|
||||
gh release edit "$VERSION" --notes-file enhanced_notes.md
|
||||
else
|
||||
echo "Release $VERSION already has custom notes, skipping update to preserve manual edits"
|
||||
# Update latest.json for stable releases only
|
||||
update-latest-version:
|
||||
name: Update Latest Version
|
||||
needs: [build-check, upload-release-assets]
|
||||
if: startsWith(github.ref, 'refs/tags/')
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Update latest.json
|
||||
env:
|
||||
OSS_ACCESS_KEY_ID: ${{ secrets.ALICLOUDOSS_KEY_ID }}
|
||||
OSS_ACCESS_KEY_SECRET: ${{ secrets.ALICLOUDOSS_KEY_SECRET }}
|
||||
OSS_REGION: cn-beijing
|
||||
OSS_ENDPOINT: https://oss-cn-beijing.aliyuncs.com
|
||||
run: |
|
||||
if [[ -z "$OSS_ACCESS_KEY_ID" ]]; then
|
||||
echo "⚠️ OSS credentials not available, skipping latest.json update"
|
||||
exit 0
|
||||
fi
|
||||
|
||||
VERSION="${{ needs.build-check.outputs.version }}"
|
||||
TAG="${{ needs.build-check.outputs.version }}"
|
||||
|
||||
# Install ossutil
|
||||
OSSUTIL_VERSION="2.1.1"
|
||||
OSSUTIL_ZIP="ossutil-${OSSUTIL_VERSION}-linux-amd64.zip"
|
||||
OSSUTIL_DIR="ossutil-${OSSUTIL_VERSION}-linux-amd64"
|
||||
|
||||
curl -o "$OSSUTIL_ZIP" "https://gosspublic.alicdn.com/ossutil/v2/${OSSUTIL_VERSION}/${OSSUTIL_ZIP}"
|
||||
unzip "$OSSUTIL_ZIP"
|
||||
mv "${OSSUTIL_DIR}/ossutil" /usr/local/bin/
|
||||
rm -rf "$OSSUTIL_DIR" "$OSSUTIL_ZIP"
|
||||
chmod +x /usr/local/bin/ossutil
|
||||
|
||||
# Create latest.json
|
||||
cat > latest.json << EOF
|
||||
{
|
||||
"version": "${VERSION}",
|
||||
"tag": "${TAG}",
|
||||
"release_date": "$(date -u +%Y-%m-%dT%H:%M:%SZ)",
|
||||
"release_type": "stable",
|
||||
"download_url": "https://github.com/${{ github.repository }}/releases/tag/${TAG}"
|
||||
}
|
||||
EOF
|
||||
|
||||
# Upload to OSS
|
||||
ossutil cp latest.json oss://rustfs-version/latest.json --force
|
||||
|
||||
echo "✅ Updated latest.json for stable release $VERSION"
|
||||
|
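Since `latest.json` carries `version`, `tag`, `release_type`, and `download_url`, a client-side update check only needs a couple of lines. A minimal sketch, assuming the object is served over HTTPS from the `rustfs-version` bucket (the exact public URL is an assumption) and that `jq` is installed:

```bash
# Illustrative update check against the latest.json produced above.
LATEST_URL="https://rustfs-version.oss-cn-beijing.aliyuncs.com/latest.json"  # assumed URL

latest=$(curl -fsSL "$LATEST_URL" | jq -r '.version')
download=$(curl -fsSL "$LATEST_URL" | jq -r '.download_url')

echo "Latest stable release: $latest"
echo "Download page:         $download"
```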
||||
# Publish release (remove draft status)
|
||||
publish-release:
|
||||
name: Publish Release
|
||||
needs: [build-check, create-release, upload-release-assets]
|
||||
if: startsWith(github.ref, 'refs/tags/') && needs.build-check.outputs.build_type != 'development'
|
||||
runs-on: ubuntu-latest
|
||||
permissions:
|
||||
contents: write
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Update release notes and publish
|
||||
env:
|
||||
GH_TOKEN: ${{ github.token }}
|
||||
run: |
|
||||
TAG="${{ needs.build-check.outputs.version }}"
|
||||
VERSION="${{ needs.build-check.outputs.version }}"
|
||||
IS_PRERELEASE="${{ needs.build-check.outputs.is_prerelease }}"
|
||||
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
|
||||
|
||||
# Determine release type
|
||||
if [[ "$BUILD_TYPE" == "prerelease" ]]; then
|
||||
if [[ "$TAG" == *"alpha"* ]]; then
|
||||
RELEASE_TYPE="alpha"
|
||||
elif [[ "$TAG" == *"beta"* ]]; then
|
||||
RELEASE_TYPE="beta"
|
||||
elif [[ "$TAG" == *"rc"* ]]; then
|
||||
RELEASE_TYPE="rc"
|
||||
else
|
||||
RELEASE_TYPE="prerelease"
|
||||
fi
|
||||
else
|
||||
RELEASE_TYPE="release"
|
||||
fi
|
||||
|
||||
# Get original release notes from tag
|
||||
ORIGINAL_NOTES=$(git tag -l --format='%(contents)' "${TAG}")
|
||||
if [[ -z "$ORIGINAL_NOTES" || "$ORIGINAL_NOTES" =~ ^[[:space:]]*$ ]]; then
|
||||
if [[ "$IS_PRERELEASE" == "true" ]]; then
|
||||
ORIGINAL_NOTES="Pre-release ${VERSION} (${RELEASE_TYPE})"
|
||||
else
|
||||
ORIGINAL_NOTES="Release ${VERSION}"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Publish the release (remove draft status)
|
||||
gh release edit "$TAG" --draft=false
|
||||
|
||||
echo "🎉 Released $TAG successfully!"
|
||||
echo "📄 Release URL: ${{ needs.create-release.outputs.release_url }}"
|
||||
|
||||
12
.github/workflows/ci.yml
vendored
@@ -81,7 +81,17 @@ jobs:
|
||||
cancel_others: true
|
||||
paths_ignore: '["*.md", "docs/**", "deploy/**"]'
|
||||
# Never skip release events and tag pushes
|
||||
do_not_skip: '["release", "push"]'
|
||||
do_not_skip: '["workflow_dispatch", "schedule", "merge_group", "release", "push"]'
|
||||
|
||||
|
||||
typos:
|
||||
name: Typos
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: dtolnay/rust-toolchain@stable
|
||||
- name: Typos check with custom config file
|
||||
uses: crate-ci/typos@master
|
||||
|
||||
test-and-lint:
|
||||
name: Test and Lint
|
||||
|
||||
463
.github/workflows/docker.yml
vendored
@@ -12,42 +12,33 @@
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
# Docker Images Workflow
|
||||
#
|
||||
# This workflow builds Docker images using pre-built binaries from the build workflow.
|
||||
#
|
||||
# Trigger Types:
|
||||
# 1. workflow_run: Automatically triggered when "Build and Release" workflow completes
|
||||
# 2. workflow_dispatch: Manual trigger for standalone Docker builds
|
||||
#
|
||||
# Key Features:
|
||||
# - Only triggers when Linux builds (x86_64 + aarch64) are successful
|
||||
# - Independent of macOS/Windows build status
|
||||
# - Uses workflow_run event for precise control
|
||||
# - Only builds Docker images for releases and prereleases (development builds are skipped)
|
||||
|
||||
name: Docker Images
|
||||
|
||||
# Permissions needed for workflow_run event and Docker registry access
|
||||
permissions:
|
||||
contents: read
|
||||
packages: write
|
||||
|
||||
on:
|
||||
push:
|
||||
tags: ["*"]
|
||||
branches: [main]
|
||||
paths-ignore:
|
||||
- "**.md"
|
||||
- "**.txt"
|
||||
- ".github/**"
|
||||
- "docs/**"
|
||||
- "deploy/**"
|
||||
- "scripts/dev_*.sh"
|
||||
- "LICENSE*"
|
||||
- "README*"
|
||||
- "**/*.png"
|
||||
- "**/*.jpg"
|
||||
- "**/*.svg"
|
||||
- ".gitignore"
|
||||
- ".dockerignore"
|
||||
pull_request:
|
||||
branches: [main]
|
||||
paths-ignore:
|
||||
- "**.md"
|
||||
- "**.txt"
|
||||
- ".github/**"
|
||||
- "docs/**"
|
||||
- "deploy/**"
|
||||
- "scripts/dev_*.sh"
|
||||
- "LICENSE*"
|
||||
- "README*"
|
||||
- "**/*.png"
|
||||
- "**/*.jpg"
|
||||
- "**/*.svg"
|
||||
- ".gitignore"
|
||||
- ".dockerignore"
|
||||
# Automatically triggered when build workflow completes
|
||||
workflow_run:
|
||||
workflows: ["Build and Release"]
|
||||
types: [completed]
|
||||
# Manual trigger with same parameters for consistency
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
push_images:
|
||||
@@ -55,156 +46,372 @@ on:
|
||||
required: false
|
||||
default: true
|
||||
type: boolean
|
||||
version:
|
||||
description: "Version to build (latest for stable release, or specific version like v1.0.0, v1.0.0-alpha1)"
|
||||
required: false
|
||||
default: "latest"
|
||||
type: string
|
||||
force_rebuild:
|
||||
description: "Force rebuild even if binary exists (useful for testing)"
|
||||
required: false
|
||||
default: false
|
||||
type: boolean
|
||||
|
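For reference, the manual `workflow_dispatch` path defined by these inputs can be exercised with the GitHub CLI. A minimal sketch (the input values are illustrative):

```bash
# Manually trigger a standalone Docker image build for a specific prerelease.
# Requires an authenticated gh CLI with access to the repository.
gh workflow run docker.yml \
  --repo rustfs/rustfs \
  -f version=v1.0.0-alpha.1 \
  -f push_images=true \
  -f force_rebuild=false
```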
||||
env:
|
||||
DOCKERHUB_USERNAME: rustfs
|
||||
CARGO_TERM_COLOR: always
|
||||
REGISTRY_DOCKERHUB: rustfs/rustfs
|
||||
REGISTRY_GHCR: ghcr.io/${{ github.repository }}
|
||||
DOCKER_PLATFORMS: linux/amd64,linux/arm64
|
||||
|
||||
jobs:
|
||||
# Check if we should build
|
||||
# Check if we should build Docker images
|
||||
build-check:
|
||||
name: Build Check
|
||||
name: Docker Build Check
|
||||
runs-on: ubuntu-latest
|
||||
outputs:
|
||||
should_build: ${{ steps.check.outputs.should_build }}
|
||||
should_push: ${{ steps.check.outputs.should_push }}
|
||||
build_type: ${{ steps.check.outputs.build_type }}
|
||||
version: ${{ steps.check.outputs.version }}
|
||||
short_sha: ${{ steps.check.outputs.short_sha }}
|
||||
is_prerelease: ${{ steps.check.outputs.is_prerelease }}
|
||||
create_latest: ${{ steps.check.outputs.create_latest }}
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
# For workflow_run events, checkout the specific commit that triggered the workflow
|
||||
ref: ${{ github.event.workflow_run.head_sha || github.sha }}
|
||||
|
||||
- name: Check build conditions
|
||||
id: check
|
||||
run: |
|
||||
should_build=false
|
||||
should_push=false
|
||||
build_type="none"
|
||||
version=""
|
||||
short_sha=""
|
||||
is_prerelease=false
|
||||
create_latest=false
|
||||
|
||||
# Always build on workflow_dispatch or when changes detected
|
||||
if [[ "${{ github.event_name }}" == "workflow_dispatch" ]] || \
|
||||
[[ "${{ github.event_name }}" == "push" ]] || \
|
||||
[[ "${{ github.event_name }}" == "pull_request" ]]; then
|
||||
if [[ "${{ github.event_name }}" == "workflow_run" ]]; then
|
||||
# Triggered by build workflow completion
|
||||
echo "🔗 Triggered by build workflow completion"
|
||||
|
||||
# Check if the triggering workflow was successful
|
||||
# If the workflow succeeded, it means ALL builds (including Linux x86_64 and aarch64) succeeded
|
||||
if [[ "${{ github.event.workflow_run.conclusion }}" == "success" ]]; then
|
||||
echo "✅ Build workflow succeeded, all builds including Linux are successful"
|
||||
should_build=true
|
||||
should_push=true
|
||||
else
|
||||
echo "❌ Build workflow failed (conclusion: ${{ github.event.workflow_run.conclusion }}), skipping Docker build"
|
||||
should_build=false
|
||||
fi
|
||||
|
||||
# Extract version info from commit message or use commit SHA
|
||||
# Use Git to generate consistent short SHA (ensures uniqueness like build.yml)
|
||||
short_sha=$(git rev-parse --short "${{ github.event.workflow_run.head_sha }}")
|
||||
|
||||
# Determine build type based on triggering workflow event and ref
|
||||
triggering_event="${{ github.event.workflow_run.event }}"
|
||||
head_branch="${{ github.event.workflow_run.head_branch }}"
|
||||
|
||||
echo "🔍 Analyzing triggering workflow:"
|
||||
echo " 📋 Event: $triggering_event"
|
||||
echo " 🌿 Head branch: $head_branch"
|
||||
echo " 📎 Head SHA: ${{ github.event.workflow_run.head_sha }}"
|
||||
|
||||
# Check if this was triggered by a tag push
|
||||
if [[ "$triggering_event" == "push" ]]; then
|
||||
# For tag pushes, head_branch will be like "refs/tags/v1.0.0" or just "v1.0.0"
|
||||
if [[ "$head_branch" == refs/tags/* ]]; then
|
||||
# Extract tag name from refs/tags/TAG_NAME
|
||||
tag_name="${head_branch#refs/tags/}"
|
||||
version="$tag_name"
|
||||
elif [[ "$head_branch" =~ ^v?[0-9]+\.[0-9]+\.[0-9]+ ]]; then
|
||||
# Direct tag name like "v1.0.0" or "1.0.0-alpha.1"
|
||||
version="$head_branch"
|
||||
elif [[ "$head_branch" == "main" ]]; then
|
||||
# Regular branch push to main
|
||||
build_type="development"
|
||||
version="dev-${short_sha}"
|
||||
should_build=false
|
||||
echo "⏭️ Skipping Docker build for development version (main branch push)"
|
||||
else
|
||||
# Other branch push
|
||||
build_type="development"
|
||||
version="dev-${short_sha}"
|
||||
should_build=false
|
||||
echo "⏭️ Skipping Docker build for development version (branch: $head_branch)"
|
||||
fi
|
||||
|
||||
# If we extracted a version (tag), determine release type
|
||||
if [[ -n "$version" ]] && [[ "$version" != "dev-${short_sha}" ]]; then
|
||||
# Remove 'v' prefix if present for consistent version format
|
||||
if [[ "$version" == v* ]]; then
|
||||
version="${version#v}"
|
||||
fi
|
||||
|
||||
if [[ "$version" == *"alpha"* ]] || [[ "$version" == *"beta"* ]] || [[ "$version" == *"rc"* ]]; then
|
||||
build_type="prerelease"
|
||||
is_prerelease=true
|
||||
echo "🧪 Building Docker image for prerelease: $version"
|
||||
else
|
||||
build_type="release"
|
||||
create_latest=true
|
||||
echo "🚀 Building Docker image for release: $version"
|
||||
fi
|
||||
fi
|
||||
else
|
||||
# Non-push events
|
||||
build_type="development"
|
||||
version="dev-${short_sha}"
|
||||
should_build=false
|
||||
echo "⏭️ Skipping Docker build for development version (event: $triggering_event)"
|
||||
fi
|
||||
|
||||
echo "🔄 Build triggered by workflow_run:"
|
||||
echo " 📋 Conclusion: ${{ github.event.workflow_run.conclusion }}"
|
||||
echo " 🌿 Branch: ${{ github.event.workflow_run.head_branch }}"
|
||||
echo " 📎 SHA: ${{ github.event.workflow_run.head_sha }}"
|
||||
echo " 🎯 Event: ${{ github.event.workflow_run.event }}"
|
||||
|
||||
elif [[ "${{ github.event_name }}" == "workflow_dispatch" ]]; then
|
||||
# Manual trigger
|
||||
input_version="${{ github.event.inputs.version }}"
|
||||
version="${input_version}"
|
||||
should_push="${{ github.event.inputs.push_images }}"
|
||||
should_build=true
|
||||
fi
|
||||
|
||||
# Push only on main branch, tags, or manual trigger
|
||||
if [[ "${{ github.ref }}" == "refs/heads/main" ]] || \
|
||||
[[ "${{ startsWith(github.ref, 'refs/tags/') }}" == "true" ]] || \
|
||||
[[ "${{ github.event.inputs.push_images }}" == "true" ]]; then
|
||||
should_push=true
|
||||
# Get short SHA
|
||||
short_sha=$(git rev-parse --short HEAD)
|
||||
|
||||
echo "🎯 Manual Docker build triggered:"
|
||||
echo " 📋 Requested version: $input_version"
|
||||
echo " 🔧 Force rebuild: ${{ github.event.inputs.force_rebuild }}"
|
||||
echo " 🚀 Push images: $should_push"
|
||||
|
||||
case "$input_version" in
|
||||
"latest")
|
||||
build_type="release"
|
||||
create_latest=true
|
||||
echo "🚀 Building with latest stable release version"
|
||||
;;
|
||||
# Prerelease versions (must match first, more specific)
|
||||
v*alpha*|v*beta*|v*rc*|*alpha*|*beta*|*rc*)
|
||||
build_type="prerelease"
|
||||
is_prerelease=true
|
||||
echo "🧪 Building with prerelease version: $input_version"
|
||||
;;
|
||||
# Release versions (match after prereleases, more general)
|
||||
v[0-9]*|[0-9]*.*.*)
|
||||
build_type="release"
|
||||
create_latest=true
|
||||
echo "📦 Building with specific release version: $input_version"
|
||||
;;
|
||||
*)
|
||||
# Invalid version for Docker build
|
||||
should_build=false
|
||||
echo "❌ Invalid version for Docker build: $input_version"
|
||||
echo "⚠️ Only release versions (latest, v1.0.0, 1.0.0) and prereleases (v1.0.0-alpha1, 1.0.0-beta2) are supported"
|
||||
;;
|
||||
esac
|
||||
fi
|
||||
|
||||
echo "should_build=$should_build" >> $GITHUB_OUTPUT
|
||||
echo "should_push=$should_push" >> $GITHUB_OUTPUT
|
||||
echo "Build: $should_build, Push: $should_push"
|
||||
echo "build_type=$build_type" >> $GITHUB_OUTPUT
|
||||
echo "version=$version" >> $GITHUB_OUTPUT
|
||||
echo "short_sha=$short_sha" >> $GITHUB_OUTPUT
|
||||
echo "is_prerelease=$is_prerelease" >> $GITHUB_OUTPUT
|
||||
echo "create_latest=$create_latest" >> $GITHUB_OUTPUT
|
||||
|
||||
echo "🐳 Docker Build Summary:"
|
||||
echo " - Should build: $should_build"
|
||||
echo " - Should push: $should_push"
|
||||
echo " - Build type: $build_type"
|
||||
echo " - Version: $version"
|
||||
echo " - Short SHA: $short_sha"
|
||||
echo " - Is prerelease: $is_prerelease"
|
||||
echo " - Create latest: $create_latest"
|
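To make the branching above easier to follow, the snippet below replays the same pattern order (prerelease patterns first, then plain release versions, then rejection) against a few representative inputs. It is a standalone illustration, not part of the workflow:

```bash
# Mirrors the version classification used by the Docker build-check step.
for v in latest v1.0.0 1.2.3 v1.0.0-alpha.1 1.0.0-beta.2 v2.0.0-rc1 feature-branch; do
  case "$v" in
    "latest")                                   echo "$v -> release (latest tag created)" ;;
    v*alpha*|v*beta*|v*rc*|*alpha*|*beta*|*rc*) echo "$v -> prerelease (no latest tag)" ;;
    v[0-9]*|[0-9]*.*.*)                         echo "$v -> release (latest tag created)" ;;
    *)                                          echo "$v -> rejected (not a buildable version)" ;;
  esac
done
```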
||||
|
||||
# Build multi-arch Docker images
|
||||
# Strategy: Build images using pre-built binaries from dl.rustfs.com
|
||||
# Supports both release and dev channel binaries based on build context
|
||||
# Only runs when should_build is true (which includes workflow success check)
|
||||
build-docker:
|
||||
name: Build Docker Images
|
||||
needs: build-check
|
||||
if: needs.build-check.outputs.should_build == 'true'
|
||||
runs-on: ubuntu-latest
|
||||
timeout-minutes: 60
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
variant:
|
||||
- name: production
|
||||
dockerfile: Dockerfile
|
||||
platforms: linux/amd64,linux/arm64
|
||||
- name: ubuntu
|
||||
dockerfile: .docker/Dockerfile.ubuntu22.04
|
||||
platforms: linux/amd64,linux/arm64
|
||||
- name: alpine
|
||||
dockerfile: .docker/Dockerfile.alpine
|
||||
platforms: linux/amd64,linux/arm64
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ env.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
# - name: Login to GitHub Container Registry
|
||||
# uses: docker/login-action@v3
|
||||
# with:
|
||||
# registry: ghcr.io
|
||||
# username: ${{ github.actor }}
|
||||
# password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v3
|
||||
|
||||
- name: Login to Docker Hub
|
||||
if: needs.build-check.outputs.should_push == 'true' && secrets.DOCKERHUB_USERNAME != ''
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
- name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v3
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
if: needs.build-check.outputs.should_push == 'true'
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Extract metadata
|
||||
- name: Extract metadata and generate tags
|
||||
id: meta
|
||||
uses: docker/metadata-action@v5
|
||||
with:
|
||||
images: |
|
||||
${{ env.REGISTRY_DOCKERHUB }}
|
||||
${{ env.REGISTRY_GHCR }}
|
||||
tags: |
|
||||
type=ref,event=branch,suffix=-${{ matrix.variant.name }}
|
||||
type=ref,event=pr,suffix=-${{ matrix.variant.name }}
|
||||
type=semver,pattern={{version}},suffix=-${{ matrix.variant.name }}
|
||||
type=semver,pattern={{major}}.{{minor}},suffix=-${{ matrix.variant.name }}
|
||||
type=raw,value=latest,suffix=-${{ matrix.variant.name }},enable={{is_default_branch}}
|
||||
flavor: |
|
||||
latest=false
|
||||
run: |
|
||||
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
|
||||
VERSION="${{ needs.build-check.outputs.version }}"
|
||||
SHORT_SHA="${{ needs.build-check.outputs.short_sha }}"
|
||||
CREATE_LATEST="${{ needs.build-check.outputs.create_latest }}"
|
||||
|
||||
# Convert version format for Dockerfile compatibility
|
||||
case "$VERSION" in
|
||||
"latest")
|
||||
# For stable latest, use RELEASE=latest + release CHANNEL
|
||||
DOCKER_RELEASE="latest"
|
||||
DOCKER_CHANNEL="release"
|
||||
;;
|
||||
v*)
|
||||
# For versioned releases (v1.0.0), remove 'v' prefix for Dockerfile
|
||||
DOCKER_RELEASE="${VERSION#v}"
|
||||
DOCKER_CHANNEL="release"
|
||||
;;
|
||||
*)
|
||||
# For other versions, pass as-is
|
||||
DOCKER_RELEASE="${VERSION}"
|
||||
DOCKER_CHANNEL="release"
|
||||
;;
|
||||
esac
|
||||
|
||||
echo "docker_release=$DOCKER_RELEASE" >> $GITHUB_OUTPUT
|
||||
echo "docker_channel=$DOCKER_CHANNEL" >> $GITHUB_OUTPUT
|
||||
|
||||
echo "🐳 Docker build parameters:"
|
||||
echo " - Original version: $VERSION"
|
||||
echo " - Docker RELEASE: $DOCKER_RELEASE"
|
||||
echo " - Docker CHANNEL: $DOCKER_CHANNEL"
|
||||
|
||||
# Generate tags based on build type
|
||||
# Only support release and prerelease builds (no development builds)
|
||||
TAGS="${{ env.REGISTRY_DOCKERHUB }}:${VERSION}"
|
||||
|
||||
# Add channel tags for prereleases and latest for stable
|
||||
if [[ "$CREATE_LATEST" == "true" ]]; then
|
||||
# Stable release
|
||||
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:latest"
|
||||
elif [[ "$BUILD_TYPE" == "prerelease" ]]; then
|
||||
# Prerelease channel tags (alpha, beta, rc)
|
||||
if [[ "$VERSION" == *"alpha"* ]]; then
|
||||
CHANNEL="alpha"
|
||||
elif [[ "$VERSION" == *"beta"* ]]; then
|
||||
CHANNEL="beta"
|
||||
elif [[ "$VERSION" == *"rc"* ]]; then
|
||||
CHANNEL="rc"
|
||||
fi
|
||||
|
||||
if [[ -n "$CHANNEL" ]]; then
|
||||
TAGS="$TAGS,${{ env.REGISTRY_DOCKERHUB }}:${CHANNEL}"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Output tags
|
||||
echo "tags=$TAGS" >> $GITHUB_OUTPUT
|
||||
|
||||
# Generate labels
|
||||
LABELS="org.opencontainers.image.title=RustFS"
|
||||
LABELS="$LABELS,org.opencontainers.image.description=RustFS distributed object storage system"
|
||||
LABELS="$LABELS,org.opencontainers.image.version=$VERSION"
|
||||
LABELS="$LABELS,org.opencontainers.image.revision=${{ github.sha }}"
|
||||
LABELS="$LABELS,org.opencontainers.image.source=${{ github.server_url }}/${{ github.repository }}"
|
||||
LABELS="$LABELS,org.opencontainers.image.created=$(date -u +'%Y-%m-%dT%H:%M:%SZ')"
|
||||
LABELS="$LABELS,org.opencontainers.image.build-type=$BUILD_TYPE"
|
||||
|
||||
echo "labels=$LABELS" >> $GITHUB_OUTPUT
|
||||
|
||||
echo "🐳 Generated Docker tags:"
|
||||
echo "$TAGS" | tr ',' '\n' | sed 's/^/ - /'
|
||||
echo "📋 Build type: $BUILD_TYPE"
|
||||
echo "🔖 Version: $VERSION"
|
||||
|
||||
- name: Build and push Docker image
|
||||
uses: docker/build-push-action@v5
|
||||
uses: docker/build-push-action@v6
|
||||
with:
|
||||
context: .
|
||||
file: ${{ matrix.variant.dockerfile }}
|
||||
platforms: ${{ matrix.variant.platforms }}
|
||||
file: Dockerfile
|
||||
platforms: ${{ env.DOCKER_PLATFORMS }}
|
||||
push: ${{ needs.build-check.outputs.should_push == 'true' }}
|
||||
tags: ${{ steps.meta.outputs.tags }}
|
||||
labels: ${{ steps.meta.outputs.labels }}
|
||||
cache-from: type=gha,scope=docker-${{ matrix.variant.name }}
|
||||
cache-to: type=gha,mode=max,scope=docker-${{ matrix.variant.name }}
|
||||
cache-from: |
|
||||
type=gha,scope=docker-binary
|
||||
cache-to: |
|
||||
type=gha,mode=max,scope=docker-binary
|
||||
build-args: |
|
||||
BUILDTIME=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.created'] }}
|
||||
VERSION=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.version'] }}
|
||||
REVISION=${{ fromJSON(steps.meta.outputs.json).labels['org.opencontainers.image.revision'] }}
|
||||
BUILDTIME=$(date -u +'%Y-%m-%dT%H:%M:%SZ')
|
||||
VERSION=${{ needs.build-check.outputs.version }}
|
||||
BUILD_TYPE=${{ needs.build-check.outputs.build_type }}
|
||||
REVISION=${{ github.sha }}
|
||||
RELEASE=${{ steps.meta.outputs.docker_release }}
|
||||
CHANNEL=${{ steps.meta.outputs.docker_channel }}
|
||||
BUILDKIT_INLINE_CACHE=1
|
||||
# Enable advanced BuildKit features for better performance
|
||||
provenance: false
|
||||
sbom: false
|
||||
# Add retry mechanism by splitting the build process
|
||||
no-cache: false
|
||||
pull: true
|
||||
|
||||
# Create manifest for main production image
|
||||
create-manifest:
|
||||
name: Create Manifest
|
||||
# Note: Manifest creation is no longer needed as we only build one variant
|
||||
# Multi-arch manifests are automatically created by docker/build-push-action
|
||||
|
||||
# Docker build summary
|
||||
docker-summary:
|
||||
name: Docker Build Summary
|
||||
needs: [build-check, build-docker]
|
||||
if: needs.build-check.outputs.should_push == 'true' && startsWith(github.ref, 'refs/tags/')
|
||||
if: always() && needs.build-check.outputs.should_build == 'true'
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- name: Login to Docker Hub
|
||||
if: secrets.DOCKERHUB_USERNAME != ''
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Login to GitHub Container Registry
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
registry: ghcr.io
|
||||
username: ${{ github.actor }}
|
||||
password: ${{ secrets.GITHUB_TOKEN }}
|
||||
|
||||
- name: Create and push manifest
|
||||
- name: Docker build completion summary
|
||||
run: |
|
||||
VERSION=${GITHUB_REF#refs/tags/}
|
||||
BUILD_TYPE="${{ needs.build-check.outputs.build_type }}"
|
||||
VERSION="${{ needs.build-check.outputs.version }}"
|
||||
CREATE_LATEST="${{ needs.build-check.outputs.create_latest }}"
|
||||
|
||||
# Create main image tag (without variant suffix)
|
||||
if [[ -n "${{ secrets.DOCKERHUB_USERNAME }}" ]]; then
|
||||
docker buildx imagetools create \
|
||||
-t ${{ env.REGISTRY_DOCKERHUB }}:${VERSION} \
|
||||
-t ${{ env.REGISTRY_DOCKERHUB }}:latest \
|
||||
${{ env.REGISTRY_DOCKERHUB }}:${VERSION}-production
|
||||
fi
|
||||
echo "🐳 Docker build completed successfully!"
|
||||
echo "📦 Build type: $BUILD_TYPE"
|
||||
echo "🔢 Version: $VERSION"
|
||||
echo "🚀 Strategy: Images using pre-built binaries (release channel only)"
|
||||
echo ""
|
||||
|
||||
docker buildx imagetools create \
|
||||
-t ${{ env.REGISTRY_GHCR }}:${VERSION} \
|
||||
-t ${{ env.REGISTRY_GHCR }}:latest \
|
||||
${{ env.REGISTRY_GHCR }}:${VERSION}-production
|
||||
case "$BUILD_TYPE" in
|
||||
"release")
|
||||
echo "🚀 Release Docker image has been built with ${VERSION} tags"
|
||||
echo "✅ This image is ready for production use"
|
||||
if [[ "$CREATE_LATEST" == "true" ]]; then
|
||||
echo "🏷️ Latest tag has been created for stable release"
|
||||
fi
|
||||
;;
|
||||
"prerelease")
|
||||
echo "🧪 Prerelease Docker image has been built with ${VERSION} tags"
|
||||
echo "⚠️ This is a prerelease image - use with caution"
|
||||
echo "🚫 Latest tag NOT created for prerelease"
|
||||
;;
|
||||
*)
|
||||
echo "❌ Unexpected build type: $BUILD_TYPE"
|
||||
;;
|
||||
esac
|
||||
|
||||
24
.github/workflows/issue-translator.yml
vendored
@@ -1,8 +1,22 @@
|
||||
name: 'issue-translator'
|
||||
on:
|
||||
issue_comment:
|
||||
# Copyright 2024 RustFS Team
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
name: "issue-translator"
|
||||
on:
|
||||
issue_comment:
|
||||
types: [created]
|
||||
issues:
|
||||
issues:
|
||||
types: [opened]
|
||||
|
||||
jobs:
|
||||
@@ -14,5 +28,5 @@ jobs:
|
||||
IS_MODIFY_TITLE: false
|
||||
# not required, default false. Decide whether to modify the issue title
|
||||
# if true, the robot account @Issues-translate-bot must have modification permissions, invite @Issues-translate-bot to your project or use your custom bot.
|
||||
CUSTOM_BOT_NOTE: Bot detected the issue body's language is not English, translate it automatically.
|
||||
CUSTOM_BOT_NOTE: Bot detected the issue body's language is not English, translate it automatically.
|
||||
# not required. Customize the translation robot prefix message.
|
||||
|
||||
17
.github/workflows/performance.yml
vendored
@@ -18,10 +18,10 @@ on:
|
||||
push:
|
||||
branches: [main]
|
||||
paths:
|
||||
- '**/*.rs'
|
||||
- '**/Cargo.toml'
|
||||
- '**/Cargo.lock'
|
||||
- '.github/workflows/performance.yml'
|
||||
- "**/*.rs"
|
||||
- "**/Cargo.toml"
|
||||
- "**/Cargo.lock"
|
||||
- ".github/workflows/performance.yml"
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
profile_duration:
|
||||
@@ -73,12 +73,11 @@ jobs:
|
||||
echo "RUSTFS_VOLUMES=./target/volume/test{0...4}" >> $GITHUB_ENV
|
||||
echo "RUST_LOG=rustfs=info,ecstore=info,s3s=info,iam=info,rustfs-obs=info" >> $GITHUB_ENV
|
||||
|
||||
- name: Download static files
|
||||
- name: Verify console static assets
|
||||
run: |
|
||||
curl -L "https://dl.rustfs.com/artifacts/console/rustfs-console-latest.zip" \
|
||||
-o tempfile.zip --retry 3 --retry-delay 5
|
||||
unzip -o tempfile.zip -d ./rustfs/static
|
||||
rm tempfile.zip
|
||||
# Console static assets are already embedded in the repository
|
||||
echo "Console static assets size: $(du -sh rustfs/static/)"
|
||||
echo "Console static assets are embedded via rust-embed, no external download needed"
|
||||
|
||||
- name: Build with profiling optimizations
|
||||
run: |
|
||||
|
||||
78
.github/workflows/release-notes-template.md
vendored
@@ -1,78 +0,0 @@
|
||||
## RustFS ${VERSION_CLEAN}
|
||||
|
||||
${ORIGINAL_NOTES}
|
||||
|
||||
---
|
||||
|
||||
### 🚀 Quick Download
|
||||
|
||||
**Linux (Static Binaries - No Dependencies):**
|
||||
|
||||
```bash
|
||||
# x86_64 (Intel/AMD)
|
||||
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-x86_64-unknown-linux-musl.zip
|
||||
unzip rustfs-x86_64-unknown-linux-musl.zip
|
||||
sudo mv rustfs /usr/local/bin/
|
||||
|
||||
# ARM64 (Graviton, Apple Silicon VMs)
|
||||
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-aarch64-unknown-linux-musl.zip
|
||||
unzip rustfs-aarch64-unknown-linux-musl.zip
|
||||
sudo mv rustfs /usr/local/bin/
|
||||
```
|
||||
|
||||
**macOS:**
|
||||
|
||||
```bash
|
||||
# Apple Silicon (M1/M2/M3)
|
||||
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-aarch64-apple-darwin.zip
|
||||
unzip rustfs-aarch64-apple-darwin.zip
|
||||
sudo mv rustfs /usr/local/bin/
|
||||
|
||||
# Intel
|
||||
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/rustfs-x86_64-apple-darwin.zip
|
||||
unzip rustfs-x86_64-apple-darwin.zip
|
||||
sudo mv rustfs /usr/local/bin/
|
||||
```
|
||||
|
||||
### 📁 Available Downloads
|
||||
|
||||
| Platform | Architecture | File | Description |
|
||||
|----------|-------------|------|-------------|
|
||||
| Linux | x86_64 | `rustfs-x86_64-unknown-linux-musl.zip` | Static binary, no dependencies |
|
||||
| Linux | ARM64 | `rustfs-aarch64-unknown-linux-musl.zip` | Static binary, no dependencies |
|
||||
| macOS | Apple Silicon | `rustfs-aarch64-apple-darwin.zip` | Native binary, ZIP archive |
|
||||
| macOS | Intel | `rustfs-x86_64-apple-darwin.zip` | Native binary, ZIP archive |
|
||||
|
||||
### 🔐 Verification
|
||||
|
||||
Download checksums and verify your download:
|
||||
|
||||
```bash
|
||||
# Download checksums
|
||||
curl -LO https://github.com/rustfs/rustfs/releases/download/${VERSION}/SHA256SUMS
|
||||
|
||||
# Verify (Linux)
|
||||
sha256sum -c SHA256SUMS --ignore-missing
|
||||
|
||||
# Verify (macOS)
|
||||
shasum -a 256 -c SHA256SUMS --ignore-missing
|
||||
```
|
||||
|
||||
### 🛠️ System Requirements
|
||||
|
||||
- **Linux**: Any distribution with glibc 2.17+ (CentOS 7+, Ubuntu 16.04+)
|
||||
- **macOS**: 10.15+ (Catalina or later)
|
||||
- **Windows**: Windows 10 version 1809 or later
|
||||
|
||||
### 📚 Documentation
|
||||
|
||||
- [Installation Guide](https://github.com/rustfs/rustfs#installation)
|
||||
- [Quick Start](https://github.com/rustfs/rustfs#quick-start)
|
||||
- [Configuration](https://github.com/rustfs/rustfs/blob/main/docs/)
|
||||
- [API Documentation](https://docs.rs/rustfs)
|
||||
|
||||
### 🆘 Support
|
||||
|
||||
- 🐛 [Report Issues](https://github.com/rustfs/rustfs/issues)
|
||||
- 💬 [Community Discussions](https://github.com/rustfs/rustfs/discussions)
|
||||
- 📖 [Documentation](https://github.com/rustfs/rustfs/tree/main/docs)
|
||||
1
.gitignore
vendored
@@ -19,3 +19,4 @@ deploy/certs/*
|
||||
profile.json
|
||||
.docker/openobserve-otel/data
|
||||
*.zst
|
||||
.secrets
|
||||
|
||||
906
Cargo.lock
generated
File diff suppressed because it is too large
33
Cargo.toml
@@ -63,7 +63,7 @@ rustfs-filemeta = { path = "crates/filemeta" }
|
||||
rustfs-rio = { path = "crates/rio" }
|
||||
|
||||
[workspace.dependencies]
|
||||
rustfs-ahm = { path = "crates/ahm", version = "0.0.3" }
|
||||
rustfs-ahm = { path = "crates/ahm", version = "0.0.5" }
|
||||
rustfs-s3select-api = { path = "crates/s3select-api", version = "0.0.5" }
|
||||
rustfs-appauth = { path = "crates/appauth", version = "0.0.5" }
|
||||
rustfs-common = { path = "crates/common", version = "0.0.5" }
|
||||
@@ -89,7 +89,7 @@ aes-gcm = { version = "0.10.3", features = ["std"] }
|
||||
arc-swap = "1.7.1"
|
||||
argon2 = { version = "0.5.3", features = ["std"] }
|
||||
atoi = "2.0.0"
|
||||
async-channel = "2.4.0"
|
||||
async-channel = "2.5.0"
|
||||
async-recursion = "1.1.1"
|
||||
async-trait = "0.1.88"
|
||||
async-compression = { version = "0.4.0" }
|
||||
@@ -107,16 +107,16 @@ byteorder = "1.5.0"
|
||||
cfg-if = "1.0.1"
|
||||
chacha20poly1305 = { version = "0.10.1" }
|
||||
chrono = { version = "0.4.41", features = ["serde"] }
|
||||
clap = { version = "4.5.40", features = ["derive", "env"] }
|
||||
const-str = { version = "0.6.2", features = ["std", "proc"] }
|
||||
crc32fast = "1.4.2"
|
||||
clap = { version = "4.5.41", features = ["derive", "env"] }
|
||||
const-str = { version = "0.6.3", features = ["std", "proc"] }
|
||||
crc32fast = "1.5.0"
|
||||
criterion = { version = "0.5", features = ["html_reports"] }
|
||||
dashmap = "6.1.0"
|
||||
datafusion = "46.0.1"
|
||||
derive_builder = "0.20.2"
|
||||
dioxus = { version = "0.6.3", features = ["router"] }
|
||||
dirs = "6.0.0"
|
||||
enumset = "1.1.6"
|
||||
enumset = "1.1.7"
|
||||
flatbuffers = "25.2.10"
|
||||
flate2 = "1.1.2"
|
||||
flexi_logger = { version = "0.31.2", features = ["trc", "dont_minimize_extra_stacks"] }
|
||||
@@ -130,7 +130,7 @@ hex-simd = "0.8.0"
|
||||
highway = { version = "1.3.0" }
|
||||
hmac = "0.12.1"
|
||||
hyper = "1.6.0"
|
||||
hyper-util = { version = "0.1.14", features = [
|
||||
hyper-util = { version = "0.1.15", features = [
|
||||
"tokio",
|
||||
"server-auto",
|
||||
"server-graceful",
|
||||
@@ -182,9 +182,9 @@ pbkdf2 = "0.12.2"
|
||||
percent-encoding = "2.3.1"
|
||||
pin-project-lite = "0.2.16"
|
||||
prost = "0.13.5"
|
||||
quick-xml = "0.37.5"
|
||||
quick-xml = "0.38.0"
|
||||
rand = "0.9.1"
|
||||
rdkafka = { version = "0.37.0", features = ["tokio"] }
|
||||
rdkafka = { version = "0.38.0", features = ["tokio"] }
|
||||
reed-solomon-simd = { version = "3.0.1" }
|
||||
regex = { version = "1.11.1" }
|
||||
reqwest = { version = "0.12.22", default-features = false, features = [
|
||||
@@ -196,7 +196,7 @@ reqwest = { version = "0.12.22", default-features = false, features = [
|
||||
"json",
|
||||
"blocking",
|
||||
] }
|
||||
rfd = { version = "0.15.3", default-features = false, features = [
|
||||
rfd = { version = "0.15.4", default-features = false, features = [
|
||||
"xdg-portal",
|
||||
"tokio",
|
||||
] }
|
||||
@@ -207,10 +207,10 @@ rumqttc = { version = "0.24" }
|
||||
rust-embed = { version = "8.7.2" }
|
||||
rust-i18n = { version = "3.1.5" }
|
||||
rustfs-rsc = "2025.506.1"
|
||||
rustls = { version = "0.23.28" }
|
||||
rustls = { version = "0.23.29" }
|
||||
rustls-pki-types = "1.12.0"
|
||||
rustls-pemfile = "2.2.0"
|
||||
s3s = { version = "0.12.0-minio-preview.1" }
|
||||
s3s = { version = "0.12.0-minio-preview.2" }
|
||||
shadow-rs = { version = "1.2.0", default-features = false }
|
||||
serde = { version = "1.0.219", features = ["derive"] }
|
||||
serde_json = { version = "1.0.140", features = ["raw_value"] }
|
||||
@@ -222,10 +222,12 @@ siphasher = "1.0.1"
|
||||
smallvec = { version = "1.15.1", features = ["serde"] }
|
||||
snafu = "0.8.6"
|
||||
snap = "1.1.1"
|
||||
socket2 = "0.5.10"
|
||||
socket2 = "0.6.0"
|
||||
strum = { version = "0.27.1", features = ["derive"] }
|
||||
sysinfo = "0.35.2"
|
||||
sysinfo = "0.36.0"
|
||||
sysctl = "0.6.0"
|
||||
tempfile = "3.20.0"
|
||||
temp-env = "0.3.6"
|
||||
test-case = "3.3.1"
|
||||
thiserror = "2.0.12"
|
||||
time = { version = "0.3.41", features = [
|
||||
@@ -239,6 +241,7 @@ tokio = { version = "1.46.1", features = ["fs", "rt-multi-thread"] }
|
||||
tokio-rustls = { version = "0.26.2", default-features = false }
|
||||
tokio-stream = { version = "0.1.17" }
|
||||
tokio-tar = "0.3.1"
|
||||
tokio-test = "0.4.4"
|
||||
tokio-util = { version = "0.7.15", features = ["io", "compat"] }
|
||||
tonic = { version = "0.13.1", features = ["gzip"] }
|
||||
tonic-build = { version = "0.13.1" }
|
||||
@@ -263,7 +266,7 @@ winapi = { version = "0.3.9" }
|
||||
xxhash-rust = { version = "0.8.15", features = ["xxh64", "xxh3"] }
|
||||
zip = "2.4.2"
|
||||
zstd = "0.13.3"
|
||||
anyhow = "1.0.86"
|
||||
anyhow = "1.0.98"
|
||||
|
||||
[profile.wasm-dev]
|
||||
inherits = "dev"
|
||||
|
||||
116
Dockerfile
@@ -1,49 +1,95 @@
|
||||
# Copyright 2024 RustFS Team
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
# Multi-stage build for RustFS production image
|
||||
|
||||
FROM alpine:3.18 AS builder
|
||||
# Build stage: Download and extract RustFS binary
|
||||
FROM alpine:3.22 AS build
|
||||
|
||||
RUN apk add -U --no-cache \
|
||||
ca-certificates \
|
||||
curl \
|
||||
bash \
|
||||
unzip
|
||||
# Build arguments for platform and release
|
||||
ARG TARGETARCH
|
||||
ARG RELEASE=latest
|
||||
|
||||
# Install minimal dependencies for downloading and extracting
|
||||
RUN apk add --no-cache ca-certificates curl unzip
|
||||
|
||||
RUN curl -Lo /tmp/rustfs.zip https://dl.rustfs.com/artifacts/rustfs/rustfs-x86_64-unknown-linux-musl.zip && \
|
||||
unzip -o /tmp/rustfs.zip -d /tmp && \
|
||||
mv /tmp/rustfs /rustfs && \
|
||||
chmod +x /rustfs && \
|
||||
rm -rf /tmp/*
|
||||
# Create build directory
|
||||
WORKDIR /build
|
||||
|
||||
FROM alpine:3.18
|
||||
# Detect architecture and download corresponding binary
|
||||
RUN case "${TARGETARCH}" in \
|
||||
amd64) ARCH="x86_64" ;; \
|
||||
arm64) ARCH="aarch64" ;; \
|
||||
*) echo "Unsupported architecture: ${TARGETARCH}" >&2 && exit 1 ;; \
|
||||
esac && \
|
||||
if [ "${RELEASE}" = "latest" ]; then \
|
||||
VERSION="latest"; \
|
||||
else \
|
||||
VERSION="v${RELEASE#v}"; \
|
||||
fi && \
|
||||
BASE_URL="https://dl.rustfs.com/artifacts/rustfs/release" && \
|
||||
PACKAGE_NAME="rustfs-linux-${ARCH}-${VERSION}.zip" && \
|
||||
DOWNLOAD_URL="${BASE_URL}/${PACKAGE_NAME}" && \
|
||||
echo "Downloading ${PACKAGE_NAME} from ${DOWNLOAD_URL}" >&2 && \
|
||||
curl -f -L "${DOWNLOAD_URL}" -o rustfs.zip && \
|
||||
unzip rustfs.zip -d /build && \
|
||||
chmod +x /build/rustfs && \
|
||||
rm rustfs.zip || { echo "Failed to download or extract ${PACKAGE_NAME}" >&2; exit 1; }
|
||||
|
||||
RUN apk add -U --no-cache \
|
||||
ca-certificates \
|
||||
bash
|
||||
# Runtime stage: Configure runtime environment
|
||||
FROM alpine:3.22.1
|
||||
|
||||
COPY --from=builder /rustfs /usr/local/bin/rustfs
|
||||
# Build arguments and labels
|
||||
ARG RELEASE=latest
|
||||
ARG BUILD_DATE
|
||||
ARG VCS_REF
|
||||
|
||||
ENV RUSTFS_ACCESS_KEY=rustfsadmin \
|
||||
LABEL name="RustFS" \
|
||||
vendor="RustFS Team" \
|
||||
maintainer="RustFS Team <dev@rustfs.com>" \
|
||||
version="${RELEASE}" \
|
||||
release="${RELEASE}" \
|
||||
build-date="${BUILD_DATE}" \
|
||||
vcs-ref="${VCS_REF}" \
|
||||
summary="High-performance distributed object storage system compatible with S3 API" \
|
||||
description="RustFS is a distributed object storage system written in Rust, supporting erasure coding, multi-tenant management, and observability." \
|
||||
url="https://rustfs.com" \
|
||||
license="Apache-2.0"
|
||||
|
||||
# Install runtime dependencies
|
||||
RUN echo "https://dl-cdn.alpinelinux.org/alpine/v3.20/community" >> /etc/apk/repositories && \
|
||||
apk update && \
|
||||
apk add --no-cache ca-certificates bash gosu coreutils shadow && \
|
||||
addgroup -g 1000 rustfs && \
|
||||
adduser -u 1000 -G rustfs -s /bin/bash -D rustfs
|
||||
|
||||
# Copy CA certificates and RustFS binary from build stage
|
||||
COPY --from=build /etc/ssl/certs/ca-certificates.crt /etc/ssl/certs/
|
||||
COPY --from=build /build/rustfs /usr/bin/rustfs
|
||||
|
||||
# Copy entry point script
|
||||
COPY entrypoint.sh /entrypoint.sh
|
||||
|
||||
# Set permissions
|
||||
RUN chmod +x /usr/bin/rustfs /entrypoint.sh && \
|
||||
mkdir -p /data /logs && \
|
||||
chown rustfs:rustfs /data /logs && \
|
||||
chmod 700 /data /logs
|
||||
|
||||
# Environment variables (credentials should be set via environment or secrets)
|
||||
ENV RUSTFS_ADDRESS=:9000 \
|
||||
RUSTFS_ACCESS_KEY=rustfsadmin \
|
||||
RUSTFS_SECRET_KEY=rustfsadmin \
|
||||
RUSTFS_ADDRESS=":9000" \
|
||||
RUSTFS_CONSOLE_ENABLE=true \
|
||||
RUST_LOG=warn
|
||||
RUSTFS_VOLUMES=/data \
|
||||
RUST_LOG=warn \
|
||||
RUSTFS_OBS_LOG_DIRECTORY=/logs \
|
||||
RUSTFS_SINKS_FILE_PATH=/logs
|
||||
|
||||
# Expose port
|
||||
EXPOSE 9000
|
||||
|
||||
RUN mkdir -p /data
|
||||
VOLUME /data
|
||||
# Volumes for data and logs
|
||||
VOLUME ["/data", "/logs"]
|
||||
|
||||
# Set entry point
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
||||
CMD ["/usr/bin/rustfs"]
|
||||
|
||||
CMD ["rustfs", "/data"]
|
||||
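The production Dockerfile above can also be exercised locally. A minimal sketch, assuming the referenced release artifact already exists on dl.rustfs.com and that the default credentials are overridden in any real deployment:

```bash
# Build an image for the host architecture from a pre-built release binary.
docker buildx build \
  --platform linux/amd64 \
  --build-arg RELEASE=1.0.0-alpha.1 \
  --load \
  -t rustfs:1.0.0-alpha.1 \
  .

# Run it with a local data directory; the keys shown are the image defaults.
docker run -d --name rustfs \
  -p 9000:9000 \
  -v "$(pwd)/data:/data" \
  -e RUSTFS_ACCESS_KEY=rustfsadmin \
  -e RUSTFS_SECRET_KEY=rustfsadmin \
  rustfs:1.0.0-alpha.1
```

Multi-arch builds and pushes remain the job of the Docker Images workflow; the local sketch targets a single platform so the image can be loaded into the local daemon.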
|
||||
@@ -1,21 +0,0 @@
|
||||
FROM ubuntu:latest
|
||||
|
||||
# RUN apk add --no-cache <package-name>
|
||||
# If rustfs has dependencies, they can be added here, for example:
|
||||
# RUN apk add --no-cache openssl
|
||||
# RUN apk add --no-cache bash # install Bash
|
||||
|
||||
WORKDIR /app
|
||||
|
||||
# Create directories consistent with RUSTFS_VOLUMES
|
||||
RUN mkdir -p /root/data/target/volume/test1 /root/data/target/volume/test2 /root/data/target/volume/test3 /root/data/target/volume/test4
|
||||
|
||||
# COPY ./target/x86_64-unknown-linux-musl/release/rustfs /app/rustfs
|
||||
COPY ./target/x86_64-unknown-linux-gnu/release/rustfs /app/rustfs
|
||||
|
||||
RUN chmod +x /app/rustfs
|
||||
|
||||
EXPOSE 9000
|
||||
EXPOSE 9002
|
||||
|
||||
CMD ["/app/rustfs"]
|
||||
@@ -1,10 +1,26 @@
|
||||
# Multi-stage Dockerfile for RustFS
|
||||
# Multi-stage Dockerfile for RustFS - LOCAL DEVELOPMENT ONLY
|
||||
#
|
||||
# ⚠️ IMPORTANT: This Dockerfile is for local development and testing only.
|
||||
# ⚠️ It builds RustFS from source code and is NOT used in CI/CD pipelines.
|
||||
# ⚠️ CI/CD pipeline uses pre-built binaries from Dockerfile instead.
|
||||
#
|
||||
# Usage for local development:
|
||||
# docker build -f Dockerfile.source -t rustfs:dev-local .
|
||||
# docker run --rm -p 9000:9000 rustfs:dev-local
|
||||
#
|
||||
# Supports cross-compilation for amd64 and arm64 architectures
|
||||
ARG TARGETPLATFORM
|
||||
ARG BUILDPLATFORM
|
||||
|
||||
# Build stage
|
||||
FROM --platform=$BUILDPLATFORM rust:1.85-bookworm AS builder
|
||||
FROM --platform=$BUILDPLATFORM rust:1.88-bookworm AS builder
|
||||
|
||||
# Re-declare build arguments after FROM (required for multi-stage builds)
|
||||
ARG TARGETPLATFORM
|
||||
ARG BUILDPLATFORM
|
||||
|
||||
# Debug: Print platform information
|
||||
RUN echo "🐳 Build Info: BUILDPLATFORM=$BUILDPLATFORM, TARGETPLATFORM=$TARGETPLATFORM"
|
||||
|
||||
# Install required build dependencies
|
||||
RUN apt-get update && apt-get install -y \
|
||||
@@ -18,6 +34,8 @@ RUN apt-get update && apt-get install -y \
|
||||
lld \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Note: sccache removed for simpler builds
|
||||
|
||||
# Install cross-compilation tools for ARM64
|
||||
RUN if [ "$TARGETPLATFORM" = "linux/arm64" ]; then \
|
||||
apt-get update && \
|
||||
@@ -37,10 +55,13 @@ RUN wget https://github.com/google/flatbuffers/releases/download/v25.2.10/Linux.
|
||||
&& mv flatc /usr/local/bin/ && chmod +x /usr/local/bin/flatc && rm -rf Linux.flatc.binary.g++-13.zip
|
||||
|
||||
# Set up Rust targets based on platform
|
||||
RUN case "$TARGETPLATFORM" in \
|
||||
RUN set -e && \
|
||||
PLATFORM="${TARGETPLATFORM:-linux/amd64}" && \
|
||||
echo "🎯 Setting up Rust target for platform: $PLATFORM" && \
|
||||
case "$PLATFORM" in \
|
||||
"linux/amd64") rustup target add x86_64-unknown-linux-gnu ;; \
|
||||
"linux/arm64") rustup target add aarch64-unknown-linux-gnu ;; \
|
||||
*) echo "Unsupported platform: $TARGETPLATFORM" && exit 1 ;; \
|
||||
*) echo "❌ Unsupported platform: $PLATFORM" && exit 1 ;; \
|
||||
esac
|
||||
|
||||
# Set up environment for cross-compilation
|
||||
@@ -50,37 +71,37 @@ ENV CXX_aarch64_unknown_linux_gnu=aarch64-linux-gnu-g++
|
||||
|
||||
WORKDIR /usr/src/rustfs
|
||||
|
||||
# Copy Cargo files for dependency caching
|
||||
COPY Cargo.toml Cargo.lock ./
|
||||
COPY */Cargo.toml ./*/
|
||||
|
||||
# Create dummy main.rs files for dependency compilation
|
||||
RUN find . -name "Cargo.toml" -not -path "./Cargo.toml" | \
|
||||
xargs -I {} dirname {} | \
|
||||
xargs -I {} sh -c 'mkdir -p {}/src && echo "fn main() {}" > {}/src/main.rs'
|
||||
|
||||
# Build dependencies only (cache layer)
|
||||
RUN case "$TARGETPLATFORM" in \
|
||||
"linux/amd64") cargo build --release --target x86_64-unknown-linux-gnu ;; \
|
||||
"linux/arm64") cargo build --release --target aarch64-unknown-linux-gnu ;; \
|
||||
esac
|
||||
|
||||
# Copy source code
|
||||
# Copy all source code
|
||||
COPY . .
|
||||
|
||||
# Configure cargo for optimized builds
|
||||
ENV CARGO_NET_GIT_FETCH_WITH_CLI=true \
|
||||
CARGO_REGISTRIES_CRATES_IO_PROTOCOL=sparse \
|
||||
CARGO_INCREMENTAL=0 \
|
||||
CARGO_PROFILE_RELEASE_DEBUG=false \
|
||||
CARGO_PROFILE_RELEASE_SPLIT_DEBUGINFO=off \
|
||||
CARGO_PROFILE_RELEASE_STRIP=symbols
|
||||
|
||||
# Generate protobuf code
|
||||
RUN cargo run --bin gproto
|
||||
|
||||
# Build the actual application
|
||||
# Build the actual application with optimizations
|
||||
RUN case "$TARGETPLATFORM" in \
|
||||
"linux/amd64") \
|
||||
cargo build --release --target x86_64-unknown-linux-gnu --bin rustfs && \
|
||||
echo "🔨 Building for amd64..." && \
|
||||
rustup target add x86_64-unknown-linux-gnu && \
|
||||
cargo build --release --target x86_64-unknown-linux-gnu --bin rustfs -j $(nproc) && \
|
||||
cp target/x86_64-unknown-linux-gnu/release/rustfs /usr/local/bin/rustfs \
|
||||
;; \
|
||||
"linux/arm64") \
|
||||
cargo build --release --target aarch64-unknown-linux-gnu --bin rustfs && \
|
||||
echo "🔨 Building for arm64..." && \
|
||||
rustup target add aarch64-unknown-linux-gnu && \
|
||||
cargo build --release --target aarch64-unknown-linux-gnu --bin rustfs -j $(nproc) && \
|
||||
cp target/aarch64-unknown-linux-gnu/release/rustfs /usr/local/bin/rustfs \
|
||||
;; \
|
||||
*) \
|
||||
echo "❌ Unsupported platform: $TARGETPLATFORM" && exit 1 \
|
||||
;; \
|
||||
esac
|
||||
|
||||
# Runtime stage - Ubuntu minimal for better compatibility
|
||||
@@ -91,6 +112,8 @@ RUN apt-get update && apt-get install -y \
|
||||
ca-certificates \
|
||||
tzdata \
|
||||
wget \
|
||||
coreutils \
|
||||
passwd \
|
||||
&& rm -rf /var/lib/apt/lists/*
|
||||
|
||||
# Create rustfs user and group
|
||||
@@ -107,15 +130,27 @@ RUN mkdir -p /data/rustfs{0,1,2,3} && \
|
||||
COPY --from=builder /usr/local/bin/rustfs /app/rustfs
|
||||
RUN chmod +x /app/rustfs && chown rustfs:rustfs /app/rustfs
|
||||
|
||||
# Copy entrypoint script
|
||||
COPY entrypoint.sh /entrypoint.sh
|
||||
RUN chmod +x /entrypoint.sh
|
||||
|
||||
# Switch to non-root user
|
||||
USER rustfs
|
||||
|
||||
# Expose ports
|
||||
EXPOSE 9000 9001
|
||||
EXPOSE 9000
|
||||
|
||||
# Health check
|
||||
HEALTHCHECK --interval=30s --timeout=10s --start-period=5s --retries=3 \
|
||||
CMD wget --no-verbose --tries=1 --spider http://localhost:9000/health || exit 1
|
||||
# Environment variables
|
||||
ENV RUSTFS_ACCESS_KEY=rustfsadmin \
|
||||
RUSTFS_SECRET_KEY=rustfsadmin \
|
||||
RUSTFS_ADDRESS=":9000" \
|
||||
RUSTFS_CONSOLE_ENABLE=true \
|
||||
RUSTFS_VOLUMES=/data \
|
||||
RUST_LOG=warn
|
||||
|
||||
# Set default command
|
||||
# Volume for data
|
||||
VOLUME ["/data"]
|
||||
|
||||
# Set entrypoint and default command
|
||||
ENTRYPOINT ["/entrypoint.sh"]
|
||||
CMD ["/app/rustfs"]
|
||||
322
Makefile
@@ -5,7 +5,9 @@
|
||||
DOCKER_CLI ?= docker
|
||||
IMAGE_NAME ?= rustfs:v1.0.0
|
||||
CONTAINER_NAME ?= rustfs-dev
|
||||
DOCKERFILE_PATH = $(shell pwd)/.docker
|
||||
# Docker build configurations
|
||||
DOCKERFILE_PRODUCTION = Dockerfile
|
||||
DOCKERFILE_SOURCE = Dockerfile.source
|
||||
|
||||
# Code quality and formatting targets
|
||||
.PHONY: fmt
|
||||
@@ -44,21 +46,6 @@ setup-hooks:
|
||||
chmod +x .git/hooks/pre-commit
|
||||
@echo "✅ Git hooks setup complete!"
|
||||
|
||||
.PHONY: init-devenv
|
||||
init-devenv:
|
||||
$(DOCKER_CLI) build -t $(IMAGE_NAME) -f $(DOCKERFILE_PATH)/Dockerfile.devenv .
|
||||
$(DOCKER_CLI) stop $(CONTAINER_NAME)
|
||||
$(DOCKER_CLI) rm $(CONTAINER_NAME)
|
||||
$(DOCKER_CLI) run -d --name $(CONTAINER_NAME) -p 9010:9010 -p 9000:9000 -v $(shell pwd):/root/s3-rustfs -it $(IMAGE_NAME)
|
||||
|
||||
.PHONY: start
|
||||
start:
|
||||
$(DOCKER_CLI) start $(CONTAINER_NAME)
|
||||
|
||||
.PHONY: stop
|
||||
stop:
|
||||
$(DOCKER_CLI) stop $(CONTAINER_NAME)
|
||||
|
||||
.PHONY: e2e-server
|
||||
e2e-server:
|
||||
sh $(shell pwd)/scripts/run.sh
|
||||
@@ -67,86 +54,184 @@ e2e-server:
|
||||
probe-e2e:
|
||||
sh $(shell pwd)/scripts/probe.sh
|
||||
|
||||
# make BUILD_OS=ubuntu22.04 build
|
||||
# in target/ubuntu22.04/release/rustfs
|
||||
|
||||
# make BUILD_OS=rockylinux9.3 build
|
||||
# in target/rockylinux9.3/release/rustfs
|
||||
BUILD_OS ?= rockylinux9.3
|
||||
# Native build using build-rustfs.sh script
|
||||
.PHONY: build
|
||||
build: ROCKYLINUX_BUILD_IMAGE_NAME = rustfs-$(BUILD_OS):v1
|
||||
build: ROCKYLINUX_BUILD_CONTAINER_NAME = rustfs-$(BUILD_OS)-build
|
||||
build: BUILD_CMD = /root/.cargo/bin/cargo build --release --bin rustfs --target-dir /root/s3-rustfs/target/$(BUILD_OS)
|
||||
build:
|
||||
$(DOCKER_CLI) build -t $(ROCKYLINUX_BUILD_IMAGE_NAME) -f $(DOCKERFILE_PATH)/Dockerfile.$(BUILD_OS) .
|
||||
$(DOCKER_CLI) run --rm --name $(ROCKYLINUX_BUILD_CONTAINER_NAME) -v $(shell pwd):/root/s3-rustfs -it $(ROCKYLINUX_BUILD_IMAGE_NAME) $(BUILD_CMD)
|
||||
@echo "🔨 Building RustFS using build-rustfs.sh script..."
|
||||
./build-rustfs.sh
|
||||
|
||||
.PHONY: build-dev
|
||||
build-dev:
|
||||
@echo "🔨 Building RustFS in development mode..."
|
||||
./build-rustfs.sh --dev
|
||||
|
||||
# Docker-based build (alternative approach)
|
||||
# Usage: make BUILD_OS=ubuntu22.04 build-docker
|
||||
# Output: target/ubuntu22.04/release/rustfs
|
||||
BUILD_OS ?= rockylinux9.3
|
||||
.PHONY: build-docker
|
||||
build-docker: SOURCE_BUILD_IMAGE_NAME = rustfs-$(BUILD_OS):v1
|
||||
build-docker: SOURCE_BUILD_CONTAINER_NAME = rustfs-$(BUILD_OS)-build
|
||||
build-docker: BUILD_CMD = /root/.cargo/bin/cargo build --release --bin rustfs --target-dir /root/s3-rustfs/target/$(BUILD_OS)
|
||||
build-docker:
|
||||
@echo "🐳 Building RustFS using Docker ($(BUILD_OS))..."
|
||||
$(DOCKER_CLI) build -t $(SOURCE_BUILD_IMAGE_NAME) -f $(DOCKERFILE_SOURCE) .
|
||||
$(DOCKER_CLI) run --rm --name $(SOURCE_BUILD_CONTAINER_NAME) -v $(shell pwd):/root/s3-rustfs -it $(SOURCE_BUILD_IMAGE_NAME) $(BUILD_CMD)
|
||||
|
||||
.PHONY: build-musl
|
||||
build-musl:
|
||||
@echo "🔨 Building rustfs for x86_64-unknown-linux-musl..."
|
||||
cargo build --target x86_64-unknown-linux-musl --bin rustfs -r
|
||||
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
|
||||
./build-rustfs.sh --platform x86_64-unknown-linux-musl
|
||||
|
||||
.PHONY: build-gnu
|
||||
build-gnu:
|
||||
@echo "🔨 Building rustfs for x86_64-unknown-linux-gnu..."
|
||||
cargo build --target x86_64-unknown-linux-gnu --bin rustfs -r
|
||||
@echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
|
||||
./build-rustfs.sh --platform x86_64-unknown-linux-gnu
|
||||
|
||||
.PHONY: deploy-dev
|
||||
deploy-dev: build-musl
|
||||
@echo "🚀 Deploying to dev server: $${IP}"
|
||||
./scripts/dev_deploy.sh $${IP}
|
||||
|
||||
# Multi-architecture Docker build targets
|
||||
.PHONY: docker-build-multiarch
|
||||
docker-build-multiarch:
|
||||
@echo "🏗️ Building multi-architecture Docker images..."
|
||||
./scripts/build-docker-multiarch.sh
|
||||
# ========================================================================================
|
||||
# Docker Multi-Architecture Builds (Primary Methods)
|
||||
# ========================================================================================
|
||||
|
||||
.PHONY: docker-build-multiarch-push
|
||||
docker-build-multiarch-push:
|
||||
@echo "🚀 Building and pushing multi-architecture Docker images..."
|
||||
./scripts/build-docker-multiarch.sh --push
|
||||
# Production builds using docker-buildx.sh (for CI/CD and production)
|
||||
.PHONY: docker-buildx
|
||||
docker-buildx:
|
||||
@echo "🏗️ Building multi-architecture production Docker images with buildx..."
|
||||
./docker-buildx.sh
|
||||
|
||||
.PHONY: docker-build-multiarch-version
|
||||
docker-build-multiarch-version:
|
||||
.PHONY: docker-buildx-push
|
||||
docker-buildx-push:
|
||||
@echo "🚀 Building and pushing multi-architecture production Docker images with buildx..."
|
||||
./docker-buildx.sh --push
|
||||
|
||||
.PHONY: docker-buildx-version
|
||||
docker-buildx-version:
|
||||
@if [ -z "$(VERSION)" ]; then \
|
||||
echo "❌ 错误: 请指定版本, 例如: make docker-build-multiarch-version VERSION=v1.0.0"; \
|
||||
echo "❌ 错误: 请指定版本, 例如: make docker-buildx-version VERSION=v1.0.0"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "🏗️ Building multi-architecture Docker images (version: $(VERSION))..."
|
||||
./scripts/build-docker-multiarch.sh --version $(VERSION)
|
||||
@echo "🏗️ Building multi-architecture production Docker images (version: $(VERSION))..."
|
||||
./docker-buildx.sh --release $(VERSION)
|
||||
|
||||
.PHONY: docker-push-multiarch-version
|
||||
docker-push-multiarch-version:
|
||||
.PHONY: docker-buildx-push-version
|
||||
docker-buildx-push-version:
|
||||
@if [ -z "$(VERSION)" ]; then \
|
||||
echo "❌ 错误: 请指定版本, 例如: make docker-push-multiarch-version VERSION=v1.0.0"; \
|
||||
echo "❌ 错误: 请指定版本, 例如: make docker-buildx-push-version VERSION=v1.0.0"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "🚀 Building and pushing multi-architecture Docker images (version: $(VERSION))..."
|
||||
./scripts/build-docker-multiarch.sh --version $(VERSION) --push
|
||||
@echo "🚀 Building and pushing multi-architecture production Docker images (version: $(VERSION))..."
|
||||
./docker-buildx.sh --release $(VERSION) --push
|
||||
|
||||
.PHONY: docker-build-ubuntu
|
||||
docker-build-ubuntu:
|
||||
@echo "🏗️ Building multi-architecture Ubuntu Docker images..."
|
||||
./scripts/build-docker-multiarch.sh --type ubuntu
|
||||
# Development/Source builds using direct buildx commands
|
||||
.PHONY: docker-dev
|
||||
docker-dev:
|
||||
@echo "🏗️ Building multi-architecture development Docker images with buildx..."
|
||||
@echo "💡 This builds from source code and is intended for local development and testing"
|
||||
@echo "⚠️ Multi-arch images cannot be loaded locally, use docker-dev-push to push to registry"
|
||||
$(DOCKER_CLI) buildx build \
|
||||
--platform linux/amd64,linux/arm64 \
|
||||
--file $(DOCKERFILE_SOURCE) \
|
||||
--tag rustfs:source-latest \
|
||||
--tag rustfs:dev-latest \
|
||||
.
|
||||
|
||||
.PHONY: docker-build-rockylinux
|
||||
docker-build-rockylinux:
|
||||
@echo "🏗️ Building multi-architecture RockyLinux Docker images..."
|
||||
./scripts/build-docker-multiarch.sh --type rockylinux
|
||||
.PHONY: docker-dev-local
|
||||
docker-dev-local:
|
||||
@echo "🏗️ Building single-architecture development Docker image for local use..."
|
||||
@echo "💡 This builds from source code for the current platform and loads locally"
|
||||
$(DOCKER_CLI) buildx build \
|
||||
--file $(DOCKERFILE_SOURCE) \
|
||||
--tag rustfs:source-latest \
|
||||
--tag rustfs:dev-latest \
|
||||
--load \
|
||||
.
|
||||
|
||||
.PHONY: docker-build-devenv
|
||||
docker-build-devenv:
|
||||
@echo "🏗️ Building multi-architecture development environment Docker images..."
|
||||
./scripts/build-docker-multiarch.sh --type devenv
|
||||
.PHONY: docker-dev-push
|
||||
docker-dev-push:
|
||||
@if [ -z "$(REGISTRY)" ]; then \
|
||||
echo "❌ 错误: 请指定镜像仓库, 例如: make docker-dev-push REGISTRY=ghcr.io/username"; \
|
||||
exit 1; \
|
||||
fi
|
||||
@echo "🚀 Building and pushing multi-architecture development Docker images..."
|
||||
@echo "💡 推送到仓库: $(REGISTRY)"
|
||||
$(DOCKER_CLI) buildx build \
|
||||
--platform linux/amd64,linux/arm64 \
|
||||
--file $(DOCKERFILE_SOURCE) \
|
||||
--tag $(REGISTRY)/rustfs:source-latest \
|
||||
--tag $(REGISTRY)/rustfs:dev-latest \
|
||||
--push \
|
||||
.
|
||||
|
||||
.PHONY: docker-build-all-types
|
||||
docker-build-all-types:
|
||||
@echo "🏗️ Building all multi-architecture Docker image types..."
|
||||
./scripts/build-docker-multiarch.sh --type production
|
||||
./scripts/build-docker-multiarch.sh --type ubuntu
|
||||
./scripts/build-docker-multiarch.sh --type rockylinux
|
||||
./scripts/build-docker-multiarch.sh --type devenv
|
||||
|
||||
|
||||
# Local production builds using direct buildx (alternative to docker-buildx.sh)
|
||||
.PHONY: docker-buildx-production-local
|
||||
docker-buildx-production-local:
|
||||
@echo "🏗️ Building single-architecture production Docker image locally..."
|
||||
@echo "💡 Alternative to docker-buildx.sh for local testing"
|
||||
$(DOCKER_CLI) buildx build \
|
||||
--file $(DOCKERFILE_PRODUCTION) \
|
||||
--tag rustfs:production-latest \
|
||||
--tag rustfs:latest \
|
||||
--load \
|
||||
--build-arg RELEASE=latest \
|
||||
.
|
||||
|
||||
# ========================================================================================
|
||||
# Single Architecture Docker Builds (Traditional)
|
||||
# ========================================================================================
|
||||
|
||||
.PHONY: docker-build-production
|
||||
docker-build-production:
|
||||
@echo "🏗️ Building single-architecture production Docker image..."
|
||||
@echo "💡 Consider using 'make docker-buildx-production-local' for multi-arch support"
|
||||
$(DOCKER_CLI) build -f $(DOCKERFILE_PRODUCTION) -t rustfs:latest .
|
||||
|
||||
.PHONY: docker-build-source
|
||||
docker-build-source:
|
||||
@echo "🏗️ Building single-architecture source Docker image..."
|
||||
@echo "💡 Consider using 'make docker-dev-local' for multi-arch support"
|
||||
$(DOCKER_CLI) build -f $(DOCKERFILE_SOURCE) -t rustfs:source .
|
||||
|
||||
# ========================================================================================
|
||||
# Development Environment
|
||||
# ========================================================================================
|
||||
|
||||
.PHONY: dev-env-start
|
||||
dev-env-start:
|
||||
@echo "🚀 Starting development environment..."
|
||||
$(DOCKER_CLI) buildx build \
|
||||
--file $(DOCKERFILE_SOURCE) \
|
||||
--tag rustfs:dev \
|
||||
--load \
|
||||
.
|
||||
$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
|
||||
$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true
|
||||
$(DOCKER_CLI) run -d --name $(CONTAINER_NAME) \
|
||||
-p 9010:9010 -p 9000:9000 \
|
||||
-v $(shell pwd):/workspace \
|
||||
-it rustfs:dev
|
||||
|
||||
.PHONY: dev-env-stop
|
||||
dev-env-stop:
|
||||
@echo "🛑 Stopping development environment..."
|
||||
$(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
|
||||
$(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true
|
||||
|
||||
.PHONY: dev-env-restart
|
||||
dev-env-restart: dev-env-stop dev-env-start
|
||||
|
||||
|
||||
|
||||
# ========================================================================================
|
||||
# Build Utilities
|
||||
# ========================================================================================
|
||||
|
||||
.PHONY: docker-inspect-multiarch
|
||||
docker-inspect-multiarch:
|
||||
@@ -160,41 +245,106 @@ docker-inspect-multiarch:
|
||||
.PHONY: build-cross-all
|
||||
build-cross-all:
|
||||
@echo "🔧 Building all target architectures..."
|
||||
@if ! command -v cross &> /dev/null; then \
|
||||
echo "📦 Installing cross..."; \
|
||||
cargo install cross; \
|
||||
fi
|
||||
@echo "💡 On macOS/Windows, use 'make docker-dev' for reliable multi-arch builds"
|
||||
@echo "🔨 Generating protobuf code..."
|
||||
cargo run --bin gproto || true
|
||||
@echo "🔨 Building x86_64-unknown-linux-musl..."
|
||||
cargo build --release --target x86_64-unknown-linux-musl --bin rustfs
|
||||
./build-rustfs.sh --platform x86_64-unknown-linux-musl
|
||||
@echo "🔨 Building aarch64-unknown-linux-gnu..."
|
||||
cross build --release --target aarch64-unknown-linux-gnu --bin rustfs
|
||||
./build-rustfs.sh --platform aarch64-unknown-linux-gnu
|
||||
@echo "✅ All architectures built successfully!"
|
||||
|
||||
# ========================================================================================
|
||||
# Help and Documentation
|
||||
# ========================================================================================
|
||||
|
||||
.PHONY: help-build
|
||||
help-build:
|
||||
@echo "🔨 RustFS 构建帮助:"
|
||||
@echo ""
|
||||
@echo "🚀 本地构建 (推荐使用):"
|
||||
@echo " make build # 构建 RustFS 二进制文件 (默认包含 console)"
|
||||
@echo " make build-dev # 开发模式构建"
|
||||
@echo " make build-musl # 构建 musl 版本"
|
||||
@echo " make build-gnu # 构建 GNU 版本"
|
||||
@echo ""
|
||||
@echo "🐳 Docker 构建:"
|
||||
@echo " make build-docker # 使用 Docker 容器构建"
|
||||
@echo " make build-docker BUILD_OS=ubuntu22.04 # 指定构建系统"
|
||||
@echo ""
|
||||
@echo "🏗️ 跨架构构建:"
|
||||
@echo " make build-cross-all # 构建所有架构的二进制文件"
|
||||
@echo ""
|
||||
@echo "🔧 直接使用 build-rustfs.sh 脚本:"
|
||||
@echo " ./build-rustfs.sh --help # 查看脚本帮助"
|
||||
@echo " ./build-rustfs.sh --no-console # 构建时跳过 console 资源"
|
||||
@echo " ./build-rustfs.sh --force-console-update # 强制更新 console 资源"
|
||||
@echo " ./build-rustfs.sh --dev # 开发模式构建"
|
||||
@echo " ./build-rustfs.sh --sign # 签名二进制文件"
|
||||
@echo " ./build-rustfs.sh --platform x86_64-unknown-linux-musl # 指定目标平台"
|
||||
@echo " ./build-rustfs.sh --skip-verification # 跳过二进制验证"
|
||||
@echo ""
|
||||
@echo "💡 build-rustfs.sh 脚本提供了更多选项、智能检测和二进制验证功能"
|
||||
|
||||
.PHONY: help-docker
|
||||
help-docker:
|
||||
@echo "🐳 Docker 多架构构建帮助:"
|
||||
@echo ""
|
||||
@echo "基本构建:"
|
||||
@echo " make docker-build-multiarch # 构建多架构镜像(不推送)"
|
||||
@echo " make docker-build-multiarch-push # 构建并推送多架构镜像"
|
||||
@echo "🚀 生产镜像构建 (推荐使用 docker-buildx.sh):"
|
||||
@echo " make docker-buildx # 构建生产多架构镜像(不推送)"
|
||||
@echo " make docker-buildx-push # 构建并推送生产多架构镜像"
|
||||
@echo " make docker-buildx-version VERSION=v1.0.0 # 构建指定版本"
|
||||
@echo " make docker-buildx-push-version VERSION=v1.0.0 # 构建并推送指定版本"
|
||||
@echo ""
|
||||
@echo "版本构建:"
|
||||
@echo " make docker-build-multiarch-version VERSION=v1.0.0 # 构建指定版本"
|
||||
@echo " make docker-push-multiarch-version VERSION=v1.0.0 # 构建并推送指定版本"
|
||||
@echo "🔧 开发/源码镜像构建 (本地开发测试):"
|
||||
@echo " make docker-dev # 构建开发多架构镜像(无法本地加载)"
|
||||
@echo " make docker-dev-local # 构建开发单架构镜像(本地加载)"
|
||||
@echo " make docker-dev-push REGISTRY=xxx # 构建并推送开发镜像"
|
||||
@echo ""
|
||||
@echo "镜像类型:"
|
||||
@echo " make docker-build-ubuntu # 构建 Ubuntu 镜像"
|
||||
@echo " make docker-build-rockylinux # 构建 RockyLinux 镜像"
|
||||
@echo " make docker-build-devenv # 构建开发环境镜像"
|
||||
@echo " make docker-build-all-types # 构建所有类型镜像"
|
||||
@echo "🏗️ 本地生产镜像构建 (替代方案):"
|
||||
@echo " make docker-buildx-production-local # 本地构建生产单架构镜像"
|
||||
@echo ""
|
||||
@echo "辅助工具:"
|
||||
@echo "📦 单架构构建 (传统方式):"
|
||||
@echo " make docker-build-production # 构建单架构生产镜像"
|
||||
@echo " make docker-build-source # 构建单架构源码镜像"
|
||||
@echo ""
|
||||
@echo "🚀 开发环境管理:"
|
||||
@echo " make dev-env-start # 启动开发容器环境"
|
||||
@echo " make dev-env-stop # 停止开发容器环境"
|
||||
@echo " make dev-env-restart # 重启开发容器环境"
|
||||
@echo ""
|
||||
@echo "🔧 辅助工具:"
|
||||
@echo " make build-cross-all # 构建所有架构的二进制文件"
|
||||
@echo " make docker-inspect-multiarch IMAGE=xxx # 检查镜像的架构支持"
|
||||
@echo ""
|
||||
@echo "环境变量 (在推送时需要设置):"
|
||||
@echo "📋 环境变量:"
|
||||
@echo " REGISTRY 镜像仓库地址 (推送时需要)"
|
||||
@echo " DOCKERHUB_USERNAME Docker Hub 用户名"
|
||||
@echo " DOCKERHUB_TOKEN Docker Hub 访问令牌"
|
||||
@echo " GITHUB_TOKEN GitHub 访问令牌"
|
||||
@echo ""
|
||||
@echo "💡 建议:"
|
||||
@echo " - 生产用途: 使用 docker-buildx* 命令 (基于预编译二进制)"
|
||||
@echo " - 本地开发: 使用 docker-dev* 命令 (从源码构建)"
|
||||
@echo " - 开发环境: 使用 dev-env-* 命令管理开发容器"
|
||||
|
||||
.PHONY: help
|
||||
help:
|
||||
@echo "🦀 RustFS Makefile 帮助:"
|
||||
@echo ""
|
||||
@echo "📋 主要命令分类:"
|
||||
@echo " make help-build # 显示构建相关帮助"
|
||||
@echo " make help-docker # 显示 Docker 相关帮助"
|
||||
@echo ""
|
||||
@echo "🔧 代码质量:"
|
||||
@echo " make fmt # 格式化代码"
|
||||
@echo " make clippy # 运行 clippy 检查"
|
||||
@echo " make test # 运行测试"
|
||||
@echo " make pre-commit # 运行所有预提交检查"
|
||||
@echo ""
|
||||
@echo "🚀 快速开始:"
|
||||
@echo " make build # 构建 RustFS 二进制"
|
||||
@echo " make docker-dev-local # 构建开发 Docker 镜像(本地)"
|
||||
@echo " make dev-env-start # 启动开发环境"
|
||||
@echo ""
|
||||
@echo "💡 更多帮助请使用 'make help-build' 或 'make help-docker'"
|
||||
|
||||
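Taken together, the reworked Makefile separates native builds (`build*`, delegated to `build-rustfs.sh`) from Docker builds (`docker-buildx*` for production images, `docker-dev*` for source builds). A hedged sketch of a typical local workflow using only targets defined above; the version value is a placeholder:

```bash
make fmt clippy test                                # code quality checks
make build                                          # native release binary via build-rustfs.sh
make docker-dev-local                               # single-arch dev image, loaded into the local daemon
make docker-buildx-version VERSION=v1.0.0           # multi-arch production image
make docker-buildx-push-version VERSION=v1.0.0      # build and push (needs registry credentials)
```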
69
README.md
@@ -1,14 +1,13 @@
|
||||
[](https://rustfs.com)
|
||||
|
||||
|
||||
<p align="center">RustFS is a high-performance distributed object storage software built using Rust</p>
|
||||
|
||||
|
||||
<p align="center">
|
||||
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
|
||||
<a href="https://github.com/rustfs/rustfs/actions/workflows/docker.yml"><img alt="Build and Push Docker Images" src="https://github.com/rustfs/rustfs/actions/workflows/docker.yml/badge.svg" /></a>
|
||||
<img alt="GitHub commit activity" src="https://img.shields.io/github/commit-activity/m/rustfs/rustfs"/>
|
||||
<img alt="Github Last Commit" src="https://img.shields.io/github/last-commit/rustfs/rustfs"/>
|
||||
<a href="https://hellogithub.com/repository/rustfs/rustfs" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=b95bcb72bdc340b68f16fdf6790b7d5b&claim_uid=MsbvjYeLDKAH457&theme=small" alt="Featured|HelloGitHub" /></a>
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
@@ -19,20 +18,19 @@
|
||||
</p>
|
||||
|
||||
<p align="center">
|
||||
English | <a href="https://github.com/rustfs/rustfs/blob/main/README_ZH.md">简体中文</a> |
|
||||
English | <a href="https://github.com/rustfs/rustfs/blob/main/README_ZH.md">简体中文</a> |
|
||||
<!-- Keep these links. Translations will automatically update with the README. -->
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=de">Deutsch</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=es">Español</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=fr">français</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ja">日本語</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ko">한국어</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=pt">Português</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=de">Deutsch</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=es">Español</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=fr">français</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ja">日本語</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ko">한국어</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=pt">Português</a> |
|
||||
<a href="https://readme-i18n.com/rustfs/rustfs?lang=ru">Русский</a>
|
||||
</p>
|
||||
|
||||
RustFS is a high-performance distributed object storage software built using Rust, one of the most popular languages worldwide. Along with MinIO, it shares a range of advantages such as simplicity, S3 compatibility, open-source nature, support for data lakes, AI, and big data. Furthermore, it has a better and more user-friendly open-source license in comparison to other storage systems, being constructed under the Apache license. As Rust serves as its foundation, RustFS provides faster speed and safer distributed features for high-performance object storage.
|
||||
|
||||
|
||||
> ⚠️ **RustFS is under rapid development. Do NOT use in production environments!**
|
||||
|
||||
## Features
|
||||
@@ -74,7 +72,7 @@ Stress test server parameters
|
||||
|
||||
To get started with RustFS, follow these steps:
|
||||
|
||||
1. **One-click installation script (Option 1)**
|
||||
1. **One-click installation script (Option 1)**
|
||||
|
||||
```bash
|
||||
curl -O https://rustfs.com/install_rustfs.sh && bash install_rustfs.sh
|
||||
@@ -83,13 +81,52 @@ To get started with RustFS, follow these steps:
|
||||
2. **Docker Quick Start (Option 2)**
|
||||
|
||||
```bash
|
||||
podman run -d -p 9000:9000 -p 9001:9001 -v /data:/data quay.io/rustfs/rustfs
|
||||
# Latest stable release
|
||||
docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs:latest
|
||||
|
||||
# Development version (main branch)
|
||||
docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs:main-latest
|
||||
|
||||
# Specific version
|
||||
docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs:v1.0.0
|
||||
```
|
||||
|
||||
3. **Build from Source (Option 3) - Advanced Users**
|
||||
|
||||
3. **Access the Console**: Open your web browser and navigate to `http://localhost:9001` to access the RustFS console, default username and password is `rustfsadmin` .
|
||||
4. **Create a Bucket**: Use the console to create a new bucket for your objects.
|
||||
5. **Upload Objects**: You can upload files directly through the console or use S3-compatible APIs to interact with your RustFS instance.
|
||||
For developers who want to build RustFS Docker images from source with multi-architecture support:
|
||||
|
||||
```bash
|
||||
# Build multi-architecture images locally
|
||||
./docker-buildx.sh --build-arg RELEASE=latest
|
||||
|
||||
# Build and push to registry
|
||||
./docker-buildx.sh --push
|
||||
|
||||
# Build specific version
|
||||
./docker-buildx.sh --release v1.0.0 --push
|
||||
|
||||
# Build for custom registry
|
||||
./docker-buildx.sh --registry your-registry.com --namespace yourname --push
|
||||
```
|
||||
|
||||
The `docker-buildx.sh` script supports:
|
||||
- **Multi-architecture builds**: `linux/amd64`, `linux/arm64`
|
||||
- **Automatic version detection**: Uses git tags or commit hashes
|
||||
- **Registry flexibility**: Supports Docker Hub, GitHub Container Registry, etc.
|
||||
- **Build optimization**: Includes caching and parallel builds
|
||||
|
||||
You can also use Make targets for convenience:
|
||||
|
||||
```bash
|
||||
make docker-buildx # Build locally
|
||||
make docker-buildx-push # Build and push
|
||||
make docker-buildx-version VERSION=v1.0.0 # Build specific version
|
||||
make help-docker # Show all Docker-related commands
|
||||
```
|
||||
|
||||
4. **Access the Console**: Open your web browser and navigate to `http://localhost:9000` to access the RustFS console, default username and password is `rustfsadmin` .
|
||||
5. **Create a Bucket**: Use the console to create a new bucket for your objects.
|
||||
6. **Upload Objects**: You can upload files directly through the console or use S3-compatible APIs to interact with your RustFS instance.
|
||||
|
||||
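Since RustFS speaks the S3 API, any S3 client can talk to it once the server is running. A minimal sketch with the AWS CLI, assuming the default `rustfsadmin` credentials and the default `:9000` address from the steps above; bucket and file names are placeholders:

```bash
export AWS_ACCESS_KEY_ID=rustfsadmin
export AWS_SECRET_ACCESS_KEY=rustfsadmin

aws --endpoint-url http://localhost:9000 s3 mb s3://demo-bucket
aws --endpoint-url http://localhost:9000 s3 cp ./hello.txt s3://demo-bucket/
aws --endpoint-url http://localhost:9000 s3 ls s3://demo-bucket
```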
## Documentation
|
||||
|
||||
@@ -122,7 +159,7 @@ If you have any questions or need assistance, you can:
|
||||
RustFS is a community-driven project, and we appreciate all contributions. Check out the [Contributors](https://github.com/rustfs/rustfs/graphs/contributors) page to see the amazing people who have helped make RustFS better.
|
||||
|
||||
<a href="https://github.com/rustfs/rustfs/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=rustfs/rustfs" />
|
||||
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" />
|
||||
</a>
|
||||
|
||||
## License
|
||||
|
||||
10
README_ZH.md
@@ -7,6 +7,7 @@
|
||||
<a href="https://github.com/rustfs/rustfs/actions/workflows/docker.yml"><img alt="Build and Push Docker Images" src="https://github.com/rustfs/rustfs/actions/workflows/docker.yml/badge.svg" /></a>
|
||||
<img alt="GitHub commit activity" src="https://img.shields.io/github/commit-activity/m/rustfs/rustfs"/>
|
||||
<img alt="Github Last Commit" src="https://img.shields.io/github/last-commit/rustfs/rustfs"/>
|
||||
<a href="https://hellogithub.com/repository/rustfs/rustfs" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=b95bcb72bdc340b68f16fdf6790b7d5b&claim_uid=MsbvjYeLDKAH457&theme=small" alt="Featured|HelloGitHub" /></a>
|
||||
</p >
|
||||
|
||||
<p align="center">
|
||||
@@ -61,7 +62,7 @@ RustFS 是一个使用 Rust(全球最受欢迎的编程语言之一)构建
|
||||
|
||||
要开始使用 RustFS,请按照以下步骤操作:
|
||||
|
||||
1. **一键脚本快速启动 (方案一)**
|
||||
1. **一键脚本快速启动 (方案一)**
|
||||
|
||||
```bash
|
||||
curl -O https://rustfs.com/install_rustfs.sh && bash install_rustfs.sh
|
||||
@@ -70,11 +71,10 @@ RustFS 是一个使用 Rust(全球最受欢迎的编程语言之一)构建
|
||||
2. **Docker快速启动(方案二)**
|
||||
|
||||
```bash
|
||||
podman run -d -p 9000:9000 -p 9001:9001 -v /data:/data quay.io/rustfs/rustfs
|
||||
docker run -d -p 9000:9000 -v /data:/data rustfs/rustfs
|
||||
```
|
||||
|
||||
|
||||
3. **访问控制台**:打开 Web 浏览器并导航到 `http://localhost:9001` 以访问 RustFS 控制台,默认的用户名和密码是 `rustfsadmin` 。
|
||||
3. **访问控制台**:打开 Web 浏览器并导航到 `http://localhost:9000` 以访问 RustFS 控制台,默认的用户名和密码是 `rustfsadmin` 。
|
||||
4. **创建存储桶**:使用控制台为您的对象创建新的存储桶。
|
||||
5. **上传对象**:您可以直接通过控制台上传文件,或使用 S3 兼容的 API 与您的 RustFS 实例交互。
|
||||
|
||||
@@ -109,7 +109,7 @@ RustFS 是一个使用 Rust(全球最受欢迎的编程语言之一)构建
|
||||
RustFS 是一个社区驱动的项目,我们感谢所有的贡献。查看[贡献者](https://github.com/rustfs/rustfs/graphs/contributors)页面,了解帮助 RustFS 变得更好的杰出人员。
|
||||
|
||||
<a href="https://github.com/rustfs/rustfs/graphs/contributors">
|
||||
<img src="https://contrib.rocks/image?repo=rustfs/rustfs" />
|
||||
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" />
|
||||
</a >
|
||||
|
||||
## 许可证
|
||||
|
||||
41
_typos.toml
Normal file
@@ -0,0 +1,41 @@
|
||||
[default]
|
||||
# # Ignore specific spell checking patterns
|
||||
# extend-ignore-identifiers-re = [
|
||||
# # Ignore common patterns in base64 encoding and hash values
|
||||
# "[A-Za-z0-9+/]{8,}={0,2}", # base64 encoding
|
||||
# "[A-Fa-f0-9]{8,}", # hexadecimal hash
|
||||
# "[A-Za-z0-9_-]{20,}", # long random strings
|
||||
# ]
|
||||
|
||||
# # Ignore specific regex patterns in content
|
||||
# extend-ignore-re = [
|
||||
# # Ignore hash values and encoded strings (base64 patterns)
|
||||
# "(?i)[A-Za-z0-9+/]{8,}={0,2}",
|
||||
# # Ignore long strings in quotes (usually hash or base64)
|
||||
# '"[A-Za-z0-9+/=_-]{8,}"',
|
||||
# # Ignore IV values and similar cryptographic strings
|
||||
# '"[A-Za-z0-9+/=]{12,}"',
|
||||
# # Ignore cryptographic signatures and keys (including partial strings)
|
||||
# "[A-Za-z0-9+/]{6,}[A-Za-z0-9+/=]*",
|
||||
# # Ignore base64-like strings in comments (common in examples)
|
||||
# "//.*[A-Za-z0-9+/]{8,}[A-Za-z0-9+/=]*",
|
||||
# ]
|
||||
extend-ignore-re = [
|
||||
# Ignore long strings in quotes (usually hash or base64)
|
||||
'"[A-Za-z0-9+/=_-]{32,}"',
|
||||
# Ignore IV values and similar cryptographic strings
|
||||
'"[A-Za-z0-9+/=]{12,}"',
|
||||
# Ignore cryptographic signatures and keys (including partial strings)
|
||||
"[A-Za-z0-9+/]{16,}[A-Za-z0-9+/=]*",
|
||||
]
|
||||
|
||||
[default.extend-words]
|
||||
bui = "bui"
|
||||
typ = "typ"
|
||||
clen = "clen"
|
||||
datas = "datas"
|
||||
bre = "bre"
|
||||
abd = "abd"
|
||||
|
||||
[files]
|
||||
extend-exclude = []
|
||||
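The new `_typos.toml` configures the `typos` spell checker: long base64/hash-like strings are ignored via `extend-ignore-re`, and a handful of intentional identifiers (`bui`, `typ`, `clen`, and so on) are whitelisted. A hedged sketch of running it locally, assuming the standard `typos-cli` tool, which picks this file up automatically from the repository root:

```bash
cargo install typos-cli     # provides the `typos` binary
typos                       # check the whole workspace using _typos.toml
typos --write-changes       # optionally apply suggested fixes
```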
564
build-rustfs.sh
Executable file
@@ -0,0 +1,564 @@
|
||||
#!/bin/bash
|
||||
|
||||
# RustFS Binary Build Script
|
||||
# This script compiles RustFS binaries for different platforms and architectures
|
||||
|
||||
set -e
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Auto-detect current platform
|
||||
detect_platform() {
|
||||
local arch=$(uname -m)
|
||||
local os=$(uname -s | tr '[:upper:]' '[:lower:]')
|
||||
|
||||
case "$os" in
|
||||
"linux")
|
||||
case "$arch" in
|
||||
"x86_64")
|
||||
echo "x86_64-unknown-linux-musl"
|
||||
;;
|
||||
"aarch64"|"arm64")
|
||||
echo "aarch64-unknown-linux-musl"
|
||||
;;
|
||||
"armv7l")
|
||||
echo "armv7-unknown-linux-musleabihf"
|
||||
;;
|
||||
*)
|
||||
echo "unknown-platform"
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
"darwin")
|
||||
case "$arch" in
|
||||
"x86_64")
|
||||
echo "x86_64-apple-darwin"
|
||||
;;
|
||||
"arm64"|"aarch64")
|
||||
echo "aarch64-apple-darwin"
|
||||
;;
|
||||
*)
|
||||
echo "unknown-platform"
|
||||
;;
|
||||
esac
|
||||
;;
|
||||
*)
|
||||
echo "unknown-platform"
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# Cross-platform SHA256 checksum generation
|
||||
generate_sha256() {
|
||||
local file="$1"
|
||||
local output_file="$2"
|
||||
local os=$(uname -s | tr '[:upper:]' '[:lower:]')
|
||||
|
||||
case "$os" in
|
||||
"linux")
|
||||
if command -v sha256sum &> /dev/null; then
|
||||
sha256sum "$file" > "$output_file"
|
||||
elif command -v shasum &> /dev/null; then
|
||||
shasum -a 256 "$file" > "$output_file"
|
||||
else
|
||||
print_message $RED "❌ No SHA256 command found (sha256sum or shasum)"
|
||||
return 1
|
||||
fi
|
||||
;;
|
||||
"darwin")
|
||||
if command -v shasum &> /dev/null; then
|
||||
shasum -a 256 "$file" > "$output_file"
|
||||
elif command -v sha256sum &> /dev/null; then
|
||||
sha256sum "$file" > "$output_file"
|
||||
else
|
||||
print_message $RED "❌ No SHA256 command found (shasum or sha256sum)"
|
||||
return 1
|
||||
fi
|
||||
;;
|
||||
*)
|
||||
# Try common commands in order
|
||||
if command -v sha256sum &> /dev/null; then
|
||||
sha256sum "$file" > "$output_file"
|
||||
elif command -v shasum &> /dev/null; then
|
||||
shasum -a 256 "$file" > "$output_file"
|
||||
else
|
||||
print_message $RED "❌ No SHA256 command found"
|
||||
return 1
|
||||
fi
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# Default values
|
||||
OUTPUT_DIR="target/release"
|
||||
PLATFORM=$(detect_platform) # Auto-detect current platform
|
||||
BINARY_NAME="rustfs"
|
||||
BUILD_TYPE="release"
|
||||
SIGN=false
|
||||
WITH_CONSOLE=true
|
||||
FORCE_CONSOLE_UPDATE=false
|
||||
CONSOLE_VERSION="latest"
|
||||
SKIP_VERIFICATION=false
|
||||
CUSTOM_PLATFORM=""
|
||||
|
||||
# Print usage
|
||||
usage() {
|
||||
echo "Usage: $0 [OPTIONS]"
|
||||
echo ""
|
||||
echo "Description:"
|
||||
echo " Build RustFS binary for the current platform. Designed for CI/CD pipelines"
|
||||
echo " where different runners build platform-specific binaries natively."
|
||||
echo " Includes automatic verification to ensure the built binary is functional."
|
||||
echo ""
|
||||
echo "Options:"
|
||||
echo " -o, --output-dir DIR Output directory (default: target/release)"
|
||||
echo " -b, --binary-name NAME Binary name (default: rustfs)"
|
||||
echo " -p, --platform TARGET Target platform (default: auto-detect)"
|
||||
echo " --dev Build in dev mode"
|
||||
echo " --sign Sign binaries after build"
|
||||
echo " --with-console Download console static assets (default)"
|
||||
echo " --no-console Skip console static assets"
|
||||
echo " --force-console-update Force update console assets even if they exist"
|
||||
echo " --console-version VERSION Console version to download (default: latest)"
|
||||
echo " --skip-verification Skip binary verification after build"
|
||||
echo " -h, --help Show this help message"
|
||||
echo ""
|
||||
echo "Examples:"
|
||||
echo " $0 # Build for current platform (includes console assets)"
|
||||
echo " $0 --dev # Development build"
|
||||
echo " $0 --sign # Build and sign binary (release CI)"
|
||||
echo " $0 --no-console # Build without console static assets"
|
||||
echo " $0 --force-console-update # Force update console assets"
|
||||
echo " $0 --platform x86_64-unknown-linux-musl # Build for specific platform"
|
||||
echo " $0 --skip-verification # Skip binary verification (for cross-compilation)"
|
||||
echo ""
|
||||
echo "Detected platform: $(detect_platform)"
|
||||
echo "CI Usage: Run this script on each platform's runner to build native binaries"
|
||||
}
|
||||
|
||||
# Print colored message
|
||||
print_message() {
|
||||
local color=$1
|
||||
local message=$2
|
||||
echo -e "${color}${message}${NC}"
|
||||
}
|
||||
|
||||
# Get version from git
|
||||
get_version() {
|
||||
if git describe --abbrev=0 --tags >/dev/null 2>&1; then
|
||||
git describe --abbrev=0 --tags
|
||||
else
|
||||
git rev-parse --short HEAD
|
||||
fi
|
||||
}
|
||||
|
||||
# Setup rust environment
|
||||
setup_rust_environment() {
|
||||
print_message $BLUE "🔧 Setting up Rust environment..."
|
||||
|
||||
# Install required target for current platform
|
||||
print_message $YELLOW "Installing target: $PLATFORM"
|
||||
rustup target add "$PLATFORM"
|
||||
|
||||
# Set up environment variables for musl targets
|
||||
if [[ "$PLATFORM" == *"musl"* ]]; then
|
||||
print_message $YELLOW "Setting up environment for musl target..."
|
||||
export RUSTFLAGS="-C target-feature=-crt-static"
|
||||
|
||||
# For cargo-zigbuild, set up additional environment variables
|
||||
if command -v cargo-zigbuild &> /dev/null; then
|
||||
print_message $YELLOW "Configuring cargo-zigbuild for musl target..."
|
||||
|
||||
# Set environment variables for better musl support
|
||||
export CC_x86_64_unknown_linux_musl="zig cc -target x86_64-linux-musl"
|
||||
export CXX_x86_64_unknown_linux_musl="zig c++ -target x86_64-linux-musl"
|
||||
export AR_x86_64_unknown_linux_musl="zig ar"
|
||||
export CARGO_TARGET_X86_64_UNKNOWN_LINUX_MUSL_LINKER="zig cc -target x86_64-linux-musl"
|
||||
|
||||
export CC_aarch64_unknown_linux_musl="zig cc -target aarch64-linux-musl"
|
||||
export CXX_aarch64_unknown_linux_musl="zig c++ -target aarch64-linux-musl"
|
||||
export AR_aarch64_unknown_linux_musl="zig ar"
|
||||
export CARGO_TARGET_AARCH64_UNKNOWN_LINUX_MUSL_LINKER="zig cc -target aarch64-linux-musl"
|
||||
|
||||
# Set environment variables for zstd-sys to avoid target parsing issues
|
||||
export ZSTD_SYS_USE_PKG_CONFIG=1
|
||||
export PKG_CONFIG_ALLOW_CROSS=1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Install required tools
|
||||
if [ "$SIGN" = true ]; then
|
||||
if ! command -v minisign &> /dev/null; then
|
||||
print_message $YELLOW "Installing minisign for binary signing..."
|
||||
cargo install minisign
|
||||
fi
|
||||
fi
|
||||
}
|
||||
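Note that `setup_rust_environment` adds the rustup target and installs `minisign` (for `--sign`) on demand, but it only configures environment variables for `cargo-zigbuild`; the zig-based toolchain itself must already be installed for Linux cross builds, and `cross` is installed later only when a Windows target is requested. A hedged sketch of the one-time setup the script relies on (standard install commands for these tools, not taken from this repository):

```bash
rustup target add x86_64-unknown-linux-musl aarch64-unknown-linux-musl
cargo install cargo-zigbuild   # used for Linux cross builds; requires a zig toolchain on PATH
cargo install cross            # used for Windows targets
cargo install minisign         # only needed when building with --sign
```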
|
||||
# Download console static assets
|
||||
download_console_assets() {
|
||||
local static_dir="rustfs/static"
|
||||
local console_exists=false
|
||||
|
||||
# Check if console assets already exist
|
||||
if [ -d "$static_dir" ] && [ -f "$static_dir/index.html" ]; then
|
||||
console_exists=true
|
||||
local static_size=$(du -sh "$static_dir" 2>/dev/null | cut -f1 || echo "unknown")
|
||||
print_message $YELLOW "Console static assets already exist ($static_size)"
|
||||
fi
|
||||
|
||||
# Determine if we need to download
|
||||
local should_download=false
|
||||
if [ "$WITH_CONSOLE" = true ]; then
|
||||
if [ "$console_exists" = false ]; then
|
||||
print_message $BLUE "🎨 Console assets not found, downloading..."
|
||||
should_download=true
|
||||
elif [ "$FORCE_CONSOLE_UPDATE" = true ]; then
|
||||
print_message $BLUE "🎨 Force updating console assets..."
|
||||
should_download=true
|
||||
else
|
||||
print_message $GREEN "✅ Console assets already available, skipping download"
|
||||
fi
|
||||
else
|
||||
if [ "$console_exists" = true ]; then
|
||||
print_message $GREEN "✅ Using existing console assets"
|
||||
else
|
||||
print_message $YELLOW "⚠️ Console assets not found. Use --download-console to download them."
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$should_download" = true ]; then
|
||||
print_message $BLUE "📥 Downloading console static assets..."
|
||||
|
||||
# Create static directory
|
||||
mkdir -p "$static_dir"
|
||||
|
||||
# Download from GitHub Releases (consistent with Docker build)
|
||||
local download_url
|
||||
if [ "$CONSOLE_VERSION" = "latest" ]; then
|
||||
print_message $YELLOW "Getting latest console release info..."
|
||||
# For now, use dl.rustfs.com as fallback until GitHub Releases includes console assets
|
||||
download_url="https://dl.rustfs.com/artifacts/console/rustfs-console-latest.zip"
|
||||
else
|
||||
download_url="https://dl.rustfs.com/artifacts/console/rustfs-console-${CONSOLE_VERSION}.zip"
|
||||
fi
|
||||
|
||||
print_message $YELLOW "Downloading from: $download_url"
|
||||
|
||||
# Download with retries
|
||||
local temp_file="console-assets-temp.zip"
|
||||
local download_success=false
|
||||
|
||||
for i in {1..3}; do
|
||||
if curl -L "$download_url" -o "$temp_file" --retry 3 --retry-delay 5 --max-time 300; then
|
||||
download_success=true
|
||||
break
|
||||
else
|
||||
print_message $YELLOW "Download attempt $i failed, retrying..."
|
||||
sleep 2
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "$download_success" = true ]; then
|
||||
# Verify the downloaded file
|
||||
if [ -f "$temp_file" ] && [ -s "$temp_file" ]; then
|
||||
print_message $BLUE "📦 Extracting console assets..."
|
||||
|
||||
# Extract to static directory
|
||||
if unzip -o "$temp_file" -d "$static_dir"; then
|
||||
rm "$temp_file"
|
||||
local final_size=$(du -sh "$static_dir" 2>/dev/null | cut -f1 || echo "unknown")
|
||||
print_message $GREEN "✅ Console assets downloaded successfully ($final_size)"
|
||||
else
|
||||
print_message $RED "❌ Failed to extract console assets"
|
||||
rm -f "$temp_file"
|
||||
return 1
|
||||
fi
|
||||
else
|
||||
print_message $RED "❌ Downloaded file is empty or invalid"
|
||||
rm -f "$temp_file"
|
||||
return 1
|
||||
fi
|
||||
else
|
||||
print_message $RED "❌ Failed to download console assets after 3 attempts"
|
||||
print_message $YELLOW "💡 Console assets are optional. Build will continue without them."
|
||||
rm -f "$temp_file"
|
||||
fi
|
||||
fi
|
||||
}
|
||||
|
||||
# Verify binary functionality
|
||||
verify_binary() {
|
||||
local binary_path="$1"
|
||||
|
||||
# Check if binary exists
|
||||
if [ ! -f "$binary_path" ]; then
|
||||
print_message $RED "❌ Binary file not found: $binary_path"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Check if binary is executable
|
||||
if [ ! -x "$binary_path" ]; then
|
||||
print_message $RED "❌ Binary is not executable: $binary_path"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Check basic functionality - try to run help command
|
||||
print_message $YELLOW " Testing --help command..."
|
||||
if ! "$binary_path" --help >/dev/null 2>&1; then
|
||||
print_message $RED "❌ Binary failed to run --help command"
|
||||
return 1
|
||||
fi
|
||||
|
||||
# Check version command
|
||||
print_message $YELLOW " Testing --version command..."
|
||||
if ! "$binary_path" --version >/dev/null 2>&1; then
|
||||
print_message $YELLOW "⚠️ Binary does not support --version command (this is optional)"
|
||||
fi
|
||||
|
||||
# Try to get some basic info about the binary
|
||||
local file_info=$(file "$binary_path" 2>/dev/null || echo "unknown")
|
||||
print_message $YELLOW " Binary info: $file_info"
|
||||
|
||||
# Check if it's a valid ELF/Mach-O binary
|
||||
if command -v readelf >/dev/null 2>&1; then
|
||||
if readelf -h "$binary_path" >/dev/null 2>&1; then
|
||||
print_message $YELLOW " ELF binary structure: valid"
|
||||
fi
|
||||
elif command -v otool >/dev/null 2>&1; then
|
||||
if otool -h "$binary_path" >/dev/null 2>&1; then
|
||||
print_message $YELLOW " Mach-O binary structure: valid"
|
||||
fi
|
||||
fi
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
# Build binary for current platform
|
||||
build_binary() {
|
||||
local version=$(get_version)
|
||||
local output_file="${OUTPUT_DIR}/${PLATFORM}/${BINARY_NAME}"
|
||||
|
||||
print_message $BLUE "🏗️ Building for platform: $PLATFORM"
|
||||
print_message $YELLOW " Version: $version"
|
||||
print_message $YELLOW " Output: $output_file"
|
||||
|
||||
# Create output directory
|
||||
mkdir -p "${OUTPUT_DIR}/${PLATFORM}"
|
||||
|
||||
# Simple build logic matching the working version (4fb4b353)
|
||||
# Force rebuild by touching build.rs
|
||||
touch rustfs/build.rs
|
||||
|
||||
# Determine build command based on platform and cross-compilation needs
|
||||
local build_cmd=""
|
||||
local current_platform=$(detect_platform)
|
||||
|
||||
print_message $BLUE "📦 Using working version build logic..."
|
||||
|
||||
# Check if we need cross-compilation
|
||||
if [ "$PLATFORM" != "$current_platform" ]; then
|
||||
# Cross-compilation needed
|
||||
if [[ "$PLATFORM" == *"apple-darwin"* ]]; then
|
||||
print_message $RED "❌ macOS cross-compilation not supported"
|
||||
print_message $YELLOW "💡 macOS targets must be built natively on macOS runners"
|
||||
return 1
|
||||
elif [[ "$PLATFORM" == *"windows"* ]]; then
|
||||
# Use cross for Windows ARM64
|
||||
if ! command -v cross &> /dev/null; then
|
||||
print_message $YELLOW "📦 Installing cross tool..."
|
||||
cargo install cross --git https://github.com/cross-rs/cross
|
||||
fi
|
||||
build_cmd="cross build"
|
||||
else
|
||||
# Use zigbuild for Linux ARM64 (matches working version)
|
||||
if ! command -v cargo-zigbuild &> /dev/null; then
|
||||
print_message $RED "❌ cargo-zigbuild not found. Please install it first."
|
||||
return 1
|
||||
fi
|
||||
build_cmd="cargo zigbuild"
|
||||
fi
|
||||
else
|
||||
# Native compilation
|
||||
build_cmd="cargo build"
|
||||
fi
|
||||
|
||||
if [ "$BUILD_TYPE" = "release" ]; then
|
||||
build_cmd+=" --release"
|
||||
fi
|
||||
|
||||
build_cmd+=" --target $PLATFORM"
|
||||
build_cmd+=" -p rustfs --bins"
|
||||
|
||||
print_message $BLUE "📦 Executing: $build_cmd"
|
||||
|
||||
# Execute build (this matches exactly what the working version does)
|
||||
if eval $build_cmd; then
|
||||
print_message $GREEN "✅ Successfully built for $PLATFORM"
|
||||
|
||||
# Copy binary to output directory
|
||||
cp "target/${PLATFORM}/${BUILD_TYPE}/${BINARY_NAME}" "$output_file"
|
||||
|
||||
# Generate checksums
|
||||
print_message $BLUE "🔐 Generating checksums..."
|
||||
(cd "${OUTPUT_DIR}/${PLATFORM}" && generate_sha256 "${BINARY_NAME}" "${BINARY_NAME}.sha256sum")
|
||||
|
||||
# Verify binary functionality (if not skipped)
|
||||
if [ "$SKIP_VERIFICATION" = false ]; then
|
||||
print_message $BLUE "🔍 Verifying binary functionality..."
|
||||
if verify_binary "$output_file"; then
|
||||
print_message $GREEN "✅ Binary verification passed"
|
||||
else
|
||||
print_message $RED "❌ Binary verification failed"
|
||||
return 1
|
||||
fi
|
||||
else
|
||||
print_message $YELLOW "⚠️ Binary verification skipped by user request"
|
||||
fi
|
||||
|
||||
# Sign binary if requested
|
||||
if [ "$SIGN" = true ]; then
|
||||
print_message $BLUE "✍️ Signing binary..."
|
||||
(cd "${OUTPUT_DIR}/${PLATFORM}" && minisign -S -m "${BINARY_NAME}" -s ~/.minisign/minisign.key)
|
||||
fi
|
||||
|
||||
print_message $GREEN "✅ Build completed successfully"
|
||||
else
|
||||
print_message $RED "❌ Failed to build for $PLATFORM"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
|
||||
|
||||
# Main build function
|
||||
build_rustfs() {
|
||||
local version=$(get_version)
|
||||
|
||||
print_message $BLUE "🚀 Starting RustFS binary build process..."
|
||||
print_message $YELLOW " Version: $version"
|
||||
print_message $YELLOW " Platform: $PLATFORM"
|
||||
print_message $YELLOW " Output Directory: $OUTPUT_DIR"
|
||||
print_message $YELLOW " Build Type: $BUILD_TYPE"
|
||||
print_message $YELLOW " Sign: $SIGN"
|
||||
print_message $YELLOW " With Console: $WITH_CONSOLE"
|
||||
if [ "$WITH_CONSOLE" = true ]; then
|
||||
print_message $YELLOW " Console Version: $CONSOLE_VERSION"
|
||||
print_message $YELLOW " Force Console Update: $FORCE_CONSOLE_UPDATE"
|
||||
fi
|
||||
print_message $YELLOW " Skip Verification: $SKIP_VERIFICATION"
|
||||
echo ""
|
||||
|
||||
# Setup environment
|
||||
setup_rust_environment
|
||||
echo ""
|
||||
|
||||
# Download console assets if requested
|
||||
download_console_assets
|
||||
echo ""
|
||||
|
||||
# Build binary
|
||||
build_binary
|
||||
echo ""
|
||||
|
||||
print_message $GREEN "🎉 Build process completed successfully!"
|
||||
|
||||
# Show built binary
|
||||
local binary_file="${OUTPUT_DIR}/${PLATFORM}/${BINARY_NAME}"
|
||||
if [ -f "$binary_file" ]; then
|
||||
local size=$(ls -lh "$binary_file" | awk '{print $5}')
|
||||
print_message $BLUE "📋 Built binary: $binary_file ($size)"
|
||||
fi
|
||||
}
|
||||
|
||||
# Parse command line arguments
|
||||
while [[ $# -gt 0 ]]; do
|
||||
case $1 in
|
||||
-o|--output-dir)
|
||||
OUTPUT_DIR="$2"
|
||||
shift 2
|
||||
;;
|
||||
-b|--binary-name)
|
||||
BINARY_NAME="$2"
|
||||
shift 2
|
||||
;;
|
||||
-p|--platform)
|
||||
CUSTOM_PLATFORM="$2"
|
||||
shift 2
|
||||
;;
|
||||
--dev)
|
||||
BUILD_TYPE="debug"
|
||||
shift
|
||||
;;
|
||||
--sign)
|
||||
SIGN=true
|
||||
shift
|
||||
;;
|
||||
--with-console)
|
||||
WITH_CONSOLE=true
|
||||
shift
|
||||
;;
|
||||
--no-console)
|
||||
WITH_CONSOLE=false
|
||||
shift
|
||||
;;
|
||||
--force-console-update)
|
||||
FORCE_CONSOLE_UPDATE=true
|
||||
WITH_CONSOLE=true # Auto-enable download when forcing update
|
||||
shift
|
||||
;;
|
||||
--console-version)
|
||||
CONSOLE_VERSION="$2"
|
||||
shift 2
|
||||
;;
|
||||
--skip-verification)
|
||||
SKIP_VERIFICATION=true
|
||||
shift
|
||||
;;
|
||||
-h|--help)
|
||||
usage
|
||||
exit 0
|
||||
;;
|
||||
*)
|
||||
print_message $RED "❌ Unknown option: $1"
|
||||
usage
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
# Main execution
|
||||
main() {
|
||||
print_message $BLUE "🦀 RustFS Binary Build Script"
|
||||
echo ""
|
||||
|
||||
# Check if we're in a Rust project
|
||||
if [ ! -f "Cargo.toml" ]; then
|
||||
print_message $RED "❌ No Cargo.toml found. Are you in a Rust project directory?"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Override platform if specified
|
||||
if [ -n "$CUSTOM_PLATFORM" ]; then
|
||||
PLATFORM="$CUSTOM_PLATFORM"
|
||||
print_message $YELLOW "🎯 Using specified platform: $PLATFORM"
|
||||
|
||||
# Auto-enable skip verification for cross-compilation
|
||||
if [ "$PLATFORM" != "$(detect_platform)" ]; then
|
||||
SKIP_VERIFICATION=true
|
||||
print_message $YELLOW "⚠️ Cross-compilation detected, enabling --skip-verification"
|
||||
fi
|
||||
fi
|
||||
|
||||
# Start build process
|
||||
build_rustfs
|
||||
}
|
||||
|
||||
# Run main function
|
||||
main
|
||||
|
||||
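Per the script's own usage notes, it is designed to run natively on each platform's CI runner. A hedged sketch of how per-runner invocations might look; the target triples come from the script's supported set, and the release/signing choices are illustrative:

```bash
# Linux x86_64 runner: native musl build, signed for release
./build-rustfs.sh --sign --platform x86_64-unknown-linux-musl

# Linux x86_64 runner cross-building for arm64 (verification is auto-skipped for cross builds)
./build-rustfs.sh --platform aarch64-unknown-linux-musl

# macOS arm64 runner: must build natively, here without console assets
./build-rustfs.sh --no-console
```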
@@ -1,35 +0,0 @@
|
||||
#!/bin/bash
|
||||
# Copyright 2024 RustFS Team
|
||||
#
|
||||
# Licensed under the Apache License, Version 2.0 (the "License");
|
||||
# you may not use this file except in compliance with the License.
|
||||
# You may obtain a copy of the License at
|
||||
#
|
||||
# http://www.apache.org/licenses/LICENSE-2.0
|
||||
#
|
||||
# Unless required by applicable law or agreed to in writing, software
|
||||
# distributed under the License is distributed on an "AS IS" BASIS,
|
||||
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
# See the License for the specific language governing permissions and
|
||||
# limitations under the License.
|
||||
|
||||
clear
|
||||
|
||||
# Get the current platform architecture
|
||||
ARCH=$(uname -m)
|
||||
|
||||
# Set the target directory according to the schema
|
||||
if [ "$ARCH" == "x86_64" ]; then
|
||||
TARGET_DIR="target/x86_64"
|
||||
elif [ "$ARCH" == "aarch64" ]; then
|
||||
TARGET_DIR="target/arm64"
|
||||
else
|
||||
TARGET_DIR="target/unknown"
|
||||
fi
|
||||
|
||||
# Set CARGO_TARGET_DIR and build the project
|
||||
CARGO_TARGET_DIR=$TARGET_DIR RUSTFLAGS="-C link-arg=-fuse-ld=mold" cargo build --release --package rustfs
|
||||
|
||||
echo -e "\a"
|
||||
echo -e "\a"
|
||||
echo -e "\a"
|
||||
@@ -26,7 +26,6 @@ dioxus = { workspace = true, features = ["router"] }
|
||||
dirs = { workspace = true }
|
||||
hex = { workspace = true }
|
||||
keyring = { workspace = true }
|
||||
lazy_static = { workspace = true }
|
||||
rfd = { workspace = true }
|
||||
rust-embed = { workspace = true, features = ["interpolate-folder-path"] }
|
||||
rust-i18n = { workspace = true }
|
||||
|
||||
@@ -14,12 +14,12 @@
|
||||
|
||||
use crate::utils::RustFSConfig;
|
||||
use dioxus::logger::tracing::{debug, error, info};
|
||||
use lazy_static::lazy_static;
|
||||
use rust_embed::RustEmbed;
|
||||
use sha2::{Digest, Sha256};
|
||||
use std::error::Error;
|
||||
use std::path::{Path, PathBuf};
|
||||
use std::process::Command as StdCommand;
|
||||
use std::sync::LazyLock;
|
||||
use std::time::Duration;
|
||||
use tokio::fs;
|
||||
use tokio::fs::File;
|
||||
@@ -31,15 +31,13 @@ use tokio::sync::{Mutex, mpsc};
|
||||
#[folder = "$CARGO_MANIFEST_DIR/embedded-rustfs/"]
|
||||
struct Asset;
|
||||
|
||||
// Use `lazy_static` to cache the checksum of embedded resources
|
||||
lazy_static! {
|
||||
static ref RUSTFS_HASH: Mutex<String> = {
|
||||
let rustfs_file = if cfg!(windows) { "rustfs.exe" } else { "rustfs" };
|
||||
let rustfs_data = Asset::get(rustfs_file).expect("RustFs binary not embedded");
|
||||
let hash = hex::encode(Sha256::digest(&rustfs_data.data));
|
||||
Mutex::new(hash)
|
||||
};
|
||||
}
|
||||
// Use `LazyLock` to cache the checksum of embedded resources
|
||||
static RUSTFS_HASH: LazyLock<Mutex<String>> = LazyLock::new(|| {
|
||||
let rustfs_file = if cfg!(windows) { "rustfs.exe" } else { "rustfs" };
|
||||
let rustfs_data = Asset::get(rustfs_file).expect("RustFs binary not embedded");
|
||||
let hash = hex::encode(Sha256::digest(&rustfs_data.data));
|
||||
Mutex::new(hash)
|
||||
});
|
||||
|
||||
/// Service command
|
||||
/// This enum represents the commands that can be sent to the service manager
|
||||
|
||||
@@ -1,10 +1,16 @@
|
||||
[package]
|
||||
name = "rustfs-ahm"
|
||||
version = "0.0.3"
|
||||
edition = "2021"
|
||||
version.workspace = true
|
||||
edition.workspace = true
|
||||
authors = ["RustFS Team"]
|
||||
license = "Apache-2.0"
|
||||
license.workspace = true
|
||||
description = "RustFS AHM (Automatic Health Management) Scanner"
|
||||
repository.workspace = true
|
||||
rust-version.workspace = true
|
||||
homepage.workspace = true
|
||||
documentation = "https://docs.rs/rustfs-ahm/latest/rustfs_ahm/"
|
||||
keywords = ["RustFS", "AHM", "health-management", "scanner", "Minio"]
|
||||
categories = ["web-programming", "development-tools", "filesystem"]
|
||||
|
||||
[dependencies]
|
||||
rustfs-ecstore = { workspace = true }
|
||||
@@ -31,5 +37,5 @@ lazy_static = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
rmp-serde = { workspace = true }
|
||||
tokio-test = "0.4"
|
||||
serde_json = "1.0"
|
||||
tokio-test = { workspace = true }
|
||||
serde_json = { workspace = true }
|
||||
|
||||
@@ -20,8 +20,8 @@ pub mod scanner;
|
||||
|
||||
pub use error::{Error, Result};
|
||||
pub use scanner::{
|
||||
load_data_usage_from_backend, store_data_usage_in_backend, BucketTargetUsageInfo, BucketUsageInfo, DataUsageInfo, Scanner,
|
||||
ScannerMetrics,
|
||||
BucketTargetUsageInfo, BucketUsageInfo, DataUsageInfo, Scanner, ScannerMetrics, load_data_usage_from_backend,
|
||||
store_data_usage_in_backend,
|
||||
};
|
||||
|
||||
// Global cancellation token for AHM services (scanner and other background tasks)
|
||||
|
||||
@@ -1016,8 +1016,8 @@ mod tests {
|
||||
use rustfs_ecstore::endpoints::{EndpointServerPools, Endpoints, PoolEndpoints};
|
||||
use rustfs_ecstore::store::ECStore;
|
||||
use rustfs_ecstore::{
|
||||
store_api::{MakeBucketOptions, ObjectIO, PutObjReader},
|
||||
StorageAPI,
|
||||
store_api::{MakeBucketOptions, ObjectIO, PutObjReader},
|
||||
};
|
||||
use std::fs;
|
||||
use std::net::SocketAddr;
|
||||
|
||||
@@ -20,6 +20,6 @@ pub mod metrics;
|
||||
// Re-export main types for convenience
|
||||
pub use data_scanner::Scanner;
|
||||
pub use data_usage::{
|
||||
load_data_usage_from_backend, store_data_usage_in_backend, BucketTargetUsageInfo, BucketUsageInfo, DataUsageInfo,
|
||||
BucketTargetUsageInfo, BucketUsageInfo, DataUsageInfo, load_data_usage_from_backend, store_data_usage_in_backend,
|
||||
};
|
||||
pub use metrics::ScannerMetrics;
|
||||
|
||||
@@ -28,6 +28,5 @@ categories = ["web-programming", "development-tools", "data-structures"]
|
||||
workspace = true
|
||||
|
||||
[dependencies]
|
||||
lazy_static.workspace = true
|
||||
tokio.workspace = true
|
||||
tonic = { workspace = true }
|
||||
|
||||
@@ -12,19 +12,19 @@
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
use std::collections::HashMap;
|
||||
#![allow(non_upper_case_globals)] // FIXME
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::LazyLock;
|
||||
|
||||
use lazy_static::lazy_static;
|
||||
use tokio::sync::RwLock;
|
||||
use tonic::transport::Channel;
|
||||
|
||||
lazy_static! {
|
||||
pub static ref GLOBAL_Local_Node_Name: RwLock<String> = RwLock::new("".to_string());
|
||||
pub static ref GLOBAL_Rustfs_Host: RwLock<String> = RwLock::new("".to_string());
|
||||
pub static ref GLOBAL_Rustfs_Port: RwLock<String> = RwLock::new("9000".to_string());
|
||||
pub static ref GLOBAL_Rustfs_Addr: RwLock<String> = RwLock::new("".to_string());
|
||||
pub static ref GLOBAL_Conn_Map: RwLock<HashMap<String, Channel>> = RwLock::new(HashMap::new());
|
||||
}
|
||||
pub static GLOBAL_Local_Node_Name: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
|
||||
pub static GLOBAL_Rustfs_Host: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
|
||||
pub static GLOBAL_Rustfs_Port: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("9000".to_string()));
|
||||
pub static GLOBAL_Rustfs_Addr: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
|
||||
pub static GLOBAL_Conn_Map: LazyLock<RwLock<HashMap<String, Channel>>> = LazyLock::new(|| RwLock::new(HashMap::new()));
|
||||
|
||||
pub async fn set_global_addr(addr: &str) {
|
||||
*GLOBAL_Rustfs_Addr.write().await = addr.to_string();
|
||||
|
||||
@@ -108,14 +108,26 @@ pub const DEFAULT_CONSOLE_ADDRESS: &str = concat!(":", DEFAULT_CONSOLE_PORT);
|
||||
/// It is used to store the logs of the application.
|
||||
/// Default value: rustfs.log
|
||||
/// Environment variable: RUSTFS_OBSERVABILITY_LOG_FILENAME
|
||||
pub const DEFAULT_LOG_FILENAME: &str = "rustfs.log";
|
||||
pub const DEFAULT_LOG_FILENAME: &str = "rustfs";
|
||||
|
||||
/// Default OBS log filename for rustfs
|
||||
/// This is the default log filename for OBS.
|
||||
/// It is used to store the logs of the application.
|
||||
/// Default value: rustfs.log
|
||||
pub const DEFAULT_OBS_LOG_FILENAME: &str = concat!(DEFAULT_LOG_FILENAME, ".log");
|
||||
|
||||
/// Default sink file log file for rustfs
|
||||
/// This is the default sink file log file for rustfs.
|
||||
/// It is used to store the logs of the application.
|
||||
/// Default value: rustfs-sink.log
|
||||
pub const DEFAULT_SINK_FILE_LOG_FILE: &str = concat!(DEFAULT_LOG_FILENAME, "-sink.log");
|
||||
|
||||
/// Default log directory for rustfs
|
||||
/// This is the default log directory for rustfs.
|
||||
/// It is used to store the logs of the application.
|
||||
/// Default value: logs
|
||||
/// Environment variable: RUSTFS_OBSERVABILITY_LOG_DIRECTORY
|
||||
pub const DEFAULT_LOG_DIR: &str = "deploy/logs";
|
||||
pub const DEFAULT_LOG_DIR: &str = "/logs";
|
||||
|
||||
/// Default log rotation size mb for rustfs
|
||||
/// This is the default log rotation size for rustfs.
|
||||
|
||||
@@ -33,11 +33,11 @@ pub fn decrypt_data(password: &[u8], data: &[u8]) -> Result<Vec<u8>, crate::Erro
|
||||
match id {
|
||||
ID::Argon2idChaCHa20Poly1305 => {
|
||||
let key = id.get_key(password, salt)?;
|
||||
decryp(ChaCha20Poly1305::new_from_slice(&key)?, nonce, data)
|
||||
decrypt(ChaCha20Poly1305::new_from_slice(&key)?, nonce, data)
|
||||
}
|
||||
_ => {
|
||||
let key = id.get_key(password, salt)?;
|
||||
decryp(Aes256Gcm::new_from_slice(&key)?, nonce, data)
|
||||
decrypt(Aes256Gcm::new_from_slice(&key)?, nonce, data)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -135,7 +135,7 @@ pub fn decrypt_data(password: &[u8], data: &[u8]) -> Result<Vec<u8>, crate::Erro
|
||||
|
||||
#[cfg(any(test, feature = "crypto"))]
|
||||
#[inline]
|
||||
fn decryp<T: aes_gcm::aead::Aead>(stream: T, nonce: &[u8], data: &[u8]) -> Result<Vec<u8>, crate::Error> {
|
||||
fn decrypt<T: aes_gcm::aead::Aead>(stream: T, nonce: &[u8], data: &[u8]) -> Result<Vec<u8>, crate::Error> {
|
||||
use crate::error::Error;
|
||||
stream
|
||||
.decrypt(aes_gcm::Nonce::from_slice(nonce), data)
|
||||
|
||||
@@ -109,8 +109,8 @@ winapi = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
tokio = { workspace = true, features = ["rt-multi-thread", "macros"] }
|
||||
criterion = { version = "0.5", features = ["html_reports"] }
|
||||
temp-env = "0.3.6"
|
||||
criterion = { workspace = true, features = ["html_reports"] }
|
||||
temp-env = { workspace = true }
|
||||
|
||||
[build-dependencies]
|
||||
shadow-rs = { workspace = true, features = ["build", "metadata"] }
|
||||
|
||||
@@ -253,7 +253,7 @@ pub async fn get_server_info(get_pools: bool) -> InfoMessage {
|
||||
|
||||
warn!("load_data_usage_from_backend end {:?}", after3 - after2);
|
||||
|
||||
let backen_info = store.clone().backend_info().await;
|
||||
let backend_info = store.clone().backend_info().await;
|
||||
|
||||
let after4 = OffsetDateTime::now_utc();
|
||||
|
||||
@@ -272,10 +272,10 @@ pub async fn get_server_info(get_pools: bool) -> InfoMessage {
|
||||
backend_type: rustfs_madmin::BackendType::ErasureType,
|
||||
online_disks: online_disks.sum(),
|
||||
offline_disks: offline_disks.sum(),
|
||||
standard_sc_parity: backen_info.standard_sc_parity,
|
||||
rr_sc_parity: backen_info.rr_sc_parity,
|
||||
total_sets: backen_info.total_sets,
|
||||
drives_per_set: backen_info.drives_per_set,
|
||||
standard_sc_parity: backend_info.standard_sc_parity,
|
||||
rr_sc_parity: backend_info.rr_sc_parity,
|
||||
total_sets: backend_info.total_sets,
|
||||
drives_per_set: backend_info.drives_per_set,
|
||||
};
|
||||
if get_pools {
|
||||
pools = get_pools_info(&all_disks).await.unwrap_or_default();
|
||||
|
||||
@@ -43,15 +43,16 @@ pub async fn create_bitrot_reader(
) -> disk::error::Result<Option<BitrotReader<Box<dyn AsyncRead + Send + Sync + Unpin>>>> {
// Calculate the total length to read, including the checksum overhead
let length = length.div_ceil(shard_size) * checksum_algo.size() + length;

let offset = offset.div_ceil(shard_size) * checksum_algo.size() + offset;
if let Some(data) = inline_data {
// Use inline data
let rd = Cursor::new(data.to_vec());
let mut rd = Cursor::new(data.to_vec());
rd.set_position(offset as u64);
let reader = BitrotReader::new(Box::new(rd) as Box<dyn AsyncRead + Send + Sync + Unpin>, shard_size, checksum_algo);
Ok(Some(reader))
} else if let Some(disk) = disk {
// Read from disk
match disk.read_file_stream(bucket, path, offset, length).await {
match disk.read_file_stream(bucket, path, offset, length - offset).await {
Ok(rd) => {
let reader = BitrotReader::new(rd, shard_size, checksum_algo);
Ok(Some(reader))

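The length and offset arithmetic above folds in the bitrot checksum written ahead of every shard: a range of `length` bytes spans `ceil(length / shard_size)` shards, and each shard is prefixed by one fixed-size digest. A small sketch of the same calculation (the function name and the 32-byte HighwayHash256 digest size are illustrative assumptions):

    // On-disk size of a bitrot-protected range: one checksum per shard of
    // `shard_size` payload bytes, followed by the payload itself.
    fn bitrot_stream_len(len: usize, shard_size: usize, checksum_size: usize) -> usize {
        len.div_ceil(shard_size) * checksum_size + len
    }

    #[test]
    fn overhead_example() {
        // 3 MiB in 1 MiB shards with 32-byte checksums adds 3 * 32 bytes of overhead.
        assert_eq!(bitrot_stream_len(3 << 20, 1 << 20, 32), (3 << 20) + 3 * 32);
    }
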
@@ -18,7 +18,7 @@ use futures::future::join_all;
use rustfs_filemeta::{MetaCacheEntries, MetaCacheEntry, MetacacheReader, is_io_eof};
use std::{future::Future, pin::Pin, sync::Arc};
use tokio::{spawn, sync::broadcast::Receiver as B_Receiver};
use tracing::error;
use tracing::{error, warn};

pub type AgreedFn = Box<dyn Fn(MetaCacheEntry) -> Pin<Box<dyn Future<Output = ()> + Send>> + Send + 'static>;
pub type PartialFn =
@@ -31,7 +31,7 @@ pub struct ListPathRawOptions {
pub fallback_disks: Vec<Option<DiskStore>>,
pub bucket: String,
pub path: String,
pub recursice: bool,
pub recursive: bool,
pub filter_prefix: Option<String>,
pub forward_to: Option<String>,
pub min_disks: usize,
@@ -52,7 +52,7 @@ impl Clone for ListPathRawOptions {
fallback_disks: self.fallback_disks.clone(),
bucket: self.bucket.clone(),
path: self.path.clone(),
recursice: self.recursice,
recursive: self.recursive,
filter_prefix: self.filter_prefix.clone(),
forward_to: self.forward_to.clone(),
min_disks: self.min_disks,
@@ -85,7 +85,7 @@ pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -
let wakl_opts = WalkDirOptions {
bucket: opts_clone.bucket.clone(),
base_dir: opts_clone.path.clone(),
recursive: opts_clone.recursice,
recursive: opts_clone.recursive,
report_notfound: opts_clone.report_not_found,
filter_prefix: opts_clone.filter_prefix.clone(),
forward_to: opts_clone.forward_to.clone(),
@@ -118,10 +118,14 @@ pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -
if let Some(disk) = d.clone() {
disk
} else {
warn!("list_path_raw: fallback disk is none");
break;
}
}
None => break,
None => {
warn!("list_path_raw: fallback disk is none2");
break;
}
};
match disk
.as_ref()
@@ -129,7 +133,7 @@ pub async fn list_path_raw(mut rx: B_Receiver<bool>, opts: ListPathRawOptions) -
WalkDirOptions {
bucket: opts_clone.bucket.clone(),
base_dir: opts_clone.path.clone(),
recursive: opts_clone.recursice,
recursive: opts_clone.recursive,
report_notfound: opts_clone.report_not_found,
filter_prefix: opts_clone.filter_prefix.clone(),
forward_to: opts_clone.forward_to.clone(),

@@ -41,7 +41,7 @@
|
||||
// pin_mut!(body);
|
||||
// // 上一次没用完的数据
|
||||
// let mut prev_bytes = Bytes::new();
|
||||
// let mut readed_size = 0;
|
||||
// let mut read_size = 0;
|
||||
|
||||
// loop {
|
||||
// let data: Vec<Bytes> = {
|
||||
@@ -51,9 +51,9 @@
|
||||
// Some(Err(e)) => return Err(e),
|
||||
// Some(Ok((data, remaining_bytes))) => {
|
||||
// // debug!(
|
||||
// // "content_length:{},readed_size:{}, read_data data:{}, remaining_bytes: {} ",
|
||||
// // "content_length:{},read_size:{}, read_data data:{}, remaining_bytes: {} ",
|
||||
// // content_length,
|
||||
// // readed_size,
|
||||
// // read_size,
|
||||
// // data.len(),
|
||||
// // remaining_bytes.len()
|
||||
// // );
|
||||
@@ -65,15 +65,15 @@
|
||||
// };
|
||||
|
||||
// for bytes in data {
|
||||
// readed_size += bytes.len();
|
||||
// // debug!("readed_size {}, content_length {}", readed_size, content_length,);
|
||||
// read_size += bytes.len();
|
||||
// // debug!("read_size {}, content_length {}", read_size, content_length,);
|
||||
// y.yield_ok(bytes).await;
|
||||
// }
|
||||
|
||||
// if readed_size + prev_bytes.len() >= content_length {
|
||||
// if read_size + prev_bytes.len() >= content_length {
|
||||
// // debug!(
|
||||
// // "读完了 readed_size:{} + prev_bytes.len({}) == content_length {}",
|
||||
// // readed_size,
|
||||
// // "读完了 read_size:{} + prev_bytes.len({}) == content_length {}",
|
||||
// // read_size,
|
||||
// // prev_bytes.len(),
|
||||
// // content_length,
|
||||
// // );
|
||||
|
||||
@@ -135,7 +135,7 @@ impl Default for PutObjectOptions {

#[allow(dead_code)]
impl PutObjectOptions {
fn set_matche_tag(&mut self, etag: &str) {
fn set_match_tag(&mut self, etag: &str) {
if etag == "*" {
self.custom_header
.insert("If-Match", HeaderValue::from_str("*").expect("err"));
@@ -145,7 +145,7 @@ impl PutObjectOptions {
}
}

fn set_matche_tag_except(&mut self, etag: &str) {
fn set_match_tag_except(&mut self, etag: &str) {
if etag == "*" {
self.custom_header
.insert("If-None-Match", HeaderValue::from_str("*").expect("err"));
@@ -181,7 +181,7 @@ impl PutObjectOptions {
header.insert(
"Expires",
HeaderValue::from_str(&self.expires.format(ISO8601_DATEFORMAT).unwrap()).expect("err"),
); //rustfs invalid heade
); //rustfs invalid header
}

if self.mode.as_str() != "" {

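`set_match_tag` and `set_match_tag_except` map etag preconditions onto the standard conditional headers: If-Match lets a request proceed only when the stored etag matches, If-None-Match only when it does not, and `*` matches "any existing object". A minimal sketch of the same idea with the header types the hunk already uses (the free function is illustrative, not part of the client):

    use http::{HeaderMap, HeaderValue};

    // If-Match: overwrite only while the object still carries `etag`.
    // If-None-Match: "*" turns a put into create-if-absent.
    fn add_precondition(headers: &mut HeaderMap, if_match: Option<&str>) {
        match if_match {
            Some(etag) => {
                headers.insert("If-Match", HeaderValue::from_str(etag).expect("valid etag"));
            }
            None => {
                headers.insert("If-None-Match", HeaderValue::from_static("*"));
            }
        }
    }
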
@@ -2422,7 +2422,7 @@ impl ReplicateObjectInfo {
// let mut arns = Vec::new();
// let mut tgts_map = std::collections::HashSet::new();
// for rule in cfg.rules {
// if rule.status.as_str() == "Disabe" {
// if rule.status.as_str() == "Disable" {
// continue;
// }

@@ -95,7 +95,7 @@ impl ArnTarget {
|
||||
Self {
|
||||
client: TargetClient {
|
||||
bucket,
|
||||
storage_class: "STANDRD".to_string(),
|
||||
storage_class: "STANDARD".to_string(),
|
||||
disable_proxy: false,
|
||||
health_check_duration: Duration::from_secs(100),
|
||||
endpoint,
|
||||
@@ -361,7 +361,7 @@ impl BucketTargetSys {
|
||||
// // Mocked implementation for obtaining a remote client
|
||||
// let tcli = TargetClient {
|
||||
// bucket: _tgt.target_bucket.clone(),
|
||||
// storage_class: "STANDRD".to_string(),
|
||||
// storage_class: "STANDARD".to_string(),
|
||||
// disable_proxy: false,
|
||||
// health_check_duration: Duration::from_secs(100),
|
||||
// endpoint: _tgt.endpoint.clone(),
|
||||
@@ -379,7 +379,7 @@ impl BucketTargetSys {
|
||||
// // Mocked implementation for obtaining a remote client
|
||||
// let tcli = TargetClient {
|
||||
// bucket: _tgt.target_bucket.clone(),
|
||||
// storage_class: "STANDRD".to_string(),
|
||||
// storage_class: "STANDARD".to_string(),
|
||||
// disable_proxy: false,
|
||||
// health_check_duration: Duration::from_secs(100),
|
||||
// endpoint: _tgt.endpoint.clone(),
|
||||
@@ -403,7 +403,7 @@ impl BucketTargetSys {
|
||||
match store.get_bucket_info(_bucket, &store_api::BucketOptions::default()).await {
|
||||
Ok(info) => {
|
||||
println!("Bucket Info: {info:?}");
|
||||
info.versionning
|
||||
info.versioning
|
||||
}
|
||||
Err(err) => {
|
||||
eprintln!("Error: {err:?}");
|
||||
@@ -431,7 +431,7 @@ impl BucketTargetSys {
|
||||
// {
|
||||
// Ok(info) => {
|
||||
// println!("Bucket Info: {:?}", info);
|
||||
// info.versionning
|
||||
// info.versioning
|
||||
// }
|
||||
// Err(err) => {
|
||||
// eprintln!("Error: {:?}", err);
|
||||
@@ -475,8 +475,7 @@ impl BucketTargetSys {
|
||||
{
|
||||
Ok(info) => {
|
||||
println!("Bucket Info: {info:?}");
|
||||
if !info.versionning {
|
||||
println!("2222222222 {}", info.versionning);
|
||||
if !info.versioning {
|
||||
return Err(SetTargetError::TargetNotVersioned(tgt.target_bucket.to_string()));
|
||||
}
|
||||
}
|
||||
|
||||
@@ -288,6 +288,12 @@ impl From<rmp_serde::encode::Error> for DiskError {
}
}

impl From<rmp_serde::decode::Error> for DiskError {
fn from(e: rmp_serde::decode::Error) -> Self {
DiskError::other(e)
}
}

impl From<rmp::encode::ValueWriteError> for DiskError {
fn from(e: rmp::encode::ValueWriteError) -> Self {
DiskError::other(e)

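The added `From<rmp_serde::decode::Error>` impl means MessagePack deserialization failures can now bubble up through `?` exactly like the encode-side errors above it. A minimal sketch (the `FileInfo` target type here is only for illustration):

    fn decode_file_info(buf: &[u8]) -> Result<FileInfo, DiskError> {
        // rmp_serde::decode::Error converts into DiskError via the new impl.
        let fi: FileInfo = rmp_serde::from_slice(buf)?;
        Ok(fi)
    }
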
@@ -57,8 +57,8 @@ use bytes::Bytes;
|
||||
use path_absolutize::Absolutize;
|
||||
use rustfs_common::defer;
|
||||
use rustfs_filemeta::{
|
||||
Cache, FileInfo, FileInfoOpts, FileMeta, MetaCacheEntry, MetacacheWriter, Opts, RawFileInfo, UpdateFn, get_file_info,
|
||||
read_xl_meta_no_data,
|
||||
Cache, FileInfo, FileInfoOpts, FileMeta, MetaCacheEntry, MetacacheWriter, ObjectPartInfo, Opts, RawFileInfo, UpdateFn,
|
||||
get_file_info, read_xl_meta_no_data,
|
||||
};
|
||||
use rustfs_utils::HashAlgorithm;
|
||||
use rustfs_utils::os::get_info;
|
||||
@@ -563,7 +563,7 @@ impl LocalDisk {
|
||||
}
|
||||
|
||||
async fn read_metadata(&self, file_path: impl AsRef<Path>) -> Result<Vec<u8>> {
|
||||
// TODO: suport timeout
|
||||
// TODO: support timeout
|
||||
let (data, _) = self.read_metadata_with_dmtime(file_path.as_ref()).await?;
|
||||
Ok(data)
|
||||
}
|
||||
@@ -595,7 +595,7 @@ impl LocalDisk {
|
||||
}
|
||||
|
||||
async fn read_all_data(&self, volume: &str, volume_dir: impl AsRef<Path>, file_path: impl AsRef<Path>) -> Result<Vec<u8>> {
|
||||
// TODO: timeout suport
|
||||
// TODO: timeout support
|
||||
let (data, _) = self.read_all_data_with_dmtime(volume, volume_dir, file_path).await?;
|
||||
Ok(data)
|
||||
}
|
||||
@@ -750,7 +750,7 @@ impl LocalDisk {
|
||||
|
||||
let mut f = {
|
||||
if sync {
|
||||
// TODO: suport sync
|
||||
// TODO: support sync
|
||||
self.open_file(file_path, flags, skip_parent).await?
|
||||
} else {
|
||||
self.open_file(file_path, flags, skip_parent).await?
|
||||
@@ -1312,6 +1312,67 @@ impl DiskAPI for LocalDisk {
|
||||
Ok(resp)
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn read_parts(&self, bucket: &str, paths: &[String]) -> Result<Vec<ObjectPartInfo>> {
|
||||
let volume_dir = self.get_bucket_path(bucket)?;
|
||||
|
||||
let mut ret = vec![ObjectPartInfo::default(); paths.len()];
|
||||
|
||||
for (i, path_str) in paths.iter().enumerate() {
|
||||
let path = Path::new(path_str);
|
||||
let file_name = path.file_name().and_then(|v| v.to_str()).unwrap_or_default();
|
||||
let num = file_name
|
||||
.strip_prefix("part.")
|
||||
.and_then(|v| v.strip_suffix(".meta"))
|
||||
.and_then(|v| v.parse::<usize>().ok())
|
||||
.unwrap_or_default();
|
||||
|
||||
if let Err(err) = access(
|
||||
volume_dir
|
||||
.clone()
|
||||
.join(path.parent().unwrap_or(Path::new("")).join(format!("part.{num}"))),
|
||||
)
|
||||
.await
|
||||
{
|
||||
ret[i] = ObjectPartInfo {
|
||||
number: num,
|
||||
error: Some(err.to_string()),
|
||||
..Default::default()
|
||||
};
|
||||
continue;
|
||||
}
|
||||
|
||||
let data = match self
|
||||
.read_all_data(bucket, volume_dir.clone(), volume_dir.clone().join(path))
|
||||
.await
|
||||
{
|
||||
Ok(data) => data,
|
||||
Err(err) => {
|
||||
ret[i] = ObjectPartInfo {
|
||||
number: num,
|
||||
error: Some(err.to_string()),
|
||||
..Default::default()
|
||||
};
|
||||
continue;
|
||||
}
|
||||
};
|
||||
|
||||
match ObjectPartInfo::unmarshal(&data) {
|
||||
Ok(meta) => {
|
||||
ret[i] = meta;
|
||||
}
|
||||
Err(err) => {
|
||||
ret[i] = ObjectPartInfo {
|
||||
number: num,
|
||||
error: Some(err.to_string()),
|
||||
..Default::default()
|
||||
};
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
Ok(ret)
|
||||
}
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn check_parts(&self, volume: &str, path: &str, fi: &FileInfo) -> Result<CheckPartsResp> {
|
||||
let volume_dir = self.get_bucket_path(volume)?;
|
||||
@@ -1550,11 +1611,6 @@ impl DiskAPI for LocalDisk {
|
||||
|
||||
#[tracing::instrument(level = "debug", skip(self))]
|
||||
async fn read_file_stream(&self, volume: &str, path: &str, offset: usize, length: usize) -> Result<FileReader> {
|
||||
// warn!(
|
||||
// "disk read_file_stream: volume: {}, path: {}, offset: {}, length: {}",
|
||||
// volume, path, offset, length
|
||||
// );
|
||||
|
||||
let volume_dir = self.get_bucket_path(volume)?;
|
||||
if !skip_access_checks(volume) {
|
||||
access(&volume_dir)
|
||||
@@ -2280,7 +2336,7 @@ impl DiskAPI for LocalDisk {
|
||||
};
|
||||
done_sz(buf.len() as u64);
|
||||
res.insert("metasize".to_string(), buf.len().to_string());
|
||||
item.transform_meda_dir();
|
||||
item.transform_meta_dir();
|
||||
let meta_cache = MetaCacheEntry {
|
||||
name: item.object_path().to_string_lossy().to_string(),
|
||||
metadata: buf,
|
||||
|
||||
@@ -41,7 +41,7 @@ use endpoint::Endpoint;
|
||||
use error::DiskError;
|
||||
use error::{Error, Result};
|
||||
use local::LocalDisk;
|
||||
use rustfs_filemeta::{FileInfo, RawFileInfo};
|
||||
use rustfs_filemeta::{FileInfo, ObjectPartInfo, RawFileInfo};
|
||||
use rustfs_madmin::info_commands::DiskMetrics;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::{fmt::Debug, path::PathBuf, sync::Arc};
|
||||
@@ -331,6 +331,14 @@ impl DiskAPI for Disk {
|
||||
}
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn read_parts(&self, bucket: &str, paths: &[String]) -> Result<Vec<ObjectPartInfo>> {
|
||||
match self {
|
||||
Disk::Local(local_disk) => local_disk.read_parts(bucket, paths).await,
|
||||
Disk::Remote(remote_disk) => remote_disk.read_parts(bucket, paths).await,
|
||||
}
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn rename_part(&self, src_volume: &str, src_path: &str, dst_volume: &str, dst_path: &str, meta: Bytes) -> Result<()> {
|
||||
match self {
|
||||
@@ -513,7 +521,7 @@ pub trait DiskAPI: Debug + Send + Sync + 'static {
|
||||
// CheckParts
|
||||
async fn check_parts(&self, volume: &str, path: &str, fi: &FileInfo) -> Result<CheckPartsResp>;
|
||||
// StatInfoFile
|
||||
// ReadParts
|
||||
async fn read_parts(&self, bucket: &str, paths: &[String]) -> Result<Vec<ObjectPartInfo>>;
|
||||
async fn read_multiple(&self, req: ReadMultipleReq) -> Result<Vec<ReadMultipleResp>>;
|
||||
// CleanAbandonedData
|
||||
async fn write_all(&self, volume: &str, path: &str, data: Bytes) -> Result<()>;
|
||||
|
||||
@@ -308,7 +308,7 @@ impl Erasure {
// ec encode, 结果会写进 data_buffer
let data_slices: SmallVec<[&mut [u8]; 16]> = data_buffer.chunks_exact_mut(shard_size).collect();

// partiy 数量大于 0 才 ec
// parity 数量大于 0 才 ec
if self.parity_shards > 0 {
self.encoder.as_ref().unwrap().encode(data_slices).map_err(Error::other)?;
}

@@ -563,12 +563,12 @@ impl CurrentScannerCycle {
}
}

// 将 SystemTime 转换为时间戳
// Convert `SystemTime` to timestamp
fn system_time_to_timestamp(time: &DateTime<Utc>) -> i64 {
time.timestamp_micros()
}

// 将时间戳转换为 SystemTime
// Convert timestamp to `SystemTime`
fn timestamp_to_system_time(timestamp: i64) -> DateTime<Utc> {
DateTime::from_timestamp_micros(timestamp).unwrap_or_default()
}

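The two converters above translate between `DateTime<Utc>` and a microsecond unix timestamp, so any value already truncated to whole microseconds round-trips unchanged. A short sketch of that invariant (test name and sample value are illustrative):

    #[test]
    fn micros_roundtrip() {
        let ts: i64 = 1_700_000_000_123_456; // microseconds since the unix epoch
        let dt = timestamp_to_system_time(ts);
        assert_eq!(system_time_to_timestamp(&dt), ts);
    }
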
@@ -593,7 +593,7 @@ pub struct ScannerItem {
|
||||
}
|
||||
|
||||
impl ScannerItem {
|
||||
pub fn transform_meda_dir(&mut self) {
|
||||
pub fn transform_meta_dir(&mut self) {
|
||||
let split = self.prefix.split(SLASH_SEPARATOR).map(PathBuf::from).collect::<Vec<_>>();
|
||||
if split.len() > 1 {
|
||||
self.prefix = path_join(&split[0..split.len() - 1]).to_string_lossy().to_string();
|
||||
@@ -1101,7 +1101,7 @@ impl FolderScanner {
|
||||
// successfully read means we have a valid object.
|
||||
found_objects = true;
|
||||
// Remove filename i.e is the meta file to construct object name
|
||||
item.transform_meda_dir();
|
||||
item.transform_meta_dir();
|
||||
// Object already accounted for, remove from heal map,
|
||||
// simply because getSize() function already heals the
|
||||
// object.
|
||||
@@ -1262,7 +1262,7 @@ impl FolderScanner {
|
||||
disks: self.disks.clone(),
|
||||
bucket: bucket.clone(),
|
||||
path: prefix.clone(),
|
||||
recursice: true,
|
||||
recursive: true,
|
||||
report_not_found: true,
|
||||
min_disks: self.disks_quorum,
|
||||
agreed: Some(Box::new(move |entry: MetaCacheEntry| {
|
||||
|
||||
@@ -1355,7 +1355,7 @@ impl SetDisks {
|
||||
disks: disks.iter().cloned().map(Some).collect(),
|
||||
bucket: bucket_info.name.clone(),
|
||||
path: bucket_info.prefix.clone(),
|
||||
recursice: true,
|
||||
recursive: true,
|
||||
min_disks: listing_quorum,
|
||||
agreed: Some(Box::new(move |entry: MetaCacheEntry| Box::pin(cb1(entry)))),
|
||||
partial: Some(Box::new(move |entries: MetaCacheEntries, _: &[Option<DiskError>]| {
|
||||
|
||||
@@ -1172,7 +1172,7 @@ impl SetDisks {
|
||||
ListPathRawOptions {
|
||||
disks: disks.iter().cloned().map(Some).collect(),
|
||||
bucket: bucket.clone(),
|
||||
recursice: true,
|
||||
recursive: true,
|
||||
min_disks: listing_quorum,
|
||||
agreed: Some(Box::new(move |entry: MetaCacheEntry| {
|
||||
info!("list_objects_to_rebalance: agreed: {:?}", &entry.name);
|
||||
|
||||
@@ -449,7 +449,7 @@ impl PeerS3Client for LocalPeerS3Client {
|
||||
op.as_ref().map(|v| BucketInfo {
|
||||
name: v.name.clone(),
|
||||
created: v.created,
|
||||
versionning: versioned,
|
||||
versioning: versioned,
|
||||
..Default::default()
|
||||
})
|
||||
})
|
||||
|
||||
@@ -22,8 +22,8 @@ use rustfs_protos::{
|
||||
proto_gen::node_service::{
|
||||
CheckPartsRequest, DeletePathsRequest, DeleteRequest, DeleteVersionRequest, DeleteVersionsRequest, DeleteVolumeRequest,
|
||||
DiskInfoRequest, ListDirRequest, ListVolumesRequest, MakeVolumeRequest, MakeVolumesRequest, NsScannerRequest,
|
||||
ReadAllRequest, ReadMultipleRequest, ReadVersionRequest, ReadXlRequest, RenameDataRequest, RenameFileRequest,
|
||||
StatVolumeRequest, UpdateMetadataRequest, VerifyFileRequest, WriteAllRequest, WriteMetadataRequest,
|
||||
ReadAllRequest, ReadMultipleRequest, ReadPartsRequest, ReadVersionRequest, ReadXlRequest, RenameDataRequest,
|
||||
RenameFileRequest, StatVolumeRequest, UpdateMetadataRequest, VerifyFileRequest, WriteAllRequest, WriteMetadataRequest,
|
||||
},
|
||||
};
|
||||
|
||||
@@ -44,7 +44,7 @@ use crate::{
|
||||
heal_commands::{HealScanMode, HealingTracker},
|
||||
},
|
||||
};
|
||||
use rustfs_filemeta::{FileInfo, RawFileInfo};
|
||||
use rustfs_filemeta::{FileInfo, ObjectPartInfo, RawFileInfo};
|
||||
use rustfs_protos::proto_gen::node_service::RenamePartRequest;
|
||||
use rustfs_rio::{HttpReader, HttpWriter};
|
||||
use tokio::{
|
||||
@@ -790,6 +790,27 @@ impl DiskAPI for RemoteDisk {
|
||||
Ok(check_parts_resp)
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn read_parts(&self, bucket: &str, paths: &[String]) -> Result<Vec<ObjectPartInfo>> {
|
||||
let mut client = node_service_time_out_client(&self.addr)
|
||||
.await
|
||||
.map_err(|err| Error::other(format!("can not get client, err: {err}")))?;
|
||||
let request = Request::new(ReadPartsRequest {
|
||||
disk: self.endpoint.to_string(),
|
||||
bucket: bucket.to_string(),
|
||||
paths: paths.to_vec(),
|
||||
});
|
||||
|
||||
let response = client.read_parts(request).await?.into_inner();
|
||||
if !response.success {
|
||||
return Err(response.error.unwrap_or_default().into());
|
||||
}
|
||||
|
||||
let read_parts_resp = rmp_serde::from_slice::<Vec<ObjectPartInfo>>(&response.object_part_infos)?;
|
||||
|
||||
Ok(read_parts_resp)
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn check_parts(&self, volume: &str, path: &str, fi: &FileInfo) -> Result<CheckPartsResp> {
|
||||
info!("check_parts");
|
||||
|
||||
@@ -404,7 +404,42 @@ impl Node for NodeService {
|
||||
}))
|
||||
}
|
||||
}
|
||||
async fn read_parts(&self, request: Request<ReadPartsRequest>) -> Result<Response<ReadPartsResponse>, Status> {
|
||||
let request = request.into_inner();
|
||||
if let Some(disk) = self.find_disk(&request.disk).await {
|
||||
match disk.read_parts(&request.bucket, &request.paths).await {
|
||||
Ok(data) => {
|
||||
let data = match rmp_serde::to_vec(&data) {
|
||||
Ok(data) => data,
|
||||
Err(err) => {
|
||||
return Ok(tonic::Response::new(ReadPartsResponse {
|
||||
success: false,
|
||||
object_part_infos: Bytes::new(),
|
||||
error: Some(DiskError::other(format!("encode data failed: {err}")).into()),
|
||||
}));
|
||||
}
|
||||
};
|
||||
Ok(tonic::Response::new(ReadPartsResponse {
|
||||
success: true,
|
||||
object_part_infos: Bytes::copy_from_slice(&data),
|
||||
error: None,
|
||||
}))
|
||||
}
|
||||
|
||||
Err(err) => Ok(tonic::Response::new(ReadPartsResponse {
|
||||
success: false,
|
||||
object_part_infos: Bytes::new(),
|
||||
error: Some(err.into()),
|
||||
})),
|
||||
}
|
||||
} else {
|
||||
Ok(tonic::Response::new(ReadPartsResponse {
|
||||
success: false,
|
||||
object_part_infos: Bytes::new(),
|
||||
error: Some(DiskError::other("can not find disk".to_string()).into()),
|
||||
}))
|
||||
}
|
||||
}
|
||||
async fn check_parts(&self, request: Request<CheckPartsRequest>) -> Result<Response<CheckPartsResponse>, Status> {
|
||||
let request = request.into_inner();
|
||||
if let Some(disk) = self.find_disk(&request.disk).await {
|
||||
|
||||
@@ -24,13 +24,13 @@ use crate::disk::{
};
use crate::erasure_coding;
use crate::erasure_coding::bitrot_verify;
use crate::error::ObjectApiError;
use crate::error::{Error, Result};
use crate::error::{ObjectApiError, is_err_object_not_found};
use crate::global::GLOBAL_MRFState;
use crate::global::{GLOBAL_LocalNodeName, GLOBAL_TierConfigMgr};
use crate::heal::data_usage_cache::DataUsageCache;
use crate::heal::heal_ops::{HealEntryFn, HealSequence};
use crate::store_api::ObjectToDelete;
use crate::store_api::{ListPartsInfo, ObjectToDelete};
use crate::{
bucket::lifecycle::bucket_lifecycle_ops::{gen_transition_objname, get_transitioned_object_reader, put_restore_opts},
cache_value::metacache_set::{ListPathRawOptions, list_path_raw},
@@ -119,6 +119,7 @@ use tracing::{debug, info, warn};
use uuid::Uuid;

pub const DEFAULT_READ_BUFFER_SIZE: usize = 1024 * 1024;
pub const MAX_PARTS_COUNT: usize = 10000;

#[derive(Debug, Clone)]
pub struct SetDisks {
@@ -316,6 +317,9 @@ impl SetDisks {
.filter(|v| v.as_ref().is_some_and(|d| d.is_local()))
.collect()
}
fn default_read_quorum(&self) -> usize {
self.set_drive_count - self.default_parity_count
}
fn default_write_quorum(&self) -> usize {
let mut data_count = self.set_drive_count - self.default_parity_count;
if data_count == self.default_parity_count {
@@ -550,6 +554,183 @@ impl SetDisks {
}
}

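The two defaults above encode the usual erasure-set arithmetic: a read needs as many drives as there are data shards, and a write needs one extra drive when the data and parity counts are equal, so that two conflicting writes can never both reach quorum. A compact sketch of the rule (the helper itself is illustrative):

    // Returns (read_quorum, write_quorum) for an erasure set.
    fn default_quorums(set_drive_count: usize, parity: usize) -> (usize, usize) {
        let data = set_drive_count - parity;
        let write = if data == parity { data + 1 } else { data };
        (data, write)
    }

    // e.g. 12 drives at EC:4 -> (8, 8); 8 drives at EC:4 -> (4, 5).
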
async fn read_parts(
|
||||
disks: &[Option<DiskStore>],
|
||||
bucket: &str,
|
||||
part_meta_paths: &[String],
|
||||
part_numbers: &[usize],
|
||||
read_quorum: usize,
|
||||
) -> disk::error::Result<Vec<ObjectPartInfo>> {
|
||||
let mut futures = Vec::with_capacity(disks.len());
|
||||
for (i, disk) in disks.iter().enumerate() {
|
||||
futures.push(async move {
|
||||
if let Some(disk) = disk {
|
||||
disk.read_parts(bucket, part_meta_paths).await
|
||||
} else {
|
||||
Err(DiskError::DiskNotFound)
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
let mut errs = Vec::with_capacity(disks.len());
|
||||
let mut object_parts = Vec::with_capacity(disks.len());
|
||||
|
||||
let results = join_all(futures).await;
|
||||
for result in results {
|
||||
match result {
|
||||
Ok(res) => {
|
||||
errs.push(None);
|
||||
object_parts.push(res);
|
||||
}
|
||||
Err(e) => {
|
||||
errs.push(Some(e));
|
||||
object_parts.push(vec![]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(err) = reduce_read_quorum_errs(&errs, OBJECT_OP_IGNORED_ERRS, read_quorum) {
|
||||
return Err(err);
|
||||
}
|
||||
|
||||
let mut ret = vec![ObjectPartInfo::default(); part_meta_paths.len()];
|
||||
|
||||
for (part_idx, part_info) in part_meta_paths.iter().enumerate() {
|
||||
let mut part_meta_quorum = HashMap::new();
|
||||
let mut part_infos = Vec::new();
|
||||
for (j, parts) in object_parts.iter().enumerate() {
|
||||
if parts.len() != part_meta_paths.len() {
|
||||
*part_meta_quorum.entry(part_info.clone()).or_insert(0) += 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
if !parts[part_idx].etag.is_empty() {
|
||||
*part_meta_quorum.entry(parts[part_idx].etag.clone()).or_insert(0) += 1;
|
||||
part_infos.push(parts[part_idx].clone());
|
||||
continue;
|
||||
}
|
||||
|
||||
*part_meta_quorum.entry(part_info.clone()).or_insert(0) += 1;
|
||||
}
|
||||
|
||||
let mut max_quorum = 0;
|
||||
let mut max_etag = None;
|
||||
let mut max_part_meta = None;
|
||||
for (etag, quorum) in part_meta_quorum.iter() {
|
||||
if quorum > &max_quorum {
|
||||
max_quorum = *quorum;
|
||||
max_etag = Some(etag);
|
||||
max_part_meta = Some(etag);
|
||||
}
|
||||
}
|
||||
|
||||
let mut found = None;
|
||||
for info in part_infos.iter() {
|
||||
if let Some(etag) = max_etag
|
||||
&& info.etag == *etag
|
||||
{
|
||||
found = Some(info.clone());
|
||||
break;
|
||||
}
|
||||
|
||||
if let Some(part_meta) = max_part_meta
|
||||
&& info.etag.is_empty()
|
||||
&& part_meta.ends_with(format!("part.{0}.meta", info.number).as_str())
|
||||
{
|
||||
found = Some(info.clone());
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
if let (Some(found), Some(max_etag)) = (found, max_etag)
|
||||
&& !found.etag.is_empty()
|
||||
&& part_meta_quorum.get(max_etag).unwrap_or(&0) >= &read_quorum
|
||||
{
|
||||
ret[part_idx] = found;
|
||||
} else {
|
||||
ret[part_idx] = ObjectPartInfo {
|
||||
number: part_numbers[part_idx],
|
||||
error: Some(format!("part.{} not found", part_numbers[part_idx])),
|
||||
..Default::default()
|
||||
};
|
||||
}
|
||||
}
|
||||
|
||||
Ok(ret)
|
||||
}
|
||||
|
||||
async fn list_parts(disks: &[Option<DiskStore>], part_path: &str, read_quorum: usize) -> disk::error::Result<Vec<usize>> {
|
||||
let mut futures = Vec::with_capacity(disks.len());
|
||||
for (i, disk) in disks.iter().enumerate() {
|
||||
futures.push(async move {
|
||||
if let Some(disk) = disk {
|
||||
disk.list_dir(RUSTFS_META_MULTIPART_BUCKET, RUSTFS_META_MULTIPART_BUCKET, part_path, -1)
|
||||
.await
|
||||
} else {
|
||||
Err(DiskError::DiskNotFound)
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
let mut errs = Vec::with_capacity(disks.len());
|
||||
let mut object_parts = Vec::with_capacity(disks.len());
|
||||
|
||||
let results = join_all(futures).await;
|
||||
for result in results {
|
||||
match result {
|
||||
Ok(res) => {
|
||||
errs.push(None);
|
||||
object_parts.push(res);
|
||||
}
|
||||
Err(e) => {
|
||||
errs.push(Some(e));
|
||||
object_parts.push(vec![]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if let Some(err) = reduce_read_quorum_errs(&errs, OBJECT_OP_IGNORED_ERRS, read_quorum) {
|
||||
return Err(err);
|
||||
}
|
||||
|
||||
let mut part_quorum_map: HashMap<usize, usize> = HashMap::new();
|
||||
|
||||
for drive_parts in object_parts {
|
||||
let mut parts_with_meta_count: HashMap<usize, usize> = HashMap::new();
|
||||
|
||||
// part files can be either part.N or part.N.meta
|
||||
for part_path in drive_parts {
|
||||
if let Some(num_str) = part_path.strip_prefix("part.") {
|
||||
if let Some(meta_idx) = num_str.find(".meta") {
|
||||
if let Ok(part_num) = num_str[..meta_idx].parse::<usize>() {
|
||||
*parts_with_meta_count.entry(part_num).or_insert(0) += 1;
|
||||
}
|
||||
} else if let Ok(part_num) = num_str.parse::<usize>() {
|
||||
*parts_with_meta_count.entry(part_num).or_insert(0) += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Include only part.N.meta files with corresponding part.N
|
||||
for (&part_num, &cnt) in &parts_with_meta_count {
|
||||
if cnt >= 2 {
|
||||
*part_quorum_map.entry(part_num).or_insert(0) += 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
let mut part_numbers = Vec::with_capacity(part_quorum_map.len());
|
||||
for (part_num, count) in part_quorum_map {
|
||||
if count >= read_quorum {
|
||||
part_numbers.push(part_num);
|
||||
}
|
||||
}
|
||||
|
||||
part_numbers.sort();
|
||||
|
||||
Ok(part_numbers)
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(disks, meta))]
|
||||
async fn rename_part(
|
||||
disks: &[Option<DiskStore>],
|
||||
@@ -1052,7 +1233,7 @@ impl SetDisks {
|
||||
return Err(DiskError::ErasureReadQuorum);
|
||||
}
|
||||
|
||||
let mut meta_hashs = vec![None; metas.len()];
|
||||
let mut meta_hashes = vec![None; metas.len()];
|
||||
let mut hasher = Sha256::new();
|
||||
|
||||
for (i, meta) in metas.iter().enumerate() {
|
||||
@@ -1084,7 +1265,7 @@ impl SetDisks {
|
||||
|
||||
hasher.flush()?;
|
||||
|
||||
meta_hashs[i] = Some(hex(hasher.clone().finalize().as_slice()));
|
||||
meta_hashes[i] = Some(hex(hasher.clone().finalize().as_slice()));
|
||||
|
||||
hasher.reset();
|
||||
}
|
||||
@@ -1092,7 +1273,7 @@ impl SetDisks {
|
||||
|
||||
let mut count_map = HashMap::new();
|
||||
|
||||
for hash in meta_hashs.iter().flatten() {
|
||||
for hash in meta_hashes.iter().flatten() {
|
||||
*count_map.entry(hash).or_insert(0) += 1;
|
||||
}
|
||||
|
||||
@@ -1116,7 +1297,7 @@ impl SetDisks {
|
||||
|
||||
let mut valid_obj_map = HashMap::new();
|
||||
|
||||
for (i, op_hash) in meta_hashs.iter().enumerate() {
|
||||
for (i, op_hash) in meta_hashes.iter().enumerate() {
|
||||
if let Some(hash) = op_hash {
|
||||
if let Some(max_hash) = max_val {
|
||||
if hash == max_hash {
|
||||
@@ -1568,8 +1749,8 @@ impl SetDisks {
|
||||
|
||||
// for res in results {
|
||||
// match res {
|
||||
// Ok(entrys) => {
|
||||
// ress.push(Some(entrys));
|
||||
// Ok(entries) => {
|
||||
// ress.push(Some(entries));
|
||||
// errs.push(None);
|
||||
// }
|
||||
// Err(e) => {
|
||||
@@ -1927,21 +2108,23 @@ impl SetDisks {
|
||||
|
||||
let erasure = erasure_coding::Erasure::new(fi.erasure.data_blocks, fi.erasure.parity_blocks, fi.erasure.block_size);
|
||||
|
||||
let mut total_readed = 0;
|
||||
let mut total_read = 0;
|
||||
for i in part_index..=last_part_index {
|
||||
if total_readed == length {
|
||||
if total_read == length {
|
||||
break;
|
||||
}
|
||||
|
||||
let part_number = fi.parts[i].number;
|
||||
let part_size = fi.parts[i].size;
|
||||
let mut part_length = part_size - part_offset;
|
||||
if part_length > (length - total_readed) {
|
||||
part_length = length - total_readed
|
||||
if part_length > (length - total_read) {
|
||||
part_length = length - total_read
|
||||
}
|
||||
|
||||
let till_offset = erasure.shard_file_offset(part_offset, part_length, part_size);
|
||||
|
||||
let read_offset = (part_offset / erasure.block_size) * erasure.shard_size();
|
||||
|
||||
let mut readers = Vec::with_capacity(disks.len());
|
||||
let mut errors = Vec::with_capacity(disks.len());
|
||||
for (idx, disk_op) in disks.iter().enumerate() {
|
||||
@@ -1950,7 +2133,7 @@ impl SetDisks {
|
||||
disk_op.as_ref(),
|
||||
bucket,
|
||||
&format!("{}/{}/part.{}", object, files[idx].data_dir.unwrap_or_default(), part_number),
|
||||
part_offset,
|
||||
read_offset,
|
||||
till_offset,
|
||||
erasure.shard_size(),
|
||||
HashAlgorithm::HighwayHash256,
|
||||
@@ -2020,7 +2203,7 @@ impl SetDisks {
|
||||
|
||||
// debug!("ec decode {} written size {}", part_number, n);
|
||||
|
||||
total_readed += part_length;
|
||||
total_read += part_length;
|
||||
part_offset = 0;
|
||||
}
|
||||
|
||||
@@ -2123,7 +2306,7 @@ impl SetDisks {
|
||||
Some(filter_prefix.to_string())
|
||||
}
|
||||
},
|
||||
recursice: true,
|
||||
recursive: true,
|
||||
forward_to: None,
|
||||
min_disks: 1,
|
||||
report_not_found: false,
|
||||
@@ -2298,12 +2481,12 @@ impl SetDisks {
|
||||
};
|
||||
|
||||
match Self::pick_valid_fileinfo(&parts_metadata, mod_time, etag, read_quorum as usize) {
|
||||
Ok(lastest_meta) => {
|
||||
Ok(latest_meta) => {
|
||||
let (available_disks, data_errs_by_disk, data_errs_by_part) = disks_with_all_parts(
|
||||
&online_disks,
|
||||
&mut parts_metadata,
|
||||
&errs,
|
||||
&lastest_meta,
|
||||
&latest_meta,
|
||||
bucket,
|
||||
object,
|
||||
opts.scan_mode,
|
||||
@@ -2311,22 +2494,22 @@ impl SetDisks {
|
||||
.await?;
|
||||
|
||||
// info!(
|
||||
// "disks_with_all_parts: got available_disks: {:?}, data_errs_by_disk: {:?}, data_errs_by_part: {:?}, lastest_meta: {:?}",
|
||||
// available_disks, data_errs_by_disk, data_errs_by_part, lastest_meta
|
||||
// "disks_with_all_parts: got available_disks: {:?}, data_errs_by_disk: {:?}, data_errs_by_part: {:?}, latest_meta: {:?}",
|
||||
// available_disks, data_errs_by_disk, data_errs_by_part, latest_meta
|
||||
// );
|
||||
let erasure = if !lastest_meta.deleted && !lastest_meta.is_remote() {
|
||||
let erasure = if !latest_meta.deleted && !latest_meta.is_remote() {
|
||||
// Initialize erasure coding
|
||||
erasure_coding::Erasure::new(
|
||||
lastest_meta.erasure.data_blocks,
|
||||
lastest_meta.erasure.parity_blocks,
|
||||
lastest_meta.erasure.block_size,
|
||||
latest_meta.erasure.data_blocks,
|
||||
latest_meta.erasure.parity_blocks,
|
||||
latest_meta.erasure.block_size,
|
||||
)
|
||||
} else {
|
||||
erasure_coding::Erasure::default()
|
||||
};
|
||||
|
||||
result.object_size =
|
||||
ObjectInfo::from_file_info(&lastest_meta, bucket, object, true).get_actual_size()? as usize;
|
||||
ObjectInfo::from_file_info(&latest_meta, bucket, object, true).get_actual_size()? as usize;
|
||||
// Loop to find number of disks with valid data, per-drive
|
||||
// data state and a list of outdated disks on which data needs
|
||||
// to be healed.
|
||||
@@ -2334,15 +2517,15 @@ impl SetDisks {
|
||||
let mut disks_to_heal_count = 0;
|
||||
|
||||
// info!(
|
||||
// "errs: {:?}, data_errs_by_disk: {:?}, lastest_meta: {:?}",
|
||||
// errs, data_errs_by_disk, lastest_meta
|
||||
// "errs: {:?}, data_errs_by_disk: {:?}, latest_meta: {:?}",
|
||||
// errs, data_errs_by_disk, latest_meta
|
||||
// );
|
||||
for index in 0..available_disks.len() {
|
||||
let (yes, reason) = should_heal_object_on_disk(
|
||||
&errs[index],
|
||||
&data_errs_by_disk[&index],
|
||||
&parts_metadata[index],
|
||||
&lastest_meta,
|
||||
&latest_meta,
|
||||
);
|
||||
if yes {
|
||||
outdate_disks[index] = disks[index].clone();
|
||||
@@ -2400,10 +2583,10 @@ impl SetDisks {
|
||||
return Ok((result, None));
|
||||
}
|
||||
|
||||
if !lastest_meta.deleted && disks_to_heal_count > lastest_meta.erasure.parity_blocks {
|
||||
if !latest_meta.deleted && disks_to_heal_count > latest_meta.erasure.parity_blocks {
|
||||
error!(
|
||||
"file({} : {}) part corrupt too much, can not to fix, disks_to_heal_count: {}, parity_blocks: {}",
|
||||
bucket, object, disks_to_heal_count, lastest_meta.erasure.parity_blocks
|
||||
bucket, object, disks_to_heal_count, latest_meta.erasure.parity_blocks
|
||||
);
|
||||
|
||||
// Allow for dangling deletes, on versions that have DataDir missing etc.
|
||||
@@ -2450,39 +2633,37 @@ impl SetDisks {
|
||||
};
|
||||
}
|
||||
|
||||
if !lastest_meta.deleted && lastest_meta.erasure.distribution.len() != available_disks.len() {
|
||||
if !latest_meta.deleted && latest_meta.erasure.distribution.len() != available_disks.len() {
|
||||
let err_str = format!(
|
||||
"unexpected file distribution ({:?}) from available disks ({:?}), looks like backend disks have been manually modified refusing to heal {}/{}({})",
|
||||
lastest_meta.erasure.distribution, available_disks, bucket, object, version_id
|
||||
latest_meta.erasure.distribution, available_disks, bucket, object, version_id
|
||||
);
|
||||
warn!(err_str);
|
||||
let err = DiskError::other(err_str);
|
||||
return Ok((
|
||||
self.default_heal_result(lastest_meta, &errs, bucket, object, version_id)
|
||||
.await,
|
||||
self.default_heal_result(latest_meta, &errs, bucket, object, version_id).await,
|
||||
Some(err),
|
||||
));
|
||||
}
|
||||
|
||||
let latest_disks = Self::shuffle_disks(&available_disks, &lastest_meta.erasure.distribution);
|
||||
if !lastest_meta.deleted && lastest_meta.erasure.distribution.len() != outdate_disks.len() {
|
||||
let latest_disks = Self::shuffle_disks(&available_disks, &latest_meta.erasure.distribution);
|
||||
if !latest_meta.deleted && latest_meta.erasure.distribution.len() != outdate_disks.len() {
|
||||
let err_str = format!(
|
||||
"unexpected file distribution ({:?}) from outdated disks ({:?}), looks like backend disks have been manually modified refusing to heal {}/{}({})",
|
||||
lastest_meta.erasure.distribution, outdate_disks, bucket, object, version_id
|
||||
latest_meta.erasure.distribution, outdate_disks, bucket, object, version_id
|
||||
);
|
||||
warn!(err_str);
|
||||
let err = DiskError::other(err_str);
|
||||
return Ok((
|
||||
self.default_heal_result(lastest_meta, &errs, bucket, object, version_id)
|
||||
.await,
|
||||
self.default_heal_result(latest_meta, &errs, bucket, object, version_id).await,
|
||||
Some(err),
|
||||
));
|
||||
}
|
||||
|
||||
if !lastest_meta.deleted && lastest_meta.erasure.distribution.len() != parts_metadata.len() {
|
||||
if !latest_meta.deleted && latest_meta.erasure.distribution.len() != parts_metadata.len() {
|
||||
let err_str = format!(
|
||||
"unexpected file distribution ({:?}) from metadata entries ({:?}), looks like backend disks have been manually modified refusing to heal {}/{}({})",
|
||||
lastest_meta.erasure.distribution,
|
||||
latest_meta.erasure.distribution,
|
||||
parts_metadata.len(),
|
||||
bucket,
|
||||
object,
|
||||
@@ -2491,15 +2672,13 @@ impl SetDisks {
|
||||
warn!(err_str);
|
||||
let err = DiskError::other(err_str);
|
||||
return Ok((
|
||||
self.default_heal_result(lastest_meta, &errs, bucket, object, version_id)
|
||||
.await,
|
||||
self.default_heal_result(latest_meta, &errs, bucket, object, version_id).await,
|
||||
Some(err),
|
||||
));
|
||||
}
|
||||
|
||||
let out_dated_disks = Self::shuffle_disks(&outdate_disks, &lastest_meta.erasure.distribution);
|
||||
let mut parts_metadata =
|
||||
Self::shuffle_parts_metadata(&parts_metadata, &lastest_meta.erasure.distribution);
|
||||
let out_dated_disks = Self::shuffle_disks(&outdate_disks, &latest_meta.erasure.distribution);
|
||||
let mut parts_metadata = Self::shuffle_parts_metadata(&parts_metadata, &latest_meta.erasure.distribution);
|
||||
let mut copy_parts_metadata = vec![None; parts_metadata.len()];
|
||||
for (index, disk) in latest_disks.iter().enumerate() {
|
||||
if disk.is_some() {
|
||||
@@ -2520,18 +2699,18 @@ impl SetDisks {
|
||||
if disk.is_some() {
|
||||
// Make sure to write the FileInfo information
|
||||
// that is expected to be in quorum.
|
||||
parts_metadata[index] = clean_file_info(&lastest_meta);
|
||||
parts_metadata[index] = clean_file_info(&latest_meta);
|
||||
}
|
||||
}
|
||||
|
||||
// We write at temporary location and then rename to final location.
|
||||
let tmp_id = Uuid::new_v4().to_string();
|
||||
let src_data_dir = lastest_meta.data_dir.unwrap().to_string();
|
||||
let dst_data_dir = lastest_meta.data_dir.unwrap();
|
||||
let src_data_dir = latest_meta.data_dir.unwrap().to_string();
|
||||
let dst_data_dir = latest_meta.data_dir.unwrap();
|
||||
|
||||
if !lastest_meta.deleted && !lastest_meta.is_remote() {
|
||||
let erasure_info = lastest_meta.erasure;
|
||||
for part in lastest_meta.parts.iter() {
|
||||
if !latest_meta.deleted && !latest_meta.is_remote() {
|
||||
let erasure_info = latest_meta.erasure;
|
||||
for part in latest_meta.parts.iter() {
|
||||
let till_offset = erasure.shard_file_offset(0, part.size, part.size);
|
||||
let checksum_algo = erasure_info.get_checksum_info(part.number).algorithm;
|
||||
let mut readers = Vec::with_capacity(latest_disks.len());
|
||||
@@ -2576,7 +2755,7 @@ impl SetDisks {
|
||||
|
||||
let is_inline_buffer = {
|
||||
if let Some(sc) = GLOBAL_StorageClass.get() {
|
||||
sc.should_inline(erasure.shard_file_size(lastest_meta.size), false)
|
||||
sc.should_inline(erasure.shard_file_size(latest_meta.size), false)
|
||||
} else {
|
||||
false
|
||||
}
|
||||
@@ -3657,7 +3836,7 @@ impl SetDisks {
|
||||
disks,
|
||||
fallback_disks,
|
||||
bucket: bucket.clone(),
|
||||
recursice: true,
|
||||
recursive: true,
|
||||
forward_to,
|
||||
min_disks: 1,
|
||||
report_not_found: false,
|
||||
@@ -4134,7 +4313,7 @@ impl ObjectIO for SetDisks {
|
||||
|
||||
fi.is_latest = true;
|
||||
|
||||
// TODO: version suport
|
||||
// TODO: version support
|
||||
Ok(ObjectInfo::from_file_info(&fi, bucket, object, opts.versioned || opts.version_suspended))
|
||||
}
|
||||
}
|
||||
@@ -4884,7 +5063,7 @@ impl StorageAPI for SetDisks {
|
||||
) -> Result<PartInfo> {
|
||||
let upload_id_path = Self::get_upload_id_dir(bucket, object, upload_id);
|
||||
|
||||
let (mut fi, _) = self.check_upload_id_exists(bucket, object, upload_id, true).await?;
|
||||
let (fi, _) = self.check_upload_id_exists(bucket, object, upload_id, true).await?;
|
||||
|
||||
let write_quorum = fi.write_quorum(self.default_write_quorum());
|
||||
|
||||
@@ -5037,9 +5216,9 @@ impl StorageAPI for SetDisks {
|
||||
|
||||
// debug!("put_object_part part_info {:?}", part_info);
|
||||
|
||||
fi.parts = vec![part_info];
|
||||
// fi.parts = vec![part_info.clone()];
|
||||
|
||||
let fi_buff = fi.marshal_msg()?;
|
||||
let part_info_buff = part_info.marshal_msg()?;
|
||||
|
||||
drop(writers); // drop writers to close all files
|
||||
|
||||
@@ -5050,7 +5229,7 @@ impl StorageAPI for SetDisks {
|
||||
&tmp_part_path,
|
||||
RUSTFS_META_MULTIPART_BUCKET,
|
||||
&part_path,
|
||||
fi_buff.into(),
|
||||
part_info_buff.into(),
|
||||
write_quorum,
|
||||
)
|
||||
.await?;
|
||||
@@ -5068,6 +5247,123 @@ impl StorageAPI for SetDisks {
|
||||
Ok(ret)
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn list_object_parts(
|
||||
&self,
|
||||
bucket: &str,
|
||||
object: &str,
|
||||
upload_id: &str,
|
||||
part_number_marker: Option<usize>,
|
||||
mut max_parts: usize,
|
||||
opts: &ObjectOptions,
|
||||
) -> Result<ListPartsInfo> {
|
||||
let (fi, _) = self.check_upload_id_exists(bucket, object, upload_id, false).await?;
|
||||
|
||||
let upload_id_path = Self::get_upload_id_dir(bucket, object, upload_id);
|
||||
|
||||
if max_parts > MAX_PARTS_COUNT {
|
||||
max_parts = MAX_PARTS_COUNT;
|
||||
}
|
||||
|
||||
let part_number_marker = part_number_marker.unwrap_or_default();
|
||||
|
||||
let mut ret = ListPartsInfo {
|
||||
bucket: bucket.to_owned(),
|
||||
object: object.to_owned(),
|
||||
upload_id: upload_id.to_owned(),
|
||||
max_parts,
|
||||
part_number_marker,
|
||||
user_defined: fi.metadata.clone(),
|
||||
..Default::default()
|
||||
};
|
||||
|
||||
if max_parts == 0 {
|
||||
return Ok(ret);
|
||||
}
|
||||
|
||||
let online_disks = self.get_disks_internal().await;
|
||||
|
||||
let read_quorum = fi.read_quorum(self.default_read_quorum());
|
||||
|
||||
let part_path = format!(
|
||||
"{}{}",
|
||||
path_join_buf(&[
|
||||
&upload_id_path,
|
||||
fi.data_dir.map(|v| v.to_string()).unwrap_or_default().as_str(),
|
||||
]),
|
||||
SLASH_SEPARATOR
|
||||
);
|
||||
|
||||
let mut part_numbers = match Self::list_parts(&online_disks, &part_path, read_quorum).await {
|
||||
Ok(parts) => parts,
|
||||
Err(err) => {
|
||||
if err == DiskError::FileNotFound {
|
||||
return Ok(ret);
|
||||
}
|
||||
|
||||
return Err(to_object_err(err.into(), vec![bucket, object]));
|
||||
}
|
||||
};
|
||||
|
||||
if part_numbers.is_empty() {
|
||||
return Ok(ret);
|
||||
}
|
||||
let start_op = part_numbers.iter().find(|&&v| v != 0 && v == part_number_marker);
|
||||
if part_number_marker > 0 && start_op.is_none() {
|
||||
return Ok(ret);
|
||||
}
|
||||
|
||||
if let Some(start) = start_op {
|
||||
if start + 1 > part_numbers.len() {
|
||||
return Ok(ret);
|
||||
}
|
||||
|
||||
part_numbers = part_numbers[start + 1..].to_vec();
|
||||
}
|
||||
|
||||
let mut parts = Vec::with_capacity(part_numbers.len());
|
||||
|
||||
let part_meta_paths = part_numbers
|
||||
.iter()
|
||||
.map(|v| format!("{part_path}part.{v}.meta"))
|
||||
.collect::<Vec<String>>();
|
||||
|
||||
let object_parts =
|
||||
Self::read_parts(&online_disks, RUSTFS_META_MULTIPART_BUCKET, &part_meta_paths, &part_numbers, read_quorum)
|
||||
.await
|
||||
.map_err(|e| to_object_err(e.into(), vec![bucket, object, upload_id]))?;
|
||||
|
||||
let mut count = max_parts;
|
||||
|
||||
for (i, part) in object_parts.iter().enumerate() {
|
||||
if let Some(err) = &part.error {
|
||||
warn!("list_object_parts part error: {:?}", &err);
|
||||
}
|
||||
|
||||
parts.push(PartInfo {
|
||||
etag: Some(part.etag.clone()),
|
||||
part_num: part.number,
|
||||
last_mod: part.mod_time,
|
||||
size: part.size,
|
||||
actual_size: part.actual_size,
|
||||
});
|
||||
|
||||
count -= 1;
|
||||
if count == 0 {
|
||||
break;
|
||||
}
|
||||
}
|
||||
|
||||
ret.parts = parts;
|
||||
|
||||
if object_parts.len() > ret.parts.len() {
|
||||
ret.is_truncated = true;
|
||||
ret.next_part_number_marker = ret.parts.last().map(|v| v.part_num).unwrap_or_default();
|
||||
}
|
||||
|
||||
Ok(ret)
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn list_multipart_uploads(
|
||||
&self,
|
||||
@@ -5143,8 +5439,8 @@ impl StorageAPI for SetDisks {
|
||||
|
||||
let splits: Vec<&str> = upload_id.split("x").collect();
|
||||
if splits.len() == 2 {
|
||||
if let Ok(unix) = splits[1].parse::<i64>() {
|
||||
OffsetDateTime::from_unix_timestamp(unix)?
|
||||
if let Ok(unix) = splits[1].parse::<i128>() {
|
||||
OffsetDateTime::from_unix_timestamp_nanos(unix)?
|
||||
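The parse above moves from seconds to nanoseconds because the value embedded after the `x` separator in an upload id is a nanosecond unix timestamp, and `OffsetDateTime::from_unix_timestamp_nanos` takes an `i128`. A minimal sketch of the extraction (the `<id>x<unix-nanos>` layout is inferred from the split shown above):

    use time::OffsetDateTime;

    // Recover the creation time encoded in an upload id, falling back to `now`
    // when the suffix is missing or malformed.
    fn upload_id_time(upload_id: &str, now: OffsetDateTime) -> OffsetDateTime {
        match upload_id.split('x').collect::<Vec<_>>().as_slice() {
            [_, nanos] => nanos
                .parse::<i128>()
                .ok()
                .and_then(|n| OffsetDateTime::from_unix_timestamp_nanos(n).ok())
                .unwrap_or(now),
            _ => now,
        }
    }
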
} else {
|
||||
now
|
||||
}
|
||||
@@ -5363,49 +5659,31 @@ impl StorageAPI for SetDisks {
|
||||
|
||||
let part_path = format!("{}/{}/", upload_id_path, fi.data_dir.unwrap_or(Uuid::nil()));
|
||||
|
||||
let files: Vec<String> = uploaded_parts.iter().map(|v| format!("part.{}.meta", v.part_num)).collect();
|
||||
let part_meta_paths = uploaded_parts
|
||||
.iter()
|
||||
.map(|v| format!("{part_path}part.{0}.meta", v.part_num))
|
||||
.collect::<Vec<String>>();
|
||||
|
||||
// readMultipleFiles
|
||||
let part_numbers = uploaded_parts.iter().map(|v| v.part_num).collect::<Vec<usize>>();
|
||||
|
||||
let req = ReadMultipleReq {
|
||||
bucket: RUSTFS_META_MULTIPART_BUCKET.to_string(),
|
||||
prefix: part_path,
|
||||
files,
|
||||
max_size: 1 << 20,
|
||||
metadata_only: true,
|
||||
abort404: true,
|
||||
max_results: 0,
|
||||
};
|
||||
let object_parts =
|
||||
Self::read_parts(&disks, RUSTFS_META_MULTIPART_BUCKET, &part_meta_paths, &part_numbers, write_quorum).await?;
|
||||
|
||||
let part_files_resp = Self::read_multiple_files(&disks, req, write_quorum).await;
|
||||
|
||||
if part_files_resp.len() != uploaded_parts.len() {
|
||||
if object_parts.len() != uploaded_parts.len() {
|
||||
return Err(Error::other("part result number err"));
|
||||
}
|
||||
|
||||
for (i, res) in part_files_resp.iter().enumerate() {
|
||||
let part_id = uploaded_parts[i].part_num;
|
||||
if !res.error.is_empty() || !res.exists {
|
||||
error!("complete_multipart_upload part_id err {:?}, exists={}", res, res.exists);
|
||||
return Err(Error::InvalidPart(part_id, bucket.to_owned(), object.to_owned()));
|
||||
for (i, part) in object_parts.iter().enumerate() {
|
||||
if let Some(err) = &part.error {
|
||||
error!("complete_multipart_upload part error: {:?}", &err);
|
||||
}
|
||||
|
||||
let part_fi = FileInfo::unmarshal(&res.data).map_err(|e| {
|
||||
if uploaded_parts[i].part_num != part.number {
|
||||
error!(
|
||||
"complete_multipart_upload FileInfo::unmarshal err {:?}, part_id={}, bucket={}, object={}",
|
||||
e, part_id, bucket, object
|
||||
"complete_multipart_upload part_id err part_id != part_num {} != {}",
|
||||
uploaded_parts[i].part_num, part.number
|
||||
);
|
||||
Error::InvalidPart(part_id, bucket.to_owned(), object.to_owned())
|
||||
})?;
|
||||
let part = &part_fi.parts[0];
|
||||
let part_num = part.number;
|
||||
|
||||
// debug!("complete part {} file info {:?}", part_num, &part_fi);
|
||||
// debug!("complete part {} object info {:?}", part_num, &part);
|
||||
|
||||
if part_id != part_num {
|
||||
error!("complete_multipart_upload part_id err part_id != part_num {} != {}", part_id, part_num);
|
||||
return Err(Error::InvalidPart(part_id, bucket.to_owned(), object.to_owned()));
|
||||
return Err(Error::InvalidPart(uploaded_parts[i].part_num, bucket.to_owned(), object.to_owned()));
|
||||
}
|
||||
|
||||
fi.add_object_part(
|
||||
@@ -5860,7 +6138,7 @@ async fn disks_with_all_parts(
|
||||
online_disks: &[Option<DiskStore>],
|
||||
parts_metadata: &mut [FileInfo],
|
||||
errs: &[Option<DiskError>],
|
||||
lastest_meta: &FileInfo,
|
||||
latest_meta: &FileInfo,
|
||||
bucket: &str,
|
||||
object: &str,
|
||||
scan_mode: HealScanMode,
|
||||
@@ -5868,10 +6146,10 @@ async fn disks_with_all_parts(
|
||||
let mut available_disks = vec![None; online_disks.len()];
|
||||
let mut data_errs_by_disk: HashMap<usize, Vec<usize>> = HashMap::new();
|
||||
for i in 0..online_disks.len() {
|
||||
data_errs_by_disk.insert(i, vec![1; lastest_meta.parts.len()]);
|
||||
data_errs_by_disk.insert(i, vec![1; latest_meta.parts.len()]);
|
||||
}
|
||||
let mut data_errs_by_part: HashMap<usize, Vec<usize>> = HashMap::new();
|
||||
for i in 0..lastest_meta.parts.len() {
|
||||
for i in 0..latest_meta.parts.len() {
|
||||
data_errs_by_part.insert(i, vec![1; online_disks.len()]);
|
||||
}
|
||||
|
||||
@@ -5909,7 +6187,7 @@ async fn disks_with_all_parts(
|
||||
continue;
|
||||
}
|
||||
let meta = &parts_metadata[index];
|
||||
if !meta.mod_time.eq(&lastest_meta.mod_time) || !meta.data_dir.eq(&lastest_meta.data_dir) {
|
||||
if !meta.mod_time.eq(&latest_meta.mod_time) || !meta.data_dir.eq(&latest_meta.data_dir) {
|
||||
warn!("mod_time is not Eq, file corrupt, index: {index}");
|
||||
meta_errs[index] = Some(DiskError::FileCorrupt);
|
||||
parts_metadata[index] = FileInfo::default();
|
||||
@@ -5935,7 +6213,7 @@ async fn disks_with_all_parts(
|
||||
meta_errs.iter().enumerate().for_each(|(index, err)| {
|
||||
if err.is_some() {
|
||||
let part_err = conv_part_err_to_int(err);
|
||||
for p in 0..lastest_meta.parts.len() {
|
||||
for p in 0..latest_meta.parts.len() {
|
||||
data_errs_by_part.entry(p).or_insert(vec![0; meta_errs.len()])[index] = part_err;
|
||||
}
|
||||
}
|
||||
@@ -5987,7 +6265,7 @@ async fn disks_with_all_parts(
|
||||
|
||||
let mut verify_resp = CheckPartsResp::default();
|
||||
let mut verify_err = None;
|
||||
meta.data_dir = lastest_meta.data_dir;
|
||||
meta.data_dir = latest_meta.data_dir;
|
||||
if scan_mode == HEAL_DEEP_SCAN {
|
||||
// disk has a valid xl.meta but may not have all the
|
||||
// parts. This is considered an outdated disk, since
|
||||
@@ -6011,7 +6289,7 @@ async fn disks_with_all_parts(
|
||||
}
|
||||
}
|
||||
|
||||
for p in 0..lastest_meta.parts.len() {
|
||||
for p in 0..latest_meta.parts.len() {
|
||||
if let Some(vec) = data_errs_by_part.get_mut(&p) {
|
||||
if index < vec.len() {
|
||||
if verify_err.is_some() {
|
||||
@@ -6049,7 +6327,7 @@ pub fn should_heal_object_on_disk(
|
||||
err: &Option<DiskError>,
|
||||
parts_errs: &[usize],
|
||||
meta: &FileInfo,
|
||||
lastest_meta: &FileInfo,
|
||||
latest_meta: &FileInfo,
|
||||
) -> (bool, Option<DiskError>) {
|
||||
if let Some(err) = err {
|
||||
if err == &DiskError::FileNotFound || err == &DiskError::FileVersionNotFound || err == &DiskError::FileCorrupt {
|
||||
@@ -6057,12 +6335,12 @@ pub fn should_heal_object_on_disk(
|
||||
}
|
||||
}
|
||||
|
||||
if lastest_meta.volume != meta.volume
|
||||
|| lastest_meta.name != meta.name
|
||||
|| lastest_meta.version_id != meta.version_id
|
||||
|| lastest_meta.deleted != meta.deleted
|
||||
if latest_meta.volume != meta.volume
|
||||
|| latest_meta.name != meta.name
|
||||
|| latest_meta.version_id != meta.version_id
|
||||
|| latest_meta.deleted != meta.deleted
|
||||
{
|
||||
info!("lastest_meta not Eq meta, lastest_meta: {:?}, meta: {:?}", lastest_meta, meta);
|
||||
info!("latest_meta not Eq meta, latest_meta: {:?}, meta: {:?}", latest_meta, meta);
|
||||
return (true, Some(DiskError::OutdatedXLMeta));
|
||||
}
|
||||
if !meta.deleted && !meta.is_remote() {
|
||||
|
||||
@@ -17,6 +17,7 @@ use std::{collections::HashMap, sync::Arc};
|
||||
|
||||
use crate::disk::error_reduce::count_errs;
|
||||
use crate::error::{Error, Result};
|
||||
use crate::store_api::ListPartsInfo;
|
||||
use crate::{
|
||||
disk::{
|
||||
DiskAPI, DiskInfo, DiskOption, DiskStore,
|
||||
@@ -64,7 +65,7 @@ pub struct Sets {
|
||||
pub pool_idx: usize,
|
||||
pub endpoints: PoolEndpoints,
|
||||
pub format: FormatV3,
|
||||
pub partiy_count: usize,
|
||||
pub parity_count: usize,
|
||||
pub set_count: usize,
|
||||
pub set_drive_count: usize,
|
||||
pub default_parity_count: usize,
|
||||
@@ -81,13 +82,13 @@ impl Drop for Sets {
|
||||
}
|
||||
|
||||
impl Sets {
|
||||
#[tracing::instrument(level = "debug", skip(disks, endpoints, fm, pool_idx, partiy_count))]
|
||||
#[tracing::instrument(level = "debug", skip(disks, endpoints, fm, pool_idx, parity_count))]
|
||||
pub async fn new(
|
||||
disks: Vec<Option<DiskStore>>,
|
||||
endpoints: &PoolEndpoints,
|
||||
fm: &FormatV3,
|
||||
pool_idx: usize,
|
||||
partiy_count: usize,
|
||||
parity_count: usize,
|
||||
) -> Result<Arc<Self>> {
|
||||
let set_count = fm.erasure.sets.len();
|
||||
let set_drive_count = fm.erasure.sets[0].len();
|
||||
@@ -172,7 +173,7 @@ impl Sets {
|
||||
Arc::new(RwLock::new(NsLockMap::new(is_dist_erasure().await))),
|
||||
Arc::new(RwLock::new(set_drive)),
|
||||
set_drive_count,
|
||||
partiy_count,
|
||||
parity_count,
|
||||
i,
|
||||
pool_idx,
|
||||
set_endpoints,
|
||||
@@ -193,10 +194,10 @@ impl Sets {
|
||||
pool_idx,
|
||||
endpoints: endpoints.clone(),
|
||||
format: fm.clone(),
|
||||
partiy_count,
|
||||
parity_count,
|
||||
set_count,
|
||||
set_drive_count,
|
||||
default_parity_count: partiy_count,
|
||||
default_parity_count: parity_count,
|
||||
distribution_algo: fm.erasure.distribution_algo.clone(),
|
||||
exit_signal: Some(tx),
|
||||
});
|
||||
@@ -619,6 +620,20 @@ impl StorageAPI for Sets {
|
||||
Ok((del_objects, del_errs))
|
||||
}
|
||||
|
||||
async fn list_object_parts(
|
||||
&self,
|
||||
bucket: &str,
|
||||
object: &str,
|
||||
upload_id: &str,
|
||||
part_number_marker: Option<usize>,
|
||||
max_parts: usize,
|
||||
opts: &ObjectOptions,
|
||||
) -> Result<ListPartsInfo> {
|
||||
self.get_disks_by_key(object)
|
||||
.list_object_parts(bucket, object, upload_id, part_number_marker, max_parts, opts)
|
||||
.await
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn list_multipart_uploads(
|
||||
&self,
|
||||
|
||||
@@ -38,7 +38,7 @@ use crate::new_object_layer_fn;
|
||||
use crate::notification_sys::get_global_notification_sys;
|
||||
use crate::pools::PoolMeta;
|
||||
use crate::rebalance::RebalanceMeta;
|
||||
use crate::store_api::{ListMultipartsInfo, ListObjectVersionsInfo, MultipartInfo, ObjectIO};
|
||||
use crate::store_api::{ListMultipartsInfo, ListObjectVersionsInfo, ListPartsInfo, MultipartInfo, ObjectIO};
|
||||
use crate::store_init::{check_disk_fatal_errs, ec_drives_no_config};
|
||||
use crate::{
|
||||
bucket::{lifecycle::bucket_lifecycle_ops::TransitionState, metadata::BucketMetadata},
|
||||
@@ -152,7 +152,7 @@ impl ECStore {
|
||||
common_parity_drives = parity_drives;
|
||||
}
|
||||
|
||||
// validate_parity(partiy_count, pool_eps.drives_per_set)?;
|
||||
// validate_parity(parity_count, pool_eps.drives_per_set)?;
|
||||
|
||||
let (disks, errs) = store_init::init_disks(
|
||||
&pool_eps.endpoints,
|
||||
@@ -302,13 +302,13 @@ impl ECStore {
|
||||
}
|
||||
|
||||
let pools = meta.return_resumable_pools();
|
||||
let mut pool_indeces = Vec::with_capacity(pools.len());
|
||||
let mut pool_indices = Vec::with_capacity(pools.len());
|
||||
|
||||
let endpoints = get_global_endpoints();
|
||||
|
||||
for p in pools.iter() {
|
||||
if let Some(idx) = endpoints.get_pool_idx(&p.cmd_line) {
|
||||
pool_indeces.push(idx);
|
||||
pool_indices.push(idx);
|
||||
} else {
|
||||
return Err(Error::other(format!(
|
||||
"unexpected state present for decommission status pool({}) not found",
|
||||
@@ -317,8 +317,8 @@ impl ECStore {
|
||||
}
|
||||
}
|
||||
|
||||
if !pool_indeces.is_empty() {
|
||||
let idx = pool_indeces[0];
|
||||
if !pool_indices.is_empty() {
|
||||
let idx = pool_indices[0];
|
||||
if endpoints.as_ref()[idx].endpoints.as_ref()[0].is_local {
|
||||
let (_tx, rx) = broadcast::channel(1);
|
||||
|
||||
@@ -328,9 +328,9 @@ impl ECStore {
|
||||
// wait 3 minutes for cluster init
|
||||
tokio::time::sleep(Duration::from_secs(60 * 3)).await;
|
||||
|
||||
if let Err(err) = store.decommission(rx.resubscribe(), pool_indeces.clone()).await {
|
||||
if let Err(err) = store.decommission(rx.resubscribe(), pool_indices.clone()).await {
|
||||
if err == StorageError::DecommissionAlreadyRunning {
|
||||
for i in pool_indeces.iter() {
|
||||
for i in pool_indices.iter() {
|
||||
store.do_decommission_in_routine(rx.resubscribe(), *i).await;
|
||||
}
|
||||
return;
|
||||
@@ -417,9 +417,9 @@ impl ECStore {
|
||||
// // TODO handle errs
|
||||
// continue;
|
||||
// }
|
||||
// let entrys = disks_res.as_ref().unwrap();
|
||||
// let entries = disks_res.as_ref().unwrap();
|
||||
|
||||
// for entry in entrys {
|
||||
// for entry in entries {
|
||||
// // warn!("lst_merged entry---- {}", &entry.name);
|
||||
|
||||
// if !opts.prefix.is_empty() && !entry.name.starts_with(&opts.prefix) {
|
||||
@@ -1415,7 +1415,7 @@ impl StorageAPI for ECStore {

if let Ok(sys) = metadata_sys::get(bucket).await {
info.created = Some(sys.created);
info.versionning = sys.versioning();
info.versioning = sys.versioning();
info.object_locking = sys.object_locking();
}

@@ -1810,6 +1810,47 @@ impl StorageAPI for ECStore {
|
||||
Ok((del_objects, del_errs))
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn list_object_parts(
|
||||
&self,
|
||||
bucket: &str,
|
||||
object: &str,
|
||||
upload_id: &str,
|
||||
part_number_marker: Option<usize>,
|
||||
max_parts: usize,
|
||||
opts: &ObjectOptions,
|
||||
) -> Result<ListPartsInfo> {
|
||||
check_list_parts_args(bucket, object, upload_id)?;
|
||||
|
||||
// TODO: nslock
|
||||
|
||||
if self.single_pool() {
|
||||
return self.pools[0]
|
||||
.list_object_parts(bucket, object, upload_id, part_number_marker, max_parts, opts)
|
||||
.await;
|
||||
}
|
||||
|
||||
for pool in self.pools.iter() {
|
||||
if self.is_suspended(pool.pool_idx).await {
|
||||
continue;
|
||||
}
|
||||
match pool
|
||||
.list_object_parts(bucket, object, upload_id, part_number_marker, max_parts, opts)
|
||||
.await
|
||||
{
|
||||
Ok(res) => return Ok(res),
|
||||
Err(err) => {
|
||||
if is_err_invalid_upload_id(&err) {
|
||||
continue;
|
||||
}
|
||||
return Err(err);
|
||||
}
|
||||
};
|
||||
}
|
||||
|
||||
Err(StorageError::InvalidUploadID(bucket.to_owned(), object.to_owned(), upload_id.to_owned()))
|
||||
}
|
||||
|
||||
#[tracing::instrument(skip(self))]
|
||||
async fn list_multipart_uploads(
|
||||
&self,
|
||||
|
||||
@@ -201,7 +201,7 @@ impl GetObjectReader {
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Debug)]
#[derive(Debug, Clone)]
pub struct HTTPRangeSpec {
pub is_suffix_length: bool,
pub start: i64,
@@ -276,7 +276,10 @@ impl HTTPRangeSpec {
return Ok(range_length);
}

Err(Error::other("range value invaild"))
Err(Error::other(format!(
"range value invalid: start={}, end={}, expected start <= end and end >= -1",
self.start, self.end
)))
}
}

@@ -336,7 +339,7 @@ pub struct BucketInfo {
pub name: String,
pub created: Option<OffsetDateTime>,
pub deleted: Option<OffsetDateTime>,
pub versionning: bool,
pub versioning: bool,
pub object_locking: bool,
}

@@ -548,6 +551,7 @@ impl ObjectInfo {
|
||||
mod_time: part.mod_time,
|
||||
checksums: part.checksums.clone(),
|
||||
number: part.number,
|
||||
error: part.error.clone(),
|
||||
})
|
||||
.collect();
|
||||
|
||||
@@ -844,6 +848,48 @@ pub struct ListMultipartsInfo {
// encoding_type: String, // Not supported yet.
}

/// ListPartsInfo - represents list of all parts.
#[derive(Debug, Clone, Default)]
pub struct ListPartsInfo {
/// Name of the bucket.
pub bucket: String,

/// Name of the object.
pub object: String,

/// Upload ID identifying the multipart upload whose parts are being listed.
pub upload_id: String,

/// The class of storage used to store the object.
pub storage_class: String,

/// Part number after which listing begins.
pub part_number_marker: usize,

/// When a list is truncated, this element specifies the last part in the list,
/// as well as the value to use for the part-number-marker request parameter
/// in a subsequent request.
pub next_part_number_marker: usize,

/// Maximum number of parts that were allowed in the response.
pub max_parts: usize,

/// Indicates whether the returned list of parts is truncated.
pub is_truncated: bool,

/// List of all parts.
pub parts: Vec<PartInfo>,

/// Any metadata set during InitMultipartUpload, including encryption headers.
pub user_defined: HashMap<String, String>,

/// ChecksumAlgorithm if set
pub checksum_algorithm: String,

/// ChecksumType if set
pub checksum_type: String,
}

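As a side note, the sketch below is not part of this diff; it is a minimal, hypothetical illustration of how a caller could page through all parts of a multipart upload with the new list_object_parts method, driving the loop off the is_truncated and next_part_number_marker fields of ListPartsInfo defined above. The helper name collect_all_parts and the page size of 1000 are assumptions; StorageAPI, ObjectOptions, PartInfo, and the crate's Result alias are taken as defined in this changeset.

// Illustrative sketch only (not from the repository): page through all parts
// of a multipart upload via the new StorageAPI::list_object_parts method.
async fn collect_all_parts<S: StorageAPI>(
    store: &S,
    bucket: &str,
    object: &str,
    upload_id: &str,
    opts: &ObjectOptions,
) -> Result<Vec<PartInfo>> {
    let mut all_parts = Vec::new();
    let mut marker: Option<usize> = None; // None = start from the first part
    loop {
        // Assumed page size of 1000 parts per request.
        let page = store
            .list_object_parts(bucket, object, upload_id, marker, 1000, opts)
            .await?;
        all_parts.extend(page.parts);
        if !page.is_truncated {
            break;
        }
        // Resume the next request from the marker returned by this page.
        marker = Some(page.next_part_number_marker);
    }
    Ok(all_parts)
}

At the ECStore level the pools are tried in order and the first pool that recognizes the upload id answers, as shown earlier in this diff, so a caller like the one above never has to know which pool holds the upload.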
#[derive(Debug, Default, Clone)]
|
||||
pub struct ObjectToDelete {
|
||||
pub object_name: String,
|
||||
@@ -923,10 +969,7 @@ pub trait StorageAPI: ObjectIO {
|
||||
) -> Result<ListObjectVersionsInfo>;
|
||||
// Walk TODO:
|
||||
|
||||
// GetObjectNInfo ObjectIO
|
||||
async fn get_object_info(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo>;
|
||||
// PutObject ObjectIO
|
||||
// CopyObject
|
||||
async fn copy_object(
|
||||
&self,
|
||||
src_bucket: &str,
|
||||
@@ -949,7 +992,6 @@ pub trait StorageAPI: ObjectIO {
|
||||
// TransitionObject TODO:
|
||||
// RestoreTransitionedObject TODO:
|
||||
|
||||
// ListMultipartUploads
|
||||
async fn list_multipart_uploads(
|
||||
&self,
|
||||
bucket: &str,
|
||||
@@ -960,7 +1002,6 @@ pub trait StorageAPI: ObjectIO {
|
||||
max_uploads: usize,
|
||||
) -> Result<ListMultipartsInfo>;
|
||||
async fn new_multipart_upload(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<MultipartUploadResult>;
|
||||
// CopyObjectPart
|
||||
async fn copy_object_part(
|
||||
&self,
|
||||
src_bucket: &str,
|
||||
@@ -984,7 +1025,6 @@ pub trait StorageAPI: ObjectIO {
|
||||
data: &mut PutObjReader,
|
||||
opts: &ObjectOptions,
|
||||
) -> Result<PartInfo>;
|
||||
// GetMultipartInfo
|
||||
async fn get_multipart_info(
|
||||
&self,
|
||||
bucket: &str,
|
||||
@@ -992,7 +1032,15 @@ pub trait StorageAPI: ObjectIO {
|
||||
upload_id: &str,
|
||||
opts: &ObjectOptions,
|
||||
) -> Result<MultipartInfo>;
|
||||
// ListObjectParts
|
||||
async fn list_object_parts(
|
||||
&self,
|
||||
bucket: &str,
|
||||
object: &str,
|
||||
upload_id: &str,
|
||||
part_number_marker: Option<usize>,
|
||||
max_parts: usize,
|
||||
opts: &ObjectOptions,
|
||||
) -> Result<ListPartsInfo>;
|
||||
async fn abort_multipart_upload(&self, bucket: &str, object: &str, upload_id: &str, opts: &ObjectOptions) -> Result<()>;
|
||||
async fn complete_multipart_upload(
|
||||
self: Arc<Self>,
|
||||
@@ -1002,13 +1050,10 @@ pub trait StorageAPI: ObjectIO {
|
||||
uploaded_parts: Vec<CompletePart>,
|
||||
opts: &ObjectOptions,
|
||||
) -> Result<ObjectInfo>;
|
||||
// GetDisks
|
||||
async fn get_disks(&self, pool_idx: usize, set_idx: usize) -> Result<Vec<Option<DiskStore>>>;
|
||||
// SetDriveCounts
|
||||
fn set_drive_counts(&self) -> Vec<usize>;
|
||||
|
||||
// Health TODO:
|
||||
// PutObjectMetadata
|
||||
async fn put_object_metadata(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<ObjectInfo>;
|
||||
// DecomTieredObject
|
||||
async fn get_object_tags(&self, bucket: &str, object: &str, opts: &ObjectOptions) -> Result<String>;
|
||||
|
||||
@@ -222,7 +222,7 @@ fn check_format_erasure_value(format: &FormatV3) -> Result<()> {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
// load_format_erasure_all 读取所有 foramt.json
|
||||
// load_format_erasure_all 读取所有 format.json
|
||||
pub async fn load_format_erasure_all(disks: &[Option<DiskStore>], heal: bool) -> (Vec<Option<FormatV3>>, Vec<Option<DiskError>>) {
|
||||
let mut futures = Vec::with_capacity(disks.len());
|
||||
let mut datas = Vec::with_capacity(disks.len());
|
||||
|
||||
@@ -776,7 +776,7 @@ impl ECStore {
|
||||
fallback_disks: fallback_disks.iter().cloned().map(Some).collect(),
|
||||
bucket: bucket.to_owned(),
|
||||
path,
|
||||
recursice: true,
|
||||
recursive: true,
|
||||
filter_prefix: Some(filter_prefix),
|
||||
forward_to: opts.marker.clone(),
|
||||
min_disks: listing_quorum,
|
||||
@@ -851,8 +851,8 @@ impl ECStore {
|
||||
}
|
||||
};
|
||||
|
||||
if let Some(fiter) = opts.filter {
|
||||
if fiter(&fi) {
|
||||
if let Some(filter) = opts.filter {
|
||||
if filter(&fi) {
|
||||
let item = ObjectInfoOrErr {
|
||||
item: Some(ObjectInfo::from_file_info(&fi, &bucket, &fi.name, {
|
||||
if let Some(v) = &vcf { v.versioned(&fi.name) } else { false }
|
||||
@@ -899,8 +899,8 @@ impl ECStore {
|
||||
}
|
||||
|
||||
for fi in fvs.versions.iter() {
|
||||
if let Some(fiter) = opts.filter {
|
||||
if fiter(fi) {
|
||||
if let Some(filter) = opts.filter {
|
||||
if filter(fi) {
|
||||
let item = ObjectInfoOrErr {
|
||||
item: Some(ObjectInfo::from_file_info(fi, &bucket, &fi.name, {
|
||||
if let Some(v) = &vcf { v.versioned(&fi.name) } else { false }
|
||||
@@ -972,7 +972,7 @@ async fn gather_results(
|
||||
let mut sender = Some(results_tx);
|
||||
|
||||
let mut recv = recv;
|
||||
let mut entrys = Vec::new();
|
||||
let mut entries = Vec::new();
|
||||
while let Some(mut entry) = recv.recv().await {
|
||||
if returned {
|
||||
continue;
|
||||
@@ -1009,11 +1009,11 @@ async fn gather_results(
|
||||
|
||||
// TODO: Lifecycle
|
||||
|
||||
if opts.limit > 0 && entrys.len() >= opts.limit as usize {
|
||||
if opts.limit > 0 && entries.len() >= opts.limit as usize {
|
||||
if let Some(tx) = sender {
|
||||
tx.send(MetaCacheEntriesSortedResult {
|
||||
entries: Some(MetaCacheEntriesSorted {
|
||||
o: MetaCacheEntries(entrys.clone()),
|
||||
o: MetaCacheEntries(entries.clone()),
|
||||
..Default::default()
|
||||
}),
|
||||
err: None,
|
||||
@@ -1027,15 +1027,15 @@ async fn gather_results(
|
||||
continue;
|
||||
}
|
||||
|
||||
entrys.push(Some(entry));
|
||||
// entrys.push(entry);
|
||||
entries.push(Some(entry));
|
||||
// entries.push(entry);
|
||||
}
|
||||
|
||||
// finish not full, return eof
|
||||
if let Some(tx) = sender {
|
||||
tx.send(MetaCacheEntriesSortedResult {
|
||||
entries: Some(MetaCacheEntriesSorted {
|
||||
o: MetaCacheEntries(entrys.clone()),
|
||||
o: MetaCacheEntries(entries.clone()),
|
||||
..Default::default()
|
||||
}),
|
||||
err: Some(Error::Unexpected.into()),
|
||||
@@ -1125,10 +1125,10 @@ async fn merge_entry_channels(
|
||||
|
||||
if path::clean(&best_entry.name) == path::clean(&other_entry.name) {
|
||||
let dir_matches = best_entry.is_dir() && other_entry.is_dir();
|
||||
let suffix_matche =
|
||||
let suffix_matches =
|
||||
best_entry.name.ends_with(SLASH_SEPARATOR) == other_entry.name.ends_with(SLASH_SEPARATOR);
|
||||
|
||||
if dir_matches && suffix_matche {
|
||||
if dir_matches && suffix_matches {
|
||||
to_merge.push(other_idx);
|
||||
continue;
|
||||
}
|
||||
@@ -1286,7 +1286,7 @@ impl SetDisks {
|
||||
fallback_disks: fallback_disks.iter().cloned().map(Some).collect(),
|
||||
bucket: opts.bucket,
|
||||
path: opts.base_dir,
|
||||
recursice: opts.recursive,
|
||||
recursive: opts.recursive,
|
||||
filter_prefix: opts.filter_prefix,
|
||||
forward_to: opts.marker,
|
||||
min_disks: listing_quorum,
|
||||
|
||||
@@ -46,6 +46,20 @@ pub struct ObjectPartInfo {
|
||||
pub index: Option<Bytes>,
|
||||
// Checksums holds checksums of the part
|
||||
pub checksums: Option<HashMap<String, String>>,
|
||||
pub error: Option<String>,
|
||||
}
|
||||
|
||||
impl ObjectPartInfo {
|
||||
pub fn marshal_msg(&self) -> Result<Vec<u8>> {
|
||||
let mut buf = Vec::new();
|
||||
self.serialize(&mut Serializer::new(&mut buf))?;
|
||||
Ok(buf)
|
||||
}
|
||||
|
||||
pub fn unmarshal(buf: &[u8]) -> Result<Self> {
|
||||
let t: ObjectPartInfo = rmp_serde::from_slice(buf)?;
|
||||
Ok(t)
|
||||
}
|
||||
}
|
||||
|
||||
#[derive(Serialize, Deserialize, Debug, PartialEq, Default, Clone)]
|
||||
@@ -201,7 +215,7 @@ pub struct FileInfo {
|
||||
|
||||
impl FileInfo {
|
||||
pub fn new(object: &str, data_blocks: usize, parity_blocks: usize) -> Self {
|
||||
let indexs = {
|
||||
let indices = {
|
||||
let cardinality = data_blocks + parity_blocks;
|
||||
let mut nums = vec![0; cardinality];
|
||||
let key_crc = crc32fast::hash(object.as_bytes());
|
||||
@@ -219,7 +233,7 @@ impl FileInfo {
|
||||
data_blocks,
|
||||
parity_blocks,
|
||||
block_size: BLOCK_SIZE_V2,
|
||||
distribution: indexs,
|
||||
distribution: indices,
|
||||
..Default::default()
|
||||
},
|
||||
..Default::default()
|
||||
@@ -287,6 +301,7 @@ impl FileInfo {
|
||||
actual_size,
|
||||
index,
|
||||
checksums: None,
|
||||
error: None,
|
||||
};
|
||||
|
||||
for p in self.parts.iter_mut() {
|
||||
|
||||
@@ -702,7 +702,7 @@ impl FileMeta {
|
||||
})
|
||||
}
|
||||
|
||||
pub fn lastest_mod_time(&self) -> Option<OffsetDateTime> {
|
||||
pub fn latest_mod_time(&self) -> Option<OffsetDateTime> {
|
||||
if self.versions.is_empty() {
|
||||
return None;
|
||||
}
|
||||
@@ -1762,7 +1762,7 @@ impl MetaDeleteMarker {
|
||||
|
||||
// self.meta_sys = Some(map);
|
||||
// }
|
||||
// name => return Err(Error::other(format!("not suport field name {name}"))),
|
||||
// name => return Err(Error::other(format!("not support field name {name}"))),
|
||||
// }
|
||||
// }
|
||||
|
||||
@@ -1962,32 +1962,32 @@ pub fn merge_file_meta_versions(
|
||||
n_versions += 1;
|
||||
}
|
||||
} else {
|
||||
let mut lastest_count = 0;
|
||||
let mut latest_count = 0;
|
||||
for (i, ver) in tops.iter().enumerate() {
|
||||
if ver.header == latest.header {
|
||||
lastest_count += 1;
|
||||
latest_count += 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
if i == 0 || ver.header.sorts_before(&latest.header) {
|
||||
if i == 0 || lastest_count == 0 {
|
||||
lastest_count = 1;
|
||||
if i == 0 || latest_count == 0 {
|
||||
latest_count = 1;
|
||||
} else if !strict && ver.header.matches_not_strict(&latest.header) {
|
||||
lastest_count += 1;
|
||||
latest_count += 1;
|
||||
} else {
|
||||
lastest_count = 1;
|
||||
latest_count = 1;
|
||||
}
|
||||
latest = ver.clone();
|
||||
continue;
|
||||
}
|
||||
|
||||
// Mismatch, but older.
|
||||
if lastest_count > 0 && !strict && ver.header.matches_not_strict(&latest.header) {
|
||||
lastest_count += 1;
|
||||
if latest_count > 0 && !strict && ver.header.matches_not_strict(&latest.header) {
|
||||
latest_count += 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
if lastest_count > 0 && ver.header.version_id == latest.header.version_id {
|
||||
if latest_count > 0 && ver.header.version_id == latest.header.version_id {
|
||||
let mut x: HashMap<FileMetaVersionHeader, usize> = HashMap::new();
|
||||
for a in tops.iter() {
|
||||
if a.header.version_id != ver.header.version_id {
|
||||
@@ -1999,12 +1999,12 @@ pub fn merge_file_meta_versions(
|
||||
}
|
||||
*x.entry(a_clone.header).or_insert(1) += 1;
|
||||
}
|
||||
lastest_count = 0;
|
||||
latest_count = 0;
|
||||
for (k, v) in x.iter() {
|
||||
if *v < lastest_count {
|
||||
if *v < latest_count {
|
||||
continue;
|
||||
}
|
||||
if *v == lastest_count && latest.header.sorts_before(k) {
|
||||
if *v == latest_count && latest.header.sorts_before(k) {
|
||||
continue;
|
||||
}
|
||||
tops.iter().for_each(|a| {
|
||||
@@ -2017,12 +2017,12 @@ pub fn merge_file_meta_versions(
|
||||
}
|
||||
});
|
||||
|
||||
lastest_count = *v;
|
||||
latest_count = *v;
|
||||
}
|
||||
break;
|
||||
}
|
||||
}
|
||||
if lastest_count >= quorum {
|
||||
if latest_count >= quorum {
|
||||
if !latest.header.free_version() {
|
||||
n_versions += 1;
|
||||
}
|
||||
|
||||
@@ -221,7 +221,7 @@ impl MetaCacheEntry {
|
||||
};
|
||||
|
||||
if self_vers.versions.len() != other_vers.versions.len() {
|
||||
match self_vers.lastest_mod_time().cmp(&other_vers.lastest_mod_time()) {
|
||||
match self_vers.latest_mod_time().cmp(&other_vers.latest_mod_time()) {
|
||||
Ordering::Greater => return (Some(self.clone()), false),
|
||||
Ordering::Less => return (Some(other.clone()), false),
|
||||
_ => {}
|
||||
|
||||
@@ -45,7 +45,6 @@ base64-simd = { workspace = true }
|
||||
jsonwebtoken = { workspace = true }
|
||||
tracing.workspace = true
|
||||
rustfs-madmin.workspace = true
|
||||
lazy_static.workspace = true
|
||||
rustfs-utils = { workspace = true, features = ["path"] }
|
||||
|
||||
[dev-dependencies]
|
||||
|
||||
@@ -13,6 +13,7 @@
|
||||
// limitations under the License.
|
||||
|
||||
use crate::error::{Error, Result, is_err_config_not_found};
|
||||
use crate::sys::get_claims_from_token_with_secret;
|
||||
use crate::{
|
||||
cache::{Cache, CacheEntity},
|
||||
error::{Error as IamError, is_err_no_such_group, is_err_no_such_policy, is_err_no_such_user},
|
||||
@@ -26,7 +27,7 @@ use rustfs_ecstore::global::get_global_action_cred;
|
||||
use rustfs_madmin::{AccountStatus, AddOrUpdateUserReq, GroupDesc};
|
||||
use rustfs_policy::{
|
||||
arn::ARN,
|
||||
auth::{self, Credentials, UserIdentity, get_claims_from_token_with_secret, is_secret_key_valid, jwt_sign},
|
||||
auth::{self, Credentials, UserIdentity, is_secret_key_valid, jwt_sign},
|
||||
format::Format,
|
||||
policy::{
|
||||
EMBEDDED_POLICY_TYPE, INHERITED_POLICY_TYPE, Policy, PolicyDoc, default::DEFAULT_POLICIES, iam_policy_claim_name_sa,
|
||||
@@ -89,7 +90,7 @@ where
|
||||
T: Store,
|
||||
{
|
||||
pub(crate) async fn new(api: T) -> Arc<Self> {
|
||||
let (sender, reciver) = mpsc::channel::<i64>(100);
|
||||
let (sender, receiver) = mpsc::channel::<i64>(100);
|
||||
|
||||
let sys = Arc::new(Self {
|
||||
api,
|
||||
@@ -100,11 +101,11 @@ where
|
||||
last_timestamp: AtomicI64::new(0),
|
||||
});
|
||||
|
||||
sys.clone().init(reciver).await.unwrap();
|
||||
sys.clone().init(receiver).await.unwrap();
|
||||
sys
|
||||
}
|
||||
|
||||
async fn init(self: Arc<Self>, reciver: Receiver<i64>) -> Result<()> {
|
||||
async fn init(self: Arc<Self>, receiver: Receiver<i64>) -> Result<()> {
|
||||
self.clone().save_iam_formatter().await?;
|
||||
self.clone().load().await?;
|
||||
|
||||
@@ -117,7 +118,7 @@ where
|
||||
let s = Arc::clone(&self);
|
||||
async move {
|
||||
let ticker = tokio::time::interval(Duration::from_secs(120));
|
||||
tokio::pin!(ticker, reciver);
|
||||
tokio::pin!(ticker, receiver);
|
||||
loop {
|
||||
select! {
|
||||
_ = ticker.tick() => {
|
||||
@@ -126,13 +127,13 @@ where
|
||||
error!("iam load err {:?}", err);
|
||||
}
|
||||
},
|
||||
i = reciver.recv() => {
|
||||
info!("iam load reciver");
|
||||
i = receiver.recv() => {
|
||||
info!("iam load receiver");
|
||||
match i {
|
||||
Some(t) => {
|
||||
let last = s.last_timestamp.load(Ordering::Relaxed);
|
||||
if last <= t {
|
||||
info!("iam load reciver load");
|
||||
info!("iam load receiver load");
|
||||
if let Err(err) =s.clone().load().await{
|
||||
error!("iam load err {:?}", err);
|
||||
}
|
||||
@@ -813,7 +814,7 @@ where
|
||||
let mp = MappedPolicy::new(policy);
|
||||
let (_, combined_policy_stmt) = filter_policies(&self.cache, &mp.policies, "temp");
|
||||
if combined_policy_stmt.is_empty() {
|
||||
return Err(Error::other(format!("need poliy not found {}", IamError::NoSuchPolicy)));
|
||||
return Err(Error::other(format!("Required policy not found: {}", IamError::NoSuchPolicy)));
|
||||
}
|
||||
|
||||
self.api
|
||||
@@ -986,7 +987,7 @@ where
|
||||
_ => auth::ACCOUNT_OFF,
|
||||
}
|
||||
};
|
||||
let user_entiry = UserIdentity::from(Credentials {
|
||||
let user_entry = UserIdentity::from(Credentials {
|
||||
access_key: access_key.to_string(),
|
||||
secret_key: args.secret_key.to_string(),
|
||||
status: status.to_owned(),
|
||||
@@ -994,10 +995,10 @@ where
|
||||
});
|
||||
|
||||
self.api
|
||||
.save_user_identity(access_key, UserType::Reg, user_entiry.clone(), None)
|
||||
.save_user_identity(access_key, UserType::Reg, user_entry.clone(), None)
|
||||
.await?;
|
||||
|
||||
self.update_user_with_claims(access_key, user_entiry)?;
|
||||
self.update_user_with_claims(access_key, user_entry)?;
|
||||
|
||||
Ok(OffsetDateTime::now_utc())
|
||||
}
|
||||
@@ -1103,7 +1104,7 @@ where
|
||||
}
|
||||
};
|
||||
|
||||
let user_entiry = UserIdentity::from(Credentials {
|
||||
let user_entry = UserIdentity::from(Credentials {
|
||||
access_key: access_key.to_string(),
|
||||
secret_key: u.credentials.secret_key.clone(),
|
||||
status: status.to_owned(),
|
||||
@@ -1111,10 +1112,10 @@ where
|
||||
});
|
||||
|
||||
self.api
|
||||
.save_user_identity(access_key, UserType::Reg, user_entiry.clone(), None)
|
||||
.save_user_identity(access_key, UserType::Reg, user_entry.clone(), None)
|
||||
.await?;
|
||||
|
||||
self.update_user_with_claims(access_key, user_entiry)?;
|
||||
self.update_user_with_claims(access_key, user_entry)?;
|
||||
|
||||
Ok(OffsetDateTime::now_utc())
|
||||
}
|
||||
|
||||
@@ -62,8 +62,12 @@ pub trait Store: Clone + Send + Sync + 'static {
|
||||
is_group: bool,
|
||||
m: &mut HashMap<String, MappedPolicy>,
|
||||
) -> Result<()>;
|
||||
async fn load_mapped_policys(&self, user_type: UserType, is_group: bool, m: &mut HashMap<String, MappedPolicy>)
|
||||
-> Result<()>;
|
||||
async fn load_mapped_policies(
|
||||
&self,
|
||||
user_type: UserType,
|
||||
is_group: bool,
|
||||
m: &mut HashMap<String, MappedPolicy>,
|
||||
) -> Result<()>;
|
||||
|
||||
async fn load_all(&self, cache: &Cache) -> Result<()>;
|
||||
}
|
||||
|
||||
@@ -20,7 +20,6 @@ use crate::{
|
||||
manager::{extract_jwt_claims, get_default_policyes},
|
||||
};
|
||||
use futures::future::join_all;
|
||||
use lazy_static::lazy_static;
|
||||
use rustfs_ecstore::{
|
||||
config::{
|
||||
RUSTFS_CONFIG_PREFIX,
|
||||
@@ -34,25 +33,28 @@ use rustfs_ecstore::{
|
||||
use rustfs_policy::{auth::UserIdentity, policy::PolicyDoc};
|
||||
use rustfs_utils::path::{SLASH_SEPARATOR, path_join_buf};
|
||||
use serde::{Serialize, de::DeserializeOwned};
|
||||
use std::sync::LazyLock;
|
||||
use std::{collections::HashMap, sync::Arc};
|
||||
use tokio::sync::broadcast::{self, Receiver as B_Receiver};
|
||||
use tokio::sync::mpsc::{self, Sender};
|
||||
use tracing::{debug, info, warn};
|
||||
|
||||
lazy_static! {
|
||||
pub static ref IAM_CONFIG_PREFIX: String = format!("{}/iam", RUSTFS_CONFIG_PREFIX);
|
||||
pub static ref IAM_CONFIG_USERS_PREFIX: String = format!("{}/iam/users/", RUSTFS_CONFIG_PREFIX);
|
||||
pub static ref IAM_CONFIG_SERVICE_ACCOUNTS_PREFIX: String = format!("{}/iam/service-accounts/", RUSTFS_CONFIG_PREFIX);
|
||||
pub static ref IAM_CONFIG_GROUPS_PREFIX: String = format!("{}/iam/groups/", RUSTFS_CONFIG_PREFIX);
|
||||
pub static ref IAM_CONFIG_POLICIES_PREFIX: String = format!("{}/iam/policies/", RUSTFS_CONFIG_PREFIX);
|
||||
pub static ref IAM_CONFIG_STS_PREFIX: String = format!("{}/iam/sts/", RUSTFS_CONFIG_PREFIX);
|
||||
pub static ref IAM_CONFIG_POLICY_DB_PREFIX: String = format!("{}/iam/policydb/", RUSTFS_CONFIG_PREFIX);
|
||||
pub static ref IAM_CONFIG_POLICY_DB_USERS_PREFIX: String = format!("{}/iam/policydb/users/", RUSTFS_CONFIG_PREFIX);
|
||||
pub static ref IAM_CONFIG_POLICY_DB_STS_USERS_PREFIX: String = format!("{}/iam/policydb/sts-users/", RUSTFS_CONFIG_PREFIX);
|
||||
pub static ref IAM_CONFIG_POLICY_DB_SERVICE_ACCOUNTS_PREFIX: String =
|
||||
format!("{}/iam/policydb/service-accounts/", RUSTFS_CONFIG_PREFIX);
|
||||
pub static ref IAM_CONFIG_POLICY_DB_GROUPS_PREFIX: String = format!("{}/iam/policydb/groups/", RUSTFS_CONFIG_PREFIX);
|
||||
}
|
||||
pub static IAM_CONFIG_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam"));
|
||||
pub static IAM_CONFIG_USERS_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/users/"));
|
||||
pub static IAM_CONFIG_SERVICE_ACCOUNTS_PREFIX: LazyLock<String> =
|
||||
LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/service-accounts/"));
|
||||
pub static IAM_CONFIG_GROUPS_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/groups/"));
|
||||
pub static IAM_CONFIG_POLICIES_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/policies/"));
|
||||
pub static IAM_CONFIG_STS_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/sts/"));
|
||||
pub static IAM_CONFIG_POLICY_DB_PREFIX: LazyLock<String> = LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/policydb/"));
|
||||
pub static IAM_CONFIG_POLICY_DB_USERS_PREFIX: LazyLock<String> =
|
||||
LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/policydb/users/"));
|
||||
pub static IAM_CONFIG_POLICY_DB_STS_USERS_PREFIX: LazyLock<String> =
|
||||
LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/policydb/sts-users/"));
|
||||
pub static IAM_CONFIG_POLICY_DB_SERVICE_ACCOUNTS_PREFIX: LazyLock<String> =
|
||||
LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/policydb/service-accounts/"));
|
||||
pub static IAM_CONFIG_POLICY_DB_GROUPS_PREFIX: LazyLock<String> =
|
||||
LazyLock::new(|| format!("{RUSTFS_CONFIG_PREFIX}/iam/policydb/groups/"));
|
||||
|
||||
const IAM_IDENTITY_FILE: &str = "identity.json";
|
||||
const IAM_POLICY_FILE: &str = "policy.json";
|
||||
@@ -654,7 +656,7 @@ impl Store for ObjectStore {
|
||||
|
||||
Ok(())
|
||||
}
|
||||
async fn load_mapped_policys(
|
||||
async fn load_mapped_policies(
|
||||
&self,
|
||||
user_type: UserType,
|
||||
is_group: bool,
|
||||
|
||||
@@ -23,6 +23,7 @@ use crate::store::GroupInfo;
|
||||
use crate::store::MappedPolicy;
|
||||
use crate::store::Store;
|
||||
use crate::store::UserType;
|
||||
use crate::utils::extract_claims;
|
||||
use rustfs_ecstore::global::get_global_action_cred;
|
||||
use rustfs_madmin::AddOrUpdateUserReq;
|
||||
use rustfs_madmin::GroupDesc;
|
||||
@@ -123,13 +124,13 @@ impl<T: Store> IamSys<T> {
|
||||
})
|
||||
}
|
||||
|
||||
pub async fn load_mapped_policys(
|
||||
pub async fn load_mapped_policies(
|
||||
&self,
|
||||
user_type: UserType,
|
||||
is_group: bool,
|
||||
m: &mut HashMap<String, MappedPolicy>,
|
||||
) -> Result<()> {
|
||||
self.store.api.load_mapped_policys(user_type, is_group, m).await
|
||||
self.store.api.load_mapped_policies(user_type, is_group, m).await
|
||||
}
|
||||
|
||||
pub async fn list_polices(&self, bucket_name: &str) -> Result<HashMap<String, Policy>> {
|
||||
@@ -542,7 +543,7 @@ impl<T: Store> IamSys<T> {
|
||||
}
|
||||
};
|
||||
|
||||
if policies.is_empty() {
|
||||
if !is_owner && policies.is_empty() {
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -732,3 +733,18 @@ pub struct UpdateServiceAccountOpts {
|
||||
pub expiration: Option<OffsetDateTime>,
|
||||
pub status: Option<String>,
|
||||
}
|
||||
|
||||
pub fn get_claims_from_token_with_secret(token: &str, secret: &str) -> Result<HashMap<String, Value>> {
|
||||
let mut ms =
|
||||
extract_claims::<HashMap<String, Value>>(token, secret).map_err(|e| Error::other(format!("extract claims err {e}")))?;
|
||||
|
||||
if let Some(session_policy) = ms.claims.get(SESSION_POLICY_NAME) {
|
||||
let policy_str = session_policy.as_str().unwrap_or_default();
|
||||
let policy = base64_decode(policy_str.as_bytes()).map_err(|e| Error::other(format!("base64 decode err {e}")))?;
|
||||
ms.claims.insert(
|
||||
SESSION_POLICY_NAME_EXTRACTED.to_string(),
|
||||
Value::String(String::from_utf8(policy).map_err(|e| Error::other(format!("utf8 decode err {e}")))?),
|
||||
);
|
||||
}
|
||||
Ok(ms.claims)
|
||||
}
|
||||
|
||||
@@ -30,7 +30,6 @@ workspace = true
|
||||
|
||||
[dependencies]
|
||||
async-trait.workspace = true
|
||||
lazy_static.workspace = true
|
||||
rustfs-protos.workspace = true
|
||||
rand.workspace = true
|
||||
serde.workspace = true
|
||||
|
||||
@@ -14,12 +14,12 @@
|
||||
// limitations under the License.
|
||||
|
||||
use async_trait::async_trait;
|
||||
use lazy_static::lazy_static;
|
||||
use local_locker::LocalLocker;
|
||||
use lock_args::LockArgs;
|
||||
use remote_client::RemoteClient;
|
||||
use std::io::Result;
|
||||
use std::sync::Arc;
|
||||
use std::sync::LazyLock;
|
||||
use tokio::sync::RwLock;
|
||||
|
||||
pub mod drwmutex;
|
||||
@@ -29,9 +29,7 @@ pub mod lrwmutex;
|
||||
pub mod namespace_lock;
|
||||
pub mod remote_client;
|
||||
|
||||
lazy_static! {
|
||||
pub static ref GLOBAL_LOCAL_SERVER: Arc<RwLock<LocalLocker>> = Arc::new(RwLock::new(LocalLocker::new()));
|
||||
}
|
||||
pub static GLOBAL_LOCAL_SERVER: LazyLock<Arc<RwLock<LocalLocker>>> = LazyLock::new(|| Arc::new(RwLock::new(LocalLocker::new())));
|
||||
|
||||
type LockClient = dyn Locker;
|
||||
|
||||
|
||||
@@ -22,7 +22,7 @@ pub struct LRWMutex {
|
||||
id: RwLock<String>,
|
||||
source: RwLock<String>,
|
||||
is_write: RwLock<bool>,
|
||||
refrence: RwLock<usize>,
|
||||
reference: RwLock<usize>,
|
||||
}
|
||||
|
||||
impl LRWMutex {
|
||||
@@ -66,13 +66,13 @@ impl LRWMutex {
|
||||
|
||||
let mut locked = false;
|
||||
if is_write {
|
||||
if *self.refrence.read().await == 0 && !*self.is_write.read().await {
|
||||
*self.refrence.write().await = 1;
|
||||
if *self.reference.read().await == 0 && !*self.is_write.read().await {
|
||||
*self.reference.write().await = 1;
|
||||
*self.is_write.write().await = true;
|
||||
locked = true;
|
||||
}
|
||||
} else if !*self.is_write.read().await {
|
||||
*self.refrence.write().await += 1;
|
||||
*self.reference.write().await += 1;
|
||||
locked = true;
|
||||
}
|
||||
|
||||
@@ -115,13 +115,13 @@ impl LRWMutex {
|
||||
async fn unlock(&self, is_write: bool) -> bool {
|
||||
let mut unlocked = false;
|
||||
if is_write {
|
||||
if *self.is_write.read().await && *self.refrence.read().await == 1 {
|
||||
*self.refrence.write().await = 0;
|
||||
if *self.is_write.read().await && *self.reference.read().await == 1 {
|
||||
*self.reference.write().await = 0;
|
||||
*self.is_write.write().await = false;
|
||||
unlocked = true;
|
||||
}
|
||||
} else if !*self.is_write.read().await && *self.refrence.read().await > 0 {
|
||||
*self.refrence.write().await -= 1;
|
||||
} else if !*self.is_write.read().await && *self.reference.read().await > 0 {
|
||||
*self.reference.write().await -= 1;
|
||||
unlocked = true;
|
||||
}
|
||||
|
||||
@@ -129,7 +129,7 @@ impl LRWMutex {
|
||||
}
|
||||
|
||||
pub async fn force_un_lock(&self) {
|
||||
*self.refrence.write().await = 0;
|
||||
*self.reference.write().await = 0;
|
||||
*self.is_write.write().await = false;
|
||||
}
|
||||
}
|
||||
|
||||
@@ -14,7 +14,8 @@
|
||||
|
||||
use rustfs_config::{
|
||||
APP_NAME, DEFAULT_LOG_DIR, DEFAULT_LOG_FILENAME, DEFAULT_LOG_KEEP_FILES, DEFAULT_LOG_LEVEL, DEFAULT_LOG_ROTATION_SIZE_MB,
|
||||
DEFAULT_LOG_ROTATION_TIME, ENVIRONMENT, METER_INTERVAL, SAMPLE_RATIO, SERVICE_VERSION, USE_STDOUT,
|
||||
DEFAULT_LOG_ROTATION_TIME, DEFAULT_OBS_LOG_FILENAME, DEFAULT_SINK_FILE_LOG_FILE, ENVIRONMENT, METER_INTERVAL, SAMPLE_RATIO,
|
||||
SERVICE_VERSION, USE_STDOUT,
|
||||
};
|
||||
use serde::{Deserialize, Serialize};
|
||||
use std::env;
|
||||
@@ -104,7 +105,7 @@ impl OtelConfig {
|
||||
log_filename: env::var("RUSTFS_OBS_LOG_FILENAME")
|
||||
.ok()
|
||||
.and_then(|v| v.parse().ok())
|
||||
.or(Some(DEFAULT_LOG_FILENAME.to_string())),
|
||||
.or(Some(DEFAULT_OBS_LOG_FILENAME.to_string())),
|
||||
log_rotation_size_mb: env::var("RUSTFS_OBS_LOG_ROTATION_SIZE_MB")
|
||||
.ok()
|
||||
.and_then(|v| v.parse().ok())
|
||||
@@ -210,16 +211,15 @@ pub struct FileSinkConfig {
|
||||
|
||||
impl FileSinkConfig {
|
||||
pub fn get_default_log_path() -> String {
|
||||
let temp_dir = env::temp_dir().join("rustfs");
|
||||
|
||||
let temp_dir = env::temp_dir().join(DEFAULT_LOG_FILENAME);
|
||||
if let Err(e) = std::fs::create_dir_all(&temp_dir) {
|
||||
eprintln!("Failed to create log directory: {e}");
|
||||
return "rustfs/rustfs.log".to_string();
|
||||
return DEFAULT_LOG_DIR.to_string();
|
||||
}
|
||||
temp_dir
|
||||
.join("rustfs.log")
|
||||
.join(DEFAULT_SINK_FILE_LOG_FILE)
|
||||
.to_str()
|
||||
.unwrap_or("rustfs/rustfs.log")
|
||||
.unwrap_or(DEFAULT_LOG_DIR)
|
||||
.to_string()
|
||||
}
|
||||
pub fn new() -> Self {
|
||||
|
||||
@@ -77,8 +77,7 @@ impl Logger {
|
||||
}
|
||||
|
||||
/// Asynchronous logging of unified log entries
|
||||
#[tracing::instrument(skip(self), fields(log_source = "logger"))]
|
||||
#[tracing::instrument(level = "error", skip_all)]
|
||||
#[tracing::instrument(skip_all, fields(log_source = "logger"))]
|
||||
pub async fn log_entry(&self, entry: UnifiedLogEntry) -> Result<(), GlobalError> {
|
||||
// Extract information for tracing based on entry type
|
||||
match &entry {
|
||||
|
||||
@@ -14,6 +14,7 @@
|
||||
|
||||
use crate::{AppConfig, SinkConfig, UnifiedLogEntry};
|
||||
use async_trait::async_trait;
|
||||
use rustfs_config::DEFAULT_SINK_FILE_LOG_FILE;
|
||||
use std::sync::Arc;
|
||||
|
||||
#[cfg(feature = "file")]
|
||||
@@ -71,7 +72,7 @@ pub async fn create_sinks(config: &AppConfig) -> Vec<Arc<dyn Sink>> {
|
||||
SinkConfig::File(file_config) => {
|
||||
tracing::debug!("FileSink: Using path: {}", file_config.path);
|
||||
match file::FileSink::new(
|
||||
file_config.path.clone(),
|
||||
format!("{}/{}", file_config.path.clone(), DEFAULT_SINK_FILE_LOG_FILE),
|
||||
file_config.buffer_size.unwrap_or(8192),
|
||||
file_config.flush_interval_ms.unwrap_or(1000),
|
||||
file_config.flush_threshold.unwrap_or(100),
|
||||
|
||||
@@ -36,6 +36,7 @@ use rustfs_config::{
|
||||
use rustfs_utils::get_local_ip_with_default;
|
||||
use smallvec::SmallVec;
|
||||
use std::borrow::Cow;
|
||||
use std::fs;
|
||||
use std::io::IsTerminal;
|
||||
use tracing::info;
|
||||
use tracing_error::ErrorLayer;
|
||||
@@ -295,6 +296,21 @@ pub(crate) fn init_telemetry(config: &OtelConfig) -> OtelGuard {
|
||||
let log_directory = config.log_directory.as_deref().unwrap_or(DEFAULT_LOG_DIR);
|
||||
let log_filename = config.log_filename.as_deref().unwrap_or(service_name);
|
||||
|
||||
if let Err(e) = fs::create_dir_all(log_directory) {
|
||||
eprintln!("Failed to create log directory {log_directory}: {e}");
|
||||
}
|
||||
#[cfg(unix)]
|
||||
{
|
||||
// Linux/macOS Setting Permissions
|
||||
// Set the log directory permissions to 755 (rwxr-xr-x)
|
||||
use std::fs::Permissions;
|
||||
use std::os::unix::fs::PermissionsExt;
|
||||
match fs::set_permissions(log_directory, Permissions::from_mode(0o755)) {
|
||||
Ok(_) => eprintln!("Log directory permissions set to 755: {log_directory}"),
|
||||
Err(e) => eprintln!("Failed to set log directory permissions {log_directory}: {e}"),
|
||||
}
|
||||
}
|
||||
|
||||
// Build log cutting conditions
|
||||
let rotation_criterion = match (config.log_rotation_time.as_deref(), config.log_rotation_size_mb) {
|
||||
// Cut by time and size at the same time
|
||||
@@ -354,13 +370,15 @@ pub(crate) fn init_telemetry(config: &OtelConfig) -> OtelGuard {
|
||||
FileSpec::default()
|
||||
.directory(log_directory)
|
||||
.basename(log_filename)
|
||||
.suffix("log"),
|
||||
.suppress_timestamp(),
|
||||
)
|
||||
.rotate(rotation_criterion, Naming::TimestampsDirect, Cleanup::KeepLogFiles(keep_files.into()))
|
||||
.format_for_files(format_for_file) // Add a custom formatting function for file output
|
||||
.duplicate_to_stdout(level_filter) // Use dynamic levels
|
||||
.format_for_stdout(format_with_color) // Add a custom formatting function for terminal output
|
||||
.write_mode(WriteMode::Async)
|
||||
.write_mode(WriteMode::BufferAndFlush)
|
||||
.append() // Avoid clearing existing logs at startup
|
||||
.print_message() // Startup information output to console
|
||||
.start();
|
||||
|
||||
if let Ok(logger) = flexi_logger_result {
|
||||
@@ -420,7 +438,7 @@ fn format_with_color(w: &mut dyn std::io::Write, now: &mut DeferredNow, record:
|
||||
writeln!(
|
||||
w,
|
||||
"[{}] {} [{}] [{}:{}] [{}:{}] {}",
|
||||
now.now().format("%Y-%m-%d %H:%M:%S%.6f"),
|
||||
now.now().format(flexi_logger::TS_DASHES_BLANK_COLONS_DOT_BLANK),
|
||||
level_style.paint(level.to_string()),
|
||||
Color::Magenta.paint(record.target()),
|
||||
Color::Blue.paint(record.file().unwrap_or("unknown")),
|
||||
@@ -444,7 +462,7 @@ fn format_for_file(w: &mut dyn std::io::Write, now: &mut DeferredNow, record: &R
|
||||
writeln!(
|
||||
w,
|
||||
"[{}] {} [{}] [{}:{}] [{}:{}] {}",
|
||||
now.now().format("%Y-%m-%d %H:%M:%S%.6f"),
|
||||
now.now().format(flexi_logger::TS_DASHES_BLANK_COLONS_DOT_BLANK),
|
||||
level,
|
||||
record.target(),
|
||||
record.file().unwrap_or("unknown"),
|
||||
|
||||
@@ -16,8 +16,6 @@ use crate::error::Error as IamError;
|
||||
use crate::error::{Error, Result};
|
||||
use crate::policy::{INHERITED_POLICY_TYPE, Policy, Validator, iam_policy_claim_name_sa};
|
||||
use crate::utils;
|
||||
use crate::utils::extract_claims;
|
||||
use serde::de::DeserializeOwned;
|
||||
use serde::{Deserialize, Serialize};
|
||||
use serde_json::{Value, json};
|
||||
use std::collections::HashMap;
|
||||
@@ -253,12 +251,6 @@ pub fn create_new_credentials_with_metadata(
|
||||
})
|
||||
}
|
||||
|
||||
pub fn get_claims_from_token_with_secret<T: DeserializeOwned>(token: &str, secret: &str) -> Result<T> {
|
||||
let ms = extract_claims::<T>(token, secret)?;
|
||||
// TODO SessionPolicyName
|
||||
Ok(ms.claims)
|
||||
}
|
||||
|
||||
pub fn jwt_sign<T: Serialize>(claims: &T, token_secret: &str) -> Result<String> {
|
||||
let token = utils::generate_jwt(claims, token_secret)?;
|
||||
Ok(token)
|
||||
|
||||
@@ -107,7 +107,7 @@ mod tests {
|
||||
#[test_case("jwt:dwebsite/aaa")]
|
||||
#[test_case("sfvc:DuratdionSeconds")]
|
||||
#[test_case("svc:DursationSeconds/aaa")]
|
||||
fn test_deserialize_falied(key: &str) {
|
||||
fn test_deserialize_failed(key: &str) {
|
||||
let val = serde_json::from_str::<Key>(key);
|
||||
assert!(val.is_err());
|
||||
}
|
||||
|
||||
@@ -30,9 +30,9 @@ use super::{
|
||||
pub struct ResourceSet(pub HashSet<Resource>);
|
||||
|
||||
impl ResourceSet {
|
||||
pub fn is_match(&self, resource: &str, conditons: &HashMap<String, Vec<String>>) -> bool {
|
||||
pub fn is_match(&self, resource: &str, conditions: &HashMap<String, Vec<String>>) -> bool {
|
||||
for re in self.0.iter() {
|
||||
if re.is_match(resource, conditons) {
|
||||
if re.is_match(resource, conditions) {
|
||||
return true;
|
||||
}
|
||||
}
|
||||
@@ -85,14 +85,14 @@ pub enum Resource {
|
||||
impl Resource {
|
||||
pub const S3_PREFIX: &'static str = "arn:aws:s3:::";
|
||||
|
||||
pub fn is_match(&self, resource: &str, conditons: &HashMap<String, Vec<String>>) -> bool {
|
||||
pub fn is_match(&self, resource: &str, conditions: &HashMap<String, Vec<String>>) -> bool {
|
||||
let mut pattern = match self {
|
||||
Resource::S3(s) => s.to_owned(),
|
||||
Resource::Kms(s) => s.to_owned(),
|
||||
};
|
||||
if !conditons.is_empty() {
|
||||
if !conditions.is_empty() {
|
||||
for key in KeyName::COMMON_KEYS {
|
||||
if let Some(rvalue) = conditons.get(key.name()) {
|
||||
if let Some(rvalue) = conditions.get(key.name()) {
|
||||
if matches!(rvalue.first().map(|c| !c.is_empty()), Some(true)) {
|
||||
pattern = pattern.replace(&key.var_name(), &rvalue[0]);
|
||||
}
|
||||
|
||||
@@ -1,15 +1 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub mod models;
|
||||
|
||||
@@ -1,17 +1,3 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// automatically generated by the FlatBuffers compiler, do not modify
|
||||
|
||||
// @generated
|
||||
|
||||
@@ -1,17 +1,4 @@
|
||||
#![allow(unused_imports)]
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
#![allow(clippy::all)]
|
||||
pub mod proto_gen;
|
||||
|
||||
|
||||
@@ -1,15 +1 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
pub mod node_service;
|
||||
|
||||
@@ -1,17 +1,3 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
// This file is @generated by prost-build.
|
||||
/// --------------------------------------------------------------------
|
||||
#[derive(Clone, PartialEq, ::prost::Message)]
|
||||
@@ -184,6 +170,24 @@ pub struct VerifyFileResponse {
|
||||
pub error: ::core::option::Option<Error>,
|
||||
}
|
||||
#[derive(Clone, PartialEq, ::prost::Message)]
|
||||
pub struct ReadPartsRequest {
|
||||
#[prost(string, tag = "1")]
|
||||
pub disk: ::prost::alloc::string::String,
|
||||
#[prost(string, tag = "2")]
|
||||
pub bucket: ::prost::alloc::string::String,
|
||||
#[prost(string, repeated, tag = "3")]
|
||||
pub paths: ::prost::alloc::vec::Vec<::prost::alloc::string::String>,
|
||||
}
|
||||
#[derive(Clone, PartialEq, ::prost::Message)]
|
||||
pub struct ReadPartsResponse {
|
||||
#[prost(bool, tag = "1")]
|
||||
pub success: bool,
|
||||
#[prost(bytes = "bytes", tag = "2")]
|
||||
pub object_part_infos: ::prost::bytes::Bytes,
|
||||
#[prost(message, optional, tag = "3")]
|
||||
pub error: ::core::option::Option<Error>,
|
||||
}
|
||||
#[derive(Clone, PartialEq, ::prost::Message)]
|
||||
pub struct CheckPartsRequest {
|
||||
/// indicate which one in the disks
|
||||
#[prost(string, tag = "1")]
|
||||
@@ -1295,6 +1299,21 @@ pub mod node_service_client {
|
||||
.insert(GrpcMethod::new("node_service.NodeService", "VerifyFile"));
|
||||
self.inner.unary(req, path, codec).await
|
||||
}
|
||||
pub async fn read_parts(
|
||||
&mut self,
|
||||
request: impl tonic::IntoRequest<super::ReadPartsRequest>,
|
||||
) -> std::result::Result<tonic::Response<super::ReadPartsResponse>, tonic::Status> {
|
||||
self.inner
|
||||
.ready()
|
||||
.await
|
||||
.map_err(|e| tonic::Status::unknown(format!("Service was not ready: {}", e.into())))?;
|
||||
let codec = tonic::codec::ProstCodec::default();
|
||||
let path = http::uri::PathAndQuery::from_static("/node_service.NodeService/ReadParts");
|
||||
let mut req = request.into_request();
|
||||
req.extensions_mut()
|
||||
.insert(GrpcMethod::new("node_service.NodeService", "ReadParts"));
|
||||
self.inner.unary(req, path, codec).await
|
||||
}
|
||||
pub async fn check_parts(
|
||||
&mut self,
|
||||
request: impl tonic::IntoRequest<super::CheckPartsRequest>,
|
||||
@@ -2338,6 +2357,10 @@ pub mod node_service_server {
|
||||
&self,
|
||||
request: tonic::Request<super::VerifyFileRequest>,
|
||||
) -> std::result::Result<tonic::Response<super::VerifyFileResponse>, tonic::Status>;
|
||||
async fn read_parts(
|
||||
&self,
|
||||
request: tonic::Request<super::ReadPartsRequest>,
|
||||
) -> std::result::Result<tonic::Response<super::ReadPartsResponse>, tonic::Status>;
|
||||
async fn check_parts(
|
||||
&self,
|
||||
request: tonic::Request<super::CheckPartsRequest>,
|
||||
@@ -2972,6 +2995,34 @@ pub mod node_service_server {
|
||||
};
|
||||
Box::pin(fut)
|
||||
}
|
||||
"/node_service.NodeService/ReadParts" => {
|
||||
#[allow(non_camel_case_types)]
|
||||
struct ReadPartsSvc<T: NodeService>(pub Arc<T>);
|
||||
impl<T: NodeService> tonic::server::UnaryService<super::ReadPartsRequest> for ReadPartsSvc<T> {
|
||||
type Response = super::ReadPartsResponse;
|
||||
type Future = BoxFuture<tonic::Response<Self::Response>, tonic::Status>;
|
||||
fn call(&mut self, request: tonic::Request<super::ReadPartsRequest>) -> Self::Future {
|
||||
let inner = Arc::clone(&self.0);
|
||||
let fut = async move { <T as NodeService>::read_parts(&inner, request).await };
|
||||
Box::pin(fut)
|
||||
}
|
||||
}
|
||||
let accept_compression_encodings = self.accept_compression_encodings;
|
||||
let send_compression_encodings = self.send_compression_encodings;
|
||||
let max_decoding_message_size = self.max_decoding_message_size;
|
||||
let max_encoding_message_size = self.max_encoding_message_size;
|
||||
let inner = self.inner.clone();
|
||||
let fut = async move {
|
||||
let method = ReadPartsSvc(inner);
|
||||
let codec = tonic::codec::ProstCodec::default();
|
||||
let mut grpc = tonic::server::Grpc::new(codec)
|
||||
.apply_compression_config(accept_compression_encodings, send_compression_encodings)
|
||||
.apply_max_message_size_config(max_decoding_message_size, max_encoding_message_size);
|
||||
let res = grpc.unary(method, req).await;
|
||||
Ok(res)
|
||||
};
|
||||
Box::pin(fut)
|
||||
}
|
||||
"/node_service.NodeService/CheckParts" => {
|
||||
#[allow(non_camel_case_types)]
|
||||
struct CheckPartsSvc<T: NodeService>(pub Arc<T>);
|
||||
|
||||
@@ -39,14 +39,21 @@ pub async fn node_service_time_out_client(
Box<dyn Error>,
> {
let token: MetadataValue<_> = "rustfs rpc".parse()?;
let channel = match GLOBAL_Conn_Map.read().await.get(addr) {
Some(channel) => channel.clone(),

let channel = { GLOBAL_Conn_Map.read().await.get(addr).cloned() };

let channel = match channel {
Some(channel) => channel,
None => {
let connector = Endpoint::from_shared(addr.to_string())?.connect_timeout(Duration::from_secs(60));
connector.connect().await?
let channel = connector.connect().await?;

{
GLOBAL_Conn_Map.write().await.insert(addr.to_string(), channel.clone());
}
channel
}
};
GLOBAL_Conn_Map.write().await.insert(addr.to_string(), channel.clone());

// let timeout_channel = Timeout::new(channel, Duration::from_secs(60));
Ok(NodeServiceClient::with_interceptor(

@@ -45,7 +45,7 @@ fn main() -> Result<(), AnyError> {
|
||||
}
|
||||
|
||||
// path of proto file
|
||||
let project_root_dir = env::current_dir()?.join("");
|
||||
let project_root_dir = env::current_dir()?.join("crates/protos/src");
|
||||
let proto_dir = project_root_dir.clone();
|
||||
let proto_files = &["node.proto"];
|
||||
let proto_out_dir = project_root_dir.join("generated").join("proto_gen");
|
||||
@@ -268,7 +268,7 @@ fn protobuf_compiler_version() -> Result<Version, String> {
|
||||
}
|
||||
|
||||
fn fmt() {
|
||||
let output = Command::new("cargo").arg("fmt").arg("-p").arg("protos").status();
|
||||
let output = Command::new("cargo").arg("fmt").arg("-p").arg("rustfs-protos").status();
|
||||
|
||||
match output {
|
||||
Ok(status) => {
|
||||
|
||||
@@ -130,6 +130,18 @@ message VerifyFileResponse {
optional Error error = 3;
}

message ReadPartsRequest {
string disk = 1;
string bucket = 2;
repeated string paths = 3;
}

message ReadPartsResponse {
bool success = 1;
bytes object_part_infos = 2;
optional Error error = 3;
}

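As an aside, here is a hypothetical client-side sketch (not part of the diff) of invoking the new ReadParts RPC through the tonic-generated NodeService client. The module path, the disk/bucket/path values, and the reading of object_part_infos as an opaque serialized payload are assumptions, not taken from the repository.

// Hypothetical example only: call the new ReadParts RPC with the generated client.
use rustfs_protos::proto_gen::node_service::{ReadPartsRequest, node_service_client::NodeServiceClient};
use tonic::transport::Channel;

async fn read_parts_example(channel: Channel) -> Result<(), Box<dyn std::error::Error>> {
    let mut client = NodeServiceClient::new(channel);
    let resp = client
        .read_parts(ReadPartsRequest {
            disk: "/data/disk1".to_string(),        // assumed local disk path
            bucket: "my-bucket".to_string(),        // assumed bucket name
            paths: vec!["obj/part.1".to_string()],  // assumed object paths
        })
        .await?
        .into_inner();
    if !resp.success {
        eprintln!("ReadParts failed: {:?}", resp.error);
        return Ok(());
    }
    // object_part_infos is an opaque byte payload; per this changeset it likely carries
    // serialized ObjectPartInfo entries to be decoded by the caller.
    println!("received {} bytes of part metadata", resp.object_part_infos.len());
    Ok(())
}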
message CheckPartsRequest {
|
||||
string disk = 1; // indicate which one in the disks
|
||||
string volume = 2;
|
||||
@@ -768,6 +780,7 @@ service NodeService {
|
||||
rpc WriteAll(WriteAllRequest) returns (WriteAllResponse) {};
|
||||
rpc Delete(DeleteRequest) returns (DeleteResponse) {};
|
||||
rpc VerifyFile(VerifyFileRequest) returns (VerifyFileResponse) {};
|
||||
rpc ReadParts(ReadPartsRequest) returns (ReadPartsResponse) {};
|
||||
rpc CheckParts(CheckPartsRequest) returns (CheckPartsResponse) {};
|
||||
rpc RenamePart(RenamePartRequest) returns (RenamePartResponse) {};
|
||||
rpc RenameFile(RenameFileRequest) returns (RenameFileResponse) {};
|
||||
|
||||
@@ -45,5 +45,4 @@ serde_json.workspace = true
|
||||
md-5 = { workspace = true }
|
||||
|
||||
[dev-dependencies]
|
||||
#criterion = { version = "0.5.1", features = ["async", "async_tokio", "tokio"] }
|
||||
tokio-test = "0.4"
|
||||
tokio-test = { workspace = true }
|
||||
|
||||
@@ -32,7 +32,6 @@ async-trait.workspace = true
|
||||
datafusion = { workspace = true }
|
||||
derive_builder = { workspace = true }
|
||||
futures = { workspace = true }
|
||||
lazy_static = { workspace = true }
|
||||
parking_lot = { workspace = true }
|
||||
s3s.workspace = true
|
||||
snafu = { workspace = true, features = ["backtrace"] }
|
||||
|
||||
@@ -33,7 +33,6 @@ use datafusion::{
|
||||
execution::{RecordBatchStream, SendableRecordBatchStream},
|
||||
};
|
||||
use futures::{Stream, StreamExt};
|
||||
use lazy_static::lazy_static;
|
||||
use rustfs_s3select_api::{
|
||||
QueryError, QueryResult,
|
||||
query::{
|
||||
@@ -48,6 +47,7 @@ use rustfs_s3select_api::{
|
||||
},
|
||||
};
|
||||
use s3s::dto::{FileHeaderInfo, SelectObjectContentInput};
|
||||
use std::sync::LazyLock;
|
||||
|
||||
use crate::{
|
||||
execution::factory::QueryExecutionFactoryRef,
|
||||
@@ -55,11 +55,9 @@ use crate::{
|
||||
sql::logical::planner::DefaultLogicalPlanner,
|
||||
};
|
||||
|
||||
lazy_static! {
|
||||
static ref IGNORE: FileHeaderInfo = FileHeaderInfo::from_static(FileHeaderInfo::IGNORE);
|
||||
static ref NONE: FileHeaderInfo = FileHeaderInfo::from_static(FileHeaderInfo::NONE);
|
||||
static ref USE: FileHeaderInfo = FileHeaderInfo::from_static(FileHeaderInfo::USE);
|
||||
}
|
||||
static IGNORE: LazyLock<FileHeaderInfo> = LazyLock::new(|| FileHeaderInfo::from_static(FileHeaderInfo::IGNORE));
|
||||
static NONE: LazyLock<FileHeaderInfo> = LazyLock::new(|| FileHeaderInfo::from_static(FileHeaderInfo::NONE));
|
||||
static USE: LazyLock<FileHeaderInfo> = LazyLock::new(|| FileHeaderInfo::from_static(FileHeaderInfo::USE));
|
||||
|
||||
#[derive(Clone)]
|
||||
pub struct SimpleQueryDispatcher {
|
||||
|
||||
@@ -27,7 +27,6 @@ documentation = "https://docs.rs/rustfs-signer/latest/rustfs_signer/"
|
||||
|
||||
[dependencies]
|
||||
tracing.workspace = true
|
||||
lazy_static.workspace = true
|
||||
bytes = { workspace = true }
|
||||
http.workspace = true
|
||||
time.workspace = true
|
||||
|
||||
@@ -13,8 +13,6 @@
|
||||
// limitations under the License.
|
||||
|
||||
use http::{HeaderMap, HeaderValue, request};
|
||||
use lazy_static::lazy_static;
|
||||
use std::collections::HashMap;
|
||||
use time::{OffsetDateTime, macros::format_description};
|
||||
|
||||
use super::request_signature_v4::{SERVICE_TYPE_S3, get_scope, get_signature, get_signing_key};
|
||||
@@ -32,15 +30,13 @@ const _CRLF_LEN: i64 = 2;
|
||||
const _TRAILER_KV_SEPARATOR: &str = ":";
|
||||
const _TRAILER_SIGNATURE: &str = "x-amz-trailer-signature";
|
||||
|
||||
lazy_static! {
|
||||
static ref ignored_streaming_headers: HashMap<String, bool> = {
|
||||
let mut m = <HashMap<String, bool>>::new();
|
||||
m.insert("authorization".to_string(), true);
|
||||
m.insert("user-agent".to_string(), true);
|
||||
m.insert("content-type".to_string(), true);
|
||||
m
|
||||
};
|
||||
}
|
||||
// static ignored_streaming_headers: LazyLock<HashMap<String, bool>> = LazyLock::new(|| {
|
||||
// let mut m = <HashMap<String, bool>>::new();
|
||||
// m.insert("authorization".to_string(), true);
|
||||
// m.insert("user-agent".to_string(), true);
|
||||
// m.insert("content-type".to_string(), true);
|
||||
// m
|
||||
// });
|
||||
|
||||
#[allow(dead_code)]
|
||||
fn build_chunk_string_to_sign(t: OffsetDateTime, region: &str, previous_sig: &str, chunk_check_sum: &str) -> String {

Some files were not shown because too many files have changed in this diff.