Mirror of https://github.com/rustfs/rustfs.git (synced 2026-01-17 09:40:32 +00:00)

Compare commits: 17 commits, 1.0.0-alph ... 1.0.0-alph
| Author | SHA1 | Date |
|---|---|---|
|  | 3c14947878 |  |
|  | 2924b4e463 |  |
|  | b4ba62fa33 |  |
|  | a5b3522880 |  |
|  | 056a0ee62b |  |
|  | 4603ece708 |  |
|  | eb33e82b56 |  |
|  | c7e2b4d8e7 |  |
|  | 71c59d1187 |  |
|  | e3a0a07495 |  |
|  | 136db7e0c9 |  |
|  | 2e3c5f695a |  |
|  | fe9609fd17 |  |
|  | f2d79b485e |  |
|  | 3d6681c9e5 |  |
|  | 07a26fadad |  |
|  | a083fca17a |  |
.config/make/build-docker-buildx-dev.mak (new file, 64 lines)
@@ -0,0 +1,64 @@
## —— Development/Source builds using direct buildx commands ---------------------------------------

.PHONY: docker-dev
docker-dev: ## Build dev multi-arch image (cannot load locally)
    @echo "🏗️ Building multi-architecture development Docker images with buildx..."
    @echo "💡 This builds from source code and is intended for local development and testing"
    @echo "⚠️ Multi-arch images cannot be loaded locally, use docker-dev-push to push to registry"
    $(DOCKER_CLI) buildx build \
        --platform linux/amd64,linux/arm64 \
        --file $(DOCKERFILE_SOURCE) \
        --tag rustfs:source-latest \
        --tag rustfs:dev-latest \
        .

.PHONY: docker-dev-local
docker-dev-local: ## Build dev single-arch image (local load)
    @echo "🏗️ Building single-architecture development Docker image for local use..."
    @echo "💡 This builds from source code for the current platform and loads locally"
    $(DOCKER_CLI) buildx build \
        --file $(DOCKERFILE_SOURCE) \
        --tag rustfs:source-latest \
        --tag rustfs:dev-latest \
        --load \
        .

.PHONY: docker-dev-push
docker-dev-push: ## Build and push multi-arch development image # e.g (make docker-dev-push REGISTRY=xxx)
    @if [ -z "$(REGISTRY)" ]; then \
        echo "❌ Error: Please specify registry, example: make docker-dev-push REGISTRY=ghcr.io/username"; \
        exit 1; \
    fi
    @echo "🚀 Building and pushing multi-architecture development Docker images..."
    @echo "💡 Pushing to registry: $(REGISTRY)"
    $(DOCKER_CLI) buildx build \
        --platform linux/amd64,linux/arm64 \
        --file $(DOCKERFILE_SOURCE) \
        --tag $(REGISTRY)/rustfs:source-latest \
        --tag $(REGISTRY)/rustfs:dev-latest \
        --push \
        .

.PHONY: dev-env-start
dev-env-start: ## Start development container environment
    @echo "🚀 Starting development environment..."
    $(DOCKER_CLI) buildx build \
        --file $(DOCKERFILE_SOURCE) \
        --tag rustfs:dev \
        --load \
        .
    $(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
    $(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true
    $(DOCKER_CLI) run -d --name $(CONTAINER_NAME) \
        -p 9010:9010 -p 9000:9000 \
        -v $(shell pwd):/workspace \
        -it rustfs:dev

.PHONY: dev-env-stop
dev-env-stop: ## Stop development container environment
    @echo "🛑 Stopping development environment..."
    $(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
    $(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true

.PHONY: dev-env-restart
dev-env-restart: dev-env-stop dev-env-start ## Restart development container environment
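Taken together these targets give a local loop along the following lines (invocations are illustrative; ghcr.io/username is a placeholder registry):

    make docker-dev-local                            # single-arch build, loaded into the local daemon
    make dev-env-start                               # run it as $(CONTAINER_NAME) with 9000/9010 mapped and the repo at /workspace
    make docker-dev-push REGISTRY=ghcr.io/username   # multi-arch build pushed straight to a registry

The multi-arch docker-dev target deliberately has no --load: buildx cannot load a multi-platform manifest into the local image store, which is why the single-arch -local variant exists alongside it.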
.config/make/build-docker-buildx-production.mak (new file, 41 lines)
@@ -0,0 +1,41 @@
## —— Production builds using docker buildx (for CI/CD and production) -----------------------------

.PHONY: docker-buildx
docker-buildx: ## Build production multi-arch image (no push)
    @echo "🏗️ Building multi-architecture production Docker images with buildx..."
    ./docker-buildx.sh

.PHONY: docker-buildx-push
docker-buildx-push: ## Build and push production multi-arch image
    @echo "🚀 Building and pushing multi-architecture production Docker images with buildx..."
    ./docker-buildx.sh --push

.PHONY: docker-buildx-version
docker-buildx-version: ## Build and version production multi-arch image # e.g (make docker-buildx-version VERSION=v1.0.0)
    @if [ -z "$(VERSION)" ]; then \
        echo "❌ Error: Please specify version, example: make docker-buildx-version VERSION=v1.0.0"; \
        exit 1; \
    fi
    @echo "🏗️ Building multi-architecture production Docker images (version: $(VERSION))..."
    ./docker-buildx.sh --release $(VERSION)

.PHONY: docker-buildx-push-version
docker-buildx-push-version: ## Build, version, and push production multi-arch image # e.g (make docker-buildx-push-version VERSION=v1.0.0)
    @if [ -z "$(VERSION)" ]; then \
        echo "❌ Error: Please specify version, example: make docker-buildx-push-version VERSION=v1.0.0"; \
        exit 1; \
    fi
    @echo "🚀 Building and pushing multi-architecture production Docker images (version: $(VERSION))..."
    ./docker-buildx.sh --release $(VERSION) --push

.PHONY: docker-buildx-production-local
docker-buildx-production-local: ## Build production single-arch image locally
    @echo "🏗️ Building single-architecture production Docker image locally..."
    @echo "💡 Alternative to docker-buildx.sh for local testing"
    $(DOCKER_CLI) buildx build \
        --file $(DOCKERFILE_PRODUCTION) \
        --tag rustfs:production-latest \
        --tag rustfs:latest \
        --load \
        --build-arg RELEASE=latest \
        .
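The VERSION guard makes an unversioned release build fail fast rather than tag something ambiguous; a versioned flow would look like this (v1.2.3 is a made-up version number):

    make docker-buildx-version VERSION=v1.2.3        # build multi-arch images tagged with the version
    make docker-buildx-push-version VERSION=v1.2.3   # same build, pushed to the registry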
.config/make/build-docker-production.mak (new file, 16 lines)
@@ -0,0 +1,16 @@
## —— Single Architecture Docker Builds (Traditional) ----------------------------------------------

.PHONY: docker-build-production
docker-build-production: ## Build single-arch production image
    @echo "🏗️ Building single-architecture production Docker image..."
    @echo "💡 Consider using 'make docker-buildx-production-local' for multi-arch support"
    $(DOCKER_CLI) build -f $(DOCKERFILE_PRODUCTION) -t rustfs:latest .

.PHONY: docker-build-source
docker-build-source: ## Build single-arch source image
    @echo "🏗️ Building single-architecture source Docker image..."
    @echo "💡 Consider using 'make docker-dev-local' for multi-arch support"
    DOCKER_BUILDKIT=1 $(DOCKER_CLI) build \
        --build-arg BUILDKIT_INLINE_CACHE=1 \
        -f $(DOCKERFILE_SOURCE) -t rustfs:source .
.config/make/build-docker.mak (new file, 22 lines)
@@ -0,0 +1,22 @@
## —— Docker-based build (alternative approach) ----------------------------------------------------

# Usage: make BUILD_OS=ubuntu22.04 build-docker
# Output: target/ubuntu22.04/release/rustfs

.PHONY: build-docker
build-docker: SOURCE_BUILD_IMAGE_NAME = rustfs-$(BUILD_OS):v1
build-docker: SOURCE_BUILD_CONTAINER_NAME = rustfs-$(BUILD_OS)-build
build-docker: BUILD_CMD = /root/.cargo/bin/cargo build --release --bin rustfs --target-dir /root/s3-rustfs/target/$(BUILD_OS)
build-docker: ## Build using Docker container # e.g (make build-docker BUILD_OS=ubuntu22.04)
    @echo "🐳 Building RustFS using Docker ($(BUILD_OS))..."
    $(DOCKER_CLI) buildx build -t $(SOURCE_BUILD_IMAGE_NAME) -f $(DOCKERFILE_SOURCE) .
    $(DOCKER_CLI) run --rm --name $(SOURCE_BUILD_CONTAINER_NAME) -v $(shell pwd):/root/s3-rustfs -it $(SOURCE_BUILD_IMAGE_NAME) $(BUILD_CMD)

.PHONY: docker-inspect-multiarch
docker-inspect-multiarch: ## Check image architecture support
    @if [ -z "$(IMAGE)" ]; then \
        echo "❌ Error: Please specify image, example: make docker-inspect-multiarch IMAGE=rustfs/rustfs:latest"; \
        exit 1; \
    fi
    @echo "🔍 Inspecting multi-architecture image: $(IMAGE)"
    docker buildx imagetools inspect $(IMAGE)
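The three SOURCE_BUILD_*/BUILD_CMD assignments are target-specific variables, so they exist only while build-docker runs. With BUILD_OS=ubuntu22.04 and the defaults DOCKER_CLI=docker and DOCKERFILE_SOURCE=Dockerfile.source, the recipe expands to roughly the following (expansion shown for illustration only):

    docker buildx build -t rustfs-ubuntu22.04:v1 -f Dockerfile.source .
    docker run --rm --name rustfs-ubuntu22.04-build -v $PWD:/root/s3-rustfs -it rustfs-ubuntu22.04:v1 \
        /root/.cargo/bin/cargo build --release --bin rustfs --target-dir /root/s3-rustfs/target/ubuntu22.04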
.config/make/build.mak (new file, 55 lines)
@@ -0,0 +1,55 @@
## —— Local Native Build using build-rustfs.sh script (Recommended) --------------------------------

.PHONY: build
build: ## Build RustFS binary (includes console by default)
    @echo "🔨 Building RustFS using build-rustfs.sh script..."
    ./build-rustfs.sh

.PHONY: build-dev
build-dev: ## Build RustFS in development mode
    @echo "🔨 Building RustFS in development mode..."
    ./build-rustfs.sh --dev

.PHONY: build-musl
build-musl: ## Build x86_64 musl version
    @echo "🔨 Building rustfs for x86_64-unknown-linux-musl..."
    @echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
    ./build-rustfs.sh --platform x86_64-unknown-linux-musl

.PHONY: build-gnu
build-gnu: ## Build x86_64 GNU version
    @echo "🔨 Building rustfs for x86_64-unknown-linux-gnu..."
    @echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
    ./build-rustfs.sh --platform x86_64-unknown-linux-gnu

.PHONY: build-musl-arm64
build-musl-arm64: ## Build aarch64 musl version
    @echo "🔨 Building rustfs for aarch64-unknown-linux-musl..."
    @echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
    ./build-rustfs.sh --platform aarch64-unknown-linux-musl

.PHONY: build-gnu-arm64
build-gnu-arm64: ## Build aarch64 GNU version
    @echo "🔨 Building rustfs for aarch64-unknown-linux-gnu..."
    @echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
    ./build-rustfs.sh --platform aarch64-unknown-linux-gnu


.PHONY: build-cross-all
build-cross-all: core-deps ## Build binaries for all architectures
    @echo "🔧 Building all target architectures..."
    @echo "💡 On macOS/Windows, use 'make docker-dev' for reliable multi-arch builds"
    @echo "🔨 Generating protobuf code..."
    cargo run --bin gproto || true

    @echo "🔨 Building rustfs for x86_64-unknown-linux-musl..."
    ./build-rustfs.sh --platform x86_64-unknown-linux-musl

    @echo "🔨 Building rustfs for x86_64-unknown-linux-gnu..."
    ./build-rustfs.sh --platform x86_64-unknown-linux-gnu

    @echo "🔨 Building rustfs for aarch64-unknown-linux-musl..."
    ./build-rustfs.sh --platform aarch64-unknown-linux-musl

    @echo "🔨 Building rustfs for aarch64-unknown-linux-gnu..."
    ./build-rustfs.sh --platform aarch64-unknown-linux-gnu
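All of these targets funnel into the same script, so a one-off cross build does not need make at all (direct call, equivalent to make build-gnu-arm64):

    ./build-rustfs.sh --platform aarch64-unknown-linux-gnu

Note the '|| true' on the gproto step in build-cross-all: protobuf generation is treated as best-effort, and a failure there does not abort the four platform builds.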
.config/make/check.mak (new file, 24 lines)
@@ -0,0 +1,24 @@
## —— Check and Inform Dependencies ----------------------------------------------------------------

# Fatal check
# Checks all required dependencies and exits with an error if one is not found
# (e.g., cargo, rustfmt)
check-%:
    @command -v $* >/dev/null 2>&1 || { \
        echo >&2 "❌ '$*' is not installed."; \
        exit 1; \
    }

# Warning-only check
# Checks for optional dependencies and issues a warning if not found
# (e.g., cargo-nextest for enhanced testing)
warn-%:
    @command -v $* >/dev/null 2>&1 || { \
        echo >&2 "⚠️ '$*' is not installed."; \
    }

# For checking dependencies use check-<dep-name> or warn-<dep-name>
.PHONY: core-deps fmt-deps test-deps
core-deps: check-cargo ## Check core dependencies
fmt-deps: check-rustfmt ## Check lint and formatting dependencies
test-deps: warn-cargo-nextest ## Check tests dependencies
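Because check-% and warn-% are pattern rules, the stem after the dash can be any executable name with no further code; for instance (shellcheck here is just an arbitrary example tool):

    make check-cargo        # exits 1 with "❌ 'cargo' is not installed." when cargo is missing
    make warn-shellcheck    # prints the ⚠️ line but lets the build continue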
.config/make/deploy.mak (new file, 6 lines)
@@ -0,0 +1,6 @@
## —— Deploy using dev_deploy.sh script ------------------------------------------------------------

.PHONY: deploy-dev
deploy-dev: build-musl ## Deploy to dev server
    @echo "🚀 Deploying to dev server: $${IP}"
    ./scripts/dev_deploy.sh $${IP}
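IP is expanded by the shell at recipe time ($${IP} escapes the dollar sign from make), so the address is supplied through the environment (192.0.2.10 is a placeholder from the documentation range):

    IP=192.0.2.10 make deploy-dev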
.config/make/help.mak (new file, 38 lines)
@@ -0,0 +1,38 @@
## —— Help, Help Build and Help Docker -------------------------------------------------------------


.PHONY: help
help: ## Shows this help menu
    echo -e "$$HEADER"
    grep -E '(^[a-zA-Z0-9_-]+:.*?## .*$$)|(^## )' $(MAKEFILE_LIST) | sed 's/^[^:]*://g' | awk 'BEGIN {FS = ":.*?## | #"} ; {printf "${cyan}%-30s${reset} ${white}%s${reset} ${green}%s${reset}\n", $$1, $$2, $$3}' | sed -e 's/\[36m##/\n[32m##/'

.PHONY: help-build
help-build: ## Shows RustFS build help
    @echo ""
    @echo "💡 build-rustfs.sh script provides more options, smart detection and binary verification"
    @echo ""
    @echo "🔧 Direct usage of build-rustfs.sh script:"
    @echo ""
    @echo "  ./build-rustfs.sh --help                              # View script help"
    @echo "  ./build-rustfs.sh --no-console                        # Build without console resources"
    @echo "  ./build-rustfs.sh --force-console-update              # Force update console resources"
    @echo "  ./build-rustfs.sh --dev                               # Development mode build"
    @echo "  ./build-rustfs.sh --sign                              # Sign binary files"
    @echo "  ./build-rustfs.sh --platform x86_64-unknown-linux-gnu # Specify target platform"
    @echo "  ./build-rustfs.sh --skip-verification                 # Skip binary verification"
    @echo ""

.PHONY: help-docker
help-docker: ## Shows docker environment and suggestion help
    @echo ""
    @echo "📋 Environment Variables:"
    @echo "  REGISTRY            Image registry address (required for push)"
    @echo "  DOCKERHUB_USERNAME  Docker Hub username"
    @echo "  DOCKERHUB_TOKEN     Docker Hub access token"
    @echo "  GITHUB_TOKEN        GitHub access token"
    @echo ""
    @echo "💡 Suggestions:"
    @echo "  Production use:          Use docker-buildx* commands (based on precompiled binaries)"
    @echo "  Local development:       Use docker-dev* commands (build from source)"
    @echo "  Development environment: Use dev-env-* commands to manage dev containers"
    @echo ""
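The grep/awk pipeline in help is the usual self-documenting-Makefile trick: any target line carrying a '## ' comment becomes a help row, and lines starting with '## ' become section banners. A target opts in just by following the convention (clean is a hypothetical example target):

    clean: ## Remove build artifacts # e.g (make clean)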
.config/make/lint-fmt.mak (new file, 22 lines)
@@ -0,0 +1,22 @@
## —— Code quality and Formatting ------------------------------------------------------------------

.PHONY: fmt
fmt: core-deps fmt-deps ## Format code
    @echo "🔧 Formatting code..."
    cargo fmt --all

.PHONY: fmt-check
fmt-check: core-deps fmt-deps ## Check code formatting
    @echo "📝 Checking code formatting..."
    cargo fmt --all --check

.PHONY: clippy-check
clippy-check: core-deps ## Run clippy checks
    @echo "🔍 Running clippy checks..."
    cargo clippy --fix --allow-dirty
    cargo clippy --all-targets --all-features -- -D warnings

.PHONY: compilation-check
compilation-check: core-deps ## Run compilation check
    @echo "🔨 Running compilation check..."
    cargo check --all-targets
.config/make/pre-commit.mak (new file, 11 lines)
@@ -0,0 +1,11 @@
## —— Pre Commit Checks ----------------------------------------------------------------------------

.PHONY: setup-hooks
setup-hooks: ## Set up git hooks
    @echo "🔧 Setting up git hooks..."
    chmod +x .git/hooks/pre-commit
    @echo "✅ Git hooks setup complete!"

.PHONY: pre-commit
pre-commit: fmt clippy-check compilation-check test ## Run pre-commit checks
    @echo "✅ All pre-commit checks passed!"
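setup-hooks only marks an already-present .git/hooks/pre-commit as executable; a minimal hook body that delegates to the pre-commit target could look like this (sketch only, the repository's actual hook script may differ):

    #!/bin/sh
    exec make pre-commit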
.config/make/tests.mak (new file, 20 lines)
@@ -0,0 +1,20 @@
## —— Tests and e2e test ---------------------------------------------------------------------------

.PHONY: test
test: core-deps test-deps ## Run all tests
    @echo "🧪 Running tests..."
    @if command -v cargo-nextest >/dev/null 2>&1; then \
        cargo nextest run --all --exclude e2e_test; \
    else \
        echo "ℹ️ cargo-nextest not found; falling back to 'cargo test'"; \
        cargo test --workspace --exclude e2e_test -- --nocapture; \
    fi
    cargo test --all --doc

.PHONY: e2e-server
e2e-server: ## Run e2e-server tests
    sh $(shell pwd)/scripts/run.sh

.PHONY: probe-e2e
probe-e2e: ## Probe e2e tests
    sh $(shell pwd)/scripts/probe.sh
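The test target degrades gracefully: warn-cargo-nextest (from check.mak) only warns, and the recipe re-probes for the binary and falls back to plain cargo test. To get the faster path, installing the optional runner is enough:

    cargo install cargo-nextest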
.github/workflows/e2e-mint.yml (vendored, 260 lines, deleted)
@@ -1,260 +0,0 @@
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

name: e2e-mint

on:
  push:
    branches: [ main ]
    paths:
      - ".github/workflows/e2e-mint.yml"
      - "Dockerfile.source"
      - "rustfs/**"
      - "crates/**"
  workflow_dispatch:
    inputs:
      run-multi:
        description: "Run multi-node Mint as well"
        required: false
        default: "false"

env:
  ACCESS_KEY: rustfsadmin
  SECRET_KEY: rustfsadmin
  RUST_LOG: info
  PLATFORM: linux/amd64

jobs:
  mint-single:
    runs-on: ubicloud-standard-2
    timeout-minutes: 40
    steps:
      - name: Checkout
        uses: actions/checkout@v6

      - name: Enable buildx
        uses: docker/setup-buildx-action@v3

      - name: Build RustFS image (source)
        run: |
          DOCKER_BUILDKIT=1 docker buildx build --load \
            --platform ${PLATFORM} \
            -t rustfs-ci \
            -f Dockerfile.source .

      - name: Create network
        run: |
          docker network inspect rustfs-net >/dev/null 2>&1 || docker network create rustfs-net

      - name: Remove existing rustfs-single (if any)
        run: docker rm -f rustfs-single >/dev/null 2>&1 || true

      - name: Start single RustFS
        run: |
          docker run -d --name rustfs-single \
            --network rustfs-net \
            -e RUSTFS_ADDRESS=0.0.0.0:9000 \
            -e RUSTFS_ACCESS_KEY=$ACCESS_KEY \
            -e RUSTFS_SECRET_KEY=$SECRET_KEY \
            -e RUSTFS_VOLUMES="/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3" \
            -v /tmp/rustfs-single:/data \
            rustfs-ci

      - name: Wait for RustFS ready
        run: |
          for i in {1..30}; do
            if docker exec rustfs-single curl -sf http://localhost:9000/health >/dev/null; then
              exit 0
            fi
            sleep 2
          done
          echo "RustFS did not become ready" >&2
          docker logs rustfs-single || true
          exit 1

      - name: Run Mint (single, S3-only)
        run: |
          mkdir -p artifacts/mint-single
          docker run --rm --network rustfs-net \
            --platform ${PLATFORM} \
            -e SERVER_ENDPOINT=rustfs-single:9000 \
            -e ACCESS_KEY=$ACCESS_KEY \
            -e SECRET_KEY=$SECRET_KEY \
            -e ENABLE_HTTPS=0 \
            -e SERVER_REGION=us-east-1 \
            -e RUN_ON_FAIL=1 \
            -e MINT_MODE=core \
            -v ${GITHUB_WORKSPACE}/artifacts/mint-single:/mint/log \
            --entrypoint /mint/mint.sh \
            minio/mint:edge \
            awscli aws-sdk-go aws-sdk-java-v2 aws-sdk-php aws-sdk-ruby s3cmd s3select

      - name: Collect RustFS logs
        run: |
          mkdir -p artifacts/rustfs-single
          docker logs rustfs-single > artifacts/rustfs-single/rustfs.log || true

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          name: mint-single
          path: artifacts/**

  mint-multi:
    if: github.event_name == 'workflow_dispatch' && github.event.inputs.run-multi == 'true'
    needs: mint-single
    runs-on: ubicloud-standard-2
    timeout-minutes: 60
    steps:
      - name: Checkout
        uses: actions/checkout@v6

      - name: Enable buildx
        uses: docker/setup-buildx-action@v3

      - name: Build RustFS image (source)
        run: |
          DOCKER_BUILDKIT=1 docker buildx build --load \
            --platform ${PLATFORM} \
            -t rustfs-ci \
            -f Dockerfile.source .

      - name: Prepare cluster compose
        run: |
          cat > compose.yml <<'EOF'
          version: '3.8'
          services:
            rustfs1:
              image: rustfs-ci
              hostname: rustfs1
              networks: [rustfs-net]
              environment:
                - RUSTFS_ADDRESS=0.0.0.0:9000
                - RUSTFS_ACCESS_KEY=${ACCESS_KEY}
                - RUSTFS_SECRET_KEY=${SECRET_KEY}
                - RUSTFS_VOLUMES=/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3
              volumes:
                - rustfs1-data:/data
            rustfs2:
              image: rustfs-ci
              hostname: rustfs2
              networks: [rustfs-net]
              environment:
                - RUSTFS_ADDRESS=0.0.0.0:9000
                - RUSTFS_ACCESS_KEY=${ACCESS_KEY}
                - RUSTFS_SECRET_KEY=${SECRET_KEY}
                - RUSTFS_VOLUMES=/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3
              volumes:
                - rustfs2-data:/data
            rustfs3:
              image: rustfs-ci
              hostname: rustfs3
              networks: [rustfs-net]
              environment:
                - RUSTFS_ADDRESS=0.0.0.0:9000
                - RUSTFS_ACCESS_KEY=${ACCESS_KEY}
                - RUSTFS_SECRET_KEY=${SECRET_KEY}
                - RUSTFS_VOLUMES=/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3
              volumes:
                - rustfs3-data:/data
            rustfs4:
              image: rustfs-ci
              hostname: rustfs4
              networks: [rustfs-net]
              environment:
                - RUSTFS_ADDRESS=0.0.0.0:9000
                - RUSTFS_ACCESS_KEY=${ACCESS_KEY}
                - RUSTFS_SECRET_KEY=${SECRET_KEY}
                - RUSTFS_VOLUMES=/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3
              volumes:
                - rustfs4-data:/data
            lb:
              image: haproxy:2.9
              hostname: lb
              networks: [rustfs-net]
              ports:
                - "9000:9000"
              volumes:
                - ./haproxy.cfg:/usr/local/etc/haproxy/haproxy.cfg:ro
          networks:
            rustfs-net:
              name: rustfs-net
          volumes:
            rustfs1-data:
            rustfs2-data:
            rustfs3-data:
            rustfs4-data:
          EOF

          cat > haproxy.cfg <<'EOF'
          defaults
            mode http
            timeout connect 5s
            timeout client 30s
            timeout server 30s

          frontend fe_s3
            bind *:9000
            default_backend be_s3

          backend be_s3
            balance roundrobin
            server s1 rustfs1:9000 check
            server s2 rustfs2:9000 check
            server s3 rustfs3:9000 check
            server s4 rustfs4:9000 check
          EOF

      - name: Launch cluster
        run: docker compose -f compose.yml up -d

      - name: Wait for LB ready
        run: |
          for i in {1..60}; do
            if docker run --rm --network rustfs-net curlimages/curl -sf http://lb:9000/health >/dev/null; then
              exit 0
            fi
            sleep 2
          done
          echo "LB or backend not ready" >&2
          docker compose -f compose.yml logs --tail=200 || true
          exit 1

      - name: Run Mint (multi, S3-only)
        run: |
          mkdir -p artifacts/mint-multi
          docker run --rm --network rustfs-net \
            --platform ${PLATFORM} \
            -e SERVER_ENDPOINT=lb:9000 \
            -e ACCESS_KEY=$ACCESS_KEY \
            -e SECRET_KEY=$SECRET_KEY \
            -e ENABLE_HTTPS=0 \
            -e SERVER_REGION=us-east-1 \
            -e RUN_ON_FAIL=1 \
            -e MINT_MODE=core \
            -v ${GITHUB_WORKSPACE}/artifacts/mint-multi:/mint/log \
            --entrypoint /mint/mint.sh \
            minio/mint:edge \
            awscli aws-sdk-go aws-sdk-java-v2 aws-sdk-php aws-sdk-ruby s3cmd s3select

      - name: Collect logs
        run: |
          mkdir -p artifacts/cluster
          docker compose -f compose.yml logs --no-color > artifacts/cluster/cluster.log || true

      - name: Upload artifacts
        uses: actions/upload-artifact@v4
        with:
          name: mint-multi
          path: artifacts/**
.gitignore (vendored, 4 changed lines)
@@ -25,7 +25,7 @@ profile.json
*.pb
*.svg
deploy/logs/*.log.*

artifacts/
# s3-tests local artifacts (root directory only)
/s3-tests/
/s3-tests-local/
@@ -33,4 +33,4 @@ deploy/logs/*.log.*
/s3tests.conf.*
*.events
*.audit
*.snappy
*.snappy
Cargo.lock (generated, 171 changed lines)
@@ -24,7 +24,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "03d2d54c4d9e7006f132f615a167865bff927a79ca63d8f637237575ce0a9795"
dependencies = [
 "crypto-common 0.2.0-rc.5",
 "inout 0.2.1",
 "inout 0.2.2",
]

[[package]]
@@ -1068,9 +1068,9 @@ dependencies = [

[[package]]
name = "axum-core"
version = "0.5.5"
version = "0.5.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "59446ce19cd142f8833f856eb31f3eb097812d1479ab224f54d72428ca21ea22"
checksum = "08c78f31d7b1291f7ee735c1c6780ccde7785daae9a9206026862dab7d8792d1"
dependencies = [
 "bytes",
 "futures-core",
@@ -1085,27 +1085,6 @@ dependencies = [
 "tracing",
]

[[package]]
name = "axum-extra"
version = "0.12.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6dfbd6109d91702d55fc56df06aae7ed85c465a7a451db6c0e54a4b9ca5983d1"
dependencies = [
 "axum",
 "axum-core",
 "bytes",
 "futures-core",
 "futures-util",
 "http 1.4.0",
 "http-body 1.0.1",
 "http-body-util",
 "mime",
 "pin-project-lite",
 "tower-layer",
 "tower-service",
 "tracing",
]

[[package]]
name = "axum-server"
version = "0.8.0"
@@ -1185,9 +1164,9 @@ checksum = "0e050f626429857a27ddccb31e0aca21356bfa709c04041aefddac081a8f068a"

[[package]]
name = "bigdecimal"
version = "0.4.9"
version = "0.4.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "560f42649de9fa436b73517378a147ec21f6c997a546581df4b4b31677828934"
checksum = "4d6867f1565b3aad85681f1015055b087fcfd840d6aeee6eee7f2da317603695"
dependencies = [
 "autocfg",
 "libm",
@@ -1459,9 +1438,9 @@ checksum = "37b2a672a2cb129a2e41c10b1224bb368f9f37a2b16b612598138befd7b37eb5"

[[package]]
name = "cc"
version = "1.2.50"
version = "1.2.51"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9f50d563227a1c37cc0a263f64eca3334388c01c5e4c4861a9def205c614383c"
checksum = "7a0aeaff4ff1a90589618835a598e545176939b97874f7abc7851caa0618f203"
dependencies = [
 "find-msvc-tools",
 "jobserver",
@@ -1574,7 +1553,7 @@ checksum = "155e4a260750fa4f7754649f049748aacc31db238a358d85fd721002f230f92f"
dependencies = [
 "block-buffer 0.11.0",
 "crypto-common 0.2.0-rc.5",
 "inout 0.2.1",
 "inout 0.2.2",
]

[[package]]
@@ -3367,9 +3346,9 @@ dependencies = [

[[package]]
name = "find-msvc-tools"
version = "0.1.5"
version = "0.1.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3a3076410a55c90011c298b04d0cfa770b00fa04e1e3c97d3f6c9de105a03844"
checksum = "645cbb3a84e60b7531617d5ae4e57f7e27308f6445f5abf653209ea76dec8dff"

[[package]]
name = "findshlibs"
@@ -3473,9 +3452,9 @@ dependencies = [

[[package]]
name = "fs-err"
version = "3.2.1"
version = "3.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "824f08d01d0f496b3eca4f001a13cf17690a6ee930043d20817f547455fd98f8"
checksum = "baf68cef89750956493a66a10f512b9e58d9db21f2a573c079c0bdf1207a54a7"
dependencies = [
 "autocfg",
 "tokio",
@@ -3629,6 +3608,18 @@ dependencies = [
 "wasm-bindgen",
]

[[package]]
name = "getrandom"
version = "0.4.0-rc.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "3b99f0d993a2b9b97b9a201193aa8ad21305cde06a3be9a7e1f8f4201e5cc27e"
dependencies = [
 "cfg-if",
 "libc",
 "r-efi",
 "wasip2",
]

[[package]]
name = "getset"
version = "0.1.6"
@@ -4546,9 +4537,9 @@ dependencies = [

[[package]]
name = "inout"
version = "0.2.1"
version = "0.2.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c7357b6e7aa75618c7864ebd0634b115a7218b0615f4cb1df33ac3eca23943d4"
checksum = "4250ce6452e92010fdf7268ccc5d14faa80bb12fc741938534c58f16804e03c7"
dependencies = [
 "hybrid-array",
]
@@ -4576,9 +4567,9 @@ dependencies = [

[[package]]
name = "iri-string"
version = "0.7.9"
version = "0.7.10"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "4f867b9d1d896b67beb18518eda36fdb77a32ea590de864f1325b294a6d14397"
checksum = "c91338f0783edbd6195decb37bae672fd3b165faffb89bf7b9e6942f8b1a731a"
dependencies = [
 "memchr",
 "serde",
@@ -4627,9 +4618,9 @@ dependencies = [

[[package]]
name = "itoa"
version = "1.0.16"
version = "1.0.17"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7ee5b5339afb4c41626dde77b7a611bd4f2c202b897852b4bcf5d03eddc61010"
checksum = "92ecc6618181def0457392ccd0ee51198e065e016d1d527a7ac1b6dc7c1f09d2"

[[package]]
name = "jemalloc_pprof"
@@ -4817,13 +4808,13 @@ dependencies = [

[[package]]
name = "libredox"
version = "0.1.11"
version = "0.1.12"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "df15f6eac291ed1cf25865b1ee60399f57e7c227e7f51bdbd4c5270396a9ed50"
checksum = "3d0b95e02c851351f877147b7deea7b1afb1df71b63aa5f8270716e0c5720616"
dependencies = [
 "bitflags 2.10.0",
 "libc",
 "redox_syscall 0.6.0",
 "redox_syscall 0.7.0",
]

[[package]]
@@ -5007,9 +4998,9 @@ checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3"

[[package]]
name = "matchit"
version = "0.9.0"
version = "0.9.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9ea5f97102eb9e54ab99fb70bb175589073f554bdadfb74d9bd656482ea73e2a"
checksum = "b3eede3bdf92f3b4f9dc04072a9ce5ab557d5ec9038773bf9ffcd5588b3cc05b"

[[package]]
name = "md-5"
@@ -5517,9 +5508,9 @@ checksum = "d6790f58c7ff633d8771f42965289203411a5e5c68388703c06e14f24770b41e"

[[package]]
name = "openssl-probe"
version = "0.1.6"
version = "0.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d05e27ee213611ffe7d6348b942e8f942b37114c00cc03cec254295a4a17852e"
checksum = "9f50d9b3dabb09ecd771ad0aa242ca6894994c130308ca3d7684634df8037391"

[[package]]
name = "opentelemetry"
@@ -5761,11 +5752,11 @@ dependencies = [

[[package]]
name = "password-hash"
version = "0.6.0-rc.6"
version = "0.6.0-rc.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "383d290055c99f2dd7dece082088d89494dff6d79277fbac4a7da21c1bf2ab6b"
checksum = "c351143b5ab27b1f1d24712f21ea4d0458fe74f60dd5839297dabcc2ecd24d58"
dependencies = [
 "getrandom 0.3.4",
 "getrandom 0.4.0-rc.0",
 "phc",
]

@@ -5883,12 +5874,12 @@ dependencies = [

[[package]]
name = "phc"
version = "0.6.0-rc.0"
version = "0.6.0-rc.1"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "c61f960577aaac5c259bc0866d685ba315c0ed30793c602d7287f54980913863"
checksum = "71d390c5fe8d102c2c18ff39f1e72b9ad5996de282c2d831b0312f56910f5508"
dependencies = [
 "base64ct",
 "getrandom 0.3.4",
 "getrandom 0.4.0-rc.0",
 "subtle",
]

@@ -6098,9 +6089,9 @@ dependencies = [

[[package]]
name = "portable-atomic"
version = "1.12.0"
version = "1.13.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "f59e70c4aef1e55797c2e8fd94a4f2a973fc972cfde0e0b05f683667b0cd39dd"
checksum = "f89776e4d69bb58bc6993e99ffa1d11f228b839984854c7daeb5d37f87cbe950"

[[package]]
name = "potential_utf"
@@ -6233,9 +6224,9 @@ dependencies = [

[[package]]
name = "proc-macro2"
version = "1.0.103"
version = "1.0.104"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "5ee95bc4ef87b8d5ba32e8b7714ccc834865276eab0aed5c9958d00ec45f49e8"
checksum = "9695f8df41bb4f3d222c95a67532365f569318332d03d5f3f67f37b20e6ebdf0"
dependencies = [
 "unicode-ident",
]
@@ -6635,9 +6626,9 @@ dependencies = [

[[package]]
name = "redox_syscall"
version = "0.6.0"
version = "0.7.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ec96166dafa0886eb81fe1c0a388bece180fbef2135f97c1e2cf8302e74b43b5"
checksum = "49f3fe0889e69e2ae9e41f4d6c4c0181701d00e4697b356fb1f74173a5e0ee27"
dependencies = [
 "bitflags 2.10.0",
]
@@ -6830,7 +6821,7 @@ dependencies = [
 "pastey",
 "pin-project-lite",
 "rmcp-macros",
 "schemars 1.1.0",
 "schemars 1.2.0",
 "serde",
 "serde_json",
 "thiserror 2.0.17",
@@ -7025,7 +7016,6 @@ dependencies = [
 "atoi",
 "atomic_enum",
 "axum",
 "axum-extra",
 "axum-server",
 "base64",
 "base64-simd",
@@ -7045,7 +7035,7 @@ dependencies = [
 "hyper-util",
 "jemalloc_pprof",
 "libsystemd",
 "matchit 0.9.0",
 "matchit 0.9.1",
 "md5",
 "metrics",
 "mimalloc",
@@ -7061,6 +7051,7 @@ dependencies = [
 "rustfs-audit",
 "rustfs-common",
 "rustfs-config",
 "rustfs-credentials",
 "rustfs-ecstore",
 "rustfs-filemeta",
 "rustfs-iam",
@@ -7078,6 +7069,7 @@ dependencies = [
 "rustfs-utils",
 "rustfs-zip",
 "rustls 0.23.35",
 "rustls-pemfile",
 "s3s",
 "serde",
 "serde_json",
@@ -7210,6 +7202,17 @@ dependencies = [
 "const-str",
]

[[package]]
name = "rustfs-credentials"
version = "0.0.5"
dependencies = [
 "base64-simd",
 "rand 0.10.0-rc.5",
 "serde",
 "serde_json",
 "time",
]

[[package]]
name = "rustfs-crypto"
version = "0.0.5"
@@ -7276,6 +7279,7 @@ dependencies = [
 "rustfs-checksums",
 "rustfs-common",
 "rustfs-config",
 "rustfs-credentials",
 "rustfs-filemeta",
 "rustfs-lock",
 "rustfs-madmin",
@@ -7318,7 +7322,6 @@ dependencies = [
 "bytes",
 "crc-fast",
 "criterion",
 "lazy_static",
 "regex",
 "rmp",
 "rmp-serde",
@@ -7344,6 +7347,7 @@ dependencies = [
 "jsonwebtoken",
 "pollster",
 "rand 0.10.0-rc.5",
 "rustfs-credentials",
 "rustfs-crypto",
 "rustfs-ecstore",
 "rustfs-madmin",
@@ -7427,7 +7431,7 @@ dependencies = [
 "clap",
 "mime_guess",
 "rmcp",
 "schemars 1.1.0",
 "schemars 1.2.0",
 "serde",
 "serde_json",
 "tokio",
@@ -7505,10 +7509,10 @@ dependencies = [
 "jsonwebtoken",
 "moka",
 "pollster",
 "rand 0.10.0-rc.5",
 "regex",
 "reqwest",
 "rustfs-config",
 "rustfs-credentials",
 "rustfs-crypto",
 "serde",
 "serde_json",
@@ -7528,6 +7532,7 @@ dependencies = [
 "flatbuffers",
 "prost 0.14.1",
 "rustfs-common",
 "rustfs-credentials",
 "tonic",
 "tonic-prost",
 "tonic-prost-build",
@@ -7550,6 +7555,7 @@ dependencies = [
 "pin-project-lite",
 "rand 0.10.0-rc.5",
 "reqwest",
 "rustfs-config",
 "rustfs-utils",
 "s3s",
 "serde",
@@ -7795,9 +7801,9 @@ dependencies = [

[[package]]
name = "rustls-native-certs"
version = "0.8.2"
version = "0.8.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9980d917ebb0c0536119ba501e90834767bffc3d60641457fd84a1f3fd337923"
checksum = "612460d5f7bea540c490b2b6395d8e34a953e52b491accd6c86c8164c5932a63"
dependencies = [
 "openssl-probe",
 "rustls-pki-types",
@@ -7865,14 +7871,14 @@ checksum = "b39cdef0fa800fc44525c84ccb54a029961a8215f9619753635a9c0d2538d46d"

[[package]]
name = "ryu"
version = "1.0.21"
version = "1.0.22"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "62049b2877bf12821e8f9ad256ee38fdc31db7387ec2d3b3f403024de2034aea"
checksum = "a50f4cf475b65d88e057964e0e9bb1f0aa9bbb2036dc65c64596b42932536984"

[[package]]
name = "s3s"
version = "0.13.0-alpha"
source = "git+https://github.com/s3s-project/s3s.git?branch=main#f6198bbf49abe60066fe47cbbefcb7078863b3e9"
source = "git+https://github.com/s3s-project/s3s.git?branch=main#9e41304ed549b89cfb03ede98e9c0d2ac7522051"
dependencies = [
 "arrayvec",
 "async-trait",
@@ -7959,9 +7965,9 @@ dependencies = [

[[package]]
name = "schemars"
version = "1.1.0"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9558e172d4e8533736ba97870c4b2cd63f84b382a3d6eb063da41b91cce17289"
checksum = "54e910108742c57a770f492731f99be216a52fadd361b06c8fb59d74ccc267d2"
dependencies = [
 "chrono",
 "dyn-clone",
@@ -7973,9 +7979,9 @@ dependencies = [

[[package]]
name = "schemars_derive"
version = "1.1.0"
version = "1.2.0"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "301858a4023d78debd2353c7426dc486001bddc91ae31a76fb1f55132f7e2633"
checksum = "4908ad288c5035a8eb12cfdf0d49270def0a268ee162b75eeee0f85d155a7c45"
dependencies = [
 "proc-macro2",
 "quote",
@@ -8124,9 +8130,9 @@ dependencies = [

[[package]]
name = "serde_json"
version = "1.0.147"
version = "1.0.148"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "6af14725505314343e673e9ecb7cd7e8a36aa9791eb936235a3567cc31447ae4"
checksum = "3084b546a1dd6289475996f182a22aba973866ea8e8b02c51d9f46b1336a22da"
dependencies = [
 "itoa",
 "memchr",
@@ -8179,7 +8185,7 @@ dependencies = [
 "indexmap 1.9.3",
 "indexmap 2.12.1",
 "schemars 0.9.0",
 "schemars 1.1.0",
 "schemars 1.2.0",
 "serde_core",
 "serde_json",
 "serde_with_macros",
@@ -8317,10 +8323,11 @@ checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"

[[package]]
name = "signal-hook-registry"
version = "1.4.7"
version = "1.4.8"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7664a098b8e616bdfcc2dc0e9ac44eb231eedf41db4e9fe95d8d32ec728dedad"
checksum = "c4db69cba1110affc0e9f7bcd48bbf87b3f4fc7c61fc9155afd4c469eb3d6c1b"
dependencies = [
 "errno",
 "libc",
]

@@ -10354,9 +10361,9 @@ dependencies = [

[[package]]
name = "zeroize_derive"
version = "1.4.2"
version = "1.4.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "ce36e65b0d2999d2aafac989fb249189a141aee1f53c612c1f37d72631959f69"
checksum = "85a5b4158499876c763cb03bc4e49185d3cccbabb15b33c627f7884f43db852e"
dependencies = [
 "proc-macro2",
 "quote",
@@ -10432,9 +10439,9 @@ checksum = "40990edd51aae2c2b6907af74ffb635029d5788228222c4bb811e9351c0caad3"

[[package]]
name = "zmij"
version = "0.1.7"
version = "1.0.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "9e404bcd8afdaf006e529269d3e85a743f9480c3cef60034d77860d02964f3ba"
checksum = "e9747e91771f56fd7893e1164abd78febd14a670ceec257caad15e051de35f06"

[[package]]
name = "zopfli"
Cargo.toml (11 changed lines)
@@ -19,6 +19,7 @@ members = [
    "crates/audit",        # Audit target management system with multi-target fan-out
    "crates/common",       # Shared utilities and data structures
    "crates/config",       # Configuration management
    "crates/credentials",  # Credential management system
    "crates/crypto",       # Cryptography and security features
    "crates/ecstore",      # Erasure coding storage implementation
    "crates/e2e_test",     # End-to-end test suite
@@ -71,6 +72,7 @@ rustfs-audit = { path = "crates/audit", version = "0.0.5" }
rustfs-checksums = { path = "crates/checksums", version = "0.0.5" }
rustfs-common = { path = "crates/common", version = "0.0.5" }
rustfs-config = { path = "./crates/config", version = "0.0.5" }
rustfs-credentials = { path = "crates/credentials", version = "0.0.5" }
rustfs-crypto = { path = "crates/crypto", version = "0.0.5" }
rustfs-ecstore = { path = "crates/ecstore", version = "0.0.5" }
rustfs-filemeta = { path = "crates/filemeta", version = "0.0.5" }
@@ -98,7 +100,6 @@ async-compression = { version = "0.4.19" }
async-recursion = "1.1.1"
async-trait = "0.1.89"
axum = "0.8.8"
axum-extra = "0.12.3"
axum-server = { version = "0.8.0", features = ["tls-rustls-no-provider"], default-features = false }
futures = "0.3.31"
futures-core = "0.3.31"
@@ -135,9 +136,9 @@ rmcp = { version = "0.12.0" }
rmp = { version = "0.8.15" }
rmp-serde = { version = "1.3.1" }
serde = { version = "1.0.228", features = ["derive"] }
serde_json = { version = "1.0.147", features = ["raw_value"] }
serde_json = { version = "1.0.148", features = ["raw_value"] }
serde_urlencoded = "0.7.1"
schemars = "1.1.0"
schemars = "1.2.0"

# Cryptography and Security
aes-gcm = { version = "0.11.0-rc.2", features = ["rand_core"] }
@@ -200,7 +201,7 @@ libc = "0.2.178"
libsystemd = "0.7.2"
local-ip-address = "0.6.8"
lz4 = "1.28.1"
matchit = "0.9.0"
matchit = "0.9.1"
md-5 = "0.11.0-rc.3"
md5 = "0.8.0"
mime_guess = "2.0.5"
@@ -276,8 +277,6 @@ jemalloc_pprof = { version = "0.8.1", features = ["symbolize", "flamegraph"] }
# Used to generate CPU performance analysis data and flame diagrams
pprof = { version = "0.15.0", features = ["flamegraph", "protobuf-codec"] }


[workspace.metadata.cargo-shear]
ignored = ["rustfs", "rustfs-mcp"]
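With crates/credentials registered both as a workspace member and as a workspace dependency, any member crate can now pull it in by reference instead of repeating the path and version (illustrative consumer line for a member's Cargo.toml):

    rustfs-credentials = { workspace = true }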
@@ -148,8 +148,8 @@ ENV RUSTFS_ADDRESS=":9000" \
    RUSTFS_OBS_LOG_DIRECTORY="/logs" \
    RUSTFS_USERNAME="rustfs" \
    RUSTFS_GROUPNAME="rustfs" \
    RUSTFS_UID="1000" \
    RUSTFS_GID="1000"
    RUSTFS_UID="10001" \
    RUSTFS_GID="10001"

# Note: We don't COPY source here because we expect it to be mounted at /app
# We rely on cargo run to build and run
@@ -187,8 +187,8 @@ RUN set -eux; \

# Create a conventional runtime user/group (final switch happens in entrypoint via chroot --userspec)
RUN set -eux; \
    groupadd -g 1000 rustfs; \
    useradd -u 1000 -g rustfs -M -s /usr/sbin/nologin rustfs
    groupadd -g 10001 rustfs; \
    useradd -u 10001 -g rustfs -M -s /usr/sbin/nologin rustfs

WORKDIR /app

@@ -212,8 +212,8 @@ ENV RUSTFS_ADDRESS=":9000" \
    RUST_LOG="warn" \
    RUSTFS_USERNAME="rustfs" \
    RUSTFS_GROUPNAME="rustfs" \
    RUSTFS_UID="1000" \
    RUSTFS_GID="1000"
    RUSTFS_UID="10001" \
    RUSTFS_GID="10001"

EXPOSE 9000
VOLUME ["/data"]
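These hunks move the runtime account from UID/GID 1000 to 10001, stepping out of the range usually taken by the first login user on the host. Existing bind-mounted data written by the old UID would need its ownership refreshed to match (host-side command; /srv/rustfs-data is a placeholder path):

    chown -R 10001:10001 /srv/rustfs-data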
Makefile (430 changed lines)
@@ -2,394 +2,80 @@
# Remote development requires VSCode with Dev Containers, Remote SSH, Remote Explorer
# https://code.visualstudio.com/docs/remote/containers
###########

.PHONY: SHELL

# Makefile global config
# Use config.mak to override any of the following variables.
# Do not make changes here.

.DEFAULT_GOAL := help
.EXPORT_ALL_VARIABLES:
.ONESHELL:
.SILENT:

NUM_CORES := $(shell nproc 2>/dev/null || sysctl -n hw.ncpu)

MAKEFLAGS += -j$(NUM_CORES) -l$(NUM_CORES)
MAKEFLAGS += --silent

SHELL := /bin/bash
.SHELLFLAGS = -eu -o pipefail -c

DOCKER_CLI ?= docker
IMAGE_NAME ?= rustfs:v1.0.0
CONTAINER_NAME ?= rustfs-dev
# Docker build configurations
DOCKERFILE_PRODUCTION = Dockerfile
DOCKERFILE_SOURCE = Dockerfile.source

# Fatal check
# Checks all required dependencies and exits with error if not found
# (e.g., cargo, rustfmt)
check-%:
    @command -v $* >/dev/null 2>&1 || { \
        echo >&2 "❌ '$*' is not installed."; \
        exit 1; \
    }

# Warning-only check
# Checks for optional dependencies and issues a warning if not found
# (e.g., cargo-nextest for enhanced testing)
warn-%:
    @command -v $* >/dev/null 2>&1 || { \
        echo >&2 "⚠️ '$*' is not installed."; \
    }

# For checking dependencies use check-<dep-name> or warn-<dep-name>
.PHONY: core-deps fmt-deps test-deps
core-deps: check-cargo
fmt-deps: check-rustfmt
test-deps: warn-cargo-nextest

# Code quality and formatting targets
.PHONY: fmt
fmt: core-deps fmt-deps
    @echo "🔧 Formatting code..."
    cargo fmt --all

.PHONY: fmt-check
fmt-check: core-deps fmt-deps
    @echo "📝 Checking code formatting..."
    cargo fmt --all --check

.PHONY: clippy
clippy: core-deps
    @echo "🔍 Running clippy checks..."
    cargo clippy --fix --allow-dirty
    cargo clippy --all-targets --all-features -- -D warnings

.PHONY: check
check: core-deps
    @echo "🔨 Running compilation check..."
    cargo check --all-targets

.PHONY: test
test: core-deps test-deps
    @echo "🧪 Running tests..."
    @if command -v cargo-nextest >/dev/null 2>&1; then \
        cargo nextest run --all --exclude e2e_test; \
    else \
        echo "ℹ️ cargo-nextest not found; falling back to 'cargo test'"; \
        cargo test --workspace --exclude e2e_test -- --nocapture; \
    fi
    cargo test --all --doc

.PHONY: setup-hooks
setup-hooks:
    @echo "🔧 Setting up git hooks..."
    chmod +x .git/hooks/pre-commit
    @echo "✅ Git hooks setup complete!"

.PHONY: pre-commit
pre-commit: fmt clippy check test
    @echo "✅ All pre-commit checks passed!"

.PHONY: e2e-server
e2e-server:
    sh $(shell pwd)/scripts/run.sh

.PHONY: probe-e2e
probe-e2e:
    sh $(shell pwd)/scripts/probe.sh

# Native build using build-rustfs.sh script
.PHONY: build
build:
    @echo "🔨 Building RustFS using build-rustfs.sh script..."
    ./build-rustfs.sh

.PHONY: build-dev
build-dev:
    @echo "🔨 Building RustFS in development mode..."
    ./build-rustfs.sh --dev

# Docker-based build (alternative approach)
# Usage: make BUILD_OS=ubuntu22.04 build-docker
# Output: target/ubuntu22.04/release/rustfs
BUILD_OS ?= rockylinux9.3
.PHONY: build-docker
build-docker: SOURCE_BUILD_IMAGE_NAME = rustfs-$(BUILD_OS):v1
build-docker: SOURCE_BUILD_CONTAINER_NAME = rustfs-$(BUILD_OS)-build
build-docker: BUILD_CMD = /root/.cargo/bin/cargo build --release --bin rustfs --target-dir /root/s3-rustfs/target/$(BUILD_OS)
build-docker:
    @echo "🐳 Building RustFS using Docker ($(BUILD_OS))..."
    $(DOCKER_CLI) buildx build -t $(SOURCE_BUILD_IMAGE_NAME) -f $(DOCKERFILE_SOURCE) .
    $(DOCKER_CLI) run --rm --name $(SOURCE_BUILD_CONTAINER_NAME) -v $(shell pwd):/root/s3-rustfs -it $(SOURCE_BUILD_IMAGE_NAME) $(BUILD_CMD)

.PHONY: build-musl
build-musl:
    @echo "🔨 Building rustfs for x86_64-unknown-linux-musl..."
    @echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
    ./build-rustfs.sh --platform x86_64-unknown-linux-musl
# Makefile colors config
bold := $(shell tput bold)
normal := $(shell tput sgr0)
errorTitle := $(shell tput setab 1 && tput bold && echo '\n')
recommendation := $(shell tput setab 4)
underline := $(shell tput smul)
reset := $(shell tput -Txterm sgr0)
black := $(shell tput setaf 0)
red := $(shell tput setaf 1)
green := $(shell tput setaf 2)
yellow := $(shell tput setaf 3)
blue := $(shell tput setaf 4)
magenta := $(shell tput setaf 5)
cyan := $(shell tput setaf 6)
white := $(shell tput setaf 7)

.PHONY: build-gnu
build-gnu:
    @echo "🔨 Building rustfs for x86_64-unknown-linux-gnu..."
    @echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
    ./build-rustfs.sh --platform x86_64-unknown-linux-gnu
define HEADER
How to use me:
  # To get help for each target
  ${bold}make help${reset}

.PHONY: build-musl-arm64
build-musl-arm64:
    @echo "🔨 Building rustfs for aarch64-unknown-linux-musl..."
    @echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
    ./build-rustfs.sh --platform aarch64-unknown-linux-musl
  # To run and execute a target
  ${bold}make ${cyan}<target>${reset}

.PHONY: build-gnu-arm64
build-gnu-arm64:
    @echo "🔨 Building rustfs for aarch64-unknown-linux-gnu..."
    @echo "💡 On macOS/Windows, use 'make build-docker' or 'make docker-dev' instead"
    ./build-rustfs.sh --platform aarch64-unknown-linux-gnu
💡 For more help use 'make help', 'make help-build' or 'make help-docker'

.PHONY: deploy-dev
deploy-dev: build-musl
    @echo "🚀 Deploying to dev server: $${IP}"
    ./scripts/dev_deploy.sh $${IP}
🦀 RustFS Makefile Help:

# ========================================================================================
# Docker Multi-Architecture Builds (Primary Methods)
# ========================================================================================
📋 Main Command Categories:
  make help-build  # Show build-related help
  make help-docker # Show Docker-related help

# Production builds using docker-buildx.sh (for CI/CD and production)
.PHONY: docker-buildx
docker-buildx:
    @echo "🏗️ Building multi-architecture production Docker images with buildx..."
    ./docker-buildx.sh
🔧 Code Quality:
  make fmt        # Format code
  make clippy     # Run clippy checks
  make test       # Run tests
  make pre-commit # Run all pre-commit checks

.PHONY: docker-buildx-push
docker-buildx-push:
    @echo "🚀 Building and pushing multi-architecture production Docker images with buildx..."
    ./docker-buildx.sh --push
🚀 Quick Start:
  make build            # Build RustFS binary
  make docker-dev-local # Build development Docker image (local)
  make dev-env-start    # Start development environment

.PHONY: docker-buildx-version
docker-buildx-version:
    @if [ -z "$(VERSION)" ]; then \
        echo "❌ Error: Please specify version, example: make docker-buildx-version VERSION=v1.0.0"; \
        exit 1; \
    fi
    @echo "🏗️ Building multi-architecture production Docker images (version: $(VERSION))..."
    ./docker-buildx.sh --release $(VERSION)

.PHONY: docker-buildx-push-version
docker-buildx-push-version:
    @if [ -z "$(VERSION)" ]; then \
        echo "❌ Error: Please specify version, example: make docker-buildx-push-version VERSION=v1.0.0"; \
        exit 1; \
    fi
    @echo "🚀 Building and pushing multi-architecture production Docker images (version: $(VERSION))..."
    ./docker-buildx.sh --release $(VERSION) --push
endef
export HEADER

# Development/Source builds using direct buildx commands
.PHONY: docker-dev
docker-dev:
    @echo "🏗️ Building multi-architecture development Docker images with buildx..."
    @echo "💡 This builds from source code and is intended for local development and testing"
    @echo "⚠️ Multi-arch images cannot be loaded locally, use docker-dev-push to push to registry"
    $(DOCKER_CLI) buildx build \
        --platform linux/amd64,linux/arm64 \
        --file $(DOCKERFILE_SOURCE) \
        --tag rustfs:source-latest \
        --tag rustfs:dev-latest \
        .
-include $(addsuffix /*.mak, $(shell find .config/make -type d))
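# That single -include line is what turns the monolithic Makefile into the
# modular layout added above: every .config/make/*.mak fragment is spliced in
# at parse time, and the leading dash keeps make quiet if the glob matches
# nothing. Extending the build is then just a matter of dropping in another
# fragment (bench.mak below is a hypothetical example, not part of this diff):
#
#   # .config/make/bench.mak
#   .PHONY: bench
#   bench: core-deps ## Run benchmarks
#       cargo bench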
.PHONY: docker-dev-local
docker-dev-local:
    @echo "🏗️ Building single-architecture development Docker image for local use..."
    @echo "💡 This builds from source code for the current platform and loads locally"
    $(DOCKER_CLI) buildx build \
        --file $(DOCKERFILE_SOURCE) \
        --tag rustfs:source-latest \
        --tag rustfs:dev-latest \
        --load \
        .

.PHONY: docker-dev-push
docker-dev-push:
    @if [ -z "$(REGISTRY)" ]; then \
        echo "❌ Error: Please specify registry, example: make docker-dev-push REGISTRY=ghcr.io/username"; \
        exit 1; \
    fi
    @echo "🚀 Building and pushing multi-architecture development Docker images..."
    @echo "💡 Pushing to registry: $(REGISTRY)"
    $(DOCKER_CLI) buildx build \
        --platform linux/amd64,linux/arm64 \
        --file $(DOCKERFILE_SOURCE) \
        --tag $(REGISTRY)/rustfs:source-latest \
        --tag $(REGISTRY)/rustfs:dev-latest \
        --push \
        .

# Local production builds using direct buildx (alternative to docker-buildx.sh)
.PHONY: docker-buildx-production-local
docker-buildx-production-local:
    @echo "🏗️ Building single-architecture production Docker image locally..."
    @echo "💡 Alternative to docker-buildx.sh for local testing"
    $(DOCKER_CLI) buildx build \
        --file $(DOCKERFILE_PRODUCTION) \
        --tag rustfs:production-latest \
        --tag rustfs:latest \
        --load \
        --build-arg RELEASE=latest \
        .

# ========================================================================================
# Single Architecture Docker Builds (Traditional)
# ========================================================================================

.PHONY: docker-build-production
docker-build-production:
    @echo "🏗️ Building single-architecture production Docker image..."
    @echo "💡 Consider using 'make docker-buildx-production-local' for multi-arch support"
    $(DOCKER_CLI) build -f $(DOCKERFILE_PRODUCTION) -t rustfs:latest .

.PHONY: docker-build-source
docker-build-source:
    @echo "🏗️ Building single-architecture source Docker image..."
    @echo "💡 Consider using 'make docker-dev-local' for multi-arch support"
    DOCKER_BUILDKIT=1 $(DOCKER_CLI) build \
        --build-arg BUILDKIT_INLINE_CACHE=1 \
        -f $(DOCKERFILE_SOURCE) -t rustfs:source .

# ========================================================================================
# Development Environment
# ========================================================================================

.PHONY: dev-env-start
dev-env-start:
    @echo "🚀 Starting development environment..."
    $(DOCKER_CLI) buildx build \
        --file $(DOCKERFILE_SOURCE) \
        --tag rustfs:dev \
        --load \
        .
    $(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
    $(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true
    $(DOCKER_CLI) run -d --name $(CONTAINER_NAME) \
        -p 9010:9010 -p 9000:9000 \
        -v $(shell pwd):/workspace \
        -it rustfs:dev

.PHONY: dev-env-stop
dev-env-stop:
    @echo "🛑 Stopping development environment..."
    $(DOCKER_CLI) stop $(CONTAINER_NAME) 2>/dev/null || true
    $(DOCKER_CLI) rm $(CONTAINER_NAME) 2>/dev/null || true

.PHONY: dev-env-restart
dev-env-restart: dev-env-stop dev-env-start

# ========================================================================================
# Build Utilities
# ========================================================================================

.PHONY: docker-inspect-multiarch
docker-inspect-multiarch:
    @if [ -z "$(IMAGE)" ]; then \
        echo "❌ Error: Please specify image, example: make docker-inspect-multiarch IMAGE=rustfs/rustfs:latest"; \
        exit 1; \
    fi
    @echo "🔍 Inspecting multi-architecture image: $(IMAGE)"
    docker buildx imagetools inspect $(IMAGE)

.PHONY: build-cross-all
build-cross-all:
    @echo "🔧 Building all target architectures..."
    @echo "💡 On macOS/Windows, use 'make docker-dev' for reliable multi-arch builds"
    @echo "🔨 Generating protobuf code..."
    cargo run --bin gproto || true
    @echo "🔨 Building x86_64-unknown-linux-gnu..."
    ./build-rustfs.sh --platform x86_64-unknown-linux-gnu
    @echo "🔨 Building aarch64-unknown-linux-gnu..."
    ./build-rustfs.sh --platform aarch64-unknown-linux-gnu
    @echo "🔨 Building x86_64-unknown-linux-musl..."
    ./build-rustfs.sh --platform x86_64-unknown-linux-musl
    @echo "🔨 Building aarch64-unknown-linux-musl..."
    ./build-rustfs.sh --platform aarch64-unknown-linux-musl
    @echo "✅ All architectures built successfully!"

# ========================================================================================
# Help and Documentation
# ========================================================================================

.PHONY: help-build
help-build:
    @echo "🔨 RustFS Build Help:"
    @echo ""
    @echo "🚀 Local Build (Recommended):"
    @echo "  make build            # Build RustFS binary (includes console by default)"
    @echo "  make build-dev        # Development mode build"
    @echo "  make build-musl       # Build x86_64 musl version"
    @echo "  make build-gnu        # Build x86_64 GNU version"
    @echo "  make build-musl-arm64 # Build aarch64 musl version"
|
||||
@echo " make build-gnu-arm64 # Build aarch64 GNU version"
|
||||
@echo ""
|
||||
@echo "🐳 Docker Build:"
|
||||
@echo " make build-docker # Build using Docker container"
|
||||
@echo " make build-docker BUILD_OS=ubuntu22.04 # Specify build system"
|
||||
@echo ""
|
||||
@echo "🏗️ Cross-architecture Build:"
|
||||
@echo " make build-cross-all # Build binaries for all architectures"
|
||||
@echo ""
|
||||
@echo "🔧 Direct usage of build-rustfs.sh script:"
|
||||
@echo " ./build-rustfs.sh --help # View script help"
|
||||
@echo " ./build-rustfs.sh --no-console # Build without console resources"
|
||||
@echo " ./build-rustfs.sh --force-console-update # Force update console resources"
|
||||
@echo " ./build-rustfs.sh --dev # Development mode build"
|
||||
@echo " ./build-rustfs.sh --sign # Sign binary files"
|
||||
@echo " ./build-rustfs.sh --platform x86_64-unknown-linux-gnu # Specify target platform"
|
||||
@echo " ./build-rustfs.sh --skip-verification # Skip binary verification"
|
||||
@echo ""
|
||||
@echo "💡 build-rustfs.sh script provides more options, smart detection and binary verification"
|
||||
|
||||
.PHONY: help-docker
|
||||
help-docker:
|
||||
@echo "🐳 Docker Multi-architecture Build Help:"
|
||||
@echo ""
|
||||
@echo "🚀 Production Image Build (Recommended to use docker-buildx.sh):"
|
||||
@echo " make docker-buildx # Build production multi-arch image (no push)"
|
||||
@echo " make docker-buildx-push # Build and push production multi-arch image"
|
||||
@echo " make docker-buildx-version VERSION=v1.0.0 # Build specific version"
|
||||
@echo " make docker-buildx-push-version VERSION=v1.0.0 # Build and push specific version"
|
||||
@echo ""
|
||||
@echo "🔧 Development/Source Image Build (Local development testing):"
|
||||
@echo " make docker-dev # Build dev multi-arch image (cannot load locally)"
|
||||
@echo " make docker-dev-local # Build dev single-arch image (local load)"
|
||||
@echo " make docker-dev-push REGISTRY=xxx # Build and push dev image"
|
||||
@echo ""
|
||||
@echo "🏗️ Local Production Image Build (Alternative):"
|
||||
@echo " make docker-buildx-production-local # Build production single-arch image locally"
|
||||
@echo ""
|
||||
@echo "📦 Single-architecture Build (Traditional way):"
|
||||
@echo " make docker-build-production # Build single-arch production image"
|
||||
@echo " make docker-build-source # Build single-arch source image"
|
||||
@echo ""
|
||||
@echo "🚀 Development Environment Management:"
|
||||
@echo " make dev-env-start # Start development container environment"
|
||||
@echo " make dev-env-stop # Stop development container environment"
|
||||
@echo " make dev-env-restart # Restart development container environment"
|
||||
@echo ""
|
||||
@echo "🔧 Auxiliary Tools:"
|
||||
@echo " make build-cross-all # Build binaries for all architectures"
|
||||
@echo " make docker-inspect-multiarch IMAGE=xxx # Check image architecture support"
|
||||
@echo ""
|
||||
@echo "📋 Environment Variables:"
|
||||
@echo " REGISTRY Image registry address (required for push)"
|
||||
@echo " DOCKERHUB_USERNAME Docker Hub username"
|
||||
@echo " DOCKERHUB_TOKEN Docker Hub access token"
|
||||
@echo " GITHUB_TOKEN GitHub access token"
|
||||
@echo ""
|
||||
@echo "💡 Suggestions:"
|
||||
@echo " - Production use: Use docker-buildx* commands (based on precompiled binaries)"
|
||||
@echo " - Local development: Use docker-dev* commands (build from source)"
|
||||
@echo " - Development environment: Use dev-env-* commands to manage dev containers"
|
||||
|
||||
.PHONY: help
|
||||
help:
|
||||
@echo "🦀 RustFS Makefile Help:"
|
||||
@echo ""
|
||||
@echo "📋 Main Command Categories:"
|
||||
@echo " make help-build # Show build-related help"
|
||||
@echo " make help-docker # Show Docker-related help"
|
||||
@echo ""
|
||||
@echo "🔧 Code Quality:"
|
||||
@echo " make fmt # Format code"
|
||||
@echo " make clippy # Run clippy checks"
|
||||
@echo " make test # Run tests"
|
||||
@echo " make pre-commit # Run all pre-commit checks"
|
||||
@echo ""
|
||||
@echo "🚀 Quick Start:"
|
||||
@echo " make build # Build RustFS binary"
|
||||
@echo " make docker-dev-local # Build development Docker image (local)"
|
||||
@echo " make dev-env-start # Start development environment"
|
||||
@echo ""
|
||||
@echo "💡 For more help use 'make help-build' or 'make help-docker'"
|
||||
|
||||
14 README.md
@@ -10,6 +10,11 @@
<a href="https://hellogithub.com/repository/rustfs/rustfs" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=b95bcb72bdc340b68f16fdf6790b7d5b&claim_uid=MsbvjYeLDKAH457&theme=small" alt="Featured|HelloGitHub" /></a>
</p>

<p align="center">
<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://trendshift.io/api/badge/repositories/14181" alt="rustfs%2Frustfs | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
</p>

<p align="center">
<a href="https://docs.rustfs.com/installation/">Getting Started</a>
· <a href="https://docs.rustfs.com/">Docs</a>
@@ -45,10 +50,10 @@ Unlike other storage systems, RustFS is released under the permissible Apache 2.
| :--- | :--- | :--- | :--- |
| **S3 Core Features** | ✅ Available | **Bitrot Protection** | ✅ Available |
| **Upload / Download** | ✅ Available | **Single Node Mode** | ✅ Available |
| **Versioning** | ✅ Available | **Bucket Replication** | ⚠️ Partial Support |
| **Versioning** | ✅ Available | **Bucket Replication** | ✅ Available |
| **Logging** | ✅ Available | **Lifecycle Management** | 🚧 Under Testing |
| **Event Notifications** | ✅ Available | **Distributed Mode** | 🚧 Under Testing |
| **K8s Helm Charts** | ✅ Available | **OPA (Open Policy Agent)** | 🚧 Under Testing |
| **K8s Helm Charts** | ✅ Available | **RustFS KMS** | 🚧 Under Testing |

@@ -215,11 +220,6 @@ RustFS is a community-driven project, and we appreciate all contributions. Check
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="Contributors" />
</a>

## Github Trending Top

🚀 RustFS is beloved by open-source enthusiasts and enterprise users worldwide, often appearing on the GitHub Trending top charts.

<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>

## Star History

12 README_ZH.md
@@ -10,6 +10,10 @@
<a href="https://hellogithub.com/repository/rustfs/rustfs" target="_blank"><img src="https://abroad.hellogithub.com/v1/widgets/recommend.svg?rid=b95bcb72bdc340b68f16fdf6790b7d5b&claim_uid=MsbvjYeLDKAH457&theme=small" alt="Featured|HelloGitHub" /></a>
</p>

<p align="center">
<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://trendshift.io/api/badge/repositories/14181" alt="rustfs%2Frustfs | Trendshift" style="width: 250px; height: 55px;" width="250" height="55"/></a>
</p>

<p align="center">
<a href="https://docs.rustfs.com/installation/">Getting Started</a>
· <a href="https://docs.rustfs.com/">Docs</a>
@@ -17,6 +21,8 @@
· <a href="https://github.com/rustfs/rustfs/discussions">Community Discussions</a>
</p>

<p align="center">
<a href="https://github.com/rustfs/rustfs/blob/main/README.md">English</a> | 简体中文 |
<a href="https://readme-i18n.com/rustfs/rustfs?lang=de">Deutsch</a> |
@@ -46,7 +52,7 @@ RustFS is a high-performance distributed object storage system built on Rust. RustFS
| :--- | :--- | :--- | :--- |
| **S3 Core Features** | ✅ Available | **Bitrot (Data Rot Protection)** | ✅ Available |
| **Upload / Download** | ✅ Available | **Single Node Mode** | ✅ Available |
| **Versioning** | ✅ Available | **Bucket Replication** | ⚠️ Partially Available |
| **Versioning** | ✅ Available | **Bucket Replication** | ✅ Available |
| **Logging** | ✅ Available | **Lifecycle Management** | 🚧 Under Testing |
| **Event Notifications** | ✅ Available | **Distributed Mode** | 🚧 Under Testing |
| **K8s Helm Chart** | ✅ Available | **OPA (Policy Engine)** | 🚧 Under Testing |
@@ -200,11 +206,7 @@ RustFS is a community-driven project, and we appreciate all contributions. Please see
<img src="https://opencollective.com/rustfs/contributors.svg?width=890&limit=500&button=false" alt="Contributors" />
</a>

## Github Trending Top

🚀 RustFS is beloved by open-source enthusiasts and enterprise users worldwide, and frequently tops the GitHub Trending charts.

<a href="https://trendshift.io/repositories/14181" target="_blank"><img src="https://raw.githubusercontent.com/rustfs/rustfs/refs/heads/main/docs/rustfs-trending.jpg" alt="rustfs%2Frustfs | Trendshift" /></a>

## Star History

@@ -36,6 +36,7 @@ clen = "clen"
datas = "datas"
bre = "bre"
abd = "abd"
mak = "mak"

[files]
extend-exclude = []

@@ -39,4 +39,4 @@ path-clean = { workspace = true }
rmp-serde = { workspace = true }
async-trait = { workspace = true }
s3s = { workspace = true }
tracing = { workspace = true }
tracing = { workspace = true }
@@ -25,18 +25,42 @@ pub static GLOBAL_RUSTFS_PORT: LazyLock<RwLock<String>> = LazyLock::new(|| RwLoc
pub static GLOBAL_RUSTFS_ADDR: LazyLock<RwLock<String>> = LazyLock::new(|| RwLock::new("".to_string()));
pub static GLOBAL_CONN_MAP: LazyLock<RwLock<HashMap<String, Channel>>> = LazyLock::new(|| RwLock::new(HashMap::new()));
pub static GLOBAL_ROOT_CERT: LazyLock<RwLock<Option<Vec<u8>>>> = LazyLock::new(|| RwLock::new(None));
pub static GLOBAL_MTLS_IDENTITY: LazyLock<RwLock<Option<MtlsIdentityPem>>> = LazyLock::new(|| RwLock::new(None));

/// Set the global RustFS address used for gRPC connections.
///
/// # Arguments
/// * `addr` - A string slice representing the RustFS address (e.g., "https://node1:9000").
pub async fn set_global_addr(addr: &str) {
    *GLOBAL_RUSTFS_ADDR.write().await = addr.to_string();
}

/// Set the global root CA certificate for outbound gRPC clients.
/// This certificate is used to validate server TLS certificates.
/// When set to None, clients use the system default root CAs.
///
/// # Arguments
/// * `cert` - A vector of bytes representing the PEM-encoded root CA certificate.
pub async fn set_global_root_cert(cert: Vec<u8>) {
    *GLOBAL_ROOT_CERT.write().await = Some(cert);
}

/// Set the global mTLS identity (cert+key PEM) for outbound gRPC clients.
/// When set, clients will present this identity to servers requesting/requiring mTLS.
/// When None, clients proceed with standard server-authenticated TLS.
///
/// # Arguments
/// * `identity` - An optional MtlsIdentityPem struct containing the cert and key PEM.
pub async fn set_global_mtls_identity(identity: Option<MtlsIdentityPem>) {
    *GLOBAL_MTLS_IDENTITY.write().await = identity;
}

/// Evict a stale/dead connection from the global connection cache.
/// This is critical for cluster recovery when a node dies unexpectedly (e.g., power-off).
/// By removing the cached connection, subsequent requests will establish a fresh connection.
///
/// # Arguments
/// * `addr` - The address of the connection to evict.
pub async fn evict_connection(addr: &str) {
    let removed = GLOBAL_CONN_MAP.write().await.remove(addr);
    if removed.is_some() {
@@ -45,6 +69,12 @@ pub async fn evict_connection(addr: &str) {
}

/// Check if a connection exists in the cache for the given address.
///
/// # Arguments
/// * `addr` - The address to check.
///
/// # Returns
/// * `bool` - True if a cached connection exists, false otherwise.
pub async fn has_cached_connection(addr: &str) -> bool {
    GLOBAL_CONN_MAP.read().await.contains_key(addr)
}
@@ -58,3 +88,12 @@ pub async fn clear_all_connections() {
    tracing::warn!("Cleared {} cached connections from global map", count);
}
}

/// Optional client identity (cert+key PEM) for outbound mTLS.
///
/// When present, gRPC clients will present this identity to servers requesting/requiring mTLS.
/// When absent, clients proceed with standard server-authenticated TLS.
#[derive(Clone, Debug)]
pub struct MtlsIdentityPem {
    pub cert_pem: Vec<u8>,
    pub key_pem: Vec<u8>,
}
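
For orientation, here is a minimal sketch of how the new globals above are intended to be wired together before any outbound gRPC connection is made. It is not part of this changeset: the `bootstrap_mtls` helper, the file paths, and the use of `tokio::fs` are assumptions for illustration only.

```rust
// Hypothetical bootstrap sketch, built only on the functions shown in this
// diff: set_global_root_cert, set_global_mtls_identity, evict_connection,
// and the MtlsIdentityPem struct. Assumes tokio is available.
async fn bootstrap_mtls() -> std::io::Result<()> {
    // Example paths; a real deployment would read them from configuration.
    let root_ca = tokio::fs::read("/etc/rustfs/ca.crt").await?;
    set_global_root_cert(root_ca).await;

    let identity = MtlsIdentityPem {
        cert_pem: tokio::fs::read("/etc/rustfs/client_cert.pem").await?,
        key_pem: tokio::fs::read("/etc/rustfs/client_key.pem").await?,
    };
    set_global_mtls_identity(Some(identity)).await;

    // On a transport failure, dropping the cached channel forces the next
    // request to dial a fresh connection (the cluster-recovery path).
    evict_connection("https://node1:9000").await;
    Ok(())
}
```
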
@@ -49,21 +49,6 @@ pub const SERVICE_VERSION: &str = "1.0.0";
/// Default value: production
pub const ENVIRONMENT: &str = "production";

/// Default Access Key
/// Default value: rustfsadmin
/// Environment variable: RUSTFS_ACCESS_KEY
/// Command line argument: --access-key
/// Example: RUSTFS_ACCESS_KEY=rustfsadmin
/// Example: --access-key rustfsadmin
pub const DEFAULT_ACCESS_KEY: &str = "rustfsadmin";
/// Default Secret Key
/// Default value: rustfsadmin
/// Environment variable: RUSTFS_SECRET_KEY
/// Command line argument: --secret-key
/// Example: RUSTFS_SECRET_KEY=rustfsadmin
/// Example: --secret-key rustfsadmin
pub const DEFAULT_SECRET_KEY: &str = "rustfsadmin";

/// Default console enable
/// This is the default value for the console server.
/// It is used to enable or disable the console server.
@@ -185,6 +170,12 @@ pub const KI_B: usize = 1024;
/// Default value: 1048576
pub const MI_B: usize = 1024 * 1024;

/// Environment variable for gRPC authentication token
/// Used to set the authentication token for gRPC communication
/// Example: RUSTFS_GRPC_AUTH_TOKEN=your_token_here
/// Default value: No default value. RUSTFS_SECRET_KEY value is recommended.
pub const ENV_GRPC_AUTH_TOKEN: &str = "RUSTFS_GRPC_AUTH_TOKEN";

#[cfg(test)]
mod tests {
    use super::*;
@@ -225,20 +216,6 @@
        );
    }

    #[test]
    fn test_security_constants() {
        // Test security related constants
        assert_eq!(DEFAULT_ACCESS_KEY, "rustfsadmin");
        assert!(DEFAULT_ACCESS_KEY.len() >= 8, "Access key should be at least 8 characters");

        assert_eq!(DEFAULT_SECRET_KEY, "rustfsadmin");
        assert!(DEFAULT_SECRET_KEY.len() >= 8, "Secret key should be at least 8 characters");

        // In a production environment, the access key and secret key should be different.
        // These are default values, so being the same is acceptable, but it should be called out in documentation.
        println!("Warning: Default access key and secret key are the same. Change them in production!");
    }

    #[test]
    fn test_file_path_constants() {
        assert_eq!(RUSTFS_TLS_KEY, "rustfs_key.pem");
@@ -300,8 +277,6 @@
        DEFAULT_LOG_LEVEL,
        SERVICE_VERSION,
        ENVIRONMENT,
        DEFAULT_ACCESS_KEY,
        DEFAULT_SECRET_KEY,
        RUSTFS_TLS_KEY,
        RUSTFS_TLS_CERT,
        DEFAULT_ADDRESS,
@@ -331,29 +306,6 @@
        assert_ne!(DEFAULT_CONSOLE_PORT, 0, "Console port should not be zero");
    }

    #[test]
    fn test_security_best_practices() {
        // Test security best practices

        // These are default values and should be changed in production environments
        println!("Security Warning: Default credentials detected!");
        println!("Access Key: {DEFAULT_ACCESS_KEY}");
        println!("Secret Key: {DEFAULT_SECRET_KEY}");
        println!("These should be changed in production environments!");

        // Verify that key lengths meet minimum security requirements
        assert!(DEFAULT_ACCESS_KEY.len() >= 8, "Access key should be at least 8 characters");
        assert!(DEFAULT_SECRET_KEY.len() >= 8, "Secret key should be at least 8 characters");

        // Check if default credentials contain common insecure patterns
        let _insecure_patterns = ["admin", "password", "123456", "default"];
        let _access_key_lower = DEFAULT_ACCESS_KEY.to_lowercase();
        let _secret_key_lower = DEFAULT_SECRET_KEY.to_lowercase();

        // Note: More security check logic can be added here
        // For example, check if keys contain insecure patterns
    }

    #[test]
    fn test_configuration_consistency() {
        // Test configuration consistency

@@ -35,3 +35,52 @@ pub const ENV_TRUST_SYSTEM_CA: &str = "RUSTFS_TRUST_SYSTEM_CA";
/// By default, RustFS does not trust system CA certificates.
/// To change this behavior, set the environment variable RUSTFS_TRUST_SYSTEM_CA=1
pub const DEFAULT_TRUST_SYSTEM_CA: bool = false;

/// Environment variable to trust leaf certificates as CA
/// When set to "1", RustFS will treat leaf certificates as CA certificates for trust validation.
/// By default, this is disabled.
/// To enable, set the environment variable RUSTFS_TRUST_LEAF_CERT_AS_CA=1
pub const ENV_TRUST_LEAF_CERT_AS_CA: &str = "RUSTFS_TRUST_LEAF_CERT_AS_CA";

/// Default value for trusting leaf certificates as CA
/// By default, RustFS does not trust leaf certificates as CA.
/// To change this behavior, set the environment variable RUSTFS_TRUST_LEAF_CERT_AS_CA=1
pub const DEFAULT_TRUST_LEAF_CERT_AS_CA: bool = false;

/// Default filename for client CA certificate
/// client_ca.crt (CA bundle for verifying client certificates in server mTLS)
pub const RUSTFS_CLIENT_CA_CERT_FILENAME: &str = "client_ca.crt";

/// Environment variable for client certificate file path
/// RUSTFS_MTLS_CLIENT_CERT
/// Specifies the file path to the client certificate used for mTLS authentication.
/// If not set, RustFS will look for the default filename "client_cert.pem" in the current directory.
/// To set, use the environment variable RUSTFS_MTLS_CLIENT_CERT=/path/to/client_cert.pem
pub const ENV_MTLS_CLIENT_CERT: &str = "RUSTFS_MTLS_CLIENT_CERT";

/// Default filename for client certificate
/// client_cert.pem
pub const RUSTFS_CLIENT_CERT_FILENAME: &str = "client_cert.pem";

/// Environment variable for client private key file path
/// RUSTFS_MTLS_CLIENT_KEY
/// Specifies the file path to the client private key used for mTLS authentication.
/// If not set, RustFS will look for the default filename "client_key.pem" in the current directory.
/// To set, use the environment variable RUSTFS_MTLS_CLIENT_KEY=/path/to/client_key.pem
pub const ENV_MTLS_CLIENT_KEY: &str = "RUSTFS_MTLS_CLIENT_KEY";

/// Default filename for client private key
/// client_key.pem
pub const RUSTFS_CLIENT_KEY_FILENAME: &str = "client_key.pem";

/// RUSTFS_SERVER_MTLS_ENABLE
/// Environment variable to enable server mTLS
/// When set to "1", RustFS server will require client certificates for authentication.
/// By default, this is disabled.
/// To enable, set the environment variable RUSTFS_SERVER_MTLS_ENABLE=1
pub const ENV_SERVER_MTLS_ENABLE: &str = "RUSTFS_SERVER_MTLS_ENABLE";

/// Default value for enabling server mTLS
/// By default, RustFS server mTLS is disabled.
/// To change this behavior, set the environment variable RUSTFS_SERVER_MTLS_ENABLE=1
pub const DEFAULT_SERVER_MTLS_ENABLE: bool = false;

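A small illustrative helper (hypothetical, not in the patch) shows how these environment variables and default filenames are documented to combine: the env var takes precedence, and otherwise the default filename in the current directory is used.

```rust
use std::env;
use std::path::PathBuf;

// Illustrative only; assumes the constants above (ENV_MTLS_CLIENT_CERT,
// ENV_MTLS_CLIENT_KEY, RUSTFS_CLIENT_CERT_FILENAME, RUSTFS_CLIENT_KEY_FILENAME)
// are in scope. Resolves the client identity paths the way the doc comments
// describe: env var first, default filename in the current directory otherwise.
fn resolve_client_identity_paths() -> (PathBuf, PathBuf) {
    let cert = env::var(ENV_MTLS_CLIENT_CERT)
        .map(PathBuf::from)
        .unwrap_or_else(|_| PathBuf::from(RUSTFS_CLIENT_CERT_FILENAME));
    let key = env::var(ENV_MTLS_CLIENT_KEY)
        .map(PathBuf::from)
        .unwrap_or_else(|_| PathBuf::from(RUSTFS_CLIENT_KEY_FILENAME));
    (cert, key)
}
```
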
21 crates/credentials/Cargo.toml Normal file
@@ -0,0 +1,21 @@
[package]
name = "rustfs-credentials"
edition.workspace = true
license.workspace = true
repository.workspace = true
rust-version.workspace = true
version.workspace = true
homepage.workspace = true
description = "Credentials management utilities for RustFS, enabling secure handling of authentication and authorization data."
keywords = ["rustfs", "Minio", "credentials", "authentication", "authorization"]
categories = ["web-programming", "development-tools", "data-structures", "security"]

[dependencies]
base64-simd = { workspace = true }
rand = { workspace = true }
serde = { workspace = true }
serde_json.workspace = true
time = { workspace = true, features = ["serde-human-readable"] }

[lints]
workspace = true
44 crates/credentials/README.md Normal file
@@ -0,0 +1,44 @@
[](https://rustfs.com)

# RustFS Credentials - Credential Management Module

<p align="center">
<strong>A module for managing credentials within the RustFS distributed object storage system.</strong>
</p>

<p align="center">
<a href="https://github.com/rustfs/rustfs/actions/workflows/ci.yml"><img alt="CI" src="https://github.com/rustfs/rustfs/actions/workflows/ci.yml/badge.svg" /></a>
<a href="https://docs.rustfs.com/">📖 Documentation</a>
· <a href="https://github.com/rustfs/rustfs/issues">🐛 Bug Reports</a>
· <a href="https://github.com/rustfs/rustfs/discussions">💬 Discussions</a>
</p>

---

This module provides a secure and efficient way to handle various types of credentials,
such as API keys, access tokens, and cryptographic keys, required for interacting with
the RustFS ecosystem and external services.

## 📖 Overview

**RustFS Credentials** is a module dedicated to managing credentials for the [RustFS](https://rustfs.com) distributed
object storage system. For the complete RustFS experience,
please visit the [main RustFS repository](https://github.com/rustfs/rustfs).

## ✨ Features

- Secure storage and retrieval of credentials
- Support for multiple credential types (API keys, tokens, etc.)
- Encryption of sensitive credential data
- Integration with external secret management systems
- Easy-to-use API for credential management
- Credential rotation and expiration handling

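As a quick orientation, here is a minimal usage sketch based on the `gen_access_key`/`gen_secret_key` API and `Credentials` struct this changeset adds; the `example` function itself is illustrative, and the 20/32 lengths mirror the defaults used elsewhere in the patch.

```rust
use rustfs_credentials::{gen_access_key, gen_secret_key, Credentials};

// Sketch: generate a random key pair and build a credential record.
fn example() -> std::io::Result<Credentials> {
    Ok(Credentials {
        access_key: gen_access_key(20)?, // alphanumeric, min length 3
        secret_key: gen_secret_key(32)?, // base64-derived, min length 8
        ..Default::default()
    })
}
```
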
## 📚 Documentation

For comprehensive documentation, examples, and usage guides, please visit the
main [RustFS repository](https://github.com/rustfs/rustfs).

## 📄 License

This project is licensed under the Apache License 2.0 - see the [LICENSE](../../LICENSE) file for details.
94 crates/credentials/src/constants.rs Normal file
@@ -0,0 +1,94 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/// Default Access Key
/// Default value: rustfsadmin
/// Environment variable: RUSTFS_ACCESS_KEY
/// Command line argument: --access-key
/// Example: RUSTFS_ACCESS_KEY=rustfsadmin
/// Example: --access-key rustfsadmin
pub const DEFAULT_ACCESS_KEY: &str = "rustfsadmin";
/// Default Secret Key
/// Default value: rustfsadmin
/// Environment variable: RUSTFS_SECRET_KEY
/// Command line argument: --secret-key
/// Example: RUSTFS_SECRET_KEY=rustfsadmin
/// Example: --secret-key rustfsadmin
pub const DEFAULT_SECRET_KEY: &str = "rustfsadmin";

/// Environment variable for gRPC authentication token
/// Used to set the authentication token for gRPC communication
/// Example: RUSTFS_GRPC_AUTH_TOKEN=your_token_here
/// Default value: No default value. RUSTFS_SECRET_KEY value is recommended.
pub const ENV_GRPC_AUTH_TOKEN: &str = "RUSTFS_GRPC_AUTH_TOKEN";

/// IAM Policy Types
/// Used to differentiate between embedded and inherited policies
/// Example: "embedded-policy" or "inherited-policy"
/// Default value: "embedded-policy"
pub const EMBEDDED_POLICY_TYPE: &str = "embedded-policy";

/// IAM Policy Types
/// Used to differentiate between embedded and inherited policies
/// Example: "embedded-policy" or "inherited-policy"
/// Default value: "inherited-policy"
pub const INHERITED_POLICY_TYPE: &str = "inherited-policy";

/// IAM Policy Claim Name for Service Account
/// Used to identify the service account policy claim in JWT tokens
/// Example: "sa-policy"
/// Default value: "sa-policy"
pub const IAM_POLICY_CLAIM_NAME_SA: &str = "sa-policy";

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_security_constants() {
        // Test security related constants
        assert_eq!(DEFAULT_ACCESS_KEY, "rustfsadmin");
        assert!(DEFAULT_ACCESS_KEY.len() >= 8, "Access key should be at least 8 characters");

        assert_eq!(DEFAULT_SECRET_KEY, "rustfsadmin");
        assert!(DEFAULT_SECRET_KEY.len() >= 8, "Secret key should be at least 8 characters");

        // In a production environment, the access key and secret key should be different.
        // These are default values, so being the same is acceptable, but it should be called out in documentation.
        println!("Warning: Default access key and secret key are the same. Change them in production!");
    }

    #[test]
    fn test_security_best_practices() {
        // Test security best practices

        // These are default values and should be changed in production environments
        println!("Security Warning: Default credentials detected!");
        println!("Access Key: {DEFAULT_ACCESS_KEY}");
        println!("Secret Key: {DEFAULT_SECRET_KEY}");
        println!("These should be changed in production environments!");

        // Verify that key lengths meet minimum security requirements
        assert!(DEFAULT_ACCESS_KEY.len() >= 8, "Access key should be at least 8 characters");
        assert!(DEFAULT_SECRET_KEY.len() >= 8, "Secret key should be at least 8 characters");

        // Check if default credentials contain common insecure patterns
        let _insecure_patterns = ["admin", "password", "123456", "default"];
        let _access_key_lower = DEFAULT_ACCESS_KEY.to_lowercase();
        let _secret_key_lower = DEFAULT_SECRET_KEY.to_lowercase();

        // Note: More security check logic can be added here
        // For example, check if keys contain insecure patterns
    }
}
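To make the pairing of these constants concrete, the following illustrative fragment (not part of the patch) builds a service-account claims map whose policy is inherited from the parent user, which is exactly the shape that `is_implied_policy` in `credentials.rs` below checks for.

```rust
use serde_json::Value;
use std::collections::HashMap;

// Illustrative only; assumes IAM_POLICY_CLAIM_NAME_SA and
// INHERITED_POLICY_TYPE from the constants module above are in scope.
fn inherited_policy_claims() -> HashMap<String, Value> {
    let mut claims = HashMap::new();
    // Marks the account as a service account whose policy comes from
    // its parent user rather than being embedded in the account itself.
    claims.insert(
        IAM_POLICY_CLAIM_NAME_SA.to_string(),
        Value::String(INHERITED_POLICY_TYPE.to_string()),
    );
    claims
}
```
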
386 crates/credentials/src/credentials.rs Normal file
@@ -0,0 +1,386 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::{DEFAULT_SECRET_KEY, ENV_GRPC_AUTH_TOKEN, IAM_POLICY_CLAIM_NAME_SA, INHERITED_POLICY_TYPE};
use rand::{Rng, RngCore};
use serde::{Deserialize, Serialize};
use serde_json::Value;
use std::collections::HashMap;
use std::env;
use std::io::Error;
use std::sync::OnceLock;
use time::OffsetDateTime;

/// Global active credentials
static GLOBAL_ACTIVE_CRED: OnceLock<Credentials> = OnceLock::new();

/// Global gRPC authentication token
static GLOBAL_GRPC_AUTH_TOKEN: OnceLock<String> = OnceLock::new();

/// Initialize the global action credentials
///
/// # Arguments
/// * `ak` - Optional access key
/// * `sk` - Optional secret key
///
/// # Returns
/// * `Result<(), Box<Credentials>>` - Ok if successful, Err with existing credentials if already initialized
///
/// # Panics
/// This function panics if automatic credential generation fails when `ak` or `sk`
/// are `None`, for example if the random number generator fails while calling
/// `gen_access_key` or `gen_secret_key`.
pub fn init_global_action_credentials(ak: Option<String>, sk: Option<String>) -> Result<(), Box<Credentials>> {
    let ak = ak.unwrap_or_else(|| gen_access_key(20).expect("Failed to generate access key"));
    let sk = sk.unwrap_or_else(|| gen_secret_key(32).expect("Failed to generate secret key"));

    let cred = Credentials {
        access_key: ak,
        secret_key: sk,
        ..Default::default()
    };

    GLOBAL_ACTIVE_CRED.set(cred).map_err(|e| {
        Box::new(Credentials {
            access_key: e.access_key.clone(),
            ..Default::default()
        })
    })
}

/// Get the global action credentials
pub fn get_global_action_cred() -> Option<Credentials> {
    GLOBAL_ACTIVE_CRED.get().cloned()
}

/// Get the global secret key
///
/// # Returns
/// * `Option<String>` - The global secret key, if set
///
pub fn get_global_secret_key_opt() -> Option<String> {
    GLOBAL_ACTIVE_CRED.get().map(|cred| cred.secret_key.clone())
}

/// Get the global secret key
///
/// # Returns
/// * `String` - The global secret key, or empty string if not set
///
pub fn get_global_secret_key() -> String {
    GLOBAL_ACTIVE_CRED
        .get()
        .map(|cred| cred.secret_key.clone())
        .unwrap_or_default()
}

/// Get the global access key
///
/// # Returns
/// * `Option<String>` - The global access key, if set
///
pub fn get_global_access_key_opt() -> Option<String> {
    GLOBAL_ACTIVE_CRED.get().map(|cred| cred.access_key.clone())
}

/// Get the global access key
///
/// # Returns
/// * `String` - The global access key, or empty string if not set
///
pub fn get_global_access_key() -> String {
    GLOBAL_ACTIVE_CRED
        .get()
        .map(|cred| cred.access_key.clone())
        .unwrap_or_default()
}

/// Generates a random access key of the specified length.
///
/// # Arguments
/// * `length` - The length of the access key to generate
///
/// # Returns
/// * `Result<String>` - A result containing the generated access key or an error if the length is too short
///
/// # Errors
/// This function will return an error if the specified length is less than 3.
///
/// # Examples
/// ```no_run
/// use rustfs_credentials::gen_access_key;
///
/// let access_key = gen_access_key(16).unwrap();
/// println!("Generated access key: {}", access_key);
/// ```
///
pub fn gen_access_key(length: usize) -> std::io::Result<String> {
    const ALPHA_NUMERIC_TABLE: [char; 36] = [
        '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
        'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
    ];

    if length < 3 {
        return Err(Error::other("access key length is too short"));
    }

    let mut result = String::with_capacity(length);
    let mut rng = rand::rng();

    for _ in 0..length {
        result.push(ALPHA_NUMERIC_TABLE[rng.random_range(0..ALPHA_NUMERIC_TABLE.len())]);
    }

    Ok(result)
}

/// Generates a random secret key of the specified length.
///
/// # Arguments
/// * `length` - The length of the secret key to generate
///
/// # Returns
/// * `Result<String>` - A result containing the generated secret key or an error if the length is too short
///
/// # Errors
/// This function will return an error if the specified length is less than 8.
///
/// # Examples
/// ```no_run
/// use rustfs_credentials::gen_secret_key;
///
/// let secret_key = gen_secret_key(32).unwrap();
/// println!("Generated secret key: {}", secret_key);
/// ```
///
pub fn gen_secret_key(length: usize) -> std::io::Result<String> {
    use base64_simd::URL_SAFE_NO_PAD;

    if length < 8 {
        return Err(Error::other("secret key length is too short"));
    }
    let mut rng = rand::rng();

    let mut key = vec![0u8; URL_SAFE_NO_PAD.estimated_decoded_length(length)];
    rng.fill_bytes(&mut key);

    let encoded = URL_SAFE_NO_PAD.encode_to_string(&key);
    let key_str = encoded.replace("/", "+");

    Ok(key_str)
}

/// Get the gRPC authentication token from environment variable
///
/// # Returns
/// * `String` - The gRPC authentication token
///
pub fn get_grpc_token() -> String {
    GLOBAL_GRPC_AUTH_TOKEN
        .get_or_init(|| {
            env::var(ENV_GRPC_AUTH_TOKEN)
                .unwrap_or_else(|_| get_global_secret_key_opt().unwrap_or_else(|| DEFAULT_SECRET_KEY.to_string()))
        })
        .clone()
}

/// Credentials structure
///
/// Fields:
/// - access_key: Access key string
/// - secret_key: Secret key string
/// - session_token: Session token string
/// - expiration: Optional expiration time as OffsetDateTime
/// - status: Status string (e.g., "active", "off")
/// - parent_user: Parent user string
/// - groups: Optional list of groups
/// - claims: Optional map of claims
/// - name: Optional name string
/// - description: Optional description string
///
#[derive(Serialize, Deserialize, Clone, Default, Debug)]
pub struct Credentials {
    pub access_key: String,
    pub secret_key: String,
    pub session_token: String,
    pub expiration: Option<OffsetDateTime>,
    pub status: String,
    pub parent_user: String,
    pub groups: Option<Vec<String>>,
    pub claims: Option<HashMap<String, Value>>,
    pub name: Option<String>,
    pub description: Option<String>,
}

impl Credentials {
    pub fn is_expired(&self) -> bool {
        if self.expiration.is_none() {
            return false;
        }

        self.expiration
            .as_ref()
            .map(|e| OffsetDateTime::now_utc() > *e)
            .unwrap_or(false)
    }

    pub fn is_temp(&self) -> bool {
        !self.session_token.is_empty() && !self.is_expired()
    }

    pub fn is_service_account(&self) -> bool {
        self.claims
            .as_ref()
            .map(|x| x.get(IAM_POLICY_CLAIM_NAME_SA).is_some_and(|_| !self.parent_user.is_empty()))
            .unwrap_or_default()
    }

    pub fn is_implied_policy(&self) -> bool {
        if self.is_service_account() {
            return self
                .claims
                .as_ref()
                .map(|x| x.get(IAM_POLICY_CLAIM_NAME_SA).is_some_and(|v| v == INHERITED_POLICY_TYPE))
                .unwrap_or_default();
        }

        false
    }

    pub fn is_valid(&self) -> bool {
        if self.status == "off" {
            return false;
        }

        self.access_key.len() >= 3 && self.secret_key.len() >= 8 && !self.is_expired()
    }

    pub fn is_owner(&self) -> bool {
        false
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{IAM_POLICY_CLAIM_NAME_SA, INHERITED_POLICY_TYPE};
    use time::Duration;

    #[test]
    fn test_credentials_is_expired() {
        let mut cred = Credentials::default();
        assert!(!cred.is_expired());

        cred.expiration = Some(OffsetDateTime::now_utc() + Duration::hours(1));
        assert!(!cred.is_expired());

        cred.expiration = Some(OffsetDateTime::now_utc() - Duration::hours(1));
        assert!(cred.is_expired());
    }

    #[test]
    fn test_credentials_is_temp() {
        let mut cred = Credentials::default();
        assert!(!cred.is_temp());

        cred.session_token = "token".to_string();
        assert!(cred.is_temp());

        cred.expiration = Some(OffsetDateTime::now_utc() - Duration::hours(1));
        assert!(!cred.is_temp());
    }

    #[test]
    fn test_credentials_is_service_account() {
        let mut cred = Credentials::default();
        assert!(!cred.is_service_account());

        let mut claims = HashMap::new();
        claims.insert(IAM_POLICY_CLAIM_NAME_SA.to_string(), Value::String("policy".to_string()));
        cred.claims = Some(claims);
        cred.parent_user = "parent".to_string();
        assert!(cred.is_service_account());
    }

    #[test]
    fn test_credentials_is_implied_policy() {
        let mut cred = Credentials::default();
        assert!(!cred.is_implied_policy());

        let mut claims = HashMap::new();
        claims.insert(IAM_POLICY_CLAIM_NAME_SA.to_string(), Value::String(INHERITED_POLICY_TYPE.to_string()));
        cred.claims = Some(claims);
        cred.parent_user = "parent".to_string();
        assert!(cred.is_implied_policy());
    }

    #[test]
    fn test_credentials_is_valid() {
        let mut cred = Credentials::default();
        assert!(!cred.is_valid());

        cred.access_key = "abc".to_string();
        cred.secret_key = "12345678".to_string();
        assert!(cred.is_valid());

        cred.status = "off".to_string();
        assert!(!cred.is_valid());
    }

    #[test]
    fn test_credentials_is_owner() {
        let cred = Credentials::default();
        assert!(!cred.is_owner());
    }

    #[test]
    fn test_global_credentials_flow() {
        // Since OnceLock can only be set once, we group all tests of the global state together.
        // If it has already been initialized (possibly by other tests), we verify the result directly.
        if get_global_action_cred().is_none() {
            // Verify that the initial state is empty
            assert!(get_global_access_key_opt().is_none());
            assert_eq!(get_global_access_key(), "");
            assert!(get_global_secret_key_opt().is_none());
            assert_eq!(get_global_secret_key(), "");

            // Initialize
            let test_ak = "test_access_key".to_string();
            let test_sk = "test_secret_key_123456".to_string();
            init_global_action_credentials(Some(test_ak.clone()), Some(test_sk.clone())).ok();
        }

        // Verify the state after initialization
        let cred = get_global_action_cred().expect("Global credentials should be set");
        assert!(!cred.access_key.is_empty());
        assert!(!cred.secret_key.is_empty());

        assert!(get_global_access_key_opt().is_some());
        assert!(!get_global_access_key().is_empty());
        assert!(get_global_secret_key_opt().is_some());
        assert!(!get_global_secret_key().is_empty());
    }

    #[test]
    fn test_init_global_credentials_auto_gen() {
        // If the global state hasn't been initialized yet, exercise the automatic generation logic
        if get_global_action_cred().is_none() {
            init_global_action_credentials(None, None).ok();
            let ak = get_global_access_key();
            let sk = get_global_secret_key();
            assert_eq!(ak.len(), 20);
            assert_eq!(sk.len(), 32);
        }
    }
}
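Putting the pieces above together, here is a hedged sketch of the intended startup flow; the `startup` function is illustrative and not part of the crate.

```rust
use rustfs_credentials::{get_global_action_cred, init_global_action_credentials};

// Sketch of the expected call order at process startup: initialize once
// (None/None falls back to randomly generated 20/32-character keys),
// then read the credentials anywhere via the global accessor.
fn startup(ak: Option<String>, sk: Option<String>) {
    if init_global_action_credentials(ak, sk).is_err() {
        // Already initialized elsewhere; the existing credentials stay in effect.
    }
    let cred = get_global_action_cred().expect("credentials initialized above");
    assert!(cred.is_valid());
}
```
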
19 crates/credentials/src/lib.rs Normal file
@@ -0,0 +1,19 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod constants;
mod credentials;

pub use constants::*;
pub use credentials::*;
@@ -18,6 +18,9 @@ mod reliant;
#[cfg(test)]
pub mod common;

#[cfg(test)]
mod version_id_regression_test;

// Data usage regression tests
#[cfg(test)]
mod data_usage_test;

398 crates/e2e_test/src/version_id_regression_test.rs Normal file
@@ -0,0 +1,398 @@
|
||||
// Copyright 2024 RustFS Team
|
||||
//
|
||||
// Licensed under the Apache License, Version 2.0 (the "License");
|
||||
// you may not use this file except in compliance with the License.
|
||||
// You may obtain a copy of the License at
|
||||
//
|
||||
// http://www.apache.org/licenses/LICENSE-2.0
|
||||
//
|
||||
// Unless required by applicable law or agreed to in writing, software
|
||||
// distributed under the License is distributed on an "AS IS" BASIS,
|
||||
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
// See the License for the specific language governing permissions and
|
||||
// limitations under the License.
|
||||
|
||||
//! Regression test for Issue #1066: Veeam VBR - S3 returned empty versionId
|
||||
//!
|
||||
//! This test verifies that:
|
||||
//! 1. PutObject returns version_id when versioning is enabled
|
||||
//! 2. CopyObject returns version_id when versioning is enabled
|
||||
//! 3. CompleteMultipartUpload returns version_id when versioning is enabled
|
||||
//! 4. Basic S3 operations still work correctly (no regression)
|
||||
//! 5. Operations on non-versioned buckets work as expected
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use crate::common::{RustFSTestEnvironment, init_logging};
|
||||
use aws_sdk_s3::Client;
|
||||
use aws_sdk_s3::primitives::ByteStream;
|
||||
use aws_sdk_s3::types::{BucketVersioningStatus, CompletedMultipartUpload, CompletedPart, VersioningConfiguration};
|
||||
use serial_test::serial;
|
||||
use tracing::info;
|
||||
|
||||
fn create_s3_client(env: &RustFSTestEnvironment) -> Client {
|
||||
env.create_s3_client()
|
||||
}
|
||||
|
||||
async fn create_bucket(client: &Client, bucket: &str) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
match client.create_bucket().bucket(bucket).send().await {
|
||||
Ok(_) => {
|
||||
info!("✅ Bucket {} created successfully", bucket);
|
||||
Ok(())
|
||||
}
|
||||
Err(e) => {
|
||||
if e.to_string().contains("BucketAlreadyOwnedByYou") || e.to_string().contains("BucketAlreadyExists") {
|
||||
info!("ℹ️ Bucket {} already exists", bucket);
|
||||
Ok(())
|
||||
} else {
|
||||
Err(Box::new(e))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async fn enable_versioning(client: &Client, bucket: &str) -> Result<(), Box<dyn std::error::Error + Send + Sync>> {
|
||||
let versioning_config = VersioningConfiguration::builder()
|
||||
.status(BucketVersioningStatus::Enabled)
|
||||
.build();
|
||||
|
||||
client
|
||||
.put_bucket_versioning()
|
||||
.bucket(bucket)
|
||||
.versioning_configuration(versioning_config)
|
||||
.send()
|
||||
.await?;
|
||||
|
||||
info!("✅ Versioning enabled for bucket {}", bucket);
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Test 1: PutObject should return version_id when versioning is enabled
|
||||
/// This directly addresses the Veeam issue from #1066
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_put_object_returns_version_id_with_versioning() {
|
||||
init_logging();
|
||||
info!("🧪 TEST: PutObject returns version_id with versioning enabled");
|
||||
|
||||
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
|
||||
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
|
||||
|
||||
let client = create_s3_client(&env);
|
||||
let bucket = "test-put-version-id";
|
||||
|
||||
create_bucket(&client, bucket).await.expect("Failed to create bucket");
|
||||
enable_versioning(&client, bucket).await.expect("Failed to enable versioning");
|
||||
|
||||
let key = "test-file.txt";
|
||||
let content = b"Test content for version ID test";
|
||||
|
||||
info!("📤 Uploading object with key: {}", key);
|
||||
let result = client
|
||||
.put_object()
|
||||
.bucket(bucket)
|
||||
.key(key)
|
||||
.body(ByteStream::from_static(content))
|
||||
.send()
|
||||
.await;
|
||||
|
||||
assert!(result.is_ok(), "PutObject failed: {:?}", result.err());
|
||||
let output = result.unwrap();
|
||||
|
||||
info!("📥 PutObject response - version_id: {:?}", output.version_id);
|
||||
assert!(
|
||||
output.version_id.is_some(),
|
||||
"❌ FAILED: version_id should be present when versioning is enabled"
|
||||
);
|
||||
assert!(
|
||||
!output.version_id.as_ref().unwrap().is_empty(),
|
||||
"❌ FAILED: version_id should not be empty"
|
||||
);
|
||||
|
||||
info!("✅ PASSED: PutObject correctly returns version_id");
|
||||
}
|
||||
|
||||
/// Test 2: CopyObject should return version_id when versioning is enabled
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_copy_object_returns_version_id_with_versioning() {
|
||||
init_logging();
|
||||
info!("🧪 TEST: CopyObject returns version_id with versioning enabled");
|
||||
|
||||
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
|
||||
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
|
||||
|
||||
let client = create_s3_client(&env);
|
||||
let bucket = "test-copy-version-id";
|
||||
|
||||
create_bucket(&client, bucket).await.expect("Failed to create bucket");
|
||||
enable_versioning(&client, bucket).await.expect("Failed to enable versioning");
|
||||
|
||||
let source_key = "source-file.txt";
|
||||
let dest_key = "dest-file.txt";
|
||||
let content = b"Content to copy";
|
||||
|
||||
// First, create source object
|
||||
client
|
||||
.put_object()
|
||||
.bucket(bucket)
|
||||
.key(source_key)
|
||||
.body(ByteStream::from_static(content))
|
||||
.send()
|
||||
.await
|
||||
.expect("Failed to create source object");
|
||||
|
||||
info!("📤 Copying object from {} to {}", source_key, dest_key);
|
||||
let copy_result = client
|
||||
.copy_object()
|
||||
.bucket(bucket)
|
||||
.key(dest_key)
|
||||
.copy_source(format!("{}/{}", bucket, source_key))
|
||||
.send()
|
||||
.await;
|
||||
|
||||
assert!(copy_result.is_ok(), "CopyObject failed: {:?}", copy_result.err());
|
||||
let output = copy_result.unwrap();
|
||||
|
||||
info!("📥 CopyObject response - version_id: {:?}", output.version_id);
|
||||
assert!(
|
||||
output.version_id.is_some(),
|
||||
"❌ FAILED: version_id should be present when versioning is enabled"
|
||||
);
|
||||
assert!(
|
||||
!output.version_id.as_ref().unwrap().is_empty(),
|
||||
"❌ FAILED: version_id should not be empty"
|
||||
);
|
||||
|
||||
info!("✅ PASSED: CopyObject correctly returns version_id");
|
||||
}
|
||||
|
||||
/// Test 3: CompleteMultipartUpload should return version_id when versioning is enabled
|
||||
#[tokio::test]
|
||||
#[serial]
|
||||
async fn test_multipart_upload_returns_version_id_with_versioning() {
|
||||
init_logging();
|
||||
info!("🧪 TEST: CompleteMultipartUpload returns version_id with versioning enabled");
|
||||
|
||||
let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
|
||||
env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");
|
||||
|
||||
let client = create_s3_client(&env);
|
||||
let bucket = "test-multipart-version-id";
|
||||
|
||||
create_bucket(&client, bucket).await.expect("Failed to create bucket");
|
||||
enable_versioning(&client, bucket).await.expect("Failed to enable versioning");
|
||||
|
||||
let key = "multipart-file.txt";
|
||||
let content = b"Part 1 content for multipart upload test";
|
||||
|
||||
info!("📤 Creating multipart upload for key: {}", key);
|
||||
let create_result = client
|
||||
.create_multipart_upload()
|
||||
.bucket(bucket)
|
||||
.key(key)
|
||||
.send()
|
||||
.await
|
||||
.expect("Failed to create multipart upload");
|
||||
|
||||
let upload_id = create_result.upload_id().expect("No upload_id returned");
|
||||
|
||||
info!("📤 Uploading part 1");
|
||||
let upload_part_result = client
|
||||
.upload_part()
|
||||
.bucket(bucket)
|
||||
.key(key)
|
||||
.upload_id(upload_id)
|
||||
.part_number(1)
|
||||
.body(ByteStream::from_static(content))
|
||||
.send()
|
||||
.await
|
||||
.expect("Failed to upload part");
|
||||
|
||||
let etag = upload_part_result.e_tag().expect("No etag returned").to_string();
|
||||
|
||||
let completed_part = CompletedPart::builder().part_number(1).e_tag(etag).build();
|
||||
|
||||
let completed_upload = CompletedMultipartUpload::builder().parts(completed_part).build();
|
||||
|
||||
info!("📤 Completing multipart upload");
|
||||
let complete_result = client
|
||||
.complete_multipart_upload()
|
||||
.bucket(bucket)
|
||||
.key(key)
|
||||
.upload_id(upload_id)
|
||||
.multipart_upload(completed_upload)
|
||||
.send()
|
||||
.await;
|
||||
|
||||
assert!(complete_result.is_ok(), "CompleteMultipartUpload failed: {:?}", complete_result.err());
|
||||
let output = complete_result.unwrap();
|
||||
|
||||
info!("📥 CompleteMultipartUpload response - version_id: {:?}", output.version_id);
|
||||
assert!(
|
||||
output.version_id.is_some(),
|
||||
"❌ FAILED: version_id should be present when versioning is enabled"
|
||||
);
|
||||
assert!(
|
||||
!output.version_id.as_ref().unwrap().is_empty(),
|
||||
"❌ FAILED: version_id should not be empty"
|
||||
);
|
||||
|
||||
info!("✅ PASSED: CompleteMultipartUpload correctly returns version_id");
|
||||
}

/// Test 4: PutObject should NOT return version_id when versioning is NOT enabled
/// This ensures we didn't break non-versioned buckets
#[tokio::test]
#[serial]
async fn test_put_object_without_versioning() {
    init_logging();
    info!("🧪 TEST: PutObject behavior without versioning (no regression)");

    let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
    env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");

    let client = create_s3_client(&env);
    let bucket = "test-no-versioning";

    create_bucket(&client, bucket).await.expect("Failed to create bucket");
    // Note: NOT enabling versioning here

    let key = "test-file.txt";
    let content = b"Test content without versioning";

    info!("📤 Uploading object to non-versioned bucket");
    let result = client
        .put_object()
        .bucket(bucket)
        .key(key)
        .body(ByteStream::from_static(content))
        .send()
        .await;

    assert!(result.is_ok(), "PutObject failed: {:?}", result.err());
    let output = result.unwrap();

    info!("📥 PutObject response - version_id: {:?}", output.version_id);
    // version_id can be None or Some("null") for non-versioned buckets
    info!("✅ PASSED: PutObject works correctly without versioning");
}

/// Test 5: Basic S3 operations still work correctly (no regression)
#[tokio::test]
#[serial]
async fn test_basic_s3_operations_no_regression() {
    init_logging();
    info!("🧪 TEST: Basic S3 operations work correctly (no regression)");

    let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
    env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");

    let client = create_s3_client(&env);
    let bucket = "test-basic-operations";

    create_bucket(&client, bucket).await.expect("Failed to create bucket");
    enable_versioning(&client, bucket).await.expect("Failed to enable versioning");

    let key = "test-basic-file.txt";
    let content = b"Basic operations test content";

    // Test PUT
    info!("📤 Testing PUT operation");
    let put_result = client
        .put_object()
        .bucket(bucket)
        .key(key)
        .body(ByteStream::from_static(content))
        .send()
        .await;
    assert!(put_result.is_ok(), "PUT operation failed");
    let _version_id = put_result.unwrap().version_id;

    // Test GET
    info!("📥 Testing GET operation");
    let get_result = client.get_object().bucket(bucket).key(key).send().await;
    assert!(get_result.is_ok(), "GET operation failed");
    let body = get_result.unwrap().body.collect().await.unwrap().to_vec();
    assert_eq!(body, content, "Content mismatch after GET");

    // Test HEAD
    info!("📋 Testing HEAD operation");
    let head_result = client.head_object().bucket(bucket).key(key).send().await;
    assert!(head_result.is_ok(), "HEAD operation failed");

    // Test LIST
    info!("📝 Testing LIST operation");
    let list_result = client.list_objects_v2().bucket(bucket).send().await;
    assert!(list_result.is_ok(), "LIST operation failed");
    let list_output = list_result.unwrap();
    let objects = list_output.contents();
    assert!(objects.iter().any(|obj| obj.key() == Some(key)), "Object not found in LIST");

    // Test DELETE
    info!("🗑️ Testing DELETE operation");
    let delete_result = client.delete_object().bucket(bucket).key(key).send().await;
    assert!(delete_result.is_ok(), "DELETE operation failed");

    // Verify object is deleted (should return NoSuchKey or a delete marker)
    let get_after_delete = client.get_object().bucket(bucket).key(key).send().await;
    assert!(
        get_after_delete.is_err() || get_after_delete.unwrap().delete_marker == Some(true),
        "Object should be deleted or have delete marker"
    );

    info!("✅ PASSED: All basic S3 operations work correctly");
}

/// Test 6: Veeam-specific scenario simulation
/// Simulates the exact workflow that Veeam uses when backing up data
#[tokio::test]
#[serial]
async fn test_veeam_backup_workflow_simulation() {
    init_logging();
    info!("🧪 TEST: Veeam VBR backup workflow simulation (Issue #1066)");

    let mut env = RustFSTestEnvironment::new().await.expect("Failed to create test environment");
    env.start_rustfs_server(vec![]).await.expect("Failed to start RustFS");

    let client = create_s3_client(&env);
    let bucket = "veeam-backup-test";

    create_bucket(&client, bucket).await.expect("Failed to create bucket");
    enable_versioning(&client, bucket).await.expect("Failed to enable versioning");

    // Veeam typically creates multiple objects in a backup session
    let test_paths = vec![
        "Veeam/Backup/Clients/test-client-id/test-backup-id/CloudStg/Meta/Blocks/History/CheckpointHistory.dat",
        "Veeam/Backup/Clients/test-client-id/test-backup-id/Metadata/Lock/create.checkpoint/declare",
    ];

    for path in test_paths {
        info!("📤 Simulating Veeam upload to: {}", path);
        let content = format!("Veeam backup data for {}", path);

        let put_result = client
            .put_object()
            .bucket(bucket)
            .key(path)
            .body(ByteStream::from(content.into_bytes()))
            .send()
            .await;

        assert!(put_result.is_ok(), "Veeam upload failed for path: {}", path);
        let output = put_result.unwrap();

        info!("📥 Response version_id: {:?}", output.version_id);
        assert!(output.version_id.is_some(), "❌ FAILED: Veeam expects version_id for path: {}", path);
        assert!(
            !output.version_id.as_ref().unwrap().is_empty(),
            "❌ FAILED: version_id should not be empty for path: {}",
            path
        );

        info!("✅ Veeam upload successful with version_id for: {}", path);
    }

    info!("✅ PASSED: Veeam backup workflow simulation completed successfully");
}
}

@@ -34,12 +34,19 @@ workspace = true
default = []

[dependencies]
rustfs-filemeta.workspace = true
rustfs-utils = { workspace = true, features = ["full"] }
rustfs-rio.workspace = true
rustfs-signer.workspace = true
rustfs-checksums.workspace = true
rustfs-config = { workspace = true, features = ["constants", "notify", "audit"] }
rustfs-credentials = { workspace = true }
rustfs-common.workspace = true
rustfs-policy.workspace = true
rustfs-protos.workspace = true
async-trait.workspace = true
bytes.workspace = true
byteorder = { workspace = true }
rustfs-common.workspace = true
rustfs-policy.workspace = true
chrono.workspace = true
glob = { workspace = true }
thiserror.workspace = true
@@ -60,7 +67,6 @@ lazy_static.workspace = true
rustfs-lock.workspace = true
regex = { workspace = true }
path-absolutize = { workspace = true }
rustfs-protos.workspace = true
rmp.workspace = true
rmp-serde.workspace = true
tokio-util = { workspace = true, features = ["io", "compat"] }
@@ -91,11 +97,6 @@ aws-sdk-s3 = { workspace = true }
urlencoding = { workspace = true }
smallvec = { workspace = true }
shadow-rs.workspace = true
rustfs-filemeta.workspace = true
rustfs-utils = { workspace = true, features = ["full"] }
rustfs-rio.workspace = true
rustfs-signer.workspace = true
rustfs-checksums.workspace = true
async-recursion.workspace = true
aws-credential-types = { workspace = true }
aws-smithy-types = { workspace = true }

@@ -1,19 +0,0 @@
# ECStore - Erasure Coding Storage

ECStore provides erasure coding functionality for the RustFS project, using a high-performance, SIMD-optimized Reed-Solomon implementation.

## Features

- **Reed-Solomon Implementation**: High-performance SIMD-optimized erasure coding (see the sketch below)
- **Cross-Platform Compatibility**: Support for x86_64, aarch64, and other architectures
- **Performance Optimized**: SIMD instructions for maximum throughput
- **Thread Safety**: Safe concurrent access with caching optimizations
- **Scalable**: Excellent performance for high-throughput scenarios
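
The erasure-coding flow behind these features follows the usual encode/reconstruct pattern. A minimal sketch, assuming the `reed-solomon-simd` crate's encoder/decoder API and an illustrative 2-data/1-recovery split — this is not taken from ECStore's actual source:

```rust
use reed_solomon_simd::{ReedSolomonDecoder, ReedSolomonEncoder};

fn main() -> Result<(), reed_solomon_simd::Error> {
    // 2 original shards + 1 recovery shard, 6 bytes per shard (illustrative sizes).
    let original = [b"foofoo".to_vec(), b"barbar".to_vec()];
    let mut encoder = ReedSolomonEncoder::new(2, 1, 6)?;
    for shard in &original {
        encoder.add_original_shard(shard)?;
    }
    let recovery: Vec<Vec<u8>> = encoder.encode()?.recovery_iter().map(|s| s.to_vec()).collect();

    // Pretend shard 0 was lost; rebuild it from shard 1 plus the recovery shard.
    let mut decoder = ReedSolomonDecoder::new(2, 1, 6)?;
    decoder.add_original_shard(1, &original[1])?;
    decoder.add_recovery_shard(0, &recovery[0])?;
    let restored = decoder.decode()?;
    assert_eq!(restored.restored_original(0).unwrap(), original[0].as_slice());
    Ok(())
}
```

Method names may differ between crate versions; treat this purely as the shape of the data flow, not as ECStore's API.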

## Documentation

For complete documentation, examples, and usage information, please visit the main [RustFS repository](https://github.com/rustfs/rustfs).

## License

This project is licensed under the Apache License, Version 2.0.

@@ -132,6 +132,25 @@ pub enum BucketLookupType {
    BucketLookupPath,
}

fn load_root_store_from_tls_path() -> Option<rustls::RootCertStore> {
    // Load the root certificate bundle from the path specified by the
    // RUSTFS_TLS_PATH environment variable.
    let tp = std::env::var("RUSTFS_TLS_PATH").ok()?;
    let ca = std::path::Path::new(&tp).join(rustfs_config::RUSTFS_CA_CERT);
    if !ca.exists() {
        return None;
    }

    let der_list = rustfs_utils::load_cert_bundle_der_bytes(ca.to_str().unwrap_or_default()).ok()?;
    let mut store = rustls::RootCertStore::empty();
    for der in der_list {
        if let Err(e) = store.add(der.into()) {
            warn!("Warning: failed to add certificate from '{}' to root store: {e}", ca.display());
        }
    }
    Some(store)
}

impl TransitionClient {
    pub async fn new(endpoint: &str, opts: Options, tier_type: &str) -> Result<TransitionClient, std::io::Error> {
        let clnt = Self::private_new(endpoint, opts, tier_type).await?;
@@ -142,18 +161,22 @@ impl TransitionClient {
    async fn private_new(endpoint: &str, opts: Options, tier_type: &str) -> Result<TransitionClient, std::io::Error> {
        let endpoint_url = get_endpoint_url(endpoint, opts.secure)?;

        //#[cfg(feature = "ring")]
        let _ = rustls::crypto::ring::default_provider().install_default();
        //#[cfg(feature = "aws-lc-rs")]
        // let _ = rustls::crypto::aws_lc_rs::default_provider().install_default();

        let scheme = endpoint_url.scheme();
        let client;
        let tls = rustls::ClientConfig::builder().with_native_roots()?.with_no_client_auth();
        let tls = if let Some(store) = load_root_store_from_tls_path() {
            rustls::ClientConfig::builder()
                .with_root_certificates(store)
                .with_no_client_auth()
        } else {
            rustls::ClientConfig::builder().with_native_roots()?.with_no_client_auth()
        };

        let https = hyper_rustls::HttpsConnectorBuilder::new()
            .with_tls_config(tls)
            .https_or_http()
            .enable_http1()
            .enable_http2()
            .build();
        client = Client::builder(TokioExecutor::new()).build(https);

@@ -21,7 +21,6 @@ use crate::{
    tier::tier::TierConfigMgr,
};
use lazy_static::lazy_static;
use rustfs_policy::auth::Credentials;
use std::{
    collections::HashMap,
    sync::{Arc, OnceLock},
@@ -61,49 +60,6 @@ lazy_static! {
/// Global cancellation token for background services (data scanner and auto heal)
static GLOBAL_BACKGROUND_SERVICES_CANCEL_TOKEN: OnceLock<CancellationToken> = OnceLock::new();

/// Global active credentials
static GLOBAL_ACTIVE_CRED: OnceLock<Credentials> = OnceLock::new();

/// Initialize the global action credentials
///
/// # Arguments
/// * `ak` - Optional access key
/// * `sk` - Optional secret key
///
/// # Returns
/// * None
///
pub fn init_global_action_credentials(ak: Option<String>, sk: Option<String>) {
    let ak = {
        if let Some(k) = ak {
            k
        } else {
            rustfs_utils::string::gen_access_key(20).unwrap_or_default()
        }
    };

    let sk = {
        if let Some(k) = sk {
            k
        } else {
            rustfs_utils::string::gen_secret_key(32).unwrap_or_default()
        }
    };

    GLOBAL_ACTIVE_CRED
        .set(Credentials {
            access_key: ak,
            secret_key: sk,
            ..Default::default()
        })
        .unwrap();
}

/// Get the global action credentials
pub fn get_global_action_cred() -> Option<Credentials> {
    GLOBAL_ACTIVE_CRED.get().cloned()
}

/// Get the global rustfs port
///
/// # Returns

@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::global::get_global_action_cred;
use base64::Engine as _;
use base64::engine::general_purpose;
use hmac::{Hmac, KeyInit, Mac};
@@ -20,6 +19,7 @@ use http::HeaderMap;
use http::HeaderValue;
use http::Method;
use http::Uri;
use rustfs_credentials::get_global_action_cred;
use sha2::Sha256;
use time::OffsetDateTime;
use tracing::error;

@@ -15,8 +15,7 @@
use crate::config::storageclass::STANDARD;
use crate::disk::RUSTFS_META_BUCKET;
use regex::Regex;
use rustfs_utils::http::headers::AMZ_OBJECT_TAGGING;
use rustfs_utils::http::headers::AMZ_STORAGE_CLASS;
use rustfs_utils::http::headers::{AMZ_OBJECT_TAGGING, AMZ_STORAGE_CLASS};
use std::collections::HashMap;
use std::io::{Error, Result};

@@ -35,12 +35,11 @@ uuid = { workspace = true, features = ["v4", "fast-rng", "serde"] }
tokio = { workspace = true, features = ["io-util", "macros", "sync"] }
xxhash-rust = { workspace = true, features = ["xxh64"] }
bytes.workspace = true
rustfs-utils = { workspace = true, features = ["hash","http"] }
rustfs-utils = { workspace = true, features = ["hash", "http"] }
byteorder = { workspace = true }
tracing.workspace = true
thiserror.workspace = true
s3s.workspace = true
lazy_static.workspace = true
regex.workspace = true

[dev-dependencies]

@@ -19,6 +19,7 @@ use rustfs_utils::http::RESERVED_METADATA_PREFIX_LOWER;
use serde::{Deserialize, Serialize};
use std::any::Any;
use std::collections::HashMap;
use std::sync::LazyLock;
use std::time::Duration;
use time::OffsetDateTime;
use uuid::Uuid;
@@ -773,9 +774,7 @@ impl ReplicationWorkerOperation for ReplicateObjectInfo {
    }
}

lazy_static::lazy_static! {
    static ref REPL_STATUS_REGEX: Regex = Regex::new(r"([^=].*?)=([^,].*?);").unwrap();
}
static REPL_STATUS_REGEX: LazyLock<Regex> = LazyLock::new(|| Regex::new(r"([^=].*?)=([^,].*?);").unwrap());

impl ReplicateObjectInfo {
    /// Returns replication status of a target

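For context on what `REPL_STATUS_REGEX` matches: it walks `target=status;` pairs out of an internal replication-status string. A standalone sketch using the same pattern — the sample ARNs are made up for illustration and do not come from this diff:

```rust
use regex::Regex;

fn main() {
    // Identical pattern to REPL_STATUS_REGEX above.
    let re = Regex::new(r"([^=].*?)=([^,].*?);").unwrap();
    // Hypothetical internal status string: one `<target-arn>=<status>;` entry per target.
    let input = "arn:rustfs:replication::target-a=COMPLETED;arn:rustfs:replication::target-b=PENDING;";
    for cap in re.captures_iter(input) {
        println!("target={} status={}", &cap[1], &cap[2]);
    }
}
```
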
@@ -29,6 +29,7 @@ documentation = "https://docs.rs/rustfs-iam/latest/rustfs_iam/"
workspace = true

[dependencies]
rustfs-credentials = { workspace = true }
tokio.workspace = true
time = { workspace = true, features = ["serde-human-readable"] }
serde = { workspace = true, features = ["derive", "rc"] }

@@ -24,15 +24,13 @@ use crate::{
    },
};
use futures::future::join_all;
use rustfs_ecstore::global::get_global_action_cred;
use rustfs_credentials::{Credentials, EMBEDDED_POLICY_TYPE, INHERITED_POLICY_TYPE, get_global_action_cred};
use rustfs_madmin::{AccountStatus, AddOrUpdateUserReq, GroupDesc};
use rustfs_policy::{
    arn::ARN,
    auth::{self, Credentials, UserIdentity, is_secret_key_valid, jwt_sign},
    auth::{self, UserIdentity, is_secret_key_valid, jwt_sign},
    format::Format,
    policy::{
        EMBEDDED_POLICY_TYPE, INHERITED_POLICY_TYPE, Policy, PolicyDoc, default::DEFAULT_POLICIES, iam_policy_claim_name_sa,
    },
    policy::{Policy, PolicyDoc, default::DEFAULT_POLICIES, iam_policy_claim_name_sa},
};
use rustfs_utils::path::path_join_buf;
use serde::{Deserialize, Serialize};

@@ -20,6 +20,7 @@ use crate::{
    manager::{extract_jwt_claims, get_default_policyes},
};
use futures::future::join_all;
use rustfs_credentials::get_global_action_cred;
use rustfs_ecstore::StorageAPI as _;
use rustfs_ecstore::store_api::{ObjectInfoOrErr, WalkOptions};
use rustfs_ecstore::{
@@ -27,7 +28,6 @@ use rustfs_ecstore::{
        RUSTFS_CONFIG_PREFIX,
        com::{delete_config, read_config, read_config_with_metadata, save_config},
    },
    global::get_global_action_cred,
    store::ECStore,
    store_api::{ObjectInfo, ObjectOptions},
};
@@ -410,9 +410,8 @@ impl Store for ObjectStore {
        data = match Self::decrypt_data(&data) {
            Ok(v) => v,
            Err(err) => {
                warn!("delete the config file when decrypt failed failed: {}, path: {}", err, path.as_ref());
                // delete the config file when decrypt failed
                let _ = self.delete_iam_config(path.as_ref()).await;
                warn!("config decrypt failed, keeping file: {}, path: {}", err, path.as_ref());
                // keep the config file when decrypt failed - do not delete
                return Err(Error::ConfigNotFound);
            }
        };

@@ -24,19 +24,18 @@ use crate::store::MappedPolicy;
use crate::store::Store;
use crate::store::UserType;
use crate::utils::extract_claims;
use rustfs_ecstore::global::get_global_action_cred;
use rustfs_credentials::{Credentials, EMBEDDED_POLICY_TYPE, INHERITED_POLICY_TYPE, get_global_action_cred};
use rustfs_ecstore::notification_sys::get_global_notification_sys;
use rustfs_madmin::AddOrUpdateUserReq;
use rustfs_madmin::GroupDesc;
use rustfs_policy::arn::ARN;
use rustfs_policy::auth::Credentials;
use rustfs_policy::auth::{
    ACCOUNT_ON, UserIdentity, contains_reserved_chars, create_new_credentials_with_metadata, generate_credentials,
    is_access_key_valid, is_secret_key_valid,
};
use rustfs_policy::policy::Args;
use rustfs_policy::policy::opa;
use rustfs_policy::policy::{EMBEDDED_POLICY_TYPE, INHERITED_POLICY_TYPE, Policy, PolicyDoc, iam_policy_claim_name_sa};
use rustfs_policy::policy::{Policy, PolicyDoc, iam_policy_claim_name_sa};
use serde_json::Value;
use serde_json::json;
use std::collections::HashMap;

@@ -29,7 +29,8 @@ documentation = "https://docs.rs/rustfs-policy/latest/rustfs_policy/"
workspace = true

[dependencies]
rustfs-config = { workspace = true, features = ["constants","opa"] }
rustfs-credentials = { workspace = true }
rustfs-config = { workspace = true, features = ["constants", "opa"] }
tokio = { workspace = true, features = ["full"] }
time = { workspace = true, features = ["serde-human-readable"] }
serde = { workspace = true, features = ["derive", "rc"] }
@@ -38,7 +39,6 @@ thiserror.workspace = true
strum = { workspace = true, features = ["derive"] }
rustfs-crypto = { workspace = true }
ipnetwork = { workspace = true, features = ["serde"] }
rand.workspace = true
base64-simd = { workspace = true }
jsonwebtoken = { workspace = true }
regex = { workspace = true }

@@ -12,13 +12,14 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::error::Error as IamError;
use crate::error::{Error, Result};
use crate::policy::{INHERITED_POLICY_TYPE, Policy, Validator, iam_policy_claim_name_sa};
use crate::policy::{Policy, Validator};
use crate::utils;
use serde::{Deserialize, Serialize};
use rustfs_credentials::Credentials;
use serde::Serialize;
use serde_json::{Value, json};
use std::collections::HashMap;
use std::convert::TryFrom;
use time::OffsetDateTime;
use tracing::warn;

@@ -32,178 +33,82 @@ pub const ACCOUNT_OFF: &str = "off";

const RESERVED_CHARS: &str = "=,";

// ContainsReservedChars - returns whether the input string contains reserved characters.
/// ContainsReservedChars - returns whether the input string contains reserved characters.
///
/// # Arguments
/// * `s` - input string to check.
///
/// # Returns
/// * `bool` - true if contains reserved characters, false otherwise.
///
pub fn contains_reserved_chars(s: &str) -> bool {
    s.contains(RESERVED_CHARS)
}

// IsAccessKeyValid - validate access key for right length.
/// IsAccessKeyValid - validate access key for right length.
///
/// # Arguments
/// * `access_key` - access key to validate.
///
/// # Returns
/// * `bool` - true if valid, false otherwise.
///
pub fn is_access_key_valid(access_key: &str) -> bool {
    access_key.len() >= ACCESS_KEY_MIN_LEN
}

// IsSecretKeyValid - validate secret key for right length.
/// IsSecretKeyValid - validate secret key for right length.
///
/// # Arguments
/// * `secret_key` - secret key to validate.
///
/// # Returns
/// * `bool` - true if valid, false otherwise.
///
pub fn is_secret_key_valid(secret_key: &str) -> bool {
    secret_key.len() >= SECRET_KEY_MIN_LEN
}

// #[cfg_attr(test, derive(PartialEq, Eq, Debug))]
// struct CredentialHeader {
//     access_key: String,
//     scop: CredentialHeaderScope,
// }

// #[cfg_attr(test, derive(PartialEq, Eq, Debug))]
// struct CredentialHeaderScope {
//     date: Date,
//     region: String,
//     service: ServiceType,
//     request: String,
// }

// impl TryFrom<&str> for CredentialHeader {
//     type Error = Error;
//     fn try_from(value: &str) -> Result<Self, Self::Error> {
//         let mut elem = value.trim().splitn(2, '=');
//         let (Some(h), Some(cred_elems)) = (elem.next(), elem.next()) else {
//             return Err(IamError::ErrCredMalformed));
//         };

//         if h != "Credential" {
//             return Err(IamError::ErrCredMalformed));
//         }

//         let mut cred_elems = cred_elems.trim().rsplitn(5, '/');

//         let Some(request) = cred_elems.next() else {
//             return Err(IamError::ErrCredMalformed));
//         };

//         let Some(service) = cred_elems.next() else {
//             return Err(IamError::ErrCredMalformed));
//         };

//         let Some(region) = cred_elems.next() else {
//             return Err(IamError::ErrCredMalformed));
//         };

//         let Some(date) = cred_elems.next() else {
//             return Err(IamError::ErrCredMalformed));
//         };

//         let Some(ak) = cred_elems.next() else {
//             return Err(IamError::ErrCredMalformed));
//         };

//         if ak.len() < 3 {
//             return Err(IamError::ErrCredMalformed));
//         }

//         if request != "aws4_request" {
//             return Err(IamError::ErrCredMalformed));
//         }

//         Ok(CredentialHeader {
//             access_key: ak.to_owned(),
//             scop: CredentialHeaderScope {
//                 date: {
//                     const FORMATTER: LazyCell<Vec<BorrowedFormatItem<'static>>> =
//                         LazyCell::new(|| time::format_description::parse("[year][month][day]").unwrap());

//                     Date::parse(date, &FORMATTER).map_err(|_| IamError::ErrCredMalformed))?
//                 },
//                 region: region.to_owned(),
//                 service: service.try_into()?,
//                 request: request.to_owned(),
//             },
//         })
//     }
// }

#[derive(Serialize, Deserialize, Clone, Default, Debug)]
pub struct Credentials {
    pub access_key: String,
    pub secret_key: String,
    pub session_token: String,
    pub expiration: Option<OffsetDateTime>,
    pub status: String,
    pub parent_user: String,
    pub groups: Option<Vec<String>>,
    pub claims: Option<HashMap<String, Value>>,
    pub name: Option<String>,
    pub description: Option<String>,
}

impl Credentials {
    // pub fn new(elem: &str) -> Result<Self> {
    //     let header: CredentialHeader = elem.try_into()?;
    //     Self::check_key_value(header)
    // }

    // pub fn check_key_value(_header: CredentialHeader) -> Result<Self> {
    //     todo!()
    // }

    pub fn is_expired(&self) -> bool {
        if self.expiration.is_none() {
            return false;
        }

        self.expiration
            .as_ref()
            .map(|e| time::OffsetDateTime::now_utc() > *e)
            .unwrap_or(false)
    }

    pub fn is_temp(&self) -> bool {
        !self.session_token.is_empty() && !self.is_expired()
    }

    pub fn is_service_account(&self) -> bool {
        const IAM_POLICY_CLAIM_NAME_SA: &str = "sa-policy";
        self.claims
            .as_ref()
            .map(|x| x.get(IAM_POLICY_CLAIM_NAME_SA).is_some_and(|_| !self.parent_user.is_empty()))
            .unwrap_or_default()
    }

    pub fn is_implied_policy(&self) -> bool {
        if self.is_service_account() {
            return self
                .claims
                .as_ref()
                .map(|x| x.get(&iam_policy_claim_name_sa()).is_some_and(|v| v == INHERITED_POLICY_TYPE))
                .unwrap_or_default();
        }

        false
    }

    pub fn is_valid(&self) -> bool {
        if self.status == "off" {
            return false;
        }

        self.access_key.len() >= 3 && self.secret_key.len() >= 8 && !self.is_expired()
    }

    pub fn is_owner(&self) -> bool {
        false
    }
}

/// GenerateCredentials - generate a new access key and secret key pair.
///
/// # Returns
/// * `Ok((String, String))` - access key and secret key pair.
/// * `Err(Error)` - if an error occurs during generation.
///
pub fn generate_credentials() -> Result<(String, String)> {
    let ak = utils::gen_access_key(20)?;
    let sk = utils::gen_secret_key(40)?;
    let ak = rustfs_credentials::gen_access_key(20)?;
    let sk = rustfs_credentials::gen_secret_key(40)?;
    Ok((ak, sk))
}

/// GetNewCredentialsWithMetadata - generate new credentials with metadata claims and token secret.
///
/// # Arguments
/// * `claims` - metadata claims to be included in the token.
/// * `token_secret` - secret used to sign the token.
///
/// # Returns
/// * `Ok(Credentials)` - newly generated credentials.
/// * `Err(Error)` - if an error occurs during generation.
///
pub fn get_new_credentials_with_metadata(claims: &HashMap<String, Value>, token_secret: &str) -> Result<Credentials> {
    let (ak, sk) = generate_credentials()?;

    create_new_credentials_with_metadata(&ak, &sk, claims, token_secret)
}

/// CreateNewCredentialsWithMetadata - create new credentials with provided access key, secret key, metadata claims, and token secret.
///
/// # Arguments
/// * `ak` - access key.
/// * `sk` - secret key.
/// * `claims` - metadata claims to be included in the token.
/// * `token_secret` - secret used to sign the token.
///
/// # Returns
/// * `Ok(Credentials)` - newly created credentials.
/// * `Err(Error)` - if an error occurs during creation.
///
pub fn create_new_credentials_with_metadata(
    ak: &str,
    sk: &str,
@@ -211,11 +116,11 @@ pub fn create_new_credentials_with_metadata(
    token_secret: &str,
) -> Result<Credentials> {
    if ak.len() < ACCESS_KEY_MIN_LEN || ak.len() > ACCESS_KEY_MAX_LEN {
        return Err(IamError::InvalidAccessKeyLength);
        return Err(Error::InvalidAccessKeyLength);
    }

    if sk.len() < SECRET_KEY_MIN_LEN || sk.len() > SECRET_KEY_MAX_LEN {
        return Err(IamError::InvalidAccessKeyLength);
        return Err(Error::InvalidAccessKeyLength);
    }

    if token_secret.is_empty() {
@@ -253,6 +158,16 @@ pub fn create_new_credentials_with_metadata(
    })
}

/// JWTSign - sign the provided claims with the given token secret to generate a JWT token.
///
/// # Arguments
/// * `claims` - claims to be included in the token.
/// * `token_secret` - secret used to sign the token.
///
/// # Returns
/// * `Ok(String)` - generated JWT token.
/// * `Err(Error)` - if an error occurs during signing.
///
pub fn jwt_sign<T: Serialize>(claims: &T, token_secret: &str) -> Result<String> {
    let token = utils::generate_jwt(claims, token_secret)?;
    Ok(token)
@@ -267,16 +182,29 @@ pub struct CredentialsBuilder {
    description: Option<String>,
    expiration: Option<OffsetDateTime>,
    allow_site_replicator_account: bool,
    claims: Option<serde_json::Value>,
    claims: Option<Value>,
    parent_user: String,
    groups: Option<Vec<String>>,
}

impl CredentialsBuilder {
    /// Create a new CredentialsBuilder instance.
    ///
    /// # Returns
    /// * `CredentialsBuilder` - a new instance of CredentialsBuilder.
    ///
    pub fn new() -> Self {
        Self::default()
    }

    /// Set the session policy for the credentials.
    ///
    /// # Arguments
    /// * `policy` - an optional Policy to set as the session policy.
    ///
    /// # Returns
    /// * `Self` - the updated CredentialsBuilder instance.
    ///
    pub fn session_policy(mut self, policy: Option<Policy>) -> Self {
        self.session_policy = policy;
        self
@@ -312,7 +240,7 @@ impl CredentialsBuilder {
        self
    }

    pub fn claims(mut self, claims: serde_json::Value) -> Self {
    pub fn claims(mut self, claims: Value) -> Self {
        self.claims = Some(claims);
        self
    }
@@ -336,7 +264,7 @@ impl TryFrom<CredentialsBuilder> for Credentials {
    type Error = Error;
    fn try_from(mut value: CredentialsBuilder) -> std::result::Result<Self, Self::Error> {
        if value.parent_user.is_empty() {
            return Err(IamError::InvalidArgument);
            return Err(Error::InvalidArgument);
        }

        if (value.access_key.is_empty() && !value.secret_key.is_empty())
@@ -346,27 +274,27 @@ impl TryFrom<CredentialsBuilder> for Credentials {
        }

        if value.parent_user == value.access_key.as_str() {
            return Err(IamError::InvalidArgument);
            return Err(Error::InvalidArgument);
        }

        if value.access_key == "site-replicator-0" && !value.allow_site_replicator_account {
            return Err(IamError::InvalidArgument);
            return Err(Error::InvalidArgument);
        }

        let mut claim = serde_json::json!({
        let mut claim = json!({
            "parent": value.parent_user
        });

        if let Some(p) = value.session_policy {
            p.is_valid()?;
            let policy_buf = serde_json::to_vec(&p).map_err(|_| IamError::InvalidArgument)?;
            let policy_buf = serde_json::to_vec(&p).map_err(|_| Error::InvalidArgument)?;
            if policy_buf.len() > 4096 {
                return Err(Error::other("session policy is too large"));
            }
            claim["sessionPolicy"] = serde_json::json!(base64_simd::STANDARD.encode_to_string(&policy_buf));
            claim["sa-policy"] = serde_json::json!("embedded-policy");
            claim["sessionPolicy"] = json!(base64_simd::STANDARD.encode_to_string(&policy_buf));
            claim[rustfs_credentials::IAM_POLICY_CLAIM_NAME_SA] = json!(rustfs_credentials::EMBEDDED_POLICY_TYPE);
        } else {
            claim["sa-policy"] = serde_json::json!("inherited-policy");
            claim[rustfs_credentials::IAM_POLICY_CLAIM_NAME_SA] = json!(rustfs_credentials::INHERITED_POLICY_TYPE);
        }

        if let Some(Value::Object(obj)) = value.claims {
@@ -379,11 +307,11 @@ impl TryFrom<CredentialsBuilder> for Credentials {
        }

        if value.access_key.is_empty() {
            value.access_key = utils::gen_access_key(20)?;
            value.access_key = rustfs_credentials::gen_access_key(20)?;
        }

        if value.secret_key.is_empty() {
            value.access_key = utils::gen_secret_key(40)?;
            value.secret_key = rustfs_credentials::gen_secret_key(40)?;
        }

        claim["accessKey"] = json!(&value.access_key);

@@ -14,8 +14,9 @@

mod credentials;

pub use credentials::Credentials;
pub use credentials::*;

use rustfs_credentials::Credentials;
use serde::{Deserialize, Serialize};
use time::OffsetDateTime;

@@ -27,6 +28,13 @@ pub struct UserIdentity {
}

impl UserIdentity {
    /// Create a new UserIdentity
    ///
    /// # Arguments
    /// * `credentials` - Credentials object
    ///
    /// # Returns
    /// * UserIdentity
    pub fn new(credentials: Credentials) -> Self {
        UserIdentity {
            version: 1,
@@ -28,7 +28,6 @@ pub mod variables;

pub use action::ActionSet;
pub use doc::PolicyDoc;

pub use effect::Effect;
pub use function::Functions;
pub use id::ID;
@@ -37,9 +36,6 @@ pub use principal::Principal;
pub use resource::ResourceSet;
pub use statement::Statement;

pub const EMBEDDED_POLICY_TYPE: &str = "embedded-policy";
pub const INHERITED_POLICY_TYPE: &str = "inherited-policy";

#[derive(thiserror::Error, Debug)]
#[cfg_attr(test, derive(Eq, PartialEq))]
pub enum Error {

@@ -258,7 +258,7 @@ pub fn get_policies_from_claims(claims: &HashMap<String, Value>, policy_claim_name
}

pub fn iam_policy_claim_name_sa() -> String {
    "sa-policy".to_string()
    rustfs_credentials::IAM_POLICY_CLAIM_NAME_SA.to_string()
}

pub mod default {

@@ -13,46 +13,7 @@
// limitations under the License.

use jsonwebtoken::{Algorithm, DecodingKey, EncodingKey, Header};
use rand::{Rng, RngCore};
use serde::{Serialize, de::DeserializeOwned};
use std::io::{Error, Result};

pub fn gen_access_key(length: usize) -> Result<String> {
    const ALPHA_NUMERIC_TABLE: [char; 36] = [
        '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
        'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
    ];

    if length < 3 {
        return Err(Error::other("access key length is too short"));
    }

    let mut result = String::with_capacity(length);
    let mut rng = rand::rng();

    for _ in 0..length {
        result.push(ALPHA_NUMERIC_TABLE[rng.random_range(0..ALPHA_NUMERIC_TABLE.len())]);
    }

    Ok(result)
}

pub fn gen_secret_key(length: usize) -> Result<String> {
    use base64_simd::URL_SAFE_NO_PAD;

    if length < 8 {
        return Err(Error::other("secret key length is too short"));
    }
    let mut rng = rand::rng();

    let mut key = vec![0u8; URL_SAFE_NO_PAD.estimated_decoded_length(length)];
    rng.fill_bytes(&mut key);

    let encoded = URL_SAFE_NO_PAD.encode_to_string(&key);
    let key_str = encoded.replace("/", "+");

    Ok(key_str)
}

pub fn generate_jwt<T: Serialize>(claims: &T, secret: &str) -> std::result::Result<String, jsonwebtoken::errors::Error> {
    let header = Header::new(Algorithm::HS512);
@@ -72,26 +33,9 @@ pub fn extract_claims<T: DeserializeOwned + Clone>(

#[cfg(test)]
mod tests {
    use super::{gen_access_key, gen_secret_key, generate_jwt};
    use super::generate_jwt;
    use serde::{Deserialize, Serialize};

    #[test]
    fn test_gen_access_key() {
        let a = gen_access_key(10).unwrap();
        let b = gen_access_key(10).unwrap();

        assert_eq!(a.len(), 10);
        assert_eq!(b.len(), 10);
        assert_ne!(a, b);
    }

    #[test]
    fn test_gen_secret_key() {
        let a = gen_secret_key(10).unwrap();
        let b = gen_secret_key(10).unwrap();
        assert_ne!(a, b);
    }

    #[derive(Debug, Serialize, Deserialize, PartialEq)]
    struct Claims {
        sub: String,

@@ -34,6 +34,7 @@ path = "src/main.rs"

[dependencies]
rustfs-common.workspace = true
rustfs-credentials = { workspace = true }
flatbuffers = { workspace = true }
prost = { workspace = true }
tonic = { workspace = true, features = ["transport"] }

@@ -16,7 +16,7 @@
mod generated;

use proto_gen::node_service::node_service_client::NodeServiceClient;
use rustfs_common::{GLOBAL_CONN_MAP, GLOBAL_ROOT_CERT, evict_connection};
use rustfs_common::{GLOBAL_CONN_MAP, GLOBAL_MTLS_IDENTITY, GLOBAL_ROOT_CERT, evict_connection};
use std::{error::Error, time::Duration};
use tonic::{
    Request, Status,
@@ -24,7 +24,7 @@ use tonic::{
    service::interceptor::InterceptedService,
    transport::{Certificate, Channel, ClientTlsConfig, Endpoint},
};
use tracing::{debug, warn};
use tracing::{debug, error, warn};

// Type alias for the complex client type
pub type NodeServiceClientType = NodeServiceClient<
@@ -83,6 +83,11 @@ async fn create_new_channel(addr: &str) -> Result<Channel, Box<dyn Error>> {

    let root_cert = GLOBAL_ROOT_CERT.read().await;
    if addr.starts_with(RUSTFS_HTTPS_PREFIX) {
        if root_cert.is_none() {
            debug!("No custom root certificate configured; using system roots for TLS: {}", addr);
            // If no custom root cert is configured, try to use system roots.
            connector = connector.tls_config(ClientTlsConfig::new())?;
        }
        if let Some(cert_pem) = root_cert.as_ref() {
            let ca = Certificate::from_pem(cert_pem);
            // Derive the hostname from the HTTPS URL for TLS hostname verification.
@@ -95,7 +100,13 @@ async fn create_new_channel(addr: &str) -> Result<Channel, Box<dyn Error>> {
                .next()
                .unwrap_or("");
            let tls = if !domain.is_empty() {
                ClientTlsConfig::new().ca_certificate(ca).domain_name(domain)
                let mut cfg = ClientTlsConfig::new().ca_certificate(ca).domain_name(domain);
                let mtls_identity = GLOBAL_MTLS_IDENTITY.read().await;
                if let Some(id) = mtls_identity.as_ref() {
                    let identity = tonic::transport::Identity::from_pem(id.cert_pem.clone(), id.key_pem.clone());
                    cfg = cfg.identity(identity);
                }
                cfg
            } else {
                // Fallback: configure TLS without explicit domain if parsing fails.
                ClientTlsConfig::new().ca_certificate(ca)
@@ -103,12 +114,9 @@ async fn create_new_channel(addr: &str) -> Result<Channel, Box<dyn Error>> {
            connector = connector.tls_config(tls)?;
            debug!("Configured TLS with custom root certificate for: {}", addr);
        } else {
            debug!("Using system root certificates for TLS: {}", addr);
        }
    } else {
        // Custom root certificates are configured but will be ignored for non-HTTPS addresses.
        if root_cert.is_some() {
            warn!("Custom root certificates are configured but not used because the address does not use HTTPS: {addr}");
            return Err(std::io::Error::other(
                "HTTPS requested but no trusted roots are configured. Provide tls/ca.crt (or enable system roots via RUSTFS_TRUST_SYSTEM_CA=true)."
            ).into());
        }
    }

@@ -150,7 +158,18 @@ pub async fn node_service_time_out_client(
    >,
    Box<dyn Error>,
> {
    let token: MetadataValue<_> = "rustfs rpc".parse()?;
    debug!("Obtaining gRPC client for NodeService at: {}", addr);
    let token_str = rustfs_credentials::get_grpc_token();
    let token: MetadataValue<_> = token_str.parse().map_err(|e| {
        error!(
            "Failed to parse gRPC auth token into MetadataValue: {:?}; env={} token_len={} token_prefix={}",
            e,
            rustfs_credentials::ENV_GRPC_AUTH_TOKEN,
            token_str.len(),
            token_str.chars().take(2).collect::<String>(),
        );
        e
    })?;

    // Try to get cached channel
    let cached_channel = { GLOBAL_CONN_MAP.read().await.get(addr).cloned() };

@@ -41,7 +41,8 @@ reqwest.workspace = true
tokio-util.workspace = true
faster-hex.workspace = true
futures.workspace = true
rustfs-utils = { workspace = true, features = ["io", "hash", "compress"] }
rustfs-config = { workspace = true, features = ["constants"] }
rustfs-utils = { workspace = true, features = ["io", "hash", "compress", "tls"] }
serde_json.workspace = true
md-5 = { workspace = true }
tracing.workspace = true

@@ -12,11 +12,12 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::{EtagResolvable, HashReaderDetector, HashReaderMut};
use bytes::Bytes;
use futures::{Stream, TryStreamExt as _};
use http::HeaderMap;
use pin_project_lite::pin_project;
use reqwest::{Client, Method, RequestBuilder};
use reqwest::{Certificate, Client, Identity, Method, RequestBuilder};
use std::error::Error as _;
use std::io::{self, Error};
use std::ops::Not as _;
@@ -26,21 +27,88 @@ use std::task::{Context, Poll};
use tokio::io::{AsyncRead, AsyncWrite, ReadBuf};
use tokio::sync::mpsc;
use tokio_util::io::StreamReader;
use tracing::error;

use crate::{EtagResolvable, HashReaderDetector, HashReaderMut};
/// Get the TLS path from the RUSTFS_TLS_PATH environment variable.
/// If the variable is not set, return None.
fn tls_path() -> Option<&'static std::path::PathBuf> {
    static TLS_PATH: LazyLock<Option<std::path::PathBuf>> = LazyLock::new(|| {
        std::env::var("RUSTFS_TLS_PATH")
            .ok()
            .and_then(|s| if s.is_empty() { None } else { Some(s.into()) })
    });
    TLS_PATH.as_ref()
}

/// Load CA root certificates from the RUSTFS_TLS_PATH directory.
/// The CA certificates should be in PEM format and stored in the file
/// specified by the RUSTFS_CA_CERT constant.
/// If the file does not exist or cannot be read, return the builder unchanged.
fn load_ca_roots_from_tls_path(builder: reqwest::ClientBuilder) -> reqwest::ClientBuilder {
    let Some(tp) = tls_path() else {
        return builder;
    };
    let ca_path = tp.join(rustfs_config::RUSTFS_CA_CERT);
    if !ca_path.exists() {
        return builder;
    }

    let Ok(certs_der) = rustfs_utils::load_cert_bundle_der_bytes(ca_path.to_str().unwrap_or_default()) else {
        return builder;
    };

    let mut b = builder;
    for der in certs_der {
        if let Ok(cert) = Certificate::from_der(&der) {
            b = b.add_root_certificate(cert);
        }
    }
    b
}

/// Load optional mTLS identity from the RUSTFS_TLS_PATH directory.
/// The client certificate and private key should be in PEM format and stored in the files
/// specified by RUSTFS_CLIENT_CERT_FILENAME and RUSTFS_CLIENT_KEY_FILENAME constants.
/// If the files do not exist or cannot be read, return None.
fn load_optional_mtls_identity_from_tls_path() -> Option<Identity> {
    let tp = tls_path()?;
    let cert = std::fs::read(tp.join(rustfs_config::RUSTFS_CLIENT_CERT_FILENAME)).ok()?;
    let key = std::fs::read(tp.join(rustfs_config::RUSTFS_CLIENT_KEY_FILENAME)).ok()?;

    let mut pem = Vec::with_capacity(cert.len() + key.len() + 1);
    pem.extend_from_slice(&cert);
    if !pem.ends_with(b"\n") {
        pem.push(b'\n');
    }
    pem.extend_from_slice(&key);

    match Identity::from_pem(&pem) {
        Ok(id) => Some(id),
        Err(e) => {
            error!("Failed to load mTLS identity from PEM: {e}");
            None
        }
    }
}

fn get_http_client() -> Client {
    // Reuse the HTTP connection pool in the global `reqwest::Client` instance
    // TODO: interact with load balancing?
    static CLIENT: LazyLock<Client> = LazyLock::new(|| {
        Client::builder()
        let mut builder = Client::builder()
            .connect_timeout(std::time::Duration::from_secs(5))
            .tcp_keepalive(std::time::Duration::from_secs(10))
            .http2_keep_alive_interval(std::time::Duration::from_secs(5))
            .http2_keep_alive_timeout(std::time::Duration::from_secs(3))
            .http2_keep_alive_while_idle(true)
            .build()
            .expect("Failed to create global HTTP client")
            .http2_keep_alive_while_idle(true);

        // HTTPS root trust + optional mTLS identity from RUSTFS_TLS_PATH
        builder = load_ca_roots_from_tls_path(builder);
        if let Some(id) = load_optional_mtls_identity_from_tls_path() {
            builder = builder.identity(id);
        }

        builder.build().expect("Failed to create global HTTP client")
    });
    CLIENT.clone()
}

@@ -86,11 +86,12 @@ io = ["dep:tokio"]
path = []
notify = ["dep:hyper", "dep:s3s", "dep:hashbrown", "dep:thiserror", "dep:serde", "dep:libc", "dep:url", "dep:regex"] # file system notification features
compress = ["dep:flate2", "dep:brotli", "dep:snap", "dep:lz4", "dep:zstd"]
string = ["dep:regex", "dep:rand"]
string = ["dep:regex"]
crypto = ["dep:base64-simd", "dep:hex-simd", "dep:hmac", "dep:hyper", "dep:sha1"]
hash = ["dep:highway", "dep:md-5", "dep:sha2", "dep:blake3", "dep:serde", "dep:siphasher", "dep:hex-simd", "dep:base64-simd", "dep:crc-fast"]
hash = ["dep:highway", "dep:md-5", "dep:sha2", "dep:blake3", "dep:serde", "dep:siphasher", "dep:hex-simd", "dep:crc-fast"]
os = ["dep:nix", "dep:tempfile", "winapi"] # operating system utilities
integration = [] # integration test features
sys = ["dep:sysinfo"] # system information features
http = ["dep:convert_case", "dep:http", "dep:regex"]
full = ["ip", "tls", "net", "io", "hash", "os", "integration", "path", "crypto", "string", "compress", "sys", "notify", "http"] # all features
obj = ["http"] # object storage features
full = ["ip", "tls", "net", "io", "hash", "os", "integration", "path", "crypto", "string", "compress", "sys", "notify", "http", "obj"] # all features

@@ -12,8 +12,11 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::get_env_bool;
use rustfs_config::{RUSTFS_TLS_CERT, RUSTFS_TLS_KEY};
use rustls::server::{ClientHello, ResolvesServerCert, ResolvesServerCertUsingSni};
use rustls::RootCertStore;
use rustls::server::danger::ClientCertVerifier;
use rustls::server::{ClientHello, ResolvesServerCert, ResolvesServerCertUsingSni, WebPkiClientVerifier};
use rustls::sign::CertifiedKey;
use rustls_pemfile::{certs, private_key};
use rustls_pki_types::{CertificateDer, PrivateKeyDer};
@@ -48,6 +51,79 @@ pub fn load_certs(filename: &str) -> io::Result<Vec<CertificateDer<'static>>> {
    Ok(certs)
}

/// Load a PEM certificate bundle and return each certificate as DER bytes.
///
/// This is a low-level helper intended for TLS clients (reqwest/hyper-rustls) that
/// need to add root certificates one-by-one.
///
/// - Input: a PEM file that may contain multiple cert blocks.
/// - Output: Vec of DER-encoded cert bytes, one per cert.
///
/// NOTE: This intentionally returns raw bytes to avoid forcing downstream crates
/// to depend on rustls types.
pub fn load_cert_bundle_der_bytes(path: &str) -> io::Result<Vec<Vec<u8>>> {
    let pem = fs::read(path)?;
    let mut reader = io::BufReader::new(&pem[..]);

    let certs = certs(&mut reader)
        .collect::<Result<Vec<_>, _>>()
        .map_err(|e| certs_error(format!("Failed to parse PEM certs from {path}: {e}")))?;

    Ok(certs.into_iter().map(|c| c.to_vec()).collect())
}

/// Builds a WebPkiClientVerifier for mTLS if enabled via environment variable.
///
/// # Arguments
/// * `tls_path` - Directory containing client CA certificates
///
/// # Returns
/// * `Ok(Some(verifier))` if mTLS is enabled and CA certs are found
/// * `Ok(None)` if mTLS is disabled
/// * `Err` if mTLS is enabled but configuration is invalid
pub fn build_webpki_client_verifier(tls_path: &str) -> io::Result<Option<Arc<dyn ClientCertVerifier>>> {
    if !get_env_bool(rustfs_config::ENV_SERVER_MTLS_ENABLE, rustfs_config::DEFAULT_SERVER_MTLS_ENABLE) {
        return Ok(None);
    }

    let ca_path = mtls_ca_bundle_path(tls_path).ok_or_else(|| {
        Error::other(format!(
            "RUSTFS_SERVER_MTLS_ENABLE=true but missing {}/client_ca.crt (or fallback {}/ca.crt)",
            tls_path, tls_path
        ))
    })?;

    let der_list = load_cert_bundle_der_bytes(ca_path.to_str().unwrap_or_default())?;

    let mut store = RootCertStore::empty();
    for der in der_list {
        store
            .add(der.into())
            .map_err(|e| Error::other(format!("Invalid client CA cert: {e}")))?;
    }

    let verifier = WebPkiClientVerifier::builder(Arc::new(store))
        .build()
        .map_err(|e| Error::other(format!("Build client cert verifier failed: {e}")))?;

    Ok(Some(verifier))
}

/// Locate the mTLS client CA bundle in the specified TLS path
fn mtls_ca_bundle_path(tls_path: &str) -> Option<std::path::PathBuf> {
    use std::path::Path;

    let p1 = Path::new(tls_path).join(rustfs_config::RUSTFS_CLIENT_CA_CERT_FILENAME);
    if p1.exists() {
        return Some(p1);
    }
    let p2 = Path::new(tls_path).join(rustfs_config::RUSTFS_CA_CERT);
    if p2.exists() {
        return Some(p2);
    }
    None
}
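
For orientation, a hedged sketch of how a verifier produced by `build_webpki_client_verifier` is typically plugged into a rustls server config. Only `load_certs`, `build_webpki_client_verifier`, and the `RUSTFS_TLS_CERT`/`RUSTFS_TLS_KEY` constants appear in this file; `load_private_key` and the path handling are assumptions for illustration, not code from this diff:

```rust
// A hedged sketch (rustls 0.23-style builder API), not the project's actual wiring.
fn make_server_config(tls_path: &str) -> io::Result<Arc<rustls::ServerConfig>> {
    let certs = load_certs(&format!("{tls_path}/{RUSTFS_TLS_CERT}"))?;
    // `load_private_key` is assumed to return a PrivateKeyDer<'static>.
    let key = load_private_key(&format!("{tls_path}/{RUSTFS_TLS_KEY}"))?;

    let builder = rustls::ServerConfig::builder();
    let config = match build_webpki_client_verifier(tls_path)? {
        // mTLS enabled: require and verify client certificates against the CA bundle.
        Some(verifier) => builder.with_client_cert_verifier(verifier),
        // mTLS disabled: plain server-side TLS.
        None => builder.with_no_client_auth(),
    }
    .with_single_cert(certs, key)
    .map_err(|e| Error::other(format!("bad cert/key: {e}")))?;

    Ok(Arc::new(config))
}
```
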

/// Load private key from file.
/// This function loads a private key from the specified file.
///

@@ -82,5 +82,8 @@ pub use sys::user_agent::*;
#[cfg(feature = "notify")]
pub use notify::*;

#[cfg(feature = "obj")]
pub mod obj;

mod envs;
pub use envs::*;

268
crates/utils/src/obj/metadata.rs
Normal file
268
crates/utils/src/obj/metadata.rs
Normal file
@@ -0,0 +1,268 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

use crate::http::{RESERVED_METADATA_PREFIX_LOWER, is_minio_header, is_rustfs_header};
use std::collections::HashMap;

/// Extract user-defined metadata keys from object metadata.
///
/// This function filters out system-level metadata and returns only user-defined keys.
///
/// Excluded keys include:
/// - S3 standard headers: content-type, cache-control, content-encoding, content-disposition,
///   content-language, expires
/// - x-amz-* headers (except user metadata with x-amz-meta- prefix which are stripped)
/// - x-rustfs-internal-* headers (system internal metadata)
/// - Storage/replication system keys: x-amz-storage-class, x-amz-tagging, x-amz-replication-status
/// - Object metadata: etag, md5Sum, last-modified
///
/// # Arguments
/// * `metadata` - The complete metadata HashMap from ObjectInfo.user_defined
///
/// # Returns
/// A new HashMap containing only user-defined metadata entries. Keys that use
/// the user-metadata prefix (for example `x-amz-meta-`) are returned with that
/// prefix stripped.
///
/// Note: The keys in the returned map may therefore differ from the keys in
/// the input `metadata` map and cannot be used directly to remove entries
/// from `metadata`. If you need to identify which original keys to remove,
/// consider using an in-place filtering approach or returning the original
/// keys instead.
///
/// # Example
/// ```
/// use std::collections::HashMap;
/// use rustfs_utils::obj::extract_user_defined_metadata;
///
/// let mut metadata = HashMap::new();
/// metadata.insert("content-type".to_string(), "application/json".to_string());
/// metadata.insert("x-minio-key".to_string(), "application/json".to_string());
/// metadata.insert("x-amz-grant-sse".to_string(), "application/json".to_string());
/// metadata.insert("x-amz-meta-user-key".to_string(), "user-value".to_string());
/// metadata.insert("my-custom-key".to_string(), "custom-value".to_string());
///
/// let user_keys = extract_user_defined_metadata(&metadata);
/// assert_eq!(user_keys.len(), 2);
/// assert_eq!(user_keys.get("user-key"), Some(&"user-value".to_string()));
/// assert_eq!(user_keys.get("my-custom-key"), Some(&"custom-value".to_string()));
/// ```
pub fn extract_user_defined_metadata(metadata: &HashMap<String, String>) -> HashMap<String, String> {
    let mut user_metadata = HashMap::new();

    let system_headers = [
        "content-type",
        "cache-control",
        "content-encoding",
        "content-disposition",
        "content-language",
        "expires",
        "content-length",
        "content-md5",
        "content-range",
        "last-modified",
        "etag",
        "md5sum",
        "date",
    ];

    for (key, value) in metadata {
        let lower_key = key.to_ascii_lowercase();

        if lower_key.starts_with(RESERVED_METADATA_PREFIX_LOWER) {
            continue;
        }

        if system_headers.contains(&lower_key.as_str()) {
            continue;
        }

        if let Some(user_key) = lower_key.strip_prefix("x-amz-meta-") {
            if !user_key.is_empty() {
                user_metadata.insert(user_key.to_string(), value.clone());
            }
            continue;
        }

        // Check if it's x-rustfs-meta-* and extract user key
        if let Some(user_key) = lower_key.strip_prefix("x-rustfs-meta-") {
            if !user_key.is_empty() {
                user_metadata.insert(user_key.to_string(), value.clone());
            }
            continue;
        }

        // Skip other x-amz-* headers
        if lower_key.starts_with("x-amz-") {
            continue;
        }

        // Skip other RustFS headers (x-rustfs-replication-*, etc.)
        if is_rustfs_header(key) {
            continue;
        }

        // Skip MinIO headers (compatibility)
        if is_minio_header(key) {
            continue;
        }

        // All other keys are considered user-defined
        user_metadata.insert(key.clone(), value.clone());
    }

    user_metadata
}

#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_extract_user_defined_metadata_basic() {
        let mut metadata = HashMap::new();
        metadata.insert("my-key".to_string(), "my-value".to_string());
        metadata.insert("custom-header".to_string(), "custom-value".to_string());

        let user_metadata = extract_user_defined_metadata(&metadata);

        assert_eq!(user_metadata.len(), 2);
        assert_eq!(user_metadata.get("my-key"), Some(&"my-value".to_string()));
        assert_eq!(user_metadata.get("custom-header"), Some(&"custom-value".to_string()));
    }

    #[test]
    fn test_extract_user_defined_metadata_exclude_system_headers() {
        let mut metadata = HashMap::new();
        metadata.insert("content-type".to_string(), "application/json".to_string());
        metadata.insert("cache-control".to_string(), "no-cache".to_string());
        metadata.insert("content-encoding".to_string(), "gzip".to_string());
        metadata.insert("content-disposition".to_string(), "attachment".to_string());
        metadata.insert("content-language".to_string(), "en-US".to_string());
        metadata.insert("expires".to_string(), "Wed, 21 Oct 2015 07:28:00 GMT".to_string());
        metadata.insert("etag".to_string(), "abc123".to_string());
        metadata.insert("last-modified".to_string(), "Tue, 20 Oct 2015 07:28:00 GMT".to_string());
        metadata.insert("my-key".to_string(), "my-value".to_string());

        let user_metadata = extract_user_defined_metadata(&metadata);

        assert_eq!(user_metadata.len(), 1);
        assert_eq!(user_metadata.get("my-key"), Some(&"my-value".to_string()));
        assert!(!user_metadata.contains_key("content-type"));
        assert!(!user_metadata.contains_key("cache-control"));
        assert!(!user_metadata.contains_key("etag"));
    }

    #[test]
    fn test_extract_user_defined_metadata_strip_amz_meta_prefix() {
        let mut metadata = HashMap::new();
        metadata.insert("x-amz-meta-user-id".to_string(), "12345".to_string());
        metadata.insert("x-amz-meta-project".to_string(), "test-project".to_string());
        metadata.insert("x-amz-storage-class".to_string(), "STANDARD".to_string());
        metadata.insert("x-amz-tagging".to_string(), "key=value".to_string());
        metadata.insert("x-amz-replication-status".to_string(), "COMPLETED".to_string());

        let user_metadata = extract_user_defined_metadata(&metadata);

        assert_eq!(user_metadata.len(), 2);
        assert_eq!(user_metadata.get("user-id"), Some(&"12345".to_string()));
        assert_eq!(user_metadata.get("project"), Some(&"test-project".to_string()));
        assert!(!user_metadata.contains_key("x-amz-meta-user-id"));
        assert!(!user_metadata.contains_key("x-amz-storage-class"));
        assert!(!user_metadata.contains_key("x-amz-tagging"));
    }

    #[test]
    fn test_extract_user_defined_metadata_exclude_rustfs_internal() {
        let mut metadata: HashMap<String, String> = HashMap::new();
        metadata.insert("x-rustfs-internal-healing".to_string(), "true".to_string());
        metadata.insert("x-rustfs-internal-data-mov".to_string(), "value".to_string());
        metadata.insert("X-RustFS-Internal-purgestatus".to_string(), "status".to_string());
        metadata.insert("x-rustfs-meta-custom".to_string(), "custom-value".to_string());
        metadata.insert("my-key".to_string(), "my-value".to_string());

        let user_metadata = extract_user_defined_metadata(&metadata);

        assert_eq!(user_metadata.len(), 2);
        assert_eq!(user_metadata.get("custom"), Some(&"custom-value".to_string()));
        assert_eq!(user_metadata.get("my-key"), Some(&"my-value".to_string()));
        assert!(!user_metadata.contains_key("x-rustfs-internal-healing"));
        assert!(!user_metadata.contains_key("x-rustfs-internal-data-mov"));
    }

    #[test]
    fn test_extract_user_defined_metadata_exclude_minio_headers() {
        let mut metadata = HashMap::new();
        metadata.insert("x-minio-custom".to_string(), "minio-value".to_string());
        metadata.insert("x-minio-internal".to_string(), "internal".to_string());
        metadata.insert("my-key".to_string(), "my-value".to_string());

        let user_metadata = extract_user_defined_metadata(&metadata);

        assert_eq!(user_metadata.len(), 1);
        assert_eq!(user_metadata.get("my-key"), Some(&"my-value".to_string()));
        assert!(!user_metadata.contains_key("x-minio-custom"));
    }

    #[test]
    fn test_extract_user_defined_metadata_mixed() {
        let mut metadata = HashMap::new();
        // System headers
        metadata.insert("content-type".to_string(), "application/json".to_string());
        metadata.insert("cache-control".to_string(), "no-cache".to_string());
        // AMZ headers
        metadata.insert("x-amz-meta-version".to_string(), "1.0".to_string());
        metadata.insert("x-amz-storage-class".to_string(), "STANDARD".to_string());
        // RustFS internal
        metadata.insert("x-rustfs-internal-healing".to_string(), "true".to_string());
        metadata.insert("x-rustfs-meta-source".to_string(), "upload".to_string());
        // User defined
        metadata.insert("my-custom-key".to_string(), "custom-value".to_string());
        metadata.insert("another-key".to_string(), "another-value".to_string());

        let user_metadata = extract_user_defined_metadata(&metadata);
|
||||
|
||||
assert_eq!(user_metadata.len(), 4);
|
||||
assert_eq!(user_metadata.get("version"), Some(&"1.0".to_string()));
|
||||
assert_eq!(user_metadata.get("source"), Some(&"upload".to_string()));
|
||||
assert_eq!(user_metadata.get("my-custom-key"), Some(&"custom-value".to_string()));
|
||||
assert_eq!(user_metadata.get("another-key"), Some(&"another-value".to_string()));
|
||||
assert!(!user_metadata.contains_key("content-type"));
|
||||
assert!(!user_metadata.contains_key("x-amz-storage-class"));
|
||||
assert!(!user_metadata.contains_key("x-rustfs-internal-healing"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_user_defined_metadata_empty() {
|
||||
let metadata = HashMap::new();
|
||||
let user_metadata = extract_user_defined_metadata(&metadata);
|
||||
assert!(user_metadata.is_empty());
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_extract_user_defined_metadata_case_insensitive() {
|
||||
let mut metadata = HashMap::new();
|
||||
metadata.insert("Content-Type".to_string(), "application/json".to_string());
|
||||
metadata.insert("CACHE-CONTROL".to_string(), "no-cache".to_string());
|
||||
metadata.insert("X-Amz-Meta-UserId".to_string(), "12345".to_string());
|
||||
metadata.insert("My-Custom-Key".to_string(), "value".to_string());
|
||||
|
||||
let user_metadata = extract_user_defined_metadata(&metadata);
|
||||
|
||||
assert_eq!(user_metadata.len(), 2);
|
||||
assert_eq!(user_metadata.get("userid"), Some(&"12345".to_string()));
|
||||
assert_eq!(user_metadata.get("My-Custom-Key"), Some(&"value".to_string()));
|
||||
assert!(!user_metadata.contains_key("Content-Type"));
|
||||
}
|
||||
}
|
crates/utils/src/obj/mod.rs (Normal file, 16 lines)
@@ -0,0 +1,16 @@
// Copyright 2024 RustFS Team
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

mod metadata;
pub use metadata::*;
@@ -12,7 +12,6 @@
// See the License for the specific language governing permissions and
// limitations under the License.

use rand::{Rng, RngCore};
use regex::Regex;
use std::io::{Error, Result};
use std::sync::LazyLock;
@@ -488,81 +487,6 @@ pub fn parse_ellipses_range(pattern: &str) -> Result<Vec<String>> {
    Ok(ret)
}

/// Generates a random access key of the specified length.
///
/// # Arguments
/// * `length` - The length of the access key to generate
///
/// # Returns
/// * `Result<String>` - A result containing the generated access key or an error if the length is too short
///
/// # Errors
/// This function will return an error if the specified length is less than 3.
///
/// # Examples
/// ```no_run
/// use rustfs_utils::string::gen_access_key;
///
/// let access_key = gen_access_key(16).unwrap();
/// println!("Generated access key: {}", access_key);
/// ```
///
pub fn gen_access_key(length: usize) -> Result<String> {
    const ALPHA_NUMERIC_TABLE: [char; 36] = [
        '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N',
        'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z',
    ];

    if length < 3 {
        return Err(Error::other("access key length is too short"));
    }

    let mut result = String::with_capacity(length);
    let mut rng = rand::rng();

    for _ in 0..length {
        result.push(ALPHA_NUMERIC_TABLE[rng.random_range(0..ALPHA_NUMERIC_TABLE.len())]);
    }

    Ok(result)
}

/// Generates a random secret key of the specified length.
///
/// # Arguments
/// * `length` - The length of the secret key to generate
///
/// # Returns
/// * `Result<String>` - A result containing the generated secret key or an error if the length is too short
///
/// # Errors
/// This function will return an error if the specified length is less than 8.
///
/// # Examples
/// ```no_run
/// use rustfs_utils::string::gen_secret_key;
///
/// let secret_key = gen_secret_key(32).unwrap();
/// println!("Generated secret key: {}", secret_key);
/// ```
///
pub fn gen_secret_key(length: usize) -> Result<String> {
    use base64_simd::URL_SAFE_NO_PAD;

    if length < 8 {
        return Err(Error::other("secret key length is too short"));
    }
    let mut rng = rand::rng();

    let mut key = vec![0u8; URL_SAFE_NO_PAD.estimated_decoded_length(length)];
    rng.fill_bytes(&mut key);

    let encoded = URL_SAFE_NO_PAD.encode_to_string(&key);
    let key_str = encoded.replace("/", "+");

    Ok(key_str)
}

/// Tests whether the string s begins with prefix ignoring case
///
/// # Arguments
docs/examples/mnmd/docker-compose.mtls.yml (Normal file, 32 lines)
@@ -0,0 +1,32 @@
services:
  mnmd:
    image: ghcr.io/your-org/mnmd:latest
    container_name: mnmd
    ports:
      - "8443:8443"
    volumes:
      - ./tls:/tls:ro
    environment:
      # Example mnmd settings (adapt to your image)
      - MNMD_LISTEN_ADDR=0.0.0.0:8443
      - MNMD_TLS_CERT=/tls/server_cert.pem
      - MNMD_TLS_KEY=/tls/server_key.pem
      - MNMD_TLS_CLIENT_CA=/tls/ca.crt

  rustfs:
    image: ghcr.io/rustfs/rustfs:latest
    container_name: rustfs
    depends_on:
      - mnmd
    environment:
      - RUSTFS_TLS_PATH=/tls
      - RUSTFS_TRUST_SYSTEM_CA=false
      - RUSTFS_TRUST_LEAF_CERT_AS_CA=false
      # Enable outbound mTLS (client identity) for MNMD
      - RUSTFS_MTLS_CLIENT_CERT=/tls/client_cert.pem
      - RUSTFS_MTLS_CLIENT_KEY=/tls/client_key.pem
      # MNMD address configured to https
      - RUSTFS_MNMD_ADDR=https://mnmd:8443
      - RUSTFS_MNMD_DOMAIN=mnmd
    volumes:
      - ./tls:/tls:ro
docs/tls.md (Normal file, 63 lines)
@@ -0,0 +1,63 @@
# TLS / mTLS configuration

RustFS supports TLS for serving HTTPS and for outbound gRPC connections (MNMD).
It also supports optional client certificate authentication (mTLS) for outbound gRPC:
if a client identity is configured, RustFS will present it; otherwise it will use
server-authenticated TLS only.

## Recommended `tls/` directory layout

Place these files in a directory (default: `./tls`, configurable via `RUSTFS_TLS_PATH`).

```
TLS_DIR/
  ca.crt            # PEM bundle of CA/root certificates to trust (recommended)
  public.crt        # optional extra root bundle (PEM)
  rustfs_cert.pem   # server leaf certificate (PEM) used by the RustFS server
  rustfs_key.pem    # server private key (PEM) used by the RustFS server

  # Optional: outbound mTLS client identity for MNMD
  client_cert.pem   # client certificate chain (PEM)
  client_key.pem    # client private key (PEM)

  # Optional: server-side mTLS (inbound client certificate verification)
  client_ca.crt     # PEM bundle of CA certificates to verify client certificates
```

## Environment variables

### Root trust

- `RUSTFS_TLS_PATH` (default: `tls`): TLS directory.
- `RUSTFS_TRUST_SYSTEM_CA` (default: `false`): When `true`, include the platform/system
  trust store as additional roots. When `false`, system roots are not used.
- `RUSTFS_TRUST_LEAF_CERT_AS_CA` (default: `false`): Compatibility switch. If `true`,
  RustFS will also load `rustfs_cert.pem` into the root store (treating leaf certificates
  as trusted roots). Prefer providing `ca.crt` instead.

### Outbound mTLS identity

- `RUSTFS_MTLS_CLIENT_CERT` (default: `${RUSTFS_TLS_PATH}/client_cert.pem`): path to PEM client cert/chain.
- `RUSTFS_MTLS_CLIENT_KEY` (default: `${RUSTFS_TLS_PATH}/client_key.pem`): path to PEM private key.

If both files exist, RustFS enables outbound mTLS. If either is missing, RustFS proceeds
with server-only TLS.

### Server-side mTLS (inbound client certificate verification)

- `RUSTFS_SERVER_MTLS_ENABLE` (default: `false`): When `true`, the RustFS server requires
  clients to present valid certificates signed by a trusted CA for authentication.

When enabled, RustFS loads client CA certificates from:
1. `${RUSTFS_TLS_PATH}/client_ca.crt` (preferred)
2. `${RUSTFS_TLS_PATH}/ca.crt` (fallback if `client_ca.crt` does not exist)

**Important**: Server mTLS is disabled by default. When enabled but no valid CA bundle is
found, RustFS will fail to start with a clear error message. This ensures that server mTLS
cannot be accidentally enabled without proper client CA configuration.

## Failure mode for HTTPS without roots

When connecting to an `https://` MNMD address, RustFS requires at least one configured
trusted root. If none are loaded (no `ca.crt`/`public.crt` and system roots disabled),
RustFS fails fast with a clear error message.
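The compose example above covers outbound mTLS only. For the inbound case, here is a minimal compose-style sketch: the `RUSTFS_*` variables and file names come from the doc above, while the service layout and image tag are assumptions mirroring the MNMD example.

```yaml
# Hypothetical sketch: enable inbound client-certificate verification.
services:
  rustfs:
    image: ghcr.io/rustfs/rustfs:latest
    environment:
      - RUSTFS_TLS_PATH=/tls
      # Require clients to present certificates signed by a trusted CA.
      # The mounted directory must contain client_ca.crt (preferred) or
      # ca.crt (fallback); with neither present, RustFS refuses to start.
      - RUSTFS_SERVER_MTLS_ENABLE=true
    volumes:
      - ./tls:/tls:ro
```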
@@ -31,6 +31,7 @@ RustFS helm chart supports **standalone and distributed mode**. For standalone m
| containerSecurityContext.capabilities.drop[0] | string | `"ALL"` | |
| containerSecurityContext.readOnlyRootFilesystem | bool | `true` | |
| containerSecurityContext.runAsNonRoot | bool | `true` | |
| enableServiceLinks | bool | `false` | |
| extraManifests | list | `[]` | List of additional k8s manifests. |
| fullnameOverride | string | `""` | |
| image.pullPolicy | string | `"IfNotPresent"` | |
@@ -73,6 +74,9 @@ RustFS helm chart supports **standalone and distributed mode**. For standalone m
| mode.standalone.enabled | bool | `false` | RustFS standalone mode support, namely one pod one pvc. |
| nameOverride | string | `""` | |
| nodeSelector | object | `{}` | |
| pdb.create | bool | `false` | Enable/disable a Pod Disruption Budget creation |
| pdb.maxUnavailable | string | `1` | |
| pdb.minAvailable | string | `""` | |
| podAnnotations | object | `{}` | |
| podLabels | object | `{}` | |
| podSecurityContext.fsGroup | int | `10001` | |
@@ -1,24 +1,13 @@
apiVersion: v2
name: rustfs
description: RustFS helm chart to deploy RustFS on kubernetes cluster.

# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application

# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.0.76

# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
# It is recommended to use it with quotes.
appVersion: "1.0.0-alpha.73"
version: 0.0.77
appVersion: "1.0.0-alpha.77"
home: https://rustfs.com
icon: https://media.sys.truenas.net/apps/rustfs/icons/icon.svg
maintainers:
  - name: RustFS, Inc.
    url: https://github.com/rustfs/rustfs/tree/main/helm/rustfs
sources:
  - https://github.com/rustfs/rustfs
@@ -66,7 +66,7 @@ spec:
            - -c
            - |
              mkdir -p /data /logs
              chown -R 10001:10001 /data /logs
              chown 10001:10001 /data /logs
          volumeMounts:
            - name: data
              mountPath: /data
@@ -74,7 +74,7 @@ spec:
              mountPath: /logs
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          command: ["/usr/bin/rustfs"]
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          {{- if .Values.containerSecurityContext }}
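With the `default .Chart.AppVersion` fallback introduced above, an empty `image.tag` now resolves to the chart's `appVersion` instead of producing an empty image tag. A hedged values sketch; the key names are the chart's own, the repository value is illustrative:

```yaml
image:
  repository: rustfs/rustfs   # illustrative repository
  # An empty tag falls back to .Chart.AppVersion ("1.0.0-alpha.77");
  # note the chart's shipped default is "latest" (see values.yaml below).
  tag: ""
  pullPolicy: IfNotPresent
```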
helm/rustfs/templates/poddisruptionbudget.yaml (Normal file, 22 lines)
@@ -0,0 +1,22 @@
{{- if and .Values.pdb.create .Values.mode.distributed.enabled }}
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
  name: {{ include "rustfs.fullname" . }}
  labels:
    {{- include "rustfs.labels" . | nindent 4 }}
    {{- with .Values.commonLabels }}
    {{- toYaml . | nindent 4 }}
    {{- end }}
spec:
  {{- if .Values.pdb.minAvailable }}
  minAvailable: {{ .Values.pdb.minAvailable }}
  {{- end }}
  {{- if or .Values.pdb.maxUnavailable (not .Values.pdb.minAvailable) }}
  maxUnavailable: {{ .Values.pdb.maxUnavailable }}
  {{- end }}
  selector:
    matchLabels:
      {{- include "rustfs.selectorLabels" . | nindent 6 }}
{{- end }}
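As a usage sketch for the template above: the budget is only rendered in distributed mode, and `minAvailable` takes precedence, so `maxUnavailable` should be cleared when setting it. The key names are the chart's own; the percentage is illustrative:

```yaml
mode:
  distributed:
    enabled: true
pdb:
  create: true
  # When minAvailable is set, clear maxUnavailable so the template
  # does not emit both fields (Kubernetes allows only one of them).
  minAvailable: "50%"
  maxUnavailable: ""
```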
@@ -2,6 +2,8 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  annotations:
    helm.sh/resource-policy: keep
  name: {{ include "rustfs.fullname" . }}-data
  labels:
    {{- toYaml .Values.commonLabels | nindent 4 }}
@@ -16,6 +18,8 @@ spec:
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  annotations:
    helm.sh/resource-policy: keep
  name: {{ include "rustfs.fullname" . }}-logs
  labels:
    {{- toYaml .Values.commonLabels | nindent 4 }}
@@ -26,6 +26,7 @@ spec:
        {{- toYaml . | nindent 8 }}
      {{- end }}
    spec:
      enableServiceLinks: {{ .Values.enableServiceLinks }}
      {{- with include "chart.imagePullSecrets" . }}
      imagePullSecrets:
        {{- . | nindent 8 }}
@@ -41,8 +42,8 @@ spec:
          {{- toYaml .Values.affinity.nodeAffinity | nindent 10 }}
          {{- else }}
          {}
        {{- if .Values.affinity.podAntiAffinity.enabled }}
          {{- end }}
        {{- if .Values.affinity.podAntiAffinity.enabled }}
        podAntiAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
@@ -87,7 +88,7 @@ spec:
              mkdir -p /data
              fi
              mkdir -p {{ $logDir }}
              chown -R 10001:10001 /data {{ $logDir }}
              chown 10001:10001 /data {{ $logDir }}
          volumeMounts:
          {{- if eq (int .Values.replicaCount) 4 }}
          {{- range $i := until (int .Values.replicaCount) }}
@@ -102,7 +103,7 @@ spec:
              mountPath: {{ $logDir }}
      containers:
        - name: {{ .Chart.Name }}
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag }}"
          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
          command: ["/usr/bin/rustfs"]
          imagePullPolicy: {{ .Values.image.pullPolicy }}
          {{- if .Values.containerSecurityContext }}
@@ -11,7 +11,7 @@ image:
  # This sets the pull policy for images.
  pullPolicy: IfNotPresent
  # Overrides the image tag whose default is the chart appVersion.
  tag: "1.0.0-alpha.73"
  tag: "latest"

# This is for the secrets for pulling an image from a private repository more information can be found here: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
imagePullSecrets: []
@@ -187,6 +187,14 @@ initStep:
    runAsUser: 0
    runAsGroup: 0

pdb:
  create: false
  # Minimum number/percentage of pods that should remain scheduled
  minAvailable: ""
  # Maximum number/percentage of pods that may be made unavailable
  maxUnavailable: 1

# Whether information about services should be injected into pod's environment variable
enableServiceLinks: false

extraManifests: []
@@ -44,6 +44,7 @@ rustfs-appauth = { workspace = true }
rustfs-audit = { workspace = true }
rustfs-common = { workspace = true }
rustfs-config = { workspace = true, features = ["constants", "notify"] }
rustfs-credentials = { workspace = true }
rustfs-ecstore = { workspace = true }
rustfs-filemeta.workspace = true
rustfs-iam = { workspace = true }
@@ -64,7 +65,6 @@ rustfs-zip = { workspace = true }
# Async Runtime and Networking
async-trait = { workspace = true }
axum.workspace = true
axum-extra = { workspace = true }
axum-server = { workspace = true }
futures.workspace = true
futures-util.workspace = true
@@ -81,7 +81,7 @@ tokio-stream.workspace = true
tokio-util.workspace = true
tonic = { workspace = true }
tower.workspace = true
tower-http = { workspace = true, features = ["trace", "compression-full", "cors", "catch-panic", "timeout", "limit", "request-id"] }
tower-http = { workspace = true, features = ["trace", "compression-full", "cors", "catch-panic", "timeout", "limit", "request-id", "add-extension"] }

# Serialization and Data Formats
bytes = { workspace = true }
@@ -94,6 +94,7 @@ serde_urlencoded = { workspace = true }
# Cryptography and Security
rustls = { workspace = true }
subtle = { workspace = true }
rustls-pemfile = { workspace = true }

# Time and Date
chrono = { workspace = true }
@@ -14,9 +14,9 @@

use crate::auth::get_condition_values;
use http::HeaderMap;
use rustfs_credentials::Credentials;
use rustfs_iam::store::object::ObjectStore;
use rustfs_iam::sys::IamSys;
use rustfs_policy::auth;
use rustfs_policy::policy::Args;
use rustfs_policy::policy::action::Action;
use s3s::S3Result;
@@ -26,16 +26,17 @@ use std::sync::Arc;

pub async fn validate_admin_request(
    headers: &HeaderMap,
    cred: &auth::Credentials,
    cred: &Credentials,
    is_owner: bool,
    deny_only: bool,
    actions: Vec<Action>,
    remote_addr: Option<std::net::SocketAddr>,
) -> S3Result<()> {
    let Ok(iam_store) = rustfs_iam::get() else {
        return Err(s3_error!(InternalError, "iam not init"));
    };
    for action in actions {
        match check_admin_request_auth(iam_store.clone(), headers, cred, is_owner, deny_only, action).await {
        match check_admin_request_auth(iam_store.clone(), headers, cred, is_owner, deny_only, action, remote_addr).await {
            Ok(_) => return Ok(()),
            Err(_) => {
                continue;
@@ -49,12 +50,13 @@ pub async fn validate_admin_request(
async fn check_admin_request_auth(
    iam_store: Arc<IamSys<ObjectStore>>,
    headers: &HeaderMap,
    cred: &auth::Credentials,
    cred: &Credentials,
    is_owner: bool,
    deny_only: bool,
    action: Action,
    remote_addr: Option<std::net::SocketAddr>,
) -> S3Result<()> {
    let conditions = get_condition_values(headers, cred, None, None);
    let conditions = get_condition_values(headers, cred, None, None, remote_addr);

    if !iam_store
        .is_allowed(&Args {
@@ -23,7 +23,6 @@ use axum::{
    response::{IntoResponse, Response},
    routing::get,
};
use axum_extra::extract::Host;
use axum_server::tls_rustls::RustlsConfig;
use http::{HeaderMap, HeaderName, HeaderValue, Method, StatusCode, Uri};
use mime_guess::from_path;
@@ -264,21 +263,27 @@ async fn version_handler() -> impl IntoResponse {
///
/// # Arguments:
/// - `uri`: The request URI.
/// - `Host(host)`: The host extracted from the request.
/// - `headers`: The request headers.
///
/// # Returns:
/// - 200 OK with JSON body containing the console configuration if initialized.
/// - 500 Internal Server Error if configuration is not initialized.
#[instrument(fields(host))]
async fn config_handler(uri: Uri, Host(host): Host, headers: HeaderMap) -> impl IntoResponse {
#[instrument(fields(uri))]
async fn config_handler(uri: Uri, headers: HeaderMap) -> impl IntoResponse {
    // Get the scheme from the headers or use the URI scheme
    let scheme = headers
        .get(HeaderName::from_static("x-forwarded-proto"))
        .and_then(|value| value.to_str().ok())
        .unwrap_or_else(|| uri.scheme().map(|s| s.as_str()).unwrap_or("http"));

    let raw_host = uri.host().unwrap_or(host.as_str());
    // Prefer URI host, fallback to `Host` header
    let header_host = headers
        .get(http::header::HOST)
        .and_then(|v| v.to_str().ok())
        .unwrap_or_default();

    let raw_host = uri.host().unwrap_or(header_host);

    let host_for_url = if let Ok(socket_addr) = raw_host.parse::<SocketAddr>() {
        // Successfully parsed, it's in IP:Port format.
        // For IPv6, we need to enclose it in brackets to form a valid URL.
||||
@@ -18,6 +18,7 @@ use crate::auth::check_key_valid;
|
||||
use crate::auth::get_condition_values;
|
||||
use crate::auth::get_session_token;
|
||||
use crate::error::ApiError;
|
||||
use crate::server::RemoteAddr;
|
||||
use bytes::Bytes;
|
||||
use futures::{Stream, StreamExt};
|
||||
use http::{HeaderMap, HeaderValue, Uri};
|
||||
@@ -25,6 +26,7 @@ use hyper::StatusCode;
|
||||
use matchit::Params;
|
||||
use rustfs_common::heal_channel::HealOpts;
|
||||
use rustfs_config::{MAX_ADMIN_REQUEST_BODY_SIZE, MAX_HEAL_REQUEST_SIZE};
|
||||
use rustfs_credentials::get_global_action_cred;
|
||||
use rustfs_ecstore::admin_server_info::get_server_info;
|
||||
use rustfs_ecstore::bucket::bucket_target_sys::BucketTargetSys;
|
||||
use rustfs_ecstore::bucket::metadata::BUCKET_TARGETS_FILE;
|
||||
@@ -35,7 +37,6 @@ use rustfs_ecstore::data_usage::{
|
||||
aggregate_local_snapshots, compute_bucket_usage, load_data_usage_from_backend, store_data_usage_in_backend,
|
||||
};
|
||||
use rustfs_ecstore::error::StorageError;
|
||||
use rustfs_ecstore::global::get_global_action_cred;
|
||||
use rustfs_ecstore::global::global_rustfs_port;
|
||||
use rustfs_ecstore::metrics_realtime::{CollectMetricsOpts, MetricType, collect_local_metrics};
|
||||
use rustfs_ecstore::new_object_layer_fn;
|
||||
@@ -210,7 +211,8 @@ impl Operation for AccountInfoHandler {
|
||||
let claims = cred.claims.as_ref().unwrap_or(&default_claims);
|
||||
|
||||
let cred_clone = cred.clone();
|
||||
let conditions = get_condition_values(&req.headers, &cred_clone, None, None);
|
||||
let remote_addr = req.extensions.get::<RemoteAddr>().map(|a| a.0);
|
||||
let conditions = get_condition_values(&req.headers, &cred_clone, None, None, remote_addr);
|
||||
let cred_clone = Arc::new(cred_clone);
|
||||
let conditions = Arc::new(conditions);
|
||||
|
||||
@@ -405,12 +407,14 @@ impl Operation for ServerInfoHandler {
|
||||
let (cred, owner) =
|
||||
check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
|
||||
|
||||
let remote_addr = req.extensions.get::<RemoteAddr>().map(|a| a.0);
|
||||
validate_admin_request(
|
||||
&req.headers,
|
||||
&cred,
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
|
||||
remote_addr,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -451,12 +455,14 @@ impl Operation for StorageInfoHandler {
|
||||
let (cred, owner) =
|
||||
check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
|
||||
|
||||
let remote_addr = req.extensions.get::<RemoteAddr>().map(|a| a.0);
|
||||
validate_admin_request(
|
||||
&req.headers,
|
||||
&cred,
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::StorageInfoAdminAction)],
|
||||
remote_addr,
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -492,6 +498,7 @@ impl Operation for DataUsageInfoHandler {
|
||||
let (cred, owner) =
|
||||
check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
|
||||
|
||||
let remote_addr = req.extensions.get::<RemoteAddr>().map(|a| a.0);
|
||||
validate_admin_request(
|
||||
&req.headers,
|
||||
&cred,
|
||||
@@ -501,6 +508,7 @@ impl Operation for DataUsageInfoHandler {
|
||||
Action::AdminAction(AdminAction::DataUsageInfoAdminAction),
|
||||
Action::S3Action(S3Action::ListBucketAction),
|
||||
],
|
||||
remote_addr,
|
||||
)
|
||||
.await?;
|
||||
|
||||
|
||||
@@ -20,6 +20,7 @@ use std::{
|
||||
use crate::{
|
||||
admin::{auth::validate_admin_request, router::Operation},
|
||||
auth::{check_key_valid, get_session_token},
|
||||
server::RemoteAddr,
|
||||
};
|
||||
use http::{HeaderMap, StatusCode};
|
||||
use matchit::Params;
|
||||
@@ -97,6 +98,7 @@ impl Operation for ExportBucketMetadata {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ExportBucketMetadataAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -389,6 +391,7 @@ impl Operation for ImportBucketMetadata {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ImportBucketMetadataAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
|
||||
@@ -15,11 +15,12 @@
|
||||
use crate::{
|
||||
admin::{auth::validate_admin_request, router::Operation, utils::has_space_be},
|
||||
auth::{check_key_valid, constant_time_eq, get_session_token},
|
||||
server::RemoteAddr,
|
||||
};
|
||||
use http::{HeaderMap, StatusCode};
|
||||
use matchit::Params;
|
||||
use rustfs_config::MAX_ADMIN_REQUEST_BODY_SIZE;
|
||||
use rustfs_ecstore::global::get_global_action_cred;
|
||||
use rustfs_credentials::get_global_action_cred;
|
||||
use rustfs_iam::error::{is_err_no_such_group, is_err_no_such_user};
|
||||
use rustfs_madmin::GroupAddRemove;
|
||||
use rustfs_policy::policy::action::{Action, AdminAction};
|
||||
@@ -57,6 +58,7 @@ impl Operation for ListGroups {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ListGroupsAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -95,6 +97,7 @@ impl Operation for GetGroup {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::GetGroupAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -142,6 +145,7 @@ impl Operation for SetGroupStatus {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::EnableGroupAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -209,6 +213,7 @@ impl Operation for UpdateGroupMembers {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::AddUserToGroupAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
use super::Operation;
|
||||
use crate::admin::auth::validate_admin_request;
|
||||
use crate::auth::{check_key_valid, get_session_token};
|
||||
use crate::server::RemoteAddr;
|
||||
use base64::Engine;
|
||||
use hyper::{HeaderMap, StatusCode};
|
||||
use matchit::Params;
|
||||
@@ -127,6 +128,7 @@ impl Operation for CreateKeyHandler {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)], // TODO: Add specific KMS action
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -205,6 +207,7 @@ impl Operation for DescribeKeyHandler {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -260,6 +263,7 @@ impl Operation for ListKeysHandler {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -321,6 +325,7 @@ impl Operation for GenerateDataKeyHandler {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -386,6 +391,7 @@ impl Operation for KmsStatusHandler {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -443,6 +449,7 @@ impl Operation for KmsConfigHandler {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -487,6 +494,7 @@ impl Operation for KmsClearCacheHandler {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
use super::Operation;
|
||||
use crate::admin::auth::validate_admin_request;
|
||||
use crate::auth::{check_key_valid, get_session_token};
|
||||
use crate::server::RemoteAddr;
|
||||
use hyper::StatusCode;
|
||||
use matchit::Params;
|
||||
use rustfs_config::MAX_ADMIN_REQUEST_BODY_SIZE;
|
||||
@@ -98,6 +99,7 @@ impl Operation for ConfigureKmsHandler {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -196,6 +198,7 @@ impl Operation for StartKmsHandler {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -329,6 +332,7 @@ impl Operation for StopKmsHandler {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -394,6 +398,7 @@ impl Operation for GetKmsStatusHandler {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -465,6 +470,7 @@ impl Operation for ReconfigureKmsHandler {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
|
||||
@@ -17,6 +17,7 @@
|
||||
use super::Operation;
|
||||
use crate::admin::auth::validate_admin_request;
|
||||
use crate::auth::{check_key_valid, get_session_token};
|
||||
use crate::server::RemoteAddr;
|
||||
use hyper::{HeaderMap, StatusCode};
|
||||
use matchit::Params;
|
||||
use rustfs_config::MAX_ADMIN_REQUEST_BODY_SIZE;
|
||||
@@ -79,6 +80,7 @@ impl Operation for CreateKmsKeyHandler {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -212,6 +214,7 @@ impl Operation for DeleteKmsKeyHandler {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -360,6 +363,7 @@ impl Operation for CancelKmsKeyDeletionHandler {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -488,6 +492,7 @@ impl Operation for ListKmsKeysHandler {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -599,6 +604,7 @@ impl Operation for DescribeKmsKeyHandler {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ServerInfoAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
|
||||
@@ -15,11 +15,12 @@
|
||||
use crate::{
|
||||
admin::{auth::validate_admin_request, router::Operation, utils::has_space_be},
|
||||
auth::{check_key_valid, get_session_token},
|
||||
server::RemoteAddr,
|
||||
};
|
||||
use http::{HeaderMap, StatusCode};
|
||||
use matchit::Params;
|
||||
use rustfs_config::MAX_ADMIN_REQUEST_BODY_SIZE;
|
||||
use rustfs_ecstore::global::get_global_action_cred;
|
||||
use rustfs_credentials::get_global_action_cred;
|
||||
use rustfs_iam::error::is_err_no_such_user;
|
||||
use rustfs_iam::store::MappedPolicy;
|
||||
use rustfs_policy::policy::{
|
||||
@@ -60,6 +61,7 @@ impl Operation for ListCannedPolicies {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ListUserPoliciesAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -118,6 +120,7 @@ impl Operation for AddCannedPolicy {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::CreatePolicyAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -190,6 +193,7 @@ impl Operation for InfoCannedPolicy {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::GetPolicyAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -247,6 +251,7 @@ impl Operation for RemoveCannedPolicy {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::DeletePolicyAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -307,6 +312,7 @@ impl Operation for SetPolicyForUserOrGroup {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::AttachPolicyAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
|
||||
@@ -26,6 +26,7 @@ use crate::{
|
||||
admin::{auth::validate_admin_request, router::Operation},
|
||||
auth::{check_key_valid, get_session_token},
|
||||
error::ApiError,
|
||||
server::RemoteAddr,
|
||||
};
|
||||
|
||||
pub struct ListPools {}
|
||||
@@ -53,6 +54,7 @@ impl Operation for ListPools {
|
||||
Action::AdminAction(AdminAction::ServerInfoAdminAction),
|
||||
Action::AdminAction(AdminAction::DecommissionAdminAction),
|
||||
],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -119,6 +121,7 @@ impl Operation for StatusPool {
|
||||
Action::AdminAction(AdminAction::ServerInfoAdminAction),
|
||||
Action::AdminAction(AdminAction::DecommissionAdminAction),
|
||||
],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -194,6 +197,7 @@ impl Operation for StartDecommission {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::DecommissionAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -292,6 +296,7 @@ impl Operation for CancelDecommission {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::DecommissionAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
|
||||
@@ -15,6 +15,7 @@
|
||||
use crate::{
|
||||
admin::{auth::validate_admin_request, router::Operation},
|
||||
auth::{check_key_valid, get_session_token},
|
||||
server::RemoteAddr,
|
||||
};
|
||||
use http::{HeaderMap, StatusCode};
|
||||
use matchit::Params;
|
||||
@@ -103,6 +104,7 @@ impl Operation for RebalanceStart {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::RebalanceAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -180,6 +182,7 @@ impl Operation for RebalanceStatus {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::RebalanceAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -297,6 +300,7 @@ impl Operation for RebalanceStop {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::RebalanceAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
|
||||
@@ -14,12 +14,13 @@
|
||||
|
||||
use crate::admin::utils::has_space_be;
|
||||
use crate::auth::{constant_time_eq, get_condition_values, get_session_token};
|
||||
use crate::server::RemoteAddr;
|
||||
use crate::{admin::router::Operation, auth::check_key_valid};
|
||||
use http::HeaderMap;
|
||||
use hyper::StatusCode;
|
||||
use matchit::Params;
|
||||
use rustfs_config::MAX_ADMIN_REQUEST_BODY_SIZE;
|
||||
use rustfs_ecstore::global::get_global_action_cred;
|
||||
use rustfs_credentials::get_global_action_cred;
|
||||
use rustfs_iam::error::is_err_no_such_service_account;
|
||||
use rustfs_iam::sys::{NewServiceAccountOpts, UpdateServiceAccountOpts};
|
||||
use rustfs_madmin::{
|
||||
@@ -119,7 +120,13 @@ impl Operation for AddServiceAccount {
|
||||
groups: &cred.groups,
|
||||
action: Action::AdminAction(AdminAction::CreateServiceAccountAdminAction),
|
||||
bucket: "",
|
||||
conditions: &get_condition_values(&req.headers, &cred, None, None),
|
||||
conditions: &get_condition_values(
|
||||
&req.headers,
|
||||
&cred,
|
||||
None,
|
||||
None,
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
),
|
||||
is_owner: owner,
|
||||
object: "",
|
||||
claims: cred.claims.as_ref().unwrap_or(&HashMap::new()),
|
||||
@@ -270,7 +277,13 @@ impl Operation for UpdateServiceAccount {
|
||||
groups: &cred.groups,
|
||||
action: Action::AdminAction(AdminAction::UpdateServiceAccountAdminAction),
|
||||
bucket: "",
|
||||
conditions: &get_condition_values(&req.headers, &cred, None, None),
|
||||
conditions: &get_condition_values(
|
||||
&req.headers,
|
||||
&cred,
|
||||
None,
|
||||
None,
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
),
|
||||
is_owner: owner,
|
||||
object: "",
|
||||
claims: cred.claims.as_ref().unwrap_or(&HashMap::new()),
|
||||
@@ -363,7 +376,13 @@ impl Operation for InfoServiceAccount {
|
||||
groups: &cred.groups,
|
||||
action: Action::AdminAction(AdminAction::ListServiceAccountsAdminAction),
|
||||
bucket: "",
|
||||
conditions: &get_condition_values(&req.headers, &cred, None, None),
|
||||
conditions: &get_condition_values(
|
||||
&req.headers,
|
||||
&cred,
|
||||
None,
|
||||
None,
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
),
|
||||
is_owner: owner,
|
||||
object: "",
|
||||
claims: cred.claims.as_ref().unwrap_or(&HashMap::new()),
|
||||
@@ -491,7 +510,13 @@ impl Operation for ListServiceAccount {
|
||||
groups: &cred.groups,
|
||||
action: Action::AdminAction(AdminAction::UpdateServiceAccountAdminAction),
|
||||
bucket: "",
|
||||
conditions: &get_condition_values(&req.headers, &cred, None, None),
|
||||
conditions: &get_condition_values(
|
||||
&req.headers,
|
||||
&cred,
|
||||
None,
|
||||
None,
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
),
|
||||
is_owner: owner,
|
||||
object: "",
|
||||
claims: cred.claims.as_ref().unwrap_or(&HashMap::new()),
|
||||
@@ -589,7 +614,13 @@ impl Operation for DeleteServiceAccount {
|
||||
groups: &cred.groups,
|
||||
action: Action::AdminAction(AdminAction::RemoveServiceAccountAdminAction),
|
||||
bucket: "",
|
||||
conditions: &get_condition_values(&req.headers, &cred, None, None),
|
||||
conditions: &get_condition_values(
|
||||
&req.headers,
|
||||
&cred,
|
||||
None,
|
||||
None,
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
),
|
||||
is_owner: owner,
|
||||
object: "",
|
||||
claims: cred.claims.as_ref().unwrap_or(&HashMap::new()),
|
||||
|
||||
@@ -16,6 +16,7 @@
|
||||
use crate::{
|
||||
admin::{auth::validate_admin_request, router::Operation},
|
||||
auth::{check_key_valid, get_session_token},
|
||||
server::RemoteAddr,
|
||||
};
|
||||
use http::{HeaderMap, StatusCode};
|
||||
use matchit::Params;
|
||||
@@ -90,7 +91,15 @@ impl Operation for AddTier {
|
||||
let (cred, owner) =
|
||||
check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
|
||||
|
||||
validate_admin_request(&req.headers, &cred, owner, false, vec![Action::AdminAction(AdminAction::SetTierAction)]).await?;
|
||||
validate_admin_request(
|
||||
&req.headers,
|
||||
&cred,
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::SetTierAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut input = req.input;
|
||||
let body = match input.store_all_limited(MAX_ADMIN_REQUEST_BODY_SIZE).await {
|
||||
@@ -218,7 +227,15 @@ impl Operation for EditTier {
|
||||
let (cred, owner) =
|
||||
check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
|
||||
|
||||
validate_admin_request(&req.headers, &cred, owner, false, vec![Action::AdminAction(AdminAction::SetTierAction)]).await?;
|
||||
validate_admin_request(
|
||||
&req.headers,
|
||||
&cred,
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::SetTierAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut input = req.input;
|
||||
let body = match input.store_all_limited(MAX_ADMIN_REQUEST_BODY_SIZE).await {
|
||||
@@ -293,7 +310,15 @@ impl Operation for ListTiers {
|
||||
let (cred, owner) =
|
||||
check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
|
||||
|
||||
validate_admin_request(&req.headers, &cred, owner, false, vec![Action::AdminAction(AdminAction::ListTierAction)]).await?;
|
||||
validate_admin_request(
|
||||
&req.headers,
|
||||
&cred,
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ListTierAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut tier_config_mgr = GLOBAL_TierConfigMgr.read().await;
|
||||
let tiers = tier_config_mgr.list_tiers();
|
||||
@@ -329,7 +354,15 @@ impl Operation for RemoveTier {
|
||||
let (cred, owner) =
|
||||
check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
|
||||
|
||||
validate_admin_request(&req.headers, &cred, owner, false, vec![Action::AdminAction(AdminAction::SetTierAction)]).await?;
|
||||
validate_admin_request(
|
||||
&req.headers,
|
||||
&cred,
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::SetTierAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut force: bool = false;
|
||||
let force_str = query.force.clone().unwrap_or_default();
|
||||
@@ -392,7 +425,15 @@ impl Operation for VerifyTier {
|
||||
let (cred, owner) =
|
||||
check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
|
||||
|
||||
validate_admin_request(&req.headers, &cred, owner, false, vec![Action::AdminAction(AdminAction::ListTierAction)]).await?;
|
||||
validate_admin_request(
|
||||
&req.headers,
|
||||
&cred,
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ListTierAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut tier_config_mgr = GLOBAL_TierConfigMgr.write().await;
|
||||
tier_config_mgr.verify(&query.tier.unwrap()).await;
|
||||
@@ -415,7 +456,15 @@ impl Operation for GetTierInfo {
|
||||
let (cred, owner) =
|
||||
check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
|
||||
|
||||
validate_admin_request(&req.headers, &cred, owner, false, vec![Action::AdminAction(AdminAction::ListTierAction)]).await?;
|
||||
validate_admin_request(
|
||||
&req.headers,
|
||||
&cred,
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ListTierAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let query = {
|
||||
if let Some(query) = req.uri.query() {
|
||||
@@ -467,7 +516,15 @@ impl Operation for ClearTier {
|
||||
let (cred, owner) =
|
||||
check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
|
||||
|
||||
validate_admin_request(&req.headers, &cred, owner, false, vec![Action::AdminAction(AdminAction::SetTierAction)]).await?;
|
||||
validate_admin_request(
|
||||
&req.headers,
|
||||
&cred,
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::SetTierAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut force: bool = false;
|
||||
let force_str = query.force;
|
||||
|
||||
@@ -15,11 +15,12 @@
|
||||
use crate::{
|
||||
admin::{auth::validate_admin_request, router::Operation, utils::has_space_be},
|
||||
auth::{check_key_valid, constant_time_eq, get_session_token},
|
||||
server::RemoteAddr,
|
||||
};
|
||||
use http::{HeaderMap, StatusCode};
|
||||
use matchit::Params;
|
||||
use rustfs_config::{MAX_ADMIN_REQUEST_BODY_SIZE, MAX_IAM_IMPORT_SIZE};
|
||||
use rustfs_ecstore::global::get_global_action_cred;
|
||||
use rustfs_credentials::get_global_action_cred;
|
||||
use rustfs_iam::{
|
||||
store::{GroupInfo, MappedPolicy, UserType},
|
||||
sys::NewServiceAccountOpts,
|
||||
@@ -124,6 +125,7 @@ impl Operation for AddUser {
|
||||
owner,
|
||||
deny_only,
|
||||
vec![Action::AdminAction(AdminAction::CreateUserAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -176,6 +178,7 @@ impl Operation for SetUserStatus {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::EnableUserAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -220,6 +223,7 @@ impl Operation for ListUsers {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ListUsersAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -278,6 +282,7 @@ impl Operation for RemoveUser {
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::DeleteUserAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -377,6 +382,7 @@ impl Operation for GetUserInfo {
|
||||
owner,
|
||||
deny_only,
|
||||
vec![Action::AdminAction(AdminAction::GetUserAdminAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
@@ -426,8 +432,15 @@ impl Operation for ExportIam {
|
||||
let (cred, owner) =
|
||||
check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
|
||||
|
||||
validate_admin_request(&req.headers, &cred, owner, false, vec![Action::AdminAction(AdminAction::ExportIAMAction)])
|
||||
.await?;
|
||||
validate_admin_request(
|
||||
&req.headers,
|
||||
&cred,
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ExportIAMAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let Ok(iam_store) = rustfs_iam::get() else {
|
||||
return Err(s3_error!(InvalidRequest, "iam not init"));
|
||||
@@ -633,8 +646,15 @@ impl Operation for ImportIam {
|
||||
let (cred, owner) =
|
||||
check_key_valid(get_session_token(&req.uri, &req.headers).unwrap_or_default(), &input_cred.access_key).await?;
|
||||
|
||||
validate_admin_request(&req.headers, &cred, owner, false, vec![Action::AdminAction(AdminAction::ExportIAMAction)])
|
||||
.await?;
|
||||
validate_admin_request(
|
||||
&req.headers,
|
||||
&cred,
|
||||
owner,
|
||||
false,
|
||||
vec![Action::AdminAction(AdminAction::ExportIAMAction)],
|
||||
req.extensions.get::<RemoteAddr>().map(|a| a.0),
|
||||
)
|
||||
.await?;
|
||||
|
||||
let mut input = req.input;
|
||||
let body = match input.store_all_limited(MAX_IAM_IMPORT_SIZE).await {
|
||||
|
||||
@@ -14,11 +14,10 @@
 use http::HeaderMap;
 use http::Uri;
-use rustfs_ecstore::global::get_global_action_cred;
+use rustfs_credentials::{Credentials, get_global_action_cred};
 use rustfs_iam::error::Error as IamError;
 use rustfs_iam::sys::SESSION_POLICY_NAME;
 use rustfs_iam::sys::get_claims_from_token_with_secret;
-use rustfs_policy::auth;
 use rustfs_utils::http::ip::get_source_ip_raw;
 use s3s::S3Error;
 use s3s::S3ErrorCode;
@@ -129,7 +128,7 @@ impl S3Auth for IAMAuth {
 }

 // check_key_valid checks the key is valid or not. return the user's credentials and if the user is the owner.
-pub async fn check_key_valid(session_token: &str, access_key: &str) -> S3Result<(auth::Credentials, bool)> {
+pub async fn check_key_valid(session_token: &str, access_key: &str) -> S3Result<(Credentials, bool)> {
     let Some(mut cred) = get_global_action_cred() else {
         return Err(S3Error::with_message(
             S3ErrorCode::InternalError,
@@ -187,7 +186,7 @@ pub async fn check_key_valid(session_token: &str, access_key: &str) -> S3Result<
     Ok((cred, owner))
 }

-pub fn check_claims_from_token(token: &str, cred: &auth::Credentials) -> S3Result<HashMap<String, Value>> {
+pub fn check_claims_from_token(token: &str, cred: &Credentials) -> S3Result<HashMap<String, Value>> {
     if !token.is_empty() && cred.access_key.is_empty() {
         return Err(s3_error!(InvalidRequest, "no access key"));
     }
@@ -235,11 +234,24 @@ pub fn get_session_token<'a>(uri: &'a Uri, hds: &'a HeaderMap) -> Option<&'a str
         .or_else(|| get_query_param(uri.query().unwrap_or_default(), "x-amz-security-token"))
 }

+/// Get condition values for policy evaluation
+///
+/// # Arguments
+/// * `header` - HTTP headers of the request
+/// * `cred` - User credentials
+/// * `version_id` - Optional version ID of the object
+/// * `region` - Optional region/location constraint
+/// * `remote_addr` - Optional remote address of the connection
+///
+/// # Returns
+/// * `HashMap<String, Vec<String>>` - Condition values for policy evaluation
+///
 pub fn get_condition_values(
     header: &HeaderMap,
-    cred: &auth::Credentials,
+    cred: &Credentials,
     version_id: Option<&str>,
     region: Option<&str>,
+    remote_addr: Option<std::net::SocketAddr>,
 ) -> HashMap<String, Vec<String>> {
     let username = if cred.is_temp() || cred.is_service_account() {
         cred.parent_user.clone()
@@ -287,12 +299,7 @@ pub fn get_condition_values(
         .unwrap_or(false);

-    // Get remote address from header or use default
-    let remote_addr = header
-        .get("x-forwarded-for")
-        .and_then(|v| v.to_str().ok())
-        .and_then(|s| s.split(',').next())
-        .or_else(|| header.get("x-real-ip").and_then(|v| v.to_str().ok()))
-        .unwrap_or("127.0.0.1");
+    let remote_addr_s = remote_addr.map(|a| a.ip().to_string()).unwrap_or_default();

     let mut args = HashMap::new();

@@ -300,7 +307,7 @@ pub fn get_condition_values(
     args.insert("CurrentTime".to_owned(), vec![curr_time.format(&Rfc3339).unwrap_or_default()]);
     args.insert("EpochTime".to_owned(), vec![epoch_time.to_string()]);
     args.insert("SecureTransport".to_owned(), vec![is_tls.to_string()]);
-    args.insert("SourceIp".to_owned(), vec![get_source_ip_raw(header, remote_addr)]);
+    args.insert("SourceIp".to_owned(), vec![get_source_ip_raw(header, &remote_addr_s)]);

     // Add user agent and referer
     if let Some(user_agent) = header.get("user-agent") {
@@ -403,7 +410,14 @@ pub fn get_condition_values(
     args
 }

-// Get request authentication type
+/// Get request authentication type
+///
+/// # Arguments
+/// * `header` - HTTP headers of the request
+///
+/// # Returns
+/// * `AuthType` - The determined authentication type
+///
 pub fn get_request_auth_type(header: &HeaderMap) -> AuthType {
     if is_request_signature_v2(header) {
         AuthType::SignedV2
@@ -432,7 +446,14 @@ pub fn get_request_auth_type(header: &HeaderMap) -> AuthType {
     }
 }

-// Helper function to determine auth type and signature version
+/// Helper function to determine auth type and signature version
+///
+/// # Arguments
+/// * `header` - HTTP headers of the request
+///
+/// # Returns
+/// * `(String, String)` - Tuple of auth type and signature version
+///
 fn determine_auth_type_and_version(header: &HeaderMap) -> (String, String) {
     match get_request_auth_type(header) {
         AuthType::JWT => ("JWT".to_string(), String::new()),
@@ -450,7 +471,13 @@ fn determine_auth_type_and_version(header: &HeaderMap) -> (String, String) {
     }
 }

-// Verify if request has JWT
+/// Verify if request has JWT
+///
+/// # Arguments
+/// * `header` - HTTP headers of the request
+///
+/// # Returns
+/// * `bool` - True if request has JWT, false otherwise
 fn is_request_jwt(header: &HeaderMap) -> bool {
     if let Some(auth) = header.get("authorization") {
         if let Ok(auth_str) = auth.to_str() {
@@ -460,7 +487,13 @@ fn is_request_jwt(header: &HeaderMap) -> bool {
     false
 }

-// Verify if request has AWS Signature Version '4'
+/// Verify if request has AWS Signature Version '4'
+///
+/// # Arguments
+/// * `header` - HTTP headers of the request
+///
+/// # Returns
+/// * `bool` - True if request has AWS Signature Version '4', false otherwise
 fn is_request_signature_v4(header: &HeaderMap) -> bool {
     if let Some(auth) = header.get("authorization") {
         if let Ok(auth_str) = auth.to_str() {
@@ -470,7 +503,13 @@ fn is_request_signature_v4(header: &HeaderMap) -> bool {
     false
 }

-// Verify if request has AWS Signature Version '2'
+/// Verify if request has AWS Signature Version '2'
+///
+/// # Arguments
+/// * `header` - HTTP headers of the request
+///
+/// # Returns
+/// * `bool` - True if request has AWS Signature Version '2', false otherwise
 fn is_request_signature_v2(header: &HeaderMap) -> bool {
     if let Some(auth) = header.get("authorization") {
         if let Ok(auth_str) = auth.to_str() {
@@ -480,7 +519,13 @@ fn is_request_signature_v2(header: &HeaderMap) -> bool {
     false
 }

-// Verify if request has AWS PreSign Version '4'
+/// Verify if request has AWS PreSign Version '4'
+///
+/// # Arguments
+/// * `header` - HTTP headers of the request
+///
+/// # Returns
+/// * `bool` - True if request has AWS PreSign Version '4', false otherwise
 pub(crate) fn is_request_presigned_signature_v4(header: &HeaderMap) -> bool {
     if let Some(credential) = header.get(AMZ_CREDENTIAL) {
         return !credential.to_str().unwrap_or("").is_empty();
@@ -488,7 +533,13 @@ pub(crate) fn is_request_presigned_signature_v4(header: &HeaderMap) -> bool {
     false
 }

-// Verify request has AWS PreSign Version '2'
+/// Verify request has AWS PreSign Version '2'
+///
+/// # Arguments
+/// * `header` - HTTP headers of the request
+///
+/// # Returns
+/// * `bool` - True if request has AWS PreSign Version '2', false otherwise
 fn is_request_presigned_signature_v2(header: &HeaderMap) -> bool {
     if let Some(access_key) = header.get(AMZ_ACCESS_KEY_ID) {
         return !access_key.to_str().unwrap_or("").is_empty();
@@ -496,7 +547,13 @@ fn is_request_presigned_signature_v2(header: &HeaderMap) -> bool {
     false
 }

-// Verify if request has AWS Post policy Signature Version '4'
+/// Verify if request has AWS Post policy Signature Version '4'
+///
+/// # Arguments
+/// * `header` - HTTP headers of the request
+///
+/// # Returns
+/// * `bool` - True if request has AWS Post policy Signature Version '4', false otherwise
 fn is_request_post_policy_signature_v4(header: &HeaderMap) -> bool {
     if let Some(content_type) = header.get("content-type") {
         if let Ok(ct) = content_type.to_str() {
@@ -506,7 +563,7 @@ fn is_request_post_policy_signature_v4(header: &HeaderMap) -> bool {
     false
 }

-// Verify if the request has AWS Streaming Signature Version '4'
+/// Verify if the request has AWS Streaming Signature Version '4'
 fn is_request_sign_streaming_v4(header: &HeaderMap) -> bool {
     if let Some(content_sha256) = header.get("x-amz-content-sha256") {
         if let Ok(sha256_str) = content_sha256.to_str() {
@@ -567,7 +624,7 @@ pub fn get_query_param<'a>(query: &'a str, param_name: &str) -> Option<&'a str>
 mod tests {
     use super::*;
     use http::{HeaderMap, HeaderValue, Uri};
-    use rustfs_policy::auth::Credentials;
+    use rustfs_credentials::Credentials;
     use s3s::auth::SecretKey;
     use serde_json::json;
     use std::collections::HashMap;
@@ -605,7 +662,7 @@ mod tests {

     fn create_service_account_credentials() -> Credentials {
         let mut claims = HashMap::new();
-        claims.insert("sa-policy".to_string(), json!("test-policy"));
+        claims.insert(rustfs_credentials::IAM_POLICY_CLAIM_NAME_SA.to_string(), json!("test-policy"));

         Credentials {
             access_key: "service-access-key".to_string(),
@@ -788,7 +845,7 @@ mod tests {
         let cred = create_test_credentials();
         let headers = HeaderMap::new();

-        let conditions = get_condition_values(&headers, &cred, None, None);
+        let conditions = get_condition_values(&headers, &cred, None, None, None);

         assert_eq!(conditions.get("userid"), Some(&vec!["test-access-key".to_string()]));
         assert_eq!(conditions.get("username"), Some(&vec!["test-access-key".to_string()]));
@@ -800,7 +857,7 @@ mod tests {
         let cred = create_temp_credentials();
         let headers = HeaderMap::new();

-        let conditions = get_condition_values(&headers, &cred, None, None);
+        let conditions = get_condition_values(&headers, &cred, None, None, None);

         assert_eq!(conditions.get("userid"), Some(&vec!["parent-user".to_string()]));
         assert_eq!(conditions.get("username"), Some(&vec!["parent-user".to_string()]));
@@ -812,7 +869,7 @@ mod tests {
         let cred = create_service_account_credentials();
         let headers = HeaderMap::new();

-        let conditions = get_condition_values(&headers, &cred, None, None);
+        let conditions = get_condition_values(&headers, &cred, None, None, None);

         assert_eq!(conditions.get("userid"), Some(&vec!["service-parent".to_string()]));
         assert_eq!(conditions.get("username"), Some(&vec!["service-parent".to_string()]));
@@ -827,7 +884,7 @@ mod tests {
         headers.insert("x-amz-object-lock-mode", HeaderValue::from_static("GOVERNANCE"));
         headers.insert("x-amz-object-lock-retain-until-date", HeaderValue::from_static("2024-12-31T23:59:59Z"));

-        let conditions = get_condition_values(&headers, &cred, None, None);
+        let conditions = get_condition_values(&headers, &cred, None, None, None);

         assert_eq!(conditions.get("object-lock-mode"), Some(&vec!["GOVERNANCE".to_string()]));
         assert_eq!(
@@ -842,7 +899,7 @@ mod tests {
         let mut headers = HeaderMap::new();
         headers.insert("x-amz-signature-age", HeaderValue::from_static("300"));

-        let conditions = get_condition_values(&headers, &cred, None, None);
+        let conditions = get_condition_values(&headers, &cred, None, None, None);

         assert_eq!(conditions.get("signatureAge"), Some(&vec!["300".to_string()]));
         // Verify the header is removed after processing
@@ -859,7 +916,7 @@ mod tests {

         let headers = HeaderMap::new();

-        let conditions = get_condition_values(&headers, &cred, None, None);
+        let conditions = get_condition_values(&headers, &cred, None, None, None);

         assert_eq!(conditions.get("username"), Some(&vec!["ldap-user".to_string()]));
         assert_eq!(conditions.get("groups"), Some(&vec!["group1".to_string(), "group2".to_string()]));
@@ -872,7 +929,7 @@ mod tests {

         let headers = HeaderMap::new();

-        let conditions = get_condition_values(&headers, &cred, None, None);
+        let conditions = get_condition_values(&headers, &cred, None, None, None);

         assert_eq!(
             conditions.get("groups"),
@@ -1148,4 +1205,159 @@ mod tests {
         assert!(constant_time_eq(key1, key2));
         assert!(!constant_time_eq(key1, key3));
     }

+    #[test]
+    fn test_get_condition_values_source_ip() {
+        let mut headers = HeaderMap::new();
+        let cred = Credentials::default();
+
+        // Case 1: No headers, no remote addr -> empty string
+        let conditions = get_condition_values(&headers, &cred, None, None, None);
+        assert_eq!(conditions.get("SourceIp").unwrap()[0], "");
+
+        // Case 2: No headers, with remote addr -> remote addr
+        let remote_addr: std::net::SocketAddr = "192.168.0.10:12345".parse().unwrap();
+        let conditions = get_condition_values(&headers, &cred, None, None, Some(remote_addr));
+        assert_eq!(conditions.get("SourceIp").unwrap()[0], "192.168.0.10");
+
+        // Case 3: X-Forwarded-For present -> XFF (takes precedence over remote_addr)
+        headers.insert("x-forwarded-for", HeaderValue::from_static("10.0.0.1"));
+        let conditions = get_condition_values(&headers, &cred, None, None, Some(remote_addr));
+        assert_eq!(conditions.get("SourceIp").unwrap()[0], "10.0.0.1");
+
+        // Case 4: X-Forwarded-For with multiple IPs -> First IP
+        headers.insert("x-forwarded-for", HeaderValue::from_static("10.0.0.3, 10.0.0.4"));
+        let conditions = get_condition_values(&headers, &cred, None, None, Some(remote_addr));
+        assert_eq!(conditions.get("SourceIp").unwrap()[0], "10.0.0.3");
+
+        // Case 5: X-Real-IP present (XFF removed) -> X-Real-IP
+        headers.remove("x-forwarded-for");
+        headers.insert("x-real-ip", HeaderValue::from_static("10.0.0.2"));
+        let conditions = get_condition_values(&headers, &cred, None, None, Some(remote_addr));
+        assert_eq!(conditions.get("SourceIp").unwrap()[0], "10.0.0.2");
+
+        // Case 6: Forwarded header present (X-Real-IP removed) -> Forwarded
+        headers.remove("x-real-ip");
+        headers.insert("forwarded", HeaderValue::from_static("for=10.0.0.5;proto=http"));
+        let conditions = get_condition_values(&headers, &cred, None, None, Some(remote_addr));
+        assert_eq!(conditions.get("SourceIp").unwrap()[0], "10.0.0.5");
+
+        // Case 7: Forwarded header with quotes and multiple values
+        headers.insert("forwarded", HeaderValue::from_static("for=\"10.0.0.6\", for=10.0.0.7"));
+        let conditions = get_condition_values(&headers, &cred, None, None, Some(remote_addr));
+        assert_eq!(conditions.get("SourceIp").unwrap()[0], "10.0.0.6");
+
+        // Case 8: IPv6 Remote Addr
+        let remote_addr_v6: std::net::SocketAddr = "[2001:db8::1]:8080".parse().unwrap();
+        headers.clear();
+        let conditions = get_condition_values(&headers, &cred, None, None, Some(remote_addr_v6));
+        assert_eq!(conditions.get("SourceIp").unwrap()[0], "2001:db8::1");
+    }
 }

+#[cfg(test)]
+mod tests_policy {
+    use rustfs_policy::policy::action::{Action, S3Action};
+    use rustfs_policy::policy::{Args, BucketPolicy, BucketPolicyArgs, Policy};
+    use std::collections::HashMap;
+
+    #[tokio::test]
+    async fn test_iam_policy_source_ip() {
+        let policy_json = r#"{
+            "Version": "2012-10-17",
+            "Statement": [
+                {
+                    "Effect": "Allow",
+                    "Action": ["s3:GetObject"],
+                    "Resource": ["arn:aws:s3:::mybucket/*"],
+                    "Condition": {
+                        "IpAddress": {
+                            "aws:SourceIp": "192.168.1.0/24"
+                        }
+                    }
+                }
+            ]
+        }"#;
+
+        let policy: Policy = serde_json::from_str(policy_json).expect("Failed to parse IAM policy");
+
+        // Case 1: Matching IP
+        let mut conditions = HashMap::new();
+        conditions.insert("SourceIp".to_string(), vec!["192.168.1.10".to_string()]);
+
+        let claims = HashMap::new();
+        let args = Args {
+            account: "test-account",
+            groups: &None,
+            action: Action::S3Action(S3Action::GetObjectAction),
+            bucket: "mybucket",
+            conditions: &conditions,
+            is_owner: false,
+            object: "myobject",
+            claims: &claims,
+            deny_only: false,
+        };
+
+        assert!(policy.is_allowed(&args).await, "IAM Policy should allow matching IP");
+
+        // Case 2: Non-matching IP
+        let mut conditions_fail = HashMap::new();
+        conditions_fail.insert("SourceIp".to_string(), vec!["10.0.0.1".to_string()]);
+
+        let args_fail = Args {
+            conditions: &conditions_fail,
+            ..args
+        };
+
+        assert!(!policy.is_allowed(&args_fail).await, "IAM Policy should deny non-matching IP");
+    }
+
+    #[tokio::test]
+    async fn test_bucket_policy_source_ip() {
+        let policy_json = r#"{
+            "Version": "2012-10-17",
+            "Statement": [
+                {
+                    "Effect": "Allow",
+                    "Principal": {"AWS": ["*"]},
+                    "Action": ["s3:GetObject"],
+                    "Resource": ["arn:aws:s3:::mybucket/*"],
+                    "Condition": {
+                        "IpAddress": {
+                            "aws:SourceIp": "192.168.1.0/24"
+                        }
+                    }
+                }
+            ]
+        }"#;
+
+        let policy: BucketPolicy = serde_json::from_str(policy_json).expect("Failed to parse Bucket policy");
+
+        // Case 1: Matching IP
+        let mut conditions = HashMap::new();
+        conditions.insert("SourceIp".to_string(), vec!["192.168.1.10".to_string()]);
+
+        let args = BucketPolicyArgs {
+            account: "test-account",
+            groups: &None,
+            action: Action::S3Action(S3Action::GetObjectAction),
+            bucket: "mybucket",
+            conditions: &conditions,
+            is_owner: false,
+            object: "myobject",
+        };
+
+        assert!(policy.is_allowed(&args).await, "Bucket Policy should allow matching IP");
+
+        // Case 2: Non-matching IP
+        let mut conditions_fail = HashMap::new();
+        conditions_fail.insert("SourceIp".to_string(), vec!["10.0.0.1".to_string()]);
+
+        let args_fail = BucketPolicyArgs {
+            conditions: &conditions_fail,
+            ..args
+        };
+
+        assert!(!policy.is_allowed(&args_fail).await, "Bucket Policy should deny non-matching IP");
+    }
+}
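The new `test_get_condition_values_source_ip` cases above pin down a precedence order for deriving the client IP: first entry of `X-Forwarded-For`, then `X-Real-IP`, then RFC 7239 `Forwarded`, and finally the socket peer address. As a reading aid only, a minimal sketch of that precedence might look like the following; `get_source_ip_raw` lives in `rustfs_utils` and this is not its actual body, just an illustration of the behavior the tests encode:

```rust
use http::HeaderMap;

// Illustrative sketch: header precedence as exercised by the tests above.
fn source_ip_sketch(headers: &HeaderMap, remote_addr: &str) -> String {
    if let Some(xff) = headers.get("x-forwarded-for").and_then(|v| v.to_str().ok()) {
        if let Some(first) = xff.split(',').next() {
            return first.trim().to_string(); // first hop wins
        }
    }
    if let Some(real_ip) = headers.get("x-real-ip").and_then(|v| v.to_str().ok()) {
        return real_ip.trim().to_string();
    }
    if let Some(fwd) = headers.get("forwarded").and_then(|v| v.to_str().ok()) {
        // RFC 7239: `for=1.2.3.4` pairs, possibly quoted, comma/semicolon separated.
        for part in fwd.split(|c| c == ',' || c == ';') {
            if let Some(v) = part.trim().strip_prefix("for=") {
                return v.trim_matches('"').to_string();
            }
        }
    }
    remote_addr.to_string() // fall back to the socket peer address
}
```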
@@ -73,11 +73,11 @@ pub struct Opt {
     pub server_domains: Vec<String>,

     /// Access key used for authentication.
-    #[arg(long, default_value_t = rustfs_config::DEFAULT_ACCESS_KEY.to_string(), env = "RUSTFS_ACCESS_KEY")]
+    #[arg(long, default_value_t = rustfs_credentials::DEFAULT_ACCESS_KEY.to_string(), env = "RUSTFS_ACCESS_KEY")]
     pub access_key: String,

     /// Secret key used for authentication.
-    #[arg(long, default_value_t = rustfs_config::DEFAULT_SECRET_KEY.to_string(), env = "RUSTFS_SECRET_KEY")]
+    #[arg(long, default_value_t = rustfs_credentials::DEFAULT_SECRET_KEY.to_string(), env = "RUSTFS_SECRET_KEY")]
     pub secret_key: String,

     /// Enable console server
@@ -39,6 +39,7 @@ use rustfs_ahm::{
     scanner::data_scanner::ScannerConfig, shutdown_ahm_services,
 };
 use rustfs_common::{GlobalReadiness, SystemStage, set_global_addr};
+use rustfs_credentials::init_global_action_credentials;
 use rustfs_ecstore::{
     StorageAPI,
     bucket::metadata_sys::init_bucket_metadata_sys,
@@ -94,7 +95,9 @@ async fn async_main() -> Result<()> {

     // Store in global storage
     match set_global_guard(guard).map_err(Error::other) {
-        Ok(_) => (),
+        Ok(_) => {
+            info!(target: "rustfs::main", "Global observability guard set successfully.");
+        }
         Err(e) => {
             error!("Failed to set global observability guard: {}", e);
             return Err(e);
@@ -109,7 +112,15 @@ async fn async_main() -> Result<()> {

     // Initialize TLS if a certificate path is provided
     if let Some(tls_path) = &opt.tls_path {
-        init_cert(tls_path).await
+        match init_cert(tls_path).await {
+            Ok(_) => {
+                info!(target: "rustfs::main", "TLS initialized successfully with certs from {}", tls_path);
+            }
+            Err(e) => {
+                error!("Failed to initialize TLS from {}: {}", tls_path, e);
+                return Err(Error::other(e));
+            }
+        }
     }

     // Run parameters
@@ -147,7 +158,16 @@ async fn run(opt: config::Opt) -> Result<()> {
     );

     // Set up AK and SK
-    rustfs_ecstore::global::init_global_action_credentials(Some(opt.access_key.clone()), Some(opt.secret_key.clone()));
+    match init_global_action_credentials(Some(opt.access_key.clone()), Some(opt.secret_key.clone())) {
+        Ok(_) => {
+            info!(target: "rustfs::main::run", "Global action credentials initialized successfully.");
+        }
+        Err(e) => {
+            let msg = format!("init_global_action_credentials failed: {e:?}");
+            error!("{msg}");
+            return Err(Error::other(msg));
+        }
+    };

     set_global_rustfs_port(server_port);
@@ -12,34 +12,129 @@
 // See the License for the specific language governing permissions and
 // limitations under the License.

-use rustfs_common::set_global_root_cert;
+use rustfs_common::{MtlsIdentityPem, set_global_mtls_identity, set_global_root_cert};
 use rustfs_config::{RUSTFS_CA_CERT, RUSTFS_PUBLIC_CERT, RUSTFS_TLS_CERT};
+use rustls::pki_types::{CertificateDer, PrivateKeyDer};
+use std::path::{Path, PathBuf};
 use tracing::{debug, info};

-/// Initialize TLS certificates for inter-node communication.
-/// This function attempts to load certificates from the specified `tls_path`.
-/// It looks for `rustfs_cert.pem`, `public.crt`, and `ca.crt` files.
-/// Additionally, it tries to load system root certificates from common locations
-/// to ensure trust for public CAs when mixing self-signed and public certificates.
-/// If any certificates are found, they are set as the global root certificates.
-pub(crate) async fn init_cert(tls_path: &str) {
+#[derive(Debug)]
+pub enum RustFSError {
+    Cert(String),
+}
+
+impl std::fmt::Display for RustFSError {
+    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
+        match self {
+            RustFSError::Cert(msg) => write!(f, "Certificate error: {}", msg),
+        }
+    }
+}
+
+impl std::error::Error for RustFSError {}
+
+/// Parse PEM-encoded certificates into DER format.
+/// Returns a vector of DER-encoded certificates.
+///
+/// # Arguments
+/// * `pem` - A byte slice containing the PEM-encoded certificates.
+///
+/// # Returns
+/// A vector of `CertificateDer` containing the DER-encoded certificates.
+///
+/// # Errors
+/// Returns `RustFSError` if parsing fails.
+fn parse_pem_certs(pem: &[u8]) -> Result<Vec<CertificateDer<'static>>, RustFSError> {
+    let mut out = Vec::new();
+    let mut reader = std::io::Cursor::new(pem);
+    for item in rustls_pemfile::certs(&mut reader) {
+        let c = item.map_err(|e| RustFSError::Cert(format!("parse cert pem: {e}")))?;
+        out.push(c);
+    }
+    Ok(out)
+}
+
+/// Parse a PEM-encoded private key into DER format.
+/// Supports PKCS#8 and RSA private keys.
+///
+/// # Arguments
+/// * `pem` - A byte slice containing the PEM-encoded private key.
+///
+/// # Returns
+/// A `PrivateKeyDer` containing the DER-encoded private key.
+///
+/// # Errors
+/// Returns `RustFSError` if parsing fails or no key is found.
+fn parse_pem_private_key(pem: &[u8]) -> Result<PrivateKeyDer<'static>, RustFSError> {
+    let mut reader = std::io::Cursor::new(pem);
+    let key = rustls_pemfile::private_key(&mut reader).map_err(|e| RustFSError::Cert(format!("parse private key pem: {e}")))?;
+    key.ok_or_else(|| RustFSError::Cert("no private key found in PEM".into()))
+}
+
+/// Helper function to read a file and return its contents.
+/// Returns the file contents as a vector of bytes.
+/// # Errors
+/// Returns `RustFSError` if reading fails.
+async fn read_file(path: &PathBuf, desc: &str) -> Result<Vec<u8>, RustFSError> {
+    tokio::fs::read(path)
+        .await
+        .map_err(|e| RustFSError::Cert(format!("read {} {:?}: {e}", desc, path)))
+}
+
+/// Initialize TLS material for both server and outbound client connections.
+///
+/// Loads roots from:
+/// - `${RUSTFS_TLS_PATH}/ca.crt` (or `tls/ca.crt`)
+/// - `${RUSTFS_TLS_PATH}/public.crt` (optional additional root bundle)
+/// - system roots if `RUSTFS_TRUST_SYSTEM_CA=true` (default: false)
+/// - if `RUSTFS_TRUST_LEAF_CERT_AS_CA=true`, also loads leaf cert(s) from
+///   `${RUSTFS_TLS_PATH}/rustfs_cert.pem` into the root store.
+///
+/// Loads mTLS client identity (optional) from:
+/// - `${RUSTFS_TLS_PATH}/client_cert.pem`
+/// - `${RUSTFS_TLS_PATH}/client_key.pem`
+///
+/// Environment overrides:
+/// - RUSTFS_TLS_PATH
+/// - RUSTFS_MTLS_CLIENT_CERT
+/// - RUSTFS_MTLS_CLIENT_KEY
+pub(crate) async fn init_cert(tls_path: &str) -> Result<(), RustFSError> {
+    if tls_path.is_empty() {
+        info!("No TLS path configured; skipping certificate initialization");
+        return Ok(());
+    }
+    let tls_dir = PathBuf::from(tls_path);
+
+    // Load root certificates
+    load_root_certs(&tls_dir).await?;
+
+    // Load optional mTLS identity
+    load_mtls_identity(&tls_dir).await?;
+
+    Ok(())
+}
+
+/// Load root certificates from various sources.
+async fn load_root_certs(tls_dir: &Path) -> Result<(), RustFSError> {
     let mut cert_data = Vec::new();

     // Try rustfs_cert.pem (custom cert name)
-    walk_dir(std::path::PathBuf::from(tls_path), RUSTFS_TLS_CERT, &mut cert_data).await;
+    let trust_leaf_as_ca =
+        rustfs_utils::get_env_bool(rustfs_config::ENV_TRUST_LEAF_CERT_AS_CA, rustfs_config::DEFAULT_TRUST_LEAF_CERT_AS_CA);
+    if trust_leaf_as_ca {
+        walk_dir(tls_dir.to_path_buf(), RUSTFS_TLS_CERT, &mut cert_data).await;
+        info!("Loaded leaf certificate(s) as root CA as per RUSTFS_TRUST_LEAF_CERT_AS_CA");
+    }

-    // Try public.crt (common CA name)
-    let public_cert_path = std::path::Path::new(tls_path).join(RUSTFS_PUBLIC_CERT);
+    // Try public.crt and ca.crt
+    let public_cert_path = tls_dir.join(RUSTFS_PUBLIC_CERT);
     load_cert_file(public_cert_path.to_str().unwrap_or_default(), &mut cert_data, "CA certificate").await;

-    // Try ca.crt (common CA name)
-    let ca_cert_path = std::path::Path::new(tls_path).join(RUSTFS_CA_CERT);
+    let ca_cert_path = tls_dir.join(RUSTFS_CA_CERT);
     load_cert_file(ca_cert_path.to_str().unwrap_or_default(), &mut cert_data, "CA certificate").await;

     // Load system root certificates if enabled
     let trust_system_ca = rustfs_utils::get_env_bool(rustfs_config::ENV_TRUST_SYSTEM_CA, rustfs_config::DEFAULT_TRUST_SYSTEM_CA);
-    if !trust_system_ca {
-        // Attempt to load system root certificates to maintain trust for public CAs
-        // This is important when mixing self-signed internal certs with public external certs
+    if trust_system_ca {
         let system_ca_paths = [
             "/etc/ssl/certs/ca-certificates.crt", // Debian/Ubuntu/Alpine
             "/etc/pki/tls/certs/ca-bundle.crt",   // Fedora/RHEL/CentOS
@@ -57,7 +152,7 @@ pub(crate) async fn init_cert(tls_path: &str) {
             if load_cert_file(path, &mut cert_data, "system root certificates").await {
                 system_cert_loaded = true;
                 info!("Loaded system root certificates from {}", path);
-                break; // Stop after finding the first valid bundle
+                break;
             }
         }

@@ -67,10 +162,51 @@ pub(crate) async fn init_cert(tls_path: &str) {
     } else {
         info!("Loading system root certificates disabled via RUSTFS_TRUST_SYSTEM_CA");
     }

     if !cert_data.is_empty() {
         set_global_root_cert(cert_data).await;
         info!("Configured custom root certificates for inter-node communication");
     }
+
+    Ok(())
+}
+
+/// Load optional mTLS identity.
+async fn load_mtls_identity(tls_dir: &Path) -> Result<(), RustFSError> {
+    let client_cert_path = match rustfs_utils::get_env_opt_str(rustfs_config::ENV_MTLS_CLIENT_CERT) {
+        Some(p) => PathBuf::from(p),
+        None => tls_dir.join(rustfs_config::RUSTFS_CLIENT_CERT_FILENAME),
+    };
+
+    let client_key_path = match rustfs_utils::get_env_opt_str(rustfs_config::ENV_MTLS_CLIENT_KEY) {
+        Some(p) => PathBuf::from(p),
+        None => tls_dir.join(rustfs_config::RUSTFS_CLIENT_KEY_FILENAME),
+    };
+
+    if client_cert_path.exists() && client_key_path.exists() {
+        let cert_bytes = read_file(&client_cert_path, "client cert").await?;
+        let key_bytes = read_file(&client_key_path, "client key").await?;
+
+        // Validate parse-ability early; store as PEM bytes for tonic.
+        parse_pem_certs(&cert_bytes)?;
+        parse_pem_private_key(&key_bytes)?;
+
+        let identity_pem = MtlsIdentityPem {
+            cert_pem: cert_bytes,
+            key_pem: key_bytes,
+        };
+
+        set_global_mtls_identity(Some(identity_pem)).await;
+        info!("Loaded mTLS client identity cert={:?} key={:?}", client_cert_path, client_key_path);
+    } else {
+        set_global_mtls_identity(None).await;
+        info!(
+            "mTLS client identity not configured (missing {:?} and/or {:?}); proceeding with server-only TLS",
+            client_cert_path, client_key_path
+        );
+    }
+
+    Ok(())
+}

 /// Helper function to load a certificate file and append to cert_data.
@@ -114,7 +250,7 @@ async fn load_if_matches(entry: &tokio::fs::DirEntry, cert_name: &str, cert_data
 /// - `path`: The starting directory path to search for certificates.
 /// - `cert_name`: The name of the certificate file to look for.
 /// - `cert_data`: A mutable vector to append loaded certificate data.
-async fn walk_dir(path: std::path::PathBuf, cert_name: &str, cert_data: &mut Vec<u8>) {
+async fn walk_dir(path: PathBuf, cert_name: &str, cert_data: &mut Vec<u8>) {
     if let Ok(mut rd) = tokio::fs::read_dir(&path).await {
         while let Ok(Some(entry)) = rd.next_entry().await {
             if let Ok(ft) = entry.file_type().await {
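The early-validation step above parses the PEM material before storing it, so a bad file fails at startup rather than on the first connection. A minimal sketch of that check with the same `rustls-pemfile` 2.x calls the helpers rely on (the `String` error type here is illustrative, not the crate's):

```rust
// Sketch: validate a PEM cert/key pair the way parse_pem_certs /
// parse_pem_private_key do above, assuming rustls-pemfile 2.x.
fn validate_identity(cert_pem: &[u8], key_pem: &[u8]) -> Result<(), String> {
    // certs() yields an iterator of Result<CertificateDer<'static>, io::Error>
    let certs: Vec<_> = rustls_pemfile::certs(&mut std::io::Cursor::new(cert_pem))
        .collect::<Result<_, _>>()
        .map_err(|e| format!("bad cert pem: {e}"))?;
    if certs.is_empty() {
        return Err("no certificates found in PEM".into());
    }
    // private_key() returns Ok(None) when the PEM holds no usable key
    rustls_pemfile::private_key(&mut std::io::Cursor::new(key_pem))
        .map_err(|e| format!("bad key pem: {e}"))?
        .ok_or_else(|| "no private key found in PEM".to_string())?;
    Ok(())
}
```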
@@ -17,7 +17,7 @@ use super::compress::{CompressionConfig, CompressionPredicate};
 use crate::admin;
 use crate::auth::IAMAuth;
 use crate::config;
-use crate::server::{ReadinessGateLayer, ServiceState, ServiceStateManager, hybrid::hybrid, layer::RedirectLayer};
+use crate::server::{ReadinessGateLayer, RemoteAddr, ServiceState, ServiceStateManager, hybrid::hybrid, layer::RedirectLayer};
 use crate::storage;
 use crate::storage::tonic_service::make_server;
 use bytes::Bytes;
@@ -30,7 +30,7 @@ use hyper_util::{
 };
 use metrics::{counter, histogram};
 use rustfs_common::GlobalReadiness;
-use rustfs_config::{DEFAULT_ACCESS_KEY, DEFAULT_SECRET_KEY, MI_B, RUSTFS_TLS_CERT, RUSTFS_TLS_KEY};
+use rustfs_config::{MI_B, RUSTFS_TLS_CERT, RUSTFS_TLS_KEY};
 use rustfs_protos::proto_gen::node_service::node_service_server::NodeServiceServer;
 use rustfs_utils::net::parse_and_resolve_address;
 use rustls::ServerConfig;
@@ -44,6 +44,7 @@ use tokio::net::{TcpListener, TcpStream};
 use tokio_rustls::TlsAcceptor;
 use tonic::{Request, Status, metadata::MetadataValue};
 use tower::ServiceBuilder;
+use tower_http::add_extension::AddExtensionLayer;
 use tower_http::catch_panic::CatchPanicLayer;
 use tower_http::compression::CompressionLayer;
 use tower_http::cors::{AllowOrigin, Any, CorsLayer};
@@ -212,10 +213,13 @@ pub async fn start_http_server(
     info!(target: "rustfs::main::startup","RustFS API: {api_endpoints} {localhost_endpoint}");
     println!("RustFS Http API: {api_endpoints} {localhost_endpoint}");
     println!("RustFS Start Time: {now_time}");
-    if DEFAULT_ACCESS_KEY.eq(&opt.access_key) && DEFAULT_SECRET_KEY.eq(&opt.secret_key) {
+    if rustfs_credentials::DEFAULT_ACCESS_KEY.eq(&opt.access_key)
+        && rustfs_credentials::DEFAULT_SECRET_KEY.eq(&opt.secret_key)
+    {
         warn!(
             "Detected default credentials '{}:{}', we recommend that you change these values with 'RUSTFS_ACCESS_KEY' and 'RUSTFS_SECRET_KEY' environment variables",
-            DEFAULT_ACCESS_KEY, DEFAULT_SECRET_KEY
+            rustfs_credentials::DEFAULT_ACCESS_KEY,
+            rustfs_credentials::DEFAULT_SECRET_KEY
         );
     }
     info!(target: "rustfs::main::startup","For more information, visit https://rustfs.com/docs/");
@@ -427,11 +431,11 @@ async fn setup_tls_acceptor(tls_path: &str) -> Result<Option<TlsAcceptor>> {
         debug!("TLS path is not provided or does not exist, starting with HTTP");
         return Ok(None);
     }

     debug!("Found TLS directory, checking for certificates");

     // Make sure to use a modern encryption suite
     let _ = rustls::crypto::ring::default_provider().install_default();
+    let mtls_verifier = rustfs_utils::build_webpki_client_verifier(tls_path)?;

     // 1. Attempt to load all certificates in the directory (multi-certificate support, for SNI)
     if let Ok(cert_key_pairs) = rustfs_utils::load_all_certs_from_directory(tls_path) {
@@ -442,9 +446,15 @@ async fn setup_tls_acceptor(tls_path: &str) -> Result<Option<TlsAcceptor>> {
         let resolver = rustfs_utils::create_multi_cert_resolver(cert_key_pairs)?;

         // Configure the server to enable SNI support
-        let mut server_config = ServerConfig::builder()
-            .with_no_client_auth()
-            .with_cert_resolver(Arc::new(resolver));
+        let mut server_config = if let Some(verifier) = mtls_verifier.clone() {
+            ServerConfig::builder()
+                .with_client_cert_verifier(verifier)
+                .with_cert_resolver(Arc::new(resolver))
+        } else {
+            ServerConfig::builder()
+                .with_no_client_auth()
+                .with_cert_resolver(Arc::new(resolver))
+        };

         // Configure ALPN protocol priority
         server_config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec(), b"http/1.0".to_vec()];
@@ -466,10 +476,17 @@ async fn setup_tls_acceptor(tls_path: &str) -> Result<Option<TlsAcceptor>> {
     let certs = rustfs_utils::load_certs(&cert_path).map_err(|e| rustfs_utils::certs_error(e.to_string()))?;
     let key = rustfs_utils::load_private_key(&key_path).map_err(|e| rustfs_utils::certs_error(e.to_string()))?;

-    let mut server_config = ServerConfig::builder()
-        .with_no_client_auth()
-        .with_single_cert(certs, key)
-        .map_err(|e| rustfs_utils::certs_error(e.to_string()))?;
+    let mut server_config = if let Some(verifier) = mtls_verifier {
+        ServerConfig::builder()
+            .with_client_cert_verifier(verifier)
+            .with_single_cert(certs, key)
+            .map_err(|e| rustfs_utils::certs_error(e.to_string()))?
+    } else {
+        ServerConfig::builder()
+            .with_no_client_auth()
+            .with_single_cert(certs, key)
+            .map_err(|e| rustfs_utils::certs_error(e.to_string()))?
+    };

     // Configure ALPN protocol priority
     server_config.alpn_protocols = vec![b"h2".to_vec(), b"http/1.1".to_vec(), b"http/1.0".to_vec()];
@@ -525,9 +542,21 @@ fn process_connection(
     let rpc_service = NodeServiceServer::with_interceptor(make_server(), check_auth);
     let service = hybrid(s3_service, rpc_service);

+    let remote_addr = match socket.peer_addr() {
+        Ok(addr) => Some(RemoteAddr(addr)),
+        Err(e) => {
+            tracing::warn!(
+                error = %e,
+                "Failed to obtain peer address; policy evaluation may fall back to a default source IP"
+            );
+            None
+        }
+    };
+
     let hybrid_service = ServiceBuilder::new()
         .layer(SetRequestIdLayer::x_request_id(MakeRequestUuid))
         .layer(CatchPanicLayer::new())
+        .layer(AddExtensionLayer::new(remote_addr))
         // CRITICAL: Insert ReadinessGateLayer before business logic
         // This stops requests from hitting IAMAuth or Storage if they are not ready.
         .layer(ReadinessGateLayer::new(readiness))
@@ -685,7 +714,12 @@ fn handle_connection_error(err: &(dyn std::error::Error + 'static)) {

 #[allow(clippy::result_large_err)]
 fn check_auth(req: Request<()>) -> std::result::Result<Request<()>, Status> {
-    let token: MetadataValue<_> = "rustfs rpc".parse().unwrap();
+    let token_str = rustfs_credentials::get_grpc_token();
+
+    let token: MetadataValue<_> = token_str.parse().map_err(|e| {
+        error!("Failed to parse RUSTFS_GRPC_AUTH_TOKEN into gRPC metadata value: {}", e);
+        Status::internal("Invalid auth token configuration")
+    })?;

     match req.metadata().get("authorization") {
         Some(t) if token == t => Ok(req),
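With the hard-coded `"rustfs rpc"` token gone, peers must send whatever shared secret `get_grpc_token()` resolves (per the error message above, configured via `RUSTFS_GRPC_AUTH_TOKEN`). A minimal sketch of the client-side counterpart, using only standard tonic metadata calls; the wiring is illustrative, not the actual RustFS client code:

```rust
use tonic::{Request, Status, metadata::MetadataValue};

// Sketch: attach the shared token under the `authorization` key that
// check_auth inspects on the server side.
fn attach_auth(mut req: Request<()>, token: &str) -> Result<Request<()>, Status> {
    let value: MetadataValue<_> = token
        .parse()
        .map_err(|_| Status::internal("token is not valid gRPC metadata"))?;
    req.metadata_mut().insert("authorization", value);
    Ok(req)
}
```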
@@ -36,3 +36,6 @@ pub(crate) use service_state::ServiceState;
 pub(crate) use service_state::ServiceStateManager;
 pub(crate) use service_state::ShutdownSignal;
 pub(crate) use service_state::wait_for_shutdown;
+
+#[derive(Clone, Copy, Debug)]
+pub struct RemoteAddr(pub std::net::SocketAddr);
@@ -15,9 +15,9 @@
 use super::ecfs::FS;
 use crate::auth::{check_key_valid, get_condition_values, get_session_token};
 use crate::license::license_check;
+use crate::server::RemoteAddr;
 use rustfs_ecstore::bucket::policy_sys::PolicySys;
 use rustfs_iam::error::Error as IamError;
-use rustfs_policy::auth;
 use rustfs_policy::policy::action::{Action, S3Action};
 use rustfs_policy::policy::{Args, BucketPolicyArgs};
 use s3s::access::{S3Access, S3AccessContext};
@@ -27,7 +27,7 @@ use std::collections::HashMap;
 #[allow(dead_code)]
 #[derive(Default, Clone)]
 pub(crate) struct ReqInfo {
-    pub cred: Option<auth::Credentials>,
+    pub cred: Option<rustfs_credentials::Credentials>,
     pub is_owner: bool,
     pub bucket: Option<String>,
     pub object: Option<String>,
@@ -37,6 +37,7 @@ pub(crate) struct ReqInfo {

 /// Authorizes the request based on the action and credentials.
 pub async fn authorize_request<T>(req: &mut S3Request<T>, action: Action) -> S3Result<()> {
+    let remote_addr = req.extensions.get::<RemoteAddr>().map(|a| a.0);
     let req_info = req.extensions.get_mut::<ReqInfo>().expect("ReqInfo not found");

     if let Some(cred) = &req_info.cred {
@@ -49,7 +50,7 @@ pub async fn authorize_request<T>(req: &mut S3Request<T>, action: Action) -> S3R

     let default_claims = HashMap::new();
     let claims = cred.claims.as_ref().unwrap_or(&default_claims);
-    let conditions = get_condition_values(&req.headers, cred, req_info.version_id.as_deref(), None);
+    let conditions = get_condition_values(&req.headers, cred, req_info.version_id.as_deref(), None, remote_addr);

     if action == Action::S3Action(S3Action::DeleteObjectAction)
         && req_info.version_id.is_some()
@@ -107,9 +108,10 @@ pub async fn authorize_request<T>(req: &mut S3Request<T>, action: Action) -> S3R
     } else {
         let conditions = get_condition_values(
             &req.headers,
-            &auth::Credentials::default(),
+            &rustfs_credentials::Credentials::default(),
             req_info.version_id.as_deref(),
             req.region.as_deref(),
+            remote_addr,
         );

         if action != Action::S3Action(S3Action::ListAllMyBucketsAction) {
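The hand-off between the connection layer and this authorizer runs entirely through request extensions: `process_connection` stores the peer address once via `AddExtensionLayer`, and `authorize_request` reads it back. A self-contained sketch of that pattern with `http::Extensions` directly (the layer wiring is omitted here):

```rust
use std::net::SocketAddr;

#[derive(Clone, Copy, Debug)]
struct RemoteAddr(SocketAddr);

fn demo() {
    let mut extensions = http::Extensions::new();
    let peer: SocketAddr = "203.0.113.7:51000".parse().unwrap();
    extensions.insert(RemoteAddr(peer)); // set once per connection

    // Later, a handler recovers the plain SocketAddr:
    let remote_addr = extensions.get::<RemoteAddr>().map(|a| a.0);
    assert_eq!(remote_addr, Some(peer));
}
```

The newtype wrapper matters: extensions are keyed by type, so a bare `SocketAddr` could collide with one stored by another layer.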
@@ -251,6 +251,8 @@ struct IoLoadMetrics {
     recent_waits: Vec<Duration>,
     /// Maximum samples to keep in the window
     max_samples: usize,
+    /// The earliest record index in the recent_waits vector
+    earliest_index: usize,
     /// Total wait time observed (for averaging)
     total_wait_ns: AtomicU64,
     /// Total number of observations
@@ -263,6 +265,7 @@ impl IoLoadMetrics {
         Self {
             recent_waits: Vec::with_capacity(max_samples),
             max_samples,
+            earliest_index: 0,
             total_wait_ns: AtomicU64::new(0),
             observation_count: AtomicU64::new(0),
         }
@@ -271,10 +274,12 @@ impl IoLoadMetrics {
     /// Record a new permit wait observation
     fn record(&mut self, wait: Duration) {
         // Add to recent waits (with eviction if full)
-        if self.recent_waits.len() >= self.max_samples {
-            self.recent_waits.remove(0);
+        if self.recent_waits.len() < self.max_samples {
+            self.recent_waits.push(wait);
+        } else {
+            self.recent_waits[self.earliest_index] = wait;
+            self.earliest_index = (self.earliest_index + 1) % self.max_samples;
         }
-        self.recent_waits.push(wait);

         // Update totals for overall statistics
         self.total_wait_ns.fetch_add(wait.as_nanos() as u64, Ordering::Relaxed);
@@ -1867,4 +1872,154 @@ mod tests {
         assert!(manager.is_cached("warm2").await);
         assert!(manager.is_cached("warm3").await);
     }

+    #[test]
+    fn test_io_load_metrics_record_not_full() {
+        let mut metrics = IoLoadMetrics::new(5);
+        metrics.record(Duration::from_millis(10));
+        metrics.record(Duration::from_millis(20));
+        assert_eq!(metrics.recent_waits.len(), 2);
+        assert_eq!(metrics.recent_waits[0], Duration::from_millis(10));
+        assert_eq!(metrics.recent_waits[1], Duration::from_millis(20));
+        assert_eq!(metrics.observation_count(), 2);
+    }
+
+    #[test]
+    fn test_io_load_metrics_record_full_and_circular() {
+        let mut metrics = IoLoadMetrics::new(3);
+        metrics.record(Duration::from_millis(10));
+        metrics.record(Duration::from_millis(20));
+        metrics.record(Duration::from_millis(30));
+        assert_eq!(metrics.recent_waits.len(), 3);
+        assert_eq!(metrics.earliest_index, 0);
+
+        // This should overwrite the first element (10ms)
+        metrics.record(Duration::from_millis(40));
+        assert_eq!(metrics.recent_waits.len(), 3);
+        assert_eq!(metrics.recent_waits[0], Duration::from_millis(40));
+        assert_eq!(metrics.recent_waits[1], Duration::from_millis(20));
+        assert_eq!(metrics.recent_waits[2], Duration::from_millis(30));
+        assert_eq!(metrics.earliest_index, 1);
+        assert_eq!(metrics.observation_count(), 4);
+
+        // This should overwrite the second element (20ms)
+        metrics.record(Duration::from_millis(50));
+        assert_eq!(metrics.recent_waits.len(), 3);
+        assert_eq!(metrics.recent_waits[0], Duration::from_millis(40));
+        assert_eq!(metrics.recent_waits[1], Duration::from_millis(50));
+        assert_eq!(metrics.recent_waits[2], Duration::from_millis(30));
+        assert_eq!(metrics.earliest_index, 2);
+        assert_eq!(metrics.observation_count(), 5);
+
+        // This should overwrite the third element (30ms)
+        metrics.record(Duration::from_millis(60));
+        assert_eq!(metrics.recent_waits.len(), 3);
+        assert_eq!(metrics.recent_waits[0], Duration::from_millis(40));
+        assert_eq!(metrics.recent_waits[1], Duration::from_millis(50));
+        assert_eq!(metrics.recent_waits[2], Duration::from_millis(60));
+        assert_eq!(metrics.earliest_index, 0);
+        assert_eq!(metrics.observation_count(), 6);
+    }
+
+    #[test]
+    fn test_io_load_metrics_average_wait() {
+        let mut metrics = IoLoadMetrics::new(3);
+        metrics.record(Duration::from_millis(10));
+        metrics.record(Duration::from_millis(20));
+        metrics.record(Duration::from_millis(30));
+        assert_eq!(metrics.average_wait(), Duration::from_millis(20));
+
+        // Overwrite 10ms with 40ms, new avg = (20+30+40)/3 = 30
+        metrics.record(Duration::from_millis(40));
+        assert_eq!(metrics.average_wait(), Duration::from_millis(30));
+    }
+
+    #[test]
+    fn test_io_load_metrics_max_wait() {
+        let mut metrics = IoLoadMetrics::new(3);
+        assert_eq!(metrics.max_wait(), Duration::ZERO);
+        metrics.record(Duration::from_millis(40));
+        metrics.record(Duration::from_millis(30));
+        metrics.record(Duration::from_millis(20));
+        assert_eq!(metrics.max_wait(), Duration::from_millis(40));
+
+        // Overwrite 40ms with 5ms, max should still be 30
+        metrics.record(Duration::from_millis(5));
+        assert_eq!(metrics.max_wait(), Duration::from_millis(30));
+
+        // Overwrite 30ms with 10ms
+        metrics.record(Duration::from_millis(10));
+        assert_eq!(metrics.max_wait(), Duration::from_millis(20));
+    }
+
+    #[test]
+    fn test_io_load_metrics_p95_wait() {
+        let mut metrics = IoLoadMetrics::new(20);
+        for i in 1..=20 {
+            metrics.record(Duration::from_millis(i * 5)); // 5, 10, ..., 100
+        }
+        assert_eq!(metrics.p95_wait(), Duration::from_millis(100));
+
+        // Test with different values
+        let mut metrics = IoLoadMetrics::new(10);
+        metrics.record(Duration::from_millis(10));
+        metrics.record(Duration::from_millis(20));
+        metrics.record(Duration::from_millis(30));
+        metrics.record(Duration::from_millis(40));
+        metrics.record(Duration::from_millis(50));
+        metrics.record(Duration::from_millis(60));
+        metrics.record(Duration::from_millis(70));
+        metrics.record(Duration::from_millis(80));
+        metrics.record(Duration::from_millis(90));
+        metrics.record(Duration::from_millis(1000)); // outlier
+        assert_eq!(metrics.p95_wait(), Duration::from_millis(1000));
+    }
+
+    #[test]
+    fn test_io_load_metrics_smoothed_load_level() {
+        let mut metrics = IoLoadMetrics::new(3);
+        // Average is low
+        metrics.record(Duration::from_millis(5));
+        metrics.record(Duration::from_millis(8));
+        assert_eq!(metrics.smoothed_load_level(), IoLoadLevel::Low);
+
+        // Average is medium
+        metrics.record(Duration::from_millis(40)); // avg = (5+8+40)/3 = 17.6 -> Medium
+        assert_eq!(metrics.smoothed_load_level(), IoLoadLevel::Medium);
+
+        // Average is High
+        metrics.record(Duration::from_millis(100)); // avg = (8+40+100)/3 = 49.3 -> Medium
+        assert_eq!(metrics.smoothed_load_level(), IoLoadLevel::Medium);
+
+        metrics.record(Duration::from_millis(100)); // avg = (40+100+100)/3 = 80 -> High
+        assert_eq!(metrics.smoothed_load_level(), IoLoadLevel::High);
+
+        // Average is Critical
+        metrics.record(Duration::from_millis(300)); // avg = (100+100+300)/3 = 166.6 -> High
+        assert_eq!(metrics.smoothed_load_level(), IoLoadLevel::High);
+
+        metrics.record(Duration::from_millis(300)); // avg = (100+300+300)/3 = 233.3 -> Critical
+        assert_eq!(metrics.smoothed_load_level(), IoLoadLevel::Critical);
+    }
+
+    #[test]
+    fn test_io_load_metrics_lifetime_average() {
+        let mut metrics = IoLoadMetrics::new(2);
+        metrics.record(Duration::from_millis(10));
+        metrics.record(Duration::from_millis(20));
+        // total = 30, count = 2, avg = 15
+        assert_eq!(metrics.lifetime_average_wait(), Duration::from_millis(15));
+
+        metrics.record(Duration::from_millis(30)); // recent=(20, 30), but lifetime avg is over all records
+        // total = 10+20+30=60, count = 3, avg = 20
+        let total_ns = metrics.total_wait_ns.load(Ordering::Relaxed);
+        let count = metrics.observation_count.load(Ordering::Relaxed);
+        assert_eq!(total_ns, 60_000_000);
+        assert_eq!(count, 3);
+        assert_eq!(metrics.lifetime_average_wait(), Duration::from_millis(20));
+
+        metrics.record(Duration::from_millis(40));
+        // total = 60+40=100, count = 4, avg = 25
+        assert_eq!(metrics.lifetime_average_wait(), Duration::from_millis(25));
+    }
 }
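The `record()` change above is the classic fixed-window ring buffer: `Vec::remove(0)` shifts every remaining element and is O(n) per sample, while overwriting at a rotating index is O(1). A minimal standalone version of the same technique, reduced to plain `u64` samples:

```rust
// Minimal sketch of the ring-buffer window used by IoLoadMetrics::record.
struct Window {
    samples: Vec<u64>,
    max: usize,
    earliest: usize, // index of the oldest sample once the window is full
}

impl Window {
    fn new(max: usize) -> Self {
        Self { samples: Vec::with_capacity(max), max, earliest: 0 }
    }

    fn record(&mut self, v: u64) {
        if self.samples.len() < self.max {
            self.samples.push(v); // still filling the window
        } else {
            self.samples[self.earliest] = v; // overwrite the oldest sample
            self.earliest = (self.earliest + 1) % self.max;
        }
    }
}
```

One consequence the new tests make explicit: after wrap-around the vector is no longer in arrival order, so order-sensitive consumers must go through `earliest_index` rather than index 0.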
@@ -17,6 +17,7 @@ use crate::config::workload_profiles::{
     RustFSBufferConfig, WorkloadProfile, get_global_buffer_config, is_buffer_profile_enabled,
 };
 use crate::error::ApiError;
+use crate::server::RemoteAddr;
 use crate::storage::concurrency::{
     CachedGetObject, ConcurrencyManager, GetObjectGuard, get_concurrency_aware_buffer_size, get_concurrency_manager,
 };
@@ -93,12 +94,9 @@ use rustfs_kms::{
     types::{EncryptionMetadata, ObjectEncryptionContext},
 };
 use rustfs_notify::{EventArgsBuilder, notifier_global};
-use rustfs_policy::{
-    auth,
-    policy::{
-        action::{Action, S3Action},
-        {BucketPolicy, BucketPolicyArgs, Validator},
-    },
+use rustfs_policy::policy::{
+    action::{Action, S3Action},
+    {BucketPolicy, BucketPolicyArgs, Validator},
 };
 use rustfs_rio::{CompressReader, DecryptReader, EncryptReader, EtagReader, HardLimitReader, HashReader, Reader, WarpReader};
 use rustfs_s3select_api::{
@@ -119,11 +117,13 @@ use rustfs_utils::{
             RESERVED_METADATA_PREFIX_LOWER,
         },
     },
+    obj::extract_user_defined_metadata,
     path::{is_dir_object, path_join_buf},
 };
 use rustfs_zip::CompressionFormat;
 use s3s::header::{X_AMZ_RESTORE, X_AMZ_RESTORE_OUTPUT_PATH};
 use s3s::{S3, S3Error, S3ErrorCode, S3Request, S3Response, S3Result, dto::*, s3_error};
+use serde_urlencoded::from_bytes;
 use std::convert::Infallible;
 use std::ops::Add;
 use std::{
@@ -376,6 +376,12 @@ fn derive_part_nonce(base: [u8; 12], part_number: usize) -> [u8; 12] {
     nonce
 }

+#[derive(Debug, Default, serde::Deserialize)]
+struct ListObjectUnorderedQuery {
+    #[serde(rename = "allow-unordered")]
+    allow_unordered: Option<String>,
+}
+
 struct InMemoryAsyncReader {
     cursor: std::io::Cursor<Vec<u8>>,
 }
@@ -494,6 +500,37 @@ fn validate_object_key(key: &str, operation: &str) -> S3Result<()> {
     Ok(())
 }

+/// Validate that 'allow-unordered' parameter is not used with a delimiter
+///
+/// This function:
+/// 1. Checks if a delimiter is specified in the ListObjects request
+/// 2. Parses the query string to check for the 'allow-unordered' parameter
+/// 3. Rejects the request if both 'delimiter' and 'allow-unordered=true' are present
+///
+/// According to S3 compatibility requirements, unordered listing cannot be combined with
+/// hierarchical directory traversal (delimited listing). This validation ensures
+/// conflicting parameters are caught before processing the request.
+fn validate_list_object_unordered_with_delimiter(delimiter: Option<&Delimiter>, query_string: Option<&str>) -> S3Result<()> {
+    if delimiter.is_none() {
+        return Ok(());
+    }
+
+    let Some(query) = query_string else {
+        return Ok(());
+    };
+
+    if let Ok(params) = from_bytes::<ListObjectUnorderedQuery>(query.as_bytes()) {
+        if params.allow_unordered.as_deref() == Some("true") {
+            return Err(S3Error::with_message(
+                S3ErrorCode::InvalidArgument,
+                "The allow-unordered parameter cannot be used when delimiter is specified.".to_string(),
+            ));
+        }
+    }
+
+    Ok(())
+}
+
 impl FS {
     pub fn new() -> Self {
         // let store: ECStore = ECStore::new(address, endpoint_pools).await?;
@@ -813,6 +850,8 @@ impl S3 for FS {
             sse_customer_algorithm,
             sse_customer_key,
             sse_customer_key_md5,
+            metadata_directive,
+            metadata,
             ..
         } = req.input.clone();
         let (src_bucket, src_key, version_id) = match copy_source {
@@ -1001,7 +1040,6 @@ impl S3 for FS {
         src_info.put_object_reader = Some(PutObjReader::new(reader));

         // check quota
-        // TODO: src metadata

         for (k, v) in compress_metadata {
             src_info.user_defined.insert(k, v);
@@ -1020,7 +1058,15 @@ impl S3 for FS {
                 .insert("x-amz-server-side-encryption-customer-key-md5".to_string(), sse_md5.clone());
         }

-        // TODO: src tags
+        if metadata_directive.as_ref().map(|d| d.as_str()) == Some(MetadataDirective::REPLACE) {
+            let src_user_defined = extract_user_defined_metadata(&src_info.user_defined);
+            src_user_defined.keys().for_each(|k| {
+                src_info.user_defined.remove(k);
+            });
+            if let Some(metadata) = metadata {
+                src_info.user_defined.extend(metadata);
+            }
+        }

         let oi = store
             .copy_object(&src_bucket, &src_key, &bucket, &key, &mut src_info, &src_opts, &dst_opts)
@@ -1032,9 +1078,10 @@ impl S3 for FS {
         let dest_bucket = bucket.clone();
         let dest_key = key.clone();
         let dest_version = oi.version_id.map(|v| v.to_string());
+        let dest_version_clone = dest_version.clone();
         tokio::spawn(async move {
             manager
-                .invalidate_cache_versioned(&dest_bucket, &dest_key, dest_version.as_deref())
+                .invalidate_cache_versioned(&dest_bucket, &dest_key, dest_version_clone.as_deref())
                 .await;
         });
@@ -1052,6 +1099,7 @@ impl S3 for FS {
             ssekms_key_id: effective_kms_key_id,
             sse_customer_algorithm,
             sse_customer_key_md5,
+            version_id: dest_version,
             ..Default::default()
         };
@@ -2806,6 +2854,9 @@ impl S3 for FS {
         }

         let delimiter = delimiter.filter(|v| !v.is_empty());
+
+        validate_list_object_unordered_with_delimiter(delimiter.as_ref(), req.uri.query())?;
+
         let start_after = start_after.filter(|v| !v.is_empty());

         let continuation_token = continuation_token.filter(|v| !v.is_empty());
@@ -3352,9 +3403,10 @@ impl S3 for FS {
             helper = helper.version_id(version_id.clone());
         }

+        let put_version_clone = put_version.clone();
         tokio::spawn(async move {
             manager
-                .invalidate_cache_versioned(&put_bucket, &put_key, put_version.as_deref())
+                .invalidate_cache_versioned(&put_bucket, &put_key, put_version_clone.as_deref())
                 .await;
         });
@@ -3413,6 +3465,7 @@ impl S3 for FS {
             checksum_sha1,
             checksum_sha256,
             checksum_crc64nvme,
+            version_id: put_version,
             ..Default::default()
         };
@@ -4277,9 +4330,10 @@ impl S3 for FS {
         let mpu_bucket = bucket.clone();
         let mpu_key = key.clone();
         let mpu_version = obj_info.version_id.map(|v| v.to_string());
+        let mpu_version_clone = mpu_version.clone();
         tokio::spawn(async move {
             manager
-                .invalidate_cache_versioned(&mpu_bucket, &mpu_key, mpu_version.as_deref())
+                .invalidate_cache_versioned(&mpu_bucket, &mpu_key, mpu_version_clone.as_deref())
                 .await;
         });
@@ -4329,6 +4383,7 @@ impl S3 for FS {
             checksum_sha256: checksum_sha256.clone(),
             checksum_crc64nvme: checksum_crc64nvme.clone(),
             checksum_type: checksum_type.clone(),
+            version_id: mpu_version,
             ..Default::default()
         };
         info!(
@@ -4635,7 +4690,8 @@ impl S3 for FS {
             .await
             .map_err(ApiError::from)?;

-        let conditions = get_condition_values(&req.headers, &auth::Credentials::default(), None, None);
+        let remote_addr = req.extensions.get::<RemoteAddr>().map(|a| a.0);
+        let conditions = get_condition_values(&req.headers, &rustfs_credentials::Credentials::default(), None, None, remote_addr);

         let read_only = PolicySys::is_allowed(&BucketPolicyArgs {
             bucket: &bucket,
@@ -6099,6 +6155,29 @@ mod tests {
         set_buffer_profile_enabled(false);
     }

+    #[test]
+    fn test_validate_list_object_unordered_with_delimiter() {
+        // [1] Normal case: No delimiter specified.
+        assert!(validate_list_object_unordered_with_delimiter(None, Some("allow-unordered=true")).is_ok());
+
+        let delim_str = "/".to_string();
+        let delimiter_some: Option<&Delimiter> = Some(&delim_str);
+        // [2] Normal case: Delimiter is present, but 'allow-unordered' is explicitly set to false.
+        assert!(validate_list_object_unordered_with_delimiter(delimiter_some, Some("allow-unordered=false")).is_ok());
+
+        let query_conflict = Some("allow-unordered=true");
+        // [3] Conflict case: Both delimiter and 'allow-unordered=true' are present.
+        assert!(validate_list_object_unordered_with_delimiter(delimiter_some, query_conflict).is_err());
+
+        let complex_query = Some("allow-unordered=true&abc=123");
+        // [4] Complex query: The validation should still trigger if 'allow-unordered=true' is part of a multi-parameter query.
+        assert!(validate_list_object_unordered_with_delimiter(delimiter_some, complex_query).is_err());
+
+        let complex_query_without_unordered = Some("abc=123&queryType=test");
+        // [5] Multi-parameter query without conflict: if other parameters exist but 'allow-unordered' is missing, the request is allowed.
+        assert!(validate_list_object_unordered_with_delimiter(delimiter_some, complex_query_without_unordered).is_ok());
+    }
+
     // Note: S3Request structure is complex and requires many fields.
     // For real testing, we would need proper integration test setup.
     // Removing this test as it requires too much S3 infrastructure setup.
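The query-string handling above leans on `serde_urlencoded` rather than manual string matching, which is what makes cases [4] and [5] of the test work: unknown keys are ignored by serde's default struct behavior, and a missing key deserializes to `None`. A self-contained round-trip of the same struct:

```rust
// Sketch of the parsing used by validate_list_object_unordered_with_delimiter.
#[derive(Debug, Default, serde::Deserialize)]
struct ListObjectUnorderedQuery {
    #[serde(rename = "allow-unordered")]
    allow_unordered: Option<String>,
}

fn main() {
    // Extra parameters in the query string do not break deserialization.
    let q: ListObjectUnorderedQuery =
        serde_urlencoded::from_bytes(b"allow-unordered=true&prefix=logs/").unwrap();
    assert_eq!(q.allow_unordered.as_deref(), Some("true"));

    // A missing key simply deserializes to None.
    let q2: ListObjectUnorderedQuery = serde_urlencoded::from_bytes(b"prefix=logs/").unwrap();
    assert!(q2.allow_unordered.is_none());
}
```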
@@ -1774,11 +1774,34 @@ impl Node for NodeService {

    async fn get_metrics(&self, request: Request<GetMetricsRequest>) -> Result<Response<GetMetricsResponse>, Status> {
        let request = request.into_inner();
        let mut buf_t = Deserializer::new(Cursor::new(request.metric_type));
        let t: MetricType = Deserialize::deserialize(&mut buf_t).unwrap();

        // Deserialize metric_type with error handling
        let mut buf_t = Deserializer::new(Cursor::new(request.metric_type));
        let t: MetricType = match Deserialize::deserialize(&mut buf_t) {
            Ok(t) => t,
            Err(err) => {
                error!("Failed to deserialize metric_type: {}", err);
                return Ok(Response::new(GetMetricsResponse {
                    success: false,
                    realtime_metrics: Bytes::new(),
                    error_info: Some(format!("Invalid metric_type: {}", err)),
                }));
            }
        };

        // Deserialize opts with error handling
        let mut buf_o = Deserializer::new(Cursor::new(request.opts));
        let opts: CollectMetricsOpts = Deserialize::deserialize(&mut buf_o).unwrap();
        let opts: CollectMetricsOpts = match Deserialize::deserialize(&mut buf_o) {
            Ok(opts) => opts,
            Err(err) => {
                error!("Failed to deserialize opts: {}", err);
                return Ok(Response::new(GetMetricsResponse {
                    success: false,
                    realtime_metrics: Bytes::new(),
                    error_info: Some(format!("Invalid opts: {}", err)),
                }));
            }
        };

        let info = collect_local_metrics(t, &opts).await;

@@ -3648,4 +3671,32 @@ mod tests {
        // Should return None for non-existent disk
        assert!(disk.is_none());
    }

    #[tokio::test]
    async fn test_get_metrics_invalid_metric_type() {
        let service = create_test_node_service();
        let request = Request::new(GetMetricsRequest {
            metric_type: Bytes::from(vec![0x00u8, 0x01u8]), // Invalid rmp data
            opts: Bytes::new(), // Contents do not matter: metric_type fails to deserialize first
        });
        let response = service.get_metrics(request).await.unwrap().into_inner();
        assert!(!response.success);
        assert!(response.error_info.is_some());
    }

    #[tokio::test]
    async fn test_get_metrics_invalid_opts() {
        let service = create_test_node_service();
        // Serialize a valid MetricType
        let metric_type = MetricType::DISK;
        let metric_type_bytes = rmp_serde::to_vec(&metric_type).unwrap();

        let request = Request::new(GetMetricsRequest {
            metric_type: Bytes::from(metric_type_bytes),
            opts: Bytes::from(vec![0x00u8, 0x01u8]), // Invalid rmp data
        });
        let response = service.get_metrics(request).await.unwrap().into_inner();
        assert!(!response.success);
        assert!(response.error_info.is_some());
    }
}

26
scripts/makefile-header.sh
Executable file
@@ -0,0 +1,26 @@
#!/usr/bin/env bash

# Total width of the whole line
WIDTH=100 # adjust if you want longer/shorter lines

print_heading() {
    local title="$1"
    local prefix="## —— "
    local suffix=" "
    local dash="-"

    # length of the visible title block
    local block_len=$(( ${#prefix} + ${#title} + ${#suffix} ))

    # number of dashes needed
    local dash_count=$(( WIDTH - block_len ))

    # build dash line
    local dashes
    dashes=$(printf "%*s" "$dash_count" "" | tr ' ' "$dash")

    # print the final heading
    printf "%s%s%s%s\n" "$prefix" "$title" "$suffix" "$dashes"
}

print_heading "$1"
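For reference, a sample invocation (output abridged here; the dash run is padded so the whole banner spans WIDTH columns, and the exact dash count can shift slightly depending on how the locale counts the multibyte em-dash characters):

    $ ./scripts/makefile-header.sh "Docker builds"
    ## —— Docker builds ------------------------------------------------ (padded to 100 columns)

This produces banners in the same `## —— … ---` style used by the project's make includes.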
316
scripts/s3-tests/README.md
Normal file
@@ -0,0 +1,316 @@
# S3 Compatibility Tests

This directory contains scripts for running S3 compatibility tests against RustFS.

## Quick Start

Run the local S3 compatibility test script:

```bash
./scripts/s3-tests/run.sh
```

The script will automatically:

1. Build RustFS (if needed)
2. Start the service
3. Wait for it to be ready
4. Run S3 compatibility tests
5. Collect results in `artifacts/s3tests-single/`

## Deployment Modes

The script supports four deployment modes, controlled via the `DEPLOY_MODE` environment variable:

### 1. Build Mode (Default)

Compile with `cargo build --release` and run:

```bash
DEPLOY_MODE=build ./scripts/s3-tests/run.sh
# Or simply (build is the default)
./scripts/s3-tests/run.sh

# Force rebuild even if binary exists and is recent
./scripts/s3-tests/run.sh --no-cache
```

**Note**: In build mode, if the binary exists and was compiled less than 30 minutes ago, compilation will be skipped unless `--no-cache` is specified.

### 2. Binary File Mode

Use a pre-compiled binary file:

```bash
# Use default path (./target/release/rustfs)
DEPLOY_MODE=binary ./scripts/s3-tests/run.sh

# Specify custom binary path
DEPLOY_MODE=binary RUSTFS_BINARY=./target/release/rustfs ./scripts/s3-tests/run.sh
```

### 3. Docker Mode

Build the Docker image and run it in a container:

```bash
DEPLOY_MODE=docker ./scripts/s3-tests/run.sh
```

### 4. Existing Service Mode

Connect to an already running RustFS service:

```bash
DEPLOY_MODE=existing S3_HOST=127.0.0.1 S3_PORT=9000 ./scripts/s3-tests/run.sh

# Connect to remote service
DEPLOY_MODE=existing S3_HOST=192.168.1.100 S3_PORT=9000 ./scripts/s3-tests/run.sh
```

## Configuration Options

### Command Line Options

- `-h, --help`: Show help message
- `--no-cache`: Force rebuild even if binary exists and is recent (for build mode)

### Deployment Configuration

- `DEPLOY_MODE`: Deployment mode, options:
  - `build`: Compile with `cargo build --release` and run (default)
  - `binary`: Use pre-compiled binary file
  - `docker`: Build Docker image and run in container
  - `existing`: Use already running service
- `RUSTFS_BINARY`: Path to binary file (for binary mode, default: `./target/release/rustfs`)
- `DATA_ROOT`: Root directory for test data storage (default: `target`)
  - Final path: `${DATA_ROOT}/test-data/${CONTAINER_NAME}`
  - Example: `DATA_ROOT=/tmp` stores data in `/tmp/test-data/rustfs-single/`

### Service Configuration

- `S3_ACCESS_KEY`: Main user access key (default: `rustfsadmin`)
- `S3_SECRET_KEY`: Main user secret key (default: `rustfsadmin`)
- `S3_ALT_ACCESS_KEY`: Alt user access key (default: `rustfsalt`)
- `S3_ALT_SECRET_KEY`: Alt user secret key (default: `rustfsalt`)
- `S3_REGION`: S3 region (default: `us-east-1`)
- `S3_HOST`: S3 service host (default: `127.0.0.1`)
- `S3_PORT`: S3 service port (default: `9000`)

### Test Parameters

- `TEST_MODE`: Test mode (default: `single`)
- `MAXFAIL`: Stop after N failures (default: `1`)
- `XDIST`: Enable parallel execution with N workers (default: `0`, disabled)
- `MARKEXPR`: pytest marker expression for filtering tests (default: exclude unsupported features)

### Configuration Files

- `S3TESTS_CONF_TEMPLATE`: Path to s3tests config template (default: `.github/s3tests/s3tests.conf`)
  - Relative to project root
  - Uses `envsubst` to substitute variables (e.g., `${S3_HOST}`); see the sketch below
- `S3TESTS_CONF`: Path to generated s3tests config (default: `s3tests.conf`)
  - Relative to project root
  - This file is generated from the template before running tests

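A minimal sketch of that substitution step (the `host = …` template line is illustrative, not the real contents of the template):

```bash
# Hypothetical template line in .github/s3tests/s3tests.conf:
#   host = ${S3_HOST}
export S3_HOST=127.0.0.1
envsubst < .github/s3tests/s3tests.conf > s3tests.conf
# envsubst replaces each ${VAR} with the exported value, so the
# generated s3tests.conf ends up containing: host = 127.0.0.1
```
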
## Examples

### Build Mode (Default)

```bash
# Basic usage - compiles and runs automatically
./scripts/s3-tests/run.sh

# Force rebuild (skip cache check)
./scripts/s3-tests/run.sh --no-cache

# Run all tests, stop after 50 failures
MAXFAIL=50 ./scripts/s3-tests/run.sh

# Enable parallel execution (4 worker processes)
XDIST=4 ./scripts/s3-tests/run.sh

# Use custom data storage location
DATA_ROOT=/tmp ./scripts/s3-tests/run.sh
```

### Binary File Mode

```bash
# First compile the binary
cargo build --release

# Run with default path
DEPLOY_MODE=binary ./scripts/s3-tests/run.sh

# Specify custom path
DEPLOY_MODE=binary RUSTFS_BINARY=/path/to/rustfs ./scripts/s3-tests/run.sh

# Use binary with parallel tests
DEPLOY_MODE=binary XDIST=4 ./scripts/s3-tests/run.sh
```

### Docker Mode

```bash
# Build Docker image and run in container
DEPLOY_MODE=docker ./scripts/s3-tests/run.sh

# Run with parallel tests
DEPLOY_MODE=docker XDIST=4 ./scripts/s3-tests/run.sh
```

### Existing Service Mode

```bash
# Connect to locally running service
DEPLOY_MODE=existing ./scripts/s3-tests/run.sh

# Connect to remote service
DEPLOY_MODE=existing S3_HOST=192.168.1.100 S3_PORT=9000 ./scripts/s3-tests/run.sh

# Test specific features
DEPLOY_MODE=existing MARKEXPR="not lifecycle and not versioning" ./scripts/s3-tests/run.sh
```

### Custom Configuration Files

```bash
# Use custom config template and output path
S3TESTS_CONF_TEMPLATE=my-configs/s3tests.conf.template \
S3TESTS_CONF=my-s3tests.conf \
./scripts/s3-tests/run.sh
```

## Test Results

Test results are saved in the `artifacts/s3tests-${TEST_MODE}/` directory (default: `artifacts/s3tests-single/`):

- `junit.xml`: Test results in JUnit format (compatible with CI/CD systems)
- `pytest.log`: Detailed pytest logs with full test output
- `rustfs-${TEST_MODE}/rustfs.log`: RustFS service logs
- `rustfs-${TEST_MODE}/inspect.json`: Service metadata (PID, binary path, mode, etc.)

View results:

```bash
# Check test summary
cat artifacts/s3tests-single/junit.xml | grep -E "testsuite|testcase"

# View test logs
less artifacts/s3tests-single/pytest.log

# View service logs
less artifacts/s3tests-single/rustfs-single/rustfs.log
```

## Prerequisites

### Required

- Docker (for docker mode only)
- Python 3 (for running s3-tests)
- `nc` (netcat) or `timeout` command (for port checking)

### Auto-installed

The script will automatically install the following dependencies if missing:

- `awscurl` (for S3 API calls and user provisioning)
- `tox` (for running s3-tests in isolated environment)
- `gettext-base` (for `envsubst` - config file generation)
  - On macOS: `brew install gettext`
  - On Linux: `apt-get install gettext-base`

### Proxy Configuration

The script automatically disables proxy for localhost requests to avoid interference. All proxy environment variables (`http_proxy`, `https_proxy`, etc.) are unset at script startup.

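Concretely, the startup lines in `run.sh` amount to:

```bash
export NO_PROXY="127.0.0.1,localhost,::1"
export no_proxy="${NO_PROXY}"
unset http_proxy https_proxy HTTP_PROXY HTTPS_PROXY
```
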
## Troubleshooting

### Port Already in Use

If port 9000 is already in use, change the port:

```bash
S3_PORT=9001 ./scripts/s3-tests/run.sh
```

**Note**: The script automatically checks if the port is available before starting (except in `existing` mode). If the port is in use, the script will exit with an error message.

### Container Start Failure

Check Docker logs:

```bash
docker logs rustfs-single
```

### Binary Not Found

For binary mode, ensure the binary is compiled:

```bash
cargo build --release
```

Or specify the correct path:

```bash
DEPLOY_MODE=binary RUSTFS_BINARY=/path/to/rustfs ./scripts/s3-tests/run.sh
```

### Test Timeout

Increase wait time or check service status:

```bash
curl http://127.0.0.1:9000/health
```

### Existing Service Not Accessible

For existing mode, ensure the service is running and accessible:

```bash
# Check if service is reachable
curl http://192.168.1.100:9000/health

# Verify S3 API is responding
awscurl --service s3 --region us-east-1 \
  --access_key rustfsadmin \
  --secret_key rustfsadmin \
  -X GET "http://192.168.1.100:9000/"
```

## Workflow Integration

This script mirrors the GitHub Actions workflow defined in `.github/workflows/e2e-s3tests.yml`.

The script follows the same steps:

1. Check port availability (skip for existing mode)
2. Build/start RustFS service (varies by deployment mode)
3. Wait for service to be fully ready (see the sketch after this list):
   - Check process/container status
   - Check port is listening
   - Wait for "server started successfully" log message
   - Verify S3 API is responding
4. Generate s3tests configuration from template
5. Provision alt user for s3-tests via admin API
6. Run ceph s3-tests with tox
7. Collect logs and results

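A condensed sketch of the step-3 readiness gate (the full version in `run.sh` retries each check in a loop and falls back to the `/health` endpoint):

```bash
# Log-based check: the startup banner is the most reliable signal.
grep -q "server started successfully" "artifacts/s3tests-single/rustfs-single/rustfs.log"

# API-based check: an authenticated ListBuckets call must return bucket XML.
awscurl --service s3 --region "${S3_REGION}" \
  --access_key "${S3_ACCESS_KEY}" --secret_key "${S3_SECRET_KEY}" \
  -X GET "http://${S3_HOST}:${S3_PORT}/" | grep -q "<ListAllMyBucketsResult"
```
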
### Key Improvements Over Workflow

- **Smart compilation**: Skips rebuild if binary is recent (< 30 minutes); see the sketch below
- **Better health checks**: Log-based readiness detection instead of blind waiting
- **Port conflict detection**: Prevents conflicts before starting service
- **Proxy handling**: Automatically disables proxy for localhost
- **Configurable paths**: All paths (data, configs, artifacts) can be customized

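The age check behind the smart-compilation shortcut boils down to the following (condensed from `run.sh`; `stat -c %Y` is the Linux form, `stat -f %m` the macOS fallback):

```bash
FILE_MTIME=$(stat -c %Y "${RUSTFS_BINARY}" 2>/dev/null || stat -f %m "${RUSTFS_BINARY}" 2>/dev/null)
AGE_MINUTES=$(( ($(date +%s) - FILE_MTIME) / 60 ))
if [ "${AGE_MINUTES}" -lt 30 ] && [ "${NO_CACHE}" != "true" ]; then
    SHOULD_BUILD=false  # binary is recent enough; skip cargo build
fi
```
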
## See Also

- [GitHub Actions Workflow](../../.github/workflows/e2e-s3tests.yml)
- [S3 Tests Configuration](../../.github/s3tests/s3tests.conf)
- [Ceph S3 Tests Repository](https://github.com/ceph/s3-tests)
637
scripts/s3-tests/run.sh
Executable file
@@ -0,0 +1,637 @@
#!/bin/bash
# Copyright 2024 RustFS Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

set -euo pipefail

# Disable proxy for localhost requests to avoid interference
export NO_PROXY="127.0.0.1,localhost,::1"
export no_proxy="${NO_PROXY}"
unset http_proxy https_proxy HTTP_PROXY HTTPS_PROXY

# Configuration
S3_ACCESS_KEY="${S3_ACCESS_KEY:-rustfsadmin}"
S3_SECRET_KEY="${S3_SECRET_KEY:-rustfsadmin}"
S3_ALT_ACCESS_KEY="${S3_ALT_ACCESS_KEY:-rustfsalt}"
S3_ALT_SECRET_KEY="${S3_ALT_SECRET_KEY:-rustfsalt}"
S3_REGION="${S3_REGION:-us-east-1}"
S3_HOST="${S3_HOST:-127.0.0.1}"
S3_PORT="${S3_PORT:-9000}"

# Test parameters
TEST_MODE="${TEST_MODE:-single}"
MAXFAIL="${MAXFAIL:-1}"
XDIST="${XDIST:-0}"
MARKEXPR="${MARKEXPR:-not lifecycle and not versioning and not s3website and not bucket_logging and not encryption}"

# Configuration file paths
S3TESTS_CONF_TEMPLATE="${S3TESTS_CONF_TEMPLATE:-.github/s3tests/s3tests.conf}"
S3TESTS_CONF="${S3TESTS_CONF:-s3tests.conf}"

# Service deployment mode: "build", "binary", "docker", or "existing"
# - "build": Compile with cargo build --release and run (default)
# - "binary": Use pre-compiled binary (RUSTFS_BINARY path or default)
# - "docker": Build Docker image and run in container
# - "existing": Use already running service (skip start, use S3_HOST and S3_PORT)
DEPLOY_MODE="${DEPLOY_MODE:-build}"
RUSTFS_BINARY="${RUSTFS_BINARY:-}"
NO_CACHE="${NO_CACHE:-false}"

# Directories
SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
PROJECT_ROOT="$(cd "${SCRIPT_DIR}/../.." && pwd)"
ARTIFACTS_DIR="${PROJECT_ROOT}/artifacts/s3tests-${TEST_MODE}"
CONTAINER_NAME="rustfs-${TEST_MODE}"
NETWORK_NAME="rustfs-net"
DATA_ROOT="${DATA_ROOT:-target}"
DATA_DIR="${PROJECT_ROOT}/${DATA_ROOT}/test-data/${CONTAINER_NAME}"
RUSTFS_PID=""

# Colors for output
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[1;33m'
BLUE='\033[0;34m'
NC='\033[0m' # No Color

log_info() {
    echo -e "${GREEN}[INFO]${NC} $*"
}

log_warn() {
    echo -e "${YELLOW}[WARN]${NC} $*"
}

log_error() {
    echo -e "${RED}[ERROR]${NC} $*"
}

show_usage() {
    cat << EOF
Usage: $0 [OPTIONS]

Options:
  -h, --help    Show this help message
  --no-cache    Force rebuild even if binary exists and is recent (for build mode)

Deployment Modes (via DEPLOY_MODE environment variable):
  1. build    - Compile with cargo build --release and run (default)
  2. binary   - Use pre-compiled binary (use RUSTFS_BINARY or default: ./target/release/rustfs)
  3. docker   - Build Docker image and run in container
  4. existing - Use already running service (specify S3_HOST and S3_PORT)

Environment Variables:
  DEPLOY_MODE           - Deployment mode: "build", "binary", "docker", or "existing" (default: "build")
  RUSTFS_BINARY         - Path to RustFS binary (for binary mode, default: ./target/release/rustfs)
  S3_HOST               - S3 service host (default: 127.0.0.1)
  S3_PORT               - S3 service port (default: 9000)
  S3_ACCESS_KEY         - Main user access key (default: rustfsadmin)
  S3_SECRET_KEY         - Main user secret key (default: rustfsadmin)
  S3_ALT_ACCESS_KEY     - Alt user access key (default: rustfsalt)
  S3_ALT_SECRET_KEY     - Alt user secret key (default: rustfsalt)
  MAXFAIL               - Stop after N failures (default: 1)
  XDIST                 - Enable parallel execution with N workers (default: 0)
  MARKEXPR              - pytest marker expression (default: exclude unsupported features)
  S3TESTS_CONF_TEMPLATE - Path to s3tests config template (default: .github/s3tests/s3tests.conf)
  S3TESTS_CONF          - Path to generated s3tests config (default: s3tests.conf)
  DATA_ROOT             - Root directory for test data storage (default: target)
                          Final path: ${DATA_ROOT}/test-data/${CONTAINER_NAME}

Notes:
  - In build mode, if the binary exists and was compiled less than 30 minutes ago,
    compilation will be skipped unless --no-cache is specified.

Examples:
  # Use build mode (default)
  $0

  # Use pre-compiled binary
  DEPLOY_MODE=binary RUSTFS_BINARY=./target/release/rustfs $0

  # Use Docker
  DEPLOY_MODE=docker $0

  # Use existing service
  DEPLOY_MODE=existing S3_HOST=192.168.1.100 S3_PORT=9000 $0

  # Force rebuild in build mode
  $0 --no-cache

EOF
}

cleanup() {
    log_info "Cleaning up..."

    if [ "${DEPLOY_MODE}" = "docker" ]; then
        docker rm -f "${CONTAINER_NAME}" >/dev/null 2>&1 || true
        docker network rm "${NETWORK_NAME}" >/dev/null 2>&1 || true
    elif [ "${DEPLOY_MODE}" = "build" ] || [ "${DEPLOY_MODE}" = "binary" ]; then
        if [ -n "${RUSTFS_PID}" ]; then
            log_info "Stopping RustFS process (PID: ${RUSTFS_PID})..."
            kill "${RUSTFS_PID}" 2>/dev/null || true
            wait "${RUSTFS_PID}" 2>/dev/null || true
        fi
    fi
}

trap cleanup EXIT

# Parse command line arguments
while [[ $# -gt 0 ]]; do
    case $1 in
        -h|--help)
            show_usage
            exit 0
            ;;
        --no-cache)
            NO_CACHE="true"
            ;;
        *)
            log_error "Unknown option: $1"
            show_usage
            exit 1
            ;;
    esac
    shift
done

cd "${PROJECT_ROOT}"

# Check port availability (except for existing mode)
if [ "${DEPLOY_MODE}" != "existing" ]; then
    if nc -z "${S3_HOST}" "${S3_PORT}" 2>/dev/null || timeout 1 bash -c "cat < /dev/null > /dev/tcp/${S3_HOST}/${S3_PORT}" 2>/dev/null; then
        log_error "Port ${S3_PORT} is already in use on ${S3_HOST}"
        log_error "Please stop the service using this port or use DEPLOY_MODE=existing to connect to an existing service"
        exit 1
    fi
fi

# Start RustFS based on deployment mode
if [ "${DEPLOY_MODE}" = "existing" ]; then
    log_info "Using existing RustFS service at ${S3_HOST}:${S3_PORT}"
    log_info "Skipping service startup..."
elif [ "${DEPLOY_MODE}" = "binary" ]; then
    # Determine binary path
    if [ -z "${RUSTFS_BINARY}" ]; then
        RUSTFS_BINARY="${PROJECT_ROOT}/target/release/rustfs"
    fi

    if [ ! -f "${RUSTFS_BINARY}" ]; then
        log_error "RustFS binary not found at: ${RUSTFS_BINARY}"
        log_info "Please compile the binary first:"
        log_info "  cargo build --release"
        log_info "Or specify the path: RUSTFS_BINARY=/path/to/rustfs $0"
        exit 1
    fi

    log_info "Using pre-compiled binary: ${RUSTFS_BINARY}"

    # Prepare data and artifacts directories
    mkdir -p "${DATA_DIR}"
    mkdir -p "${ARTIFACTS_DIR}/rustfs-${TEST_MODE}"
    for i in {0..3}; do
        mkdir -p "${DATA_DIR}/rustfs${i}"
    done

    # Start RustFS binary
    log_info "Starting RustFS binary..."
    RUST_LOG="${RUST_LOG:-info}" "${RUSTFS_BINARY}" \
        --address "${S3_HOST}:${S3_PORT}" \
        --access-key "${S3_ACCESS_KEY}" \
        --secret-key "${S3_SECRET_KEY}" \
        "${DATA_DIR}/rustfs0" "${DATA_DIR}/rustfs1" "${DATA_DIR}/rustfs2" "${DATA_DIR}/rustfs3" \
        > "${ARTIFACTS_DIR}/rustfs-${TEST_MODE}/rustfs.log" 2>&1 &

    RUSTFS_PID=$!
    log_info "RustFS started with PID: ${RUSTFS_PID}"

elif [ "${DEPLOY_MODE}" = "build" ]; then
    RUSTFS_BINARY="${PROJECT_ROOT}/target/release/rustfs"

    # Check if we should skip compilation
    SHOULD_BUILD=true
    if [ -f "${RUSTFS_BINARY}" ]; then
        # Get file modification time in seconds since epoch
        # Try Linux format first, fallback to macOS format
        FILE_MTIME=$(stat -c %Y "${RUSTFS_BINARY}" 2>/dev/null || stat -f %m "${RUSTFS_BINARY}" 2>/dev/null)

        if [ -n "${FILE_MTIME}" ]; then
            CURRENT_TIME=$(date +%s)
            AGE_SECONDS=$((CURRENT_TIME - FILE_MTIME))
            AGE_MINUTES=$((AGE_SECONDS / 60))

            if [ "${AGE_MINUTES}" -lt 30 ] && [ "${NO_CACHE}" != "true" ]; then
                log_info "Binary exists and is recent (${AGE_MINUTES} minutes old), skipping compilation."
                log_info "Use --no-cache to force rebuild."
                SHOULD_BUILD=false
            fi
        fi
    fi

    if [ "${SHOULD_BUILD}" = "true" ]; then
        if [ "${NO_CACHE}" = "true" ]; then
            log_info "Building RustFS with cargo build --release (--no-cache forced)..."
        else
            log_info "Building RustFS with cargo build --release..."
        fi
        cargo build --release || {
            log_error "Failed to build RustFS"
            exit 1
        }

        if [ ! -f "${RUSTFS_BINARY}" ]; then
            log_error "RustFS binary not found at: ${RUSTFS_BINARY}"
            log_error "Build completed but binary not found"
            exit 1
        fi

        log_info "Build successful, using binary: ${RUSTFS_BINARY}"
    else
        log_info "Using existing binary: ${RUSTFS_BINARY}"
    fi

    # Prepare data and artifacts directories
    mkdir -p "${DATA_DIR}"
    mkdir -p "${ARTIFACTS_DIR}/rustfs-${TEST_MODE}"
    for i in {0..3}; do
        mkdir -p "${DATA_DIR}/rustfs${i}"
    done

    # Start RustFS binary
    log_info "Starting RustFS binary..."
    RUST_LOG="${RUST_LOG:-info}" "${RUSTFS_BINARY}" \
        --address "${S3_HOST}:${S3_PORT}" \
        --access-key "${S3_ACCESS_KEY}" \
        --secret-key "${S3_SECRET_KEY}" \
        "${DATA_DIR}/rustfs0" "${DATA_DIR}/rustfs1" "${DATA_DIR}/rustfs2" "${DATA_DIR}/rustfs3" \
        > "${ARTIFACTS_DIR}/rustfs-${TEST_MODE}/rustfs.log" 2>&1 &

    RUSTFS_PID=$!
    log_info "RustFS started with PID: ${RUSTFS_PID}"

elif [ "${DEPLOY_MODE}" = "docker" ]; then
    # Build Docker image and run in container
    log_info "Building RustFS Docker image..."
    DOCKER_BUILDKIT=1 docker build \
        --platform linux/amd64 \
        -t rustfs-ci \
        -f Dockerfile.source . || {
        log_error "Failed to build Docker image"
        exit 1
    }

    # Create network
    log_info "Creating Docker network..."
    docker network inspect "${NETWORK_NAME}" >/dev/null 2>&1 || docker network create "${NETWORK_NAME}"

    # Remove existing container
    log_info "Removing existing container (if any)..."
    docker rm -f "${CONTAINER_NAME}" >/dev/null 2>&1 || true

    # Start RustFS container
    log_info "Starting RustFS container..."
    docker run -d --name "${CONTAINER_NAME}" \
        --network "${NETWORK_NAME}" \
        -p "${S3_PORT}:9000" \
        -e RUSTFS_ADDRESS=0.0.0.0:9000 \
        -e RUSTFS_ACCESS_KEY="${S3_ACCESS_KEY}" \
        -e RUSTFS_SECRET_KEY="${S3_SECRET_KEY}" \
        -e RUSTFS_VOLUMES="/data/rustfs0 /data/rustfs1 /data/rustfs2 /data/rustfs3" \
        -v "/tmp/${CONTAINER_NAME}:/data" \
        rustfs-ci || {
        log_error "Failed to start container"
        docker logs "${CONTAINER_NAME}" || true
        exit 1
    }
else
    log_error "Invalid DEPLOY_MODE: ${DEPLOY_MODE}"
    log_error "Must be one of: build, binary, docker, existing"
    show_usage
    exit 1
fi

# Step 5: Wait for RustFS ready (improved health check)
log_info "Waiting for RustFS to be ready..."

if [ "${DEPLOY_MODE}" = "docker" ]; then
    log_info "Step 1: Waiting for container to start..."
    for i in {1..30}; do
        if [ "$(docker inspect -f '{{.State.Running}}' "${CONTAINER_NAME}" 2>/dev/null)" == "true" ]; then
            break
        fi
        sleep 1
    done

    if [ "$(docker inspect -f '{{.State.Running}}' "${CONTAINER_NAME}" 2>/dev/null)" != "true" ]; then
        log_error "Container failed to start"
        docker logs "${CONTAINER_NAME}" || true
        exit 1
    fi
elif [ "${DEPLOY_MODE}" = "build" ] || [ "${DEPLOY_MODE}" = "binary" ]; then
    log_info "Step 1: Waiting for process to start..."
    for i in {1..10}; do
        if kill -0 "${RUSTFS_PID}" 2>/dev/null; then
            break
        fi
        sleep 1
    done

    if ! kill -0 "${RUSTFS_PID}" 2>/dev/null; then
        log_error "RustFS process failed to start"
        if [ -f "${ARTIFACTS_DIR}/rustfs-${TEST_MODE}/rustfs.log" ]; then
            tail -50 "${ARTIFACTS_DIR}/rustfs-${TEST_MODE}/rustfs.log"
        fi
        exit 1
    fi
elif [ "${DEPLOY_MODE}" = "existing" ]; then
    log_info "Step 1: Checking existing service..."
    # Skip container/process checks for existing service
fi

log_info "Step 2: Waiting for port ${S3_PORT} to be listening..."
for i in {1..30}; do
    if nc -z "${S3_HOST}" "${S3_PORT}" 2>/dev/null || timeout 1 bash -c "cat < /dev/null > /dev/tcp/${S3_HOST}/${S3_PORT}" 2>/dev/null; then
        log_info "Port ${S3_PORT} is listening"
        break
    fi
    sleep 1
done

log_info "Step 3: Waiting for service to fully initialize..."

# Check if log file indicates server is started (most reliable method)
check_server_ready_from_log() {
    if [ -f "${ARTIFACTS_DIR}/rustfs-${TEST_MODE}/rustfs.log" ]; then
        if grep -q "server started successfully" "${ARTIFACTS_DIR}/rustfs-${TEST_MODE}/rustfs.log" 2>/dev/null; then
            return 0
        fi
    fi
    return 1
}

# Test S3 API readiness
test_s3_api_ready() {
    # Try awscurl first if available
    if command -v awscurl >/dev/null 2>&1; then
        export PATH="$HOME/.local/bin:$PATH"
        RESPONSE=$(awscurl --service s3 --region "${S3_REGION}" \
            --access_key "${S3_ACCESS_KEY}" \
            --secret_key "${S3_SECRET_KEY}" \
            -X GET "http://${S3_HOST}:${S3_PORT}/" 2>&1)

        if echo "${RESPONSE}" | grep -q "<ListAllMyBucketsResult"; then
            return 0
        fi
    fi

    # Fallback: test /health endpoint (this bypasses readiness gate)
    if curl -sf "http://${S3_HOST}:${S3_PORT}/health" >/dev/null 2>&1; then
        # Health endpoint works, but we need to verify S3 API works too
        # Wait a bit more for FullReady to be fully set
        return 1 # Not fully ready yet, but progressing
    fi

    return 1 # Not ready
}

# First, wait for server to log "server started successfully"
log_info "Waiting for server startup completion..."
for i in {1..30}; do
    if check_server_ready_from_log; then
        log_info "Server startup complete detected in log"
        # Give it a moment for FullReady to be set (happens just before the log message)
        sleep 2
        break
    fi
    if [ $i -eq 30 ]; then
        log_warn "Server startup message not found in log after 30 attempts, continuing with API check..."
    fi
    sleep 1
done

# Now verify S3 API is actually responding
log_info "Verifying S3 API readiness..."
for i in {1..20}; do
    if test_s3_api_ready; then
        log_info "RustFS is fully ready (S3 API responding)"
        break
    fi

    if [ $i -eq 20 ]; then
        log_error "RustFS S3 API readiness check timed out"
        log_error "Checking service status..."

        # Check if server is still running
        if [ "${DEPLOY_MODE}" = "build" ] || [ "${DEPLOY_MODE}" = "binary" ]; then
            if [ -n "${RUSTFS_PID}" ] && ! kill -0 "${RUSTFS_PID}" 2>/dev/null; then
                log_error "RustFS process is not running (PID: ${RUSTFS_PID})"
                if [ -f "${ARTIFACTS_DIR}/rustfs-${TEST_MODE}/rustfs.log" ]; then
                    log_error "Last 50 lines of RustFS log:"
                    tail -50 "${ARTIFACTS_DIR}/rustfs-${TEST_MODE}/rustfs.log"
                fi
                exit 1
            fi
        fi

        # Show last test attempt
        log_error "Last S3 API test:"
        if command -v awscurl >/dev/null 2>&1; then
            export PATH="$HOME/.local/bin:$PATH"
            awscurl --service s3 --region "${S3_REGION}" \
                --access_key "${S3_ACCESS_KEY}" \
                --secret_key "${S3_SECRET_KEY}" \
                -X GET "http://${S3_HOST}:${S3_PORT}/" 2>&1 | head -20
        else
            curl -v "http://${S3_HOST}:${S3_PORT}/health" 2>&1 | head -10
        fi

        # Output logs based on deployment mode
        if [ "${DEPLOY_MODE}" = "docker" ]; then
            docker logs "${CONTAINER_NAME}" 2>&1 | tail -50
        elif [ "${DEPLOY_MODE}" = "build" ] || [ "${DEPLOY_MODE}" = "binary" ]; then
            if [ -f "${ARTIFACTS_DIR}/rustfs-${TEST_MODE}/rustfs.log" ]; then
                log_error "Last 50 lines of RustFS log:"
                tail -50 "${ARTIFACTS_DIR}/rustfs-${TEST_MODE}/rustfs.log"
            fi
        fi
        exit 1
    fi

    sleep 1
done

# Step 6: Generate s3tests config
log_info "Generating s3tests config..."
mkdir -p "${ARTIFACTS_DIR}"

# Resolve template and output paths (relative to PROJECT_ROOT)
TEMPLATE_PATH="${PROJECT_ROOT}/${S3TESTS_CONF_TEMPLATE}"
CONF_OUTPUT_PATH="${PROJECT_ROOT}/${S3TESTS_CONF}"

# Check if template exists
if [ ! -f "${TEMPLATE_PATH}" ]; then
    log_error "S3tests config template not found: ${TEMPLATE_PATH}"
    log_error "Please specify S3TESTS_CONF_TEMPLATE environment variable"
    exit 1
fi

# Install gettext for envsubst if not available
if ! command -v envsubst >/dev/null 2>&1; then
    log_info "Installing gettext-base for envsubst..."
    if command -v apt-get >/dev/null 2>&1; then
        sudo apt-get update && sudo apt-get install -y gettext-base || {
            log_warn "Failed to install gettext-base, trying alternative method"
        }
    elif command -v brew >/dev/null 2>&1; then
        brew install gettext || {
            log_warn "Failed to install gettext via brew"
        }
    fi
fi

log_info "Using template: ${TEMPLATE_PATH}"
log_info "Generating config: ${CONF_OUTPUT_PATH}"

export S3_HOST
envsubst < "${TEMPLATE_PATH}" > "${CONF_OUTPUT_PATH}" || {
    log_error "Failed to generate s3tests config"
    exit 1
}

# Step 7: Provision s3-tests alt user
log_info "Provisioning s3-tests alt user..."
if ! command -v awscurl >/dev/null 2>&1; then
    python3 -m pip install --user --upgrade pip awscurl || {
        log_error "Failed to install awscurl"
        exit 1
    }
    export PATH="$HOME/.local/bin:$PATH"
fi

# Admin API requires AWS SigV4 signing
awscurl \
    --service s3 \
    --region "${S3_REGION}" \
    --access_key "${S3_ACCESS_KEY}" \
    --secret_key "${S3_SECRET_KEY}" \
    -X PUT \
    -H 'Content-Type: application/json' \
    -d "{\"secretKey\":\"${S3_ALT_SECRET_KEY}\",\"status\":\"enabled\",\"policy\":\"readwrite\"}" \
    "http://${S3_HOST}:${S3_PORT}/rustfs/admin/v3/add-user?accessKey=${S3_ALT_ACCESS_KEY}" || {
    log_error "Failed to add alt user"
    exit 1
}

# Explicitly attach built-in policy via policy mapping
awscurl \
    --service s3 \
    --region "${S3_REGION}" \
    --access_key "${S3_ACCESS_KEY}" \
    --secret_key "${S3_SECRET_KEY}" \
    -X PUT \
    "http://${S3_HOST}:${S3_PORT}/rustfs/admin/v3/set-user-or-group-policy?policyName=readwrite&userOrGroup=${S3_ALT_ACCESS_KEY}&isGroup=false" || {
    log_error "Failed to set user policy"
    exit 1
}

# Sanity check: alt user can list buckets
awscurl \
    --service s3 \
    --region "${S3_REGION}" \
    --access_key "${S3_ALT_ACCESS_KEY}" \
    --secret_key "${S3_ALT_SECRET_KEY}" \
    -X GET \
    "http://${S3_HOST}:${S3_PORT}/" >/dev/null || {
    log_error "Alt user cannot list buckets"
    exit 1
}

log_info "Alt user provisioned successfully"

# Step 8: Prepare s3-tests
log_info "Preparing s3-tests..."
if [ ! -d "${PROJECT_ROOT}/s3-tests" ]; then
    git clone --depth 1 https://github.com/ceph/s3-tests.git "${PROJECT_ROOT}/s3-tests" || {
        log_error "Failed to clone s3-tests"
        exit 1
    }
fi

cd "${PROJECT_ROOT}/s3-tests"

# Install tox if not available
if ! command -v tox >/dev/null 2>&1; then
    python3 -m pip install --user --upgrade pip tox || {
        log_error "Failed to install tox"
        exit 1
    }
    export PATH="$HOME/.local/bin:$PATH"
fi

# Step 9: Run ceph s3-tests
log_info "Running ceph s3-tests..."
mkdir -p "${ARTIFACTS_DIR}"

XDIST_ARGS=""
if [ "${XDIST}" != "0" ]; then
    # Add pytest-xdist to requirements.txt so tox installs it inside its virtualenv
    echo "pytest-xdist" >> requirements.txt
    XDIST_ARGS="-n ${XDIST} --dist=loadgroup"
fi

# Resolve config path (absolute path for tox)
CONF_OUTPUT_PATH="${PROJECT_ROOT}/${S3TESTS_CONF}"

# Run tests from s3tests/functional
S3TEST_CONF="${CONF_OUTPUT_PATH}" \
tox -- \
    -vv -ra --showlocals --tb=long \
    --maxfail="${MAXFAIL}" \
    --junitxml="${ARTIFACTS_DIR}/junit.xml" \
    ${XDIST_ARGS} \
    s3tests/functional/test_s3.py \
    -m "${MARKEXPR}" \
    2>&1 | tee "${ARTIFACTS_DIR}/pytest.log"

TEST_EXIT_CODE=${PIPESTATUS[0]}

# Step 10: Collect RustFS logs
log_info "Collecting RustFS logs..."
mkdir -p "${ARTIFACTS_DIR}/rustfs-${TEST_MODE}"

if [ "${DEPLOY_MODE}" = "docker" ]; then
    docker logs "${CONTAINER_NAME}" > "${ARTIFACTS_DIR}/rustfs-${TEST_MODE}/rustfs.log" 2>&1 || true
    docker inspect "${CONTAINER_NAME}" > "${ARTIFACTS_DIR}/rustfs-${TEST_MODE}/inspect.json" || true
elif [ "${DEPLOY_MODE}" = "build" ] || [ "${DEPLOY_MODE}" = "binary" ]; then
    # Logs are already being written to file, just copy metadata
echo "{\"pid\": ${RUSTFS_PID}, \"binary\": \"${RUSTFS_BINARY}\", \"mode\": \"binary\"}" > "${ARTIFACTS_DIR}/rustfs-${TEST_MODE}/inspect.json" || true
|
||||
elif [ "${DEPLOY_MODE}" = "existing" ]; then
|
||||
log_info "Skipping log collection for existing service"
|
||||
echo "{\"host\": \"${S3_HOST}\", \"port\": ${S3_PORT}, \"mode\": \"existing\"}" > "${ARTIFACTS_DIR}/rustfs-${TEST_MODE}/inspect.json" || true
|
||||
fi
|
||||
|
||||
# Summary
|
||||
if [ ${TEST_EXIT_CODE} -eq 0 ]; then
|
||||
log_info "Tests completed successfully!"
|
||||
log_info "Results: ${ARTIFACTS_DIR}/junit.xml"
|
||||
log_info "Logs: ${ARTIFACTS_DIR}/pytest.log"
|
||||
else
|
||||
log_error "Tests failed with exit code ${TEST_EXIT_CODE}"
|
||||
log_info "Check results: ${ARTIFACTS_DIR}/junit.xml"
|
||||
log_info "Check logs: ${ARTIFACTS_DIR}/pytest.log"
|
||||
log_info "Check RustFS logs: ${ARTIFACTS_DIR}/rustfs-${TEST_MODE}/rustfs.log"
|
||||
fi
|
||||
|
||||
exit ${TEST_EXIT_CODE}
|
||||